hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c0e286930dd876ebcf8ffacab4cb8d169bb79d5 | 1,219 | py | Python | docker/features/migration_utils.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | ["Apache-2.0"] | 1 | 2020-04-30T07:09:43.000Z | 2020-04-30T07:09:43.000Z | docker/features/migration_utils.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | ["Apache-2.0"] | null | null | null | docker/features/migration_utils.py | krdpk17/twitter-neo4j | bb7e62743651082726db373d118dcc90cce48532 | ["Apache-2.0"] | 1 | 2020-05-14T22:33:31.000Z | 2020-05-14T22:33:31.000Z |
import pdb
import argparse

from config.load_config import load_config

load_config()


class CommandOptions:
    dmcheckuserscreenname = None


class Migration:
    def __init__(self):
        # tested
        self.cmd_options = CommandOptions()

    def read_command(self):
        # tested
        parser = argparse.ArgumentParser(description='Migration tool')
        parser.add_argument('--dmcheckuserscreenname', metavar="Screen name of DM user",
                            help='DM check service old data migration to service based approach')
        args = parser.parse_args()
        self.cmd_options.dmcheckuserscreenname = args.dmcheckuserscreenname

    def handle_migration(self):
        # tested
        if self.cmd_options.dmcheckuserscreenname:
            self.__handle_dmcheck_migration()

    def __handle_dmcheck_migration(self):
        from libs.cypher_store_migration_tools import DMCheckCypherStoreMigrationIntf
        dm_check_migration = DMCheckCypherStoreMigrationIntf(self.cmd_options.dmcheckuserscreenname)
        dm_check_migration.migrate_user_link_to_client()


def main():
    migration = Migration()
    migration.read_command()
    migration.handle_migration()
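
# Example invocation (the screen name is illustrative):
#   python migration_utils.py --dmcheckuserscreenname some_user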

if __name__ == "__main__":
    main()
| 32.945946 | 100 | 0.725185 | 126 | 1,219 | 6.666667 | 0.420635 | 0.033333 | 0.066667 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.201805 | 1,219 | 37 | 101 | 32.945946 | 0.863309 | 0.014766 | 0 | 0 | 0 | 0 | 0.10759 | 0.019183 | 0 | 0 | 0 | 0 | 0 | 1 | 0.185185 | false | 0 | 0.148148 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c0f7bcba9525064478def51911f22f43bace868 | 2,055 | py | Python | django_js_reverse/tests/unit_tests.py | hyperair/django-js-reverse | d3e6648778b9eda69acf25616c0bfb9274f5e7b4 | ["BSD-3-Clause"] | null | null | null | django_js_reverse/tests/unit_tests.py | hyperair/django-js-reverse | d3e6648778b9eda69acf25616c0bfb9274f5e7b4 | ["BSD-3-Clause"] | null | null | null | django_js_reverse/tests/unit_tests.py | hyperair/django-js-reverse | d3e6648778b9eda69acf25616c0bfb9274f5e7b4 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import os
import sys

os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'

from django.test.client import Client
from django.utils import unittest
from django.test import TestCase
from django.test.utils import override_settings


class JSReverseViewTestCase(TestCase):
    client = None
    urls = 'django_js_reverse.tests.test_urls'

    def setUp(self):
        self.client = Client()

    def test_view_no_url_args(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(response, "'test_no_url_args', ['test_no_url_args/', []]")

    def test_view_one_url_arg(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(response, "'test_one_url_args', ['test_one_url_args/%(arg_one)s/', ['arg_one']]")

    def test_view_two_url_args(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(
            response, "'test_two_url_args', ['test_two_url_args/%(arg_one)s\\u002D%(arg_two)s/', ['arg_one','arg_two']]")

    def test_level1_namespaced_url(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(response, "'ns1:foo', ['ns1/foo/', []]")

    def test_level2_namespaced_url(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(response, "'ns1:ns2:bar', ['ns1/ns2/bar/', []]")

    @override_settings(JS_REVERSE_JS_VAR_NAME='Foo')
    def test_js_var_name_changed_valid(self):
        response = self.client.post('/jsreverse/')
        self.assertContains(response, 'this.Foo = (function () {')

    @override_settings(JS_REVERSE_JS_VAR_NAME='1test')
    def test_js_var_name_changed_invalid(self):
        from django.core.exceptions import ImproperlyConfigured
        with self.assertRaises(ImproperlyConfigured):
            self.client.post('/jsreverse/')


if __name__ == '__main__':
    sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..') + os.sep)
    unittest.main()
| 35.431034 | 121 | 0.686618 | 264 | 2,055 | 5.022727 | 0.291667 | 0.060332 | 0.073906 | 0.121418 | 0.426848 | 0.405732 | 0.371041 | 0.319759 | 0.319759 | 0.273756 | 0 | 0.008159 | 0.164964 | 2,055 | 57 | 122 | 36.052632 | 0.764569 | 0.019951 | 0 | 0.146341 | 0 | 0.02439 | 0.22664 | 0.091451 | 0 | 0 | 0 | 0 | 0.170732 | 1 | 0.195122 | false | 0 | 0.195122 | 0 | 0.463415 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c0ffd20ed5cf644e56c8cdc6baf64027cec15dd | 4,034 | py | Python | scripts/test_and_fuse.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | ["MIT"] | 7 | 2019-07-15T08:49:38.000Z | 2019-11-30T01:09:12.000Z | scripts/test_and_fuse.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | ["MIT"] | 10 | 2019-07-17T00:00:29.000Z | 2022-03-11T23:50:36.000Z | scripts/test_and_fuse.py | ubiquity6/MVSNet | 7dc026acb019d270e79de7be4a5cfcb33863127f | ["MIT"] | 3 | 2019-08-02T09:06:32.000Z | 2021-07-06T11:49:55.000Z |
import os
import subprocess
import argparse
import logging
import utils as ut
import time

"""
A simple script for running prediction on multiple sessions, fusing the resulting point clouds,
and then uploading the results to sketchfab, as well as copying them to a more convenient location
on the file system.
"""

logger = logging.getLogger(__name__)


def write_results(args, urls):
    try:
        with open(args.results_path, 'a+') as f:
            new_line = '{}, {}, {}, {}, {}, {} \n'.format(
                args.model_dir, args.ckpt_step, urls, args.prob_threshold, args.disp_threshold, args.num_consistent)
            f.write(new_line)
    except Exception as e:
        logger.error('Failed to write results with exception {}'.format(e))
        pass  # While it is too bad if results fail to write, we don't want to stop the process over it


def test_and_fuse(args, dense_folder, ply_folder):
    if args.no_test is not True:
        ut.test(dense_folder, args.ckpt_step, args.model_dir)
    ut.clear_old_points(dense_folder)
    ut.fuse(dense_folder, args.fusibile_path, args.prob_threshold,
            args.disp_threshold, args.num_consistent)
    ply_paths = ut.get_fusion_plys(dense_folder)
    urls = ut.handle_plys(ply_paths, dense_folder, ply_folder, args)
    print('Sketchfab urls {}'.format(urls))
    write_results(args, urls)
    return urls


def main(args):
    all_urls = []
    start_time = time.time()
    dir_name = '{}_prob_{}_disp_{}_consis_{}'.format(start_time,
                                                     args.prob_threshold, args.disp_threshold, args.num_consistent)
    ply_folder = os.path.join(args.ply_folder, dir_name)
    print('Final PLY files will be written to {}'.format(ply_folder))
    os.mkdir(ply_folder)
    # If test_data_root is a session dir we test on that, otherwise we test on subdirs
    if os.path.isfile(os.path.join(args.test_folder_root, 'covisibility.json')):
        urls = test_and_fuse(args, args.test_folder_root, ply_folder)
        all_urls.append(urls)
    else:
        for d in os.listdir(args.test_folder_root):
            dense_folder = os.path.join(args.test_folder_root, d)
            try:
                urls = test_and_fuse(args, dense_folder, ply_folder)
                all_urls.append(urls)
            except Exception as e:
                print('Failed to test and fuse on dense folder {}'.format(dense_folder))
    print('Models uploaded to:', all_urls)
    write_results(args, all_urls)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt_step', type=str,
                        help="The ckpt_step of saved model -- see test.py")
    parser.add_argument('--model_dir', type=str,
                        help="The directory of saved model -- see test.py")
    parser.add_argument('--test_folder_root', type=str,
                        default='../data/atlas', help="The directory where the sessions to be tested are located")
    parser.add_argument('--fusibile_path', type=str,
                        default='/home/chrisheinrich/fusibile/fusibile', help="The path to the compiled fusibile executable")
    parser.add_argument('--prob_threshold', type=float, default='0.8')
    parser.add_argument('--ply_folder', type=str,
                        default='/home/chrisheinrich/fused-point-clouds', help="The root directory for storing the saved point cloud output")
    parser.add_argument('--disp_threshold', type=float, default='0.25')
    parser.add_argument('--num_consistent', type=float, default='3')
    parser.add_argument('--no_test', action='store_true',
                        help='Will not run testing, but only postprocessing, if flag is set')
    parser.add_argument('--test_only', action='store_true',
                        help='Will only run testing, and no fusing or uploading of point clouds.')
    parser.add_argument('--results_path', type=str,
                        default='./sketchfab_links.csv', help="The path to where to write the sketchfab results")
    args = parser.parse_args()
    main(args)
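
# Example invocation (paths and checkpoint step are illustrative):
#   python test_and_fuse.py --model_dir ./checkpoints --ckpt_step 100000 \
#       --test_folder_root ../data/atlas --prob_threshold 0.8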
| 47.458824 | 141 | 0.658899 | 558 | 4,034 | 4.562724 | 0.306452 | 0.038885 | 0.073449 | 0.02828 | 0.24077 | 0.15868 | 0.141791 | 0.119796 | 0.092302 | 0.042419 | 0 | 0.001933 | 0.23054 | 4,034 | 84 | 142 | 48.02381 | 0.818299 | 0.041646 | 0 | 0.086957 | 0 | 0 | 0.259137 | 0.034075 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0.014493 | 0.072464 | 0 | 0.130435 | 0.057971 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c11e850d2be5882a095619304bc91ffdaa33b4e | 7,176 | py | Python | pyportall/api/models/geopandas.py | INSPIDE/pyportall | 4dfd0e8908714c15e25e46eff29d6c8eb42a486e | ["MIT"] | null | null | null | pyportall/api/models/geopandas.py | INSPIDE/pyportall | 4dfd0e8908714c15e25e46eff29d6c8eb42a486e | ["MIT"] | 6 | 2021-02-08T12:34:15.000Z | 2021-07-29T15:24:02.000Z | pyportall/api/models/geopandas.py | INSPIDE/pyportall | 4dfd0e8908714c15e25e46eff29d6c8eb42a486e | ["MIT"] | null | null | null |
"""Portall's GeoDataFrame wrappers."""
from __future__ import annotations

import geopandas as gpd
from typing import Optional
from pydantic.types import UUID4
from pydantic import BaseModel, Field

from pyportall.api.engine.core import APIClient, ENDPOINT_DATAFRAMES
from pyportall.api.models.geojson import FeatureCollection, Feature, Polygon
from pyportall.exceptions import ValidationError


class PortallDataFrame(gpd.GeoDataFrame):
    """ GeoDataFrame with Portall superpowers. """

    def __init__(self, client: APIClient, name: Optional[str] = None, id: Optional[UUID4] = None, description: Optional[str] = None, *args, **kwargs) -> None:
        """Class constructor to attach the corresponding API client.

        Args:
            client: API client object to be used to send requests to the dataframe API.
            name: Dataframe name in Portall.
            id: Dataframe ID in Portall.
            description: Dataframe description in Portall.
        """
        super().__init__(*args, **kwargs)  # Needs to go first, otherwise you get a RecursionError from Pandas

        self.client = client
        self.name = name
        self.id = id
        self.description = description

    @staticmethod
    def from_gdf(gdf: gpd.GeoDataFrame, client: APIClient, name: Optional[str] = None, id: Optional[UUID4] = None, description: Optional[str] = None) -> PortallDataFrame:
        """Build from GeoDataFrame.

        Return a PortallDataFrame object out of a standard GeoPandas' GeoDataFrame.

        Args:
            gdf: GeoDataFrame to build the new PortallDataFrame object from.
            client: API client object to be used to send requests to the dataframe API.
            name: Dataframe name in Portall.
            id: Dataframe ID in Portall.
            description: Dataframe description in Portall.

        Returns:
            A new PortallDataFrame object.
        """
        pdf = PortallDataFrame(client, name=name, id=id, description=description)
        pdf.__dict__.update(gdf.__dict__)
        return pdf

    @staticmethod
    def from_geojson(geojson: FeatureCollection, client: APIClient, name: Optional[str] = None, id: Optional[UUID4] = None, description: Optional[str] = None) -> PortallDataFrame:
        """Build from GeoJSON.

        Return a PortallDataFrame object out of a GeoJSON feature collection.

        Args:
            geojson: FeatureCollection GeoJSON to build the new PortallDataFrame object from.
            client: API client object to be used to send requests to the dataframe API.
            name: Dataframe name in Portall.
            id: Dataframe ID in Portall.
            description: Dataframe description in Portall.

        Returns:
            A new PortallDataFrame object.
        """
        return PortallDataFrame.from_gdf(gpd.GeoDataFrame.from_features(features=geojson.dict()["features"], crs="EPSG:4326"), client, name=name, id=id, description=description)

    @staticmethod
    def from_api(pdf_api: PortallDataFrameAPI, client: APIClient) -> PortallDataFrame:
        """Build from a Portall dataframe as returned directly by Portall's API.

        Return a PortallDataFrame object out of a Portall dataframe as returned directly by Portall's API.

        Args:
            pdf_api: PortallDataFrameAPI object to build the new PortallDataFrame object from.
            client: API client object to be used to send requests to the dataframe API.

        Returns:
            A new PortallDataFrame object.
        """
        return PortallDataFrame.from_geojson(pdf_api.geojson, client, name=pdf_api.name, id=pdf_api.id, description=pdf_api.description)

    def save(self) -> None:
        """Persist dataframe in Portall.

        Creates or updates an equivalent, remote PortallDataFrame object in Portall.
        """
        try:
            pdf_api = PortallDataFrameAPI(id=getattr(self, "id", None), name=getattr(self, "name"), description=getattr(self, "description", None), geojson=FeatureCollection.parse_raw(self.to_json()))
        except AttributeError:
            raise ValidationError

        if pdf_api.id is None:
            self.client.post(ENDPOINT_DATAFRAMES, body=pdf_api.json(exclude_none=True))
        else:
            self.client.put(f"{ENDPOINT_DATAFRAMES}{pdf_api.id}/", body=pdf_api.json(exclude_none=True))

    def delete(self) -> None:
        """Delete dataframe in Portall.

        Deletes the remote PortallDataFrame object in Portall. It will not delete the actual Python object.
        """
        try:
            pdf_api = PortallDataFrameAPI(id=getattr(self, "id", None), name=getattr(self, "name"), description=getattr(self, "description", ""), geojson=self.to_json())
        except AttributeError:
            raise ValidationError

        self.client.delete(f"{ENDPOINT_DATAFRAMES}{pdf_api.id}/")
        self.id = None
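
# Minimal usage sketch of the class above (client construction and the
# GeoDataFrame `gdf` are assumed to exist; arguments are illustrative):
#
#   client = APIClient()
#   pdf = PortallDataFrame.from_gdf(gdf, client, name="My dataframe")
#   pdf.save()    # POSTs a new dataframe, since pdf.id is None
#   pdf.delete()  # removes it again and resets pdf.id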

class PortallDataFrameAPI(BaseModel):
    """ Representation of a Portall dataframe straight from the API. """

    id: Optional[UUID4] = Field(None, example="df30e466-1f68-42e5-8f4c-eceb1ebda89a", description="Portall ID of the saved dataframe in question.")
    name: str = Field(..., example="Population")
    description: Optional[str] = Field("", example="Population information in my trade areas.")
    geojson: FeatureCollection = Field(..., example={
        "type": "FeatureCollection",
        "features": [
            Feature(geometry=Polygon(coordinates=[[[-3.705759292, 40.428465661], [-3.705876855, 40.428428953], [-3.705893649, 40.428328537], [-3.705792879, 40.428264828], [-3.705675317, 40.428301536], [-3.705658523, 40.428401952], [-3.705759292, 40.428465661]]]), properties={"id": 631507574776148991, "value": 80.76923076915}),
            Feature(geometry=Polygon(coordinates=[[[-3.705843269, 40.428629786], [-3.705960832, 40.428593078], [-3.705977625, 40.428492662], [-3.705876855, 40.428428953], [-3.705759292, 40.428465661], [-3.705742499, 40.428566077], [-3.705843269, 40.428629786]]]), properties={"id": 631507574776151039, "value": 126.92307692295})
        ]
    })

    class Config:
        schema_extra = {
            "example": {
                "id": "df30e466-1f68-42e5-8f4c-eceb1ebda89a",
                "name": "Population",
                "description": "Population information in my trade areas.",
                "geojson": {
                    "type": "FeatureCollection",
                    "features": [
                        Feature(geometry=Polygon(coordinates=[[[-3.705759292, 40.428465661], [-3.705876855, 40.428428953], [-3.705893649, 40.428328537], [-3.705792879, 40.428264828], [-3.705675317, 40.428301536], [-3.705658523, 40.428401952], [-3.705759292, 40.428465661]]]), properties={"id": 631507574776148991, "value": 80.76923076915}),
                        Feature(geometry=Polygon(coordinates=[[[-3.705843269, 40.428629786], [-3.705960832, 40.428593078], [-3.705977625, 40.428492662], [-3.705876855, 40.428428953], [-3.705759292, 40.428465661], [-3.705742499, 40.428566077], [-3.705843269, 40.428629786]]]), properties={"id": 631507574776151039, "value": 126.92307692295})
                    ]
                }
            }
        }
| 49.489655 | 340 | 0.659978 | 799 | 7,176 | 5.866083 | 0.216521 | 0.024963 | 0.019202 | 0.026883 | 0.654363 | 0.612119 | 0.600597 | 0.524643 | 0.506507 | 0.506507 | 0 | 0.136036 | 0.226589 | 7,176 | 144 | 341 | 49.833333 | 0.708468 | 0.271739 | 0 | 0.25 | 0 | 0 | 0.093109 | 0.028712 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.117647 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c14b2e0a56bed2d4d83e59bf655df2bd19c3e4d | 853 | py | Python | classifier/classify.py | alexander7161/FaceGen | c1697a8bfc3c551a3dc2bc45078e8e4e5ae41368 | ["MIT"] | 1 | 2019-12-11T14:21:59.000Z | 2019-12-11T14:21:59.000Z | classifier/classify.py | alexander7161/FaceGen | c1697a8bfc3c551a3dc2bc45078e8e4e5ae41368 | ["MIT"] | 31 | 2019-12-11T12:29:46.000Z | 2022-03-12T00:20:52.000Z | classifier/classify.py | alexander7161/FaceGen | c1697a8bfc3c551a3dc2bc45078e8e4e5ae41368 | ["MIT"] | null | null | null |
from multiclass_model import MulticlassMultiLabelModel
"""
Classify a single image file from the filesystem.
"""
parser = ArgumentParser()
parser.add_argument('--runname', '-n', dest='run_name',
type=str,
help='Name for this run, will otherwise not try to load model.')
parser.add_argument('--file', '-f', dest='file',
type=str, default="./face_data/female_senior/users_reHt5vV4soc2BtN5cYUJpSgUClk1_faces_4kbr2K6XTV8UmNXvEDkF.jpg",
help='File to test')
parser.add_argument('--m', '-m', dest='model',
type=str, default="cnn",
help='declare what model to use.')
args = parser.parse_args()
model = MulticlassMultiLabelModel(
epochs=1, batch_size=1, run_name=args.run_name)
model.load_weights()
print(model.predict(args.file))
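
# Example invocation (run name and image path are illustrative):
#   python classify.py -n my_training_run -f ./face_data/some_face.jpg -m cnn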
| 34.12 | 132 | 0.645955 | 101 | 853 | 5.306931 | 0.574257 | 0.050373 | 0.095149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016616 | 0.223916 | 853 | 24 | 133 | 35.541667 | 0.793051 | 0 | 0 | 0 | 0 | 0 | 0.287688 | 0.114322 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c1614ff444bb3635f1507d1bf14312b36ddae29 | 621 | py | Python | PycharmProjects/OpenCV/Dhairya_OpenCV/10_TemplateMatching.py | dhairyashah1/Eklavya20-CatchPracticeBot | 60434bf5e280d7495eab75b21566bd1eb3bbd14e | ["Unlicense"] | 6 | 2021-03-29T10:25:39.000Z | 2021-06-03T18:13:57.000Z | PycharmProjects/OpenCV/Dhairya_OpenCV/10_TemplateMatching.py | meshtag/Eklavya20-CatchPracticeBot | f0e625768aa49cd43df9fec379c8d7919be784b9 | ["Unlicense"] | null | null | null | PycharmProjects/OpenCV/Dhairya_OpenCV/10_TemplateMatching.py | meshtag/Eklavya20-CatchPracticeBot | f0e625768aa49cd43df9fec379c8d7919be784b9 | ["Unlicense"] | 1 | 2021-01-27T13:03:06.000Z | 2021-01-27T13:03:06.000Z |
import numpy as np
import cv2

img = cv2.imread("opencv-template-matching-python-tutorial[1].jpg")
template = cv2.imread("opencv-template-for-matching[1].jpg", 0)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
w, h = template.shape[::-1]
# w = width, h = height

res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
# matchTemplate returns a single-channel (grayscale) result map, so we convert
# the image to gray to match the grayscale template
# args: image, template, matching-method
threshold = 0.8
loc = np.where(res >= threshold)
for pt in zip(*loc[::-1]):
    cv2.rectangle(img, pt, (pt[0]+w, pt[1]+h), (255, 255, 0), 4)

cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 34.5 | 67 | 0.731079 | 107 | 621 | 4.196262 | 0.504673 | 0.040089 | 0.066815 | 0.10245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053571 | 0.098229 | 621 | 18 | 68 | 34.5 | 0.748214 | 0.190016 | 0 | 0 | 0 | 0 | 0.169661 | 0.163673 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c16ce8056608e7cc7b6acf8f32232a293b97088 | 425 | py | Python | examples/do_report.py | adamczarnecki/tradedoubler_api_client | d209428c18276ea4c488b0106926ae9de7abb235 | ["MIT"] | null | null | null | examples/do_report.py | adamczarnecki/tradedoubler_api_client | d209428c18276ea4c488b0106926ae9de7abb235 | ["MIT"] | null | null | null | examples/do_report.py | adamczarnecki/tradedoubler_api_client | d209428c18276ea4c488b0106926ae9de7abb235 | ["MIT"] | null | null | null |
from tradedoubler_api_client import Tradedoubler
import pprint

if __name__ == '__main__':
    pp = pprint.PrettyPrinter(indent=4, compact=True, sort_dicts=False)
    td = Tradedoubler('credentials.json')
    report = td.reporting().get_transactions(fromDate='20210601', toDate='20210610')
    report.filter_sales()
    report.csv(path='reports')
    for transaction in report.items:
        pp.pprint(transaction)
| 28.333333 | 84 | 0.729412 | 50 | 425 | 5.94 | 0.78 | 0.053872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047222 | 0.152941 | 425 | 14 | 85 | 30.357143 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0.110588 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c16d47b41feddc992dbdd6379a059841d159bb5 | 4,107 | py | Python | welib/vortilib/elements/examples/InviscidVortexPatch.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | ["MIT"] | 24 | 2019-07-24T23:37:10.000Z | 2022-03-30T20:40:40.000Z | welib/vortilib/elements/examples/InviscidVortexPatch.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | ["MIT"] | null | null | null | welib/vortilib/elements/examples/InviscidVortexPatch.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | ["MIT"] | 11 | 2019-03-14T13:47:04.000Z | 2022-03-31T15:47:27.000Z |
"""
Vorticity and tangential velocity for a 2D inviscid vortex patch
See:
 [1] Chapter 33, p.402, Branlard - Wind turbine aerodynamics and vorticity based methods, Springer 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

from vortilib.tools.colors import fColrs
from vortilib.tools.clean_exceptions import *
from vortilib.tools.curves import streamQuiver
from vortilib.elements.InviscidVortexPatch import *

# --- Plot vorticity distribution for different k values
fig, ax = plt.subplots(1, 1, sharey=False, figsize=(6.4, 4.8))  # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
r = np.linspace(0, 2, 100)
theta = r*0
for i, k in enumerate([1/2, 1, 2, 4]):
    omega = ivp_omega(r, theta, k=k, polarIn=True)
    ax.plot(r, omega, color=fColrs(i+1), label=r'$k={}$'.format(k))
ax.set_xlabel('$r$ [m]')
ax.set_ylabel(r'$\omega_z$ [1/s]')
ax.autoscale(tight=True)
ax.legend()
ax.tick_params(direction='in')
ax.set_title('Inviscid Vortex Patch - Vorticity')

# --- Plot tangential velocity for different k values
fig, ax = plt.subplots(1, 1, sharey=False, figsize=(6.4, 4.8))  # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
r = np.linspace(0, 2, 100)
theta = r*0
ax.plot(r, r/2, 'k--', label=r'$r/2$ slope')
for i, k in enumerate([1/2, 1, 2, 4]):
    ur, utheta = ivp_u(r, theta, k=k, polarIn=True, polarOut=True)
    ax.plot(r, utheta, color=fColrs(i+1), label=r'$k={}$'.format(k))
ax.set_xlabel('$r$ [m]')
ax.set_ylabel(r'$u_\theta$ [m/s]')
ax.autoscale(tight=True)
ax.set_ylim([0, 0.35])
ax.legend()
ax.tick_params(direction='in')
ax.set_title('Inviscid Vortex Patch - Velocity')

# --- Plot circulation
fig, ax = plt.subplots(1, 1, sharey=False, figsize=(6.4, 4.8))  # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
r = np.linspace(0, 2, 100)
for i, k in enumerate([1/2, 1, 2, 4]):
    Gamma = ivp_Gamma(r, k=k)
    ax.plot(r, Gamma/np.pi, color=fColrs(i+1), label=r'$k={}$'.format(k))
ax.set_xlabel('$r$ [m]')
ax.set_ylabel(r'$\Gamma/\pi$ [m$^2$/s]')
ax.autoscale(tight=True)
ax.set_ylim([0, 0.7])
ax.legend()
ax.tick_params(direction='in')
ax.set_title('Inviscid Vortex Patch - Circulation')

# --- Plot velocity field
k = 1
Gamma = ivp_Gamma([3], k=k)
# Control points
nX = 100
nY = 101
minSpeed = 0
maxSpeed = 1  # Scaled by max
vX = np.linspace(-4, 4, nX)
vY = np.linspace(-4, 4, nY)
XCP, YCP = np.meshgrid(vX, vY)
Xcp = XCP.flatten()
Ycp = YCP.flatten()
Zcp = Xcp*0
# Velocity field
Ux, Uy, _ = ivp_u(Xcp, Ycp, k=k)
Ux = Ux.reshape(XCP.shape)
Uy = Uy.reshape(XCP.shape)
Speed = np.sqrt((Ux**2+Uy**2))
print('min: ', np.min(Speed.ravel()), ' - max: ', np.max(Speed.ravel()))
Speed = Speed / np.max(Speed)  # TODO can easily be computed analytically
# Plot
fig, ax = plt.subplots(1, 1, sharey=False, figsize=(6.2, 4.6))  # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.98, top=0.96, bottom=0.12, hspace=0.20, wspace=0.20)
im = ax.contourf(XCP, YCP, Speed, levels=np.linspace(minSpeed, maxSpeed, 250), vmin=minSpeed, vmax=maxSpeed)
cb = fig.colorbar(im)
yseed = np.linspace(0.1, 3.8, 15)
start = np.array([yseed*0, yseed])
sp = ax.streamplot(vX, vY, Ux, Uy, color='k', start_points=start.T, linewidth=0.7, density=30, arrowstyle='-')
qv = streamQuiver(ax, sp, n=7, scale=40, angles='xy')
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
ax.set_aspect('equal', 'box')
ax.set_title('Inviscid Vortex Patch - Velocity Field')


# --- Check that the circulation matches the analytical value
def circulationSurvey(r, nTheta=100):
    theta = np.linspace(0, 2*np.pi, nTheta+1)
    dTheta = theta[1]-theta[0]
    Xcp = r*np.cos(theta)
    Ycp = r*np.sin(theta)
    Zcp = 0*Xcp
    Ux, Uy, _ = ivp_u(Xcp, Ycp, k=k)
    Ut = Uy * np.cos(theta) - Ux * np.sin(theta)
    GammaTheory = ivp_Gamma([r], k=k)[0]
    # GammaCalc = 2*np.pi * r*Ut[0]
    GammaCalc = r * np.trapz(Ut, theta)
    return GammaCalc, GammaTheory


print(circulationSurvey(0.1))
print(circulationSurvey(0.5))
print(circulationSurvey(0.9))
print(circulationSurvey(1.0))
print(circulationSurvey(2.0))
print(circulationSurvey(3.0))

plt.show()
| 32.338583 | 106 | 0.685415 | 760 | 4,107 | 3.657895 | 0.253947 | 0.026978 | 0.007554 | 0.010072 | 0.408993 | 0.401079 | 0.373381 | 0.357194 | 0.345683 | 0.345683 | 0 | 0.062517 | 0.1159 | 4,107 | 126 | 107 | 32.595238 | 0.703112 | 0.131239 | 0 | 0.291667 | 0 | 0 | 0.080723 | 0 | 0 | 0 | 0 | 0.007937 | 0 | 1 | 0.010417 | false | 0 | 0.072917 | 0 | 0.09375 | 0.072917 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c17cc77f2646b8be82a171526ec121e5aa52c9c | 7,771 | py | Python | compss/programming_model/bindings/python/src/pycompss/api/multinode.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | compss/programming_model/bindings/python/src/pycompss/api/multinode.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | compss/programming_model/bindings/python/src/pycompss/api/multinode.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs API - MultiNode
==================
This file contains the class MultiNode, needed for the MultiNode
definition through the decorator.
"""
import os
from pycompss.util.typing_helper import typing
from functools import wraps
import pycompss.util.context as context
from pycompss.api.commons.constants import COMPUTING_NODES
from pycompss.api.commons.constants import LEGACY_COMPUTING_NODES
from pycompss.api.commons.implementation_types import IMPL_MULTI_NODE
from pycompss.api.commons.error_msgs import not_in_pycompss
from pycompss.util.exceptions import NotInPyCOMPSsException
from pycompss.util.arguments import check_arguments
from pycompss.api.commons.decorator import process_computing_nodes
from pycompss.api.commons.decorator import keep_arguments
from pycompss.api.commons.decorator import CORE_ELEMENT_KEY
from pycompss.runtime.task.core_element import CE
if __debug__:
    import logging

    logger = logging.getLogger(__name__)

MANDATORY_ARGUMENTS = set()  # type: typing.Set[str]
SUPPORTED_ARGUMENTS = {COMPUTING_NODES}
DEPRECATED_ARGUMENTS = {LEGACY_COMPUTING_NODES}

SLURM_SKIP_VARS = [
    "SLURM_JOBID",
    "SLURM_JOB_ID",
    "SLURM_USER",
    "SLURM_QOS",
    "SLURM_PARTITION",
]


class MultiNode(object):
    """
    This decorator also preserves the argspec, but includes the __init__ and
    __call__ methods, useful on MultiNode task creation.
    """

    __slots__ = [
        "decorator_name",
        "args",
        "kwargs",
        "scope",
        "core_element",
        "core_element_configured",
    ]

    def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
        """Store arguments passed to the decorator.

        self = itself.
        args = not used.
        kwargs = dictionary with the given constraints.

        :param args: Arguments
        :param kwargs: Keyword arguments
        """
        decorator_name = "".join(("@", MultiNode.__name__.lower()))
        # super(MultiNode, self).__init__(decorator_name, *args, **kwargs)
        self.decorator_name = decorator_name
        self.args = args
        self.kwargs = kwargs
        self.scope = context.in_pycompss()
        self.core_element = None  # type: typing.Any
        self.core_element_configured = False
        if self.scope:
            # Check the arguments
            check_arguments(
                MANDATORY_ARGUMENTS,
                DEPRECATED_ARGUMENTS,
                SUPPORTED_ARGUMENTS | DEPRECATED_ARGUMENTS,
                list(kwargs.keys()),
                decorator_name,
            )
            # Get the computing nodes
            process_computing_nodes(decorator_name, self.kwargs)

    def __call__(self, user_function: typing.Callable) -> typing.Callable:
        """Parse and set the multinode parameters within the task core element.

        :param user_function: Function to decorate.
        :return: Decorated function.
        """

        @wraps(user_function)
        def multinode_f(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
            if not self.scope:
                raise NotInPyCOMPSsException(not_in_pycompss("MultiNode"))

            if __debug__:
                logger.debug("Executing multinode_f wrapper.")

            if (
                context.in_master() or context.is_nesting_enabled()
            ) and not self.core_element_configured:
                # master code - or worker with nesting enabled
                self.__configure_core_element__(kwargs)

            if context.in_worker():
                old_slurm_env = set_slurm_environment()

            # Set the computing_nodes variable in kwargs for its usage
            # in @task decorator
            kwargs[COMPUTING_NODES] = self.kwargs[COMPUTING_NODES]

            with keep_arguments(args, kwargs, prepend_strings=True):
                # Call the method
                ret = user_function(*args, **kwargs)

            if context.in_worker():
                reset_slurm_environment(old_slurm_env)

            return ret

        multinode_f.__doc__ = user_function.__doc__
        return multinode_f

    def __configure_core_element__(self, kwargs: dict) -> None:
        """Include the registering info related to @multinode.

        IMPORTANT! Updates self.kwargs[CORE_ELEMENT_KEY].

        :param kwargs: Keyword arguments received from call.
        :return: None
        """
        if __debug__:
            logger.debug("Configuring @multinode core element.")

        # Resolve @multinode specific parameters
        impl_type = IMPL_MULTI_NODE

        if CORE_ELEMENT_KEY in kwargs:
            # Core element has already been created in a higher level decorator
            # (e.g. @constraint)
            kwargs[CORE_ELEMENT_KEY].set_impl_type(impl_type)
        else:
            # @binary is in the top of the decorators stack.
            # Instantiate a new core element object, update it and include
            # it into kwarg
            core_element = CE()
            core_element.set_impl_type(impl_type)
            kwargs[CORE_ELEMENT_KEY] = core_element

        # Set as configured
        self.core_element_configured = True


def set_slurm_environment() -> dict:
    """Set the SLURM environment.

    :return: The old SLURM environment.
    """
    num_nodes = int(os.environ["COMPSS_NUM_NODES"])
    num_threads = int(os.environ["COMPSS_NUM_THREADS"])
    total_processes = num_nodes * num_threads
    hostnames = os.environ["COMPSS_HOSTNAMES"]
    nodes = set(hostnames.split(","))
    old_slurm_env = remove_slurm_environment()
    # set slurm environment with COMPSs variables
    os.environ["SLURM_NTASKS"] = str(total_processes)
    os.environ["SLURM_NNODES"] = str(num_nodes)
    os.environ["SLURM_JOB_NUM_NODES"] = str(num_nodes)
    os.environ["SLURM_NODELIST"] = ",".join(nodes)
    os.environ["SLURM_JOB_NODELIST"] = ",".join(nodes)
    os.environ["SLURM_TASKS_PER_NODE"] = "".join(
        (str(num_threads), "(x", str(num_nodes), ")")
    )
    os.environ["SLURM_CPUS_PER_NODE"] = "".join(
        (str(num_threads), "(x", str(num_nodes), ")")
    )
    return old_slurm_env


def remove_slurm_environment() -> dict:
    """Remove the SLURM variables from the environment.

    :return: The removed SLURM variables.
    """
    old_slurm_env = dict()
    for key, value in list(os.environ.items()):
        if key.startswith("SLURM"):
            if key not in SLURM_SKIP_VARS:
                old_slurm_env[key] = value
                os.environ.pop(key)
    # TODO: ISSUE DETECTED - WAS NOT RETURNING old_slurm_env: ASK JORGE
    return old_slurm_env


def reset_slurm_environment(old_slurm_env: typing.Optional[dict] = None) -> None:
    """Reestablish the SLURM environment.

    :return: None
    """
    if old_slurm_env:
        for key, value in old_slurm_env.items():
            os.environ[key] = value
# ########################################################################### #
# ################## MultiNode DECORATOR ALTERNATIVE NAME ################### #
# ########################################################################### #
multinode = MultiNode
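
# Minimal usage sketch (assumes a PyCOMPSs application; the @task import and
# the function body are illustrative):
#
#   from pycompss.api.task import task
#   from pycompss.api.multinode import multinode
#
#   @multinode(computing_nodes="2")
#   @task()
#   def run_on_two_nodes():
#       ...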
| 33.49569 | 81 | 0.641745 | 901 | 7,771 | 5.281909 | 0.295228 | 0.04854 | 0.025426 | 0.03236 | 0.159067 | 0.122925 | 0.034461 | 0.015129 | 0.015129 | 0.015129 | 0 | 0.002218 | 0.245657 | 7,771 | 231 | 82 | 33.640693 | 0.809621 | 0.293012 | 0 | 0.076923 | 0 | 0 | 0.073935 | 0.004535 | 0 | 0 | 0 | 0.004329 | 0 | 1 | 0.059829 | false | 0 | 0.128205 | 0 | 0.239316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c1997660877b65834f3fc00d39a51db4387c509 | 11,188 | py | Python | tools/interface/LAMMPS.py | wichoi77/alamode | f0b3f4cc9903a807006b8f2d183de77dd461f61c | ["MIT"] | 1 | 2021-01-27T19:05:03.000Z | 2021-01-27T19:05:03.000Z | tools/interface/LAMMPS.py | wichoi77/alamode | f0b3f4cc9903a807006b8f2d183de77dd461f61c | ["MIT"] | null | null | null | tools/interface/LAMMPS.py | wichoi77/alamode | f0b3f4cc9903a807006b8f2d183de77dd461f61c | ["MIT"] | 1 | 2021-04-26T14:01:15.000Z | 2021-04-26T14:01:15.000Z |
#
# LAMMPS.py
#
# Interface to LAMMPS (http://lammps.sandia.gov)
#
# Copyright (c) 2017 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np


def read_lammps_structure(file_in):
    f = open(file_in, 'r')
    header_comment = f.readline()
    common_settings = []
    for line in f:
        if "Atoms" in line:
            break
        common_settings.append(line.rstrip())
    atoms = []
    for line in f:
        if line.strip():
            atoms.append(line.rstrip().split())
    atoms = np.array(atoms)
    nat = len(atoms)
    kd = np.array(atoms[:, 1], dtype=int)
    x = np.array(atoms[:, 2:5], dtype=np.float64)
    return common_settings, nat, x, kd


def write_lammps_structure(prefix, counter, header, nzerofills,
                           common_settings, nat, kd, x_cart, disp):
    filename = prefix + str(counter).zfill(nzerofills) + ".lammps"
    f = open(filename, 'w')
    f.write("%s\n" % header)
    for line in common_settings:
        f.write("%s\n" % line)
    f.write("%s\n\n" % "Atoms")
    for i in range(nat):
        f.write("%5d %3d" % (i + 1, kd[i]))
        for j in range(3):
            f.write("%20.15f" % (x_cart[i][j] + disp[i][j]))
        f.write("\n")
    f.write("\n")
    f.close()


def get_coordinate_LAMMPS(lammps_dump_file):
    add_flag = False
    coord = []
    with open(lammps_dump_file) as f:
        for line in f:
            if "ITEM:" in line and "ITEM: ATOMS id xu yu zu" not in line:
                add_flag = False
                continue
            elif "ITEM: ATOMS id xu yu zu" in line:
                add_flag = True
                continue
            if add_flag:
                if line.strip():
                    entries = line.strip().split()
                    coord_atom = [int(entries[0]),
                                  [float(t) for t in entries[1:]]]
                    coord.append(coord_atom)
    # This sort is necessary since the order of atoms in LAMMPS dump files
    # may change from the input structure file.
    coord_sorted = sorted(coord)
    coord = []
    for coord_atom in coord_sorted:
        coord.extend(coord_atom[1])
    return np.array(coord)


def get_atomicforces_LAMMPS(lammps_dump_file):
    add_flag = False
    force = []
    with open(lammps_dump_file) as f:
        for line in f:
            if "ITEM:" in line and "ITEM: ATOMS id fx fy fz " not in line:
                add_flag = False
                continue
            elif "ITEM: ATOMS id fx fy fz " in line:
                add_flag = True
                continue
            if add_flag:
                if line.strip():
                    entries = line.strip().split()
                    force_atom = [int(entries[0]),
                                  [float(t) for t in entries[1:]]]
                    force.append(force_atom)
    force_sorted = sorted(force)
    force = []
    for force_atom in force_sorted:
        force.extend(force_atom[1])
    return np.array(force)


def get_coordinate_and_force_LAMMPS(lammps_dump_file):
    add_flag = False
    ret = []
    with open(lammps_dump_file) as f:
        for line in f:
            if "ITEM:" in line and "ITEM: ATOMS id xu yu zu fx fy fz" not in line:
                add_flag = False
                continue
            elif "ITEM: ATOMS id xu yu zu fx fy fz" in line:
                add_flag = True
                continue
            if add_flag:
                if line.strip():
                    entries = line.strip().split()
                    data_atom = [int(entries[0]),
                                 [float(t) for t in entries[1:4]],
                                 [float(t) for t in entries[4:]]]
                    ret.append(data_atom)
    # This sort is necessary since the order of atoms in LAMMPS dump files
    # may change from the input structure file.
    ret_sorted = sorted(ret)
    ret_x = []
    ret_f = []
    for ret_atom in ret_sorted:
        ret_x.extend(ret_atom[1])
        ret_f.extend(ret_atom[2])
    return np.array(ret_x), np.array(ret_f)
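
# The dump sections parsed above come from a custom LAMMPS dump written with
# "dump ... custom ... id xu yu zu fx fy fz"; all values below are illustrative:
#
#   ITEM: TIMESTEP
#   0
#   ITEM: NUMBER OF ATOMS
#   2
#   ITEM: BOX BOUNDS pp pp pp
#   ...
#   ITEM: ATOMS id xu yu zu fx fy fz
#   1 0.00000 0.00000 0.00000 0.001 -0.002 0.000
#   2 1.35775 1.35775 1.35775 -0.001 0.002 0.000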

def print_displacements_LAMMPS(lammps_files, nat, x_cart0,
                               conversion_factor, file_offset):
    if file_offset is None:
        disp_offset = np.zeros((nat, 3))
    else:
        _, nat_tmp, x0_offset, _ = read_lammps_structure(file_offset)
        if nat_tmp != nat:
            print("File %s contains too many/few position entries"
                  % file_offset)
        disp_offset = x0_offset - x_cart0

    # Automatic detection of the input format
    is_dumped_file = False
    f = open(lammps_files[0], 'r')
    for line in f:
        if "ITEM: TIMESTEP" in line:
            is_dumped_file = True
            break
    f.close()

    if is_dumped_file:
        # This version supports reading the data from MD trajectory
        for search_target in lammps_files:
            x = get_coordinate_LAMMPS(search_target)
            ndata = len(x) // (3 * nat)
            x = np.reshape(x, (ndata, nat, 3))
            for idata in range(ndata):
                disp = x[idata, :, :] - x_cart0 - disp_offset
                disp *= conversion_factor
                for i in range(nat):
                    print("%20.14f %20.14f %20.14f" % (disp[i, 0],
                                                       disp[i, 1],
                                                       disp[i, 2]))
    else:
        for search_target in lammps_files:
            _, nat_tmp, x_cart, _ = read_lammps_structure(search_target)
            if nat_tmp != nat:
                print("File %s contains too many/few position entries" %
                      search_target)
            disp = x_cart - x_cart0 - disp_offset
            disp *= conversion_factor
            for i in range(nat):
                print("%20.14f %20.14f %20.14f" % (disp[i, 0],
                                                   disp[i, 1],
                                                   disp[i, 2]))


def print_atomicforces_LAMMPS(lammps_files, nat,
                              conversion_factor, file_offset):
    if file_offset is None:
        force_offset = np.zeros((nat, 3))
    else:
        data = get_atomicforces_LAMMPS(file_offset)
        try:
            force_offset = np.reshape(data, (nat, 3))
        except:
            print("File %s contains too many position entries" % file_offset)

    # Automatic detection of the input format
    is_dumped_file = False
    f = open(lammps_files[0], 'r')
    for line in f:
        if "ITEM: TIMESTEP" in line:
            is_dumped_file = True
            break
    f.close()

    for search_target in lammps_files:
        force = get_atomicforces_LAMMPS(search_target)
        ndata = len(force) // (3 * nat)
        force = np.reshape(force, (ndata, nat, 3))
        for idata in range(ndata):
            f = force[idata, :, :] - force_offset
            f *= conversion_factor
            for i in range(nat):
                print("%19.11E %19.11E %19.11E" % (f[i][0], f[i][1], f[i][2]))


def print_displacements_and_forces_LAMMPS(lammps_files, nat,
                                          x_cart0,
                                          conversion_factor_disp,
                                          conversion_factor_force,
                                          file_offset):
    if file_offset is None:
        disp_offset = np.zeros((nat, 3))
        force_offset = np.zeros((nat, 3))
    else:
        x0_offset, force_offset = get_coordinate_and_force_LAMMPS(file_offset)
        try:
            x0_offset = np.reshape(x0_offset, (nat, 3))
            force_offset = np.reshape(force_offset, (nat, 3))
        except:
            print("File %s contains too many/few entries" % file_offset)

        disp_offset = x0_offset - x_cart0

    # Automatic detection of the input format
    is_dumped_file = False
    f = open(lammps_files[0], 'r')
    for line in f:
        if "ITEM: TIMESTEP" in line:
            is_dumped_file = True
            break
    f.close()

    if is_dumped_file:
        # This version supports reading the data from MD trajectory
        for search_target in lammps_files:
            x, force = get_coordinate_and_force_LAMMPS(search_target)
            ndata = len(x) // (3 * nat)
            x = np.reshape(x, (ndata, nat, 3))
            force = np.reshape(force, (ndata, nat, 3))
            for idata in range(ndata):
                disp = x[idata, :, :] - x_cart0 - disp_offset
                disp *= conversion_factor_disp
                f = force[idata, :, :] - force_offset
                f *= conversion_factor_force
                for i in range(nat):
                    print("%20.14f %20.14f %20.14f %20.8E %15.8E %15.8E" % (disp[i, 0],
                                                                            disp[i, 1],
                                                                            disp[i, 2],
                                                                            f[i, 0],
                                                                            f[i, 1],
                                                                            f[i, 2]))


def get_unit_conversion_factor(str_unit):
    Bohr_radius = 0.52917721067
    Rydberg_to_eV = 13.60569253

    disp_conv_factor = 1.0
    energy_conv_factor = 1.0
    force_conv_factor = 1.0

    if str_unit == "ev":
        disp_conv_factor = 1.0
        energy_conv_factor = 1.0
    elif str_unit == "rydberg":
        disp_conv_factor = 1.0 / Bohr_radius
        energy_conv_factor = 1.0 / Rydberg_to_eV
    elif str_unit == "hartree":
        disp_conv_factor = 1.0 / Bohr_radius
        energy_conv_factor = 0.5 / Rydberg_to_eV
    else:
        print("This cannot happen")
        exit(1)

    force_conv_factor = energy_conv_factor / disp_conv_factor
    return disp_conv_factor, force_conv_factor, energy_conv_factor
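
# Worked example of the factors above (LAMMPS metal units use eV and Angstrom;
# the numbers follow directly from the constants defined in the function):
#   get_unit_conversion_factor("rydberg")
#   -> disp:   1 / 0.52917721067 ~= 1.8897  (Angstrom -> Bohr)
#   -> energy: 1 / 13.60569253   ~= 0.0735  (eV -> Ry)
#   -> force:  0.0735 / 1.8897   ~= 0.0389  (eV/Angstrom -> Ry/Bohr)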

def parse(lammps_init, dump_files, dump_file_offset, str_unit,
          print_disp, print_force, print_energy):
    _, nat, x_cart0, _ = read_lammps_structure(lammps_init)
    scale_disp, scale_force, _ = get_unit_conversion_factor(str_unit)

    if print_disp is True and print_force is True:
        print_displacements_and_forces_LAMMPS(dump_files, nat,
                                              x_cart0,
                                              scale_disp,
                                              scale_force,
                                              dump_file_offset)
    elif print_disp is True:
        print_displacements_LAMMPS(dump_files, nat, x_cart0,
                                   scale_disp,
                                   dump_file_offset)
    elif print_force is True:
        print_atomicforces_LAMMPS(dump_files, nat,
                                  scale_force,
                                  dump_file_offset)
    elif print_energy is True:
        print("Error: --get energy is not supported for LAMMPS")
        exit(1)
| 30.568306 | 87 | 0.517876 | 1,376 | 11,188 | 4 | 0.139535 | 0.02907 | 0.014717 | 0.014535 | 0.626635 | 0.572311 | 0.535792 | 0.496366 | 0.454397 | 0.427507 | 0 | 0.025833 | 0.391044 | 11,188 | 365 | 88 | 30.652055 | 0.782034 | 0.06498 | 0 | 0.541833 | 0 | 0.003984 | 0.060728 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039841 | false | 0 | 0.003984 | 0 | 0.063745 | 0.083665 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c19bea1407933bda4ea01581f3e0b456aadbfa3 | 4,550 | py | Python | Week_12/wave2D.py | MECH3750/2021-Tutorials | e813f2a97d9b71ad0e304a35e8c66d21ed63ee0c | ["MIT"] | 5 | 2021-08-03T01:40:40.000Z | 2021-09-14T12:07:28.000Z | Week_12/wave2D.py | MECH3750/2021-Tutorials | e813f2a97d9b71ad0e304a35e8c66d21ed63ee0c | ["MIT"] | null | null | null | Week_12/wave2D.py | MECH3750/2021-Tutorials | e813f2a97d9b71ad0e304a35e8c66d21ed63ee0c | ["MIT"] | 11 | 2021-08-03T02:48:49.000Z | 2021-11-08T06:47:11.000Z |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 11:53:02 2019
@author: uqcleon4
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
class WaveEquationFD:
def __init__(self, N, D, Mx, My):
self.N = N
self.D = D
self.Mx = Mx
self.My = My
self.tend = 6
self.xmin = 0
self.xmax = 2
self.ymin = 0
self.ymax = 2
self.initialization()
self.eqnApprox()
def initialization(self):
self.dx = (self.xmax - self.xmin)/self.Mx
self.dy = (self.ymax - self.ymin)/self.My
self.x = np.arange(self.xmin, self.xmax+self.dx, self.dx)
self.y = np.arange(self.ymin, self.ymax+self.dy, self.dy)
#----- Initial condition -----#
self.u0 = lambda r, s: 0.1*np.sin(np.pi*r)*np.sin(np.pi*s/2)
#----- Initial velocity -----#
self.v0 = lambda a, b: 0
#----- Boundary conditions -----#
self.bxyt = lambda left, right, time: 0
self.dt = (self.tend - 0)/self.N
self.t = np.arange(0, self.tend+self.dt/2, self.dt)
# Assertion for the condition of r < 1, for stability
r = 4*self.D*self.dt**2/(self.dx**2+self.dy**2)
assert r < 1, "r is bigger than 1!"
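        # Worked check of this CFL-type stability bound with the parameters
        # used in main() (N=200, D=0.25, Mx=My=50 on a 2x2 domain):
        #   dx = dy = 2/50 = 0.04, dt = 6/200 = 0.03
        #   r = 4*0.25*0.03**2 / (0.04**2 + 0.04**2) = 0.28125 < 1
        # so the default configuration is comfortably stable.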

    def eqnApprox(self):
        #----- Approximation equation properties -----#
        self.rx = self.D*self.dt**2/self.dx**2
        self.ry = self.D*self.dt**2/self.dy**2
        self.rxy1 = 1 - self.rx - self.ry
        self.rxy2 = self.rxy1*2
        #----- Initialization matrix u for solution -----#
        self.u = np.zeros((self.Mx+1, self.My+1))
        self.ut = np.zeros((self.Mx+1, self.My+1))
        self.u_1 = self.u.copy()
        #----- Fills initial condition and initial velocity -----#
        for j in range(1, self.Mx):
            for i in range(1, self.My):
                self.u[i, j] = self.u0(self.x[i], self.y[j])
                self.ut[i, j] = self.v0(self.x[i], self.y[j])

    def solve_and_animate(self):
        u_2 = np.zeros((self.Mx+1, self.My+1))
        xx, yy = np.meshgrid(self.x, self.y)

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        wframe = None

        k = 0
        nsteps = self.N
        while k < nsteps:
            if wframe:
                ax.collections.remove(wframe)

            self.t = k*self.dt
            #----- Fills in boundary condition along y-axis (vertical, columns 0 and Mx) -----#
            for i in range(self.My+1):
                self.u[i, 0] = self.bxyt(self.x[0], self.y[i], self.t)
                self.u[i, self.Mx] = self.bxyt(
                    self.x[self.Mx], self.y[i], self.t)
            for j in range(self.Mx+1):
                self.u[0, j] = self.bxyt(self.x[j], self.y[0], self.t)
                self.u[self.My, j] = self.bxyt(
                    self.x[j], self.y[self.My], self.t)

            if k == 0:
                for j in range(1, self.My):
                    for i in range(1, self.Mx):
                        self.u[i, j] = (
                            .5*(self.rx*(self.u_1[i-1, j] + self.u_1[i+1, j]))
                            + .5 *
                            (self.ry*(self.u_1[i, j-1] + self.u_1[i, j+1]))
                            + self.rxy1*self.u[i, j] + self.dt*self.ut[i, j]
                        )
            else:
                for j in range(1, self.My):
                    for i in range(1, self.Mx):
                        self.u[i, j] = (
                            self.rx*(self.u_1[i-1, j] + self.u_1[i+1, j])
                            + self.ry*(self.u_1[i, j-1] + self.u_1[i, j+1])
                            + self.rxy2*self.u[i, j] - u_2[i, j]
                        )

            u_2 = self.u_1.copy()
            self.u_1 = self.u.copy()

            wframe = ax.plot_surface(
                xx, yy, self.u, cmap=cm.coolwarm, linewidth=2,
                antialiased=False)
            ax.set_xlim3d(0, 2.0)
            ax.set_ylim3d(0, 2.0)
            ax.set_zlim3d(-1.5, 1.5)
            ax.set_xticks([0, 0.5, 1.0, 1.5, 2.0])
            ax.set_yticks([0, 0.5, 1.0, 1.5, 2.0])
            ax.set_xlabel("x")
            ax.set_ylabel("y")
            ax.set_zlabel("U")
            plt.pause(0.01)
            k += 0.5


def main():
    simulator = WaveEquationFD(200, 0.25, 50, 50)
    simulator.solve_and_animate()
    plt.show()


if __name__ == "__main__":
    main()

# N = 200
# D = 0.25
# Mx = 50
# My = 50
| 29.354839 | 95 | 0.466593 | 692 | 4,550 | 3.008671 | 0.221098 | 0.060038 | 0.0317 | 0.026897 | 0.269933 | 0.227185 | 0.180115 | 0.180115 | 0.150817 | 0.104707 | 0 | 0.054666 | 0.368791 | 4,550 | 154 | 96 | 29.545455 | 0.670265 | 0.105495 | 0 | 0.080808 | 0 | 0 | 0.007915 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 1 | 0.050505 | false | 0 | 0.040404 | 0 | 0.10101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c1bea01e54768ccbaecf68a5c46e239fa4bb01f | 11,443 | py | Python | pillow_heif/reader.py | aptalca/pillow_heif | 75b50b923244d35480ae9942da113287f531e460 | ["Apache-2.0"] | null | null | null | pillow_heif/reader.py | aptalca/pillow_heif | 75b50b923244d35480ae9942da113287f531e460 | ["Apache-2.0"] | null | null | null | pillow_heif/reader.py | aptalca/pillow_heif | 75b50b923244d35480ae9942da113287f531e460 | ["Apache-2.0"] | null | null | null |
"""
Functions and classes for reading HEIF images.
"""

import builtins
import pathlib
from functools import partial
from warnings import warn

from _pillow_heif_cffi import ffi, lib
from .constants import (
    HeifFiletype,
    HeifColorProfileType,
    HeifChroma,
    HeifChannel,
    HeifColorspace,
    HeifBrand,
)
from .error import check_libheif_error
from ._options import options


class HeifFile:
    def __init__(self, *, size: tuple, has_alpha: bool, bit_depth: int, data, stride, **kwargs):
        self.size = size
        self.has_alpha = has_alpha
        self.mode = "RGBA" if has_alpha else "RGB"
        self.bit_depth = bit_depth
        self.data = data
        self.stride = stride
        self.info = {
            "brand": kwargs.get("brand", HeifBrand.UNKNOWN),
            "exif": kwargs.get("exif", None),
            "metadata": kwargs.get("metadata", []),
            "color_profile": kwargs.get("color_profile", {}),
        }
        if self.info["color_profile"]:
            if self.info["color_profile"]["type"] in ("rICC", "prof"):
                self.info["icc_profile"] = self.info["color_profile"]["data"]
            else:
                self.info["nclx_profile"] = self.info["color_profile"]["data"]

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} {self.size[0]}x{self.size[1]} {self.mode} "
            f"with {str(len(self.data)) + ' bytes' if self.data else 'no'} data>"
        )

    def load(self):
        return self  # already loaded

    def close(self) -> None:
        self.data = None


class UndecodedHeifFile(HeifFile):
    def __init__(self, heif_handle, *, apply_transformations: bool, convert_hdr_to_8bit: bool, **kwargs):
        self._heif_handle = heif_handle
        self.apply_transformations = apply_transformations
        self.convert_hdr_to_8bit = convert_hdr_to_8bit
        super().__init__(data=None, stride=None, **kwargs)

    def load(self):
        self.data, self.stride = _read_heif_image(self._heif_handle, self)
        self.close()
        self.__class__ = HeifFile
        return self

    def close(self) -> None:
        # Don't call super().close() here, we don't need to free bytes.
        if hasattr(self, "_heif_handle"):
            del self._heif_handle


def check_heif(fp):
    """
    Wrapper around `libheif.heif_check_filetype`.

    Note: If `fp` contains fewer than 12 bytes, then returns `HeifFiletype.NO`.

    :param fp: A filename (string), pathlib.Path object, file object or bytes.
        The file object must implement ``file.read``, ``file.seek`` and ``file.tell`` methods,
        and be opened in binary mode.
    :returns: `HeifFiletype`
    """
    magic = _get_bytes(fp, 12)
    return HeifFiletype.NO if len(magic) < 12 else lib.heif_check_filetype(magic, len(magic))


def is_supported(fp) -> bool:
    """
    Checks if `fp` contains a supported file type, by calling the :py:func:`~pillow_heif.reader.check_heif` function.

    If `heif_filetype_yes_supported` or `heif_filetype_maybe`, then returns True.
    If `heif_filetype_no`, then returns False.

    OPTIONS
    "strict": `bool` determines what to return for `heif_filetype_yes_unsupported`.
    "avif": `bool` determines whether `avif` files are marked as supported.
    If it is False from the start, then pillow_heif was built without codecs for AVIF and you should not set it to True.
    """
    magic = _get_bytes(fp, 12)
    heif_filetype = check_heif(magic)
    if heif_filetype == HeifFiletype.NO or (not options().avif and magic[8:12] in (b"avif", b"avis")):
        return False
    if heif_filetype in (HeifFiletype.YES_SUPPORTED, HeifFiletype.MAYBE):
        return True
    return not options().strict


def open_heif(fp, *, apply_transformations: bool = True, convert_hdr_to_8bit: bool = True) -> UndecodedHeifFile:
    d = _get_bytes(fp)
    ctx = lib.heif_context_alloc()
    collect = _keep_refs(lib.heif_context_free, data=d)
    ctx = ffi.gc(ctx, collect, size=len(d))
    return _read_heif_context(ctx, d, apply_transformations, convert_hdr_to_8bit)


def read_heif(fp, *, apply_transformations: bool = True, convert_hdr_to_8bit: bool = True) -> HeifFile:
    heif_file = open_heif(
        fp,
        apply_transformations=apply_transformations,
        convert_hdr_to_8bit=convert_hdr_to_8bit,
    )
    return heif_file.load()
def _get_bytes(fp, length=None):
if isinstance(fp, (str, pathlib.Path)):
with builtins.open(fp, "rb") as f:
return f.read(length or -1)
if hasattr(fp, "read"):
offset = fp.tell() if hasattr(fp, "tell") else None
b = fp.read(length or -1)
if offset is not None and hasattr(fp, "seek"):
fp.seek(offset)
return b
return bytes(fp)[:length]
def _keep_refs(destructor, **refs):
"""
    Keep refs to passed arguments until the `inner` callback exists.
This prevents collecting parent objects until all children are collected.
"""
def inner(cdata):
return destructor(cdata)
inner._refs = refs
return inner
def _read_heif_context(ctx, d, apply_transformations: bool, convert_hdr_to_8bit: bool) -> UndecodedHeifFile:
brand = lib.heif_main_brand(d[:12], 12)
error = lib.heif_context_read_from_memory_without_copy(ctx, d, len(d), ffi.NULL)
check_libheif_error(error)
p_handle = ffi.new("struct heif_image_handle **")
error = lib.heif_context_get_primary_image_handle(ctx, p_handle)
check_libheif_error(error)
collect = _keep_refs(lib.heif_image_handle_release, ctx=ctx)
handle = ffi.gc(p_handle[0], collect)
return _read_heif_handle(handle, apply_transformations, convert_hdr_to_8bit, brand=brand)
def _read_heif_handle(handle, apply_transformations: bool, convert_hdr_to_8bit: bool, **kwargs) -> UndecodedHeifFile:
_width = lib.heif_image_handle_get_width(handle)
_height = lib.heif_image_handle_get_height(handle)
_has_alpha = bool(lib.heif_image_handle_has_alpha_channel(handle))
_bit_depth = lib.heif_image_handle_get_luma_bits_per_pixel(handle)
_metadata = _read_metadata(handle)
_exif = _retrieve_exif(_metadata)
_color_profile = _read_color_profile(handle)
return UndecodedHeifFile(
handle,
size=(_width, _height),
has_alpha=_has_alpha,
bit_depth=_bit_depth,
apply_transformations=apply_transformations,
convert_hdr_to_8bit=convert_hdr_to_8bit,
exif=_exif,
metadata=_metadata,
color_profile=_color_profile,
**kwargs,
)
def _read_metadata(handle) -> list:
block_count = lib.heif_image_handle_get_number_of_metadata_blocks(handle, ffi.NULL)
if block_count == 0:
return []
metadata = []
ids = ffi.new("heif_item_id[]", block_count)
lib.heif_image_handle_get_list_of_metadata_block_IDs(handle, ffi.NULL, ids, block_count)
for each_item in ids:
metadata_type = lib.heif_image_handle_get_metadata_type(handle, each_item)
metadata_type = ffi.string(metadata_type).decode()
data_length = lib.heif_image_handle_get_metadata_size(handle, each_item)
if data_length > 0:
p_data = ffi.new("char[]", data_length)
error = lib.heif_image_handle_get_metadata(handle, each_item, p_data)
check_libheif_error(error)
data_buffer = ffi.buffer(p_data, data_length)
data = bytes(data_buffer)
if metadata_type == "Exif":
                data = data[4:]  # skip the 4-byte offset field that precedes the Exif data
metadata.append({"type": metadata_type, "data": data})
return metadata
def _retrieve_exif(metadata: list):
_result = None
_purge = []
for i, v in enumerate(metadata):
if v["type"] == "Exif":
_purge.append(i)
if not _result and v["data"] and v["data"][0:4] == b"Exif":
_result = v["data"]
for e in reversed(_purge):
del metadata[e]
return _result
def _read_color_profile(handle) -> dict:
profile_type = lib.heif_image_handle_get_color_profile_type(handle)
if profile_type == HeifColorProfileType.NOT_PRESENT:
return {}
if profile_type == HeifColorProfileType.NCLX:
_type = "nclx"
pp_data = ffi.new("struct heif_color_profile_nclx **")
data_length = ffi.sizeof("struct heif_color_profile_nclx")
error = lib.heif_image_handle_get_nclx_color_profile(handle, pp_data)
p_data = pp_data[0]
ffi.release(pp_data)
else:
_type = "prof" if profile_type == HeifColorProfileType.PROF else "rICC"
data_length = lib.heif_image_handle_get_raw_color_profile_size(handle)
if data_length == 0:
return {"type": _type, "data": b""}
p_data = ffi.new("char[]", data_length)
error = lib.heif_image_handle_get_raw_color_profile(handle, p_data)
check_libheif_error(error)
data_buffer = ffi.buffer(p_data, data_length)
return {"type": _type, "data": bytes(data_buffer)}
def _read_heif_image(handle, heif_file: UndecodedHeifFile):
colorspace = HeifColorspace.RGB
if heif_file.convert_hdr_to_8bit or heif_file.bit_depth <= 8:
chroma = HeifChroma.INTERLEAVED_RGBA if heif_file.has_alpha else HeifChroma.INTERLEAVED_RGB
else:
if heif_file.has_alpha:
chroma = HeifChroma.INTERLEAVED_RRGGBBAA_BE
else:
chroma = HeifChroma.INTERLEAVED_RRGGBB_BE
p_options = lib.heif_decoding_options_alloc()
p_options = ffi.gc(p_options, lib.heif_decoding_options_free)
p_options.ignore_transformations = int(not heif_file.apply_transformations)
p_options.convert_hdr_to_8bit = int(heif_file.convert_hdr_to_8bit)
p_img = ffi.new("struct heif_image **")
error = lib.heif_decode_image(handle, p_img, colorspace, chroma, p_options)
check_libheif_error(error)
img = p_img[0]
p_stride = ffi.new("int *")
p_data = lib.heif_image_get_plane_readonly(img, HeifChannel.INTERLEAVED, p_stride)
stride = p_stride[0]
data_length = heif_file.size[1] * stride
    # Release the image as soon as no references to p_data are left
collect = partial(_release_heif_image, img)
p_data = ffi.gc(p_data, collect, size=data_length)
    # ffi.buffer always keeps a reference to p_data
data_buffer = ffi.buffer(p_data, data_length)
return data_buffer, stride
def _release_heif_image(img, _p_data=None) -> None:
lib.heif_image_release(img)
# heif_image_handle_get_number_of_thumbnails
# heif_image_handle_get_list_of_thumbnail_IDs
# heif_image_handle_get_thumbnail
# --------------------------------------------------------------------
# DEPRECATED FUNCTIONS.
def check(fp):
warn("Function `check` is deprecated, use `check_heif` instead.", DeprecationWarning)
return check_heif(fp) # pragma: no cover
def open(fp, *, apply_transformations=True, convert_hdr_to_8bit=True): # pylint: disable=redefined-builtin
warn("Function `open` is deprecated, use `open_heif` instead.", DeprecationWarning)
return open_heif(
fp, apply_transformations=apply_transformations, convert_hdr_to_8bit=convert_hdr_to_8bit
) # pragma: no cover
def read(fp, *, apply_transformations=True, convert_hdr_to_8bit=True):
warn("Function `read` is deprecated, use `read_heif` instead.", DeprecationWarning)
return read_heif(
fp, apply_transformations=apply_transformations, convert_hdr_to_8bit=convert_hdr_to_8bit
) # pragma: no cover
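# --------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The file name below is a hypothetical placeholder.
if __name__ == "__main__":  # pragma: no cover
    if is_supported("example.heic"):
        heif_file = read_heif("example.heic")
        print(heif_file.size, heif_file.mode, heif_file.bit_depth, heif_file.stride)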
| 37.765677 | 117 | 0.682339 | 1,540 | 11,443 | 4.744156 | 0.175325 | 0.030797 | 0.036135 | 0.04818 | 0.281276 | 0.238434 | 0.174377 | 0.144676 | 0.127293 | 0.108404 | 0 | 0.005976 | 0.210347 | 11,443 | 302 | 118 | 37.890728 | 0.802568 | 0.133357 | 0 | 0.127273 | 0 | 0.009091 | 0.071713 | 0.010303 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.036364 | 0.013636 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c208d59b07ff9834de306f19efb98e51d775ac7 | 660 | py | Python | excercises/6-0001/problem_sets/ps1/ps1b.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | ["MIT"] | null | null | null | excercises/6-0001/problem_sets/ps1/ps1b.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | ["MIT"] | null | null | null | excercises/6-0001/problem_sets/ps1/ps1b.py | obsessedyouth/simulacra | 530155664daf1aff06cb575c4c4073acbacdb32d | ["MIT"] | null | null | null |
annual_salary = float(input("Enter your annual salary"))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal"))
total_cost = float(input("Enter the cost of your dream home"))
semi_annual_raise = float(input("Enter the semiannual raise, as a decimal"))
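# Added annotation (assumptions restated from the problem statement): the down
# payment is 25% of the total cost, and savings earn a 4% annual return
# compounded monthly, so r below is one month's return on current savings.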
portion_down_payment = 0.25 * total_cost
current_savings = 0
months = 0
while current_savings < portion_down_payment:
if months % 6 == 0 and months != 0:
annual_salary += annual_salary * semi_annual_raise
r = (current_savings * 0.04) / 12
current_savings += ((annual_salary/12) * portion_saved) + r
months += 1
print("Number of months: ", months) | 34.736842 | 86 | 0.716667 | 101 | 660 | 4.49505 | 0.39604 | 0.132159 | 0.132159 | 0.118943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029466 | 0.177273 | 660 | 19 | 87 | 34.736842 | 0.804788 | 0 | 0 | 0 | 0 | 0 | 0.257186 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c2296a7af80752c885721e618ab896d2c0978d7 | 2,110 | py | Python | tests/test_heartbeat.py | maxerbubba/stepfunctions_activity_worker | 846e9a2351f96b20855588845fba8be9a1a72a7b | ["MIT"] | 6 | 2018-11-29T18:37:35.000Z | 2021-06-07T14:14:16.000Z | tests/test_heartbeat.py | maxerbubba/stepfunctions_activity_worker | 846e9a2351f96b20855588845fba8be9a1a72a7b | ["MIT"] | 6 | 2018-11-30T16:14:32.000Z | 2019-10-16T15:41:16.000Z | tests/test_heartbeat.py | maxerbubba/stepfunctions_activity_worker | 846e9a2351f96b20855588845fba8be9a1a72a7b | ["MIT"] | 1 | 2019-10-11T05:00:08.000Z | 2019-10-11T05:00:08.000Z |
"""Tests for stepfunctions_activity_worker.heartbeat.
Since Heartbeat inherits from threading.Timer, we test only the
functionality that we override or extend.
"""
from unittest import mock
import pytest
from stepfunctions_activity_worker.heartbeat import Heartbeat
@pytest.fixture
def heartbeat_args():
"""Return default kwargs for Heartbeat."""
args = (4, mock.Mock())
kwargs = {"args": ("foo", "bar"), "kwargs": {"hello": "world"}}
return args, kwargs
@pytest.fixture(autouse=True)
def patch_threading_timer_inheritance():
"""Patch Heartbeat.start() & Heartbeat.cancel()."""
Heartbeat.start = mock.Mock()
Heartbeat.cancel = mock.Mock()
def test_Heartbeat_enter_calls_start(heartbeat_args):
"""Test Heartbeat calls .start() on enter."""
args, kwargs = heartbeat_args
heartbeat = Heartbeat(*args, **kwargs)
with heartbeat:
heartbeat.start.assert_called_once()
def test_Heartbeat_exit_calls_cancel(heartbeat_args):
"""Test Heartbeat calls .cancel() on exit."""
args, kwargs = heartbeat_args
heartbeat = Heartbeat(*args, **kwargs)
with heartbeat:
pass
heartbeat.cancel.assert_called_once()
def test_Heartbeat_run_calls_function_until_finished(heartbeat_args):
"""Test Heartbeat calls passed function on run."""
args, kwargs = heartbeat_args
heartbeat = Heartbeat(*args, **kwargs)
function = args[1]
heartbeat.finished = mock.Mock()
heartbeat.finished.is_set.side_effect = (False, False, False, True)
heartbeat.run()
calls = [mock.call(*kwargs["args"], **kwargs["kwargs"])] * 3
function.assert_has_calls(calls)
def test_Heartbeat_run_calls_wait_until_finished(heartbeat_args):
"""Test Heartbeat calls wait with provided interval."""
args, kwargs = heartbeat_args
heartbeat = Heartbeat(*args, **kwargs)
interval = args[0]
heartbeat.finished = mock.Mock()
heartbeat.finished.is_set.side_effect = (False, False, False, True)
heartbeat.run()
calls = [mock.call(interval)] * 3
heartbeat.finished.wait.assert_has_calls(calls)
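# Illustrative sketch (added annotation, not part of the test suite): the
# context-manager behaviour exercised above corresponds to usage roughly like
#
#     with Heartbeat(interval, send_heartbeat, args=(task_token,)):
#         do_long_running_work()
#
# where `send_heartbeat`, `task_token` and `do_long_running_work` are
# hypothetical names.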
| 27.051282 | 71 | 0.7109 | 255 | 2,110 | 5.694118 | 0.278431 | 0.125344 | 0.044077 | 0.071625 | 0.466942 | 0.402204 | 0.358127 | 0.297521 | 0.227273 | 0.227273 | 0 | 0.002849 | 0.168246 | 2,110 | 77 | 72 | 27.402597 | 0.824501 | 0.203318 | 0 | 0.390244 | 0 | 0 | 0.021898 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 1 | 0.146341 | false | 0.02439 | 0.073171 | 0 | 0.243902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c2794286b828cae460d295bd29cdfc5c27b425e | 632 | py | Python | python/reverseString.py | l0latgithub/codediary | a0327d2ee1137a542886d0af85129692711cd68a | ["MIT"] | null | null | null | python/reverseString.py | l0latgithub/codediary | a0327d2ee1137a542886d0af85129692711cd68a | ["MIT"] | null | null | null | python/reverseString.py | l0latgithub/codediary | a0327d2ee1137a542886d0af85129692711cd68a | ["MIT"] | null | null | null |
from typing import List
class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
Write a function that reverses a string. The input
string is given as an array of characters char[].
Do not allocate extra space for another array, you must
do this by modifying the input array in-place with O(1) extra memory.
You may assume all the characters consist of printable ascii characters.
"""
lo, hi =0, len(s)-1
while lo<hi:
s[lo],s[hi]=s[hi],s[lo]
lo+=1
hi-=1 | 33.263158 | 80 | 0.571203 | 92 | 632 | 3.923913 | 0.630435 | 0.024931 | 0.027701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012107 | 0.346519 | 632 | 19 | 81 | 33.263158 | 0.861985 | 0.561709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c280a854d5dd0052e5104746e9e594265513c8d | 1,251 | py | Python | otptags.py | azukacchi/scriptsdump | c54d1dcf42ac0e5242786fa4364c7532ed7b5446 | ["MIT"] | 2 | 2021-09-10T03:21:33.000Z | 2021-11-14T20:02:53.000Z | otptags.py | azukacchi/scriptsdump | c54d1dcf42ac0e5242786fa4364c7532ed7b5446 | ["MIT"] | null | null | null | otptags.py | azukacchi/scriptsdump | c54d1dcf42ac0e5242786fa4364c7532ed7b5446 | ["MIT"] | null | null | null |
import re
from bs4 import BeautifulSoup
import os.path
from datetime import date
import requests
import csv
import time
delay = 6
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
filename = 'ao3tags.csv'
filepath = os.path.join(BASE_DIR, filename)
def tagcount():
    urls = []  # list of links to your OTP tag pages; fill in before running
today = date.today()
counts = [today]
for url in urls:
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# works count for a tag
heading = soup.find('h2',{'class':'heading'}).get_text()
regex1 = r'(?=(\d+) Works)'
count = int(re.findall(regex1, heading)[0])
counts.append(count)
        # number of works for each age rating:
        # Teen And Up, General, Not Rated, Mature, Explicit
regex2 = r'(?=\((\d+)\))'
rating = soup.find('dd',{'id':'include_rating_tags'}).get_text()
ratings = re.findall(regex2, rating)
counts.append(', '.join(ratings))
time.sleep(delay)
data = [counts]
with open(filepath, 'a') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerows(data)
if __name__ == '__main__':
tagcount()
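# Added annotation: `urls` in tagcount() is intentionally left empty; fill it
# with AO3 tag pages before running, e.g. the hypothetical
#     urls = ["https://archiveofourown.org/tags/SomeShip/works"]
# Otherwise each run appends only the date to ao3tags.csv.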
| 26.617021 | 72 | 0.605116 | 158 | 1,251 | 4.677215 | 0.56962 | 0.032476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00974 | 0.261391 | 1,251 | 46 | 73 | 27.195652 | 0.790043 | 0.1247 | 0 | 0 | 0 | 0 | 0.089908 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.212121 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c293d8c5436830faf8721c88cbacb4b46a95b30 | 412 | py | Python | INF1511/Chapter3/matrix1.py | GalliWare/UNISA-studies | 32bab94930b176c4dfe943439781ef102896fab5 | ["Unlicense"] | null | null | null | INF1511/Chapter3/matrix1.py | GalliWare/UNISA-studies | 32bab94930b176c4dfe943439781ef102896fab5 | ["Unlicense"] | null | null | null | INF1511/Chapter3/matrix1.py | GalliWare/UNISA-studies | 32bab94930b176c4dfe943439781ef102896fab5 | ["Unlicense"] | null | null | null |
table = [[0 for i in range(3)] for j in range(3)]
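# Added annotation: `table` starts as a 3 x 3 matrix of zeros; the nested loops
# below fill its 9 elements from standard input, one integer per line.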
print("Enter values for a matrix of order 3 x 3")
for d1 in range(3):
for d2 in range(3):
table[d1][d2] = int(input())
print("Elements of the matrix are %a" % table)
print("Elements of the matrix are ")
for row in table:
print(row)
s = 0
for row in table:
for n in row:
s += n
print("The sum of the elements in the matrix is %d" % s)
| 27.466667 | 56 | 0.618932 | 82 | 412 | 3.109756 | 0.353659 | 0.109804 | 0.12549 | 0.086275 | 0.211765 | 0.211765 | 0 | 0 | 0 | 0 | 0 | 0.039088 | 0.254854 | 412 | 14 | 57 | 29.428571 | 0.791531 | 0 | 0 | 0.142857 | 0 | 0 | 0.337379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.357143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c2944dad267b75f5e9b14f426638486e1db3d9f | 2,880 | py | Python | djangosige/apps/base/views_mixins.py | CTECHSUL/SG | 0d4822b3826e015ad24690815bb9c52952431ea7 | ["MIT"] | 330 | 2017-07-03T08:41:24.000Z | 2022-03-31T04:34:17.000Z | djangosige/apps/base/views_mixins.py | CTECHSUL/SG | 0d4822b3826e015ad24690815bb9c52952431ea7 | ["MIT"] | 107 | 2017-07-03T22:21:35.000Z | 2022-03-30T08:10:24.000Z | djangosige/apps/base/views_mixins.py | matfurrier/SIGEsistema | 6b0072741809c5e5077d201862ea76d839161735 | ["MIT"] | 258 | 2017-06-27T20:11:46.000Z | 2022-03-20T21:46:34.000Z |
from __future__ import unicode_literals
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import redirect
class SuperUserRequiredMixin(object):
@method_decorator(login_required(login_url='login:loginview'))
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
messages.add_message(
request,
messages.WARNING,
u'Apenas o administrador tem permissão para realizar esta operação.',
'permission_warning')
return redirect('base:index')
return super(SuperUserRequiredMixin, self).dispatch(request, *args, **kwargs)
class CheckPermissionMixin(object):
permission_codename = ''
def dispatch(self, request, *args, **kwargs):
if not self.check_user_permissions(request):
messages.add_message(
request,
messages.WARNING,
u'Usuário não tem permissão para realizar esta operação.',
'permission_warning')
return redirect('base:index')
return super(CheckPermissionMixin, self).dispatch(request, *args, **kwargs)
def check_user_permissions(self, request):
if not isinstance(self.permission_codename, list):
self.permission_codename = [self.permission_codename]
perms = []
for permission in self.permission_codename:
if '.' not in permission:
permission = str(
request.resolver_match.app_name) + '.' + str(permission)
perms.append(permission)
return len(self.permission_codename) and (request.user.is_superuser or request.user.has_perms(perms))
def check_user_delete_permission(self, request, object):
codename = str(object._meta.app_label) + '.delete_' + \
str(object.__name__.lower())
if not request.user.has_perm(codename):
messages.add_message(
request,
messages.WARNING,
u'Usuário não tem permissão para realizar esta operação.',
'permission_warning')
return False
return True
class FormValidationMessageMixin(object):
    # Default success message (user-facing strings are kept in Portuguese)
success_message = "<b>%(descricao)s </b>adicionado(a) a base de dados com sucesso."
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, descricao=str(self.object))
def form_valid(self, form):
messages.success(
self.request, self.get_success_message(form.cleaned_data))
return redirect(self.success_url)
def form_invalid(self, form, **kwargs):
return self.render_to_response(self.get_context_data(form=form, **kwargs))
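# Illustrative sketch (added for clarity; not part of the original module):
# these mixins are meant to be composed with Django's generic views, e.g. the
# hypothetical view below, which requires an assumed 'add_produto' permission.
#
#     class ProdutoCreateView(CheckPermissionMixin, FormValidationMessageMixin, CreateView):
#         permission_codename = 'add_produto'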
| 38.918919 | 109 | 0.657292 | 313 | 2,880 | 5.86262 | 0.329073 | 0.058856 | 0.059946 | 0.040872 | 0.276839 | 0.245232 | 0.245232 | 0.222888 | 0.182561 | 0.182561 | 0 | 0 | 0.253125 | 2,880 | 73 | 110 | 39.452055 | 0.853092 | 0.009028 | 0 | 0.310345 | 0 | 0 | 0.117461 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.086207 | 0.034483 | 0.465517 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c2a8fd306dc8e296f6feb602bca22708c15866f | 1,601 | py | Python | source/CheckJobStatusFunction/app.py | kwwaikar/aws-data-exchange-publisher-coordinator-1 | 8bfec78ff64432d2b2804050116994d76928cb81 | ["MIT"] | null | null | null | source/CheckJobStatusFunction/app.py | kwwaikar/aws-data-exchange-publisher-coordinator-1 | 8bfec78ff64432d2b2804050116994d76928cb81 | ["MIT"] | null | null | null | source/CheckJobStatusFunction/app.py | kwwaikar/aws-data-exchange-publisher-coordinator-1 | 8bfec78ff64432d2b2804050116994d76928cb81 | ["MIT"] | null | null | null |
import boto3
import os
import logging
from datetime import datetime
def lambda_handler(event, context):
"""This function checks and returns the import assets job status"""
try:
global log_level
log_level = str(os.environ.get('LOG_LEVEL')).upper()
valid_log_levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
if log_level not in valid_log_levels:
log_level = 'ERROR'
logging.getLogger().setLevel(log_level)
logging.debug(f'{event=}')
dataexchange = boto3.client(service_name='dataexchange')
product_id = event['ProductId']
dataset_id = event['DatasetId']
revision_id = event['RevisionId']
job_id = event['JobId']
job_response = dataexchange.get_job(JobId=job_id)
logging.debug(f'get job = {job_response}')
job_status = job_response['State']
metrics = {
"Version": os.getenv('Version'),
"TimeStamp": datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f'),
"ProductId": product_id,
"DatasetId": dataset_id,
"RevisionId": revision_id,
"JobId": job_id,
"JobStatus": job_status
}
logging.info(f'Metrics:{metrics}')
except Exception as e:
logging.error(e)
raise e
return {
"StatusCode": 200,
"ProductId": product_id,
"DatasetId": dataset_id,
"RevisionId": revision_id,
"JobId": job_id,
"JobStatus": job_status
}
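# Illustrative sketch (added annotation): a Step Functions task would invoke
# this handler with an event shaped roughly like the hypothetical example
# below; all IDs are placeholders.
#
#     {"ProductId": "prod-1", "DatasetId": "ds-1", "RevisionId": "rev-1", "JobId": "job-1"}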
| 28.589286 | 77 | 0.562773 | 171 | 1,601 | 5.081871 | 0.438596 | 0.055236 | 0.034522 | 0.06214 | 0.193326 | 0.193326 | 0.193326 | 0.193326 | 0.193326 | 0.193326 | 0 | 0.004562 | 0.315428 | 1,601 | 55 | 78 | 29.109091 | 0.788321 | 0.038101 | 0 | 0.238095 | 0 | 0 | 0.188641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.095238 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c2ec23931c6b9f6cf2d97ddfef973d0d7caa687 | 12,363 | py | Python | app/models.py | eellkk/flask_learn | 18810a09a7384af15d3767b56274c990fe1c154b | ["MIT"] | null | null | null | app/models.py | eellkk/flask_learn | 18810a09a7384af15d3767b56274c990fe1c154b | ["MIT"] | null | null | null | app/models.py | eellkk/flask_learn | 18810a09a7384af15d3767b56274c990fe1c154b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 20 11:26:41 2016
@author: Administrator
"""
# Import db from __init__.py, where db = SQLAlchemy()
from . import db,login_manager
# Import the security module from Werkzeug to support password hashing
from werkzeug.security import generate_password_hash,check_password_hash
from flask.ext.login import UserMixin,AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app,url_for
from datetime import datetime
import hashlib
from flask import request
from markdown import markdown
import bleach
from app.exceptions import ValidationError
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer,db.ForeignKey('users.id'),primary_key=True)
followed_id = db.Column(db.Integer,db.ForeignKey('users.id'),primary_key=True)
timestamp = db.Column(db.DateTime,default=datetime.utcnow)
# Define the User model
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key=True)
email = db.Column(db.String(64),unique=True,index=True)
username = db.Column(db.String(64),unique=True,index=True)
role_id = db.Column(db.Integer,db.ForeignKey('role.id'))
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(),default = datetime.utcnow)
last_seen = db.Column(db.DateTime(),default = datetime.utcnow)
avatar_hash = db.Column(db.String(32))
posts = db.relationship('Post',backref='author',lazy='dynamic')
followed = db.relationship('Follow',foreign_keys=[Follow.follower_id],backref=db.backref('follower',lazy='joined'),lazy='dynamic',cascade='all,delete-orphan')
followers = db.relationship('Follow',foreign_keys=[Follow.followed_id],backref=db.backref('followed',lazy='joined'),lazy='dynamic',cascade='all,delete-orphan')
comments = db.relationship('Comment',backref='author',lazy='dynamic')
def __repr__(self):
return '<User %s>' % self.username
password_hash = db.Column(db.String(128))
@property
def password(self):
raise AttributeError('password is not a readable attribute')
    # write-only password
@password.setter
def password(self,password):
self.password_hash = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.password_hash,password)
confirmed = db.Column(db.Boolean,default=False)
    # Generate a token with a default validity of 3600 seconds
def generate_confirmation_token(self,expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'],expiration)
return s.dumps({'confirm':self.id})
    # Verify the token; if it passes, set the newly added confirmed attribute to True
def confirm(self,token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
def __init__(self,**kwargs):
super(User,self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(self.email.encode('utf-8')).hexdigest()
self.follow(self)
def can(self,permissions):
return self.role is not None and (self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self,size=100,default='identicon',rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(self.email.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(url=url,hash=hash,size=size,default=default,rating=rating)
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
def follow(self,user):
if not self.is_following(user):
f = Follow(follower=self,followed=user)
db.session.add(f)
def unfollow(self,user):
f = self.followed.filter_by(followed_id=user.id).first()
if f:
db.session.delete(f)
def is_following(self,user):
return self.followed.filter_by(followed_id=user.id).first() is not None
def is_followed_by(self,user):
return self.followers.filter_by(follower_id=user.id).first() is not None
@property
def followed_posts(self):
return Post.query.join(Follow,Follow.followed_id == Post.author_id).filter(Follow.follower_id == self.id)
@staticmethod
def follow_self():
for user in User.query.all():
if not user.is_following(user):
user.follow(user)
db.session.add(user)
db.session.commit()
def generate_auth_token(self,expiration):
s = Serializer(current_app.config['SECRET_KEY'],expires_in=expiration)
return s.dumps({'id':self.id})
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def to_json(self):
json_user = {
'url':url_for('api.get_post',id=self.id,_external=True),
'username':self.username,
'member_since':self.member_since,
'lasr_seen':self.last_seen,
'posts':url_for('api.get_user_posts',id=self.id,_external=True),
'followed_posts':url_for('api.get_user_followed_posts',id=self.id,_external=True),
'post_count':self.posts.count()
}
return json_user
class AnonymousUser(AnonymousUserMixin):
def can(self,permission):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
class Role(db.Model):
    __tablename__ = 'roles'
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(64),unique=True)
users = db.relationship('User',backref='role',lazy='dynamic')
default = db.Column(db.Boolean,default = False,index=True)
permissions = db.Column(db.Integer)
@staticmethod
def insert_roles():
roles = {
'User':(Permission.FOLLOW|Permission.COMMENT|Permission.WRITE_ARTICLES,True),
'Moderator':(Permission.FOLLOW|Permission.COMMENT|Permission.WRITE_ARTICLES|Permission.MODERATE_COMMENTS,False),
'Administrator':(0xff,False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %s>' % self.name
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Permission:
FOLLOW = 0x01
COMMENT =0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
class Post(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer,primary_key=True)
body = db.Column(db.Text)
timestamp = db.Column(db.DateTime,index=True,default=datetime.utcnow)
author_id = db.Column(db.Integer,db.ForeignKey('users.id'))
body_html = db.Column(db.Text)
comments = db.relationship('Comment',backref='post',lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from random import seed,randint
import forgery_py
seed()
user_count = User.query.count()
for i in range(count):
u =User.query.offset(randint(0,user_count-1)).first()
p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1,3)),timestamp=forgery_py.date.date(True),author=u)
db.session.add(p)
db.session.commit()
@staticmethod
def on_changed_body(target,value,oldvalue,initiator):
allowed_tags = ['a','abbr','acronym','b','blockquote','code','em','i','li','ol','pre','strong','ul','h1','h2','h3','p']
target.body_html = bleach.linkify(bleach.clean(markdown(value,output_format='html'),tags=allowed_tags,strip=True))
def to_json(self):
json_post = {
'url':url_for('api.get_post',id=self.id,_external = True),
'body':self.body,
'body_html':self.body_html,
'author':url_for('api.get_user',id=self.author_id,_external = True),
'comments':url_for('api.get_comments',id=self.id,_external=True),
'comment_count':self.comments.count()
}
return json_post
@staticmethod
def from_json(json_post):
body = json_post.get('body')
if body is None or body == '':
raise ValidationError('post does not have a body.')
return Post(body=body)
db.event.listen(Post.body,'set',Post.on_changed_body)
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime,index=True,default=datetime.utcnow)
disabled = db.Column(db.Boolean)
author_id = db.Column(db.Integer,db.ForeignKey('users.id'))
post_id = db.Column(db.Integer,db.ForeignKey('posts.id'))
@staticmethod
def on_change_body(target,value,oldvalue,initiator):
allowed_tags = ['a','abbr','acronym','b','code','em','i','strong']
target.body_html=bleach.linkify(bleach.clean(markdown(value,output_format='html'),tags=allowed_tags,strip=True))
def to_json(self):
json_comment = {
'url':url_for('api.get_comment',id=self.id,_external=True),
'post':url_for('api.get_post',id=self.post_id,external=True),
'body':self.body,
'body_html':self.body_html,
'timestamp':self.timestamp,
'author':url_for('api.get_user',id=self.author_id,_external=True)
}
return json_comment
@staticmethod
def from_json(json_comment):
body = json_comment.get('body')
if body is None or body == '':
raise ValidationError('comment does not have a body')
return Comment(body=body)
db.event.listen(Comment.body,'set',Comment.on_change_body)
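# Illustrative sketch (added for clarity; not part of the original module):
# thanks to the two event listeners above, assigning to Post.body or
# Comment.body re-renders the sanitized body_html automatically, e.g.
#
#     post = Post(body='# Hello **world**')  # post.body_html is now clean HTML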
| 37.01497 | 164 | 0.604708 | 1,488 | 12,363 | 4.869624 | 0.183468 | 0.034226 | 0.042782 | 0.025807 | 0.427822 | 0.356611 | 0.301132 | 0.223296 | 0.19183 | 0.170301 | 0 | 0.00834 | 0.272588 | 12,363 | 334 | 165 | 37.01497 | 0.797398 | 0.017552 | 0 | 0.270588 | 0 | 0 | 0.079864 | 0.00602 | 0 | 0 | 0.002374 | 0 | 0 | 1 | 0.129412 | false | 0.039216 | 0.062745 | 0.043137 | 0.498039 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2c3065c1d277ab3a50d50ca18e17bde26be17ca4 | 2,657 | py | Python | main.py | firestalk/vk_delpost | 79ae0ad94e65a81da2a103370dccd7765cff3db5 | [
"MIT"
] | null | null | null | main.py | firestalk/vk_delpost | 79ae0ad94e65a81da2a103370dccd7765cff3db5 | [
"MIT"
] | null | null | null | main.py | firestalk/vk_delpost | 79ae0ad94e65a81da2a103370dccd7765cff3db5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from splinter import Browser
from time import sleep
import re
import json
import sys
class VkDelPost:
def __init__(self):
self.account = self.load_cfg()
def main(self):
email = self.account['email']
passw = self.account['passw']
with Browser('phantomjs') as browser:
url = "http://vk.com"
browser.visit(url)
browser.fill('email', email)
browser.fill('pass', passw)
browser.find_by_id('quick_login_button').click()
mypage = browser.find_by_id('myprofile')
if mypage:
print('Logged In')
else:
print('Login failed')
self.dump_page(browser.html)
return False
profile_str = browser.find_by_id('myprofile_wrap').first
profile = re.findall(r'href="([^."]+)', profile_str.html)[0]
url = "http://vk.com" + profile
while True:
sleep(2)
browser.visit(url)
sec_chk = browser.find_by_id('check_msg')
if sec_chk:
print("Security check page.")
self.sec_page(browser)
return False
pagetxt = browser.html
id_lst = re.findall(r'id="post_delete([^.]\d+_\d+)"', pagetxt)
if len(id_lst) > 0:
print("Post Count: {}".format(len(id_lst)))
for i in id_lst:
button = browser.find_by_id('post_delete' + str(i))
button.click()
print('Posts deleted')
else:
print('Post deletion button not found')
return False
def sec_page(self, browser):
print('TODO: This part is not done yet.')
ppref = browser.find_by_xpath('/html/body/div[9]/div/div/div/div[3]/div[3]/div/div/div/div/table/tbody/tr[1]/td[1]/div')
print(ppref)
ppost = browser.find_by_xpath('/html/body/div[9]/div/div/div/div[3]/div[3]/div/div/div/div/table/tbody/tr[1]/td[3]/span')
print(ppost)
def dump_page(self, html):
with open('pagedump.html', 'w') as htp:
try:
htp.write(html)
except UnicodeEncodeError:
htp.write(str(html.encode(sys.stdout.encoding, errors='replace')))
return
def load_cfg(self):
with open('config.json', 'r') as cfg:
data = json.load(cfg)
return data
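    # Added annotation: load_cfg expects a config.json file next to the script,
    # shaped like the hypothetical example below (credentials are placeholders):
    #
    #     {"email": "user@example.com", "passw": "secret"}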
if __name__ == "__main__":
vkd = VkDelPost()
vkd.main()
| 34.506494 | 129 | 0.519759 | 321 | 2,657 | 4.155763 | 0.386293 | 0.053973 | 0.053973 | 0.056222 | 0.146927 | 0.110945 | 0.110945 | 0.110945 | 0.110945 | 0.110945 | 0 | 0.008676 | 0.349266 | 2,657 | 76 | 130 | 34.960526 | 0.762869 | 0.016184 | 0 | 0.106061 | 0 | 0.030303 | 0.192956 | 0.078101 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0.030303 | 0.075758 | 0 | 0.242424 | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257385445594750f579250d74f0144116e915ee8 | 19,130 | py | Python | ptest/usb_session.py | athena255/FlightSoftware | c3fd7dcc6c265fad9843f8992b60d5a773c99f23 | [
"MIT"
] | null | null | null | ptest/usb_session.py | athena255/FlightSoftware | c3fd7dcc6c265fad9843f8992b60d5a773c99f23 | [
"MIT"
] | 1 | 2020-09-20T20:11:06.000Z | 2020-09-20T20:11:06.000Z | ptest/usb_session.py | athena255/FlightSoftware | c3fd7dcc6c265fad9843f8992b60d5a773c99f23 | [
"MIT"
] | null | null | null | import time
import datetime
import serial
import threading
import json
import traceback
import queue
import os
import pty
import subprocess
from multiprocessing import Process
import glob
from elasticsearch import Elasticsearch
from .data_consumers import Datastore, Logger
from .http_cmd import create_usb_session_endpoint
class USBSession(object):
'''
Represents a connection session with a Flight Computer's state system.
This class is used by the simulation software and user command prompt to read and write to a
flight computer's state.
This object is thread-safe; if an instance of this class is shared between the MATLAB simulation
interface (an instance of Simulation) and the user command line (an instance of StateCmdPrompt),
they won't trip over each other in setting/receiving variables from the connected flight computer.
'''
def __init__(self, device_name, uplink_console, port, is_teensy, simulation_run_dir):
'''
Initializes state session with a device.
'''
# Device connection
self.device_name = device_name
self.port = port
self.is_teensy = is_teensy
# Uplink console
self.uplink_console = uplink_console
# Data logging
self.datastore = Datastore(device_name, simulation_run_dir)
self.logger = Logger(device_name, simulation_run_dir)
self.raw_logger = Logger(device_name + "_raw", simulation_run_dir)
self.telem_save_dir = simulation_run_dir
        # Start the downlink parser; compile it first if it is not available.
downlink_parser_filepath = ".pio/build/gsw_downlink_parser/program"
if not os.path.exists(downlink_parser_filepath):
print("Compiling the downlink parser.")
os.system("pio run -e gsw_downlink_parser > /dev/null")
master_fd, slave_fd = pty.openpty()
self.downlink_parser = subprocess.Popen([downlink_parser_filepath], stdin=master_fd, stdout=master_fd)
self.dp_console = serial.Serial(os.ttyname(slave_fd), 9600, timeout=1)
# Open a connection to elasticsearch
self.es = Elasticsearch([{'host':"127.0.0.1",'port':"9200"}])
# Simulation
self.overriden_variables = set()
def connect(self, console_port, baud_rate):
'''
Starts serial connection to the desired device.
Args:
- console_port: Serial port to connect to.
- baud_rate: Baud rate of connection.
'''
try:
self.console = serial.Serial(console_port, baud_rate)
self.start_time = datetime.datetime.now() # This is t = 0 on the Teensy, +/- a few milliseconds.
self.device_write_lock = threading.Lock() # Lock to prevent multiple writes to device at the same time.
# Queues used to manage interface between the check_msgs_thread and calls to read_state or write_state
self.field_requests = queue.Queue()
self.field_responses = queue.Queue()
self.datastore.start()
self.logger.start()
self.raw_logger.start()
self.running_logger = True
self.check_msgs_thread = threading.Thread(
name=f"{self.device_name} logger thread",
target=self.check_console_msgs)
self.check_msgs_thread.start()
print(f"Opened connection to {self.device_name}.")
except serial.SerialException:
print(f"Unable to open serial port for {self.device_name}.")
return False
try:
self.flask_app = create_usb_session_endpoint(self)
self.flask_app.config["uplink_console"] = self.uplink_console
self.flask_app.config["console"] = self.console
self.http_thread = Process(name=f"{self.device_name} HTTP Command Endpoint", target=self.flask_app.run, kwargs={"port": self.port})
self.http_thread.start()
print(f"{self.device_name} HTTP command endpoint is running at http://localhost:{self.port}")
return True
except:
print(f"Unable to start {self.device_name} HTTP command endpoint at http://localhost:{self.port}")
return False
def check_console_msgs(self):
'''
Read device output for debug messages and state variable updates. Record debug messages
to the logging file, and update the console's record of the state.
'''
while self.running_logger:
try:
# Read line coming from device and parse it
if self.console.inWaiting() > 0:
line = self.console.readline().rstrip()
self.raw_logger.put("Received: " + line.decode("utf-8"))
else:
continue
data = json.loads(line)
data['time'] = str(self.start_time + datetime.timedelta(milliseconds=data['t']))
if 'msg' in data:
# The logline represents a debugging message created by Flight Software. Report the message to the logger.
logline = f"[{data['time']}] ({data['svrty']}) {data['msg']}"
self.logger.put(logline, add_time = False)
elif 'telem' in data:
logline = f"[{data['time']}] Received requested telemetry from spacecraft.\n"
logline += data['telem']
print("\n" + logline)
self.logger.put(logline, add_time = False)
                    # log data to a timestamped file
telem_bytes = data['telem'].split(r'\x')
telem_bytes.remove("")
telem_file = open(os.path.join(self.telem_save_dir ,f"telem[{data['time']}].txt"), "wb")
for byte in telem_bytes:
telem_file.write(int(byte, 16).to_bytes(1, byteorder='big'))
telem_file.close()
elif 'uplink' in data:
if data['uplink'] and data['len']:
logline = f"[{data['time']}] Successfully sent telemetry to FlightSoftware.\n"
logline += str(data['uplink'])
else:
logline = f"[{data['time']}] Failed to send telemetry to FlightSoftware."
print("\n" + logline)
self.logger.put(logline, add_time = False)
else:
if 'err' in data:
# The log line represents an error in retrieving or writing state data that
# was caused by a USBSession client improperly setting/retrieving a value.
# Report this failure to the logger.
logline = f"[{data['time']}] (ERROR) Tried to {data['mode']} state value named \"{data['field']}\" but encountered an error: {data['err']}"
self.logger.put(logline, add_time = False)
data['val'] = None
else:
# A valid telemetry field was returned. Manage it.
self.datastore.put(data)
self.field_responses.put(data)
except ValueError:
logline = f'[RAW] {line}'
self.logger.put(logline)
except serial.SerialException:
print('Error: unable to read serial port for {}. Exiting.'.
format(self.device_name))
self.disconnect()
except:
traceback.print_exc()
print('Unspecified error. Exiting.')
self.disconnect()
def _wait_for_state(self, field, timeout = None):
"""
Helper function used by both read_state and write_state to wait for a desired value
to be reported back by the connected device.
"""
self.field_requests.put(field)
try:
data = self.field_responses.get(True, timeout)
return data['val']
except queue.Empty:
return None
def read_state(self, field, timeout = None):
'''
Read state.
Read the value of the state field associated with the given field name on the flight controller.
'''
if not self.running_logger: return
json_cmd = {'mode': ord('r'), 'field': str(field)}
json_cmd = json.dumps(json_cmd) + "\n"
self.device_write_lock.acquire()
self.console.write(json_cmd.encode())
self.device_write_lock.release()
self.raw_logger.put("Sent: " + json_cmd.rstrip())
return self._wait_for_state(field)
def str_to_val(self, field):
'''
Automatically detects floats, ints and bools
Returns a float, int or bool
'''
if 'nan' in field:
return float("NAN")
elif '.' in field:
return float(field)
elif field == 'true':
return True
elif field == 'false':
return False
else:
return int(field)
def smart_read(self, field, **kwargs):
'''
Turns a string state field read into the actual desired vals.
Returns list of vals, or the val itself. Vals can be bools, ints, or floats.
Raises NameError if no state field was found.
'''
ret = self.read_state(field, kwargs.get('timeout'))
if ret is None:
raise NameError(f"State field: {field} not found.")
# begin type inference
if ',' in ret:
# ret is a list
list_of_strings = ret.split(',')
            list_of_strings = [x for x in list_of_strings if x != '']
list_of_vals = [self.str_to_val(x) for x in list_of_strings]
return list_of_vals
else:
return self.str_to_val(ret)
def _write_state_basic(self, fields, vals, timeout = None):
'''
Write multiple state fields to the device at once.
'''
if not self.running_logger: return
assert len(fields) == len(vals)
assert len(fields) <= 20, "Flight Software can't handle more than 20 state field writes at a time"
json_cmds = ""
for field, val in zip(fields, vals):
json_cmd = {
'mode': ord('w'),
'field': str(field),
'val': str(val)
}
json_cmd = json.dumps(json_cmd) + "\n"
json_cmds += json_cmd
if len(json_cmds) >= 512:
print("Error: Flight Software can't handle input buffers >= 512 bytes.")
return False
self.device_write_lock.acquire()
self.console.write(json_cmds.encode())
self.device_write_lock.release()
self.raw_logger.put("Sent: " + json_cmds)
returned_vals = []
for field in fields:
returned_vals.append(self._wait_for_state(field, timeout))
if returned_vals[0] is None:
return False
returned_vals = returned_vals[0].split(",")
        returned_vals = [x for x in returned_vals if x != ""]
if (returned_vals[0].replace('.','').replace('-','')).isnumeric():
numeric_returned_vals = [float(x) for x in returned_vals]
if type(vals[0]) == str:
vals = vals[0]
                vals = [float(x) for x in vals.split(",") if x != '']
return numeric_returned_vals == vals
return returned_vals == vals
def write_multiple_states(self, fields, vals, timeout=None):
'''
Write multiple states and check the write operation with feedback.
Overwrite the value of the state field with the given state field name on the flight computer, and
then verify that the state was actually set. Do not write the state if the variable is being overriden
by the user. (This is the function that sim should exclusively use.)
'''
# Filter out fields that are being overridden by the user
field_val_pairs = [
field_val_pair for field_val_pair in zip(fields, vals)
if field_val_pair[0] not in self.overriden_variables
]
fields, vals = zip(*field_val_pairs)
return self._write_state_basic(list(fields), list(vals), timeout)
def _val_to_str(self, val):
'''
Convert a state value or list of values into a single string writable to
a state.
Currently, the supported types are integers, doubles, integer vectors,
double vectors, and booleans.
'''
if type(val) not in (list, tuple):
if type(val) is bool:
return 'true' if val else 'false'
else:
return str(val)
else:
val_str = ''
for _val in val:
val_str += self._val_to_str(_val) + ', '
return val_str[:len(val_str) - 2]
def write_state(self, field, *args, **kwargs):
'''
Write state and check write operation with feedback.
Overwrite the value of the state field with the given state field name on the flight computer, and
        then verify that the state was actually set. Do not write the state if the variable is being overridden
by the user. (This is a function that sim should exclusively use.)
'''
return self.write_multiple_states([field], [self._val_to_str(args)], kwargs.get('timeout'))
def send_uplink(self, filename):
'''
Gets the uplink packet from the given file. Sends the hex
representation of the packet and the length of the packet
to the console to be processed by FlightSoftware
'''
# Get the uplink packet from the uplink sbd file
try:
file = open(filename, "rb")
except:
logline = f"Error: File {filename} doesn't exist"
self.raw_logger.put(logline)
return False
uplink_packet = file.read()
uplink_packet_length = len(uplink_packet)
file.close()
uplink_packet = str(''.join(r'\x'+hex(byte)[2:] for byte in uplink_packet)) #get the hex representation of the packet bytes
# Send a command to the console to process the uplink packet
json_cmd = {
'mode': ord('u'),
'val': uplink_packet,
'length': uplink_packet_length
}
json_cmd = json.dumps(json_cmd) + "\n"
self.device_write_lock.acquire()
self.console.write(json_cmd.encode())
self.device_write_lock.release()
self.raw_logger.put("Sent: " + json_cmd)
return True
def uplink(self, fields, vals, timeout=None):
'''
Create an uplink packet from the provided data and save it
locally to disk.
The send_uplink function can be used to send this uplink to
the flight controller.
Returns: false if the uplink could not be created, true otherwise.
The uplink might not be possible to create if it uses unrecognized
state fields or if its size exceeds 70 bytes.
'''
if not self.running_logger: return
# Filter out fields that are being overridden by the user
field_val_pairs = [
field_val_pair for field_val_pair in zip(fields, vals)
if field_val_pair[0] not in self.overriden_variables
]
fields, vals = zip(*field_val_pairs)
success = self.uplink_console.create_uplink(fields, vals, "uplink.sbd")
# If the uplink packet exists, send it to the FlightSoftware console
if success and os.path.exists("uplink.sbd"):
success &= self.send_uplink("uplink.sbd")
os.remove("uplink.sbd")
os.remove("uplink.json")
return success
else:
if os.path.exists("uplink.json"): os.remove("uplink.json")
return False
def parsetelem(self):
'''
Provide the latest downlink telemetry file that was received from the
spacecraft to the downlink producer, and then return the parsed value
of the latest completed downlink frame as a JSON object.
'''
        # get the newest file
telem_files = glob.iglob(os.path.join(self.telem_save_dir, 'telem*'))
try:
newest_telem_file = max(telem_files, key=os.path.basename)
except ValueError:
return "No telemetry to parse."
self.dp_console.write((newest_telem_file+"\n").encode())
telem_json_data = json.loads(self.dp_console.readline().rstrip())
if telem_json_data is not None:
telem_json_data = telem_json_data["data"]
return telem_json_data
def dbtelem(self):
'''
Run parsetelem(), and dump the results into the Elasticsearch database.
This function is useful because it allows database-connected technologies,
such as the telemetry webserver and OpenMCT, to consume downlink data.
'''
jsonObj = self.parsetelem()
if not isinstance(jsonObj, dict):
print("Error parsing telemetry.")
return False
failed = False
for field in jsonObj:
value = jsonObj[field]
data=json.dumps({
field: value,
"time": str(datetime.datetime.now().isoformat())
})
res = self.es.index(index='statefield_report_'+str(self.device_name.lower()), doc_type='report', body=data)
if not res['result'] == 'created':
failed = True
return not failed
def override_state(self, field, *args, **kwargs):
'''
Override state and check write operation with feedback.
        Behaves the same way as write_state(), but is strictly written for a state variable that is overridden
by the user, i.e. is no longer set by the simulation.
'''
self.overriden_variables.add(field)
return self._write_state_basic([field], [self._val_to_str(args)], kwargs.get('timeout'))
def release_override(self, field):
'''
Release override of simulation state.
        If the state wasn't currently being overridden, then this function just
acts as a no-op.
'''
self.overriden_variables.discard(field)
def disconnect(self):
'''Quits the program and stores message log and field telemetry to file.'''
print(f' - Terminating console connection to and saving logging/telemetry data for {self.device_name}.')
# End threads
self.running_logger = False
self.check_msgs_thread.join()
self.console.close()
self.dp_console.close()
self.http_thread.terminate()
self.http_thread.join()
self.datastore.stop()
self.logger.stop()
self.raw_logger.stop()
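# Illustrative sketch (added annotation; not part of the original module): a
# typical USBSession lifecycle, with hypothetical placeholder arguments.
#
#     session = USBSession("fc", uplink_console, 5000, True, "/tmp/run_dir")
#     if session.connect("/dev/ttyACM0", 115200):
#         value = session.smart_read("some.state.field")
#         session.disconnect()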
| 38.961303 | 163 | 0.595086 | 2,372 | 19,130 | 4.668212 | 0.185919 | 0.016256 | 0.013908 | 0.012011 | 0.256931 | 0.209428 | 0.17195 | 0.123544 | 0.123544 | 0.111623 | 0 | 0.003206 | 0.315107 | 19,130 | 490 | 164 | 39.040816 | 0.841933 | 0.246262 | 0 | 0.243986 | 0 | 0 | 0.119573 | 0.004605 | 0 | 0 | 0 | 0 | 0.006873 | 1 | 0.061856 | false | 0 | 0.051546 | 0 | 0.223368 | 0.044674 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257840704f660f29957142fc99721907a1db3293 | 29,073 | py | Python | test/test_nfvbench.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | null | null | null | test/test_nfvbench.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | null | null | null | test/test_nfvbench.py | hashnfv/hashnfv-nfvbench | 8da439b932537748d379c7bd3bdf560ef739b203 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import logging
import os
import sys
from attrdict import AttrDict
from nfvbench.config import config_loads
from nfvbench.credentials import Credentials
from nfvbench.fluentd import FluentLogHandler
import nfvbench.log
from nfvbench.network import Interface
from nfvbench.network import Network
from nfvbench.specs import ChainType
from nfvbench.specs import Encaps
import nfvbench.traffic_gen.traffic_utils as traffic_utils
import pytest
__location__ = os.path.realpath(os.path.join(os.getcwd(),
os.path.dirname(__file__)))
@pytest.fixture
def openstack_vxlan_spec():
return AttrDict(
{
'openstack': AttrDict({
'vswitch': "VTS",
'encaps': Encaps.VxLAN}),
'run_spec': AttrDict({
'use_vpp': True
})
}
)
# =========================================================================
# PVP Chain tests
# =========================================================================
def test_chain_interface():
iface = Interface('testname', 'vpp', tx_packets=1234, rx_packets=4321)
assert iface.name == 'testname'
assert iface.device == 'vpp'
assert iface.get_packet_count('tx') == 1234
assert iface.get_packet_count('rx') == 4321
assert iface.get_packet_count('wrong_key') == 0
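# Added annotation: these tests are collected by pytest; an illustrative
# invocation (command line is a hypothetical example) would be
#     pytest test/test_nfvbench.py -k chain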
# pylint: disable=redefined-outer-name
@pytest.fixture(scope='session')
def iface1():
return Interface('iface1', 'trex', tx_packets=10000, rx_packets=1234)
@pytest.fixture(scope='session')
def iface2():
return Interface('iface2', 'n9k', tx_packets=1234, rx_packets=9901)
@pytest.fixture(scope='session')
def iface3():
return Interface('iface3', 'n9k', tx_packets=9900, rx_packets=1234)
@pytest.fixture(scope='session')
def iface4():
return Interface('iface4', 'vpp', tx_packets=1234, rx_packets=9801)
@pytest.fixture(scope='session')
def net1(iface1, iface2, iface3, iface4):
return Network([iface1, iface2, iface3, iface4], reverse=False)
@pytest.fixture(scope='session')
def net2(iface1, iface2, iface3):
return Network([iface1, iface2, iface3], reverse=True)
def test_chain_network(net1, net2, iface1, iface2, iface3, iface4):
assert [iface1, iface2, iface3, iface4] == net1.get_interfaces()
assert [iface3, iface2, iface1] == net2.get_interfaces()
net2.add_interface(iface4)
assert [iface4, iface3, iface2, iface1] == net2.get_interfaces()
# pylint: enable=redefined-outer-name
# pylint: disable=pointless-string-statement
"""
def test_chain_analysis(net1, monkeypatch, openstack_vxlan_spec):
def mock_empty(self, *args, **kwargs):
pass
monkeypatch.setattr(ServiceChain, '_setup', mock_empty)
f = ServiceChain(AttrDict({'service_chain': 'DUMMY'}), [], {'tor': {}}, openstack_vxlan_spec,
lambda x, y, z: None)
result = f.get_analysis([net1])
assert result[1]['packet_drop_count'] == 99
assert result[1]['packet_drop_percentage'] == 0.99
assert result[2]['packet_drop_count'] == 1
assert result[2]['packet_drop_percentage'] == 0.01
assert result[3]['packet_drop_count'] == 99
assert result[3]['packet_drop_percentage'] == 0.99
net1.reverse = True
result = f.get_analysis([net1])
assert result[1]['packet_drop_count'] == 0
assert result[1]['packet_drop_percentage'] == 0.0
assert result[2]['packet_drop_count'] == 0
assert result[2]['packet_drop_percentage'] == 0.0
assert result[3]['packet_drop_count'] == 0
assert result[3]['packet_drop_percentage'] == 0.0
@pytest.fixture
def pvp_chain(monkeypatch, openstack_vxlan_spec):
tor_vni1 = Interface('vni-4097', 'n9k', 50, 77)
vsw_vni1 = Interface('vxlan_tunnel0', 'vpp', 77, 48)
vsw_vif1 = Interface('VirtualEthernet0/0/2', 'vpp', 48, 77)
vsw_vif2 = Interface('VirtualEthernet0/0/3', 'vpp', 77, 47)
vsw_vni2 = Interface('vxlan_tunnel1', 'vpp', 43, 77)
tor_vni2 = Interface('vni-4098', 'n9k', 77, 40)
def mock_init(self, *args, **kwargs):
self.vni_ports = [4097, 4098]
self.specs = openstack_vxlan_spec
self.clients = {
'vpp': AttrDict({
'set_interface_counters': lambda: None,
})
}
self.worker = AttrDict({
'run': lambda: None,
})
def mock_empty(self, *args, **kwargs):
pass
def mock_get_network(self, traffic_port, vni_id, reverse=False):
if vni_id == 0:
return Network([tor_vni1, vsw_vni1, vsw_vif1], reverse)
else:
return Network([tor_vni2, vsw_vni2, vsw_vif2], reverse)
def mock_get_data(self):
return {}
monkeypatch.setattr(PVPChain, '_get_network', mock_get_network)
monkeypatch.setattr(PVPChain, '_get_data', mock_get_data)
monkeypatch.setattr(PVPChain, '_setup', mock_empty)
monkeypatch.setattr(VxLANWorker, '_clear_interfaces', mock_empty)
monkeypatch.setattr(PVPChain, '_generate_traffic', mock_empty)
monkeypatch.setattr(PVPChain, '__init__', mock_init)
return PVPChain(None, None, {'vm': None, 'vpp': None, 'tor': None, 'traffic': None}, None)
def test_pvp_chain_run(pvp_chain):
result = pvp_chain.run()
expected_result = {
'raw_data': {},
'stats': None,
'packet_analysis': {
'direction-forward': [
OrderedDict([
('interface', 'vni-4097'),
('device', 'n9k'),
('packet_count', 50)
]),
OrderedDict([
('interface', 'vxlan_tunnel0'),
('device', 'vpp'),
('packet_count', 48),
('packet_drop_count', 2),
('packet_drop_percentage', 4.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/2'),
('device', 'vpp'),
('packet_count', 48),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/3'),
('device', 'vpp'),
('packet_count', 47),
('packet_drop_count', 1),
('packet_drop_percentage', 2.0)
]),
OrderedDict([
('interface', 'vxlan_tunnel1'),
('device', 'vpp'),
('packet_count', 43),
('packet_drop_count', 4),
('packet_drop_percentage', 8.0)
]),
OrderedDict([
('interface', 'vni-4098'),
('device', 'n9k'),
('packet_count', 40),
('packet_drop_count', 3),
('packet_drop_percentage', 6.0)
])
],
'direction-reverse': [
OrderedDict([
('interface', 'vni-4098'),
('device', 'n9k'),
('packet_count', 77)
]),
OrderedDict([
('interface', 'vxlan_tunnel1'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/3'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/2'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'vxlan_tunnel0'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'vni-4097'),
('device', 'n9k'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
])
]
}
}
assert result == expected_result
"""
# =========================================================================
# PVVP Chain tests
# =========================================================================
"""
@pytest.fixture
def pvvp_chain(monkeypatch, openstack_vxlan_spec):
tor_vni1 = Interface('vni-4097', 'n9k', 50, 77)
vsw_vni1 = Interface('vxlan_tunnel0', 'vpp', 77, 48)
vsw_vif1 = Interface('VirtualEthernet0/0/2', 'vpp', 48, 77)
vsw_vif3 = Interface('VirtualEthernet0/0/0', 'vpp', 77, 47)
vsw_vif4 = Interface('VirtualEthernet0/0/1', 'vpp', 45, 77)
vsw_vif2 = Interface('VirtualEthernet0/0/3', 'vpp', 77, 44)
vsw_vni2 = Interface('vxlan_tunnel1', 'vpp', 43, 77)
tor_vni2 = Interface('vni-4098', 'n9k', 77, 40)
def mock_init(self, *args, **kwargs):
self.vni_ports = [4099, 4100]
self.v2vnet = V2VNetwork()
self.specs = openstack_vxlan_spec
self.clients = {
'vpp': AttrDict({
'get_v2v_network': lambda reverse=None: Network([vsw_vif3, vsw_vif4], reverse),
'set_interface_counters': lambda pvvp=None: None,
'set_v2v_counters': lambda: None,
})
}
self.worker = AttrDict({
'run': lambda: None,
})
def mock_empty(self, *args, **kwargs):
pass
def mock_get_network(self, traffic_port, vni_id, reverse=False):
if vni_id == 0:
return Network([tor_vni1, vsw_vni1, vsw_vif1], reverse)
else:
return Network([tor_vni2, vsw_vni2, vsw_vif2], reverse)
def mock_get_data(self):
return {}
monkeypatch.setattr(PVVPChain, '_get_network', mock_get_network)
monkeypatch.setattr(PVVPChain, '_get_data', mock_get_data)
monkeypatch.setattr(PVVPChain, '_setup', mock_empty)
monkeypatch.setattr(VxLANWorker, '_clear_interfaces', mock_empty)
monkeypatch.setattr(PVVPChain, '_generate_traffic', mock_empty)
monkeypatch.setattr(PVVPChain, '__init__', mock_init)
return PVVPChain(None, None, {'vm': None, 'vpp': None, 'tor': None, 'traffic': None}, None)
def test_pvvp_chain_run(pvvp_chain):
result = pvvp_chain.run()
expected_result = {
'raw_data': {},
'stats': None,
'packet_analysis':
{'direction-forward': [
OrderedDict([
('interface', 'vni-4097'),
('device', 'n9k'),
('packet_count', 50)
]),
OrderedDict([
('interface', 'vxlan_tunnel0'),
('device', 'vpp'),
('packet_count', 48),
('packet_drop_count', 2),
('packet_drop_percentage', 4.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/2'),
('device', 'vpp'),
('packet_count', 48),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/0'),
('device', 'vpp'),
('packet_count', 47),
('packet_drop_count', 1),
('packet_drop_percentage', 2.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/1'),
('device', 'vpp'),
('packet_count', 45),
('packet_drop_count', 2),
('packet_drop_percentage', 4.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/3'),
('device', 'vpp'),
('packet_count', 44),
('packet_drop_count', 1),
('packet_drop_percentage', 2.0)
]),
OrderedDict([
('interface', 'vxlan_tunnel1'),
('device', 'vpp'),
('packet_count', 43),
('packet_drop_count', 1),
('packet_drop_percentage', 2.0)
]),
OrderedDict([
('interface', 'vni-4098'),
('device', 'n9k'),
('packet_count', 40),
('packet_drop_count', 3),
('packet_drop_percentage', 6.0)
])
],
'direction-reverse': [
OrderedDict([
('interface', 'vni-4098'),
('device', 'n9k'),
('packet_count', 77)
]),
OrderedDict([
('interface', 'vxlan_tunnel1'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/3'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/1'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/0'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'VirtualEthernet0/0/2'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'vxlan_tunnel0'),
('device', 'vpp'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
]),
OrderedDict([
('interface', 'vni-4097'),
('device', 'n9k'),
('packet_count', 77),
('packet_drop_count', 0),
('packet_drop_percentage', 0.0)
])
]}
}
assert result == expected_result
"""
# =========================================================================
# Traffic client tests
# =========================================================================
def test_parse_rate_str():
parse_rate_str = traffic_utils.parse_rate_str
try:
assert parse_rate_str('100%') == {'rate_percent': '100.0'}
assert parse_rate_str('37.5%') == {'rate_percent': '37.5'}
assert parse_rate_str('100%') == {'rate_percent': '100.0'}
assert parse_rate_str('60pps') == {'rate_pps': '60'}
assert parse_rate_str('60kpps') == {'rate_pps': '60000'}
assert parse_rate_str('6Mpps') == {'rate_pps': '6000000'}
assert parse_rate_str('6gpps') == {'rate_pps': '6000000000'}
assert parse_rate_str('80bps') == {'rate_bps': '80'}
assert parse_rate_str('80bps') == {'rate_bps': '80'}
assert parse_rate_str('80kbps') == {'rate_bps': '80000'}
assert parse_rate_str('80kBps') == {'rate_bps': '640000'}
assert parse_rate_str('80Mbps') == {'rate_bps': '80000000'}
assert parse_rate_str('80 MBps') == {'rate_bps': '640000000'}
assert parse_rate_str('80Gbps') == {'rate_bps': '80000000000'}
    except Exception as exc:
        assert False, str(exc)
    def should_raise_error(rate_str):
        try:
            parse_rate_str(rate_str)
        except Exception:
            return True
        else:
            assert False, 'parse_rate_str did not raise for %s' % rate_str
assert should_raise_error('101')
assert should_raise_error('201%')
assert should_raise_error('10Kbps')
assert should_raise_error('0kbps')
assert should_raise_error('0pps')
assert should_raise_error('-1bps')
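# The conversions checked below are consistent with counting layer-1 Ethernet
# overhead: each frame carries an extra 20 bytes on the wire (7B preamble +
# 1B start-of-frame delimiter + 12B inter-frame gap), so the helpers should
# satisfy pps = bps / ((l2_frame_size + 20) * 8). Worked example (added as an
# illustration, not part of the original suite):
#   500000 / ((64 + 20) * 8) = 500000 / 672 ~= 744.0476 pps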
def test_rate_conversion():
assert traffic_utils.load_to_bps(50, 10000000000) == pytest.approx(5000000000.0)
assert traffic_utils.load_to_bps(37, 10000000000) == pytest.approx(3700000000.0)
assert traffic_utils.load_to_bps(100, 10000000000) == pytest.approx(10000000000.0)
assert traffic_utils.bps_to_load(5000000000.0, 10000000000) == pytest.approx(50.0)
assert traffic_utils.bps_to_load(3700000000.0, 10000000000) == pytest.approx(37.0)
assert traffic_utils.bps_to_load(10000000000.0, 10000000000) == pytest.approx(100.0)
assert traffic_utils.bps_to_pps(500000, 64) == pytest.approx(744.047619048)
assert traffic_utils.bps_to_pps(388888, 1518) == pytest.approx(31.6066319896)
assert traffic_utils.bps_to_pps(9298322222, 340.3) == pytest.approx(3225895.85831)
assert traffic_utils.pps_to_bps(744.047619048, 64) == pytest.approx(500000)
assert traffic_utils.pps_to_bps(31.6066319896, 1518) == pytest.approx(388888)
assert traffic_utils.pps_to_bps(3225895.85831, 340.3) == pytest.approx(9298322222)
"""
@pytest.fixture
def traffic_client(monkeypatch):
def mock_init(self, *args, **kwargs):
self.run_config = {
'bidirectional': False,
'l2frame_size': '64',
'duration_sec': 30,
'rates': [{'rate_percent': '10'}, {'rate_pps': '1'}]
}
self.config = AttrDict({
'generator_config': {
'intf_speed': 10000000000
},
'ndr_run': True,
'pdr_run': True,
'single_run': False,
'attempts': 1,
'measurement': {
'NDR': 0.0,
'PDR': 0.1,
'load_epsilon': 0.1
}
})
self.runner = AttrDict({
'time_elapsed': lambda: 30,
'stop': lambda: None,
'client': AttrDict({'get_stats': lambda: None})
})
self.current_load = None
self.dummy_stats = {
50.0: 72.6433562831,
25.0: 45.6095059858,
12.5: 0.0,
18.75: 27.218642979,
15.625: 12.68585861,
14.0625: 2.47154392563,
13.28125: 0.000663797066801,
12.890625: 0.0,
13.0859375: 0.0,
13.18359375: 0.00359387347122,
13.671875: 0.307939922531,
13.4765625: 0.0207718516156,
13.57421875: 0.0661795060969
}
def mock_modify_load(self, load):
self.run_config['rates'][0] = {'rate_percent': str(load)}
self.current_load = load
def mock_run_traffic(self):
yield {
'overall': {
'drop_rate_percent': self.dummy_stats[self.current_load],
'rx': {
'total_pkts': 1,
'avg_delay_usec': 0.0,
'max_delay_usec': 0.0,
'min_delay_usec': 0.0
}
}
}
monkeypatch.setattr(TrafficClient, '__init__', mock_init)
monkeypatch.setattr(TrafficClient, 'modify_load', mock_modify_load)
monkeypatch.setattr(TrafficClient, 'run_traffic', mock_run_traffic)
return TrafficClient()
def test_ndr_pdr_search(traffic_client):
expected_results = {
'pdr': {
'l2frame_size': '64',
'initial_rate_type': 'rate_percent',
'stats': {
'overall': {
'drop_rate_percent': 0.0661795060969,
'min_delay_usec': 0.0,
'avg_delay_usec': 0.0,
'max_delay_usec': 0.0
}
},
'load_percent_per_direction': 13.57421875,
'rate_percent': 13.57422547,
'rate_bps': 1357422547.0,
'rate_pps': 2019974.0282738095,
'duration_sec': 30
},
'ndr': {
'l2frame_size': '64',
'initial_rate_type': 'rate_percent',
'stats': {
'overall': {
'drop_rate_percent': 0.0,
'min_delay_usec': 0.0,
'avg_delay_usec': 0.0,
'max_delay_usec': 0.0
}
},
'load_percent_per_direction': 13.0859375,
'rate_percent': 13.08594422,
'rate_bps': 1308594422.0,
'rate_pps': 1947313.1279761905,
'duration_sec': 30
}
}
results = traffic_client.get_ndr_and_pdr()
assert len(results) == 2
for result in results.values():
result.pop('timestamp_sec')
result.pop('time_taken_sec')
assert results == expected_results
"""
# pylint: enable=pointless-string-statement
# =========================================================================
# Other tests
# =========================================================================
def setup_module(module):
nfvbench.log.setup(mute_stdout=True)
def test_no_credentials():
    cred = Credentials('/completely/wrong/path/openrc', None, False)
    # shouldn't get valid data unless user set environment variables
    assert not cred.rc_auth_url
# Because trex_stl_lib may not be installed when running unit tests,
# nfvbench.traffic_client will try to import STLError:
# from trex_stl_lib.api import STLError
# and raise ImportError: No module named trex_stl_lib.api
try:
import trex_stl_lib.api
assert trex_stl_lib.api
except ImportError:
# Make up a trex_stl_lib.api.STLError class
class STLError(Exception):
pass
from types import ModuleType
stl_lib_mod = ModuleType('trex_stl_lib')
sys.modules['trex_stl_lib'] = stl_lib_mod
api_mod = ModuleType('trex_stl_lib.api')
stl_lib_mod.api = api_mod
sys.modules['trex_stl_lib.api'] = api_mod
api_mod.STLError = STLError
# pylint: disable=wrong-import-position,ungrouped-imports
from nfvbench.traffic_client import Device
from nfvbench.traffic_client import IpBlock
# pylint: enable=wrong-import-position,ungrouped-imports
def test_ip_block():
ipb = IpBlock('10.0.0.0', '0.0.0.1', 256)
assert ipb.get_ip() == '10.0.0.0'
assert ipb.get_ip(255) == '10.0.0.255'
with pytest.raises(IndexError):
ipb.get_ip(256)
# verify with step larger than 1
ipb = IpBlock('10.0.0.0', '0.0.0.2', 256)
assert ipb.get_ip() == '10.0.0.0'
assert ipb.get_ip(1) == '10.0.0.2'
assert ipb.get_ip(128) == '10.0.1.0'
assert ipb.get_ip(255) == '10.0.1.254'
with pytest.raises(IndexError):
ipb.get_ip(256)
def check_config(configs, cc, fc, src_ip, dst_ip, step_ip):
'''Verify that the range configs for each chain have adjacent IP ranges
of the right size and without holes between chains
'''
step = Device.ip_to_int(step_ip)
cfc = 0
sip = Device.ip_to_int(src_ip)
dip = Device.ip_to_int(dst_ip)
for index in range(cc):
config = configs[index]
assert config['ip_src_count'] == config['ip_dst_count']
assert Device.ip_to_int(config['ip_src_addr']) == sip
assert Device.ip_to_int(config['ip_dst_addr']) == dip
count = config['ip_src_count']
cfc += count
sip += count * step
dip += count * step
assert cfc == fc
def create_device(fc, cc, ip, gip, tggip, step_ip):
return Device(0, 0, flow_count=fc, chain_count=cc, ip=ip, gateway_ip=gip, tg_gateway_ip=tggip,
ip_addrs_step=step_ip,
tg_gateway_ip_addrs_step=step_ip,
gateway_ip_addrs_step=step_ip)
def check_device_flow_config(step_ip):
fc = 99999
cc = 10
ip0 = '10.0.0.0'
ip1 = '20.0.0.0'
tggip = '50.0.0.0'
gip = '60.0.0.0'
dev0 = create_device(fc, cc, ip0, gip, tggip, step_ip)
dev1 = create_device(fc, cc, ip1, gip, tggip, step_ip)
dev0.set_destination(dev1)
configs = dev0.get_stream_configs(ChainType.EXT)
check_config(configs, cc, fc, ip0, ip1, step_ip)
def test_device_flow_config():
check_device_flow_config('0.0.0.1')
check_device_flow_config('0.0.0.2')
def test_device_ip_range():
def ip_range_overlaps(ip0, ip1, flows):
tggip = '50.0.0.0'
gip = '60.0.0.0'
dev0 = create_device(flows, 10, ip0, gip, tggip, '0.0.0.1')
dev1 = create_device(flows, 10, ip1, gip, tggip, '0.0.0.1')
dev0.set_destination(dev1)
return dev0.ip_range_overlaps()
assert not ip_range_overlaps('10.0.0.0', '20.0.0.0', 10000)
assert ip_range_overlaps('10.0.0.0', '10.0.1.0', 10000)
assert ip_range_overlaps('10.0.0.0', '10.0.1.0', 257)
assert ip_range_overlaps('10.0.1.0', '10.0.0.0', 257)
def test_config():
refcfg = {1: 100, 2: {21: 100, 22: 200}, 3: None}
res1 = {1: 10, 2: {21: 100, 22: 200}, 3: None}
res2 = {1: 100, 2: {21: 1000, 22: 200}, 3: None}
res3 = {1: 100, 2: {21: 100, 22: 200}, 3: "abc"}
assert config_loads("{}", refcfg) == refcfg
assert config_loads("{1: 10}", refcfg) == res1
assert config_loads("{2: {21: 1000}}", refcfg) == res2
assert config_loads('{3: "abc"}', refcfg) == res3
# correctly fails
# pairs of input string and expected subset (None if identical)
fail_pairs = [
["{4: 0}", None],
["{2: {21: 100, 30: 50}}", "{2: {30: 50}}"],
["{2: {0: 1, 1: 2}, 5: 5}", None],
["{1: 'abc', 2: {21: 0}}", "{1: 'abc'}"],
["{2: 100}", None]
]
for fail_pair in fail_pairs:
with pytest.raises(Exception) as e_info:
config_loads(fail_pair[0], refcfg)
expected = fail_pair[1]
if expected is None:
expected = fail_pair[0]
        assert expected in str(e_info.value)
# whitelist keys
flavor = {'flavor': {'vcpus': 2, 'ram': 8192, 'disk': 0,
'extra_specs': {'hw:cpu_policy': 'dedicated'}}}
new_flavor = {'flavor': {'vcpus': 2, 'ram': 8192, 'disk': 0,
'extra_specs': {'hw:cpu_policy': 'dedicated', 'hw:numa_nodes': 2}}}
assert config_loads("{'flavor': {'extra_specs': {'hw:numa_nodes': 2}}}", flavor,
whitelist_keys=['alpha', 'extra_specs']) == new_flavor
def test_fluentd():
logger = logging.getLogger('fluent-logger')
class FluentdConfig(dict):
def __getattr__(self, attr):
return self.get(attr)
fluentd_configs = [
FluentdConfig({
'logging_tag': 'nfvbench',
'result_tag': 'resultnfvbench',
'ip': '127.0.0.1',
'port': 7081
}),
FluentdConfig({
'logging_tag': 'nfvbench',
'result_tag': 'resultnfvbench',
'ip': '127.0.0.1',
'port': 24224
}),
FluentdConfig({
'logging_tag': None,
'result_tag': 'resultnfvbench',
'ip': '127.0.0.1',
'port': 7082
}),
FluentdConfig({
'logging_tag': 'nfvbench',
'result_tag': None,
'ip': '127.0.0.1',
'port': 7083
})
]
handler = FluentLogHandler(fluentd_configs=fluentd_configs)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('test')
logger.warning('test %d', 100)
try:
raise Exception("test")
except Exception:
logger.exception("got exception")
| 34.98556 | 98 | 0.52836 | 3,137 | 29,073 | 4.668154 | 0.167357 | 0.011609 | 0.030729 | 0.028681 | 0.509151 | 0.468929 | 0.42584 | 0.372166 | 0.357484 | 0.340686 | 0 | 0.084915 | 0.316651 | 29,073 | 830 | 99 | 35.027711 | 0.652187 | 0.072782 | 0 | 0.2 | 0 | 0 | 0.131651 | 0.002731 | 0 | 0 | 0 | 0 | 0.254902 | 1 | 0.094118 | false | 0.003922 | 0.07451 | 0.035294 | 0.219608 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257a8f3de4b6bc7e806d488851674359ff3825e1 | 5,141 | py | Python | tests/costnonlinear.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | 3 | 2021-08-28T01:42:40.000Z | 2021-12-02T22:39:45.000Z | tests/costnonlinear.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | null | null | null | tests/costnonlinear.py | rafaelrojasmiliani/gsplines | 663b10f6d53b498a1e892d9eb32a345153de36d2 | [
"MIT"
] | null | null | null | """
Test the cost function for problem 1010.
"""
import numpy as np
import sympy as sp
import quadpy
import unittest
from opttrj.costnonlinear import cCostNonLinear
from itertools import tee
class cMyCost(cCostNonLinear):
def runningCost(self, _t, _tauv, _u):
pass
def runningCostGradient(self, _t, _tauv, _u):
pass
class cMyTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(cMyTest, self).__init__(*args, **kwargs)
np.random.seed()
self.N_ = np.random.randint(2, 6)
self.dim_ = np.random.randint(2, 8)
self.wp_ = np.random.rand(self.N_ + 1, self.dim_)
self.T_ = 100.0
def testWaypoints(self):
wp = np.random.rand(self.N_ + 1, 2)
Ni = 10
Ngl = 10
cost = cMyCost(wp, self.T_, Ni, Ngl)
from matplotlib import pyplot as plt
plt.plot(wp[:, 0], wp[:, 1], 'b-')
plt.plot(cost.wp_[:, 0], cost.wp_[:, 1], 'ro')
plt.plot(wp[:, 0], wp[:, 1], 'b*')
plt.show()
def test_u2wp(self):
dim = 2
wp = np.random.rand(self.N_ + 1, dim)
Ni = np.random.randint(1, 10)
Ngl = 10
cost = cMyCost(wp, self.T_, Ni, Ngl)
u = np.zeros((cost.ushape_, ))
u = cost.wp2u(u)
U = u.reshape(-1, dim)
from matplotlib import pyplot as plt
plt.plot(wp[:, 0], wp[:, 1], 'b-')
plt.plot(U[:, 0], U[:, 1], 'ro')
plt.plot(wp[:, 0], wp[:, 1], 'b*')
plt.plot(cost.wp_[:, 0], cost.wp_[:, 1], 'g+')
plt.title('Ni = {:d}, N = {:d}'.format(Ni, self.N_))
plt.show()
plt.clf()
u += 0.01 * (np.random.rand(u.shape[0]) - 0.5)
u = cost.u2wp(u)
plt.plot(wp[:, 0], wp[:, 1], 'b-')
plt.plot(wp[:, 0], wp[:, 1], 'b*')
plt.plot(cost.wp_[:, 0], cost.wp_[:, 1], 'g+')
plt.title('Ni = {:d}, N = {:d}'.format(Ni, self.N_))
plt.show()
def test_uwpindexing(self):
Ni = 10
Ngl = 10
cost = cMyCost(self.wp_, self.T_, Ni, Ngl)
u = np.zeros((cost.ushape_, ))
u = cost.wp2u(u)
for ui, wipx_i in enumerate(cost.uToWp_):
e = abs(cost.wp_[wipx_i[0], wipx_i[1]] - u[ui])
assert e < 1.0e-10
u = np.random.rand(cost.ushape_)
cost.u2wp(u)
for ui, wipx_i in enumerate(cost.uToWp_):
e = abs(cost.wp_[wipx_i[0], wipx_i[1]] - u[ui])
assert e < 1.0e-10
def test_run_eval_grad(self):
Ni = 10
Ngl = 10
cost = myCost(self.wp_, self.T_, Ni, Ngl)
u = np.random.rand(cost.ushape_)
tauv = 0.5 + np.random.rand(cost.N_)
mygradient = np.vectorize(
lambda t, inter: cost.runningCostGradient(t, tauv, u),
signature='(),()->(n)')
grad = mygradient(0.0, 0.0)
assert grad.ndim == 1 and grad.shape[0] == cost.ushape_ + cost.N_
grad = mygradient([0, 1, 2], 0.0)
assert grad.ndim == 2 and grad.shape[1] == cost.ushape_ + \
cost.N_ and grad.shape[0] == 3
x = np.hstack([tauv, u])
res = cost(x)
assert np.isscalar(res)
res = cost.gradient(x)
assert res.ndim == 1 and res.shape[0] == cost.ushape_ + cost.N_
def testdomain2window(self):
Ni = 10
Ngl = 10
cost = myCost(self.wp_, self.T_, Ni, Ngl)
tauv = 0.5 + np.random.rand(cost.N_)
t0 = 0.0
for iinter, taui in enumerate(tauv):
tf = t0 + taui
tarray = np.arange(t0, tf, 0.05)[1:]
for t in tarray:
s, taui2, iinter2 = cost.domain2window(t, tauv)
assert iinter2 == iinter and taui2 - taui < 1.0e-9, '''
Interval fro domain2window (Nominal) = {:d}
Interval fro iteration (Testing) = {:d}
size of tauv = {:d}
taui Nominal = {:14.7e}
taui Test = {:14.7e}
sNom = {:14.7e}
t = {:14.7e}
t0 = {:14.7e}
tf = {:14.7e}
'''.format(iinter2, iinter, tauv.shape[0], taui2, taui, s, t,
t0, tf)
t0 = tf
class myCost(cCostNonLinear):
def __init__(self, _wp, _T, _Ni, _Ngl):
super().__init__(_wp, _T, _Ni, _Ngl)
        self.running_cost_gradient_buff = np.zeros((self.ushape_ + self.N_, ))
def runningCost(self, _t, _tauv, _u, _y=None, _inter=None):
return 0.0
def runningCostGradient(self,
_t,
_tauv,
_u,
_y=None,
_inter=None,
_dydtau=None,
_dydu=None):
        return self.running_cost_gradient_buff
def main():
unittest.main()
if __name__ == '__main__':
main()
| 30.60119 | 78 | 0.471309 | 666 | 5,141 | 3.474474 | 0.202703 | 0.041487 | 0.041487 | 0.025929 | 0.474935 | 0.434313 | 0.346586 | 0.337943 | 0.273552 | 0.267502 | 0 | 0.046482 | 0.380665 | 5,141 | 167 | 79 | 30.784431 | 0.680276 | 0.008559 | 0 | 0.341085 | 0 | 0 | 0.129204 | 0 | 0 | 0 | 0 | 0 | 0.054264 | 1 | 0.093023 | false | 0.015504 | 0.062016 | 0.015504 | 0.193798 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257a9b3e29b5b9199eb2a23693635e96b83f0500 | 737 | py | Python | src/server/BroadcastThread.py | spectacularGavin/rasp-sec-camera | 3730b7a93e9fd30bfffe9529ed990528a32c5a5c | [
"MIT"
] | null | null | null | src/server/BroadcastThread.py | spectacularGavin/rasp-sec-camera | 3730b7a93e9fd30bfffe9529ed990528a32c5a5c | [
"MIT"
] | null | null | null | src/server/BroadcastThread.py | spectacularGavin/rasp-sec-camera | 3730b7a93e9fd30bfffe9529ed990528a32c5a5c | [
"MIT"
] | null | null | null | from threading import Thread
from subprocess import Popen, PIPE
from wsgiref.simple_server import WSGIServer
class BroadcastThread(Thread):
def __init__(self, converter: Popen, websocket_server: WSGIServer):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
def run(self):
print('in BroadcastThread')
try:
while True:
                # Read the next available chunk of the converter's stdout
                # (up to 32 KiB) and broadcast it to every websocket client.
                buf = self.converter.stdout.read1(32768)
                if buf:
                    self.websocket_server.manager.broadcast(buf, binary=True)
                elif self.converter.poll() is not None:
                    # The converter process exited and its output is drained.
                    break
finally:
self.converter.stdout.close() | 35.095238 | 77 | 0.625509 | 76 | 737 | 5.894737 | 0.539474 | 0.145089 | 0.075893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011605 | 0.298507 | 737 | 21 | 78 | 35.095238 | 0.854932 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257bddd8105c20a3bf215b1b22c1c4992b667b8c | 5,630 | py | Python | manga_py/providers/mangadex_org_v2.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | 337 | 2019-08-27T16:14:50.000Z | 2022-03-29T09:58:22.000Z | manga_py/providers/mangadex_org_v2.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | 225 | 2019-08-25T15:02:01.000Z | 2022-03-31T06:36:09.000Z | manga_py/providers/mangadex_org_v2.py | Abijithkrishna/manga-py | 03b142ecb944ef37a36e5095ffa580209021e3b0 | [
"MIT"
] | 41 | 2019-10-04T13:28:02.000Z | 2022-03-19T08:18:34.000Z | import re
from manga_py.provider import Provider
from .helpers.std import Std
from html import escape
class MangaDexOrg(Provider, Std):
__content = None
__chapters = None
__languages = None
__countries = {
'': 'Other',
'bd': 'Bengali',
'bg': 'Bulgarian',
'br': 'Portuguese (Br)',
'cn': 'Chinese (Simp)',
'ct': 'Catalan',
'cz': 'Czech',
'de': 'German',
'dk': 'Danish',
'es': 'Spanish (Es)',
'fi': 'Finnish',
'fr': 'French',
'gb': 'English',
'gr': 'Greek',
'hk': 'Chinese (Trad)',
'hu': 'Hungarian',
'id': 'Indonesian',
'il': 'Hebrew',
'in': 'Hindi',
'ir': 'Persian',
'it': 'Italian',
'jp': 'Japanese',
'kr': 'Korean',
'lt': 'Lithuanian',
'mm': 'Burmese',
'mn': 'Mongolian',
'mx': 'Spanish (LATAM)',
'my': 'Malay',
'nl': 'Dutch',
'no': 'Norwegian',
'ph': 'Filipino',
'pl': 'Polish',
'pt': 'Portuguese (Pt)',
'ro': 'Romanian',
'rs': 'Serbo-Croatian',
'ru': 'Russian',
'sa': 'Arabic',
'se': 'Swedish',
'th': 'Thai',
'tr': 'Turkish',
'ua': 'Ukrainian',
'vn': 'Vietnamese',
}
def _get(self, part):
return self.http().requests('{}/api/v2/{}'.format(
self.domain,
part.format(self.manga_idx())),
).json()
def get_archive_name(self) -> str:
prev = super().get_archive_name()
code = self.chapter['language']
return '{}-{}'.format(prev, self.__countries.get(code, 'Other'))
def get_chapter_index(self) -> str:
return self.chapter['chapter'].replace('.', '-')
def manga_idx(self):
return self.re.search(r'/(?:manga|title)/(\d+)', self.get_url()).group(1)
def get_content(self):
return 'nope'
def get_manga_name(self) -> str:
self.__content = self._get('manga/{}').get('data', {})
return self.__content.get('title')
def get_chapters(self):
_ch = self._chapters
if len(self._languages) > 1:
languages = self._quest_languages()
_ch = self.filter_chapters(_ch, languages)
translator = self.arg('translator')
if translator is not None:
_ch = self.filter_chapters_translator(_ch, translator)
return _ch
def get_files(self):
content = self._get(f'chapter/{self.chapter["hash"]}').get('data', {})
server = content['server']
_hash = content['hash']
return [f'{server}{_hash}/{img}' for img in content['pages']]
def get_cover(self) -> str:
return self.content['mainCover']
def chapter_for_json(self) -> str:
return '{}-{}'.format(self.chapter['volume'] or '0', self.chapter['chapter'])
@property
def _chapters(self):
if self.__chapters is None:
self.__chapters = self._get('manga/{}/chapters').get('data', {})
return self.__chapters.get('chapters', [])
def _quest_languages(self):
arg_language = self.arg('language')
if arg_language is None:
languages = self.quest(
[],
'Available languages:\n{}\n\n'
                'Please select your language (leave empty for all; separate multiple with commas):'.format(
'\n'.join(self._languages)
))
else:
languages = arg_language
return list([lng.strip() for lng in languages.split(',')])
@property
def _languages(self) -> list:
if self.__languages is None:
self.__languages = list(set([ch['language'] for ch in self._chapters]))
return self.__languages
def filter_chapters(self, chapters, languages: list) -> list:
if len(languages) == 0 or languages[0] == '':
return chapters
return [chapter for chapter in chapters if chapter['language'] in languages]
def filter_chapters_translator(self, chapters, translator: str) -> list:
enc_translator = escape(translator)
return [chapter for chapter in chapters if len(set(self._translators(chapter)) & {enc_translator}) > 0]
def _translators(self, chapter):
groups = self.__chapters.get('groups', [])
return [g['name'] for g in groups if g['id'] in chapter['groups']]
# region specified data for eduhoribe/comic-builder
def chapter_details(self, chapter) -> dict:
return {
'chapter': chapter['chapter'],
'volume': chapter['volume'],
'title': chapter['title'],
'language': chapter['language'],
'publisher': 'See "publishers"',
'publishers': self._translators(chapter)
}
@staticmethod
def _flat_array(arg):
if arg is None:
return ['']
if type(arg) == list:
return arg
if type(arg) == str:
return [arg]
raise TypeError('Unknown type!')
def manga_details(self):
author = self._flat_array(self.__content.get('author', ''))
artist = self._flat_array(self.__content.get('artist', ''))
return {
'id': self.manga_idx(),
'title': self.__content['title'],
'description': self.__content['description'],
'authors': [author for author in {*author, *artist} if author != ''],
'sauce': self.original_url,
'covers': {'main': self.__content.get('mainCover')}
}
# endregion
main = MangaDexOrg
| 30.934066 | 111 | 0.539076 | 596 | 5,630 | 4.921141 | 0.338926 | 0.033754 | 0.019093 | 0.011592 | 0.042278 | 0.042278 | 0.023866 | 0 | 0 | 0 | 0 | 0.001774 | 0.298934 | 5,630 | 181 | 112 | 31.104972 | 0.741323 | 0.01048 | 0 | 0.026667 | 0 | 0 | 0.172773 | 0.013111 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126667 | false | 0 | 0.026667 | 0.046667 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257bfa0c3f881287a5a522f4b4310e46894a8022 | 1,494 | py | Python | setup.py | VerseGroup/vg-em | 3c9a93aa9c3c3cba2bace9259bab8b2668b54069 | [
"MIT"
] | 2 | 2022-01-13T18:33:25.000Z | 2022-01-13T18:34:56.000Z | setup.py | VerseGroup/EM-python | 3c9a93aa9c3c3cba2bace9259bab8b2668b54069 | [
"MIT"
] | null | null | null | setup.py | VerseGroup/EM-python | 3c9a93aa9c3c3cba2bace9259bab8b2668b54069 | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
VERSION = "1.3.0"
DESCRIPTION="VerseGroups encryption manager class (RSA and Fernet wrapped AES sessions through RSA) for secure transmission of data. Also includes utilities such as hashing, salting and base64 encoding."
KEYWORDS=['RSA', 'FERNET', 'hash', 'vgem', 'Encryption Manager', 'Encryption', 'Verse Group']
setup(
name="vgem",
version=VERSION,
description=DESCRIPTION,
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/VerseGroup/vgem-python",
author="VERSEGROUPLLC",
author_email="officialversegroupllc@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
],
include_package_data=True,
python_requires='>=3.6',
install_requires=["cryptography", "pycparser", "cffi"],
packages=find_packages(exclude=("tests",)),
keywords=KEYWORDS
) | 38.307692 | 203 | 0.663989 | 161 | 1,494 | 6.062112 | 0.608696 | 0.076844 | 0.102459 | 0.106557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012521 | 0.198126 | 1,494 | 39 | 204 | 38.307692 | 0.80217 | 0 | 0 | 0 | 0 | 0.027778 | 0.520401 | 0.020736 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257cd5d4b4e8ff0ea36956f5af38747aff84e4d6 | 24,314 | py | Python | passl/modeling/backbones/discrete_vae.py | lmk123568/PASSL | a4974a665b164b71831b38b5bd8b849615a17f12 | [
"Apache-2.0"
] | null | null | null | passl/modeling/backbones/discrete_vae.py | lmk123568/PASSL | a4974a665b164b71831b38b5bd8b849615a17f12 | [
"Apache-2.0"
] | null | null | null | passl/modeling/backbones/discrete_vae.py | lmk123568/PASSL | a4974a665b164b71831b38b5bd8b849615a17f12 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on OpenAI DALL-E and lucidrains' DALLE-pytorch code bases
# https://github.com/openai/DALL-E
# https://github.com/lucidrains/DALLE-pytorch
import os
#import wget
import paddle
import paddle.nn as nn
#
#logit_laplace_eps = 0.1
#
#
#def map_pixels(x):
# return (1 - 2 * logit_laplace_eps) * x + logit_laplace_eps
#
#
#def unmap_pixels(x):
# return paddle.clip((x - logit_laplace_eps) / (1 - 2 * logit_laplace_eps), 0, 1)
#
#
#
#class Identity(nn.Layer):
# def __init__(self):
# super(Identity, self).__init__()
#
# def forward(self, inputs):
# return inputs
#
#
class EncoderBlock(nn.Layer):
def __init__(self, n_in, n_out, n_layers):
super(EncoderBlock, self).__init__()
n_hid = n_out // 4
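        # 1 / n_layers**2 residual scaling as in OpenAI's DALL-E dVAE; the
        # rationale (keeping deep residual stacks stable) is assumed from the
        # upstream code bases cited in this file's header.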
self.post_gain = 1 / (n_layers**2)
self.id_path = nn.Conv2D(n_in, n_out,
1) if n_in != n_out else Identity()
self.res_path = nn.Sequential(
('relu_1', nn.ReLU()),
('conv_1', nn.Conv2D(n_in, n_hid, 3, padding=1)),
('relu_2', nn.ReLU()),
('conv_2', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_3', nn.ReLU()),
('conv_3', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_4', nn.ReLU()), ('conv_4', nn.Conv2D(n_hid, n_out, 1)))
def forward(self, x):
return self.id_path(x) + self.post_gain * self.res_path(x)
class Encoder(nn.Layer):
def __init__(self,
group_count=4,
n_hid=256,
n_blk_per_group=2,
input_channels=3,
vocab_size=8192):
super(Encoder, self).__init__()
self.vocab_size = vocab_size
blk_range = range(n_blk_per_group)
n_layers = group_count * n_blk_per_group
self.blocks = nn.Sequential(
('input', nn.Conv2D(input_channels, 1 * n_hid, 7, padding=3)),
('group_1',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(1 * n_hid, 1 * n_hid, n_layers=n_layers))
for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_2',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(1 * n_hid if i == 0 else 2 * n_hid,
2 * n_hid,
n_layers=n_layers)) for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_3',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(2 * n_hid if i == 0 else 4 * n_hid,
4 * n_hid,
n_layers=n_layers)) for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_4',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(4 * n_hid if i == 0 else 8 * n_hid,
8 * n_hid,
n_layers=n_layers)) for i in blk_range], )),
('output',
nn.Sequential(
('relu', nn.ReLU()),
('conv', nn.Conv2D(8 * n_hid, vocab_size, 1)),
)),
)
def forward(self, x):
return self.blocks(x)
class DecoderBlock(nn.Layer):
def __init__(self, n_in, n_out, n_layers):
super(DecoderBlock, self).__init__()
n_hid = n_out // 4
self.post_gain = 1 / (n_layers**2)
self.id_path = nn.Conv2D(n_in, n_out,
1) if n_in != n_out else Identity()
self.res_path = nn.Sequential(
('relu_1', nn.ReLU()), ('conv_1', nn.Conv2D(n_in, n_hid, 1)),
('relu_2', nn.ReLU()),
('conv_2', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_3', nn.ReLU()),
('conv_3', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_4', nn.ReLU()),
('conv_4', nn.Conv2D(n_hid, n_out, 3, padding=1)))
def forward(self, x):
return self.id_path(x) + self.post_gain * self.res_path(x)
class Decoder(nn.Layer):
def __init__(self,
group_count=4,
n_init=128,
n_hid=256,
n_blk_per_group=2,
output_channels=3,
vocab_size=8192):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
blk_range = range(n_blk_per_group)
n_layers = group_count * n_blk_per_group
self.blocks = nn.Sequential(
('input', nn.Conv2D(vocab_size, n_init, 1)),
('group_1',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(n_init if i == 0 else 8 * n_hid,
8 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_2',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(8 * n_hid if i == 0 else 4 * n_hid,
4 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_3',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(4 * n_hid if i == 0 else 2 * n_hid,
2 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_4',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(2 * n_hid if i == 0 else 1 * n_hid,
1 * n_hid,
n_layers=n_layers)) for i in blk_range], )),
('output',
nn.Sequential(
('relu', nn.ReLU()),
('conv', nn.Conv2D(1 * n_hid, 2 * output_channels, 1)),
)),
)
def forward(self, x):
return self.blocks(x)
model_dict = {
'encoder': [
'Encoder',
r'https://passl.bj.bcebos.com/vision_transformers/beit/encoder.pdparams',
'encoder.pdparams'
],
'decoder': [
'Decoder',
r'https://passl.bj.bcebos.com/vision_transformers/beit/decoder.pdparams',
'decoder.pdparams'
]
}
def load_model(model_name, model_dir):
model_fn, url, file_name = model_dict[model_name]
model = eval(model_fn)()
model_path = os.path.join(model_dir, file_name)
if not os.path.exists(model_path):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
#wget.download(url, out=model_path)
params = paddle.load(model_path)
model.set_state_dict(params)
model.eval()
return model
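# Illustrative call (the cache directory name is an assumption): weights are
# read from model_dir and must be placed there manually, since the wget
# download line above is commented out.
#   encoder = load_model('encoder', 'pretrained_models')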
#
#
#class DalleVAE(nn.Layer):
# def __init__(self, group_count=4, n_init=128, n_hid=256, n_blk_per_group=2, input_channels=3, output_channels=3, vocab_size=8192):
# super(DiscreteVAE, self).__init__()
# self.vocab_size = vocab_size
# self.encoder = Encoder()
# self.decoder = Decoder()
# self.l1_loss = paddle.nn.loss.L1Loss(reduction='none')
#
# def encode(self, x):
# return self.encoder(x)
#
# def decode(self, z):
# return self.decoder(z)
#
#
# def logit_laplace_loss(self, x, x_stats):
# ## x [ B, 3, 256, 256 ]
# ## x_stats [ B, 6, 256, 256 ]
# # mu
# mu = x_stats[:,:3]
# #
# lnb = x_stats[:,3:]
# log_norm = -paddle.log(x * (1 - x)) - lnb - paddle.log(paddle.to_tensor(2.0))
# #print("log_norm", log_norm)
# log_compare = -self.l1_loss(paddle.log(x/(1-x)), mu) / paddle.exp(lnb)
# #print("log_compare", log_compare)
# return -(log_norm+log_compare)
#
# def gumbel_softmax(self, z_logits, temperature):
#
# def sample_gumbel(shape, eps=1e-20):
# U = paddle.fluid.layers.uniform_random(shape,min=0,max=1)
# return -paddle.log(-paddle.log(U + eps) + eps)
#
# def gumbel_softmax_sample(logits, temperature):
# y = logits + sample_gumbel(logits.shape)
# return nn.functional.softmax( y / temperature, axis=1)
#
# return gumbel_softmax_sample(z_logits, temperature)
#
#
# def forward(self, x, temperature):
# # [B, vocab_size, 32, 32]
# z_logits = self.encoder(x)
# q_y = nn.functional.softmax(z_logits, axis=1)
# log_q_y = paddle.log(q_y+1e-20)
# kl_loss = q_y*(log_q_y-paddle.log(paddle.to_tensor(1.0/self.vocab_size)))
# # to [B, 32, 32]
# kl_loss = paddle.sum(kl_loss, axis=[1])
# # to [B]
# kl_loss = paddle.mean(kl_loss, axis=[1,2])
# #print(kl_loss)
#
# z = self.gumbel_softmax(z_logits, temperature)
# x_stats = self.decoder(z)
# recon_loss = self.logit_laplace_loss(x, x_stats)
# recon_loss = paddle.mean(recon_loss, axis=[1, 2, 3])
# #print(recon_loss)
#
# return recon_loss, kl_loss
#
#
#
#def load_model(model_name, pretrained=False):
# model_fn, url, file_name = model_dict[model_name]
# model = model_fn()
#
# if pretrained:
# model_path = os.path.join('pretrained_models', file_name)
# if not os.path.isfile(model_path):
# if not os.path.exists('pretrained_models'):
# os.mkdir('pretrained_models')
# wget.download(url, out=model_path)
# params = paddle.load(model_path)
# model.set_dict(params)
#
# model.eval()
# return model
from math import sqrt
import os
import paddle
from paddle import nn, einsum
import paddle.nn.functional as F
from einops import rearrange
from .builder import BACKBONES
def top_k(logits, thres=0.5):
    num_logits = logits.shape[-1]
    k = max(int((1 - thres) * num_logits), 1)
    val, ind = paddle.topk(logits, k)
    probs = paddle.full_like(logits, float('-inf'))
    # Paddle has no in-place torch-style Tensor.scatter_; put_along_axis
    # writes the kept top-k values back at their original indices.
    probs = paddle.put_along_axis(probs, ind, val, 1)
    return probs
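# Minimal usage sketch (an assumption for illustration; nothing in this module
# calls top_k itself -- it is the usual logit-filtering step before sampling
# image tokens in DALL-E-style pipelines):
#   logits = paddle.randn([1, 8192])
#   filtered = top_k(logits, thres=0.9)  # ~10% highest logits survive,
#                                        # the rest become -inf before softmax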
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def eval_decorator(fn):
    def inner(model, *args, **kwargs):
        was_training = model.training
        model.eval()
        out = fn(model, *args, **kwargs)
        # paddle.nn.Layer.train() takes no mode argument (unlike torch), so
        # only switch back to training mode if that was the previous state.
        if was_training:
            model.train()
        return out
    return inner
class BasicVAE(nn.Layer):
def get_codebook_indices(self, images):
raise NotImplementedError()
def decode(self, img_seq):
raise NotImplementedError()
def get_codebook_probs(self, img_seq):
raise NotImplementedError()
def get_image_tokens_size(self):
pass
def get_image_size(self):
pass
class ResBlock(nn.Layer):
def __init__(self, chan_in, hidden_size, chan_out):
super().__init__()
self.net = nn.Sequential(
nn.Conv2D(chan_in, hidden_size, 3, padding=1), nn.ReLU(),
nn.Conv2D(hidden_size, hidden_size, 3, padding=1), nn.ReLU(),
nn.Conv2D(hidden_size, chan_out, 1))
def forward(self, x):
return self.net(x) + x
@BACKBONES.register()
class DiscreteVAE(BasicVAE):
def __init__(self,
image_size=256,
num_tokens=512,
codebook_dim=512,
num_layers=3,
hidden_dim=64,
channels=3,
smooth_l1_loss=False,
temperature=0.9,
straight_through=False,
kl_div_loss_weight=0.):
super().__init__()
# assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
enc_layers = []
dec_layers = []
enc_in = channels
dec_in = codebook_dim
for layer_id in range(num_layers):
enc_layers.append(
nn.Sequential(
nn.Conv2D(enc_in, hidden_dim, 4, stride=2, padding=1),
nn.ReLU()))
enc_layers.append(
ResBlock(chan_in=hidden_dim,
hidden_size=hidden_dim,
chan_out=hidden_dim))
enc_in = hidden_dim
dec_layers.append(
nn.Sequential(
                    nn.Conv2DTranspose(dec_in,
                                       hidden_dim,
                                       4,
                                       stride=2,
                                       padding=1), nn.ReLU()))
dec_layers.append(
ResBlock(chan_in=hidden_dim,
hidden_size=hidden_dim,
chan_out=hidden_dim))
dec_in = hidden_dim
enc_layers.append(nn.Conv2D(hidden_dim, num_tokens, 1))
dec_layers.append(nn.Conv2D(hidden_dim, channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
def get_image_size(self):
return self.image_size
def get_image_tokens_size(self):
return self.image_size // 8
@paddle.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self.forward(images, return_logits=True)
        codebook_indices = logits.argmax(axis=1)
return codebook_indices
@paddle.no_grad()
@eval_decorator
def get_codebook_probs(self, images):
logits = self.forward(images, return_logits=True)
        return nn.Softmax(axis=1)(logits)
def decode(self, img_seq):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h=h, w=w)
images = self.decoder(image_embeds)
return images
def forward(self,
img,
return_loss=False,
return_recons=False,
return_logits=False,
temp=None):
        # paddle tensors carry a .place rather than torch's .device; nothing
        # below needs an explicit placement, so only the config values remain.
        num_tokens, image_size, kl_div_loss_weight = self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[
-2] == image_size, f'input must have the correct image size {image_size}'
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
        soft_one_hot = F.gumbel_softmax(logits,
                                        temperature=temp,
                                        hard=self.straight_through,
                                        axis=1)
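        # With hard=True this acts as a straight-through estimator: the
        # forward pass uses a one-hot sample while gradients flow through the
        # soft relaxation, keeping the codebook lookup below differentiable.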
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot,
self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
        qy = F.softmax(logits, axis=-1)
log_qy = paddle.log(qy + 1e-10)
        log_uniform = paddle.log(paddle.to_tensor([1. / num_tokens]))
        # Paddle's F.kl_div(input, label) computes label * (log(label) - input)
        # with input given in log-space, which matches the torch
        # kl_div(..., log_target=True) form used by DALLE-pytorch:
        # KL(q(y) || uniform), averaged over the batch.
        kl_div = F.kl_div(log_uniform.expand_as(qy),
                          qy,
                          reduction='batchmean')
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
@BACKBONES.register()
class Dalle_VAE(BasicVAE):
def __init__(self, image_size):
super().__init__()
self.encoder = Encoder()
self.decoder = Decoder()
self.image_size = image_size
    def decode(self, img_seq):
        bsz = img_seq.shape[0]
        img_seq = img_seq.reshape([bsz, self.image_size // 8, self.image_size // 8])
        # one-hot tokens -> [B, vocab_size, H/8, W/8] float map for the decoder
        z = F.one_hot(img_seq, num_classes=self.encoder.vocab_size)
        z = z.transpose([0, 3, 1, 2]).astype('float32')
        return self.decoder(z)
def get_codebook_indices(self, images):
z_logits = self.encoder(images)
return paddle.argmax(z_logits, axis=1)
def get_codebook_probs(self, images):
z_logits = self.encoder(images)
return nn.Softmax(dim=1)(z_logits)
    def forward(self, img_seq_prob, no_process=False):
        if no_process:
            return self.decoder(img_seq_prob.astype('float32'))
        else:
            bsz, seq_len, num_class = img_seq_prob.shape
            z = img_seq_prob.reshape([bsz, self.image_size // 8,
                                      self.image_size // 8,
                                      self.encoder.vocab_size])
            return self.decoder(z.transpose([0, 3, 1, 2]).astype('float32'))
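# Minimal usage sketch (an assumption for illustration; pretrained weights for
# Encoder/Decoder would be loaded separately, e.g. with the load_model helper
# defined earlier in this module):
#   vae = Dalle_VAE(image_size=224)
#   tokens = vae.get_codebook_indices(images)  # images: [B, 3, 224, 224]
#   recon = vae.decode(tokens)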
class Identity(nn.Layer):
def __init__(self):
super(Identity, self).__init__()
def forward(self, inputs):
return inputs
class EncoderBlock(nn.Layer):
def __init__(self, n_in, n_out, n_layers):
super(EncoderBlock, self).__init__()
n_hid = n_out // 4
self.post_gain = 1 / (n_layers**2)
self.id_path = nn.Conv2D(n_in, n_out,
1) if n_in != n_out else Identity()
self.res_path = nn.Sequential(
('relu_1', nn.ReLU()),
('conv_1', nn.Conv2D(n_in, n_hid, 3, padding=1)),
('relu_2', nn.ReLU()),
('conv_2', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_3', nn.ReLU()),
('conv_3', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_4', nn.ReLU()), ('conv_4', nn.Conv2D(n_hid, n_out, 1)))
def forward(self, x):
return self.id_path(x) + self.post_gain * self.res_path(x)
class Encoder(nn.Layer):
def __init__(self,
group_count=4,
n_hid=256,
n_blk_per_group=2,
input_channels=3,
vocab_size=8192):
super(Encoder, self).__init__()
self.vocab_size = vocab_size
blk_range = range(n_blk_per_group)
n_layers = group_count * n_blk_per_group
self.blocks = nn.Sequential(
('input', nn.Conv2D(input_channels, 1 * n_hid, 7, padding=3)),
('group_1',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(1 * n_hid, 1 * n_hid, n_layers=n_layers))
for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_2',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(1 * n_hid if i == 0 else 2 * n_hid,
2 * n_hid,
n_layers=n_layers)) for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_3',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(2 * n_hid if i == 0 else 4 * n_hid,
4 * n_hid,
n_layers=n_layers)) for i in blk_range],
('pool', nn.MaxPool2D(kernel_size=2)),
)),
('group_4',
nn.Sequential(
*[(f'block_{i + 1}',
EncoderBlock(4 * n_hid if i == 0 else 8 * n_hid,
8 * n_hid,
n_layers=n_layers)) for i in blk_range], )),
('output',
nn.Sequential(
('relu', nn.ReLU()),
('conv', nn.Conv2D(8 * n_hid, vocab_size, 1)),
)),
)
def forward(self, x):
return self.blocks(x)
class DecoderBlock(nn.Layer):
def __init__(self, n_in, n_out, n_layers):
super(DecoderBlock, self).__init__()
n_hid = n_out // 4
self.post_gain = 1 / (n_layers**2)
self.id_path = nn.Conv2D(n_in, n_out,
1) if n_in != n_out else Identity()
self.res_path = nn.Sequential(
('relu_1', nn.ReLU()), ('conv_1', nn.Conv2D(n_in, n_hid, 1)),
('relu_2', nn.ReLU()),
('conv_2', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_3', nn.ReLU()),
('conv_3', nn.Conv2D(n_hid, n_hid, 3, padding=1)),
('relu_4', nn.ReLU()),
('conv_4', nn.Conv2D(n_hid, n_out, 3, padding=1)))
def forward(self, x):
return self.id_path(x) + self.post_gain * self.res_path(x)
class Decoder(nn.Layer):
def __init__(self,
group_count=4,
n_init=128,
n_hid=256,
n_blk_per_group=2,
output_channels=3,
vocab_size=8192):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
blk_range = range(n_blk_per_group)
n_layers = group_count * n_blk_per_group
self.blocks = nn.Sequential(
('input', nn.Conv2D(vocab_size, n_init, 1)),
('group_1',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(n_init if i == 0 else 8 * n_hid,
8 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_2',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(8 * n_hid if i == 0 else 4 * n_hid,
4 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_3',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(4 * n_hid if i == 0 else 2 * n_hid,
2 * n_hid,
n_layers=n_layers)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
)),
('group_4',
nn.Sequential(
*[(f'block_{i + 1}',
DecoderBlock(2 * n_hid if i == 0 else 1 * n_hid,
1 * n_hid,
n_layers=n_layers)) for i in blk_range], )),
('output',
nn.Sequential(
('relu', nn.ReLU()),
('conv', nn.Conv2D(1 * n_hid, 2 * output_channels, 1)),
)),
)
def forward(self, x):
return self.blocks(x)
| 33.816412 | 135 | 0.521181 | 3,084 | 24,314 | 3.852464 | 0.11284 | 0.027944 | 0.013467 | 0.02424 | 0.596583 | 0.55593 | 0.515866 | 0.492719 | 0.47698 | 0.461325 | 0 | 0.028343 | 0.35716 | 24,314 | 718 | 136 | 33.86351 | 0.731798 | 0.168874 | 0 | 0.661224 | 0 | 0 | 0.053723 | 0 | 0 | 0 | 0 | 0 | 0.004082 | 1 | 0.087755 | false | 0.008163 | 0.020408 | 0.028571 | 0.195918 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257d2b7a3c0cb97294399826eb46725bc5e9506f | 33,921 | py | Python | Index.py | Trabalho-APC-DASH/Painel-APC | 1920c7e1a188ba350268f926c0bf69552f0ab4a2 | [
"MIT"
] | null | null | null | Index.py | Trabalho-APC-DASH/Painel-APC | 1920c7e1a188ba350268f926c0bf69552f0ab4a2 | [
"MIT"
] | null | null | null | Index.py | Trabalho-APC-DASH/Painel-APC | 1920c7e1a188ba350268f926c0bf69552f0ab4a2 | [
"MIT"
] | null | null | null | # VERSÃO FINAL - ÚNICA - V1.0
# ALTERADA EM 03/04 -- 18:30
# EXPORTAÇÕES:
import plotly.express as px
from pandas import read_excel
from dash import Dash, dcc, html, Input, Output, State
import plotly.graph_objects as go
# BOOTSTRAP IMPORT USED TO BUILD THE SITE:
import dash_bootstrap_components as dbc
# DECLARATION OF THE 1ST DATAFRAME:
df1 = read_excel("https://github.com/Trabalho-APC-DASH/Painel-APC/blob/main/Banco%20de%20Dados/Brasil-Exportacao_cafe_por_pais.xlsx?raw=true")
# DECLARATION OF THE 2ND DATAFRAME:
df2 = read_excel('https://github.com/Trabalho-APC-DASH/Painel-APC/blob/main/Banco%20de%20Dados/UnidadesReceita.xlsx?raw=true')
# DECLARATION OF THE 3RD DATAFRAME:
df3 = read_excel('https://github.com/Trabalho-APC-DASH/Painel-APC/blob/main/Banco%20de%20Dados/Preco_Medio.xlsx?raw=true')
# DECLARATION OF THE 4TH DATAFRAME:
df4 = read_excel('https://github.com/Trabalho-APC-DASH/Painel-APC/blob/main/Banco%20de%20Dados/Paises_exportadores_cafe.xlsx?raw=true')
# START OF DATA ORGANIZATION:
# =========================================================================
# DATAFRAME 1)
# ORGANIZATION OF THE DROPDOWN OPTIONS:
def funcao_unique(lista):
resultado = []
unicidade = set(lista)
for elemento in unicidade:
resultado.append(elemento)
return resultado
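# Note (added): deduplicating through set() does not preserve the original
# order of the rows; if a stable order ever matters, something like
# list(dict.fromkeys(df1['CONTINENTE'])) keeps first-occurrence order instead.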
opcoes = funcao_unique(df1['CONTINENTE'])
opcoes.insert(0, 'Todos os Continentes')
del opcoes[1]
opcoes2 = ['ARÁBICA (Por sacas de 60kg)', 'CONILLON (Por sacas de 60kg)', 'SOLÚVEL (Por sacas de 60kg)', 'TORRADO (Por sacas de 60kg)', 'TOTAL']
# DECLARATION OF HOW THE CHART WILL BE ORGANIZED:
fig1 = px.bar(df1, x="CONTINENTE", y="TOTAL", color="PAÍS DESTINO", title='Compra de Café Brasileiro por País')
# ===========================================================================
# DATAFRAME 2)
# CONVERSION OF DF2 INTO A MODIFIABLE LIST:
lista = df2.values
# DECLARATION OF THE OFFICIAL DATAFRAME FOR DF2:
dfOf1 = []
# REORGANIZATION OF DF2:
for n in lista:
dfOf1 += [[n[0], n[1], 'Importação Jan/Fev2022']]
dfOf1 += [[n[0], n[3], 'Exportação Jan/Fev2022']]
dfOf1 += [[n[0], n[5], 'Importação Jan/Fev2021']]
dfOf1 += [[n[0], n[7], 'Exportação Jan/Fev2021']]
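# Note (added): the loop above reshapes each wide source row into four
# long-format rows (unit, value, series label), which is the layout px.bar
# expects below for grouped bars colored by the third column.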
# DECLARATION OF HOW THE CHART WILL BE ORGANIZED:
fig2 = px.bar(dfOf1, x=0, y=1, color=2, barmode="group", title='Exportação/Importação por Receita Federal', labels={
'0': 'Unidade Da Receita Federal',
'1': 'Sacas (60kg)',
'2': 'Tipo'
})
fig2.update_layout(
paper_bgcolor='rgba(0, 0, 0, 0.2)',
font_color='white',
legend_bgcolor='rgba(0, 0, 0, 0)'
)
# LOOP TO BUILD THE LIST USED BY THE DROPDOWN OF CHART 2:
receita_filtragem = []
for n in lista:
receita_filtragem += [n[0]]
receita_filtragem.insert(0, 'Todos')
# ============================================================================
# DATAFRAME 3)
# STORING THE COLUMN NAMES FROM THE FIRST ROW OF DATAFRAME 3:
opcoes3 = []
for n in df3:
opcoes3 += [n]
# REMOVAL OF DATA NOT NEEDED FOR DISPLAY IN THE CHART:
del opcoes3[0]
del opcoes3[6]
# INITIAL DECLARATION OF HOW THE CHART WILL BE ORGANIZED:
fig3 = go.Figure()
for cafe in opcoes3:
fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3[cafe],
mode='lines', name=cafe))
# UPDATE OF THE TITLE AND THE LABELS OF THE CHART'S VERTICAL AND HORIZONTAL AXES:
fig3.update_layout(title='Preço Médio do Café Brasileiro',
xaxis_title='Ano',
yaxis_title='Preço (US$)')
# INSERTION OF A NEW OPTION FOR THE DROPDOWN:
opcoes3.insert(0, 'Todos os Tipos de Café')
# ==============================================================================
# DATAFRAME 4)
# CONVERSION OF DF4 INTO A MODIFIABLE LIST:
Lista3 = df4.values
# LIST OF ALL COUNTRIES SPLIT BY CONTINENT, USED IN THE STEP FURTHER BELOW:
Oceania = ['Estados Federados da Micronésia', 'Fiji', 'Ilhas Marshall', 'Ilhas Salomão', 'Kiribati' ,'Nauru', 'Nova Zelândia', 'Palau', 'Papua-Nova Guiné', 'Samoa', 'Tonga', 'Tuvalu', 'Vanuatu', 'Ilhas Cook']
América_do_Norte = ['Canadá', 'Estados Unidos da América', 'México']
América_Central = ['Antígua e Barbuda', 'Bahamas', 'Barbados', 'Belize', 'Costa Rica', 'Cuba', 'Dominica', 'El Salvador', 'Granada', 'Guatemala', 'Haiti', 'Honduras', 'Jamaica', 'Nicarágua', 'Panamá', 'República Dominicana', 'Santa Lúcia', 'São Cristóvão e Névis', 'São Vicente e Granadinas', 'Trindade e Tobago']
América_do_Sul = ['Argentina', 'Bolívia', 'Brasil', 'Chile', 'Colômbia', 'Equador', 'Guiana', 'Guiana Francesa', 'Paraguai', 'Peru', 'Suriname', 'Uruguai', 'Venezuela']
Europa = ['Albânia', 'Alemanha', 'Andorra', 'Áustria', 'Bélgica', 'Bielorrússia', 'Bósnia e Herzegovina', 'Bulgária', 'Cazaquistão', 'Chipre', 'Croácia', 'Dinamarca', 'Eslováquia', 'Eslovênia', 'Espanha', 'Estônia', 'Finlândia', 'França', 'Grécia', 'Hungria', 'Irlanda', 'Islândia', 'Itália', 'Letônia', 'Liechtenstein', 'Lituânia', 'Luxemburgo', 'Malta', 'Moldávia', 'Mônaco', 'Montenegro', 'Noruega', 'Países Baixos', 'Polônia', 'Portugal', 'Tchéquia', 'Macedônia do Norte', 'Inglaterra', 'Irlanda do Norte', 'Escócia', 'País de Gales', 'Romênia', 'Rússia', 'San Marino', 'Sérvia', 'Suécia', 'Suíça', 'Turquia', 'Ucrânia', 'Vaticano']
Ásia = ['Timor Leste', 'Birmânia', 'Afeganistão', 'Arábia Saudita', 'Armênia', 'Azerbaijão', 'Bahrein', 'Bangladesh', 'Brunei', 'Butão', 'Camboja', 'Cazaquistão', 'Catar', 'China', 'Chipre', 'Cingapura', 'Coreia do Norte', 'Coreia do Sul', 'Egito', 'Emirados Árabes', 'Filipinas', 'Geórgia', 'Iêmen', 'Índia', 'Indonésia', 'Irã', 'Iraque', 'Israel', 'Japão', 'Jordânia', 'Kuwait', 'Laos', 'Líbano', 'Malásia', 'Maldivas', 'Mianmar', 'Mongólia', 'Nepal', 'Omã', 'Paquistão', 'Quirguistão', 'Rússia', 'Síria', 'Sri Lanka', 'Tajiquistão', 'Tailândia', 'Timor-Leste', 'Turcomenistão', 'Turquia', 'Uzbequistão', 'Vietnã', 'Taiwan', 'República Popular da China']
África = ['África do Sul', 'Angola', 'Argélia', 'Benim', 'Botswana', 'Burquina Faso', 'Burundi', 'Camarões', 'Chade', 'Costa do Marfim', 'Djibouti', 'Egito', 'Eritreia', 'Etiópia', 'Gabão', 'Gâmbia', 'Gana', 'Guiné', 'Guiné-Bissau', 'Guiné Equatorial', 'Madagáscar', 'Cabo Verde', 'Comores', 'São Tomé e Príncipe', 'Seychelles', 'Lesoto', 'Libéria', 'Líbia', 'Malawi', 'Mali', 'Marrocos', 'Mauritânia', 'Moçambique', 'Namíbia', 'Níger', 'Nigéria', 'Quênia', 'República da África Central', 'República Democrática do Congo', 'República do Congo', 'República de Maurício', 'Ruanda', 'Senegal', 'Serra Leoa', 'Somália', 'Eswatini', 'Sudão', 'Sudão do Sul', 'Tanzânia', 'Togo', 'Tunísia', 'Uganda', 'Zâmbia', 'Zimbábue', 'República Popular do Congo']
# DECLARATION OF THE FINAL DATAFRAME BUILT FROM DF4:
dfOf3 = []
# LOOP OVER EACH ELEMENT OF THE "Lista3" LIST:
for ln in Lista3:
    for cont in Oceania:  # LOOP OVER EACH ELEMENT OF THE "Oceania" LIST
        if ln[1] == cont:  # IF THE COUNTRY IS FOUND IN THE OCEANIA LIST, ITS CONTINENT IS OCEANIA.
            dfOf3 += [[ln[0], ln[1], ln[2], 'Oceania']]
    for cont in América_do_Norte:  # LOOP OVER EACH ELEMENT OF THE "América_do_Norte" LIST
        if ln[1] == cont:  # IF THE COUNTRY IS FOUND IN THE NORTH AMERICA LIST, ITS CONTINENT IS NORTH AMERICA.
            dfOf3 += [[ln[0], ln[1], ln[2], 'América do Norte']]
    for cont in América_Central:  # SAME LOGIC AS THE PREVIOUS STEPS...
        if ln[1] == cont:
            dfOf3 += [[ln[0], ln[1], ln[2], 'América Central']]
    for cont in América_do_Sul:
        if ln[1] == cont:
            dfOf3 += [[ln[0], ln[1], ln[2], 'América do Sul']]
    for cont in Europa:
        if ln[1] == cont:
            dfOf3 += [[ln[0], ln[1], ln[2], 'Europa']]
    for cont in Ásia:
        if ln[1] == cont:
            dfOf3 += [[ln[0], ln[1], ln[2], 'Ásia']]
    for cont in África:
        if ln[1] == cont:
            dfOf3 += [[ln[0], ln[1], ln[2], 'África']]
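# --- Illustrative alternative (not part of the original script) ---
# The nested loops above rescan every continent list for each row; an
# equivalent lookup can be built once as a country -> continent dict.
# Hypothetical sketch, reusing the same lists defined above:
#
#     continente_de = {}
#     for nome, paises in [('Oceania', Oceania), ('América do Norte', América_do_Norte),
#                          ('América Central', América_Central), ('América do Sul', América_do_Sul),
#                          ('Europa', Europa), ('Ásia', Ásia), ('África', África)]:
#         for pais in paises:
#             continente_de[pais] = nome
#     dfOf3 = [[ln[0], ln[1], ln[2], continente_de[ln[1]]]
#              for ln in Lista3 if ln[1] in continente_de]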
# DECLARATION OF HOW THE CHART WILL BE ORGANIZED:
fig4 = px.scatter_geo(dfOf3,  # DataFrame to be used
                      title='Produção de Café Anual (Toneladas)',
                      locations=0,  # locations come from column 0 of the DataFrame (the IDs)
                      projection='orthographic',  # orthographic map projection
                      opacity=1,  # opacity of the markers on the map
                      hover_name=1,  # hover name taken from column 1 of the DataFrame (the countries)
                      color=3,  # color grouping taken from column 3 of the DataFrame (the continents)
                      hover_data=[2],  # extra hover info: column 2 (the production figures)
                      labels={'3': 'Continente', '0': 'País ID', '2': 'Produção'}  # rename the map legend entries for readability
                      )
fig4.update_geos(
    landcolor="#06832F",
    oceancolor="#1E8AC9",
    showocean=True,
    lakecolor="#5FC4D0",
)
fig4.update_layout(
    paper_bgcolor='rgba(0, 0, 0, 0.2)',
    font_color='white',
    legend_bgcolor='rgba(0, 0, 0, 0)'
)
# =======================================================================================
# START OF THE LAYOUT SETUP AND CHART INSERTION:
# CONNECT THE APP TO THE BOOTSTRAP FRAMEWORK:
app = Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# ---------------------------------------------------------------------------------------
# BUILDING THE SITE IN PARTS:
# A) SIDEBAR:
# STYLE DEFINITIONS FOR THE SIDEBAR:
ESTILO_BARRA_LATERAL = {
    "position": "fixed",
    "top": 0,
    "left": 0,
    "bottom": 0,
    "width": "16rem",
    "padding": "2rem 1rem",
    "background-color": "rgba(221, 162, 99, 0)",
}
# ITEMS TO BE USED IN THE SIDEBAR:
items = [
    dbc.DropdownMenuItem("Gráfico 1", n_clicks=0, id='Drop1'),
    dbc.DropdownMenuItem(divider=True),
    dbc.DropdownMenuItem("Gráfico 2", n_clicks=0, id='Drop2'),
    dbc.DropdownMenuItem(divider=True),
    dbc.DropdownMenuItem("Gráfico 3", n_clicks=0, id='Drop3'),
]
# SIDEBAR DEFINITION:
barralateral = html.Div(
    [  # TEXT:
        html.H2("Café☕", className="display-4", style={'color': 'white'}),
        html.Hr(),
        html.P(
            'Confira o movimento de mercado do Café Brasileiro', className="lead", style={'color': 'white'}
        ),
        # NAVIGATION AREA:
        dbc.Nav(
            [
                # DROPDOWN INSERTION:
                dbc.DropdownMenu(
                    label="Filtros", children=items, direction="right", color='rgba(255, 101, 0)', style={'border-color': '#a5a5a500'}
                ),
                html.Hr(),
                html.P('INFO:', style={'color': 'white', 'margin-top': '2vh'}),
                # REMAINING OPTIONS (THE INFO ENTRIES FOR EACH CHART):
                dbc.NavLink("Exportações", id='menu1', style={'cursor': 'pointer'}),
                dbc.NavLink("Compra", id='menu2', style={'cursor': 'pointer'}),
                dbc.NavLink("Preços", id='menu3', style={'cursor': 'pointer'}),
                dbc.NavLink("Produções", id='menu4', style={'cursor': 'pointer'}),
                # AREA LINKING TO THE DEVELOPERS:
                html.P('DESENVOLVEDORES:', style={'color': 'white', 'margin-top': '3vh'}),
                dbc.NavLink('Acesse Aqui', id='menu5', style={'cursor': 'pointer'})
            ],
            vertical=True,
            pills=True,
        ),
    ],
    style=ESTILO_BARRA_LATERAL,
)
# -----------------------------------------------------------------------------------------
# BUILDING THE SITE IN PARTS:
# B) MODALS:
# -=-=-=-=-=-=-=-=-=-=-
# WHAT ARE THE MODALS?
# MODALS ARE THE SMALL WINDOWS THAT OPEN WHEN THE BUTTONS ARE CLICKED.
# -=-=-=-=-=-=-=-=-=-=-
# MODAL FOR THE 1ST OPTION OF THE SIDEBAR SELECTION BOX:
modalPrim1 = html.Div(
    [
        dbc.Modal(
            [  # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Filtro: Primeiro Gráfico (Barras)", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody([
                    # THE BODY IS ONE PARAGRAPH PLUS THE TWO FILTER DROPDOWNS OF THE 1ST CHART:
                    html.P('Selecione o continente a ser Filtrado:', style={'color': 'white'}),
                    # 1ST DROPDOWN:
                    dcc.Dropdown(opcoes, value='Todos os Continentes', id='Filtro_Continentes', className='Dropdown1', style={
                        'background-color': '#c9c9c9',
                        'border-radius': '14px',
                        'border-color': 'transparent',
                        'cursor': 'pointer'
                    }),
                    # ANOTHER PARAGRAPH:
                    html.P('Selecione o Tipo de Café a ser Filtrado:', style={'margin-top': '2vh', 'color': 'white'}),
                    # 2ND DROPDOWN:
                    dcc.Dropdown(opcoes2, value='TOTAL', id='Filtro_Tipo', className='Dropdown2', style={
                        'background-color': '#c9c9c9',
                        'border-radius': '14px',
                        'border-color': 'transparent',
                        'margin-bottom': '1vh',
                        'margin-top': '1vh',
                        'cursor': 'pointer'
                    }),
                ]),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="closePrim1", className="ms-auto", n_clicks=0, color='dark', outline=True,
                    )
                ),
            ],
            id="modalPrim1",
            is_open=False,
            size='lg'
        ),
    ]
)
# MODAL FOR THE 2ND OPTION OF THE SIDEBAR SELECTION BOX:
modalPrim2 = html.Div(
    [
        dbc.Modal(
            [
                # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Filtro: Segundo Gráfico (Barras em Grupos)", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody([
                    # THE BODY IS ONE PARAGRAPH PLUS THE FILTER DROPDOWN OF THE 2ND CHART:
                    html.P('Selecione a Localização da Receita Federal a Ser filtrada:', style={'color': 'white'}),
                    # DROPDOWN:
                    dcc.Dropdown(receita_filtragem, value='Todos', id='filtro4', className='Dropdown4', style={
                        'background-color': '#c9c9c9',
                        'border-radius': '14px',
                        'border-color': 'transparent',
                        'margin-bottom': '1vh',
                        'cursor': 'pointer'}),
                ]),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="closePrim2", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modalPrim2",
            is_open=False,
            size='lg'
        ),
    ]
)
# MODAL FOR THE 3RD OPTION OF THE SIDEBAR SELECTION BOX:
modalPrim3 = html.Div(
    [
        dbc.Modal(
            [
                # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Filtro: Terceiro Gráfico (Linhas)", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody([
                    # THE BODY IS ONE PARAGRAPH PLUS THE FILTER DROPDOWN OF THE 3RD CHART:
                    html.P('Selecione o Tipo de Café a ser filtrado:', style={'color': 'white'}),
                    # DROPDOWN:
                    dcc.Dropdown(opcoes3, value='Todos os Tipos de Café', id='filtro3', className='Dropdown3', style={
                        'background-color': '#c9c9c9',
                        'border-radius': '14px',
                        'border-color': 'transparent',
                        'margin-bottom': '1vh',
                        'cursor': 'pointer'}),
                ]),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="closePrim3", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modalPrim3",
            is_open=False,
            size='lg'
        ),
    ]
)
# DECLARATION OF THE 1ST MODAL:
modal1 = html.Div(
    [
        dbc.Modal(
            [
                # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Compra de Café Brasileiro", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody("Gráfico em barras, representa a quantidade exportada de café brasileiro entre os principais países compradores do produto", style={'color': 'white'}),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    dbc.Button(
                        "Fechar", id="close1", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modal1",
            is_open=False,
            size='lg',
        ),
    ]
)
# DECLARATION OF THE 2ND MODAL:
modal2 = html.Div(
    [
        dbc.Modal(
            [  # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Importação e Exportação por Receita Federal", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody("Dividido entre as receitas federais, este gráfico de barras, divididos em grupos, relata a Exportação e Importação de café.", style={'color': 'white'}),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="close2", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modal2",
            is_open=False,
            size='lg'
        ),
    ]
)
# DECLARATION OF THE 3RD MODAL:
modal3 = html.Div(
    [
        dbc.Modal(
            [  # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Preço Médio do Café Brasileiro", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody("Preço médio calculado mensalmente do café brasileiro, estão representadas neste gráfico de Linhas. (Valores em Dólar US$).", style={'color': 'white'}),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="close3", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modal3",
            is_open=False,
            size='lg'
        ),
    ]
)
# DECLARATION OF THE 4TH MODAL:
modal4 = html.Div(
    [
        dbc.Modal(
            [  # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Produção de Café entre Principais Países", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody("Os dados de produção do mapa esta localizada em cada ponto de seu local, para navegar entre eles, gire o planeta pressionando e arrastando o mouse.", style={'color': 'white'}),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="close4", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modal4",
            is_open=False,
            size='lg'
        ),
    ]
)
# DECLARATION OF THE DEVELOPERS-BUTTON MODAL:
modalDev = html.Div(
    [
        dbc.Modal(
            [  # MODAL TITLE:
                dbc.ModalHeader(dbc.ModalTitle("Desenvolvedores:", style={'color': 'white'})),
                # MODAL BODY:
                dbc.ModalBody([
                    # LIST OF TEAM MEMBERS:
                    html.Ul([
                        html.Li('Daniel Rodrigues da Rocha - 211061583', style={'color': 'white'}),
                        html.Li('Daniel Nunes Duarte - 211062910', style={'color': 'white'}),
                        html.Li('Dannyeclisson Rodrigo Martins da Costa - 211061592', style={'color': 'white'}),
                        html.Li('Julia Stefanie Santos Mendonca - 211039564', style={'color': 'white'}),
                        html.Li('Jesus Gabriel Carvalho Ventura - 211062956', style={'color': 'white'}),
                        html.Li('Igor de Souza Justino - 211061897', style={'color': 'white'}),
                        html.Li('Gabriel Fenelon Rocha Goncalves - 211061743', style={'color': 'white'}),
                        html.Li('Queren Hapuque Pereira Torres - 190094711', style={'color': 'white'}),
                        html.Li('Gustavo Lima Menezes - 211062938', style={'color': 'white'})
                    ])
                ]),
                # MODAL FOOTER:
                dbc.ModalFooter(
                    # THE FOOTER HOLDS A SINGLE BUTTON:
                    dbc.Button(
                        "Fechar", id="closeDev", className="ms-auto", n_clicks=0, color='dark', outline=True
                    )
                ),
            ],
            id="modalDev",
            is_open=False,
            size='xl'
        ),
    ]
)
#-----------------------------------------------------------------------------------------
# BUILDING THE SITE IN PARTS:
# C) CHARTS:
# DCC DECLARATION FOR THE 1ST CHART:
grafico1 = [
    dcc.Graph(
        id='Grafico_dados',
        figure=fig1
    )
]
# DCC DECLARATION FOR THE 2ND CHART:
grafico2 = [
    dcc.Graph(
        id='Grafico_dados2',
        figure=fig2
    )
]
# DCC DECLARATION FOR THE 3RD CHART:
grafico3 = [
    dcc.Graph(
        id='Grafico_dados3',
        figure=fig3
    ),
]
# DCC DECLARATION FOR THE 4TH CHART:
grafico4 = [
    dcc.Graph(
        id='Grafico_dados4',
        figure=fig4
    )
]
# -----------------------------------------------------------------------------------
# BUILDING THE SITE IN PARTS:
# D) SITE ROWS:
# ROW-BASED ORGANIZATION OF THE SITE, IN THIS CASE ROW 1:
Conteudo_Linha1 = [
    # ROW 1 IS MADE OF THE CHARTS "grafico1" AND "grafico2", VARIABLES DECLARED JUST ABOVE:
    dbc.Col(html.Div(grafico1), width=5),
    dbc.Col(html.Div(grafico2), width=5),
]
# ROW-BASED ORGANIZATION OF THE SITE, IN THIS CASE ROW 2:
Conteudo_Linha2 = [
    # ROW 2 IS MADE OF THE CHARTS "grafico3" AND "grafico4", VARIABLES DECLARED JUST ABOVE:
    dbc.Col(html.Div(grafico3), width=5),
    dbc.Col(html.Div(grafico4), width=5),
]
# --------------------------------------------------------------------------------------
# FINAL SITE DECLARATION:
# E) LAYOUT:
# DECLARATION OF HOW THE LAYOUT IS ASSEMBLED:
app.layout = html.Div(className='Tudo', id='Tudo', children=[
    html.Div(className='Base', children=[
        # DIV FOR THE FIRST ROW:
        html.Div(className='PrimeiraLinha', children=[
            # THE FIRST ROW HOLDS THE CONTENT OF THE VARIABLE 'Conteudo_Linha1':
            dbc.Row(
                Conteudo_Linha1,
                justify="end",
                style={'margin-right': '2vw'}
            )
        ]),
        # DIV FOR THE SECOND ROW:
        html.Div(className='SegundaLinha', children=[
            # THE SECOND ROW HOLDS THE CONTENT OF THE VARIABLE 'Conteudo_Linha2':
            dbc.Row(
                Conteudo_Linha2,
                justify="end",
                style={'margin-right': '2vw'}
            ),
            # DIV FOR THE UnB LOGO IMAGE AT THE BOTTOM OF THE PAGE:
            html.Div([
                html.Img(src='./assets/logo.png', id='ImagemId', width=200, className='ImagemClass'),
                html.P('Desenvolvido por Alunos da Universidade de Brasília - FGA', id='textofinal', className='textofinalClass', style={'font-weight': 'bold'})
            ], className='finalClass', style={'margin-top': '4vh'})
        ])
    # INCLUSION OF THE VARIABLES CREATED ABOVE:
    ]), barralateral, modal1, modal2, modal3, modal4, modalPrim1, modalPrim2, modalPrim3, modalDev])
# =====================================================================================================================
# FUNCTION DEFINITION:
# FILTERING FUNCTION THAT REPLACES PANDAS' 'loc':
def filtragem(dataframe, pesquisa, coluna):
    # ARGUMENTS: THE DATAFRAME TO FILTER, THE VALUE TO SEARCH FOR IN THE DATA, AND WHETHER THE USER SPECIFIED A COLUMN.
    Filtro = []
    # IF THE USER DID NOT SPECIFY A PARTICULAR COLUMN:
    if coluna == None:
        # WALK THROUGH THE 'dataframe' PASSED AS ARGUMENT:
        for linha in dataframe:
            # IF COLUMN 0 OF THE ROW MATCHES THE 'pesquisa' VALUE PASSED AS ARGUMENT:
            if linha[0] == pesquisa:
                # APPEND COLUMNS 0 THROUGH 6 OF THE CURRENT ROW TO 'Filtro':
                Filtro += [[linha[0], linha[1], linha[2], linha[3], linha[4], linha[5], linha[6]]]
    # BRANCH RESERVED FOR THE CALLBACK OF THE 2ND CHART:
    elif coluna == 3:
        # WALK THROUGH THE 'dataframe' PASSED AS ARGUMENT:
        for linha in dataframe:
            # IF COLUMN 0 OF THE ROW MATCHES THE 'pesquisa' VALUE PASSED AS ARGUMENT:
            if linha[0] == pesquisa:
                # APPEND COLUMNS 0, 1 AND 2 OF THE CURRENT ROW TO 'Filtro':
                Filtro += [[linha[0], linha[1], linha[2]]]
    # IF THE USER ALSO SPECIFIED THE COLUMN TO FILTER ON:
    else:
        referencia = 2
        # WALK THROUGH THE ELEMENTS OF THE 'opcoes2' LIST:
        for alternativa in opcoes2:
            # IF THE COLUMN CHOSEN BY THE USER MATCHES THE CURRENT ELEMENT:
            if str(coluna) == str(alternativa):
                # WALK THROUGH THE ROWS OF THE 'dataframe' PASSED AS ARGUMENT:
                for linha in dataframe:
                    # IF COLUMN 0 OF THE ROW MATCHES THE 'pesquisa' VALUE PASSED AS ARGUMENT:
                    if linha[0] == pesquisa:
                        # APPEND COLUMNS 0, 1 AND THE COLUMN AT THE CURRENT 'referencia' INDEX TO 'Filtro':
                        Filtro += [[linha[0], linha[1], linha[referencia]]]
            # 'referencia' ADVANCES TO TRACK THE COLUMN OF THE NEXT ELEMENT:
            referencia += 1
    return Filtro
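# --- Illustrative usage (not part of the original script) ---
# Hypothetical calls mirroring how the callbacks below use `filtragem`;
# the search values here are made up for illustration:
#
#     filtragem(df1.values, 'Europa', None)       # keeps columns 0..6 of matching rows
#     filtragem(dfOf1, 'Alguma Receita', 3)       # keeps columns 0..2 (2nd chart's branch)
#     filtragem(df1.values, 'Ásia', 'Arábica')    # keeps columns 0, 1 + the chosen coffee column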
# =====================================================================================================================
# START OF THE CALLBACKS:
# CALLBACK FOR CHART 1 (BARS):
@app.callback(
    Output('Grafico_dados', 'figure'),
    Input('Filtro_Tipo', 'value'),
    Input('Filtro_Continentes', 'value')
)
def update_de_dash(tipo, continente):
    dfFl1 = df1.values
    if tipo == 'TOTAL':
        if continente == 'Todos os Continentes':
            fig1 = px.bar(df1, x="CONTINENTE", y="TOTAL", color="PAÍS DESTINO", title='Compra de Café Brasileiro por País por Continente')
        else:
            filtro = filtragem(dfFl1, str(continente), None)
            fig1 = px.bar(filtro, x=0, y=6, color=1, title=f'Compra de Café Brasileiro ({continente})', labels={'0': 'CONTINENTE', '6': 'TOTAL', '1': 'PAÍS DESTINO'})
    else:
        if continente == 'Todos os Continentes':
            fig1 = px.bar(df1, x="CONTINENTE", y=str(tipo), color="PAÍS DESTINO", title=f'Compra de Café {tipo} Brasileiro por Continente')
        else:
            filtro = filtragem(dfFl1, str(continente), str(tipo))
            fig1 = px.bar(filtro, x=0, y=2, color=1, title=f'Compra de Café {tipo} Brasileiro ({continente})', labels={'0': 'CONTINENTE', '1': 'PAÍS DESTINO', '2': tipo})
    fig1.update_layout(
        paper_bgcolor='rgba(0, 0, 0, 0.2)',
        font_color='white',
        legend_bgcolor='rgba(0, 0, 0, 0)'
    )
    return fig1
# ======================================================================================================================
# CALLBACK FOR CHART 2 (GROUPED BARS):
@app.callback(
    Output('Grafico_dados2', 'figure'),
    Input('filtro4', 'value')
)
def UpdatedeDash(value):
    if value == 'Todos':
        fig2 = px.bar(dfOf1, x=0, y=1, color=2, barmode="group", title='Exportação/Importação por Receita Federal', labels={
            '0': 'Unidade Da Receita Federal',
            '1': 'Sacas (60kg)',
            '2': 'Tipo'
        })
    else:
        fig2filtrada = filtragem(dfOf1, str(value), 3)
        fig2 = px.bar(fig2filtrada, x=0, y=1, color=2, barmode="group", title=f'Exportação/Importação da Receita Federal ({value})', labels={
            '0': value,
            '1': 'Sacas (60kg)',
            '2': 'Tipo'
        })
    fig2.update_layout(
        paper_bgcolor='rgba(0, 0, 0, 0.2)',
        font_color='white',
        legend_bgcolor='rgba(0, 0, 0, 0)'
    )
    return fig2
# ======================================================================================================================
# CALLBACK FOR CHART 3 (LINES):
@app.callback(
    Output('Grafico_dados3', 'figure'),
    Input('filtro3', 'value')
)
def UpdateDeDash1(value):
    if value == 'Todos os Tipos de Café':
        fig3 = go.Figure()
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Conillon'],
                                  mode='lines',
                                  name='Conillon'))
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Arábica'],
                                  mode='lines',
                                  name='Arábica'))
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Total Café Verde'],
                                  mode='lines', name='Total (Café Verde)'))
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Torrado'],
                                  mode='lines', name='Torrado'))
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Solúvel'],
                                  mode='lines', name='Solúvel'))
        fig3.add_trace(go.Scatter(x=df3['Mês/Ano'], y=df3['Total Industrializado'],
                                  mode='lines', name='Total (Industrializado)'))
        fig3.update_layout(title='Preço Médio do Café Brasileiro',
                           xaxis_title='Ano',
                           yaxis_title='Preço Médio (US$)')
    else:
        fig3 = px.line(df3, x='Mês/Ano', y=str(value), title=f'Preço Médio ({value}) Brasileiro', labels={str(value): f'Preço Médio (US$) - {value}'})
    fig3.update_layout(
        paper_bgcolor='rgba(0, 0, 0, 0.2)',
        font_color='white',
        legend_bgcolor='rgba(0, 0, 0, 0)'
    )
    return fig3
# ======================================================================================================================
# CALLBACKS FOR THE MODALS:
# FOR THE 1ST MODAL OF THE SIDEBAR SELECTION BOX:
@app.callback(
    Output("modalPrim1", "is_open"),
    [Input("Drop1", "n_clicks"), Input("closePrim1", "n_clicks")],
    [State("modalPrim1", "is_open")],
)
def ModalLat1(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR THE 2ND MODAL OF THE SIDEBAR SELECTION BOX:
@app.callback(
    Output("modalPrim2", "is_open"),
    [Input("Drop2", "n_clicks"), Input("closePrim2", "n_clicks")],
    [State("modalPrim2", "is_open")],
)
def ModalLat2(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR THE 3RD MODAL OF THE SIDEBAR SELECTION BOX:
@app.callback(
    Output("modalPrim3", "is_open"),
    [Input("Drop3", "n_clicks"), Input("closePrim3", "n_clicks")],
    [State("modalPrim3", "is_open")],
)
def ModalLat3(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR INFO MODAL 1:
@app.callback(
    Output("modal1", "is_open"),
    [Input("menu1", "n_clicks"), Input("close1", "n_clicks")],
    [State("modal1", "is_open")],
)
def Modal1(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR INFO MODAL 2:
@app.callback(
    Output("modal2", "is_open"),
    [Input("menu2", "n_clicks"), Input("close2", "n_clicks")],
    [State("modal2", "is_open")],
)
def Modal2(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR INFO MODAL 3:
@app.callback(
    Output("modal3", "is_open"),
    [Input("menu3", "n_clicks"), Input("close3", "n_clicks")],
    [State("modal3", "is_open")],
)
def Modal3(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR INFO MODAL 4:
@app.callback(
    Output("modal4", "is_open"),
    [Input("menu4", "n_clicks"), Input("close4", "n_clicks")],
    [State("modal4", "is_open")],
)
def Modal4(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
# FOR THE DEVELOPERS-BUTTON MODAL:
@app.callback(
    Output("modalDev", "is_open"),
    [Input("menu5", "n_clicks"), Input("closeDev", "n_clicks")],
    [State("modalDev", "is_open")],
)
def ModalDev(n1, n2, is_open):
    if n1 or n2:
        return not is_open
    return is_open
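# --- Illustrative refactor sketch (not part of the original app) ---
# The nine modal callbacks above share the same body; a hypothetical factory
# could register them in a loop instead (shown for reference only; calling it
# for the modals above would duplicate already-registered Dash callbacks):
def _register_modal_toggle(dash_app, modal_id, open_id, close_id):
    @dash_app.callback(
        Output(modal_id, "is_open"),
        [Input(open_id, "n_clicks"), Input(close_id, "n_clicks")],
        [State(modal_id, "is_open")],
    )
    def _toggle(n1, n2, is_open):
        if n1 or n2:
            return not is_open
        return is_open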
if __name__ == '__main__':
    app.run_server(debug=True) | 35.743941 | 745 | 0.538811 | 3,799 | 33,921 | 4.764675 | 0.224796 | 0.015911 | 0.023203 | 0.007182 | 0.417712 | 0.371416 | 0.3544 | 0.33407 | 0.298823 | 0.283355 | 0 | 0.027973 | 0.286519 | 33,921 | 949 | 746 | 35.743941 | 0.719899 | 0.236609 | 0 | 0.372792 | 0 | 0.012367 | 0.298456 | 0.00245 | 0 | 0 | 0 | 0.001054 | 0 | 1 | 0.022968 | false | 0 | 0.021201 | 0 | 0.081272 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257e22ee1110fa746c97def96ea06dd14c1ece1b | 13,035 | py | Python | py2many/inference.py | nsauzede/py2many | 7a4c8d0bd200c287281fc397bedafd755a44fa64 | [
"MIT"
] | 2 | 2021-05-13T11:44:33.000Z | 2021-05-14T00:37:26.000Z | py2many/inference.py | nsauzede/py2many | 7a4c8d0bd200c287281fc397bedafd755a44fa64 | [
"MIT"
] | null | null | null | py2many/inference.py | nsauzede/py2many | 7a4c8d0bd200c287281fc397bedafd755a44fa64 | [
"MIT"
] | null | null | null | import ast
from ctypes import c_int8, c_int16, c_int32, c_int64
from ctypes import c_uint8, c_uint16, c_uint32, c_uint64
from dataclasses import dataclass
from typing import Optional
from py2many.analysis import get_id
from py2many.clike import CLikeTranspiler
from py2many.tracer import is_enum
@dataclass
class InferMeta:
    has_fixed_width_ints: bool
def infer_types(node) -> InferMeta:
    visitor = InferTypesTransformer()
    visitor.visit(node)
    return InferMeta(visitor.has_fixed_width_ints)
def get_inferred_type(node):
    if isinstance(node, ast.Name):
        if not hasattr(node, "scopes"):
            return None
        definition = node.scopes.find(get_id(node))
        # Prevent infinite recursion
        if definition != node and definition is not None:
            return get_inferred_type(definition)
    elif isinstance(node, ast.Constant) or isinstance(node, ast.NameConstant):
        return InferTypesTransformer._infer_primitive(node.value)
    if hasattr(node, "annotation"):
        return node.annotation
    return None
def is_reference(arg):
    annotation_has_ref = hasattr(arg, "annotation") and isinstance(
        arg.annotation, ast.Subscript
    )
    if annotation_has_ref:
        return True
    inferred = get_inferred_type(arg)
    annotation_has_ref = hasattr(inferred, "id") and isinstance(
        inferred.id, ast.Subscript
    )
    return annotation_has_ref
class InferTypesTransformer(ast.NodeTransformer):
    """
    Tries to infer types
    """
    TYPE_DICT = {int: "int", float: "float", str: "str", bool: "bool"}
    FIXED_WIDTH_INTS = {
        bool,
        c_int8,
        c_int16,
        c_int32,
        c_int64,
        c_uint8,
        c_uint16,
        c_uint32,
        c_uint64,
    }
    FIXED_WIDTH_INTS_NAME_LIST = [
        "bool",
        "c_int8",
        "c_int16",
        "c_int32",
        "c_int64",
        "c_uint8",
        "c_uint16",
        "c_uint32",
        "c_uint64",
    ]
    FIXED_WIDTH_INTS_NAME = set(FIXED_WIDTH_INTS_NAME_LIST)
    def __init__(self):
        self.handling_annotation = False
        self.has_fixed_width_ints = False
        # TODO: remove this and make the methods into classmethods
        self._clike = CLikeTranspiler()
    @staticmethod
    def _infer_primitive(value) -> Optional[ast.AST]:
        t = type(value)
        annotation = None
        if t in InferTypesTransformer.TYPE_DICT:
            annotation = ast.Name(id=InferTypesTransformer.TYPE_DICT[t])
        elif t in InferTypesTransformer.FIXED_WIDTH_INTS:
            annotation = ast.Name(id=str(t))
        elif t != type(None):
            raise NotImplementedError(f"{t} not found in TYPE_DICT")
        return annotation
    def visit_NameConstant(self, node):
        annotation = self._infer_primitive(node.value)
        if annotation is not None:
            node.annotation = annotation
        self.generic_visit(node)
        return node
    def visit_Name(self, node):
        annotation = get_inferred_type(node)
        if annotation is not None:
            node.annotation = annotation
        return node
    def visit_Constant(self, node):
        return self.visit_NameConstant(node)
    @staticmethod
    def _annotate(node, typename: str):
        # ast.parse produces a Module object that needs to be destructured
        type_annotation = ast.parse(typename).body[0].value
        node.annotation = type_annotation
    def visit_List(self, node):
        self.generic_visit(node)
        if len(node.elts) > 0:
            elements = [self.visit(e) for e in node.elts]
            if getattr(node, "is_annotation", False):
                return node
            else:
                elt_types = set([get_id(get_inferred_type(e)) for e in elements])
                if len(elt_types) == 1 and hasattr(elements[0], "annotation"):
                    elt_type = get_id(elements[0].annotation)
                    self._annotate(node, f"List[{elt_type}]")
        else:
            if not hasattr(node, "annotation"):
                node.annotation = ast.Name(id="List")
        return node
    def visit_Set(self, node):
        self.generic_visit(node)
        if len(node.elts) > 0:
            elements = [self.visit(e) for e in node.elts]
            elt_types = set([get_id(get_inferred_type(e)) for e in elements])
            if len(elt_types) == 1:
                elt_type = get_id(elements[0].annotation)
                self._annotate(node, f"Set[{elt_type}]")
        else:
            if not hasattr(node, "annotation"):
                node.annotation = ast.Name(id="Set")
        return node
    def visit_Dict(self, node):
        self.generic_visit(node)
        if len(node.keys) > 0:
            def typename(e):
                get_inferred_type(e)  # populates e.annotation
                return self._clike._generic_typename_from_annotation(e)
            key_types = set([typename(e) for e in node.keys])
            only_key_type = next(iter(key_types))
            if len(key_types) == 1:
                key_type = only_key_type
            else:
                key_type = "Any"
            value_types = set([typename(e) for e in node.values])
            only_value_type = next(iter(value_types))
            if len(value_types) == 1:
                value_type = only_value_type
            else:
                value_type = "Any"
            self._annotate(node, f"Dict[{key_type}, {value_type}]")
        else:
            if not hasattr(node, "annotation"):
                node.annotation = ast.Name(id="Dict")
        return node
    def visit_Assign(self, node: ast.Assign) -> ast.AST:
        self.generic_visit(node)
        target = node.targets[0]
        annotation = get_inferred_type(node.value)
        if annotation is not None:
            target.annotation = annotation
        return node
    def visit_AnnAssign(self, node: ast.AnnAssign) -> ast.AST:
        self.generic_visit(node)
        node.target.annotation = node.annotation
        if get_id(node.annotation) in self.FIXED_WIDTH_INTS_NAME:
            self.has_fixed_width_ints = True
        return node
    def visit_AugAssign(self, node: ast.AugAssign) -> ast.AST:
        self.generic_visit(node)
        target = node.target
        annotation = get_inferred_type(target)
        if hasattr(node.value, "annotation") and not annotation:
            target.annotation = node.value.annotation
        else:
            target.annotation = annotation
        return node
    def visit_Compare(self, node):
        self.generic_visit(node)
        node.annotation = ast.Name(id="bool")
        return node
    def visit_Return(self, node):
        self.generic_visit(node)
        new_type_str = (
            get_id(node.value.annotation) if hasattr(node.value, "annotation") else None
        )
        if new_type_str is None:
            return node
        for scope in node.scopes:
            type_str = None
            if isinstance(scope, ast.FunctionDef):
                type_str = get_id(scope.returns)
                if type_str is not None:
                    if new_type_str != type_str:
                        type_str = f"Union[{type_str},{new_type_str}]"
                        scope.returns.id = type_str
                else:
                    # Do not overwrite source annotation with inferred
                    if scope.returns is None:
                        scope.returns = ast.Name(id=new_type_str)
        return node
    def visit_UnaryOp(self, node):
        self.generic_visit(node)
        if isinstance(node.operand, ast.Name):
            operand = node.scopes.find(get_id(node.operand))
        else:
            operand = node.operand
        if hasattr(operand, "annotation"):
            node.annotation = operand.annotation
        return node
    def _handle_overflow(self, op, left_id, right_id):
        widening_op = isinstance(op, ast.Add) or isinstance(op, ast.Mult)
        left_idx = (
            self.FIXED_WIDTH_INTS_NAME_LIST.index(left_id)
            if left_id in self.FIXED_WIDTH_INTS_NAME
            else -1
        )
        right_idx = (
            self.FIXED_WIDTH_INTS_NAME_LIST.index(right_id)
            if right_id in self.FIXED_WIDTH_INTS_NAME
            else -1
        )
        max_idx = max(left_idx, right_idx)
        cint64_idx = self.FIXED_WIDTH_INTS_NAME_LIST.index("c_int64")
        if widening_op:
            if max_idx not in {
                -1,
                cint64_idx,
                len(self.FIXED_WIDTH_INTS_NAME_LIST) - 1,
            }:
                # i8 + i8 => i16 for example
                return self.FIXED_WIDTH_INTS_NAME_LIST[max_idx + 1]
        if left_id == "float" or right_id == "float":
            return "float"
        return left_id if left_idx > right_idx else right_id
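    # Worked examples of the widening rule above (descriptive note added for
    # clarity, not in the original source): for Add/Mult the result widens by
    # one step in FIXED_WIDTH_INTS_NAME_LIST, e.g. c_int8 + c_int8 -> c_int16
    # and c_uint32 * c_uint32 -> c_uint64, while c_int64, c_uint64 and
    # unrecognised types (index -1) fall through to the plain rule: float
    # wins over integers, otherwise the wider of the two operands is kept.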
    def visit_BinOp(self, node):
        self.generic_visit(node)
        if isinstance(node.left, ast.Name):
            lvar = node.scopes.find(get_id(node.left))
        else:
            lvar = node.left
        if isinstance(node.right, ast.Name):
            rvar = node.scopes.find(get_id(node.right))
        else:
            rvar = node.right
        left = lvar.annotation if lvar and hasattr(lvar, "annotation") else None
        right = rvar.annotation if rvar and hasattr(rvar, "annotation") else None
        if left is None and right is not None:
            node.annotation = right
            return node
        if right is None and left is not None:
            node.annotation = left
            return node
        if right is None and left is None:
            return node
        # Both operands are annotated. Now we have interesting cases
        left_id = get_id(left)
        right_id = get_id(right)
        if left_id == right_id and left_id == "int":
            if not isinstance(node.op, ast.Div) or getattr(
                node, "use_integer_div", False
            ):
                node.annotation = left
            else:
                # TODO: This is not true for dart when using integer division
                node.annotation = ast.Name(id="float")
            return node
        # Does this hold across all languages?
        if left_id == "int":
            left_id = "c_int32"
        if right_id == "int":
            right_id = "c_int32"
        if (
            left_id in self.FIXED_WIDTH_INTS_NAME
            and right_id in self.FIXED_WIDTH_INTS_NAME
        ):
            ret = self._handle_overflow(node.op, left_id, right_id)
            node.annotation = ast.Name(id=ret)
            return node
        if left_id == right_id:
            # Exceptions: division operator
            if isinstance(node.op, ast.Div):
                if left_id == "int":
                    node.annotation = ast.Name(id="float")
                    return node
            node.annotation = left
            return node
        else:
            if left_id in self.FIXED_WIDTH_INTS_NAME:
                left_id = "int"
            if right_id in self.FIXED_WIDTH_INTS_NAME:
                right_id = "int"
            if (left_id, right_id) in {("int", "float"), ("float", "int")}:
                node.annotation = ast.Name(id="float")
                return node
            raise Exception(f"type error: {left_id} {type(node.op)} {right_id}")
        return node
    def visit_ClassDef(self, node):
        node.annotation = ast.Name(id=node.name)
        return node
    def visit_Attribute(self, node):
        value_id = get_id(node.value)
        if value_id is not None and hasattr(node, "scopes"):
            if is_enum(value_id, node.scopes):
                node.annotation = node.scopes.find(value_id)
        return node
    def visit_Call(self, node):
        fname = get_id(node.func)
        if fname is not None:
            fn = node.scopes.find(fname)
            if isinstance(fn, ast.ClassDef):
                node.annotation = fn
            elif isinstance(fn, ast.FunctionDef):
                return_type = (
                    fn.returns if hasattr(fn, "returns") and fn.returns else None
                )
                if return_type is not None:
                    node.annotation = return_type
            elif fname in {"max", "min"}:
                return_type = get_inferred_type(node.args[0])
                if return_type is not None:
                    node.annotation = return_type
            elif fname in self.TYPE_DICT.values():
                node.annotation = ast.Name(id=fname)
        self.generic_visit(node)
        return node
    def visit_Subscript(self, node):
        definition = node.scopes.find(get_id(node.value))
        if hasattr(definition, "annotation"):
            self._clike._typename_from_annotation(definition)
            if hasattr(definition, "container_type"):
                _, element_type = definition.container_type
                node.annotation = ast.Name(id=element_type)
        self.generic_visit(node)
        return node
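# --- Illustrative usage (not part of the original module) ---
# A minimal driver sketch; the py2many pipeline normally attaches `scopes`
# to nodes before inference runs, so the call below is shown as comments
# only rather than as executable code:
#
#     tree = ast.parse("a: c_int8 = 1")
#     meta = infer_types(tree)   # annotates nodes in place
#     assert meta.has_fixed_width_ints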
| 33.595361 | 88 | 0.582739 | 1,597 | 13,035 | 4.555416 | 0.123982 | 0.061581 | 0.040412 | 0.037113 | 0.401924 | 0.312165 | 0.267491 | 0.240962 | 0.170447 | 0.109553 | 0 | 0.008808 | 0.329344 | 13,035 | 387 | 89 | 33.682171 | 0.823381 | 0.034906 | 0 | 0.282132 | 0 | 0 | 0.042931 | 0.002549 | 0 | 0 | 0 | 0.002584 | 0 | 1 | 0.07837 | false | 0 | 0.025078 | 0.003135 | 0.250784 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
257ed12cd5a7b5088a1d81e7a2c06131e88f0e37 | 3,086 | py | Python | train.py | cyrusmvahid/LSTNet-Gluon | c2e8b34bcd67220bd87647fdf9d01baa0023133d | [
"Apache-2.0"
] | null | null | null | train.py | cyrusmvahid/LSTNet-Gluon | c2e8b34bcd67220bd87647fdf9d01baa0023133d | [
"Apache-2.0"
] | null | null | null | train.py | cyrusmvahid/LSTNet-Gluon | c2e8b34bcd67220bd87647fdf9d01baa0023133d | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import argparse
import mxnet as mx
from mxnet import nd, gluon, autograd
from dataset import TimeSeriesData
from model import LSTNet
import time
import multiprocessing as mp
def train(file_path, out_path):
    ts_data = TimeSeriesData(file_path, window=24*7, horizon=24)
    ctx = mx.gpu(0)
    min_gpu = 4
    num_gpus = min(min_gpu, mx.context.num_gpus())
    multi_ctx = [mx.gpu(i) for i in range(num_gpus)]
    # multi_ctx = [mx.gpu(0), mx.gpu(1)]
    net = LSTNet(
        num_series=ts_data.num_series,
        conv_hid=100,
        gru_hid=100,
        skip_gru_hid=5,
        skip=24,
        ar_window=24)
    l1 = gluon.loss.L1Loss()
    net.initialize(init=mx.init.Xavier(factor_type="in", magnitude=2.34), ctx=multi_ctx, force_reinit=True)
    trainer = gluon.Trainer(net.collect_params(),
                            optimizer='adam',
                            optimizer_params={'learning_rate': 0.001 * num_gpus, 'clip_gradient': 10.})
    batch_size = 129 * num_gpus
    train_data_loader = gluon.data.DataLoader(
        ts_data.train, batch_size=batch_size, shuffle=True, num_workers=mp.cpu_count(), last_batch='discard')
    # scale = nd.array(ts_data.scale, ctx)
    # scale = ts_data.scale.as_in_context(ctx)
    epochs = 20
    # loss = None
    print("Training Start")
    for e in range(epochs):
        epoch_loss = mx.nd.zeros((1,), ctx)
        num_iter = 0
        # i = 0
        training_start_time = time.time()
        for i, (data, label) in enumerate(train_data_loader):
            epoch_start_time = time.time()
            # data = data.as_in_context(ctx)
            data = gluon.utils.split_and_load(data=data, ctx_list=multi_ctx)
            # label = label.as_in_context(ctx)
            label = gluon.utils.split_and_load(data=label, ctx_list=multi_ctx)
            losses = []
            outputs = []
            # if loss is not None:
            #     loss.wait_to_read()
            with autograd.record():
                for X, Y in zip(data, label):
                    z = net(X)
                    loss = l1(z, Y)
                    losses.append(loss)
                    outputs.append(z)
            autograd.backward(losses)
            trainer.step(batch_size)
            epoch_loss = epoch_loss + loss.mean()
            num_iter += 1
            # i += 1
            nd.waitall()
            print("Epoch {:3d}; batch {:3d} : epoch loss {:.4}; TIME:{}".format(e, i, epoch_loss.asscalar() / num_iter, time.time()-epoch_start_time))
        print("TRAINING TIME: {}".format(time.time()-training_start_time))
    net.save_parameters(out_path)
    print("Training End")
    return 0
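# --- Illustrative sketch (not part of the original script) ---
# The inner loop above is the standard MXNet data-parallel pattern: split the
# batch across contexts, record forward/backward per slice, then let
# trainer.step(batch_size) normalise gradients over the whole batch (which is
# also why the learning rate above is scaled by num_gpus). A minimal
# standalone version of one step, assuming `net`, `loss_fn` and `trainer`
# are already built:
def _data_parallel_step(net, loss_fn, trainer, data, label, ctx_list, batch_size):
    data_slices = gluon.utils.split_and_load(data=data, ctx_list=ctx_list)
    label_slices = gluon.utils.split_and_load(data=label, ctx_list=ctx_list)
    with autograd.record():
        losses = [loss_fn(net(X), Y) for X, Y in zip(data_slices, label_slices)]
    autograd.backward(losses)
    trainer.step(batch_size)
    return losses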
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='LSTNet Time series forecasting')
    parser.add_argument('--data', type=str, required=True,
                        help='path of the data file')
    parser.add_argument('--out', type=str, required=True,
                        help='path of the trained network output')
    args = parser.parse_args()
    exit(train(args.data, args.out))
| 33.912088 | 146 | 0.598509 | 412 | 3,086 | 4.262136 | 0.368932 | 0.031891 | 0.013667 | 0.023918 | 0.088838 | 0.088838 | 0.036446 | 0.036446 | 0 | 0 | 0 | 0.020796 | 0.283215 | 3,086 | 90 | 147 | 34.288889 | 0.773056 | 0.0849 | 0 | 0 | 0 | 0 | 0.084577 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.123077 | 0 | 0.153846 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2587f8ca4edd7908a4b73ab0a1763c2cc0753214 | 4,882 | py | Python | main.py | prateek-77/rcan-it | 587904556d8127bca83690deaaa26e34e051a576 | [
"MIT"
] | 57 | 2022-01-28T04:44:42.000Z | 2022-03-31T13:26:35.000Z | main.py | chisyliu/rcan-it | eb1794777ffef4eadd8a6a06f4419380a0b17435 | [
"MIT"
] | 6 | 2022-02-08T11:17:19.000Z | 2022-03-27T07:40:18.000Z | main.py | chisyliu/rcan-it | eb1794777ffef4eadd8a6a06f4419380a0b17435 | [
"MIT"
] | 10 | 2022-01-28T07:31:12.000Z | 2022-03-15T01:35:03.000Z | import os
import random
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from ptsr import model
from ptsr.data import Data
from ptsr.config import load_cfg
from ptsr.model import get_num_params
from ptsr.utils import utility, trainer
def init_seed(seed: int):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def get_args():
    parser = argparse.ArgumentParser(description='PyTorch Super Resolution')
    parser.add_argument('--config-file', type=str,
                        help='configuration file (yaml)')
    parser.add_argument('--config-base', type=str,
                        help='base configuration file (yaml)', default=None)
    parser.add_argument('--distributed', action='store_true',
                        help='distributed training')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--manual-seed', type=int, default=None)
    parser.add_argument('--local_world_size', type=int, default=1,
                        help='number of GPUs each process.')
    parser.add_argument('--local_rank', type=int, default=None,
                        help='node rank for distributed training')
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    return args
def main():
    args = get_args()
    cfg = load_cfg(args)
    if args.distributed:  # parameters to initialize the process group
        env_dict = {
            key: os.environ[key]
            for key in ("MASTER_ADDR", "MASTER_PORT", "RANK",
                        "LOCAL_RANK", "WORLD_SIZE")}
        print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
        dist.init_process_group(backend="nccl")
        print(
            f"[{os.getpid()}] world_size = {dist.get_world_size()}, "
            + f"rank = {dist.get_rank()}, backend={dist.get_backend()}"
        )
        args.rank = int(os.environ["RANK"])
        args.local_rank = int(os.environ["LOCAL_RANK"])
        n = torch.cuda.device_count() // args.local_world_size
        device_ids = list(
            range(args.local_rank * n, (args.local_rank + 1) * n))
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        print(
            f"[{os.getpid()}] rank = {dist.get_rank()} ({args.rank}), "
            + f"world_size = {dist.get_world_size()}, n = {n}, device_ids = {device_ids}"
        )
        manual_seed = args.local_rank if args.manual_seed is None \
            else args.manual_seed
    else:
        manual_seed = 0 if args.manual_seed is None else args.manual_seed
        device = torch.device('cuda:0')
    # init random seeds for reproducibility
    init_seed(manual_seed)
    cudnn.enabled = True
    cudnn.benchmark = True
    if args.local_rank == 0 or args.local_rank is None:
        print(cfg)
    # initialize model, loss and loader
    checkpoint = utility.checkpoint(cfg)
    _model, _loss = build_model_loss(cfg, args.local_rank, checkpoint, device)
    loader = Data(cfg)
    t = trainer.Trainer(cfg, args.local_rank, loader,
                        _model, _loss, device, checkpoint)
    checkpoint.load_model(
        pre_train=cfg.MODEL.PRE_TRAIN, trainer=t, device=device,
        restart=cfg.SOLVER.ITERATION_RESTART, test_mode=cfg.SOLVER.TEST_ONLY,
        strict=cfg.MODEL.CKP_STRICT, ignore=cfg.MODEL.CKP_IGNORE)
    t.test() if cfg.SOLVER.TEST_ONLY else t.train()
    if args.distributed:
        dist.destroy_process_group()  # tear down the process group
def build_model_loss(cfg, rank, checkpoint, device):
    _model = model.Model(cfg, checkpoint).to(device)
    if rank is None or rank == 0:
        print("Total number of parameters: ", get_num_params(_model))
    # For multiprocessing distributed, DistributedDataParallel constructor
    # should always set the single device scope, otherwise,
    # DistributedDataParallel will use all available devices.
    find_unused = cfg.MODEL.STOCHASTIC_DEPTH or (cfg.SOLVER.TAIL_ONLY_ITER > 0)
    if cfg.SYSTEM.PARALLEL == "DDP":
        _model = nn.parallel.DistributedDataParallel(
            _model, device_ids=[rank], output_device=rank, find_unused_parameters=find_unused)
    else:
        _model = nn.parallel.DataParallel(_model)  # parallel on all devices
    _loss = None
    if not cfg.SOLVER.TEST_ONLY:
        _loss = nn.L1Loss().to(device)
    return _model, _loss
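# --- Illustrative helper (not part of the original script) ---
# The distributed branch of main() reads torchrun/launcher-style environment
# variables; this hypothetical check mirrors exactly the ones it uses:
def _has_dist_env() -> bool:
    required = ("MASTER_ADDR", "MASTER_PORT", "RANK", "LOCAL_RANK", "WORLD_SIZE")
    return all(key in os.environ for key in required)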
if __name__ == '__main__':
    main()
| 36.432836 | 95 | 0.630274 | 609 | 4,882 | 4.865353 | 0.267652 | 0.039487 | 0.043874 | 0.02025 | 0.096524 | 0.043874 | 0.027 | 0.027 | 0.027 | 0.027 | 0 | 0.002216 | 0.260549 | 4,882 | 133 | 96 | 36.706767 | 0.81856 | 0.070668 | 0 | 0.057692 | 0 | 0 | 0.167235 | 0.017292 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.125 | 0 | 0.182692 | 0.048077 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2588ddff6bc7f93f79f0d46e93db16a0b7b7e0ea | 886 | py | Python | factions.py | Fuih/tqs-bot | d2b0f4b86da1cd0d3fea6fa42529fe2b7f899a76 | [
"MIT"
] | null | null | null | factions.py | Fuih/tqs-bot | d2b0f4b86da1cd0d3fea6fa42529fe2b7f899a76 | [
"MIT"
] | null | null | null | factions.py | Fuih/tqs-bot | d2b0f4b86da1cd0d3fea6fa42529fe2b7f899a76 | [
"MIT"
] | null | null | null | from discord import Color
FACTIONS = {
    'Thục': 3,
    'Quần': 5,
    'Ngụy': 7,
    'Ngô': 11
}
FACTION_COLORS = {
    FACTIONS['Thục']: Color.red(),
    FACTIONS['Quần']: Color.dark_grey(),
    FACTIONS['Ngụy']: Color.blue(),
    FACTIONS['Ngô']: Color.green(),
    FACTIONS['Thục']*FACTIONS['Quần']: Color.dark_red(),
    FACTIONS['Thục']*FACTIONS['Ngụy']: Color.purple(),
    FACTIONS['Thục']*FACTIONS['Ngô']: Color.from_rgb(255, 255, 0),
    FACTIONS['Quần']*FACTIONS['Ngụy']: Color.from_rgb(102, 153, 204),
    FACTIONS['Quần']*FACTIONS['Ngô']: Color.teal(),
    FACTIONS['Ngụy']*FACTIONS['Ngô']: Color.from_rgb(0, 255, 255),
}
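# --- Illustrative note (not part of the original file) ---
# Each faction maps to a distinct prime, so the product of two primes
# uniquely identifies an unordered faction pair; mixed colors are therefore
# keyed by that product, e.g. FACTIONS['Thục'] * FACTIONS['Ngô'] == 3 * 11 == 33.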
def get_faction_color(faction):
    faction = faction.split('/')
    if len(faction) == 1:
        return FACTION_COLORS[FACTIONS[faction[0]]]
    faction_value = FACTIONS[faction[0]] * FACTIONS[faction[1]]
    return FACTION_COLORS[faction_value] | 30.551724 | 69 | 0.633183 | 112 | 886 | 4.901786 | 0.321429 | 0.10929 | 0.116576 | 0.076503 | 0.182149 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043185 | 0.163657 | 886 | 29 | 70 | 30.551724 | 0.697706 | 0 | 0 | 0 | 0 | 0 | 0.085682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2589a0d87beeb28e6dcfc7237ba25fd6df784ac8 | 1,857 | py | Python | tencentcloud/bizlive/v20190313/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 465 | 2018-04-27T09:54:59.000Z | 2022-03-29T02:18:01.000Z | tencentcloud/bizlive/v20190313/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 91 | 2018-04-27T09:48:11.000Z | 2022-03-12T08:04:04.000Z | tencentcloud/bizlive/v20190313/errorcodes.py | PlasticMem/tencentcloud-sdk-python | 666db85623d51d640a165907a19aef5fba53b38d | [
"Apache-2.0"
] | 232 | 2018-05-02T08:02:46.000Z | 2022-03-30T08:02:48.000Z | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Insufficient bandwidth.
FAILEDOPERATION_LACKBANDWIDTH = 'FailedOperation.LackBandwidth'
# Internal error.
INTERNALERROR = 'InternalError'
# Error calling an internal service.
INTERNALERROR_CALLOTHERSVRERROR = 'InternalError.CallOtherSvrError'
# Configuration does not exist.
INTERNALERROR_CONFIGNOTEXIST = 'InternalError.ConfigNotExist'
# DB execution error.
INTERNALERROR_DBERROR = 'InternalError.DBError'
# Error getting the user account.
INTERNALERROR_GETBIZIDERROR = 'InternalError.GetBizidError'
# Failed to get stream information.
INTERNALERROR_GETSTREAMINFOERROR = 'InternalError.GetStreamInfoError'
# Error getting live-stream source information.
INTERNALERROR_GETUPSTREAMINFOERROR = 'InternalError.GetUpstreamInfoError'
# No permission for this operation.
INTERNALERROR_NOTPERMMITOPERAT = 'InternalError.NotPermmitOperat'
# Abnormal stream status.
INTERNALERROR_STREAMSTATUSERROR = 'InternalError.StreamStatusError'
# Failed to update data.
INTERNALERROR_UPDATEDATAERROR = 'InternalError.UpdateDataError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# JSON parsing failed.
INVALIDPARAMETER_JSONPARSEERROR = 'InvalidParameter.JsonParseError'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Check whether the plugin has IM capability.
LIMITEXCEEDED_NOIMABILITY = 'LimitExceeded.NoIMAbility'
# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'
# No idle machines available.
| 26.528571 | 82 | 0.803446 | 181 | 1,857 | 8.171271 | 0.61326 | 0.040568 | 0.017579 | 0.021636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009163 | 0.118471 | 1,857 | 69 | 83 | 26.913043 | 0.894319 | 0.409801 | 0 | 0 | 0 | 0 | 0.425212 | 0.368768 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2589b69bdb44179b5ee78c49ec9b5fbc3cadace8 | 4,828 | py | Python | symphony/cli/gql/gql/client.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 2 | 2020-11-05T18:58:26.000Z | 2021-02-09T06:42:49.000Z | symphony/cli/gql/gql/client.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 10 | 2021-03-31T20:19:00.000Z | 2022-02-19T07:09:57.000Z | symphony/cli/gql/gql/client.py | idoshveki/magma | 8022267bd8b8d94913fbb9a0836880361d785446 | [
"BSD-3-Clause"
] | 3 | 2020-08-20T18:45:34.000Z | 2020-08-20T20:18:42.000Z | #!/usr/bin/env python3
import warnings
from logging import Logger, getLogger
from typing import Any, Dict, Optional, cast
from graphql import (
    build_ast_schema,
    build_client_schema,
    get_introspection_query,
    parse,
)
from graphql.language.ast import DocumentNode
from graphql.type.schema import GraphQLSchema
from graphql.utilities.find_deprecated_usages import find_deprecated_usages
from graphql.validation import validate
from .transport.local_schema import LocalSchemaTransport
from .transport.transport import ExtendedExecutionResult, Transport
log: Logger = getLogger(__name__)
class OperationException(Exception):
    def __init__(self, err_msg: str, err_id: str) -> None:
        message = "Operation failed: %s (id:%s)" % (err_msg, err_id)
        super(OperationException, self).__init__(message)
        self.err_msg = err_msg
        self.err_id = err_id
class RetryError(Exception):
    """Custom exception thrown when retry logic fails"""
    def __init__(self, retries_count: int, last_exception: Optional[Exception]) -> None:
        message = "Failed %s retries: %s" % (retries_count, last_exception)
        super(RetryError, self).__init__(message)
        self.last_exception = last_exception
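# --- Illustrative sketch (not part of the original module) ---
# Client._get_result below retries immediately with no delay; a caller that
# wants spacing between attempts can wrap execute() with backoff. This helper
# is hypothetical and only shows the idea:
def _execute_with_backoff(client, document, variables, attempts=3, base_delay=0.5):
    import time
    for attempt in range(attempts):
        try:
            return client.execute(document, variables)
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))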
class GraphqlDeprecationWarning(DeprecationWarning):
    pass
class Client(object):
    schema: Optional[GraphQLSchema]
    introspection: Optional[Dict[str, Any]]
    transport: Transport
    retries: int
    def __init__(
        self,
        schema: Optional[GraphQLSchema] = None,
        introspection: Optional[Dict[str, Any]] = None,
        type_def: Optional[str] = None,
        transport: Optional[Transport] = None,
        fetch_schema_from_transport: bool = False,
        retries: int = 0,
    ) -> None:
        assert not (
            type_def and introspection
        ), "Cant provide introspection type definition at the same time"
        if transport and fetch_schema_from_transport:
            assert (
                not schema
            ), "Cant fetch the schema from transport if is already provided"
            introspection = transport.execute(
                parse(get_introspection_query(descriptions=True))
            ).data
        if introspection:
            assert not schema, "Cant provide introspection and schema at the same time"
            schema = build_client_schema(introspection)
        elif type_def:
            assert (
                not schema
            ), "Cant provide Type definition and schema at the same time"
            type_def_ast = parse(type_def)
            schema = build_ast_schema(type_def_ast)
        elif schema and not transport:
            transport = LocalSchemaTransport(schema)
        self.schema = schema
        self.introspection = introspection
        self.transport = cast(Transport, transport)
        self.retries = retries
    def validate(self, document: DocumentNode) -> None:
        schema = self.schema
        if not schema:
            raise Exception(
                "Cannot validate locally the document, you need to pass a schema."
            )
        validation_errors = validate(schema, document)
        if validation_errors:
            raise validation_errors[0]
        usages = find_deprecated_usages(schema, document)
        for usage in usages:
            message = (
                f"Query of deprecated graphql field in {usage}. "
                "Consider upgrading to newer API version."
            )
            warnings.warn(message, GraphqlDeprecationWarning)
    def execute(self, document: DocumentNode, variable_values: Dict[str, Any]) -> str:
        if self.schema:
            self.validate(document)
        result = self._get_result(document, variable_values)
        if result.errors:
            raise OperationException(
                str(cast(Dict[int, str], result.errors)[0]),
                result.extensions.get("trace_id", ""),
            )
        return result.response
    def _get_result(
        self, document: DocumentNode, variable_values: Dict[str, Any]
    ) -> ExtendedExecutionResult:
        if not self.retries:
            return self.transport.execute(document, variable_values)
        last_exception = None
        retries_count = 0
        while retries_count < self.retries:
            try:
                result = self.transport.execute(document, variable_values)
                return result
            except Exception as e:
                last_exception = e
                log.warn(
                    "Request failed with exception %s. Retrying for the %s time...",
                    e,
                    retries_count + 1,
                    exc_info=True,
                )
            finally:
                retries_count += 1
        raise RetryError(retries_count, last_exception)
| 34 | 88 | 0.629453 | 509 | 4,828 | 5.787819 | 0.265226 | 0.028513 | 0.013578 | 0.013238 | 0.114732 | 0.076035 | 0.032587 | 0.032587 | 0 | 0 | 0 | 0.002061 | 0.296396 | 4,828 | 141 | 89 | 34.241135 | 0.865175 | 0.014085 | 0 | 0.034483 | 0 | 0 | 0.103912 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.051724 | false | 0.017241 | 0.086207 | 0 | 0.232759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
258c8543e27ed658663d2ca6dbb29648a1842bd2 | 1,219 | py | Python | chem-axon-setup/lambdas/trigger_compound_reg_pipeline.py | spatel-gfb/data-lake-as-code | a5479befd55998a24d535d572a78d803c678dd32 | [
"MIT-0"
] | null | null | null | chem-axon-setup/lambdas/trigger_compound_reg_pipeline.py | spatel-gfb/data-lake-as-code | a5479befd55998a24d535d572a78d803c678dd32 | [
"MIT-0"
] | null | null | null | chem-axon-setup/lambdas/trigger_compound_reg_pipeline.py | spatel-gfb/data-lake-as-code | a5479befd55998a24d535d572a78d803c678dd32 | [
"MIT-0"
] | null | null | null | import boto3
import os
import logging
"""
Create a logging function and initiate it.
"""
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger('comp-reg-data-load-pipeline-lambda')
handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def lambda_handler(event, context):
    # Initialise the environment variables required to trigger the AWS Batch Job
    awsregion = os.environ.get('AWS_REGION')
    # Execute the batch job
    batch_client = boto3.client('batch', region_name=awsregion)
    execute_cmd = ['python', 'comp_reg_data_load.py', awsregion]
    batch_job_id = batch_client.submit_job(jobDefinition='comp-reg-etl-job',
                                           jobQueue='datalake-job-queue',
                                           jobName='comp-reg-etl-job',
                                           containerOverrides={'command': execute_cmd})['jobId']
    # Log the batch job id triggered
    logger.info("The command executed by Lambda function is : " + str(execute_cmd))
    logger.info("The AWS Batch Job ID : " + str(batch_job_id))
| 36.939394 | 96 | 0.66612 | 149 | 1,219 | 5.328859 | 0.489933 | 0.060453 | 0.050378 | 0.037783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002107 | 0.221493 | 1,219 | 32 | 97 | 38.09375 | 0.834563 | 0.104184 | 0 | 0 | 0 | 0 | 0.244701 | 0.052987 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
258ce0c4afc8aff3be542bf864890a33228e3553 | 1,017 | py | Python | auto_derby/terminal.py | DoctrineAlanK/auto-derby | 781e860b06b9686e56feab115d2212251cd99d10 | [
"MIT"
] | 235 | 2021-05-24T12:09:18.000Z | 2022-03-31T03:44:08.000Z | auto_derby/terminal.py | DoctrineAlanK/auto-derby | 781e860b06b9686e56feab115d2212251cd99d10 | [
"MIT"
] | 193 | 2021-05-27T16:49:14.000Z | 2022-03-31T16:38:08.000Z | auto_derby/terminal.py | DoctrineAlanK/auto-derby | 781e860b06b9686e56feab115d2212251cd99d10 | [
"MIT"
] | 89 | 2021-05-30T17:07:24.000Z | 2022-03-27T15:41:04.000Z | # -*- coding=UTF-8 -*-
# pyright: strict
from __future__ import annotations
import contextlib
from typing import Text
from . import sound, window
class PromptDisabled(PermissionError):
    def __init__(self):
        super().__init__("prompt disabled")
class g:
    pause_sound_path = ""
    prompt_sound_path = ""
    prompt_disabled = False
def pause(message: Text) -> None:
    close_msg = window.info(message)
    try:
        sound.play_file(g.pause_sound_path)
        input("Press enter to continue...")
    finally:
        close_msg()
def prompt(message: Text) -> Text:
    if g.prompt_disabled:
        raise PromptDisabled
    close_msg = window.info("Interaction required in terminal.")
    try:
        sound.play_file(g.pause_sound_path)
        return input(message)
    finally:
        close_msg()
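# --- Illustrative usage (not part of the original module) ---
# `prompt_disabled` below is a context manager; inside it, any call to
# prompt() raises PromptDisabled. A hypothetical caller:
#
#     with prompt_disabled(True):
#         try:
#             answer = prompt("choice: ")
#         except PromptDisabled:
#             answer = "default"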
@contextlib.contextmanager
def prompt_disabled(v: bool):
    original = g.prompt_disabled
    g.prompt_disabled = v
    try:
        yield
    finally:
        g.prompt_disabled = original
| 19.941176 | 64 | 0.662734 | 121 | 1,017 | 5.305785 | 0.438017 | 0.152648 | 0.093458 | 0.070093 | 0.096573 | 0.096573 | 0.096573 | 0.096573 | 0 | 0 | 0 | 0.001295 | 0.240905 | 1,017 | 50 | 65 | 20.34 | 0.830311 | 0.035398 | 0 | 0.285714 | 0 | 0 | 0.075665 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.114286 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
258dcda4d3e81c18066625b31adc90681cca2a6b | 3,717 | py | Python | rkmt/options/convert_options.py | corenel/rknn-model-tools | 8af9c062ea4955a76ba9986a6cab6f771c9e678a | [
"MIT"
] | 1 | 2020-07-09T08:50:50.000Z | 2020-07-09T08:50:50.000Z | rkmt/options/convert_options.py | corenel/rknn-model-tools | 8af9c062ea4955a76ba9986a6cab6f771c9e678a | [
"MIT"
] | null | null | null | rkmt/options/convert_options.py | corenel/rknn-model-tools | 8af9c062ea4955a76ba9986a6cab6f771c9e678a | [
"MIT"
] | 1 | 2020-07-09T08:50:31.000Z | 2020-07-09T08:50:31.000Z | from rkmt.options.base_options import BaseOptions
class ConvertOptions(BaseOptions):
    """Arguments parser for model conversion."""
    def initialize(self, parser):
        BaseOptions.initialize(self, parser)
        parser.add_argument('--platform',
                            type=str,
                            help='deep learning framework')
        # model config
        parser.add_argument('--channel_mean_value',
                            type=str,
                            help='mean and scale parameters for pre-process')
        parser.add_argument(
            '--reorder_channel',
            type=str,
            help='the permutation order of the dimensions of the input image')
        # model loading
        parser.add_argument('--model_file_path',
                            type=str,
                            help='the path of the model file')
        parser.add_argument('--graph_file_path',
                            type=str,
                            help='the path of the model graph definition file')
        parser.add_argument('--inputs',
                            nargs='+',
                            type=str,
                            help='the input nodes of the model')
        parser.add_argument('--outputs',
                            nargs='+',
                            type=str,
                            help='the output nodes of the model')
        parser.add_argument(
            '--input_size_list',
            nargs='+',
            type=str,
            help='the size and number of channels of the input tensors corresponding to the input nodes'
        )
        # model building
        parser.add_argument(
            '--dataset_file_path',
            type=str,
            help='an input data set for rectifying quantization parameters')
        parser.add_argument(
            '--dataset_for_analysis_file_path',
            type=str,
            help='an input data set for analysing quantization accuracy (needs to contain one line)'
        )
        parser.add_argument(
            '--no_pre_compile',
            action='store_true',
            help='whether or not to pre-compile the model for specific hardware')
        parser.add_argument('--no_quantization',
                            action='store_true',
                            help='whether or not to quantize the model')
        parser.add_argument('--output_path',
                            type=str,
                            help='path to the converted model')
        # additional flags
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='print log from RKNN')
        parser.add_argument(
            '-a',
            '--analyse_accuracy',
            action='store_true',
            help='whether or not to analyse quantization accuracy')
        return parser
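    # --- Illustrative note (not part of the original file) ---
    # A hypothetical command line exercising the flags registered above
    # (the driver script name is made up for illustration):
    #
    #     python convert.py --platform tensorflow \
    #         --channel_mean_value 0,0,0,255 --reorder_channel 0,1,2 \
    #         --input_size_list 3x224x224 --output_path model.rknn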
def parse(self, additional_args=None, estimator_cls=None):
opt = super().parse(additional_args, estimator_cls)
assert len(opt.channel_mean_value.split(',')) in (4, 5)
assert len(opt.reorder_channel.split(',')) == 3
opt.channel_mean_value = opt.channel_mean_value.replace(',', ' ')
opt.reorder_channel = opt.reorder_channel.replace(',', ' ')
if opt.platform == 'tensorflow':
assert len(opt.inputs) == len(opt.input_size_list)
if opt.input_size_list is not None and len(opt.input_size_list) > 0:
opt.input_size_list = [
list(map(int, input_size.split('x')))
for input_size in opt.input_size_list
]
self.opt = opt
return self.opt
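# Hedged example of the post-parse normalisation above (values are illustrative):
# --channel_mean_value "128,128,128,1" is normalised to the string "128 128 128 1",
# and --input_size_list "3x224x224" is parsed into [[3, 224, 224]].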
| 38.71875 | 99 | 0.517622 | 378 | 3,717 | 4.917989 | 0.309524 | 0.07262 | 0.137171 | 0.045186 | 0.208714 | 0.157612 | 0.126412 | 0.126412 | 0.073158 | 0.037655 | 0 | 0.001757 | 0.387409 | 3,717 | 95 | 100 | 39.126316 | 0.814668 | 0.026365 | 0 | 0.325 | 0 | 0 | 0.258726 | 0.008864 | 0 | 0 | 0 | 0 | 0.0375 | 1 | 0.025 | false | 0 | 0.0125 | 0 | 0.075 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
258dcfd014f8ea06a5fb2f72aa227e24e3074687 | 12,657 | py | Python | facebook_downloader/__init__.py | coej/facebook_downloader | b354387f818521e431aaeb81ff70890c2905ba1e | [
"MIT"
] | 1 | 2015-08-19T03:51:16.000Z | 2015-08-19T03:51:16.000Z | facebook_downloader/__init__.py | coej/facebook_downloader | b354387f818521e431aaeb81ff70890c2905ba1e | [
"MIT"
] | null | null | null | facebook_downloader/__init__.py | coej/facebook_downloader | b354387f818521e431aaeb81ff70890c2905ba1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import (print_function, unicode_literals, division)
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
__metaclass__ = type
import os
import requests
import json
import time
from datetime import datetime
import pymongo
from enum import Enum
class Nodes(Enum):
post = 'post' # Nodes.post
like = 'like' # Nodes.like
comment = 'comment' # Nodes.comment
reply = 'reply'
insights_metric = 'insights_metric' #Nodes.insights_metric
class Data_Page:
def __init__(self, connection, data, item_types):
from datetime import datetime
self.item_types = item_types
self.data = data['data']
try:
paging = data['paging']
except KeyError:
self.next_page_url = None
            self.prev_page_url = None
raise StopIteration
self.next_page_url = paging['next'] if 'next' in paging else None
self.prev_page_url = paging['previous'] if 'previous' in paging else None
self.items = []
for item in self.data:
if item_types == Nodes.post:
p = Post(connection, item)
p.data['total_likes_count'] = connection.get_likes_count(p)
p.data['total_comments_count'] = connection.get_comments_count(p)
elif item_types == Nodes.like:
p = Like(connection, item)
elif item_types == Nodes.comment:
p = Comment(connection, item)
elif item_types == Nodes.reply:
p = Comment(connection, item) ## change later?
elif item_types == Nodes.insights_metric:
p = Insights_Metric(connection, item)
else:
raise ValueError(item)
p.data['downloaded_time'] = datetime.now()
self.items.append(p)
class Post:
def __repr__(self):
if len(str(self.data)) > 100:
            return 'Post() %s (...) \n' % str(self.data)[:100]
else:
return 'Post() %s \n' % self.data
def __str__(self):
        return str(self.data)
def __init__(self, connection, data):
self.data = data
self.post_type = data['type']
try:
self.likes_p1 = Data_Page(connection, data['likes'], item_types=Nodes.like)
except KeyError:
self.likes_p1 = None
try:
self.comments_p1 = Data_Page(connection, data['comments'], item_types=Nodes.comment)
except KeyError:
self.comments_p1 = None
# class Summary:
# json groups are "data", "paging", and sometimes "summary"
# e.g., when you request 5647744585_10151775853479586/likes?summary=true
class Like:
def __repr__(self):
return 'Like(): %s \n' % self.data
def __init__(self, connection, data):
self.data = data
self.item_type = Nodes.like
class Comment:
def __repr__(self):
return 'Comment(): %s \n' % self.data
def __init__(self, connection, data):
self.data = data
self._id = data['id']
self.item_type = Nodes.comment
class Insights_Metric:
def __repr__(self):
        return 'Insights_Metric(): %s \n' % self.data
def __init__(self, connection, data):
self.data = data
self._id = data['id']
self.item_type = Nodes.insights_metric
class FacebookConnection:
def __init__(self, token=None):
self.token = token
self.update_token()
def token_is_current(self):
r = self.query(node='me', edge=None, fields=None,
query_params=None, pass_errors=True)
#print (r)
if 'error' in r:
if r['error']['type'] == 'OAuthException':
return False
else:
raise ValueError(r['error'])
else:
return True
def update_token(self):
def token_browser_input():
import webbrowser
webbrowser.open_new_tab("https://developers.facebook.com/tools/explorer/")
from builtins import input
self.token = input('token: ')
while not self.token_is_current():
print ("Opening browser to fetch a new token.")
token_browser_input()
#print (self.token)
#print ('finished while loop. self.token:')
#print (self.token)
print ("Token validated for basic user-level access.")
return True
def query_url(self, node, edge=None, query_params=None,
fields=None):
import urllib
import requests
from urllib.parse import urlencode
root = 'https://graph.facebook.com/v2.3'
if not edge: edge = ''
param_kwargs = {'access_token': self.token}
if fields:
field_list_str = ','.join(fields)
param_kwargs['fields'] = field_list_str
if query_params:
param_kwargs.update(query_params)
param_string = urlencode(param_kwargs)
url = '{root}/{node}/{edge}?{params}'.format(
root=root, node=node, edge=edge, params=param_string)
return url
def query(self, node, edge=None, query_params=None,
fields=None, print_url=False, pass_errors=False):
url = self.query_url(node, edge, query_params, fields)
if print_url:
print (url)
return getj(url, pass_errors)
def get_likes_count(self, post_obj):
post_id = post_obj.data['id']
res = self.query(node=post_id, edge='likes',
query_params={'summary':'true'})
try:
likes = int(res['summary']['total_count'])
return likes
except:
print ("[!likes]")
return None
def get_comments_count(self, post_obj):
post_id = post_obj.data['id']
res = self.query(node=post_id, edge='comments',
query_params={'summary':'true'})
try:
likes = int(res['summary']['total_count'])
return likes
except:
print ("[!comments]")
return None
def get_post_insights(self, post_id, show_progress=False):
# we'll have to change this later to work as an update to our post collection
# rather than creating new keys on each post ID in a new collection...
response = self.query(node='{}/insights'.format(post_id),
edge=None,
query_params=None,
fields=None,
print_url=False)
metrics_firstpage = Data_Page(self, response, Nodes.insights_metric)
generator = facebook_paging(self, metrics_firstpage,
show_progress=False, #don't want one tick for each metric
print_urls=False,
first_page_only=True)
metric_list = list(generator)
#then, add extras:
insights_block = {'_id': post_id}
for m in metric_list:
insights_block[m['name']] = m
#insights_block['like_count'] = get_like_count(post_id)
#insights_block['share_count'] = get_share_count(post_id)
return insights_block
def downloader(collection, # pymongo collection object
account_id, # facebook account node
token,
since, # string, e.g., 2015-01-01
until, # string, e.g., 2015-03-31
skip_duplicates=True,
silent=False):
import json
import time
from datetime import datetime
import pymongo
fb = FacebookConnection(token)
response = fb.query(node=account_id,
edge='posts',
query_params={'since': since,
'until': until,
},
fields=None,
print_url=False)
posts_firstpage = Data_Page(fb, response, Nodes.post)
generator = facebook_paging(fb, posts_firstpage,
show_progress=True)
for post in generator:
post['insights'] = fb.get_post_insights(post['id'])
post = post_transformations(post)
try:
wresult = collection.insert(post) #, upsert=True) --> for .update()
if not silent:
print('.', end='')
except pymongo.errors.DuplicateKeyError:
if skip_duplicates and not silent:
print('d', end='')
else: raise
except pymongo.errors.InvalidDocument:
print ('invalid: %s' % post['_id'])
raise
except:
raise
time.sleep(.1)
def printj(data):
print (json.dumps(data, indent=1))
def replace_dot_key(obj):
# Necessary if saving JSON keys with periods in them into a MongoDB document
# (periods aren't allowed).
# use as:
    # new_json = json.loads(data, object_hook=replace_dot_key)
for key in obj.keys():
new_key = key.replace(".","_DOT_")
if new_key != key:
obj[new_key] = obj[key]
del obj[key]
return obj
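# Hedged round-trip sketch for the hook above:
# json.loads('{"a.b": 1}', object_hook=replace_dot_key)  # -> {'a_DOT_b': 1}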
def getj(url, pass_errors=False):
#print url
#response_json = requests.get(url).json()
try:
response_text = requests.get(url).text
except:
print (url)
raise
try:
# can't load JSON objects with a '.' in any key
# into MongoDB.
response_json = json.loads(response_text,
object_hook=replace_dot_key)
except:
        print (response_text)
raise
if 'error' in response_json and not pass_errors:
raise ValueError(response_json['error'])
return response_json
def post_transformations(post):
def parse_fb_datetime(datetime_string):
from datetime import datetime
return datetime.strptime(datetime_string,'%Y-%m-%dT%H:%M:%S+0000')
post['_id'] = post['id']
# del post['id']
post['created_datetime'] = parse_fb_datetime(post['created_time'])
post['updated_datetime'] = parse_fb_datetime(post['updated_time'])
return post
def facebook_paging(connection, data_page_one, show_progress=True, print_urls=False,
first_page_only=False):
import time
progress_mark = {
Nodes.comment: 'xC ',
Nodes.like: 'xL ',
Nodes.post: 'xP ',
Nodes.reply: 'xc ',
Nodes.insights_metric: 'xI '
}
# determine whether we're looking at a page of posts, comments, etc.
item_types = data_page_one.item_types
next_page = data_page_one
while True:
# don't yield the whole page-- pass out elements
#yield next_page
current_page_items = next_page.data
for item in current_page_items:
yield item
            # if item_types == Nodes.page:
# yield item
time.sleep(.1)
if show_progress:
print(len(current_page_items), end='')
try:
print(progress_mark[item_types], end='')
except KeyError:
print('?')
if first_page_only:
break
last_page = next_page
next_url = last_page.next_page_url
if print_urls:
print ('\n' + next_url + '\n')
response = getj(next_url)
try:
next_page = Data_Page(connection, response, item_types=item_types)
except StopIteration:
break
# other cases that mean we're out of data pages
if next_page.next_page_url == last_page.next_page_url:
break
elif not next_page.next_page_url:
break
elif next_page.data == last_page.data:
break
if show_progress:
print('| ', end='')
def insight_value(post,name):
insight_node = post['insights'][name]
value = insight_node['values'][0]['value']
return value
def fb_month_range(year, month):
import calendar, datetime
one_day = datetime.timedelta(days=1)
first_weekday, length = calendar.monthrange(year, month)
start = str(datetime.date(year,month,1))
end = str(datetime.date(year,month,length) + one_day) # FB API uses midnight-before-this-day as cutoff for "until"
return (start, end)
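# Hedged doctest-style example for fb_month_range:
# >>> fb_month_range(2015, 2)
# ('2015-02-01', '2015-03-01')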
| 30.572464 | 118 | 0.572569 | 1,478 | 12,657 | 4.696211 | 0.201624 | 0.020746 | 0.014119 | 0.015128 | 0.191183 | 0.15358 | 0.119723 | 0.119723 | 0.119723 | 0.112664 | 0 | 0.007879 | 0.328119 | 12,657 | 413 | 119 | 30.646489 | 0.808325 | 0.106976 | 0 | 0.285235 | 0 | 0 | 0.067401 | 0.004529 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09396 | false | 0.016779 | 0.083893 | 0.013423 | 0.291946 | 0.083893 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2590970997787e3c270df4ec7937f3094133ac02 | 5,172 | py | Python | commands/create_episodes.py | havanagrawal/wikidata-toolkit | 5f39f449ac48eb4b7d93f9b51efa47a4206953e4 | [
"MIT"
] | 5 | 2019-07-29T15:05:14.000Z | 2020-10-15T03:02:50.000Z | commands/create_episodes.py | havanagrawal/wikidata-toolkit | 5f39f449ac48eb4b7d93f9b51efa47a4206953e4 | [
"MIT"
] | 16 | 2019-07-29T05:59:14.000Z | 2021-12-13T20:06:09.000Z | commands/create_episodes.py | havanagrawal/wikidata-toolkit | 5f39f449ac48eb4b7d93f9b51efa47a4206953e4 | [
"MIT"
] | 8 | 2019-12-20T02:27:11.000Z | 2020-10-15T05:25:40.000Z | import csv
from pywikibot import ItemPage, Site
import properties.wikidata_properties as wp
from utils import RepoUtils
from .errors import SuspiciousTitlesError
def read_titles(filepath):
with open(filepath, "r") as f:
reader = csv.reader(f)
return list(reader)
def create_episode_quickstatements(series_id, season_id, title, series_ordinal, season_ordinal):
"""Prints out QuickStatements that can be used to create an episode item on WikiData"""
print("CREATE")
print(f'LAST|Len|"{title}"')
print(f"LAST|{wp.INSTANCE_OF.pid}|{wp.TELEVISION_SERIES_EPISODE}")
print(f'LAST|{wp.PART_OF_THE_SERIES.pid}|{series_id}|{wp.SERIES_ORDINAL.pid}|"{series_ordinal}"')
print(f'LAST|{wp.SEASON.pid}|{season_id}|{wp.SERIES_ORDINAL.pid}|"{season_ordinal}"')
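# Hedged example of the emitted QuickStatements, assuming the usual Wikidata
# property IDs (P31 = instance of, P179 = part of the series, P4908 = season,
# P1545 = series ordinal); the Q-IDs below are placeholders:
#
# CREATE
# LAST|Len|"Pilot"
# LAST|P31|Q21191270
# LAST|P179|Q12345|P1545|"1"
# LAST|P4908|Q67890|P1545|"1"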
def create_episode(series_id, season_id, title, series_ordinal, season_ordinal, dry):
"""Creates a season item on WikiData
Arguments
---------
series_id: str
The Wiki ID of the series ItemPage
season_id: str
The Wiki ID of the season ItemPage
title: str
The title of this episode. This is used to set the label.
series_ordinal: int
The ordinal of this episode, within the series
season_ordinal: int
The ordinal of this episode, within the season
dry: bool
Whether or not this function should run in dry-run mode.
In dry-run mode, no real changes are made to WikiData, they are only
logged to stdout.
Returns
-------
episode_id: str
The Wiki ID of the episode item
"""
dry_str = "[DRY-RUN] " if dry else ""
print(f"{dry_str}Creating episode with label='{title}'")
episode = None
if not dry:
repoutil = RepoUtils(Site().data_repository())
season = ItemPage(repoutil.repo, season_id)
season.get()
# Check if season has part_of_the_series set to series_id
if wp.PART_OF_THE_SERIES.pid not in season.claims:
raise ValueError(f"The season {season_id} does not have a PART_OF_THE_SERIES ({wp.PART_OF_THE_SERIES.pid} property). Check the input series and season IDs for correctness.")
actual_series_id = str(season.claims[wp.PART_OF_THE_SERIES.pid][0].getTarget().getID())
if actual_series_id != series_id:
raise ValueError(f"The season {season_id} has PART_OF_THE_SERIES={actual_series_id} but expected={series_id}. Check the input series and season IDs for correctness.")
episode = ItemPage(repoutil.repo)
episode.editLabels({"en": title}, summary="Setting label")
print(f"Created a new Item: {episode.getID()}")
print(f"{dry_str}Setting {wp.INSTANCE_OF}={wp.TELEVISION_SERIES_EPISODE}")
if not dry:
instance_claim = repoutil.new_claim(wp.INSTANCE_OF.pid)
instance_claim.setTarget(ItemPage(repoutil.repo, wp.TELEVISION_SERIES_EPISODE))
episode.addClaim(instance_claim, summary=f"Setting {wp.INSTANCE_OF.pid}")
print(f"{dry_str}Setting {wp.PART_OF_THE_SERIES}={series_id}, with {wp.SERIES_ORDINAL}={series_ordinal}")
if not dry:
series_claim = repoutil.new_claim(wp.PART_OF_THE_SERIES.pid)
series_claim.setTarget(ItemPage(repoutil.repo, series_id))
series_ordinal_claim = repoutil.new_claim(wp.SERIES_ORDINAL.pid)
series_ordinal_claim.setTarget(series_ordinal)
series_claim.addQualifier(series_ordinal_claim)
episode.addClaim(series_claim, summary=f"Setting {wp.PART_OF_THE_SERIES.pid}")
print(f"{dry_str}Setting {wp.SEASON}={season_id}, with {wp.SERIES_ORDINAL}={season_ordinal}")
if not dry:
season_claim = repoutil.new_claim(wp.SEASON.pid)
season_claim.setTarget(ItemPage(repoutil.repo, season_id))
season_ordinal_claim = repoutil.new_claim(wp.SERIES_ORDINAL.pid)
season_ordinal_claim.setTarget(season_ordinal)
season_claim.addQualifier(season_ordinal_claim)
episode.addClaim(season_claim, summary=f"Setting {wp.SEASON.pid}")
return episode.getID() if episode is not None else "Q-1"
def create_episodes(series_id, season_id, titles_file, quickstatements=False, dry=False, confirm_titles=False):
titles = read_titles(titles_file)
maybe_erroneous_titles = check_erroneous_titles(titles)
if maybe_erroneous_titles and not confirm_titles:
raise SuspiciousTitlesError(
"The following titles have an uncommon character in them: \n"
+ "\n".join([" * {t}" for t in maybe_erroneous_titles])
)
episode_ids = []
for series_ordinal, season_ordinal, title in titles:
if quickstatements:
create_episode_quickstatements(series_id, season_id, title, series_ordinal, season_ordinal)
else:
episode_id = create_episode(series_id, season_id, title, series_ordinal, season_ordinal, dry)
episode_ids.append(episode_id)
return episode_ids
def check_erroneous_titles(titles):
    uncommon_chars = {"[", "]"}  # set("[", "]") raises TypeError; use a set literal
    maybe_erroneous_titles = [
        title
        for _, _, title in titles  # each row is (series_ordinal, season_ordinal, title)
        if any(c in title for c in uncommon_chars)
]
return maybe_erroneous_titles
| 39.480916 | 185 | 0.703016 | 718 | 5,172 | 4.835655 | 0.194986 | 0.067396 | 0.03485 | 0.043203 | 0.382777 | 0.299827 | 0.220334 | 0.156106 | 0.156106 | 0.079493 | 0 | 0.000483 | 0.199923 | 5,172 | 130 | 186 | 39.784615 | 0.838569 | 0.145205 | 0 | 0.051948 | 0 | 0.025974 | 0.242312 | 0.117919 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064935 | false | 0 | 0.064935 | 0 | 0.181818 | 0.12987 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2593b87aa1477b56a51e9ed6f2959196a21fd9b9 | 8,507 | py | Python | modelkit/assets/manager.py | tgenin/modelkit | 2c67b7e12575fa51221f713c2c094030228402ee | [
"MIT"
] | null | null | null | modelkit/assets/manager.py | tgenin/modelkit | 2c67b7e12575fa51221f713c2c094030228402ee | [
"MIT"
] | null | null | null | modelkit/assets/manager.py | tgenin/modelkit | 2c67b7e12575fa51221f713c2c094030228402ee | [
"MIT"
] | null | null | null | import os
import re
from typing import Union, cast
import filelock
from structlog import get_logger
from modelkit.assets import errors
from modelkit.assets.remote import RemoteAssetsStore
from modelkit.assets.settings import AssetsManagerSettings, AssetSpec
from modelkit.assets.versioning import (
VERSION_RE,
filter_versions,
parse_version,
sort_versions,
)
from modelkit.utils.logging import ContextualizedLogging
logger = get_logger(__name__)
class AssetFetchError(Exception):
pass
class AssetsManager:
def __init__(self, **settings):
if isinstance(settings, dict):
settings = AssetsManagerSettings(**settings)
self.assets_dir = settings.assets_dir
self.remote_assets_store = None
if settings.remote_store:
try:
self.remote_assets_store = RemoteAssetsStore(
**settings.remote_store.dict()
)
logger.debug(
"AssetsManager created with remote storage provider",
driver=self.remote_assets_store.driver,
)
except BaseException:
# A remote store was parametrized, but it could not be instantiated
logger.error(
"Failed to instantiate the requested remote storage provider"
)
raise
else:
logger.debug("AssetsManager created without a remote storage provider")
def get_local_versions_info(self, name):
if os.path.isdir(name):
return sort_versions(
d for d in os.listdir(name) if re.fullmatch(VERSION_RE, d)
)
else:
return []
def _fetch_asset(self, spec: AssetSpec):
with ContextualizedLogging(name=spec.name):
local_name = os.path.join(self.assets_dir, *spec.name.split("/"))
local_versions_list = self.get_local_versions_info(local_name)
logger.debug("Local versions list", local_versions_list=local_versions_list)
remote_versions_list = []
if self.remote_assets_store and (
not spec.major_version or not spec.minor_version
):
remote_versions_list = self.remote_assets_store.get_versions_info(
spec.name
)
logger.debug(
"Fetched remote versions list",
remote_versions_list=remote_versions_list,
)
all_versions_list = sort_versions(
list({x for x in local_versions_list + remote_versions_list})
)
if not spec.major_version and not spec.minor_version:
logger.debug("Asset has no version information")
# no version is specified
if not all_versions_list:
# and none exist
# in this case, the asset spec is likely a relative or absolute
# path to a file/directory
if os.path.exists(local_name):
logger.debug(
"Asset is a valid local path relative to ASSETS_DIR",
local_name=local_name,
)
# if the asset spec resolves to MODELKIT_ASSETS_DIR/spec.name
return {"path": local_name}
elif os.path.exists(
os.path.join(os.getcwd(), *spec.name.split("/"))
):
                    logger.debug(
                        "Asset is a valid relative local path",
                        local_name=os.path.join(os.getcwd(), *spec.name.split("/")),
                    )
                    # if the asset spec resolves to cwd/spec.name
return {
"path": os.path.join(os.getcwd(), *spec.name.split("/"))
}
elif os.path.exists(spec.name):
                    logger.debug(
                        "Asset is a valid absolute local path",
                        local_name=spec.name,
                    )
# if the asset spec is a valid absolute path
return {"path": spec.name}
else:
raise errors.AssetDoesNotExistError(spec.name)
if not spec.major_version or not spec.minor_version:
if not all_versions_list:
raise errors.LocalAssetDoesNotExistError(
name=spec.name,
major=spec.major_version,
minor=spec.minor_version,
local_versions=local_versions_list,
)
# at least one version info is missing, fetch the latest
if not spec.major_version:
spec.major_version, spec.minor_version = parse_version(
all_versions_list[0]
)
elif not spec.minor_version:
spec.major_version, spec.minor_version = parse_version(
filter_versions(all_versions_list, major=spec.major_version)[0]
)
logger.debug(
"Resolved latest version",
major=spec.major_version,
minor=spec.minor_version,
)
version = f"{spec.major_version}.{spec.minor_version}"
with ContextualizedLogging(version=version):
asset_dict = {
"from_cache": True,
"version": version,
"path": os.path.join(
self.assets_dir, *spec.name.split("/"), version
),
}
if version not in local_versions_list:
if self.remote_assets_store:
logger.info(
"Fetching distant asset",
local_versions=local_versions_list,
)
asset_download_info = self.remote_assets_store.download(
spec.name, version, self.assets_dir
)
asset_dict.update({**asset_download_info, "from_cache": False})
else:
raise errors.LocalAssetDoesNotExistError(
name=spec.name,
major=spec.major_version,
minor=spec.minor_version,
local_versions=local_versions_list,
)
if spec.sub_part:
local_sub_part = os.path.join(
*(
list(os.path.split(str(asset_dict["path"])))
+ [p for p in spec.sub_part.split("/") if p]
)
)
asset_dict["path"] = local_sub_part
return asset_dict
def fetch_asset(self, spec: Union[AssetSpec, str], return_info=False):
logger.info("Fetching asset", spec=spec, return_info=return_info)
if isinstance(spec, str):
spec = cast(AssetSpec, AssetSpec.from_string(spec))
lock_path = (
os.path.join(self.assets_dir, ".cache", *spec.name.split("/")) + ".lock"
)
os.makedirs(os.path.dirname(lock_path), exist_ok=True)
with filelock.FileLock(lock_path, timeout=5):
asset_info = self._fetch_asset(spec)
logger.debug("Fetched asset", spec=spec, asset_info=asset_info)
path = asset_info["path"]
if not os.path.exists(path):
logger.error(
"An unknown error occured when fetching asset."
"The path does not exist.",
path=path,
spec=spec,
)
raise AssetFetchError(
f"An unknown error occured when fetching asset {spec}."
f"The path {path} does not exist."
)
if not return_info:
return path
return asset_info
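# Hedged usage sketch (the asset name/version are placeholders; the spec string
# format follows AssetSpec.from_string):
#
# manager = AssetsManager(assets_dir="/tmp/assets")
# path = manager.fetch_asset("category/my_asset:1.0")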
| 41.296117 | 88 | 0.505231 | 823 | 8,507 | 5.031592 | 0.1774 | 0.060855 | 0.042502 | 0.035499 | 0.321178 | 0.28568 | 0.250906 | 0.18015 | 0.162521 | 0.101183 | 0 | 0.000609 | 0.420947 | 8,507 | 205 | 89 | 41.497561 | 0.840032 | 0.046315 | 0 | 0.217877 | 0 | 0 | 0.086881 | 0.00506 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022346 | false | 0.005587 | 0.055866 | 0 | 0.134078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25992567ed4a32a8d244b10358769547aa3cd880 | 1,783 | py | Python | node/gcn.py | TrueNobility303/Machine-Learning-Cora | 9e35ebfe8d4db20031aff8361e55af8a1404bc93 | [
"MIT"
] | 1 | 2021-07-04T04:25:15.000Z | 2021-07-04T04:25:15.000Z | node/gcn.py | TrueNobility303/Machine-Learning-Graph | 9e35ebfe8d4db20031aff8361e55af8a1404bc93 | [
"MIT"
] | null | null | null | node/gcn.py | TrueNobility303/Machine-Learning-Graph | 9e35ebfe8d4db20031aff8361e55af8a1404bc93 | [
"MIT"
] | 1 | 2021-07-30T03:18:59.000Z | 2021-07-30T03:18:59.000Z | import torch
from torch_geometric.datasets import Planetoid
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from torch_geometric.nn import GINConv,SAGEConv
from torch.nn import Linear,Sequential,BatchNorm1d,ReLU
device = torch.device('cuda:0')
class Net(torch.nn.Module):
def __init__(self,dim=16):
super(Net, self).__init__()
self.conv1 = GCNConv(1433, 16)
        self.conv2 = GCNConv(16, 7)
        # Alternatively, the Graph Isomorphism Network (GIN) layers below can be used:
"""
self.conv1 = GINConv(
Sequential(Linear(1433, dim), BatchNorm1d(dim), ReLU(),
Linear(dim, dim), ReLU()))
self.conv2 = GINConv(Linear(dim,7))
"""
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.conv1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
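# Shape sketch for Cora: data.x is [2708, 1433] node features, data.edge_index
# is [2, num_edges], and forward() returns [2708, 7] per-class log-probabilities.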
dataset = Planetoid(root='/datasets/Cora', name='Cora')
GCN = Net().to(device)
data = dataset[0].to(device)
optimizer = torch.optim.Adam(GCN.parameters(), lr=0.01, weight_decay=5e-4)
def train_one_epoch():
GCN.train()
optimizer.zero_grad()
out = GCN(data)
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
loss.backward()
optimizer.step()
return loss.item()
def test_one_epoch():
GCN.eval()
_, pred = GCN(data).max(dim=1)
correct = pred[data.test_mask].eq(data.y[data.test_mask]).sum()
accuracy = correct / data.test_mask.sum()
return accuracy.item()
GCN.train()
for epoch in range(200):
loss = train_one_epoch()
acc = test_one_epoch()
if epoch % 1 == 0:
print('epoch',epoch,'loss',loss,'accuracy',acc)
# Fixed at epoch = 200
# GCN acc 81.10% | 28.301587 | 74 | 0.632081 | 253 | 1,783 | 4.324111 | 0.367589 | 0.032907 | 0.04936 | 0.036563 | 0.047532 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032538 | 0.224341 | 1,783 | 63 | 75 | 28.301587 | 0.758496 | 0.026921 | 0 | 0.046512 | 0 | 0 | 0.026885 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.139535 | 0 | 0.325581 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
259ba1f2708bdca386f6f7be8aad800497a1495c | 6,893 | py | Python | src/data_loading.py | JuanitaSmith/ml_capstone_mailout_prediction | 30d1e1218107d05ab59afc38f51e4c7f3e1d287c | [
"CNRI-Python"
] | 1 | 2021-12-16T17:11:10.000Z | 2021-12-16T17:11:10.000Z | src/data_loading.py | JuanitaSmith/ml_capstone_mailout_prediction | 30d1e1218107d05ab59afc38f51e4c7f3e1d287c | [
"CNRI-Python"
] | null | null | null | src/data_loading.py | JuanitaSmith/ml_capstone_mailout_prediction | 30d1e1218107d05ab59afc38f51e4c7f3e1d287c | [
"CNRI-Python"
] | null | null | null | import pandas as pd
from src.config import path_raw, filename_levels, filename_attributes, filename_levels_sheet, filename_attributes_sheet, \
filename_customer_delimiter
def load_levels(filename, sheet):
"""
Load attribute information levels from excel into a dataframe
Args:
filename (string): name of the attributes level file
sheet (string): sheet from the excel to read
Returns:
dataframe: features mapped to levels
"""
levels = pd.read_excel(filename,
sheet_name=sheet,
engine='openpyxl',
skiprows=1)
# copy level value to cells below, it's normally only filled in for the first line in a category
levels.fillna(method='ffill', axis=0, inplace=True)
# drop empty columns
levels.dropna(axis=1, how='all', inplace=True)
# some levels contains 2 column names in 1 line, split and explode it so that one row contains only one attribute
levels['Attribute'] = levels['Attribute'].astype(str).str.split(' ', n=1)
levels = levels.explode('Attribute')
# remove leading zero's after the split
levels['Attribute'] = levels['Attribute'].str.strip()
# set column 'Attribute' as the index
levels = levels.set_index('Attribute')
# build a dictionary we can use to map an attribute to a level later on
levels_dict = levels['Information level'].to_dict()
return levels, levels_dict
def load_attribute_descriptions(filename, sheet):
"""
Load feature descriptions
Args:
filename (string): name of the attributes level file
sheet (string): sheet from the excel to read
Returns:
attributes: dataset containing feature descriptions
missing_dict: dictionary contain true missing values
missing_dict2: dictionary containing a different kind of missing values where values are 0 (but not missing)
missing_df: dataset containing 0 values for transactional values only
"""
attributes = pd.read_excel(filename,
sheet_name=sheet,
engine='openpyxl',
skiprows=1,
na_values=['…'])
# forward fill column values
attributes.fillna(method='ffill', axis=0, inplace=True)
# drop empty columns
attributes.dropna(axis=1, how='all', inplace=True)
# Build a missing values dictionary containing only the missing values for each column
missing_values = attributes.loc[attributes['Meaning'].str.contains('unknown'), ['Attribute', 'Value']].set_index(
['Attribute'])
missing_values['Value'] = missing_values['Value'].astype(str).str.split(', ')
missing_dict = missing_values['Value'].to_dict()
# build a second missing values dictionary to treat additional values as unknown rather that 0
missing_list = ['unknown', 'no transactions known', 'no transaction known', 'no Online-transactions']
missing_df = attributes.loc[
attributes['Meaning'].str.contains('|'.join(missing_list)), ['Attribute', 'Value']].set_index(['Attribute'])
missing_df['Value'] = missing_df['Value'].astype(str).str.split(', ')
missing_dict2 = missing_df['Value'].to_dict()
missing_list_ekstra = ['no transactions known', 'no transaction known', 'no Online-transactions']
missing_df = attributes.loc[
attributes['Meaning'].str.contains('|'.join(missing_list_ekstra)), ['Attribute', 'Value']].set_index(
['Attribute'])
return attributes, missing_dict, missing_dict2, missing_df
def load_dataset(filename, delimiter, na_values, reset_na=None, visualize=False):
"""
Load data with enhanced missing values
Args:
        filename: dataset containing demographics values
        delimiter: delimiter
        na_values: dictionary of values that need to be treated as missing, per column
        reset_na: dataframe of values that represent "no transaction" rather than missing; these are filled back in after loading
        visualize: print unique values of certain columns after imputing
    Returns:
        dataframe with enhanced missing values
"""
    # EINGEFUEGT_AM is a date/time stamp. Based on Google Translate, it is assumed
    # to be the date the customer was added to the database. The field is trimmed down to the year.
custom_date_parser = lambda x: pd.to_datetime(x, errors='ignore').strftime('%Y')
# enhance missing values definition
data = pd.read_csv(filename,
sep=delimiter,
na_values=na_values,
parse_dates=['EINGEFUEGT_AM'],
date_parser=custom_date_parser)
# convert date into year
data['EINGEFUEGT_AM'] = data['EINGEFUEGT_AM'].dt.year
    if reset_na is not None and len(reset_na) > 0:
for i, row in reset_na.iterrows():
if i in list(data.columns):
data[i].fillna(row['Value'], inplace=True)
if visualize:
print('\nUnique values:\n')
print('\nAGER_TYPE: {}'.format(list(data.AGER_TYP.unique())))
print('\nCAMEO_INTL_2015: {}'.format(list(data.CAMEO_INTL_2015.unique())))
print('\nCAMEO_DEUG_2015: {}'.format(list(data.CAMEO_DEUG_2015.unique())))
print('\nCAMEO_DEU_2015: {}'.format(list(data.CAMEO_DEU_2015.unique())))
print('\nEINGEFEUGT_AM: {}'.format(list(data.EINGEFUEGT_AM.unique())))
print('\nD19_GESAMT_DATUM: {}'.format(list(data.D19_GESAMT_DATUM.unique())))
return data
def get_data(data_path):
"""
General entry point to import all datasets
Args:
data_path: path where the dataset can be read from
Returns:
dataframe containing the data
"""
# Get data levels
path = "{}/{}".format(path_raw, filename_levels)
levels, levels_dict = load_levels(path, filename_levels_sheet)
# get attribute descriptions and missing data values
path = "{}/{}".format(path_raw, filename_attributes)
attributes, missing_dict, missing_dict2, missing_df = load_attribute_descriptions(path, filename_attributes_sheet)
# Reading main dataset replacing missing values
df = load_dataset(filename=data_path,
delimiter=filename_customer_delimiter,
na_values=missing_dict,
reset_na=missing_df,
visualize=False)
# Reading main dataset again, treating transaction fields = 0, temporarily as missing values
df_extended_na = load_dataset(data_path,
delimiter=filename_customer_delimiter,
na_values=missing_dict2,
reset_na=[],
visualize=False)
df.set_index('LNR', inplace=True, verify_integrity=True)
df_extended_na.set_index('LNR', inplace=True, verify_integrity=True)
return df, df_extended_na
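# Hedged usage sketch (the path is a placeholder):
# df, df_extended_na = get_data('data/raw/customers.csv')
# The two frames share the same shape but differ in which cells are NaN.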
| 38.294444 | 122 | 0.652691 | 834 | 6,893 | 5.23741 | 0.264988 | 0.044643 | 0.019231 | 0.011676 | 0.332875 | 0.305632 | 0.274267 | 0.223901 | 0.205128 | 0.179029 | 0 | 0.009272 | 0.248948 | 6,893 | 179 | 123 | 38.50838 | 0.833881 | 0.32105 | 0 | 0.153846 | 0 | 0 | 0.128365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.025641 | 0 | 0.128205 | 0.089744 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
259bf22657fe2b0b15bb9d6863fe4331ffc37d73 | 5,486 | py | Python | src/tespy/components/basics/subsystem_interface.py | anmartens/tespy | 9a543d67cd8266c15cb9940ca640d6a8eda27a28 | [
"MIT"
] | 1 | 2020-02-25T08:41:03.000Z | 2020-02-25T08:41:03.000Z | src/tespy/components/basics/subsystem_interface.py | anmartens/tespy | 9a543d67cd8266c15cb9940ca640d6a8eda27a28 | [
"MIT"
] | null | null | null | src/tespy/components/basics/subsystem_interface.py | anmartens/tespy | 9a543d67cd8266c15cb9940ca640d6a8eda27a28 | [
"MIT"
] | null | null | null | # -*- coding: utf-8
"""Module for class SubsystemInterface.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tespy/components/basics/subsystem_interface.py
SPDX-License-Identifier: MIT
"""
from tespy.components.component import Component
from tespy.tools.data_containers import DataContainerSimple as dc_simple
class SubsystemInterface(Component):
r"""
The subsystem interface does not change fluid properties.
**Mandatory Equations**
- :py:meth:`tespy.components.component.Component.fluid_func`
- :py:meth:`tespy.components.component.Component.mass_flow_func`
- Pressure:
:py:meth:`tespy.components.basics.subsystem_interface.SubsystemInterface.variable_equality_func`
- Enthalpy:
:py:meth:`tespy.components.basics.subsystem_interface.SubsystemInterface.variable_equality_func`
Inlets/Outlets
- Specify number of inlets and outlets with :code:`num_inter`,
predefined value: 1.
Image
.. image:: _images/SubsystemInterface.svg
:alt: alternative text
:align: center
Parameters
----------
label : str
The label of the component.
design : list
List containing design parameters (stated as String).
offdesign : list
List containing offdesign parameters (stated as String).
design_path : str
Path to the components design case.
local_offdesign : boolean
Treat this component in offdesign mode in a design calculation.
local_design : boolean
Treat this component in design mode in an offdesign calculation.
char_warnings : boolean
Ignore warnings on default characteristics usage for this component.
printout : boolean
Include this component in the network's results printout.
num_inter : float, dict
Number of interfaces for subsystem.
Note
----
This component passes all fluid properties and mass flow from its inlet to
the outlet.
Example
-------
As connections can only connect a component with a different
component, the subsystem interface is used to connect subsystems with the
rest of your network. It is necessary to specify the number of interfaces
of the subsystem interface, if you want any number other than 1. We will
not go in depth of subsystem usage in this example. Please refer to
:ref:`this section <tespy_subsystems_label>` for more information on
building your own subsystems.
>>> from tespy.components import Sink, Source, SubsystemInterface
>>> from tespy.connections import Connection
>>> from tespy.networks import Network
>>> fluids = ['H2O', 'N2']
>>> nw = Network(fluids=fluids)
>>> nw.set_attr(p_unit='bar', T_unit='C', h_unit='kJ / kg', iterinfo=False)
>>> so1 = Source('source 1')
>>> si1 = Sink('sink 1')
>>> so2 = Source('source 2')
>>> si2 = Sink('sink 2')
>>> IF = SubsystemInterface('subsystem interface', num_inter=2)
>>> IF.component()
'subsystem interface'
>>> len(IF.inlets())
2
The interface does not change the fluid properties in any way.
>>> inc1 = Connection(so1, 'out1', IF, 'in1')
>>> outg1 = Connection(IF, 'out1', si1, 'in1')
>>> inc2 = Connection(so2, 'out1', IF, 'in2')
>>> outg2 = Connection(IF, 'out2', si2, 'in1')
>>> nw.add_conns(inc1, outg1, inc2, outg2)
>>> inc1.set_attr(fluid={'H2O': 1, 'N2': 0}, T=40, p=3, m=100)
>>> inc2.set_attr(fluid={'H2O': 0, 'N2': 1}, T=60, p=1, v=10)
>>> nw.solve('design')
>>> inc1.m.val_SI == outg1.m.val_SI
True
>>> inc2.m.val_SI == outg2.m.val_SI
True
>>> inc1.h.val_SI == outg1.h.val_SI
True
>>> inc2.h.val_SI == outg2.h.val_SI
True
"""
@staticmethod
def component():
return 'subsystem interface'
def get_mandatory_constraints(self):
return {
'mass_flow_constraints': {
'func': self.mass_flow_func, 'deriv': self.mass_flow_deriv,
'constant_deriv': True, 'latex': self.mass_flow_func_doc,
'num_eq': self.num_i},
'fluid_constraints': {
'func': self.fluid_func, 'deriv': self.fluid_deriv,
'constant_deriv': True, 'latex': self.fluid_func_doc,
'num_eq': self.num_nw_fluids * self.num_i},
'pressure_equality_constraints': {
'func': self.pressure_equality_func,
'deriv': self.pressure_equality_deriv,
'constant_deriv': True,
'latex': self.pressure_equality_func_doc,
'num_eq': self.num_i},
'enthalpy_equality_constraints': {
'func': self.enthalpy_equality_func,
'deriv': self.enthalpy_equality_deriv,
'constant_deriv': True,
'latex': self.enthalpy_equality_func_doc,
'num_eq': self.num_i}
}
@staticmethod
def get_variables():
return {'num_inter': dc_simple()}
def inlets(self):
if self.num_inter.is_set:
return ['in' + str(i + 1) for i in range(self.num_inter.val)]
else:
return ['in1']
def outlets(self):
if self.num_inter.is_set:
return ['out' + str(i + 1) for i in range(self.num_inter.val)]
else:
return ['out1']
| 33.248485 | 102 | 0.636165 | 687 | 5,486 | 4.943231 | 0.318777 | 0.047703 | 0.012956 | 0.024735 | 0.209658 | 0.182273 | 0.135453 | 0.106596 | 0.073027 | 0.073027 | 0 | 0.016577 | 0.252279 | 5,486 | 164 | 103 | 33.45122 | 0.811312 | 0.611374 | 0 | 0.232558 | 0 | 0 | 0.151617 | 0.044036 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.046512 | 0.069767 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
259d44dbda9ce86ab423e568cef5c1816855ebe3 | 6,820 | py | Python | sktime/transformations/series/outlier_detection.py | FedericoGarza/sktime | b21cdd81453abd34c72b42d4b2273b49d29eba30 | [
"BSD-3-Clause"
] | null | null | null | sktime/transformations/series/outlier_detection.py | FedericoGarza/sktime | b21cdd81453abd34c72b42d4b2273b49d29eba30 | [
"BSD-3-Clause"
] | null | null | null | sktime/transformations/series/outlier_detection.py | FedericoGarza/sktime | b21cdd81453abd34c72b42d4b2273b49d29eba30 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements transformers for detecting outliers in a time series."""
__author__ = ["aiwalter"]
__all__ = ["HampelFilter"]
import warnings
import numpy as np
import pandas as pd
from sktime.forecasting.model_selection import SlidingWindowSplitter
from sktime.transformations.base import BaseTransformer
class HampelFilter(BaseTransformer):
"""Use HampelFilter to detect outliers based on a sliding window.
Correction of outliers is recommended by means of the sktime.Imputer,
so both can be tuned separately.
Parameters
----------
window_length : int, optional (default=10)
        Length of the sliding window
n_sigma : int, optional
        Defines how strongly a point must deviate to be counted as an outlier, by default 3
k : float, optional
A constant scale factor which is dependent on the distribution,
for Gaussian it is approximately 1.4826, by default 1.4826
return_bool : bool, optional
If True, outliers are filled with True and non-outliers with False.
Else, outliers are filled with np.nan.
Notes
-----
Implementation is based on [1]_.
References
----------
.. [1] Hampel F. R., "The influence curve and its role in robust estimation",
Journal of the American Statistical Association, 69, 382–393, 1974
Examples
--------
>>> from sktime.transformations.series.outlier_detection import HampelFilter
>>> from sktime.datasets import load_airline
>>> y = load_airline()
>>> transformer = HampelFilter(window_length=10)
>>> y_hat = transformer.fit_transform(y)
"""
_tags = {
"scitype:transform-input": "Series",
# what is the scitype of X: Series, or Panel
"scitype:transform-output": "Series",
# what scitype is returned: Primitives, Series, Panel
"scitype:instancewise": True, # is this an instance-wise transform?
"X_inner_mtype": ["pd.DataFrame", "pd.Series"],
# which mtypes do _fit/_predict support for X?
"y_inner_mtype": "None", # which mtypes do _fit/_predict support for y?
"fit_is_empty": True,
"handles-missing-data": True,
"skip-inverse-transform": True,
"univariate-only": False,
}
def __init__(self, window_length=10, n_sigma=3, k=1.4826, return_bool=False):
self.window_length = window_length
self.n_sigma = n_sigma
self.k = k
self.return_bool = return_bool
super(HampelFilter, self).__init__()
def _transform(self, X, y=None):
"""Transform X and return a transformed version.
private _transform containing the core logic, called from transform
Parameters
----------
X : pd.Series or pd.DataFrame
Data to be transformed
y : ignored argument for interface compatibility
Additional data, e.g., labels for transformation
Returns
-------
Xt : pd.Series or pd.DataFrame, same type as X
transformed version of X
"""
Z = X.copy()
# multivariate
if isinstance(Z, pd.DataFrame):
for col in Z:
Z[col] = self._transform_series(Z[col])
# univariate
else:
Z = self._transform_series(Z)
Xt = Z
return Xt
def _transform_series(self, Z):
"""Logic internal to the algorithm for transforming the input series.
Parameters
----------
Z : pd.Series
Returns
-------
pd.Series
"""
# warn if nan values in Series, as user might mix them
# up with outliers otherwise
if Z.isnull().values.any():
warnings.warn(
"""Series contains nan values, more nan might be
added if there are outliers"""
)
cv = SlidingWindowSplitter(
window_length=self.window_length, step_length=1, start_with_window=True
)
half_window_length = int(self.window_length / 2)
Z = _hampel_filter(
Z=Z,
cv=cv,
n_sigma=self.n_sigma,
half_window_length=half_window_length,
k=self.k,
)
# data post-processing
if self.return_bool:
Z = Z.apply(lambda x: True if np.isnan(x) else False)
return Z
@classmethod
def get_test_params(cls):
"""Return testing parameter settings for the estimator.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
Each dict are parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
return {"window_length": 3}
def _hampel_filter(Z, cv, n_sigma, half_window_length, k):
for i in cv.split(Z):
cv_window = i[0]
cv_median = np.nanmedian(Z[cv_window])
cv_sigma = k * np.nanmedian(np.abs(Z[cv_window] - cv_median))
# find outliers at start and end of z
if (
cv_window[0] <= half_window_length
or cv_window[-1] >= len(Z) - half_window_length
) and (cv_window[0] in [0, len(Z) - cv.window_length - 1]):
# first half of the first window
if cv_window[0] <= half_window_length:
idx_range = range(cv_window[0], half_window_length + 1)
# last half of the last window
else:
idx_range = range(len(Z) - half_window_length - 1, len(Z))
for j in idx_range:
Z.iloc[j] = _compare(
value=Z.iloc[j],
cv_median=cv_median,
cv_sigma=cv_sigma,
n_sigma=n_sigma,
)
else:
idx = cv_window[0] + half_window_length
Z.iloc[idx] = _compare(
value=Z.iloc[idx],
cv_median=cv_median,
cv_sigma=cv_sigma,
n_sigma=n_sigma,
)
return Z
def _compare(value, cv_median, cv_sigma, n_sigma):
"""Identify an outlier.
Parameters
----------
value : int/float
cv_median : int/float
cv_sigma : int/float
n_sigma : int/float
Returns
-------
    int/float or np.nan
        Returns value if it is not an outlier, else np.nan
        (converted to True/False later when return_bool is set)
"""
if np.abs(value - cv_median) > n_sigma * cv_sigma:
return np.nan
else:
return value
| 31.284404 | 88 | 0.590469 | 855 | 6,820 | 4.553216 | 0.31345 | 0.061649 | 0.041099 | 0.013357 | 0.097611 | 0.065245 | 0.052402 | 0.021577 | 0.021577 | 0.021577 | 0 | 0.011514 | 0.312317 | 6,820 | 217 | 89 | 31.428571 | 0.818337 | 0.431818 | 0 | 0.134831 | 0 | 0 | 0.069274 | 0.020603 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067416 | false | 0 | 0.05618 | 0 | 0.213483 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
259e2c5f61cd5489f4613bd21c2dec609dd81df0 | 1,012 | py | Python | main.py | PaulMakesStuff/RaspberryPiWeatherDisplay | d9507d9cf45ecd71b6d0a322033dd998d3843632 | [
"MIT"
] | 1 | 2021-03-06T16:03:56.000Z | 2021-03-06T16:03:56.000Z | main.py | PaulMakesStuff/RaspberryPiWeatherDisplay | d9507d9cf45ecd71b6d0a322033dd998d3843632 | [
"MIT"
] | null | null | null | main.py | PaulMakesStuff/RaspberryPiWeatherDisplay | d9507d9cf45ecd71b6d0a322033dd998d3843632 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import time
import signal
import buttonshim
from weather import displayWeather
import os
def flash_led(interval, times, r, g, b):
for i in range( times ):
buttonshim.set_pixel(r, g, b)
time.sleep( interval )
buttonshim.set_pixel(0, 0, 0)
time.sleep( interval )
def button_flash():
flash_led(0.025, 3, 255, 255, 255)
def set_color(r, g, b):
buttonshim.set_pixel(r, g, b)
@buttonshim.on_press(buttonshim.BUTTON_A)
def button_a(button, pressed):
set_color(255, 165, 0)
displayWeather()
set_color(0,0,0)
@buttonshim.on_hold(buttonshim.BUTTON_B)
def button_b_hold(button):
flash_led(0.025, 3, 0, 0, 255)
os.system("sudo reboot now")
@buttonshim.on_hold(buttonshim.BUTTON_C)
def button_c_hold(button):
flash_led(0.025, 3, 255, 0, 0)
os.system("sudo shutdown now")
flash_led(0.025, 3, 0, 255, 0)
set_color(255, 165, 0)
displayWeather()
set_color(0,0,0)
signal.pause() | 24.095238 | 42 | 0.652174 | 161 | 1,012 | 3.944099 | 0.285714 | 0.025197 | 0.018898 | 0.075591 | 0.418898 | 0.31811 | 0.198425 | 0.125984 | 0.125984 | 0.125984 | 0 | 0.086076 | 0.219368 | 1,012 | 42 | 43 | 24.095238 | 0.717722 | 0.020751 | 0 | 0.30303 | 0 | 0 | 0.033684 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.151515 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
259ef3d9c03a15a85a5e68fc83639850d12d9247 | 3,057 | py | Python | size_constrained_clustering/shrinkage.py | vergilijus/size_constrained_clustering | be520ee0535b3f73d779e498a9046ef77d69355d | [
"MIT"
] | 26 | 2020-07-04T11:30:09.000Z | 2022-02-04T22:12:24.000Z | size_constrained_clustering/shrinkage.py | vergilijus/size_constrained_clustering | be520ee0535b3f73d779e498a9046ef77d69355d | [
"MIT"
] | 4 | 2020-07-04T14:50:49.000Z | 2022-03-23T22:09:08.000Z | size_constrained_clustering/shrinkage.py | vergilijus/size_constrained_clustering | be520ee0535b3f73d779e498a9046ef77d69355d | [
"MIT"
] | 15 | 2020-08-19T10:37:25.000Z | 2022-03-21T05:00:26.000Z | #!usr/bin/python 3.6
#-*-coding:utf-8-*-
'''
@file: shrinkage.py, shrinkage clustering
@Author: Jing Wang (jingw2@foxmail.com)
@Date: 06/24/2020
@Paper reference: Shrinkage Clustering: A fast and \
size-constrained clustering algorithm for biomedical applications
'''
import os
import sys
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path)
import base
from scipy.spatial.distance import cdist
import numpy as np
import random
class Shrinkage(base.Base):
def __init__(self, n_clusters, size_min=1, max_iters=1000, \
distance_func=cdist, random_state=42):
'''
Args:
n_clusters (int): number of clusters
max_iters (int): maximum iterations
distance_func (object): callable function with input (X, centers) / None, by default is l2-distance
random_state (int): random state to initiate, by default it is 42
'''
super(Shrinkage, self).__init__(n_clusters, max_iters, distance_func)
np.random.seed(random_state)
random.seed(random_state)
self.size_min = size_min
assert isinstance(size_min, int)
assert size_min >= 1
def fit(self, X):
n_samples, n_features = X.shape
assert self.size_min <= n_samples // self.n_clusters
# calculate similarity matrix, larger similarity means more resemblance
S = self.distance_func(X, X)
S /= np.max(S)
S = 1 - S
# initialize
A, S_tilde = self._init(S)
iters = 0
while True:
# remove empty clusters
cluster_size = np.sum(A, axis=0)
keep_cluster = np.where(cluster_size >= self.size_min)[0]
A = A[:, keep_cluster]
# permute cluster membership
M = S_tilde @ A
v = np.min(M - np.sum(M * A, axis=1).reshape((-1, 1)), axis=1)
X_bar = np.argmin(v)
C_prime = np.argmin(M[X_bar])
K = A.shape[1]
A[X_bar] = np.zeros(K)
A[X_bar, C_prime] = 1
if abs(np.sum(v)) < 1e-5 or iters >= self.max_iters:
break
iters += 1
self.labels_ = np.argmax(A, axis=1)
self.cluster_centers_ = self.update_centers(X, A)
def _init(self, S):
'''
Initialize A and S_tilde
'''
n_samples, _ = S.shape
A = np.zeros((n_samples, self.n_clusters))
A[range(n_samples), [random.choice(range(self.n_clusters)) for _ in range(n_samples)]] = 1
S_tilde = 1 - 2 * S
return A, S_tilde
def update_centers(self, X, labels):
'''
Update centers
Args:
X (array like): (n_samples, n_features)
labels (array like): (n_samples, n_clusters), one-hot array
Return:
centers (array like): (n_clusters, n_features)
'''
centers = (X.T.dot(labels)).T / np.sum(labels, axis=0).reshape((-1, 1))
return centers
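# Hedged usage sketch on synthetic data (parameters are illustrative):
#
# from sklearn.datasets import make_blobs
# X, _ = make_blobs(n_samples=200, centers=4, random_state=42)
# model = Shrinkage(n_clusters=4, size_min=10)
# model.fit(X)
# labels, centers = model.labels_, model.cluster_centers_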
| 31.193878 | 111 | 0.57213 | 411 | 3,057 | 4.075426 | 0.350365 | 0.042985 | 0.031045 | 0.025075 | 0.046567 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020554 | 0.315669 | 3,057 | 97 | 112 | 31.515464 | 0.780115 | 0.283284 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25a0e9db9daf776824e6238029ee56df46f2c287 | 1,678 | py | Python | plot/plot_bins.py | jacobdeasy/flexible-ehr | ce26ce718cf5cf18a18d38f273a84324dbd5f4b2 | [
"MIT"
] | 12 | 2020-03-11T06:04:53.000Z | 2021-12-06T04:33:24.000Z | plot/plot_bins.py | jacobdeasy/flexible-ehr | ce26ce718cf5cf18a18d38f273a84324dbd5f4b2 | [
"MIT"
] | null | null | null | plot/plot_bins.py | jacobdeasy/flexible-ehr | ce26ce718cf5cf18a18d38f273a84324dbd5f4b2 | [
"MIT"
] | 1 | 2021-02-23T07:01:18.000Z | 2021-02-23T07:01:18.000Z | import argparse, numpy as np, os
import matplotlib as mpl, matplotlib.cm as cm, matplotlib.pyplot as plt
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
mpl.rc('font', family='serif')
def plot_bins(var, n_bins, bounds=None):
# Calculate percentiles
    # allow_pickle is required to load a pickled dict with NumPy >= 1.16.4
    v = np.load(os.path.join('data', 'value_dict.npy'), allow_pickle=True).item()
p = []
for i, vals in enumerate(v.values()):
p += [np.percentile(vals, np.arange(0, 100+(100//n_bins), 100//n_bins))]
p = dict(zip(v.keys(), p))
# Plot
vals = v[var] if bounds is None else v[var][(bounds[0] < v[var]) & (v[var] < bounds[1])]
counts, bins = np.histogram(vals, bins=100)
cols = np.digitize(bins[:-1], p[var]) - 1
cmap = cm.rainbow(np.linspace(0, 1, n_bins))
plt.bar(bins[:-1], counts, width=bins[1:]-bins[:-1], color=cmap[cols], align='edge')
plt.xlim(bins[0], bins[-1])
plt.xlabel(var, fontsize=15)
plt.xticks(np.linspace(bins[0], bins[-1], 5), fontsize=12)
plt.ylim(0, max(counts))
# plt.ylabel('Frequency', fontsize=15)
plt.yticks(np.linspace(0, max(counts), 5), fontsize=12)
plt.savefig(os.path.join('figs', f'{var}_{n_bins}bins_dist.pdf'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Extract episodes from per-subject data.')
parser.add_argument('var', type=str,
help='Variable to visualize.')
parser.add_argument('-n', '--n_bins', type=int,
default=20,
help='Number of bins to visualize')
parser.add_argument('-b', '--bounds', nargs='+',
default=None,
help='Lower and upper bounds for plotting purposes')
args, _ = parser.parse_known_args()
    bounds = [int(b) for b in args.bounds] if args.bounds is not None else None
plot_bins(args.var, args.n_bins, bounds)
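# Hedged CLI example (variable name and bounds are placeholders):
# python plot/plot_bins.py "Heart Rate" -n 20 -b 0 300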
| 34.958333 | 89 | 0.670441 | 273 | 1,678 | 4.025641 | 0.432234 | 0.031847 | 0.046406 | 0.038217 | 0.050955 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027548 | 0.134684 | 1,678 | 47 | 90 | 35.702128 | 0.729339 | 0.037545 | 0 | 0 | 0 | 0 | 0.160149 | 0.01676 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.057143 | 0 | 0.085714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25a339aa4cddbd0bc3c3654b51a81e46ed37cd4b | 283 | py | Python | tasks/aws.py | elieof/ansible-role-artifacts_to_s3 | 5e6a0f68c66c4714b0a7439c0eac2325e14a0a1a | [
"MIT"
] | null | null | null | tasks/aws.py | elieof/ansible-role-artifacts_to_s3 | 5e6a0f68c66c4714b0a7439c0eac2325e14a0a1a | [
"MIT"
] | null | null | null | tasks/aws.py | elieof/ansible-role-artifacts_to_s3 | 5e6a0f68c66c4714b0a7439c0eac2325e14a0a1a | [
"MIT"
] | null | null | null | import boto3
# client = boto3.client('s3')
# response = client.list_buckets()
# print(response)
s3 = boto3.resource('s3')
object_acl = s3.ObjectAcl('elieof-eoo','/artifacts/projects/autotest_artifacts_to_s3/releases/1.0.0/git_package.zip')
object_acl.put(ACL='authenticated-read') | 28.3 | 117 | 0.759717 | 41 | 283 | 5.073171 | 0.658537 | 0.105769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041985 | 0.074205 | 283 | 10 | 118 | 28.3 | 0.751908 | 0.268551 | 0 | 0 | 0 | 0 | 0.514706 | 0.367647 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
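# Hedged verification sketch, reading the ACL back via the same resource:
# for grant in object_acl.grants:
#     print(grant.get('Grantee'), grant.get('Permission'))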
25a8cb527fbad1a5590666d38407b531a7390a95 | 3,995 | py | Python | src/scripts/data_processing/fill_missing_values_with_interpolation.py | arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold | 7a5a91e52d460fd25133b76d5241462a4aedc474 | [
"Apache-2.0"
] | null | null | null | src/scripts/data_processing/fill_missing_values_with_interpolation.py | arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold | 7a5a91e52d460fd25133b76d5241462a4aedc474 | [
"Apache-2.0"
] | null | null | null | src/scripts/data_processing/fill_missing_values_with_interpolation.py | arnabbiswas1/k_tab_sept_roc_auc_binary_classification_KFold | 7a5a91e52d460fd25133b76d5241462a4aedc474 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import src.config.constants as constants
import src.common as common
import src.munging as process_data
import src.ts as ts_util
def reverse_min_max_scaling(logger, source_df, target_df, features, scaler_dict):
for name in features:
mm = scaler_dict[name]
target_df.loc[:, name] = mm.inverse_transform(source_df[[name]])
return target_df
def impute_data(logger, df_scaled, scaler_dict, fill_with, features, file_name):
logger.info(f"Imputing data with {fill_with}")
df_filled = df_scaled.copy()
# for k in range(0, len(df_scaled)):
# if k not in [3839, 4285]:
# logger.info(k)
# logger.info(psutil.virtual_memory().available * 100 / psutil.virtual_memory().total)
# df_filled.iloc[k] = fill_with(df_scaled.iloc[k].reset_index(drop=True))
# else:
# logger.info(df_scaled.iloc[k])
# logger.info(df_scaled.iloc[k].reset_index(drop=True))
# if k == 4285:
# logger.info(fill_with(df_scaled.iloc[k].reset_index(drop=True)))
for k in range(0, len(df_scaled)):
df_filled.iloc[k] = fill_with(df_scaled.iloc[k].reset_index(drop=True))
df_reverted = df_scaled.copy()
logger.info("Reverting back the scaling")
df_reverted = reverse_min_max_scaling(
logger=logger,
source_df=df_filled,
target_df=df_reverted,
features=features,
scaler_dict=scaler_dict,
)
del df_filled
common.trigger_gc(logger)
df_reverted.to_parquet(f"{constants.FEATURES_DATA_DIR}/{file_name}", index=True)
logger.info(
f"Stored imputed data to features to {constants.FEATURES_DATA_DIR}/{file_name}"
)
del df_reverted
common.trigger_gc(logger)
def main():
try:
# Create a Stream only logger
logger = common.get_logger("generate_features")
logger.info("Starting to generate features")
TARGET = "claim"
train_df, test_df, _ = process_data.read_processed_data(
logger,
constants.PROCESSED_DATA_DIR,
train=True,
test=True,
sample_submission=True,
)
combined_df = pd.concat([train_df.drop(TARGET, axis=1), test_df])
features = train_df.drop([TARGET], axis=1).columns
logger.info("Null description before imputation")
logger.info(process_data.check_null(combined_df))
scaler_dict = {}
combined_df_min_max = combined_df.copy()
for name in features:
logger.info(f"Min-Max scaling {name}")
mm = MinMaxScaler()
mm.fit(combined_df[[name]])
combined_df_min_max.loc[:, name] = mm.transform(combined_df[[name]])
scaler_dict[name] = mm
impute_data(
logger=logger,
df_scaled=combined_df_min_max,
scaler_dict=scaler_dict,
fill_with=ts_util.fill_with_gauss,
features=features,
file_name="imputed_data_w_gaussian.parquet",
)
impute_data(
logger=logger,
df_scaled=combined_df_min_max,
scaler_dict=scaler_dict,
fill_with=ts_util.fill_with_po3,
features=features,
file_name="imputed_data_w_pol_3.parquet",
)
impute_data(
logger=logger,
df_scaled=combined_df_min_max,
scaler_dict=scaler_dict,
fill_with=ts_util.fill_with_lin,
features=features,
file_name="imputed_data_w_lin.parquet",
)
impute_data(
logger=logger,
df_scaled=combined_df_min_max,
scaler_dict=scaler_dict,
fill_with=ts_util.fill_with_mix,
features=features,
file_name="imputed_data_w_mix.parquet",
)
except Exception as ex:
print(ex)
if __name__ == "__main__":
main()
| 31.456693 | 98 | 0.624781 | 513 | 3,995 | 4.551657 | 0.230019 | 0.06424 | 0.033405 | 0.041113 | 0.398287 | 0.376017 | 0.315632 | 0.253961 | 0.220985 | 0.220985 | 0 | 0.007274 | 0.277347 | 3,995 | 126 | 99 | 31.706349 | 0.801524 | 0.124656 | 0 | 0.290323 | 0 | 0 | 0.114491 | 0.05538 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.064516 | 0 | 0.107527 | 0.010753 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25a9b4d8e6473092e7beb77c5938cbaa57f66a4a | 8,344 | py | Python | 3d_fitting/transfer2video_v3.py | duguqiankun/NeuralVoicePuppetry | 26b87d98a1ecfe6e4a6738641e6436ab1a9ece31 | [
"BSD-3-Clause"
] | null | null | null | 3d_fitting/transfer2video_v3.py | duguqiankun/NeuralVoicePuppetry | 26b87d98a1ecfe6e4a6738641e6436ab1a9ece31 | [
"BSD-3-Clause"
] | null | null | null | 3d_fitting/transfer2video_v3.py | duguqiankun/NeuralVoicePuppetry | 26b87d98a1ecfe6e4a6738641e6436ab1a9ece31 | [
"BSD-3-Clause"
] | 1 | 2021-12-21T08:20:34.000Z | 2021-12-21T08:20:34.000Z | import os
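# Transfer source expression coefficients onto a target face video: re-render, inpaint and composite each frame.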
# os.environ['CUDA_VISIBLE_DEVICES'] = '6'
from facenet_pytorch import MTCNN
from core.options import ImageFittingOptions
import cv2
import face_alignment
import numpy as np
from core import get_recon_model
import os
import torch
import pickle
import core.utils as utils
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image
from inpainter import Inpainter
from inpainter.options import Options
import torchvision.transforms as transforms
import time
def load_target(start,end,path):
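    """Load per-frame fit coefficients, cropped image and projected landmarks into a dict keyed by frame index."""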
mydict={}
for i in range(start,end-1):
coeffs = pickle.load(open(f'{path}/{i:04d}_coeffs.pkl','br'))
crop_img = Image.open(f'{path}/{i:04d}_crop.jpg')
lmk = pickle.load(open(f'{path}/{i:04d}_lms_proj.pkl','br'))[0]
mydict[f'{i:04d}']=[coeffs,crop_img,lmk]
return mydict
def process_img(bg, fg, V_writer, bbox,args):
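    """Paste the generated face back into its bounding box on the background frame and write it to the video."""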
face_w = bbox[2] - bbox[0]
face_h = bbox[3] - bbox[1]
resized = cv2.resize(fg,(face_w,face_h))
_bg = bg.copy()
_bg[bbox[1]:bbox[3],bbox[0]:bbox[2]]=resized
V_writer.write(_bg)
def resample(exp,src_rate,target_rate):
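    """Linearly interpolate an (L, D) expression sequence from src_rate to target_rate samples per second."""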
L,D = exp.shape
xp = np.arange(0,L/src_rate,1/src_rate).reshape(-1)
x = np.arange(0,xp[-1],1/target_rate).reshape(-1)
out = np.zeros([x.shape[0],D], dtype=np.float32)
if xp.shape[0] != exp[:,0].shape[0]:
xp = xp[:-1]
for i in range(D):
buff = np.interp(x,xp,exp[:,i])
out[:,i] = buff
return out
if __name__=='__main__':
args = ImageFittingOptions()
args = args.parse()
device = 'cuda:0'
#face detection
mtcnn = MTCNN(select_largest=False, device=device)
fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False, device=device)
opt = Options()
#pytorch render
recon_model = get_recon_model(model=args.recon_model, device=device, batch_size=1, img_size=opt.IMG_size)
inpainter =Inpainter.Inpainter(opt,opt.model_path)
out_folder = opt.ouput
if not os.path.exists(out_folder):
os.mkdir(out_folder)
if not os.path.exists(f'{out_folder}/debug'):
os.mkdir(f'{out_folder}/debug')
if not os.path.exists(f'{out_folder}/processed'):
os.mkdir(f'{out_folder}/processed')
if not os.path.exists(f'{out_folder}/frames'):
os.mkdir(f'{out_folder}/frames')
target = opt.target_path
if opt.src_expression.endswith('.pkl'):
src_expression = pickle.load(open(f'{opt.src_expression}','br'))
print("expression",src_expression.shape)
#src_expression = resample(src_expression,60,30)
else:
src_expression = [x for x in os.listdir(opt.src_expression) if x.endswith('coeffs.pkl')]
src_expression = sorted(src_expression)
#print(src_expression)
src_expression = [ pickle.load(open(f'{opt.src_expression}/{x}','br'))[:, 80:144] for x in src_expression]
src_size = len(src_expression)
    print('expression length',len(src_expression))
index_start = 0
end_index = int(len(os.listdir(target))/4)
print('target end index',end_index)
target_info = load_target(index_start,end_index,target)
#extract background frames
background_frames = {}
cap = cv2.VideoCapture(f'{ opt.background_v}')
frame_cnt = 0
    while True:
ret,background = cap.read()
if not ret:
break
if opt.cuthead:
if frame_cnt>19:
background_frames[f'{frame_cnt-20:04d}']=background
else:
background_frames[f'{frame_cnt:04d}']=background
frame_cnt+=1
if frame_cnt > 2000:
break
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = opt.FPS
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# height = int(512)
# width = int(512)
print("fps", fps)
print("frame_height", height)
print("frame_width", width)
V_writer = cv2.VideoWriter(f'{out_folder}/videorendered.mp4',fourcc, fps, (width,height))
id_coeff, exp_coeff, tex_coeff, angles, gamma, translation = recon_model.split_coeffs(target_info[f'{0:04d}'][0])
previous_trans = translation
previous_angle = angles
previous_idcoeff = id_coeff
previous_tex_coeff = tex_coeff
previous_exp = src_expression[0]
pdist = nn.PairwiseDistance(p=2)
t0 = time.time()
for i,exp in enumerate(src_expression[:-1]):
if i > 1500:
break
ID = i+index_start
ID = ID%(len(target_info))
target_coeffs = target_info[f'{ID:04d}'][0]
# render 3D face
target_img = target_info[f'{ID:04d}'][1]
id_coeff, exp_coeff, tex_coeff, angles, gamma, translation = recon_model.split_coeffs(target_coeffs)
new_translation = opt.mvg_lamda*previous_trans+(1-opt.mvg_lamda)*translation
new_angles = opt.mvg_lamda*previous_angle+(1-opt.mvg_lamda)*angles
new_exp = opt.src_exp_lamda*previous_exp+(1-opt.src_exp_lamda)*exp
if i>0:
previous_trans = new_translation
previous_angle = new_angles
previous_exp = new_exp
        new_coeffs = recon_model.merge_coeffs( previous_idcoeff.cuda(), torch.Tensor(exp).cuda().view(1,64), tex_coeff.cuda(), new_angles.cuda(), gamma.cuda(), new_translation.cuda() )
        result = recon_model(new_coeffs)
#load landmark
landmark = target_info[f'{ID:04d}'][2]
lmk_index = [2,3,4,5,6,7,8,9,10,11,12,13,14,29]
landmark_select = landmark[lmk_index]
mask = np.zeros((opt.IMG_size,opt.IMG_size,3))
pts = landmark_select.reshape((-1,1,2))
pts = np.array(pts,dtype=np.int32)
mask = cv2.fillPoly(mask,[pts],(255,255,255))
        kernel = np.ones((3,3),np.uint8)
        mask = cv2.dilate(mask,kernel=kernel,iterations=2)
mask = transforms.ToTensor()(mask.astype(np.float32))
# norm
render = (result['rendered_img'] / 255 * 2 -1)[0,:,:,:3]
render = render.permute(2, 0, 1)
img_array_crop = np.asarray(target_img)/255
TARGET = transforms.ToTensor()(img_array_crop.astype(np.float32))
TARGET = 2.0 * TARGET - 1.0
fake = inpainter(TARGET,render,mask)
fg = Inpainter.tensor2im(fake.clone())
fg = fg[:,:,::-1]
#debug
_render_copy = ((render.permute(1,2,0)+1)/2*255).cpu().numpy()[:,:,::-1]
_render_copy = _render_copy.astype(np.uint8)
saved = np.ones((opt.IMG_size,opt.IMG_size*3,3),dtype=np.uint8)
saved[:,:opt.IMG_size,:] = _render_copy
        saved[:,opt.IMG_size:opt.IMG_size*2,:] = fg
        mask = mask == 0
intermediate = torch.where(mask, TARGET, render.cpu())
intermediate = np.transpose(intermediate.numpy(),[1,2,0])
intermediate = np.array( (intermediate[:,:,::-1] + 1)/2*255, np.uint8)
        saved[:,opt.IMG_size*2:,:] = intermediate
cv2.imwrite(f'{out_folder}/debug/{ID:04d}.jpg',saved)
# save for deflicker
cv2.imwrite(f'{out_folder}/processed/{ID:05d}.jpg',fg)
#resize back
bg = background_frames[f'{ID:04d}']
x1, y1, x2, y2 = opt.bbox
crop = bg[y1:y2, x1:x2]
crop = cv2.resize(crop,(256,256 ))
cv2.imwrite(f'{out_folder}/frames/{ID:05d}.jpg',crop)
process_img(bg,fg,V_writer,opt.bbox,args)
c = i+1
t1 = time.time()
# print('time', (t1-t0)/c)
# print('FPS', 1/((t1 - t0)/c))
V_writer.release()
| 29.90681 | 186 | 0.590125 | 1,119 | 8,344 | 4.225201 | 0.216265 | 0.054992 | 0.021151 | 0.01269 | 0.157995 | 0.103849 | 0.094966 | 0.066201 | 0.049069 | 0.030457 | 0 | 0.039506 | 0.271932 | 8,344 | 279 | 187 | 29.90681 | 0.738765 | 0.058365 | 0 | 0.042169 | 0 | 0 | 0.07732 | 0.03588 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018072 | false | 0 | 0.108434 | 0 | 0.138554 | 0.048193 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25afa92abc41404c658c8f5eea275176109abd43 | 11,308 | py | Python | prune_layer_v5_weightingByKernel.py | bolifeyo/Pruned-YOLO | fc1b7203fdb8a4fb8eda8491e5ad2683eab5d159 | [
"Apache-2.0"
] | 33 | 2021-03-18T11:34:14.000Z | 2021-12-28T06:21:47.000Z | prune_layer_v5_weightingByKernel.py | bolifeyo/Pruned-YOLO | fc1b7203fdb8a4fb8eda8491e5ad2683eab5d159 | [
"Apache-2.0"
] | 10 | 2021-03-19T03:35:26.000Z | 2022-01-11T06:30:18.000Z | prune_layer_v5_weightingByKernel.py | bolifeyo/Pruned-YOLO | fc1b7203fdb8a4fb8eda8491e5ad2683eab5d159 | [
"Apache-2.0"
] | 10 | 2021-03-24T11:55:46.000Z | 2022-01-23T03:38:06.000Z | import argparse
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import torch.nn as nn
import test # import test.py to get mAP after each epoch
from models.yolo import *
from models.experimental import *
from models.common import *
from utils.datasets import *
from utils.general import *
from utils.torch_utils import *
def channel_count_rough(model):
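    """Count the total number of BatchNorm2d channels in the model."""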
total = 0
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
total += m.weight.data.shape[0] # channels numbers
return total
def grab_thresh(model, overall_ratio):
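    """Rank channels by kernel-weighted BN scaling factors and return the pruning threshold for overall_ratio."""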
total = 0
for m in model.modules():
if isinstance(m, torch.nn.BatchNorm2d):
total += m.weight.data.shape[0] # channels numbers
bn = torch.zeros(total)
index = 0
last_m_weight = None
bn_layer_mean_list, bn_layer_var_list = [], []
for m in model.modules():
if isinstance(m, torch.nn.Conv2d):
last_m_weight = m.weight.data.abs().clone()
if isinstance(m, torch.nn.BatchNorm2d):
kernel_weight = last_m_weight
weight_alpha = torch.mean( kernel_weight.view(kernel_weight.size()[0], -1), dim=1 )
bn_weight = m.weight.data.abs().clone()
assert weight_alpha.size() == bn_weight.size()
weight_copy = 10 * weight_alpha * bn_weight
size = m.weight.data.shape[0]
bn[index:(index+size)] = weight_copy
bn_layer_mean_list.append(torch.mean(bn[index:(index+size)]))
bn_layer_var_list.append(torch.var(bn[index:(index+size)]))
index += size
sorted_bn, sorted_index = torch.sort(bn)
thresh_index = int(total*overall_ratio)
thresh = sorted_bn[thresh_index].to(device)
print('prune ratio is {}, prune thresh of BN is {}'.format(overall_ratio, thresh))
bn_layer_mean = torch.Tensor(bn_layer_mean_list).numpy().tolist()
bn_layer_var = [i*10 for i in torch.Tensor(bn_layer_var_list).numpy().tolist()]
return thresh
def parse_model(d):
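    """Collect the layer indices and repeat counts of the multi-bottleneck C3 modules from the model dict."""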
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
len_backbone = len(d['backbone'])
grab_ifo_layer_idx, grab_ifo_layer_num = [], []
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
#for i, (f, n, m, args) in enumerate(d['backbone']): # from, number, module, args
m = eval(m) if isinstance(m, str) else m # eval strings
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
pass
n = max(round(n * gd), 1) if n > 1 else n # depth gain
if n > 1 and m in [C3]:
grab_ifo_layer_idx.append(i)
grab_ifo_layer_num.append(n)
#grab_ifo.append({i:n})
return grab_ifo_layer_idx, grab_ifo_layer_num
def extract_weights(weights, destination, grab_ifo_layer_idx, grab_ifo_layer_num_ori, grab_ifo_layer_num, index_list):
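    """Remove the pruned bottleneck sub-modules from the saved checkpoint and write the reduced weights to disk."""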
index_list.sort()
save_path = destination.replace('.yaml', '.pt')
print(save_path)
print(grab_ifo_layer_idx, grab_ifo_layer_num_ori, grab_ifo_layer_num, index_list)
idx = 0
o_state_dict = torch.load(weights, map_location=lambda storage, loc: storage)
for i, (m,n) in enumerate(zip(grab_ifo_layer_num_ori, grab_ifo_layer_num)):
if m == n:
continue
else:
idx_tlist = index_list[idx: idx+m-n]
idx_loc_list = []
for idx_t in idx_tlist:
idx_loc_list.append( idx_t - sum(grab_ifo_layer_num_ori[:i]) )
idx += m-n
n_module_list = []
for module_idx in range(m):
if module_idx not in idx_loc_list:
n_module_list.append(o_state_dict['model'].model[grab_ifo_layer_idx[i]].m[module_idx])
n_state_dict = nn.Sequential(*n_module_list)
o_state_dict['model'].model[grab_ifo_layer_idx[i]].m = n_state_dict
torch.save(o_state_dict, save_path)
return save_path
def write_config_py(template, save_dir, grab_ifo_layer_idx, grab_ifo_layer_num):
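    """Write a pruned copy of the model yaml with the per-C3 repeat counts (and their args) shrunk accordingly."""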
destination = os.path.join(save_dir, 'pruned_'+os.path.split(template)[-1])
with open(template) as f:
model_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
for j, i in enumerate(grab_ifo_layer_idx):
if i <= len(model_dict['backbone']) - 1:
model_dict['backbone'][i][1] = grab_ifo_layer_num[j]
else:
model_dict['head'][i-len(model_dict['backbone'])][1] = grab_ifo_layer_num[j]
if i < len(model_dict['backbone']) - 1:
model_dict['backbone'][i][-1][-1] = model_dict['backbone'][i][-1][-1][:grab_ifo_layer_num[j]]
elif i == len(model_dict['backbone']) - 1:
if grab_ifo_layer_num[j] == 0:
model_dict['backbone'][i][-1][-1] = [model_dict['backbone'][i][-1][-1][-1]]
else:
model_dict['backbone'][i][-1][-1] = model_dict['backbone'][i][-1][-1][:3*grab_ifo_layer_num[j]]
else:
if grab_ifo_layer_num[j] == 0:
model_dict['head'][i - len(model_dict['backbone'])][-1][-1] = [model_dict['head'][i - len(model_dict['backbone'])][-1][-1][-1]]
else:
model_dict['head'][i-len(model_dict['backbone'])][-1][-1] = model_dict['head'][i-len(model_dict['backbone'])][-1][-1][:3*grab_ifo_layer_num[j]]
print(destination)
with open(destination, 'w') as ff:
#yaml.dump(model_dict, ff, sort_keys=False)
for k, v in model_dict.items():
ff.write("%s: " % k)
ff.write(str(v).replace('\'', ' ').replace(' nearest ', '\'nearest\''))
ff.write('\n')
return destination
def prune(opt, device):
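    """Build the model, score every C3 bottleneck, and prune the opt.overall_layers weakest ones into a new cfg and checkpoint."""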
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
train_path = data_dict['train']
test_path = data_dict['val']
nc, names = int(data_dict['nc']), data_dict['names'] # number classes, names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
model = Model(opt.cfg, nc=nc).to(device)
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
if opt.weights.endswith('.pt'): # pytorch format
ckpt = torch.load(opt.weights, map_location=device) # load checkpoint
# load model
try:
exclude = [] # exclude keys
ckpt['model'] = {k: v for k, v in ckpt['model'].float().state_dict().items()
if k in model.state_dict() and not any(x in k for x in exclude)
and model.state_dict()[k].shape == v.shape}
model.load_state_dict(ckpt['model'], strict=True)
print('Transferred %g/%g items from %s' % (len(ckpt['model']), len(model.state_dict()), opt.weights))
except KeyError as e:
s = "%s is not compatible with %s. This may be due to model differences or %s may be out of date. " \
"Please delete or update %s and try again, or use --weights '' to train from scratch." \
                % (opt.weights, opt.cfg, opt.weights, opt.weights)
raise KeyError(s) from e
del ckpt
#print(model)
net_channel_1 = channel_count_rough(model)
print("The total number of channels in the model before pruning is ", net_channel_1)
with open(opt.cfg) as f:
model_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
grab_ifo_layer_idx, grab_ifo_layer_num = parse_model(model_dict)
grab_ifo_layer_num_ori = grab_ifo_layer_num.copy()
# prune
save, overall_ratio = opt.save, 0.5
    if save is not None:
if not os.path.exists(save):
os.makedirs(save)
thresh = grab_thresh(model, overall_ratio)
bn_mean_list = []
bn_mean_chan = []
for i, m in enumerate(model.model):
if i in grab_ifo_layer_idx:
m = m.m
for j, n in enumerate(m):
conv_copy = n.cv2.conv.state_dict()['weight'].abs().clone().cpu()
weight_alpha = torch.mean( conv_copy.view(conv_copy.size()[0], -1), dim=1 )
bn_weight = n.cv2.bn.state_dict()['weight'].abs().clone().cpu()
assert weight_alpha.size() == bn_weight.size()
weight_copy = 10 * weight_alpha * bn_weight
bn_mean_list.append(torch.mean(weight_copy).numpy().tolist())
bn_mean_chan.append(weight_copy.numpy().size)
index_list = [i[0] for i in sorted(enumerate(bn_mean_list), key=lambda x:x[1])]
    # Map each pruned bottleneck (a global index) back to its C3 block and decrement that block's repeat count
    for t in range(opt.overall_layers):
        for layer in range(len(grab_ifo_layer_num_ori)):
            if index_list[t] < sum(grab_ifo_layer_num_ori[:layer + 1]):
                grab_ifo_layer_num[layer] -= 1
                break
        else:
            raise IndexError('pruned bottleneck index out of range: not supported')
destination_cfg = write_config_py(opt.cfg, save, grab_ifo_layer_idx, grab_ifo_layer_num)
destination_pth = extract_weights(opt.weights, destination_cfg, grab_ifo_layer_idx, grab_ifo_layer_num_ori, grab_ifo_layer_num, index_list[:opt.overall_layers])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
parser.add_argument('--weights', type=str, default='', help='initial weights path')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--batch-size', type=int, default=16, help="Total batch size for all gpus.")
parser.add_argument("--save", default='prune', type=str, help='path to save pruned model (default: none)')
parser.add_argument("--overall_layers", default=3, type=int, help='pruning layers')
opt = parser.parse_args()
opt.save += "_{}".format(opt.overall_layers)
opt.cfg = check_file(opt.cfg) # check file
opt.data = check_file(opt.data) # check file
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
device = select_device(opt.device, batch_size=opt.batch_size)
print(opt)
prune(opt, device)
| 46.921162 | 164 | 0.624337 | 1,702 | 11,308 | 3.912456 | 0.153937 | 0.056765 | 0.09551 | 0.090104 | 0.341643 | 0.306953 | 0.275867 | 0.26055 | 0.241928 | 0.207689 | 0 | 0.013415 | 0.23532 | 11,308 | 240 | 165 | 47.116667 | 0.756679 | 0.05191 | 0 | 0.125604 | 0 | 0.019324 | 0.092227 | 0 | 0 | 0 | 0 | 0 | 0.019324 | 1 | 0.028986 | false | 0.004831 | 0.057971 | 0 | 0.111111 | 0.033816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25b188508f38bd8df1c9c5cddf7b6a70e5657a2c | 10,338 | py | Python | get_data_thomson.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | null | null | null | get_data_thomson.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | null | null | null | get_data_thomson.py | fusion-flap/flap_nstx_gpi | cf7d4bdecea8fd7434f8f7eb64e1a7b13fc0f759 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 18:24:43 2020
@author: mlampert
"""
import os
import copy
#FLAP imports and settings
import flap
import flap_mdsplus
flap_mdsplus.register('NSTX_MDSPlus')
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"flap_nstx.cfg")
flap.config.read(file_name=fn)
#Scientific imports
import numpy as np
import matplotlib.pyplot as plt
#Other necessary imports
import MDSplus as mds
import pickle
def get_data_thomson(exp_id=None,
data_name=None,
no_data=False,
options=None,
coordinates=None,
data_source=None):
    """
    Returns the Thomson scattering processed data from the MDSplus tree as
    a dictionary containing all the necessary parameters. The description of
    the dictionary can be seen below.
    """
    default_options = {'temperature': False,
                       'density': False,
                       'pressure': False,
                       'test': False,
                       'output_name': None,
                       'add_flux_coordinates': False,
                       'spline_data': False,
                       'force_mdsplus': False
                       }
    _options = flap.config.merge_options(default_options, options, data_source=data_source)
    temperature = _options['temperature']
    density = _options['density']
    pressure = _options['pressure']
    test = _options['test']
    output_name = _options['output_name']
    add_flux_coordinates = _options['add_flux_coordinates']
    spline_data = _options['spline_data']
    force_mdsplus = _options['force_mdsplus']
if pressure+temperature+density != 1:
raise ValueError('Either pressure or temperature or density can be set, neither none, nor more than one.')
if exp_id is None:
raise TypeError('exp_id must be set.')
wd=flap.config.get_all_section('Module NSTX_GPI')['Local datapath']
filename=wd+'/'+str(exp_id)+'/nstx_mdsplus_thomson_'+str(exp_id)+'.pickle'
if not os.path.exists(filename) or force_mdsplus:
conn = mds.Connection('skylark.pppl.gov:8501')
conn.openTree('activespec', exp_id)
mdsnames=['ts_times', #The time vector of the measurement (60Hz measurement with the Thomson)
'FIT_RADII', #Radius of the measurement
'FIT_R_WIDTH',
'FIT_TE', #Electron temperature profile numpy array([radius,time])
'FIT_TE_ERR', #The error for Te (symmetric)
'FIT_NE', #Electron density profile numpy array([radius,time])
'FIT_NE_ERR', #The error for ne (symmetric)
'FIT_PE', #Electron pressure profile numpy array([radius,time])
'FIT_PE_ERR', #The error for pe (symmetric)
'SPLINE_RADII', #Spline fit of the previous results (4times interpolation compared to the previous ones)
'SPLINE_NE', #Spline fit ne without error
'SPLINE_PE', #Spline fit pe without error
'SPLINE_TE', #Spline fit Te without error
'TS_LD', #N/A
'LASER_ID', #ID of the Thomson laser
'VALID', #Validity of the measurement
'DATEANALYZED', #The date when the analysis was done for the data
'COMMENT'] #Comment for the analysis
thomson={}
for name in mdsnames:
thomson[name]=conn.get('\\TS_BEST:'+name).data()
if name == 'ts_times' and type(thomson[name]) is str:
raise ValueError('No Thomson data available.')
thomson['FIT_R_WIDTH'] /= 100.
thomson['FIT_RADII'] /= 100.
thomson['SPLINE_RADII'] /= 100.
thomson['FIT_NE'] *= 1e6
thomson['FIT_NE_ERR'] *= 1e6
thomson['SPLINE_NE'] *= 1e6
conn.closeAllTrees()
conn.disconnect()
        try:
            with open(filename, 'wb') as pickle_file:
                pickle.dump(thomson, pickle_file)
        except OSError:
            raise IOError('The path '+filename+' cannot be accessed. Pickle file cannot be created.')
    else:
        with open(filename, 'rb') as pickle_file:
            thomson = pickle.load(pickle_file)
    try:
        thomson_time = thomson['TS_TIMES']
    except KeyError:
        thomson_time = thomson['ts_times']
coord = []
coord.append(copy.deepcopy(flap.Coordinate(name='Time',
unit='s',
mode=flap.CoordinateMode(equidistant=True),
start=thomson_time[0],
step=thomson_time[1]-thomson_time[0],
#shape=time_arr.shape,
dimension_list=[1]
)))
coord.append(copy.deepcopy(flap.Coordinate(name='Sample',
unit='n.a.',
mode=flap.CoordinateMode(equidistant=True),
start=0,
step=1,
dimension_list=[1]
)))
if spline_data:
thomson_r_coord=thomson['SPLINE_RADII']
if pressure:
data_arr=thomson['SPLINE_PE']
data_arr_err=None
data_unit = flap.Unit(name='Pressure',unit='kPa')
elif temperature:
data_arr=thomson['SPLINE_TE']
data_arr_err=None
data_unit = flap.Unit(name='Temperature',unit='keV')
elif density:
data_arr=thomson['SPLINE_NE']
data_arr_err=None
data_unit = flap.Unit(name='Density',unit='m-3')
else:
thomson_r_coord=thomson['FIT_RADII']
if pressure:
data_arr=thomson['FIT_PE']
data_arr_err=thomson['FIT_PE_ERR']
data_unit = flap.Unit(name='Pressure',unit='kPa')
elif temperature:
data_arr=thomson['FIT_TE']
data_arr_err=thomson['FIT_TE_ERR']
data_unit = flap.Unit(name='Temperature',unit='keV')
elif density:
data_arr=thomson['FIT_NE']
data_arr_err=thomson['FIT_NE_ERR']
data_unit = flap.Unit(name='Density',unit='m-3')
coord.append(copy.deepcopy(flap.Coordinate(name='Device R',
unit='m',
mode=flap.CoordinateMode(equidistant=False),
values=thomson_r_coord,
shape=thomson_r_coord.shape,
dimension_list=[0]
)))
if test:
plt.figure()
if add_flux_coordinates:
try:
psi_rz_obj=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\PSIRZ',
exp_id=exp_id,
object_name='PSIRZ_FOR_COORD')
psi_mag=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\SSIMAG',
exp_id=exp_id,
object_name='SSIMAG_FOR_COORD')
psi_bdry=flap.get_data('NSTX_MDSPlus',
name='\EFIT02::\SSIBRY',
exp_id=exp_id,
object_name='SSIBRY_FOR_COORD')
        except Exception:
raise ValueError("The PSIRZ MDSPlus node cannot be reached.")
psi_values=psi_rz_obj.data[:,:,32]
psi_t_coord=psi_rz_obj.coordinate('Time')[0][:,0,0]
psi_r_coord=psi_rz_obj.coordinate('Device R')[0][:,:,32] #midplane is the middle coordinate in the array
#Do the interpolation
psi_values_spat_interpol=np.zeros([thomson_r_coord.shape[0],
psi_t_coord.shape[0]])
for index_t in range(psi_t_coord.shape[0]):
norm_psi_values=(psi_values[index_t,:]-psi_mag.data[index_t])/(psi_bdry.data[index_t]-psi_mag.data[index_t])
norm_psi_values[np.isnan(norm_psi_values)]=0.
psi_values_spat_interpol[:,index_t]=np.interp(thomson_r_coord,psi_r_coord[index_t,:],norm_psi_values)
psi_values_total_interpol=np.zeros(data_arr.shape)
for index_r in range(data_arr.shape[0]):
psi_values_total_interpol[index_r,:]=np.interp(thomson_time,psi_t_coord,psi_values_spat_interpol[index_r,:])
if test:
for index_t in range(len(thomson_time)):
plt.cla()
plt.plot(thomson_r_coord,psi_values_total_interpol[:,index_t])
plt.pause(0.5)
psi_values_total_interpol[np.isnan(psi_values_total_interpol)]=0.
coord.append(copy.deepcopy(flap.Coordinate(name='Flux r',
unit='',
mode=flap.CoordinateMode(equidistant=False),
values=psi_values_total_interpol,
shape=psi_values_total_interpol.shape,
dimension_list=[0,1]
)))
if test:
plt.plot(psi_values_total_interpol, data_arr)
d = flap.DataObject(data_array=data_arr,
error=data_arr_err,
data_unit=data_unit,
coordinates=coord,
exp_id=exp_id,
data_title='NSTX Thomson data')
if output_name is not None:
flap.add_data_object(d, output_name)
return d
def add_coordinate_thomson(data_object,
coordinates,
exp_id=None,
options=None):
raise NotImplementedError("New coordinates need to be added, everything else is added to the FLAP object as default.") | 42.368852 | 128 | 0.523796 | 1,118 | 10,338 | 4.597496 | 0.223614 | 0.023152 | 0.02179 | 0.034241 | 0.298638 | 0.205253 | 0.129572 | 0.070428 | 0.070428 | 0.049416 | 0 | 0.010938 | 0.380925 | 10,338 | 244 | 129 | 42.368852 | 0.792188 | 0.086961 | 0 | 0.205128 | 0 | 0 | 0.131205 | 0.00467 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010256 | false | 0 | 0.041026 | 0 | 0.05641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25b2dee9cd1192a7a5e75e961aa831313600a105 | 1,351 | py | Python | res/007-itertools.py | leialbert/keep-learning-python | 7bbf2226e6e99e87661f15ea46e6149b61d9912f | [
"MIT"
] | null | null | null | res/007-itertools.py | leialbert/keep-learning-python | 7bbf2226e6e99e87661f15ea46e6149b61d9912f | [
"MIT"
] | null | null | null | res/007-itertools.py | leialbert/keep-learning-python | 7bbf2226e6e99e87661f15ea46e6149b61d9912f | [
"MIT"
] | null | null | null | from itertools import product
a = [1,2]
b = [3,4]
c = [4]
prd = product(a,b)
print(prd)
print(list(prd))
prd = product(a,c,repeat=2)
print(list(prd))
from itertools import permutations
a = [1,2,3]
per = permutations(a)
print(list(per))
a = [1,2,3]
per = permutations(a,2)
print(list(per))
from itertools import combinations
a = [1,2,3,4]
comb = combinations(a,2)
print(list(comb))
from itertools import combinations_with_replacement
comb_wr = combinations_with_replacement(a,2)
print(list(comb_wr))
from itertools import accumulate
import operator
a = [1,2,3,4]
acc = accumulate(a,func=operator.mul)
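# running product of [1,2,3,4] -> [1, 2, 6, 24]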
print(a)
print(list(acc))
a = [1,2,5,3,4]
acc = accumulate(a,func=max)
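# running maximum of [1,2,5,3,4] -> [1, 2, 5, 5, 5]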
print(a)
print(list(acc))
from itertools import groupby
def smaller_than_3(x):
return x<3
# lambda x:x<3
a = [1,2,3,4]
group_obj = groupby(a,key=smaller_than_3)
print(group_obj)
for key,value in group_obj:
print(key,list(value))
persons = [
{'name':'albert','age':28},{'name':'allen','age':25},
{'name':'zhangsan','age':30},{'name':'lisi','age':29}
]
group_obj = groupby(persons,key=lambda x:x['age'])
for key,value in group_obj:
print(key,list(value))
from itertools import count,cycle,repeat
# for z in count(1):
# print(z)
# if z == 15:
# break
# a = [1,2,3]
# for z in cycle(a):
# print(z)
for z in repeat(1,4):
print(z) | 18.256757 | 57 | 0.665433 | 240 | 1,351 | 3.683333 | 0.245833 | 0.0181 | 0.027149 | 0.027149 | 0.2319 | 0.176471 | 0.131222 | 0.085973 | 0.085973 | 0.085973 | 0 | 0.045614 | 0.156181 | 1,351 | 74 | 58 | 18.256757 | 0.729825 | 0.087343 | 0 | 0.333333 | 0 | 0 | 0.044082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.156863 | 0.019608 | 0.196078 | 0.294118 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25b43442eeb848a6a93a6d999d5f0e66f89b810e | 1,234 | py | Python | searches/depth_first_search.py | exterkamps/Python-Data-Structures | 8594ed934edeaded4866999932384d12fb4519c3 | [
"Apache-2.0"
] | 3 | 2018-10-15T17:38:29.000Z | 2021-03-24T02:55:46.000Z | searches/depth_first_search.py | exterkamp/Python-Data-Structures | 8594ed934edeaded4866999932384d12fb4519c3 | [
"Apache-2.0"
] | null | null | null | searches/depth_first_search.py | exterkamp/Python-Data-Structures | 8594ed934edeaded4866999932384d12fb4519c3 | [
"Apache-2.0"
] | null | null | null | def depth_first_search(grid, start, target):
"""
Search a 2d grid for a given target starting at start.
Args:
grid: the input grid as a List[List]
start: the start grid in format (x,y) zero index
target: the target value to find in the grid
Returns:
Coordinate of the target. Or None if cannot be found.
"""
height = len(grid)
if not height:
return None
width = len(grid[0])
x_start = start[0]
y_start = start[1]
# short circuit the start lookup
if grid[y_start][x_start] == target:
return (x_start, y_start)
visited = set()
stack = [(x_start, y_start)]
visited.add((x_start, y_start))
while stack:
current = stack.pop()
        # explore the up, left, right and down neighbours
        for coor in [(current[0], current[1]-1),(current[0]-1, current[1]),(current[0]+1, current[1]),(current[0], current[1]+1)]:
            if coor[0] < 0 or coor[0] > width-1 or coor[1] < 0 or coor[1] > height-1:
                continue
            if grid[coor[1]][coor[0]] == target:
                return coor
            if coor not in visited:
                # mark on push so the same cell is never stacked twice
                visited.add(coor)
                stack.append(coor)
return None
| 29.380952 | 130 | 0.548622 | 177 | 1,234 | 3.757062 | 0.322034 | 0.045113 | 0.031579 | 0.054135 | 0.159399 | 0.064662 | 0.064662 | 0.064662 | 0 | 0 | 0 | 0.031746 | 0.336305 | 1,234 | 41 | 131 | 30.097561 | 0.78022 | 0.246353 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25b8845e4d182b367e71bf9679fb75ef834e9cbd | 2,228 | py | Python | Chapter13/final/bookr/reviews/tests.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 97 | 2021-03-01T12:54:30.000Z | 2022-03-28T02:57:26.000Z | Chapter13/final/bookr/reviews/tests.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 81 | 2020-08-27T04:56:04.000Z | 2022-03-12T00:53:40.000Z | Chapter13/final/bookr/reviews/tests.py | PacktPublishing/Web-Development-Projects-with-Django | 531bc4d58d614888cc81b7fd6f8ec859f5a65217 | [
"MIT"
] | 163 | 2020-12-25T14:38:38.000Z | 2022-03-30T10:31:40.000Z | import os
from django.conf import settings
from django.test import TestCase, Client
from django.utils import timezone
from reviews.models import Book, Publisher
class Activity2Test(TestCase):
@classmethod
def setUpTestData(cls):
p = Publisher.objects.create(name='Test Publisher')
Book.objects.create(title='Test Book', publication_date=timezone.now(), publisher=p)
def test_book_detail_media_display(self):
"""
When we first view a book we should not see a cover image or link to sample. But if we upload these, they should
then be displayed on the book detail page.
"""
cover_filename = 'machine-learning-for-algorithmic-trading.png'
cover_save_path = os.path.join(settings.MEDIA_ROOT, 'book_covers', cover_filename)
sample_filename = 'machine-learning-for-trading.pdf'
sample_save_path = os.path.join(settings.MEDIA_ROOT, 'book_samples', sample_filename)
cover_img = b'<img src="/media/book_covers/machine-learning-for-algorithmic-trading.png">'
sample_link = b'<a href="/media/book_samples/machine-learning-for-trading.pdf">Download</a>'
c = Client()
resp = c.get('/books/1/')
self.assertIn(b'<a class="btn btn-primary" href="/books/1/media/">Media</a>', resp.content)
# check the cover image and sample link aren't in the initial HTML
self.assertNotIn(cover_img, resp.content)
self.assertNotIn(sample_link, resp.content)
try:
with open(os.path.join(settings.BASE_DIR, 'fixtures', cover_filename), 'rb') as cover_fp:
with open(os.path.join(settings.BASE_DIR, 'fixtures', sample_filename), 'rb') as sample_fp:
c.post('/books/1/media/', {'cover': cover_fp, 'sample': sample_fp})
finally:
if os.path.exists(cover_save_path):
os.unlink(cover_save_path)
if os.path.exists(sample_save_path):
os.unlink(sample_save_path)
resp = c.get('/books/1/')
# check the cover image and sample link are in the HTML after uploading the media
self.assertIn(cover_img, resp.content)
self.assertIn(sample_link, resp.content)
| 40.509091 | 120 | 0.666517 | 306 | 2,228 | 4.718954 | 0.359477 | 0.033241 | 0.049862 | 0.049862 | 0.297784 | 0.207756 | 0.15374 | 0.110803 | 0.110803 | 0 | 0 | 0.002882 | 0.221275 | 2,228 | 54 | 121 | 41.259259 | 0.829395 | 0.135099 | 0 | 0.058824 | 0 | 0.058824 | 0.208553 | 0.131996 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.058824 | false | 0 | 0.147059 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25bc4ad308584b1f25deefeed2a0843dc4fbf607 | 955 | py | Python | django/devbot/project/urls.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 8 | 2015-03-10T20:03:09.000Z | 2018-06-14T23:03:58.000Z | django/devbot/project/urls.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 3 | 2015-07-14T22:44:47.000Z | 2020-06-05T23:43:05.000Z | django/devbot/project/urls.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = patterns(
'',
(r'^grappelli/', include('grappelli.urls')),
(r'^chatterbox/', include('chatterbox.urls', namespace="chatterbox")),
(r'^admin/', include(admin.site.urls)),
# Homepage
(r'^$', TemplateView.as_view(template_name='index.html')),
)
# Serve static and uploaded assets out of the collected-static directory
if getattr(settings, 'SERVE_STATIC', False):
urlpatterns += patterns(
'',
url(r'^static/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.STATIC_ROOT, 'show_indexes': False}),
url(r'^uploads/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': False}),
)
| 32.931034 | 76 | 0.653403 | 111 | 955 | 5.531532 | 0.441441 | 0.065147 | 0.045603 | 0.052117 | 0.153094 | 0.153094 | 0.153094 | 0.153094 | 0.153094 | 0 | 0 | 0 | 0.183246 | 955 | 28 | 77 | 34.107143 | 0.787179 | 0.064921 | 0 | 0.181818 | 0 | 0 | 0.265169 | 0.104494 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25bcf1ca297f8201e6f00eeb98ba8123ff5c8130 | 1,753 | py | Python | graph_embeddings/models/complex.py | navidmdn/Multi-hop-qa-rl | 81ac9c2b4a37bd9a18dea3980624e338f4b16b4a | [
"MIT"
] | null | null | null | graph_embeddings/models/complex.py | navidmdn/Multi-hop-qa-rl | 81ac9c2b4a37bd9a18dea3980624e338f4b16b4a | [
"MIT"
] | null | null | null | graph_embeddings/models/complex.py | navidmdn/Multi-hop-qa-rl | 81ac9c2b4a37bd9a18dea3980624e338f4b16b4a | [
"MIT"
] | null | null | null | from graph_embeddings.models.embedding_model import EmbeddingModel
import torch
class ComplEx(EmbeddingModel):
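    """ComplEx embedding model: each entity/relation is a complex vector stored as concatenated real and imaginary halves."""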
def __init__(self, data_loader, entity_dim, rel_dim, loss_type,
device, do_batch_norm, **kwargs):
super(ComplEx, self).__init__(
data_loader, entity_dim, rel_dim, loss_type,
device, do_batch_norm, **kwargs
)
self.multiplier = 2
self.entity_dim = entity_dim * self.multiplier
self.bn0 = torch.nn.BatchNorm1d(self.multiplier)
self.bn1 = torch.nn.BatchNorm1d(self.multiplier)
self.bn2 = torch.nn.BatchNorm1d(self.multiplier)
self.E = self.create_entity_embeddings()
self.R = self.create_relation_embeddings()
def calculate_score(self, head, relation):
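        # Split the embeddings into real/imaginary halves and score Re(<h, r, conj(t)>)
        # against every entity via the ComplEx trilinear product.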
head = torch.stack(list(torch.chunk(head, 2, dim=1)), dim=1)
if self.do_batch_norm:
head = self.bn0(head)
head = self.input_dropout(head)
head = head.permute(1, 0, 2)
re_head = head[0]
im_head = head[1]
relation = self.hidden_dropout1(relation)
re_relation, im_relation = torch.chunk(relation, 2, dim=1)
re_tail, im_tail = torch.chunk(self.E.weight, 2, dim=1)
re_score = re_head * re_relation - im_head * im_relation
im_score = re_head * im_relation + im_head * re_relation
score = torch.stack([re_score, im_score], dim=1)
if self.do_batch_norm:
score = self.bn2(score)
score = self.hidden_dropout2(score)
score = score.permute(1, 0, 2)
re_score = score[0]
im_score = score[1]
score = torch.mm(re_score, re_tail.transpose(1, 0)) + torch.mm(im_score, im_tail.transpose(1, 0))
return score
| 37.297872 | 105 | 0.634912 | 240 | 1,753 | 4.3875 | 0.25 | 0.066477 | 0.041785 | 0.062678 | 0.271605 | 0.248813 | 0.146249 | 0.106363 | 0.106363 | 0.106363 | 0 | 0.025365 | 0.257844 | 1,753 | 46 | 106 | 38.108696 | 0.784012 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25be2dc9f7c0978f0a272dbf62277c5be22a8d3f | 1,921 | py | Python | aiopika_macrobase/rpc/endpoint.py | mbcores/aiopika-macrobase | a3351b9eed3cc80995070675d99e7e68022b65d9 | [
"MIT"
] | null | null | null | aiopika_macrobase/rpc/endpoint.py | mbcores/aiopika-macrobase | a3351b9eed3cc80995070675d99e7e68022b65d9 | [
"MIT"
] | 1 | 2020-08-06T07:42:48.000Z | 2020-08-06T07:42:48.000Z | aiopika_macrobase/rpc/endpoint.py | mbcores/aiopika-macrobase | a3351b9eed3cc80995070675d99e7e68022b65d9 | [
"MIT"
] | 3 | 2020-04-07T10:02:16.000Z | 2021-07-08T05:16:11.000Z | from macrobase_driver.logging import set_request_id
from sentry_sdk import capture_exception
from .request import RPCRequest, RPCResponse, RPCMessageType
from ..endpoint import AiopikaEndpoint
from ..result import AiopikaResult
from aio_pika import IncomingMessage
from structlog import get_logger
log = get_logger('macrobase.aiopika.endpoint_rpc')
class AiopikaRPCEndpoint(AiopikaEndpoint):
"""
    RPC endpoint implementation for processing incoming RPC requests
"""
async def handle(self, driver, message: IncomingMessage, data, *args, **kwargs) -> AiopikaResult:
"""
Handle method for process incoming message
Args:
driver: Aiopika Macrobase driver
message (IncomingMessage): Incoming message from driver processing
data: Deserialized payload from Incoming Message
*args: Additional arguments
**kwargs: Additional arguments with keys
Returns:
AiopikaResult: Aiopika result action or None (if return None then driver ack message).
"""
identifier = kwargs.get('identifier', None)
request = RPCRequest(message, identifier, payload=data)
try:
set_request_id(message.headers.get('x-cross-request-id'))
response = await self.method(driver, request, request.payload, *args, **kwargs)
except Exception as e:
capture_exception(e)
response = RPCResponse(e, type=RPCMessageType.error)
return response.get_result(message.correlation_id, identifier, message.expiration)
async def method(self, driver, request: RPCRequest, data, *args, **kwargs) -> RPCResponse:
return RPCResponse()
class HealthEndpoint(AiopikaRPCEndpoint):
async def method(self, driver, request: RPCRequest, data, *args, **kwargs) -> RPCResponse:
log.info('Health')
return RPCResponse(payload={'status': 'health', 'value': 'ok'})
| 35.574074 | 101 | 0.690265 | 202 | 1,921 | 6.49505 | 0.386139 | 0.030488 | 0.032012 | 0.027439 | 0.10061 | 0.10061 | 0.10061 | 0.10061 | 0.10061 | 0.10061 | 0 | 0 | 0.222801 | 1,921 | 53 | 102 | 36.245283 | 0.878768 | 0.019261 | 0 | 0.08 | 0 | 0 | 0.059669 | 0.021567 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.28 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25c10987a4ba7935613dd91db86e1f9e7f66e461 | 1,640 | py | Python | superRes_Train/test.py | abhijitramesh/Eagle-eye | a79f6a1a6d7f2c887cc98f7afb7c6dbe823c7cee | [
"Apache-2.0"
] | 2 | 2021-02-15T14:58:19.000Z | 2021-02-17T22:51:34.000Z | superRes_Train/test.py | abhijitramesh/Eagle-eye | a79f6a1a6d7f2c887cc98f7afb7c6dbe823c7cee | [
"Apache-2.0"
] | null | null | null | superRes_Train/test.py | abhijitramesh/Eagle-eye | a79f6a1a6d7f2c887cc98f7afb7c6dbe823c7cee | [
"Apache-2.0"
] | null | null | null | import torch
from utils import *
from PIL import Image, ImageDraw, ImageFont
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def super_res(img):
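    """Downscale the input 4x, then super-resolve it with the SRResNet and SRGAN checkpoints, displaying each stage."""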
## SRRESNET
srresnet_checkpoint = "./checkpoint_srresnet.pth.tar"
srresnet = torch.load(srresnet_checkpoint,map_location=device)['model']
srresnet.eval()
hr_img = Image.open(img, mode="r")
hr_img.show()
hr_img = hr_img.convert('RGB')
lr_img = hr_img.resize((int(hr_img.width / 4), int(hr_img.height / 4)),
Image.BICUBIC)
bicubic_img = lr_img.resize((hr_img.width, hr_img.height), Image.BICUBIC)
bicubic_img.show()
    # Super-resolve the downscaled copy (feeding hr_img here would upscale past the original size)
    sr_img_srresnet = srresnet(convert_image(lr_img, source='pil', target='imagenet-norm').unsqueeze(0).to(device))
sr_img_srresnet = sr_img_srresnet.squeeze(0).cpu().detach()
sr_img_srresnet = convert_image(sr_img_srresnet, source='[-1, 1]', target='pil')
sr_img_srresnet.show()
## SRGAN
srgan_checkpoint = "./checkpoint_srgan.pth.tar"
srgan_generator = torch.load(srgan_checkpoint,map_location=device)['generator']
srgan_generator.eval()
    sr_img_srgan = srgan_generator(convert_image(lr_img, source='pil', target='imagenet-norm').unsqueeze(0).to(device))
sr_img_srgan = sr_img_srgan.squeeze(0).cpu().detach()
sr_img_srgan = convert_image(sr_img_srgan, source='[-1, 1]', target='pil')
sr_img_srgan.show()
if __name__ == '__main__':
# img="/Users/abhijitramesh/Downloads/chair1_1.jpg"
# img_1="/Users/abhijitramesh/Downloads/person42_0.jpg"
img_2="/Users/abhijitramesh/Downloads/tvmonitor19_2.jpg"
super_res(img_2)
| 34.166667 | 119 | 0.70122 | 232 | 1,640 | 4.650862 | 0.280172 | 0.055607 | 0.072289 | 0.050046 | 0.205746 | 0.205746 | 0.164968 | 0.124189 | 0.124189 | 0.124189 | 0 | 0.015119 | 0.153049 | 1,640 | 48 | 120 | 34.166667 | 0.761699 | 0.071951 | 0 | 0 | 0 | 0 | 0.124011 | 0.067942 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25c4d04411b12679ad3412cf2a77df2f6285b638 | 1,580 | py | Python | day9/flask_day3/sqlalchemy_demo1/demo4.py | gaohj/wh1904js | a3af38f8311f79eb9f2e08a3de16dd1e02c40714 | [
"Apache-2.0"
] | null | null | null | day9/flask_day3/sqlalchemy_demo1/demo4.py | gaohj/wh1904js | a3af38f8311f79eb9f2e08a3de16dd1e02c40714 | [
"Apache-2.0"
] | null | null | null | day9/flask_day3/sqlalchemy_demo1/demo4.py | gaohj/wh1904js | a3af38f8311f79eb9f2e08a3de16dd1e02c40714 | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import (
create_engine,
Column,
Integer,
String,
Float,
Boolean,
DECIMAL,
Enum,
DateTime,
DATE,
Time,
Text
)
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.declarative import declarative_base  # factory for the ORM base class
from sqlalchemy.orm import sessionmaker
# Database configuration variables
HOSTNAME = '127.0.0.1'  # database host
PORT = '3306'  # database port
DATABASE = '1904_sqlalchemy'  # database name
USERNAME = 'root'  # user name
PASSWORD = '123456'  # password
DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(USERNAME,PASSWORD,HOSTNAME,PORT,DATABASE)
# Create the database engine
engine = create_engine(DB_URI)
# 1. Create the declarative base class
Base = declarative_base(engine)
# All create/read/update/delete operations on the database go through a session object
session = sessionmaker(engine)()
import enum
class TagEnum(enum.Enum):
    nanshen = '男神'    # "heartthrob"
    xueba = '学霸'      # "top student"
    geshen = '楼德华'   # nickname riffing on Andy Lau (刘德华)
class Article(Base):
__tablename__ = 'article'
id = Column(Integer,primary_key=True,autoincrement=True)
# price_sale = Column(Float)
# is_delete = Column(Boolean)
    price_ding = Column(DECIMAL(10,4)) # 10 digits in total, at most 4 after the decimal point
# tag = Column(Enum(TagEnum))
# create_time1 = Column(DateTime)
# create_time2 = Column(DATE)
# create_time3 = Column(Time)
    title = Column(String(50),default='默认值')  # '默认值' means "default value"
# content = Column(Text)
# content2 = Column(LONGTEXT)
# Base.metadata.drop_all() # schema changes such as column type edits are not applied by create_all; drop the old tables first
# Base.metadata.create_all()
from datetime import datetime
from datetime import date
from datetime import time
article = Article(price_ding=1000.456677) # at most 4 decimal places are kept
# stored as 1000.4567
session.add(article)
session.commit()
| 24.6875 | 103 | 0.703165 | 189 | 1,580 | 5.767196 | 0.529101 | 0.051376 | 0.049541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040801 | 0.177848 | 1,580 | 63 | 104 | 25.079365 | 0.798306 | 0.261392 | 0 | 0 | 0 | 0 | 0.085965 | 0.037719 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.047619 | 0.190476 | 0 | 0.404762 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25c6233c949be37003df81cd078a37810b00f6ff | 2,411 | py | Python | recovery/TopoMADSrc/src/train.py | imperial-qore/CAROL | 57dc42c4ddeb9e75eed43a91ceb336a1ecc9c8b9 | [
"BSD-3-Clause"
] | 1 | 2022-03-19T16:37:40.000Z | 2022-03-19T16:37:40.000Z | recovery/TopoMADSrc/src/train.py | imperial-qore/CAROL | 57dc42c4ddeb9e75eed43a91ceb336a1ecc9c8b9 | [
"BSD-3-Clause"
] | null | null | null | recovery/TopoMADSrc/src/train.py | imperial-qore/CAROL | 57dc42c4ddeb9e75eed43a91ceb336a1ecc9c8b9 | [
"BSD-3-Clause"
] | null | null | null | from .constants import *
from .utils import *
import torch.nn as nn
from tqdm import tqdm
from .plotter import *
anomaly_loss = nn.MSELoss(reduction = 'none')
mse_loss = nn.MSELoss(reduction = 'mean')
def custom_loss(model, pred_state, true_state, thresholds):
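	"""MSE between the flattened predicted state and the true state."""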
aloss = mse_loss(pred_state.view(-1), torch.tensor(true_state, dtype=torch.double))
return aloss
def backprop(epoch, model, optimizer, train_time_data, train_schedule_data, stats, norm_series, thresholds, training = True):
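	"""Run one pass over the training windows; backpropagate when training=True and return the mean loss."""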
aloss_list = []
for i in tqdm(range(train_time_data.shape[0]), leave=False, position=1):
state, schedule = train_time_data[i], train_schedule_data[i]
pred_state = model(state)
aloss = custom_loss(model, pred_state, state, thresholds)
aloss_list.append(aloss.item())
loss = aloss
if training:
optimizer.zero_grad(); loss.backward(); optimizer.step()
tqdm.write(f'Epoch {epoch},\tLoss = {np.mean(aloss_list)}')
return np.mean(aloss_list)
# Accuracy
def anomaly_accuracy(pred_state, target_anomaly, thresholds, model_plotter):
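	"""Threshold the predicted state into anomaly flags and compare them with the target labels per dimension."""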
correct = 0; res_list = []; tp, fp, tn, fn = 0, 0, 0, 0
anomaly_any_dim, _ = check_anomalies(pred_state.view(1, -1).detach().clone().numpy(), thresholds)
anomaly_any_dim = anomaly_any_dim[0] + 0
for i, res in enumerate(anomaly_any_dim):
res_list.append(res)
if res == target_anomaly[i]:
correct += 1
if target_anomaly[i] == 1: tp += 1
else: tn += 1
else:
if target_anomaly[i] == 1: fn += 1
else: fp += 1
model_plotter.update_anomaly(res_list, target_anomaly, correct/pred_state.shape[0])
return correct/pred_state.shape[0], tp, tn, fp, fn
def accuracy(model, train_time_data, train_schedule_data, anomaly_data, class_data, thresholds, model_plotter):
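	"""Evaluate anomaly predictions over all windows and report precision, recall and F1."""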
anomaly_correct = 0; tpl, tnl, fpl, fnl = [], [], [], []
for i, d in enumerate(train_time_data):
pred_state = model(train_time_data[i])
model_plotter.update_lines(pred_state.view(-1), train_time_data[i][-1])
res, tp, tn, fp, fn = anomaly_accuracy(pred_state, anomaly_data[i], thresholds, model_plotter)
anomaly_correct += res
tpl.append(tp); tnl.append(tn); fpl.append(fp); fnl.append(fn)
	tp, fp, tn, fn = np.mean(tpl), np.mean(fpl), np.mean(tnl), np.mean(fnl)
p, r = tp/(tp+fp), tp/(tp+fn)
tqdm.write(f'P = {p}, R = {r}, F1 = {2 * p * r / (p + r)}')
return anomaly_correct / len(train_time_data)
| 41.568966 | 125 | 0.705931 | 388 | 2,411 | 4.177835 | 0.244845 | 0.061073 | 0.064158 | 0.02591 | 0.159161 | 0.037014 | 0 | 0 | 0 | 0 | 0 | 0.013546 | 0.142679 | 2,411 | 57 | 126 | 42.298246 | 0.770682 | 0.003318 | 0 | 0 | 0 | 0.019608 | 0.04 | 0.00875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.098039 | 0 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25c9be0aa40c9cc11065bccf3512fe3ff60fb71c | 1,193 | py | Python | Model/Characteristics.py | ProjectBlackFalcon/DatBot | 8b2cc64af78757b832d8bc6a1373fb74b7a4316f | [
"MIT"
] | 7 | 2017-11-22T13:28:41.000Z | 2019-10-17T08:47:40.000Z | Model/Characteristics.py | ProjectBlackFalcon/DatBot | 8b2cc64af78757b832d8bc6a1373fb74b7a4316f | [
"MIT"
] | 3 | 2018-10-07T15:59:34.000Z | 2019-01-15T11:56:18.000Z | Model/Characteristics.py | ProjectBlackFalcon/DatBot | 8b2cc64af78757b832d8bc6a1373fb74b7a4316f | [
"MIT"
] | null | null | null | class Characteristics:
def __init__(self):
self.level = None
self.xp = None
self.xp_next_level_floor = None
self.weight = None
self.weight_max = None
self.health_percent = None
self.jobs = None
self.vi = None
self.int = None
self.agi = None
self.cha = None
self.fo = None
self.sa = None
self.available_stat_points = None
def get_primary_characs(self):
names = ['Vi', 'Int', 'Agi', 'Cha', 'Fo', 'Sa', 'Available']
return dict(zip(names, [self.vi, self.int, self.agi, self.cha, self.fo, self.sa, self.available_stat_points]))
def __str__(self):
return str({
'Level': self.level,
'Xp': self.xp,
'XpNextLevelFloor': self.xp_next_level_floor,
'Weight': self.weight,
'WeightMax': self.weight_max,
'HealthPercent': self.health_percent,
'Jobs': self.jobs,
'Vitality': self.vi,
'Intelligence': self.int,
'Agility': self.agi,
'Luck': self.cha,
'Strength': self.fo,
'Wisdom': self.sa,
})
| 29.825 | 118 | 0.52808 | 136 | 1,193 | 4.455882 | 0.301471 | 0.171617 | 0.033003 | 0.049505 | 0.066007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.346186 | 1,193 | 39 | 119 | 30.589744 | 0.776923 | 0 | 0 | 0 | 0 | 0 | 0.10394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0 | 0.028571 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25c9dfc31337ec370a3cc01b5ad5fd8eda8cf639 | 285 | py | Python | deca/cmds/process_rtpc.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
] | 50 | 2019-06-05T04:01:04.000Z | 2022-03-05T14:56:43.000Z | deca/cmds/process_rtpc.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
] | 115 | 2019-03-27T13:34:00.000Z | 2022-03-11T23:43:12.000Z | deca/cmds/process_rtpc.py | kk49/deca | 8a03ea5d1b7ae0d787638f1797b6e2cb46de4bae | [
"MIT"
] | 13 | 2020-01-25T01:15:49.000Z | 2022-02-08T02:20:05.000Z | import sys
from deca.ff_rtpc import Rtpc
class FakeVfs:
def hash_string_match(self, hash32=None, hash48=None, hash64=None):
return []
in_file = sys.argv[1]
with open(in_file, 'rb') as f:
rtpc = Rtpc()
rtpc.deserialize(f)
print(rtpc.dump_to_string(FakeVfs()))
| 16.764706 | 71 | 0.684211 | 45 | 285 | 4.177778 | 0.688889 | 0.06383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0.189474 | 285 | 16 | 72 | 17.8125 | 0.78355 | 0 | 0 | 0 | 0 | 0 | 0.007018 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0.1 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25ce92f15804d39fd750d921a11abe1b5d750803 | 1,622 | py | Python | bites/bite021.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | null | null | null | bites/bite021.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | null | null | null | bites/bite021.py | ChidinmaKO/Chobe-bitesofpy | 2f933e6c8877a37d1ce7ef54ea22169fc67417d3 | [
"MIT"
] | 1 | 2019-07-16T19:12:52.000Z | 2019-07-16T19:12:52.000Z | cars = {
'Ford': ['Falcon', 'Focus', 'Festiva', 'Fairlane'],
'Holden': ['Commodore', 'Captiva', 'Barina', 'Trailblazer'],
'Nissan': ['Maxima', 'Pulsar', '350Z', 'Navara'],
'Honda': ['Civic', 'Accord', 'Odyssey', 'Jazz'],
'Jeep': ['Grand Cherokee', 'Cherokee', 'Trailhawk', 'Trackhawk']
}
def get_all_jeeps(cars=cars):
"""return a comma + space (', ') separated string of jeep models
(original order)"""
jeep_list = ', '.join(cars['Jeep'])
return jeep_list
def get_first_model_each_manufacturer(cars=cars):
"""return a list of matching models (original ordering)"""
first = [model[0] for model in cars.values()]
return first
def get_all_matching_models(cars=cars, grep='trail'):
"""return a list of all models containing the case insensitive
'grep' string which defaults to 'trail' for this exercise,
sort the resulting sequence alphabetically"""
grep_models = []
for car in cars.values():
for model in car:
if grep.lower() in model.lower():
grep_models.append(model)
return sorted(grep_models)
# another way
# flatten the list of lists
grep = grep.lower()
models = sum(cars.values(), [])
# models = list(chain.from_iterable(cars.values()))
grep_models = [model for model in models if grep in model.lower()]
return sorted(grep_models)
def sort_car_models(cars=cars):
"""return a copy of the cars dict with the car models (values)
sorted alphabetically"""
sorted_car_dict = {car:sorted(model) for car,model in cars.items()}
return sorted_car_dict | 34.510638 | 71 | 0.6418 | 209 | 1,622 | 4.870813 | 0.401914 | 0.049116 | 0.041257 | 0.044204 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003147 | 0.2164 | 1,622 | 47 | 72 | 34.510638 | 0.797797 | 0.286683 | 0 | 0.074074 | 0 | 0 | 0.161698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25cf4945d974bdc43f8bfbe0d747b72c20968c9e | 3,027 | py | Python | homeassistant/components/switch/mysensors.py | magas0/home-assistant | 3c9e4934946ce99f5193ca550296034e86337997 | [
"MIT"
] | null | null | null | homeassistant/components/switch/mysensors.py | magas0/home-assistant | 3c9e4934946ce99f5193ca550296034e86337997 | [
"MIT"
] | null | null | null | homeassistant/components/switch/mysensors.py | magas0/home-assistant | 3c9e4934946ce99f5193ca550296034e86337997 | [
"MIT"
] | null | null | null | """
Support for MySensors switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.mysensors/
"""
import logging
from homeassistant.components import mysensors
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_OFF, STATE_ON
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the mysensors platform for switches."""
    # Only act if loaded via mysensors by a discovery event.
    # Otherwise the gateway is not set up.
if discovery_info is None:
return
for gateway in mysensors.GATEWAYS.values():
# Define the S_TYPES and V_TYPES that the platform should handle as
# states. Map them in a dict of lists.
pres = gateway.const.Presentation
set_req = gateway.const.SetReq
map_sv_types = {
pres.S_DOOR: [set_req.V_ARMED],
pres.S_MOTION: [set_req.V_ARMED],
pres.S_SMOKE: [set_req.V_ARMED],
pres.S_LIGHT: [set_req.V_LIGHT],
pres.S_LOCK: [set_req.V_LOCK_STATUS],
}
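        # NOTE: float-based version comparison assumes single-digit minor
        # versions; a hypothetical "1.10" would parse as 1.1 and fail this check.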
if float(gateway.version) >= 1.5:
map_sv_types.update({
pres.S_BINARY: [set_req.V_STATUS, set_req.V_LIGHT],
pres.S_SPRINKLER: [set_req.V_STATUS],
pres.S_WATER_LEAK: [set_req.V_ARMED],
pres.S_SOUND: [set_req.V_ARMED],
pres.S_VIBRATION: [set_req.V_ARMED],
pres.S_MOISTURE: [set_req.V_ARMED],
})
map_sv_types[pres.S_LIGHT].append(set_req.V_STATUS)
devices = {}
gateway.platform_callbacks.append(mysensors.pf_callback_factory(
map_sv_types, devices, add_devices, MySensorsSwitch))
class MySensorsSwitch(mysensors.MySensorsDeviceEntity, SwitchDevice):
"""Representation of the value of a MySensors Switch child node."""
@property
def is_on(self):
"""Return True if switch is on."""
if self.value_type in self._values:
return self._values[self.value_type] == STATE_ON
return False
def turn_on(self):
"""Turn the switch on."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 1)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_ON
self.update_ha_state()
def turn_off(self):
"""Turn the switch off."""
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, 0)
if self.gateway.optimistic:
# optimistically assume that switch has changed state
self._values[self.value_type] = STATE_OFF
self.update_ha_state()
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
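# Behaviour sketch: with an optimistic gateway, turn_on()/turn_off() send the
# new value to the node and immediately record STATE_ON/STATE_OFF locally, so
# the frontend updates before the node acknowledges the change.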
| 36.035714 | 75 | 0.650479 | 396 | 3,027 | 4.732323 | 0.328283 | 0.044824 | 0.048559 | 0.044824 | 0.270011 | 0.254002 | 0.18143 | 0.164354 | 0.164354 | 0.164354 | 0 | 0.001783 | 0.258672 | 3,027 | 83 | 76 | 36.46988 | 0.833333 | 0.225966 | 0 | 0.150943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09434 | false | 0 | 0.075472 | 0 | 0.264151 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d33d8c706c5e4f22c52021b3a60cc3f03f0db1 | 3,191 | py | Python | larch/io/save_restore.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null | larch/io/save_restore.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null | larch/io/save_restore.py | Bob620/xraylarch | f8d38e6122cc0e8c990b0f024db3b503a5fbf057 | [
"BSD-2-Clause"
] | null | null | null |
import json
import time
import numpy as np
from collections import OrderedDict
from larch import Group
from ..fitting import Parameter, isParameter
from ..utils.jsonutils import encode4js, decode4js
from . import fix_varname
def save(fname, *args, **kws):
"""save groups and data into a portable json file
save(fname, arg1, arg2, ....)
Parameters
----------
fname name of output save file.
args list of groups, data items to be saved.
See Also: restore()
"""
_larch = kws.get('_larch', None)
isgroup = _larch.symtable.isgroup
expr = getattr(_larch, 'this_expr', 'save(foo)')
expr = expr.replace('\n', ' ').replace('\r', ' ')
grouplist = _larch.symtable._sys.saverestore_groups[:]
buff = ["#Larch Save File: 1.0",
"#save.date: %s" % time.strftime('%Y-%m-%d %H:%M:%S'),
"#save.command: %s" % expr,
"#save.nitems: %i" % len(args)]
names = []
if expr.startswith('save('):
names = [a.strip() for a in expr[5:-1].split(',')]
try:
names.pop(0)
        except IndexError:
            pass
if len(names) < len(args):
names.extend(["_unknown_"]*(len(args) - len(names)))
for name, arg in zip(names, args):
buff.append("#=> %s" % name)
buff.append(json.dumps(encode4js(arg, grouplist=grouplist)))
buff.append("")
with open(fname, "w") as fh:
fh.write("\n".join(buff))
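# Sketch of the resulting file layout (values are illustrative):
#   #Larch Save File: 1.0
#   #save.date: 2020-01-01 12:00:00
#   #save.command: save('run1.sav', mygroup)
#   #save.nitems: 1
#   #=> mygroup
#   <one line of JSON produced by encode4js>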
def restore(fname, top_level=True, _larch=None):
"""restore data from a json Larch save file
Arguments
---------
top_level bool whether to restore to _main [True]
Returns
-------
None with `top_level=True` or group with `top_level=False`
Notes
-----
1. With top_level=False, a new group containing the
recovered data will be returned.
"""
grouplist = _larch.symtable._sys.saverestore_groups
    with open(fname, 'r') as fh:
        datalines = fh.readlines()
line1 = datalines.pop(0)
if not line1.startswith("#Larch Save File:"):
raise ValueError("%s is not a valid Larch save file" % fname)
version_string = line1.split(':')[1].strip()
    version_info = version_string.split('.')
ivar = 0
header = {'version': version_info}
varnames = []
gname = fix_varname('restore_%s' % fname)
out = Group(name=gname)
for line in datalines:
line = line[:-1]
if line.startswith('#save.'):
key, value = line[6:].split(':', 1)
value = value.strip()
if key == 'nitems': value = int(value)
header[key] = value
elif line.startswith('#=>'):
name = fix_varname(line[4:].strip())
ivar += 1
            # save() pads missing names with '_unknown_' (single underscores)
            if name in (None, 'None', '_unknown_', '__unknown__') or name in varnames:
name = 'var_%5.5i' % (ivar)
varnames.append(name)
else:
val = decode4js(json.loads(line), grouplist)
setattr(out, varnames[-1], val)
setattr(out, '_restore_metadata_', header)
if top_level:
_main = _larch.symtable
for objname in dir(out):
setattr(_main, objname, getattr(out, objname))
return
return out
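# Hypothetical round trip (names are illustrative; both calls need a larch
# session supplying `_larch`):
#   save('run1.sav', mygroup)
#   g = restore('run1.sav', top_level=False)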
| 27.991228 | 73 | 0.576308 | 401 | 3,191 | 4.486284 | 0.369077 | 0.026681 | 0.028905 | 0.027793 | 0.046693 | 0.046693 | 0 | 0 | 0 | 0 | 0 | 0.011241 | 0.275149 | 3,191 | 113 | 74 | 28.238938 | 0.766537 | 0.158571 | 0 | 0 | 0 | 0 | 0.103117 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0.014493 | 0.115942 | 0 | 0.15942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d477a7e3662db314d6e4fe2a30bcaeeeeeaa6c | 8,251 | py | Python | text2sql/data/dataset_readers/seq2seq_spans.py | inbaroren/improving-compgen-in-semparse | 06463b94f3d1b291759c08783d5a8661e2960f2e | [
"MIT"
] | 15 | 2020-09-30T12:24:29.000Z | 2021-12-24T13:45:25.000Z | text2sql/data/dataset_readers/seq2seq_spans.py | inbaroren/improving-compgen-in-semparse | 06463b94f3d1b291759c08783d5a8661e2960f2e | [
"MIT"
] | 2 | 2021-04-21T14:07:41.000Z | 2021-12-28T13:26:59.000Z | text2sql/data/dataset_readers/seq2seq_spans.py | inbaroren/improving-compgen-in-semparse | 06463b94f3d1b291759c08783d5a8661e2960f2e | [
"MIT"
] | 2 | 2020-10-19T22:06:45.000Z | 2021-02-05T22:08:23.000Z | from typing import Dict, List, Tuple
import logging
import json
import glob
import os
import sqlite3
import random
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, SpanField, ListField, Field
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
import text2sql.data.dataset_readers.dataset_utils.text2sql_utils as text2sql_utils
from allennlp.data.dataset_readers.dataset_utils import text2sql_utils as tu
from text2sql.data.preprocess.sql_templates import sql_schema_sanitize
from text2sql.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer, StandardTokenizer
from allennlp.data.dataset_readers.dataset_utils.span_utils import enumerate_spans
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("seq2seq_spans")
class Seq2SeqSpansDatasetReader(DatasetReader):
def __init__(self,
schema_path: str,
database_path: str = None,
use_all_sql: bool = False,
use_all_queries: bool = True,
remove_unneeded_aliases: bool = False,
use_prelinked_entities: bool = True,
cross_validation_split_to_exclude: int = None,
source_tokenizer: Tokenizer = None,
target_tokenizer: Tokenizer = None,
source_token_indexers: Dict[str, TokenIndexer] = None,
target_token_indexers: Dict[str, TokenIndexer] = None,
source_add_start_token: bool = True,
lazy: bool = False,
random_seed:int = 0,
schema_free_supervision=False) -> None:
super().__init__(lazy)
self._random_seed = random_seed
        # because the spans are preprocessed, it is essential to enforce the
        # same tokenization
self._source_tokenizer = WhitespaceTokenizer()
self._target_tokenizer = StandardTokenizer()
self._source_token_indexers = source_token_indexers or {"tokens": SingleIdTokenIndexer()}
self._target_token_indexers = target_token_indexers or self._source_token_indexers
self._source_add_start_token = source_add_start_token
self._cross_validation_split_to_exclude = str(cross_validation_split_to_exclude)
self._use_all_sql = use_all_sql
self._use_all_queries = use_all_queries
self._remove_unneeded_aliases = remove_unneeded_aliases
self._use_prelinked_entities = use_prelinked_entities
if database_path is not None:
database_path = cached_path(database_path)
connection = sqlite3.connect(database_path)
self._cursor = connection.cursor()
else:
self._cursor = None
self._schema_path = schema_path
self._schema_free_supervision = schema_free_supervision
@overrides
def _read(self, file_path: str):
"""
Parameters
----------
file_path : ``str``, required.
For this dataset reader, file_path can either be a path to a file `or` a
path to a directory containing json files. The reason for this is because
some of the text2sql datasets require cross validation, which means they are split
up into many small files, for which you only want to exclude one.
"""
files = [p for p in glob.glob(file_path)
if self._cross_validation_split_to_exclude not in os.path.basename(p)]
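        # e.g. with cross_validation_split_to_exclude=0, a hypothetical
        # 'split_0.json' is skipped (substring match on the basename)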
for path in files:
split_data = []
with open(cached_path(path), "r") as data_file:
logger.info("Reading instances from lines in file at: %s", path)
data = json.load(data_file)
for text, sql, spans in text2sql_utils.process_sql_data_standard(data,
use_linked=self._use_prelinked_entities,
use_all_sql=self._use_all_sql,
use_all_queries=self._use_all_queries,
output_spans=True):
instance = self.text_to_instance(text, sql, spans)
if instance is not None:
split_data.append(instance)
            # output in original order (seeded shuffling is currently disabled;
            # uncomment the next line to randomize the split)
            # random.Random(self._random_seed).shuffle(split_data)
for instance in split_data:
yield instance
@overrides
def text_to_instance(self, source_string: str, target_string: str = None, spans: List[Tuple[int, int]] = None) -> Instance: # type: ignore
# pylint: disable=arguments-differ
tokenized_source = self._source_tokenizer.tokenize(source_string)
if self._source_add_start_token:
tokenized_source.insert(0, Token(START_SYMBOL))
tokenized_source.append(Token(END_SYMBOL))
source_field = TextField(tokenized_source, self._source_token_indexers)
spans_field: List[Field] = []
spans = self._fix_spans_coverage(spans, len(tokenized_source))
for start, end in spans:
spans_field.append(SpanField(start, end, source_field))
span_list_field: ListField = ListField(spans_field)
if target_string is not None:
if self._schema_free_supervision:
_, _, target_string = sql_schema_sanitize(target_string, text2sql_utils.read_schema_dict(self._schema_path))
tokenized_target = self._target_tokenizer.tokenize(target_string)
if self._remove_unneeded_aliases:
new_target = tu.clean_unneeded_aliases([token.text for token in tokenized_target])
tokenized_target = [Token(t) for t in new_target]
tokenized_target.insert(0, Token(START_SYMBOL))
tokenized_target.append(Token(END_SYMBOL))
target_field = TextField(tokenized_target, self._target_token_indexers)
return Instance({"source_tokens": source_field, "spans": span_list_field, "target_tokens": target_field})
else:
return Instance({'source_tokens': source_field, "spans": span_list_field})
def _fix_spans_coverage(self, spans: List[Tuple[int, int]], source_length: int):
"""
        Given a list of spans, fixes them to be end-inclusive, shifts them to
        account for the prepended START_SYMBOL, and adds all the size-1 spans.
:param spans: spans over source_tokenized
:param source_length: the length of source_tokenized
:return: List[Tuple[int, int]], spans.union(all size 1 spans)
"""
source_start_index = 0
source_end_index = source_length-1
# add +1 to the start indices since a START_SYMBOL was added
# end indices are now inclusive
if self._source_add_start_token:
new_spans: List[Tuple[int, int]] = []
for s, e in spans:
new_spans.append((s + 1, e))
source_start_index += 1
source_end_index -= 1
else:
new_spans = spans
spans_set = set(new_spans)
for i in range(source_start_index, source_end_index+1):
# inclusive spans
spans_set.add((i, i))
return spans_set
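    # Worked example (assuming input spans use exclusive end indices, as the
    # +1 shift applied only to the start implies): with tokens
    # [START, w0, w1, w2, END], an input span (0, 2) over [w0, w1] becomes the
    # inclusive span (1, 2), and size-1 spans (1, 1), (2, 2), (3, 3) are added.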
if __name__ == '__main__':
    # test reader
c = Seq2SeqSpansDatasetReader('target',
use_all_sql=False,
use_all_queries=True,
use_prelinked_entities=True)
for dataset in ['advising']:
for split_type in ['schema_free_split', 'new_question_split', 'schema_full_split']:
for split in ['final_new_no_join_dev', 'final_new_no_join_test']:
data = c.read(f'/datainbaro2/text2sql/parsers_models/allennlp_text2sql/data/sql data/{dataset}/{split_type}/{split}.json')
| 47.97093 | 143 | 0.648891 | 976 | 8,251 | 5.16291 | 0.22541 | 0.014289 | 0.022227 | 0.018853 | 0.151816 | 0.107759 | 0.0385 | 0.02183 | 0.02183 | 0.02183 | 0 | 0.004887 | 0.280814 | 8,251 | 171 | 144 | 48.251462 | 0.844287 | 0.122167 | 0 | 0.055118 | 0 | 0.007874 | 0.046868 | 0.020549 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031496 | false | 0 | 0.15748 | 0 | 0.220472 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d4beca70d7a7ea9d34cbd419dd150e815027fd | 909 | py | Python | tests/test_main.py | Ronald-TR/dino_velocity | d3f4734c6ba0ac0b26d5cc088f53627204471cc8 | [
"MIT"
] | 1 | 2019-10-05T23:12:36.000Z | 2019-10-05T23:12:36.000Z | tests/test_main.py | Ronald-TR/dino_velocity | d3f4734c6ba0ac0b26d5cc088f53627204471cc8 | [
"MIT"
] | null | null | null | tests/test_main.py | Ronald-TR/dino_velocity | d3f4734c6ba0ac0b26d5cc088f53627204471cc8 | [
"MIT"
] | null | null | null | import os
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
from main import (
calc_velocity,
merge_datasets,
filter_by,
GRAV_CONST
)
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
@pytest.fixture
def dataset():
ds1 = pd.read_csv(os.path.join(FIXTURES_DIR, 'dataset1.csv'))
ds2 = pd.read_csv(os.path.join(FIXTURES_DIR, 'dataset2.csv'))
return ds1.merge(ds2, on='NAME')
def test_filter_by_bipedal(dataset):
ds = filter_by(dataset, 'STANCE', 'bipedal')
assert len(ds.index) == 4
def test_calc_velocity_success():
assert calc_velocity(1.4, 1.2, GRAV_CONST) == 0.5715476066494085
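    # regression check: 0.5715... is the known-good output for inputs 1.4 and
    # 1.2 under GRAV_CONST (the argument meanings are defined in main.calc_velocity)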
def test_merge_datasets_success(dataset):
ds = merge_datasets([
os.path.join(FIXTURES_DIR, 'dataset1.csv'),
os.path.join(FIXTURES_DIR, 'dataset2.csv')
])
assert_frame_equal(ds, dataset, check_dtype=False)
| 23.921053 | 68 | 0.711771 | 132 | 909 | 4.651515 | 0.401515 | 0.058632 | 0.081433 | 0.117264 | 0.237785 | 0.237785 | 0.237785 | 0.172638 | 0 | 0 | 0 | 0.039474 | 0.163916 | 909 | 37 | 69 | 24.567568 | 0.768421 | 0 | 0 | 0 | 0 | 0 | 0.080308 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 1 | 0.148148 | false | 0 | 0.185185 | 0 | 0.37037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d5459c687261b94c617960f25183e8dcf9884d | 2,016 | py | Python | WebMirror/management/rss_parser_funcs/feed_parse_extractKONDEETranslations.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractKONDEETranslations.py | fake-name/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/rss_parser_funcs/feed_parse_extractKONDEETranslations.py | rrosajp/ReadableWebProxy | ed5c7abe38706acc2684a1e6cd80242a03c5f010 | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z | def extractKONDEETranslations(item):
"""
#'KONDEE Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
tagmap = [
('Sakyubasu ni Tensei Shitanode Miruku o Shiborimasu', 'Sakyubasu ni Tensei Shitanode Miruku o Shiborimasu', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
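	# a tag match wins and returns immediately; title matching below is the fallback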
titlemap = [
('Rune Troopers', 'Rune Troopers', 'translated'),
('SUCCUBUS NI TENSEI SHITANODE MIRUKU WO SHIBORIMASU ', 'Sakyubasu ni tensei shitanode miruku o shiborimasu', 'translated'),
('SAKYUBASU NI TENSEI SHITANODE MIRUKU O SHIBORIMASU ', 'Sakyubasu ni tensei shitanode miruku o shiborimasu', 'translated'),
('Omae wo Otaku ni Shiteyaru kara, Ore wo Riajuu ni Shitekure!', 'Omae o Otaku ni Shiteyaru kara, Ore o Riajuu ni Shitekure!', 'translated'),
('Omae o otaku ni shiteyaru kara, ore o riajuu ni shitekure!', 'Omae o Otaku ni Shiteyaru kara, Ore o Riajuu ni Shitekure!', 'translated'),
('Omae wo Otaku ni Shiteyarukara Ore wo Riajuu ni Shitekure', 'Omae o Otaku ni Shiteyaru kara, Ore o Riajuu ni Shitekure!', 'translated'),
('Omae o otaku ni shiteyaru kara ore o riajuu ni shitekure', 'Omae o Otaku ni Shiteyaru kara, Ore o Riajuu ni Shitekure!', 'translated'),
('Chuuko Demo Koi ga Shitai', 'Chuuko demo Koi ga Shitai!', 'translated'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
 | 57.6 | 152 | 0.628968 | 237 | 2,016 | 5.324895 | 0.232068 | 0.044374 | 0.107765 | 0.110935 | 0.714739 | 0.623613 | 0.623613 | 0.623613 | 0.623613 | 0.572108 | 0 | 0 | 0.27381 | 2,016 | 35 | 153 | 57.6 | 0.862022 | 0.010913 | 0 | 0.083333 | 0 | 0 | 0.48262 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d62da64c8583675cd96ec2da27f258feaa8342 | 5,235 | py | Python | python/stepspy-current/demo/demo_dynamic.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 29 | 2019-10-30T07:04:10.000Z | 2022-02-22T06:34:32.000Z | python/stepspy-current/demo/demo_dynamic.py | cuihantao/steps | 60327bf42299cb7117ed5907a931583d7cdf590d | [
"MIT"
] | 1 | 2021-09-25T15:29:59.000Z | 2022-01-05T14:04:18.000Z | python/stepspy-current/demo/demo_dynamic.py | changgang/steps | 9b8ea474581885129d1c1a1c3ad40bc8058a7e0a | [
"MIT"
] | 8 | 2019-12-20T16:13:46.000Z | 2022-03-20T14:58:23.000Z | from stepspy import STEPS, POUCH_CSV
simulator = STEPS(is_default = False, log_file = 'test.log')
simulator.info()
simulator.set_toolkit_log_file("newtest.log", log_file_append_mode=False)
simulator.set_parallel_thread_number(1)
simulator.set_dynamic_model_database_capacity(10000000)
max_bus = simulator.get_allowed_maximum_bus_number()
info = "The default maximum bus number is: "+str(max_bus)
print(info)
simulator.set_allowed_maximum_bus_number(10000)
max_bus = simulator.get_allowed_maximum_bus_number()
info = "The default maximum bus number is changed to: "+str(max_bus)
print(info)
simulator.load_powerflow_data('IEEE39.raw','PSS/E')
simulator.check_powerflow_data()
simulator.check_network_connectivity()
simulator.build_dynamic_network_Y_matrix()
simulator.save_dynamic_network_Y_matrix('ymatrix_dyn.csv')
simulator.build_network_Z_matrix()
simulator.save_network_Z_matrix('zmatrix_dyn.csv')
nbus = simulator.get_bus_count()
print(nbus)
nline = simulator.get_line_count()
print(nline)
ntrans = simulator.get_transformer_count()
print(ntrans)
nload = simulator.get_load_count()
print(nload)
print("here goes all buses")
buses = simulator.get_all_buses()
for bus in buses:
busname = simulator.get_bus_data(bus, "string", "bus name")
basevoltage = simulator.get_bus_data(bus, "double", "base voltage in kV")
print(bus, busname, basevoltage)
print("here goes all lines")
lines = simulator.get_lines_at_bus(0)
for line in lines:
status_send = simulator.get_line_data(line, "bool", "sending side breaker status")
status_recv = simulator.get_line_data(line, "bool", "receiving side breaker status")
r1 = simulator.get_line_data(line, "double", "r1_pu")
x1 = simulator.get_line_data(line, "double", "x1_pu")
g1 = simulator.get_line_data(line, "double", "g1_pu")
b1 = simulator.get_line_data(line, "double", "b1_pu")
print(line, status_send, status_recv, r1, x1, g1, b1)
print("here goes all transformer")
transes = simulator.get_transformers_at_bus(0)
for trans in transes:
status_primary = simulator.get_transformer_data(trans, "bool", "primary", "status")
status_secondary = simulator.get_transformer_data(trans, "bool", "secondary", "status")
status_tertiary = simulator.get_transformer_data(trans, "bool", "tertiary", "status")
gm = simulator.get_transformer_data(trans, "double", "transformer", "gm_pu")
bm = simulator.get_transformer_data(trans, "double", "transformer", "bm_pu")
print(trans, status_primary, status_secondary, status_tertiary, gm, bm)
print("here goes solving powerflow")
simulator.set_powerflow_solver_parameter('bool','flat start logic', True)
simulator.solve_powerflow('NR')
simulator.save_powerflow_result('pfresult.csv')
simulator.save_network_matrix('ymatrix.csv')
simulator.save_jacobian_matrix('jacobian.csv')
print("here goes running dynamic simulation")
simulator.set_dynamic_model_database_capacity(1000000)
simulator.load_dynamic_data('IEEE39.dyr','psse')
simulator.check_missing_models()
simulator.check_dynamic_data()
simulator.check_least_dynamic_time_constants()
print("here goes generator dynamic data")
gens = simulator.get_generators_at_bus(0)
for gen in gens:
gen_model = simulator.get_generator_related_model_name(gen, "GEN")
avr_model = simulator.get_generator_related_model_name(gen, "avr")
pss_model = simulator.get_generator_related_model_name(gen, "pss")
gov_model = simulator.get_generator_related_model_name(gen, "gov")
pmax = simulator.get_generator_related_model_data(gen, "gov", 'pmax')
pmin = simulator.get_generator_related_model_data(gen, "gov", 'pmin')
mbase = simulator.get_generator_data(gen, 'd', "mbase_MVA")
print(gen, mbase, gen_model, avr_model, pss_model, gov_model, pmax, pmin)
data = simulator.get_generator_related_model_parameter_pair(gen, "gen")
print(gen_model, data)
simulator.set_dynamic_simulator_parameter('b','bin export logic',False)
simulator.set_dynamic_simulator_parameter('b','csv export logic',True)
simulator.set_dynamic_simulator_parameter('d','ITERATION ACCELERATOR',1.0)
simulator.set_dynamic_simulator_parameter('d','MAX POWER IMBALANCE IN MVA',0.1)
simulator.set_dynamic_simulator_parameter('i','MAX DAE ITERATION',3)
simulator.set_dynamic_simulator_parameter('i','MIN DAE ITERATION',3)
simulator.set_dynamic_simulator_parameter('i','MAX NETWORK ITERATION',100)
simulator.set_dynamic_simulator_parameter('i','MAX UPDATE ITERATION',3)
simulator.set_dynamic_simulator_parameter('b','AUTOMATIC ACCELERATOR TUNE LOGIC',False)
simulator.set_dynamic_simulator_parameter('b','ANGLE STABILITY SURVEILLANCE LOGIC',False)
simulator.set_dynamic_simulator_parameter('d','ANGLE STABILITY THRESHOLD IN DEG',360.0)
simulator.set_dynamic_simulation_time_step(0.01)
simulator.set_dynamic_simulator_output_file('ieee39')
simulator.prepare_meters('all')
simulator.start_dynamic_simulation()
simulator.run_dynamic_simulation_to_time(1.0)
simulator.set_bus_fault(15, 'three phase fault',[0.0, -2e2])
simulator.run_dynamic_simulation_to_time(1.1)
simulator.clear_bus_fault(15, 'three phase fault')
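# the three-phase fault at bus 15 lasts 0.1 s of simulated time (1.0 s -> 1.1 s)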
simulator.run_dynamic_simulation_to_time(5.0)
simulator.stop_dynamic_simulation()
time, value, dychannel = POUCH_CSV('ieee39.csv')
 | 40.898438 | 91 | 0.792359 | 742 | 5,235 | 5.262803 | 0.234501 | 0.095262 | 0.072983 | 0.086044 | 0.423816 | 0.404097 | 0.245583 | 0.15621 | 0.063508 | 0.037388 | 0 | 0.015756 | 0.090735 | 5,235 | 128 | 92 | 40.898438 | 0.804622 | 0 | 0 | 0.04 | 0 | 0 | 0.185829 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.01 | 0 | 0.01 | 0.17 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d843360df31c7f2fcc1e0199a39a68f8fdf8a4 | 1,222 | py | Python | Student-management/code.py | singhbipin2117/dsmp-pre-work | b8af229276d46c40edb7e79a1387c6e2f3a481a2 | [
"MIT"
] | null | null | null | Student-management/code.py | singhbipin2117/dsmp-pre-work | b8af229276d46c40edb7e79a1387c6e2f3a481a2 | [
"MIT"
] | null | null | null | Student-management/code.py | singhbipin2117/dsmp-pre-work | b8af229276d46c40edb7e79a1387c6e2f3a481a2 | [
"MIT"
] | null | null | null | # --------------
# Code starts here
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class = class_1 + class_2
new_class.append('Peter Warden')
# note: removing items from a list while iterating over it is fragile;
# a direct remove() is enough here
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {"Math":65, "English":70, "History":80, "French": 70, "Science": 60}
marks = courses.values()
total = sum(marks)
print(total)
percentage = (total/500) * 100
print(percentage)
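# for the marks above: total = 345, percentage = 69.0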
# Code ends here
# --------------
# Code starts here
mathematics = {'Geoffrey Hinton' : 78, 'Andrew Ng': 95, 'Sebastian Raschka': 65,
'Yoshua Benjio': 50, 'Hilary Mason': 70, 'Corinna Cortes': 66, 'Peter Warden': 75}
topper = max(mathematics, key=mathematics.get)  # name of the highest scorer
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
# Code starts here
first_name, last_name = topper.split(" ")
full_name = last_name + " " + first_name
certificate_name = full_name.upper()
print(certificate_name)
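# for topper = 'andrew ng' this prints: NG ANDREW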
# Code ends here
| 21.068966 | 83 | 0.663666 | 162 | 1,222 | 4.876543 | 0.432099 | 0.050633 | 0.070886 | 0.040506 | 0.065823 | 0.065823 | 0 | 0 | 0 | 0 | 0 | 0.033074 | 0.158756 | 1,222 | 57 | 84 | 21.438596 | 0.735409 | 0.165303 | 0 | 0 | 0 | 0 | 0.260956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d94b24d93064be7c3949298961c79e802c2296 | 3,759 | py | Python | vmad/core/node.py | VMBoehm/vmad | 3aeb57a43de10e146756f074cca7f77f210e3e74 | [
"BSD-2-Clause"
] | null | null | null | vmad/core/node.py | VMBoehm/vmad | 3aeb57a43de10e146756f074cca7f77f210e3e74 | [
"BSD-2-Clause"
] | null | null | null | vmad/core/node.py | VMBoehm/vmad | 3aeb57a43de10e146756f074cca7f77f210e3e74 | [
"BSD-2-Clause"
] | null | null | null | class Node:
""" A node on the computing graph.
The node is the first argument to apl(node, ....) and
jvp / vjp functions.
node[argname] gives the input symbol
"""
def __init__(self, primitive, _frameinfo):
self.primitive = primitive
self.operator = primitive.operator
self.prototype = primitive.operator.prototype
self._frameinfo = _frameinfo
# add a few aliases for accessing primitive attributes
#
self.name = primitive.name
self._varin = {} # references
self._varout = {}
def __getitem__(self, key):
""" getting input variables as symbols """
# varin are references.
return self._varin[key].symbol
@property
def varin(self):
return self._varin
@property
def varout(self):
return self._varout
def __repr__(self):
#return "%s(%s=>%s) at %s:%d" % (type(self).__name__, self.varin, self._varout, self._frameinfo[0], self._frameinfo[1])
return "%s @ %s : %s " % (self.name, self._frameinfo[0], self._frameinfo[1])
def call(self, kwargs):
""" call the implementation function of the primitive;
invoked by the Context
kwargs: the arguments that goes into the impl function
Returns: dict, result for each varout.
"""
from .symbol import BaseSymbol
for key, value in kwargs.items():
assert not isinstance(value, BaseSymbol)
r = self.primitive.impl(self, **kwargs)
# allow returning without using a dict
# if there is only a single output argument
if not isinstance(r, dict):
if len(self.varout) == 1:
argname = next(iter(self.varout.keys()))
r = {argname:r}
if len(self.varout) == 0:
if r is not None:
raise ValueError("Return value of the primitive is not None, while no output arguments are defined")
r = {}
for key, value in r.items():
assert not isinstance(value, BaseSymbol)
return r
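    # Note on call(): an impl returning a bare value (not a dict) is wrapped
    # into {argname: value} above when the primitive declares exactly one output.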
def record(self, kwargs, r):
""" generate the kwargs that goes into the tape;
default is to record the entire kwargs.
Sometimes we do not need the entire kwargs; e.g.
for linear operators we only need enough information to create
the output array of the back-prop gradient
but we don't need the actual parameters.
invoked by the Context.
kwargs: the arguments that goes into the impl function
r : the result of the calculation operator apl, dict from argname to value
see above.
Returns: dict that goes into the tape, will be available in vjp and jpv
"""
# merge the two dictionaries, prioritizing kwargs (inputs).
d = {}
d.update(r)
d.update(kwargs)
from .symbol import BaseSymbol
for key, value in d.items():
assert not isinstance(value, BaseSymbol)
return self.primitive.record_impl(self, **d)
def find_primitive_type(node, func):
# we will only do this on the apl primitives
# because otherwise this is undefined
# the algebra of autodiff in vmad3 is explicitly not closed!
assert node.primitive == node.operator.apl
assert func in ['vjp', 'jvp', 'apl']
if func == 'jvp': return node.operator.jvp
if func == 'vjp': return node.operator.vjp
if func == 'apl': return node.operator.apl
def is_literal(self, argname):
from vmad.core.symbol import Literal
return isinstance(self.varin[argname].symbol, Literal)
| 33.864865 | 127 | 0.597765 | 469 | 3,759 | 4.720682 | 0.324094 | 0.0271 | 0.02168 | 0.0271 | 0.193767 | 0.176603 | 0.133695 | 0.093044 | 0.057814 | 0.057814 | 0 | 0.002726 | 0.31684 | 3,759 | 110 | 128 | 34.172727 | 0.859424 | 0.361266 | 0 | 0.134615 | 0 | 0 | 0.051318 | 0 | 0 | 0 | 0 | 0 | 0.096154 | 1 | 0.173077 | false | 0 | 0.057692 | 0.057692 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25d9d61573cd0d40344205aeac86f2dee6d6a535 | 5,111 | py | Python | tests/test_contributions.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 2 | 2016-12-04T04:32:34.000Z | 2019-04-18T09:38:33.000Z | tests/test_contributions.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 4 | 2017-05-24T01:37:48.000Z | 2018-04-09T02:35:25.000Z | tests/test_contributions.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 2 | 2016-11-21T03:49:43.000Z | 2017-10-05T04:08:58.000Z | import unittest
import pyalveo
import requests_mock
import json
CONTEXT = { "ausnc": "http://ns.ausnc.org.au/schemas/ausnc_md_model/",
"corpus": "http://ns.ausnc.org.au/corpora/",
"dc": "http://purl.org/dc/terms/",
"dcterms": "http://purl.org/dc/terms/",
"foaf": "http://xmlns.com/foaf/0.1/",
"hcsvlab": "http://hcsvlab.org/vocabulary/"
}
API_URL = "http://example.alveo.froob"
API_KEY = "fakekeyvalue"
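# every test first mocks /item_lists.json because constructing pyalveo.Client
# appears to verify the API key against it (inferred from the tests below)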
@requests_mock.Mocker()
class ContributionsTest(unittest.TestCase):
def test_create_contribution(self, m):
"""Test that we can create a new contribution"""
m.get(API_URL + "/item_lists.json",json={'success': 'yes'})
client = pyalveo.Client(api_url=API_URL, api_key=API_KEY)
cname = 'testcontrib'
m.post(client.oauth.api_url + "/contrib/",
json={'description': 'This is contribution description',
'documents': [{'name': 'testfile.txt',
'url': 'https://staging.alveo.edu.au/catalog/demotext/2006-05-28-19/document/testfile.txt'}],
'id': '29',
'metadata': {'abstract': '"This is contribution abstract"',
'collection': 'https://staging.alveo.edu.au/catalog/demotext',
'created': '2018-12-06T05:46:11Z',
'creator': 'Data Owner',
'title': 'HelloWorld'},
'name': 'HelloWorld',
'url': 'https://staging.alveo.edu.au/contrib/29'}
)
meta = {
"contribution_name": "HelloWorld",
"contribution_collection": "demotext",
"contribution_text": "This is contribution description",
"contribution_abstract": "This is contribution abstract"
}
result = client.create_contribution(meta)
# validate the request we made
req = m.last_request
self.assertEqual(req.method, 'POST')
# check that the right things were in the request
self.assertIn('contribution_collection', req.json())
self.assertIn('contribution_name', req.json())
self.assertDictEqual(meta, req.json())
def test_get_contribution(self, m):
"""Get details of a contribution"""
m.get(API_URL + "/item_lists.json",json={'success': 'yes'})
client = pyalveo.Client(api_url=API_URL, api_key=API_KEY)
cname = '29'
contrib_url = client.oauth.api_url + "/contrib/" + cname
m.get(contrib_url,
json={'description': 'This is contribution description',
'documents': [{'name': 'testfile.txt',
'url': 'https://staging.alveo.edu.au/catalog/demotext/2006-05-28-19/document/testfile.txt'}],
'metadata': {'abstract': '"This is contribution abstract"',
'collection': 'https://staging.alveo.edu.au/catalog/demotext',
'created': '2018-12-06T05:46:11Z',
'creator': 'Data Owner',
'title': 'HelloWorld'},
'name': 'HelloWorld',
'url': contrib_url}
)
result = client.get_contribution(contrib_url)
req = m.last_request
self.assertEqual(req.method, "GET")
self.assertEqual(result['id'], cname)
self.assertEqual(result['description'], 'This is contribution description')
def test_add_document_to_contrib(self, m):
"""Test adding documents to a contribution"""
m.get(API_URL + "/item_lists.json",json={'success': 'yes'})
client = pyalveo.Client(api_url=API_URL, api_key=API_KEY)
collection_name = "testcollection1"
itemname = "item1"
docname = "doc1.txt"
content = "Hello World!\n"
item_uri = API_URL + "/catalog/%s/%s" % (collection_name, itemname)
m.post(item_uri, json={"success":"Added the document %s to item %s in collection %s" % (docname, itemname, collection_name)})
docmeta = {
"dcterms:title": "Sample Document",
"dcterms:type": "Text"
}
document_uri = client.add_document(item_uri, docname, docmeta, content=content, contrib_id=1)
req = m.last_request
payload = req.json()
self.assertEqual(payload['document_content'], content)
self.assertIn('metadata', payload)
md = payload['metadata']
self.assertIn('dcterms:title', md)
self.assertEqual(md['dcterms:title'], docmeta['dcterms:title'])
self.assertEqual(md['@type'], "foaf:Document")
self.assertEqual(md['dcterms:identifier'], docname)
# in addition to the above info for add_document we
# should have the contribution id in the payload JSON
self.assertIn('contribution_id', payload)
if __name__ == "__main__" :
unittest.main(verbosity=5)
| 39.015267 | 133 | 0.562121 | 541 | 5,111 | 5.181146 | 0.266174 | 0.027827 | 0.044952 | 0.035676 | 0.418123 | 0.350339 | 0.34249 | 0.34249 | 0.314663 | 0.314663 | 0 | 0.01703 | 0.299159 | 5,111 | 130 | 134 | 39.315385 | 0.765494 | 0.057132 | 0 | 0.293478 | 0 | 0.021739 | 0.333125 | 0.013958 | 0 | 0 | 0 | 0 | 0.152174 | 1 | 0.032609 | false | 0 | 0.043478 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25dcb2f396f296dfd8b14300cac137c91f58f89f | 10,271 | py | Python | scripts/fsqio/python3-port-utils/pants/futurize.py | jglesner/fsqio | 436dd3a7667fd23f638bf96bdcd9ec83266a2319 | [
"Apache-2.0"
] | 252 | 2016-01-08T23:12:13.000Z | 2022-01-17T16:31:49.000Z | scripts/fsqio/python3-port-utils/pants/futurize.py | jglesner/fsqio | 436dd3a7667fd23f638bf96bdcd9ec83266a2319 | [
"Apache-2.0"
] | 67 | 2016-01-13T17:34:12.000Z | 2021-08-04T18:50:24.000Z | scripts/fsqio/python3-port-utils/pants/futurize.py | jglesner/fsqio | 436dd3a7667fd23f638bf96bdcd9ec83266a2319 | [
"Apache-2.0"
] | 59 | 2016-03-25T20:49:03.000Z | 2021-08-04T05:36:38.000Z | #!/usr/bin/env python3
import argparse
import itertools
import subprocess
import sys
import re
from glob import glob
from textwrap import dedent
from typing import List, NamedTuple
def main() -> None:
parser = create_parser()
args = parser.parse_args()
# preview changes needed for file
if not args.file_names:
target_root = determine_target_root(args.folder, args.contrib, args.test)
check_what_needs_changes(target_root, args.root_only)
return
# futurize files
for file_name in args.file_names:
paths = determine_paths(args, file_name)
if args.preview:
preview_changes(paths.file_path)
continue
futurize_diff = call_futurize(paths.file_path)
if not futurize_made_changes(futurize_diff):
continue
if new_imports_added(futurize_diff):
update_build_dependencies(paths.target_root, paths.pants_target_name, file_name)
call_pants_fmt(paths.pants_target_path)
prompt_review_of_diffs(futurize_diff)
if not args.no_tests and file_changed(paths.file_path):
call_pants_test(paths.pants_test_path)
# --------------------------------------------------
# Command line utils
# -------------------------------------------------
def get_stdout(command: List[str]) -> str:
return subprocess.run(
command,
stdout=subprocess.PIPE,
encoding='utf-8') \
.stdout.strip()
def get_stderr(command: List[str]) -> str:
return subprocess.run(
command,
stderr=subprocess.PIPE,
encoding='utf-8') \
.stderr.strip()
# --------------------------------------------------
# Setup
# -------------------------------------------------
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description='Run futurize script over targets.')
parser.add_argument('folder', help='Target folder name, e.g. backend/jvm')
parser.add_argument(
'file_names',
nargs='*',
default=[],
help='Specific .py file(s). Ignore this arg to see changes necessary in folder.'
)
parser.add_argument('-t', '--test', action='store_true', help='Operate on test targets.')
parser.add_argument('-p', '--preview', action='store_true', help='Do not write changes.')
parser.add_argument('-n', '--no-tests', action='store_true', help='Skip unit tests.')
parser.add_argument('-r', '--root-only', action='store_true', help='Do not recursively search subfolders.')
parser.add_argument('-c', '--contrib', action='store_true', help='Operate on targets in contrib/.')
return parser
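# hypothetical invocations (folder/file names are illustrative):
#   python futurize.py backend/jvm                # list what still needs porting
#   python futurize.py backend/jvm targets.py -t  # futurize one test file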
class Paths(NamedTuple):
target_root: str
file_path: str
pants_target_name: str
pants_target_path: str
pants_test_path: str
SRC_BASE_ROOT = 'src/python/pants'
TEST_BASE_ROOT = 'tests/python/pants_test'
def determine_paths(args, file_name: str) -> Paths:
target_root = determine_target_root(args.folder, args.contrib, args.test)
test_root = determine_target_root(args.folder, args.contrib, is_test=True)
pants_target_name = determine_pants_target_name(target_root, file_name)
file_path = f'{target_root}/{file_name}'
pants_target_path = f'{target_root}:{pants_target_name}'
pants_test_path = f'{test_root}:{pants_target_name}'
return Paths(
target_root=target_root,
file_path=file_path,
pants_target_name=pants_target_name,
pants_target_path=pants_target_path,
pants_test_path=pants_test_path
)
def determine_target_root(folder: str, is_contrib: bool, is_test: bool) -> str:
if is_contrib:
target_folder_root = folder.split('/')[0]
base_root = (f'contrib/{target_folder_root}/{TEST_BASE_ROOT}/contrib'
if is_test
else f'contrib/{target_folder_root}/{SRC_BASE_ROOT}/contrib')
else:
base_root = TEST_BASE_ROOT if is_test else SRC_BASE_ROOT
return f'{base_root}/{folder}' if folder else base_root
def determine_pants_target_name(target_root: str, file_name: str) -> str:
file_map = get_stdout([
'./pants',
'filemap',
f'{target_root}:'
]).split('\n')
target_entry = next((line for line in file_map if file_name in line), None)
if target_entry is None:
raise SystemExit(dedent(f"""\n
ERROR: File name '{file_name}' invalid. Not found anywhere in {target_root}/BUILD."""))
pants_target_path = target_entry.split(' ')[1]
pants_target_name = pants_target_path.split(':')[1]
return pants_target_name
# --------------------------------------------------
# Futurize script
# -------------------------------------------------
FUTURIZE_BIN = 'build-support/pants_dev_deps.venv/bin/futurize'
def check_what_needs_changes(folder_root: str, root_only: bool) -> None:
file_paths = (glob(f'{folder_root}/*.py', recursive=False)
if root_only
else glob(f'{folder_root}/**/*.py', recursive=True))
futurize_output = get_stderr([
FUTURIZE_BIN,
'--stage2',
'--no-diffs'
] + file_paths) \
.split('\n')
errors_dropped = itertools.takewhile(
lambda line: not re.match('RefactoringTool:.*error:', line),
futurize_output)
ignore_unnecessary_lines = itertools.dropwhile(
lambda line: 'RefactoringTool: Files that need to be modified:' not in line,
errors_dropped)
remove_refactoring_text = [line.replace('RefactoringTool: ', '') for line in ignore_unnecessary_lines]
no_header = list(remove_refactoring_text)[1:]
if not no_header:
print('Folder is already Python 3 compatible 🐍 🎉')
return
split_by_warning: List[List[str]] = [list(group) for k, group
in itertools.groupby(no_header,
lambda line: 'Warnings/messages while refactoring:' in line)
if not k]
if len(split_by_warning) == 2: # warnings
print('Warnings while refactoring:\n' + '\n'.join(split_by_warning[1]) + '\n\n',
file=sys.stderr)
dropped_warnings = split_by_warning[0]
def drop_prefix(line: str) -> str:
return (line.split(f'{TEST_BASE_ROOT}/')[1]
if TEST_BASE_ROOT in line
else line.split(f'{SRC_BASE_ROOT}/')[1])
remove_path_prefix = [drop_prefix(line) for line in dropped_warnings]
if 'contrib' in folder_root:
remove_path_prefix = [line.split('contrib/')[1] for line in remove_path_prefix]
formatted_for_cli = ([f"{line.split('/')[-1]}" for line in remove_path_prefix]
if root_only
else [f"{'/'.join(line.split('/')[:-1])} {line.split('/')[-1]}" for line in remove_path_prefix])
delimiter = '\n' if not root_only else ' '
print(delimiter.join(sorted(formatted_for_cli)))
def preview_changes(file_path: str) -> None:
subprocess.run([
FUTURIZE_BIN,
'--stage2',
file_path
])
def call_futurize(file_path: str) -> str:
return get_stdout([
FUTURIZE_BIN,
'--stage2',
'--write',
'--nobackup',
file_path
])
# --------------------------------------------------
# Check for changes
# -------------------------------------------------
def file_changed(file_path: str) -> bool:
git_changes = get_stdout(['git', 'ls-files', '-m'])
return file_path in git_changes
def futurize_made_changes(futurize_output: str) -> bool:
return bool(futurize_output)
def new_imports_added(futurize_output: str) -> bool:
return 'import' in futurize_output
# --------------------------------------------------
# Update BUILD
# -------------------------------------------------
def _find_target_index_in_build(build_lines: List[str], pants_target_name: str, file_name: str) -> int:
index = next((i for i, line in enumerate(build_lines)
if f"name = '{pants_target_name}'" in line
or f"name='{pants_target_name}'" in line),
None)
if index is None: # mono-target
index = next((i for i, line in enumerate(build_lines) if file_name in line), None)
if index is None: # only one target block in file, and sources aren't specified
index = next(i for i, line in enumerate(build_lines) if 'python_' in line and '(' in line)
return index
def _future_dependency_already_added(lines: List[str], starting_index: int) -> bool:
for line in lines[starting_index:]:
if '3rdparty/python:future' in line:
return True
if ')\n' in line: # done with dependencies section
return False
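    # if no ')' line is found the function falls off the end and returns None,
    # which callers treat the same as False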
def update_build_dependencies(folder_root: str, pants_target_name: str, file_name: str) -> None:
build_file = f'{folder_root}/BUILD'
with open(build_file, 'r') as f:
lines = list(f.readlines())
target_index = _find_target_index_in_build(lines, pants_target_name, file_name)
if _future_dependency_already_added(lines, target_index):
return
for i, line in enumerate(lines[target_index:]):
if 'dependencies = [' in line or 'dependencies=[' in line:
lines.insert(target_index + i + 1, " '3rdparty/python:future',\n")
break
if ')\n' in line: # dependencies section doesn't exist for target
lines.insert(target_index + i, ' dependencies = [\n')
lines.insert(target_index + i + 1, " '3rdparty/python:future',\n")
lines.insert(target_index + i + 2, ' ],\n')
break
with open(build_file, 'w') as f:
f.writelines(lines)
# --------------------------------------------------
# Pants goals
# -------------------------------------------------
def call_pants_fmt(pants_target_path: str) -> None:
subprocess.run([
'./pants',
'fmt',
pants_target_path
])
def call_pants_test(pants_test_target_path: str) -> None:
subprocess.run([
'./pants',
'test',
pants_test_target_path
])
# --------------------------------------------------
# Prompt review of diffs
# -------------------------------------------------
def prompt_review_of_diffs(futurize_output: str) -> None:
input(dedent(f"""\
----------------------------------------------------------------------
Review the file for changes and make modifications if necessary.
----------------------------------------------------------------------
{futurize_output}
----------------------------------------------------------------------
Input the enter key when ready to move on."""))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| 32.919872 | 123 | 0.620485 | 1,301 | 10,271 | 4.641045 | 0.183705 | 0.045545 | 0.039748 | 0.015734 | 0.287844 | 0.200066 | 0.134316 | 0.107155 | 0.066081 | 0.054488 | 0 | 0.00309 | 0.1808 | 10,271 | 311 | 124 | 33.025723 | 0.714286 | 0.101256 | 0 | 0.198198 | 0 | 0 | 0.205933 | 0.077701 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09009 | false | 0.004505 | 0.04955 | 0.027027 | 0.243243 | 0.013514 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25e1948389a30a6ca7680e5f23f31956636188e5 | 2,467 | py | Python | example.py | isanich/asyncio-mongo-reflection | 138f3b1373bf68562ce63c41833e68bbcc3ac0f2 | [
"MIT"
] | 5 | 2017-07-27T21:18:30.000Z | 2018-01-30T13:13:35.000Z | example.py | isanich/asyncio-mongo-reflection | 138f3b1373bf68562ce63c41833e68bbcc3ac0f2 | [
"MIT"
] | null | null | null | example.py | isanich/asyncio-mongo-reflection | 138f3b1373bf68562ce63c41833e68bbcc3ac0f2 | [
"MIT"
] | null | null | null | import asyncio
from motor import motor_asyncio
from mongodeque import MongoDequeReflection, MongoDictReflection
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = motor_asyncio.AsyncIOMotorClient()
db = client.test_db
# you should 'await' while the reflection instance is created;
# after that there is no difference from python's deque (every mongo write op is done in the background).
# with the 'rewrite=False' flag the initial list '[1, 2, [6, 7, 8]]' will be ignored next time (data will be loaded from db).
async def create_reflection():
# first arg is optional, without it empty reflection will be created
# or list will be loaded from mongo (if any found using provided obj_ref/key)
return await MongoDequeReflection([1, 2, [6, 7, 8]], col=db['example_reflection'],
obj_ref={'array_id': 'example'}, key='inner.arr',
rewrite=False)
# MongoDictReflection is similar to MongoDequeReflection but wraps python's dict.
# Note that you can create dicts inside MongoDequeReflection and lists inside MongoDictReflection
# All actions above that dicts and lists are reflected too.
mongo_reflection = loop.run_until_complete(create_reflection())
mongo_reflection.append(9)
mongo_reflection.popleft()
# nested reflections are created immediately so you can perform operations on them
mongo_reflection[1].extend(['a', 'b', [4, 5, 6]])
mongo_reflection[1][-1].pop()
# with mongo_reflection.mongo_pending.join() you can wait synchronously
# for mongo operation completion if needed
loop.run_until_complete(mongo_reflection.mongo_pending.join())
'''
# mongo db object
# note that 'obj_ref' could be ref to any existing mongo object
# or new one will be created like below:
{"_id": {"$oid": "59761ba93e5bb7435c1f6c9b"},
"array_id": "example",
"inner": {
"arr": [2, [6, 7, 8, "a", "b", [4, 5]], 9]}}
'''
'''
# also try this in aioconsole
# type in terminal "pip install aioconsole" then "apython" and paste:
from asyncio_mongo_reflection import MongoDequeReflection
import motor.motor_asyncio
client = motor.motor_asyncio.AsyncIOMotorClient()
db = client.test_db
ref = await MongoDequeReflection(col=db['example_reflection'],
obj_ref={'array_id': 'interacive_example'},
key='inner.arr', maxlen=10)
# empty reflection is created
# now you can try to modify ref and trace changes in any mongodb client
'''
 | 39.15873 | 117 | 0.71666 | 338 | 2,467 | 5.118343 | 0.452663 | 0.069364 | 0.005202 | 0.006936 | 0.132948 | 0.091329 | 0.091329 | 0.040462 | 0 | 0 | 0 | 0.020408 | 0.185651 | 2,467 | 63 | 118 | 39.15873 | 0.840717 | 0.340495 | 0 | 0 | 0 | 0 | 0.055838 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25e203a1f236d488e7a3d1ca0cd29a2ca9047db4 | 2,581 | py | Python | asvtorch/src/frontend/frame_selector.py | ElsevierSoftwareX/SOFTX-D-20-00038 | 9c656dd55467f4480d4c455106c86519288723c3 | [
"MIT"
] | 1 | 2021-05-25T05:45:32.000Z | 2021-05-25T05:45:32.000Z | asvtorch/src/frontend/frame_selector.py | ElsevierSoftwareX/SOFTX-D-20-00038 | 9c656dd55467f4480d4c455106c86519288723c3 | [
"MIT"
] | null | null | null | asvtorch/src/frontend/frame_selector.py | ElsevierSoftwareX/SOFTX-D-20-00038 | 9c656dd55467f4480d4c455106c86519288723c3 | [
"MIT"
] | 1 | 2021-08-03T15:48:51.000Z | 2021-08-03T15:48:51.000Z | # Copyright 2020 Ville Vestman
# This file is licensed under the MIT license (see LICENSE.txt).
import sys
import numpy as np
from asvtorch.src.settings.settings import Settings
# This class is used to for storing and applying VAD and diarization labels.
class FrameSelector:
def __init__(self, boolean_selectors: np.ndarray):
self.frame_count = boolean_selectors.size
self.selected_count = np.sum(boolean_selectors)
self.bits = np.packbits(boolean_selectors)
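        # labels are stored bit-packed (np.packbits), ~8x smaller than a bool array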
def select(self, frames: np.ndarray, id_for_error_message: str = '') -> np.ndarray:
boolean_selectors = np.unpackbits(self.bits, count=self.frame_count).astype(bool)
size_diff = boolean_selectors.size - frames.shape[0]
if size_diff != 0:
if abs(size_diff) > Settings().features.vad_mismatch_tolerance:
if size_diff > 0:
sys.exit('[ERROR] {}: frame selector has {} extra values'.format(id_for_error_message, size_diff))
else:
sys.exit('[ERROR] {}: {} values are missing from frame selector'.format(id_for_error_message, abs(size_diff)))
elif size_diff < 0:
boolean_selectors = np.hstack((boolean_selectors, np.asarray([False]*abs(size_diff), dtype=bool)))
print('[WARNING] {}: frame selector was missing {} values'.format(id_for_error_message, abs(size_diff)))
else:
boolean_selectors = boolean_selectors[:-size_diff]
print('[WARNING] {}: frame selector had {} extra values'.format(id_for_error_message, size_diff))
return frames[boolean_selectors, :]
def intersect(self, boolean_selectors: np.ndarray):
if self.frame_count != boolean_selectors.size:
sys.exit('ERROR: Cannot intersect selectors of different sizes')
self_selectors = np.unpackbits(self.bits, count=self.frame_count).astype(bool)
intersection = np.logical_and(self_selectors, boolean_selectors)
self.__init__(intersection)
def clip_to_length(self, n_frames: int):
if self.selected_count <= n_frames:
return
startpos = np.random.randint(self.selected_count - n_frames + 1)
boolean_selectors = np.unpackbits(self.bits, count=self.frame_count).astype(bool)
indices_of_selected = np.where(boolean_selectors)[0]
indices_to_set_zero = np.concatenate((indices_of_selected[:startpos], indices_of_selected[startpos+n_frames:]))
boolean_selectors[indices_to_set_zero] = 0
        self.__init__(boolean_selectors)
 | 52.673469 | 130 | 0.683069 | 328 | 2,581 | 5.109756 | 0.314024 | 0.171838 | 0.064439 | 0.050716 | 0.310263 | 0.247017 | 0.202864 | 0.202864 | 0.162291 | 0.112172 | 0 | 0.005435 | 0.215808 | 2,581 | 49 | 131 | 52.673469 | 0.822628 | 0.064316 | 0 | 0.102564 | 0 | 0 | 0.103234 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.076923 | 0 | 0.25641 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25e5141b8f26a32792d80ea7539a59750998a583 | 3,497 | py | Python | test/test_ssmbotocredentialprovider_fakemetadata.py | craighagan/ssmbotocredentialprovider | 99fc6c3f9daa63073ca05a7854165e89828a8528 | [
"MIT"
] | null | null | null | test/test_ssmbotocredentialprovider_fakemetadata.py | craighagan/ssmbotocredentialprovider | 99fc6c3f9daa63073ca05a7854165e89828a8528 | [
"MIT"
] | null | null | null | test/test_ssmbotocredentialprovider_fakemetadata.py | craighagan/ssmbotocredentialprovider | 99fc6c3f9daa63073ca05a7854165e89828a8528 | [
"MIT"
] | null | null | null | import datetime
import pytest
import mock
from copy import deepcopy
import os
import json
import shutil
import tempfile
import time
import botocore.auth
import ssmbotocredentialprovider.FakeMetadata
FAKE_CRED_CONTENTS = """
[default]
aws_access_key_id = fake_access_key
aws_secret_access_key = fake_secret_key
aws_session_token = fake_token
"""
FAKE_REGISTRATION_DATA = '{"ManagedInstanceID":"mi-xyzzy","Region":"us-test-1"}'
class TestFakeMetadata(object):
def setup(self):
self.credential_file = tempfile.mktemp()
self.ssm_registration_file = tempfile.mktemp()
with open(self.credential_file, "w") as f:
f.write(FAKE_CRED_CONTENTS)
with open(self.ssm_registration_file, "w") as f:
f.write(FAKE_REGISTRATION_DATA)
self.cp = ssmbotocredentialprovider.FakeMetadata.FakeMetadataCredentialProvider(credential_file=self.credential_file,
ssm_registration_file=self.ssm_registration_file)
assert self.cp.credential_file == self.credential_file
    def teardown(self):
        os.unlink(self.credential_file)
        os.unlink(self.ssm_registration_file)
def test_metadata(self):
metadata = self.cp.metadata
assert metadata == {
'account_id': '408421710122',
'device_name': 'i-12345',
'region': 'us-test-1',
'role_alias_name': 'FakeRole'
}
def test_metadata_credentials(self):
metadata_creds = self.cp.metadata_credentials
del metadata_creds["Expiration"]
del metadata_creds["LastUpdated"]
assert metadata_creds == {
'AccessKeyId': 'fake_access_key',
'Code': 'Success',
'SecretAccessKey': 'fake_secret_key',
'Token': 'fake_token',
'Type': 'AWS-HMAC'}
def test_role_name(self):
assert self.cp.role_name == "FakeRole"
@mock.patch.object(ssmbotocredentialprovider.FakeMetadata.FakeMetadataCredentialProvider, "get_credentials")
def test_update_timer(self, mock_get_credentials):
self.cp.update_timer(refresh_time_seconds=1)
time.sleep(2)
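        # the 1 s timer fires during the 2 s sleep, triggering a credential refresh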
assert mock_get_credentials.called is True
def test_cancel_timer_no_timer(self):
assert not hasattr(self.cp, "_update_timer")
self.cp.cancel_timer()
assert not hasattr(self.cp, "_update_timer")
@mock.patch.object(ssmbotocredentialprovider.FakeMetadata.FakeMetadataCredentialProvider, "get_credentials")
def test_cancel_timer(self, mock_get_credentials):
self.cp.update_timer(refresh_time_seconds=2)
time.sleep(1)
self.cp.cancel_timer()
time.sleep(2)
assert mock_get_credentials.called is False
@mock.patch.object(ssmbotocredentialprovider.FakeMetadata.FakeMetadataCredentialProvider, "get_credentials")
def test_get_refresh_seconds(self, mock_get_credentials):
retval = {
'accessKeyId': 'fake_access_key',
'secretAccessKey': 'fake_secret_key',
'sessionToken': 'fake_token',
'expiration': '2020-12-23T17:08:37Z',
}
expire_time = datetime.datetime.utcnow() + datetime.timedelta(hours=1)
retval['expiration'] = expire_time.strftime(botocore.auth.ISO8601)
mock_get_credentials.return_value = retval
refresh = self.cp.get_refresh_seconds()
assert refresh > 0.7*3600
assert refresh < 3600
| 34.284314 | 137 | 0.664856 | 382 | 3,497 | 5.814136 | 0.293194 | 0.032418 | 0.048627 | 0.030617 | 0.307519 | 0.278703 | 0.278703 | 0.232778 | 0.232778 | 0.194957 | 0 | 0.01994 | 0.23992 | 3,497 | 101 | 138 | 34.623762 | 0.815651 | 0 | 0 | 0.111111 | 0 | 0 | 0.160995 | 0.021161 | 0 | 0 | 0 | 0 | 0.123457 | 1 | 0.111111 | false | 0 | 0.135802 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25e923ebbc138832145e35e8b345c5eef1d35dc1 | 3,710 | py | Python | x86cpu/tests/info_getters.py | haiwei-li/x86cpu | 3b27596f67acaa6b2498bfb63efadb6bdcc4d46f | [
"BSD-2-Clause"
] | 6 | 2018-09-27T06:15:42.000Z | 2021-09-15T18:20:44.000Z | x86cpu/tests/info_getters.py | haiwei-li/x86cpu | 3b27596f67acaa6b2498bfb63efadb6bdcc4d46f | [
"BSD-2-Clause"
] | 1 | 2018-09-27T06:17:18.000Z | 2018-10-01T07:52:10.000Z | x86cpu/tests/info_getters.py | haiwei-li/x86cpu | 3b27596f67acaa6b2498bfb63efadb6bdcc4d46f | [
"BSD-2-Clause"
] | 4 | 2016-06-08T11:32:05.000Z | 2021-01-19T19:53:53.000Z | """ Test helpers
"""
from subprocess import check_output
class Missing(object):
""" Class to indicate missing info """
SYSCTL_KEY_TRANSLATIONS = dict(
model='model_display',
family='family_display',
extmodel='extended_model',
extfamily='extended_family')
SYSCTL_FLAG_TRANSLATIONS = {
'sse4.1': 'sse4_1',
'sse4.2': 'sse4_2',
}
def get_sysctl_cpu():
sysctl_text = check_output(['sysctl', '-a']).decode('utf8')
info = {}
for line in sysctl_text.splitlines():
if not line.startswith('machdep.cpu.'):
continue
line = line.strip()[len('machdep.cpu.'):]
key, value = line.split(': ', 1)
key = SYSCTL_KEY_TRANSLATIONS.get(key, key)
try:
value = int(value)
except ValueError:
pass
info[key] = value
flags = [flag.lower() for flag in info['features'].split()]
info['flags'] = [SYSCTL_FLAG_TRANSLATIONS.get(flag, flag)
for flag in flags]
info['unknown_flags'] = ['3dnow']
info['supports_avx'] = 'hw.optional.avx1_0: 1\n' in sysctl_text
info['supports_avx2'] = 'hw.optional.avx2_0: 1\n' in sysctl_text
return info
PCPUINFO_KEY_TRANSLATIONS = {
'vendor_id': 'vendor',
'model': 'model_display',
'family': 'family_display',
'model name': 'brand',
}
def get_proc_cpuinfo():
with open('/proc/cpuinfo', 'rt') as fobj:
pci_lines = fobj.readlines()
info = {}
for line in pci_lines:
line = line.strip()
if line == '': # End of first processor
break
key, value = line.split(':', 1)
key, value = key.strip(), value.strip()
key = PCPUINFO_KEY_TRANSLATIONS.get(key, key)
try:
value = int(value)
except ValueError:
pass
info[key] = value
info['flags'] = info['flags'].split()
# cpuinfo records presence of Prescott New Instructions, Intel's code name
# for SSE3.
if 'pni' in info['flags']:
info['flags'].append('sse3')
info['unknown_flags'] = ['3dnow']
info['supports_avx'] = 'avx' in info['flags']
info['supports_avx2'] = 'avx2' in info['flags']
return info
WMIC_KEY_TRANSLATIONS = dict(
manufacturer='vendor',
model='model_display',
level='family_display',
name='brand')
def get_wmic_cpu():
""" Get CPU parameters using ``wmic`` Windows utility
For a description of each CPU field, see:
https://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx
"""
wmic_text = check_output(
['wmic', 'cpu', 'get', '/all', '/format:textvaluelist']
).decode('latin1')
info = {}
for line in wmic_text.splitlines():
line = line.strip()
if line == '':
continue
key, value = line.split('=', 1)
key = key.lower()
key = WMIC_KEY_TRANSLATIONS.get(key, key)
try:
value = int(value)
except ValueError:
pass
if key in info: # Now we're looking at another processor
break
info[key] = value
    # Stepping is sometimes the empty string in wmic output
if 'stepping' in info and info['stepping'] == '':
info['stepping'] = Missing
# Get extra information from kernel32
from ctypes import windll, wintypes
has_feature = windll.kernel32.IsProcessorFeaturePresent
has_feature.argtypes = [wintypes.DWORD]
info['flags'] = {
'sse': has_feature(6),
'sse2': has_feature(10),
'sse3': has_feature(13), # Not available on XP
'mmx': has_feature(3),
'3dnow': has_feature(7),
}
info['unknown_flags'] = ('ssse3', 'sse4_1', 'sse4_2')
return info
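# A minimal dispatch sketch, not part of the original module: pick the right
# getter for the current platform. The sys.platform values are the standard
# CPython identifiers; the helper name itself is hypothetical.
import sys
def get_cpu_info():
    if sys.platform == 'darwin':
        return get_sysctl_cpu()
    if sys.platform.startswith('linux'):
        return get_proc_cpuinfo()
    if sys.platform == 'win32':
        return get_wmic_cpu()
    raise OSError('unsupported platform: %s' % sys.platform)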
| 28.75969 | 78 | 0.590296 | 449 | 3,710 | 4.739421 | 0.35412 | 0.033835 | 0.023966 | 0.018327 | 0.225094 | 0.207237 | 0.129699 | 0.095865 | 0.095865 | 0.095865 | 0 | 0.019514 | 0.267925 | 3,710 | 128 | 79 | 28.984375 | 0.763991 | 0.122372 | 0 | 0.353535 | 0 | 0 | 0.170656 | 0.006528 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0.030303 | 0.020202 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25ebcb044a317d6d6779de9daae7acf7db7d6196 | 5,192 | py | Python | attribution_printer.py | nickwbarber/hilt-scripts | 23df300d27c659da85acdf026a435dee0cb3c868 | [
"MIT"
] | 1 | 2018-06-25T17:30:57.000Z | 2018-06-25T17:30:57.000Z | attribution_printer.py | nickwbarber/hilt-scripts | 23df300d27c659da85acdf026a435dee0cb3c868 | [
"MIT"
] | null | null | null | attribution_printer.py | nickwbarber/hilt-scripts | 23df300d27c659da85acdf026a435dee0cb3c868 | [
"MIT"
] | null | null | null | import os
from itertools import chain
import gatenlphiltlab
import explanatory_style as es
def get_sentence(key_annotation):
if key_annotation.type.lower() == "sentence":
return
sentence = next(
(
annotation
for annotation in tree.search(key_annotation)
if annotation.type.lower() == "sentence"
),
None
)
return sentence
def get_context(key_annotation, distance):
center = get_sentence(key_annotation)
previous = []
following = []
count = 0
comparison_sentence = center
    while count < distance:
        # Stop early at a document boundary instead of stepping onto None.
        if comparison_sentence.previous is None:
            break
        previous.append(comparison_sentence.previous)
        count += 1
        comparison_sentence = comparison_sentence.previous
    count = 0
    comparison_sentence = center
    while count < distance:
        if comparison_sentence.next is None:
            break
        following.append(comparison_sentence.next)
        count += 1
        comparison_sentence = comparison_sentence.next
return chain(
list(reversed(previous)),
[center],
following,
)
relators = [
"because",
"cuz",
"since",
"after",
"when",
"whenever",
"once",
"therefore",
"so",
"if",
"soon",
"result",
"results",
"resulted",
"resulting",
"cause",
"causes",
"caused",
"causing",
"starts",
"start",
"starts",
"started",
"starting",
"make",
"makes",
"made",
"making",
"precipitate",
"precipitates",
"precipitated",
"precipitating",
"lead",
"leads",
"led",
"produce",
"produces",
"produced",
"producing",
"provoke",
"provokes",
"provoked",
"provoking",
"breeds",
"breeds",
"bred",
"breeding",
"induce",
"induces",
"induced",
"inducing",
"create",
"creates",
"created",
"creating",
"effect",
"effects",
"effected",
"effecting",
]
conversations_dir = "/home/nick/hilt/pes/conversations"
annotation_file_paths = [
os.path.join(root, f)
for root, dirs, files in os.walk(conversations_dir)
for f in files
if f.lower().endswith("pes_3_consensus.xml")
]
eau_count = 0
for annotation_file_path in annotation_file_paths:
basename = os.path.basename(annotation_file_path)
annotation_file = gatenlphiltlab.AnnotationFile(annotation_file_path)
annotations = annotation_file.annotations
annotations = [
annotation
for annotation in annotations
if annotation.type.lower() in [
"token",
"sentence",
"attribution",
"event",
]
]
tokens = [
annotation
for annotation in annotations
if annotation.type.lower() == "token"
]
sentences = [
annotation
for annotation in annotations
if annotation.type.lower() == "sentence"
]
gatenlphiltlab.dlink(sorted(sentences, key=lambda x: x.start_node))
events = [
es.Event(annotation)
for annotation in annotations
if (
annotation.type.lower() == "event"
and "consensus" in annotation.annotation_set_name.lower()
)
]
attributions = [
es.Attribution(annotation)
for annotation in annotations
if (
annotation.type.lower() == "attribution"
and "consensus" in annotation.annotation_set_name.lower()
)
]
annotations = chain(
tokens,
sentences,
attributions,
events,
)
tree = annotation_file.interval_tree
for annotation in annotations:
tree.add(annotation)
EAUs = es.get_event_attribution_units(events, attributions)
print(basename)
print()
for EAU in EAUs:
eau_count += 1
event = EAU.event
attribution = EAU.attribution
intersecting_sentences = [
annotation
for annotation in tree.search(attribution)
if annotation.type.lower() == "sentence"
]
intersecting_token_strings = [
annotation.text.lower()
for intersecting_sentence in intersecting_sentences
for annotation in tree.search(intersecting_sentence)
if annotation.type.lower() == "token"
]
relator_strings = [
string
for string in intersecting_token_strings
if string in relators
]
relator_string = ",".join(relator_strings)
        event_context = get_context(event, 5)
        attribution_context = get_context(attribution, 5)
context = sorted(
set(
chain(event_context, attribution_context)
),
key=lambda x: x.start_node,
)
print("Context:")
for x in context:
print(eau_count, x.id, x.text)
print()
print("Event:")
print()
print(event.get_concatenated_text())
print()
print("Attribution:")
print()
print(attribution.get_concatenated_text())
print()
print("relators = [{}]".format(relator_string))
print()
print()
print()
print()
print()
| 22.872247 | 73 | 0.574345 | 483 | 5,192 | 6.031056 | 0.304348 | 0.044628 | 0.058702 | 0.057673 | 0.296945 | 0.228974 | 0.162376 | 0.162376 | 0.097837 | 0 | 0 | 0.002547 | 0.319337 | 5,192 | 226 | 74 | 22.973451 | 0.821732 | 0 | 0 | 0.213592 | 0 | 0 | 0.113829 | 0.006356 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009709 | false | 0 | 0.019417 | 0 | 0.043689 | 0.092233 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25f0a89f4a14bb7d9ceabe42f6d7834a45f7d81e | 777 | py | Python | 07-numpy-lab/ufuncs.py | iproduct/coulse-ml | 65577fd4202630d3d5cb6333ddc51cede750fb5a | [
"Apache-2.0"
] | 1 | 2020-10-02T15:48:42.000Z | 2020-10-02T15:48:42.000Z | 07-numpy-lab/ufuncs.py | iproduct/coulse-ml | 65577fd4202630d3d5cb6333ddc51cede750fb5a | [
"Apache-2.0"
] | null | null | null | 07-numpy-lab/ufuncs.py | iproduct/coulse-ml | 65577fd4202630d3d5cb6333ddc51cede750fb5a | [
"Apache-2.0"
] | null | null | null | import numpy as np
if __name__ == '__main__':
x = [1, 2, 3, 4]
y = [5, 6, 7, 8]
z = []
for i, j in zip(x, y):
z.append(str(i) + str(j))
print(z)
def myconcat(x, y):
return int(str(x) + str(y))
uconcat = np.frompyfunc(myconcat, 2, 1)
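    # np.frompyfunc(func, nin, nout) wraps a plain Python function as a ufunc
    # with 2 inputs and 1 output, so it broadcasts like any NumPy ufunc.
    # For example, uconcat(12, 34) -> 1234 (returned with dtype=object).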
arrx = np.array(x).reshape(2, 2)
arry = np.array(y).reshape(2, 2)
print(arrx)
print(arry)
print("\nDot:\n", arrx.dot(arry))
print("\nConcat:\n",uconcat(arrx, arry))
# with broadcasting
x = [1, 2, 3, 4]
y = [4, 5, 6]
arrx = np.array(x)
arry = np.array(y).reshape((3,1))
print("\n", arrx)
print(arry)
print(uconcat(arrx, arry))
z = np.array([[1, 2, 3], [4, 5, 6]])
w = np.array([1, 2])
print("\n", (z.T + w).T) # T means transposed | 23.545455 | 49 | 0.504505 | 133 | 777 | 2.887218 | 0.345865 | 0.109375 | 0.023438 | 0.03125 | 0.130208 | 0.03125 | 0 | 0 | 0 | 0 | 0 | 0.055456 | 0.280566 | 777 | 33 | 49 | 23.545455 | 0.631485 | 0.046332 | 0 | 0.148148 | 0 | 0 | 0.041949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.037037 | 0.037037 | 0.111111 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25f0b8b673353f38381475af0f7d909f9123b249 | 2,632 | py | Python | bootstrap.py | rockwotj/dotfiles | cb48f3b729e0b650ebc4313c8003eb872024fa92 | [
"MIT"
] | null | null | null | bootstrap.py | rockwotj/dotfiles | cb48f3b729e0b650ebc4313c8003eb872024fa92 | [
"MIT"
] | null | null | null | bootstrap.py | rockwotj/dotfiles | cb48f3b729e0b650ebc4313c8003eb872024fa92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import os
import subprocess
PARSER = argparse.ArgumentParser(description = "Bootstrap personal config")
PARSER.add_argument(
"-f",
"--force",
action = "store_true",
help = "overwrite existing config files?",
dest = "force",
default = False)
def symlink(src, dest):
container = os.path.dirname(dest)
if not os.path.isdir(container):
os.makedirs(container)
elif os.path.lexists(dest):
if os.path.realpath(dest) == src:
print("{} already set up correctly, skipping...".format(dest))
return
elif FLAGS.force:
os.remove(dest)
else:
raise Exception("{} already exists, use -f to overwrite".format(dest))
print("symlinking {} to {}".format(src, dest))
os.symlink(src, dest)
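# Example (hypothetical paths): symlink('/cfg/git/gitconfig', '/home/me/.gitconfig')
# creates the parent directory if needed, skips links that already point to the
# source, and only overwrites other existing files when -f/--force is given.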
def check_call(cmd):
print("running {}...".format(" ".join(cmd)))
subprocess.check_call(cmd)
def git(config, home):
symlink(
"{}/git/gitconfig".format(config),
"{}/.gitconfig".format(home))
symlink(
"{}/git/gitignore_global".format(config),
"{}/.gitignore_global".format(home))
def tmux(config, home):
symlink(
"{}/tmux/tmux.conf".format(config),
"{}/.tmux.conf".format(home))
def nvim(config, home):
symlink(
"{}/nvim/vimrc".format(config),
"{}/.config/nvim/init.vim".format(home))
symlink(
"{}/nvim/autoplugins".format(config),
"{}/.local/share/nvim/site/pack/plugins/start".format(home))
symlink(
"{}/nvim/lazyplugins".format(config),
"{}/.local/share/nvim/site/pack/plugins/opt".format(home))
def zsh(config, home):
symlink(
"{}/zsh/zshrc".format(config),
"{}/.zshrc".format(home))
symlink(
"{}/zsh/zshrc.d".format(config),
"{}/.zshrc.d".format(home))
    # This needs to be on your $fpath
symlink(
"{}/zsh/completion".format(config),
"{}/.zsh/completion".format(home))
check_call(["mkdir", "-p", "{}/.zsh/cache/".format(home)])
def hg(config, home):
symlink(
"{}/hg/hgrc".format(config),
"{}/.hgrc".format(home))
def main():
global FLAGS
print("Starting bootstrap...")
FLAGS = PARSER.parse_args()
config = os.path.dirname(os.path.abspath(__file__))
home = os.environ["HOME"]
git(config, home)
tmux(config, home)
nvim(config, home)
zsh(config, home)
hg(config, home)
print("done!")
if __name__ == "__main__":
main()
| 27.416667 | 82 | 0.566109 | 294 | 2,632 | 5 | 0.363946 | 0.07483 | 0.057823 | 0.028571 | 0.055782 | 0.055782 | 0.055782 | 0.055782 | 0 | 0 | 0 | 0.000511 | 0.256839 | 2,632 | 95 | 83 | 27.705263 | 0.751022 | 0.021277 | 0 | 0.128205 | 0 | 0 | 0.238151 | 0.051671 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.038462 | 0 | 0.153846 | 0.064103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25f1c405e4f3a4b85b51c9e272095b3855de010f | 3,137 | py | Python | leetcode_python/Array/valid-word-square.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Array/valid-word-square.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Array/valid-word-square.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | """
LeetCode 422. Valid Word Square
Given a sequence of words, check whether it forms a valid word square.
A sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).
Note:
The number of words given is at least 1 and does not exceed 500.
Word length will be at least 1 and does not exceed 500.
Each word contains only lowercase English alphabet a-z.
Given a sequence of words, check whether it forms a valid word square.
A sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).
Example 1:
Input:
[
"abcd",
"bnrt",
"crmy",
"dtye"
]
Output:
true
Explanation:
The first row and first column both read "abcd".
The second row and second column both read "bnrt".
The third row and third column both read "crmy".
The fourth row and fourth column both read "dtye".
Therefore, it is a valid word square.
Example 2:
Input:
[
"abcd",
"bnrt",
"crm",
"dt"
]
Output:
true
Explanation:
The first row and first column both read "abcd".
The second row and second column both read "bnrt".
The third row and third column both read "crm".
The fourth row and fourth column both read "dt".
Therefore, it is a valid word square.
Example 3:
Input:
[
"ball",
"area",
"read",
"lady"
]
Output:
false
Explanation:
The third row reads "read" while the third column reads "lead".
Therefore, it is NOT a valid word square.
"""
# V0
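# A compact Python 3 sketch added here for reference (not from the original
# file): transpose with itertools.zip_longest and compare rows to columns.
from itertools import zip_longest
class Solution(object):
    def validWordSquare(self, words):
        """
        :type words: List[str]
        :rtype: bool
        """
        cols = [''.join(col) for col in zip_longest(*words, fillvalue='')]
        return words == cols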
# V1
# http://us.jiuzhang.com/solution/valid-word-square/#tag-highlight-lang-python
class Solution:
"""
@param words: a list of string
@return: return a boolean
"""
def validWordSquare(self, words):
# write your code here
n, m = len(words), len(words[0])
if(n != m):
return False
for i in range(n):
for j in range(m):
if(j >= n or i >= m or not(words[i][j] == words[j][i])):
return False
return True
# V1'
# http://bookshadow.com/weblog/2016/10/16/leetcode-valid-word-square/
class Solution(object):
def validWordSquare(self, words):
"""
:type words: List[str]
:rtype: bool
"""
m = len(words)
n = len(words[0]) if m else 0
if m != n:
return False
for x in range(m):
n = len(words[x])
c = 0
for y in range(m):
if len(words[y]) < x + 1:
break
c += 1
if c != n:
return False
for y in range(n):
if words[x][y] != words[y][x]:
return False
return True
# V2
# Time: O(m * n)
# Space: O(1)
class Solution(object):
def validWordSquare(self, words):
"""
:type words: List[str]
:rtype: bool
"""
for i in range(len(words)):
for j in range(len(words[i])):
if j >= len(words) or i >= len(words[j]) or \
words[j][i] != words[i][j]:
return False
return True | 22.731884 | 139 | 0.582404 | 471 | 3,137 | 3.883227 | 0.271762 | 0.049207 | 0.082012 | 0.061236 | 0.505194 | 0.505194 | 0.505194 | 0.505194 | 0.398032 | 0.398032 | 0 | 0.016189 | 0.310807 | 3,137 | 138 | 140 | 22.731884 | 0.828862 | 0.583679 | 0 | 0.378378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007246 | 0 | 1 | 0.081081 | false | 0 | 0 | 0 | 0.405405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25f474ba951a586d08fc977e2491327ce7286967 | 2,421 | py | Python | rewinder.py | CatMe0w/rewinder_rollwinder | 4092c3d2b238fa838386ae0b8c68a1a0674d5332 | [
"MIT"
] | null | null | null | rewinder.py | CatMe0w/rewinder_rollwinder | 4092c3d2b238fa838386ae0b8c68a1a0674d5332 | [
"MIT"
] | null | null | null | rewinder.py | CatMe0w/rewinder_rollwinder | 4092c3d2b238fa838386ae0b8c68a1a0674d5332 | [
"MIT"
] | null | null | null | import requests
import logging
import time
import json
TIEBA_NAME = ''
BDUSS = ''
TIEBA_FID = 0  # Copy the "fid" field from https://tieba.baidu.com/f/commit/share/fnameShareApi?fname=(forum name)
session = requests.Session()
def rewind(tid, pid=0):
cookies = {
'BDUSS': BDUSS,
}
headers = {
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="96", "Google Chrome";v="96"',
'DNT': '1',
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'X-Requested-With': 'XMLHttpRequest',
'sec-ch-ua-platform': '"Windows"',
}
data = {
'fn': TIEBA_NAME,
'fid': TIEBA_FID,
'tid_list[]': tid,
'pid_list[]': pid,
'type_list[]': 1 if pid else 0,
'is_frs_mask_list[]': 0
}
while True:
try:
logging.info('Rewinding thread {}, post {}'.format(tid, pid if pid else None))
            # Without an explicit timeout the Timeout handler below can never
            # fire; 30 s matches the retry sleep used on failure.
            response = session.post('https://tieba.baidu.com/mo/q/bawurecoverthread',
                                    headers=headers, data=data, cookies=cookies,
                                    timeout=30)
if response.status_code != 200:
raise ValueError
content = json.loads(response.content)
if int(content['no']):
logging.error('Rewind failed.')
logging.info('Response: {}'.format(content))
except requests.exceptions.Timeout:
print('Remote is not responding, sleep for 30s.')
time.sleep(30)
continue
except ValueError:
print('Rate limit exceeded, sleep for 30s.')
time.sleep(30)
continue
else:
break
def main():
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(message)s',
level=logging.INFO,
handlers=[
logging.FileHandler('rewinder.log'),
logging.StreamHandler()
])
with open('./rewind.txt', 'r', encoding='UTF-8') as f:
thread_list = f.readlines()
for thread in thread_list:
tid, pid, _ = thread.strip().split(' ')
rewind(int(tid), int(pid))
logging.info('All done! Have fun!')
if __name__ == '__main__':
main()
| 29.52439 | 140 | 0.551838 | 284 | 2,421 | 4.626761 | 0.528169 | 0.018265 | 0.015982 | 0.027397 | 0.045662 | 0.045662 | 0.045662 | 0 | 0 | 0 | 0 | 0.033898 | 0.293267 | 2,421 | 81 | 141 | 29.888889 | 0.734074 | 0.030566 | 0 | 0.059701 | 0 | 0.029851 | 0.307889 | 0.014499 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0 | 0.059701 | 0 | 0.089552 | 0.029851 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25fb886061f27d9e7c039a4007d5e1dff34ab864 | 2,360 | py | Python | LAB_06/process_turtle_follow.py | vhorvat/psr_FER | 18e05e127cc41a4102b3578ff5986575ab5e5540 | [
"MIT"
] | null | null | null | LAB_06/process_turtle_follow.py | vhorvat/psr_FER | 18e05e127cc41a4102b3578ff5986575ab5e5540 | [
"MIT"
] | null | null | null | LAB_06/process_turtle_follow.py | vhorvat/psr_FER | 18e05e127cc41a4102b3578ff5986575ab5e5540 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rosbag
import sys
import math
def getEuclidianDistanceOfTwoDots(x1, y1, x2, y2):
distance=math.sqrt(math.pow(x2-x1, 2)+math.pow(y2-y1, 2))
return distance
def getTotalTurtleDistance(xPoses, yPoses):
    overallDistance=0
    for i in range(len(xPoses)-1):
        # Use the function arguments rather than the global pose arrays so the
        # helper works for any trajectory it is given.
        distance=getEuclidianDistanceOfTwoDots(xPoses[i], yPoses[i], xPoses[i+1], yPoses[i+1])
        overallDistance=overallDistance+distance
    return overallDistance
def getTotalActiveTime(tArray):
time=tArray[len(tArray)-1]-tArray[0]
return time
def resolutionCorrection(x,y,resolutionX,resolutionY):
newX=x/resolutionX*800
newY=y/resolutionY*600
return newX, newY
def printFollowerData(distance, time, velocity, msg_counter, outbag_filename):
print(f"Follower turtle")
print(f" Covered distance: {round(distance,2)} m")
print(f" Average velocity: {round(velocity,2)} m/s")
print(f"Follower session duration: {round(time,2)} s")
print(f"Wrote {msg_counter} messages to {outbag_filename}")
return
if __name__ == "__main__":
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} input.bag')
sys.exit()
inbag_filename = sys.argv[1]
outbag_filename = "processed_follow.bag"
print(f'Processing input bagfile: {inbag_filename}')
msg_counter = 0
xPoseArray=[]
yPoseArray=[]
tPoseArray=[]
with rosbag.Bag(outbag_filename, 'w') as outbag:
for topic, msg, t in rosbag.Bag(inbag_filename, 'r').read_messages():
if topic=="/turtle1/pose":
xPoseArray.append(msg.x)
yPoseArray.append(msg.y)
tPoseArray.append(t.to_sec())
outbag.write("/follower/pose",msg,t)
msg_counter = msg_counter+1
if topic=="/mouse_position":
positionX,positionY=resolutionCorrection(msg.x,msg.y,1680,1050)
msg.x=round(positionX)
msg.y=round(positionY)
outbag.write("/mouse_positions_on_grandparents_computer",msg,t)
msg_counter = msg_counter+1
distance=getTotalTurtleDistance(xPoseArray,yPoseArray)
time=getTotalActiveTime(tPoseArray)
averageVelocity=distance/time
printFollowerData(distance, time, averageVelocity, msg_counter, outbag_filename) | 32.777778 | 110 | 0.662712 | 281 | 2,360 | 5.451957 | 0.359431 | 0.052219 | 0.037859 | 0.031332 | 0.032637 | 0.032637 | 0.032637 | 0 | 0 | 0 | 0 | 0.022246 | 0.219068 | 2,360 | 72 | 111 | 32.777778 | 0.809007 | 0.008898 | 0 | 0.036364 | 0 | 0 | 0.16289 | 0.017529 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.054545 | 0 | 0.236364 | 0.163636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25fde68ac183e9483da5a54efc052b95382967de | 16,381 | py | Python | rstoolbox/utils/tools.py | sesterhe/RosettaSilentToolbox | 010941b9b20974c61a86858bfb73d5913afc6849 | [
"MIT"
] | 14 | 2019-01-22T15:56:58.000Z | 2022-02-07T23:49:50.000Z | rstoolbox/utils/tools.py | sesterhe/RosettaSilentToolbox | 010941b9b20974c61a86858bfb73d5913afc6849 | [
"MIT"
] | null | null | null | rstoolbox/utils/tools.py | sesterhe/RosettaSilentToolbox | 010941b9b20974c61a86858bfb73d5913afc6849 | [
"MIT"
] | 2 | 2020-05-23T20:39:15.000Z | 2022-02-07T23:49:57.000Z | # -*- coding: utf-8 -*-
"""
.. codeauthor:: Jaume Bonet <jaume.bonet@gmail.com>
.. affiliation::
Laboratory of Protein Design and Immunoengineering <lpdi.epfl.ch>
Bruno Correia <bruno.correia@epfl.ch>
.. func:: format_Ipython
.. func:: use_qgrid
.. func:: add_column
.. func:: split_values
.. func:: make_rosetta_app_path
.. func:: execute_process
.. func:: report
.. func:: concat_fragments
"""
# Standard Libraries
import os
import copy
import textwrap
import subprocess # nosec
import shlex
import re
# External Libraries
import pandas as pd
from six import string_types
# This Library
__all__ = ['format_Ipython', 'highlight', 'use_qgrid', 'add_column', 'split_values', 'make_rosetta_app_path',
'execute_process', 'report', 'concat_fragments', 'split_dataframe_rows']
def format_Ipython():
"""Ensure ``monospace`` representation of :class:`~pandas.DataFrame`
in **Jupyter Notebooks**.
Just need to call it after importing the library.
.. note::
In order for this function to work, it is important that is the last
one in the Jupyter cell to be called.
:raises:
:ImportError: If [Ipython library](https://ipython.org/) is not present.
"""
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
pd.set_option("display.max_seq_items", 3)
pd.set_option("display.max_colwidth", -1)
from IPython.core.display import HTML
CSS = textwrap.dedent("""
table.dataframe, div.slick-cell {
font-family: monospace !important;
}
div.q-grid-toolbar > button:nth-of-type(1) {
visibility: hidden;
}
div.q-grid-toolbar > button:nth-of-type(2) {
visibility: hidden;
}
""")
return HTML('<style>{}</style>'.format(CSS))
def highlight( row, selection, color='yellow', text_color='black', bold=True, for_image=False ):
"""Highlight rows in **Jupyter Notebooks** that match the given index.
:param row: Row to which the formating is applied (directly provided by ``diplay.apply``)
:type row: :class:`~pandas.Series`
    :param selection: Indexes to highlight; rows whose index is in this set get the style.
:type selection: Union[:class:`~pandas.Index`, :class:`~pandas.DataFrame`]
:param str color: CSS defined color name for the background.
:param str text_color: CSS defined color name for the text.
:param bool bold: Make text bold.
    :param bool for_image: If :data:`True`, makes some format changes to better show in an image.
:return: CSS properties for the cells.
.. note::
Make the html output into an image with ``wkhtmltopdf`` and its python wrapper ``imgkit``.
``wkhtmltopdf`` installation depends on the operating system. While for linux it might work
with get-apt or similar, `here <http://macappstore.org/wkhtmltopdf/>`_ are some tips for the
macOS installation.
Then, one might make it with a call such as::
imgkit.from_string(df.style.apply(rstoolbox.utils.highlight, selection=topside,
for_image=True, axis=1).render(),
'out.png')
Take notice of the use of the ``for_image`` attribute. You can try to add more CSS rules with
:meth:`pandas.Styler.set_table_styles`. This seems to work properly for ``td`` and ``th`` but not for
``table`` or ``tr``.
"""
if isinstance(selection, (pd.Index, pd.DataFrame)):
if isinstance(selection, pd.DataFrame):
selection = selection.index
else:
raise NotImplementedError('Unknown selection type provided.')
txt = []
if for_image:
txt.extend(['font-family: monospace', 'text-align: right'])
if row.name in selection:
txt.extend(['background-color: {}'.format(color), 'color: {}'.format(text_color)])
if bold:
txt.append('font-weight: bold')
return [';'.join(txt), ] * len(row)
def use_qgrid( df, **kwargs ):
"""Create a ``QgridWidget`` object from the
`qgrid library <https://qgrid.readthedocs.io/en/latest/>`_ in
**Jupyter Notebooks**.
This allows the creation of a interactive table in a cell with a whole
lot of functionalities (see `qgrid documentation <https://qgrid.readthedocs.io/en/latest/>`_)
A part from the :class:`~pandas.DataFrame`, one can provide any named parameter that can
be applied to `qgrid.show_grid <https://qgrid.readthedocs.io/en/latest/#qgrid.show_grid>`_.
The only difference is that if there are more than 4 columns, the key ``forceFitColumns``
from the attribute ``grid_options`` is forced into :data:`False`.
The actual :class:`~pandas.DataFrame` can be retrieved back with::
qwdf = rstoolbox.utils.use_qgrid(df)
qdf = qwdf.get_changed_df()
# OR
qdf = qwdf.get_selected_df()
See more in the documentation for
`get_changed_df <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget.get_changed_df>`_
or `get_selected_df <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget.get_selected_df>`_.
Best used together with :func:`.format_Ipython`.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:return: `QgridWidget <https://qgrid.readthedocs.io/en/latest/#qgrid.QgridWidget>`_
:raises:
:ImportError: If `qgrid library <https://qgrid.readthedocs.io/en/latest/>`_
is not present.
"""
try:
import qgrid
except ImportError:
raise ImportError('qgrid (not mandatory on rstoolbox install) is necessary to execute this function.')
go = kwargs.pop('grid_options', {})
if df.shape[1] > 4:
go['forceFitColumns'] = False
return qgrid.show_grid(df, grid_options=go, **kwargs)
def add_column( df, name, value ):
"""Adds a new column to the DataFrame with the given value.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param str name: Name of the new column
:param value: Value that will be given to all rows of the new column (any type)
:return: :class:`~pandas.DataFrame` - The data container with the new column
"""
data = pd.Series([value] * df.shape[0])
data.index = df.index
return df.assign(_placeholder=data).rename(columns={"_placeholder": name})
def split_values( df, keys ):
"""Reshape the data to aide plotting of multiple comparable scores.
.. note::
This might change the data in a way that a decoy would be repeated
multiple times.
The dictionary that needs to be provided to split the data container has three
main keys:
#. ``keep``: Identity the columns to keep (they cannot be the ones that split). \
If not provided, all columns are kept.
#. ``split``: List with columns to split. Each position is a tuple. The first position \
is the name of the column to split and the rest will be the value names that will be \
used to identify it.
#. ``names``: Names of the columns. The first one will be the name of the column where the \
values will be assigned, the rest will be the names of the columns for the rest of the \
identifiers.
:param df: Data container.
:type df: :class:`~pandas.DataFrame`
:param dict keys: Selection of the columns to keep and split.
:return: Altered Data container.
.. rubric:: Example
.. ipython::
In [1]: from rstoolbox.io import parse_rosetta_file
...: from rstoolbox.utils import split_values
...: import pandas as pd
...: pd.set_option('display.width', 1000)
...: pd.set_option('display.max_columns', 500)
...: ifile = '../rstoolbox/tests/data/input_2seq.minisilent.gz'
...: scorel = ['score', 'GRMSD2Target', 'GRMSD2Template', 'LRMSD2Target',
...: 'LRMSDH2Target', 'LRMSDLH2Target', 'description']
...: df = parse_rosetta_file(ifile, {'scores': scorel})
...: df
In [2]: split1 = {'split': [('GRMSD2Target', 'grmsdTr'), ('GRMSD2Template', 'grmsdTp'),
...: ('LRMSD2Target', 'lrmsdTp'), ('LRMSDH2Target', 'lrmsdh2'),
...: ('LRMSDLH2Target', 'lrmsdlh2')],
...: 'names': ['rmsd', 'rmsd_type']}
...: split_values(df, split1)
In [3]: split2 = {'split': [('GRMSD2Target', 'global', 'target'),
...: ('GRMSD2Template', 'global', 'template'),
...: ('LRMSD2Target', 'local', 'target'),
...: ('LRMSDH2Target', 'local', 'helix2'),
...: ('LRMSDLH2Target', 'local', 'lhelix2')],
...: 'names': ['rmsd', 'rmsd_type', 'rmsd_target']}
...: split_values(df, split2)
"""
split_columns = [_[0] for _ in keys['split']]
if 'keep' not in keys:
keys.setdefault('keep', list(set(df.columns).difference(set(split_columns))))
keys['keep'].sort(key=lambda x: list(df.columns.values).index(x))
dataframes = []
for k in keys["split"]:
colIDs = copy.copy(keys["keep"])
colIDs.append(k[0])
wdf = df[colIDs]
wdf = wdf.assign(tmpkey1=pd.Series([k[1]] * len(wdf[colIDs[0]])).values).copy(True)
wdf = wdf.rename(index=str, columns={
k[0]: keys["names"][0],
"tmpkey1": keys["names"][1]
})
if ( len(k) > 2 ):
wdf = wdf.assign(tmpkey2=pd.Series([k[2]] * len(wdf[colIDs[0]])).values).copy(True)
wdf = wdf.rename(index=str, columns={
"tmpkey2": keys["names"][2]
})
dataframes.append(wdf)
return pd.concat(dataframes)
def split_dataframe_rows(df, column_selectors, row_delimiter=None):
"""Given a dataframe in which certain columns are lists, it splits these lists
making new rows in the :class:`~pandas.DataFrame` out of itself.
When multiple columns have lists of similar lengths, it assumes that same index
positions on the list go in the same new row.
:param df: Input data.
:type df: :class:`~pandas.DataFrame`
:param column_selectors: List of columns containg same-sized lists.
:type column_selectors: :func:`list` of :class:`str`
:param str row_delimiter: If provided, instead of list, it assumes data are strings
and uses the delimiter to make those strings into lists.
"""
# https://gist.github.com/jlln/338b4b0b55bd6984f883#gistcomment-2698588
# we need to keep track of the ordering of the columns
def _split_list_to_rows(row, row_accumulator, column_selector, row_delimiter):
split_rows = {}
max_split = 0
for column_selector in column_selectors:
if row_delimiter is not None:
split_row = row[column_selector].split(row_delimiter)
else:
split_row = copy.deepcopy(row[column_selector])
split_rows[column_selector] = split_row
if len(split_row) > max_split:
max_split = len(split_row)
for _ in range(max_split):
new_row = row.to_dict()
for column_selector in column_selectors:
try:
new_row[column_selector] = split_rows[column_selector].pop(0)
except IndexError:
new_row[column_selector] = ''
row_accumulator.append(new_row)
new_rows = []
df.apply(_split_list_to_rows, axis=1, args=(new_rows, column_selectors, row_delimiter))
new_df = pd.DataFrame(new_rows, columns=df.columns)
return new_df
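# A minimal usage sketch (hypothetical data, not from the original module):
# df = pd.DataFrame({'id': [1, 2], 'tags': ['a;b', 'c']})
# split_dataframe_rows(df, ['tags'], row_delimiter=';')
# -> three rows: (1, 'a'), (1, 'b'), (2, 'c')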
def make_rosetta_app_path( application ):
"""Provided the expected Rosetta application, add path and suffix.
.. note::
Depends on :ref:`rosetta.path <options>` and :ref:`rosetta.compilation <options>`,
if the ``filename`` does not exist.
:param str application: Name of the application to call.
:return: :class:`str`
:raise:
:IOError: If the final path created does not exist.
"""
import rstoolbox.core as core
path = core.get_option("rosetta", "path")
comp = core.get_option("rosetta", "compilation")
exe = os.path.join(path, "{0}.{1}".format(application, comp))
if not os.path.isfile(exe):
raise IOError("The expected Rosetta executable {0} is not found".format(exe))
return exe
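# Example (hypothetical option values): with rosetta.path='/opt/rosetta/bin'
# and rosetta.compilation='linuxgccrelease', make_rosetta_app_path('relax')
# returns '/opt/rosetta/bin/relax.linuxgccrelease'.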
def execute_process( command ): # pragma: no cover
"""Execute the provided command.
:param command: Command to be executed.
:type command: Union(:class:`str`, :func:`list`)
    :return: :class:`int` - execution status: 0 (OK) or another number if it failed.
"""
if isinstance(command, string_types):
command = shlex.split(command)
try:
return subprocess.call( command ) # nosec
except OSError as e:
print('OS', e)
return 1
except subprocess.CalledProcessError as e:
print('CPE', e)
return 1
def report( df ):
"""Cast **basic sequence count** into **pdb count** for the appropiate
columns.
:param df: |df_param|
:type df: :class:`.DesignFrame`
:return: :class:`.DesignFrame` - with renumbered columns.
:raise:
:AttributeError: |designframe_cast_error|
"""
from rstoolbox.components import DesignFrame
def translate_positions(row, seqID, shift):
if len(row.get_mutation_positions(seqID)) == 0:
return ''
mutations = [int(x) for x in row.get_mutation_positions(seqID).split(',')]
for i, _ in enumerate(mutations):
if isinstance(shift, int):
mutations[i] += (shift - 1)
else:
                # Index the shift list by the 1-based sequence position, as
                # translate_mutants does below.
                mutations[i] = shift[mutations[i] - 1]
return ','.join([str(x) for x in mutations])
def translate_mutants(row, seqID, shift):
if len(row.get_mutations(seqID)) == 0:
return ''
mutations = row.get_mutations(seqID).split(',')
for i, m in enumerate(mutations):
g = re.match(r'^(\w+)(\d+)(\w+)$', m)
if isinstance(shift, int):
position = int(g.group(2)) + (shift - 1)
else:
position = shift[int(g.group(2)) - 1]
mutations[i] = '{0}{1}{2}'.format(g.group(1), position, g.group(3))
return ','.join(mutations)
if not isinstance(df, pd.DataFrame):
raise AttributeError('Unexpected input attribute')
if not isinstance(df, DesignFrame):
return df
# Change mutation counts
chains = df.get_identified_mutants()
if len(chains) == 0: # remove if other thing than mutations are translated
return df
dcop = df.copy()
for c in chains:
shift = df.get_reference_shift(c)
if shift == 1:
continue
col = 'mutant_positions_{}'.format(c)
dcop[col] = dcop.apply(lambda row: translate_positions(row, c, shift), axis=1)
col = 'mutants_{}'.format(c)
dcop[col] = dcop.apply(lambda row: translate_mutants(row, c, shift), axis=1)
return dcop
def concat_fragments( fragment_list ):
"""Combine multiple :class:`.FragmentFrame`.
.. note::
Make sure to give an **ordered** ``fragment_list``, as the individual
:class:`.FragmentFrame` are processed one by one and the frame is
renumbered.
:param fragment_list: Command to be executed.
:type fragment_list: Union(:class:`.FragmentFrame`, :func:`list`)
:return: :class:`.FragmentFrame` - combined and renumbered.
"""
fragment_list_renum = []
for i, e in enumerate(fragment_list):
shiftset = e.iloc[0]['frame']
if i == 0:
newE = e.assign(renum_frame=e['frame'] - shiftset + 1)
else:
newE = e.assign(renum_frame=e['frame'] - shiftset + 1 + fragment_list_renum[i - 1]['renum_frame'].max())
fragment_list_renum.append(newE)
df = pd.concat(fragment_list_renum, ignore_index=True, sort=False)
df = df[['pdb', 'renum_frame', 'neighbors', 'neighbor', 'position', 'size', 'aa',
'sse', 'phi', 'psi', 'omega']].rename(columns={'renum_frame': 'frame'})
return df
| 38.273364 | 116 | 0.622856 | 2,082 | 16,381 | 4.804035 | 0.243996 | 0.006999 | 0.019996 | 0.016097 | 0.141072 | 0.116677 | 0.093081 | 0.070686 | 0.05139 | 0.031394 | 0 | 0.009029 | 0.249496 | 16,381 | 427 | 117 | 38.362998 | 0.804539 | 0.512057 | 0 | 0.143678 | 0 | 0 | 0.149402 | 0.011419 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074713 | false | 0 | 0.086207 | 0 | 0.264368 | 0.011494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25fe1f2c8cff99110b6f98d36969565d8bab1254 | 330 | py | Python | MS2-Advanced/masking.py | PNightOwlY/opencv-course | 71f59327a9c2226144c16aaa42157d32bd392cca | [
"MIT"
] | null | null | null | MS2-Advanced/masking.py | PNightOwlY/opencv-course | 71f59327a9c2226144c16aaa42157d32bd392cca | [
"MIT"
] | null | null | null | MS2-Advanced/masking.py | PNightOwlY/opencv-course | 71f59327a9c2226144c16aaa42157d32bd392cca | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
url = '../Resources/Photos/cats.jpg'
img = cv.imread(url)
cv.imshow('Cat', img)
blank = np.zeros(img.shape[:2], dtype='uint8')
mask = cv.circle(blank, (img.shape[1]//2, img.shape[0]//2),100,
255, -1)
masked = cv.bitwise_and(img, img, mask=mask)
cv.imshow("Masked" ,masked)
cv.waitKey(0) | 18.333333 | 63 | 0.663636 | 59 | 330 | 3.694915 | 0.525424 | 0.110092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052083 | 0.127273 | 330 | 18 | 64 | 18.333333 | 0.704861 | 0 | 0 | 0 | 0 | 0 | 0.126888 | 0.084592 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
25fe70c6716bab3b914801edf65e0cf06f5c0d9b | 1,990 | py | Python | essay_scoring/dataloader.py | tushar117/Transformer-Models-for-Text-Coherence-Assessment | 041c6f00b606550461423ffff945f84dbfce6e3b | [
"MIT"
] | 6 | 2022-02-27T08:24:04.000Z | 2022-03-22T09:00:56.000Z | essay_scoring/dataloader.py | tushar117/Transformer-Models-for-Text-Coherence-Assessment | 041c6f00b606550461423ffff945f84dbfce6e3b | [
"MIT"
] | 2 | 2022-03-02T18:50:15.000Z | 2022-03-04T06:04:19.000Z | essay_scoring/dataloader.py | tushar117/Transformer-Models-for-Text-Coherence-Assessment | 041c6f00b606550461423ffff945f84dbfce6e3b | [
"MIT"
] | null | null | null | import torch
import json
import os, sys
import linecache
from torch.utils.data import DataLoader, TensorDataset, Dataset
# required to access the python modules present in project directory
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
# now we can import the all modules present in project folder
from utils.common import load_file
class TextDataset(Dataset):
def __init__(self, filename, float_label):
self.filename = filename
self.dataset = load_file(filename)
self.float_label = float_label
def _add_if_present(self, key, json_data, return_list, dtype):
if key in json_data:
return_list.append(torch.tensor(json_data[key], dtype=dtype))
def preprocess(self, json_data):
return_list = []
# prompt_id for identifying different prompt types
# d_id is added for identifying the task in multi-task-learning setup
key_order = ['prompt_id', 'd_id', 'essay_id', 'doc_a', 'doc_a_mask', 'doc_a_facts', 'doc_a_facts_mask', 'doc_a_facts_count',
'doc_b', 'doc_b_mask', 'doc_b_facts', 'doc_b_facts_mask', 'doc_b_facts_count', 'coherence_vector',
'label']
for key in key_order:
dtype = torch.long
if key == 'label' and self.float_label or key == "coherence_vector":
dtype = torch.float
self._add_if_present(key, json_data, return_list, dtype)
return tuple(return_list)
def __getitem__(self, idx):
data_instance = self.dataset[idx]
return self.preprocess(data_instance)
def __len__(self):
return len(self.dataset)
def get_dataset_loaders(filename, batch_size=8, num_threads=0, float_label=True):
dataset = TextDataset(filename, float_label=float_label)
input_dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_threads)
return input_dataloader
| 36.851852 | 133 | 0.693467 | 272 | 1,990 | 4.764706 | 0.345588 | 0.054012 | 0.04321 | 0.055556 | 0.040123 | 0.040123 | 0 | 0 | 0 | 0 | 0 | 0.001283 | 0.216583 | 1,990 | 53 | 134 | 37.54717 | 0.830019 | 0.122111 | 0 | 0 | 0 | 0 | 0.103963 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162162 | false | 0 | 0.162162 | 0.027027 | 0.459459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d30240c25d80ebae361eb9115fcaca91e584c3a4 | 5,305 | py | Python | oblio.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | 2 | 2016-03-20T03:03:18.000Z | 2021-02-15T22:23:44.000Z | oblio.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | null | null | null | oblio.py | billvb/oblio-game | c1c95b9d7bffe4e2841a978e4338cf72c38174ac | [
"MIT"
] | null | null | null | """
oblio.py: A framework to collect and trade algorithms with your friends to play Oblio.
A talented and trained human averages about 12-15 guesses before converging
on the solution. What can your algorithm do?
To play Oblio:
- There exists a secret 4-digit number in which no two digits are the same.
(e.g., "1 2 3 4" or "0 5 1 2". "9 9 9 9" is NOT valid)
  - Whenever you submit a guess of this secret number, you get in return a
2-tuple in the form (X, Y). Y indicates the number of digits within
your guess that are in the correct position, and X indicates the number
of digits you guessed correctly, but are in the wrong position.
- Having the result (0, 4) implies you've won and guessed the secret number
correctly.
EXAMPLES:
When the secret number is "3 9 4 5":
- If you guess "1 2 4 5", you'll get back (0, 2), because "4" and "5" are
in the hidden number, and also in the proper spot.
- If you guess "5 4 9 3", you'll get back (4, 0), as all the digits in
your guess are in the hidden number, but none in the correct spot.
- If you guess "0 1 2 8", you'll get back (0, 0). Since none of the digits
in your guess are in the secret number.
- If your guess is "2 8 9 1", you'll get back (1, 0), implying you have
one correct digit in your guess but it's not in the correct spot. You'll
get this a lot and it's annoying.
"""
from __future__ import print_function
import sys
import unittest
import random
import json
from algorithms.utils import OblioTuple
from algorithms.utils import MAX_GUESS
from algorithms.utils import TUPLE_SIZE
from algorithms.utils import DIGIT_BASE
import algorithms
__credits__ = ["beer", "no internet access", "9 hour long-haul flight"]
class OblioContext(object):
""" Represents an oblio engine that is holding the secret number """
def __init__(self, algorithm, hidden_tuple):
assert isinstance(hidden_tuple, OblioTuple)
self.algorithm = algorithm
self.hidden_tuple = hidden_tuple
self.attempts = 0
def verify(self, oblio_tuple):
"""Returns (not in correct place, in correct place)"""
assert isinstance(oblio_tuple, OblioTuple)
cnt_correct = sum([1 if self.hidden_tuple[i] == oblio_tuple[i] \
else 0 for i in range(0, TUPLE_SIZE)])
cnt_misplaced = sum([1 if oblio_tuple[i] in self.hidden_tuple and \
oblio_tuple[i] != self.hidden_tuple[i] else 0 for i in range(0, TUPLE_SIZE)])
self.attempts += 1
return (cnt_misplaced, cnt_correct)
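    # Worked example (mirrors the module docstring): with hidden tuple
    # (3, 9, 4, 5), guessing (1, 2, 4, 5) -> (0, 2) since "4" and "5" are in
    # place; guessing (5, 4, 9, 3) -> (4, 0) since all digits are misplaced.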
def solve(self, print_response=False):
for i in xrange(0, MAX_GUESS):
guess = self.algorithm.produce()
response = self.verify(guess)
if response == (0, TUPLE_SIZE):
return self.attempts
else:
if print_response:
print('Guess %3d: %s --> %s' % (i, guess, response))
self.algorithm.put(guess, response)
else:
# There are fewer than 10,000 possibilities,
# so if your algorithm cannot get the correct solution
# in 10,000 tries, you[r solution] sucks.
raise ValueError("Sucky algorithm")
class UnitTests(unittest.TestCase):
def test_verify(self):
t0 = OblioTuple((0, 1, 2, 3))
t1 = OblioTuple((3, 2, 1, 0))
t2 = OblioTuple((6, 7, 8, 9))
t3 = OblioTuple((3, 1, 8, 9))
c = OblioContext(None, OblioTuple((3, 2, 1, 0)))
self.assertEqual(c.verify(t0), (4, 0))
self.assertEqual(c.verify(t1), (0, 4))
self.assertEqual(c.verify(t2), (0, 0))
self.assertEqual(c.verify(t3), (1, 1))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Play Oblio')
parser.add_argument('subcommand', type=str, nargs='+',
help='play, test, or fight')
args = parser.parse_args()
if args.subcommand[0] == 'play':
secret_tuple = OblioTuple.get_random()
context = OblioContext(algorithms.ManualAlg(), secret_tuple)
context.solve(print_response=True)
elif args.subcommand[0] == 'test':
suite = unittest.TestLoader().loadTestsFromTestCase(UnitTests)
unittest.TextTestRunner(verbosity=2).run(suite)
elif args.subcommand[0] == 'fight':
ngames = 100
alg1, alg2 = args.subcommand[1:]
l_contender = getattr(algorithms, alg1)
r_contender = getattr(algorithms, alg2)
l_wins, r_wins = 0, 0
for game_cnt in xrange(0, ngames):
secret_tuple = OblioTuple.get_random()
l_cnt = OblioContext(l_contender(), secret_tuple).solve()
r_cnt = OblioContext(r_contender(), secret_tuple).solve()
if l_cnt < r_cnt:
l_wins += 1
elif l_cnt > r_cnt:
r_wins += 1
        # Divide by the total game count; after the loop game_cnt == ngames - 1,
        # which would skew the reported margin.
        winner, margin = (l_contender, float(l_wins)/ngames) \
            if l_wins > r_wins else (r_contender, float(r_wins)/ngames)
print(json.dumps({'winner': str(winner), 'margin': '%2.1f' % (margin * 100)},
indent=4))
else:
print('Unknown subcommand: ', args.subcommand, file=sys.stderr)
sys.exit(1)
| 35.13245 | 89 | 0.626202 | 760 | 5,305 | 4.265789 | 0.296053 | 0.01388 | 0.012338 | 0.014806 | 0.119062 | 0.037014 | 0.037014 | 0.037014 | 0.019741 | 0.019741 | 0 | 0.034161 | 0.271631 | 5,305 | 150 | 90 | 35.366667 | 0.804865 | 0.316494 | 0 | 0.060976 | 0 | 0 | 0.049695 | 0 | 0 | 0 | 0 | 0 | 0.073171 | 1 | 0.04878 | false | 0 | 0.134146 | 0 | 0.231707 | 0.085366 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3030e29ce0e4ec52ab9dc86357738a3510047e0 | 2,597 | py | Python | FaceppApi.py | qzylalala/FaceScoring | 2e18268e997060f1be0a4eb86aa9893823a9e2b4 | [
"MIT"
] | null | null | null | FaceppApi.py | qzylalala/FaceScoring | 2e18268e997060f1be0a4eb86aa9893823a9e2b4 | [
"MIT"
] | null | null | null | FaceppApi.py | qzylalala/FaceScoring | 2e18268e997060f1be0a4eb86aa9893823a9e2b4 | [
"MIT"
] | null | null | null | # -*-coding:utf-8-*-
'''
@author : qzylalala
@file : FaceppApi.py
@time : 2020-09-07 19:04
'''
import urllib.request
import urllib.error
import json
import time
http_url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
key = "xxx"
secret = "xxx"
# use your own key and secret key
#---------------------------------------------------------------------------------------------------#
def get_info(file_path):
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_key')
data.append(key)
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'api_secret')
data.append(secret)
data.append('--%s' % boundary)
fr = open(file_path, 'rb')
data.append('Content-Disposition: form-data; name="%s"; filename=" "' % 'image_file')
data.append('Content-Type: %s\r\n' % 'application/octet-stream')
data.append(fr.read())
fr.close()
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_landmark')
data.append('1')
data.append('--%s' % boundary)
data.append('Content-Disposition: form-data; name="%s"\r\n' % 'return_attributes')
data.append(
"gender,age,smiling,headpose,facequality,blur,eyestatus,emotion,ethnicity,beauty,mouthstatus,eyegaze,skinstatus")
data.append('--%s--\r\n' % boundary)
for i, d in enumerate(data):
if isinstance(d, str):
data[i] = d.encode('utf-8')
http_body = b'\r\n'.join(data)
# build http request
req = urllib.request.Request(url=http_url, data=http_body)
# header
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
# post data to server
resp = urllib.request.urlopen(req, timeout=5)
# get response
qrcont = resp.read()
# if you want to load as json, you should decode first,
# for example: json.loads(qrount.decode('utf-8'))
face_attr = json.loads(qrcont.decode('utf-8'))
        # Avoid shadowing the built-in ``dict``.
        attrs = face_attr["faces"][0]['attributes']
        # print(attrs)
        print(attrs['gender']['value'])
        print(attrs['age']['value'])
        print(attrs['beauty']['male_score'] + 10)
        print(attrs['beauty']['female_score'] + 10)
# 'emotion': {'anger': 0.19, 'disgust': 0.017, 'fear': 0.003, 'happiness': 0.003, 'neutral': 99.532, 'sadness': 0.25, 'surprise': 0.005}
except urllib.error.HTTPError as e:
print(e.read().decode('utf-8')) | 36.577465 | 144 | 0.588756 | 342 | 2,597 | 4.421053 | 0.423977 | 0.112434 | 0.043651 | 0.062831 | 0.203042 | 0.203042 | 0.203042 | 0.203042 | 0.175926 | 0.175926 | 0 | 0.026528 | 0.187139 | 2,597 | 71 | 145 | 36.577465 | 0.689721 | 0.203697 | 0 | 0.106383 | 0 | 0.021277 | 0.330732 | 0.065366 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.085106 | 0 | 0.106383 | 0.106383 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
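# For reference, an equivalent call using the third-party ``requests`` library
# (assuming it is installed). The helper name is hypothetical; the field names
# match the Face++ v3 detect API used above.
import requests
def get_info_requests(file_path):
    with open(file_path, 'rb') as fr:
        resp = requests.post(http_url,
                             data={'api_key': key, 'api_secret': secret,
                                   'return_landmark': 1,
                                   'return_attributes': 'gender,age,beauty'},
                             files={'image_file': fr}, timeout=5)
    return resp.json()['faces'][0]['attributes']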
d30337653c964374efa52a98be6cae3742cf83f3 | 5,818 | py | Python | project1/pacmanAgents.py | Plastix/CSC-320 | 4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6 | [
"MIT"
] | null | null | null | project1/pacmanAgents.py | Plastix/CSC-320 | 4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6 | [
"MIT"
] | null | null | null | project1/pacmanAgents.py | Plastix/CSC-320 | 4c8802d0ceeffbea77bd1ef5f21d27d4de80dbb6 | [
"MIT"
] | null | null | null | # pacmanAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
#
# Some modifications were made to this file by Kristina Striegnitz
# (striegnk@union.edu).
from pacman import Directions
from game import Agent
import random
DIRECTION_LIST = [Directions.WEST, Directions.EAST, Directions.NORTH, Directions.SOUTH]
class GoWestAgent(Agent):
"""An agent that goes West until it can't."""
def getAction(self, game_state):
"The agent receives a GameState (defined in pacman.py)."
if Directions.WEST in game_state.getLegalPacmanActions():
return Directions.WEST
else:
return Directions.STOP
class LeftTurnAgent(Agent):
"""An agent that turns left at every opportunity"""
def getAction(self, game_state):
legal = game_state.getLegalPacmanActions()
current = game_state.getPacmanState().getDirection()
if current == Directions.STOP:
current = Directions.NORTH
if Directions.LEFT[current] in legal:
return Directions.LEFT[current]
elif current in legal:
return current
elif Directions.RIGHT[current] in legal:
return Directions.RIGHT[current]
elif Directions.REVERSE[current] in legal:
return Directions.REVERSE[current]
else:
return Directions.STOP
class RectangularRoomCleaner(Agent):
"""
A simple-reflex agent that will east an entire rectangular room. Assumes that there are no obstacles.
"""
def getAction(self, game_state):
legal = game_state.getLegalPacmanActions()
current = game_state.getPacmanState().getDirection()
left = Directions.LEFT[current]
right = Directions.RIGHT[current]
if current == Directions.STOP:
moves = list(filter(lambda move: move in legal, DIRECTION_LIST))
current = moves[0] if moves else current
if current == Directions.SOUTH:
# Turn east after hitting west wall
if left in legal and right not in legal:
return left
# Turn west after hitting east wall
elif left not in legal and right in legal:
return right
if current not in legal:
# Always turn south when hitting a wall
if left in legal and right in legal:
if current == Directions.WEST:
return left
else:
return right
# Turn or reverse when hitting a corner
elif left in legal:
return left
elif right in legal:
return right
return Directions.REVERSE[current]
else:
# Go straight if possible
return current
class RandomizedRoomCleaner(Agent):
"""
A randomized simple-reflex agent. Continues straight with a 50% chance as long as going straight is legal. Else,
it randomly picks between the remaining legal moves without stopping.
"""
def getAction(self, game_state):
legal = game_state.getLegalPacmanActions()
legal.remove(Directions.STOP)
# Stop if we have no moves
if not legal:
return Directions.STOP
# Continue straight with 50% chance as long as it is legal
current = game_state.getPacmanState().getDirection()
if current != Directions.STOP and bool(random.getrandbits(1)) and current in legal:
return current
# Randomly choose between legal moves. We will have at least one!
return random.choice(legal)
class ModelBasedRoomCleaner(Agent):
"""
A model-based reflex agent that traverses the room in a depth-first pattern.
"""
movements_x = {
Directions.NORTH: 0,
Directions.SOUTH: 0,
Directions.EAST: 1,
Directions.WEST: -1,
Directions.STOP: 0
}
movements_y = {
Directions.NORTH: 1,
Directions.SOUTH: -1,
Directions.EAST: 0,
Directions.WEST: 0,
Directions.STOP: 0
}
def __init__(self, index=0):
super().__init__(index)
self.x = 0
self.y = 0
self.explored = set()
self.moves = []
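        # Model state: (self.x, self.y) is the position relative to the start,
        # `explored` holds every visited cell, and `moves` is the stack of
        # forward moves used for depth-first backtracking.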
def getAction(self, game_state):
legal = game_state.getLegalPacmanActions()
legal.remove(Directions.STOP)
unexplored = list(filter(lambda move: not self.is_explored(move), legal))
if unexplored:
action = unexplored.pop()
self.update_model(action)
else:
action = Directions.REVERSE[self.moves.pop()]
self.update_model(action, backtrack=True)
return action
def update_model(self, action, backtrack=False):
self.explored.add((self.x, self.y))
self.x += ModelBasedRoomCleaner.movements_x[action]
self.y += ModelBasedRoomCleaner.movements_y[action]
if not backtrack:
self.moves.append(action)
def is_explored(self, action):
x = self.x + ModelBasedRoomCleaner.movements_x[action]
y = self.y + ModelBasedRoomCleaner.movements_y[action]
return (x, y) in self.explored
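# Worked example of the model bookkeeping (hypothetical, for illustration only):
#   agent = ModelBasedRoomCleaner()
#   agent.update_model(Directions.EAST)   # explored = {(0, 0)}, position (1, 0)
#   agent.is_explored(Directions.WEST)    # True -- (0, 0) was already visited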
| 32.870056 | 116 | 0.636473 | 687 | 5,818 | 5.340611 | 0.292576 | 0.028618 | 0.031889 | 0.027255 | 0.299264 | 0.193241 | 0.139548 | 0.12592 | 0.12592 | 0.101935 | 0 | 0.005998 | 0.283603 | 5,818 | 176 | 117 | 33.056818 | 0.87428 | 0.271227 | 0 | 0.349057 | 0 | 0 | 0.012821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.028302 | 0 | 0.358491 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d304caea20565e073d1d1120d20a6ebff5de1e6c | 8,898 | py | Python | MNAC/tk.py | yunruse/MNAC | 2c7a41c1c2e9b69caa80e9ae8018301ce214514c | [
"CC-BY-4.0"
] | 1 | 2018-07-02T10:07:04.000Z | 2018-07-02T10:07:04.000Z | MNAC/tk.py | yunruse/MNAC | 2c7a41c1c2e9b69caa80e9ae8018301ce214514c | [
"CC-BY-4.0"
] | null | null | null | MNAC/tk.py | yunruse/MNAC | 2c7a41c1c2e9b69caa80e9ae8018301ce214514c | [
"CC-BY-4.0"
] | null | null | null | '''
Tkinter implementation of Meta Noughts and Crosses.
Requires Python >3.6, tkinter and mnac.
1.0: release
1.1: keyboard indicators / keyboard controls are like numpad
1.2: new status menu, controls, help menu
1.3: better mouse handling
1.4: UI tweaks and touchups
'''
import random
import os
import tkinter as tk
import numpy as np
import mnac
import render
__version__ = '1.4'
TITLE = f'TkMNAC v{__version__} / yunru.se'
class CanvasRender(render.Render):
'''Tkinter Canvas-based renderer.'''
font = 'Segoe UI'
def __init__(self, app, theme='light'):
self.app = app
self.canvas = app.canvas
self.coordinates = {}
self.theme = render.THEMES[theme]
self.error = False
def draw(self):
self.game = self.app.game
self.error = self.app.error
# determine colours and status
players = [
('gray', 'Unknown error', 'Unknown error'),
('nought', 'Noughts', 'Noughts wins!'),
('cross', 'Crosses', 'Crosses wins!'),
('gray', 'Neutral', "It's a draw...")
]
code, name, _ = players[self.game.player]
titlefill = self.theme[code]['light']
if self.error:
text = self.error
elif self.game.winner:
text = players[self.game.winner][2]
else:
statuses = {
'begin': 'grid to start in',
'inner': 'cell to play in',
'outer': 'grid to send to',
}
text = '{}, pick a {}'.format(name, statuses[self.game.state])
# get canvas details
w, h, self.size, self.topleft, header_height = self.app.coordinate()
if w > h:
self.topleft += ((w - h) / 2, 0)
else:
self.topleft += (0, (h - w) / 2)
self.canvas.config(bg=self.background())
self.canvas.delete('status', 'backing', 'mark', 'play')
self.canvas.tag_unbind('backing', '<Button-1>')
font_size = int(self.size / 32)
glyph_size = int(font_size * 1.5)
leftText = 'tab: help'
if self.app.showHelp:
text = ''
leftText = 'tab: back to game'
header = (
lambda x, y=header_height / 2, fill=titlefill, **kw:
self.canvas.create_text(
x, y, fill=fill,
tags='status', font=(self.font, font_size), **kw))
header(self.topleft[0] + 5, anchor='w', text=leftText)
header(self.topleft[0] + self.size/2, anchor='center', text=text)
def draw_glyph(fromRight, glyph, fill): return self.canvas.create_polygon(
*(glyph * glyph_size + (
self.topleft[0] + self.size + fromRight * glyph_size,
(header_height - glyph_size) / 2 + 2)).flatten(),
width=0, fill=fill, tags='status')
render.Render.draw(self)
# draw beginning help in middle cell
if self.app.showHelp:
self.canvas.create_rectangle(
*self.topleft, *(self.topleft + self.size),
width=0, fill=titlefill, tags='status', stipple="gray50")
for i, text in enumerate((
'The board is 9 grids each with 9 cells. Play to win',
'a grid, and win the larger grids to win the game.',
'',
                'Place a tile in a cell and you will put your opponent',
'into the equivalent grid. For example, if you are in the',
'top left grid and play the bottom cell, your opponent',
'will have to play in the bottom grid, and so on.',
'',
'One exception is that you may never send your',
'opponent to your own grid, or one that is captured -',
'tiles that would do so are marked as green, and are',
"'teleporters' allowing you to choose where to send",
'your opponent. As grids become taken, there is less',
'choice, so be careful to tactically set up traps!',
'',
'CONTROLS:',
'Control-R: Restart the game',
'Keys 1-9 and mouse/touch: Play in cell / grid'
), start=1):
header(w/2, self.topleft[1] + i * 1.5 *
font_size, fill='black', text=text)
def cell(self, grid, cell, tl, size, fill):
tl += self.topleft
coords = (*tl, *(tl+size))
backing = self.canvas.create_rectangle(
*coords, width=0, fill=fill, tags='backing')
self.coordinates[grid+1, cell+1] = coords
def ellipse(self, coords, outline, width):
coords += (*self.topleft, *self.topleft)
self.canvas.create_oval(
*coords, width=width, outline=outline, tags='mark')
def polygon(self, coords, fill):
coords += self.topleft
self.canvas.create_polygon(
*coords.flatten(), fill=fill, width=0, tags='mark')
def text(self, coords, isLarge, text, size, fill):
coords += self.topleft
# this is arbitrary and needs a lot more playtesting :(
if os.name == 'posix':
fiddle = (2/9, -3/9) if isLarge else (-2/9, -4/9)
else:
fiddle = (1/9, -7/6) if isLarge else (-2/9, -2/3)
coords += np.array(fiddle) * self.size / (9 + 2 * self.SEPARATION)
self.canvas.create_text(
*coords, text=text, fill=fill, font=(self.font, size), anchor='nw', tags='play')
class UIMNAC(tk.Tk):
def __init__(self, **kwargs):
'''Initialise frame. Set players to None or a number.'''
tk.Tk.__init__(self)
self.title(TITLE)
self.minsize(400, 424)
self.columnconfigure(1, weight=1)
self.rowconfigure(1, weight=1)
self.canvas = tk.Canvas(
self, height=0, width=0,
bd=0, highlightthickness=0, relief='ridge')
self.canvas.grid(row=1, column=1, columnspan=3, sticky='news')
self.render = CanvasRender(self)
self.bind_all('<Configure>', self.redraw)
self.bind_all('<Control-r>', self.restart)
self.bind_all('<Tab>', self.toggleHelp)
self.bind_all('<Escape>', self.clearError)
self.canvas.bind('<Button-1>', self.onClick)
def callbacker(i): return lambda *event: self.play(mnac.numpad(i))
for i in range(1, 10):
self.bind_all(str(i), callbacker(i))
self.restart()
def restart(self, *event):
self.showHelp = False
self.error = ''
self.game = mnac.MNAC(middleStart=False)
self.redraw()
def clearError(self, *event):
self.error = ''
self.redraw()
def toggleHelp(self, *event):
self.showHelp = not self.showHelp
self.redraw()
def coordinate(self):
w, h = self.canvas.winfo_width(), self.canvas.winfo_height()
header_height = h / 18
h -= header_height
s = min(w, h)
tl = np.array((0, header_height), dtype=float)
return w, h, s, tl, header_height
def redraw(self, *event):
self.render.draw()
def onClick(self, event):
if self.game.winner:
return
w, h, s, tl, header_height = self.coordinate()
x = (event.x - tl[0]) * 9 / s
if (0 < event.y < header_height) and (0 < x < 9):
# status bar click
if x < 2 or self.showHelp:
self.toggleHelp()
else:
self.clearError()
# Iterate through all coordinates the renderer claims
# each cell was at
for coord, bounds in self.render.coordinates.items():
x1, y1, x2, y2 = bounds
if x1 <= event.x <= x2 and y1 <= event.y <= y2:
grid, cell = coord
break
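        # for/else: the else branch below runs only when the loop finished
        # without finding a matching cell (i.e. no `break` was hit).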
else:
return
if self.game.state in ('outer', 'begin'):
self.play(grid)
elif self.game.state == 'inner':
if grid == (self.game.grid + 1):
self.play(cell)
else:
self.play(grid)
def play(self, index):
if self.game.winner:
return
self.error = ''
try:
self.game.play(index)
except mnac.MoveError as e:
self.error = mnac.ERRORS[e.args[0]]
self.redraw()
def test_turn(self, *event):
'''debug: play random moves'''
choices = list(range(9))
random.shuffle(choices)
for i in choices:
try:
self.game.play(i + 1)
break
except mnac.MoveError:
continue
self.render.draw()
if not self.game.winner:
self.after(500, self.test_turn)
if __name__ == '__main__':
self = UIMNAC()
self.mainloop()
| 31.778571 | 92 | 0.53675 | 1,103 | 8,898 | 4.271985 | 0.274705 | 0.033956 | 0.023769 | 0.007216 | 0.059635 | 0.009762 | 0.009762 | 0 | 0 | 0 | 0 | 0.018957 | 0.336031 | 8,898 | 279 | 93 | 31.892473 | 0.778605 | 0.066757 | 0 | 0.172414 | 0 | 0 | 0.136314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083744 | false | 0 | 0.029557 | 0.009852 | 0.147783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3077603bb2759da9e55fab6315ffbbd7ba20959 | 1,974 | py | Python | Enemy.py | AleksCoolS/DungeonProject2 | c91876f9ce131cb76fde7222f949868ce844641b | [
"Unlicense"
] | null | null | null | Enemy.py | AleksCoolS/DungeonProject2 | c91876f9ce131cb76fde7222f949868ce844641b | [
"Unlicense"
] | null | null | null | Enemy.py | AleksCoolS/DungeonProject2 | c91876f9ce131cb76fde7222f949868ce844641b | [
"Unlicense"
] | null | null | null | from GameObjects import *
from settings import *
class enemy(Creature):
def __init__(self, position, textureSize, textureNames, textureParams, health, end):
super().__init__(position, textureSize, textureNames, textureParams, health)
self.end = end
self.path = [self.x, self.end]
self.direction = 'right'
def update(self):
self.new_move()
self.animate()
    # change position and movement direction if needed
def move(self):
if self.velocity > 0:
if self.x + self.velocity < self.path[1]:
self.x += self.velocity
else:
self.velocity *= -1
self.walkCount = 0
else:
            if self.x + self.velocity > self.path[0]:
self.x += self.velocity
else:
self.velocity *= -1
self.walkCount = 0
def new_move(self):
#self.acc = vec(0, 0)
# check for reverse movement
if self.pos.x > self.path[1]:
self.left = True
self.right = False
self.standing = False
self.direction = 'left'
if self.pos.x <= self.path[0]:
self.left = False
self.right = True
self.standing = False
self.direction = 'right'
if self.direction == 'right':
self.acc.x = ENEMY_ACC
else:
self.acc.x = -ENEMY_ACC
# apply friction
self.acc.x += self.vel.x * PLAYER_FRICTION
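        # (assumes PLAYER_FRICTION is defined negative in settings.py, so this
        # term opposes the current velocity)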
# equations of motion
self.vel += self.acc
if abs(self.vel.x) < 0.1:
self.vel.x = 0
self.pos += self.vel + 0.5 * self.acc
self.x = self.pos.x
self.y = self.pos.y
self.rect.x = self.pos.x
self.rect.y = self.pos.y
def hit(self):
if self.health > 0:
self.health -= 1
print('hit')
else:
print('die')
#print('hit')
| 27.802817 | 88 | 0.508105 | 239 | 1,974 | 4.142259 | 0.251046 | 0.060606 | 0.054545 | 0.068687 | 0.408081 | 0.187879 | 0.151515 | 0.09697 | 0.09697 | 0.09697 | 0 | 0.014694 | 0.379433 | 1,974 | 70 | 89 | 28.2 | 0.793469 | 0.069909 | 0 | 0.277778 | 0 | 0 | 0.013661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092593 | false | 0 | 0.037037 | 0 | 0.148148 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d3098d8f260b9bb738a94d1506194ece68a54f8c | 26,353 | py | Python | gamd/langevin/base_integrator.py | lillgroup/GaMD-OpenMM | 4b00eb8feb327cd8db51c28c9b0b246dee12bf5a | [
"MIT"
] | null | null | null | gamd/langevin/base_integrator.py | lillgroup/GaMD-OpenMM | 4b00eb8feb327cd8db51c28c9b0b246dee12bf5a | [
"MIT"
] | null | null | null | gamd/langevin/base_integrator.py | lillgroup/GaMD-OpenMM | 4b00eb8feb327cd8db51c28c9b0b246dee12bf5a | [
"MIT"
] | null | null | null | """
gamd.py: Implements the GaMD integration method.
Portions copyright (c) 2020 University of Kansas
Authors: Matthew Copeland, Yinglong Miao
Contributors: Lane Votapka
"""
from __future__ import absolute_import
__author__ = "Matthew Copeland"
__version__ = "1.0"
from simtk import unit as unit
from abc import ABCMeta, ABC
from abc import abstractmethod
from ..stage_integrator import GamdStageIntegrator
from ..stage_integrator import BoostType
class GamdLangevinIntegrator(GamdStageIntegrator, ABC):
def __init__(self, system_group, group_name,
dt=2.0 * unit.femtoseconds, ntcmdprep=200000, ntcmd=1000000,
ntebprep=200000, nteb=1000000, nstlim=3000000, ntave=50000,
collision_rate=1.0 / unit.picoseconds,
temperature=298.15 * unit.kelvin,
restart_filename=None):
"""
Parameters
----------
:param dt: The Amount of time between each time step.
:param ntcmdprep: The number of conventional MD steps for system equilibration.
:param ntcmd: The total number of conventional MD steps (including ntcmdprep). (must be multiple of ntave)
:param ntebprep: The number of GaMD pre-equilibration steps.
:param nteb: The number of GaMD equilibration steps (including ntebprep). (must be a multiple of ntave)
:param nstlim: The total number of simulation steps.
:param ntave: The number of steps used to smooth the average and sigma of potential energy (corresponds to
a running average window size).
:param collision_rate: Collision rate (gamma) compatible with 1/picoseconds, default: 1.0/unit.picoseconds
:param temperature: "Bath" temperature value compatible with units.kelvin, default: 298.15*unit.kelvin
:param restart_filename: The file name of the restart file. (default=None indicates new simulation.)
"""
self.collision_rate = collision_rate # gamma
self.temperature = temperature
self.restart_filename = restart_filename
self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
self.thermal_energy = self.kB * self.temperature # kT
#self.current_velocity_component = numpy.exp(-self.collision_rate * dt) # a
#self.random_velocity_component = numpy.sqrt(1 - numpy.exp(- 2 * self.collision_rate * dt)) # b
#
# Generally, I'm trying to put variables here that I know will be used across all implementations WITHOUT the
# name being overloaded to have another meaning for an object that inherits from this base class. No guarantee
# I got it perfectly correct, but that is the idea.
#
self.global_variables = {"thermal_energy": self.thermal_energy,
#"current_velocity_component": self.current_velocity_component,
#"random_velocity_component": self.random_velocity_component,
"collision_rate": self.collision_rate,
"vscale": 0.0, "fscale": 0.0,
"noisescale": 0.0
}
self.per_dof_variables = {"sigma": 0}
#
# We need to run our super classes constructor last, since it's going to execute our other methods, which
# have dependencies on our variables above being setup.
#
super(GamdLangevinIntegrator, self).__init__(system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb,
nstlim, ntave)
def _add_common_variables(self):
        for key, value in self.global_variables.items():
            self.addGlobalVariable(key, value)
        for key, value in self.per_dof_variables.items():
            self.addPerDofVariable(key, value)
@abstractmethod
def _add_conventional_md_pre_calc_step(self): # O Step
raise NotImplementedError("must implement _add_conventional_md_pre_calc_step")
'''
@abstractmethod
def _add_conventional_md_position_update_step(self): # R Step
raise NotImplementedError("must implement _add_conventional_md_position_update_step")
@abstractmethod
def _add_conventional_md_velocity_update_step(self): # V Step
raise NotImplementedError("must implement _add_conventional_md_velocity_update_step")
@abstractmethod
def _add_conventional_md_stochastic_velocity_update_step(self): # O Step
raise NotImplementedError("must implement _add_conventional_md_stochastic_velocity_update_step")
'''
@abstractmethod
def _add_conventional_md_update_step(self):
raise NotImplementedError("must implement _add_conventional_md_update_step")
'''
@abstractmethod
def _add_gamd_position_update_step(self): # R Step
raise NotImplementedError("must implement _add_gamd_position_update_step")
@abstractmethod
def _add_gamd_velocity_update_step(self): # V Step
raise NotImplementedError("must implement _add_gamd_velocity_update_step")
@abstractmethod
def _add_gamd_stochastic_velocity_update_step(self): # O Step
raise NotImplementedError("must implement _add_gamd_stochastic_velocity_update_step")
'''
@abstractmethod
def _add_gamd_update_step(self):
raise NotImplementedError("must implement _add_gamd_update_step")
@abstractmethod
def _add_gamd_pre_calc_step(self):
raise NotImplementedError("must implement _add_gamd_pre_calc_step")
@abstractmethod
def _add_gamd_boost_calculations_step(self):
raise NotImplementedError("must implement _add_gamd_boost_calculations_step")
@abstractmethod
def _add_instructions_to_calculate_primary_boost_statistics(self):
raise NotImplementedError("must implement _add_instructions_to_calculate_primary_boost_statistics")
@abstractmethod
def _add_instructions_to_calculate_secondary_boost_statistics(self):
raise NotImplementedError("must implement _add_instructions_to_calculate_secondary_boost_statistics")
def _add_conventional_md_instructions(self):
self._add_conventional_md_pre_calc_step()
'''
self._add_conventional_md_velocity_update_step()
self._add_conventional_md_position_update_step()
self._add_conventional_md_stochastic_velocity_update_step()
self._add_conventional_md_position_update_step()
self._add_conventional_md_velocity_update_step()
'''
self._add_conventional_md_update_step()
def _add_gamd_instructions(self):
self._add_gamd_pre_calc_step()
self._add_gamd_boost_calculations_step()
'''
self._add_gamd_velocity_update_step()
self._add_gamd_position_update_step()
self._add_gamd_stochastic_velocity_update_step()
self._add_gamd_position_update_step()
#
        # We should only need to calculate the scaling factor once per step, since Vmax, Vmin, the threshold energy,
# and the effective harmonic constant don't change after being set. It's only a question if the energy changes
# somehow during the step.
#
#self._add_gamd_boost_calculations_step()
self._add_gamd_velocity_update_step()
'''
self._add_gamd_update_step()
#
# Debugging Methods
#
@staticmethod
def _get_debug_values_as_dictionary(dictionary, counter, function_to_retrieve_value):
results = {}
for key, value in dictionary.items():
results[str(counter) + "_" + key] = function_to_retrieve_value(counter, key)
return results
def _add_debug(self):
        for key in self.global_variables:
            self._save_global_debug(key)
        for key in self.per_dof_variables:
            self._save_per_dof_debug(key)
super(GamdLangevinIntegrator, self)._add_debug()
def get_debug_step(self, counter):
results = super(GamdLangevinIntegrator, self).get_debug_step(counter)
results.update(self._get_debug_values_as_dictionary(self.global_variables, counter, self._get_global_debug_value))
results.update(self._get_debug_values_as_dictionary(self.per_dof_variables, counter, self._get_per_dof_debug_value))
return results
#
# This integrator is the basis for all of our single boost type integrators
# to perform them in a generic way that will work across boost types.
#
class GroupBoostIntegrator(GamdLangevinIntegrator, ABC):
""" This class is an OpenMM Integrator for doing the dihedral boost for Gaussian accelerated molecular dynamics.
"""
def __init__(self, system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb, nstlim, ntave, sigma0,
collision_rate, temperature, restart_filename):
"""
Parameters
----------
:param system_group: This value indicates what value should be appended to system names (energy, force) for accessing the correct group's variable.
:param group_name: This variable along with the system_group is used to create a unique name for each of our variables, so that if you are composing groups for boosts, they do not overwrite.
:param dt: The Amount of time between each time step.
:param ntcmdprep: The number of conventional MD steps for system equilibration.
:param ntcmd: The total number of conventional MD steps (including ntcmdprep). (must be a multiple of ntave)
:param ntebprep: The number of GaMD pre-equilibration steps.
:param nteb: The number of GaMD equilibration steps (including ntebprep). (must be a multiple of ntave)
:param nstlim: The total number of simulation steps.
:param ntave: The number of steps used to smooth the average and sigma of potential energy (corresponds to a
running average window size).
:param sigma0: The upper limit of the standard deviation of the potential boost that allows for
accurate reweighting.
:param collision_rate: Collision rate (gamma) compatible with 1/picoseconds, default: 1.0/unit.picoseconds
:param temperature: "Bath" temperature value compatible with units.kelvin, default: 298.15*unit.kelvin
:param restart_filename: The file name of the restart file. (default=None indicates new simulation.)
"""
#
# These variables are generated per type of boost being performed
#
self.global_variables_by_boost_type = {"Vmax": -1E99, "Vmin": 1E99, "Vavg": 0,
"oldVavg": 0, "sigmaV": 0, "M2": 0, "wVavg": 0, "k0": 0,
"k0prime": 0, "k0doubleprime": 0, "k0doubleprime_window": 0,
"boosted_energy": 0, "check_boost": 0, "sigma0": sigma0,
"threshold_energy": -1E99}
#
# These variables are always kept for reporting, regardless of boost type
#
self.boost_global_variables = {}
self.boost_per_dof_variables = {"newx": 0, "coordinates": 0}
self.debug_per_dof_variables = []
# self.debug_per_dof_variables = ["x", "v", "f", "m"]
self.debug_global_variables = ["dt", "energy", "energy0", "energy1", "energy2", "energy3", "energy4"]
self.sigma0 = sigma0
self.debuggingIsEnabled = True
super(GroupBoostIntegrator, self).__init__(system_group, group_name, dt, ntcmdprep, ntcmd, ntebprep, nteb, nstlim, ntave, collision_rate,
temperature, restart_filename)
#
# We have to set this value separate from the others, so that when we do a non-total boost, we will still
# have a total boost to report back. In that condition, the above ForceScalingFactor will get setup for
# appropriate boost type.
#
# NOTE: THIS VALUE WILL NEED TO BE FIXED SOMEHOW FOR DUAL BOOST.
#
self.addGlobalVariable(self._append_group_name_by_type("ForceScalingFactor", BoostType.TOTAL), 1.0)
self.addGlobalVariable(self._append_group_name_by_type("BoostPotential", BoostType.TOTAL), 0.0)
if self.get_boost_type() == BoostType.TOTAL or self.get_boost_type() == BoostType.DIHEDRAL:
self.addGlobalVariable(self._append_group_name_by_type("ForceScalingFactor", BoostType.DIHEDRAL), 1.0)
self.addGlobalVariable(self._append_group_name_by_type("BoostPotential", BoostType.DIHEDRAL), 0.0)
else:
self.addGlobalVariable(self._append_group_name("ForceScalingFactor"), 1.0)
self.addGlobalVariable(self._append_group_name("BoostPotential"), 0.0)
self.addComputePerDof("coordinates", "x")
#
#
#
#
# def get_starting_energy(self):
# return self.getGlobalVariableByName("starting_energy")
# def get_current_state(self):
# results = {"step": self.getGlobalVariableByName("stepCount")}
# return results
# pass
def _add_common_variables(self):
        for key, value in self.boost_global_variables.items():
            self.addGlobalVariable(key, value)
        for key, value in self.boost_per_dof_variables.items():
            self.addPerDofVariable(key, value)
        for key, value in self.global_variables_by_boost_type.items():
            self.addGlobalVariable(self._append_group_name(key), value)
super(GroupBoostIntegrator, self)._add_common_variables()
def _update_potential_state_values_with_window_potential_state_values(self):
# Update window variables
self.addComputeGlobal(self._append_group_name("Vavg"), self._append_group_name("wVavg"))
self.addComputeGlobal(self._append_group_name("sigmaV"), "sqrt({0}/(windowCount-1))".format(
self._append_group_name("M2")))
# Reset variables
self.addComputeGlobal(self._append_group_name("M2"), "0")
self.addComputeGlobal(self._append_group_name("wVavg"), "0.0")
self.addComputeGlobal(self._append_group_name("oldVavg"), "0.0")
def _add_instructions_to_calculate_primary_boost_statistics(self):
self.addComputeGlobal(self._append_group_name("Vmax"), "max({0}, {1})".format(self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmax")))
self.addComputeGlobal(self._append_group_name("Vmin"), "min({0}, {1})".format(self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmin")))
def _add_instructions_to_calculate_secondary_boost_statistics(self):
#
# The following calculations are used to calculate the average and variance/standard deviation,
        # on every step, rather than only when stepCount % ntave == 0.
#
# Algorithm Description:
#
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
#
#
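        # Equivalent plain-Python sketch of the update below (n = windowCount):
        #   old_avg  = avg
        #   avg     += (V - old_avg) / n
        #   M2      += (V - old_avg) * (V - avg)
        #   variance = M2 / (n - 1)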
self.addComputeGlobal(self._append_group_name("oldVavg"), self._append_group_name("wVavg"))
self.addComputeGlobal(self._append_group_name("wVavg"), "{0} + ({1}-{0})/windowCount".format(
self._append_group_name("wVavg"), self._append_group_name("StartingPotentialEnergy")))
self.addComputeGlobal(self._append_group_name("M2"), "{0} + ({1}-{2})*({1}-{3})".format(
self._append_group_name("M2"),
self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("oldVavg"),
self._append_group_name("wVavg")))
def _add_conventional_md_pre_calc_step(self):
self.addComputeGlobal("vscale", "exp(-dt*collision_rate)")
self.addComputeGlobal("fscale", "(1-vscale)/collision_rate")
self.addComputeGlobal("noisescale", "sqrt(thermal_energy*(1-vscale*vscale))")
def _add_conventional_md_update_step(self):
self.addComputePerDof("newx", "x")
self.addComputePerDof("v", "vscale*v + fscale*f/m + noisescale*gaussian/sqrt(m)")
self.addComputePerDof("x", "x+dt*v")
self.addConstrainPositions()
self.addComputePerDof("v", "(x-newx)/dt")
def _add_gamd_pre_calc_step(self):
self.addComputeGlobal("vscale", "exp(-dt*collision_rate)")
self.addComputeGlobal("fscale", "(1-vscale)/collision_rate")
self.addComputeGlobal("noisescale", "sqrt(thermal_energy*(1-vscale*vscale))")
#
# We do not apply the boost potential to the energy value since energy is read only.
#
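        # GaMD boost: DeltaV = 1/2 * k0 * (E_threshold - V)^2 / (Vmax - Vmin),
        # applied (via the step() guard below) only while V + DeltaV <= E_threshold.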
self.addComputeGlobal(self._append_group_name("BoostPotential"), "0.5 * {0} * ({1} - {2})^2 / ({3} - {4})".
format(self._append_group_name("k0"), self._append_group_name("threshold_energy"),
self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("Vmax"), self._append_group_name("Vmin")))
#
# "BoostPotential*step(threshold_energy-boosted_energy)")
self.addComputeGlobal(self._append_group_name("BoostPotential"), "{0}*step({1} - ({2} + {3}))".format(
self._append_group_name("BoostPotential"), self._append_group_name("threshold_energy"),
self._append_group_name("BoostPotential"), self._append_group_name("StartingPotentialEnergy")))
#
# If the boostPotential is zero, we want to set the Force Scaling Factor to one, which is what we will use
# the check_boost value to do in a later portion of the code.
#
self.addComputeGlobal(self._append_group_name("check_boost"), "1 - delta({0})".format(self._append_group_name("BoostPotential")))
# "boosted_energy" = "energy + BoostPotential"
self.addComputeGlobal(self._append_group_name("boosted_energy"), "{0} + {1}".format(
self._append_group_name("StartingPotentialEnergy"),
self._append_group_name("BoostPotential")))
def _add_gamd_boost_calculations_step(self):
self.addComputeGlobal(self._append_group_name("ForceScalingFactor"), "1.0 - (({0} * ({1} - {2}))/({3} - {4}))"
.format(self._append_group_name("k0"), self._append_group_name("threshold_energy"),
self._append_group_name("StartingPotentialEnergy"), self._append_group_name("Vmax"),
self._append_group_name("Vmin")))
        # This is the pseudocode of what we are about to do, in case it helps you read it.
#
# self.beginIfBlock("boosted_energy >= threshold_energy")
#
#
# When the boosted energy is greater than or equal to the threshold energy, the value of check_boost will be 0.
# This will cause the following equation to change the ForceScalingFactor to 1.0. When the boosted_energy
# is less than the threshold energy, we are in our normal good condition, and just want to keep the
# ForceScalingFactor the same.
#
# NOTE: We do these odd computational gymnastics to counteract the problem within OpenMM with
# if statements causing the JIT compiler to take an exponentially larger amount of time to start.
#
# 1.0 - 1.0 * check_boost + check_boost * ForceScalingFactor"
self.addComputeGlobal(self._append_group_name("ForceScalingFactor"), "1.0 - {0} + {0} * {1}"
.format(self._append_group_name("check_boost"),
self._append_group_name("ForceScalingFactor")))
#
#
#
def _add_gamd_update_step(self):
self.addComputePerDof("newx", "x")
#
if self.get_boost_type() == BoostType.TOTAL:
# We take care of stochastic kick and drag here.
self.addComputePerDof("v", "vscale*v + noisescale*gaussian/sqrt(m)")
# We take care of all of the forces and the scaling here.
self.addComputePerDof("v", "v + fscale*{0}*{1}/m"
.format(self._append_group("f"), self._append_group_name("ForceScalingFactor")))
elif self.get_boost_type() == BoostType.DIHEDRAL:
# We take care of stochastic kick and drag here.
self.addComputePerDof("v", "vscale*v + noisescale*gaussian/sqrt(m)")
# We take care of all of the forces that aren't the dihedral.
self.addComputePerDof("v", "v + fscale*f0/m")
# We boost the dihedral force.
self.addComputePerDof("v", "v + fscale*{0}*{1}/m"
.format(self._append_group("f"), self._append_group_name("ForceScalingFactor")))
else:
print("Failure in detecting boost type to determine proper boost methodology.")
self.addComputePerDof("x", "x+dt*v")
self.addConstrainPositions()
self.addComputePerDof("v", "(x-newx)/dt")
def get_force_scaling_factors(self):
force_scaling_factors = {
self._append_group_name_by_type("ForceScalingFactor", BoostType.TOTAL): self.getGlobalVariableByName(
self._append_group_name_by_type("ForceScalingFactor", BoostType.TOTAL))}
if self.get_boost_type() == BoostType.TOTAL or self.get_boost_type() == BoostType.DIHEDRAL:
force_scaling_factors[self._append_group_name_by_type("ForceScalingFactor", BoostType.DIHEDRAL)] = \
self.getGlobalVariableByName(self._append_group_name_by_type("ForceScalingFactor", BoostType.DIHEDRAL))
else:
force_scaling_factors[self._append_group_name("ForceScalingFactor")] = self.getGlobalVariableByName(
self._append_group_name("ForceScalingFactor"))
return force_scaling_factors
def get_boost_potentials(self):
boost_potentials = {
self._append_group_name_by_type("BoostPotential", BoostType.TOTAL): self.getGlobalVariableByName(
self._append_group_name_by_type("BoostPotential", BoostType.TOTAL))}
if self.get_boost_type() == BoostType.TOTAL or self.get_boost_type() == BoostType.DIHEDRAL:
boost_potentials[self._append_group_name_by_type("BoostPotential", BoostType.DIHEDRAL)] = \
self.getGlobalVariableByName(self._append_group_name_by_type("BoostPotential", BoostType.DIHEDRAL))
else:
boost_potentials[self._append_group_name("BoostPotential")] = self.getGlobalVariableByName(
self._append_group_name("BoostPotential"))
return boost_potentials
def __calculate_simple_threshold_energy_and_effective_harmonic_constant(self):
self.addComputeGlobal(self._append_group_name("threshold_energy"), self._append_group_name("Vmax"))
# "(sigma0/sigmaV) * (Vmax - Vmin)/(Vmax - Vavg)"
self.addComputeGlobal(self._append_group_name("k0prime"),
"({0}/{1}) * ({2} - {3}) / ({2} - {4})".format(self._append_group_name("sigma0"),
self._append_group_name("sigmaV"),
self._append_group_name("Vmax"),
self._append_group_name("Vmin"),
self._append_group_name("Vavg")))
self.addComputeGlobal(self._append_group_name("k0"),
"min(1.0, {0}) ".format(self._append_group_name("k0prime")))
def _upper_bound_calculate_threshold_energy_and_effective_harmonic_constant(self):
self.addComputeGlobal(self._append_group_name("k0"), "1.0")
# "1 - (sigma0/sigmaV) * (Vmax - Vmin)/(Vavg - Vmin)"
self.addComputeGlobal(self._append_group_name("k0doubleprime"),
"(1 - {0}/{1}) * ({2} - {3})/({4} - {3})".format(self._append_group_name("sigma0"),
self._append_group_name("sigmaV"),
self._append_group_name("Vmax"),
self._append_group_name("Vmin"),
self._append_group_name("Vavg")))
#
#
#
#
self.addComputeGlobal(self._append_group_name("k0"), self._append_group_name("k0doubleprime"))
# "Vmin + (Vmax - Vmin)/k0"
self.addComputeGlobal(self._append_group_name("threshold_energy"),
"{0} + ({1} - {0})/{2}".format(self._append_group_name("Vmin"),
self._append_group_name("Vmax"),
self._append_group_name("k0")))
# self.beginIfBlock("{0} <= 0.0".format(self._append_group_name("k0doubleprime")))
# self.beginIfBlock("{0} > 1.0".format(self._append_group_name("k0doubleprime")))
# "k0doubleprime_window = (-k0doubleprime) * (1 - k0doubleprime)"
self.addComputeGlobal(self._append_group_name("k0doubleprime_window"),
"(-{0}) * (1 - {0})".format(self._append_group_name("k0doubleprime")))
self.beginIfBlock(self._append_group_name("k0doubleprime_window") + " >= 0.0")
self.__calculate_simple_threshold_energy_and_effective_harmonic_constant()
self.endBlock()
def _lower_bound_calculate_threshold_energy_and_effective_harmonic_constant(self):
self.__calculate_simple_threshold_energy_and_effective_harmonic_constant()
| 53.346154 | 199 | 0.644708 | 2,942 | 26,353 | 5.467029 | 0.145479 | 0.057075 | 0.092328 | 0.114586 | 0.666501 | 0.62814 | 0.573116 | 0.511067 | 0.461452 | 0.391134 | 0 | 0.013026 | 0.260046 | 26,353 | 493 | 200 | 53.454361 | 0.811795 | 0.239479 | 0 | 0.296943 | 0 | 0.017467 | 0.146123 | 0.041863 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126638 | false | 0 | 0.026201 | 0 | 0.179039 | 0.004367 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d30a980368b44952326b7f77662802b2e9c11e3d | 4,031 | py | Python | combiner/combiner/jax/model/seq_summary.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | combiner/combiner/jax/model/seq_summary.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | combiner/combiner/jax/model/seq_summary.py | gunpowder78/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import jax
import jax.numpy as jnp
import flax.linen as nn
import numpy as np
from jax import lax
from functools import partial
from combiner.jax.model.transformer_base import TransformerConfig, EncoderDecoder1DBlock, MultiDimEncoderDecoder1DBlock
def do_pooling(pool_spec, input_embed, keepdims):
if pool_spec == 'last':
summary = input_embed[Ellipsis, -1, :]
if keepdims:
summary = jnp.expand_dims(summary, axis=-2)
return summary
else:
pool_func = getattr(jnp, pool_spec, None)
if pool_func is None:
raise ValueError('unknown pooling method %s' % pool_spec)
return pool_func(input_embed, axis=-2, keepdims=keepdims)
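# Hypothetical usage sketch (shapes only):
#   x = jnp.ones((2, 5, 8))
#   do_pooling('max', x, keepdims=True).shape    # -> (2, 1, 8)
#   do_pooling('last', x, keepdims=False).shape  # -> (2, 8)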
class JustPooling(nn.Module):
config: TransformerConfig
@nn.compact
def __call__(self, input_embed, keepdims=True):
pool_spec = self.config.seq_summary.split('-')[1]
return do_pooling(pool_spec, input_embed, keepdims)
class SelfAttPooling(nn.Module):
config: TransformerConfig
num_repeat: int
def setup(self):
if self.num_repeat == -1:
self.self_att = EncoderDecoder1DBlock(config=self.config, is_self_att=True)
else:
self.self_att = MultiDimEncoderDecoder1DBlock(config=self.config, num_repeat=self.num_repeat, is_self_att=True)
def __call__(self, input_embed, keepdims=True):
"""
Args:
input_embed: embedding of size
`[batch_sizes..., length, input_embed_dim]`.
    Returns:
      summary: tensor of shape `[batch_sizes..., 1, input_embed_dim]`
        (the length axis is dropped when `keepdims=False`).
"""
all_att = self.self_att(input_embed)
pool_spec = self.config.seq_summary.split('-')[1]
return do_pooling(pool_spec, all_att, keepdims)
class CrossAttSummary(nn.Module):
config: TransformerConfig
num_repeat: int
def setup(self):
if self.num_repeat == -1:
self.cross_att = EncoderDecoder1DBlock(config=self.config, is_self_att=False)
else:
self.cross_att = MultiDimEncoderDecoder1DBlock(config=self.config, num_repeat=self.num_repeat, is_self_att=False)
@nn.compact
def __call__(self, input_embed, keepdims=True):
"""
Args:
input_embed: embedding of size
`[batch_sizes..., length, input_embed_dim]`.
    Returns:
      summary: tensor of shape `[batch_sizes..., 1, input_embed_dim]`
        (the length axis is dropped when `keepdims=False`).
"""
if self.config.seq_summary == 'cross-cls': # use cls embedding for query
cls_embedding = self.param('cls_embed', self.config.kernel_init, (1, input_embed.shape[-1]))
tile_times = []
for i in range(len(input_embed.shape) - 2):
cls_embedding = jnp.expand_dims(cls_embedding, axis=0)
tile_times.append(input_embed.shape[i])
tile_times += [1, 1]
query = jnp.tile(cls_embedding, tile_times)
else:
assert self.config.seq_summary == 'cross-last' # use last embedding for query
query = jnp.expand_dims(input_embed[Ellipsis, -1, :], axis=-2)
summary = self.cross_att(inputs=query, inputs_kv=input_embed)
if not keepdims:
summary = jnp.squeeze(summary, axis=-2)
return summary
def get_seq_summary_module(config, num_repeat=-1):
if config.seq_summary.startswith('pool-'):
return partial(SelfAttPooling, config, num_repeat)
elif config.seq_summary.startswith('just-'):
    return partial(JustPooling, config)  # JustPooling has no num_repeat field
elif config.seq_summary.startswith('cross-'):
return partial(CrossAttSummary, config, num_repeat)
else:
raise NotImplementedError
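# Hypothetical factory usage: the returned value is a functools.partial over a
# Flax module class, so instantiating it builds the summary module:
#   SummaryCls = get_seq_summary_module(config)
#   summary = SummaryCls()(input_embed)   # e.g. inside another nn.Module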
| 34.161017 | 119 | 0.717688 | 553 | 4,031 | 5.039783 | 0.296564 | 0.068174 | 0.040187 | 0.028705 | 0.393972 | 0.358091 | 0.358091 | 0.339074 | 0.271618 | 0.253319 | 0 | 0.010253 | 0.177375 | 4,031 | 117 | 120 | 34.452991 | 0.830217 | 0.245597 | 0 | 0.328571 | 0 | 0 | 0.025321 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d30cbc4e8c45e3609e5a516620fd13593cdf577b | 833 | py | Python | bot/views.py | toast38coza/API.AI-FullfillmentBackend | 9b5ac39fbe5b9f5ffe7126890a4aca3e9307c106 | [
"MIT"
] | 1 | 2016-12-12T08:05:05.000Z | 2016-12-12T08:05:05.000Z | bot/views.py | toast38coza/API.AI-FullfillmentBackend | 9b5ac39fbe5b9f5ffe7126890a4aca3e9307c106 | [
"MIT"
] | null | null | null | bot/views.py | toast38coza/API.AI-FullfillmentBackend | 9b5ac39fbe5b9f5ffe7126890a4aca3e9307c106 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import actions
import json
router = {
'create.appointment': actions.debug
}
def get_payload(request):
    # Decode the raw request body and parse it as JSON.
request_json = request.body.decode('utf-8')
return json.loads(request_json)
def execute_action(request):
payload = get_payload(request)
action = payload.get('result').get('action')
params = payload.get('result').get('parameters', {})
token = payload.get('sessionId', None)
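    # NOTE: router.get(action) returns None for an unregistered action name,
    # so the call below would raise TypeError in that case.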
return router.get(action)(payload, params=params, token=token)
@csrf_exempt
def index(request):
print("request >> {}" .format(request.body))
result = execute_action(request)
print("response << {}" .format(result))
return JsonResponse(result)
| 28.724138 | 66 | 0.713085 | 105 | 833 | 5.580952 | 0.428571 | 0.068259 | 0.05802 | 0.064846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001422 | 0.156062 | 833 | 28 | 67 | 29.75 | 0.832148 | 0.028812 | 0 | 0 | 0 | 0 | 0.107807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.217391 | 0 | 0.478261 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d31206d9d66ed5d70b28e29ae463a9a13242487b | 4,721 | py | Python | shop/models/cartmodel.py | christianbertschy/django-shop | 432a15b17b8d09d8a3fece23709dd91d113f37e3 | [
"BSD-3-Clause"
] | 1 | 2015-09-24T00:36:32.000Z | 2015-09-24T00:36:32.000Z | shop/models/cartmodel.py | christianbertschy/django-shop | 432a15b17b8d09d8a3fece23709dd91d113f37e3 | [
"BSD-3-Clause"
] | null | null | null | shop/models/cartmodel.py | christianbertschy/django-shop | 432a15b17b8d09d8a3fece23709dd91d113f37e3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from decimal import Decimal
from django.contrib.auth.models import User
from django.db import models
from shop.cart.modifiers_pool import cart_modifiers_pool
from shop.models.productmodel import Product
class Cart(models.Model):
'''
This should be a rather simple list of items. Ideally it should be bound to
    a session and not to a User if we want to let people buy from our shop
without having to register with us.
'''
# If the user is null, that means this is used for a session
user = models.OneToOneField(User, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
class Meta:
app_label = 'shop'
def __init__(self, *args, **kwargs):
super(Cart, self).__init__(*args,**kwargs)
# That will hold things like tax totals or total discount
self.subtotal_price = Decimal('0.0')
self.total_price = Decimal('0.0')
self.extra_price_fields = [] # List of tuples (label, value)
def add_product(self,product, quantity=1):
'''
Adds a product to the cart
'''
# Let's see if we already have an Item with the same product ID
        existing_items = CartItem.objects.filter(cart=self, product=product)
        if existing_items.count() > 0:
            cart_item = existing_items[0]
cart_item.quantity = cart_item.quantity + int(quantity)
cart_item.save()
else:
cart_item = CartItem.objects.create(cart=self,quantity=quantity,product=product)
cart_item.save()
self.save() # to get the last updated timestamp
def update(self):
'''
This should be called whenever anything is changed in the cart (added or removed)
It will loop on all line items in the cart, and call all the price modifiers
on each row.
After doing this, it will compute and update the order's total and
subtotal fields, along with any payment field added along the way by
modifiers.
Note that theses added fields are not stored - we actually want to reflect
rebate and tax changes on the *cart* items, but we don't want that for
the order items (since they are legally binding after the "purchase" button
was pressed)
'''
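        # Illustrative (hypothetical) modifier, for reference -- real modifiers
        # are registered through cart_modifiers_pool and follow this shape:
        #
        #   class TenPercentTax(object):
        #       def process_cart_item(self, cart_item):
        #           tax = cart_item.line_subtotal * Decimal('0.1')
        #           cart_item.extra_price_fields.append(('Tax 10%', tax))
        #       def process_cart(self, cart):
        #           pass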
items = CartItem.objects.filter(cart=self)
self.subtotal_price = Decimal('0.0') # Reset the subtotal
        for item in items: # For each CartItem (cart line)...
self.subtotal_price = self.subtotal_price + item.update()
item.save()
# Now we have to iterate over the registered modifiers again (unfortunately)
# to pass them the whole Order this time
for modifier in cart_modifiers_pool.get_modifiers_list():
modifier.process_cart(self)
self.total_price = self.subtotal_price
# Like for line items, most of the modifiers will simply add a field
# to extra_price_fields, let's update the total with them
for label, value in self.extra_price_fields:
self.total_price = self.total_price + value
class CartItem(models.Model):
'''
This is a holder for the quantity of items in the cart and, obviously, a
pointer to the actual Product being purchased :)
'''
cart = models.ForeignKey(Cart, related_name="items")
quantity = models.IntegerField()
product = models.ForeignKey(Product)
class Meta:
app_label = 'shop'
def __init__(self, *args, **kwargs):
# That will hold extra fields to display to the user
# (ex. taxes, discount)
super(CartItem, self).__init__(*args,**kwargs)
self.extra_price_fields = [] # list of tuples (label, value)
# These must not be stored, since their components can be changed between
# sessions / logins etc...
self.line_subtotal = Decimal('0.0')
self.line_total = Decimal('0.0')
def update(self):
self.line_subtotal = self.product.get_specific().get_price() * self.quantity
self.line_total = self.line_subtotal
for modifier in cart_modifiers_pool.get_modifiers_list():
# We now loop over every registered price modifier,
# most of them will simply add a field to extra_payment_fields
modifier.process_cart_item(self)
for label, value in self.extra_price_fields:
self.line_total = self.line_total + value
return self.line_total
| 41.052174 | 92 | 0.640119 | 635 | 4,721 | 4.63937 | 0.308661 | 0.021724 | 0.028853 | 0.027155 | 0.241344 | 0.189409 | 0.171758 | 0.154107 | 0.154107 | 0.067889 | 0 | 0.004133 | 0.282567 | 4,721 | 115 | 93 | 41.052174 | 0.865663 | 0.371955 | 0 | 0.327273 | 0 | 0 | 0.010076 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.381818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d31427e347045367d72029ec89d0a2520511fe05 | 2,456 | py | Python | tests/test_transform.py | rdmolony/merge-sec-mentor-excels | 430b6275e9fd142089b3a6b1cf1d7b25c2c5bb71 | [
"MIT"
] | null | null | null | tests/test_transform.py | rdmolony/merge-sec-mentor-excels | 430b6275e9fd142089b3a6b1cf1d7b25c2c5bb71 | [
"MIT"
] | null | null | null | tests/test_transform.py | rdmolony/merge-sec-mentor-excels | 430b6275e9fd142089b3a6b1cf1d7b25c2c5bb71 | [
"MIT"
] | 1 | 2020-07-31T11:51:54.000Z | 2020-07-31T11:51:54.000Z | from pathlib import Path
from typing import Dict, List
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from tdda.referencetest.checkpandas import default_csv_loader
from secs.tasks.extract import regroup_excels_by_sheet
from secs.tasks.transform import (
transform_sheet,
_select_numeric_columns,
_clean_numeric_columns,
)
INPUT_DIR = Path(__file__).parent / "input_data"
REFERENCE_DIR = Path(__file__).parent / "reference_data"
MENTOR_DIR = INPUT_DIR / "mentors"
@pytest.fixture
def mentor_excels_by_sheet() -> Dict[str, pd.DataFrame]:
mentor_filepath = MENTOR_DIR / "DCC" / "SEC - CM - DCC.xlsx"
mentor_excel = pd.read_excel(mentor_filepath, sheet_name=None)
mentor_excels = [mentor_excel, mentor_excel]
return regroup_excels_by_sheet.run(mentor_excels)
def test_select_numeric_columns() -> List[str]:
input = pd.DataFrame(
{
"mostly_numbers": [",4", "6!", 1],
"not_number_column": ["SEC blah", "SEC2", "Hi"],
"string_with_numbers": ["Level 1", "Level 2", "Level 3"],
"addresses": ["18 Castleview Heath", "Unit 5 District", "Howth, D13HW18"],
"mostly_empty_with_numbers": [np.nan, np.nan, 1],
12: [1, 2, 3],
}
)
expected_output = ["mostly_numbers", "mostly_empty_with_numbers", 12]
output = _select_numeric_columns(input)
assert output == expected_output
def test_clean_numeric_columns() -> List[str]:
input = pd.DataFrame(
{
"dirty_col": [",4", "6!", " ", 1, "", "None", 2],
"clean_col": [1, 2, 3, 4, 5, 6, 7],
}
)
expected_output = pd.DataFrame(
{"dirty_col": [4, 6, 0, 1, 0, 0, 2], "clean_col": [1, 2, 3, 4, 5, 6, 7]},
).convert_dtypes()
output = _clean_numeric_columns(input)
assert_frame_equal(output, expected_output)
@pytest.mark.parametrize(
"sheet_name,header_row,filename",
[
("SEC activity by month", 7, "SecActivityByMonth.csv"),
("Other activity by month", 7, "OtherActivityByMonth.csv"),
("Summary", 4, "Summary.csv"),
("SEC contacts", 4, "SecContacts.csv"),
],
)
def test_transform_sheet(
mentor_excels_by_sheet, sheet_name, header_row, filename
) -> None:
output = transform_sheet.run(
mentor_excels_by_sheet[sheet_name], header_row=header_row
)
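    # The tdda reference check below is currently disabled; enabling it would
    # require a ReferenceTest fixture (conventionally named `ref`).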
# ref.assertDataFrameCorrect(output, filename)
| 29.590361 | 86 | 0.653502 | 314 | 2,456 | 4.808917 | 0.350318 | 0.055629 | 0.043046 | 0.037748 | 0.162252 | 0.139735 | 0.119205 | 0.070199 | 0.021192 | 0.021192 | 0 | 0.026398 | 0.213355 | 2,456 | 82 | 87 | 29.95122 | 0.755176 | 0.017915 | 0 | 0.031746 | 0 | 0 | 0.196266 | 0.052282 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.063492 | false | 0 | 0.142857 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d315e1c08f3a0c36dcb3ea7a752d2f2aeb60a151 | 15,086 | py | Python | src/mist/api/schedules/base.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | null | null | null | src/mist/api/schedules/base.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | null | null | null | src/mist/api/schedules/base.py | SpiralUp/mist.api | a3b5233ab4aa3f6a0a2dea6333ff1e5a260af934 | [
"Apache-2.0"
] | null | null | null | """Definition of base classes for Schedules
This currently contains only BaseController. It includes basic functionality
for a given schedule.
Cloud specific controllers are in `mist.api.schedules.controllers`.
"""
import logging
import datetime
import mongoengine as me
from mist.api.scripts.models import Script
from mist.api.exceptions import MistError
from mist.api.exceptions import InternalServerError
from mist.api.exceptions import BadRequestError
from mist.api.exceptions import ScriptNotFoundError
from mist.api.exceptions import ScheduleOperationError
from mist.api.exceptions import ScheduleNameExistsError
from mist.api.machines.models import Machine
from mist.api.exceptions import NotFoundError
from mist.api.selectors.models import FieldSelector, GenericResourceSelector
from mist.api.selectors.models import TaggingSelector, MachinesAgeSelector
import mist.api.schedules.models as schedules
from mist.api.auth.methods import AuthContext
log = logging.getLogger(__name__)
class BaseController(object):
def __init__(self, schedule, auth_context=None):
"""Initialize schedule controller given a schedule
Most times one is expected to access a controller from inside the
schedule. Like this:
schedule = mist.api.schedules.models.Schedule.objects.get(id=s_id)
schedule.ctl.add()
"""
self.schedule = schedule
self._auth_context = auth_context
def set_auth_context(self, auth_context):
assert isinstance(auth_context, AuthContext)
self._auth_context = auth_context
@property
def auth_context(self):
if self._auth_context is None:
raise Exception("Forgot to set auth_context")
elif self._auth_context is False:
return None
return self._auth_context
def add(self, **kwargs):
"""Add an entry to the database
This is only to be called by `Schedule.add` classmethod to create
a schedule. Fields `owner` and `name` are already populated in
`self.schedule`. The `self.schedule` is not yet saved.
"""
# check if required variables exist.
if not (kwargs.get('script_id', '') or kwargs.get('action', '')):
raise BadRequestError("You must provide script_id "
"or machine's action")
if not kwargs.get('selectors'):
raise BadRequestError("You must provide a list of selectors, "
"at least machine ids or tags")
if kwargs.get('schedule_type') not in ['crontab', 'reminder',
'interval', 'one_off']:
            raise BadRequestError('schedule type must be one of these: '
                                  '(crontab, interval, one_off, reminder)')
if kwargs.get('schedule_type') in ['one_off', 'reminder'] and \
not kwargs.get('schedule_entry', ''):
raise BadRequestError('one_off schedule '
'requires date given in schedule_entry')
try:
self.update(**kwargs)
except (me.ValidationError, me.NotUniqueError) as exc:
# Propagate original error.
log.error("Error adding %s: %s", self.schedule.name,
exc.to_dict())
raise
log.info("Added schedule with name '%s'", self.schedule.name)
self.schedule.owner.mapper.update(self.schedule)
def update(self, **kwargs):
"""Edit an existing Schedule"""
if self.auth_context is not None:
auth_context = self.auth_context
else:
raise MistError("You are not authorized to update schedule")
owner = auth_context.owner
if kwargs.get('action'):
if kwargs.get('action') not in ['reboot', 'destroy', 'notify',
'start', 'stop']:
raise BadRequestError("Action is not correct")
script_id = kwargs.pop('script_id', '')
if script_id:
try:
Script.objects.get(owner=owner, id=script_id, deleted=None)
except me.DoesNotExist:
raise ScriptNotFoundError('Script with id %s does not '
'exist' % script_id)
# SEC require permission RUN on script
auth_context.check_perm('script', 'run', script_id)
# for ui compatibility
if kwargs.get('expires') == '':
kwargs['expires'] = None
if kwargs.get('max_run_count') == '':
kwargs['max_run_count'] = None
if kwargs.get('start_after') == '':
kwargs['start_after'] = None
# transform string to datetime
if kwargs.get('expires'):
try:
if isinstance(kwargs['expires'], int):
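                    # Heuristic: 5e9 seconds is roughly the year 2128, so any
                    # larger int must be a millisecond timestamp.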
if kwargs['expires'] > 5000000000: # Timestamp in millis
kwargs['expires'] = kwargs['expires'] / 1000
kwargs['expires'] = datetime.datetime.fromtimestamp(
kwargs['expires'])
else:
kwargs['expires'] = datetime.datetime.strptime(
kwargs['expires'], '%Y-%m-%d %H:%M:%S')
except (ValueError, TypeError):
raise BadRequestError('Expiration date value was not valid')
if kwargs.get('start_after'):
try:
if isinstance(kwargs['start_after'], int):
if kwargs['start_after'] > 5000000000: # Timestamp in ms
kwargs['start_after'] = kwargs['start_after'] / 1000
kwargs['start_after'] = datetime.datetime.fromtimestamp(
kwargs['start_after']
)
else:
kwargs['start_after'] = datetime.datetime.strptime(
kwargs['start_after'], '%Y-%m-%d %H:%M:%S')
except (ValueError, TypeError):
raise BadRequestError('Start-after date value was not valid')
now = datetime.datetime.now()
if self.schedule.expires and self.schedule.expires < now:
raise BadRequestError('Date of future task is in the past. '
'Please contact Marty McFly')
if self.schedule.start_after and self.schedule.start_after < now:
raise BadRequestError('Date of future task is in the past. '
'Please contact Marty McFly')
# Schedule selectors pre-parsing.
try:
self._update__preparse_machines(auth_context, kwargs)
except MistError as exc:
log.error("Error while updating schedule %s: %r",
self.schedule.id, exc)
raise
except Exception as exc:
log.exception("Error while preparsing kwargs on update %s",
self.schedule.id)
raise InternalServerError(exc=exc)
action = kwargs.pop('action', '')
if action:
self.schedule.task_type = schedules.ActionTask(action=action)
elif script_id:
self.schedule.task_type = schedules.ScriptTask(
script_id=script_id, params=kwargs.pop('params', ''))
schedule_type = kwargs.pop('schedule_type', '')
if (schedule_type == 'crontab' or
isinstance(self.schedule.schedule_type, schedules.Crontab)):
schedule_entry = kwargs.pop('schedule_entry', {})
if schedule_entry:
for k in schedule_entry:
if k not in ['minute', 'hour', 'day_of_week',
'day_of_month', 'month_of_year']:
raise BadRequestError("Invalid key given: %s" % k)
self.schedule.schedule_type = schedules.Crontab(
**schedule_entry)
elif (schedule_type == 'interval' or
type(self.schedule.schedule_type) == schedules.Interval):
schedule_entry = kwargs.pop('schedule_entry', {})
if schedule_entry:
for k in schedule_entry:
if k not in ['period', 'every']:
raise BadRequestError("Invalid key given: %s" % k)
self.schedule.schedule_type = schedules.Interval(
**schedule_entry)
elif (schedule_type in ['one_off', 'reminder'] or
type(self.schedule.schedule_type) == schedules.OneOff):
# implements Interval under the hood
future_date = kwargs.pop('schedule_entry', '')
if future_date:
try:
if isinstance(future_date, int):
if future_date > 5000000000: # Timestamp is in millis
future_date = future_date / 1000
future_date = datetime.datetime.fromtimestamp(
future_date)
else:
future_date = datetime.datetime.strptime(
future_date, '%Y-%m-%d %H:%M:%S')
except (ValueError, TypeError):
raise BadRequestError('Date value was not valid')
if future_date < now:
raise BadRequestError(
'Date of future task is in the past. '
'Please contact Marty McFly')
delta = future_date - now
notify_msg = kwargs.get('notify_msg', '')
if schedule_type == 'reminder':
self.schedule.schedule_type = schedules.Reminder(
period='seconds',
every=delta.seconds,
entry=future_date,
message=notify_msg)
else:
self.schedule.schedule_type = schedules.OneOff(
period='seconds',
every=delta.seconds,
entry=future_date)
self.schedule.max_run_count = self.schedule.max_run_count or 1
notify = kwargs.pop('notify', 0)
if notify:
_delta = datetime.timedelta(0, notify)
notify_at = future_date - _delta
notify_at = notify_at.strftime('%Y-%m-%d %H:%M:%S')
params = {
'action': 'notify',
'schedule_type': 'reminder',
'description': 'Machine expiration reminder',
'task_enabled': True,
'schedule_entry': notify_at,
'selectors': kwargs.get('selectors'),
'notify_msg': notify_msg
}
name = self.schedule.name + '-reminder'
if self.schedule.reminder:
self.schedule.reminder.delete()
from mist.api.schedules.models import Schedule
self.schedule.reminder = Schedule.add(
auth_context, name, **params)
# set schedule attributes
        kwargs.pop('selectors', None)
for key, value in kwargs.items():
if key in self.schedule._fields:
setattr(self.schedule, key, value)
try:
self.schedule.save()
except me.ValidationError as e:
log.error("Error updating %s: %s", self.schedule.name,
e.to_dict())
raise BadRequestError({"msg": str(e), "errors": e.to_dict()})
except me.NotUniqueError as exc:
log.error("Schedule %s not unique error: %s", self.schedule, exc)
raise ScheduleNameExistsError()
except me.OperationError:
raise ScheduleOperationError()
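    # Usage sketch (hypothetical payload; `ctl` stands for a controller
    # instance obtained elsewhere, and the call signature is assumed).
    # A crontab update only accepts the whitelisted schedule_entry keys
    # checked above:
    #
    #   ctl.update(auth_context,
    #              schedule_type='crontab',
    #              schedule_entry={'minute': '0', 'hour': '*/6'},
    #              action='stop')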
def _update__preparse_machines(self, auth_context, kwargs):
"""Preparse machines arguments to `self.update`
This is called by `self.update` when adding a new schedule,
in order to apply pre processing to the given params. Any subclass
that requires any special pre processing of the params passed to
`self.update`, SHOULD override this method.
Params:
kwargs: A dict of the keyword arguments that will be set as attributes
to the `Schedule` model instance stored in `self.schedule`.
This method is expected to modify `kwargs` in place and set the
specific field of each scheduler.
Subclasses MAY override this method.
"""
sel_cls = {'tags': TaggingSelector,
'machines': GenericResourceSelector,
'field': FieldSelector,
'age': MachinesAgeSelector}
if kwargs.get('selectors'):
self.schedule.selectors = []
for selector in kwargs.get('selectors', []):
if selector.get('type') not in sel_cls:
                    raise BadRequestError("Invalid selector type: %s" %
                                          selector.get('type'))
if selector['type'] == 'field':
if selector['field'] not in ('created', 'state',
'cost__monthly'):
                        raise BadRequestError("Invalid selector field: %s" %
                                              selector['field'])
sel = sel_cls[selector.get('type')]()
sel.update(**selector)
self.schedule.selectors.append(sel)
action = kwargs.get('action')
# check permissions
check = False
for selector in self.schedule.selectors:
if selector.ctype == 'machines':
for mid in selector.ids:
try:
machine = Machine.objects.get(id=mid,
state__ne='terminated')
except Machine.DoesNotExist:
                        raise NotFoundError(
                            'Machine does not exist or is terminated')
# SEC require permission READ on cloud
auth_context.check_perm("cloud", "read", machine.cloud.id)
if action and action not in ['notify']:
# SEC require permission ACTION on machine
auth_context.check_perm("machine", action, mid)
else:
# SEC require permission RUN_SCRIPT on machine
auth_context.check_perm("machine", "run_script", mid)
check = True
elif selector.ctype == 'tags':
if action and action not in ['notify']:
# SEC require permission ACTION on machine
auth_context.check_perm("machine", action, None)
else:
# SEC require permission RUN_SCRIPT on machine
auth_context.check_perm("machine", "run_script", None)
check = True
if not check:
            raise BadRequestError("Specify at least one machine id or tag")
return
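    # Selectors payload sketch (ids and extra keys are illustrative
    # assumptions about the selector models). Each entry's 'type' must be a
    # key of `sel_cls` above, and 'field' selectors are limited to
    # created/state/cost__monthly:
    #
    #   kwargs['selectors'] = [
    #       {'type': 'machines', 'ids': ['5c3a2b1d0e9f8a7b6c5d4e3f']},
    #       {'type': 'field', 'field': 'state', 'value': 'running'},
    #   ]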
# vexmpp/stanzas.py (from nicfit/vexmpp, MIT license)
# -*- coding: utf-8 -*-
import uuid
import functools
from copy import deepcopy
from lxml import etree
from .namespaces import (XML_NS_URI, STREAM_NS_URI,
CLIENT_NS_URI, SERVER_NS_URI,
STANZA_ERROR_NS_URI, STREAM_ERROR_NS_URI)
from .jid import Jid
XML_LANG = "{%s}lang" % XML_NS_URI
STANZA_ERROR_TAG = "{%s}error" % STANZA_ERROR_NS_URI
STREAM_ERROR_TAG = "{%s}error" % STREAM_ERROR_NS_URI
class ElementWrapper:
_NEXT_ID = 1
_UUID = str(uuid.uuid4()).split("-")[0]
def __init__(self, xml):
if isinstance(xml, ElementWrapper):
self.xml = xml.xml
else:
self.xml = xml
def _getChildText(self, child):
e = self.xml.xpath("child::%s" % child)
return e[0].text if e else None
def _setChildText(self, child, s):
matches = self.xml.xpath("child::%s" % child)
e = matches[0] if matches else None
if s is None:
if e is not None:
self.xml.remove(e)
return
if e is None:
e = etree.Element(child)
self.xml.append(e)
e.text = s
def toXml(self, pprint=False, encoding="utf-8"):
return etree.tostring(self.xml, pretty_print=pprint, encoding=encoding)
@property
def name(self):
return self.xml.tag
@property
def type(self):
return self.get("type")
@type.setter
def type(self, t):
self.set("type", t)
@property
def id(self):
return self.get("id")
@id.setter
def id(self, i):
self.set("id", i)
def setId(self, prefix=None):
id_str = ""
if prefix and ':' in prefix:
raise ValueError("Prefix cannot contain ':'")
elif prefix:
id_str += "%s:" % prefix
id_str += "%s-" % ElementWrapper._UUID
id_str += str(ElementWrapper._NEXT_ID)
ElementWrapper._NEXT_ID += 1
self.id = id_str
def x(self, ns):
x = self.getChild("x", ns)
return ElementWrapper(x) if (x is not None) else None
def getChild(self, name, ns):
child = self.xml.find("{%s}%s" % (ns, name))
return child
# ----- etree Element interface begin -----
def find(self, *args, **kwargs):
e = self.xml.find(*args, **kwargs)
return ElementWrapper(e) if e is not None else None
def xpath(self, *args, **kwargs):
matches = self.xml.xpath(*args, **kwargs)
retval = []
for m in matches:
if isinstance(m, str):
retval.append(m)
else:
retval.append(ElementWrapper(m))
return retval
def get(self, key, default=None, as_jid=False):
value = self.xml.get(key, default=default)
if value:
return value if not as_jid else Jid(value)
else:
return None
def set(self, attr, s):
if not s:
if attr in self.xml.attrib:
del self.xml.attrib[attr]
else:
if isinstance(s, Jid):
s = s.full
self.xml.attrib[attr] = s
def append(self, child_elem):
if isinstance(child_elem, ElementWrapper):
child_elem = child_elem.xml
return self.xml.append(child_elem)
def remove(self, child_elem):
if isinstance(child_elem, ElementWrapper):
child_elem = child_elem.xml
return self.xml.remove(child_elem)
def findtext(self, *args, **kwargs):
return self.xml.findtext(*args, **kwargs)
def findall(self, *args, **kwargs):
all_ = self.xml.findall(*args, **kwargs)
return [ElementWrapper(e) for e in all_]
def __iter__(self):
return iter(self.xml)
@property
def attrib(self):
return self.xml.attrib
@property
def text(self):
return self.xml.text
@text.setter
def text(self, txt):
self.xml.text = txt
@property
def tag(self):
return self.xml.tag
def clear(self):
return self.xml.clear()
def getchildren(self):
return self.xml.getchildren()
# ----- etree Element interface end -----
@staticmethod
def _makeTagName(tag, ns):
return "{%s}%s" % (ns, tag)
def appendChild(self, name, ns=None):
if not ns:
nsmap = self.xml.nsmap
# None represents the unprefixed default namespace. Top-level stanza
# types don't have this.
ns = nsmap[None] if None in nsmap else None
else:
nsmap = {None: ns}
c = etree.Element("{%s}%s" % (ns, name), nsmap=nsmap)
self.xml.append(c)
return ElementWrapper(c)
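# Usage sketch for ElementWrapper (values are illustrative):
#
#   elem = ElementWrapper(etree.Element("item"))
#   child = elem.appendChild("body", ns="jabber:client")
#   child.text = "hello"
#   print(elem.toXml(pprint=True).decode())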
class Stanza(ElementWrapper):
XPATH = (None, None)
TYPE_GET = "get"
TYPE_SET = "set"
TYPE_ERROR = "error"
TYPE_RESULT = "result"
def __init__(self, tag=None, nsmap=None, xml=None, attrs=None):
if xml is None and tag:
xml = etree.Element(tag, nsmap=nsmap)
elif xml is None:
raise ValueError("'tag' or 'xml' argument is required")
super().__init__(xml)
for name, value in (attrs or {}).items():
self.set(name, value)
def _initAttributes(self, to=None, frm=None, type=None, id=None):
if to:
self.to = to
if frm:
self.frm = frm
if type:
self.type = type
if id:
self.id = id
@property
def to(self):
return self.get("to", as_jid=True)
@to.setter
def to(self, j):
self.set("to", j)
@property
def frm(self):
return self.get("from", as_jid=True)
@frm.setter
def frm(self, j):
self.set("from", j)
@property
def error(self):
from . import errors
error = self.xml.xpath("/*/error")
if error:
return errors.makeStanzaError(error[0])
return None
@error.setter
def error(self, err):
from . import errors
curr = self.xml.xpath("/*/error")
if curr:
self.xml.remove(curr[0])
if err is not None:
if not isinstance(err, errors.StanzaError):
raise ValueError("error attribute must be of type StanzaError")
self.xml.append(err.xml)
def swapToFrom(self):
tmp_to = self.to
tmp_from = self.frm
if tmp_from:
self.to = tmp_from
if tmp_to:
self.frm = tmp_to
def errorResponse(self, err):
err_stanza = deepcopy(self)
err_stanza.type = "error"
for c in err_stanza.xml.getchildren():
err_stanza.xml.remove(c)
err_stanza.error = err
err_stanza.swapToFrom()
return err_stanza
def resultResponse(self, clear=False):
res_stanza = deepcopy(self)
res_stanza.type = "result"
res_stanza.error = None
res_stanza.swapToFrom()
if clear:
for c in res_stanza.xml.getchildren():
res_stanza.xml.remove(c)
return res_stanza
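# Error/result round-trip sketch. `some_stanza_error` stands for an
# errors.StanzaError instance from the sibling `errors` module:
#
#   iq = Iq(to=Jid("user@example.com"), type="get",
#           request=("query", "jabber:iq:version"))
#   reply = iq.resultResponse()      # to/from swapped, type="result"
#   err_reply = iq.errorResponse(some_stanza_error)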
class StreamHeader(Stanza):
XPATH = ("/stream:stream", {"stream": STREAM_NS_URI})
def __init__(self, ns=CLIENT_NS_URI, to=None, frm=None, version="1.0",
lang="en", id=None, xml=None):
if xml is not None:
assert(xml.tag == "{%s}stream" % STREAM_NS_URI)
assert(xml.nsmap["stream"] == STREAM_NS_URI)
assert(xml.nsmap[None] in [CLIENT_NS_URI, SERVER_NS_URI])
super().__init__(xml=xml)
else:
assert(ns in [CLIENT_NS_URI, SERVER_NS_URI])
super().__init__("{%s}stream" % STREAM_NS_URI,
nsmap={"stream": STREAM_NS_URI, None: ns})
self._initAttributes(to=to, frm=frm, id=id)
self.version = version
self.lang = lang
@property
def version(self):
return self.get("version")
@version.setter
def version(self, v):
self.set("version", v)
@property
def lang(self):
return self.get(XML_LANG)
@lang.setter
def lang(self, l):
self.set(XML_LANG, l)
@property
def defaultNamespace(self):
return self.xml.nsmap[None]
def toXml(self, pprint=False, encoding="utf-8"):
# Special serialization since it must be an open tag.
header = u"<stream:stream xmlns:stream='%s' xmlns='%s' " % \
(STREAM_NS_URI, self.defaultNamespace)
if self.lang:
header += u"xml:lang='%s' " % self.lang
if self.version:
header += "version='%s'" % self.version
if self.to:
header += " to='%s'" % self.to.full
if self.frm:
header += " from='%s'" % self.frm.full
if self.id is not None:
header += " id='%s'" % self.id
header += ">\n"
return header.encode(encoding)
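# Serialization note: StreamHeader.toXml deliberately emits an *unclosed*
# opening tag, since <stream:stream> stays open for the life of the
# connection (hypothetical jid):
#
#   hdr = StreamHeader(to=Jid("example.com"))
#   hdr.toXml()  # b"<stream:stream ... version='1.0' to='example.com'>\n"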
class StreamFeatures(Stanza):
XPATH = ("/stream:features", {"stream": STREAM_NS_URI})
def __init__(self, xml=None):
if xml is not None:
assert(xml.tag == "{%s}features" % STREAM_NS_URI)
assert(xml.nsmap["stream"] == STREAM_NS_URI)
super().__init__(xml=xml)
else:
super().__init__("{%s}features" % STREAM_NS_URI,
nsmap={"stream": STREAM_NS_URI})
def getFeature(self, name, ns):
for feature in self.xml:
if feature.tag == "{%s}%s" % (ns, name):
return feature
return None
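# Feature lookup sketch, e.g. probing for STARTTLS after <stream:features/>
# (namespace per RFC 6120):
#
#   tls = features.getFeature("starttls", "urn:ietf:params:xml:ns:xmpp-tls")
#   if tls is not None:
#       pass  # proceed with TLS negotiation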
class StreamError(Stanza, RuntimeError):
XPATH = ("/stream:error", {"stream": STREAM_NS_URI})
def __init__(self, error=None, xml=None):
assert(error is not None or xml is not None)
if xml is not None:
assert(xml.tag == "{%s}error" % STREAM_NS_URI)
assert(xml.nsmap["stream"] == STREAM_NS_URI)
super().__init__(xml=xml)
else:
super().__init__("{%s}error" % STREAM_NS_URI,
nsmap={"stream": STREAM_NS_URI})
self.error = error
@property
def error(self):
from . import errors
return errors.makeStreamError(self.xml)
@error.setter
def error(self, err):
from . import errors
        if not isinstance(err, errors.StreamError):
            raise ValueError("error attribute must be of type "
                             "vexmpp.errors.StreamError")
self.xml = err.xml
class Iq(Stanza):
XPATH = ("/iq", None)
def __init__(self, to=None, frm=None, type="get", id=None, request=None,
xml=None, id_prefix=None, attrs=None):
if xml is not None:
assert(xml.tag == "iq")
assert(None not in xml.nsmap)
super().__init__(xml=xml, attrs=attrs)
else:
super().__init__("iq", attrs=attrs)
self._initAttributes(to=to, frm=frm, id=id, type=type)
            if id is None:
                # Iq stanzas require an id, so generate one if none was given
                self.setId(prefix=id_prefix)
if request:
name, ns = request
self.xml.append(etree.Element("{%s}%s" % (ns, name),
nsmap={None: ns}))
@property
def request(self):
for e in self.xml.getchildren():
if e.tag != STANZA_ERROR_TAG:
return ElementWrapper(e)
return None
query = request
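# Iq construction sketch (hypothetical target). The `request` tuple appends
# an empty, namespaced child that the `request` property then returns:
#
#   iq = Iq(to=Jid("example.com"), type="get",
#           request=("query", "http://jabber.org/protocol/disco#info"))
#   iq.request.name  # '{http://jabber.org/protocol/disco#info}query'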
@functools.total_ordering
class Presence(Stanza):
XPATH = ("/presence", None)
TYPE_AVAILABLE = 'available'
TYPE_UNAVAILABLE = 'unavailable'
TYPE_SUBSCRIBE = 'subscribe'
TYPE_SUBSCRIBED = 'subscribed'
TYPE_UNSUBSCRIBE = 'unsubscribe'
TYPE_UNSUBSCRIBED = 'unsubscribed'
TYPE_PROBE = 'probe'
SHOW_AWAY = 'away'
SHOW_CHAT = 'chat'
SHOW_DND = 'dnd'
SHOW_XA = 'xa'
ORDERED_SHOWS = [SHOW_CHAT, None, SHOW_AWAY, SHOW_XA, SHOW_DND]
def __init__(self, to=None, frm=None, type=TYPE_AVAILABLE, priority=None,
show=None, status=None, xml=None, attrs=None):
if xml is not None:
assert(xml.tag == "presence")
assert(None not in xml.nsmap)
super().__init__(xml=xml, attrs=attrs)
else:
super().__init__("presence", attrs=attrs)
self._initAttributes(to=to, frm=frm, type=type)
self.priority = priority
self.show = show
self.status = status
def __gt__(self, rhs):
        # total_ordering would derive __gt__ as "not __lt__ and not __eq__";
        # without a value-based __eq__, two equivalent presences would
        # wrongly compare as greater, so implement __gt__ explicitly.
if self < rhs:
return False
else:
if self.priority == rhs.priority and self.show == rhs.show:
return False
else:
return True
    def __lt__(self, rhs):
        # A missing <priority/> means priority 0 (RFC 6121); comparing
        # None with an int would raise TypeError on Python 3.
        self_prio = self.priority if self.priority is not None else 0
        rhs_prio = rhs.priority if rhs.priority is not None else 0
        if self_prio < rhs_prio:
            return True
        elif self_prio > rhs_prio:
            return False
else:
if (self.ORDERED_SHOWS.index(self.show) <=
self.ORDERED_SHOWS.index(rhs.show)):
return False
else:
return True
@property
def type(self):
t = self.get("type")
return t if t else Presence.TYPE_AVAILABLE
@type.setter
def type(self, t):
if t == Presence.TYPE_AVAILABLE:
if "type" in self.xml.attrib:
del self.xml.attrib["type"]
else:
self.set("type", t)
@property
def priority(self):
t = self._getChildText("priority")
return int(t) if t is not None else None
@priority.setter
def priority(self, i):
if i is None:
self._setChildText("priority", None)
else:
i = int(i)
            if -128 <= i <= 127:
                self._setChildText("priority", str(i))
            else:
                raise ValueError("priority out of range: "
                                 "must be between -128 and 127")
@property
def show(self):
return self._getChildText("show")
@show.setter
def show(self, s):
if s not in self.ORDERED_SHOWS:
raise ValueError("Invald show: %s" % s)
self._setChildText("show", s)
@property
def status(self):
return self._getChildText("status")
@status.setter
def status(self, s):
self._setChildText("status", s)
class Message(Stanza):
XPATH = ("/message", None)
TYPE_CHAT = "chat"
TYPE_ERROR = "error"
TYPE_GC = "groupchat"
TYPE_HEADLINE = "headline"
TYPE_NORMAL = "normal"
def __init__(self, to=None, frm=None, type=TYPE_CHAT, subject=None,
body=None, thread=None, xml=None, attrs=None):
if xml is not None:
assert(xml.tag == "message")
assert(None not in xml.nsmap)
super().__init__(xml=xml, attrs=attrs)
else:
super().__init__("message", attrs=attrs)
self._initAttributes(to=to, frm=frm, type=type)
self.subject = subject
self.body = body
self.thread = thread
@property
def type(self):
t = self.get("type")
return t if t else Message.TYPE_NORMAL
@type.setter
def type(self, t):
if not t or t == Message.TYPE_NORMAL:
if "type" in self.xml.attrib:
del self.xml.attrib["type"]
else:
self.set("type", t)
@property
def subject(self):
return self._getChildText("subject")
@subject.setter
def subject(self, s):
self._setChildText("subject", s)
@property
def body(self):
return self._getChildText("body")
@body.setter
def body(self, s):
self._setChildText("body", s)
@property
def thread(self):
return self._getChildText("thread")
@thread.setter
def thread(self, s):
self._setChildText("thread", s)
def makeStanza(elem):
if elem.tag == "presence":
return Presence(xml=elem)
elif elem.tag == "message":
return Message(xml=elem)
elif elem.tag == "iq":
return Iq(xml=elem)
elif elem.tag == "{%s}stream" % STREAM_NS_URI:
return StreamHeader(xml=elem)
elif elem.tag == "{%s}features" % STREAM_NS_URI:
return StreamFeatures(xml=elem)
elif elem.tag == "{%s}error" % STREAM_NS_URI:
return StreamError(xml=elem)
else:
return Stanza(xml=elem)
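# Dispatch sketch: makeStanza maps a freshly parsed element to its wrapper
# class (stanzas are matched by their unqualified tag, hence the bare
# "message"/"presence"/"iq" checks above):
#
#   elem = etree.fromstring(b"<message><body>hi</body></message>")
#   makeStanza(elem)  # -> Message instance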