hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
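As a quick sanity check on the layout above, here is a minimal sketch of loading and filtering rows with this schema, assuming the rows are stored in a Parquet file. The file name and the filter thresholds are hypothetical illustrations, not part of the dataset.

import pandas as pd

# Hypothetical file name; any Parquet dump with the columns above works.
df = pd.read_parquet("python_code_quality_signals.parquet")

# Confirm a few dtypes against the schema listing.
print(df.dtypes[["hexsha", "size", "content", "avg_line_length"]])

# Illustrative filter on a handful of the quality signals (thresholds are made up).
mask = (
    (df["max_line_length"] <= 1000)
    & (df["alphanum_fraction"] > 0.25)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
)
print(mask.sum(), "of", len(df), "rows pass the filter")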
0775eae440b3ed8a8de73f26dfbbc57343a6323d
6,670
py
Python
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
text_selection/analyse_zenon_scrape.py
dainst/chronoi-corpus-processing
7f508a7572e1022c4c88d1477db029e6619a1f0c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys

from collections import defaultdict


def filter_records_without_url(records: []) -> []:
    return [r for r in records if any(r.get("urls"))]


def build_furl(url: str) -> furl.furl:
    try:
        furl_obj = furl.furl(url)
        if not furl_obj.host:
            furl_obj = furl.furl("http://" + url)
        return furl_obj
    except ValueError:
        return furl.furl("https://invalid-url.xyz")


def determine_host(url: str) -> str:
    furl_obj = build_furl(url)
    return re.sub(r"^www[0-9]*\.", "", furl_obj.host)


def build_hosts_to_urls(records: []) -> {str: {str}}:
    result = defaultdict(set)
    for record in records:
        for url in record.get("urls"):
            host = determine_host(url.get("url"))
            result[host].add(url.get("url"))
    return result


def print_most_common_url_hosts(hosts_to_urls: {}, n: int):
    hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]
    hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))
    for host in hosts:
        print("% 6d\t%s" % (len(hosts_to_urls[host]), host))


def print_urls_for_host(hosts_to_urls: {}, host: str):
    urls = hosts_to_urls.get(host, [])
    for url in urls:
        print(url)
    if not any(urls):
        print(f"No urls for host: '{host}'", file=sys.stderr)


def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):
    # It should be OK to only pattern-match the hosts here...
    ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)}
    ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)}
    ids_both = ids1.intersection(ids2)
    for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items():
        print(f"{host}: {number}")


def record_has_matching_url(record: {}, pattern: str) -> bool:
    return any(record_get_urls_matching(record, pattern))


def record_get_urls_matching(record: {}, pattern: str) -> [{}]:
    result = []
    for url in record.get("urls"):
        if any(re.findall(pattern, url.get("url"))):
            result.append(url)
    return result


def record_remove_urls_not_matching(record: {}, pattern: str):
    record["urls"] = record_get_urls_matching(record, pattern)


def earliest_year(year_strings: [str]) -> str:
    years = []
    for year_s in year_strings:
        try:
            years.append(int(year_s))
        except ValueError:
            print(f"Not a string that is a year: '{year_s}'", file=sys.stderr)
            continue
    return str(sorted(years)[0]) if any(years) else ""


def main(args: argparse.Namespace):
    with open(args.scrape_file, "r") as file:
        records = json.load(file)
    records = filter_records_without_url(records)

    # filter urls by the user-provided filter list
    if args.desc_filters:
        with open(args.desc_filters, "r") as file:
            filters = file.read().splitlines()
        for record in records:
            record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters]
        records = filter_records_without_url(records)

    # print unique hosts or urls, then exit
    if args.print_host_urls or args.print_common_hosts >= 0:
        hosts_to_urls = build_hosts_to_urls(records)
        if args.print_common_hosts >= 0:
            print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)
        elif args.print_host_urls:
            print_urls_for_host(hosts_to_urls, host=args.print_host_urls)
        exit(0)

    # check in how many records the two given hosts co-occur, then exit
    if args.patterns_cooccur:
        host1, host2 = args.patterns_cooccur.split(",")
        print_how_often_url_patterns_cooccur(records, host1, host2)
        exit(0)

    # do some selection based on a url pattern, remove all non-matching urls from the record
    if args.select_by_url:
        pattern = args.select_by_url
        records = [r for r in records if record_has_matching_url(r, pattern)]
        for record in records:
            record_remove_urls_not_matching(record, pattern)

    # sort the records by id, to be extra sure that we get the same order every time this is called
    # print each line as a csv column
    records = sorted(records, key=lambda r: r.get("id"))
    writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL)
    for record in records:
        to_print = []
        if args.print_id:
            to_print.append(record.get("id", ""))
        if args.print_url:
            to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "")
        if args.print_pub_date:
            to_print.append(earliest_year(record.get("publicationDates", [])))
        if args.print_languages:
            to_print.append("|".join(record.get("languages", [])))
        writer.writerow(to_print)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Process a file with zenon json records and print some information about them.")
    parser.add_argument("scrape_file", type=str,
                        help="The file that contains the zenon dumps as json.")
    parser.add_argument("--desc-filters", type=str,
                        help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
    # these are arguments to print some specific information
    parser.add_argument("--print-common-hosts", type=int, default=-1,
                        help="Print hosts that appear more than n times in the records urls, then exit.")
    parser.add_argument("--print-host-urls", type=str,
                        help="Print all urls for the host, then exit.")
    parser.add_argument("--patterns-cooccur", type=str,
                        help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
    # these are meant to work together: select by a url pattern, then print information about the records
    parser.add_argument("--select-by-url", type=str,
                        help="Give a pattern for a url to select records by.")
    parser.add_argument("--print-url", action="store_true",
                        help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
    parser.add_argument("--print-pub-date", action="store_true",
                        help="Print the earliest publication year for each of the selected records.")
    parser.add_argument("--print-id", action="store_true",
                        help="Print the selected records' ids")
    parser.add_argument("--print-languages", action="store_true",
                        help="Print the selected records' languages")
    main(parser.parse_args())
40.670732
192
0.669715
990
6,670
4.346465
0.218182
0.019521
0.030676
0.030676
0.254706
0.188706
0.142226
0.078085
0.044155
0.027888
0
0.005669
0.206597
6,670
163
193
40.920245
0.807445
0.092354
0
0.137931
0
0.025862
0.189475
0
0
0
0
0
0
1
0.103448
false
0
0.060345
0.017241
0.232759
0.275862
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07773d417997f41786d66f2eb9103478a102aad8
2,578
py
Python
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
1
2019-12-20T14:13:27.000Z
2019-12-20T14:13:27.000Z
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
null
null
null
src/python/twitter/pants/targets/java_antlr_library.py
wfarner/commons
42988a7a49f012665174538cca53604c7846ee86
[ "Apache-2.0" ]
1
2019-12-20T14:13:29.000Z
2019-12-20T14:13:29.000Z
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================

__author__ = 'Brian Larson'

from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary


class JavaAntlrLibrary(ExportableJvmLibrary):
    """Defines a target that builds java stubs from an Antlr grammar file."""

    def __init__(self,
                 name,
                 sources,
                 provides=None,
                 dependencies=None,
                 excludes=None,
                 compiler='antlr3'):
        """name: The name of this module target, addressable via pants via the
             portion of the spec following the colon
           sources: A list of paths containing the Antlr source files this
             module's jar is compiled from
           provides: An optional Dependency object indicating the ivy artifact
             to export
           dependencies: An optional list of Dependency objects specifying the
             binary (jar) dependencies of this module.
           excludes: An optional list of dependency exclude patterns to filter
             all of this module's transitive dependencies against.
           compiler: The name of the compiler used to compile the ANTLR files.
             Currently only supports 'antlr3' and 'antlr4'"""

        ExportableJvmLibrary.__init__(self, name, sources, provides, dependencies, excludes)
        self.add_labels('codegen')

        if compiler not in ['antlr3', 'antlr4']:
            raise ValueError("Illegal value for 'compiler': {}".format(compiler))
        self.compiler = compiler

    def _as_jar_dependency(self):
        return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
44.448276
100
0.588053
276
2,578
5.413043
0.518116
0.046854
0.024096
0.021419
0.07095
0
0
0
0
0
0
0.00666
0.242824
2,578
57
101
45.22807
0.758709
0.61443
0
0.181818
0
0
0.073955
0
0
0
0
0
0
1
0.090909
false
0
0.045455
0.045455
0.227273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0777dbaeb86425a933c2accd81e0d8dadd226bab
3,092
py
Python
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
bigml/tests/create_pca_steps_bck.py
devs-cloud/python_ml
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time
import json
import os
from datetime import datetime, timedelta
from world import world
from nose.tools import eq_, assert_less

from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status

from read_pca_steps import i_get_the_pca


#@step(r'the pca name is "(.*)"')
def i_check_pca_name(step, name):
    pca_name = world.pca['name']
    eq_(name, pca_name)


#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_from_dataset(step):
    dataset = world.dataset.get('resource')
    resource = world.api.create_pca(dataset, {'name': 'new PCA'})
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.pca = resource['object']
    world.pcas.append(resource['resource'])


#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_with_params(step, params):
    params = json.loads(params)
    dataset = world.dataset.get('resource')
    resource = world.api.create_pca(dataset, params)
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.pca = resource['object']
    world.pcas.append(resource['resource'])


def i_create_a_pca(step):
    i_create_a_pca_from_dataset(step)


#@step(r'I update the PCA name to "(.*)"$')
def i_update_pca_name(step, name):
    resource = world.api.update_pca(world.pca['resource'],
                                    {'name': name})
    world.status = resource['code']
    eq_(world.status, HTTP_ACCEPTED)
    world.location = resource['location']
    world.pca = resource['object']


#@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)')
def wait_until_pca_status_code_is(step, code1, code2, secs):
    start = datetime.utcnow()
    delta = int(secs) * world.delta
    pca_id = world.pca['resource']
    i_get_the_pca(step, pca_id)
    status = get_status(world.pca)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        assert_less(datetime.utcnow() - start, timedelta(seconds=delta))
        i_get_the_pca(step, pca_id)
        status = get_status(world.pca)
    eq_(status['code'], int(code1))


#@step(r'I wait until the PCA is ready less than (\d+)')
def the_pca_is_finished_in_less_than(step, secs):
    wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)
32.893617
83
0.698254
465
3,092
4.48172
0.275269
0.023033
0.023033
0.03167
0.408829
0.37428
0.37428
0.307582
0.263916
0.263916
0
0.007495
0.180142
3,092
93
84
33.247312
0.814596
0.284605
0
0.339286
0
0
0.060758
0
0
0
0
0
0.035714
1
0.125
false
0
0.214286
0
0.339286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0778705078ff1aa67fe1ad3d2a88bc9581c13e09
2,331
py
Python
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
4
2021-06-14T10:37:58.000Z
2021-12-30T17:49:17.000Z
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
null
null
null
Chapter 8/sandwich-maker.py
ostin-r/automate-boring-stuff-solutions
78f0a2981e6520ff2907285e666168a0f35eba02
[ "FTL" ]
1
2021-07-29T15:26:54.000Z
2021-07-29T15:26:54.000Z
'''
Austin Richards 2/20/21

sandwich-maker.py uses pyinputplus to validate
user input for sandwich preferences
'''
import pyinputplus as ip


def get_cost(food_name):
    '''gets the cost of items in sandwich_builder'''
    food_dict = {
        'sourdough': 1.75,
        'rye': 2.0,
        'wheat': 1.50,
        'white': 1.25,
        'chicken': 2.0,
        'turkey': 1.50,
        'ham': 2.0,
        'tofu': 1.25,
        'cheddar': 2.0,
        'swiss': 2.5,
        'mozzarella': 2.5,
        'yes': 0.25,  # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
        'no': 0       # saying no to a topping costs nothing
    }
    return food_dict[food_name]


def sandwich_builder():
    print('Enter your sandwich preferences below:\n')

    bread_prompt = 'What bread type would you like? (sourdough, rye, wheat, or white)\n'
    bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)

    protein_prompt = 'What type of protein would you like? (chicken, turkey, ham, or tofu)\n'
    protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)

    mayo = ip.inputYesNo(prompt='Would you like mayo?\n')
    mustard = ip.inputYesNo(prompt='Would you like mustard?\n')
    tomato = ip.inputYesNo(prompt='Would you like tomato?\n')
    lettuce = ip.inputYesNo(prompt='Would you like lettuce?\n')

    like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\n')

    if like_cheese == 'yes':  # was `is 'yes'`; identity comparison with a string literal is a bug
        cheese_prompt = 'What kind of cheese would you like? (cheddar, swiss, mozzarella)\n'
        cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)

        sandwich = []
        cost = 0
        sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])
        for item in sandwich:
            cost += get_cost(item)
    else:
        sandwich = []
        cost = 0
        sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])
        for item in sandwich:
            cost += get_cost(item)

    how_many_prompt = 'How many sandwiches would you like?\n'
    how_many = ip.inputInt(min=1, prompt=how_many_prompt)

    print('\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))


sandwich_builder()
33.3
96
0.62248
316
2,331
4.487342
0.316456
0.044429
0.067701
0.06488
0.232722
0.232722
0.148096
0.148096
0.148096
0.084626
0
0.026166
0.245817
2,331
70
97
33.3
0.780432
0.112398
0
0.170213
0
0
0.275778
0
0
0
0
0
0
1
0.042553
false
0
0.021277
0
0.085106
0.042553
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0778aa1b06b2fda0447a13db0a273ce1b3e6b40f
2,021
py
Python
tests/core/test_headerupdater.py
My-Novel-Management/storybuilderunite
c003d3451e237f574c54a87ea7d4fd8da8e833be
[ "MIT" ]
1
2020-06-18T01:38:55.000Z
2020-06-18T01:38:55.000Z
tests/core/test_headerupdater.py
My-Novel-Management/storybuilder
1f36e56a74dbb55a25d60fce3ce81f3c650f521a
[ "MIT" ]
143
2019-11-13T00:21:11.000Z
2020-08-15T05:47:41.000Z
tests/core/test_headerupdater.py
My-Novel-Management/storybuilderunite
c003d3451e237f574c54a87ea7d4fd8da8e833be
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
'''
HeaderUpdater class test
========================
'''

import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.commands.scode import SCode, SCmd
from builder.containers.chapter import Chapter
from builder.containers.episode import Episode
from builder.containers.scene import Scene
from builder.containers.story import Story
from builder.core import headerupdater as hd


class HeaderUpdaterTest(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        print_testtitle(hd.__name__, 'HeaderUpdater class')

    def test_instance(self):
        tmp = hd.HeaderUpdater()
        self.assertIsInstance(tmp, hd.HeaderUpdater)

    def test_title_of(self):
        data = [
            # (src, expect, exp_opt)
            (True, Story('test',), ('test',), 1),
        ]
        def checker(src, expect, exp_opt):
            tmp = hd.HeaderUpdater()._title_of(src)
            self.assertIsInstance(tmp, SCode)
            self.assertEqual(tmp.cmd, SCmd.TAG_TITLE)
            self.assertEqual(tmp.script, expect)
            self.assertEqual(tmp.option, exp_opt)
        validate_with_fail(self, 'title_of', checker, data)

    def test_outline_of(self):
        data = [
            # (src, expect)
            (True, Story('test', outline='apple'), ('apple',)),
        ]
        def checker(src, expect):
            tmp = hd.HeaderUpdater()._outline_of(src)
            self.assertIsInstance(tmp, SCode)
            self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT)
            self.assertEqual(tmp.script, expect)
        validate_with_fail(self, 'outline_of', checker, data)

    def test_end_of(self):
        data = [
            # (src, expect)
            (True, Chapter('test',), SCmd.END_CHAPTER),
        ]
        validate_with_fail(self, 'end_of',
                lambda src, expect: self.assertEqual(
                    hd.HeaderUpdater()._end_of(src).cmd, expect),
                data)
33.131148
66
0.597724
219
2,021
5.351598
0.269406
0.056314
0.076792
0.033276
0.244881
0.143345
0.104096
0.104096
0.104096
0.104096
0
0.001375
0.280059
2,021
60
67
33.683333
0.804124
0.060861
0
0.159091
0
0
0.036566
0
0
0
0
0
0.204545
1
0.159091
false
0
0.181818
0
0.363636
0.045455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0778ae783c1f5257a96e5e0972a23c96938e6782
682
py
Python
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
1
2019-03-11T19:12:12.000Z
2019-03-11T19:12:12.000Z
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
null
null
null
dotsDB/test_vlen_datasets.py
aernesto/Lab_DotsDB_Utilities
d8458b4126d80daeb5084234889fc6674158ea0f
[ "MIT" ]
1
2019-10-31T20:10:12.000Z
2019-10-31T20:10:12.000Z
import numpy as np
import h5py

filename = "test_vlen_datasets_np_bool.h5"

rows = [np.array([np.True_, np.False_]),
        np.array([np.True_, np.True_, np.False_])]

f = h5py.File(filename, 'x')  # create file, fails if exists

vlen_data_type = h5py.special_dtype(vlen=np.bool_)

dset = f.create_dataset("vlen_matrix", (2,),
                        compression="gzip",
                        compression_opts=9,
                        fletcher32=True,
                        dtype=vlen_data_type)

for r in range(len(rows)):
    dset[r] = rows[r]

f.flush()
f.close()

f = h5py.File(filename, 'r')

dsetr = f["vlen_matrix"]

for r in range(dsetr.shape[0]):
    print(dsetr[r])
22.733333
60
0.590909
98
682
3.918367
0.469388
0.046875
0.0625
0.067708
0.078125
0
0
0
0
0
0
0.01992
0.26393
682
29
61
23.517241
0.74502
0.041056
0
0
0
0
0.087423
0.044479
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077a977fb0ed578109f21b4a8ba0c330e1e23efb
1,441
py
Python
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
weasyl/emailer.py
akash143143/weasyl
be42a2313e657e97c4a48432379e37b6a3d4a4af
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import

import re
from email.mime.text import MIMEText
from smtplib import SMTP

from weasyl import define, macro

EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")


def normalize_address(address):
    """
    Converts an e-mail address to a consistent representation.
    Returns None if the given address is not considered valid.
    """
    address = address.strip()

    if not EMAIL_ADDRESS.match(address):
        return None

    local, domain = address.split("@", 1)
    return "%s@%s" % (local, domain.lower())


def send(mailto, subject, content):
    """Send an e-mail.

    `mailto` must be a normalized e-mail address to send this e-mail to. The
    system email will be designated as the sender.
    """
    message = MIMEText(content.strip())
    message["To"] = mailto
    message["From"] = macro.MACRO_EMAIL_ADDRESS
    message["Subject"] = subject

    # smtp.sendmail() only converts CR and LF (produced by MIMEText and our
    # templates) to CRLF in Python 3. In Python 2, we need this:
    msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string())

    smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp'))

    try:
        smtp.sendmail(
            from_addr=macro.MACRO_EMAIL_ADDRESS,
            to_addrs=[mailto],
            msg=msg_crlf,
        )
    finally:
        smtp.quit()

    define.metric('increment', 'emails')
26.685185
134
0.646079
203
1,441
4.487685
0.477833
0.052689
0.055982
0.019759
0.019759
0.019759
0.019759
0.019759
0
0
0
0.008021
0.221374
1,441
53
135
27.188679
0.803922
0.266482
0
0
0
0.035714
0.112856
0.048086
0
0
0
0
0
1
0.071429
false
0
0.178571
0
0.321429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077afe0d8f015a761ad56ef674705600c184e8fe
1,721
py
Python
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
analysis_functionarcademix.py
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis
e56e0e853aca4367ebf99ae18e920b80f39bd133
[ "MIT" ]
null
null
null
#analysis function for three level game
import statistics

import numpy as np
import matplotlib.pyplot as plt


def stat_analysis(c1, c2, c3):
    #ask question for viewing analysis of game
    analysis = input('\nDo you want to see your game analysis? (Yes/No) ')
    if analysis == 'Yes':
        levels = ['Level 1', 'Level 2', 'Level 3']
        #calculating the score of levels
        l1_score = c1*10
        l2_score = c2*10
        l3_score = c3*10
        level_score = [l1_score, l2_score, l3_score]
        #plot bar chart
        plt.bar(levels, level_score, color='blue', edgecolor='black')
        plt.title('Levelwise Scores', fontsize=16)  #add title
        plt.xlabel('Levels')  #set x-axis label
        plt.ylabel('Scores')  #set y-axis label
        plt.show()
        print('\nDescriptive Statistics of Scores:')
        #find mean value
        print('\nMean: ', statistics.mean(level_score))
        #find median value
        print('\nMedian: ', statistics.median(level_score))
        #Mode calculation
        #create numPy array of values with only one mode
        arr_val = np.array(level_score)
        #find unique values in array along with their counts
        vals, uni_val_counts = np.unique(arr_val, return_counts=True)
        #find mode (the original compared against an undefined `counts`)
        mode_value = np.argwhere(uni_val_counts == np.max(uni_val_counts))
        print('\nMode: ', vals[mode_value].flatten().tolist())
        #find variance
        print('\nVariance: ', np.var(level_score))
        #find standard deviation
        print('\nStandard Deviation: ', statistics.stdev(level_score))
        print('\nGood Bye. See you later!!!')
    elif analysis == 'No':
        print('\nGood Bye. See you later!!!')
    else:
        print('Invalid value entered')


stat_analysis(c1, c2, c3)
30.732143
72
0.613016
224
1,721
4.602679
0.477679
0.067895
0.040737
0.031038
0.081474
0.046557
0
0
0
0
0
0.020651
0.268449
1,721
55
73
31.290909
0.798253
0.210343
0
0.071429
0
0
0.209978
0
0
0
0
0
0
1
0.035714
false
0
0
0
0.035714
0.321429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077b64f9f341be6f03c89ac88afd5ce1383da321
2,246
py
Python
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
3
2022-02-24T15:46:43.000Z
2022-03-30T13:17:03.000Z
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
null
null
null
Hello_Cone.py
TechnoTanuki/Python_BMP
d6f7e7a4b74f7d6e8761d618c156d37c97726038
[ "MIT" ]
null
null
null
notice = """
    Cone Demo
 -----------------------------------
| Copyright 2022 by Joel C. Alcarez |
| [joelalcarez1975@gmail.com]       |
|-----------------------------------|
| We make absolutely no warranty    |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs     |
| to a bitmap file.                 |
 -----------------------------------
"""

from Python_BMP.BITMAPlib import(
        newBMP,
        centercoord,
        plot3Dsolid,
        getRGBfactors,
        rotvec3D,
        conevertandsurface,
        saveBMP
        )

import subprocess as proc
from os import path


def main():
    print(notice)
    imgedt = 'mspaint'  # replace with another editor if Unix
    rootdir = path.dirname(__file__)  # get path of this script
    mx = my = 250  # x=y square bmp
    file = 'HelloCone.bmp'  # some random file name as string
    bmp = newBMP(mx, my, 24)  # RGB bmp
    cenpt = centercoord(bmp)  # helper method to get center of a bitmap
    cf = getRGBfactors()  # color info with presets
    d, translationvector = 400, [0, 0, 200]  # be careful with these variables or object goes offscreen
    isSolid = True  # toggle solid or outline
    showoutline = False  # can show outline even if solid
    cf = getRGBfactors()  # color list
    color = cf['brightyellow']  # color of solid
    outlinecolor = 0  # outline color
    rotation = rotvec3D(25, 240, 70)  # rotation vector (x,y,z) in degrees
    vcen = (1, 0, 0)  # x y z coords
    r = 40  # radius of cone
    zlen = 40  # height of cone
    deganglestep = 5  # how finely we tile flat surfaces around the cone
    obj3D = conevertandsurface(vcen, r, zlen, deganglestep)  # A solid is defined by vertices and surfaces
    plot3Dsolid(bmp, obj3D, isSolid, color,
                showoutline, outlinecolor,
                rotation, translationvector, d, cenpt)
    saveBMP(file, bmp)  # save file
    print('Saved to %s in %s\nAll done close %s to finish' %
          (file, rootdir, imgedt))
    ret = proc.call([imgedt, file])


if __name__ == "__main__":
    main()
38.724138
109
0.548531
250
2,246
4.876
0.592
0.004922
0.032814
0
0
0
0
0
0
0
0
0.02785
0.312556
2,246
57
110
39.403509
0.761658
0.227516
0
0.111111
0
0
0.294508
0.103199
0
0
0
0
0
1
0.018519
false
0
0.055556
0
0.074074
0.037037
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077c2964f05f1e340c5f354633e006236a1d9021
2,001
py
Python
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
analysis/training_curve_6D.py
AndrewKirby2/data_synthesis
656858137a348fd5dcb57bcd04bdfece2b9eac1b
[ "MIT" ]
null
null
null
""" Plot a training curve for the 6D data simulator of CT* """ import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern from sklearn.metrics import mean_squared_error from sklearn.pipeline import Pipeline import sys sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis') from GP_machine_learning.GP_machine_learning_functions import * from regular_array_sampling.functions import regular_array_monte_carlo # create array to store results for plotting rmse = np.ones((25, 2)) noise = 0.01 # create array of sampled regular array layouts #cand_points = regular_array_monte_carlo(10000) # create testing points X_test, y_test = create_testing_points_regular(noise) n = 0 n_target = 0 n_train = 0 while n_train < 200: n_target = 100 +100*n # create training points X_train, y_train, n_train = \ create_training_points_irregular(n_target, noise) # fit GP regression and calculate rmse kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \ + WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1]) pipe = Pipeline([('scaler', StandardScaler()), ('gp', GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=20))]) pipe.fit(X_train, y_train) y_predict = pipe.predict(X_test) mse = mean_squared_error(y_test, y_predict) # report rmse print(n_train, np.sqrt(mse)) rmse[n, 0] = n_train rmse[n, 1] = np.sqrt(mse) n += 1 plt.scatter(rmse[:, 0], rmse[:, 1]) plt.yscale('log') plt.ylim([1e-3, 1e-1]) plt.xlim([0, 200]) plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved') plt.ylabel('RMSE') plt.xlabel('Training points') plt.savefig('analysis/GP_machine_learning_plots/\ gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
34.5
92
0.733633
297
2,001
4.73064
0.40404
0.039146
0.008541
0.008541
0.00427
0
0
0
0
0
0
0.034503
0.15992
2,001
57
93
35.105263
0.801309
0.141929
0
0
0
0
0.089254
0.02525
0
0
0
0
0
1
0
false
0
0.238095
0
0.238095
0.02381
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077cc03c99f16d778bcf96bc07a8e66081bca025
5,210
py
Python
website/raspac.py
tpudlik/RaspAC
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
[ "MIT" ]
28
2015-04-03T05:01:14.000Z
2021-12-31T00:29:40.000Z
website/raspac.py
tpudlik/RaspAC
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
[ "MIT" ]
3
2017-03-20T15:47:24.000Z
2017-05-21T16:07:22.000Z
website/raspac.py
tpudlik/RaspAC
e0a01a8b9123e74f6e4fb53f084e4ddf3ea24677
[ "MIT" ]
8
2015-08-27T07:33:08.000Z
2018-09-27T21:54:49.000Z
import sqlite3
import subprocess, datetime

from flask import Flask, request, session, g, redirect, url_for, \
     abort, render_template, flash
from contextlib import closing

from tquery import get_latest_record
from config import *

app = Flask(__name__)
app.config.from_object(__name__)


# DB helper functions

def connect_db():
    return sqlite3.connect(app.config['DATABASE'])


def init_db():
    """Initializes the sqlite3 database. This function must be imported and
    executed from the Python interpreter before the application is first run."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()


# Auto-open and close DB when serving requests

@app.before_request
def before_request():
    g.db = connect_db()


@app.teardown_request
def teardown_request(exception):
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()


@app.route('/', methods=['GET', 'POST'])
def welcome_page():
    if 'username' in session and session['username']:
        return redirect(url_for('submit_page'))
    error = None
    if request.method == 'POST':  # someone's logging in
        if not request.form['username'] in app.config['USERNAMES']:
            error = 'username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'password'
        else:  # successful login
            session['username'] = request.form['username']
            flash('Hi ' + session['username'] + '!')
            return redirect(url_for('submit_page'))
    return render_template('welcome_page.html', commands=command_history(),
                           error=error, last_record=last_record())


@app.route('/submit', methods=['GET', 'POST'])
def submit_page():
    error = None
    if not session.get('username'):
        abort(401)
    if request.method == 'POST':  # command is being issued to AC
        user_mode = request.form['mode']
        user_temperature = request.form['temperature']
        validation_codes = validate_AC_command(user_mode, user_temperature)
        if (validation_codes['mode_error'] or
                validation_codes['temperature_error']):
            error = validation_codes
        else:
            subprocess.call(['/usr/bin/irsend', 'SEND_ONCE', 'lgac',
                             validation_codes['command']])
            g.db.execute('insert into commands (command, ts, user) values (?, ?, ?)',
                         [validation_codes['command'],
                          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                          session['username']])
            g.db.commit()
            flash('Command submitted')
    return render_template('submit_page.html', commands=command_history(),
                           error=error, last_record=last_record())


@app.route('/logout')
def logout():
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('welcome_page'))


def validate_AC_command(user_mode, user_temperature):
    """Validates and sanitizes user-input command; translates command
    into irsend call."""
    codes = dict()
    if user_mode not in app.config['ACMODES']:
        codes['mode_error'] = True
    else:
        codes['mode_error'] = False
    # was `user_mode is not 'off'`; identity comparison with a string literal is a bug
    if user_mode != 'off' and user_temperature not in app.config['ACTEMPERATURES']:
        codes['temperature_error'] = True
    else:
        codes['temperature_error'] = False

    if not codes['mode_error'] and not codes['temperature_error']:
        codes['mode'] = user_mode
        codes['temperature'] = user_temperature
        if codes['mode'] == 'off':
            command_postfix = 'off'
        elif codes['mode'] == 'heat':
            command_postfix = 'heat' + codes['temperature']
        else:
            command_postfix = codes['temperature']
        codes['command'] = command_postfix
    return codes


def command_history():
    """Returns a list of dictionaries, each containing a command issued
    to the AC previously. The list is ordered chronologically, from newest
    to oldest."""
    cur = g.db.execute('select command, ts, user from commands order by id desc')
    command_history = []
    for row in cur.fetchall():
        if row[0][0] == 'h':
            cmd = 'heat to ' + row[0][4:]
        elif row[0] == 'off':
            cmd = 'off'
        else:
            cmd = 'cool to ' + row[0]
        command_history.append(dict(command=cmd, ts=row[1], user=row[2]))
    return command_history


def last_record():
    """Returns the last temperature and humidity record data.

    The returned object is a dict with keys ts, fahrenheit, celsius
    and humidity.
    """
    db_record = get_latest_record()
    out_record = dict()
    out_record['date'] = db_record[0].strftime("%Y-%m-%d")
    out_record['time'] = db_record[0].strftime("%H:%M")
    out_record['celsius'] = db_record[1]
    out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))
    out_record['humidity'] = int(round(db_record[2]))
    return out_record


if __name__ == '__main__':
    app.run(host='0.0.0.0')
37.482014
87
0.616507
640
5,210
4.857813
0.296875
0.023159
0.018012
0.019299
0.108717
0.098424
0.098424
0.072692
0.043744
0.043744
0
0.006928
0.252015
5,210
138
88
37.753623
0.790865
0.123033
0
0.125
0
0
0.167147
0
0
0
0
0
0
1
0.089286
false
0.017857
0.053571
0.008929
0.223214
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
077e6b7b62074f7defc4bfc023b3cef03e6c40c9
1,046
py
Python
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
leetcode-notebook/wonz
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
[ "MIT" ]
12
2020-04-21T01:09:14.000Z
2022-01-13T08:42:03.000Z
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
leetcode-notebook/wonz
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
[ "MIT" ]
null
null
null
solutions/Interview-03-shu-zu-zhong-zhong-fu-de-shu-zi-lcof/03.py
leetcode-notebook/wonz
9ffd2ce9b5f3a544ee958f5a0673215afd176c2b
[ "MIT" ]
4
2020-03-31T03:06:16.000Z
2021-07-06T07:27:44.000Z
from typing import List


class Solution:
    def findRepeatNumber(self, nums: List[int]) -> int:
        # solution one: hash table
        n = len(nums)
        flag = [False for i in range(n)]
        for i in range(n):
            if flag[nums[i]] == False:
                flag[nums[i]] = True
            else:
                return nums[i]
        return -1

        # solution two: sorting
        nums.sort()
        pre = nums[0]
        for i in range(1, len(nums)):
            if pre == nums[i]:
                return nums[i]
            else:
                pre = nums[i]
        return -1

        # solution three: "two radishes, one hole" (in-place swapping)
        n = len(nums)
        for i in range(n):
            if nums[i] == i:
                continue
            # duplicate found
            elif nums[nums[i]] == nums[i]:
                return nums[i]
            # swap
            else:
                nums[nums[i]], nums[i] = nums[i], nums[nums[i]]
        return -1


if __name__ == "__main__":
    nums = [2, 3, 1, 0, 2, 5, 3]
    print(Solution().findRepeatNumber(nums))
26.15
63
0.43499
129
1,046
3.465116
0.333333
0.1566
0.123043
0.098434
0.322148
0.06264
0
0
0
0
0
0.020548
0.441683
1,046
40
64
26.15
0.744863
0.062141
0
0.419355
0
0
0.008197
0
0
0
0
0
0
1
0.032258
false
0
0.032258
0
0.290323
0.032258
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07800b91cf15e2b3fdf48ab87571db57cf0566dc
1,702
py
Python
examples/test_network.py
Charles-Peeke/gwu_nn
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
[ "MIT" ]
4
2020-11-17T00:31:40.000Z
2021-11-11T01:56:27.000Z
examples/test_network.py
Charles-Peeke/gwu_nn
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
[ "MIT" ]
1
2020-10-12T17:41:40.000Z
2020-10-12T17:41:40.000Z
examples/test_network.py
jdk514/gwu_nn
3f5e9937abf2bfb81a74a2d6f3653a661e705f67
[ "MIT" ]
5
2020-11-12T21:13:35.000Z
2021-11-30T22:15:28.000Z
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense
from gwu_nn.activation_layers import Sigmoid

np.random.seed(8)
num_obs = 8000

# Create our features to draw from two distinct 2D normal distributions
x1 = np.random.multivariate_normal([0, 0], [[1, .75], [.75, 1]], num_obs)
x2 = np.random.multivariate_normal([3, 8], [[1, .25], [.25, 1]], num_obs)

# Stack our inputs into one feature space
X = np.vstack((x1, x2))
print(X.shape)
y = np.hstack((np.zeros(num_obs), np.ones(num_obs)))
print(y.shape)

# colors = ['red'] * num_obs + ['blue'] * num_obs
# plt.figure(figsize=(12,8))
# plt.scatter(X[:, 0], X[:, 1], c = colors, alpha = 0.5)

# Lets randomly split things into training and testing sets so we don't cheat
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)

# Create our model
network = GWUNetwork()
network.add(Dense(2, 1, True, 'sigmoid'))
network.add(Sigmoid())
#network.set_loss('mse')
network.compile('log_loss', 0.001)
network.fit(X_train, y_train, epochs=100)

from scipy.special import logit

colors = ['red'] * num_obs + ['blue'] * num_obs
plt.figure(figsize=(12, 8))
plt.scatter(X[:, 0], X[:, 1], c=colors, alpha=0.5)

# Range of our X values
start_x1 = -5
end_x1 = 7

weights = network.layers[0].weights.reshape(-1).tolist()
bias = network.layers[0].bias[0][0]

start_y = (bias + start_x1 * weights[0] - logit(0.5)) / -weights[1]
end_y = (bias + end_x1 * weights[0] - logit(0.5)) / -weights[1]
plt.plot([start_x1, end_x1], [start_y, end_y], color='grey')
30.392857
91
0.675676
290
1,702
3.824138
0.393103
0.048693
0.024346
0.046889
0.174932
0.174932
0.174932
0.174932
0.129847
0.129847
0
0.052558
0.161575
1,702
56
92
30.392857
0.724597
0.221504
0
0
0
0
0.020619
0
0
0
0
0
0
1
0
false
0
0.225806
0
0.225806
0.064516
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07806fd3652fda3f2cbc32b699b1c68679d17f6c
6,227
py
Python
scattering/van_hove.py
XiaoboLinlin/scattering
0173b63f3243bdbcccfa562dbf5e3714920cded2
[ "MIT" ]
null
null
null
scattering/van_hove.py
XiaoboLinlin/scattering
0173b63f3243bdbcccfa562dbf5e3714920cded2
[ "MIT" ]
null
null
null
scattering/van_hove.py
XiaoboLinlin/scattering
0173b63f3243bdbcccfa562dbf5e3714920cded2
[ "MIT" ]
null
null
null
import itertools as it

import numpy as np
import mdtraj as md
from progressbar import ProgressBar

from scattering.utils.utils import get_dt
from scattering.utils.constants import get_form_factor


def compute_van_hove(trj, chunk_length, water=False,
                     r_range=(0, 1.0), bin_width=0.005, n_bins=None,
                     self_correlation=True, periodic=True, opt=True, partial=False):
    """Compute the partial van Hove function of a trajectory

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    water : bool
        use X-ray form factors for water that account for polarization
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=None
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    """

    n_physical_atoms = len([a for a in trj.top.atoms if a.element.mass > 0])

    unique_elements = list(set([a.element for a in trj.top.atoms if a.element.mass > 0]))

    partial_dict = dict()

    for elem1, elem2 in it.combinations_with_replacement(unique_elements[::-1], 2):
        print('doing {0} and {1} ...'.format(elem1, elem2))
        r, g_r_t_partial = compute_partial_van_hove(
            trj=trj,
            chunk_length=chunk_length,
            selection1='element {}'.format(elem1.symbol),
            selection2='element {}'.format(elem2.symbol),
            r_range=r_range,
            bin_width=bin_width,
            n_bins=n_bins,
            self_correlation=self_correlation,
            periodic=periodic,
            opt=opt)
        partial_dict[(elem1, elem2)] = g_r_t_partial

    if partial:
        return partial_dict

    norm = 0
    g_r_t = None

    for key, val in partial_dict.items():
        elem1, elem2 = key
        concentration1 = trj.atom_slice(trj.top.select('element {}'.format(elem1.symbol))).n_atoms / n_physical_atoms
        concentration2 = trj.atom_slice(trj.top.select('element {}'.format(elem2.symbol))).n_atoms / n_physical_atoms
        form_factor1 = get_form_factor(element_name=elem1.symbol, water=water)
        form_factor2 = get_form_factor(element_name=elem2.symbol, water=water)

        coeff = form_factor1 * concentration1 * form_factor2 * concentration2

        if g_r_t is None:
            g_r_t = np.zeros_like(val)
        g_r_t += val * coeff
        norm += coeff

    # Reshape g_r_t to better represent the discretization in both r and t
    g_r_t_final = np.empty(shape=(chunk_length, len(r)))
    for i in range(chunk_length):
        g_r_t_final[i, :] = np.mean(g_r_t[i::chunk_length], axis=0)

    g_r_t_final /= norm

    t = trj.time[:chunk_length]

    return r, t, g_r_t_final


def compute_partial_van_hove(trj, chunk_length=10, selection1=None, selection2=None,
                             r_range=(0, 1.0), bin_width=0.005, n_bins=200,
                             self_correlation=True, periodic=True, opt=True):
    """Compute the partial van Hove function of a trajectory

    Parameters
    ----------
    trj : mdtraj.Trajectory
        trajectory on which to compute the Van Hove function
    chunk_length : int
        length of time between restarting averaging
    selection1 : str
        selection to be considered, in the style of MDTraj atom selection
    selection2 : str
        selection to be considered, in the style of MDTraj atom selection
    r_range : array-like, shape=(2,), optional, default=(0.0, 1.0)
        Minimum and maximum radii.
    bin_width : float, optional, default=0.005
        Width of the bins in nanometers.
    n_bins : int, optional, default=None
        The number of bins. If specified, this will override the `bin_width`
        parameter.
    self_correlation : bool, default=True
        Whether or not to include the self-self correlations

    Returns
    -------
    r : numpy.ndarray
        r positions generated by histogram binning
    g_r_t : numpy.ndarray
        Van Hove function at each time and position
    """

    unique_elements = (
        set([a.element for a in trj.atom_slice(trj.top.select(selection1)).top.atoms]),
        set([a.element for a in trj.atom_slice(trj.top.select(selection2)).top.atoms]),
    )

    if any([len(val) > 1 for val in unique_elements]):
        raise UserWarning(
            'Multiple elements found in a selection(s). Results may not be '
            'directly comparable to scattering experiments.'
        )

    # Don't need to store it, but this serves to check that dt is constant
    dt = get_dt(trj)

    pairs = trj.top.select_pairs(selection1=selection1, selection2=selection2)

    n_chunks = int(trj.n_frames / chunk_length)

    g_r_t = None
    pbar = ProgressBar()

    for i in pbar(range(n_chunks)):
        times = list()
        for j in range(chunk_length):
            times.append([chunk_length*i, chunk_length*i+j])
        r, g_r_t_frame = md.compute_rdf_t(
            traj=trj,
            pairs=pairs,
            times=times,
            r_range=r_range,
            bin_width=bin_width,
            n_bins=n_bins,
            period_length=chunk_length,
            self_correlation=self_correlation,
            periodic=periodic,
            opt=opt,
        )

        if g_r_t is None:
            g_r_t = np.zeros_like(g_r_t_frame)
        g_r_t += g_r_t_frame

    return r, g_r_t
36.629412
117
0.604464
831
6,227
4.34296
0.223827
0.012746
0.018288
0.009975
0.569964
0.520089
0.50568
0.481574
0.432253
0.432253
0
0.018648
0.311065
6,227
169
118
36.846154
0.822611
0.310904
0
0.168675
0
0
0.041452
0
0
0
0
0
0
1
0.024096
false
0
0.072289
0
0.13253
0.012048
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078077ca30c799c1dd1930850adac76494c46916
820
py
Python
Section1_Basics/contours.py
NeeharikaDva/opencv_course
234515ab59a1228c8dfd3c69f310dbc1d86c6089
[ "MIT" ]
null
null
null
Section1_Basics/contours.py
NeeharikaDva/opencv_course
234515ab59a1228c8dfd3c69f310dbc1d86c6089
[ "MIT" ]
null
null
null
Section1_Basics/contours.py
NeeharikaDva/opencv_course
234515ab59a1228c8dfd3c69f310dbc1d86c6089
[ "MIT" ]
null
null
null
#pylint:disable=no-member

import cv2 as cv
import numpy as np

img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
#
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow('Blank', blank)

gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
#
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)

canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
#
ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
#
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')
#
cv.drawContours(blank, contours, -1, (200,120,100), 1)
cv.imshow('Contours Drawn', blank)

cv.waitKey(0)
27.333333
114
0.729268
129
820
4.565891
0.55814
0.095076
0
0
0
0
0
0
0
0
0
0.040377
0.093902
820
30
115
27.333333
0.752355
0.029268
0
0
0
0
0.228824
0.120101
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07827562174365b2eae2774f05f5a4b7e0a35d24
1,617
py
Python
install-hooks.py
JustasGau/DonjinKrawler
faff50dcfcebf82028c9af10434359f975247d33
[ "MIT" ]
null
null
null
install-hooks.py
JustasGau/DonjinKrawler
faff50dcfcebf82028c9af10434359f975247d33
[ "MIT" ]
9
2020-10-11T13:55:12.000Z
2020-12-09T16:28:06.000Z
install-hooks.py
JustasGau/DonjinKrawler
faff50dcfcebf82028c9af10434359f975247d33
[ "MIT" ]
null
null
null
import sys
from os import path
import urllib
from urllib.request import urlretrieve
from subprocess import call


def install_hooks(directory):
    checkstyleUrl = 'https://github.com/checkstyle/checkstyle/releases/download/checkstyle-8.36.1/checkstyle-8.36.1-all.jar'
    preCommitUrl = 'https://gist.githubusercontent.com/EdotJ/d512826d5b4fd3e6cdc285b9236511b2/raw/43e5087ed173fd03aab640b0b3db22f11319c623/pre-commit'
    checkstyleName = checkstyleUrl.split('/')[len(checkstyleUrl.split('/')) - 1]
    basePath = path.abspath(directory)
    print("Downloading checkstyle to %s..." % basePath + "/.git/hooks/" + checkstyleName)
    urlretrieve(checkstyleUrl, basePath + "/.git/hooks/" + checkstyleName)
    print("Downloading pre-commit script to %s" % basePath + "/.git/hooks/pre-commit")
    urlretrieve(preCommitUrl, basePath + "/.git/hooks/pre-commit")
    with open(basePath + '/.git/config', 'a+') as gitConfig:
        if ("[checkstyle]" not in gitConfig.read()):
            print("Adding git configurations to .git/config")
            gitConfig.write("[checkstyle]\n")
            gitConfig.write("jar = %s\n" % (basePath + "/.git/hooks/" + checkstyleName))
            gitConfig.write("checkfile = %s\n" % (basePath + "/checkstyle_config.xml"))
    print("Changing permissions for pre-commit. Has to run as root, enter password plz")
    call(["sudo", "chmod", "+x", (basePath + "/.git/hooks/pre-commit")])


if __name__ == "__main__":
    if (len(sys.argv) < 2):
        print("Enter a directory to install hooks")
    else:
        if (path.exists(sys.argv[1])):
            install_hooks(sys.argv[1])
52.16129
150
0.678417
188
1,617
5.776596
0.425532
0.070902
0.088398
0.082873
0.089319
0
0
0
0
0
0
0.043801
0.166976
1,617
30
151
53.9
0.762435
0
0
0
0
0.037037
0.406308
0.054422
0
0
0
0
0
1
0.037037
false
0.037037
0.148148
0
0.185185
0.185185
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078375b28d8a3c360255f33574ef015988b175c4
3,962
py
Python
09_MicroServer_Cookies/micro_server.py
Rockfish/PythonCourse
1d650e49950d1987d052028139fcdfcb0bbfcc70
[ "MIT" ]
null
null
null
09_MicroServer_Cookies/micro_server.py
Rockfish/PythonCourse
1d650e49950d1987d052028139fcdfcb0bbfcc70
[ "MIT" ]
null
null
null
09_MicroServer_Cookies/micro_server.py
Rockfish/PythonCourse
1d650e49950d1987d052028139fcdfcb0bbfcc70
[ "MIT" ]
null
null
null
""" Micro webapp based on WebOb, Jinja2, WSGI with a simple router """ import os import hmac import hashlib import mimetypes from wsgiref.simple_server import WSGIServer, WSGIRequestHandler from webob import Request from webob import Response from jinja2 import Environment, FileSystemLoader class MicroServer(object): """Small web server.""" def __init__(self): """Initializes the class and configures the paths and the Jinja2 environment so it can find and render pages.""" if self.static_root is None: self.static_root = 'static' if self.templates_root is None: self.templates_root = 'templates' if self.routes is None: self.routes = {} # Set up the paths and environment for Jinja. This is how it finds the templates. self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root) self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path)) # Figure out what directory the server is running it as save the path. # The path will be used later to find the site's resources. self.current_dir = os.path.dirname(os.path.realpath(__file__)) def __call__(self, environ, start_response): """This method is called by the HTTPServer when there is a request to be handled.""" # Create the WebOb Request and Response objects for # used to read the request and write the response. self.request = Request(environ) self.response = Response() # Find a handler for the path if there is one. handler = self.routes.get(self.request.path_info) # If there is call it. If not call the static handler. if handler: handler() else: self.static() return self.response(environ, start_response) def static(self, resource=''): """Handles request for static pages. It is the default handler.""" # Build a file path using either the resource parameter or the path in the request. if resource: file_path = os.path.join(self.current_dir, self.static_root, resource) else: file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:]) print("File path:", file_path) # Try to open the file. If we can then guess its type and write its # content to the response object to send it to the client. # If we can't find the file then return an error to the client. try: file_type = mimetypes.guess_type(file_path)[0] self.response.content_type = file_type data = open(file_path, 'rb').read() self.response.body_file.write(data) except Exception as e: self.response.status = 404 self.response.write(str(e)) def render_template(self, template_name, template_values={}): """Renders Jinja2 templates into HTML""" # Find the template and render it to HTML # then write it to the response object to send it to the client. template = self.env.get_template(template_name) html = template.render(template_values) self.response.write(html) def get_signature(self, passphrase, *parts): """Creates a hash from strings based on a passphrase.""" cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1) for part in parts: cookiehash.update(part.encode()) return cookiehash.hexdigest() def run(self, port): """Starts the HTTP server and tells it what port to listen on""" # Create the WSGI HTTP server. Set the port it should listen on. # And start the server. server = WSGIServer(('', 8000), WSGIRequestHandler) server.set_app(self) print("Serving on http://localhost:8000/ ...") server.serve_forever()
33.576271
100
0.649419
530
3,962
4.756604
0.324528
0.03332
0.022213
0.01666
0.06664
0.06664
0.06664
0.06664
0.06664
0.06664
0
0.006203
0.267542
3,962
117
101
33.863248
0.862509
0.336194
0
0.035714
0
0
0.025049
0
0
0
0
0
0
1
0.107143
false
0.035714
0.142857
0
0.303571
0.035714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07851008cd92498c823e9a48b615278bda99bf7d
49,605
py
Python
astroplan/constraints.py
edose/astroplan
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
[ "BSD-3-Clause" ]
160
2015-09-09T00:07:34.000Z
2022-03-15T22:22:49.000Z
astroplan/constraints.py
edose/astroplan
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
[ "BSD-3-Clause" ]
414
2015-08-25T20:22:09.000Z
2022-03-31T13:01:10.000Z
astroplan/constraints.py
edose/astroplan
b3cf55340c50ccf69ec363889c1fe8ff2f93cada
[ "BSD-3-Clause" ]
90
2015-08-27T20:53:48.000Z
2022-01-25T06:11:16.000Z
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ Specify constraints to determine which targets are observable for an observer. """ from __future__ import (absolute_import, division, print_function, unicode_literals) # Standard library from abc import ABCMeta, abstractmethod import datetime import time import warnings # Third-party from astropy.time import Time import astropy.units as u from astropy.coordinates import get_body, get_sun, get_moon, Galactic, SkyCoord from astropy import table import numpy as np from numpy.lib.stride_tricks import as_strided # Package from .moon import moon_illumination from .utils import time_grid_from_range from .target import get_skycoord __all__ = ["AltitudeConstraint", "AirmassConstraint", "AtNightConstraint", "is_observable", "is_always_observable", "time_grid_from_range", "GalacticLatitudeConstraint", "SunSeparationConstraint", "MoonSeparationConstraint", "MoonIlluminationConstraint", "LocalTimeConstraint", "PrimaryEclipseConstraint", "SecondaryEclipseConstraint", "Constraint", "TimeConstraint", "observability_table", "months_observable", "max_best_rescale", "min_best_rescale", "PhaseConstraint", "is_event_observable"] _current_year = time.localtime().tm_year  # needed for backward compatibility _current_year_time_range = Time(  # needed for backward compatibility [str(_current_year) + '-01-01', str(_current_year) + '-12-31'] ) def _make_cache_key(times, targets): """ Make a unique key to reference this combination of ``times`` and ``targets``. Often, we wish to store expensive calculations for a combination of ``targets`` and ``times`` in a cache on an ``observer`` object. This routine will provide an appropriate, hashable, key to store these calculations in a dictionary. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : `~astropy.coordinates.SkyCoord` Target or list of targets. Returns ------- cache_key : tuple A hashable tuple for use as a cache key """ # make a tuple from times try: timekey = tuple(times.jd) + times.shape except BaseException:  # must be scalar timekey = (times.jd,) # make hashable thing from targets coords try: if hasattr(targets, 'frame'): # treat as a SkyCoord object. Accessing the longitude # attribute of the frame data should be unique and is # quicker than accessing the ra attribute. targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape else: # assume targets is a string. targkey = (targets,) except BaseException: targkey = (targets.frame.data.lon,) return timekey + targkey def _get_altaz(times, observer, targets, force_zero_pressure=False): """ Calculate alt/az for ``target`` at times linearly spaced between the two times in ``time_range`` with grid spacing ``time_resolution`` for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- altaz_dict : dict Dictionary containing two key-value pairs. (1) 'times' contains the times for the alt/az computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times. 
""" if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} # convert times, targets to tuple for hashing aakey = _make_cache_key(times, targets) if aakey not in observer._altaz_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.altaz(times, targets, grid_times_targets=False) observer._altaz_cache[aakey] = dict(times=times, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._altaz_cache[aakey] def _get_moon_data(times, observer, force_zero_pressure=False): """ Calculate moon altitude az and illumination for an array of times for ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint. observer : `~astroplan.Observer` The observer who has constraints ``constraints``. force_zero_pressure : bool Forcefully use 0 pressure. Returns ------- moon_dict : dict Dictionary containing three key-value pairs. (1) 'times' contains the times for the computations, (2) 'altaz' contains the corresponding alt/az coordinates at those times and (3) contains the moon illumination for those times. """ if not hasattr(observer, '_moon_cache'): observer._moon_cache = {} # convert times to tuple for hashing aakey = _make_cache_key(times, 'moon') if aakey not in observer._moon_cache: try: if force_zero_pressure: observer_old_pressure = observer.pressure observer.pressure = 0 altaz = observer.moon_altaz(times) illumination = np.array(moon_illumination(times)) observer._moon_cache[aakey] = dict(times=times, illum=illumination, altaz=altaz) finally: if force_zero_pressure: observer.pressure = observer_old_pressure return observer._moon_cache[aakey] def _get_meridian_transit_times(times, observer, targets): """ Calculate next meridian transit for an array of times for ``targets`` and ``observer``. Cache the result on the ``observer`` object. Parameters ---------- times : `~astropy.time.Time` Array of times on which to test the constraint observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets Returns ------- time_dict : dict Dictionary containing a key-value pair. 'times' contains the meridian_transit times. """ if not hasattr(observer, '_meridian_transit_cache'): observer._meridian_transit_cache = {} # convert times to tuple for hashing aakey = _make_cache_key(times, targets) if aakey not in observer._meridian_transit_cache: meridian_transit_times = observer.target_meridian_transit_time(times, targets) observer._meridian_transit_cache[aakey] = dict(times=meridian_transit_times) return observer._meridian_transit_cache[aakey] @abstractmethod class Constraint(object): """ Abstract class for objects defining observational constraints. """ __metaclass__ = ABCMeta def __call__(self, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour, grid_times_targets=False): """ Compute the constraint for this class Parameters ---------- observer : `~astroplan.Observer` the observation location from which to apply the constraints targets : sequence of `~astroplan.Target` The targets on which to apply the constraints. times : `~astropy.time.Time` The times to compute the constraint. WHAT HAPPENS WHEN BOTH TIMES AND TIME_RANGE ARE SET? time_range : `~astropy.time.Time` (length = 2) Lower and upper bounds on time sequence. 
time_grid_resolution : `~astropy.units.Quantity` Time-grid spacing grid_times_targets : bool if True, grids the constraint result with targets along the first index and times along the second. Otherwise, we rely on broadcasting the shapes together using standard numpy rules. Returns ------- constraint_result : 1D or 2D array of float or bool The constraints. If 2D, targets are along the first index and times along the second. """ if times is None and time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if grid_times_targets: targets = get_skycoord(targets) # TODO: these broadcasting operations are relatively slow # but there is potential for huge speedup if the end user # disables gridding and re-shapes the coords themselves # prior to evaluating multiple constraints. if targets.isscalar: # ensure we have a (1, 1) shape coord targets = SkyCoord(np.tile(targets, 1))[:, np.newaxis] else: targets = targets[..., np.newaxis] times, targets = observer._preprocess_inputs(times, targets, grid_times_targets=False) result = self.compute_constraint(times, observer, targets) # make sure the output has the same shape as would result from # broadcasting times and targets against each other if targets is not None: # broadcasting times vs targets is slow due to the # complex nature of these objects. We make # two simple numpy arrays of the same shape and # broadcast these to find the correct shape shp1, shp2 = times.shape, targets.shape x = np.array([1]) a = as_strided(x, shape=shp1, strides=[0] * len(shp1)) b = as_strided(x, shape=shp2, strides=[0] * len(shp2)) output_shape = np.broadcast(a, b).shape if output_shape != np.array(result).shape: result = np.broadcast_to(result, output_shape) return result @abstractmethod def compute_constraint(self, times, observer, targets): """ Actually do the real work of computing the constraint. Subclasses override this. Parameters ---------- times : `~astropy.time.Time` The times to compute the constraint observer : `~astroplan.Observer` the observation location from which to apply the constraints targets : sequence of `~astroplan.Target` The targets on which to apply the constraints. Returns ------- constraint_result : 2D array of float or bool The constraints, with targets along the first index and times along the second. """ # Should be implemented on each subclass of Constraint raise NotImplementedError class AltitudeConstraint(Constraint): """ Constrain the altitude of the target. .. note:: This can misbehave if you try to constrain negative altitudes, as the `~astropy.coordinates.AltAz` frame tends to mishandle negative altitudes. Parameters ---------- min : `~astropy.units.Quantity` or `None` Minimum altitude of the target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` Maximum altitude of the target (inclusive). `None` indicates no limit. boolean_constraint : bool If True, the constraint is treated as a boolean (True for within the limits and False for outside). If False, the constraint returns a float on [0, 1], where 0 is the min altitude and 1 is the max. 
""" def __init__(self, min=None, max=None, boolean_constraint=True): if min is None: self.min = -90*u.deg else: self.min = min if max is None: self.max = 90*u.deg else: self.max = max self.boolean_constraint = boolean_constraint def compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times, observer, targets) alt = cached_altaz['altaz'].alt if self.boolean_constraint: lowermask = self.min <= alt uppermask = alt <= self.max return lowermask & uppermask else: return max_best_rescale(alt, self.min, self.max) class AirmassConstraint(AltitudeConstraint): """ Constrain the airmass of a target. In the current implementation the airmass is approximated by the secant of the zenith angle. .. note:: The ``max`` and ``min`` arguments appear in the order (max, min) in this initializer to support the common case for users who care about the upper limit on the airmass (``max``) and not the lower limit. Parameters ---------- max : float or `None` Maximum airmass of the target. `None` indicates no limit. min : float or `None` Minimum airmass of the target. `None` indicates no limit. boolean_contstraint : bool Examples -------- To create a constraint that requires the airmass be "better than 2", i.e. at a higher altitude than airmass=2:: AirmassConstraint(2) """ def __init__(self, max=None, min=1, boolean_constraint=True): self.min = min self.max = max self.boolean_constraint = boolean_constraint def compute_constraint(self, times, observer, targets): cached_altaz = _get_altaz(times, observer, targets) secz = cached_altaz['altaz'].secz.value if self.boolean_constraint: if self.min is None and self.max is not None: mask = secz <= self.max elif self.max is None and self.min is not None: mask = self.min <= secz elif self.min is not None and self.max is not None: mask = (self.min <= secz) & (secz <= self.max) else: raise ValueError("No max and/or min specified in " "AirmassConstraint.") return mask else: if self.max is None: raise ValueError("Cannot have a float AirmassConstraint if max is None.") else: mx = self.max mi = 1 if self.min is None else self.min # values below 1 should be disregarded return min_best_rescale(secz, mi, mx, less_than_min=0) class AtNightConstraint(Constraint): """ Constrain the Sun to be below ``horizon``. """ @u.quantity_input(horizon=u.deg) def __init__(self, max_solar_altitude=0*u.deg, force_pressure_zero=True): """ Parameters ---------- max_solar_altitude : `~astropy.units.Quantity` The altitude of the sun below which it is considered to be "night" (inclusive). force_pressure_zero : bool (optional) Force the pressure to zero for solar altitude calculations. This avoids errors in the altitude of the Sun that can occur when the Sun is below the horizon and the corrections for atmospheric refraction return nonsense values. """ self.max_solar_altitude = max_solar_altitude self.force_pressure_zero = force_pressure_zero @classmethod def twilight_civil(cls, **kwargs): """ Consider nighttime as time between civil twilights (-6 degrees). """ return cls(max_solar_altitude=-6*u.deg, **kwargs) @classmethod def twilight_nautical(cls, **kwargs): """ Consider nighttime as time between nautical twilights (-12 degrees). """ return cls(max_solar_altitude=-12*u.deg, **kwargs) @classmethod def twilight_astronomical(cls, **kwargs): """ Consider nighttime as time between astronomical twilights (-18 degrees). 
""" return cls(max_solar_altitude=-18*u.deg, **kwargs) def _get_solar_altitudes(self, times, observer, targets): if not hasattr(observer, '_altaz_cache'): observer._altaz_cache = {} aakey = _make_cache_key(times, 'sun') if aakey not in observer._altaz_cache: try: if self.force_pressure_zero: observer_old_pressure = observer.pressure observer.pressure = 0 # find solar altitude at these times altaz = observer.altaz(times, get_sun(times)) altitude = altaz.alt # cache the altitude observer._altaz_cache[aakey] = dict(times=times, altitude=altitude) finally: if self.force_pressure_zero: observer.pressure = observer_old_pressure else: altitude = observer._altaz_cache[aakey]['altitude'] return altitude def compute_constraint(self, times, observer, targets): solar_altitude = self._get_solar_altitudes(times, observer, targets) mask = solar_altitude <= self.max_solar_altitude return mask class GalacticLatitudeConstraint(Constraint): """ Constrain the distance between the Galactic plane and some targets. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable Galactic latitude of target (inclusive). `None` indicates no limit. """ self.min = min self.max = max def compute_constraint(self, times, observer, targets): separation = abs(targets.transform_to(Galactic).b) if self.min is None and self.max is not None: mask = self.max >= separation elif self.max is None and self.min is not None: mask = self.min <= separation elif self.min is not None and self.max is not None: mask = ((self.min <= separation) & (separation <= self.max)) else: raise ValueError("No max and/or min specified in " "GalacticLatitudeConstraint.") return mask class SunSeparationConstraint(Constraint): """ Constrain the distance between the Sun and some targets. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between Sun and target (inclusive). `None` indicates no limit. max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between Sun and target (inclusive). `None` indicates no limit. """ self.min = min self.max = max def compute_constraint(self, times, observer, targets): # use get_body rather than get sun here, since # it returns the Sun's coordinates in an observer # centred frame, so the separation is as-seen # by the observer. # 'get_sun' returns ICRS coords. sun = get_body('sun', times, location=observer.location) solar_separation = sun.separation(targets) if self.min is None and self.max is not None: mask = self.max >= solar_separation elif self.max is None and self.min is not None: mask = self.min <= solar_separation elif self.min is not None and self.max is not None: mask = ((self.min <= solar_separation) & (solar_separation <= self.max)) else: raise ValueError("No max and/or min specified in " "SunSeparationConstraint.") return mask class MoonSeparationConstraint(Constraint): """ Constrain the distance between the Earth's moon and some targets. """ def __init__(self, min=None, max=None, ephemeris=None): """ Parameters ---------- min : `~astropy.units.Quantity` or `None` (optional) Minimum acceptable separation between moon and target (inclusive). `None` indicates no limit. 
max : `~astropy.units.Quantity` or `None` (optional) Maximum acceptable separation between moon and target (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with ``astropy.coordinates.solar_system_ephemeris.set`` (which is set to 'builtin' by default). """ self.min = min self.max = max self.ephemeris = ephemeris def compute_constraint(self, times, observer, targets): # removed the location argument here, which causes small <1 deg # inaccuracies, but it is needed until astropy PR #5897 is released # which should be astropy 1.3.2 moon = get_moon(times, ephemeris=self.ephemeris) # note to future editors - the order matters here # moon.separation(targets) is NOT the same as targets.separation(moon) # the former calculates the separation in the frame of the moon coord # which is GCRS, and that is what we want. moon_separation = moon.separation(targets) if self.min is None and self.max is not None: mask = self.max >= moon_separation elif self.max is None and self.min is not None: mask = self.min <= moon_separation elif self.min is not None and self.max is not None: mask = ((self.min <= moon_separation) & (moon_separation <= self.max)) else: raise ValueError("No max and/or min specified in " "MoonSeparationConstraint.") return mask class MoonIlluminationConstraint(Constraint): """ Constrain the fractional illumination of the Earth's moon. Constraint is also satisfied if the Moon has set. """ def __init__(self, min=None, max=None, ephemeris=None): """ Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. ephemeris : str, optional Ephemeris to use. If not given, use the one set with `~astropy.coordinates.solar_system_ephemeris` (which is set to 'builtin' by default). """ self.min = min self.max = max self.ephemeris = ephemeris @classmethod def dark(cls, min=None, max=0.25, **kwargs): """ initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of no minimum and a maximum of 0.25 Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """ return cls(min, max, **kwargs) @classmethod def grey(cls, min=0.25, max=0.65, **kwargs): """ initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.25 and a maximum of 0.65 Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """ return cls(min, max, **kwargs) @classmethod def bright(cls, min=0.65, max=None, **kwargs): """ initialize a `~astroplan.constraints.MoonIlluminationConstraint` with defaults of a minimum of 0.65 and no maximum Parameters ---------- min : float or `None` (optional) Minimum acceptable fractional illumination (inclusive). `None` indicates no limit. max : float or `None` (optional) Maximum acceptable fractional illumination (inclusive). `None` indicates no limit. """ return cls(min, max, **kwargs) def compute_constraint(self, times, observer, targets): # first, is the moon up? 
cached_moon = _get_moon_data(times, observer) moon_alt = cached_moon['altaz'].alt moon_down_mask = moon_alt < 0 moon_up_mask = moon_alt >= 0 illumination = cached_moon['illum'] if self.min is None and self.max is not None: mask = (self.max >= illumination) | moon_down_mask elif self.max is None and self.min is not None: mask = (self.min <= illumination) & moon_up_mask elif self.min is not None and self.max is not None: mask = ((self.min <= illumination) & (illumination <= self.max)) & moon_up_mask else: raise ValueError("No max and/or min specified in " "MoonIlluminationConstraint.") return mask class LocalTimeConstraint(Constraint): """ Constrain the observable hours. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~datetime.time` Earliest local time (inclusive). `None` indicates no limit. max : `~datetime.time` Latest local time (inclusive). `None` indicates no limit. Examples -------- Constrain the observations to targets that are observable between 23:50 and 04:08 local time: >>> from astroplan import Observer >>> from astroplan.constraints import LocalTimeConstraint >>> import datetime as dt >>> subaru = Observer.at_site("Subaru", timezone="US/Hawaii") >>> # bound times between 23:50 and 04:08 local Hawaiian time >>> constraint = LocalTimeConstraint(min=dt.time(23, 50), max=dt.time(4, 8)) """ self.min = min self.max = max if self.min is None and self.max is None: raise ValueError("You must at least supply either a minimum or a maximum time.") if self.min is not None: if not isinstance(self.min, datetime.time): raise TypeError("Time limits must be specified as datetime.time objects.") if self.max is not None: if not isinstance(self.max, datetime.time): raise TypeError("Time limits must be specified as datetime.time objects.") def compute_constraint(self, times, observer, targets): timezone = None # get timezone from time objects, or from observer if self.min is not None: timezone = self.min.tzinfo elif self.max is not None: timezone = self.max.tzinfo if timezone is None: timezone = observer.timezone if self.min is not None: min_time = self.min else: min_time = datetime.time(0, 0, 0) if self.max is not None: max_time = self.max else: max_time = datetime.time(23, 59, 59) # If time limits occur on same day: if min_time < max_time: try: mask = np.array([min_time <= t.time() <= max_time for t in times.datetime]) except BaseException: # use np.bool_ so shape queries don't cause problems mask = np.bool_(min_time <= times.datetime.time() <= max_time) # If time boundaries straddle midnight: else: try: mask = np.array([(t.time() >= min_time) or (t.time() <= max_time) for t in times.datetime]) except BaseException: mask = np.bool_((times.datetime.time() >= min_time) or (times.datetime.time() <= max_time)) return mask class TimeConstraint(Constraint): """Constrain the observing time to be within certain time limits. An example use case for this class would be to associate an acceptable time range with a specific observing block. This can be useful if not all observing blocks are valid over the time limits used in calls to `is_observable` or `is_always_observable`. """ def __init__(self, min=None, max=None): """ Parameters ---------- min : `~astropy.time.Time` Earliest time (inclusive). `None` indicates no limit. max : `~astropy.time.Time` Latest time (inclusive). `None` indicates no limit. 
Examples -------- Constrain the observations to targets that are observable between 2016-03-28 and 2016-03-30: >>> from astroplan import Observer >>> from astropy.time import Time >>> subaru = Observer.at_site("Subaru") >>> t1 = Time("2016-03-28T12:00:00") >>> t2 = Time("2016-03-30T12:00:00") >>> constraint = TimeConstraint(t1, t2) """ self.min = min self.max = max if self.min is None and self.max is None: raise ValueError("You must at least supply either a minimum or a " "maximum time.") if self.min is not None: if not isinstance(self.min, Time): raise TypeError("Time limits must be specified as " "astropy.time.Time objects.") if self.max is not None: if not isinstance(self.max, Time): raise TypeError("Time limits must be specified as " "astropy.time.Time objects.") def compute_constraint(self, times, observer, targets): with warnings.catch_warnings(): warnings.simplefilter('ignore') min_time = Time("1950-01-01T00:00:00") if self.min is None else self.min max_time = Time("2120-01-01T00:00:00") if self.max is None else self.max mask = np.logical_and(times > min_time, times < max_time) return mask class PrimaryEclipseConstraint(Constraint): """ Constrain observations to times during primary eclipse. """ def __init__(self, eclipsing_system): """ Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in primary eclipse. """ self.eclipsing_system = eclipsing_system def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_primary_eclipse(times) return mask class SecondaryEclipseConstraint(Constraint): """ Constrain observations to times during secondary eclipse. """ def __init__(self, eclipsing_system): """ Parameters ---------- eclipsing_system : `~astroplan.periodic.EclipsingSystem` System which must be in secondary eclipse. """ self.eclipsing_system = eclipsing_system def compute_constraint(self, times, observer=None, targets=None): mask = self.eclipsing_system.in_secondary_eclipse(times) return mask class PhaseConstraint(Constraint): """ Constrain observations to times in some range of phases for a periodic event (e.g. transiting exoplanets, eclipsing binaries). """ def __init__(self, periodic_event, min=None, max=None): """ Parameters ---------- periodic_event : `~astroplan.periodic.PeriodicEvent` or subclass System on which to compute the phase. For example, the system could be an eclipsing or non-eclipsing binary, or exoplanet system. min : float (optional) Minimum phase (inclusive) on interval [0, 1). Default is zero. max : float (optional) Maximum phase (inclusive) on interval [0, 1). Default is one. Examples -------- To constrain observations on orbital phases between 0.4 and 0.6, >>> from astroplan import PeriodicEvent >>> from astropy.time import Time >>> import astropy.units as u >>> binary = PeriodicEvent(epoch=Time('2017-01-01 02:00'), period=1*u.day) >>> constraint = PhaseConstraint(binary, min=0.4, max=0.6) The minimum and maximum phase must be described on the interval [0, 1). 
To constrain observations on orbital phases between 0.6 and 1.2, for example, you should subtract one from the second number: >>> constraint = PhaseConstraint(binary, min=0.6, max=0.2) """ self.periodic_event = periodic_event self.min = min if min is not None else 0.0 self.max = max if max is not None else 1.0 if (self.min < 0) or (self.min > 1) or (self.max < 0) or (self.max > 1): raise ValueError('The minimum and maximum phase of the' ' PhaseConstraint must be within the interval [0, 1).') def compute_constraint(self, times, observer=None, targets=None): phase = self.periodic_event.phase(times) mask = np.where(self.max > self.min, (phase >= self.min) & (phase <= self.max), (phase >= self.min) | (phase <= self.max)) return mask def is_always_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ A function to determine whether ``targets`` are always observable throughout ``time_range`` given constraints in the ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- always_observable : list List of booleans of same length as ``targets`` for whether or not each target is always observable in the time range given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.all(constraint_arr, axis=1) def is_observable(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ Determines if the ``targets`` are observable during ``time_range`` given constraints in ``constraints_list`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. 
Returns ------- ever_observable : list List of booleans of same length as ``targets`` for whether or not each target is ever observable in the time range given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) return np.any(constraint_arr, axis=1) def is_event_observable(constraints, observer, target, times=None, times_ingress_egress=None): """ Determines if the ``target`` is observable at each time in ``times``, given constraints in ``constraints`` for a particular ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` target : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target times : `~astropy.time.Time` (optional) Array of mid-event times on which to test the constraints times_ingress_egress : `~astropy.time.Time` (optional) Array of ingress and egress times for ``N`` events, with shape (``N``, 2). Returns ------- event_observable : `~numpy.ndarray` Array of booleans of same length as ``times`` for whether or not the target is observable at each time, given the constraints. """ if not hasattr(constraints, '__len__'): constraints = [constraints] if times is not None: applied_constraints = [constraint(observer, target, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) else: times_ing = times_ingress_egress[:, 0] times_egr = times_ingress_egress[:, 1] applied_constraints_ing = [constraint(observer, target, times=times_ing, grid_times_targets=True) for constraint in constraints] applied_constraints_egr = [constraint(observer, target, times=times_egr, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and(np.logical_and.reduce(applied_constraints_ing), np.logical_and.reduce(applied_constraints_egr)) return constraint_arr def months_observable(constraints, observer, targets, time_range=_current_year_time_range, time_grid_resolution=0.5*u.hour): """ Determines which months the specified ``targets`` are observable for a specific ``observer``, given the supplied ``constraints``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence. If ``time_range`` is not specified, defaults to the current year (localtime). time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observable_months : list List of sets of unique integers representing each month that a target is observable, one set per target. These integers are 1-based so that January maps to 1, February maps to 2, etc. """ # TODO: This method could be sped up a lot by dropping to the trigonometric # altitude calculations. 
if not hasattr(constraints, '__len__'): constraints = [constraints] times = time_grid_from_range(time_range, time_grid_resolution) applied_constraints = [constraint(observer, targets, times=times, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) months_observable = [] for target, observable in zip(targets, constraint_arr): s = set([t.datetime.month for t in times[observable]]) months_observable.append(s) return months_observable def observability_table(constraints, observer, targets, times=None, time_range=None, time_grid_resolution=0.5*u.hour): """ Creates a table with information about observability for all the ``targets`` over the requested ``time_range``, given the constraints in ``constraints_list`` for ``observer``. Parameters ---------- constraints : list or `~astroplan.constraints.Constraint` Observational constraint(s) observer : `~astroplan.Observer` The observer who has constraints ``constraints`` targets : {list, `~astropy.coordinates.SkyCoord`, `~astroplan.FixedTarget`} Target or list of targets times : `~astropy.time.Time` (optional) Array of times on which to test the constraint time_range : `~astropy.time.Time` (optional) Lower and upper bounds on time sequence, with spacing ``time_resolution``. This will be passed as the first argument into `~astroplan.time_grid_from_range`. If a single (scalar) time, the table will be for a 24 hour period centered on that time. time_grid_resolution : `~astropy.units.Quantity` (optional) If ``time_range`` is specified, determine whether constraints are met between test times in ``time_range`` by checking constraint at linearly-spaced times separated by ``time_resolution``. Default is 0.5 hours. Returns ------- observability_table : `~astropy.table.Table` A Table containing the observability information for each of the ``targets``. The table contains four columns with information about the target and its observability: ``'target name'``, ``'ever observable'``, ``'always observable'``, and ``'fraction of time observable'``. The column ``'time observable'`` will also be present if the ``time_range`` is given as a scalar. It also contains metadata entries ``'times'`` (with an array of all the times), ``'observer'`` (the `~astroplan.Observer` object), and ``'constraints'`` (containing the supplied ``constraints``). 
""" if not hasattr(constraints, '__len__'): constraints = [constraints] is_24hr_table = False if hasattr(time_range, 'isscalar') and time_range.isscalar: time_range = (time_range-12*u.hour, time_range+12*u.hour) is_24hr_table = True applied_constraints = [constraint(observer, targets, times=times, time_range=time_range, time_grid_resolution=time_grid_resolution, grid_times_targets=True) for constraint in constraints] constraint_arr = np.logical_and.reduce(applied_constraints) colnames = ['target name', 'ever observable', 'always observable', 'fraction of time observable'] target_names = [target.name for target in targets] ever_obs = np.any(constraint_arr, axis=1) always_obs = np.all(constraint_arr, axis=1) frac_obs = np.sum(constraint_arr, axis=1) / constraint_arr.shape[1] tab = table.Table(names=colnames, data=[target_names, ever_obs, always_obs, frac_obs]) if times is None and time_range is not None: times = time_grid_from_range(time_range, time_resolution=time_grid_resolution) if is_24hr_table: tab['time observable'] = tab['fraction of time observable'] * 24*u.hour tab.meta['times'] = times.datetime tab.meta['observer'] = observer tab.meta['constraints'] = constraints return tab def min_best_rescale(vals, min_val, max_val, less_than_min=1): """ rescales an input array ``vals`` to be a score (between zero and one), where the ``min_val`` goes to one, and the ``max_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) less_than_min : 0 or 1 what is returned for ``vals`` below ``min_val``. (in some cases anything less than ``min_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``max_val`` equal 0 and those equal to ``min_val`` equal 1 Examples -------- rescale airmasses to between 0 and 1, with the best (1) and worst (2.25). All values outside the range should return 0. >>> from astroplan.constraints import min_best_rescale >>> import numpy as np >>> airmasses = np.array([1, 1.5, 2, 3, 0]) >>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0) # doctest: +FLOAT_CMP array([ 1. , 0.6, 0.2, 0. , 0. ]) """ rescaled = (vals - max_val) / (min_val - max_val) below = vals < min_val above = vals > max_val rescaled[below] = less_than_min rescaled[above] = 0 return rescaled def max_best_rescale(vals, min_val, max_val, greater_than_max=1): """ rescales an input array ``vals`` to be a score (between zero and one), where the ``max_val`` goes to one, and the ``min_val`` goes to zero. Parameters ---------- vals : array-like the values that need to be rescaled to be between 0 and 1 min_val : float worst acceptable value (rescales to 0) max_val : float best value cared about (rescales to 1) greater_than_max : 0 or 1 what is returned for ``vals`` above ``max_val``. (in some cases anything higher than ``max_val`` should also return one, in some cases it should return zero) Returns ------- array of floats between 0 and 1 inclusive rescaled so that ``vals`` equal to ``min_val`` equal 0 and those equal to ``max_val`` equal 1 Examples -------- rescale an array of altitudes to be between 0 and 1, with the best (60) going to 1 and worst (35) going to 0. For values outside the range, the rescale should return 0 below 35 and 1 above 60. 
>>> from astroplan.constraints import max_best_rescale >>> import numpy as np >>> altitudes = np.array([20, 30, 40, 45, 55, 70]) >>> max_best_rescale(altitudes, 35, 60) # doctest: +FLOAT_CMP array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ]) """ rescaled = (vals - min_val) / (max_val - min_val) below = vals < min_val above = vals > max_val rescaled[below] = 0 rescaled[above] = greater_than_max return rescaled
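For orientation, a short sketch of how the constraint machinery defined above is typically driven, based on the docstrings in this file; the site, targets, and date range are arbitrary examples, and FixedTarget.from_name needs a network connection to resolve names:

    import astropy.units as u
    from astropy.time import Time
    from astroplan import Observer, FixedTarget
    from astroplan.constraints import (AirmassConstraint, AtNightConstraint,
                                       is_observable, is_always_observable)

    subaru = Observer.at_site("Subaru")
    targets = [FixedTarget.from_name("Vega"), FixedTarget.from_name("Deneb")]
    time_range = Time(["2017-08-01 06:00", "2017-08-01 12:00"])

    # Airmass better than 2 and astronomical night, checked on a 0.5 hour grid.
    constraints = [AirmassConstraint(2), AtNightConstraint.twilight_astronomical()]
    print(is_observable(constraints, subaru, targets, time_range=time_range))
    print(is_always_observable(constraints, subaru, targets, time_range=time_range))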
37.353163
100
0.617579
5,884
49,605
5.090245
0.10826
0.01449
0.010217
0.014691
0.590865
0.544723
0.503222
0.470702
0.447898
0.432807
0
0.010835
0.292954
49,605
1,327
101
37.381311
0.843127
0.463885
0
0.458774
0
0
0.063508
0.011715
0
0
0
0.001507
0
1
0.093023
false
0
0.029598
0
0.221987
0.002114
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078516b1582562801fbf63851c5ac10efbd5d833
6,191
py
Python
backend/views.py
Raulios/django-blog
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
[ "MIT" ]
null
null
null
backend/views.py
Raulios/django-blog
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
[ "MIT" ]
null
null
null
backend/views.py
Raulios/django-blog
ff25c8f21a3f6644e77a2ef5bb7bf7026770e0c2
[ "MIT" ]
null
null
null
from django.contrib import messages from django.contrib.auth.decorators import login_required from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage from django.core.urlresolvers import reverse from django.shortcuts import render from django.http import HttpResponseRedirect from core.models import Post, Category, Tag from backend.forms import PostForm, CategoryForm, TagForm # Create your views here. @login_required() def index(request): context = {} context['nav_active'] = 'index' return render(request, 'backend/index.html', context) @login_required() def posts(request): context = {} context['nav_active'] = 'posts' post_list = Post.objects.all() paginator = Paginator(list(reversed(post_list)), 10) page = request.GET.get('page') try: posts = paginator.page(page) except PageNotAnInteger: posts = paginator.page(1) except EmptyPage: posts = paginator.page(paginator.num_pages) context['posts'] = posts return render(request, 'backend/posts.html', context) @login_required() def add_post(request): context = {} context['nav_active'] = 'posts' form = PostForm() if request.method == 'POST': form = PostForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Post created.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required() def edit_post(request, post_id): context = {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) context['post'] = post form = PostForm(instance=post) if request.method == 'POST': form = PostForm(request.POST, request.FILES, instance=post) if form.is_valid(): form.save() messages.success(request, 'Post updated.') return HttpResponseRedirect(reverse('user_panel_posts')) context['form'] = form return render(request, 'backend/edit_post.html', context) @login_required() def delete_post(request, post_id): context = {} context['nav_active'] = 'posts' post = Post.objects.get(pk=post_id) post.delete() messages.success(request, 'Post deleted.') return HttpResponseRedirect(reverse('user_panel_posts')) @login_required() def categories(request): context = {} context['nav_active'] = 'categories' categories_list = Category.objects.all() paginator = Paginator(list(reversed(categories_list)), 10) page = request.GET.get('page') try: categories = paginator.page(page) except PageNotAnInteger: categories = paginator.page(1) except EmptyPage: categories = paginator.page(paginator.num_pages) context['categories'] = categories return render(request, 'backend/categories.html', context) @login_required() def add_category(request): context = {} context['nav_active'] = 'categories' form = CategoryForm() if request.method == 'POST': form = CategoryForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Category created.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', context) @login_required() def edit_category(request, category_id): context = {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) context['category'] = category form = CategoryForm(instance=category) if request.method == 'POST': form = CategoryForm(request.POST, request.FILES, instance=category) if form.is_valid(): form.save() messages.success(request, 'Category updated.') return HttpResponseRedirect(reverse('user_panel_categories')) context['form'] = form return render(request, 'backend/edit_category.html', 
context) @login_required() def delete_category(request, category_id): context = {} context['nav_active'] = 'categories' category = Category.objects.get(pk=category_id) category.delete() messages.success(request, 'Category deleted.') return HttpResponseRedirect(reverse('user_panel_categories')) @login_required() def tags(request): context = {} context['nav_active'] = 'tags' tags_list = Tag.objects.all() paginator = Paginator(list(reversed(tags_list)), 10) page = request.GET.get('page') try: tags = paginator.page(page) except PageNotAnInteger: tags = paginator.page(1) except EmptyPage: tags = paginator.page(paginator.num_pages) context['tags'] = tags return render(request, 'backend/tags.html', context) @login_required() def add_tag(request): context = {} context['nav_active'] = 'tags' form = TagForm() if request.method == 'POST': form = TagForm(request.POST, request.FILES) if form.is_valid(): form.save() messages.success(request, 'Tag created.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html', context) @login_required() def edit_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) context['tag'] = tag form = TagForm(instance=tag) if request.method == 'POST': form = TagForm(request.POST, request.FILES, instance=tag) if form.is_valid(): form.save() messages.success(request, 'Tag updated.') return HttpResponseRedirect(reverse('user_panel_tags')) context['form'] = form return render(request, 'backend/edit_tag.html', context) @login_required() def delete_tag(request, tag_id): context = {} context['nav_active'] = 'tags' tag = Tag.objects.get(pk=tag_id) tag.delete() messages.success(request, 'Tag deleted.') return HttpResponseRedirect(reverse('user_panel_tags'))
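The views above reverse URL names such as 'user_panel_posts', 'user_panel_categories' and 'user_panel_tags'. A hypothetical urls.py wiring consistent with those names (old-style url() patterns to match the django.core.urlresolvers import; the regexes and the names not used in reverse() calls are guesses):

    from django.conf.urls import url
    from backend import views

    urlpatterns = [
        url(r'^$', views.index, name='user_panel_index'),
        url(r'^posts/$', views.posts, name='user_panel_posts'),
        url(r'^posts/add/$', views.add_post, name='user_panel_add_post'),
        url(r'^posts/(?P<post_id>\d+)/edit/$', views.edit_post, name='user_panel_edit_post'),
        url(r'^posts/(?P<post_id>\d+)/delete/$', views.delete_post, name='user_panel_delete_post'),
        url(r'^categories/$', views.categories, name='user_panel_categories'),
        url(r'^tags/$', views.tags, name='user_panel_tags'),
    ]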
25.166667
75
0.663059
693
6,191
5.79798
0.106782
0.045296
0.051767
0.074415
0.744151
0.673221
0.478845
0.478845
0.456446
0.456446
0
0.00184
0.210144
6,191
246
76
25.166667
0.819836
0.003715
0
0.572289
0
0
0.129885
0.036322
0
0
0
0
0
1
0.078313
false
0
0.048193
0
0.240964
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078596cc2ee665e19eee2250f95d62feca0bd3b2
1,816
py
Python
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
dangerstudios/OpenPype
10ddcc4699137888616eec57cd7fac9648189714
[ "MIT" ]
null
null
null
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
dangerstudios/OpenPype
10ddcc4699137888616eec57cd7fac9648189714
[ "MIT" ]
null
null
null
openpype/modules/ftrack/event_handlers_server/event_del_avalon_id_from_new.py
dangerstudios/OpenPype
10ddcc4699137888616eec57cd7fac9648189714
[ "MIT" ]
null
null
null
from openpype.modules.ftrack.lib import BaseEvent from openpype.modules.ftrack.lib.avalon_sync import CUST_ATTR_ID_KEY from openpype.modules.ftrack.event_handlers_server.event_sync_to_avalon import ( SyncToAvalonEvent ) class DelAvalonIdFromNew(BaseEvent): ''' This event removes AvalonId from custom attributes of new entities. Result: - 'Copy->Pasted' entities won't have the same AvalonId as the source entity Priority of this event must be less than the priority of the SyncToAvalon event. ''' priority = SyncToAvalonEvent.priority - 1 ignore_me = True def launch(self, session, event): created = [] entities = event['data']['entities'] for entity in entities: try: entity_id = entity['entityId'] if entity.get('action', None) == 'add': id_dict = entity['changes']['id'] if id_dict['new'] is not None and id_dict['old'] is None: created.append(id_dict['new']) elif ( entity.get('action', None) == 'update' and CUST_ATTR_ID_KEY in entity['keys'] and entity_id in created ): ftrack_entity = session.get( self._get_entity_type(entity), entity_id ) cust_attrs = ftrack_entity["custom_attributes"] if cust_attrs[CUST_ATTR_ID_KEY]: cust_attrs[CUST_ATTR_ID_KEY] = "" session.commit() except Exception: session.rollback() continue def register(session): '''Register plugin. Called when used as a plugin.''' DelAvalonIdFromNew(session).register()
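Note that the handler ships disabled (ignore_me = True). A rough sketch of running it standalone against an ftrack session (the session setup is illustrative; in OpenPype the event server performs this wiring, and BaseEvent supplies the register() plumbing):

    import ftrack_api

    session = ftrack_api.Session()  # credentials come from the environment
    DelAvalonIdFromNew.ignore_me = False  # opt in; the class is disabled by default
    DelAvalonIdFromNew(session).register()
    session.event_hub.wait()  # block and dispatch incoming events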
33.018182
80
0.562225
192
1,816
5.135417
0.447917
0.032454
0.040568
0.052738
0.10142
0.044625
0
0
0
0
0
0.000851
0.352974
1,816
54
81
33.62963
0.838298
0.138216
0
0
0
0
0.052219
0
0
0
0
0
0
1
0.055556
false
0
0.083333
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078638d293ec315b963165f9210a9060b0e09180
2,436
py
Python
go/token/views.py
lynnUg/vumi-go
852f906c46d5d26940bd6699f11488b73bbc3742
[ "BSD-3-Clause" ]
null
null
null
go/token/views.py
lynnUg/vumi-go
852f906c46d5d26940bd6699f11488b73bbc3742
[ "BSD-3-Clause" ]
null
null
null
go/token/views.py
lynnUg/vumi-go
852f906c46d5d26940bd6699f11488b73bbc3742
[ "BSD-3-Clause" ]
null
null
null
from urllib import urlencode import urlparse from django.shortcuts import Http404, redirect from django.contrib.auth.views import logout from django.contrib import messages from django.core.urlresolvers import reverse from django.contrib.auth.decorators import login_required from vumi.utils import load_class_by_string from go.base.utils import vumi_api def token(request, token): # We only need the redis manager here, but it's saner to get a whole # vumi_api and not worry about all the setup magic. api = vumi_api() token_data = api.token_manager.get(token) if not token_data: raise Http404 user_id = int(token_data['user_id']) redirect_to = token_data['redirect_to'] system_token = token_data['system_token'] # If we're authorized and we're the same user_id then redirect to # where we need to be if not user_id or request.user.id == user_id: path, _, qs = redirect_to.partition('?') params = urlparse.parse_qs(qs) # since the token can be custom we prepend the size of the user_token # to the token being forwarded so the view handling the `redirect_to` # can lookup the token and verify the system token. params.update({'token': '%s-%s%s' % (len(token), token, system_token)}) return redirect('%s?%s' % (path, urlencode(params))) # If we got here then we need authentication and the user's either not # logged in or is logged in with a wrong account. if request.user.is_authenticated(): logout(request) messages.info(request, 'Wrong account for this token.') return redirect('%s?%s' % (reverse('auth_login'), urlencode({ 'next': reverse('token', kwargs={'token': token}), }))) @login_required def token_task(request): api = request.user_api.api token = request.GET.get('token') token_data = api.token_manager.verify_get(token) if not token_data: raise Http404 params = token_data['extra_params'] callback_name = params['callback_name'] callback_args = params['callback_args'] callback_kwargs = params['callback_kwargs'] return_to = params['return_to'] message = params['message'] message_level = params['message_level'] callback = load_class_by_string(callback_name) callback(*callback_args, **callback_kwargs) messages.add_message(request, message_level, message) return redirect(return_to)
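The '%s-%s%s' % (len(token), token, system_token) packing above prepends the user token's length so the receiving view can split the forwarded value back apart. A self-contained sketch of that round trip (the token values are illustrative):

    user_token = 'abc123'
    system_token = 'SYSTEMSECRET'
    forwarded = '%s-%s%s' % (len(user_token), user_token, system_token)
    # forwarded == '6-abc123SYSTEMSECRET'

    # Receiving side: split on the first '-' to recover the length,
    # then slice the user and system parts back out.
    length, _, rest = forwarded.partition('-')
    user_part, system_part = rest[:int(length)], rest[int(length):]
    assert (user_part, system_part) == (user_token, system_token)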
35.304348
79
0.70197
348
2,436
4.747126
0.321839
0.043584
0.030872
0.025424
0.095642
0.041162
0.041162
0.041162
0
0
0
0.00463
0.20197
2,436
68
80
35.823529
0.845165
0.206486
0
0.086957
0
0
0.100364
0
0
0
0
0
0
1
0.043478
false
0
0.195652
0
0.304348
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0789092717eaaad8fa74b92820df7d2a61d9ba9b
18,065
py
Python
bvbabel/vmr.py
carbrock/bvbabel
baac12d106455e34d9924309eadb4df991d3d8c9
[ "MIT" ]
7
2021-08-02T09:58:08.000Z
2022-03-17T21:13:34.000Z
bvbabel/vmr.py
carbrock/bvbabel
baac12d106455e34d9924309eadb4df991d3d8c9
[ "MIT" ]
2
2021-08-09T14:57:38.000Z
2022-03-28T13:25:19.000Z
bvbabel/vmr.py
carbrock/bvbabel
baac12d106455e34d9924309eadb4df991d3d8c9
[ "MIT" ]
4
2021-08-09T07:45:59.000Z
2022-03-22T23:02:15.000Z
"""Read, write, create Brainvoyager VMR file format.""" import struct import numpy as np from bvbabel.utils import (read_variable_length_string, write_variable_length_string) # ============================================================================= def read_vmr(filename): """Read Brainvoyager VMR file. Parameters ---------- filename : string Path to file. Returns ------- header : dictionary Pre-data and post-data headers. data : 3D numpy.array Image data. """ header = dict() with open(filename, 'rb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets, # typically containing the whole brain (head) of subjects. The # intensity values are stored as a series of bytes. See the V16 format # for a version storing each intensity value with two bytes (short # integers). The VMR format contains a small header followed by the # actual data followed by a second, more extensive, header. The current # version of VMR files is "4", which is only slightly different from # version 3 (as indicated below). Version 3 added offset values to # format 2 in order to represent large data sets efficiently, e.g. in # the context of advanced segmentation processing. Compared to the # original file version "1", file versions 2 and higher contain # additional header information after the actual data ("post-data # header"). This allows to read VMR data sets with minimal header # checking if the extended information is not needed. The information # in the post-data header contains position information (if available) # and stores a series of spatial transformations, which might have been # performed to the original data set ("history record"). The # post-header data can be probably ignored for custom routines, but is # important in BrainVoyager QX for spatial transformation and # coregistration routines as well as for proper visualization. # Expected binary data: unsigned short int (2 bytes) data, = struct.unpack('<H', f.read(2)) header["File version"] = data data, = struct.unpack('<H', f.read(2)) header["DimX"] = data data, = struct.unpack('<H', f.read(2)) header["DimY"] = data data, = struct.unpack('<H', f.read(2)) header["DimZ"] = data # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): Each data element (intensity value) is # represented in 1 byte. The data is organized in three loops: # DimZ # DimY # DimX # # The axes terminology follows the internal BrainVoyager (BV) format. 
# The mapping to Talairach axes is as follows: # BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space # BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space # BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space # Expected binary data: unsigned char (1 byte). Read all voxels in # one call; this is equivalent to, but much faster than, unpacking # the bytes one by one with struct.unpack('<B', ...). data_img = np.fromfile(f, dtype="<B", count=header["DimZ"] * header["DimY"] * header["DimX"]) data_img = np.reshape( data_img, (header["DimZ"], header["DimY"], header["DimX"])) data_img = np.transpose(data_img, (0, 2, 1))  # BV to Tal data_img = data_img[::-1, ::-1, ::-1]  # Flip BV axes # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- # NOTE(Developer Guide 2.6): The first four entries of the post-data # header are new since file version "3" and contain offset values for # each dimension as well as a value indicating the size of a cube with # iso-dimensions to which the data set will be internally "expanded" # for certain operations. The axes labels are in terms of # BrainVoyager's internal format. These four entries are followed by # scan position information from the original file headers, e.g. from # DICOM files. The coordinate axes labels in these entries are not in # terms of BrainVoyager's internal conventions but follow the DICOM # standard. Then optionally follows a section listing spatial # transformations which may have been performed to create the # current VMR (e.g. ACPC transformation). Finally, additional # information further describes the data set, including the assumed # left-right convention, the reference space (e.g. Talairach after # normalization) and voxel resolution. if header["File version"] >= 3: # NOTE(Developer Guide 2.6): These four entries have been added in # file version "3" with BrainVoyager QX 1.7. All other entries are # identical to file version "2". 
# Expected binary data: short int (2 bytes) data, = struct.unpack('<h', f.read(2)) header["OffsetX"] = data data, = struct.unpack('<h', f.read(2)) header["OffsetY"] = data data, = struct.unpack('<h', f.read(2)) header["OffsetZ"] = data data, = struct.unpack('<h', f.read(2)) header["FramingCubeDim"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PosInfosVerified"] = data data, = struct.unpack('<i', f.read(4)) header["CoordinateSystem"] = data # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["Slice1CenterX"] = data # First slice center X coordinate data, = struct.unpack('<f', f.read(4)) header["Slice1CenterY"] = data # First slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header["Slice1CenterZ"] = data # First slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterX"] = data # Last slice center X coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterY"] = data # Last slice center Y coordinate data, = struct.unpack('<f', f.read(4)) header["SliceNCenterZ"] = data # Last slice center Z coordinate data, = struct.unpack('<f', f.read(4)) header["RowDirX"] = data # Slice row direction vector X component data, = struct.unpack('<f', f.read(4)) header["RowDirY"] = data # Slice row direction vector Y component data, = struct.unpack('<f', f.read(4)) header["RowDirZ"] = data # Slice row direction vector Z component data, = struct.unpack('<f', f.read(4)) header["ColDirX"] = data # Slice column direction vector X component data, = struct.unpack('<f', f.read(4)) header["ColDirY"] = data # Slice column direction vector Y component data, = struct.unpack('<f', f.read(4)) header["ColDirZ"] = data # Slice column direction vector Z component # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["NRows"] = data # Nr of rows of slice image matrix data, = struct.unpack('<i', f.read(4)) header["NCols"] = data # Nr of columns of slice image matrix # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["FoVRows"] = data # Field of view extent in row direction [mm] data, = struct.unpack('<f', f.read(4)) header["FoVCols"] = data # Field of view extent in column dir. [mm] data, = struct.unpack('<f', f.read(4)) header["SliceThickness"] = data # Slice thickness [mm] data, = struct.unpack('<f', f.read(4)) header["GapThickness"] = data # Gap thickness [mm] # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["NrOfPastSpatialTransformations"] = data if header["NrOfPastSpatialTransformations"] != 0: # NOTE(Developer Guide 2.6): For each past transformation, the # information specified in the following table is stored. 
The # "type of transformation" is a value determining how many # subsequent values define the transformation: # "1": Rigid body+scale (3 translation, 3 rotation, 3 scale) # "2": Affine transformation (16 values, 4x4 matrix) # "4": Talairach transformation # "5": Un-Talairach transformation (1 - 5 -> BV axes) header["PastTransformation"] = [] for i in range(header["NrOfPastSpatialTransformations"]): header["PastTransformation"].append(dict()) # Expected binary data: variable-length string data = read_variable_length_string(f) header["PastTransformation"][i]["Name"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PastTransformation"][i]["Type"] = data # Expected binary data: variable-length string data = read_variable_length_string(f) header["PastTransformation"][i]["SourceFileName"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["PastTransformation"][i]["NrOfValues"] = data # Store transformation values as a list trans_values = [] for j in range(header["PastTransformation"][i]["NrOfValues"]): # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) trans_values.append(data) header["PastTransformation"][i]["Values"] = trans_values # Expected binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header["LeftRightConvention"] = data # modified in v4 data, = struct.unpack('<B', f.read(1)) header["ReferenceSpaceVMR"] = data # new in v4 # Expected binary data: float (4 bytes) data, = struct.unpack('<f', f.read(4)) header["VoxelSizeX"] = data # Voxel resolution along X axis data, = struct.unpack('<f', f.read(4)) header["VoxelSizeY"] = data # Voxel resolution along Y axis data, = struct.unpack('<f', f.read(4)) header["VoxelSizeZ"] = data # Voxel resolution along Z axis # Expected binary data: char (1 byte) data, = struct.unpack('<B', f.read(1)) header["VoxelResolutionVerified"] = data data, = struct.unpack('<B', f.read(1)) header["VoxelResolutionInTALmm"] = data # Expected binary data: int (4 bytes) data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MinValue"] = data # 16-bit data min intensity data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity data, = struct.unpack('<i', f.read(4)) header["VMROrigV16MaxValue"] = data # 16-bit data max intensity return header, data_img # ============================================================================= def write_vmr(filename, header, data_img): """Protocol to write Brainvoyager VMR file. Parameters ---------- filename : string Output filename. header : dictionary Header of VMR file. data_img : numpy.array, 3D Image. 
""" with open(filename, 'wb') as f: # --------------------------------------------------------------------- # VMR Pre-Data Header # --------------------------------------------------------------------- # Expected binary data: unsigned short int (2 bytes) data = header["File version"] f.write(struct.pack('<H', data)) data = header["DimX"] f.write(struct.pack('<H', data)) data = header["DimY"] f.write(struct.pack('<H', data)) data = header["DimZ"] f.write(struct.pack('<H', data)) # --------------------------------------------------------------------- # VMR Data # --------------------------------------------------------------------- # Convert axes from Nifti standard back to BV standard data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal # Expected binary data: unsigned char (1 byte) data_img = data_img.flatten() for i in range(data_img.size): f.write(struct.pack('<B', data_img[i])) # --------------------------------------------------------------------- # VMR Post-Data Header # --------------------------------------------------------------------- if header["File version"] >= 3: # Expected binary data: short int (2 bytes) data = header["OffsetX"] f.write(struct.pack('<h', data)) data = header["OffsetY"] f.write(struct.pack('<h', data)) data = header["OffsetZ"] f.write(struct.pack('<h', data)) data = header["FramingCubeDim"] f.write(struct.pack('<h', data)) # Expected binary data: int (4 bytes) data = header["PosInfosVerified"] f.write(struct.pack('<i', data)) data = header["CoordinateSystem"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes) data = header["Slice1CenterX"] f.write(struct.pack('<f', data)) data = header["Slice1CenterY"] f.write(struct.pack('<f', data)) data = header["Slice1CenterZ"] f.write(struct.pack('<f', data)) data = header["SliceNCenterX"] f.write(struct.pack('<f', data)) data = header["SliceNCenterY"] f.write(struct.pack('<f', data)) data = header["SliceNCenterZ"] f.write(struct.pack('<f', data)) data = header["RowDirX"] f.write(struct.pack('<f', data)) data = header["RowDirY"] f.write(struct.pack('<f', data)) data = header["RowDirZ"] f.write(struct.pack('<f', data)) data = header["ColDirX"] f.write(struct.pack('<f', data)) data = header["ColDirY"] f.write(struct.pack('<f', data)) data = header["ColDirZ"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes) data = header["NRows"] f.write(struct.pack('<i', data)) data = header["NCols"] f.write(struct.pack('<i', data)) # Expected binary data: float (4 bytes) data = header["FoVRows"] f.write(struct.pack('<f', data)) data = header["FoVCols"] f.write(struct.pack('<f', data)) data = header["SliceThickness"] f.write(struct.pack('<f', data)) data = header["GapThickness"] f.write(struct.pack('<f', data)) # Expected binary data: int (4 bytes) data = header["NrOfPastSpatialTransformations"] f.write(struct.pack('<i', data)) if header["NrOfPastSpatialTransformations"] != 0: for i in range(header["NrOfPastSpatialTransformations"]): # Expected binary data: variable-length string data = header["PastTransformation"][i]["Name"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes) data = header["PastTransformation"][i]["Type"] f.write(struct.pack('<i', data)) # Expected binary data: variable-length string data = header["PastTransformation"][i]["SourceFileName"] write_variable_length_string(f, data) # Expected binary data: int (4 bytes) data = header["PastTransformation"][i]["NrOfValues"] f.write(struct.pack('<i', data)) 
# Transformation values are stored as a list trans_values = header["PastTransformation"][i]["Values"] for j in range(header["PastTransformation"][i]["NrOfValues"]): # Expected binary data: float (4 bytes) f.write(struct.pack('<f', trans_values[j])) # Expected binary data: char (1 byte) data = header["LeftRightConvention"] f.write(struct.pack('<B', data)) data = header["ReferenceSpaceVMR"] f.write(struct.pack('<B', data)) # Expected binary data: float (4 bytes) data = header["VoxelSizeX"] f.write(struct.pack('<f', data)) data = header["VoxelSizeY"] f.write(struct.pack('<f', data)) data = header["VoxelSizeZ"] f.write(struct.pack('<f', data)) # Expected binary data: char (1 byte) data = header["VoxelResolutionVerified"] f.write(struct.pack('<B', data)) data = header["VoxelResolutionInTALmm"] f.write(struct.pack('<B', data)) # Expected binary data: int (4 bytes) data = header["VMROrigV16MinValue"] f.write(struct.pack('<i', data)) data = header["VMROrigV16MeanValue"] f.write(struct.pack('<i', data)) data = header["VMROrigV16MaxValue"] f.write(struct.pack('<i', data)) return print("VMR saved.")
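The read loop in read_vmr calls struct.unpack once per voxel, which is correct but slow for whole-brain volumes. A minimal vectorized alternative is sketched below; it is not part of bvbabel, the helper name is made up for illustration, and it assumes the same little-endian unsigned-byte layout that the loop reads:

import numpy as np

def read_vmr_voxels(f, dim_z, dim_y, dim_x):
    """Read dim_z * dim_y * dim_x unsigned bytes in a single call (sketch)."""
    n_voxels = dim_z * dim_y * dim_x
    raw = f.read(n_voxels)  # one read() instead of n_voxels read(1) calls
    # np.frombuffer returns a read-only view on the bytes; copy to get a
    # writable array, matching the behavior of the element-wise loop.
    data = np.frombuffer(raw, dtype="<B").copy()
    return np.reshape(data, (dim_z, dim_y, dim_x))

The reshaped array can then go through the same transpose and flip steps as in read_vmr.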
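As a usage sketch for the pair of functions above: read a file, tweak the data, write it back. The filenames are illustrative and the import path assumes this module lives at bvbabel/vmr.py:

import numpy as np
from bvbabel.vmr import read_vmr, write_vmr  # assumed module path

header, data = read_vmr("sub-01_T1w.vmr")  # hypothetical input file
print(header["DimX"], header["DimY"], header["DimZ"], data.shape)

data = np.clip(data, 0, 225)  # e.g. clamp intensities before writing
write_vmr("sub-01_T1w_clipped.vmr", header, data)

Because read_vmr transposes and flips the data out of BV's on-disk axis order and write_vmr applies the inverse operations in reverse order, a read-write round trip preserves the on-disk voxel order.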
45.049875
79
0.546305
2,052
18,065
4.787524
0.16423
0.052932
0.052524
0.070033
0.53634
0.498371
0.444727
0.434141
0.321967
0.236055
0
0.012127
0.278771
18,065
400
80
45.1625
0.741883
0.397509
0
0.476415
0
0
0.152053
0.025374
0
0
0
0
0
1
0.009434
false
0
0.014151
0
0.033019
0.004717
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0789c9270fff78d0b163f2215a2a6a958e9cdb11
2,279
py
Python
example/image-classification/test_score.py
Vikas-kum/incubator-mxnet
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
[ "Apache-2.0" ]
399
2017-05-30T05:12:48.000Z
2022-01-29T05:53:08.000Z
example/image-classification/test_score.py
Vikas-kum/incubator-mxnet
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
[ "Apache-2.0" ]
187
2018-03-16T23:44:43.000Z
2021-12-14T21:19:54.000Z
example/image-classification/test_score.py
Vikas-kum/incubator-mxnet
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
[ "Apache-2.0" ]
107
2017-05-30T05:53:22.000Z
2021-06-24T02:43:31.000Z
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""test pretrained models"""
from __future__ import print_function
import mxnet as mx
from common import find_mxnet, modelzoo
from score import score

VAL_DATA = 'data/val-5k-256.rec'


def download_data():
    return mx.test_utils.download(
        'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)


def test_imagenet1k_resnet(**kwargs):
    models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
    accs = [.77, .78]
    for (m, g) in zip(models, accs):
        acc = mx.metric.create('acc')
        (speed,) = score(model=m, data_val=VAL_DATA,
                         rgb_mean='0,0,0', metrics=acc, **kwargs)
        r = acc.get()[1]
        print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed))
        assert r > g and r < g + .1


def test_imagenet1k_inception_bn(**kwargs):
    acc = mx.metric.create('acc')
    m = 'imagenet1k-inception-bn'
    g = 0.75
    (speed,) = score(model=m, data_val=VAL_DATA,
                     rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)
    r = acc.get()[1]
    print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed))
    assert r > g and r < g + .1


if __name__ == '__main__':
    gpus = mx.test_utils.list_gpus()
    assert len(gpus) > 0
    batch_size = 16 * len(gpus)
    gpus = ','.join([str(i) for i in gpus])

    kwargs = {'gpus': gpus, 'batch_size': batch_size, 'max_num_examples': 500}
    download_data()
    test_imagenet1k_resnet(**kwargs)
    test_imagenet1k_inception_bn(**kwargs)
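The __main__ block above requires at least one GPU. For a quick CPU-only smoke run, something like the sketch below could work; it assumes score() falls back to mx.cpu() when 'gpus' is empty, which depends on how score.py builds its device list, so check that before relying on it:

# CPU-only smoke run (sketch, not part of the original script). Assumes
# score() uses mx.cpu() when the 'gpus' string is empty.
kwargs = {'gpus': '', 'batch_size': 16, 'max_num_examples': 100}
download_data()
test_imagenet1k_inception_bn(**kwargs)

With only 100 examples the accuracy estimate is noisy, so the assertion inside the test may fail; the point of such a run is only to exercise the scoring path end to end.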
36.174603
78
0.662571
342
2,279
4.292398
0.435673
0.040872
0.042916
0.021798
0.239782
0.149864
0.149864
0.149864
0.149864
0.149864
0
0.03404
0.21369
2,279
62
79
36.758065
0.785156
0.3405
0
0.162162
0
0
0.183784
0.044595
0
0
0
0
0.081081
1
0.081081
false
0
0.108108
0.027027
0.216216
0.081081
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078a7ff149f5f6902b3df48444c9f900c3b57349
139,271
py
Python
verticapy/vcolumn.py
vertica/vertica_ml_python
9e82dba94afe8447bfa2492f343af6669128e2fb
[ "Apache-2.0" ]
7
2018-05-10T08:16:31.000Z
2018-05-15T00:59:26.000Z
verticapy/vcolumn.py
vertica/vertica_ml_python
9e82dba94afe8447bfa2492f343af6669128e2fb
[ "Apache-2.0" ]
1
2018-05-15T00:15:35.000Z
2018-05-15T13:40:19.000Z
verticapy/vcolumn.py
vertica/vertica_ml_python
9e82dba94afe8447bfa2492f343af6669128e2fb
[ "Apache-2.0" ]
null
null
null
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # |_ |~) _ _| _ /~\ _ |. # |_)\/ |_)(_|(_|| \_/|_|(_||| # / # ____________ ______ # / __ `\ / / # | \/ / / / # |______ / / / # |____/ / / # _____________ / / # \ / / / # \ / / / # \_______/ / / # ______ / / # \ / / / # \ / / / # \/ / / # / / # / / # \ / # \ / # \/ # _ # \ / _ __|_. _ _ |_) # \/ (/_| | |(_(_|| \/ # / # VerticaPy is a Python library with scikit-like functionality for conducting # data science projects on data stored in Vertica, taking advantage Vertica’s # speed and built-in analytics and machine learning features. It supports the # entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize # data transformation operations, and offers beautiful graphical options. # # VerticaPy aims to do all of the above. The idea is simple: instead of moving # data around for processing, VerticaPy brings the logic to the data. # # # Modules # # Standard Python Modules import math, re, decimal, warnings, datetime from collections.abc import Iterable from typing import Union # VerticaPy Modules import verticapy from verticapy.utilities import * from verticapy.toolbox import * from verticapy.errors import * ## # # __ __ ______ ______ __ __ __ __ __ __ __ # /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \ # \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \ # \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\ # \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/ # # # ---# class vColumn(str_sql): """ --------------------------------------------------------------------------- Python object which that stores all user transformations. If the vDataFrame represents the entire relation, a vColumn can be seen as one column of that relation. vColumns simplify several processes with its abstractions. Parameters ---------- alias: str vColumn alias. transformations: list, optional List of the different transformations. Each transformation must be similar to the following: (function, type, category) parent: vDataFrame, optional Parent of the vColumn. One vDataFrame can have multiple children vColumns whereas one vColumn can only have one parent. catalog: dict, optional Catalog where each key corresponds to an aggregation. vColumns will memorize the already computed aggregations to gain in performance. The catalog will be updated when the parent vDataFrame is modified. Attributes ---------- alias, str : vColumn alias. catalog, dict : Catalog of pre-computed aggregations. parent, vDataFrame : Parent of the vColumn. transformations, str : List of the different transformations. 
""" # # Special Methods # # ---# def __init__( self, alias: str, transformations: list = [], parent=None, catalog: dict = {} ): self.parent, self.alias, self.transformations = ( parent, alias, [elem for elem in transformations], ) self.catalog = { "cov": {}, "pearson": {}, "spearman": {}, "spearmand": {}, "kendall": {}, "cramer": {}, "biserial": {}, "regr_avgx": {}, "regr_avgy": {}, "regr_count": {}, "regr_intercept": {}, "regr_r2": {}, "regr_slope": {}, "regr_sxx": {}, "regr_sxy": {}, "regr_syy": {}, } for elem in catalog: self.catalog[elem] = catalog[elem] # ---# def __getitem__(self, index): if isinstance(index, slice): assert index.step in (1, None), ValueError( "vColumn doesn't allow slicing having steps different than 1." ) index_stop = index.stop index_start = index.start if not (isinstance(index_start, int)): index_start = 0 if index_start < 0: index_start += self.parent.shape()[0] if isinstance(index_stop, int): if index_stop < 0: index_stop += self.parent.shape()[0] limit = index_stop - index_start if limit <= 0: limit = 0 limit = " LIMIT {}".format(limit) else: limit = "" query = "(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE".format( self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index_start, limit, ) return vDataFrameSQL(query) elif isinstance(index, int): cast = "::float" if self.category() == "float" else "" if index < 0: index += self.parent.shape()[0] query = "SELECT {}{} FROM {}{} OFFSET {} LIMIT 1".format( self.alias, cast, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), index, ) return executeSQL( query=query, title="Getting the vColumn element.", method="fetchfirstelem", ) else: return getattr(self, index) # ---# def __len__(self): return int(self.count()) # ---# def __nonzero__(self): return self.count() > 0 # ---# def __repr__(self): return self.head(limit=verticapy.options["max_rows"]).__repr__() # ---# def _repr_html_(self): return self.head(limit=verticapy.options["max_rows"])._repr_html_() # ---# def __setattr__(self, attr, val): self.__dict__[attr] = val # # Methods # # ---# def aad(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'aad' (Average Absolute Deviation). Returns ------- float aad See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["aad"]).values[self.alias][0] # ---# def abs(self): """ --------------------------------------------------------------------------- Applies the absolute value function to the input vColumn. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ return self.apply(func="ABS({})") # ---# def add(self, x: float): """ --------------------------------------------------------------------------- Adds the input element to the vColumn. Parameters ---------- x: float If the vColumn type is date like (date, datetime ...), the parameter 'x' will represent the number of seconds, otherwise it will represent a number. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types([("x", x, [int, float])]) if self.isdate(): return self.apply(func="TIMESTAMPADD(SECOND, {}, {})".format(x, "{}")) else: return self.apply(func="{} + ({})".format("{}", x)) # ---# def add_copy(self, name: str): """ --------------------------------------------------------------------------- Adds a copy vColumn to the parent vDataFrame. 
Parameters ---------- name: str Name of the copy. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.eval : Evaluates a customized expression. """ check_types([("name", name, [str])]) name = quote_ident(name.replace('"', "_")) assert name.replace('"', ""), EmptyParameter( "The parameter 'name' must not be empty" ) assert not (self.parent.is_colname_in(name)), NameError( f"A vColumn has already the alias {name}.\nBy changing the parameter 'name', you'll be able to solve this issue." ) new_vColumn = vColumn( name, parent=self.parent, transformations=[item for item in self.transformations], catalog=self.catalog, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name[1:-1], new_vColumn) self.parent._VERTICAPY_VARIABLES_["columns"] += [name] self.parent.__add_to_history__( "[Add Copy]: A copy of the vColumn {} named {} was added to the vDataFrame.".format( self.alias, name ) ) return self.parent # ---# def aggregate(self, func: list): """ --------------------------------------------------------------------------- Aggregates the vColumn using the input functions. Parameters ---------- func: list List of the different aggregation. aad : average absolute deviation approx_unique : approximative cardinality count : number of non-missing elements cvar : conditional value at risk dtype : vColumn type iqr : interquartile range kurtosis : kurtosis jb : Jarque-Bera index mad : median absolute deviation max : maximum mean : average median : median min : minimum mode : most occurent element percent : percent of non-missing elements q% : q quantile (ex: 50% for the median) prod : product range : difference between the max and the min sem : standard error of the mean skewness : skewness sum : sum std : standard deviation topk : kth most occurent element (ex: top1 for the mode) topk_percent : kth most occurent element density unique : cardinality (count distinct) var : variance Other aggregations could work if it is part of the DB version you are using. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame.analytic : Adds a new vColumn to the vDataFrame by using an advanced analytical function on a specific vColumn. """ return self.parent.aggregate(func=func, columns=[self.alias]).transpose() agg = aggregate # ---# def apply(self, func: str, copy_name: str = ""): """ --------------------------------------------------------------------------- Applies a function to the vColumn. Parameters ---------- func: str, Function in pure SQL used to transform the vColumn. The function variable must be composed of two flower brackets {}. For example to apply the function: x -> x^2 + 2 use "POWER({}, 2) + 2". copy_name: str, optional If not empty, a copy will be created using the input Name. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.apply : Applies functions to the input vColumns. vDataFrame.applymap : Applies a function to all the vColumns. vDataFrame.eval : Evaluates a customized expression. 
""" if isinstance(func, str_sql): func = str(func) check_types([("func", func, [str]), ("copy_name", copy_name, [str])]) try: try: ctype = get_data_types( "SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format( func.replace("{}", self.alias), self.parent.__genSQL__(), self.alias, ), "apply_test_feature", ) except: ctype = get_data_types( "SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format( func.replace("{}", self.alias), self.parent.__genSQL__(), self.alias, ), "apply_test_feature", ) category = get_category_from_vertica_type(ctype=ctype) all_cols, max_floor = self.parent.get_columns(), 0 for column in all_cols: try: if (quote_ident(column) in func) or ( re.search( re.compile("\\b{}\\b".format(column.replace('"', ""))), func ) ): max_floor = max( len(self.parent[column].transformations), max_floor ) except: pass max_floor -= len(self.transformations) if copy_name: self.add_copy(name=copy_name) for k in range(max_floor): self.parent[copy_name].transformations += [ ("{}", self.ctype(), self.category()) ] self.parent[copy_name].transformations += [(func, ctype, category)] self.parent[copy_name].catalog = self.catalog self.parent.__add_to_history__( "[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format( copy_name.replace('"', ""), func.replace("{}", "x"), ) ) else: for k in range(max_floor): self.transformations += [("{}", self.ctype(), self.category())] self.transformations += [(func, ctype, category)] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( "[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format( self.alias.replace('"', ""), func.replace("{}", "x"), ) ) return self.parent except Exception as e: raise QueryError( "{}\nError when applying the func 'x -> {}' to '{}'".format( e, func.replace("{}", "x"), self.alias.replace('"', "") ) ) # ---# def apply_fun(self, func: str, x: float = 2): """ --------------------------------------------------------------------------- Applies a default function to the vColumn. Parameters ---------- func: str Function to use to transform the vColumn. abs : absolute value acos : trigonometric inverse cosine asin : trigonometric inverse sine atan : trigonometric inverse tangent cbrt : cube root ceil : value up to the next whole number cos : trigonometric cosine cosh : hyperbolic cosine cot : trigonometric cotangent exp : exponential function floor : value down to the next whole number ln : natural logarithm log : logarithm log10 : base 10 logarithm mod : remainder of a division operation pow : number raised to the power of another number round : rounds a value to a specified number of decimal places sign : arithmetic sign sin : trigonometric sine sinh : hyperbolic sine sqrt : arithmetic square root tan : trigonometric tangent tanh : hyperbolic tangent x: int/float, optional If the function has two arguments (example, power or mod), 'x' represents the second argument. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the vColumn. 
""" check_types( [ ( "func", func, [ "abs", "acos", "asin", "atan", "cbrt", "ceil", "cos", "cosh", "cot", "exp", "floor", "ln", "log", "log10", "mod", "pow", "round", "sign", "sin", "sinh", "sqrt", "tan", "tanh", ], ), ("x", x, [int, float]), ] ) if func not in ("log", "mod", "pow", "round"): expr = "{}({})".format(func.upper(), "{}") else: expr = "{}({}, {})".format(func.upper(), "{}", x) return self.apply(func=expr) # ---# def astype(self, dtype: str): """ --------------------------------------------------------------------------- Converts the vColumn to the input type. Parameters ---------- dtype: str New type. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.astype : Converts the vColumns to the input type. """ check_types([("dtype", dtype, [str])]) try: query = "SELECT {}::{} AS {} FROM {} WHERE {} IS NOT NULL LIMIT 20".format( self.alias, dtype, self.alias, self.parent.__genSQL__(), self.alias ) executeSQL(query, title="Testing the Type casting.") self.transformations += [ ( "{}::{}".format("{}", dtype), dtype, get_category_from_vertica_type(ctype=dtype), ) ] self.parent.__add_to_history__( "[AsType]: The vColumn {} was converted to {}.".format( self.alias, dtype ) ) return self.parent except Exception as e: raise ConversionError( "{}\nThe vColumn {} can not be converted to {}".format( e, self.alias, dtype ) ) # ---# def avg(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'avg' (Average). Returns ------- float average See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["avg"]).values[self.alias][0] mean = avg # ---# def bar( self, method: str = "density", of: str = "", max_cardinality: int = 6, nbins: int = 0, h: float = 0, ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the bar chart of the vColumn based on an aggregation. Parameters ---------- method: str, optional The method to use to aggregate the data. count : Number of elements. density : Percentage of the distribution. mean : Average of the vColumn 'of'. min : Minimum of the vColumn 'of'. max : Maximum of the vColumn 'of'. sum : Sum of the vColumn 'of'. q% : q Quantile of the vColumn 'of' (ex: 50% to get the median). It can also be a cutomized aggregation (ex: AVG(column1) + 5). of: str, optional The vColumn to use to compute the aggregation. max_cardinality: int, optional Maximum number of the vColumn distinct elements to be used as categorical (No h will be picked or computed) nbins: int, optional Number of nbins. If empty, an optimized number of nbins will be computed. h: float, optional Interval width of the bar. If empty, an optimized h will be computed. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation. 
""" check_types( [ ("method", method, [str]), ("of", of, [str]), ("max_cardinality", max_cardinality, [int, float]), ("nbins", nbins, [int, float]), ("h", h, [int, float]), ] ) if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import bar return bar(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---# def boxplot( self, by: str = "", h: float = 0, max_cardinality: int = 8, cat_priority: list = [], ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the box plot of the vColumn. Parameters ---------- by: str, optional vColumn to use to partition the data. h: float, optional Interval width if the vColumn is numerical or of type date like. Optimized h will be computed if the parameter is empty or invalid. max_cardinality: int, optional Maximum number of vColumn distinct elements to be used as categorical. The less frequent elements will be gathered together to create a new category : 'Others'. cat_priority: list, optional List of the different categories to consider when drawing the box plot. The other categories will be filtered. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.boxplot : Draws the Box Plot of the input vColumns. """ if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)): cat_priority = [cat_priority] check_types( [ ("by", by, [str]), ("max_cardinality", max_cardinality, [int, float]), ("h", h, [int, float]), ("cat_priority", cat_priority, [list]), ] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import boxplot return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds) # ---# def category(self): """ --------------------------------------------------------------------------- Returns the category of the vColumn. The category will be one of the following: date / int / float / text / binary / spatial / uuid / undefined Returns ------- str vColumn category. See Also -------- vDataFrame[].ctype : Returns the vColumn database type. """ return self.transformations[-1][2] # ---# def clip(self, lower=None, upper=None): """ --------------------------------------------------------------------------- Clips the vColumn by transforming the values lesser than the lower bound to the lower bound itself and the values higher than the upper bound to the upper bound itself. Parameters ---------- lower: float, optional Lower bound. upper: float, optional Upper bound. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].fill_outliers : Fills the vColumn outliers using the input method. """ check_types([("lower", lower, [float, int]), ("upper", upper, [float, int])]) assert (lower != None) or (upper != None), ParameterError( "At least 'lower' or 'upper' must have a numerical value" ) lower_when = ( "WHEN {} < {} THEN {} ".format("{}", lower, lower) if (isinstance(lower, (float, int))) else "" ) upper_when = ( "WHEN {} > {} THEN {} ".format("{}", upper, upper) if (isinstance(upper, (float, int))) else "" ) func = "(CASE {}{}ELSE {} END)".format(lower_when, upper_when, "{}") self.apply(func=func) return self.parent # ---# def count(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'count' (Number of non-Missing elements). 
Returns ------- int number of non-Missing elements. See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["count"]).values[self.alias][0] # ---# def cut( self, breaks: list, labels: list = [], include_lowest: bool = True, right: bool = True, ): """ --------------------------------------------------------------------------- Discretizes the vColumn using the input list. Parameters ---------- breaks: list List of values used to cut the vColumn. labels: list, optional Labels used to name the new categories. If empty, names will be generated. include_lowest: bool, optional If set to True, the lowest element of the list will be included. right: bool, optional How the intervals should be closed. If set to True, the intervals will be closed on the right. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types( [ ("breaks", breaks, [list]), ("labels", labels, [list]), ("include_lowest", include_lowest, [bool]), ("right", right, [bool]), ] ) assert self.isnum() or self.isdate(), TypeError( "cut only works on numerical / date-like vColumns." ) assert len(breaks) >= 2, ParameterError( "Length of parameter 'breaks' must be greater or equal to 2." ) assert len(breaks) == len(labels) + 1 or not (labels), ParameterError( "Length of parameter breaks must be equal to the length of parameter 'labels' + 1 or parameter 'labels' must be empty." ) conditions, column = [], self.alias for idx in range(len(breaks) - 1): first_elem, second_elem = breaks[idx], breaks[idx + 1] if right: op1, op2, close_l, close_r = "<", "<=", "]", "]" else: op1, op2, close_l, close_r = "<=", "<", "[", "[" if idx == 0 and include_lowest: op1, close_l = "<=", "[" elif idx == 0: op1, close_l = "<", "]" if labels: label = labels[idx] else: label = f"{close_l}{first_elem};{second_elem}{close_r}" conditions += [ f"'{first_elem}' {op1} {column} AND {column} {op2} '{second_elem}' THEN '{label}'" ] expr = "CASE WHEN " + " WHEN ".join(conditions) + " END" self.apply(func=expr) # ---# def ctype(self): """ --------------------------------------------------------------------------- Returns the vColumn DB type. Returns ------- str vColumn DB type. """ return self.transformations[-1][1].lower() dtype = ctype # ---# def date_part(self, field: str): """ --------------------------------------------------------------------------- Extracts a specific TS field from the vColumn (only if the vColumn type is date like). The vColumn will be transformed. Parameters ---------- field: str The field to extract. It must be one of the following: CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK / ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER / SECOND / TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].slice : Slices the vColumn using a time series rule. """ return self.apply(func="DATE_PART('{}', {})".format(field, "{}")) # ---# def decode(self, *argv): """ --------------------------------------------------------------------------- Encodes the vColumn using a user-defined encoding. Parameters ---------- argv: object Any amount of expressions. The expression generated will look like: even: CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... END odd : CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... 
ELSE argv[n] END Returns ------- vDataFrame self.parent See Also -------- vDataFrame.case_when : Creates a new feature by evaluating some conditions. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response. """ import verticapy.stats as st return self.apply(func=st.decode(str_sql("{}"), *argv)) # ---# def density( self, by: str = "", bandwidth: float = 1.0, kernel: str = "gaussian", nbins: int = 200, xlim: tuple = None, ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the vColumn Density Plot. Parameters ---------- by: str, optional vColumn to use to partition the data. bandwidth: float, optional The bandwidth of the kernel. kernel: str, optional The method used for the plot. gaussian : Gaussian kernel. logistic : Logistic kernel. sigmoid : Sigmoid kernel. silverman : Silverman kernel. nbins: int, optional Maximum number of points to use to evaluate the approximate density function. Increasing this parameter will increase the precision but will also increase the time of the learning and scoring phases. xlim: tuple, optional Set the x limits of the current axes. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation. """ check_types( [ ("by", by, [str]), ("kernel", kernel, ["gaussian", "logistic", "sigmoid", "silverman"]), ("bandwidth", bandwidth, [int, float]), ("nbins", nbins, [float, int]), ] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import gen_colors from matplotlib.lines import Line2D colors = gen_colors() if not xlim: xmin = self.min() xmax = self.max() else: xmin, xmax = xlim custom_lines = [] columns = self.parent[by].distinct() for idx, column in enumerate(columns): param = {"color": colors[idx % len(colors)]} ax = self.parent.search( "{} = '{}'".format(self.parent[by].alias, column) )[self.alias].density( bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=(xmin, xmax), ax=ax, **updated_dict(param, style_kwds, idx), ) custom_lines += [ Line2D( [0], [0], color=updated_dict(param, style_kwds, idx)["color"], lw=4, ), ] ax.set_title("KernelDensity") ax.legend( custom_lines, columns, title=by, loc="center left", bbox_to_anchor=[1, 0.5], ) ax.set_xlabel(self.alias) return ax kernel = kernel.lower() from verticapy.learn.neighbors import KernelDensity schema = verticapy.options["temp_schema"] if not (schema): schema = "public" name = gen_tmp_name(schema=schema, name="kde") if isinstance(xlim, (tuple, list)): xlim_tmp = [xlim] else: xlim_tmp = [] model = KernelDensity( name, bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=xlim_tmp, store=False, ) try: result = model.fit(self.parent.__genSQL__(), [self.alias]).plot( ax=ax, **style_kwds ) model.drop() return result except: model.drop() raise # ---# def describe( self, method: str = "auto", max_cardinality: int = 6, numcol: str = "" ): """ --------------------------------------------------------------------------- Aggregates the vColumn using multiple statistical aggregations: min, max, median, unique... depending on the input method. 
Parameters ---------- method: str, optional The describe method. auto : Sets the method to 'numerical' if the vColumn is numerical , 'categorical' otherwise. categorical : Uses only categorical aggregations during the computation. cat_stats : Computes statistics of a numerical column for each vColumn category. In this case, the parameter 'numcol' must be defined. numerical : Uses popular numerical aggregations during the computation. max_cardinality: int, optional Cardinality threshold to use to determine if the vColumn will be considered as categorical. numcol: str, optional Numerical vColumn to use when the parameter method is set to 'cat_stats'. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ check_types( [ ("method", method, ["auto", "numerical", "categorical", "cat_stats"]), ("max_cardinality", max_cardinality, [int, float]), ("numcol", numcol, [str]), ] ) method = method.lower() assert (method != "cat_stats") or (numcol), ParameterError( "The parameter 'numcol' must be a vDataFrame column if the method is 'cat_stats'" ) distinct_count, is_numeric, is_date = ( self.nunique(), self.isnum(), self.isdate(), ) if (is_date) and not (method == "categorical"): result = self.aggregate(["count", "min", "max"]) index = result.values["index"] result = result.values[self.alias] elif (method == "cat_stats") and (numcol != ""): numcol = self.parent.format_colnames(numcol) assert self.parent[numcol].category() in ("float", "int"), TypeError( "The column 'numcol' must be numerical" ) cast = "::int" if (self.parent[numcol].isbool()) else "" query, cat = [], self.distinct() if len(cat) == 1: lp, rp = "(", ")" else: lp, rp = "", "" for category in cat: tmp_query = """SELECT '{0}' AS 'index', COUNT({1}) AS count, 100 * COUNT({1}) / {2} AS percent, AVG({3}{4}) AS mean, STDDEV({3}{4}) AS std, MIN({3}{4}) AS min, APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.1) AS 'approx_10%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.25) AS 'approx_25%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.5) AS 'approx_50%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.75) AS 'approx_75%', APPROXIMATE_PERCENTILE ({3}{4} USING PARAMETERS percentile = 0.9) AS 'approx_90%', MAX({3}{4}) AS max FROM vdf_table""".format( category, self.alias, self.parent.shape()[0], numcol, cast, ) tmp_query += ( " WHERE {} IS NULL".format(self.alias) if (category in ("None", None)) else " WHERE {} = '{}'".format( bin_spatial_to_str(self.category(), self.alias), category, ) ) query += [lp + tmp_query + rp] query = "WITH vdf_table AS (SELECT * FROM {}) {}".format( self.parent.__genSQL__(), " UNION ALL ".join(query) ) title = "Describes the statics of {} partitioned by {}.".format( numcol, self.alias ) values = to_tablesample(query, title=title).values elif ( ((distinct_count < max_cardinality + 1) and (method != "numerical")) or not (is_numeric) or (method == "categorical") ): query = """(SELECT {0} || '', COUNT(*) FROM vdf_table GROUP BY {0} ORDER BY COUNT(*) DESC LIMIT {1})""".format( self.alias, max_cardinality ) if distinct_count > max_cardinality: query += ( "UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*) AS count" " FROM vdf_table WHERE {0} IS NOT NULL GROUP BY {0} ORDER BY COUNT(*)" " DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY count DESC" ).format(self.alias, max_cardinality + 1) query = "WITH vdf_table AS 
(SELECT * FROM {}) {}".format( self.parent.__genSQL__(), query ) query_result = executeSQL( query=query, title="Computing the descriptive statistics of {}.".format(self.alias), method="fetchall", ) result = [distinct_count, self.count()] + [item[1] for item in query_result] index = ["unique", "count"] + [item[0] for item in query_result] else: result = ( self.parent.describe( method="numerical", columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) result = [distinct_count] + result index = [ "unique", "count", "mean", "std", "min", "approx_25%", "approx_50%", "approx_75%", "max", ] if method != "cat_stats": values = { "index": ["name", "dtype"] + index, "value": [self.alias, self.ctype()] + result, } if ((is_date) and not (method == "categorical")) or ( method == "is_numeric" ): self.parent.__update_catalog__({"index": index, self.alias: result}) for elem in values: for i in range(len(values[elem])): if isinstance(values[elem][i], decimal.Decimal): values[elem][i] = float(values[elem][i]) return tablesample(values) # ---# def discretize( self, method: str = "auto", h: float = 0, nbins: int = -1, k: int = 6, new_category: str = "Others", RFmodel_params: dict = {}, response: str = "", return_enum_trans: bool = False, ): """ --------------------------------------------------------------------------- Discretizes the vColumn using the input method. Parameters ---------- method: str, optional The method to use to discretize the vColumn. auto : Uses method 'same_width' for numerical vColumns, cast the other types to varchar. same_freq : Computes bins with the same number of elements. same_width : Computes regular width bins. smart : Uses the Random Forest on a response column to find the most relevant interval to use for the discretization. topk : Keeps the topk most frequent categories and merge the other into one unique category. h: float, optional The interval size to convert to use to convert the vColumn. If this parameter is equal to 0, an optimised interval will be computed. nbins: int, optional Number of bins used for the discretization (must be > 1) k: int, optional The integer k of the 'topk' method. new_category: str, optional The name of the merging category when using the 'topk' method. RFmodel_params: dict, optional Dictionary of the Random Forest model parameters used to compute the best splits when 'method' is set to 'smart'. A RF Regressor will be trained if the response is numerical (except ints and bools), a RF Classifier otherwise. Example: Write {"n_estimators": 20, "max_depth": 10} to train a Random Forest with 20 trees and a maximum depth of 10. response: str, optional Response vColumn when method is set to 'smart'. return_enum_trans: bool, optional Returns the transformation instead of the vDataFrame parent and do not apply it. This parameter is very useful for testing to be able to look at the final transformation. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode : Encodes the vColumn with user defined Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response. 
""" check_types( [ ("RFmodel_params", RFmodel_params, [dict]), ("return_enum_trans", return_enum_trans, [bool]), ("h", h, [int, float]), ("response", response, [str]), ("nbins", nbins, [int, float]), ( "method", method, ["auto", "smart", "same_width", "same_freq", "topk"], ), ("return_enum_trans", return_enum_trans, [bool]), ] ) method = method.lower() if self.isnum() and method == "smart": schema = verticapy.options["temp_schema"] if not (schema): schema = "public" tmp_view_name = gen_tmp_name(schema=schema, name="view") tmp_model_name = gen_tmp_name(schema=schema, name="model") assert nbins >= 2, ParameterError( "Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'smart'." ) assert response, ParameterError( "Parameter 'response' can not be empty in case of discretization using the method 'smart'." ) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) drop(tmp_view_name, method="view") self.parent.to_db(tmp_view_name) from verticapy.learn.ensemble import ( RandomForestClassifier, RandomForestRegressor, ) drop(tmp_model_name, method="model") if self.parent[response].category() == "float": model = RandomForestRegressor(tmp_model_name) else: model = RandomForestClassifier(tmp_model_name) model.set_params({"n_estimators": 20, "max_depth": 8, "nbins": 100}) model.set_params(RFmodel_params) parameters = model.get_params() try: model.fit(tmp_view_name, [self.alias], response) query = [ "(SELECT READ_TREE(USING PARAMETERS model_name = '{}', tree_id = {}, format = 'tabular'))".format( tmp_model_name, i ) for i in range(parameters["n_estimators"]) ] query = "SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value IS NOT NULL GROUP BY 1 ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float".format( " UNION ALL ".join(query), nbins - 1 ) result = executeSQL( query=query, title="Computing the optimized histogram nbins using Random Forest.", method="fetchall", ) result = [elem[0] for elem in result] except: drop(tmp_view_name, method="view") drop(tmp_model_name, method="model") raise drop(tmp_view_name, method="view") drop(tmp_model_name, method="model") result = [self.min()] + result + [self.max()] elif method == "topk": assert k >= 2, ParameterError( "Parameter 'k' must be greater or equals to 2 in case of discretization using the method 'topk'" ) distinct = self.topk(k).values["index"] trans = ( "(CASE WHEN {} IN ({}) THEN {} || '' ELSE '{}' END)".format( bin_spatial_to_str(self.category()), ", ".join( [ "'{}'".format(str(elem).replace("'", "''")) for elem in distinct ] ), bin_spatial_to_str(self.category()), new_category.replace("'", "''"), ), "varchar", "text", ) elif self.isnum() and method == "same_freq": assert nbins >= 2, ParameterError( "Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'same_freq'" ) count = self.count() nb = int(float(count / int(nbins))) assert nb != 0, Exception( "Not enough values to compute the Equal Frequency discretization" ) total, query, nth_elems = nb, [], [] while total < int(float(count / int(nbins))) * int(nbins): nth_elems += [str(total)] total += nb where = "WHERE _verticapy_row_nb_ IN ({})".format( ", ".join(["1"] + nth_elems + [str(count)]) ) query = "SELECT {} FROM (SELECT {}, ROW_NUMBER() OVER (ORDER BY {}) AS _verticapy_row_nb_ FROM {} WHERE {} IS NOT NULL) VERTICAPY_SUBTABLE {}".format( self.alias, self.alias, self.alias, self.parent.__genSQL__(), 
self.alias, where, ) result = executeSQL( query=query, title="Computing the equal frequency histogram bins.", method="fetchall", ) result = [elem[0] for elem in result] elif self.isnum() and method in ("same_width", "auto"): if not (h) or h <= 0: if nbins <= 0: h = self.numh() else: h = (self.max() - self.min()) * 1.01 / nbins if h > 0.01: h = round(h, 2) elif h > 0.0001: h = round(h, 4) elif h > 0.000001: h = round(h, 6) if self.category() == "int": h = int(max(math.floor(h), 1)) floor_end = -1 if (self.category() == "int") else "" if (h > 1) or (self.category() == "float"): trans = ( "'[' || FLOOR({} / {}) * {} || ';' || (FLOOR({} / {}) * {} + {}{}) || ']'".format( "{}", h, h, "{}", h, h, h, floor_end ), "varchar", "text", ) else: trans = ("FLOOR({}) || ''", "varchar", "text") else: trans = ("{} || ''", "varchar", "text") if (self.isnum() and method == "same_freq") or ( self.isnum() and method == "smart" ): n = len(result) trans = "(CASE " for i in range(1, n): trans += "WHEN {} BETWEEN {} AND {} THEN '[{};{}]' ".format( "{}", result[i - 1], result[i], result[i - 1], result[i] ) trans += " ELSE NULL END)" trans = (trans, "varchar", "text") if return_enum_trans: return trans else: self.transformations += [trans] sauv = {} for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) try: if "count" in sauv: self.catalog["count"] = sauv["count"] self.catalog["percent"] = ( 100 * sauv["count"] / self.parent.shape()[0] ) except: pass self.parent.__add_to_history__( "[Discretize]: The vColumn {} was discretized.".format(self.alias) ) return self.parent # ---# def distinct(self, **kwargs): """ --------------------------------------------------------------------------- Returns the distinct categories of the vColumn. Returns ------- list Distinct caterogies of the vColumn. See Also -------- vDataFrame.topk : Returns the vColumn most occurent elements. """ if "agg" not in kwargs: query = "SELECT {} AS {} FROM {} WHERE {} IS NOT NULL GROUP BY {} ORDER BY {}".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.alias, self.alias, self.alias, ) else: query = "SELECT {} FROM (SELECT {} AS {}, {} AS verticapy_agg FROM {} WHERE {} IS NOT NULL GROUP BY 1) x ORDER BY verticapy_agg DESC".format( self.alias, bin_spatial_to_str(self.category(), self.alias), self.alias, kwargs["agg"], self.parent.__genSQL__(), self.alias, ) query_result = executeSQL( query=query, title="Computing the distinct categories of {}.".format(self.alias), method="fetchall", ) return [item for sublist in query_result for item in sublist] # ---# def div(self, x: float): """ --------------------------------------------------------------------------- Divides the vColumn by the input element. Parameters ---------- x: float Input number. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types([("x", x, [int, float])]) assert x != 0, ValueError("Division by 0 is forbidden !") return self.apply(func="{} / ({})".format("{}", x)) # ---# def drop(self, add_history: bool = True): """ --------------------------------------------------------------------------- Drops the vColumn from the vDataFrame. Dropping a vColumn means simply not selecting it in the final generated SQL code. Note: Dropping a vColumn can make the vDataFrame "heavier" if it is used to compute other vColumns. 
Parameters ---------- add_history: bool, optional If set to True, the information will be stored in the vDataFrame history. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.drop: Drops the input vColumns from the vDataFrame. """ check_types([("add_history", add_history, [bool])]) try: parent = self.parent force_columns = [ column for column in self.parent._VERTICAPY_VARIABLES_["columns"] ] force_columns.remove(self.alias) executeSQL( "SELECT * FROM {} LIMIT 10".format( self.parent.__genSQL__(force_columns=force_columns) ), print_time_sql=False, ) self.parent._VERTICAPY_VARIABLES_["columns"].remove(self.alias) delattr(self.parent, self.alias) except: self.parent._VERTICAPY_VARIABLES_["exclude_columns"] += [self.alias] if add_history: self.parent.__add_to_history__( "[Drop]: vColumn {} was deleted from the vDataFrame.".format(self.alias) ) return parent # ---# def drop_outliers( self, threshold: float = 4.0, use_threshold: bool = True, alpha: float = 0.05 ): """ --------------------------------------------------------------------------- Drops outliers in the vColumn. Parameters ---------- threshold: float, optional Uses the Gaussian distribution to identify outliers. After normalizing the data (Z-Score), if the absolute value of the record is greater than the threshold, it will be considered as an outlier. use_threshold: bool, optional Uses the threshold instead of the 'alpha' parameter. alpha: float, optional Number representing the outliers threshold. Values lesser than quantile(alpha) or greater than quantile(1-alpha) will be dropped. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.fill_outliers : Fills the outliers in the vColumn. vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1 (1 meaning global outlier). """ check_types( [ ("alpha", alpha, [int, float]), ("use_threshold", use_threshold, [bool]), ("threshold", threshold, [int, float]), ] ) if use_threshold: result = self.aggregate(func=["std", "avg"]).transpose().values self.parent.filter( "ABS({} - {}) / {} < {}".format( self.alias, result["avg"][0], result["std"][0], threshold ) ) else: p_alpha, p_1_alpha = ( self.parent.quantile([alpha, 1 - alpha], [self.alias]) .transpose() .values[self.alias] ) self.parent.filter( "({} BETWEEN {} AND {})".format(self.alias, p_alpha, p_1_alpha) ) return self.parent # ---# def dropna(self): """ --------------------------------------------------------------------------- Filters the vDataFrame where the vColumn is missing. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.filter: Filters the data using the input expression. """ self.parent.filter("{} IS NOT NULL".format(self.alias)) return self.parent # ---# def fill_outliers( self, method: str = "winsorize", threshold: float = 4.0, use_threshold: bool = True, alpha: float = 0.05, ): """ --------------------------------------------------------------------------- Fills the vColumns outliers using the input method. Parameters ---------- method: str, optional Method to use to fill the vColumn outliers. mean : Replaces the upper and lower outliers by their respective average. null : Replaces the outliers by the NULL value. winsorize : Clips the vColumn using as lower bound quantile(alpha) and as upper bound quantile(1-alpha) if 'use_threshold' is set to False else the lower and upper ZScores. threshold: float, optional Uses the Gaussian distribution to define the outliers. 
            After normalizing the data (Z-Score), if the absolute value of the
            record is greater than the threshold, it will be considered as an
            outlier.
        use_threshold: bool, optional
            Uses the threshold instead of the 'alpha' parameter.
        alpha: float, optional
            Number representing the outliers threshold. Values less than
            quantile(alpha) or greater than quantile(1 - alpha) will be filled.

        Returns
        -------
        vDataFrame
            self.parent

        See Also
        --------
        vDataFrame[].drop_outliers : Drops outliers in the vColumn.
        vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1
            (1 meaning global outlier).
        """
        if isinstance(method, str):
            method = method.lower()
        check_types(
            [
                ("method", method, ["winsorize", "null", "mean"]),
                ("alpha", alpha, [int, float]),
                ("use_threshold", use_threshold, [bool]),
                ("threshold", threshold, [int, float]),
            ]
        )
        if use_threshold:
            result = self.aggregate(func=["std", "avg"]).transpose().values
            p_alpha, p_1_alpha = (
                -threshold * result["std"][0] + result["avg"][0],
                threshold * result["std"][0] + result["avg"][0],
            )
        else:
            query = "SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER BY {}) OVER () FROM {} LIMIT 1".format(
                alpha, self.alias, alpha, self.alias, self.parent.__genSQL__()
            )
            p_alpha, p_1_alpha = executeSQL(
                query=query,
                title="Computing the quantiles of {}.".format(self.alias),
                method="fetchrow",
            )
        if method == "winsorize":
            self.clip(lower=p_alpha, upper=p_1_alpha)
        elif method == "null":
            self.apply(
                func="(CASE WHEN ({} BETWEEN {} AND {}) THEN {} ELSE NULL END)".format(
                    "{}", p_alpha, p_1_alpha, "{}"
                )
            )
        elif method == "mean":
            query = "WITH vdf_table AS (SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {} < {}) UNION ALL (SELECT AVG({}) FROM vdf_table WHERE {} > {})".format(
                self.parent.__genSQL__(),
                self.alias,
                self.alias,
                p_alpha,
                self.alias,
                self.alias,
                p_1_alpha,
            )
            mean_alpha, mean_1_alpha = [
                item[0]
                for item in executeSQL(
                    query=query,
                    title="Computing the average of the {}'s lower and upper outliers.".format(
                        self.alias
                    ),
                    method="fetchall",
                )
            ]
            if mean_alpha is None:
                mean_alpha = "NULL"
            # Bug fix: the original code reassigned 'mean_alpha' a second time
            # here, letting a None 'mean_1_alpha' leak into the generated SQL.
            if mean_1_alpha is None:
                mean_1_alpha = "NULL"
            self.apply(
                func="(CASE WHEN {} < {} THEN {} WHEN {} > {} THEN {} ELSE {} END)".format(
                    "{}", p_alpha, mean_alpha, "{}", p_1_alpha, mean_1_alpha, "{}"
                )
            )
        return self.parent

    # ---#
    def fillna(
        self,
        val=None,
        method: str = "auto",
        expr: str = "",
        by: list = [],
        order_by: list = [],
    ):
        """
        ---------------------------------------------------------------------------
        Fills missing elements in the vColumn with a user-specified rule.

        Parameters
        ----------
        val: int/float/str, optional
            Value to use to impute the vColumn.
        method: str, optional
            Method to use to impute the missing values.
                auto    : Mean for the numerical and Mode for the categorical
                    vColumns.
                bfill   : Back Propagation of the next element (Constant
                    Interpolation).
                ffill   : Propagation of the previous element (Constant
                    Interpolation).
                mean    : Average.
                median  : Median.
                mode    : Mode (most frequent element).
                0ifnull : 0 when the vColumn is null, 1 otherwise.
        expr: str, optional
            SQL expression.
        by: list, optional
            vColumns used in the partition.
        order_by: list, optional
            List of the vColumns to use to sort the data when using TS methods.

        Returns
        -------
        vDataFrame
            self.parent

        See Also
        --------
        vDataFrame[].dropna : Drops the vColumn missing values.
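
        Example
        -------
        An illustrative sketch, not part of the original docstring. It assumes
        an open VerticaPy connection and the bundled Titanic dataset:

        >>> from verticapy.datasets import load_titanic
        >>> titanic = load_titanic()
        >>> # Impute 'age' with the average age of each (pclass, sex) group.
        >>> titanic["age"].fillna(method="mean", by=["pclass", "sex"])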
""" if isinstance(by, str): by = [by] if isinstance(order_by, str): order_by = [order_by] check_types( [ ( "method", method, [ "auto", "mode", "0ifnull", "mean", "avg", "median", "ffill", "pad", "bfill", "backfill", ], ), ("expr", expr, [str]), ("by", by, [list]), ("order_by", order_by, [list]), ] ) method = method.lower() self.parent.are_namecols_in([elem for elem in order_by] + by) by = self.parent.format_colnames(by) if method == "auto": method = "mean" if (self.isnum() and self.nunique(True) > 6) else "mode" total = self.count() if (method == "mode") and (val == None): val = self.mode(dropna=True) if val == None: warning_message = "The vColumn {} has no mode (only missing values).\nNothing was filled.".format( self.alias ) warnings.warn(warning_message, Warning) return self.parent if isinstance(val, str): val = val.replace("'", "''") if val != None: new_column = "COALESCE({}, '{}')".format("{}", val) elif expr: new_column = "COALESCE({}, {})".format("{}", expr) elif method == "0ifnull": new_column = "DECODE({}, NULL, 0, 1)" elif method in ("mean", "avg", "median"): fun = "MEDIAN" if (method == "median") else "AVG" if by == []: if fun == "AVG": val = self.avg() elif fun == "MEDIAN": val = self.median() new_column = "COALESCE({}, {})".format("{}", val) elif (len(by) == 1) and (self.parent[by[0]].nunique() < 50): try: if fun == "MEDIAN": fun = "APPROXIMATE_MEDIAN" query = "SELECT {}, {}({}) FROM {} GROUP BY {};".format( by[0], fun, self.alias, self.parent.__genSQL__(), by[0] ) result = executeSQL( query, title="Computing the different aggregations.", method="fetchall", ) for idx, elem in enumerate(result): result[idx][0] = ( "NULL" if (elem[0] == None) else "'{}'".format(str(elem[0]).replace("'", "''")) ) result[idx][1] = "NULL" if (elem[1] == None) else str(elem[1]) new_column = "COALESCE({}, DECODE({}, {}, NULL))".format( "{}", by[0], ", ".join( ["{}, {}".format(elem[0], elem[1]) for elem in result] ), ) executeSQL( "SELECT {} FROM {} LIMIT 1".format( new_column.format(self.alias), self.parent.__genSQL__() ), print_time_sql=False, ) except: new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format( "{}", fun, "{}", ", ".join(by) ) else: new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format( "{}", fun, "{}", ", ".join(by) ) elif method in ("ffill", "pad", "bfill", "backfill"): assert order_by, ParameterError( "If the method is in ffill|pad|bfill|backfill then 'order_by' must be a list of at least one element to use to order the data" ) desc = "" if (method in ("ffill", "pad")) else " DESC" partition_by = ( "PARTITION BY {}".format( ", ".join([quote_ident(column) for column in by]) ) if (by) else "" ) order_by_ts = ", ".join([quote_ident(column) + desc for column in order_by]) new_column = "COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({} ORDER BY {}))".format( "{}", "{}", partition_by, order_by_ts ) if method in ("mean", "median") or isinstance(val, float): category, ctype = "float", "float" elif method == "0ifnull": category, ctype = "int", "bool" else: category, ctype = self.category(), self.ctype() copy_trans = [elem for elem in self.transformations] total = self.count() if method not in ["mode", "0ifnull"]: max_floor = 0 all_partition = by if method in ["ffill", "pad", "bfill", "backfill"]: all_partition += [elem for elem in order_by] for elem in all_partition: if len(self.parent[elem].transformations) > max_floor: max_floor = len(self.parent[elem].transformations) max_floor -= len(self.transformations) for k in range(max_floor): self.transformations += [("{}", 
self.ctype(), self.category())] self.transformations += [(new_column, ctype, category)] try: sauv = {} for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) total = abs(self.count() - total) except Exception as e: self.transformations = [elem for elem in copy_trans] raise QueryError("{}\nAn Error happened during the filling.".format(e)) if total > 0: try: if "count" in sauv: self.catalog["count"] = int(sauv["count"]) + total self.catalog["percent"] = ( 100 * (int(sauv["count"]) + total) / self.parent.shape()[0] ) except: pass total = int(total) conj = "s were " if total > 1 else " was " if verticapy.options["print_info"]: print("{} element{}filled.".format(total, conj)) self.parent.__add_to_history__( "[Fillna]: {} {} missing value{} filled.".format( total, self.alias, conj, ) ) else: if verticapy.options["print_info"]: print("Nothing was filled.") self.transformations = [elem for elem in copy_trans] for elem in sauv: self.catalog[elem] = sauv[elem] return self.parent # ---# def geo_plot(self, *args, **kwargs): """ --------------------------------------------------------------------------- Draws the Geospatial object. Parameters ---------- *args / **kwargs Any optional parameter to pass to the geopandas plot function. For more information, see: https://geopandas.readthedocs.io/en/latest/docs/reference/api/ geopandas.GeoDataFrame.plot.html Returns ------- ax Matplotlib axes object """ columns = [self.alias] check = True if len(args) > 0: column = args[0] elif "column" in kwargs: column = kwargs["column"] else: check = False if check: self.parent.are_namecols_in(column) column = self.parent.format_colnames(column) columns += [column] if not ("cmap" in kwargs): from verticapy.plot import gen_cmap kwargs["cmap"] = gen_cmap()[0] else: if not ("color" in kwargs): from verticapy.plot import gen_colors kwargs["color"] = gen_colors()[0] if not ("legend" in kwargs): kwargs["legend"] = True if not ("figsize" in kwargs): kwargs["figsize"] = (14, 10) return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs) # ---# def get_dummies( self, prefix: str = "", prefix_sep: str = "_", drop_first: bool = True, use_numbers_as_suffix: bool = False, ): """ --------------------------------------------------------------------------- Encodes the vColumn with the One-Hot Encoding algorithm. Parameters ---------- prefix: str, optional Prefix of the dummies. prefix_sep: str, optional Prefix delimitor of the dummies. drop_first: bool, optional Drops the first dummy to avoid the creation of correlated features. use_numbers_as_suffix: bool, optional Uses numbers as suffix instead of the vColumns categories. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode : Encodes the vColumn with user defined Encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response. 
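
        Example
        -------
        A hypothetical sketch; the column name and its categories come from
        the bundled Titanic dataset, not from the original docstring:

        >>> from verticapy.datasets import load_titanic
        >>> titanic = load_titanic()
        >>> # With the default prefix, creates dummies such as "embarked_Q",
        >>> # dropping the first category to avoid collinearity.
        >>> titanic["embarked"].get_dummies(drop_first=True)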
""" check_types( [ ("prefix", prefix, [str]), ("prefix_sep", prefix_sep, [str]), ("drop_first", drop_first, [bool]), ("use_numbers_as_suffix", use_numbers_as_suffix, [bool]), ] ) distinct_elements = self.distinct() if distinct_elements not in ([0, 1], [1, 0]) or self.isbool(): all_new_features = [] prefix = ( self.alias.replace('"', "") + prefix_sep.replace('"', "_") if not (prefix) else prefix.replace('"', "_") + prefix_sep.replace('"', "_") ) n = 1 if drop_first else 0 for k in range(len(distinct_elements) - n): name = ( '"{}{}"'.format(prefix, k) if (use_numbers_as_suffix) else '"{}{}"'.format( prefix, str(distinct_elements[k]).replace('"', "_") ) ) assert not (self.parent.is_colname_in(name)), NameError( f"A vColumn has already the alias of one of the dummies ({name}).\n" "It can be the result of using previously the method on the vColumn " "or simply because of ambiguous columns naming.\nBy changing one of " "the parameters ('prefix', 'prefix_sep'), you'll be able to solve this " "issue." ) for k in range(len(distinct_elements) - n): name = ( '"{}{}"'.format(prefix, k) if (use_numbers_as_suffix) else '"{}{}"'.format( prefix, str(distinct_elements[k]).replace('"', "_") ) ) name = ( name.replace(" ", "_") .replace("/", "_") .replace(",", "_") .replace("'", "_") ) expr = "DECODE({}, '{}', 1, 0)".format( "{}", str(distinct_elements[k]).replace("'", "''") ) transformations = self.transformations + [(expr, "bool", "int")] new_vColumn = vColumn( name, parent=self.parent, transformations=transformations, catalog={ "min": 0, "max": 1, "count": self.parent.shape()[0], "percent": 100.0, "unique": 2, "approx_unique": 2, "prod": 0, }, ) setattr(self.parent, name, new_vColumn) setattr(self.parent, name.replace('"', ""), new_vColumn) self.parent._VERTICAPY_VARIABLES_["columns"] += [name] all_new_features += [name] conj = "s were " if len(all_new_features) > 1 else " was " self.parent.__add_to_history__( "[Get Dummies]: One hot encoder was applied to the vColumn {}\n{} feature{}created: {}".format( self.alias, len(all_new_features), conj, ", ".join(all_new_features) ) + "." ) return self.parent one_hot_encode = get_dummies # ---# def head(self, limit: int = 5): """ --------------------------------------------------------------------------- Returns the head of the vColumn. Parameters ---------- limit: int, optional Number of elements to display. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].tail : Returns the a part of the vColumn. """ return self.iloc(limit=limit) # ---# def hist( self, method: str = "density", of: str = "", max_cardinality: int = 6, nbins: int = 0, h: float = 0, ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the histogram of the vColumn based on an aggregation. Parameters ---------- method: str, optional The method to use to aggregate the data. count : Number of elements. density : Percentage of the distribution. mean : Average of the vColumn 'of'. min : Minimum of the vColumn 'of'. max : Maximum of the vColumn 'of'. sum : Sum of the vColumn 'of'. q% : q Quantile of the vColumn 'of' (ex: 50% to get the median). It can also be a cutomized aggregation (ex: AVG(column1) + 5). of: str, optional The vColumn to use to compute the aggregation. max_cardinality: int, optional Maximum number of the vColumn distinct elements to be used as categorical (No h will be picked or computed) nbins: int, optional Number of bins. 
If empty, an optimized number of bins will be computed. h: float, optional Interval width of the bar. If empty, an optimized h will be computed. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame[].bar : Draws the Bar Chart of vColumn based on an aggregation. """ check_types( [ ("method", method, [str]), ("of", of, [str]), ("max_cardinality", max_cardinality, [int, float]), ("h", h, [int, float]), ("nbins", nbins, [int, float]), ] ) if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import hist return hist(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds) # ---# def iloc(self, limit: int = 5, offset: int = 0): """ --------------------------------------------------------------------------- Returns a part of the vColumn (delimited by an offset and a limit). Parameters ---------- limit: int, optional Number of elements to display. offset: int, optional Number of elements to skip. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].head : Returns the head of the vColumn. vDataFrame[].tail : Returns the tail of the vColumn. """ check_types([("limit", limit, [int, float]), ("offset", offset, [int, float])]) if offset < 0: offset = max(0, self.parent.shape()[0] - limit) title = "Reads {}.".format(self.alias) tail = to_tablesample( "SELECT {} AS {} FROM {}{} LIMIT {} OFFSET {}".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.__genSQL__(), self.parent.__get_last_order_by__(), limit, offset, ), title=title, ) tail.count = self.parent.shape()[0] tail.offset = offset tail.dtype[self.alias] = self.ctype() tail.name = self.alias return tail # ---# def isbool(self): """ --------------------------------------------------------------------------- Returns True if the vColumn is boolean, False otherwise. Returns ------- bool True if the vColumn is boolean. See Also -------- vDataFrame[].isdate : Returns True if the vColumn category is date. vDataFrame[].isnum : Returns True if the vColumn is numerical. """ return self.ctype().lower() in ("bool", "boolean") # ---# def isdate(self): """ --------------------------------------------------------------------------- Returns True if the vColumn category is date, False otherwise. Returns ------- bool True if the vColumn category is date. See Also -------- vDataFrame[].isbool : Returns True if the vColumn is boolean. vDataFrame[].isnum : Returns True if the vColumn is numerical. """ return self.category() == "date" # ---# def isin(self, val: list, *args): """ --------------------------------------------------------------------------- Looks if some specific records are in the vColumn and it returns the new vDataFrame of the search. Parameters ---------- val: list List of the different records. For example, to check if Badr and Fouad are in the vColumn. You can write the following list: ["Fouad", "Badr"] Returns ------- vDataFrame The vDataFrame of the search. See Also -------- vDataFrame.isin : Looks if some specific records are in the vDataFrame. 
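
        Example
        -------
        A minimal sketch (assumes the bundled Titanic dataset and its
        'embarked' categories):

        >>> from verticapy.datasets import load_titanic
        >>> titanic = load_titanic()
        >>> # Returns the vDataFrame restricted to the matching records.
        >>> titanic["embarked"].isin(["C", "Q"])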
""" if isinstance(val, str) or not (isinstance(val, Iterable)): val = [val] val += list(args) check_types([("val", val, [list])]) val = {self.alias: val} return self.parent.isin(val) # ---# def isnum(self): """ --------------------------------------------------------------------------- Returns True if the vColumn is numerical, False otherwise. Returns ------- bool True if the vColumn is numerical. See Also -------- vDataFrame[].isbool : Returns True if the vColumn is boolean. vDataFrame[].isdate : Returns True if the vColumn category is date. """ return self.category() in ("float", "int") # ---# def iv_woe(self, y: str, nbins: int = 10): """ --------------------------------------------------------------------------- Computes the Information Value (IV) / Weight Of Evidence (WOE) Table. It tells the predictive power of an independent variable in relation to the dependent variable. Parameters ---------- y: str Response vColumn. nbins: int, optional Maximum number of nbins used for the discretization (must be > 1) Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame.iv_woe : Computes the Information Value (IV) Table. """ check_types([("y", y, [str]), ("nbins", nbins, [int])]) self.parent.are_namecols_in(y) y = self.parent.format_colnames(y) assert self.parent[y].nunique() == 2, TypeError( "vColumn {} must be binary to use iv_woe.".format(y) ) response_cat = self.parent[y].distinct() response_cat.sort() assert response_cat == [0, 1], TypeError( "vColumn {} must be binary to use iv_woe.".format(y) ) self.parent[y].distinct() trans = self.discretize( method="same_width" if self.isnum() else "topk", nbins=nbins, k=nbins, new_category="Others", return_enum_trans=True, )[0].replace("{}", self.alias) query = "SELECT {} AS {}, {} AS ord, {}::int AS {} FROM {}".format( trans, self.alias, self.alias, y, y, self.parent.__genSQL__(), ) query = "SELECT {}, MIN(ord) AS ord, SUM(1 - {}) AS non_events, SUM({}) AS events FROM ({}) x GROUP BY 1".format( self.alias, y, y, query, ) query = "SELECT {}, ord, non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x".format( self.alias, query, ) query = "SELECT {} AS index, non_events, events, pt_non_events, pt_events, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS woe, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM ({}) x ORDER BY ord".format( self.alias, query, ) title = "Computing WOE & IV of {} (response = {}).".format(self.alias, y) result = to_tablesample(query, title=title) result.values["index"] += ["total"] result.values["non_events"] += [sum(result["non_events"])] result.values["events"] += [sum(result["events"])] result.values["pt_non_events"] += [""] result.values["pt_events"] += [""] result.values["woe"] += [""] result.values["iv"] += [sum(result["iv"])] return result # ---# def kurtosis(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'kurtosis'. Returns ------- float kurtosis See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. 
""" return self.aggregate(["kurtosis"]).values[self.alias][0] kurt = kurtosis # ---# def label_encode(self): """ --------------------------------------------------------------------------- Encodes the vColumn using a bijection from the different categories to [0, n - 1] (n being the vColumn cardinality). Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode : Encodes the vColumn with a user defined Encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response. """ if self.category() in ["date", "float"]: warning_message = ( "label_encode is only available for categorical variables." ) warnings.warn(warning_message, Warning) else: distinct_elements = self.distinct() expr = ["DECODE({}"] text_info = "\n" for k in range(len(distinct_elements)): expr += [ "'{}', {}".format(str(distinct_elements[k]).replace("'", "''"), k) ] text_info += "\t{} => {}".format(distinct_elements[k], k) expr = ", ".join(expr) + ", {})".format(len(distinct_elements)) self.transformations += [(expr, "int", "int")] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.catalog["count"] = self.parent.shape()[0] self.catalog["percent"] = 100 self.parent.__add_to_history__( "[Label Encoding]: Label Encoding was applied to the vColumn {} using the following mapping:{}".format( self.alias, text_info ) ) return self.parent # ---# def mad(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'mad' (median absolute deviation). Returns ------- float mad See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["mad"]).values[self.alias][0] # ---# def max(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'max' (Maximum). Returns ------- float/str maximum See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["max"]).values[self.alias][0] # ---# def mean_encode(self, response: str): """ --------------------------------------------------------------------------- Encodes the vColumn using the average of the response partitioned by the different vColumn categories. Parameters ---------- response: str Response vColumn. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].decode : Encodes the vColumn using a user-defined encoding. vDataFrame[].discretize : Discretizes the vColumn. vDataFrame[].label_encode : Encodes the vColumn with Label Encoding. vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding. 
""" check_types([("response", response, [str])]) self.parent.are_namecols_in(response) response = self.parent.format_colnames(response) assert self.parent[response].isnum(), TypeError( "The response column must be numerical to use a mean encoding" ) max_floor = len(self.parent[response].transformations) - len( self.transformations ) for k in range(max_floor): self.transformations += [("{}", self.ctype(), self.category())] self.transformations += [ ("AVG({}) OVER (PARTITION BY {})".format(response, "{}"), "int", "float") ] self.parent.__update_catalog__(erase=True, columns=[self.alias]) self.parent.__add_to_history__( "[Mean Encode]: The vColumn {} was transformed using a mean encoding with {} as Response Column.".format( self.alias, response ) ) if verticapy.options["print_info"]: print("The mean encoding was successfully done.") return self.parent # ---# def median( self, approx: bool = True, ): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'median'. Parameters ---------- approx: bool, optional If set to True, the approximate median is returned. By setting this parameter to False, the function's performance can drastically decrease. Returns ------- float/str median See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.quantile(0.5, approx=approx) # ---# def memory_usage(self): """ --------------------------------------------------------------------------- Returns the vColumn memory usage. Returns ------- float vColumn memory usage (byte) See Also -------- vDataFrame.memory_usage : Returns the vDataFrame memory usage. """ import sys total = ( sys.getsizeof(self) + sys.getsizeof(self.alias) + sys.getsizeof(self.transformations) + sys.getsizeof(self.catalog) ) for elem in self.catalog: total += sys.getsizeof(elem) return total # ---# def min(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'min' (Minimum). Returns ------- float/str minimum See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["min"]).values[self.alias][0] # ---# def mode(self, dropna: bool = False, n: int = 1): """ --------------------------------------------------------------------------- Returns the nth most occurent element. Parameters ---------- dropna: bool, optional If set to True, NULL values will not be considered during the computation. n: int, optional Integer corresponding to the offset. For example, if n = 1 then this method will return the mode of the vColumn. Returns ------- str/float/int vColumn nth most occurent element. See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. 
""" check_types([("dropna", dropna, [bool]), ("n", n, [int, float])]) if n == 1: pre_comp = self.parent.__get_catalog_value__(self.alias, "top") if pre_comp != "VERTICAPY_NOT_PRECOMPUTED": if not (dropna) and (pre_comp != None): return pre_comp assert n >= 1, ParameterError("Parameter 'n' must be greater or equal to 1") where = " WHERE {} IS NOT NULL ".format(self.alias) if (dropna) else " " result = executeSQL( "SELECT {} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {} ORDER BY _verticapy_cnt_ DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1".format( self.alias, self.alias, self.parent.__genSQL__(), where, self.alias, n ), title="Computing the mode.", method="fetchall", ) top = None if not (result) else result[0][0] if not (dropna): n = "" if (n == 1) else str(int(n)) if isinstance(top, decimal.Decimal): top = float(top) self.parent.__update_catalog__( {"index": ["top{}".format(n)], self.alias: [top]} ) return top # ---# def mul(self, x: float): """ --------------------------------------------------------------------------- Multiplies the vColumn by the input element. Parameters ---------- x: float Input number. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types([("x", x, [int, float])]) return self.apply(func="{} * ({})".format("{}", x)) # ---# def nlargest(self, n: int = 10): """ --------------------------------------------------------------------------- Returns the n largest vColumn elements. Parameters ---------- n: int, optional Offset. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].nsmallest : Returns the n smallest elements in the vColumn. """ check_types([("n", n, [int, float])]) query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} DESC LIMIT {}".format( self.parent.__genSQL__(), self.alias, self.alias, n ) title = "Reads {} {} largest elements.".format(self.alias, n) return to_tablesample(query, title=title) # ---# def normalize( self, method: str = "zscore", by: list = [], return_trans: bool = False ): """ --------------------------------------------------------------------------- Normalizes the input vColumns using the input method. Parameters ---------- method: str, optional Method to use to normalize. zscore : Normalization using the Z-Score (avg and std). (x - avg) / std robust_zscore : Normalization using the Robust Z-Score (median and mad). (x - median) / (1.4826 * mad) minmax : Normalization using the MinMax (min and max). (x - min) / (max - min) by: list, optional vColumns used in the partition. return_trans: bool, optimal If set to True, the method will return the transformation used instead of the parent vDataFrame. This parameter is used for testing purpose. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.outliers : Computes the vDataFrame Global Outliers. 
""" if isinstance(by, str): by = [by] check_types( [ ("method", method, ["zscore", "robust_zscore", "minmax"]), ("by", by, [list]), ("return_trans", return_trans, [bool]), ] ) method = method.lower() self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) nullifzero, n = 1, len(by) if self.isbool(): warning_message = "Normalize doesn't work on booleans".format(self.alias) warnings.warn(warning_message, Warning) elif self.isnum(): if method == "zscore": if n == 0: nullifzero = 0 avg, stddev = self.aggregate(["avg", "std"]).values[self.alias] if stddev == 0: warning_message = "Can not normalize {} using a Z-Score - The Standard Deviation is null !".format( self.alias ) warnings.warn(warning_message, Warning) return self elif (n == 1) and (self.parent[by[0]].nunique() < 50): try: result = executeSQL( "SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY {}".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title="Computing the different categories to normalize.", method="fetchall", ) for i in range(len(result)): if result[i][2] == None: pass elif math.isnan(result[i][2]): result[i][2] = None avg = "DECODE({}, {}, NULL)".format( by[0], ", ".join( [ "{}, {}".format( "'{}'".format(str(elem[0]).replace("'", "''")) if elem[0] != None else "NULL", elem[1] if elem[1] != None else "NULL", ) for elem in result if elem[1] != None ] ), ) stddev = "DECODE({}, {}, NULL)".format( by[0], ", ".join( [ "{}, {}".format( "'{}'".format(str(elem[0]).replace("'", "''")) if elem[0] != None else "NULL", elem[2] if elem[2] != None else "NULL", ) for elem in result if elem[2] != None ] ), ) executeSQL( "SELECT {}, {} FROM {} LIMIT 1".format( avg, stddev, self.parent.__genSQL__() ), print_time_sql=False, ) except: avg, stddev = ( "AVG({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), "STDDEV({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), ) else: avg, stddev = ( "AVG({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), "STDDEV({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), ) if return_trans: return "({} - {}) / {}({})".format( self.alias, avg, "NULLIFZERO" if (nullifzero) else "", stddev ) else: final_transformation = [ ( "({} - {}) / {}({})".format( "{}", avg, "NULLIFZERO" if (nullifzero) else "", stddev ), "float", "float", ) ] elif method == "robust_zscore": if n > 0: warning_message = "The method 'robust_zscore' is available only if the parameter 'by' is empty\nIf you want to normalize by grouping by elements, please use a method in zscore|minmax" warnings.warn(warning_message, Warning) return self mad, med = self.aggregate(["mad", "approx_median"]).values[self.alias] mad *= 1.4826 if mad != 0: if return_trans: return "({} - {}) / ({})".format(self.alias, med, mad) else: final_transformation = [ ( "({} - {}) / ({})".format("{}", med, mad), "float", "float", ) ] else: warning_message = "Can not normalize {} using a Robust Z-Score - The MAD is null !".format( self.alias ) warnings.warn(warning_message, Warning) return self elif method == "minmax": if n == 0: nullifzero = 0 cmin, cmax = self.aggregate(["min", "max"]).values[self.alias] if cmax - cmin == 0: warning_message = "Can not normalize {} using the MIN and the MAX. 
MAX = MIN !".format( self.alias ) warnings.warn(warning_message, Warning) return self elif n == 1: try: result = executeSQL( "SELECT {}, MIN({}), MAX({}) FROM {} GROUP BY {}".format( by[0], self.alias, self.alias, self.parent.__genSQL__(), by[0], ), title="Computing the different categories {} to normalize.".format( by[0] ), method="fetchall", ) cmin = "DECODE({}, {}, NULL)".format( by[0], ", ".join( [ "{}, {}".format( "'{}'".format(str(elem[0]).replace("'", "''")) if elem[0] != None else "NULL", elem[1] if elem[1] != None else "NULL", ) for elem in result if elem[1] != None ] ), ) cmax = "DECODE({}, {}, NULL)".format( by[0], ", ".join( [ "{}, {}".format( "'{}'".format(str(elem[0]).replace("'", "''")) if elem[0] != None else "NULL", elem[2] if elem[2] != None else "NULL", ) for elem in result if elem[2] != None ] ), ) executeSQL( "SELECT {}, {} FROM {} LIMIT 1".format( cmax, cmin, self.parent.__genSQL__() ), print_time_sql=False, ) except: cmax, cmin = ( "MAX({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), "MIN({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), ) else: cmax, cmin = ( "MAX({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), "MIN({}) OVER (PARTITION BY {})".format( self.alias, ", ".join(by) ), ) if return_trans: return "({} - {}) / {}({} - {})".format( self.alias, cmin, "NULLIFZERO" if (nullifzero) else "", cmax, cmin, ) else: final_transformation = [ ( "({} - {}) / {}({} - {})".format( "{}", cmin, "NULLIFZERO" if (nullifzero) else "", cmax, cmin, ), "float", "float", ) ] if method != "robust_zscore": max_floor = 0 for elem in by: if len(self.parent[elem].transformations) > max_floor: max_floor = len(self.parent[elem].transformations) max_floor -= len(self.transformations) for k in range(max_floor): self.transformations += [("{}", self.ctype(), self.category())] self.transformations += final_transformation sauv = {} for elem in self.catalog: sauv[elem] = self.catalog[elem] self.parent.__update_catalog__(erase=True, columns=[self.alias]) try: if "count" in sauv: self.catalog["count"] = sauv["count"] self.catalog["percent"] = ( 100 * sauv["count"] / self.parent.shape()[0] ) for elem in sauv: if "top" in elem: if "percent" in elem: self.catalog[elem] = sauv[elem] elif elem == None: self.catalog[elem] = None elif method == "robust_zscore": self.catalog[elem] = (sauv[elem] - sauv["approx_50%"]) / ( 1.4826 * sauv["mad"] ) elif method == "zscore": self.catalog[elem] = (sauv[elem] - sauv["mean"]) / sauv[ "std" ] elif method == "minmax": self.catalog[elem] = (sauv[elem] - sauv["min"]) / ( sauv["max"] - sauv["min"] ) except: pass if method == "robust_zscore": self.catalog["median"] = 0 self.catalog["mad"] = 1 / 1.4826 elif method == "zscore": self.catalog["mean"] = 0 self.catalog["std"] = 1 elif method == "minmax": self.catalog["min"] = 0 self.catalog["max"] = 1 self.parent.__add_to_history__( "[Normalize]: The vColumn '{}' was normalized with the method '{}'.".format( self.alias, method ) ) else: raise TypeError("The vColumn must be numerical for Normalization") return self.parent # ---# def nsmallest(self, n: int = 10): """ --------------------------------------------------------------------------- Returns the n smallest elements in the vColumn. Parameters ---------- n: int, optional Offset. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].nlargest : Returns the n largest vColumn elements. 
""" check_types([("n", n, [int, float])]) query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} ASC LIMIT {}".format( self.parent.__genSQL__(), self.alias, self.alias, n ) title = "Reads {} {} smallest elements.".format(n, self.alias) return to_tablesample(query, title=title) # ---# def numh(self, method: str = "auto"): """ --------------------------------------------------------------------------- Computes the optimal vColumn bar width. Parameters ---------- method: str, optional Method to use to compute the optimal h. auto : Combination of Freedman Diaconis and Sturges. freedman_diaconis : Freedman Diaconis [2 * IQR / n ** (1 / 3)] sturges : Sturges [CEIL(log2(n)) + 1] Returns ------- float optimal bar width. """ check_types( [("method", method, ["sturges", "freedman_diaconis", "fd", "auto"])] ) method = method.lower() if method == "auto": pre_comp = self.parent.__get_catalog_value__(self.alias, "numh") if pre_comp != "VERTICAPY_NOT_PRECOMPUTED": return pre_comp assert self.isnum() or self.isdate(), ParameterError( "numh is only available on type numeric|date" ) if self.isnum(): result = ( self.parent.describe( method="numerical", columns=[self.alias], unique=False ) .transpose() .values[self.alias] ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = ( result[0], result[3], result[4], result[6], result[7], ) elif self.isdate(): min_date = self.min() table = "(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {} FROM {}) VERTICAPY_OPTIMAL_H_TABLE".format( min_date, self.alias, self.alias, self.parent.__genSQL__() ) query = "SELECT COUNT({}) AS NAs, MIN({}) AS min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.75) AS Q3, MAX({}) AS max FROM {}".format( self.alias, self.alias, self.alias, self.alias, self.alias, table ) result = executeSQL( query, title="Different aggregations to compute the optimal h.", method="fetchrow", ) count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result sturges = max( float(vColumn_max - vColumn_min) / int(math.floor(math.log(count, 2) + 2)), 1e-99, ) fd = max(2.0 * (vColumn_075 - vColumn_025) / (count) ** (1.0 / 3.0), 1e-99) if method.lower() == "sturges": best_h = sturges elif method.lower() in ("freedman_diaconis", "fd"): best_h = fd else: best_h = max(sturges, fd) self.parent.__update_catalog__({"index": ["numh"], self.alias: [best_h]}) if self.category() == "int": best_h = max(math.floor(best_h), 1) return best_h # ---# def nunique(self, approx: bool = True): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'unique' (cardinality). Parameters ---------- approx: bool, optional If set to True, the approximate cardinality is returned. By setting this parameter to False, the function's performance can drastically decrease. Returns ------- int vColumn cardinality (or approximate cardinality). See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ check_types([("approx", approx, [bool])]) if approx: return self.aggregate(func=["approx_unique"]).values[self.alias][0] else: return self.aggregate(func=["unique"]).values[self.alias][0] # ---# def pie( self, method: str = "density", of: str = "", max_cardinality: int = 6, h: float = 0, pie_type: str = "auto", ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the pie chart of the vColumn based on an aggregation. 
Parameters ---------- method: str, optional The method to use to aggregate the data. count : Number of elements. density : Percentage of the distribution. mean : Average of the vColumn 'of'. min : Minimum of the vColumn 'of'. max : Maximum of the vColumn 'of'. sum : Sum of the vColumn 'of'. q% : q Quantile of the vColumn 'of' (ex: 50% to get the median). It can also be a cutomized aggregation (ex: AVG(column1) + 5). of: str, optional The vColumn to use to compute the aggregation. max_cardinality: int, optional Maximum number of the vColumn distinct elements to be used as categorical (No h will be picked or computed) h: float, optional Interval width of the bar. If empty, an optimized h will be computed. pie_type: str, optional The type of pie chart. auto : Regular pie chart. donut : Donut chart. rose : Rose chart. It can also be a cutomized aggregation (ex: AVG(column1) + 5). ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.donut : Draws the donut chart of the vColumn based on an aggregation. """ if isinstance(pie_type, str): pie_type = pie_type.lower() check_types( [ ("method", method, [str]), ("of", of, [str]), ("max_cardinality", max_cardinality, [int, float]), ("h", h, [int, float]), ("pie_type", pie_type, ["auto", "donut", "rose"]), ] ) donut = True if pie_type == "donut" else False rose = True if pie_type == "rose" else False if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import pie return pie( self, method, of, max_cardinality, h, donut, rose, ax=None, **style_kwds, ) # ---# def plot( self, ts: str, by: str = "", start_date: Union[str, datetime.datetime, datetime.date] = "", end_date: Union[str, datetime.datetime, datetime.date] = "", area: bool = False, step: bool = False, ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the Time Series of the vColumn. Parameters ---------- ts: str TS (Time Series) vColumn to use to order the data. The vColumn type must be date like (date, datetime, timestamp...) or numerical. by: str, optional vColumn to use to partition the TS. start_date: str / date, optional Input Start Date. For example, time = '03-11-1993' will filter the data when 'ts' is lesser than November 1993 the 3rd. end_date: str / date, optional Input End Date. For example, time = '03-11-1993' will filter the data when 'ts' is greater than November 1993 the 3rd. area: bool, optional If set to True, draw an Area Plot. step: bool, optional If set to True, draw a Step Plot. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.plot : Draws the time series. 
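
        Example
        -------
        A hypothetical sketch; it assumes the bundled Amazon forest-fire
        dataset, whose 'date', 'state', and 'number' columns are used here:

        >>> from verticapy.datasets import load_amazon
        >>> amazon = load_amazon()
        >>> # One line per state, ordered by the 'date' column.
        >>> amazon["number"].plot(ts="date", by="state")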
""" check_types( [ ("ts", ts, [str]), ("by", by, [str]), ("start_date", start_date, [str, datetime.datetime, datetime.date]), ("end_date", end_date, [str, datetime.datetime, datetime.date]), ("area", area, [bool]), ("step", step, [bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) from verticapy.plot import ts_plot return ts_plot( self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds, ) # ---# def product(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'product'. Returns ------- float product See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(func=["prod"]).values[self.alias][0] prod = product # ---# def quantile(self, x: float, approx: bool = True): """ --------------------------------------------------------------------------- Aggregates the vColumn using an input 'quantile'. Parameters ---------- x: float A float between 0 and 1 that represents the quantile. For example: 0.25 represents Q1. approx: bool, optional If set to True, the approximate quantile is returned. By setting this parameter to False, the function's performance can drastically decrease. Returns ------- float quantile (or approximate quantile). See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ check_types([("x", x, [int, float], ("approx", approx, [bool]))]) prefix = "approx_" if approx else "" return self.aggregate(func=[prefix + "{}%".format(x * 100)]).values[self.alias][ 0 ] # ---# def range_plot( self, ts: str, q: tuple = (0.25, 0.75), start_date: Union[str, datetime.datetime, datetime.date] = "", end_date: Union[str, datetime.datetime, datetime.date] = "", plot_median: bool = False, ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the range plot of the vColumn. The aggregations used are the median and two input quantiles. Parameters ---------- ts: str TS (Time Series) vColumn to use to order the data. The vColumn type must be date like (date, datetime, timestamp...) or numerical. q: tuple, optional Tuple including the 2 quantiles used to draw the Plot. start_date: str / date, optional Input Start Date. For example, time = '03-11-1993' will filter the data when 'ts' is lesser than November 1993 the 3rd. end_date: str / date, optional Input End Date. For example, time = '03-11-1993' will filter the data when 'ts' is greater than November 1993 the 3rd. plot_median: bool, optional If set to True, the Median will be drawn. ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.plot : Draws the time series. 
""" check_types( [ ("ts", ts, [str]), ("q", q, [tuple]), ( "start_date", start_date, [str, datetime.datetime, datetime.date, int, float], ), ( "end_date", end_date, [str, datetime.datetime, datetime.date, int, float], ), ("plot_median", plot_median, [bool]), ] ) self.parent.are_namecols_in(ts) ts = self.parent.format_colnames(ts) from verticapy.plot import range_curve_vdf return range_curve_vdf( self, ts, q, start_date, end_date, plot_median, ax=ax, **style_kwds, ) # ---# def rename(self, new_name: str): """ --------------------------------------------------------------------------- Renames the vColumn by dropping the current vColumn and creating a copy with the specified name. \u26A0 Warning : SQL code generation will be slower if the vDataFrame has been transformed multiple times, so it's better practice to use this method when first preparing your data. Parameters ---------- new_name: str The new vColumn alias. Returns ------- vDataFrame self.parent See Also -------- vDataFrame.add_copy : Creates a copy of the vColumn. """ check_types([("new_name", new_name, [str])]) old_name = quote_ident(self.alias) new_name = new_name.replace('"', "") assert not (self.parent.is_colname_in(new_name)), NameError( f"A vColumn has already the alias {new_name}.\nBy changing the parameter 'new_name', you'll be able to solve this issue." ) self.add_copy(new_name) parent = self.drop(add_history=False) parent.__add_to_history__( "[Rename]: The vColumn {} was renamed '{}'.".format(old_name, new_name) ) return parent # ---# def round(self, n: int): """ --------------------------------------------------------------------------- Rounds the vColumn by keeping only the input number of digits after the comma. Parameters ---------- n: int Number of digits to keep after the comma. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types([("n", n, [int, float])]) return self.apply(func="ROUND({}, {})".format("{}", n)) # ---# def sem(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'sem' (standard error of mean). Returns ------- float sem See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["sem"]).values[self.alias][0] # ---# def skewness(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'skewness'. Returns ------- float skewness See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["skewness"]).values[self.alias][0] skew = skewness # ---# def slice(self, length: int, unit: str = "second", start: bool = True): """ --------------------------------------------------------------------------- Slices and transforms the vColumn using a time series rule. Parameters ---------- length: int Slice size. unit: str, optional Slice size unit. For example, it can be 'minute' 'hour'... start: bool, optional If set to True, the record will be sliced using the floor of the slicing instead of the ceiling. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].date_part : Extracts a specific TS field from the vColumn. 
""" check_types( [ ("length", length, [int, float]), ("unit", unit, [str]), ("start", start, [bool]), ] ) start_or_end = "START" if (start) else "END" return self.apply( func="TIME_SLICE({}, {}, '{}', '{}')".format( "{}", length, unit.upper(), start_or_end ) ) # ---# def spider( self, by: str = "", method: str = "density", of: str = "", max_cardinality: Union[int, tuple] = (6, 6), h: Union[int, float, tuple] = (None, None), ax=None, **style_kwds, ): """ --------------------------------------------------------------------------- Draws the spider plot of the input vColumn based on an aggregation. Parameters ---------- by: str, optional vColumn to use to partition the data. method: str, optional The method to use to aggregate the data. count : Number of elements. density : Percentage of the distribution. mean : Average of the vColumn 'of'. min : Minimum of the vColumn 'of'. max : Maximum of the vColumn 'of'. sum : Sum of the vColumn 'of'. q% : q Quantile of the vColumn 'of' (ex: 50% to get the median). It can also be a cutomized aggregation (ex: AVG(column1) + 5). of: str, optional The vColumn to use to compute the aggregation. h: int/float/tuple, optional Interval width of the vColumns 1 and 2 bars. It is only valid if the vColumns are numerical. Optimized h will be computed if the parameter is empty or invalid. max_cardinality: int/tuple, optional Maximum number of distinct elements for vColumns 1 and 2 to be used as categorical (No h will be picked or computed) ax: Matplotlib axes object, optional The axes to plot on. **style_kwds Any optional parameter to pass to the Matplotlib functions. Returns ------- ax Matplotlib axes object See Also -------- vDataFrame.bar : Draws the Bar Chart of the input vColumns based on an aggregation. """ check_types( [ ("by", by, [str]), ("method", method, [str]), ("of", of, [str]), ("max_cardinality", max_cardinality, [list]), ("h", h, [list, float, int]), ] ) if by: self.parent.are_namecols_in(by) by = self.parent.format_colnames(by) columns = [self.alias, by] else: columns = [self.alias] if of: self.parent.are_namecols_in(of) of = self.parent.format_colnames(of) from verticapy.plot import spider as spider_plot return spider_plot( self.parent, columns, method, of, max_cardinality, h, ax=ax, **style_kwds, ) # ---# def std(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'std' (Standard Deviation). Returns ------- float std See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["stddev"]).values[self.alias][0] stddev = std # ---# def store_usage(self): """ --------------------------------------------------------------------------- Returns the vColumn expected store usage (unit: b). Returns ------- int vColumn expected store usage. See Also -------- vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage. 
""" pre_comp = self.parent.__get_catalog_value__(self.alias, "store_usage") if pre_comp != "VERTICAPY_NOT_PRECOMPUTED": return pre_comp store_usage = executeSQL( "SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}".format( bin_spatial_to_str(self.category(), self.alias), self.parent.__genSQL__(), ), title="Computing the Store Usage of the vColumn {}.".format(self.alias), method="fetchfirstelem", ) self.parent.__update_catalog__( {"index": ["store_usage"], self.alias: [store_usage]} ) return store_usage # ---# def str_contains(self, pat: str): """ --------------------------------------------------------------------------- Verifies if the regular expression is in each of the vColumn records. The vColumn will be transformed. Parameters ---------- pat: str Regular expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_count : Computes the number of matches for the regular expression in each record of the vColumn. vDataFrame[].extract : Extracts the regular expression in each record of the vColumn. vDataFrame[].str_replace : Replaces the regular expression matches in each of the vColumn records by an input value. vDataFrame[].str_slice : Slices the vColumn. """ check_types([("pat", pat, [str])]) return self.apply( func="REGEXP_COUNT({}, '{}') > 0".format("{}", pat.replace("'", "''")) ) # ---# def str_count(self, pat: str): """ --------------------------------------------------------------------------- Computes the number of matches for the regular expression in each record of the vColumn. The vColumn will be transformed. Parameters ---------- pat: str regular expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains : Verifies if the regular expression is in each of the vColumn records. vDataFrame[].extract : Extracts the regular expression in each record of the vColumn. vDataFrame[].str_replace : Replaces the regular expression matches in each of the vColumn records by an input value. vDataFrame[].str_slice : Slices the vColumn. """ check_types([("pat", pat, [str])]) return self.apply( func="REGEXP_COUNT({}, '{}')".format("{}", pat.replace("'", "''")) ) # ---# def str_extract(self, pat: str): """ --------------------------------------------------------------------------- Extracts the regular expression in each record of the vColumn. The vColumn will be transformed. Parameters ---------- pat: str regular expression. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains : Verifies if the regular expression is in each of the vColumn records. vDataFrame[].str_count : Computes the number of matches for the regular expression in each record of the vColumn. vDataFrame[].str_replace : Replaces the regular expression matches in each of the vColumn records by an input value. vDataFrame[].str_slice : Slices the vColumn. """ check_types([("pat", pat, [str])]) return self.apply( func="REGEXP_SUBSTR({}, '{}')".format("{}", pat.replace("'", "''")) ) # ---# def str_replace(self, to_replace: str, value: str = ""): """ --------------------------------------------------------------------------- Replaces the regular expression matches in each of the vColumn record by an input value. The vColumn will be transformed. Parameters ---------- to_replace: str Regular expression to replace. value: str, optional New value. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains : Verifies if the regular expression is in each of the vColumn records. 
vDataFrame[].str_count : Computes the number of matches for the regular expression in each record of the vColumn. vDataFrame[].extract : Extracts the regular expression in each record of the vColumn. vDataFrame[].str_slice : Slices the vColumn. """ check_types([("to_replace", to_replace, [str]), ("value", value, [str])]) return self.apply( func="REGEXP_REPLACE({}, '{}', '{}')".format( "{}", to_replace.replace("'", "''"), value.replace("'", "''") ) ) # ---# def str_slice(self, start: int, step: int): """ --------------------------------------------------------------------------- Slices the vColumn. The vColumn will be transformed. Parameters ---------- start: int Start of the slicing. step: int Size of the slicing. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].str_contains : Verifies if the regular expression is in each of the vColumn records. vDataFrame[].str_count : Computes the number of matches for the regular expression in each record of the vColumn. vDataFrame[].extract : Extracts the regular expression in each record of the vColumn. vDataFrame[].str_replace : Replaces the regular expression matches in each of the vColumn records by an input value. """ check_types([("start", start, [int, float]), ("step", step, [int, float])]) return self.apply(func="SUBSTR({}, {}, {})".format("{}", start, step)) # ---# def sub(self, x: float): """ --------------------------------------------------------------------------- Subtracts the input element from the vColumn. Parameters ---------- x: float If the vColumn type is date like (date, datetime ...), the parameter 'x' will represent the number of seconds, otherwise it will represent a number. Returns ------- vDataFrame self.parent See Also -------- vDataFrame[].apply : Applies a function to the input vColumn. """ check_types([("x", x, [int, float])]) if self.isdate(): return self.apply(func="TIMESTAMPADD(SECOND, -({}), {})".format(x, "{}")) else: return self.apply(func="{} - ({})".format("{}", x)) # ---# def sum(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'sum'. Returns ------- float sum See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["sum"]).values[self.alias][0] # ---# def tail(self, limit: int = 5): """ --------------------------------------------------------------------------- Returns the tail of the vColumn. Parameters ---------- limit: int, optional Number of elements to display. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].head : Returns the head of the vColumn. """ return self.iloc(limit=limit, offset=-1) # ---# def topk(self, k: int = -1, dropna: bool = True): """ --------------------------------------------------------------------------- Returns the k most occurent elements and their distributions as percents. Parameters ---------- k: int, optional Number of most occurent elements to return. dropna: bool, optional If set to True, NULL values will not be considered during the computation. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].describe : Computes the vColumn descriptive statistics. 
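
        Example
        -------
        A minimal sketch (assumes the bundled Titanic dataset):

        >>> from verticapy.datasets import load_titanic
        >>> titanic = load_titanic()
        >>> # The 3 most frequent ports with their counts and percentages.
        >>> titanic["embarked"].topk(k=3)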
""" check_types([("k", k, [int, float]), ("dropna", dropna, [bool])]) topk = "" if (k < 1) else "LIMIT {}".format(k) dropna = " WHERE {} IS NOT NULL".format(self.alias) if (dropna) else "" query = "SELECT {} AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {} AS percent FROM {}{} GROUP BY {} ORDER BY _verticapy_cnt_ DESC {}".format( bin_spatial_to_str(self.category(), self.alias), self.alias, self.parent.shape()[0], self.parent.__genSQL__(), dropna, self.alias, topk, ) result = executeSQL( query, title="Computing the top{} categories of {}.".format( k if k > 0 else "", self.alias ), method="fetchall", ) values = { "index": [item[0] for item in result], "count": [int(item[1]) for item in result], "percent": [float(round(item[2], 3)) for item in result], } return tablesample(values) # ---# def value_counts(self, k: int = 30): """ --------------------------------------------------------------------------- Returns the k most occurent elements, how often they occur, and other statistical information. Parameters ---------- k: int, optional Number of most occurent elements to return. Returns ------- tablesample An object containing the result. For more information, see utilities.tablesample. See Also -------- vDataFrame[].describe : Computes the vColumn descriptive statistics. """ return self.describe(method="categorical", max_cardinality=k) # ---# def var(self): """ --------------------------------------------------------------------------- Aggregates the vColumn using 'var' (Variance). Returns ------- float var See Also -------- vDataFrame.aggregate : Computes the vDataFrame input aggregations. """ return self.aggregate(["variance"]).values[self.alias][0] variance = var
34.261009
378
0.486139
13,770
139,271
4.813217
0.07618
0.03093
0.018981
0.012629
0.528214
0.465705
0.423579
0.384698
0.346676
0.330079
0
0.007071
0.356248
139,271
4,064
379
34.269439
0.732176
0.386283
0
0.36368
0
0.010169
0.162804
0.006469
0.000484
0
0
0
0.011138
1
0.040678
false
0.002421
0.010654
0.001937
0.104116
0.004843
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078a84b6d5f9b27e924368e0c1490273227caf78
6,918
py
Python
booktags/flaskapp/book/views.py
MagicSword/Booktags
44142e19aec5ce75266233964d7ab21503bbe57c
[ "Apache-1.1" ]
null
null
null
booktags/flaskapp/book/views.py
MagicSword/Booktags
44142e19aec5ce75266233964d7ab21503bbe57c
[ "Apache-1.1" ]
9
2019-12-20T15:24:38.000Z
2021-12-13T20:28:48.000Z
booktags/flaskapp/book/views.py
MagicSword/BookTags
44142e19aec5ce75266233964d7ab21503bbe57c
[ "Apache-1.1" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
    example.py
    ~~~~~~~~~

    A simple command line application to run flask apps.

    :copyright: 2019 Miller
    :license: BSD-3-Clause
"""

# Known bugs that can't be fixed here:
#   - synopsis() cannot be prevented from clobbering existing
#     loaded modules.
#   - If the __file__ attribute on a module is a relative path and
#     the current directory is changed with os.chdir(), an incorrect
#     path will be displayed.

from flask import render_template, redirect, request, url_for, flash, jsonify, current_app
from flask_login import login_user, logout_user, login_required, current_user

from . import book
from flask_sqlalchemy import get_debug_queries
from sqlalchemy.sql.expression import cast
from datatables import ColumnDT, DataTables

from .. import auth
from .. import db
from .forms import EditBookForm, HackmdMeta

# from booktags.db.basemodels import Book
from booktags.flaskapp.model.models import BookMain

# --------------------------------------------------------- common routines


@book.after_app_request
def after_request(response):
    for query in get_debug_queries():
        if query.duration >= current_app.config['PROJECT_SLOW_DB_QUERY_TIME']:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (query.statement, query.parameters, query.duration,
                   query.context))
    return response


@book.route('/', methods=['GET', 'POST'])
def index():
    # books = BookMain.get_all_book()
    query = BookMain.query
    page = request.args.get('page', 1, type=int)
    pagination = query.order_by(cast(BookMain.id, db.Integer)).paginate(
        page, per_page=current_app.config['PROJECT_BOOKS_PER_PAGE'],
        error_out=False)
    books = pagination.items
    return render_template('book/index.html', books=books,
                           pagination=pagination)


# @book.route('/list/', methods=['GET', 'POST'])
# def list_book():
#     """
#     :param field: col name
#     :param order: asc or desc
#     :return: renew query
#     """
#     books = BookMain.get_all_book()
#     return render_template('book/list_book.html', books=books)


@book.route("/list")
def list_book():
    """List users with DataTables <= 1.10.x."""
    return render_template('book/list_book.html')


@book.route('/data', methods=['GET', 'POST'])
def data():
    """Return server side data."""
    # defining columns
    #  - explicitly cast date to string, so string searching the date
    #    will search a date formatted equal to how it is presented
    #    in the table
    columns = [
        # ColumnDT(cast(BookMain.id, db.Integer)),
        ColumnDT(BookMain.id),
        ColumnDT(BookMain.isbn),
        ColumnDT(BookMain.title_short),
        ColumnDT(BookMain.title),
        ColumnDT(BookMain.catalogue),
        ColumnDT(BookMain.cutter),
        ColumnDT(BookMain.pub_year),
        ColumnDT(BookMain.copy_info)
        # ColumnDT(BookMain.get_link),
        # ColumnDT(BookMain.note),
        # ColumnDT(BookMain.reprint),
        # ColumnDT(BookMain.removed),
        # ColumnDT(BookMain.keepsite)
    ]

    # defining the initial query depending on your purpose
    query = db.session.query().select_from(BookMain)

    # GET parameters
    params = request.args.to_dict()

    # instantiating a DataTable for the query and table needed
    rowTable = DataTables(params, query, columns)

    # returns what is needed by DataTable
    return jsonify(rowTable.output_result())


@book.route('/get/<int:id>', methods=['GET', 'POST'])
def get_book(id):  # the route captures <int:id>, so the view must accept it
    return f"Hello book index : {id}"


@book.route('/post/', methods=['GET', 'POST'])
def post_book():
    """
    Post a new book entry.

    :return:
    """
    books = BookMain.query.all()
    id = int(books[-1].id) + 1
    print(f"id is : {id}")
    form = EditBookForm()
    if form.validate_on_submit():
        # Build a fresh record; the original code mistakenly assigned the
        # form fields onto the query-result list.
        book = BookMain()
        book.id = form.id.data
        book.isbn = form.isbn.data
        book.title_short = form.title_short.data
        book.title = form.title.data
        book.catalogue = form.catalogue.data
        book.cutter = form.cutter.data
        book.pub_year = form.pub_year.data
        book.copy_info = form.copy_info.data
        book.get_link = form.get_link.data
        book.note = form.note.data
        book.reprint = form.reprint.data
        book.removed = form.removed.data
        book.keepsite = form.keepsite.data
        db.session.add(book)
        db.session.commit()
        flash('Your book data has been added.', 'success')
        return redirect(url_for('book.index'))
    form.id.data = id
    return render_template('book/edit_book.html', form=form)


@book.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit_book(id):
    """
    Edit / update book data.

    :param id:
    :return:
    """
    form = EditBookForm()
    book = BookMain.query.filter_by(id=id).first_or_404()
    if form.validate_on_submit():
        # book.id = form.id.data
        book.isbn = form.isbn.data
        book.title_short = form.title_short.data
        book.title = form.title.data
        book.catalogue = form.catalogue.data
        book.cutter = form.cutter.data
        book.pub_year = form.pub_year.data
        book.copy_info = form.copy_info.data
        book.get_link = form.get_link.data
        book.note = form.note.data
        book.reprint = form.reprint.data
        book.removed = form.removed.data
        book.keepsite = form.keepsite.data
        db.session.add(book)
        db.session.commit()
        flash('Your book data has been updated.', 'success')
        return redirect(url_for('book.index'))
    form.id.data = book.id
    form.isbn.data = book.isbn
    form.title_short.data = book.title_short
    form.title.data = book.title
    form.catalogue.data = book.catalogue
    form.cutter.data = book.cutter
    form.pub_year.data = book.pub_year
    form.copy_info.data = book.copy_info
    form.get_link.data = book.get_link
    form.note.data = book.note
    form.reprint.data = book.reprint
    form.removed.data = book.removed
    form.keepsite.data = book.keepsite
    return render_template('book/edit_book.html', form=form)


@book.route('/del/<int:id>', methods=['GET', 'POST'])
def del_book(id):
    return f"Hello book index: del {id}"


@book.route('/hackmdmeta', methods=['GET', 'POST'])
def hackmd_meta():
    """
    :return:
    """
    from booktags.vendor.hackmd_meta import get_hackmdmeta
    form = HackmdMeta()
    if form.validate_on_submit():
        booksn = str(form.booksn.data)
        # print(f"booksn is : {booksn}")
        temp = get_hackmdmeta(booksn)
        # print(temp)
        form.body.data = temp
        # flash('Your book data has been updated.', 'success')
        # return redirect(url_for('book.hackmd_meta'))
    return render_template('book/hackmd_meta.html', form=form)


if __name__ == '__main__':
    pass
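To make the server-side `/data` endpoint above concrete, here is a hedged sketch of exercising it with Flask's test client. The `create_app` factory and its import path are assumptions (the application factory is not shown in this file), and the blueprint is assumed to be registered without a url_prefix:

# Sketch only: `create_app` is a hypothetical factory name.
from booktags.flaskapp import create_app  # hypothetical import path

app = create_app('testing')
with app.test_client() as client:
    # DataTables sends its paging state as GET parameters; draw, start and
    # length are part of the DataTables server-side protocol.
    resp = client.get('/data',
                      query_string={'draw': 1, 'start': 0, 'length': 10})
    payload = resp.get_json()
    print(payload['recordsTotal'], len(payload['data']))

The `output_result()` call in the view returns exactly this shape: a dict with `draw`, `recordsTotal`, `recordsFiltered`, and `data` keys.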
30.746667
88
0.645273
902
6,918
4.827051
0.257206
0.067983
0.025723
0.031236
0.341755
0.286633
0.262058
0.245521
0.245521
0.245521
0
0.002966
0.220295
6,918
224
89
30.883929
0.804227
0.238653
0
0.296
0
0.008
0.094969
0.013456
0
0
0
0
0
1
0.072
false
0.008
0.088
0.016
0.248
0.032
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078df566472a507b372dad23be527536aa2fa29f
4,082
py
Python
storelet.py
markembling/storelet
9951368e2f143855d2c14509bdb8cf796d6e54b8
[ "BSD-3-Clause" ]
1
2015-09-07T17:19:40.000Z
2015-09-07T17:19:40.000Z
storelet.py
markembling/storelet
9951368e2f143855d2c14509bdb8cf796d6e54b8
[ "BSD-3-Clause" ]
1
2016-01-05T13:18:16.000Z
2016-01-05T14:16:57.000Z
storelet.py
markembling/storelet
9951368e2f143855d2c14509bdb8cf796d6e54b8
[ "BSD-3-Clause" ]
1
2019-02-21T09:20:48.000Z
2019-02-21T09:20:48.000Z
import os
import logging
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime

from boto.s3.connection import S3Connection
from boto.s3.key import Key

__version__ = "0.1.8"
__author__ = "Mark Embling"
__email__ = "mark@markembling.info"

logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

LOGGING_DEFAULTS = {"level": logging.INFO,
                    "format": "%(asctime)s [%(levelname)s]: %(message)s"}


def setup_logging(**kwargs):
    """Convenience function for setting up some sane logging defaults"""
    # Merge via copy-and-update so this also works on Python 3; the original
    # `dict(LOGGING_DEFAULTS.items() + kwargs.items())` is Python 2 only.
    opts = dict(LOGGING_DEFAULTS)
    opts.update(kwargs)
    logging.basicConfig(**opts)


class ZipBackup(object):
    """
    A compressed ZIP file backup

    Note: large inclusion operations can sometimes take time as files are
    compressed on the fly. This prevents all the files being copied to a
    temporary location (and using unnecessary extra space) and storing up
    the need for a potentially large compression at the end.
    """

    def __init__(self, name):
        self.name = name
        _, self._path = mkstemp()
        logger.debug("Created temporary file %s" % self._path)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        os.remove(self._path)
        logger.debug("Removed temporary file %s" % self._path)

    def include_directory(self, path, preserve_paths=False, name=None):
        """Add the contents of a directory to the backup"""
        path = os.path.abspath(path)
        logger.debug("Adding directory %s" % path)
        with ZipFile(self._path, 'a', ZIP_DEFLATED, allowZip64=True) as zipfile:
            for base, dirs, files in os.walk(path):
                logger.debug("Walking directory %s" % path)
                for file in files:
                    filename = os.path.join(base, file)
                    try:
                        zipfile.write(filename,
                                      self._get_filename_for_archive(
                                          path, filename, preserve_paths, name))
                        logger.info("Added file %s" % filename)
                    except Exception:  # narrowed from a bare `except:`
                        logger.warning("Could not add file %s" % file,
                                       exc_info=True)
        logger.debug("Finished directory %s" % path)

    def save_to_s3(self, bucket, access_key, secret_key, **kwargs):
        """Save the backup to Amazon S3"""
        logger.info("Saving to S3 in '%s' bucket" % bucket)
        conn = S3Connection(access_key, secret_key, **kwargs)
        bucket = conn.get_bucket(bucket)
        key = Key(bucket)
        key.key = '%s_%s.zip' % \
            (self.name, datetime.now().strftime("%Y%m%d%H%M%S"))
        key.set_contents_from_filename(self._path)
        logger.info("Saving to S3 done %s" % key.key)

    def include_new_dir(self, name):
        """Add a new empty directory to the backup"""
        return BackupIncludedDirectory(name, self)

    def _get_filename_for_archive(self, directory, filename,
                                  preserve_paths, name):
        if not preserve_paths:
            filename = filename.replace(directory, "")
        if name is not None:
            filename = name + os.sep + filename
        return filename


class BackupIncludedDirectory(object):
    """A new directory which is subsequently added to the backup"""

    def __init__(self, name, owner):
        self.name = name
        self.path = mkdtemp()
        self._owner = owner
        logger.debug("Created temporary directory %s" % self.path)

    def __str__(self):
        return self.path

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self._owner.include_directory(self.path, preserve_paths=False,
                                      name=self.name)
        rmtree(self.path)
        logger.debug("Removed temporary directory %s" % self.path)
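A minimal usage sketch for the backup class above; the directory path, bucket name, and credentials are placeholders, not real values:

# Sketch only: paths and S3 credentials below are placeholders.
import os
import storelet

storelet.setup_logging()

with storelet.ZipBackup("nightly") as backup:
    # Zip an existing directory, keeping its internal paths.
    backup.include_directory("/path/to/data", preserve_paths=True)

    # Stage fresh content in a temporary directory; it is added to the
    # archive (and cleaned up) when the inner `with` block exits.
    with backup.include_new_dir("reports") as d:
        with open(os.path.join(str(d), "note.txt"), "w") as f:
            f.write("hello")

    backup.save_to_s3("my-bucket", "ACCESS_KEY", "SECRET_KEY")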
36.123894
82
0.607055
487
4,082
4.909651
0.342916
0.043496
0.015056
0.015056
0.205353
0.132999
0.091175
0.091175
0.052698
0.052698
0
0.004509
0.293729
4,082
112
83
36.446429
0.824835
0.130818
0
0.101266
0
0
0.103695
0.006015
0
0
0
0
0
1
0.164557
false
0
0.101266
0.037975
0.35443
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078f93641653dd1d0323ab98d96a9fc89761a30c
721
py
Python
elementary/date-and-time-convertor.py
vargad/exercises
1a2fc2557672749d590ebdf596f99f53405320a1
[ "MIT" ]
1
2018-02-24T10:51:07.000Z
2018-02-24T10:51:07.000Z
elementary/date-and-time-convertor.py
vargad/exercises
1a2fc2557672749d590ebdf596f99f53405320a1
[ "MIT" ]
null
null
null
elementary/date-and-time-convertor.py
vargad/exercises
1a2fc2557672749d590ebdf596f99f53405320a1
[ "MIT" ]
1
2019-02-13T21:41:07.000Z
2019-02-13T21:41:07.000Z
#!/usr/bin/env python3

def date_time(time):
    months = ["January", "February", "March", "April", "May", "June",
              "July", "August", "September", "October", "November", "December"]
    hour, minute = int(time[11:13]), int(time[14:16])
    return (f"{int(time[0:2])} {months[int(time[3:5])-1]} {time[6:10]} year "
            f"{hour} hour{'s' if hour != 1 else ''} "
            f"{minute} minute{'s' if minute != 1 else ''}")

if __name__ == '__main__':
    print(date_time("01.01.2018 00:00"))
    assert date_time("01.01.2018 00:00") == "1 January 2018 year 0 hours 0 minutes"
    assert date_time("04.08.1984 08:15") == "4 August 1984 year 8 hours 15 minutes"
    assert date_time("17.12.1990 07:42") == "17 December 1990 year 7 hours 42 minutes"
51.5
153
0.62552
120
721
3.65
0.508333
0.091324
0.09589
0.054795
0.091324
0.091324
0.091324
0
0
0
0
0.151414
0.166436
721
13
154
55.461538
0.577371
0.029126
0
0
0
0.1
0.570815
0.037196
0
0
0
0
0.3
1
0.1
false
0
0
0
0.2
0.1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078fda9e491f07fc6685fbdf4be7377dd5f3e4a3
10,361
py
Python
lbry/wallet/server/peer.py
snapperVibes/lbry-sdk
77a51d1ad43404e5dc52af715a7bebfaeb3fee16
[ "MIT" ]
2
2021-04-14T07:37:37.000Z
2021-05-18T13:20:11.000Z
lbry/wallet/server/peer.py
lucianolb76/lbry-sdk
0c09f24cbf5bd0959dedca63363ff7ffadd45d66
[ "MIT" ]
null
null
null
lbry/wallet/server/peer.py
lucianolb76/lbry-sdk
0c09f24cbf5bd0959dedca63363ff7ffadd45d66
[ "MIT" ]
null
null
null
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""Representation of a peer server."""

from ipaddress import ip_address

from lbry.wallet.server import util
from lbry.wallet.server.util import cachedproperty

from typing import Dict


class Peer:

    # Protocol version
    ATTRS = ('host', 'features',
             # metadata
             'source', 'ip_addr', 'last_good', 'last_try', 'try_count')
    FEATURES = ('pruning', 'server_version', 'protocol_min', 'protocol_max',
                'ssl_port', 'tcp_port')
    # This should be set by the application
    DEFAULT_PORTS: Dict[str, int] = {}

    def __init__(self, host, features, source='unknown', ip_addr=None,
                 last_good=0, last_try=0, try_count=0):
        """Create a peer given a host name (or IP address as a string),
        a dictionary of features, and a record of the source."""
        assert isinstance(host, str)
        assert isinstance(features, dict)
        assert host in features.get('hosts', {})
        self.host = host
        self.features = features.copy()
        # Canonicalize / clean-up
        for feature in self.FEATURES:
            self.features[feature] = getattr(self, feature)
        # Metadata
        self.source = source
        self.ip_addr = ip_addr
        # last_good represents the last connection that was
        # successful *and* successfully verified, at which point
        # try_count is set to 0.  Failure to connect or failure to
        # verify increment the try_count.
        self.last_good = last_good
        self.last_try = last_try
        self.try_count = try_count
        # Transient, non-persisted metadata
        self.bad = False
        self.other_port_pairs = set()
        self.status = 2

    @classmethod
    def peers_from_features(cls, features, source):
        peers = []
        if isinstance(features, dict):
            hosts = features.get('hosts')
            if isinstance(hosts, dict):
                peers = [Peer(host, features, source=source)
                         for host in hosts
                         if isinstance(host, str)]
        return peers

    @classmethod
    def deserialize(cls, item):
        """Deserialize from a dictionary."""
        return cls(**item)

    def matches(self, peers):
        """Return peers whose host matches our hostname or IP address.
        Additionally include all peers whose IP address matches our
        hostname if that is an IP address.
        """
        candidates = (self.host.lower(), self.ip_addr)
        return [peer for peer in peers
                if peer.host.lower() in candidates
                or peer.ip_addr == self.host]

    def __str__(self):
        return self.host

    def update_features(self, features):
        """Update features in-place."""
        try:
            tmp = Peer(self.host, features)
        except Exception:
            pass
        else:
            self.update_features_from_peer(tmp)

    def update_features_from_peer(self, peer):
        if peer != self:
            self.features = peer.features
            for feature in self.FEATURES:
                setattr(self, feature, getattr(peer, feature))

    def connection_port_pairs(self):
        """Return a list of (kind, port) pairs to try when making a
        connection."""
        # Use a list not a set - it's important to try the registered
        # ports first.
        pairs = [('SSL', self.ssl_port), ('TCP', self.tcp_port)]
        while self.other_port_pairs:
            pairs.append(self.other_port_pairs.pop())
        return [pair for pair in pairs if pair[1]]

    def mark_bad(self):
        """Mark as bad to avoid reconnects but also to remember for a
        while."""
        self.bad = True

    def check_ports(self, other):
        """Remember differing ports in case the server operator changed
        them or removed one."""
        if other.ssl_port != self.ssl_port:
            self.other_port_pairs.add(('SSL', other.ssl_port))
        if other.tcp_port != self.tcp_port:
            self.other_port_pairs.add(('TCP', other.tcp_port))
        return bool(self.other_port_pairs)

    @cachedproperty
    def is_tor(self):
        return self.host.endswith('.onion')

    @cachedproperty
    def is_valid(self):
        ip = self.ip_address
        if ip:
            return ((ip.is_global or ip.is_private)
                    and not (ip.is_multicast or ip.is_unspecified))
        return util.is_valid_hostname(self.host)

    @cachedproperty
    def is_public(self):
        ip = self.ip_address
        if ip:
            return self.is_valid and not ip.is_private
        else:
            return self.is_valid and self.host != 'localhost'

    @cachedproperty
    def ip_address(self):
        """The host as a python ip_address object, or None."""
        try:
            return ip_address(self.host)
        except ValueError:
            return None

    def bucket(self):
        if self.is_tor:
            return 'onion'
        if not self.ip_addr:
            return ''
        return tuple(self.ip_addr.split('.')[:2])

    def serialize(self):
        """Serialize to a dictionary."""
        return {attr: getattr(self, attr) for attr in self.ATTRS}

    def _port(self, key):
        hosts = self.features.get('hosts')
        if isinstance(hosts, dict):
            host = hosts.get(self.host)
            port = self._integer(key, host)
            if port and 0 < port < 65536:
                return port
        return None

    def _integer(self, key, d=None):
        d = d or self.features
        result = d.get(key) if isinstance(d, dict) else None
        if isinstance(result, str):
            try:
                result = int(result)
            except ValueError:
                pass
        return result if isinstance(result, int) else None

    def _string(self, key):
        result = self.features.get(key)
        return result if isinstance(result, str) else None

    @cachedproperty
    def genesis_hash(self):
        """Returns the genesis hash as a string if known, otherwise None."""
        return self._string('genesis_hash')

    @cachedproperty
    def ssl_port(self):
        """Returns None if no SSL port, otherwise the port as an integer."""
        return self._port('ssl_port')

    @cachedproperty
    def tcp_port(self):
        """Returns None if no TCP port, otherwise the port as an integer."""
        return self._port('tcp_port')

    @cachedproperty
    def server_version(self):
        """Returns the server version as a string if known, otherwise None."""
        return self._string('server_version')

    @cachedproperty
    def pruning(self):
        """Returns the pruning level as an integer.  None indicates no
        pruning."""
        pruning = self._integer('pruning')
        if pruning and pruning > 0:
            return pruning
        return None

    def _protocol_version_string(self, key):
        version_str = self.features.get(key)
        ptuple = util.protocol_tuple(version_str)
        return util.version_string(ptuple)

    @cachedproperty
    def protocol_min(self):
        """Minimum protocol version as a string, e.g., 1.0"""
        return self._protocol_version_string('protocol_min')

    @cachedproperty
    def protocol_max(self):
        """Maximum protocol version as a string, e.g., 1.1"""
        return self._protocol_version_string('protocol_max')

    def to_tuple(self):
        """The tuple (ip, host, details) expected in response to a peers
        subscription."""
        details = self.real_name().split()[1:]
        return (self.ip_addr or self.host, self.host, details)

    def real_name(self):
        """Real name of this peer as used on IRC."""
        def port_text(letter, port):
            if port == self.DEFAULT_PORTS.get(letter):
                return letter
            else:
                return letter + str(port)

        parts = [self.host, 'v' + self.protocol_max]
        if self.pruning:
            parts.append(f'p{self.pruning:d}')
        for letter, port in (('s', self.ssl_port), ('t', self.tcp_port)):
            if port:
                parts.append(port_text(letter, port))
        return ' '.join(parts)

    @classmethod
    def from_real_name(cls, real_name, source):
        """Real name is a real name as on IRC, such as

            "erbium1.sytes.net v1.0 s t"

        Returns an instance of this Peer class.
        """
        host = 'nohost'
        features = {}
        ports = {}
        for n, part in enumerate(real_name.split()):
            if n == 0:
                host = part
                continue
            if part[0] in ('s', 't'):
                if len(part) == 1:
                    port = cls.DEFAULT_PORTS[part[0]]
                else:
                    port = part[1:]
                if part[0] == 's':
                    ports['ssl_port'] = port
                else:
                    ports['tcp_port'] = port
            elif part[0] == 'v':
                features['protocol_max'] = features['protocol_min'] = part[1:]
            elif part[0] == 'p':
                features['pruning'] = part[1:]

        features.update(ports)
        features['hosts'] = {host: ports}

        return cls(host, features, source)
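A small sketch of the IRC real-name round trip described in `from_real_name`, using the docstring's own example host; the port numbers assigned to DEFAULT_PORTS are assumptions:

# Sketch: DEFAULT_PORTS must be populated by the application before bare
# 's'/'t' flags can be resolved; these port numbers are assumed values.
Peer.DEFAULT_PORTS = {'s': 50002, 't': 50001}

peer = Peer.from_real_name("erbium1.sytes.net v1.0 s t", source="irc")
print(peer.host)          # 'erbium1.sytes.net'
print(peer.protocol_max)  # '1.0'
print(peer.real_name())   # 'erbium1.sytes.net v1.0 s t' again

The round trip works because `real_name()` emits a bare letter exactly when a port equals its DEFAULT_PORTS entry, and spells out the port otherwise.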
34.194719
78
0.599266
1,320
10,361
4.596212
0.223485
0.018461
0.012856
0.017801
0.108456
0.08505
0.059502
0.047305
0.028845
0.028845
0
0.00499
0.303639
10,361
302
79
34.307947
0.835897
0.269858
0
0.192708
0
0
0.044196
0
0
0
0
0
0.015625
1
0.161458
false
0.010417
0.020833
0.010417
0.385417
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
078fed475eb9a6e7954859be5858da011d4c522e
1,787
py
Python
tests/components/deconz/test_diagnostics.py
aomann/core
5e71e7b775461cd4849c36075c6a1459a7d0ad22
[ "Apache-2.0" ]
null
null
null
tests/components/deconz/test_diagnostics.py
aomann/core
5e71e7b775461cd4849c36075c6a1459a7d0ad22
[ "Apache-2.0" ]
24
2021-11-03T06:20:16.000Z
2022-03-31T06:23:17.000Z
tests/components/deconz/test_diagnostics.py
aomann/core
5e71e7b775461cd4849c36075c6a1459a7d0ad22
[ "Apache-2.0" ]
null
null
null
"""Test deCONZ diagnostics.""" from unittest.mock import patch from pydeconz.websocket import STATE_RUNNING from homeassistant.const import Platform from .test_gateway import DECONZ_CONFIG, setup_deconz_integration from tests.components.diagnostics import get_diagnostics_for_config_entry async def test_entry_diagnostics( hass, hass_client, aioclient_mock, mock_deconz_websocket ): """Test config entry diagnostics.""" config_entry = await setup_deconz_integration(hass, aioclient_mock) await mock_deconz_websocket(state=STATE_RUNNING) await hass.async_block_till_done() with patch( "homeassistant.helpers.system_info.async_get_system_info", return_value={"get_system_info": "fake data"}, ): assert await get_diagnostics_for_config_entry( hass, hass_client, config_entry ) == { "home_assistant": {"get_system_info": "fake data"}, "config_entry": dict(config_entry.data), "deconz_config": DECONZ_CONFIG, "websocket_state": STATE_RUNNING, "deconz_ids": {}, "entities": { str(Platform.ALARM_CONTROL_PANEL): [], str(Platform.BINARY_SENSOR): [], str(Platform.CLIMATE): [], str(Platform.COVER): [], str(Platform.FAN): [], str(Platform.LIGHT): [], str(Platform.LOCK): [], str(Platform.NUMBER): [], str(Platform.SENSOR): [], str(Platform.SIREN): [], str(Platform.SWITCH): [], }, "events": {}, "alarm_systems": {}, "groups": {}, "lights": {}, "scenes": {}, "sensors": {}, }
32.490909
73
0.582541
173
1,787
5.716763
0.387283
0.122346
0.039434
0.046512
0.09909
0
0
0
0
0
0
0
0.297706
1,787
54
74
33.092593
0.788048
0.01343
0
0.046512
0
0
0.127252
0.031958
0
0
0
0
0.023256
1
0
false
0
0.116279
0
0.116279
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079020088d5707b9fc6c67fde0c7358e446490f2
32,821
py
Python
jax_md/partition.py
l1zp/jax-md
2440794aebb1c77116459e2ec2051d537a94ecf4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
jax_md/partition.py
l1zp/jax-md
2440794aebb1c77116459e2ec2051d537a94ecf4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
jax_md/partition.py
l1zp/jax-md
2440794aebb1c77116459e2ec2051d537a94ecf4
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Code to transform functions on individual tuples of particles to sets."""

from absl import logging
from functools import reduce, partial
from collections import namedtuple
from enum import Enum
from typing import Any, Callable, Optional, Dict, Tuple, Generator, Union
import math
from operator import mul

import numpy as onp

from jax import lax
from jax import ops
from jax import jit, vmap, eval_shape
from jax.abstract_arrays import ShapedArray
from jax.interpreters import partial_eval as pe
import jax.numpy as jnp

from jax_md import quantity, space, dataclasses, util

import jraph


# Types


Array = util.Array
f32 = util.f32
f64 = util.f64

i32 = util.i32
i64 = util.i64

Box = space.Box
DisplacementOrMetricFn = space.DisplacementOrMetricFn
MetricFn = space.MetricFn


# Cell List


@dataclasses.dataclass
class CellList:
  """Stores the spatial partition of a system into a cell list.

  See cell_list(...) for details on the construction / specification.
  Cell list buffers all have a common shape, S, where
    * `S = [cell_count_x, cell_count_y, cell_capacity]`
    * `S = [cell_count_x, cell_count_y, cell_count_z, cell_capacity]`
  in two- and three-dimensions respectively. It is assumed that each cell has
  the same capacity.

  Attributes:
    position_buffer: An ndarray of floating point positions with shape
      S + [spatial_dimension].
    id_buffer: An ndarray of int32 particle ids of shape S. Note that empty
      slots are specified by id = N where N is the number of particles in the
      system.
    kwarg_buffers: A dictionary of ndarrays of shape S + [...]. This contains
      side data placed into the cell list.
  """
  position_buffer: Array
  id_buffer: Array
  kwarg_buffers: Dict[str, Array]


def _cell_dimensions(spatial_dimension: int,
                     box_size: Box,
                     minimum_cell_size: float) -> Tuple[Box, Array, Array, int]:
  """Compute the number of cells-per-side and total number of cells in a box."""
  if isinstance(box_size, int) or isinstance(box_size, float):
    box_size = float(box_size)

  # NOTE(schsam): Should we auto-cast based on box_size? I can't imagine a case
  # in which the box_size would not be accurately represented by an f32.
  if (isinstance(box_size, onp.ndarray) and
      (box_size.dtype == jnp.int32 or box_size.dtype == jnp.int64)):
    box_size = float(box_size)

  cells_per_side = onp.floor(box_size / minimum_cell_size)
  cell_size = box_size / cells_per_side
  cells_per_side = onp.array(cells_per_side, dtype=jnp.int64)

  if isinstance(box_size, onp.ndarray):
    if box_size.ndim == 1 or box_size.ndim == 2:
      assert box_size.size == spatial_dimension
      flat_cells_per_side = onp.reshape(cells_per_side, (-1,))
      for cells in flat_cells_per_side:
        if cells < 3:
          raise ValueError(
              ('Box must be at least 3x the size of the grid spacing in each '
               'dimension.'))
      cell_count = reduce(mul, flat_cells_per_side, 1)
    elif box_size.ndim == 0:
      cell_count = cells_per_side ** spatial_dimension
    else:
      raise ValueError('Box must either be a scalar or a vector.')
  else:
    cell_count = cells_per_side ** spatial_dimension

  return box_size, cell_size, cells_per_side, int(cell_count)


def count_cell_filling(R: Array,
                       box_size: Box,
                       minimum_cell_size: float) -> Array:
  """Counts the number of particles per-cell in a spatial partition."""
  dim = int(R.shape[1])
  box_size, cell_size, cells_per_side, cell_count = \
      _cell_dimensions(dim, box_size, minimum_cell_size)
  hash_multipliers = _compute_hash_constants(dim, cells_per_side)
  particle_index = jnp.array(R / cell_size, dtype=jnp.int64)
  particle_hash = jnp.sum(particle_index * hash_multipliers, axis=1)
  filling = ops.segment_sum(jnp.ones_like(particle_hash),
                            particle_hash,
                            cell_count)
  return filling


def _is_variable_compatible_with_positions(R: Array) -> bool:
  if (util.is_array(R) and
      len(R.shape) == 2 and
      jnp.issubdtype(R.dtype, jnp.floating)):
    return True
  return False


def _compute_hash_constants(spatial_dimension: int,
                            cells_per_side: Array) -> Array:
  if cells_per_side.size == 1:
    return jnp.array([[cells_per_side ** d for d in range(spatial_dimension)]],
                     dtype=jnp.int64)
  elif cells_per_side.size == spatial_dimension:
    one = jnp.array([[1]], dtype=jnp.int32)
    cells_per_side = jnp.concatenate((one, cells_per_side[:, :-1]), axis=1)
    return jnp.array(jnp.cumprod(cells_per_side), dtype=jnp.int64)
  else:
    raise ValueError()


def _neighboring_cells(dimension: int) -> Generator[onp.ndarray, None, None]:
  for dindex in onp.ndindex(*([3] * dimension)):
    yield onp.array(dindex, dtype=jnp.int64) - 1


def _estimate_cell_capacity(R: Array,
                            box_size: Box,
                            cell_size: float,
                            buffer_size_multiplier: float) -> int:
  # TODO(schsam): We might want to do something more sophisticated here or at
  # least expose this constant.
  spatial_dim = R.shape[-1]
  cell_capacity = onp.max(count_cell_filling(R, box_size, cell_size))
  return int(cell_capacity * buffer_size_multiplier)


def _unflatten_cell_buffer(arr: Array,
                           cells_per_side: Array,
                           dim: int) -> Array:
  if (isinstance(cells_per_side, int) or
      isinstance(cells_per_side, float) or
      (util.is_array(cells_per_side) and not cells_per_side.shape)):
    cells_per_side = (int(cells_per_side),) * dim
  elif util.is_array(cells_per_side) and len(cells_per_side.shape) == 1:
    cells_per_side = tuple([int(x) for x in cells_per_side[::-1]])
  elif util.is_array(cells_per_side) and len(cells_per_side.shape) == 2:
    cells_per_side = tuple([int(x) for x in cells_per_side[0][::-1]])
  else:
    raise ValueError()  # TODO
  return jnp.reshape(arr, cells_per_side + (-1,) + arr.shape[1:])


def _shift_array(arr: onp.ndarray, dindex: Array) -> Array:
  if len(dindex) == 2:
    dx, dy = dindex
    dz = 0
  elif len(dindex) == 3:
    dx, dy, dz = dindex

  if dx < 0:
    arr = jnp.concatenate((arr[1:], arr[:1]))
  elif dx > 0:
    arr = jnp.concatenate((arr[-1:], arr[:-1]))

  if dy < 0:
    arr = jnp.concatenate((arr[:, 1:], arr[:, :1]), axis=1)
  elif dy > 0:
    arr = jnp.concatenate((arr[:, -1:], arr[:, :-1]), axis=1)

  if dz < 0:
    arr = jnp.concatenate((arr[:, :, 1:], arr[:, :, :1]), axis=2)
  elif dz > 0:
    arr = jnp.concatenate((arr[:, :, -1:], arr[:, :, :-1]), axis=2)

  return arr


def _vectorize(f: Callable, dim: int) -> Callable:
  if dim == 2:
    return vmap(vmap(f, 0, 0), 0, 0)
  elif dim == 3:
    return vmap(vmap(vmap(f, 0, 0), 0, 0), 0, 0)
  raise ValueError('Cell list only supports 2d or 3d.')


def cell_list(box_size: Box,
              minimum_cell_size: float,
              cell_capacity_or_example_R: Union[int, Array],
              buffer_size_multiplier: float=1.1
              ) -> Callable[[Array], CellList]:
  r"""Returns a function that partitions point data spatially.

  Given a set of points {x_i \in R^d} with associated data {k_i \in R^m} it is
  often useful to partition the points / data spatially. A simple partitioning
  that can be implemented efficiently within XLA is a dense partition into a
  uniform grid called a cell list.

  Since XLA requires that shapes be statically specified, we allocate fixed
  sized buffers for each cell. The size of this buffer can either be specified
  manually or it can be estimated automatically from a set of positions. Note,
  if the distribution of points changes significantly it is likely the buffer
  sizes will have to be adjusted.

  This partitioning will likely form the groundwork for parallelizing
  simulations over different accelerators.

  Args:
    box_size: A float or an ndarray of shape [spatial_dimension] specifying
      the size of the system. Note, this code is written for the case where
      the boundaries are periodic. If this is not the case, then the current
      code will be slightly less efficient.
    minimum_cell_size: A float specifying the minimum side length of each
      cell. Cells are enlarged so that they exactly fill the box.
    cell_capacity_or_example_R: Either an integer specifying the number of
      particles that can be stored in each cell or an ndarray of positions of
      shape [particle_count, spatial_dimension] that is used to estimate the
      cell_capacity.
    buffer_size_multiplier: A floating point multiplier that multiplies the
      estimated cell capacity to allow for fluctuations in the maximum cell
      occupancy.
  Returns:
    A function `cell_list_fn(R, **kwargs)` that partitions positions, `R`, and
    side data specified by kwargs into a cell list. Returns a CellList
    containing the partition.
  """
  if util.is_array(box_size):
    box_size = onp.array(box_size)
    if len(box_size.shape) == 1:
      box_size = jnp.reshape(box_size, (1, -1))

  if util.is_array(minimum_cell_size):
    minimum_cell_size = onp.array(minimum_cell_size)

  cell_capacity = cell_capacity_or_example_R
  if _is_variable_compatible_with_positions(cell_capacity):
    cell_capacity = _estimate_cell_capacity(
        cell_capacity, box_size, minimum_cell_size, buffer_size_multiplier)
  elif not isinstance(cell_capacity, int):
    msg = (
        'cell_capacity_or_example_positions must either be an integer '
        'specifying the cell capacity or a set of positions that will be '
        'used to estimate a cell capacity. Found {}.'.format(
            type(cell_capacity))
        )
    raise ValueError(msg)

  def build_cells(R: Array, extra_capacity: int=0, **kwargs) -> CellList:
    N = R.shape[0]
    dim = R.shape[1]
    _cell_capacity = cell_capacity + extra_capacity

    if dim != 2 and dim != 3:
      # NOTE(schsam): Do we want to check this in compute_fn as well?
      raise ValueError(
          'Cell list spatial dimension must be 2 or 3. Found {}'.format(dim))

    neighborhood_tile_count = 3 ** dim

    _, cell_size, cells_per_side, cell_count = \
        _cell_dimensions(dim, box_size, minimum_cell_size)

    hash_multipliers = _compute_hash_constants(dim, cells_per_side)

    # Create cell list data.
    particle_id = lax.iota(jnp.int64, N)
    # NOTE(schsam): We use the convention that particles that are successfully
    # copied have their true id whereas particles in empty slots have id = N.
    # Then when we copy data back from the grid, copy it to an array of shape
    # [N + 1, output_dimension] and then truncate it to an array of shape
    # [N, output_dimension] which ignores the empty slots.
    mask_id = jnp.ones((N,), jnp.int64) * N
    cell_R = jnp.zeros((cell_count * _cell_capacity, dim), dtype=R.dtype)
    cell_id = N * jnp.ones((cell_count * _cell_capacity, 1), dtype=i32)

    # It might be worth adding an occupied mask. However, that will involve
    # more compute since often we will do a mask for species that will include
    # an occupancy test. It seems easier to design around this
    # empty_data_value for now and revisit the issue if it comes up later.
    empty_kwarg_value = 10 ** 5
    cell_kwargs = {}
    for k, v in kwargs.items():
      if not util.is_array(v):
        raise ValueError((
            'Data must be specified as an ndarray. Found "{}" with '
            'type {}'.format(k, type(v))))
      if v.shape[0] != R.shape[0]:
        raise ValueError(
            ('Data must be specified per-particle (an ndarray with shape '
             '(R.shape[0], ...)). Found "{}" with shape {}'.format(
                 k, v.shape)))
      kwarg_shape = v.shape[1:] if v.ndim > 1 else (1,)
      cell_kwargs[k] = empty_kwarg_value * jnp.ones(
          (cell_count * _cell_capacity,) + kwarg_shape, v.dtype)

    indices = jnp.array(R / cell_size, dtype=i32)
    hashes = jnp.sum(indices * hash_multipliers, axis=1)

    # Copy the particle data into the grid. Here we use a trick to allow us to
    # copy into all cells simultaneously using a single lax.scatter call. To
    # do this we first sort particles by their cell hash. We then assign each
    # particle to have a cell id = hash * cell_capacity + grid_id where
    # grid_id is a flat list that repeats 0, .., cell_capacity. So long as
    # there are fewer than cell_capacity particles per cell, each particle is
    # guaranteed to get a cell id that is unique.
    sort_map = jnp.argsort(hashes)
    sorted_R = R[sort_map]
    sorted_hash = hashes[sort_map]
    sorted_id = particle_id[sort_map]
    sorted_kwargs = {}
    for k, v in kwargs.items():
      sorted_kwargs[k] = v[sort_map]

    sorted_cell_id = jnp.mod(lax.iota(jnp.int64, N), _cell_capacity)
    sorted_cell_id = sorted_hash * _cell_capacity + sorted_cell_id

    cell_R = ops.index_update(cell_R, sorted_cell_id, sorted_R)
    sorted_id = jnp.reshape(sorted_id, (N, 1))
    cell_id = ops.index_update(cell_id, sorted_cell_id, sorted_id)
    cell_R = _unflatten_cell_buffer(cell_R, cells_per_side, dim)
    cell_id = _unflatten_cell_buffer(cell_id, cells_per_side, dim)

    for k, v in sorted_kwargs.items():
      if v.ndim == 1:
        v = jnp.reshape(v, v.shape + (1,))
      cell_kwargs[k] = ops.index_update(cell_kwargs[k], sorted_cell_id, v)
      cell_kwargs[k] = _unflatten_cell_buffer(
          cell_kwargs[k], cells_per_side, dim)

    return CellList(cell_R, cell_id, cell_kwargs)  # pytype: disable=wrong-arg-count
  return build_cells


def _displacement_or_metric_to_metric_sq(
    displacement_or_metric: DisplacementOrMetricFn) -> MetricFn:
  """Checks whether or not a displacement or metric was provided."""
  for dim in range(1, 4):
    try:
      R = ShapedArray((dim,), f32)
      dR_or_dr = eval_shape(displacement_or_metric, R, R, t=0)
      if len(dR_or_dr.shape) == 0:
        return lambda Ra, Rb, **kwargs: \
          displacement_or_metric(Ra, Rb, **kwargs) ** 2
      else:
        return lambda Ra, Rb, **kwargs: space.square_distance(
          displacement_or_metric(Ra, Rb, **kwargs))
    except TypeError:
      continue
    except ValueError:
      continue
  raise ValueError(
    'Canonicalize displacement not implemented for spatial dimension larger '
    'than 4.')


class NeighborListFormat(Enum):
  """An enum listing the different neighbor list formats.

  Attributes:
    Dense: A dense neighbor list where the ids are a square matrix of shape
      `(N, max_neighbors_per_atom)`. Here the capacity of the neighbor list
      must scale with the highest connectivity neighbor.
    Sparse: A sparse neighbor list where the ids are a rectangular matrix of
      shape `(2, max_neighbors)` specifying the start / end particle of each
      neighbor pair.
    OrderedSparse: A sparse neighbor list whose format is the same as `Sparse`
      where only bonds with i < j are included.
  """
  Dense = 0
  Sparse = 1
  OrderedSparse = 2


def is_sparse(format: NeighborListFormat) -> bool:
  return (format is NeighborListFormat.Sparse or
          format is NeighborListFormat.OrderedSparse)


def is_format_valid(format: NeighborListFormat):
  if not format in list(NeighborListFormat):
    raise ValueError((
        'Neighbor list format must be a member of NeighborListFormat'
        f' found {format}.'))


@dataclasses.dataclass
class NeighborList(object):
  """A struct containing the state of a Neighbor List.

  Attributes:
    idx: For an N particle system this is an `[N, max_occupancy]` array of
      integers such that `idx[i, j]` is the jth neighbor of particle i.
    reference_position: The positions of particles when the neighbor list was
      constructed. This is used to decide whether the neighbor list ought to
      be updated.
    did_buffer_overflow: A boolean that starts out False. If there are ever
      more neighbors than max_neighbors this is set to true to indicate that
      there was a buffer overflow. If this happens, it means that the results
      of the simulation will be incorrect and the simulation needs to be rerun
      using a larger buffer.
    max_occupancy: A static integer specifying the maximum size of the
      neighbor list. Changing this will invoke a recompilation.
    format: A NeighborListFormat enum specifying the format of the neighbor
      list.
    cell_list_fn: A static python callable that is used to construct a cell
      list used in an intermediate step of the neighbor list calculation.
    update_fn: A static python function used to update the neighbor list.
  """
  idx: Array
  reference_position: Array
  did_buffer_overflow: Array
  max_occupancy: int = dataclasses.static_field()
  format: NeighborListFormat = dataclasses.static_field()
  cell_list_fn: Callable[[Array], CellList] = dataclasses.static_field()
  update_fn: Callable[[Array, 'NeighborList'],
                      'NeighborList'] = dataclasses.static_field()

  def update(self, R, **kwargs):
    return self.update_fn(R, self, **kwargs)


@dataclasses.dataclass
class NeighborListFns:
  """A struct containing functions to allocate and update neighbor lists.

  Attributes:
    allocate: A function to allocate a new neighbor list. This function cannot
      be compiled, since it uses the values of positions to infer the shapes.
    update: A function to update a neighbor list given a new set of positions
      and a new neighbor list.
  """
  allocate: Callable[..., NeighborList] = dataclasses.static_field()
  update: Callable[[Array, NeighborList],
                   NeighborList] = dataclasses.static_field()

  def __call__(self,
               R: Array,
               neighbor_list: Optional[NeighborList]=None,
               extra_capacity: int=0,
               **kwargs) -> NeighborList:
    """A function for backward compatibility with previous neighbor lists.

    Attributes:
      R: An `(N, dim)` array of particle positions.
      neighbor_list: An optional neighbor list object. If it is provided then
        the function updates the neighbor list, otherwise it allocates a new
        neighbor list.
      extra_capacity: Extra capacity to add if allocating the neighbor list.
    """
    logging.warning('Using a deprecated code path to create / update neighbor '
                    'lists. It will be removed in a later version of JAX MD. '
                    'Using `neighbor_fn.allocate` and `neighbor_fn.update` '
                    'is preferred.')
    if neighbor_list is None:
      return self.allocate(R, extra_capacity, **kwargs)
    return self.update(R, neighbor_list, **kwargs)

  def __iter__(self):
    return iter((self.allocate, self.update))


NeighborFn = Callable[[Array, Optional[NeighborList], Optional[int]],
                      NeighborList]


def neighbor_list(displacement_or_metric: DisplacementOrMetricFn,
                  box_size: Box,
                  r_cutoff: float,
                  dr_threshold: float,
                  capacity_multiplier: float=1.25,
                  disable_cell_list: bool=False,
                  mask_self: bool=True,
                  fractional_coordinates: bool=False,
                  format: NeighborListFormat=NeighborListFormat.Dense,
                  **static_kwargs) -> NeighborFn:
  """Returns a function that builds a list of neighbors for collections of
  points.

  Neighbor lists must balance the need to be jit compatible with the fact that
  under a jit the maximum number of neighbors cannot change (owing to static
  shape requirements). To deal with this, our `neighbor_list` returns a
  `NeighborListFns` object that contains two functions: 1)
  `neighbor_fn.allocate` creates a new neighbor list and 2)
  `neighbor_fn.update` updates an existing neighbor list. Neighbor lists
  themselves additionally have a convenience `update` member function.

  Note that allocation of a new neighbor list cannot be jit compiled since it
  uses the positions to infer the maximum number of neighbors (along with
  additional space specified by the `capacity_multiplier`). Updating the
  neighbor list can be jit compiled; if the neighbor list capacity is not
  sufficient to store all the neighbors, the `did_buffer_overflow` bit will be
  set to `True` and a new neighbor list will need to be reallocated.

  Here is a typical example of a simulation loop with neighbor lists:

  >>> init_fn, apply_fn = simulate.nve(energy_fn, shift, 1e-3)
  >>> exact_init_fn, exact_apply_fn = simulate.nve(exact_energy_fn, shift,
  >>>                                              1e-3)
  >>>
  >>> nbrs = neighbor_fn.allocate(R)
  >>> state = init_fn(random.PRNGKey(0), R, neighbor_idx=nbrs.idx)
  >>>
  >>> def body_fn(i, state):
  >>>   state, nbrs = state
  >>>   nbrs = nbrs.update(state.position)
  >>>   state = apply_fn(state, neighbor_idx=nbrs.idx)
  >>>   return state, nbrs
  >>>
  >>> step = 0
  >>> for _ in range(20):
  >>>   new_state, nbrs = lax.fori_loop(0, 100, body_fn, (state, nbrs))
  >>>   if nbrs.did_buffer_overflow:
  >>>     nbrs = neighbor_fn.allocate(state.position)
  >>>   else:
  >>>     state = new_state
  >>>     step += 1

  Args:
    displacement: A function `d(R_a, R_b)` that computes the displacement
      between pairs of points.
    box_size: Either a float specifying the size of the box or an array of
      shape [spatial_dim] specifying the box size in each spatial dimension.
    r_cutoff: A scalar specifying the neighborhood radius.
    dr_threshold: A scalar specifying the maximum distance particles can move
      before rebuilding the neighbor list.
    capacity_multiplier: A floating point scalar specifying the fractional
      increase in maximum neighborhood occupancy we allocate compared with the
      maximum in the example positions.
    disable_cell_list: An optional boolean. If set to True then the neighbor
      list is constructed using only distances. This can be useful for
      debugging but should generally be left as False.
    mask_self: An optional boolean. Determines whether points can consider
      themselves to be their own neighbors.
    fractional_coordinates: An optional boolean. Specifies whether positions
      will be supplied in fractional coordinates in the unit cube, [0, 1]^d.
      If this is set to True then the box_size will be set to 1.0 and the
      cell size used in the cell list will be set to cutoff / box_size.
    format: The format of the neighbor list; see the NeighborListFormat enum
      for details about the different choices for formats. Defaults to
      `Dense`.
    **static_kwargs: kwargs that get threaded through the calculation of
      example positions.
  Returns:
    A pair. The first element is a NeighborList containing the current
    neighbor list. The second element contains a function
    `neighbor_list_fn(R, neighbor_list=None)` that will update the neighbor
    list. If neighbor_list is None then the function will construct a new
    neighbor list whose capacity is inferred from R. If neighbor_list is given
    then it will update the neighbor list (with fixed capacity) if any
    particle has moved more than dr_threshold / 2. Note that only
    `neighbor_list_fn(R, neighbor_list)` can be `jit` since it keeps array
    shapes fixed.
  """
  is_format_valid(format)
  box_size = lax.stop_gradient(box_size)
  r_cutoff = lax.stop_gradient(r_cutoff)
  dr_threshold = lax.stop_gradient(dr_threshold)

  box_size = f32(box_size)

  cutoff = r_cutoff + dr_threshold
  cutoff_sq = cutoff ** 2
  threshold_sq = (dr_threshold / f32(2)) ** 2
  metric_sq = _displacement_or_metric_to_metric_sq(displacement_or_metric)

  cell_size = cutoff
  if fractional_coordinates:
    cell_size = cutoff / box_size
    box_size = f32(1)

  use_cell_list = jnp.all(cell_size < box_size / 3.) and not disable_cell_list

  @jit
  def candidate_fn(R, **kwargs):
    return jnp.broadcast_to(jnp.reshape(jnp.arange(R.shape[0]),
                                        (1, R.shape[0])),
                            (R.shape[0], R.shape[0]))

  @jit
  def cell_list_candidate_fn(cl, R, **kwargs):
    N, dim = R.shape

    R = cl.position_buffer
    idx = cl.id_buffer

    cell_idx = [idx]

    for dindex in _neighboring_cells(dim):
      if onp.all(dindex == 0):
        continue
      cell_idx += [_shift_array(idx, dindex)]

    cell_idx = jnp.concatenate(cell_idx, axis=-2)
    cell_idx = cell_idx[..., jnp.newaxis, :, :]
    cell_idx = jnp.broadcast_to(cell_idx,
                                idx.shape[:-1] + cell_idx.shape[-2:])

    def copy_values_from_cell(value, cell_value, cell_id):
      scatter_indices = jnp.reshape(cell_id, (-1,))
      cell_value = jnp.reshape(cell_value, (-1,) + cell_value.shape[-2:])
      return ops.index_update(value, scatter_indices, cell_value)

    # NOTE(schsam): Currently, this makes a verlet list that is larger than
    # needed since the idx buffer inherits its size from the cell-list. In
    # three-dimensions this seems to translate into an occupancy of ~70%. We
    # can make this more efficient by shrinking the verlet list at the cost
    # of another sort. However, this seems possibly less efficient than just
    # computing everything.

    neighbor_idx = jnp.zeros((N + 1,) + cell_idx.shape[-2:], jnp.int32)
    neighbor_idx = copy_values_from_cell(neighbor_idx, cell_idx, idx)
    return neighbor_idx[:-1, :, 0]

  @jit
  def mask_self_fn(idx):
    self_mask = idx == jnp.reshape(jnp.arange(idx.shape[0]),
                                   (idx.shape[0], 1))
    return jnp.where(self_mask, idx.shape[0], idx)

  @jit
  def prune_neighbor_list_dense(R, idx, **kwargs):
    d = partial(metric_sq, **kwargs)
    d = space.map_neighbor(d)

    N = R.shape[0]
    neigh_R = R[idx]
    dR = d(R, neigh_R)

    mask = (dR < cutoff_sq) & (idx < N)
    out_idx = N * jnp.ones(idx.shape, jnp.int32)

    cumsum = jnp.cumsum(mask, axis=1)
    index = jnp.where(mask, cumsum - 1, idx.shape[1] - 1)
    p_index = jnp.arange(idx.shape[0])[:, None]
    out_idx = out_idx.at[p_index, index].set(idx)
    max_occupancy = jnp.max(cumsum[:, -1])

    return out_idx[:, :-1], max_occupancy

  @jit
  def prune_neighbor_list_sparse(R, idx, **kwargs):
    d = partial(metric_sq, **kwargs)
    d = space.map_bond(d)

    N = R.shape[0]
    sender_idx = jnp.broadcast_to(jnp.arange(N)[:, None], idx.shape)

    sender_idx = jnp.reshape(sender_idx, (-1,))
    receiver_idx = jnp.reshape(idx, (-1,))
    dR = d(R[sender_idx], R[receiver_idx])

    mask = (dR < cutoff_sq) & (receiver_idx < N)
    if format is NeighborListFormat.OrderedSparse:
      mask = mask & (receiver_idx < sender_idx)

    out_idx = N * jnp.ones(receiver_idx.shape, jnp.int32)

    cumsum = jnp.cumsum(mask)
    index = jnp.where(mask, cumsum - 1, len(receiver_idx) - 1)
    receiver_idx = out_idx.at[index].set(receiver_idx)
    sender_idx = out_idx.at[index].set(sender_idx)
    max_occupancy = cumsum[-1]

    return jnp.stack((receiver_idx[:-1], sender_idx[:-1])), max_occupancy

  def neighbor_list_fn(R: Array,
                       neighbor_list: Optional[NeighborList]=None,
                       extra_capacity: int=0,
                       **kwargs) -> NeighborList:
    nbrs = neighbor_list

    def neighbor_fn(R_and_overflow, max_occupancy=None):
      R, overflow = R_and_overflow
      N = R.shape[0]

      if cell_list_fn is not None:
        cl = cell_list_fn(R)
        idx = cell_list_candidate_fn(cl, R, **kwargs)
      else:
        idx = candidate_fn(R, **kwargs)

      if mask_self:
        idx = mask_self_fn(idx)

      if is_sparse(format):
        idx, occupancy = prune_neighbor_list_sparse(R, idx, **kwargs)
      else:
        idx, occupancy = prune_neighbor_list_dense(R, idx, **kwargs)

      if max_occupancy is None:
        _extra_capacity = (extra_capacity if not is_sparse(format)
                           else N * extra_capacity)
        max_occupancy = int(occupancy * capacity_multiplier + _extra_capacity)
        if max_occupancy > R.shape[0] and not is_sparse(format):
          max_occupancy = R.shape[0]
      padding = max_occupancy - occupancy
      if max_occupancy > occupancy:
        idx = jnp.concatenate(
          [idx, N * jnp.ones((idx.shape[0], padding), dtype=idx.dtype)],
          axis=1)
      idx = idx[:, :max_occupancy]
      update_fn = (neighbor_list_fn if neighbor_list is None else
                   neighbor_list.update_fn)
      return NeighborList(
          idx,
          R,
          jnp.logical_or(overflow, (max_occupancy < occupancy)),
          max_occupancy,
          format,
          cell_list_fn,
          update_fn)  # pytype: disable=wrong-arg-count

    if nbrs is None:
      cell_list_fn = (cell_list(box_size, cell_size, R, capacity_multiplier)
                      if use_cell_list else None)
      return neighbor_fn((R, False))
    else:
      cell_list_fn = nbrs.cell_list_fn

    neighbor_fn = partial(neighbor_fn, max_occupancy=nbrs.max_occupancy)

    d = partial(metric_sq, **kwargs)
    d = vmap(d)

    return lax.cond(
      jnp.any(d(R, nbrs.reference_position) > threshold_sq),
      (R, nbrs.did_buffer_overflow),
      neighbor_fn,
      nbrs,
      lambda x: x)

  return NeighborListFns(lambda R, extra_capacity=0, **kwargs:
                         neighbor_list_fn(R,
                                          extra_capacity=extra_capacity,
                                          **kwargs),
                         lambda R, nbrs, **kwargs:  # pytype: disable=wrong-arg-count
                         neighbor_list_fn(R, nbrs, **kwargs))


def neighbor_list_mask(neighbor: NeighborList,
                       mask_self: bool=False) -> Array:
  """Compute a mask for neighbor list."""
  if is_sparse(neighbor.format):
    mask = neighbor.idx[0] < len(neighbor.reference_position)

    if mask_self:
      mask = mask & (neighbor.idx[0] != neighbor.idx[1])

    return mask

  mask = neighbor.idx < len(neighbor.idx)

  if mask_self:
    N = len(neighbor.reference_position)
    self_mask = neighbor.idx != jnp.reshape(jnp.arange(N), (N, 1))
    mask = mask & self_mask

  return mask


def to_jraph(neighbor: NeighborList, mask: Array=None) -> jraph.GraphsTuple:
  """Convert a sparse neighbor list to a `jraph.GraphsTuple`.

  As in jraph, padding here is accomplished by adding a fictitious graph with
  a single node.

  Args:
    neighbor: A neighbor list that we will convert to the jraph format. Must
      be sparse.
    mask: An optional mask on the edges.

  Returns:
    A `jraph.GraphsTuple` that contains the topology of the neighbor list.
  """
  if not is_sparse(neighbor.format):
    raise ValueError('Cannot convert a dense neighbor list to jraph format. '
                     'Please use either NeighborListFormat.Sparse or '
                     'NeighborListFormat.OrderedSparse.')

  receivers, senders = neighbor.idx
  N = len(neighbor.reference_position)

  _mask = neighbor_list_mask(neighbor)

  if mask is not None:
    _mask = _mask & mask

  cumsum = jnp.cumsum(_mask)
  index = jnp.where(_mask, cumsum - 1, len(receivers))
  ordered = N * jnp.ones((len(receivers) + 1,), jnp.int32)
  receivers = ordered.at[index].set(receivers)[:-1]
  senders = ordered.at[index].set(senders)[:-1]
  mask = receivers < N

  return jraph.GraphsTuple(
      nodes=None,
      edges=None,
      receivers=receivers,
      senders=senders,
      globals=None,
      n_node=jnp.array([N, 1]),
      n_edge=jnp.array([jnp.sum(_mask), jnp.sum(~_mask)]),
  )


def to_dense(neighbor: NeighborList) -> Array:
  """Converts a sparse neighbor list to dense ids. Cannot be JIT."""
  if neighbor.format is not Sparse:
    raise ValueError('Can only convert sparse neighbor lists to dense ones.')

  receivers, senders = neighbor.idx
  mask = neighbor_list_mask(neighbor)

  receivers = receivers[mask]
  senders = senders[mask]

  N = len(neighbor.reference_position)
  count = ops.segment_sum(jnp.ones(len(receivers), jnp.int32), receivers, N)
  max_count = jnp.max(count)
  offset = jnp.tile(jnp.arange(max_count), N)[:len(senders)]
  hashes = senders * max_count + offset
  dense_idx = N * jnp.ones((N * max_count,), jnp.int32)
  dense_idx = dense_idx.at[hashes].set(receivers).reshape((N, max_count))
  return dense_idx


Dense = NeighborListFormat.Dense
Sparse = NeighborListFormat.Sparse
OrderedSparse = NeighborListFormat.OrderedSparse
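A hedged end-to-end sketch of the allocate/update cycle the `neighbor_list` docstring describes, on a toy random system; the box size, cutoff values, and particle count are arbitrary assumptions:

# Sketch: box size, cutoffs and particle count are arbitrary assumptions.
from jax import random
import jax.numpy as jnp
from jax_md import space, partition

box_size = 10.0
displacement, shift = space.periodic(box_size)

key = random.PRNGKey(0)
R = random.uniform(key, (128, 2), maxval=box_size)

neighbor_fn = partition.neighbor_list(
    displacement, box_size, r_cutoff=1.5, dr_threshold=0.5)

nbrs = neighbor_fn.allocate(R)    # infers capacity; cannot be jit compiled
R = shift(R, 0.01 * random.normal(key, R.shape))
nbrs = nbrs.update(R)             # jit compatible; capacity stays fixed
if nbrs.did_buffer_overflow:      # reallocate with a larger buffer on overflow
    nbrs = neighbor_fn.allocate(R)

The dr_threshold slack is what lets `update` skip a rebuild until some particle has moved more than dr_threshold / 2 from its reference position.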
38.34229
85
0.681728
4,736
32,821
4.559755
0.140203
0.036119
0.023339
0.005186
0.181848
0.11762
0.083862
0.058115
0.047696
0.040472
0
0.010211
0.227141
32,821
855
86
38.387135
0.841126
0.363334
0
0.133891
0
0
0.052335
0.005581
0
0
0
0.00117
0.002092
1
0.060669
false
0
0.033473
0.008368
0.205021
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0790c876aeebfd734072b2709676f53a6053af06
964
py
Python
rhucrl_experiments/evaluate/launch_evaluate_mass.py
sebascuri/rhucrl
27663e1302f3bbc636dff28495c6f2667bb7c1da
[ "MIT" ]
1
2021-11-19T11:46:48.000Z
2021-11-19T11:46:48.000Z
rhucrl_experiments/evaluate/launch_evaluate_mass.py
sebascuri/rhucrl
27663e1302f3bbc636dff28495c6f2667bb7c1da
[ "MIT" ]
1
2021-11-22T07:48:03.000Z
2021-11-22T07:48:03.000Z
rhucrl_experiments/evaluate/launch_evaluate_mass.py
sebascuri/rhucrl
27663e1302f3bbc636dff28495c6f2667bb7c1da
[ "MIT" ]
1
2022-03-26T10:18:01.000Z
2022-03-26T10:18:01.000Z
"""Run from rhucrl_experiments.evaluate folder.""" import socket from lsf_runner import init_runner, make_commands from rhucrl_experiments.evaluate.utilities import ENVIRONMENTS RARL_DIR = "../../runs/RARLAgent" ZERO_SUM_DIR = "../../runs/ZeroSumAgent" SCRIPT = "evaluate_mass_change.py" EXPERIMENTS = { "supermodularity": {"algorithm": "RARL_MF", "base-dir": RARL_DIR}, "shallow": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR}, "greedy": {"algorithm": "RHUCRL", "base-dir": ZERO_SUM_DIR}, "lazy": {"algorithm": "HUCRL", "base-dir": RARL_DIR}, }.get(socket.gethostname(), {"algorithm": "RARL", "base-dir": RARL_DIR}) runner = init_runner("EvaluateMassChange.", num_threads=4) for seed in [0, 1, 2, 3, 4]: base_args = {"num-runs": 10, "seed": seed} base_args.update(**EXPERIMENTS) commands = make_commands( SCRIPT, base_args=base_args, common_hyper_args={"environment": ENVIRONMENTS} ) runner.run_batch(commands)
37.076923
84
0.693983
121
964
5.297521
0.446281
0.054602
0.046802
0.065523
0.099844
0.099844
0.099844
0
0
0
0
0.009569
0.13278
964
25
85
38.56
0.757177
0.045643
0
0
0
0
0.276805
0.050328
0
0
0
0
0
1
0
false
0
0.15
0
0.15
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07915aac7804509d8daa27f0a06836f1ec42314d
739
py
Python
src/sentry/api/endpoints/project_tags.py
seukjung/sentry-custom
c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963
[ "BSD-3-Clause" ]
1
2021-08-10T06:07:13.000Z
2021-08-10T06:07:13.000Z
src/sentry/api/endpoints/project_tags.py
fotinakis/sentry
c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c
[ "BSD-3-Clause" ]
8
2019-12-28T23:49:55.000Z
2022-03-02T04:34:18.000Z
src/sentry/api/endpoints/project_tags.py
fotinakis/sentry
c5cfa5c5e47475bf5ef41e702548c2dfc7bb8a7c
[ "BSD-3-Clause" ]
1
2017-04-08T04:09:18.000Z
2017-04-08T04:09:18.000Z
from __future__ import absolute_import

import six

from rest_framework.response import Response

from sentry.api.bases.project import ProjectEndpoint
from sentry.models import TagKey, TagKeyStatus


class ProjectTagsEndpoint(ProjectEndpoint):
    def get(self, request, project):
        tag_keys = TagKey.objects.filter(
            project=project,
            status=TagKeyStatus.VISIBLE,
        )

        data = []
        for tag_key in tag_keys:
            data.append({
                'id': six.text_type(tag_key.id),
                'key': TagKey.get_standardized_key(tag_key.key),
                'name': tag_key.get_label(),
                'uniqueValues': tag_key.values_seen,
            })

        return Response(data)
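Per the serialization loop above, the endpoint returns a JSON list with one object per visible tag key; the field names come from the code, the values here are illustrative:

[
    {"id": "42", "key": "browser", "name": "Browser", "uniqueValues": 12}
]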
26.392857
64
0.626522
81
739
5.493827
0.530864
0.067416
0
0
0
0
0
0
0
0
0
0
0.288227
739
27
65
27.37037
0.846008
0
0
0
0
0
0.028417
0
0
0
0
0
0
1
0.05
false
0
0.25
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07919b5c6e89a71c84a3a15315c4e1b2767495ce
871
py
Python
examples/02 - callbacks/callbacks.py
TensorTom/async-Eel
d6484b6c5c9f89b64f5119d908fcdf29b173bd57
[ "MIT" ]
9
2019-06-17T22:11:01.000Z
2021-08-30T17:36:13.000Z
examples/02 - callbacks/callbacks.py
TensorTom/async-Eel
d6484b6c5c9f89b64f5119d908fcdf29b173bd57
[ "MIT" ]
4
2020-08-17T18:39:21.000Z
2021-08-29T02:54:23.000Z
examples/02 - callbacks/callbacks.py
TensorTom/async-Eel
d6484b6c5c9f89b64f5119d908fcdf29b173bd57
[ "MIT" ]
3
2020-02-27T03:40:05.000Z
2021-03-09T11:52:32.000Z
from __future__ import print_function  # For Py2/3 compatibility

import async_eel
import random
import asyncio

loop = asyncio.get_event_loop()


@async_eel.expose
async def py_random():
    return random.random()


async def print_num(n):
    """callback of js_random"""
    print('Got this from Javascript:', n)


async def main():
    try:
        async_eel.init('web')
        await async_eel.start('callbacks.html', size=(400, 300))
        # Call Javascript function, and pass explicit callback function
        await async_eel.js_random()(print_num)
        # Do the same with an inline callback
        await async_eel.js_random()(lambda n: print('2Got this from Javascript:', n))
    except Exception:
        import traceback
        traceback.print_exc()


if __name__ == '__main__':
    asyncio.run_coroutine_threadsafe(main(), loop)
    loop.run_forever()
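The `async_eel.js_random()(print_num)` lines use a double call: the first call dispatches to the JavaScript side, and the returned object accepts a completion callback. A toy model of that pattern (illustrative only, not the real async_eel internals):

import random

class JsCall:
    def __init__(self, result):
        self._result = result

    def __call__(self, callback):
        # Invoke the supplied callback with the "JS" result.
        return callback(self._result)

def js_random():
    return JsCall(random.random())

js_random()(lambda n: print('Got this from Javascript:', n))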
22.921053
85
0.686567
117
871
4.846154
0.521368
0.084656
0.068783
0.067019
0.074074
0
0
0
0
0
0
0.013139
0.213548
871
37
86
23.540541
0.814599
0.138921
0
0
0
0
0.105702
0
0
0
0
0
0
1
0
false
0
0.227273
0
0.272727
0.272727
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079304ab36eaaab974694a2714de62dfde9010a0
1,578
py
Python
datacube/index/_api.py
AMA-AC/datacube-core
0d2fe0792cb9298cc93d1a97bbb921cfa59d6f2d
[ "Apache-2.0" ]
2
2019-10-24T15:29:54.000Z
2019-10-24T15:29:58.000Z
datacube/index/_api.py
AMA-AC/datacube-core
0d2fe0792cb9298cc93d1a97bbb921cfa59d6f2d
[ "Apache-2.0" ]
2
2021-03-26T00:37:36.000Z
2021-03-31T20:05:01.000Z
datacube/index/_api.py
PhilipeRLeal/datacube-core
81bed714f2e5cb30a2492f1b0cf3397b79141c3a
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
"""
Access methods for indexing datasets & products.
"""

import logging

from datacube.config import LocalConfig
from datacube.drivers import index_driver_by_name, index_drivers
from .index import Index

_LOG = logging.getLogger(__name__)


def index_connect(local_config=None, application_name=None, validate_connection=True):
    # type: (LocalConfig, str, bool) -> Index
    """
    Create a Data Cube Index that can connect to a PostgreSQL server

    It contains all the required connection parameters, but doesn't actually
    check that the server is available.

    :param application_name: A short, alphanumeric name to identify this application.
    :param datacube.config.LocalConfig local_config: Config object to use. (optional)
    :param validate_connection: Validate database connection and schema immediately
    :rtype: datacube.index.index.Index
    :raises datacube.drivers.postgres._connections.IndexSetupError:
    """
    if local_config is None:
        local_config = LocalConfig.find()

    driver_name = local_config.get('index_driver', 'default')
    index_driver = index_driver_by_name(driver_name)
    if not index_driver:
        raise RuntimeError(
            "No index driver found for %r. %s available: %s" % (
                driver_name, len(index_drivers()), ', '.join(index_drivers())
            )
        )

    return index_driver.connect_to_index(local_config,
                                         application_name=application_name,
                                         validate_connection=validate_connection)
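A hypothetical call following the docstring above; it assumes a reachable PostgreSQL server configured via the standard datacube config file, and the application name is illustrative:

from datacube.index._api import index_connect

index = index_connect(application_name='my-ingest-tool')  # default driver, validated on connect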
35.863636
86
0.697085
186
1,578
5.704301
0.462366
0.072573
0.024505
0.032045
0
0
0
0
0
0
0
0.00082
0.227503
1,578
43
87
36.697674
0.869565
0.393536
0
0
0
0
0.074033
0
0
0
0
0
0
1
0.052632
false
0
0.210526
0
0.315789
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079480b6c82dd74c78bc4155e46e5d2906c6b673
6,560
py
Python
pgarchives/loader/load_message.py
WeilerWebServices/PostgreSQL
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
[ "PostgreSQL" ]
null
null
null
pgarchives/loader/load_message.py
WeilerWebServices/PostgreSQL
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
[ "PostgreSQL" ]
null
null
null
pgarchives/loader/load_message.py
WeilerWebServices/PostgreSQL
ae594ed077bebbad1be3c1d95c38b7c2c2683e8c
[ "PostgreSQL" ]
null
null
null
#!/usr/bin/env python3 # # load_message.py - takes a single email or mbox formatted # file on stdin or in a file and reads it into the database. # import os import sys from optparse import OptionParser from configparser import ConfigParser import psycopg2 from lib.storage import ArchivesParserStorage from lib.mbox import MailboxBreakupParser from lib.exception import IgnorableException from lib.log import log, opstatus from lib.varnish import VarnishPurger def log_failed_message(listid, srctype, src, msg, err): try: msgid = msg.msgid except Exception: msgid = "<unknown>" log.error("Failed to load message (msgid %s) from %s, spec %s: %s" % (msgid, srctype, src, err)) # We also put the data in the db. This happens in the main transaction # so if the whole script dies, it goes away... conn.cursor().execute("INSERT INTO loaderrors (listid, msgid, srctype, src, err) VALUES (%(listid)s, %(msgid)s, %(srctype)s, %(src)s, %(err)s)", { 'listid': listid, 'msgid': msgid, 'srctype': srctype, 'src': src, 'err': str(str(err), 'us-ascii', 'replace'), }) if __name__ == "__main__": optparser = OptionParser() optparser.add_option('-l', '--list', dest='list', help='Name of list to load message for') optparser.add_option('-d', '--directory', dest='directory', help='Load all messages in directory') optparser.add_option('-m', '--mbox', dest='mbox', help='Load all messages in mbox') optparser.add_option('-i', '--interactive', dest='interactive', action='store_true', help='Prompt after each message') optparser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Verbose output') optparser.add_option('--force-date', dest='force_date', help='Override date (used for dates that can\'t be parsed)') optparser.add_option('--filter-msgid', dest='filter_msgid', help='Only process message with given msgid') (opt, args) = optparser.parse_args() if (len(args)): print("No bare arguments accepted") optparser.print_usage() sys.exit(1) if not opt.list: print("List must be specified") optparser.print_usage() sys.exit(1) if opt.directory and opt.mbox: print("Can't specify both directory and mbox!") optparser.print_usage() sys.exit(1) if opt.force_date and (opt.directory or opt.mbox) and not opt.filter_msgid: print("Can't use force_date with directory or mbox - only individual messages") optparser.print_usage() sys.exit(1) if opt.filter_msgid and not (opt.directory or opt.mbox): print("filter_msgid makes no sense without directory or mbox!") optparser.print_usage() sys.exit(1) log.set(opt.verbose) cfg = ConfigParser() cfg.read('%s/archives.ini' % os.path.realpath(os.path.dirname(sys.argv[0]))) try: connstr = cfg.get('db', 'connstr') except Exception: connstr = 'need_connstr' conn = psycopg2.connect(connstr) curs = conn.cursor() # Take an advisory lock to force serialization. # We could do this "properly" by reordering operations and using ON CONFLICT, # but concurrency is not that important and this is easier... 
try: curs.execute("SET statement_timeout='30s'") curs.execute("SELECT pg_advisory_xact_lock(8059944559669076)") except Exception as e: print(("Failed to wait on advisory lock: %s" % e)) sys.exit(1) # Get the listid we're working on curs.execute("SELECT listid FROM lists WHERE listname=%(list)s", { 'list': opt.list }) r = curs.fetchall() if len(r) != 1: log.error("List %s not found" % opt.list) conn.close() sys.exit(1) listid = r[0][0] purges = set() if opt.directory: # Parse all files in directory for x in os.listdir(opt.directory): log.status("Parsing file %s" % x) with open(os.path.join(opt.directory, x)) as f: ap = ArchivesParserStorage() ap.parse(f) if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid): continue try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "directory", os.path.join(opt.directory, x), ap, e) opstatus.failed += 1 continue ap.store(conn, listid) purges.update(ap.purges) if opt.interactive: print("Interactive mode, committing transaction") conn.commit() print("Proceed to next message with Enter, or input a period (.) to stop processing") x = input() if x == '.': print("Ok, aborting!") break print("---------------------------------") elif opt.mbox: if not os.path.isfile(opt.mbox): print("File %s does not exist" % opt.mbox) sys.exit(1) mboxparser = MailboxBreakupParser(opt.mbox) while not mboxparser.EOF: ap = ArchivesParserStorage() msg = next(mboxparser) if not msg: break ap.parse(msg) if opt.filter_msgid and not ap.is_msgid(opt.filter_msgid): continue try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "mbox", opt.mbox, ap, e) opstatus.failed += 1 continue ap.store(conn, listid) purges.update(ap.purges) if mboxparser.returncode(): log.error("Failed to parse mbox:") log.error(mboxparser.stderr_output()) sys.exit(1) else: # Parse single message on stdin ap = ArchivesParserStorage() ap.parse(sys.stdin.buffer) try: ap.analyze(date_override=opt.force_date) except IgnorableException as e: log_failed_message(listid, "stdin", "", ap, e) conn.close() sys.exit(1) ap.store(conn, listid) purges.update(ap.purges) if opstatus.stored: log.log("Stored message with message-id %s" % ap.msgid) conn.commit() conn.close() opstatus.print_status() VarnishPurger(cfg).purge(purges)
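Illustrative invocations built from the optparse options the script defines (list name and file paths are hypothetical):

python3 load_message.py -l pgsql-hackers -m archives.mbox
python3 load_message.py -l pgsql-hackers -d ./messages --verbose
python3 load_message.py -l pgsql-hackers < single_message.eml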
35.846995
150
0.594817
820
6,560
4.687805
0.290244
0.01821
0.020812
0.028616
0.226587
0.19589
0.179501
0.162851
0.137877
0.127732
0
0.007889
0.285061
6,560
182
151
36.043956
0.811727
0.079878
0
0.37931
0
0.013793
0.213183
0.015773
0
0
0
0
0
1
0.006897
false
0
0.068966
0
0.075862
0.117241
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079607c3763787747e103898caecb5035367fd71
1,351
py
Python
lib/dataset/iNaturalist.py
jrcai/ACE
1e2b04d1cf4bb517f107664ac489a1a96e95a4c1
[ "MIT" ]
18
2021-08-06T01:15:32.000Z
2022-03-14T07:09:39.000Z
lib/dataset/iNaturalist.py
jrcai/BagofTricks-LT
d75b195367e3d535d316d134ec4bbef4bb7fcbdd
[ "MIT" ]
2
2021-09-24T03:29:17.000Z
2021-11-22T19:18:58.000Z
lib/dataset/iNaturalist.py
jrcai/BagofTricks-LT
d75b195367e3d535d316d134ec4bbef4bb7fcbdd
[ "MIT" ]
2
2021-10-17T18:09:20.000Z
2021-11-08T04:19:19.000Z
from dataset.baseset import BaseSet
import random, cv2
import numpy as np


class iNaturalist(BaseSet):
    def __init__(self, mode='train', cfg=None, transform=None):
        super(iNaturalist, self).__init__(mode, cfg, transform)
        random.seed(0)
        self.class_dict = self._get_class_dict()

    def __getitem__(self, index):
        if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.mode == 'train':
            assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
            if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
                sample_class = random.randint(0, self.num_classes - 1)
            elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
                sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
            else:
                sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
            sample_indexes = self.class_dict[sample_class]
            index = random.choice(sample_indexes)

        now_info = self.data[index]
        img = self._get_image(now_info)
        image = self.transform(img)
        meta = dict()
        image_label = now_info['category_id']  # 0-index
        return image, image_label, meta
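The "square" branch above draws a class using probabilities `self.square_p` supplied by `BaseSet`. A common construction for such weights is square-root smoothing of per-class counts; this sketch shows that scheme under the assumption that it matches what `BaseSet` computes:

import numpy as np

counts = np.array([900.0, 90.0, 9.0])            # long-tailed class sizes (toy data)
square_p = np.sqrt(counts) / np.sqrt(counts).sum()  # flatter than raw frequencies
cls = np.random.choice(len(counts), p=square_p)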
32.95122
104
0.623982
167
1,351
4.814371
0.347305
0.034826
0.059701
0.094527
0.299751
0.271144
0.271144
0.129353
0.129353
0.129353
0
0.00504
0.265729
1,351
40
105
33.775
0.805444
0.005181
0
0
0
0
0.057364
0
0
0
0
0
0.04
1
0.08
false
0
0.12
0
0.28
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079659eddea08dcb5566544e99167b54c9eb8c33
27,964
py
Python
tests/test_conferences.py
mattclark/osf.io
7a362ceb6af3393d3d0423aafef336ee13277303
[ "Apache-2.0" ]
null
null
null
tests/test_conferences.py
mattclark/osf.io
7a362ceb6af3393d3d0423aafef336ee13277303
[ "Apache-2.0" ]
80
2015-02-25T15:12:15.000Z
2015-06-11T18:44:55.000Z
tests/test_conferences.py
mattclark/osf.io
7a362ceb6af3393d3d0423aafef336ee13277303
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import mock from nose.tools import * # noqa (PEP8 asserts) import hmac import hashlib from StringIO import StringIO from django.core.exceptions import ValidationError from django.db import IntegrityError import furl from framework.auth import get_or_create_user from framework.auth.core import Auth from osf.models import OSFUser, AbstractNode from addons.wiki.models import WikiVersion from osf.exceptions import BlacklistedEmailError from website import settings from website.conferences import views from website.conferences import utils, message from website.util import api_url_for, web_url_for from tests.base import OsfTestCase, fake from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory def assert_absolute(url): parsed_domain = furl.furl(settings.DOMAIN) parsed_url = furl.furl(url) assert_equal(parsed_domain.host, parsed_url.host) def assert_equal_urls(first, second): parsed_first = furl.furl(first) parsed_first.port = None parsed_second = furl.furl(second) parsed_second.port = None assert_equal(parsed_first, parsed_second) def create_fake_conference_nodes(n, conference): nodes = [] for i in range(n): node = ProjectFactory(is_public=True) conference.submissions.add(node) node.save() nodes.append(node) return nodes def create_fake_conference_nodes_bad_data(conference, n, bad_n, endpoint): nodes = [] for i in range(n): node = ProjectFactory(is_public=True) conference.submissions.add(node) # inject bad data if i < bad_n: # Delete only contributor node.contributor_set.filter(user=node.contributors.first()).delete() node.save() nodes.append(node) return nodes class TestConferenceUtils(OsfTestCase): def test_get_or_create_user_exists(self): user = UserFactory() fetched, created = get_or_create_user(user.fullname, user.username, is_spam=True) assert_false(created) assert_equal(user._id, fetched._id) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_not_exists(self): fullname = 'Roger Taylor' username = 'roger@queen.com' fetched, created = get_or_create_user(fullname, username, is_spam=False) fetched.save() # in order to access m2m fields, e.g. tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_false('is_spam' in fetched.system_tags) def test_get_or_create_user_is_spam(self): fullname = 'John Deacon' username = 'deacon@queen.com' fetched, created = get_or_create_user(fullname, username, is_spam=True) fetched.save() # in order to access m2m fields, e.g. 
tags assert_true(created) assert_equal(fetched.fullname, fullname) assert_equal(fetched.username, username) assert_true('is_spam' in fetched.system_tags) def test_get_or_create_user_with_blacklisted_domain(self): fullname = 'Kanye West' username = 'kanye@mailinator.com' with assert_raises(BlacklistedEmailError) as e: get_or_create_user(fullname, username, is_spam=True) assert_equal(e.exception.message, 'Invalid Email') class ContextTestCase(OsfTestCase): MAILGUN_API_KEY = 'mailkimp' @classmethod def setUpClass(cls): super(ContextTestCase, cls).setUpClass() settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY @classmethod def tearDownClass(cls): super(ContextTestCase, cls).tearDownClass() settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY def make_context(self, method='POST', **kwargs): data = { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), } data.update(kwargs.pop('data', {})) data = { key: value for key, value in data.items() if value is not None } return self.app.app.test_request_context(method=method, data=data, **kwargs) class TestProvisionNode(ContextTestCase): def setUp(self): super(TestProvisionNode, self).setUp() self.node = ProjectFactory() self.user = self.node.creator self.conference = ConferenceFactory() self.body = 'dragon on my back' self.content = 'dragon attack' self.attachment = StringIO(self.content) self.recipient = '{0}{1}-poster@osf.io'.format( 'test-' if settings.DEV_MODE else '', self.conference.endpoint, ) def make_context(self, **kwargs): data = { 'attachment-count': '1', 'attachment-1': (self.attachment, 'attachment-1'), 'X-Mailgun-Sscore': 0, 'recipient': self.recipient, 'stripped-text': self.body, } data.update(kwargs.pop('data', {})) return super(TestProvisionNode, self).make_context(data=data, **kwargs) def test_provision(self): with self.make_context(): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_true(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_in(self.conference.endpoint, self.node.system_tags) assert self.node in self.conference.submissions.all() assert_not_in('spam', self.node.system_tags) def test_provision_private(self): self.conference.public_projects = False self.conference.save() with self.make_context(): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_not_in('spam', self.node.system_tags) def test_provision_spam(self): with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}): msg = message.ConferenceMessage() utils.provision_node(self.conference, msg, self.node, self.user) assert_false(self.node.is_public) assert_in(self.conference.admins.first(), self.node.contributors) assert_in('emailed', self.node.system_tags) assert_in('spam', self.node.system_tags) @mock.patch('website.conferences.utils.waterbutler_api_url_for') @mock.patch('website.conferences.utils.requests.put') def test_upload(self, mock_put, mock_get_url): mock_get_url.return_value = 'http://queen.com/' file_name = 'hammer-to-fall' self.attachment.filename = file_name self.attachment.content_type = 'application/json' 
utils.upload_attachment(self.user, self.node, self.attachment) mock_get_url.assert_called_with( self.node._id, 'osfstorage', _internal=True, base_url=self.node.osfstorage_region.waterbutler_url, cookie=self.user.get_or_create_cookie(), name=file_name ) mock_put.assert_called_with( mock_get_url.return_value, data=self.content, ) @mock.patch('website.conferences.utils.waterbutler_api_url_for') @mock.patch('website.conferences.utils.requests.put') def test_upload_no_file_name(self, mock_put, mock_get_url): mock_get_url.return_value = 'http://queen.com/' self.attachment.filename = '' self.attachment.content_type = 'application/json' utils.upload_attachment(self.user, self.node, self.attachment) mock_get_url.assert_called_with( self.node._id, 'osfstorage', _internal=True, base_url=self.node.osfstorage_region.waterbutler_url, cookie=self.user.get_or_create_cookie(), name=settings.MISSING_FILE_NAME, ) mock_put.assert_called_with( mock_get_url.return_value, data=self.content, ) @mock.patch('website.conferences.utils.upload_attachments') def test_add_poster_by_email(self, mock_upload_attachments): conference = ConferenceFactory() with self.make_context(data={'from': 'bdawk@sb52champs.com', 'subject': 'It\'s PARTY TIME!'}): msg = message.ConferenceMessage() views.add_poster_by_email(conference, msg) user = OSFUser.objects.get(username='bdawk@sb52champs.com') assert user.email == 'bdawk@sb52champs.com' assert user.fullname == user._id # user's shouldn't be able to use email as fullname, so we use the guid. class TestMessage(ContextTestCase): PUSH_CONTEXT = False def test_verify_signature_valid(self): with self.make_context(): msg = message.ConferenceMessage() msg.verify_signature() def test_verify_signature_invalid(self): with self.make_context(data={'signature': 'fake'}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.verify_signature() def test_is_spam_false_missing_headers(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1}, ) with ctx: msg = message.ConferenceMessage() assert not msg.is_spam def test_is_spam_false_all_headers(self): ctx = self.make_context( method='POST', data={ 'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1, 'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0], 'X-Mailgun-Spf': message.SPF_PASS_VALUES[0], }, ) with ctx: msg = message.ConferenceMessage() assert not msg.is_spam def test_is_spam_true_sscore(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_is_spam_true_dkim(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_is_spam_true_spf(self): ctx = self.make_context( method='POST', data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]}, ) with ctx: msg = message.ConferenceMessage() assert msg.is_spam def test_subject(self): ctx = self.make_context( method='POST', data={'subject': 'RE: Hip Hopera'}, ) with ctx: msg = message.ConferenceMessage() assert_equal(msg.subject, 'Hip Hopera') def test_recipient(self): address = 'test-conference@osf.io' ctx = self.make_context( method='POST', data={'recipient': address}, ) with ctx: msg = message.ConferenceMessage() assert_equal(msg.recipient, address) def test_text(self): text = 'welcome to my nuclear family' ctx = self.make_context( 
method='POST', data={'stripped-text': text}, ) with ctx: msg = message.ConferenceMessage() assert_equal(msg.text, text) def test_sender_name(self): names = [ (' Fred', 'Fred'), (u'Me䬟', u'Me䬟'), (u'fred@queen.com', u'fred@queen.com'), (u'Fred <fred@queen.com>', u'Fred'), (u'"Fred" <fred@queen.com>', u'Fred'), ] for name in names: with self.make_context(data={'from': name[0]}): msg = message.ConferenceMessage() assert_equal(msg.sender_name, name[1]) def test_sender_email(self): emails = [ (u'fred@queen.com', u'fred@queen.com'), (u'FRED@queen.com', u'fred@queen.com') ] for email in emails: with self.make_context(data={'from': email[0]}): msg = message.ConferenceMessage() assert_equal(msg.sender_email, email[1]) def test_route_invalid_pattern(self): with self.make_context(data={'recipient': 'spam@osf.io'}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_route_invalid_test(self): recipient = '{0}conf-talk@osf.io'.format('' if settings.DEV_MODE else 'stage-') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_route_valid_alternate(self): conf = ConferenceFactory(endpoint='chocolate', active=True) conf.name = 'Chocolate Conference' conf.field_names['submission2'] = 'data' conf.save() recipient = '{0}chocolate-data@osf.io'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() assert_equal(msg.conference_name, 'chocolate') assert_equal(msg.conference_category, 'data') conf.__class__.delete(conf) def test_route_valid_b(self): recipient = '{0}conf-poster@osf.io'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() assert_equal(msg.conference_name, 'conf') assert_equal(msg.conference_category, 'poster') def test_alternate_route_invalid(self): recipient = '{0}chocolate-data@osf.io'.format('test-' if settings.DEV_MODE else '') with self.make_context(data={'recipient': recipient}): self.app.app.preprocess_request() msg = message.ConferenceMessage() with assert_raises(message.ConferenceError): msg.route def test_attachments_count_zero(self): with self.make_context(data={'attachment-count': '0'}): msg = message.ConferenceMessage() assert_equal(msg.attachments, []) def test_attachments_count_one(self): content = 'slightly mad' sio = StringIO(content) ctx = self.make_context( method='POST', data={ 'attachment-count': 1, 'attachment-1': (sio, 'attachment-1'), }, ) with ctx: msg = message.ConferenceMessage() assert_equal(len(msg.attachments), 1) assert_equal(msg.attachments[0].read(), content) class TestConferenceEmailViews(OsfTestCase): def test_redirect_to_meetings_url(self): url = '/presentations/' res = self.app.get(url) assert_equal(res.status_code, 302) res = res.follow() assert_equal(res.request.path, '/meetings/') def test_conference_submissions(self): AbstractNode.objects.all().delete() conference1 = ConferenceFactory() conference2 = ConferenceFactory() # Create conference nodes create_fake_conference_nodes( 3, conference1, ) create_fake_conference_nodes( 2, conference2, ) url = api_url_for('conference_submissions') res = self.app.get(url) assert_equal(res.json['success'], True) def test_conference_plain_returns_200(self): conference = ConferenceFactory() url 
= web_url_for('conference_results__plain', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) def test_conference_data(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) # Regression for OSF-8864 to confirm bad project data does not make whole conference break def test_conference_bad_data(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 n_conference_nodes_bad = 1 create_fake_conference_nodes_bad_data( conference, n_conference_nodes, n_conference_nodes_bad, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes - n_conference_nodes_bad) def test_conference_data_url_upper(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint.upper()) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) def test_conference_data_tag_upper(self): conference = ConferenceFactory() # Create conference nodes n_conference_nodes = 3 create_fake_conference_nodes( n_conference_nodes, conference, ) # Create a non-conference node ProjectFactory() url = api_url_for('conference_data', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) assert_equal(len(res.json), n_conference_nodes) def test_conference_results(self): conference = ConferenceFactory() url = web_url_for('conference_results', meeting=conference.endpoint) res = self.app.get(url) assert_equal(res.status_code, 200) def test_confererence_results_endpoint_is_case_insensitive(self): ConferenceFactory(endpoint='StudySwap') url = web_url_for('conference_results', meeting='studyswap') res = self.app.get(url) assert_equal(res.status_code, 200) class TestConferenceModel(OsfTestCase): def test_endpoint_is_required(self): with assert_raises(IntegrityError): ConferenceFactory(endpoint=None, name=fake.company()).save() def test_name_is_required(self): with assert_raises(IntegrityError): ConferenceFactory(endpoint='spsp2014', name=None).save() def test_default_field_names(self): conf = ConferenceFactory(endpoint='cookie', name='Cookies Conference') conf.save() assert_equal(conf.field_names['submission1'], 'poster') assert_equal(conf.field_names['mail_subject'], 'Presentation title') def test_conference_valid_submissions(self): conf = ConferenceFactory(endpoint='Hamburgers', name='Hamburger conference') conf.save() # 3 good nodes added create_fake_conference_nodes(3, conf) # Deleted node added deleted_node = ProjectFactory(is_public=True) deleted_node.is_deleted = True deleted_node.save() conf.submissions.add(deleted_node) # Private node added private_node = ProjectFactory(is_public=False) conf.submissions.add(private_node) assert_equal(conf.submissions.count(), 5) assert_equal(conf.valid_submissions.count(), 3) class TestConferenceIntegration(ContextTestCase): @mock.patch('website.conferences.views.send_mail') 
@mock.patch('website.conferences.utils.upload_attachments') def test_integration(self, mock_upload, mock_send_mail): fullname = 'John Deacon' username = 'deacon@queen.com' title = 'good songs' conference = ConferenceFactory() body = 'dragon on my back' content = 'dragon attack' recipient = '{0}{1}-poster@osf.io'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(fullname, username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert_true(mock_upload.called) users = OSFUser.objects.filter(username=username) assert_equal(users.count(), 1) nodes = AbstractNode.objects.filter(title=title) assert_equal(nodes.count(), 1) node = nodes[0] assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body) assert_true(mock_send_mail.called) call_args, call_kwargs = mock_send_mail.call_args assert_absolute(call_kwargs['conf_view_url']) assert_absolute(call_kwargs['set_password_url']) assert_absolute(call_kwargs['profile_url']) assert_absolute(call_kwargs['file_url']) assert_absolute(call_kwargs['node_url']) @mock.patch('website.conferences.views.send_mail') def test_integration_inactive(self, mock_send_mail): conference = ConferenceFactory(active=False) fullname = 'John Deacon' username = 'deacon@queen.com' title = 'good songs' body = 'dragon on my back' recipient = '{0}{1}-poster@osf.io'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) res = self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(fullname, username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, expect_errors=True, ) assert_equal(res.status_code, 406) call_args, call_kwargs = mock_send_mail.call_args assert_equal(call_args, (username, views.CONFERENCE_INACTIVE)) assert_equal(call_kwargs['fullname'], fullname) assert_equal_urls( call_kwargs['presentations_url'], web_url_for('conference_view', _absolute=True), ) @mock.patch('website.conferences.views.send_mail') @mock.patch('website.conferences.utils.upload_attachments') def test_integration_wo_full_name(self, mock_upload, mock_send_mail): username = 'no_full_name@mail.com' title = 'no full name only email' conference = ConferenceFactory() body = 'dragon on my back' content = 'dragon attack' recipient = '{0}{1}-poster@osf.io'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': username, 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert_true(mock_upload.called) users = OSFUser.objects.filter(username=username) assert_equal(users.count(), 1) nodes = 
AbstractNode.objects.filter(title=title) assert_equal(nodes.count(), 1) node = nodes[0] assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body) assert_true(mock_send_mail.called) call_args, call_kwargs = mock_send_mail.call_args assert_absolute(call_kwargs['conf_view_url']) assert_absolute(call_kwargs['set_password_url']) assert_absolute(call_kwargs['profile_url']) assert_absolute(call_kwargs['file_url']) assert_absolute(call_kwargs['node_url']) @mock.patch('website.conferences.views.send_mail') @mock.patch('website.conferences.utils.upload_attachments') def test_create_conference_node_with_same_name_as_existing_node(self, mock_upload, mock_send_mail): conference = ConferenceFactory() user = UserFactory() title = 'Long Live Greg' ProjectFactory(creator=user, title=title) body = 'Greg is a good plant' content = 'Long may they reign.' recipient = '{0}{1}-poster@osf.io'.format( 'test-' if settings.DEV_MODE else '', conference.endpoint, ) self.app.post( api_url_for('meeting_hook'), { 'X-Mailgun-Sscore': 0, 'timestamp': '123', 'token': 'secret', 'signature': hmac.new( key=settings.MAILGUN_API_KEY, msg='{}{}'.format('123', 'secret'), digestmod=hashlib.sha256, ).hexdigest(), 'attachment-count': '1', 'X-Mailgun-Sscore': 0, 'from': '{0} <{1}>'.format(user.fullname, user.username), 'recipient': recipient, 'subject': title, 'stripped-text': body, }, upload_files=[ ('attachment-1', 'attachment-1', content), ], ) assert AbstractNode.objects.filter(title=title, creator=user).count() == 2 assert mock_upload.called assert mock_send_mail.called
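The tests above repeatedly construct a Mailgun-style webhook signature from a timestamp, a token, and the API key. A standalone Python 3 sketch of that computation (the tests pass str values, which worked under Python 2's hmac; bytes are required on Python 3):

import hashlib
import hmac

def mailgun_signature(api_key: str, timestamp: str, token: str) -> str:
    # HMAC-SHA256 over timestamp + token, keyed by the API key.
    return hmac.new(key=api_key.encode(), msg=(timestamp + token).encode(),
                    digestmod=hashlib.sha256).hexdigest()

assert len(mailgun_signature('mailkimp', '123', 'secret')) == 64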
36.506527
114
0.611357
3,099
27,964
5.294934
0.116489
0.032848
0.021939
0.016211
0.679079
0.637211
0.61192
0.583643
0.548967
0.526601
0
0.008919
0.278322
27,964
765
115
36.554248
0.803974
0.022028
0
0.550614
0
0
0.11915
0.025725
0
0
0
0
0.161043
1
0.084356
false
0.009202
0.029141
0
0.133436
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079711a016ea4fa9510f792cc6a93c56f3a684e5
4,060
py
Python
socket_tentacles/__init__.py
innovationgarage/socket-tentacles
1cfbf7649017493fafacfcbc96cd05f3c4c5d6b6
[ "MIT" ]
null
null
null
socket_tentacles/__init__.py
innovationgarage/socket-tentacles
1cfbf7649017493fafacfcbc96cd05f3c4c5d6b6
[ "MIT" ]
null
null
null
socket_tentacles/__init__.py
innovationgarage/socket-tentacles
1cfbf7649017493fafacfcbc96cd05f3c4c5d6b6
[ "MIT" ]
1
2019-11-01T12:38:20.000Z
2019-11-01T12:38:20.000Z
import socketserver
import socket
import sys
import threading
import json
import queue
import time
import datetime
import traceback


class TCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    def server_bind(self):
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)


class Listener(threading.Thread):
    def run(self):
        kwargs = self._kwargs
        print("Listener: Started: %s" % kwargs)
        Handler = self._kwargs["handler"]
        server = self._kwargs["server"]

        class Server(socketserver.BaseRequestHandler):
            def handle(self):
                print("Listener: Connection request received: %s" % kwargs)
                Handler(server, self.request)

        self.server = TCPServer((kwargs["host"], kwargs["port"]), Server)
        self.server.serve_forever()

    def stop(self):
        self.server.shutdown()
        self.server.server_close()


class Connector(threading.Thread):
    def __init__(self, *arg, **kw):
        self.is_stopping = False
        threading.Thread.__init__(self, *arg, **kw)

    def run(self):
        print("Connector: Started: %s" % self._kwargs)
        while not self.is_stopping:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                try:
                    sock.connect((self._kwargs["host"], self._kwargs["port"]))
                    print("Connector: Connected: %s" % self._kwargs)
                    self._kwargs["handler"](self._kwargs["server"], sock)
                except Exception as e:
                    print(e)
                    traceback.print_exc()
            finally:
                sock.close()
            time.sleep(1)

    def stop(self):
        self.is_stopping = True


class Handler(object):
    encoding = "utf-8"
    binary = False
    filemode = "r"

    def __init__(self, server, conn):
        self.server = server
        self.conn = conn
        self.makefile()
        self.handle()

    def makefile(self):
        args = {"mode": self.filemode + ["", "b"][self.binary]}
        if not self.binary:
            args["encoding"] = self.encoding
        self.file = self.conn.makefile(**args)

    def handle(self):
        """self.conn is a socket object, self.file a file wrapper for that socket"""

    def __hash__(self):
        return id(self)


class ReceiveHandler(Handler):
    filemode = "r"


class SendHandler(Handler):
    filemode = "w"


class Server(object):
    def __init__(self, handlers):
        self.handlers = handlers
        self.config = None
        self.servers = {}

    def configure(self, config):
        self.config = config
        connections = {self.connection_key(connection): connection
                       for connection in config["connections"]}
        to_create = connections.keys() - self.servers.keys()
        to_destroy = self.servers.keys() - connections.keys()
        for key in to_create:
            server = self.start_connection(connections[key])
            server.start()
            self.servers[key] = server
        for key in to_destroy:
            server = self.servers.pop(key)
            server.stop()

    def connection_key(self, connection):
        return json.dumps(connection, sort_keys=True, separators=(',', ':'))

    def start_connection(self, connection):
        handler = self.handlers[connection["handler"]]
        addr = connection["address"].split(":")
        assert addr[0] == "tcp"
        host = "0.0.0.0"
        port = 1024
        if len(addr) == 2:
            port = addr[1]
        if len(addr) == 3:
            host, port = addr[1:]
        port = int(port)
        connhandler = {"listen": Listener, "connect": Connector}[connection["type"]]
        return connhandler(kwargs={"server": self, "host": host, "port": port, "handler": handler})


def run(config, handlers):
    server = Server(handlers)
    server.configure(config)
    return server
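A hypothetical configuration and handler pair for run(); the schema is inferred from Server.configure and start_connection above ("address" is "tcp:host:port", "type" is "listen" or "connect", "handler" names a key in the handlers dict):

class LinePrinter(ReceiveHandler):
    def handle(self):
        # self.file is the text-mode wrapper created by Handler.makefile.
        for line in self.file:
            print("received:", line.rstrip())

config = {
    "connections": [
        {"address": "tcp:0.0.0.0:9000", "type": "listen", "handler": "printer"},
    ]
}

server = run(config, {"printer": LinePrinter})  # returns the configured Server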
31.472868
107
0.580049
438
4,060
5.262557
0.276256
0.043384
0.014317
0.017354
0
0
0
0
0
0
0
0.005622
0.299015
4,060
128
108
31.71875
0.804287
0.017241
0
0.09434
0
0
0.062123
0
0
0
0
0
0.009434
1
0.150943
false
0
0.084906
0.018868
0.396226
0.056604
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0797927e3d7bde5eb907276662251361ce156da5
475
py
Python
leehao/learn63.py
pilihaotian/pythonlearning
e84b7766cc9ea8131e9720fb1f06761c9581d0da
[ "Apache-2.0" ]
1
2020-02-26T14:52:17.000Z
2020-02-26T14:52:17.000Z
leehao/learn63.py
pilihaotian/pythonlearning
e84b7766cc9ea8131e9720fb1f06761c9581d0da
[ "Apache-2.0" ]
null
null
null
leehao/learn63.py
pilihaotian/pythonlearning
e84b7766cc9ea8131e9720fb1f06761c9581d0da
[ "Apache-2.0" ]
null
null
null
# Random password from a-z, A-Z, 0-9, and underscore
import random

source = ''
lower_char = [chr(x) for x in range(ord('a'), ord('z') + 1)]
upper_char = [chr(x) for x in range(ord('A'), ord('Z') + 1)]
number_char = [chr(x) for x in range(ord('0'), ord('9') + 1)]
source += "".join(lower_char)
source += "".join(upper_char)
source += "".join(number_char)
source += "_"
print(source)

# Randomly draw a 20-character string that includes the underscore
while True:
    s = "".join(random.sample(source, 20))
    if '_' in s:
        print(s)
        break
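The loop above retries until the sample happens to contain '_'. A variant sketch that guarantees the underscore in a single draw, using the same source alphabet:

import random

chars = random.sample(source.replace("_", ""), 19)  # 19 non-underscore chars
chars.insert(random.randrange(20), "_")             # place '_' at a random position
print("".join(chars))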
23.75
61
0.591579
78
475
3.5
0.423077
0.076923
0.087912
0.120879
0.285714
0.285714
0.285714
0.285714
0.205128
0.205128
0
0.03125
0.191579
475
19
62
25
0.679688
0.075789
0
0
0
0
0.018349
0
0
0
0
0
0
1
0
false
0
0.066667
0
0.066667
0.133333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0799d6264b46d419f66659960f9e80e3239bdd1c
1,237
py
Python
gbic/tests.py
fga-eps-mds/2017.2-SiGI-Op_API
4532019c15414fd17e06bb3aa78501886e00da1d
[ "BSD-3-Clause" ]
6
2017-08-24T13:18:21.000Z
2017-10-03T18:06:13.000Z
gbic/tests.py
fga-gpp-mds/2017.2-Grupo9
4532019c15414fd17e06bb3aa78501886e00da1d
[ "BSD-3-Clause" ]
173
2017-08-31T15:29:01.000Z
2017-12-14T13:40:13.000Z
gbic/tests.py
fga-gpp-mds/2017.2-SiGI-Op_API
4532019c15414fd17e06bb3aa78501886e00da1d
[ "BSD-3-Clause" ]
2
2018-11-19T10:33:00.000Z
2019-06-19T22:35:43.000Z
from django.test import TestCase
from rest_framework.test import APIRequestFactory

from .models import GBIC, GBICType
from .views import GBICListViewSet


# Create your tests here.
class GBICTest(TestCase):
    def test_gbic_view_set(self):
        request = APIRequestFactory().get("")
        gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
        gbic_type_test = GBICType.objects.create(description='muito_bom')
        gbic_test = GBIC.objects.create(
            serial='showdaxuxa',
            patrimony_number='666',
            gbic_type=gbic_type_test
        )
        response = gbic_detail(request, pk=gbic_test.pk)
        self.assertEqual(response.status_code, 200)

    def test_deleted_gbic_view_set(self):
        request = APIRequestFactory().get("")
        gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
        gbic_type_test = GBICType.objects.create(description='muitoruim')
        gbic_test = GBIC.objects.create(
            serial='showdomilhao',
            patrimony_number='777',
            gbic_type=gbic_type_test
        )
        gbic_test.delete()
        response = gbic_detail(request, pk=gbic_test.pk)
        self.assertEqual(response.status_code, 404)
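The second test expects a 404 after delete(), which suggests GBIC uses soft deletion with a default queryset that hides deleted rows. A minimal sketch of that pattern (hypothetical; this is not the project's actual model code):

from django.db import models

class ActiveManager(models.Manager):
    def get_queryset(self):
        # Hide soft-deleted rows from the default queryset.
        return super().get_queryset().filter(is_deleted=False)

class SoftDeleteModel(models.Model):
    is_deleted = models.BooleanField(default=False)
    objects = ActiveManager()

    class Meta:
        abstract = True

    def delete(self, *args, **kwargs):
        self.is_deleted = True  # mark instead of removing the row
        self.save(update_fields=["is_deleted"])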
35.342857
74
0.672595
140
1,237
5.7
0.364286
0.06015
0.06015
0.037594
0.641604
0.591479
0.513784
0.513784
0.513784
0.513784
0
0.012539
0.226354
1,237
34
75
36.382353
0.821317
0.018593
0
0.357143
0
0
0.056106
0
0
0
0
0
0.071429
1
0.071429
false
0
0.142857
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079a7acf6ad24d3b711dca5d0ee7ae05fdfe00c0
23,239
py
Python
fruits/core/fruit.py
alienkrieg/fruits
b3b4b6afd7f97d2d4060909689f9811dc97981ed
[ "MIT" ]
4
2021-10-08T11:14:54.000Z
2021-12-30T13:56:32.000Z
fruits/core/fruit.py
alienkrieg/fruits
b3b4b6afd7f97d2d4060909689f9811dc97981ed
[ "MIT" ]
null
null
null
fruits/core/fruit.py
alienkrieg/fruits
b3b4b6afd7f97d2d4060909689f9811dc97981ed
[ "MIT" ]
null
null
null
import inspect from typing import List, Union, Set, Any import numpy as np from fruits.cache import Cache, CoquantileCache from fruits.scope import force_input_shape, FitTransform from fruits.core.callback import AbstractCallback from fruits.signature.iss import SignatureCalculator, CachePlan from fruits.words.word import Word from fruits.sieving.abstract import FeatureSieve from fruits.preparation.abstract import DataPreparateur class Fruit: """Feature Extractor using iterated sums. A Fruit consists of a number of :class:`~fruits.core.fruit.FruitBranch` objects. At the end of the pipeline, each branch returns their own features and they will be concatenated by this class. A simple example (using two branches): .. code-block:: python fruit = fruits.Fruit("My Fruit") # optional: add preparateurs for preprocessing fruit.add(fruits.preparation.INC) # add words for iterated sums calculation fruit.add(fruits.words.creation.simplewords_by_weight(4)) # choose sieves fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # add a new branch without INC fruit.fork() fruit.add(fruits.words.creation.simplewords_by_weight(4)) fruit.add(fruits.sieving.PPV(0.5)) fruit.add(fruits.sieving.END) # configure the fruit fruit.configure(mode="extended") # fit the fruit on a time series dataset fruit.fit(X_train) # transform the dataset X_train_transformed = fruit.transform(X_train) X_test_tranformed = fruit.transform(X_test) # use the transformed results (features) in a classifier ... The ``fruit`` above will result in ``2*8*2=32`` features per time series. """ def __init__(self, name: str = ""): self.name: str = name # list of FruitBranches self._branches: List[FruitBranch] = [] # pointer for the current branch index self._cbi: int = 0 self._fitted: bool = False @property def name(self) -> str: """Simple identifier for the Fruit object.""" return self._name @name.setter def name(self, name: str): self._name = name def fork(self, branch: "FruitBranch" = None): """Adds a new branch to the pipeline. If none is given, an empty FruitBranch will be created and switched to. :type branch: FruitBranch, optional """ if branch is None: branch = FruitBranch() self._branches.append(branch) self._cbi = len(self._branches) - 1 self._fitted = False def branch(self, index: int = None): """Returns the currently selected branch or the branch with the given index. :rtype: FruitBranch """ if index is None: return self._branches[self._cbi] return self._branches[index] def branches(self) -> list: """Returns all branches of this Fruit object. :rtype: list """ return self._branches def switch_branch(self, index: int): """Switches to the branch with the given index. :param index: Integer in ``[0, 1, ..., len(self.branches())-1]`` :type index: int """ if not (0 <= index < len(self._branches)): raise IndexError("Index has to be in [0, len(self.branches()))") self._cbi = index def add(self, *objects: Union[FitTransform, Word, type]): """Adds one or multiple object(s) to the currently selected branch. :param objects: One or more objects of the following types: - :class:`~fruits.preparation.abstract.DataPreparateur` - :class:`~fruits.words.word.Word` - :class:`~fruits.sieving.abstract.FeatureSieve` :type objects: Union[FitTransform, Word] """ if len(self._branches) == 0: self.fork() self._branches[self._cbi].add(*objects) self._fitted = False def nfeatures(self) -> int: """Returns the total number of features of all branches combined. 
:rtype: int """ return sum([branch.nfeatures() for branch in self._branches]) def configure(self, **kwargs: Any): """Makes changes to the default configuration of a all branches if arguments differ from ``None``. :param kwargs: For possible options, have a look at :meth:`fruits.core.fruit.FruitBranch.configure`. :type kwargs: Any """ for branch in self._branches: branch.configure(**kwargs) def fit(self, X: np.ndarray): """Fits all branches to the given data. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray """ for branch in self._branches: branch.fit(X) self._fitted = True def transform(self, X: np.ndarray, callbacks: List[AbstractCallback] = []) -> np.ndarray: """Returns a two dimensional array of all features from all branches this Fruit object contains. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :param callbacks: List of callbacks. To write your own callback, override the class :class:`~fruits.core.callback.AbstractCallback`., defaults to None :type callbacks: List[AbstractCallback], optional :rtype: np.ndarray :raises: RuntimeError if Fruit.fit wasn't called """ if not self._fitted: raise RuntimeError("Missing call of self.fit") result = np.zeros((X.shape[0], self.nfeatures())) index = 0 for branch in self._branches: for callback in callbacks: callback.on_next_branch() k = branch.nfeatures() result[:, index:index+k] = branch.transform(X, callbacks) index += k result = np.nan_to_num(result, copy=False, nan=0.0) return result def fit_transform(self, X: np.ndarray) -> np.ndarray: """Fits all branches to the given dataset and returns the transformed results of X from all branches. :param X: (Multidimensional) time series dataset :type X: np.ndarray :returns: Two dimensional feature array :rtype: np.ndarray """ self.fit(X) return self.transform(X) def summary(self) -> str: """Returns a summary of this object. The summary contains a summary for each FruitBranch in this Fruit object. :rtype: str """ summary = "{:=^80}".format(f"Summary of fruits.Fruit: '{self.name}'") summary += f"\nBranches: {len(self.branches())}" summary += f"\nFeatures: {self.nfeatures()}" for branch in self.branches(): summary += "\n\n" + branch.summary() summary += "\n{:=^80}".format(f"End of Summary") return summary def copy(self) -> "Fruit": """Creates a shallow copy of this Fruit object. This also creates shallow copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.copy()) return copy_ def deepcopy(self) -> "Fruit": """Creates a deep copy of this Fruit object. This also creates deep copies of all branches in this object. :rtype: Fruit """ copy_ = Fruit(self.name+" (Copy)") for branch in self._branches: copy_.fork(branch.deepcopy()) return copy_ class FruitBranch: """One branch of a Fruit object. A FruitBranch object extracts values from time series data that are somehow representative of the input data. The user can customize any of the following three steps. - Preparing data: Apply functions at the start of the extraction procedure. There are many so called :class:`~fruits.preparation.abstract.DataPreparateur` objects in fruits available for preprocessing. The preparateurs will be applied sequentially to the input data. 
- Calculating Iterated Sums: The preprocessed data is now used to calculate the iterated sums signature for different :class:`~fruits.words.word.Word` objects the user can specify. - Extracting Features: Each :class:`~fruits.sieving.abstract.FeatureSieve` added to the branch will be fitted on the iterated sums from the previous step. The branch then returns an array of numbers (the transformed results from those sieves), i.e. the features for each time series. """ def __init__(self): # lists of used classes for data processing self._preparateurs: list = [] self._words: list = [] self._sieves: list = [] # calculator options used in the ISS calculation self._calculator_options: dict = {"batch_size": 1, "mode": "single"} # list with inner lists containing sieves # all sieves in one list are trained on one specific output # of an ISS-result self._sieves_extended: list = [] # configurations for fitting self._fitted: bool = False self._fit_sample_size: Union[float, int] = 1 # cache that is calculated at fitting and also used in the # transformation process self._cache: Cache def configure(self, mode: str = None, batch_size: int = None, fit_sample_size: Union[float, int] = None): """Makes changes to the default configuration of a fruit branch if arguments differ from ``None``. :param mode: See :meth:`fruits.signature.iss.SignatureCalculator.transform`, defaults to None :type mode: str, optional :param batch_size: See :meth:`~ruits.signature.iss.SignatureCalculator.transform`, defaults to None :type batch_size: int, optional :param fit_sample_size: Size of the random time series sample that is used for fitting. This is represented as a float which will be multiplied by ``X.shape[0]`` or ``1`` for one random time series., defaults to 1 :type fit_sample_size: Union[float, int] """ if mode is not None: self._calculator_options["mode"] = mode if batch_size is not None: self._calculator_options["batch_size"] = batch_size if fit_sample_size is not None: self._fit_sample_size = fit_sample_size def add_preparateur(self, preparateur: DataPreparateur): """Adds a preparateur to the branch. :type preparateur: DataPreparateur """ if not isinstance(preparateur, DataPreparateur): raise TypeError self._preparateurs.append(preparateur) self._fitted = False def get_preparateurs(self) -> List[DataPreparateur]: """Returns a list of all preparateurs added to the branch. :rtype: List[DataPreparateur] """ return self._preparateurs def clear_preparateurs(self): """Removes all preparateurs that were added to this branch.""" self._preparateurs = [] self._fitted = False def add_word(self, word: Word): """Adds a word to the branch. :type word: Word """ if not isinstance(word, Word): raise TypeError self._words.append(word) self._fitted = False def get_words(self) -> List[Word]: """Returns a list of all words in the branch. :rtype: List[Word] """ return self._words def clear_words(self): """Removes all words that were added to this branch.""" self._words = [] self._sieves_extended = [] self._fitted = False def add_sieve(self, sieve: FeatureSieve): """Appends a new feature sieve to the FruitBranch. :type sieve: FeatureSieve """ if not isinstance(sieve, FeatureSieve): raise TypeError self._sieves.append(sieve) self._fitted = False def get_sieves(self) -> List[FeatureSieve]: """Returns a list of all feature sieves added to the branch. 
:rtype: List[FeatureSieve] """ return self._sieves def clear_sieves(self): """Removes all feature sieves that were added to this branch.""" self._sieves = [] self._sieve_prerequisites = None self._sieves_extended = [] self._fitted = False def add(self, *objects: Union[FitTransform, Word, type]): """Adds one or multiple object(s) to the branch. :type objects: One or more objects of the following types: - :class:`~fruits.preparation.abstract.DataPreparateur` - :class:`~fruits.words.word.Word` - :class:`~fruits.sieving.abstract.FeatureSieve` """ objects_flattened = np.array(objects, dtype=object).flatten() for obj in objects_flattened: if inspect.isclass(obj): obj = obj() if isinstance(obj, DataPreparateur): self.add_preparateur(obj) elif isinstance(obj, Word): self.add_word(obj) elif isinstance(obj, FeatureSieve): self.add_sieve(obj) else: raise TypeError("Cannot add variable of type"+str(type(obj))) def clear(self): """Clears all settings, configurations and calculated results the branch has. After the branch is cleared, it has the same settings as a newly created FruitBranch object. """ self.clear_preparateurs() self.clear_words() self.clear_sieves() self._calculator_options = {"batch_size": 1, "mode": "single"} def nfeatures(self) -> int: """Returns the total number of features the current configuration produces. :rtype: int """ if self._calculator_options["mode"] == "extended": return ( sum([s.nfeatures() for s in self._sieves]) * CachePlan(self._words).n_iterated_sums( list(range(len(self._words))) ) ) else: return ( sum([s.nfeatures() for s in self._sieves]) * len(self._words) ) def _compile(self): # checks if the FruitBranch is configured correctly and ready # for fitting if not self._words: raise RuntimeError("No words specified for ISS calculation") if not self._sieves: raise RuntimeError("No FeatureSieve objects specified") def _collect_cache_keys(self) -> Set[str]: # collects cache keys of all FitTransformers in the branch keys: Set[str] = set() for prep in self._preparateurs: prep_keys = prep._get_cache_keys() if 'coquantile' in prep_keys: keys = keys.union(prep_keys['coquantile']) for sieve in self._sieves: sieve_keys = sieve._get_cache_keys() if 'coquantile' in sieve_keys: keys = keys.union(sieve_keys['coquantile']) return keys def _get_cache(self, X: np.ndarray): # returns the already processed cache needed in this branch self._cache = CoquantileCache() self._cache.process(X, list(self._collect_cache_keys())) def _select_fit_sample(self, X: np.ndarray) -> np.ndarray: # returns a sample of the data used for fitting if (isinstance(self._fit_sample_size, int) and self._fit_sample_size == 1): ind = np.random.randint(0, X.shape[0]) return X[ind:ind+1, :, :] else: s = int(self._fit_sample_size * X.shape[0]) if s < 1: s = 1 indices = np.random.choice(X.shape[0], size=s, replace=False) return X[indices, :, :] def fit(self, X: np.ndarray): """Fits the branch to the given dataset. What this action explicitly does depends on the FruitBranch configuration. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. 
:type X: np.ndarray """ self._compile() self._get_cache(X) prepared_data = self._select_fit_sample(X) for prep in self._preparateurs: prep.fit(prepared_data) prepared_data = prep.transform(prepared_data, cache=self._cache) self._sieves_extended = [] iss_calculations = SignatureCalculator().transform( prepared_data, words=self._words, **self._calculator_options )[0] for iterated_data in iss_calculations: iterated_data = iterated_data.reshape(iterated_data.shape[0] * iterated_data.shape[1], iterated_data.shape[2]) sieves_copy = [sieve.copy() for sieve in self._sieves] for sieve in sieves_copy: sieve.fit(iterated_data[:, :]) self._sieves_extended.append(sieves_copy) self._fitted = True def transform(self, X: np.ndarray, callbacks: List[AbstractCallback] = []) -> np.ndarray: """Transforms the given time series dataset. The results are the calculated features for the different time series. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :param callbacks: List of callbacks. To write your own callback, override the class :class:`~fruits.core.callback.AbstractCallback`, defaults to [] :type callbacks: List[AbstractCallback], optional :rtype: np.ndarray :raises: RuntimeError if ``self.fit`` wasn't called """ if not self._fitted: raise RuntimeError("Missing call of self.fit") self._get_cache(X) prepared_data = force_input_shape(X) for prep in self._preparateurs: prepared_data = prep.transform(prepared_data, cache=self._cache) for callback in callbacks: callback.on_preparateur(prepared_data) for callback in callbacks: callback.on_preparation_end(prepared_data) sieved_data = np.zeros((prepared_data.shape[0], self.nfeatures())) k = 0 iss_calculations = SignatureCalculator().transform( prepared_data, words=self._words, **self._calculator_options )[0] for i, iterated_data in enumerate(iss_calculations): for callback in callbacks: callback.on_iterated_sum(iterated_data) for sieve in self._sieves_extended[i]: nf = sieve.nfeatures() new_features = nf * iterated_data.shape[1] for it in range(iterated_data.shape[1]): sieved_data[:, k+it*nf:k+(it+1)*nf] = sieve.transform( iterated_data[:, it, :], cache=self._cache, ) for callback in callbacks: callback.on_sieve(sieved_data[k:k+new_features]) k += new_features for callback in callbacks: callback.on_sieving_end(sieved_data) return sieved_data def fit_transform(self, X: np.ndarray) -> np.ndarray: """This function is equivalent to calling ``self.fit(X)`` and ``self.transform(X)`` consecutively. :param X: (Multidimensional) time series dataset as an array of three dimensions. Have a look at :meth:`~fruits.scope.force_input_shape`. :type X: np.ndarray :returns: Array of features. :rtype: np.ndarray """ self.fit(X) return self.transform(X) def summary(self) -> str: """Returns a summary of this object. The summary contains all added preparateurs, words and sieves. :rtype: str """ summary = "{:-^80}".format("fruits.FruitBranch") summary += f"\nNumber of features: {self.nfeatures()}" summary += f"\n\nPreparateurs ({len(self._preparateurs)}): " if len(self._preparateurs) == 0: summary += "-" else: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._preparateurs]) summary += f"\nIterators ({len(self._words)}): " if len(self._words) == 0: summary += "-" elif len(self._words) > 10: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._words[:9]]) summary += "\n\t..." 
else: summary += "\n\t+ " + \ "\n\t+ ".join([str(x) for x in self._words]) summary += f"\nSieves ({len(self._sieves)}): " if len(self._sieves) == 0: summary += "-" else: for x in self._sieves: lines = x.summary().split("\n") summary += "\n\t+ " + lines[0] summary += "\n\t " summary += "\n\t ".join(lines[1:]) return summary def copy(self) -> "FruitBranch": """Returns a shallow copy of this FruitBranch object. :returns: Copy of the branch with the same settings but with all fitted results erased. :rtype: FruitBranch """ copy_ = FruitBranch() for preparateur in self._preparateurs: copy_.add(preparateur) for iterator in self._words: copy_.add(iterator) for sieve in self._sieves: copy_.add(sieve) return copy_ def deepcopy(self) -> "FruitBranch": """Returns a deep copy of this FruitBranch object. :returns: Deep copy of the branch with the same settings but with all fitted results erased. :rtype: FruitBranch """ copy_ = FruitBranch() for preparateur in self._preparateurs: copy_.add(preparateur.copy()) for iterator in self._words: copy_.add(iterator.copy()) for sieve in self._sieves: copy_.add(sieve.copy()) copy_._calculator_options = self._calculator_options.copy() return copy_
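A minimal usage sketch of the branch API above. The concrete `SimpleWord`, `PPV` and `MAX` import paths are assumptions standing in for real Word and FeatureSieve implementations; they are not confirmed by this file:

import numpy as np
from fruits.words import SimpleWord   # assumed concrete Word implementation
from fruits.sieving import PPV, MAX   # assumed concrete FeatureSieves

branch = FruitBranch()
branch.configure(mode="extended", batch_size=4)
branch.add(SimpleWord("[1][2]"), PPV, MAX)   # add() instantiates bare classes
X = np.random.rand(20, 1, 100)               # 20 univariate series of length 100
features = branch.fit_transform(X)           # shape: (20, branch.nfeatures())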
35.752308
77
0.588235
2,713
23,239
4.926281
0.134537
0.011223
0.010475
0.010774
0.414964
0.364834
0.296296
0.287168
0.246988
0.215563
0
0.003836
0.315676
23,239
649
78
35.807396
0.836572
0.3711
0
0.324759
0
0
0.055921
0.008709
0
0
0
0
0
1
0.128617
false
0
0.032154
0
0.241158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079c6865019ab91124c781c2644155172081ba8b
9,863
py
Python
workoutizer/__main__.py
pa3kDaWae/workoutizer
15501d0060711bbd8308642bc89b45c1442d4d0f
[ "MIT" ]
null
null
null
workoutizer/__main__.py
pa3kDaWae/workoutizer
15501d0060711bbd8308642bc89b45c1442d4d0f
[ "MIT" ]
null
null
null
workoutizer/__main__.py
pa3kDaWae/workoutizer
15501d0060711bbd8308642bc89b45c1442d4d0f
[ "MIT" ]
null
null
null
import os import argparse import subprocess import socket import sys import click from django.core.management import execute_from_command_line from workoutizer.settings import WORKOUTIZER_DIR, WORKOUTIZER_DB_PATH, TRACKS_DIR from workoutizer import __version__ BASE_DIR = os.path.dirname(os.path.dirname(__file__)) SETUP_DIR = os.path.join(BASE_DIR, 'setup') os.environ["DJANGO_SETTINGS_MODULE"] = "workoutizer.settings" example_rpi_cmd = "wkz --setup_rpi vendor_id=091e product_id=4b48" url_help = 'specify ip address and port pair, like: address:port' @click.group() def cli(): pass @click.command(help='Mandatory command to initialize workoutizer. This fetches the static files, creates the database ' 'and applies the required migrations.') def init(): _build_home() execute_from_command_line(["manage.py", "collectstatic", "--noinput"]) execute_from_command_line(["manage.py", "migrate"]) execute_from_command_line(["manage.py", "check"]) click.echo(f"database and track files are stored in: {WORKOUTIZER_DIR}") @click.option('--ip', default="", help=url_help) @click.option('--product_id', help="product id of your device", required=True) @click.option('--vendor_id', help="vendor id of your device", required=True) @click.command(help='Configure Raspberry Pi to auto mount devices. Passing vendor and product id is required. Passing ' f'the local ip address and port is optional. E.g.: {example_rpi_cmd}') def setup_rpi(ip, vendor_id, product_id): if not ip: ip = _get_local_ip_address() answer = input(f"Are you sure you want to setup your Raspberry Pi?\n\n" f"This will copy the required udev rule and systemd service file\n" f"to your system to enable automated mounting of your device.\n" f"This might take a while...\n\n" f"Start setup? [Y/n] ") if answer.lower() == 'y': click.echo(f"installing ansible...") _pip_install('ansible==2.9.10') click.echo(f"starting setup using ansible...") _setup_rpi( vendor_id=vendor_id, product_id=product_id, ip_port=f"{ip}:8000" ) _run_ansible(playbook='install_packages.yml') click.echo(f"Successfully configured to automatically mount your device when plugged in. Note: These changes " f"require a system restart to take effect.") else: click.echo(f"Aborted.") @click.argument('url', default="") @click.command(help="Run workoutizer. Passing the local ip address and port is optional. If no ip address " "is passed, it will be determined automatically. Usage, e.g.: 'wkz run 0.0.0.0:8000'.") def run(url): if not url: url = f"{_get_local_ip_address()}:8000" execute_from_command_line(["manage.py", "runserver", url]) @click.argument('url', default="") @click.command(help='Configure workoutizer to run as systemd service. Passing the local ip address and port is ' 'optional. If no ip address is passed, it will be determined automatically.') def wkz_as_service(url): _pip_install('ansible==2.9.10') _wkz_as_service(url=url) @click.argument('cmd', nargs=1) @click.command(help="Pass commands to django's manage.py. Convenience function to access all django commands which are " "not yet covered by the given set of workoutizer commands. 
Usage, e.g.: " "wkz manage 'runserver 0.0.0.0:8000 --noreload'.") def manage(cmd): execute_from_command_line(["manage.py"] + cmd.split(' ')) @click.command(help='Show the version of currently installed workoutizer.') def version(): click.echo(__version__) @click.command(help='Check for a newer version and install if there is any.') def upgrade(): _upgrade() cli.add_command(upgrade) cli.add_command(version) cli.add_command(init) cli.add_command(setup_rpi) cli.add_command(run) cli.add_command(manage) cli.add_command(wkz_as_service) def _upgrade(): latest_version = _get_latest_version_of("workoutizer") from workoutizer import __version__ as current_version if latest_version: click.echo(f"found newer version: {latest_version}, you have {current_version} installed") _pip_install('workoutizer', upgrade=True) execute_from_command_line(["manage.py", "collectstatic", "--noinput"]) execute_from_command_line(["manage.py", "migrate"]) execute_from_command_line(["manage.py", "check"]) click.echo(f"Successfully upgraded from {current_version} to {latest_version}") else: click.echo(f"No update available. You are running the latest version: {current_version}") def _get_latest_version_of(package: str): outdated = str( subprocess.check_output([sys.executable, "-m", "pip", "list", '--outdated', '--disable-pip-version-check'])) if package in outdated: output = str(subprocess.check_output([sys.executable, "-m", "pip", "search", package])) latest_version = output[output.find('LATEST'):].split('\\n')[0].split(' ')[-1] return latest_version else: return False def _setup_rpi(vendor_id: str, product_id: str, ip_port: str = None): if not ip_port: ip_port = f"{_get_local_ip_address()}:8000" result = _run_ansible( playbook='setup_on_rpi.yml', variables={ 'vendor_id': vendor_id, 'product_id': product_id, 'address_plus_port': ip_port, } ) if result == 0: pass else: click.echo(f"ERROR: Could not configure Raspberry Pi, see above errors.") quit() return result def _wkz_as_service(url: str): click.echo(f"configuring workoutizer to run as system service") if not url: url = f"{_get_local_ip_address()}:8000" env_binaries = sys.executable wkz_executable = env_binaries[:env_binaries.find('python')] + "wkz" result = _run_ansible( playbook='wkz_as_service.yml', variables={ 'address_plus_port': url, 'wkz_executable': wkz_executable, } ) if result == 0: click.echo(f"Successfully configured workoutizer as systemd service. Run it with: systemctl start wkz.service") else: click.echo(f"ERROR: Could not configure workoutizer as systemd service, see above errors.") return result def _get_local_ip_address(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) ip_address = s.getsockname()[0] s.close() return ip_address def _build_home(): if os.path.isdir(WORKOUTIZER_DIR): if os.path.isfile(WORKOUTIZER_DB_PATH): click.echo(f"Found existing workoutizer database at: {WORKOUTIZER_DB_PATH}\n") answer = input(f"Workoutizer could try to use the existing database instead of creating a new one.\n" f"Note that this could lead to faulty behaviour because of mismatching applied\n" f"migrations on this database.\n\n" f"Do you want to use the existing database instead of creating a new one? 
[Y/n] ") if answer.lower() == 'y': click.echo(f"keeping existing database at {WORKOUTIZER_DB_PATH}") return else: click.echo(f"removed database at {WORKOUTIZER_DB_PATH}") os.remove(WORKOUTIZER_DB_PATH) _make_tracks_dir(TRACKS_DIR) else: os.mkdir(WORKOUTIZER_DIR) _make_tracks_dir(TRACKS_DIR) def _make_tracks_dir(path): if not os.path.isdir(path): os.mkdir(path) class ParseDict(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): d = {} if values: for item in values: split_items = item.split("=", 1) key = split_items[0].strip() # we remove blanks around keys, as is logical value = split_items[1] d[key] = value setattr(namespace, self.dest, d) def _pip_install(package, upgrade: bool = False): if upgrade: subprocess.check_call([sys.executable, "-m", "pip", "install", package, '--upgrade']) else: subprocess.check_call([sys.executable, "-m", "pip", "install", package]) def _run_ansible(playbook: str, variables: dict = None): if variables is None: variables = {} from ansible import context from ansible.cli import CLI from ansible.module_utils.common.collections import ImmutableDict from ansible.executor.playbook_executor import PlaybookExecutor from ansible.parsing.dataloader import DataLoader from ansible.inventory.manager import InventoryManager from ansible.vars.manager import VariableManager loader = DataLoader() context.CLIARGS = ImmutableDict( tags={}, listtags=False, listtasks=False, listhosts=False, syntax=False, connection='ssh', module_path=None, forks=100, remote_user='xxx', private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=True, become_method='sudo', become_user='root', verbosity=True, check=False, start_at_task=None ) inventory = InventoryManager(loader=loader, sources=()) variable_manager = VariableManager(loader=loader, inventory=inventory, version_info=CLI.version_info(gitinfo=False)) variable_manager._extra_vars = variables pbex = PlaybookExecutor(playbooks=[os.path.join(SETUP_DIR, 'ansible', playbook)], inventory=inventory, variable_manager=variable_manager, loader=loader, passwords={}) return pbex.run() if __name__ == '__main__': cli()
38.678431
120
0.667343
1,301
9,863
4.864719
0.250576
0.022752
0.0237
0.031285
0.244746
0.206668
0.187075
0.164955
0.12419
0.100174
0
0.008329
0.220927
9,863
254
121
38.830709
0.815331
0.00436
0
0.173077
0
0.004808
0.323589
0.020778
0
0
0
0
0
1
0.086538
false
0.043269
0.081731
0
0.206731
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079ca32a831e991e4272d56b55460b5a2a9b338e
8,807
py
Python
bcbio/bam/trim.py
matanhofree/bcbio-nextgen
e6938cedb20ff3b7632165105941d71189e46aac
[ "MIT" ]
1
2015-04-08T17:43:39.000Z
2015-04-08T17:43:39.000Z
bcbio/bam/trim.py
matanhofree/bcbio-nextgen
e6938cedb20ff3b7632165105941d71189e46aac
[ "MIT" ]
null
null
null
bcbio/bam/trim.py
matanhofree/bcbio-nextgen
e6938cedb20ff3b7632165105941d71189e46aac
[ "MIT" ]
null
null
null
"""Provide trimming of input reads from Fastq or BAM files. """ import os import sys import tempfile from bcbio.utils import (file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap) from bcbio.log import logger from bcbio.bam import fastq from bcbio.provenance import do from Bio.Seq import Seq from itertools import izip, repeat from bcbio.distributed.transaction import file_transaction from bcbio.pipeline import config_utils SUPPORTED_ADAPTERS = { "illumina": ["AACACTCTTTCCCT", "AGATCGGAAGAGCG"], "truseq": ["AGATCGGAAGAG"], "polya": ["AAAAAAAAAAAAA"], "nextera": ["AATGATACGGCGA", "CAAGCAGAAGACG"]} QUALITY_FLAGS = {5: ['"E"', '"&"'], 20: ['"T"', '"5"']} def trim_adapters(fastq_files, dirs, config): QUALITY_CUTOFF = 5 to_trim = _get_sequences_to_trim(config) resources = config_utils.get_resources("AlienTrimmer", config) try: jarpath = config_utils.get_program("AlienTrimmer", config, "dir") # fall back on Cutadapt if AlienTrimmer is not installed # XXX: remove after it has been live for a while except: return trim_read_through(fastq_files, dirs, config) jarfile = config_utils.get_jar("AlienTrimmer", jarpath) jvm_opts = " ".join(resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"])) base_cmd = ("java -jar {jvm_opts} {jarfile} -k 10 ") fastq1 = fastq_files[0] supplied_quality_format = _get_quality_format(config) cores = config["algorithm"].get("num_cores", 0) out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs) fastq1_out = out_files[0] if supplied_quality_format == "illumina": quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][0] else: quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][1] quality_flag = '-q ' + quality_flag if len(fastq_files) == 1: if file_exists(fastq1_out): return [fastq1_out] base_cmd += ("-i {fastq1} -o {tx_fastq1_out} -c {temp_file} " "{quality_flag}") message = "Trimming %s from %s with AlienTrimmer." % (to_trim, fastq1) else: fastq2 = fastq_files[1] fastq2_out = out_files[1] if all(map(file_exists, [fastq1_out, fastq2_out])): return [fastq1_out, fastq2_out] base_cmd += ("-if {fastq1} -ir {fastq2} -of {tx_fastq1_out} " "-or {tx_fastq2_out} -c {temp_file} {quality_flag}") message = ("Trimming %s from %s and %s with AlienTrimmer." % (to_trim, fastq1, fastq2)) with tempfile.NamedTemporaryFile(delete=False) as temp: temp_file = temp.name for adapter in to_trim: temp.write(adapter + "\n") temp.close() if len(fastq_files) == 1: with file_transaction(fastq1_out) as tx_fastq1_out: do.run(base_cmd.format(**locals()), message) return [fastq1_out] else: with file_transaction([fastq1_out, fastq2_out]) as tx_out_files: tx_fastq1_out = tx_out_files[0] tx_fastq2_out = tx_out_files[1] do.run(base_cmd.format(**locals()), message) return [fastq1_out, fastq2_out] def trim_read_through(fastq_files, dirs, lane_config): """ for small insert sizes, the read length can be longer than the insert resulting in the reverse complement of the 3' adapter being sequenced. this takes adapter sequences and trims the only the reverse complement of the adapter MYSEQUENCEAAAARETPADA -> MYSEQUENCEAAAA (no polyA trim) """ quality_format = _get_quality_format(lane_config) to_trim = _get_sequences_to_trim(lane_config) out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs) fixed_files = append_stem(out_files, ".fixed") if all(map(file_exists, fixed_files)): return fixed_files logger.info("Trimming %s from the 3' end of reads in %s using " "cutadapt." 
% (", ".join(to_trim), ", ".join(fastq_files))) cores = lane_config["algorithm"].get("num_cores", 1) out_files = _cutadapt_trim(fastq_files, quality_format, to_trim, out_files, cores) fixed_files = remove_short_reads(out_files, dirs, lane_config) return fixed_files def remove_short_reads(fastq_files, dirs, lane_config): """ remove reads from a single or pair of fastq files which fall below a length threshold (30 bases) """ min_length = int(lane_config["algorithm"].get("min_read_length", 20)) supplied_quality_format = _get_quality_format(lane_config) if supplied_quality_format == "illumina": quality_format = "fastq-illumina" else: quality_format = "fastq-sanger" if is_pair(fastq_files): fastq1, fastq2 = fastq_files out_files = fastq.filter_reads_by_length(fastq1, fastq2, quality_format, min_length) else: out_files = [fastq.filter_single_reads_by_length(fastq_files[0], quality_format, min_length)] map(os.remove, fastq_files) return out_files def _get_read_through_trimmed_outfiles(fastq_files, dirs): out_dir = os.path.join(dirs["work"], "trim") safe_makedir(out_dir) out_files = replace_directory(append_stem(fastq_files, "_trimmed"), out_dir) return out_files def _get_sequences_to_trim(lane_config): builtin_adapters = _get_builtin_adapters(lane_config) polya = builtin_adapters.get("polya", [None])[0] # allow for trimming of custom sequences for advanced users custom_trim = lane_config["algorithm"].get("custom_trim", []) builtin_adapters = {k: v for k, v in builtin_adapters.items() if k != "polya"} trim_sequences = custom_trim # for unstranded RNA-seq, libraries, both polyA and polyT can appear # at the 3' end as well if polya: trim_sequences += [polya, str(Seq(polya).reverse_complement())] # also trim the reverse complement of the adapters for _, v in builtin_adapters.items(): trim_sequences += [str(Seq(sequence)) for sequence in v] trim_sequences += [str(Seq(sequence).reverse_complement()) for sequence in v] return trim_sequences def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, cores): """Trimming with cutadapt, using version installed with bcbio-nextgen. Uses the system executable to find the version next to our Anaconda Python. TODO: Could we use cutadapt as a library to avoid this? """ if quality_format == "illumina": quality_base = "64" else: quality_base = "33" # --times=2 tries twice remove adapters which will allow things like: # realsequenceAAAAAAadapter to remove both the poly-A and the adapter # this behavior might not be what we want; we could also do two or # more passes of cutadapt cutadapt = os.path.join(os.path.dirname(sys.executable), "cutadapt") base_cmd = [cutadapt, "--times=" + "2", "--quality-base=" + quality_base, "--quality-cutoff=5", "--format=fastq", "--minimum-length=0"] adapter_cmd = map(lambda x: "--adapter=" + x, adapters) base_cmd.extend(adapter_cmd) if all(map(file_exists, out_files)): return out_files with file_transaction(out_files) as tmp_out_files: if isinstance(tmp_out_files, basestring): tmp_out_files = [tmp_out_files] map(_run_cutadapt_on_single_file, izip(repeat(base_cmd), fastq_files, tmp_out_files)) return out_files @map_wrap def _run_cutadapt_on_single_file(base_cmd, fastq_file, out_file): stat_file = replace_suffix(out_file, ".trim_stats.txt") with open(stat_file, "w") as stat_handle: cmd = list(base_cmd) cmd.extend(["--output=" + out_file, fastq_file]) do.run(cmd, "Running cutadapt on %s." 
% (fastq_file), None) def _get_quality_format(lane_config): SUPPORTED_FORMATS = ["illumina", "standard"] quality_format = lane_config["algorithm"].get("quality_format", "standard").lower() if quality_format not in SUPPORTED_FORMATS: logger.error("quality_format is set to an unsupported format. " "Supported formats are %s." % (", ".join(SUPPORTED_FORMATS))) exit(1) return quality_format def _get_builtin_adapters(lane_config): chemistries = lane_config["algorithm"].get("adapters", []) adapters = {chemistry: SUPPORTED_ADAPTERS[chemistry] for chemistry in chemistries if chemistry in SUPPORTED_ADAPTERS} return adapters
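To see concretely what _get_sequences_to_trim queues for cutadapt, here is the reverse-complement step in isolation (Biopython only; the adapter is the "truseq" entry from SUPPORTED_ADAPTERS above):

from Bio.Seq import Seq

adapter = "AGATCGGAAGAG"                                # the truseq adapter
to_trim = [adapter, str(Seq(adapter).reverse_complement())]
print(to_trim)                                          # ['AGATCGGAAGAG', 'CTCTTCCGATCT']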
40.962791
92
0.65641
1,132
8,807
4.834806
0.235866
0.038005
0.017906
0.020099
0.274438
0.157135
0.073634
0.059382
0.051526
0.051526
0
0.010923
0.241172
8,807
214
93
41.154206
0.80802
0.131941
0
0.149068
0
0
0.127795
0
0
0
0
0.004673
0
1
0.055901
false
0
0.068323
0
0.21118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079cc8e3750a72f04c77d4be145f9892cc269784
13,419
py
Python
FEniCSUI/AnalysesHub/views.py
nasserarbabi/FEniCSUI-dev
f8f161e1b49932843e01301212e7d031fff4f6c8
[ "MIT" ]
null
null
null
FEniCSUI/AnalysesHub/views.py
nasserarbabi/FEniCSUI-dev
f8f161e1b49932843e01301212e7d031fff4f6c8
[ "MIT" ]
8
2021-03-10T21:59:52.000Z
2021-09-22T19:12:57.000Z
FEniCSUI/AnalysesHub/views.py
nasserarbabi/FEniCSUI
f8f161e1b49932843e01301212e7d031fff4f6c8
[ "MIT" ]
null
null
null
from rest_framework.response import Response from rest_framework.views import APIView from django.shortcuts import get_object_or_404 from dashboard.models import projects from .models import AnalysisConfig, SolverResults, SolverProgress, DockerLogs from rest_framework.parsers import FormParser, JSONParser, MultiPartParser, FileUploadParser from rest_framework import status import docker import os import json from zipfile import ZipFile from django.http import HttpResponse from threading import Thread from time import sleep from datetime import datetime class solverConfig(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ return a list of entries within a given category """ project = get_object_or_404(projects, id=kwargs['project_id']) category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) def post(self, request, *args, **kwargs): """ create a new category for solver configuration """ project = get_object_or_404(projects, id=kwargs['project_id']) data = request.data.dict() category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) # if request does not contain a name if not "Name" in data: return Response(data="Please provide a 'Name' for the entry", status=400) # if there is no category similar to the user request if category not in jsonHelper: jsonHelper[category] = [] jsonHelper[category].append(data) # check if the entry with the same name exists elif not list(filter(lambda name: name["Name"] == data["Name"], jsonHelper[category])): jsonHelper[category].append(data) else: return Response(data="an entry with the same name exists", status=400) parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_201_CREATED) def put(self, request, *args, **kwargs): """ Edit an existing category entry's data """ project = get_object_or_404(projects, id=kwargs['project_id']) data = request.data.dict() # if request does not contain a name if not "Name" in data: return Response(data="Please provide a 'Name' for the entry", status=400) category = request.query_params.get('category') list_id = int(request.query_params.get('id')) parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: if list_id >= 0 and list_id < len(jsonHelper[category]): # check if an entry with the same name exists if not list(filter(lambda name: name["Name"] == data["Name"], jsonHelper[category])) or jsonHelper[category][list_id]["Name"] == data["Name"]: jsonHelper[category][list_id] = data parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="an entry with the same name exists", status=400) else: return Response(data="No entry with the id={}".format(list_id), status=status.HTTP_204_NO_CONTENT) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) def delete(self, request, *args, **kwargs): """ Delete an entry from the category """ project = get_object_or_404(projects, 
id=kwargs['project_id']) category = request.query_params.get('category') list_id = int(request.query_params.get('id')) parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if jsonHelper[category]: if list_id >= 0 and list_id < len(jsonHelper[category]): jsonHelper[category].pop(int(list_id)) parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper[category], status=status.HTTP_200_OK) else: return Response(data="No entry with the id={}".format(list_id), status=status.HTTP_204_NO_CONTENT) else: return Response(data="The category {} does not exist".format(category), status=status.HTTP_204_NO_CONTENT) class Categories(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Return the existing categories in the solver config """ project = get_object_or_404(projects, id=kwargs['project_id']) config = json.loads(AnalysisConfig.objects.get( project=project).config).keys() return Response(data=config, status=status.HTTP_200_OK) def delete(self, request, *args, **kwargs): """ DELETE the existing categories in the solver config """ project = get_object_or_404(projects, id=kwargs['project_id']) category = request.query_params.get('category') parentConfig = AnalysisConfig.objects.get(project=project) jsonHelper = json.loads(parentConfig.config) if category in jsonHelper: del jsonHelper[category] parentConfig.config = json.dumps(jsonHelper) parentConfig.save() return Response(data=jsonHelper, status=status.HTTP_410_GONE) else: return Response(data="The category {} does not exist!".format(category), status=status.HTTP_404_NOT_FOUND) class getConfiguration(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Get the solver config to be submitted to the analysis """ project = get_object_or_404(projects, id=kwargs['project_id']) config = AnalysisConfig.objects.filter(project=project).values()[0] return Response(data=config["config"], status=status.HTTP_200_OK) def streamDockerLog(container, project): for line in container.logs(stream=True): logs = get_object_or_404(DockerLogs, project=project) now = datetime.now() current_time = now.strftime("[%H:%M:%S]: ") logs.log = current_time + str(line.strip(), 'utf-8') + "\n" + logs.log logs.save() class solvers(APIView): parser_classes = [FormParser, MultiPartParser] def get(self, request, *args, **kwargs): """ Runs the related solver defined in url parameters """ project = get_object_or_404(projects, id=kwargs['project_id']) # set progress to initial SolverProgress.objects.get_or_create( project=project, defaults={'progress' :json.dumps({"status": "", "message": ""})}) progress = SolverProgress.objects.get( project=project ) progress.progress = json.dumps({"state":{"status": "RECEIVED", "message": {"progress": "0.0"}}, "logs":""}) progress.save() # initiate related solver solver = request.query_params.get('solver') client = docker.from_env() solverPath = os.path.abspath('./solvers') if DockerLogs.objects.filter(project=project).exists(): DockerLogs.objects.filter(project=project).delete() DockerLogs.objects.create(project=project,log="") try: container = client.containers.run( "quay.io/fenicsproject/stable:current", volumes={solverPath: { 'bind': '/home/fenics/shared', 'mode': 'rw'}}, working_dir="/home/fenics/shared", # runs solver.py with two arguments to be passed in to python file command=["`sudo pip3 install requests \n python3 solverHub.py {} {}`".format( project.id, 
solver)], name="FEniCSDocker", auto_remove=False, detach=True) thread = Thread(target=streamDockerLog, args=(container, project)) thread.start() except: message = '''please check that Docker is running and that a container named FEniCSDocker does not already exist. If you are using Docker for Windows, make sure the file sharing setting for the main folder directory is on. If you are working with WSL, make sure it has access to the Windows Docker. Instructions can be found at: https://nickjanetakis.com/blog/setting-up-docker-for-windows-and-wsl-to-work-flawlessly''' print(message) return Response(data=message, status=500) return Response(data="submitted to analysis", status=status.HTTP_200_OK) def delete(self, request, *args, **kwargs): """ kills the running docker container """ client = docker.from_env() try: container = client.containers.get("FEniCSDocker") container.stop() return Response(data="container stopped successfully", status=200) except: return Response(data="No container running", status=404) class saveResults(APIView): parser_classes = [FileUploadParser] def put(self, request, filename, format=None, *args, **kwargs): """ save results to media folder. a query will be created to make it available for download """ project = get_object_or_404(projects, id=kwargs['project_id']) fileType = request.query_params.get('fileType') data = request.data['file'] folderPath = os.path.abspath( "../FEniCSUI/media/{}/results/".format(kwargs['project_id'])) os.makedirs(folderPath, exist_ok=True) filePath = '{}/{}.{}'.format(folderPath, filename, fileType) with open(filePath, 'wb+') as destination: for chunk in data.chunks(): destination.write(chunk) if not SolverResults.objects.filter(project=project).exists(): SolverResults.objects.create(project=project, path=folderPath) return Response(data="results updated at {}".format(filePath), status=status.HTTP_201_CREATED) class downloadResults(APIView): def get(self, request, *args, **kwargs): """ Get the results saved in the database """ project = get_object_or_404(projects, id=kwargs['project_id']) if (SolverResults.objects.filter(project=project).exists()): results = SolverResults.objects.filter(project=project).values()[0] folderPath = results['path'] # create a ZipFile object with ZipFile('{}/results.zip'.format(folderPath), 'w') as zipObj: # Iterate over all the files in directory for folderName, subfolders, filenames in os.walk(folderPath): for filename in filenames: if not filename == 'results.zip': filePath = os.path.join(folderName, filename) # Add file to zip zipObj.write(filePath, os.path.basename(filePath)) zipFile = open('{}/results.zip'.format(folderPath), 'rb') response = HttpResponse(zipFile, content_type='application/zip') response['Content-Disposition'] = 'attachment; filename=results.zip' return response else: return Response(data="not found", status=404) class solverProgress(APIView): parser_classes = [JSONParser] def get(self, request, *args, **kwargs): """ Get the progress """ project = get_object_or_404(projects, id=kwargs['project_id']) if (SolverProgress.objects.filter(project=project).exists()): progress = json.loads(get_object_or_404(SolverProgress, project=project).progress) logs = get_object_or_404(DockerLogs, project=project).log else: progress = "null" logs = "" return Response(data=json.dumps({"state":progress,"logs":logs}), status=status.HTTP_200_OK) def post(self, request, *args, **kwargs): """ Update the progress from solver """ project = get_object_or_404(projects, id=kwargs['project_id']) data = request.data if 
SolverProgress.objects.filter(project=project).exists(): progress = get_object_or_404(SolverProgress, project=project) progress.progress = json.dumps(data) progress.save() else: SolverProgress.objects.create(project=project, progress=data) return Response(data=get_object_or_404(SolverProgress, project=project).progress, status=status.HTTP_201_CREATED)
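A hedged client-side sketch for the solverConfig endpoint above; the URL pattern and project id are hypothetical, since the actual routes live in a urls.py that is not shown here:

import requests

base = "http://localhost:8000/analyses/1"    # hypothetical route and project id
resp = requests.post(
    f"{base}/solverConfig",
    params={"category": "materials"},        # the ?category= query parameter
    data={"Name": "steel", "E": "210e9"},    # POST body must contain a 'Name'
)
print(resp.status_code, resp.json())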
42.735669
158
0.634474
1,515
13,419
5.522772
0.186139
0.043504
0.053783
0.030118
0.534242
0.507589
0.460978
0.448667
0.398709
0.398709
0
0.013829
0.256353
13,419
313
159
42.872204
0.824632
0.073851
0
0.439252
0
0.009346
0.1237
0.005364
0
0
0
0
0
1
0.065421
false
0
0.070093
0
0.317757
0.004673
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079d837b78456fbc7dc57dde76430c107de0f8b2
1,627
py
Python
fs/opener/appfs.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
fs/opener/appfs.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
fs/opener/appfs.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
# coding: utf-8 """``AppFS`` opener definition. """ from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from .base import Opener from .errors import OpenerError from ..subfs import ClosingSubFS from .. import appfs class AppFSOpener(Opener): """``AppFS`` opener. """ protocols = [ 'userdata', 'userconf', 'sitedata', 'siteconf', 'usercache', 'userlog' ] _protocol_mapping = { 'userdata': appfs.UserDataFS, 'userconf': appfs.UserConfigFS, 'sitedata': appfs.SiteDataFS, 'siteconf': appfs.SiteConfigFS, 'usercache': appfs.UserCacheFS, 'userlog': appfs.UserLogFS } def open_fs(self, fs_url, parse_result, writeable, create, cwd): fs_class = self._protocol_mapping[parse_result.protocol] resource, delim, path = parse_result.resource.partition('/') tokens = resource.split(':', 3) if len(tokens) == 2: appname, author = tokens version = None elif len(tokens) == 3: appname, author, version = tokens else: raise OpenerError( 'resource should be <appname>:<author> ' 'or <appname>:<author>:<version>' ) app_fs = fs_class( appname, author=author, version=version, create=create ) app_fs = ( app_fs.opendir(path, factory=ClosingSubFS) if delim else app_fs ) return app_fs
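With the opener registered, PyFilesystem's fs.open_fs accepts the protocols listed above; a small sketch (the appname and author values are placeholders):

import fs

# the resource part is "<appname>:<author>" or "<appname>:<author>:<version>"
conf = fs.open_fs("userconf://myapp:acme", create=True)
conf.writetext("settings.ini", "[main]\ntheme = dark\n")
print(conf.listdir("/"))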
24.283582
68
0.566687
154
1,627
5.785714
0.454545
0.072952
0.053872
0
0
0
0
0
0
0
0
0.003663
0.328826
1,627
66
69
24.651515
0.812271
0.040565
0
0
0
0
0.107811
0.018076
0
0
0
0
0
1
0.02
false
0
0.14
0
0.24
0.02
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
079e4e7b1369d32d29e2b328e164f3c7bb362e85
23,180
py
Python
scripts/modeling_toolbox/evaluation.py
cyberj0g/verification-classifier
efb19a3864e27a7f149a1c27ee8e13eaa19f96eb
[ "MIT" ]
8
2019-06-06T08:16:45.000Z
2021-06-26T11:53:48.000Z
scripts/modeling_toolbox/evaluation.py
cyberj0g/verification-classifier
efb19a3864e27a7f149a1c27ee8e13eaa19f96eb
[ "MIT" ]
95
2019-03-27T08:36:01.000Z
2022-02-10T00:15:20.000Z
scripts/modeling_toolbox/evaluation.py
cyberj0g/verification-classifier
efb19a3864e27a7f149a1c27ee8e13eaa19f96eb
[ "MIT" ]
8
2019-02-28T11:21:46.000Z
2022-03-21T07:34:20.000Z
import numpy as np from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix from sklearn.decomposition import PCA from sklearn import random_projection from sklearn import svm from sklearn.ensemble import IsolationForest import matplotlib.pyplot as plt from keras.layers import Dense, Input, Dropout from keras.models import Model from keras import regularizers from keras.models import Sequential from keras.optimizers import Adam from keras.regularizers import l2 from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier import xgboost as xgb def one_class_svm(x_train, x_test, x_attacks, svm_results): # SVM Hyper-parameters nus = [0.01] gammas = ['auto'] dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.35, 0.5, 0.75, 0.9, 1]] dimensions = list(filter(lambda x: x > 0, dimensions)) for n in dimensions: x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA', attack=x_attacks) for nu in nus: for gamma in gammas: # Fit classifier with PCA reduced data classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_reduced_pca) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca, test_reduced_pca, attack_reduced_pca) svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': n, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) # Fit classifier with RP reduced data classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_train) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train, x_test, x_attacks) svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': x_test.shape[1], 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return svm_results def isolation_forest(x_train, x_test, x_attacks, isolation_results): # Isolation Forest Hyper-parameters estimators = [200, 100] contaminations = [0.01] dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.5, 0.9, 1]] dimensions = list(filter(lambda x: x > 0, dimensions)) for n in dimensions: x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA', attack=x_attacks) x_reduced_rp, test_reduced_rp, attack_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP', attack=x_attacks) max_features = list(range(1, n + 1, 4)) for estimator in estimators: for contamination in contaminations: for max_feature in max_features: classifier = IsolationForest(n_estimators=estimator, contamination=contamination, max_features=max_feature, n_jobs=7) classifier.fit(x_reduced_pca) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca, test_reduced_pca, attack_reduced_pca) isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination, 'n_components': n, 'max_features': max_feature, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'isolation_forest', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = IsolationForest(n_estimators=estimator, contamination=contamination, max_features=max_feature, n_jobs=7) classifier.fit(x_reduced_rp) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_rp, test_reduced_rp, attack_reduced_rp) 
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination, 'n_components': n, 'max_features': max_feature, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'isolation_forest', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return isolation_results def autoencoder(x_train, x_test, x_attacks, ae_svm_results): latent_dim = 3 input_vector = Input(shape=(x_train.shape[1],)) encoded = Dense(latent_dim, activation='relu')(input_vector) decoded = Dense(x_train.shape[1], activity_regularizer=regularizers.l1(10e-5))(encoded) autoencoder = Model(input_vector, decoded) encoder = Model(input_vector, encoded) autoencoder.compile(optimizer=Adam(lr=0.001), loss='mse') network_history = autoencoder.fit(x_train, x_train, shuffle=True, batch_size=16, epochs=10, validation_data=(x_test, x_test), verbose=True) plot_history(network_history, 'AE history') print('Mean loss on train: {}'.format(autoencoder.evaluate(x_train, x_train, batch_size=8, verbose=False))) print('Mean loss on test: {}'.format(autoencoder.evaluate(x_test, x_test, batch_size=8, verbose=False))) print('Mean loss on attacks: {}'.format(autoencoder.evaluate(x_attacks, x_attacks, batch_size=8, verbose=False))) x_train_red = encoder.predict(x_train, batch_size=8) x_test_red = encoder.predict(x_test, batch_size=8) x_attacks_red = encoder.predict(x_attacks, batch_size=8) nus = [0.01] gammas = [x_train_red.shape[1], 2*x_train_red.shape[1], x_train_red.shape[1]/2, 'auto'] for nu in nus: for gamma in gammas: classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000) classifier.fit(x_train_red) fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train_red, x_test_red, x_attacks_red) ae_svm_results = ae_svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': latent_dim, 'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr, 'model': 'ae-svm', 'auc': area, 'f_beta': fb}, ignore_index=True) return ae_svm_results def unsupervised_evaluation(classifier, train_set, test_set, attack_set, beta=20): y_pred_train = classifier.predict(train_set) y_pred_test = classifier.predict(test_set) y_pred_outliers = classifier.predict(attack_set) n_accurate_train = y_pred_train[y_pred_train == 1].size n_accurate_test = y_pred_test[y_pred_test == 1].size n_accurate_outliers = y_pred_outliers[y_pred_outliers == -1].size fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1) fb = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), beta=beta, pos_label=1) tnr = n_accurate_outliers/attack_set.shape[0] tpr_test = n_accurate_test/test_set.shape[0] tpr_train = n_accurate_train/train_set.shape[0] area = auc(fpr, tpr) return fb, area, tnr, tpr_train, tpr_test def neural_network(x_train, y_train, x_test, y_test): model = Sequential() model.add(Dense(128, input_shape=(x_train.shape[1],), activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.1)) model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01))) model.add(Dropout(0.2)) model.add(Dense(128, kernel_initializer='glorot_uniform', activation='sigmoid')) model.add(Dropout(0.4)) model.add(Dense(64, kernel_initializer='glorot_uniform', activation='tanh')) model.add(Dropout(0.5)) model.add(Dense(32, kernel_initializer='glorot_uniform', activation='tanh')) 
model.add(Dropout(0.4)) model.add(Dense(128, kernel_initializer='glorot_uniform', activation='tanh')) model.add(Dropout(0.3)) model.add(Dense(1, kernel_initializer='normal', activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy']) network_history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=0, validation_data=(x_test, y_test)) plot_history_with_acc(network_history) return model def random_forest(x_train, y_train, x_test, y_test, random_forest_results): # Random forest Hyper-parameters estimators = [150, 200] dimensions = [int(i*x_test.shape[1]) for i in [1]] for estimator in estimators: for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7) classifier.fit(x_train, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test) random_forest_results = random_forest_results.append({'estimators': estimator, 'n_components': x_test.shape[1], 'TPR': tpr, 'TNR': tnr, 'model': 'random_forest', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return random_forest_results def ada_boost(x_train, y_train, x_test, y_test, ada_boost_results): # AdaBoost Hyper-parameters learning_rates = [0.55] dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') for lr in learning_rates: classifier = AdaBoostClassifier(learning_rate=lr) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) ada_boost_results = ada_boost_results.append({'LR': lr, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'ada_boost', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = AdaBoostClassifier(learning_rate=lr) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) ada_boost_results = ada_boost_results.append({'LR': lr, 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'ada_boost', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return ada_boost_results def svm_classifier(x_train, y_train, x_test, y_test, svm_results): # SVC Hyper-parameters dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = 
svm.SVC(gamma='auto', cache_size=7000) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) svm_results = svm_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = svm.SVC(gamma='auto', cache_size=7000) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) svm_results = svm_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'svm', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) return svm_results def xg_boost(x_train, y_train, x_test, y_test, xg_boost_results): # XGBoost Hyper-parameters dimensions = [int(i*x_test.shape[1]) for i in [1]] for n in dimensions: x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA') x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP') classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_reduced_pca, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'PCA'}, ignore_index=True) classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_reduced_rp, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': n, 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'RP'}, ignore_index=True) classifier = xgb.XGBClassifier() grid = {'max_depth': 10} classifier.set_params(**grid) classifier.fit(x_train, y_train) fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test) xg_boost_results = xg_boost_results.append({ 'n_components': x_test.shape[1], 'TPR': tpr, 'TNR': tnr, 'model': 'xgboost', 'auc': area, 'f_beta': fb, 'projection': 'None'}, ignore_index=True) return xg_boost_results def supervised_evaluation(classifier, x_test, y_test, beta=20, nn=False): if not nn: y_pred = classifier.predict(x_test) confusion_matrix(y_test, y_pred) fpr, tpr, _ = roc_curve(y_test, y_pred) fb = fbeta_score(y_test, y_pred, beta=beta, pos_label=1) area = auc(fpr, tpr) tpr = tpr[1] tnr = 1 - fpr[1] return fb, area, tnr, tpr def plot_roc(classifier, test, attacks, title): y_pred_test = classifier.predict(test) y_pred_outliers = classifier.predict(attacks) fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]), np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1) roc_auc = auc(fpr, tpr) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic: {}'.format(title)) plt.legend(loc='lower right') plt.show() def plot_roc_supervised(classifier, x_test, y_test, title, nn=False): y_pred = classifier.predict(x_test) fpr, tpr, _ = roc_curve(y_test, y_pred) if nn: y_pred = [round(x[0]) for x in y_pred] print(confusion_matrix(y_test, y_pred)) roc_auc = auc(fpr, tpr) plt.figure() lw = 2 plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve 
(area = %0.2f)' % roc_auc) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic {}'.format(title)) plt.legend(loc='lower right') plt.show() def plot_history(network_history, title): plt.figure(figsize=(10, 5)) plt.title(title) plt.xlabel('Epochs') plt.ylabel('Loss') plt.semilogy(network_history.history['loss']) plt.semilogy(network_history.history['val_loss']) plt.legend(['Training', 'Validation']) plt.show() def plot_history_with_acc(network_history, title='Loss and Accuracy'): plt.figure(figsize=(15, 10)) plt.subplot(211) plt.title(title) plt.xlabel('Epochs') plt.ylabel('Loss') plt.semilogy(network_history.history['loss']) plt.semilogy(network_history.history['val_loss']) plt.legend(['Training', 'Validation']) plt.subplot(212) plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.plot(network_history.history['acc']) plt.plot(network_history.history['val_acc']) plt.legend(['Training', 'Validation'], loc='lower right') plt.show() def reduce_dimensionality(n_components, train, test, method, attack=None): if method == 'PCA': matrix = PCA(n_components=n_components) elif method == 'RP': matrix = random_projection.SparseRandomProjection(n_components=n_components, random_state=7) else: print('unknown projection method, choose either RP or PCA') return None train = matrix.fit_transform(train) test = matrix.transform(test) if attack is None: return train, test attack = matrix.transform(attack) return train, test, attack
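A small synthetic driver for the helpers above (random data only; in the real pipeline x_train and x_test hold features of legitimate renditions while x_attacks holds tampered ones):

import numpy as np
from sklearn import svm

x_train = np.random.rand(200, 30)
x_test = np.random.rand(50, 30)
x_attacks = np.random.rand(50, 30) + 1.0   # shifted so they look anomalous
train_r, test_r, attacks_r = reduce_dimensionality(10, x_train, x_test, 'PCA', attack=x_attacks)
clf = svm.OneClassSVM(kernel='rbf', gamma='auto', nu=0.01).fit(train_r)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(clf, train_r, test_r, attacks_r)
print("f_beta=%.3f auc=%.3f tnr=%.3f" % (fb, area, tnr))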
44.83559
122
0.519586
2,497
23,180
4.56628
0.098919
0.018856
0.017541
0.017892
0.733117
0.695492
0.661989
0.651903
0.635327
0.594983
0
0.016543
0.379336
23,180
516
123
44.922481
0.775978
0.009922
0
0.611399
0
0
0.07528
0
0
0
0
0
0
1
0.03886
false
0
0.041451
0
0.11399
0.012953
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a0528abcb6666cfcae1ee1c6cb300b86de98df
1,896
py
Python
tests/test_notifo_message.py
mrtazz/notifo.py
26079db3b40c26661155af20a9f16a0eca06dbde
[ "MIT" ]
3
2015-11-05T11:49:34.000Z
2016-07-17T18:06:15.000Z
tests/test_notifo_message.py
mrtazz/notifo.py
26079db3b40c26661155af20a9f16a0eca06dbde
[ "MIT" ]
null
null
null
tests/test_notifo_message.py
mrtazz/notifo.py
26079db3b40c26661155af20a9f16a0eca06dbde
[ "MIT" ]
null
null
null
# encoding: utf-8 import unittest import os import sys sys.path.append(os.getcwd()) from notifo import Notifo, send_message class TestNotifyUser(unittest.TestCase): def setUp(self): self.provider = "test_provider" self.provider_banned = "test_provider_msg_banned" self.user = "test_user" self.sender = "test_user2" self.banned = "test_user_banned" self.banned_token = "x128302fd34a60bf7e5670d003d858e6fb06ce6bf" self.sender_token = "x633a05b18f7f65bf461ffb3900c6eb70eaafb0ed" self.provider_token = "74515bc044df6594fbdb761b12a42f8028e14588" self.provider_banned_token = "e34e447385fb4ff9084204cba19731d29c2afd78" self.user_token = "xbb8b3cba22a5f3d64fd404a07e84cdbb0c3566e5" def test_message(self): res = send_message(self.sender, self.sender_token, to=self.user, msg="foo test") self.assertEqual(2201, res["response_code"]) def test_message_with_object(self): res = Notifo(self.sender, self.sender_token).send_message( to=self.user, msg="foo test") self.assertEqual(2201, res["response_code"]) def test_message_banned(self): res = send_message(self.banned, self.banned_token, to=self.user, msg="foo test") self.assertEqual(403, res["response_code"]) def test_message_provider(self): res = send_message(self.provider, self.provider_token, to=self.user, msg="foo test") self.assertEqual(2201, res["response_code"]) def test_message_provider_banned(self): res = send_message(self.provider_banned, self.provider_banned_token, to=self.user, msg="foo test") self.assertEqual(403, res["response_code"]) if __name__ == '__main__': unittest.main()
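The API surface these tests exercise, reduced to one call (the credentials below are placeholders; a real request needs a valid notifo account and API token):

from notifo import send_message

res = send_message("my_user", "my_api_token", to="friend", msg="build passed")
print(res["response_code"])   # 2201 on success, per the assertions above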
38.693878
79
0.655591
206
1,896
5.781553
0.223301
0.080605
0.058774
0.054576
0.444165
0.391268
0.312343
0.287154
0.287154
0.287154
0
0.098532
0.245253
1,896
48
80
39.5
0.733753
0.007911
0
0.25641
0
0
0.206493
0.120809
0
0
0
0
0.128205
1
0.153846
false
0
0.102564
0
0.282051
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a14721d51acde99d2ac0e4f6c8f439015ceb27
2,759
py
Python
hebsafeharbor/identifier/signals/lexicon_based_recognizer.py
dkarmon/HebSafeHarbor
fdad7481c74feb78f8c3265c327eae7712cf16ce
[ "MIT" ]
3
2022-03-10T14:41:54.000Z
2022-03-28T11:18:56.000Z
hebsafeharbor/identifier/signals/lexicon_based_recognizer.py
8400TheHealthNetwork/HebSafeHarbor
f618ca76d995e6fb74c0cb2f01478c7efd2d8836
[ "MIT" ]
2
2022-03-06T10:39:27.000Z
2022-03-07T12:42:13.000Z
hebsafeharbor/identifier/signals/lexicon_based_recognizer.py
dkarmon/HebSafeHarbor
fdad7481c74feb78f8c3265c327eae7712cf16ce
[ "MIT" ]
3
2022-02-15T09:50:08.000Z
2022-02-22T08:43:26.000Z
from typing import List from presidio_analyzer import EntityRecognizer, RecognizerResult, AnalysisExplanation from presidio_analyzer.nlp_engine import NlpArtifacts from hebsafeharbor.common.terms_recognizer import TermsRecognizer class LexiconBasedRecognizer(EntityRecognizer): """ A class which extends the EntityRecognizer (@Presidio) and recognizes entities based on a lexicon """ DEFAULT_CONFIDENCE_LEVEL = 0.7 # expected confidence level for this recognizer def __init__(self, name: str, supported_entity: str, phrase_list: List[str], supported_language: str = "he", allowed_prepositions: List[str] = None): """ Initializes Hebrew LexiconBasedRecognizer :param name: recognizer's name :param supported_entity: entity type to be associated with the entities recognized by the lexicon based recognizer :param phrase_list: lexicon's phrases :param supported_language: the language that the recognizer supports. Hebrew is the default :param allowed_prepositions: prepositions that are allowed to be recognized as part of the entity (in addition to the lexicon phrase itself). An empty list (which means prepositions are not allowed) is the default """ super().__init__(name=name, supported_entities=[supported_entity], supported_language=supported_language) self.terms_recognizer = TermsRecognizer(phrase_list) self.allowed_prepositions = allowed_prepositions if allowed_prepositions else [] def load(self) -> None: """No loading is required.""" pass def analyze( self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts ) -> List[RecognizerResult]: """ Recognize entities based on lexicon :param text: text for recognition :param entities: supported entities :param nlp_artifacts: artifacts of the nlp engine :return: list of entities recognized based on the lexicon """ results = [] terms_offsets = self.terms_recognizer(text, prefixes=self.allowed_prepositions) # Iterate over the Automaton offsets and create Recognizer result for each of them for start_offset, length in terms_offsets: result = RecognizerResult( entity_type=self.supported_entities[0], start=start_offset, end=start_offset + length, score=self.DEFAULT_CONFIDENCE_LEVEL, analysis_explanation=AnalysisExplanation(self.name, self.DEFAULT_CONFIDENCE_LEVEL), recognition_metadata={RecognizerResult.RECOGNIZER_NAME_KEY: self.name} ) results.append(result) return results
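An instantiation sketch for the recognizer above (the entity type, phrase list and prepositions are illustrative; analyze() does not use nlp_artifacts in this implementation, so it can be called directly, although in practice it runs inside a Presidio AnalyzerEngine):

recognizer = LexiconBasedRecognizer(
    name="hospital_recognizer",
    supported_entity="MEDICAL_ORGANIZATION",
    phrase_list=["הדסה", "שיבא"],            # illustrative lexicon phrases
    allowed_prepositions=["ב", "ל", "מ"],    # common Hebrew prepositions
)
matches = recognizer.analyze(text="אושפז בהדסה", entities=["MEDICAL_ORGANIZATION"], nlp_artifacts=None)
for m in matches:
    print(m.entity_type, m.start, m.end, m.score)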
43.793651
117
0.697717
303
2,759
6.188119
0.356436
0.0608
0.0352
0.0256
0
0
0
0
0
0
0
0.001432
0.240667
2,759
62
118
44.5
0.893556
0.357738
0
0
0
0
0.001238
0
0
0
0
0
0
1
0.103448
false
0.034483
0.137931
0
0.344828
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a42ef8c15bd224503fc4a06fa31dab2756c605
35,966
py
Python
my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/subcommands_test.py
cyx233/vim_config
f09c9206344c17df20a05dd2c08a02f098a7e873
[ "MIT" ]
null
null
null
my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/subcommands_test.py
cyx233/vim_config
f09c9206344c17df20a05dd2c08a02f098a7e873
[ "MIT" ]
null
null
null
my_plugins/YouCompleteMe/third_party/ycmd/ycmd/tests/clangd/subcommands_test.py
cyx233/vim_config
f09c9206344c17df20a05dd2c08a02f098a7e873
[ "MIT" ]
null
null
null
# encoding: utf-8 # # Copyright (C) 2018 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function from __future__ import division from hamcrest.core.base_matcher import BaseMatcher from hamcrest import ( assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp ) from pprint import pprint import requests import os.path from ycmd.tests.clangd import ( IsolatedYcmd, SharedYcmd, PathToTestFile, RunAfterInitialized ) from ycmd.tests.test_utils import ( BuildRequest, ChunkMatcher, CombineRequest, LineColMatcher, LocationMatcher, ErrorMatcher, WithRetry, WaitUntilCompleterServerReady ) from ycmd.utils import ReadFile # This test is isolated to trigger objcpp hooks, rather than fetching completer # from cache. @IsolatedYcmd() def Subcommands_DefinedSubcommands_test( app ): file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) RunAfterInitialized( app, { 'request': { 'completer_target': 'filetype_default', 'line_num': 10, 'column_num': 3, 'filetype': 'objcpp', 'filepath': file_path }, 'expect': { 'response': requests.codes.ok, 'data': contains( *sorted( [ 'ExecuteCommand', 'FixIt', 'Format', 'GetDoc', 'GetDocImprecise', 'GetType', 'GetTypeImprecise', 'GoTo', 'GoToDeclaration', 'GoToDefinition', 'GoToImprecise', 'GoToInclude', 'GoToReferences', 'RefactorRename', 'RestartServer' ] ) ) }, 'route': '/defined_subcommands', } ) @SharedYcmd def Subcommands_GoTo_ZeroBasedLineAndColumn_test( app ): file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' ) RunAfterInitialized( app, { 'request': { 'contents': ReadFile( file_path ), 'completer_target': 'filetype_default', 'command_arguments': [ 'GoToDefinition' ], 'line_num': 10, 'column_num': 3, 'filetype': 'cpp', 'filepath': file_path }, 'expect': { 'response': requests.codes.ok, 'data': { 'filepath': os.path.abspath( file_path ), 'line_num': 2, 'column_num': 8 } }, 'route': '/run_completer_command', } ) @SharedYcmd def RunGoToTest_all( app, folder, command, test ): filepath = PathToTestFile( folder, test[ 'req' ][ 0 ] ) common_request = { 'completer_target' : 'filetype_default', 'filepath' : filepath, 'command_arguments': [ command ], 'contents' : ReadFile( filepath ), 'filetype' : 'cpp' } request = common_request request.update( { 'line_num' : test[ 'req' ][ 1 ], 'column_num': test[ 'req' ][ 2 ], } ) response = test[ 'res' ] if isinstance( response, list ): expect = { 'response': requests.codes.ok, 'data': contains( *[ LocationMatcher( PathToTestFile( folder, os.path.normpath( location[ 0 ] ) ), location[ 1 ], location[ 2 ] ) for location in response ] ) } elif isinstance( response, tuple ): expect = { 'response': requests.codes.ok, 'data': LocationMatcher( PathToTestFile( folder, os.path.normpath( response[ 0 ] ) ), response[ 1 ], response[ 2 ] ) } else: expect = { 'response': 
requests.codes.internal_server_error, 'data': ErrorMatcher( RuntimeError, test[ 'res' ] ) } RunAfterInitialized( app, { 'request': request, 'route' : '/run_completer_command', 'expect' : expect } ) def Subcommands_GoTo_all_test(): tests = [ # Local::x -> definition/declaration of x { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) }, # Local::in_line -> definition/declaration of Local::in_line { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) }, # Local -> definition/declaration of Local { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) }, # Local::out_of_line -> definition of Local::out_of_line { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 14, 13 ) }, # GoToDeclaration alternates between definition and declaration { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) }, { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) }, # test -> definition and declaration of test { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) }, { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) }, # Unicøde { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) }, # Another_Unicøde { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) }, # Expected failures { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' }, ] for test in tests: for cmd in [ 'GoToDefinition', 'GoTo', 'GoToImprecise' ]: yield RunGoToTest_all, '', cmd, test def Subcommands_GoToDeclaration_all_test(): tests = [ # Local::x -> definition/declaration of x { 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) }, # Local::in_line -> definition/declaration of Local::in_line { 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) }, # Local -> definition/declaration of Local { 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) }, # Local::out_of_line -> declaration of Local::out_of_line { 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 11, 10 ) }, # GoToDeclaration alternates between definition and declaration { 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) }, { 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) }, # test -> definition and declaration of test { 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) }, { 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) }, # Unicøde { 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) }, # Another_Unicøde { 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) }, { 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) }, # Expected failures { 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' }, ] for test in tests: yield RunGoToTest_all, '', 'GoToDeclaration', test def Subcommands_GoToInclude_test(): tests = [ { 'req': ( 'main.cpp', 1, 6 ), 'res': ( 'a.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 2, 14 ), 'res': ( 'system/a.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 3, 1 ), 'res': ( 'quote/b.hpp', 1, 1 ) }, # FIXME: should fail since b.hpp is included with angled brackets but its # folder is added with -iquote. 
{ 'req': ( 'main.cpp', 4, 10 ), 'res': ( 'quote/b.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 5, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) }, { 'req': ( 'main.cpp', 6, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) }, # Expected failures { 'req': ( 'main.cpp', 7, 1 ), 'res': 'Cannot jump to location' }, { 'req': ( 'main.cpp', 10, 13 ), 'res': 'Cannot jump to location' }, ] for test in tests: for cmd in [ 'GoToInclude', 'GoTo', 'GoToImprecise' ]: yield RunGoToTest_all, 'test-include', cmd, test def Subcommands_GoToReferences_test(): tests = [ # Function { 'req': ( 'goto.cc', 14, 21 ), 'res': [ ( 'goto.cc', 11, 10 ), ( 'goto.cc', 14, 13 ), ( 'goto.cc', 25, 22 ) ] }, # Namespace { 'req': ( 'goto.cc', 24, 17 ), 'res': [ ( 'goto.cc', 2, 11 ), ( 'goto.cc', 14, 6 ), ( 'goto.cc', 23, 14 ), ( 'goto.cc', 24, 15 ), ( 'goto.cc', 25, 15 ) ] }, # Expected failure { 'req': ( 'goto.cc', 27, 8 ), 'res': 'Cannot jump to location' }, ] for test in tests: yield RunGoToTest_all, '', 'GoToReferences', test @SharedYcmd def RunGetSemanticTest( app, filepath, filetype, test, command, response = requests.codes.ok ): contents = ReadFile( filepath ) common_args = { 'completer_target' : 'filetype_default', 'command_arguments': command, 'line_num' : 10, 'column_num' : 3, 'filepath' : filepath, 'contents' : contents, 'filetype' : filetype } args = test[ 0 ] if response == requests.codes.ok: if not isinstance( test[ 1 ], BaseMatcher ): expected = has_entry( 'message', contains_string( test[ 1 ] ) ) else: expected = has_entry( 'message', test[ 1 ] ) else: expected = test[ 1 ] request = common_args request.update( args ) test = { 'request': request, 'route': '/run_completer_command', 'expect': { 'response': response, 'data': expected } } RunAfterInitialized( app, test ) def Subcommands_GetType_test(): tests = [ # Basic pod types [ { 'line_num': 24, 'column_num': 3 }, 'Foo' ], # [ { 'line_num': 12, 'column_num': 2 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 8 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 9 }, 'Foo' ], [ { 'line_num': 12, 'column_num': 10 }, 'Foo' ], # [ { 'line_num': 13, 'column_num': 3 }, 'int' ], [ { 'line_num': 13, 'column_num': 7 }, 'int' ], # [ { 'line_num': 15, 'column_num': 7 }, 'char' ], # Function # [ { 'line_num': 22, 'column_num': 2 }, 'int main()' ], [ { 'line_num': 22, 'column_num': 6 }, 'int main()' ], # Declared and canonical type # On Ns:: [ { 'line_num': 25, 'column_num': 3 }, 'namespace Ns' ], # On Type (Type) # [ { 'line_num': 25, 'column_num': 8 }, # 'Ns::Type => Ns::BasicType<char>' ], # On "a" (Ns::Type) # [ { 'line_num': 25, 'column_num': 15 }, # 'Ns::Type => Ns::BasicType<char>' ], # [ { 'line_num': 26, 'column_num': 13 }, # 'Ns::Type => Ns::BasicType<char>' ], # Cursor on decl for refs & pointers [ { 'line_num': 39, 'column_num': 3 }, 'Foo' ], [ { 'line_num': 39, 'column_num': 11 }, 'Foo &' ], [ { 'line_num': 39, 'column_num': 15 }, 'Foo' ], [ { 'line_num': 40, 'column_num': 3 }, 'Foo' ], [ { 'line_num': 40, 'column_num': 11 }, 'Foo *' ], [ { 'line_num': 40, 'column_num': 18 }, 'Foo' ], # [ { 'line_num': 42, 'column_num': 3 }, 'const Foo &' ], [ { 'line_num': 42, 'column_num': 16 }, 'const struct Foo &' ], # [ { 'line_num': 43, 'column_num': 3 }, 'const Foo *' ], [ { 'line_num': 43, 'column_num': 16 }, 'const struct Foo *' ], # Cursor on usage [ { 'line_num': 45, 'column_num': 13 }, 'const struct Foo' ], # [ { 'line_num': 45, 'column_num': 19 }, 'const int' ], [ { 'line_num': 46, 'column_num': 13 }, 'const struct Foo *' ], # [ { 'line_num': 46, 'column_num': 20 }, 'const int' ], [ { 'line_num': 47, 
'column_num': 12 }, 'Foo' ], [ { 'line_num': 47, 'column_num': 17 }, 'int' ], [ { 'line_num': 48, 'column_num': 12 }, 'Foo *' ], [ { 'line_num': 48, 'column_num': 18 }, 'int' ], # Auto in declaration # [ { 'line_num': 28, 'column_num': 3 }, 'struct Foo &' ], # [ { 'line_num': 28, 'column_num': 11 }, 'struct Foo &' ], [ { 'line_num': 28, 'column_num': 18 }, 'struct Foo' ], # [ { 'line_num': 29, 'column_num': 3 }, 'Foo *' ], # [ { 'line_num': 29, 'column_num': 11 }, 'Foo *' ], [ { 'line_num': 29, 'column_num': 18 }, 'Foo' ], # [ { 'line_num': 31, 'column_num': 3 }, 'const Foo &' ], # [ { 'line_num': 31, 'column_num': 16 }, 'const Foo &' ], # [ { 'line_num': 32, 'column_num': 3 }, 'const Foo *' ], # [ { 'line_num': 32, 'column_num': 16 }, 'const Foo *' ], # Auto in usage # [ { 'line_num': 34, 'column_num': 14 }, 'const Foo' ], # [ { 'line_num': 34, 'column_num': 21 }, 'const int' ], # [ { 'line_num': 35, 'column_num': 14 }, 'const Foo *' ], # [ { 'line_num': 35, 'column_num': 22 }, 'const int' ], [ { 'line_num': 36, 'column_num': 13 }, 'Foo' ], [ { 'line_num': 36, 'column_num': 19 }, 'int' ], # [ { 'line_num': 37, 'column_num': 13 }, 'Foo *' ], [ { 'line_num': 37, 'column_num': 20 }, 'int' ], # Unicode [ { 'line_num': 51, 'column_num': 13 }, 'Unicøde *' ], # Bound methods # On Win32, methods pick up an __attribute__((thiscall)) to annotate their # calling convention. This shows up in the type, which isn't ideal, but # also prohibitively complex to try and strip out. [ { 'line_num': 53, 'column_num': 15 }, matches_regexp( r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ], [ { 'line_num': 54, 'column_num': 18 }, matches_regexp( r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ], ] for subcommand in [ 'GetType', 'GetTypeImprecise' ]: for test in tests: yield ( RunGetSemanticTest, PathToTestFile( 'GetType_Clang_test.cc' ), 'cpp', test, [ subcommand ] ) def Subcommands_GetDoc_test(): tests = [ # from local file [ { 'line_num': 5, 'column_num': 10 }, 'docstring', requests.codes.ok ], # from header [ { 'line_num': 6, 'column_num': 10 }, 'docstring', requests.codes.ok ], # no docstring [ { 'line_num': 7, 'column_num': 7 }, 'int x = 3', requests.codes.ok ], # no hover [ { 'line_num': 8, 'column_num': 1 }, ErrorMatcher( RuntimeError, 'No hover information.' ), requests.codes.server_error ] ] for subcommand in [ 'GetDoc', 'GetDocImprecise' ]: for test in tests: yield ( RunGetSemanticTest, PathToTestFile( 'GetDoc_Clang_test.cc' ), 'cpp', test, [ subcommand ], test[ 2 ] ) @SharedYcmd def RunFixItTest( app, line, column, lang, file_path, check ): contents = ReadFile( file_path ) language_options = { 'cpp11': { 'filetype' : 'cpp', }, 'cuda': { 'filetype' : 'cuda', }, 'objective-c': { 'filetype' : 'objc', }, } args = { 'completer_target' : 'filetype_default', 'contents' : contents, 'filepath' : file_path, 'command_arguments': [ 'FixIt' ], 'line_num' : line, 'column_num' : column, } args.update( language_options[ lang ] ) test = { 'request': args, 'route': '/detailed_diagnostic' } # First get diags. 
diags = RunAfterInitialized( app, test ) while 'message' in diags and 'diagnostics' in diags[ 'message' ].lower(): receive_diags = { 'request': args, 'route': '/receive_messages' } RunAfterInitialized( app, receive_diags ) diags = RunAfterInitialized( app, test ) results = app.post_json( '/run_completer_command', BuildRequest( **args ) ).json pprint( results ) check( results ) def FixIt_Check_cpp11_Ins( results ): # First fixit # switch(A()) { // expected-error{{explicit conversion to}} assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'static_cast<int>(' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 16, 'column_num': 10 } ), 'end' : has_entries( { 'line_num': 16, 'column_num': 10 } ), } ), } ), has_entries( { 'replacement_text': equal_to( ')' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 16, 'column_num': 13 } ), 'end' : has_entries( { 'line_num': 16, 'column_num': 13 } ), } ), } ) ), 'location': has_entries( { 'line_num': 16, 'column_num': 0 } ) } ) ) } ) ) def FixIt_Check_cpp11_InsMultiLine( results ): # Similar to FixIt_Check_cpp11_1 but inserts split across lines # assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'static_cast<int>(' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 26, 'column_num': 7 } ), 'end' : has_entries( { 'line_num': 26, 'column_num': 7 } ), } ), } ), has_entries( { 'replacement_text': equal_to( ')' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 28, 'column_num': 2 } ), 'end' : has_entries( { 'line_num': 28, 'column_num': 2 } ), } ), } ) ), 'location': has_entries( { 'line_num': 25, 'column_num': 14 } ) } ) ) } ) ) def FixIt_Check_cpp11_Del( results ): # Removal of :: assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 35, 'column_num': 7 } ), 'end' : has_entries( { 'line_num': 35, 'column_num': 9 } ), } ), } ) ), 'location': has_entries( { 'line_num': 35, 'column_num': 7 } ) } ) ) } ) ) def FixIt_Check_cpp11_Repl( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 40, 'column_num': 6 } ), 'end' : has_entries( { 'line_num': 40, 'column_num': 9 } ), } ), } ) ), 'location': has_entries( { 'line_num': 40, 'column_num': 6 } ) } ) ) } ) ) def FixIt_Check_cpp11_DelAdd( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 3 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 4 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 9 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 9 } ), } ), } ), ), 'location': has_entries( { 'line_num': 48, 'column_num': 3 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 48, 'column_num': 15 } ), 'end' : has_entries( { 'line_num': 48, 'column_num': 17 } ), } ), } ), ), 'location': has_entries( { 
'line_num': 48, 'column_num': 3 } ) } ), ) } ) ) def FixIt_Check_objc( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'id' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 5, 'column_num': 3 } ), 'end' : has_entries( { 'line_num': 5, 'column_num': 3 } ), } ), } ) ), 'location': has_entries( { 'line_num': 5, 'column_num': 3 } ) } ) ) } ) ) def FixIt_Check_objc_NoFixIt( results ): # and finally, a warning with no fixits assert_that( results, equal_to( { 'fixits': [] } ) ) def FixIt_Check_cpp11_MultiFirst( results ): assert_that( results, has_entries( { 'fixits': contains( # first fix-it at 54,16 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), # second fix-it at 54,52 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ), } ), } ), ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 64 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 15 } ) } ), ) } ) ) def FixIt_Check_cpp11_MultiSecond( results ): assert_that( results, has_entries( { 'fixits': contains( # first fix-it at 54,16 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( 'foo' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 16 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), # second fix-it at 54,52 has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 52 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ), } ), } ), has_entries( { 'replacement_text': equal_to( '~' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 58 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ), } ), } ), ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '= default;' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 54, 'column_num': 64 } ), 'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ), } ), } ) ), 'location': has_entries( { 'line_num': 54, 'column_num': 51 } ) } ), ) } ) ) def FixIt_Check_unicode_Ins( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( has_entries( { 'replacement_text': equal_to( '=' ), 'range': has_entries( { 'start': has_entries( { 'line_num': 21, 'column_num': 9 } ), 'end' : has_entries( { 'line_num': 21, 
'column_num': 11 } ), } ), } ) ), 'location': has_entries( { 'line_num': 21, 'column_num': 16 } ) } ) ) } ) ) def FixIt_Check_cpp11_Note( results ): assert_that( results, has_entries( { 'fixits': contains( # First note: put parens around it has_entries( { 'text': contains_string( 'parentheses around the assignment' ), 'chunks': contains( ChunkMatcher( '(', LineColMatcher( 59, 8 ), LineColMatcher( 59, 8 ) ), ChunkMatcher( ')', LineColMatcher( 61, 12 ), LineColMatcher( 61, 12 ) ) ), 'location': LineColMatcher( 60, 1 ), } ), # Second note: change to == has_entries( { 'text': contains_string( '==' ), 'chunks': contains( ChunkMatcher( '==', LineColMatcher( 60, 8 ), LineColMatcher( 60, 9 ) ) ), 'location': LineColMatcher( 60, 1 ), } ), # Unresolved, requires /resolve_fixit request has_entries( { 'text': 'Extract subexpression to variable', 'resolve': True, 'command': has_entries( { 'command': 'clangd.applyTweak' } ) } ) ) } ) ) def FixIt_Check_cpp11_SpellCheck( results ): assert_that( results, has_entries( { 'fixits': contains( # Change to SpellingIsNotMyStrongPoint has_entries( { 'text': contains_string( "change 'SpellingIsNotMyStringPiont' to " "'SpellingIsNotMyStrongPoint'" ), 'chunks': contains( ChunkMatcher( 'SpellingIsNotMyStrongPoint', LineColMatcher( 72, 9 ), LineColMatcher( 72, 35 ) ) ), 'location': LineColMatcher( 72, 9 ), } ) ) } ) ) def FixIt_Check_cuda( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': contains_string( "change 'int' to 'void'" ), 'chunks': contains( ChunkMatcher( 'void', LineColMatcher( 3, 12 ), LineColMatcher( 3, 15 ) ) ), 'location': LineColMatcher( 3, 12 ), } ) ) } ) ) def FixIt_Check_SubexprExtract_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': 'Extract subexpression to variable', 'chunks': contains( ChunkMatcher( 'auto dummy = foo(i + 3);\n ', LineColMatcher( 84, 3 ), LineColMatcher( 84, 3 ) ), ChunkMatcher( 'dummy', LineColMatcher( 84, 10 ), LineColMatcher( 84, 22 ) ), ) } ) ) } ) ) def FixIt_Check_RawStringReplace_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': 'Convert to raw string', 'chunks': contains( ChunkMatcher( 'R"(\\\\r\\asd\n\\v)"', LineColMatcher( 80, 19 ), LineColMatcher( 80, 36 ) ), ) } ) ) } ) ) def FixIt_Check_MacroExpand_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': "Expand macro 'DECLARE_INT'", 'chunks': contains( ChunkMatcher( 'int i', LineColMatcher( 83, 3 ), LineColMatcher( 83, 17 ) ), ) } ) ) } ) ) def FixIt_Check_AutoExpand_Resolved( results ): assert_that( results, has_entries( { 'fixits': contains( has_entries( { 'text': "Expand auto type", 'chunks': contains( ChunkMatcher( 'const char *', LineColMatcher( 80, 1 ), LineColMatcher( 80, 6 ) ), ) } ) ) } ) ) def Subcommands_FixIt_all_test(): cfile = PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) mfile = PathToTestFile( 'objc', 'FixIt_Clang_objc.m' ) cufile = PathToTestFile( 'cuda', 'fixit_test.cu' ) ufile = PathToTestFile( 'unicode.cc' ) tests = [ # L # i C # n o # e l Lang File, Checker [ 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ], [ 25, 14, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ], [ 35, 7, 'cpp11', cfile, FixIt_Check_cpp11_Del ], [ 40, 6, 'cpp11', cfile, FixIt_Check_cpp11_Repl ], [ 48, 3, 'cpp11', cfile, FixIt_Check_cpp11_DelAdd ], [ 5, 3, 'objective-c', mfile, FixIt_Check_objc ], [ 7, 1, 'objective-c', mfile, FixIt_Check_objc_NoFixIt ], [ 3, 12, 'cuda', cufile, FixIt_Check_cuda 
], # multiple errors on a single line; both with fixits [ 54, 15, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ], # should put closest fix-it first? [ 54, 51, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ], # unicode in line for fixit [ 21, 16, 'cpp11', ufile, FixIt_Check_unicode_Ins ], # FixIt attached to a "child" diagnostic (i.e. a Note) [ 60, 1, 'cpp11', cfile, FixIt_Check_cpp11_Note ], # FixIt due to forced spell checking [ 72, 9, 'cpp11', cfile, FixIt_Check_cpp11_SpellCheck ], ] for test in tests: yield RunFixItTest, test[ 0 ], test[ 1 ], test[ 2 ], test[ 3 ], test[ 4 ] @WithRetry @SharedYcmd def RunRangedFixItTest( app, rng, expected ): contents = ReadFile( PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) ) args = { 'completer_target' : 'filetype_default', 'contents' : contents, 'filepath' : PathToTestFile( 'FixIt_Clang_cpp11.cpp' ), 'command_arguments': [ 'FixIt' ], 'range' : rng, 'filetype' : 'cpp' } app.post_json( '/event_notification', CombineRequest( args, { 'event_name': 'FileReadyToParse', } ), expect_errors = True ) WaitUntilCompleterServerReady( app, 'cpp' ) response = app.post_json( '/run_completer_command', BuildRequest( **args ) ).json args[ 'fixit' ] = response[ 'fixits' ][ 0 ] response = app.post_json( '/resolve_fixit', BuildRequest( **args ) ).json print( 'Resolved fixit response = ' ) print( response ) expected( response ) def Subcommands_FixIt_Ranged_test(): expand_auto_range = { 'start': { 'line_num': 80, 'column_num': 1 }, 'end': { 'line_num': 80, 'column_num': 4 }, } subexpression_extract_range = { 'start': { 'line_num': 84, 'column_num': 14 }, 'end': { 'line_num': 84, 'column_num': 20 }, } macro_expand_range = { 'start': { 'line_num': 83, 'column_num': 3 }, 'end': { 'line_num': 83, 'column_num': 13 }, } raw_string_range = { 'start': { 'line_num': 80, 'column_num': 19 }, 'end': { 'line_num': 80, 'column_num': 35 }, } tests = [ [ expand_auto_range, FixIt_Check_AutoExpand_Resolved ], [ macro_expand_range, FixIt_Check_MacroExpand_Resolved ], [ subexpression_extract_range, FixIt_Check_SubexprExtract_Resolved ], [ raw_string_range, FixIt_Check_RawStringReplace_Resolved ], ] for test in tests: yield RunRangedFixItTest, test[ 0 ], test[ 1 ] @WithRetry @SharedYcmd def Subcommands_FixIt_AlreadyResolved_test( app ): filename = PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) request = { 'completer_target' : 'filetype_default', 'contents' : ReadFile( filename ), 'filepath' : filename, 'command_arguments': [ 'FixIt' ], 'line_num' : 16, 'column_num' : 1, 'filetype' : 'cpp' } app.post_json( '/event_notification', CombineRequest( request, { 'event_name': 'FileReadyToParse', } ), expect_errors = True ) WaitUntilCompleterServerReady( app, 'cpp' ) expected = app.post_json( '/run_completer_command', BuildRequest( **request ) ).json print( 'expected = ' ) print( expected ) request[ 'fixit' ] = expected[ 'fixits' ][ 0 ] actual = app.post_json( '/resolve_fixit', BuildRequest( **request ) ).json print( 'actual = ' ) print( actual ) assert_that( actual, equal_to( expected ) ) @SharedYcmd def Subcommands_RefactorRename_test( app ): test = { 'request': { 'filetype': 'cpp', 'completer_target': 'filetype_default', 'contents': ReadFile( PathToTestFile( 'basic.cpp' ) ), 'filepath': PathToTestFile( 'basic.cpp' ), 'command_arguments': [ 'RefactorRename', 'Bar' ], 'line_num': 17, 'column_num': 4, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'fixits': contains( has_entries( { 'chunks': contains( ChunkMatcher( 'Bar', LineColMatcher( 1, 8 ), LineColMatcher( 1, 11 ) ), ChunkMatcher( 
'Bar', LineColMatcher( 9, 3 ), LineColMatcher( 9, 6 ) ), ChunkMatcher( '\n\n', LineColMatcher( 12, 2 ), LineColMatcher( 15, 1 ) ), ChunkMatcher( 'Bar', LineColMatcher( 15, 8 ), LineColMatcher( 15, 11 ) ), ChunkMatcher( ' ', LineColMatcher( 15, 46 ), LineColMatcher( 16, 1 ) ), ChunkMatcher( 'Bar', LineColMatcher( 17, 3 ), LineColMatcher( 17, 6 ) ), ChunkMatcher( '', LineColMatcher( 17, 14 ), LineColMatcher( 17, 15 ) ), ChunkMatcher( ' ', LineColMatcher( 17, 17 ), LineColMatcher( 17, 17 ) ), ChunkMatcher( ' ', LineColMatcher( 17, 19 ), LineColMatcher( 17, 19 ) ), ) } ) ) } ) }, 'route': '/run_completer_command' } RunAfterInitialized( app, test )
34.783366
79
0.508564
3,639
35,966
4.819181
0.12366
0.07584
0.041512
0.050408
0.560358
0.484575
0.41267
0.356446
0.30467
0.27211
0
0.041237
0.331813
35,966
1,033
80
34.817038
0.688499
0.113858
0
0.488372
0
0
0.20837
0.015273
0
0
0
0.000968
0.023256
1
0.040392
false
0
0.014688
0
0.05508
0.011016
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a4651e3c80a36d3030fc52e231138dba1e43c1
1,032
py
Python
sentiment/config.py
TheRensselaerIDEA/covid19_tweet_ids
fee7d951b11cf2650e48668614c30672179ab3af
[ "MIT" ]
null
null
null
sentiment/config.py
TheRensselaerIDEA/covid19_tweet_ids
fee7d951b11cf2650e48668614c30672179ab3af
[ "MIT" ]
null
null
null
sentiment/config.py
TheRensselaerIDEA/covid19_tweet_ids
fee7d951b11cf2650e48668614c30672179ab3af
[ "MIT" ]
null
null
null
""" Config class containing all the settings for running sentiment scoring tool """ import jsonpickle class Config(object): """Container for sentiment scoring tool settings. """ def __init__(self): """Initializes the Config instance. """ #Elasticsearch settings self.elasticsearch_host = "" self.elasticsearch_verify_certs = False self.elasticsearch_index_name = "" self.elasticsearch_batch_size = 500 self.elasticsearch_timeout_secs = 30 #Processing settings self.sentiment_modelpath = "" self.sentiment_max_seq_length = 512 self.sleep_idle_secs = 5 self.sleep_not_idle_secs = 0.01 self.log_level = "ERROR" @staticmethod def load(filepath): """Loads the config from a JSON file. Args: filepath: path of the JSON file. """ with open(filepath, "r") as file: json = file.read() config = jsonpickle.decode(json) return config
27.157895
75
0.622093
113
1,032
5.477876
0.584071
0.137318
0.06462
0
0
0
0
0
0
0
0
0.016506
0.295543
1,032
38
76
27.157895
0.834938
0.281977
0
0
0
0
0.008658
0
0
0
0
0
0
1
0.105263
false
0
0.052632
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a4d5bc6f7dc36e1d1fd72f273d7cc3af846e4b
9,184
py
Python
tests/ut/python/dataset/test_invert.py
GuoSuiming/mindspore
48afc4cfa53d970c0b20eedfb46e039db2a133d5
[ "Apache-2.0" ]
4
2021-01-26T09:14:01.000Z
2021-01-26T09:17:24.000Z
tests/ut/python/dataset/test_invert.py
forwhat461/mindspore
59a277756eb4faad9ac9afcc7fd526e8277d4994
[ "Apache-2.0" ]
null
null
null
tests/ut/python/dataset/test_invert.py
forwhat461/mindspore
59a277756eb4faad9ac9afcc7fd526e8277d4994
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing Invert op in DE
"""
import numpy as np

import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger
from util import visualize_list, save_and_check_md5, diff_mse

DATA_DIR = "../data/dataset/testImageNetData/train/"

GENERATE_GOLDEN = False


def test_invert_py(plot=False):
    """
    Test Invert python op
    """
    logger.info("Test Invert Python op")

    # Original Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)), F.ToTensor()])

    ds_original = data_set.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                        axis=0)

    # Color Inverted Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Resize((224, 224)), F.Invert(), F.ToTensor()])

    ds_invert = data_set.map(operations=transforms_invert, input_columns="image")

    ds_invert = ds_invert.batch(512)

    for idx, (image, _) in enumerate(ds_invert):
        if idx == 0:
            images_invert = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_invert = np.append(images_invert,
                                      np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                      axis=0)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = np.mean((images_invert[i] - images_original[i]) ** 2)
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_original, images_invert)


def test_invert_c(plot=False):
    """
    Test Invert Cpp op
    """
    logger.info("Test Invert cpp op")

    # Original Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = [C.Decode(), C.Resize(size=[224, 224])]

    ds_original = data_set.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = image.asnumpy()
        else:
            images_original = np.append(images_original, image.asnumpy(), axis=0)

    # Invert Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transform_invert = [C.Decode(), C.Resize(size=[224, 224]), C.Invert()]

    ds_invert = data_set.map(operations=transform_invert, input_columns="image")

    ds_invert = ds_invert.batch(512)

    for idx, (image, _) in enumerate(ds_invert):
        if idx == 0:
            images_invert = image.asnumpy()
        else:
            images_invert = np.append(images_invert, image.asnumpy(), axis=0)

    if plot:
        visualize_list(images_original, images_invert)

    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_invert[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))


def test_invert_py_c(plot=False):
    """
    Test Invert Cpp op and python op
    """
    logger.info("Test Invert cpp and python op")

    # Invert Images in cpp
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))],
                            input_columns=["image"])

    ds_c_invert = data_set.map(operations=C.Invert(), input_columns="image")

    ds_c_invert = ds_c_invert.batch(512)

    for idx, (image, _) in enumerate(ds_c_invert):
        if idx == 0:
            images_c_invert = image.asnumpy()
        else:
            images_c_invert = np.append(images_c_invert, image.asnumpy(), axis=0)

    # invert images in python
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))],
                            input_columns=["image"])

    transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [lambda img: img.astype(np.uint8), F.ToPIL(), F.Invert(), np.array])

    ds_p_invert = data_set.map(operations=transforms_p_invert, input_columns="image")

    ds_p_invert = ds_p_invert.batch(512)

    for idx, (image, _) in enumerate(ds_p_invert):
        if idx == 0:
            images_p_invert = image.asnumpy()
        else:
            images_p_invert = np.append(images_p_invert, image.asnumpy(), axis=0)

    num_samples = images_c_invert.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_p_invert[i], images_c_invert[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    if plot:
        visualize_list(images_c_invert, images_p_invert, visualize_mode=2)


def test_invert_one_channel():
    """
    Test Invert cpp op with one channel image
    """
    logger.info("Test Invert C Op With One Channel Images")

    c_op = C.Invert()

    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
                                            lambda img: np.array(img[:, :, 0])],
                                input_columns=["image"])

        data_set.map(operations=c_op, input_columns="image")

    except RuntimeError as e:
        logger.info("Got an exception in DE: {}".format(str(e)))
        assert "The shape" in str(e)


def test_invert_md5_py():
    """
    Test Invert python op with md5 check
    """
    logger.info("Test Invert python op with md5 check")

    # Generate dataset
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = mindspore.dataset.transforms.py_transforms.Compose(
        [F.Decode(), F.Invert(), F.ToTensor()])

    data = data_set.map(operations=transforms_invert, input_columns="image")
    # Compare with expected md5 from images
    filename = "invert_01_result_py.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)


def test_invert_md5_c():
    """
    Test Invert cpp op with md5 check
    """
    logger.info("Test Invert cpp op with md5 check")

    # Generate dataset
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_invert = [C.Decode(),
                         C.Resize(size=[224, 224]),
                         C.Invert(),
                         F.ToTensor()]

    data = data_set.map(operations=transforms_invert, input_columns="image")
    # Compare with expected md5 from images
    filename = "invert_01_result_c.npz"
    save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)


if __name__ == "__main__":
    test_invert_py(plot=False)
    test_invert_c(plot=False)
    test_invert_py_c(plot=False)
    test_invert_one_channel()
    test_invert_md5_py()
    test_invert_md5_c()
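The tests above compare original and inverted images via per-sample MSE; the Invert op itself is simply 255 - pixel on 8-bit images. A self-contained numpy sketch of that check, with random data standing in for the ImageNet samples:

import numpy as np

images = np.random.randint(0, 256, size=(4, 224, 224, 3), dtype=np.uint8)
inverted = 255 - images  # what color inversion computes per pixel on uint8 data

# per-sample MSE, mirroring the loops in the tests above
mse = np.array([np.mean((inverted[i].astype(np.float64) -
                         images[i].astype(np.float64)) ** 2)
                for i in range(images.shape[0])])
print("MSE= {}".format(np.mean(mse)))  # large, since inversion moves most pixels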
35.875
111
0.579377
1,095
9,184
4.641096
0.153425
0.047226
0.023613
0.047226
0.749311
0.689886
0.615899
0.550767
0.512003
0.484652
0
0.019874
0.309669
9,184
255
112
36.015686
0.781703
0.117051
0
0.530201
0
0
0.048209
0.010518
0
0
0
0
0.006711
1
0.040268
false
0
0.04698
0
0.087248
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a4fdcadf45ca16af301e1936af3cd96dd967fd
748
py
Python
puzzle/tests/test_candy.py
aliciawyy/dmining
513f6f036f8f258281e1282fef052a74bf9cc3d3
[ "Apache-2.0" ]
null
null
null
puzzle/tests/test_candy.py
aliciawyy/dmining
513f6f036f8f258281e1282fef052a74bf9cc3d3
[ "Apache-2.0" ]
9
2017-10-25T10:03:36.000Z
2018-06-12T22:49:22.000Z
puzzle/tests/test_candy.py
aliciawyy/dmining
513f6f036f8f258281e1282fef052a74bf9cc3d3
[ "Apache-2.0" ]
null
null
null
from parameterized import parameterized
from numpy.testing import TestCase

from .. import candy


class TestCollectCandies(TestCase):

    @parameterized.expand(
        [(5, 5, 12, [[2, 1, 1, 1, 1],
                     [2, 2, 1, 1, 1],
                     [1, 2, 1, 1, 1],
                     [2, 2, 1, 1, 3],
                     [2, 2, 2, 2, 2]])]
    )
    def test_candy(self, n, m, t, candies):
        collector = candy.CollectCandies(n, m, t, candies)
        for pos, expected in [[(1, 1), [(0, 1), (2, 1), (1, 0), (1, 2)]],
                              [(0, 0), [(1, 0), (0, 1)]],
                              [(4, 4), [(3, 4), (4, 3)]]]:
            self.assertListEqual(
                collector.get_next_positions(pos), expected + [pos])
        self.assertEqual(collector.get_max_sum(), 27)
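CollectCandies itself is not shown in this record; a minimal sketch of a neighbour enumeration consistent with the expectations above (orthogonal in-grid neighbours in up/down/left/right order, with the current position appended last, since staying put is allowed) would be:

def get_next_positions(pos, n, m):
    # hypothetical free function mirroring candy.CollectCandies.get_next_positions
    i, j = pos
    neighbours = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
    inside = [(a, b) for a, b in neighbours if 0 <= a < n and 0 <= b < m]
    return inside + [pos]

# corner cell keeps only the two in-grid moves, matching the test's (0, 0) case
assert get_next_positions((0, 0), 5, 5) == [(1, 0), (0, 1), (0, 0)]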
35.619048
73
0.481283
102
748
3.480392
0.372549
0.061972
0.042254
0.033803
0.08169
0.053521
0.039437
0
0
0
0
0.105368
0.32754
748
20
74
37.4
0.600398
0
0
0
0
0
0
0
0
0
0
0
0.117647
1
0.058824
false
0
0.176471
0
0.294118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a74f63c4315c5c81ce34abcbe9eb4483cc9b1f
4,783
py
Python
tests/test_dynamodbHandler.py
unfoldingWord-dev/python-aws-tools
8e856697ab07c5c33e60cde2d82ac805dec3ddf3
[ "MIT" ]
1
2017-08-23T22:31:23.000Z
2017-08-23T22:31:23.000Z
tests/test_dynamodbHandler.py
unfoldingWord-dev/python-aws-tools
8e856697ab07c5c33e60cde2d82ac805dec3ddf3
[ "MIT" ]
null
null
null
tests/test_dynamodbHandler.py
unfoldingWord-dev/python-aws-tools
8e856697ab07c5c33e60cde2d82ac805dec3ddf3
[ "MIT" ]
null
null
null
from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest

import d43_aws_tools as aws_tools
from boto3.dynamodb.conditions import Attr


class DynamoDBHandlerTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        with mock.patch("d43_aws_tools.dynamodb_handler.boto3", mock.MagicMock()):
            cls.handler = aws_tools.dynamodb_handler.DynamoDBHandler("table_name")
        cls.handler.table = mock.MagicMock()

    def setUp(self):
        self.handler.table.reset_mock()

    def test_get_item(self):
        """Test a successful invocation of `get_item`."""
        expected = dict(field1="1", field2="2")
        self.handler.table.get_item.return_value = {
            "Item": expected
        }
        self.assertEqual(self.handler.get_item("key"), expected)

    def test_get_item_malformed(self):
        """Test an unsuccessful invocation of `get_item`."""
        self.handler.table.get_item.return_value = {
            "TheWrongKey": dict(field1="1", field2="2")
        }
        self.assertIsNone(self.handler.get_item("key"))

    def test_insert_item(self):
        """Test a successful invocation of `insert_item`."""
        data = dict(x="x", y="y", three=3)
        self.handler.insert_item(data)
        self.handler.table.put_item.assert_called_once_with(Item=data)

    def test_update_item(self):
        """Test a successful invocation of `update_item`."""
        key = {"id": 1}
        data = {"age": 40, "name": "John Doe"}
        self.handler.update_item(key, data)
        self.handler.table.update_item.assert_called_once()
        _, kwargs = self.handler.table.update_item.call_args
        self.assertIn("Key", kwargs)
        self.assertEqual(kwargs["Key"], key)
        self.assertIn("UpdateExpression", kwargs)
        # ignore whitespace and order of assignments
        expr = kwargs["UpdateExpression"].replace(" ", "")
        self.assertTrue(expr.startswith("SET"))
        self.assertIn("age=:age", expr)
        self.assertIn("#item_name=:name", expr)
        self.assertIn("ExpressionAttributeValues", kwargs)
        self.assertEqual(kwargs["ExpressionAttributeValues"],
                         {":age": 40, ":name": "John Doe"})
        self.assertIn("ExpressionAttributeNames", kwargs)
        self.assertEqual(kwargs["ExpressionAttributeNames"],
                         {"#item_name": "name"})

    def test_delete_item(self):
        """Test a successful invocation of `delete_item`."""
        key = {"id": 1234}
        self.handler.delete_item(key)
        self.handler.table.delete_item.assert_called_once_with(Key=key)

    def test_query_item(self):
        """Test a successful invocation of `query_item`."""
        for cond in ("ne", "lt", "lte", "gt", "gte",
                     "begins_with", "is_in", "contains"):
            self.handler.table.reset_mock()
            query = {
                "age": {
                    "condition": "eq",
                    "value": 25
                },
                "full_name": {
                    "condition": cond,
                    "value": "John Doe"
                }
            }
            data = {"age": 30, "full_name": "John Doe"}
            self.handler.table.scan.return_value = {"Items": data}
            self.assertEqual(self.handler.query_items(query), data)
            self.handler.table.scan.assert_called_once()

    def test_query_bool_item(self):
        """Test a successful invocation of `query_item`
        with a False boolean query."""
        for cond in ("ne", "lt", "lte", "gt", "gte",
                     "begins_with", "is_in", "contains"):
            self.handler.table.reset_mock()
            query = {
                "ready": False
            }
            data = {"age": 30, "full_name": "John Doe", "ready": False}
            self.handler.table.scan.return_value = {"Items": data}
            self.assertEqual(self.handler.query_items(query), data)
            self.handler.table.scan.assert_called_once()
            err_msg = 'query_items: Expecting FilterExpression parameter for table.scan() but non found'
            try:
                self.handler.table.scan.assert_called_once_with()
                # If the scan ran without an argument this is a failure
                self.assertTrue(False, err_msg)
            except Exception as e:
                if err_msg in str(e):
                    raise e

    def test_query_item_no_query(self):
        """Test an invocation of `query_item` with no query."""
        data = {"age": 30, "full_name": "John Doe"}
        self.handler.table.scan.return_value = {"Items": data}
        self.assertEqual(self.handler.query_items(), data)
        self.handler.table.scan.assert_called_once_with()
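test_update_item pins down the shape of the UpdateExpression the handler must build, including aliasing the reserved word `name` through ExpressionAttributeNames. A hedged sketch of expression construction that satisfies those assertions (an illustration, not the actual d43_aws_tools implementation; real DynamoDB reserves far more words than shown here):

RESERVED = {"name"}  # illustrative; DynamoDB's reserved-word list is much longer

def build_update_kwargs(key, data):
    names, values, assignments = {}, {}, []
    for field, value in data.items():
        # reserved attribute names must be referenced via a #-prefixed alias
        alias = "#item_{}".format(field) if field in RESERVED else field
        if field in RESERVED:
            names[alias] = field
        values[":{}".format(field)] = value
        assignments.append("{} = :{}".format(alias, field))
    kwargs = {
        "Key": key,
        "UpdateExpression": "SET " + ", ".join(assignments),
        "ExpressionAttributeValues": values,
    }
    if names:
        kwargs["ExpressionAttributeNames"] = names
    return kwargs

# produces "SET age = :age, #item_name = :name" plus the value/name maps
# asserted in test_update_item above
kw = build_update_kwargs({"id": 1}, {"age": 40, "name": "John Doe"})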
40.533898
104
0.59147
548
4,783
4.979927
0.25
0.096739
0.093807
0.051301
0.439355
0.364969
0.335654
0.250641
0.219861
0.187615
0
0.009283
0.279323
4,783
117
105
40.880342
0.78242
0.10391
0
0.23913
0
0
0.135314
0.031589
0
0
0
0
0.25
1
0.108696
false
0
0.054348
0
0.173913
0.01087
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a75032397b40d87190a129b70b4805e05f8c42
799
py
Python
app.py
sbustamante/heroku_app
6c8ff0b570750f3fe53ec67e24b71641167d53ce
[ "MIT" ]
null
null
null
app.py
sbustamante/heroku_app
6c8ff0b570750f3fe53ec67e24b71641167d53ce
[ "MIT" ]
null
null
null
app.py
sbustamante/heroku_app
6c8ff0b570750f3fe53ec67e24b71641167d53ce
[ "MIT" ]
null
null
null
from dash import Dash, html, dcc
import plotly.express as px
import pandas as pd

app = Dash(__name__)
server = app.server

# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
df = pd.DataFrame({
    "Fruit": ["Apples", "Oranges", "Bananas", "Apples", "Oranges", "Bananas"],
    "Amount": [4, 1, 2, 2, 4, 5],
    "City": ["SF", "SF", "SF", "Montreal", "Montreal", "Montreal"]
})

fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")

app.layout = html.Div(children=[
    html.H1(children='Hello Dash'),

    html.Div(children='''
        Dash: A web application framework for your data.
    '''),

    dcc.Graph(
        id='example-graph',
        figure=fig
    )
])

if __name__ == '__main__':
    app.run_server(debug=True)
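The same figure can be built and inspected without running the Dash server, which helps when iterating on the chart itself; a minimal sketch assuming plotly and pandas are installed (the smaller frame and output file name are placeholders):

import pandas as pd
import plotly.express as px

df = pd.DataFrame({
    "Fruit": ["Apples", "Oranges"],
    "Amount": [4, 1],
    "City": ["SF", "Montreal"],
})
fig = px.bar(df, x="Fruit", y="Amount", color="City", barmode="group")
fig.write_html("bar.html")  # static preview; no callbacks or server needed

The `server = app.server` line exposes the underlying Flask WSGI object so a production server can import it, e.g. (assumed, not shown in this record) `gunicorn app:server` in a Heroku Procfile.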
24.96875
78
0.617021
111
799
4.324324
0.621622
0.033333
0.083333
0
0
0
0
0
0
0
0
0.010903
0.196496
799
32
79
24.96875
0.73676
0.126408
0
0
0
0
0.284483
0
0
0
0
0
0
1
0
false
0
0.130435
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a86aa8bb5513e6175425568029999ed308c9e2
23,759
py
Python
peter_sslers/web/lib/form_utils.py
aptise/peter_sslers
1dcae3fee0c1f4c67ae8a614aed7e2a3121e88b0
[ "MIT" ]
35
2016-04-21T18:55:31.000Z
2022-03-30T08:22:43.000Z
peter_sslers/web/lib/form_utils.py
aptise/peter_sslers
1dcae3fee0c1f4c67ae8a614aed7e2a3121e88b0
[ "MIT" ]
8
2018-05-23T13:38:49.000Z
2021-03-19T21:05:44.000Z
peter_sslers/web/lib/form_utils.py
aptise/peter_sslers
1dcae3fee0c1f4c67ae8a614aed7e2a3121e88b0
[ "MIT" ]
2
2016-08-18T21:07:11.000Z
2017-01-11T09:47:40.000Z
# pypi import six # local from ...lib import db as lib_db from ...lib import utils from ...model import objects as model_objects from ...model import utils as model_utils from . import formhandling # ============================================================================== def decode_args(getcreate_args): """ support for Python2/3 """ if six.PY3: for (k, v) in list(getcreate_args.items()): if isinstance(v, bytes): getcreate_args[k] = v.decode("utf8") return getcreate_args # standardized mapping for `model_utils.DomainsChallenged` to a formStash DOMAINS_CHALLENGED_FIELDS = { "http-01": "domain_names_http01", "dns-01": "domain_names_dns01", } class AcmeAccountUploadParser(object): """ An AcmeAccount may be uploaded multiple ways: * a single PEM file * an intra-associated three file triplet from a Certbot installation This parser operates on a validated FormEncode results object (via `pyramid_formencode_classic`) """ # overwritten in __init__ getcreate_args = None formStash = None # tracked acme_account_provider_id = None account_key_pem = None le_meta_jsons = None le_pkey_jsons = None le_reg_jsons = None private_key_cycle_id = None private_key_technology_id = None upload_type = None # pem OR letsencrypt def __init__(self, formStash): self.formStash = formStash self.getcreate_args = {} def require_new(self, require_contact=None, require_technology=True): """ routine for creating a NEW AcmeAccount (peter_sslers generates the credentials) :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ formStash = self.formStash acme_account_provider_id = formStash.results.get( "acme_account_provider_id", None ) if acme_account_provider_id is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="acme_account_provider_id", message="No provider submitted." 
) private_key_cycle = formStash.results.get("account__private_key_cycle", None) if private_key_cycle is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_cycle", message="No PrivateKey cycle submitted.", ) private_key_cycle_id = model_utils.PrivateKeyCycle.from_string( private_key_cycle ) private_key_technology_id = None private_key_technology = formStash.results.get( "account__private_key_technology", None ) if private_key_technology: private_key_technology_id = model_utils.KeyTechnology.from_string( private_key_technology ) if not private_key_technology_id and require_technology: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_technology", message="No PrivateKey technology submitted.", ) contact = formStash.results.get("account__contact", None) if not contact and require_contact: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__contact", message="`account__contact` is required.", ) getcreate_args = {} self.contact = getcreate_args["contact"] = contact self.acme_account_provider_id = getcreate_args[ "acme_account_provider_id" ] = acme_account_provider_id self.private_key_cycle_id = getcreate_args[ "private_key_cycle_id" ] = private_key_cycle_id self.private_key_technology_id = getcreate_args[ "private_key_technology_id" ] = private_key_technology_id self.getcreate_args = decode_args(getcreate_args) def require_upload(self, require_contact=None, require_technology=None): """ routine for uploading an exiting AcmeAccount+AcmeAccountKey :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic :param require_technology: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ formStash = self.formStash # ------------------- # do a quick parse... requirements_either_or = ( ( "account_key_file_pem", # "acme_account_provider_id", ), ( "account_key_file_le_meta", "account_key_file_le_pkey", "account_key_file_le_reg", ), ) failures = [] passes = [] for idx, option_set in enumerate(requirements_either_or): option_set_results = [ True if formStash.results[option_set_item] is not None else False for option_set_item in option_set ] # if we have any item, we need all of them if any(option_set_results): if not all(option_set_results): failures.append( "If any of %s is provided, all must be provided." % str(option_set) ) else: passes.append(idx) if (len(passes) != 1) or failures: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form( "You must upload `account_key_file_pem` or all of (`account_key_file_le_meta`, `account_key_file_le_pkey`, `account_key_file_le_reg`)." 
) # ------------------- # validate the provider option # will be None unless a pem is uploaded # required for PEM, ignored otherwise acme_account_provider_id = formStash.results.get( "acme_account_provider_id", None ) private_key_cycle = formStash.results.get("account__private_key_cycle", None) if private_key_cycle is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_cycle", message="No PrivateKey cycle submitted.", ) private_key_cycle_id = model_utils.PrivateKeyCycle.from_string( private_key_cycle ) private_key_technology_id = None private_key_technology = formStash.results.get( "account__private_key_technology", None ) if private_key_technology is not None: private_key_technology_id = model_utils.KeyTechnology.from_string( private_key_technology ) if not private_key_technology_id and require_technology: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__private_key_technology", message="No PrivateKey technology submitted.", ) # require `contact` when uploading a PEM file if formStash.results["account_key_file_pem"] is not None: require_contact = True contact = formStash.results.get("account__contact") if not contact and require_contact: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="account__contact", message="`account__contact` is required.", ) getcreate_args = {} self.contact = getcreate_args["contact"] = contact self.private_key_cycle_id = getcreate_args[ "private_key_cycle_id" ] = private_key_cycle_id self.private_key_technology_id = getcreate_args[ "private_key_technology_id" ] = private_key_technology_id if formStash.results["account_key_file_pem"] is not None: if acme_account_provider_id is None: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="acme_account_provider_id", message="No provider submitted." ) self.upload_type = "pem" self.acme_account_provider_id = getcreate_args[ "acme_account_provider_id" ] = acme_account_provider_id self.account_key_pem = getcreate_args[ "key_pem" ] = formhandling.slurp_file_field(formStash, "account_key_file_pem") else: # note that we use `jsonS` to indicate a string self.le_meta_jsons = getcreate_args[ "le_meta_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_meta") self.le_pkey_jsons = getcreate_args[ "le_pkey_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_pkey") self.le_reg_jsons = getcreate_args[ "le_reg_jsons" ] = formhandling.slurp_file_field(formStash, "account_key_file_le_reg") self.getcreate_args = decode_args(getcreate_args) class _PrivateKeyUploadParser(object): """ A PrivateKey is not a complex upload to parse itself This code exists to mimic the AcmeAccount uploading. 
""" # overwritten in __init__ getcreate_args = None formStash = None # tracked private_key_pem = None upload_type = None # pem def __init__(self, formStash): self.formStash = formStash self.getcreate_args = {} def require_upload(self): """ routine for uploading an exiting PrivateKey """ formStash = self.formStash getcreate_args = {} if formStash.results["private_key_file_pem"] is not None: self.upload_type = "pem" self.private_key_pem = getcreate_args[ "key_pem" ] = formhandling.slurp_file_field(formStash, "private_key_file_pem") self.getcreate_args = decode_args(getcreate_args) class _AcmeAccountSelection(object): """ Class used to manage an uploaded AcmeAccount """ selection = None upload_parsed = None # instance of AcmeAccountUploadParser or None AcmeAccount = None class _PrivateKeySelection(object): selection = None upload_parsed = None # instance of AcmeAccountUploadParser or None private_key_strategy__requested = None PrivateKey = None @property def private_key_strategy_id__requested(self): return model_utils.PrivateKeyStrategy.from_string( self.private_key_strategy__requested ) def parse_AcmeAccountSelection( request, formStash, account_key_option=None, allow_none=None, require_contact=None, ): """ :param formStash: an instance of `pyramid_formencode_classic.FormStash` :param account_key_option: :param allow_none: :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ account_key_pem = None account_key_pem_md5 = None dbAcmeAccount = None is_global_default = None # handle the explicit-option acmeAccountSelection = _AcmeAccountSelection() if account_key_option == "account_key_file": # this will handle form validation and raise errors. parser = AcmeAccountUploadParser(formStash) # this will have: `contact`, `private_key_cycle`, `private_key_technology` parser.require_upload(require_contact=require_contact) # update our object acmeAccountSelection.selection = "upload" acmeAccountSelection.upload_parsed = parser return acmeAccountSelection else: if account_key_option == "account_key_global_default": acmeAccountSelection.selection = "global_default" account_key_pem_md5 = formStash.results["account_key_global_default"] is_global_default = True elif account_key_option == "account_key_existing": acmeAccountSelection.selection = "existing" account_key_pem_md5 = formStash.results["account_key_existing"] elif account_key_option == "account_key_reuse": acmeAccountSelection.selection = "reuse" account_key_pem_md5 = formStash.results["account_key_reuse"] elif account_key_option == "none": if not allow_none: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form( "This form does not support no AcmeAccount selection." 
) # note the lowercase "none"; this is an explicit "no item" selection # only certain routes allow this acmeAccountSelection.selection = "none" account_key_pem_md5 = None return acmeAccountSelection else: formStash.fatal_form( message="Invalid `account_key_option`", ) if not account_key_pem_md5: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="You did not provide a value" ) dbAcmeAccount = lib_db.get.get__AcmeAccount__by_pemMd5( request.api_context, account_key_pem_md5, is_active=True ) if not dbAcmeAccount: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="The selected AcmeAccount is not enrolled in the system.", ) if is_global_default and not dbAcmeAccount.is_global_default: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=account_key_option, message="The selected AcmeAccount is not the current default.", ) acmeAccountSelection.AcmeAccount = dbAcmeAccount return acmeAccountSelection # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("There was an error validating your form.") def parse_PrivateKeySelection(request, formStash, private_key_option=None): private_key_pem = None private_key_pem_md5 = None PrivateKey = None # :class:`model.objects.PrivateKey` # handle the explicit-option privateKeySelection = _PrivateKeySelection() if private_key_option == "private_key_file": # this will handle form validation and raise errors. parser = _PrivateKeyUploadParser(formStash) parser.require_upload() # update our object privateKeySelection.selection = "upload" privateKeySelection.upload_parsed = parser privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["upload"] ) return privateKeySelection else: if private_key_option == "private_key_existing": privateKeySelection.selection = "existing" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["existing"] ) private_key_pem_md5 = formStash.results["private_key_existing"] elif private_key_option == "private_key_reuse": privateKeySelection.selection = "reuse" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["reuse"] ) private_key_pem_md5 = formStash.results["private_key_reuse"] elif private_key_option in ( "private_key_generate", "private_key_for_account_key", ): dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0) if not dbPrivateKey: formStash.fatal_field( field=private_key_option, message="Could not load the placeholder PrivateKey.", ) privateKeySelection.PrivateKey = dbPrivateKey if private_key_option == "private_key_generate": privateKeySelection.selection = "generate" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy["generate"] ) elif private_key_option == "private_key_for_account_key": privateKeySelection.selection = "private_key_for_account_key" privateKeySelection.private_key_strategy__requested = ( model_utils.PrivateKeySelection_2_PrivateKeyStrategy[ "private_key_for_account_key" ] ) return privateKeySelection else: # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("Invalid `private_key_option`") if not private_key_pem_md5: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=private_key_option, 
message="You did not provide a value" ) dbPrivateKey = lib_db.get.get__PrivateKey__by_pemMd5( request.api_context, private_key_pem_md5, is_active=True ) if not dbPrivateKey: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field=private_key_option, message="The selected PrivateKey is not enrolled in the system.", ) privateKeySelection.PrivateKey = dbPrivateKey return privateKeySelection # `formStash.fatal_form()` will raise `FormInvalid()` formStash.fatal_form("There was an error validating your form.") def form_key_selection(request, formStash, require_contact=None): """ :param formStash: an instance of `pyramid_formencode_classic.FormStash` :param require_contact: ``True`` if required; ``False`` if not; ``None`` for conditional logic """ acmeAccountSelection = parse_AcmeAccountSelection( request, formStash, account_key_option=formStash.results["account_key_option"], require_contact=require_contact, ) if acmeAccountSelection.selection == "upload": key_create_args = acmeAccountSelection.upload_parsed.getcreate_args key_create_args["event_type"] = "AcmeAccount__insert" key_create_args[ "acme_account_key_source_id" ] = model_utils.AcmeAccountKeySource.from_string("imported") (dbAcmeAccount, _is_created,) = lib_db.getcreate.getcreate__AcmeAccount( request.api_context, **key_create_args ) acmeAccountSelection.AcmeAccount = dbAcmeAccount privateKeySelection = parse_PrivateKeySelection( request, formStash, private_key_option=formStash.results["private_key_option"], ) if privateKeySelection.selection == "upload": key_create_args = privateKeySelection.upload_parsed.getcreate_args key_create_args["event_type"] = "PrivateKey__insert" key_create_args[ "private_key_source_id" ] = model_utils.PrivateKeySource.from_string("imported") key_create_args["private_key_type_id"] = model_utils.PrivateKeyType.from_string( "standard" ) ( dbPrivateKey, _is_created, ) = lib_db.getcreate.getcreate__PrivateKey__by_pem_text( request.api_context, **key_create_args ) privateKeySelection.PrivateKey = dbPrivateKey elif privateKeySelection.selection == "generate": dbPrivateKey = lib_db.get.get__PrivateKey__by_id(request.api_context, 0) if not dbPrivateKey: formStash.fatal_field( field="private_key_option", message="Could not load the placeholder PrivateKey for autogeneration.", ) privateKeySelection.PrivateKey = dbPrivateKey return (acmeAccountSelection, privateKeySelection) def form_domains_challenge_typed(request, formStash, http01_only=False): domains_challenged = model_utils.DomainsChallenged() domain_names_all = [] try: # 1: iterate over the submitted domains by segment for (target_, source_) in DOMAINS_CHALLENGED_FIELDS.items(): submitted_ = formStash.results.get(source_) if submitted_: # this function checks the domain names match a simple regex # it will raise a `ValueError("invalid domain")` on the first invalid domain submitted_ = utils.domains_from_string(submitted_) if submitted_: domain_names_all.extend(submitted_) domains_challenged[target_] = submitted_ # 2: ensure there are domains if not domain_names_all: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="no domain names submitted", ) # 3: ensure there is no overlap domain_names_all_set = set(domain_names_all) if len(domain_names_all) != len(domain_names_all_set): # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="a domain name can only be associated to one challenge 
type", ) # 4: maybe we only want http01 domains submitted? if http01_only: for (k, v) in domains_challenged.items(): if k == "http-01": continue if v: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="only http-01 domains are accepted by this form", ) except ValueError as exc: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="Error_Main", message="invalid domain names detected" ) return domains_challenged def form_single_domain_challenge_typed(request, formStash, challenge_type="http-01"): domains_challenged = model_utils.DomainsChallenged() # this function checks the domain names match a simple regex domain_names = utils.domains_from_string(formStash.results["domain_name"]) if not domain_names: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field(field="domain_name", message="Found no domain names") if len(domain_names) != 1: # `formStash.fatal_field()` will raise `FormFieldInvalid(FormInvalid)` formStash.fatal_field( field="domain_name", message="This endpoint currently supports only 1 domain name", ) domains_challenged[challenge_type] = domain_names return domains_challenged
39.271074
151
0.638032
2,444
23,759
5.864566
0.115385
0.062094
0.053024
0.035164
0.610898
0.572385
0.518384
0.497384
0.471848
0.442964
0
0.003051
0.282672
23,759
604
152
39.336093
0.837939
0.182373
0
0.424242
0
0
0.148851
0.045716
0
0
0
0
0
1
0.027972
false
0.006993
0.018648
0.002331
0.130536
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07a919ed87f13258649cbf2c9c6e2971a4de419e
5,568
py
Python
AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py
jlamperez/Vitis-Tutorials
9a5b611caabb5656bbb2879116e032227b164bfd
[ "Apache-2.0" ]
1
2022-03-09T06:15:43.000Z
2022-03-09T06:15:43.000Z
AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py
jlamperez/Vitis-Tutorials
9a5b611caabb5656bbb2879116e032227b164bfd
[ "Apache-2.0" ]
null
null
null
AI_Engine_Development/Feature_Tutorials/07-AI-Engine-Floating-Point/Utils/GenerationLib.py
jlamperez/Vitis-Tutorials
9a5b611caabb5656bbb2879116e032227b164bfd
[ "Apache-2.0" ]
null
null
null
# # Copyright 2020–2021 Xilinx, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np from math import * import random def GenerateTestVector(dtval,pliow,NPhases_s,NStreams_s,NSamples_s,NFrames_s,SeqType_s,Basename_s): print('DtVal : ',dtval.get()) print('PLIO width : ',pliow.get()) print('NPhases : ',NPhases_s.get()) print('NStreams : ',NStreams_s.get()) print('NSamples : ',NSamples_s.get()) print('NFrames : ',NFrames_s.get()) print('Type of Sequence : ',SeqType_s.get()) print('Base filename : ',Basename_s.get()) NPhases = int(NPhases_s.get()) NStreams = int(NStreams_s.get()) LFrame = int(NSamples_s.get()) NFrames = int(NFrames_s.get()) SequenceType = SeqType_s.get() Basename = Basename_s.get() #parameters that should be in the GUI # SequenceType ='Linear' # 'SinCos' 'Linear' 'Random' 'Dirac' # Basename = 'PhaseIn' NSamples = NPhases*NStreams*LFrame*NFrames; NSamples1 = NPhases*NStreams*LFrame*(NFrames+1); # A little longer to allow for delay in streams NBitsData = 32; if( dtval.get() == 'int16'): NBitsData = 16 HasImag = 0 if (dtval.get() == 'cint16'): HasImag = 1 if(SequenceType != 'SinCos' and SequenceType != 'Linear' and SequenceType != 'Random' and SequenceType != 'Dirac'): print ('Unknown Sequence Type') return # Create the overall signal that will be distributed over all streams # it is already separated in phases S = np.zeros((NPhases,int(NSamples1/NPhases),1+HasImag)) for i in range(int(NSamples1/NPhases)): for p in range (NPhases): k = i*NPhases+p if (SequenceType == 'SinCos'): vr = int(5000*cos(6.28*5/(NPhases*NStreams*LFrame)*k)) vi = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k)) elif (SequenceType == 'Linear'): vr = k vi = -k elif (SequenceType == 'Random'): vr = random.randint(-5000,5000) vi = random.randint(-5000,5000) elif (SequenceType == 'Dirac'): vr = 0 vi = 0 if(k%151 == 1): vr = 1 elif(k%151 == 40): vi = 1 elif(k%151 == 81): vr = 2 elif(k%151 == 115): vi = -2 # if(k%311 == 50): # vr = 1 # S[p,i,0] = # if(HasImag==1): # S[p,i,1] = int(5000*sin(6.28*5/(NPhases*NStreams*LFrame)*k)) S[p,i,0] = vr if (HasImag == 1 ): S[p,i,1] = vi PLIOwidth = int(pliow.get()) NSamplesPerLine = int(PLIOwidth/NBitsData) # Data are read in blocks of 128 bits (4 data in cint16) # Create an Input test Vector in TestInputS.txt FileNames = []; # Easiest case: 1 stream per AI Engine if (NStreams == 1): #Creates list of filenames for Phi in range(NPhases): FileNames.append(Basename+'_'+str(Phi)+'.txt') #Open all files fds = [open(path, 'w') for path in FileNames] #Fill all files with the right data for p in range(NPhases): fd = fds[p] for s in range(int(NSamples1/NPhases/NSamplesPerLine)): for d in range(NSamplesPerLine): index = s*NSamplesPerLine + d fd.write(str(int(S[p,index,0]))+' ') if(HasImag): fd.write(str(int(S[p,index,1]))+' ') fd.write('\n') for fd in fds: fd.close() if (NStreams == 2): #Creates list of filenames for Phi in range(NPhases): for Stream in range(NStreams): FileNames.append('PhaseIn_'+str(Phi)+'_'+str(Stream)+'.txt') # Hash table to associate data to streams 
NSamplesIn128bits = int(128/NBitsData ) H = np.zeros((int(NSamples1/NPhases/2),2)) H = H.astype('int32') index = np.zeros(2) index = index.astype('int32') for s in range(int(NSamples1/NPhases)): k = int(s/NSamplesIn128bits) # Block order i = k%2 # Which streams H[index[i],i] = s index[i] = index[i]+1 #Open all files fds = [open(path, 'w') for path in FileNames] #Fill all files with the right data for p in range(NPhases): for stream in range(2): fd = fds[2*p+stream] for s in range(int(NSamples1/NPhases/NSamplesPerLine/NStreams)): for d in range(NSamplesPerLine): index = s*NSamplesPerLine + d fd.write(str(int(S[p,H[index,stream],0]))+' ') if(HasImag): fd.write(str(int(S[p,H[index,stream],1]))+' ') fd.write('\n') for fd in fds: fd.close()
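
The two-stream branch above distributes samples by 128-bit blocks: sample s belongs to block k = s // NSamplesIn128bits, and blocks alternate between the two streams (i = k % 2). A self-contained restatement of that mapping, with example sizes (NBitsData=16 as for cint16, 32 samples):

# Standalone sketch of the block interleaving used in the NStreams == 2 branch.
NBitsData = 16
NSamplesIn128bits = 128 // NBitsData   # 8 samples per 128-bit block
streams = {0: [], 1: []}
for s in range(32):
    k = s // NSamplesIn128bits         # which 128-bit block
    i = k % 2                          # blocks alternate between the streams
    streams[i].append(s)
print(streams[0])  # blocks 0, 2, ... -> samples 0-7 and 16-23
print(streams[1])  # blocks 1, 3, ... -> samples 8-15 and 24-31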
33.341317
119
0.541667
703
5,568
4.261735
0.28165
0.030374
0.038051
0.025367
0.281709
0.267023
0.267023
0.224967
0.194927
0.14219
0
0.038182
0.332076
5,568
166
120
33.542169
0.767142
0.231861
0
0.188119
0
0
0.051452
0
0
0
0
0
0
1
0.009901
false
0
0.029703
0
0.049505
0.089109
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07ad506b318b0e47d29c2d5e943847cb809ea5ef
721
py
Python
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/icm20x_icm20948_gyro_data_rate_test.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
47
2021-02-15T23:02:36.000Z
2022-03-04T21:30:03.000Z
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/icm20x_icm20948_gyro_data_rate_test.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
7
2021-02-19T20:00:08.000Z
2022-01-14T10:51:12.000Z
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/icm20x_icm20948_gyro_data_rate_test.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
14
2021-02-20T17:40:56.000Z
2022-01-01T19:53:38.000Z
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time

import board
import busio

from adafruit_icm20x import ICM20948

cycles = 200

i2c = busio.I2C(board.SCL, board.SDA)
icm = ICM20948(i2c)

# Cycle between two data rates
# Best viewed in the Mu serial plotter where you can see how
# the data rate affects the resolution of the data
while True:
    icm.gyro_data_rate_divisor = 0  # minimum
    print("Data Rate:", icm.gyro_data_rate)
    time.sleep(2)
    for i in range(cycles):
        print(icm.gyro)

    icm.gyro_data_rate_divisor = 255  # maximum
    print("Data Rate:", icm.gyro_data_rate)
    time.sleep(2)
    for i in range(cycles):
        print(icm.gyro)
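
A short sketch of the divisor-to-rate relationship the example cycles through. The 1.1 kHz base rate is taken from the ICM-20948 datasheet and should be treated as an assumption here, not something the example itself states:

# Assumed formula: gyro ODR = 1.1 kHz / (1 + divisor), per the ICM-20948 datasheet.
def gyro_data_rate_hz(divisor: int) -> float:
    return 1100.0 / (1 + divisor)

print(gyro_data_rate_hz(0))    # ~1100 Hz (minimum divisor, fastest rate)
print(gyro_data_rate_hz(255))  # ~4.3 Hz  (maximum divisor, slowest rate)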
25.75
62
0.718447
112
721
4.526786
0.508929
0.110454
0.086785
0.118343
0.351085
0.2643
0.2643
0.2643
0.2643
0.2643
0
0.048443
0.198336
721
27
63
26.703704
0.82872
0.335645
0
0.444444
0
0
0.042463
0
0
0
0
0
0
1
0
false
0
0.222222
0
0.222222
0.222222
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07afdc77d12fe88ab48cead7329435544528b522
8,032
py
Python
utils/common.py
initialed85/eds-cctv-system
fcdb7e7e23327bf3a901d23d506b3915833027d1
[ "MIT" ]
null
null
null
utils/common.py
initialed85/eds-cctv-system
fcdb7e7e23327bf3a901d23d506b3915833027d1
[ "MIT" ]
null
null
null
utils/common.py
initialed85/eds-cctv-system
fcdb7e7e23327bf3a901d23d506b3915833027d1
[ "MIT" ]
null
null
null
import datetime import json import os from pathlib import Path from types import SimpleNamespace from typing import List from typing import NamedTuple, Union, Optional, Callable from uuid import uuid3, NAMESPACE_DNS from dateutil.parser import parse _VIDEO_SUFFIXES = [".mkv", ".mp4"] _IMAGE_SUFFIXES = [".jpg"] _PERMITTED_EXTENSIONS = _VIDEO_SUFFIXES + _IMAGE_SUFFIXES class PathDetails(NamedTuple): path: Path event_id: Optional[int] camera_id: Optional[int] timestamp: datetime.datetime camera_name: str is_image: bool is_lowres: bool class Event(SimpleNamespace): event_id: str timestamp: Union[datetime.datetime, str] camera_name: str high_res_image_path: str low_res_image_path: str high_res_video_path: str low_res_video_path: str def get_sorted_paths(path: Path) -> List[Path]: return sorted(Path(path).iterdir(), key=os.path.getmtime) def format_timestamp_for_go(timestamp: Union[datetime.datetime, str]) -> str: if isinstance(timestamp, str): timestamp = parse(timestamp) us = timestamp.strftime("%f") tz_raw = timestamp.strftime("%z") tz = "{}:{}".format(tz_raw[0:3], tz_raw[3:]) return timestamp.strftime(f"%Y-%m-%dT%H:%M:%S.{us}00{tz}") def parse_paths(paths: List[Path], tzinfo: datetime.tzinfo, parse_method: Callable) -> List[PathDetails]: return [ y for y in [parse_method(path=x, tzinfo=tzinfo) for x in paths if x is not None] if y is not None ] def build_event_for_some_path_details(some_path_details: List[PathDetails], path: Path): if len(some_path_details) != 4: raise ValueError( f"expected some_path_details to be 4 long (and related); instead it was {len(some_path_details)} long" ) event_ids = list(set([x.event_id for x in some_path_details])) if len(event_ids) != 1: raise ValueError( f"expected all PathDetails to have a common event_id; instead they were {event_ids}" ) camera_ids = list(set([x.camera_id for x in some_path_details])) if len(camera_ids) != 1: raise ValueError( f"expected all PathDetails to have a common camera_id; instead they were {camera_ids}" ) camera_names = list(set([x.camera_name for x in some_path_details])) if len(camera_names) != 1: raise ValueError( f"expected all PathDetails to have a common camera_name; instead they were {camera_names}" ) high_res_image_paths = list( set([x.path for x in some_path_details if x.is_image and not x.is_lowres]) ) if len(high_res_image_paths) != 1: raise ValueError( f"expected to find 1 high_res_image_path from PathDetails; instead found {high_res_image_paths}" ) low_res_image_paths = list( set([x.path for x in some_path_details if x.is_image and x.is_lowres]) ) if len(low_res_image_paths) != 1: raise ValueError( f"expected to find 1 low_res_image_path from PathDetails; instead found {low_res_image_paths}" ) high_res_video_paths = list( set([x.path for x in some_path_details if not x.is_image and not x.is_lowres]) ) if len(high_res_video_paths) != 1: raise ValueError( f"expected to find 1 high_res_video_path from PathDetails; instead found {high_res_video_paths}" ) low_res_video_paths = list( set([x.path for x in some_path_details if not x.is_image and x.is_lowres]) ) if len(low_res_video_paths) != 1: raise ValueError( f"expected to find 1 low_res_video_path from PathDetails; instead found {low_res_video_paths}" ) timestamp = sorted([x.timestamp for x in some_path_details])[0] high_res_image_path = high_res_image_paths[0] low_res_image_path = low_res_image_paths[0] high_res_video_path = high_res_video_paths[0] low_res_video_path = low_res_video_paths[0] # in Go: # eventId := uuid.NewSHA1( # uuid.NameSpaceDNS, # 
[]byte(fmt.Sprintf("%v, %v, %v, %v, %v", timestamp, highResImagePath, lowResImagePath, highResVideoPath, lowResVideoPath)), # ) event_id = uuid3( NAMESPACE_DNS, f"{format_timestamp_for_go(timestamp)}, {high_res_image_path}, {low_res_image_path}, {high_res_video_path}, {low_res_video_path}", ) return Event( event_id=str(event_id), timestamp=timestamp, camera_name=camera_names[0], high_res_image_path=str(path / high_res_image_path), low_res_image_path=str(path / low_res_image_path), high_res_video_path=str(path / high_res_video_path), low_res_video_path=str(path / low_res_video_path), ) def relate_path_details( some_path_details: List[PathDetails], get_key_methods: List[Callable] ) -> List[List[PathDetails]]: some_path_details_by_key = {} for path_details in some_path_details: keys = [x(path_details) for x in get_key_methods] for key in keys: some_path_details_by_key.setdefault(key, []) some_path_details_by_key[key] += [path_details] viable_some_path_details_by_key = { k: v for k, v in some_path_details_by_key.items() if len(v) == 4 } deduplicated_path_details = [] for some_path_details in viable_some_path_details_by_key.values(): if some_path_details not in deduplicated_path_details: deduplicated_path_details += [some_path_details] return deduplicated_path_details def build_events_for_related_path_details( related_path_details: List[List[PathDetails]], path: Path ) -> List[Event]: events: List[Event] = [] for some_path_details in related_path_details: events += [ build_event_for_some_path_details( some_path_details=some_path_details, path=path ) ] sorted_events = sorted(events, key=lambda x: x.timestamp) for event in sorted_events: event.timestamp = format_timestamp_for_go(timestamp=event.timestamp) return sorted_events def build_json_lines_from_events(events: List[Event]) -> str: return "\n".join( [ json.dumps( { "event_id": x.event_id, "timestamp": x.timestamp, "camera_name": x.camera_name, "high_res_image_path": x.high_res_image_path, "low_res_image_path": x.low_res_image_path, "high_res_video_path": x.high_res_video_path, "low_res_video_path": x.low_res_video_path, } ) for x in events ] ) def write_to_file(path: Path, data: str): with open(str(path), "w") as f: f.write(data) def rebuild_event_store(root_path: Path, tzinfo: datetime.tzinfo, json_path: Path, parse_method: Callable, get_key_methods: List[Callable]): print(f"getting sorted paths from {root_path}...") sorted_paths = get_sorted_paths(path=root_path) print(f"got {len(sorted_paths)} sorted paths") print("parsing sorted paths...") some_path_details = parse_paths(paths=sorted_paths, tzinfo=tzinfo, parse_method=parse_method) print(f"got {len(some_path_details)} parsed paths") print("relating parsed paths...") related_path_details = relate_path_details(some_path_details=some_path_details, get_key_methods=get_key_methods) print(f"got {len(related_path_details)} related paths") print("building events...") events = build_events_for_related_path_details( related_path_details=related_path_details, path=root_path ) print(f"built {len(events)} events") print("building json lines...") json_lines = build_json_lines_from_events(events=events) print(f"built {len(json_lines)} bytes") print(f"writing to {json_path}") write_to_file(path=json_path, data=json_lines) print("done.")
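
A sketch of what a `parse_method` callable is expected to produce, given how `parse_paths()` (which drops `None` results) and `relate_path_details()` consume it. The filename convention below is invented for illustration; only the `PathDetails` contract comes from the module above:

import re

# Hypothetical parse_method for paths shaped like
# "<event>-<camera>-<YYYYmmddHHMMSS>-<name>[-lowres].<ext>"; the naming
# convention is an assumption, not this project's actual one.
_PATTERN = re.compile(r"^(\d+)-(\d+)-(\d{14})-(.+?)(-lowres)?\.(\w+)$")

def example_parse_method(path: Path, tzinfo: datetime.tzinfo) -> Optional[PathDetails]:
    match = _PATTERN.match(path.name)
    if match is None:
        return None  # parse_paths() filters out None results
    event_id, camera_id, stamp, camera_name, lowres, suffix = match.groups()
    return PathDetails(
        path=path,
        event_id=int(event_id),
        camera_id=int(camera_id),
        timestamp=datetime.datetime.strptime(stamp, "%Y%m%d%H%M%S").replace(tzinfo=tzinfo),
        camera_name=camera_name,
        is_image=f".{suffix}" in _IMAGE_SUFFIXES,
        is_lowres=lowres is not None,
    )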
33.606695
140
0.671564
1,137
8,032
4.41073
0.135444
0.107478
0.095713
0.033898
0.44985
0.343968
0.302293
0.257228
0.218943
0.139182
0
0.00471
0.233441
8,032
238
141
33.747899
0.80981
0.022908
0
0.054645
0
0
0.169472
0.037108
0
0
0
0
0
1
0.04918
false
0
0.04918
0.016393
0.224044
0.065574
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b3b408f0f9c7313a3971b858fbe6beb4247623
1,733
py
Python
taattack/_datasets/dataset.py
linerxliner/ValCAT
e62985c6c64f6415bb2bb4716bd02d9686badd47
[ "MIT" ]
null
null
null
taattack/_datasets/dataset.py
linerxliner/ValCAT
e62985c6c64f6415bb2bb4716bd02d9686badd47
[ "MIT" ]
null
null
null
taattack/_datasets/dataset.py
linerxliner/ValCAT
e62985c6c64f6415bb2bb4716bd02d9686badd47
[ "MIT" ]
null
null
null
class Dataset:
    _data = None
    _first_text_col = 'text'
    _second_text_col = None
    _label_col = 'label'

    def __init__(self):
        self._idx = 0
        if self._data is None:
            raise Exception('Dataset is not loaded')

    def __iter__(self):
        return self

    def __next__(self):
        if self._idx >= len(self._data):
            raise StopIteration
        else:
            item = self._data.iloc[self._idx]
            self._idx += 1
            if self._second_text_col:
                return item[self._first_text_col], item[self._second_text_col], int(item[self._label_col])
            else:
                return item[self._first_text_col], int(item[self._label_col])

    def __getitem__(self, item):
        if isinstance(item, int):
            item = self._data.iloc[item]
            if self._second_text_col:
                return item[self._first_text_col], item[self._second_text_col], int(item[self._label_col])
            else:
                return item[self._first_text_col], int(item[self._label_col])
        elif isinstance(item, slice):
            start = item.start if item.start else 0
            stop = item.stop if item.stop else len(self._data)
            step = item.step if item.step else 1
            items = self._data.iloc[start:stop:step]
            if self._second_text_col:
                return [(item[self._first_text_col], item[self._second_text_col], int(item[self._label_col])) for _, item in items.iterrows()]
            else:
                return [(item[self._first_text_col], int(item[self._label_col])) for _, item in items.iterrows()]
        else:
            raise KeyError

    def __str__(self):
        return str(self._data)
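
A usage sketch, assuming pandas is available and that subclasses populate `_data` with a DataFrame before instantiation (the constructor raises otherwise); the column names follow the class defaults:

import pandas as pd

# Hypothetical subclass for illustration.
class ToyDataset(Dataset):
    _data = pd.DataFrame(
        {"text": ["good movie", "bad movie"], "label": [1, 0]}
    )

ds = ToyDataset()
print(ds[0])    # ("good movie", 1)
print(ds[0:2])  # [("good movie", 1), ("bad movie", 0)]
for text, label in ds:
    print(text, label)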
34.66
142
0.589729
227
1,733
4.105727
0.189427
0.145923
0.090129
0.109442
0.482833
0.482833
0.482833
0.482833
0.482833
0.482833
0
0.003336
0.308136
1,733
50
143
34.66
0.773978
0
0
0.292683
0
0
0.017301
0
0
0
0
0
0
1
0.121951
false
0
0
0.04878
0.439024
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b47bbb8eb7acb013788dd5e04ca55c384f4c1f
7,732
py
Python
scripts/extract_gs_citations.py
akhilpandey95/scholarlyimpact
215ae832c90f0564fa0301e4c3f1c99525617625
[ "MIT" ]
null
null
null
scripts/extract_gs_citations.py
akhilpandey95/scholarlyimpact
215ae832c90f0564fa0301e4c3f1c99525617625
[ "MIT" ]
18
2020-02-20T23:40:26.000Z
2020-10-20T04:05:43.000Z
scripts/extract_gs_citations.py
akhilpandey95/scholarlyimpact
215ae832c90f0564fa0301e4c3f1c99525617625
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # This Source Code Form is subject to the terms of the MIT # License. If a copy of the same was not distributed with this # file, You can obtain one at # https://github.com/akhilpandey95/scholarlyimpact/blob/master/LICENSE. import os import csv import glob import json import requests import subprocess import numpy as np import pandas as pd from tqdm import tqdm from ast import literal_eval from fp.fp import FreeProxy from torrequest import TorRequest from scholarly import scholarly from collections import Counter, OrderedDict from operator import attrgetter # class definition for Rate limiting class RateLimiter: """ Class object for putting a rate limit on the number of requests made Parameters ---------- No arguments Returns ------- Nothing """ def __init__(self, maxRate=5, timeUnit=1): self.timeUnit = timeUnit self.deque = deque(maxlen=maxRate) def __call__(self): if self.deque.maxlen == len(self.deque): cTime = time.time() if cTime - self.deque[0] > self.timeUnit: self.deque.append(cTime) return False else: return True self.deque.append(time.time()) return False # function for obtaining the citations using the dimensions web url def get_gs_citations_web(title): """ Use the google scholar web URL and requests API to obtain the citations for a given title of a scholarly article Parameters ---------- arg1 | title: str The title of a scholarly article Returns ------- Dictionary dict """ while True: try: # call the lumproxy object scholarly.use_lum_proxy() # make the query query = scholarly.search_pubs(title) # come out break except Exception as e: # come out and try again break # return the response dict return next(query) # function for assigning new IP address def assign_new_ip(text=False): """ Reset the identity using TorRequest Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the IP address tuple (old, morphed) Returns ------- boolean True/False """ try: # pass the hashed password req = TorRequest(password='scholarly_password') # return the ip address normal_identity = requests.get('http://ipecho.net/plain') # reset the identity using Tor req.reset_identity() # make a request now morphed_identity = req.get('http://ipecho.net/plain') # return the status depending on the flag if morphed_identity != normal_identity: if text == True: # return the ip address pairs as a tuple return (normal_identity.text, morphed_identity.text) else: return True else: # return just the status return False except: return False # function for assigning a new proxy def set_new_proxy(text=True): """ Reset the identity using FreeProxy Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the IP address tuple (old, morphed) Returns ------- Address fp.fp.FreeProxy """ while True: # call the freeproxy object proxy = FreeProxy(rand=True, timeout=1).get() # allocate the proxy address to scholarly proxy_works = scholarly.use_proxy(http=proxy, https=proxy) # check it the ip address works if proxy_works: # come out break # print the ip address depending on the text argument if text: # print the working ip print("Working proxy:", proxy) # return the proxy details return proxy # function for connecting tor to scholarly def scholarly_init_connection(): """ Bind TorRequest to Scholarly service Parameters ---------- No arguments Returns ------- Nothing """ while True: # assign new tor identity ips = assign_new_ip(text=True) # use the tor request for scholarly tor_req = scholarly.use_tor(tor_sock_port=9050, \ tor_control_port=9051, \ 
tor_pw="scholarly_password") if tor_req: # come out of the loop, when successful break # print the tor identity print("Working Tor identity:", ips[1]) # function for restarting the system tor service def restart_tor_system_service(text=False): """ Use the os module to restart the tor service Parameters ---------- arg1 [OPTIONAL]| text: bool A boolean flag to return the status of the command Returns ------- Boolean bool """ # subprocess command for stopping the tor service tor_stop = subprocess.Popen(['service', 'tor', 'stop']) # subprocess command for restarting the tor service tor_restart = subprocess.Popen(['service', 'tor', 'restart']) # subprocess command for restarting the tor service tor_status = subprocess.Popen(['service', 'tor', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) # if the label is set to true then print the output if text: for output in tor_status.stdout.readlines(): print(output.strip()) # pipe out the stdout, stderr for the subprocess stdout, stderr = tor_status.communicate() if len(stderr) > 0: # return False return False else: # return true if successful return True def get_articleInfo(title): """ Use the google scholar web URL and requests API to obtain the citations for a given title of a scholarly article Parameters ---------- arg1 | title: str The title of a scholarly article Returns ------- Dictionary dict """ while True: try: # init the connection with scholarly and tor scholarly_init_connection() # search for the query search_query = scholarly.search_pubs(title) # print success print("Got the results of the query") # come out of the loop break except Exception as e: # print error message print("Attempt Failed, patching new tor identity") # restart the system tor service restart_tor_system_service(text=False) # assign new connection again scholarly_init_connection() # obtain the bib entry of the scholarly article pub = next(search_query) # return the bib entry return pub if __name__ == '__main__': # iterate over the length length_of_file = len(open('paper_titles.txt').readlines()) # place the contents of the list into a file alt_list = open('paper_titles.txt').readlines() # iterate over the length of the file # write the results to a file for i in tqdm(range(length_of_file)): alt_info = open('paper_titles.txt', 'r+') cit_info = open('citations_gs.csv', 'a') cit_info.write(str(alt_list[i].strip( ).split('\t')[0]) + ',' + str(get_articleInfo(alt_list[i].strip().split('\t')[1]))) cit_info.write('\n') cit_info.close() alt_info.seek(0) alt_info.truncate() alt_info.writelines(alt_list[i+1:]) alt_info.close()
26.847222
91
0.602431
940
7,732
4.856383
0.269149
0.009858
0.015772
0.014896
0.24644
0.169332
0.146988
0.146988
0.126835
0.126835
0
0.004897
0.313373
7,732
287
92
26.940767
0.854963
0.390843
0
0.265487
0
0
0.072748
0
0
0
0
0
0
1
0.070796
false
0.017699
0.132743
0
0.318584
0.044248
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b56e4319a20d96b921852eb21d1c92a5f276de
4,672
py
Python
dist-packages/reportlab/pdfgen/pathobject.py
Jianwei-Wang/python2.7_lib
911b8e81512e5ac5f13e669ab46f7693ed897378
[ "PSF-2.0" ]
51
2015-01-20T19:50:34.000Z
2022-03-05T21:23:32.000Z
dist-packages/reportlab/pdfgen/pathobject.py
Jianwei-Wang/python2.7_lib
911b8e81512e5ac5f13e669ab46f7693ed897378
[ "PSF-2.0" ]
16
2015-11-15T04:23:43.000Z
2021-09-27T14:14:20.000Z
src/reportlab/pdfgen/pathobject.py
guildenstern70/pyfab
34d786fec17192591ac3c5f73913a9b04311695a
[ "MIT" ]
46
2015-03-28T10:18:14.000Z
2021-12-16T15:57:47.000Z
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pathobject.py
__version__=''' $Id$ '''
__doc__="""
PDFPathObject is an efficient way to draw paths on a Canvas. Do not
instantiate directly, obtain one from the Canvas instead.

Progress Reports:
8.83, 2000-01-13, gmcm: created from pdfgen.py
"""

from reportlab.pdfgen import pdfgeom
from reportlab.lib.rl_accel import fp_str


class PDFPathObject:
    """Represents a graphic path.  There are certain 'modes' to PDF
    drawing, and making a separate object to expose Path operations
    ensures they are completed with no run-time overhead.  Ask
    the Canvas for a PDFPath with getNewPathObject(); moveto/lineto/
    curveto wherever you want; add whole shapes; and then add it back
    into the canvas with one of the relevant operators.

    Path objects are probably not long, so we pack onto one line.

    The code argument allows a canvas to get the operations appended
    directly, so avoiding the final getCode.
    """
    def __init__(self,code=None):
        self._code = (code,[])[code is None]
        self._code_append = self._init_code_append

    def _init_code_append(self,c):
        assert c.endswith(' m') or c.endswith(' re'), 'path must start with a moveto or rect'
        code_append = self._code.append
        code_append('n')
        code_append(c)
        self._code_append = code_append

    def getCode(self):
        "pack onto one line; used internally"
        return ' '.join(self._code)

    def moveTo(self, x, y):
        self._code_append('%s m' % fp_str(x,y))

    def lineTo(self, x, y):
        self._code_append('%s l' % fp_str(x,y))

    def curveTo(self, x1, y1, x2, y2, x3, y3):
        self._code_append('%s c' % fp_str(x1, y1, x2, y2, x3, y3))

    def arc(self, x1,y1, x2,y2, startAng=0, extent=90):
        """Contributed to piddlePDF by Robert Kern, 28/7/99.
        Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
        starting at startAng degrees and covering extent degrees.   Angles
        start with 0 to the right (+x) and increase counter-clockwise.
        These should have x1<x2 and y1<y2.

        The algorithm is an elliptical generalization of the formulae in
        Jim Fitzsimmon's TeX tutorial <URL: http://www.tinaja.com/bezarc1.pdf>."""
        self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent))

    def arcTo(self, x1,y1, x2,y2, startAng=0, extent=90):
        """Like arc, but draws a line from the current point to
        the start if the start is not the current point."""
        self._curves(pdfgeom.bezierArc(x1,y1, x2,y2, startAng, extent),'lineTo')

    def rect(self, x, y, width, height):
        """Adds a rectangle to the path"""
        self._code_append('%s re' % fp_str((x, y, width, height)))

    def ellipse(self, x, y, width, height):
        """adds an ellipse to the path"""
        self._curves(pdfgeom.bezierArc(x, y, x + width,y + height, 0, 360))

    def _curves(self,curves,initial='moveTo'):
        getattr(self,initial)(*curves[0][:2])
        for curve in curves:
            self.curveTo(*curve[2:])

    def circle(self, x_cen, y_cen, r):
        """adds a circle to the path"""
        x1 = x_cen - r
        y1 = y_cen - r
        width = height = 2*r
        self.ellipse(x1, y1, width, height)

    def roundRect(self, x, y, width, height, radius):
        """Draws a rectangle with rounded corners. The corners are
        approximately quadrants of a circle, with the given radius."""
        #use a precomputed set of factors for the bezier approximation
        #to a circle. There are six relevant points on the x axis and y axis.
        #sketch them and it should all make sense!
        t = 0.4472 * radius

        x0 = x
        x1 = x0 + t
        x2 = x0 + radius
        x3 = x0 + width - radius
        x4 = x0 + width - t
        x5 = x0 + width

        y0 = y
        y1 = y0 + t
        y2 = y0 + radius
        y3 = y0 + height - radius
        y4 = y0 + height - t
        y5 = y0 + height

        self.moveTo(x2, y0)
        self.lineTo(x3, y0) #bottom row
        self.curveTo(x4, y0, x5, y1, x5, y2) #bottom right
        self.lineTo(x5, y3) #right edge
        self.curveTo(x5, y4, x4, y5, x3, y5) #top right
        self.lineTo(x2, y5) #top row
        self.curveTo(x1, y5, x0, y4, x0, y3) #top left
        self.lineTo(x0, y2) #left edge
        self.curveTo(x0, y1, x1, y0, x2, y0) #bottom left
        self.close()

    def close(self):
        "draws a line back to where it started"
        self._code_append('h')
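
A usage sketch: path objects are obtained from a canvas and drawn back onto it via reportlab's `canvas.beginPath()` and `canvas.drawPath()`; the output filename and coordinates below are arbitrary examples:

from reportlab.pdfgen import canvas

c = canvas.Canvas("rounded.pdf")
p = c.beginPath()                # returns a PDFPathObject
p.roundRect(100, 500, 200, 100, 20)
p.circle(350, 550, 40)
c.drawPath(p, stroke=1, fill=0)  # add the accumulated path to the page
c.save()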
36.5
109
0.627354
710
4,672
4.053521
0.35493
0.048645
0.038916
0.019458
0.12196
0.091035
0.068103
0.053509
0.053509
0.033357
0
0.041036
0.264555
4,672
127
110
36.787402
0.796566
0.376498
0
0
0
0
0.12196
0
0
0
0
0
0.013889
1
0.194444
false
0
0.027778
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b68a248a019f47e10cdf7898c67384df56c25f
1,447
py
Python
Madlibs/madlibs.py
nikhil-amin/python-mini-project
cd70a6a43408ce74cff501ce4d4658ab82260c2d
[ "MIT" ]
2
2021-08-20T08:59:28.000Z
2021-11-23T00:17:15.000Z
Madlibs/madlibs.py
nikhil-amin/python-mini-project
cd70a6a43408ce74cff501ce4d4658ab82260c2d
[ "MIT" ]
null
null
null
Madlibs/madlibs.py
nikhil-amin/python-mini-project
cd70a6a43408ce74cff501ce4d4658ab82260c2d
[ "MIT" ]
1
2022-01-06T16:35:43.000Z
2022-01-06T16:35:43.000Z
import random

print("Title : Eat, Drink, And Be Sick")

noun = []
for i in range(4):
    n = input("Enter noun : ")
    noun.append(n)

plural = []
for i in range(6):
    pn = input("Enter plural noun : ")
    plural.append(pn)

adjective = []
for i in range(2):
    a = input("Enter adjective : ")
    adjective.append(a)

adverb = input("Enter adverb : ")
letter = input("Enter any letter : ")
body_part = input("Enter any body part : ")

print("An inspector from the Department of Health and ", random.choice(noun), " Services paid a surprise visit to our ", random.choice(adjective), " school cafeteria.")
print("The lunch special, prepared by our ", random.choice(adjective), "dietician, was spaghetti and ", random.choice(noun), " balls with a choice of either a ", random.choice(noun), " salad or French ", random.choice(plural), ".")
print("The inspector found the meat-", random.choice(plural), " to be overcooked and discovered a live ", random.choice(noun), " in the fries,causing him to have a " + body_part + " ache.")
print("In response, he threw up all over his ", random.choice(plural), ".")
print("In his report, the inspector " + adverb + " recommended that the school cafeteria serve only nutritious ", random.choice(plural), " as well as low-calorie ", random.choice(plural), " and that all of the saturated ", random.choice(plural), " be eliminated.")
print("He rated the cafeteria a " + letter + "-minus.")
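
The three near-identical input loops could be collapsed into one helper; a small behavior-equivalent refactor sketch (the helper name is an invention):

# Sketch: one helper replaces the repeated input-collection loops.
def collect(prompt, count):
    return [input(prompt) for _ in range(count)]

noun = collect("Enter noun : ", 4)
plural = collect("Enter plural noun : ", 6)
adjective = collect("Enter adjective : ", 2)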
60.291667
270
0.676572
209
1,447
4.674641
0.430622
0.14739
0.110542
0.033777
0
0
0
0
0
0
0
0.002536
0.182446
1,447
23
271
62.913043
0.823331
0
0
0
0
0
0.483068
0
0
0
0
0
0
1
0
false
0
0.043478
0
0.043478
0.304348
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b6a4b93bbebd4e00153c0e368d7d75f4691489
7,337
py
Python
tests/zone_api_test/core/zone_manager_test.py
yfaway/zone-apis
4aa4120cb4a66812dac1d32e86e825bbafe652b8
[ "MIT" ]
1
2021-11-22T11:51:17.000Z
2021-11-22T11:51:17.000Z
tests/zone_api_test/core/zone_manager_test.py
yfaway/zone-apis
4aa4120cb4a66812dac1d32e86e825bbafe652b8
[ "MIT" ]
20
2021-01-31T00:20:34.000Z
2022-01-09T18:02:31.000Z
tests/zone_api_test/core/zone_manager_test.py
yfaway/zone-apis
4aa4120cb4a66812dac1d32e86e825bbafe652b8
[ "MIT" ]
null
null
null
from zone_api.core.zone_manager import ZoneManager from zone_api import platform_encapsulator as pe from zone_api.core.zone import Zone from zone_api.core.zone_event import ZoneEvent from zone_api.core.devices.dimmer import Dimmer from zone_api.core.devices.switch import Fan, Light, Switch from zone_api.core.devices.illuminance_sensor import IlluminanceSensor from zone_api.core.devices.motion_sensor import MotionSensor from zone_api.core.actions.turn_on_switch import TurnOnSwitch from zone_api_test.core.device_test import DeviceTest ILLUMINANCE_THRESHOLD_IN_LUX = 8 INVALID_ITEM_NAME = 'invalid item name' class ZoneManagerTest(DeviceTest): """ Unit tests for zone_manager.py. """ def setUp(self): items = [pe.create_switch_item('TestLightName'), pe.create_switch_item('TestMotionSensorName'), pe.create_number_item('IlluminanceSensorName'), pe.create_string_item('AstroSensorName'), pe.create_dimmer_item('TestDimmerName'), pe.create_switch_item('TestFanName'), ] self.set_items(items) super(ZoneManagerTest, self).setUp() [self.lightItem, self.motionSensorItem, self.illuminanceSensorItem, self.astroSensorItem, self.dimmerItem, self.fanItem] = items self.illuminanceSensor = IlluminanceSensor(self.illuminanceSensorItem) self.light = Light(self.lightItem, 2, ILLUMINANCE_THRESHOLD_IN_LUX) self.motionSensor = MotionSensor(self.motionSensorItem) self.dimmer = Dimmer(self.dimmerItem, 2, 100, "0-23:59") self.fan = Fan(self.fanItem, 2) self.zm = ZoneManager() def tearDown(self): self.zm.stop_auto_report_watch_dog() self.fan._cancel_timer() self.dimmer._cancel_timer() self.light._cancel_timer() super(ZoneManagerTest, self).tearDown() def testAddZone_validZone_zoneAdded(self): zone1 = Zone('ff') self.zm.add_zone(zone1) self.assertEqual(1, len(self.zm.get_zones())) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) def testGetZoneById_validZoneId_returnValidZone(self): zone1 = Zone('ff') self.zm.add_zone(zone1) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(zone1.get_name(), self.zm.get_zone_by_id(zone1.get_id()).get_name()) self.assertEqual(zone2.get_name(), self.zm.get_zone_by_id(zone2.get_id()).get_name()) def testGetZoneById_invalidZoneId_returnNone(self): self.assertTrue(self.zm.get_zone_by_id('invalid zone id') is None) def testRemoveZone_validZone_zoneRemoved(self): zone1 = Zone('ff') self.zm.add_zone(zone1) zone2 = Zone('2f') self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) self.zm.remove_zone(zone1) self.assertEqual(1, len(self.zm.get_zones())) self.zm.remove_zone(zone2) self.assertEqual(0, len(self.zm.get_zones())) def testContainingZone_validDevice_returnsCorrectZone(self): zone1 = Zone('ff').add_device(self.light) zone2 = Zone('sf').add_device(self.fan) self.zm.add_zone(zone1) self.zm.add_zone(zone2) self.assertEqual(zone1, self.zm.get_immutable_instance().get_containing_zone(self.light)) self.assertEqual(zone2, self.zm.get_immutable_instance().get_containing_zone(self.fan)) def testContainingZone_invalidDevice_returnsNone(self): zone1 = Zone('ff').add_device(self.light) self.zm.add_zone(zone1) self.assertEqual(None, self.zm.get_immutable_instance().get_containing_zone(self.fan)) def testGetDevicesByType_variousScenarios_returnsCorrectList(self): zone1 = Zone('ff').add_device(self.light) zone2 = Zone('sf').add_device(self.fan) self.zm.add_zone(zone1) self.zm.add_zone(zone2) self.assertEqual(2, len(self.zm.get_zones())) self.assertEqual(1, len(self.zm.get_devices_by_type(Light))) self.assertEqual(2, 
len(self.zm.get_devices_by_type(Switch))) self.assertEqual(0, len(self.zm.get_devices_by_type(Dimmer))) def testOnMotionSensorTurnedOn_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME))) def testOnMotionSensorTurnedOn_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, pe.create_string_item(INVALID_ITEM_NAME))) def testOnMotionSensorTurnedOn_withApplicableZone_returnsTrue(self): self.assertFalse(self.light.is_on()) pe.set_number_value(self.illuminanceSensorItem, ILLUMINANCE_THRESHOLD_IN_LUX - 1) zone = Zone('ff', [self.light, self.motionSensor, self.illuminanceSensor]) zone = zone.add_action(TurnOnSwitch()) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().dispatch_event( ZoneEvent.MOTION, pe.get_event_dispatcher(), self.motionSensor, self.motionSensor.get_item())) def testOnSwitchTurnedOn_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOn_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOn_withApplicableZone_returnsTrue(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_on( pe.get_event_dispatcher(), self.light, self.light.get_item())) def testOnSwitchTurnedOff_noZone_returnsFalse(self): self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOff_withNonApplicableZone_returnsFalse(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertFalse(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, pe.create_string_item(INVALID_ITEM_NAME))) def testOnSwitchTurnedOff_withApplicableZone_returnsTrue(self): zone = Zone('ff', [self.light, self.motionSensor]) self.zm.add_zone(zone) self.assertTrue(self.zm.get_immutable_instance().on_switch_turned_off( pe.get_event_dispatcher(), self.light, self.light.get_item()))
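
A sketch of the production-side flow these tests exercise, using the same zone_api imports as above. The device objects are assumed to be created as in `setUp()`; this is an illustration of the call pattern, not the library's documented setup:

# Minimal sketch: build a zone with a motion sensor and a light, register the
# TurnOnSwitch action, and dispatch a MOTION event as the tests above do.
zm = ZoneManager()
zone = Zone('first-floor', [light, motion_sensor])  # devices assumed created
zone = zone.add_action(TurnOnSwitch())
zm.add_zone(zone)
handled = zm.get_immutable_instance().dispatch_event(
    ZoneEvent.MOTION, pe.get_event_dispatcher(), motion_sensor,
    motion_sensor.get_item())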
40.535912
118
0.694698
892
7,337
5.445067
0.147982
0.05559
0.044472
0.045501
0.617048
0.584311
0.578546
0.541487
0.514721
0.495162
0
0.00932
0.19572
7,337
180
119
40.761111
0.81376
0.004225
0
0.409091
0
0
0.022883
0.002878
0
0
0
0
0.189394
1
0.136364
false
0
0.075758
0
0.219697
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b88bc36babff3a11858ef34a69a78ca7bd4caf
37,841
py
Python
src/streamlink_cli/main.py
melmorabity/streamlink
24c59a23103922977991acc28741a323d8efa7a1
[ "BSD-2-Clause" ]
null
null
null
src/streamlink_cli/main.py
melmorabity/streamlink
24c59a23103922977991acc28741a323d8efa7a1
[ "BSD-2-Clause" ]
null
null
null
src/streamlink_cli/main.py
melmorabity/streamlink
24c59a23103922977991acc28741a323d8efa7a1
[ "BSD-2-Clause" ]
null
null
null
import argparse import errno import logging import os import platform import signal import sys from collections import OrderedDict from contextlib import closing from distutils.version import StrictVersion from functools import partial from gettext import gettext from itertools import chain from pathlib import Path from time import sleep from typing import List import requests from socks import __version__ as socks_version from websocket import __version__ as websocket_version import streamlink.logger as logger from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version from streamlink.cache import Cache from streamlink.exceptions import FatalPluginError from streamlink.plugin import Plugin, PluginOptions from streamlink.stream import StreamIO, StreamProcess from streamlink.utils.named_pipe import NamedPipe from streamlink_cli.argparser import build_parser from streamlink_cli.compat import DeprecatedPath, is_win32, stdout from streamlink_cli.console import ConsoleOutput, ConsoleUserInputRequester from streamlink_cli.constants import CONFIG_FILES, DEFAULT_STREAM_METADATA, LOG_DIR, PLUGIN_DIRS, STREAM_SYNONYMS from streamlink_cli.output import FileOutput, Output, PlayerOutput from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET) try: ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,) except AttributeError: pass # Not windows QUIET_OPTIONS = ("json", "stream_url", "subprocess_cmdline", "quiet") args = None console: ConsoleOutput = None output: Output = None plugin: Plugin = None stream_fd: StreamIO = None streamlink: Streamlink = None log = logging.getLogger("streamlink.cli") def get_formatter(plugin: Plugin): return Formatter( { "url": lambda: args.url, "author": lambda: plugin.get_author(), "category": lambda: plugin.get_category(), "game": lambda: plugin.get_category(), "title": lambda: plugin.get_title(), "time": lambda: datetime.now() }, { "time": lambda dt, fmt: dt.strftime(fmt) } ) def check_file_output(filename, force): """Checks if file already exists and ask the user if it should be overwritten if it does.""" log.debug("Checking file output") if os.path.isfile(filename) and not force: if sys.stdin.isatty(): answer = console.ask(f"File {filename} already exists! Overwrite it? [y/N] ") if answer.lower() != "y": sys.exit() else: log.error(f"File {filename} already exists, use --force to overwrite it.") sys.exit() return FileOutput(filename) def create_output(formatter: Formatter): """Decides where to write the stream. Depending on arguments it can be one of these: - The stdout pipe - A subprocess' stdin pipe - A named pipe that the subprocess reads from - A regular file """ if (args.output or args.stdout) and (args.record or args.record_and_pipe): console.exit("Cannot use record options with other file output options.") if args.output: if args.output == "-": out = FileOutput(fd=stdout) else: out = check_file_output(formatter.filename(args.output, args.fs_safe_rules), args.force) elif args.stdout: out = FileOutput(fd=stdout) elif args.record_and_pipe: record = check_file_output(formatter.filename(args.record_and_pipe, args.fs_safe_rules), args.force) out = FileOutput(fd=stdout, record=record) else: http = namedpipe = record = None if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. 
You must specify the path to a player " "executable with --player.") if args.player_fifo: try: namedpipe = NamedPipe() except OSError as err: console.exit(f"Failed to create pipe: {err}") elif args.player_http: http = create_http_server() if args.record: record = check_file_output(formatter.filename(args.record, args.fs_safe_rules), args.force) log.info(f"Starting player: {args.player}") out = PlayerOutput( args.player, args=args.player_args, quiet=not args.verbose_player, kill=not args.player_no_close, namedpipe=namedpipe, http=http, record=record, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) return out def create_http_server(*_args, **_kwargs): """Creates a HTTP server listening on a given host and port. If host is empty, listen on all available interfaces, and if port is 0, listen on a random high port. """ try: http = HTTPServer() http.bind(*_args, **_kwargs) except OSError as err: console.exit(f"Failed to create HTTP server: {err}") return http def iter_http_requests(server, player): """Repeatedly accept HTTP connections on a server. Forever if the serving externally, or while a player is running if it is not empty. """ while not player or player.running: try: yield server.open(timeout=2.5) except OSError: continue def output_stream_http(plugin, initial_streams, formatter: Formatter, external=False, port=0): """Continuously output the stream over HTTP.""" global output if not external: if not args.player: console.exit("The default player (VLC) does not seem to be " "installed. You must specify the path to a player " "executable with --player.") server = create_http_server() player = output = PlayerOutput( args.player, args=args.player_args, filename=server.url, quiet=not args.verbose_player, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) try: log.info(f"Starting player: {args.player}") if player: player.open() except OSError as err: console.exit(f"Failed to start player: {args.player} ({err})") else: server = create_http_server(host=None, port=port) player = None log.info("Starting server, access with one of:") for url in server.urls: log.info(" " + url) for req in iter_http_requests(server, player): user_agent = req.headers.get("User-Agent") or "unknown player" log.info(f"Got HTTP request from {user_agent}") stream_fd = prebuffer = None while not stream_fd and (not player or player.running): try: streams = initial_streams or fetch_streams(plugin) initial_streams = None for stream_name in (resolve_stream_name(streams, s) for s in args.stream): if stream_name in streams: stream = streams[stream_name] break else: log.info("Stream not available, will re-fetch streams in 10 sec") sleep(10) continue except PluginError as err: log.error(f"Unable to fetch new streams: {err}") continue try: log.info(f"Opening stream: {stream_name} ({type(stream).shortname()})") stream_fd, prebuffer = open_stream(stream) except StreamError as err: log.error(err) if stream_fd and prebuffer: log.debug("Writing stream to player") read_stream(stream_fd, server, prebuffer, formatter) server.close(True) player.close() server.close() def output_stream_passthrough(stream, formatter: Formatter): """Prepares a filename to be passed to the player.""" global output filename = f'"{stream_to_url(stream)}"' output = PlayerOutput( args.player, args=args.player_args, filename=filename, call=True, quiet=not args.verbose_player, title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url ) 
try: log.info(f"Starting player: {args.player}") output.open() except OSError as err: console.exit(f"Failed to start player: {args.player} ({err})") return False return True def open_stream(stream): """Opens a stream and reads 8192 bytes from it. This is useful to check if a stream actually has data before opening the output. """ global stream_fd # Attempts to open the stream try: stream_fd = stream.open() except StreamError as err: raise StreamError(f"Could not open stream: {err}") # Read 8192 bytes before proceeding to check for errors. # This is to avoid opening the output unnecessarily. try: log.debug("Pre-buffering 8192 bytes") prebuffer = stream_fd.read(8192) except OSError as err: stream_fd.close() raise StreamError(f"Failed to read data from stream: {err}") if not prebuffer: stream_fd.close() raise StreamError("No data returned from stream") return stream_fd, prebuffer def output_stream(stream, formatter: Formatter): """Open stream, create output and finally write the stream to output.""" global output success_open = False for i in range(args.retry_open): try: stream_fd, prebuffer = open_stream(stream) success_open = True break except StreamError as err: log.error(f"Try {i + 1}/{args.retry_open}: Could not open stream {stream} ({err})") if not success_open: console.exit(f"Could not open stream {stream}, tried {args.retry_open} times, exiting") output = create_output(formatter) try: output.open() except OSError as err: if isinstance(output, PlayerOutput): console.exit(f"Failed to start player: {args.player} ({err})") else: console.exit(f"Failed to open output: {output.filename} ({err})") with closing(output): log.debug("Writing stream to output") read_stream(stream_fd, output, prebuffer, formatter) return True def read_stream(stream, output, prebuffer, formatter: Formatter, chunk_size=8192): """Reads data from stream and then writes it to the output.""" is_player = isinstance(output, PlayerOutput) is_http = isinstance(output, HTTPServer) is_fifo = is_player and output.namedpipe show_progress = ( isinstance(output, FileOutput) and output.fd is not stdout and (sys.stdout.isatty() or args.force_progress) ) show_record_progress = ( hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and (sys.stdout.isatty() or args.force_progress) ) stream_iterator = chain( [prebuffer], iter(partial(stream.read, chunk_size), b"") ) if show_progress: stream_iterator = progress( stream_iterator, prefix=os.path.basename(output.filename) ) elif show_record_progress: stream_iterator = progress( stream_iterator, prefix=os.path.basename(output.record.filename) ) try: for data in stream_iterator: # We need to check if the player process still exists when # using named pipes on Windows since the named pipe is not # automatically closed by the player. if is_win32 and is_fifo: output.player.poll() if output.player.returncode is not None: log.info("Player closed") break try: output.write(data) except OSError as err: if is_player and err.errno in ACCEPTABLE_ERRNO: log.info("Player closed") elif is_http and err.errno in ACCEPTABLE_ERRNO: log.info("HTTP connection closed") else: console.exit(f"Error when writing to output: {err}, exiting") break except OSError as err: console.exit(f"Error when reading from stream: {err}, exiting") finally: stream.close() log.info("Stream ended") def handle_stream(plugin, streams, stream_name): """Decides what to do with the selected stream. 
Depending on the arguments, it can be one of these: - Output internal command-line - Output JSON representation - Continuously output the stream over HTTP - Output stream data to selected output """ stream_name = resolve_stream_name(streams, stream_name) stream = streams[stream_name] # Print internal command-line if this stream # uses a subprocess. if args.subprocess_cmdline: if isinstance(stream, StreamProcess): try: cmdline = stream.cmdline() except StreamError as err: console.exit(err) console.msg(cmdline) else: console.exit("The stream specified cannot be translated to a command") # Print JSON representation of the stream elif args.json: console.msg_json( stream, metadata=plugin.get_metadata() ) elif args.stream_url: try: console.msg(stream.to_url()) except TypeError: console.exit("The stream specified cannot be translated to a URL") # Output the stream else: # Find any streams with a '_alt' suffix and attempt # to use these in case the main stream is not usable. alt_streams = list(filter(lambda k: stream_name + "_alt" in k, sorted(streams.keys()))) file_output = args.output or args.stdout formatter = get_formatter(plugin) for stream_name in [stream_name] + alt_streams: stream = streams[stream_name] stream_type = type(stream).shortname() if stream_type in args.player_passthrough and not file_output: log.info(f"Opening stream: {stream_name} ({stream_type})") success = output_stream_passthrough(stream, formatter) elif args.player_external_http: return output_stream_http(plugin, streams, formatter, external=True, port=args.player_external_http_port) elif args.player_continuous_http and not file_output: return output_stream_http(plugin, streams, formatter) else: log.info(f"Opening stream: {stream_name} ({stream_type})") success = output_stream(stream, formatter) if success: break def fetch_streams(plugin): """Fetches streams using correct parameters.""" return plugin.streams(stream_types=args.stream_types, sorting_excludes=args.stream_sorting_excludes) def fetch_streams_with_retry(plugin, interval, count): """Attempts to fetch streams repeatedly until some are returned or the retry limit is hit.""" try: streams = fetch_streams(plugin) except PluginError as err: log.error(err) streams = None if not streams: log.info(f"Waiting for streams, retrying every {interval} second(s)") attempts = 0 while not streams: sleep(interval) try: streams = fetch_streams(plugin) except FatalPluginError: raise except PluginError as err: log.error(err) if count > 0: attempts += 1 if attempts >= count: break return streams def resolve_stream_name(streams, stream_name): """Returns the real stream name of a synonym.""" if stream_name in STREAM_SYNONYMS and stream_name in streams: for name, stream in streams.items(): if stream is streams[stream_name] and name not in STREAM_SYNONYMS: return name return stream_name def format_valid_streams(plugin, streams): """Formats a dict of streams. Filters out synonyms and displays them next to the stream they point to. Streams are sorted according to their quality (based on plugin.stream_weight). """ delimiter = ", " validstreams = [] for name, stream in sorted(streams.items(), key=lambda stream: plugin.stream_weight(stream[0])): if name in STREAM_SYNONYMS: continue def synonymfilter(n): return stream is streams[n] and n is not name synonyms = list(filter(synonymfilter, streams.keys())) if len(synonyms) > 0: joined = delimiter.join(synonyms) name = f"{name} ({joined})" validstreams.append(name) return delimiter.join(validstreams) def handle_url(): """The URL handler.
Attempts to resolve the URL to a plugin and then attempts to fetch a list of available streams. Proceeds to handle stream if user specified a valid one, otherwise output list of valid streams. """ try: plugin = streamlink.resolve_url(args.url) setup_plugin_options(streamlink, plugin) log.info(f"Found matching plugin {plugin.module} for URL {args.url}") if args.retry_max or args.retry_streams: retry_streams = 1 retry_max = 0 if args.retry_streams: retry_streams = args.retry_streams if args.retry_max: retry_max = args.retry_max streams = fetch_streams_with_retry(plugin, retry_streams, retry_max) else: streams = fetch_streams(plugin) except NoPluginError: console.exit(f"No plugin can handle URL: {args.url}") except PluginError as err: console.exit(err) if not streams: console.exit(f"No playable streams found on this URL: {args.url}") if args.default_stream and not args.stream and not args.json: args.stream = args.default_stream if args.stream: validstreams = format_valid_streams(plugin, streams) for stream_name in args.stream: if stream_name in streams: log.info(f"Available streams: {validstreams}") handle_stream(plugin, streams, stream_name) return err = f"The specified stream(s) '{', '.join(args.stream)}' could not be found" if args.json: console.msg_json( plugin=plugin.module, metadata=plugin.get_metadata(), streams=streams, error=err ) else: console.exit(f"{err}.\n Available streams: {validstreams}") elif args.json: console.msg_json( plugin=plugin.module, metadata=plugin.get_metadata(), streams=streams ) elif args.stream_url: try: console.msg(streams[list(streams)[-1]].to_manifest_url()) except TypeError: console.exit("The stream specified cannot be translated to a URL") else: validstreams = format_valid_streams(plugin, streams) console.msg(f"Available streams: {validstreams}") def print_plugins(): """Outputs a list of all plugins Streamlink has loaded.""" pluginlist = list(streamlink.get_plugins().keys()) pluginlist_formatted = ", ".join(sorted(pluginlist)) if args.json: console.msg_json(pluginlist) else: console.msg(f"Loaded plugins: {pluginlist_formatted}") def load_plugins(dirs: List[Path], showwarning: bool = True): """Attempts to load plugins from a list of directories.""" for directory in dirs: if directory.is_dir(): success = streamlink.load_plugins(str(directory)) if success and type(directory) is DeprecatedPath: log.info(f"Loaded plugins from deprecated path, see CLI docs for how to migrate: {directory}") elif showwarning: log.warning(f"Plugin path {directory} does not exist or is not a directory!") def setup_args(parser: argparse.ArgumentParser, config_files: List[Path] = None, ignore_unknown: bool = False): """Parses arguments.""" global args arglist = sys.argv[1:] # Load arguments from config files configs = [f"@{config_file}" for config_file in config_files or []] args, unknown = parser.parse_known_args(configs + arglist) if unknown and not ignore_unknown: msg = gettext("unrecognized arguments: %s") parser.error(msg % " ".join(unknown)) # Force lowercase to allow case-insensitive lookup if args.stream: args.stream = [stream.lower() for stream in args.stream] if not args.url and args.url_param: args.url = args.url_param def setup_config_args(parser, ignore_unknown=False): config_files = [] if args.config: # We want the config specified last to get highest priority for config_file in map(lambda path: Path(path).expanduser(), reversed(args.config)): if config_file.is_file(): config_files.append(config_file) else: # Only load first available default config for config_file in 
filter(lambda path: path.is_file(), CONFIG_FILES): if type(config_file) is DeprecatedPath: log.info(f"Loaded config from deprecated path, see CLI docs for how to migrate: {config_file}") config_files.append(config_file) break if streamlink and args.url: # Only load first available plugin config with ignored(NoPluginError): plugin = streamlink.resolve_url(args.url) for config_file in CONFIG_FILES: config_file = config_file.with_name(f"{config_file.name}.{plugin.module}") if not config_file.is_file(): continue if type(config_file) is DeprecatedPath: log.info(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {config_file}") config_files.append(config_file) break if config_files: setup_args(parser, config_files, ignore_unknown=ignore_unknown) def setup_signals(): # Handle SIGTERM just like SIGINT signal.signal(signal.SIGTERM, signal.default_int_handler) def setup_http_session(): """Sets the global HTTP settings, such as proxy and headers.""" if args.http_proxy: streamlink.set_option("http-proxy", args.http_proxy) if args.https_proxy: streamlink.set_option("https-proxy", args.https_proxy) if args.http_cookie: streamlink.set_option("http-cookies", dict(args.http_cookie)) if args.http_header: streamlink.set_option("http-headers", dict(args.http_header)) if args.http_query_param: streamlink.set_option("http-query-params", dict(args.http_query_param)) if args.http_ignore_env: streamlink.set_option("http-trust-env", False) if args.http_no_ssl_verify: streamlink.set_option("http-ssl-verify", False) if args.http_disable_dh: streamlink.set_option("http-disable-dh", True) if args.http_ssl_cert: streamlink.set_option("http-ssl-cert", args.http_ssl_cert) if args.http_ssl_cert_crt_key: streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key)) if args.http_timeout: streamlink.set_option("http-timeout", args.http_timeout) def setup_plugins(extra_plugin_dir=None): """Loads any additional plugins.""" load_plugins(PLUGIN_DIRS, showwarning=False) if extra_plugin_dir: load_plugins([Path(path).expanduser() for path in extra_plugin_dir]) def setup_streamlink(): """Creates the Streamlink session.""" global streamlink streamlink = Streamlink({"user-input-requester": ConsoleUserInputRequester(console)}) def setup_options(): """Sets Streamlink options.""" if args.interface: streamlink.set_option("interface", args.interface) if args.ipv4: streamlink.set_option("ipv4", args.ipv4) if args.ipv6: streamlink.set_option("ipv6", args.ipv6) if args.ringbuffer_size: streamlink.set_option("ringbuffer-size", args.ringbuffer_size) if args.mux_subtitles: streamlink.set_option("mux-subtitles", args.mux_subtitles) if args.hds_live_edge: streamlink.set_option("hds-live-edge", args.hds_live_edge) if args.hls_live_edge: streamlink.set_option("hls-live-edge", args.hls_live_edge) if args.hls_playlist_reload_attempts: streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts) if args.hls_playlist_reload_time: streamlink.set_option("hls-playlist-reload-time", args.hls_playlist_reload_time) if args.hls_segment_ignore_names: streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names) if args.hls_segment_key_uri: streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri) if args.hls_audio_select: streamlink.set_option("hls-audio-select", args.hls_audio_select) if args.hls_start_offset: streamlink.set_option("hls-start-offset", args.hls_start_offset) if args.hls_duration: streamlink.set_option("hls-duration", args.hls_duration) if 
args.hls_live_restart: streamlink.set_option("hls-live-restart", args.hls_live_restart) if args.rtmp_rtmpdump: streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump) elif args.rtmpdump: streamlink.set_option("rtmp-rtmpdump", args.rtmpdump) if args.rtmp_proxy: streamlink.set_option("rtmp-proxy", args.rtmp_proxy) # deprecated if args.hds_segment_attempts: streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts) if args.hds_segment_threads: streamlink.set_option("hds-segment-threads", args.hds_segment_threads) if args.hds_segment_timeout: streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout) if args.hds_timeout: streamlink.set_option("hds-timeout", args.hds_timeout) if args.hls_segment_attempts: streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts) if args.hls_segment_threads: streamlink.set_option("hls-segment-threads", args.hls_segment_threads) if args.hls_segment_timeout: streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout) if args.hls_timeout: streamlink.set_option("hls-timeout", args.hls_timeout) if args.http_stream_timeout: streamlink.set_option("http-stream-timeout", args.http_stream_timeout) if args.rtmp_timeout: streamlink.set_option("rtmp-timeout", args.rtmp_timeout) # generic stream- arguments take precedence over deprecated stream-type arguments if args.stream_segment_attempts: streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts) if args.stream_segment_threads: streamlink.set_option("stream-segment-threads", args.stream_segment_threads) if args.stream_segment_timeout: streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout) if args.stream_timeout: streamlink.set_option("stream-timeout", args.stream_timeout) if args.ffmpeg_ffmpeg: streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg) if args.ffmpeg_verbose: streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose) if args.ffmpeg_verbose_path: streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path) if args.ffmpeg_fout: streamlink.set_option("ffmpeg-fout", args.ffmpeg_fout) if args.ffmpeg_video_transcode: streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode) if args.ffmpeg_audio_transcode: streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode) if args.ffmpeg_copyts: streamlink.set_option("ffmpeg-copyts", args.ffmpeg_copyts) if args.ffmpeg_start_at_zero: streamlink.set_option("ffmpeg-start-at-zero", args.ffmpeg_start_at_zero) streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog) streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path) streamlink.set_option("locale", args.locale) def setup_plugin_args(session, parser): """Sets Streamlink plugin options.""" plugin_args = parser.add_argument_group("Plugin options") for pname, plugin in session.plugins.items(): defaults = {} group = plugin_args.add_argument_group(pname.capitalize()) for parg in plugin.arguments: if not parg.is_global: group.add_argument(parg.argument_name(pname), **parg.options) defaults[parg.dest] = parg.default else: pargdest = parg.dest for action in parser._actions: # find matching global argument if pargdest != action.dest: continue defaults[pargdest] = action.default # add plugin to global argument plugins = getattr(action, "plugins", []) plugins.append(pname) setattr(action, "plugins", plugins) plugin.options = PluginOptions(defaults) def setup_plugin_options(session, plugin): """Sets Streamlink plugin options.""" pname = plugin.module 
required = OrderedDict({}) for parg in plugin.arguments: if parg.options.get("help") == argparse.SUPPRESS: continue value = getattr(args, parg.dest if parg.is_global else parg.namespace_dest(pname)) session.set_plugin_option(pname, parg.dest, value) if not parg.is_global: if parg.required: required[parg.name] = parg # if the value is set, check to see if any of the required arguments are not set if parg.required or value: try: for rparg in plugin.arguments.requires(parg.name): required[rparg.name] = rparg except RuntimeError: log.error(f"{pname} plugin has a configuration error and the arguments cannot be parsed") break if required: for req in required.values(): if not session.get_plugin_option(pname, req.dest): prompt = f"{req.prompt or f'Enter {pname} {req.name}'}: " session.set_plugin_option( pname, req.dest, console.askpass(prompt) if req.sensitive else console.ask(prompt) ) def log_root_warning(): if hasattr(os, "getuid"): if os.geteuid() == 0: log.info("streamlink is running as root! Be careful!") def log_current_versions(): """Show current installed versions""" if not logger.root.isEnabledFor(logging.DEBUG): return # macOS if sys.platform == "darwin": os_version = f"macOS {platform.mac_ver()[0]}" # Windows elif sys.platform == "win32": os_version = f"{platform.system()} {platform.release()}" # Linux / other else: os_version = platform.platform() log.debug(f"OS: {os_version}") log.debug(f"Python: {platform.python_version()}") log.debug(f"Streamlink: {streamlink_version}") log.debug(f"Requests({requests.__version__}), " f"Socks({socks_version}), " f"Websocket({websocket_version})") def log_current_arguments(session, parser): global args if not logger.root.isEnabledFor(logging.DEBUG): return sensitive = set() for pname, plugin in session.plugins.items(): for parg in plugin.arguments: if parg.sensitive: sensitive.add(parg.argument_name(pname)) log.debug("Arguments:") for action in parser._actions: if not hasattr(args, action.dest): continue value = getattr(args, action.dest) if action.default != value: name = next( # pragma: no branch (option for option in action.option_strings if option.startswith("--")), action.option_strings[0] ) if action.option_strings else action.dest log.debug(f" {name}={value if name not in sensitive else '*' * 8}") def check_version(force=False): cache = Cache(filename="cli.json") latest_version = cache.get("latest_version") if force or not latest_version: res = requests.get("https://pypi.python.org/pypi/streamlink/json") data = res.json() latest_version = data.get("info").get("version") cache.set("latest_version", latest_version, (60 * 60 * 24)) version_info_printed = cache.get("version_info_printed") if not force and version_info_printed: return installed_version = StrictVersion(streamlink.version) latest_version = StrictVersion(latest_version) if latest_version > installed_version: log.info(f"A new version of Streamlink ({latest_version}) is available!") cache.set("version_info_printed", True, (60 * 60 * 6)) elif force: log.info(f"Your Streamlink version ({installed_version}) is up to date!") if force: sys.exit() def setup_logger_and_console(stream=sys.stdout, filename=None, level="info", json=False): global console if filename == "-": filename = LOG_DIR / f"{datetime.now()}.log" elif filename: filename = Path(filename).expanduser().resolve() if filename: filename.parent.mkdir(parents=True, exist_ok=True) streamhandler = logger.basicConfig( stream=stream, filename=filename, level=level, style="{", format=("[{asctime}]" if level == "trace" else "") + 
"[{name}][{levelname}] {message}", datefmt="%H:%M:%S" + (".%f" if level == "trace" else "") ) console = ConsoleOutput(streamhandler.stream, json) def main(): error_code = 0 parser = build_parser() setup_args(parser, ignore_unknown=True) # call argument set up as early as possible to load args from config files setup_config_args(parser, ignore_unknown=True) # Console output should be on stderr if we are outputting # a stream to stdout. if args.stdout or args.output == "-" or args.record_and_pipe: console_out = sys.stderr else: console_out = sys.stdout # We don't want log output when we are printing JSON or a command-line. silent_log = any(getattr(args, attr) for attr in QUIET_OPTIONS) log_level = args.loglevel if not silent_log else "none" log_file = args.logfile if log_level != "none" else None setup_logger_and_console(console_out, log_file, log_level, args.json) setup_signals() setup_streamlink() # load additional plugins setup_plugins(args.plugin_dirs) setup_plugin_args(streamlink, parser) # call setup args again once the plugin specific args have been added setup_args(parser) setup_config_args(parser) # update the logging level if changed by a plugin specific config log_level = args.loglevel if not silent_log else "none" logger.root.setLevel(log_level) setup_http_session() log_root_warning() log_current_versions() log_current_arguments(streamlink, parser) if args.version_check or args.auto_version_check: with ignored(Exception): check_version(force=args.version_check) if args.plugins: print_plugins() elif args.can_handle_url: try: streamlink.resolve_url(args.can_handle_url) except NoPluginError: error_code = 1 except KeyboardInterrupt: error_code = 130 elif args.can_handle_url_no_redirect: try: streamlink.resolve_url_no_redirect(args.can_handle_url_no_redirect) except NoPluginError: error_code = 1 except KeyboardInterrupt: error_code = 130 elif args.url: try: setup_options() handle_url() except KeyboardInterrupt: # Close output if output: output.close() console.msg("Interrupted! Exiting...") error_code = 130 finally: if stream_fd: try: log.info("Closing currently open stream...") stream_fd.close() except KeyboardInterrupt: error_code = 130 elif args.help: parser.print_help() else: usage = parser.format_usage() console.msg( f"{usage}\n" f"Use -h/--help to see the available options or read the manual at https://streamlink.github.io" ) sys.exit(error_code) def parser_helper(): session = Streamlink() parser = build_parser() setup_plugin_args(session, parser) return parser
33.999102
122
0.640258
4,624
37,841
5.084343
0.132137
0.01812
0.043641
0.012165
0.292812
0.197618
0.141557
0.115781
0.098681
0.094088
0
0.002896
0.269866
37,841
1,112
123
34.029676
0.848022
0.09273
0
0.264743
0
0.001255
0.13367
0.015871
0
0
0
0
0
1
0.043915
false
0.006274
0.040151
0.002509
0.110414
0.007528
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b8d02790ae07e8540167a649afc0cab32e02e9
16,807
py
Python
dbaccesslibUserMailInfo.py
Koushik-ks/FlaskAPP
6f1bd98450bc8f33c3896aa7ec690c51dc414d19
[ "MIT" ]
null
null
null
dbaccesslibUserMailInfo.py
Koushik-ks/FlaskAPP
6f1bd98450bc8f33c3896aa7ec690c51dc414d19
[ "MIT" ]
null
null
null
dbaccesslibUserMailInfo.py
Koushik-ks/FlaskAPP
6f1bd98450bc8f33c3896aa7ec690c51dc414d19
[ "MIT" ]
1
2019-11-08T06:49:57.000Z
2019-11-08T06:49:57.000Z
from io import BytesIO from io import StringIO import json from bson.dbref import DBRef import datetime from bson import json_util import logging import base64 jsonCode ={ "building":{ "Essae Vaishnavi Solitaire": { "id": "B1", "division": { "SS": { "id": "D1", "dept":{ "Semicon":{ "id":"DEP1", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "RND":{ "id":"DEP2", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "Mobile":{ "id":"DEP3", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } } } }, "TTEC": { "id": "D2", "dept":{ "TTEC-AL":{ "id":"DEP1", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "TTEC-SL":{ "id":"DEP2", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "TTEC-DL":{ "id":"DEP3", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "TTEC-CI":{ "id":"DEP4", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } } } } } }, "Fortune Summit": { "id": "B2", "division": { "TMSC": { "id": "D1", "dept":{ "Medical":{ "id":"DEP1", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "RND":{ "id":"DEP2", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "Imaging":{ "id":"DEP3", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } } } }, "tmc": { "id": "D2", "dept":{ "tmc-1":{ "id":"DEP1", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "tmc-2":{ "id":"DEP2", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } }, "tmc-3":{ "id":"DEP3", "floor":{"0":"0", "1":"1", "2":"2", "3":"3", "4":"4", "5":"5", "6":"6" } } } } } } } } #Create and configure logger logging.basicConfig(filename="server.log", format='%(asctime)s %(message)s', filemode='a') #Creating an object logger=logging.getLogger() #Setting the threshold of logger to DEBUG logger.setLevel(logging.DEBUG) import pymongo uri = "mongodb://218ffa09-0ee0-4-231-b9ee:zTV4cwDG0vM49J2GFsw72JzwOD79Bv3dPU8fbVLb5pbh3p0CmTBYcvhrFKTjtl1s7hgYSfRbMOrsVve6hfvhag==@218ffa09-0ee0-4-231-b9ee.documents.azure.com:10255/?ssl=true&replicaSet=globaldb" client = pymongo.MongoClient(uri) print("Obtained the client") mydb = client.test def sortingReq(item): new_thrash_date = datetime.datetime.strptime(item["scan_date"], '%d-%m-%Y').date() return new_thrash_date def checkIfAutoThrashed(jsonData,tags): if(len(tags) < 3): return False a = mydb.userInfo.find_one({"name":jsonData["name"]}) newDbref = DBRef("mydb.userInfo",a["_id"]) foundMails = mydb.mltable.find({"otherdbref":newDbref,"status":"trash"}) foundMailsList = list(mydb.mltable.find({"otherdbref":newDbref,"status":"trash"})) if(len(foundMailsList) < 10): return False tagcount = 0 thrashcount = 0 for item in foundMails: for tag in tags: if(tag in item["tags"]): tagcount+=1 if(tagcount >= 3): thrashcount+=1 if(thrashcount >=10): return True return False def generateqrcode(jsonData,filenameJPG,tags,fromMFP): logger.debug("Received data for generating color code = ") logger.debug(jsonData) ilocation=1 today = datetime.datetime.now() date = str(today.day) time = str(today.hour) + ":" + str(today.minute) + ":" + str(today.second)+":"+str(today.microsecond) dateTimeNow = date+':'+time logger.debug("Current Datetime - "+dateTimeNow) dateTimeNow = str(today.day)+str(today.hour)+str(today.minute)+str(today.second)+(str(today.microsecond)[:2]) logger.debug("Unique Code - "+dateTimeNow) if(int(jsonData["cubicle"])>25 and 
int(jsonData["cubicle"])<=50): ilocation=2 elif(int(jsonData["cubicle"])>50 and int(jsonData["cubicle"])<=75): ilocation=3 else: ilocation=4 logger.debug(jsonData["building"]) colorCode=jsonCode["building"][jsonData["building"]]["id"]+':'+jsonCode["building"][jsonData["building"]]["division"][jsonData["division"]]["id"]+':'+dateTimeNow logger.debug("ColorCode - "+colorCode) logger.debug("generateColorCode:: ColorCode value ="+colorCode) import qrcode img = qrcode.make(colorCode) logger.debug(type(img)) autoThrashed = checkIfAutoThrashed(jsonData,tags) logger.debug("Auto thrashed value is %d" % autoThrashed) logger.debug("Tags are %s" % tags) import sendEmail as se se.execute(str(jsonData["email"]),filenameJPG,str(colorCode),img,autoThrashed,fromMFP) #img = qrcode.make(colorCode) #img.save(colorCode+".png") newjsonData = {"name":jsonData["name"],"code":colorCode,"email":jsonData["email"],"division":jsonData["division"],"department":jsonData["department"],"floor":jsonData["floor"],"cubicle":jsonData["cubicle"],"building":jsonData["building"]} if(fromMFP): newjsonData["source"] = "MFP" else: newjsonData["source"] = "Mobile" return addEntry(newjsonData,tags,autoThrashed); def addEntry(jsonData,tags,autoThrashed): a = mydb.userInfo.find_one({"name":jsonData["name"]}) newDbref = DBRef("mydb.userInfo",a["_id"]) scan_date = datetime.datetime.today() scan_date = scan_date + datetime.timedelta(hours=9) end_date = scan_date + datetime.timedelta(days=10) scan_date = str(scan_date.day) +"-"+ str(scan_date.month)+"-" + str(scan_date.year) end_date = str(end_date.day) +"-" +str(end_date.month)+"-" + str(end_date.year) if(autoThrashed): end_date = scan_date if( not autoThrashed and len(tags) >= 3): #mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref}) Actual Code mydb.mltable.insert({"code":jsonData["code"],"tags": tags,"status":"Keep","user_id":1,"otherdbref":newDbref})#Test code to be removed #end_date = scan_date mydb.userMailInfo.insert({"code":jsonData["code"],"scan_date":scan_date,"end_date":end_date,"otherdbref":newDbref,"userDeleted":False,"user_id":1,"source":jsonData["source"]}) jsonData["autoThrashed"] = autoThrashed return json.dumps(jsonData) def read_fromDB(): new_list = list() for item in mydb.userMailInfo.find({},{"_id":0,"user_id":0}): print(item) otherdbref = item["otherdbref"] newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0}) dall = {} item.pop("otherdbref") dall.update(item) dall.update(newjson) print(dall) new_list.append(dall) new_list.reverse() return json.dumps(new_list,default=json_util.default) def getspecificDate(jsonData): logger.debug(jsonData) num = int(jsonData['page']) skips = 10 * (num - 1) if(jsonData["action"] == "all"): all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0})) all_list.reverse() totalsize = len(all_list) all_list = all_list[skips:] all_list = all_list[:10] new_list_new = list() for item in all_list: otherdbref = item["otherdbref"] newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0}) dall = {} item.pop("otherdbref") dall.update(item) dall.update(newjson) print(dall) new_list_new.append(dall) new_list_new.append({"totalsize":totalsize}) logger.debug(new_list_new) #new_list_new.sort(key = lambda x : x["name"]) return json.dumps(new_list_new, default=json_util.default) elif(jsonData["action"] == "today"): all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0})) thrash_date = 
datetime.datetime.today() thrash_date = thrash_date + datetime.timedelta(hours=9) thrash_date = str(thrash_date.day) + "-" +str(thrash_date.month)+"-" + str(thrash_date.year) thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date() new_list = list() for item in all_list: if(item['end_date'] == "DONT TRASH"): continue db_date = datetime.datetime.strptime(item['end_date'],'%d-%m-%Y').date() if(db_date <= thrash_date): new_list.append(item) new_list.reverse() totalsize = len(new_list) new_list = new_list[skips:] new_list = new_list[:10] new_list_new = list() for item in new_list: otherdbref = item["otherdbref"] newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0}) dall = {} item.pop("otherdbref") dall.update(item) dall.update(newjson) print(dall) new_list_new.append(dall) new_list_new.append({"totalsize":totalsize}) logger.debug(new_list_new) #new_list_new.sort(key = lambda x : x["name"]) return json.dumps(new_list_new, default=json_util.default) else: all_list = list(mydb.userMailInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0})) thrash_date = datetime.datetime.today() thrash_date = thrash_date + datetime.timedelta(hours=9) thrash_date = str(thrash_date.day) + "-" +str(thrash_date.month)+"-" + str(thrash_date.year) thrash_date = datetime.datetime.strptime(thrash_date, '%d-%m-%Y').date() new_list = list() for item in all_list: db_date = datetime.datetime.strptime(item['scan_date'],'%d-%m-%Y').date() if(db_date == thrash_date): new_list.append(item) new_list.reverse() totalsize = len(new_list) new_list = new_list[skips:] new_list = new_list[:10] new_list_new = list() for item in new_list: otherdbref = item["otherdbref"] newjson = mydb.userInfo.find_one({"_id":otherdbref.id},{"_id":0,"user_id":0}) dall = {} item.pop("otherdbref") dall.update(item) dall.update(newjson) print(dall) new_list_new.append(dall) new_list_new.append({"totalsize":totalsize}) logger.debug(new_list_new) return json.dumps(new_list_new, default=json_util.default) def update_DB(jsonData): logger.debug("DBUMI::Update_db() entry") logger.debug(jsonData["code"]) logger.debug(jsonData["end_date"]) foundmail = mydb.userMailInfo.find_one({"code":jsonData["code"]},{"_id":1}) logger.debug(foundmail) foundMl = mydb.mltable.find_one({"code":jsonData["code"]},{"_id":1}) logger.debug(foundMl) mydb.userMailInfo.update_many({"_id":foundmail["_id"],"user_id":1},{"$set":{'end_date':str(jsonData['end_date'])}}) if(not jsonData['end_date'] == "DONT TRASH"): mydb.mltable.update_many({"_id":foundMl["_id"],"user_id":1},{"$set":{"status":"trash"}}) return json.dumps({"status": "Success","statusreason": "updateSucess"}) #Clear DB only for testing def delete_entry(jsonData): logger.debug("DBUMI::delete_entry() entry") logger.debug(jsonData["code"]) mydb.userMailInfo.delete_one({"code":jsonData["code"],"user_id":1}) return json.dumps({"status": "Success","statusreason": "updateSucess"}) def clear_db(): mydb.userMailInfo.remove({})
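The module above stores scan_date/end_date as unpadded "day-month-year" strings and later re-parses them with strptime for comparisons. A small round-trip check of that convention, assuming the same +9 hour shift the code applies (apparently a UTC-to-JST adjustment, though the source does not say):

import datetime

now = datetime.datetime.today() + datetime.timedelta(hours=9)  # same shift as addEntry() above
end = now + datetime.timedelta(days=10)
scan_date = f"{now.day}-{now.month}-{now.year}"  # unpadded, e.g. "8-3-2021"
end_date = f"{end.day}-{end.month}-{end.year}"

# strptime accepts the unpadded fields, so the stored strings parse back cleanly
scan = datetime.datetime.strptime(scan_date, "%d-%m-%Y").date()
end_parsed = datetime.datetime.strptime(end_date, "%d-%m-%Y").date()
assert scan <= end_parsed
print(scan_date, "->", scan)

Note that because the comparisons in getspecificDate() happen on parsed date objects rather than the strings themselves, the unpadded format is safe; comparing the raw strings lexicographically would not be.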
42.335013
242
0.404712
1,484
16,807
4.46159
0.161725
0.044404
0.034738
0.015708
0.497659
0.4599
0.450989
0.421991
0.421991
0.406434
0
0.033891
0.448742
16,807
396
243
42.441919
0.680734
0.025287
0
0.534031
0
0.002618
0.120151
0.013744
0
0
0
0
0
1
0.02356
false
0
0.028796
0
0.086387
0.015707
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b8f248ebba127a7ba553594c485e647090e69f
3,489
py
Python
src/test/dags/bq_to_cm_dag_test.py
google/cc4d
206543832368f96bac7f55c0de93c96e32127779
[ "Apache-2.0" ]
null
null
null
src/test/dags/bq_to_cm_dag_test.py
google/cc4d
206543832368f96bac7f55c0de93c96e32127779
[ "Apache-2.0" ]
null
null
null
src/test/dags/bq_to_cm_dag_test.py
google/cc4d
206543832368f96bac7f55c0de93c96e32127779
[ "Apache-2.0" ]
null
null
null
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for dags.bq_to_cm_dag."""

import unittest

from airflow.contrib.hooks import bigquery_hook
from airflow.models import baseoperator
from airflow.models import dag
from airflow.models import variable
import mock

from gps_building_blocks.cloud.utils import cloud_auth

from dags import bq_to_cm_dag
from plugins.pipeline_plugins.hooks import monitoring_hook

_DAG_NAME = bq_to_cm_dag._DAG_NAME

AIRFLOW_VARIABLES = {
    'dag_name': _DAG_NAME,
    f'{_DAG_NAME}_schedule': '@once',
    f'{_DAG_NAME}_retries': 0,
    f'{_DAG_NAME}_retry_delay': 3,
    f'{_DAG_NAME}_is_retry': True,
    f'{_DAG_NAME}_is_run': True,
    f'{_DAG_NAME}_enable_run_report': False,
    f'{_DAG_NAME}_enable_monitoring': True,
    f'{_DAG_NAME}_enable_monitoring_cleanup': False,
    'monitoring_data_days_to_live': 50,
    'monitoring_dataset': 'test_monitoring_dataset',
    'monitoring_table': 'test_monitoring_table',
    'monitoring_bq_conn_id': 'test_monitoring_conn',
    'bq_dataset_id': 'test_dataset',
    'bq_table_id': 'test_table',
    'cm_profile_id': 'cm_profile_id',
    'cm_service_account': 'cm_service_account'
}


class BQToCMDAGTest(unittest.TestCase):

  def setUp(self):
    super(BQToCMDAGTest, self).setUp()
    self.addCleanup(mock.patch.stopall)
    self.build_impersonated_client_mock = mock.patch.object(
        cloud_auth, 'build_impersonated_client', autospec=True)
    self.build_impersonated_client_mock.return_value = mock.Mock()
    self.build_impersonated_client_mock.start()
    self.mock_variable = mock.patch.object(
        variable, 'Variable', autospec=True).start()
    # `side_effect` is assigned to `lambda` to dynamically return values
    # each time self.mock_variable is called.
    self.mock_variable.get.side_effect = (
        lambda key, value: AIRFLOW_VARIABLES[key])
    self.original_bigquery_hook_init = bigquery_hook.BigQueryHook.__init__
    bigquery_hook.BigQueryHook.__init__ = mock.MagicMock()
    self.original_monitoring_hook = monitoring_hook.MonitoringHook
    monitoring_hook.MonitoringHook = mock.MagicMock()

  def tearDown(self):
    super().tearDown()
    bigquery_hook.BigQueryHook.__init__ = self.original_bigquery_hook_init
    monitoring_hook.MonitoringHook = self.original_monitoring_hook

  def test_create_dag(self):
    """Tests that returned DAG contains correct DAG and tasks."""
    expected_task_ids = ['bq_to_cm_retry_task', 'bq_to_cm_task']

    test_dag = bq_to_cm_dag.BigQueryToCMDag(
        AIRFLOW_VARIABLES['dag_name']).create_dag()

    self.assertIsInstance(test_dag, dag.DAG)
    self.assertEqual(len(test_dag.tasks), len(expected_task_ids))
    for task in test_dag.tasks:
      self.assertIsInstance(task, baseoperator.BaseOperator)
    actual_task_ids = [t.task_id for t in test_dag.tasks]
    self.assertListEqual(actual_task_ids, expected_task_ids)


if __name__ == '__main__':
  unittest.main()
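The setUp() above relies on assigning a lambda to side_effect so each Variable.get call is answered from the AIRFLOW_VARIABLES dict instead of one fixed return_value. A self-contained sketch of just that technique; Settings and CONFIG are hypothetical stand-ins, not Airflow classes:

import unittest
from unittest import mock

CONFIG = {"dag_name": "demo", "demo_schedule": "@once"}

class Settings:
    # Hypothetical stand-in for airflow.models.variable.Variable.
    @staticmethod
    def get(key, default=None):
        raise RuntimeError("would hit a real backend in production")

class SideEffectExample(unittest.TestCase):
    def test_lookup_is_redirected(self):
        with mock.patch.object(Settings, "get") as get_mock:
            # side_effect computes a fresh value per call, keyed on the argument,
            # whereas return_value would pin every call to one constant.
            get_mock.side_effect = lambda key, default=None: CONFIG[key]
            self.assertEqual(Settings.get("demo_schedule"), "@once")

if __name__ == "__main__":
    unittest.main()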
35.602041
74
0.755804
484
3,489
5.097107
0.359504
0.036887
0.025942
0.014593
0.126064
0
0
0
0
0
0
0.004711
0.14818
3,489
97
75
35.969072
0.82537
0.219834
0
0
0
0
0.201855
0.08757
0
0
0
0
0.065574
1
0.04918
false
0
0.147541
0
0.213115
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b971c0c6fde3e7ed6901642724f170bca1dffb
10,781
py
Python
talentmap_api/common/management/commands/load_xml.py
burgwyn/State-TalentMAP-API
1f4f3659c5743ebfd558cd87af381f5460f284b3
[ "CC0-1.0" ]
5
2018-08-09T18:51:12.000Z
2021-11-08T10:28:17.000Z
talentmap_api/common/management/commands/load_xml.py
burgwyn/State-TalentMAP-API
1f4f3659c5743ebfd558cd87af381f5460f284b3
[ "CC0-1.0" ]
232
2017-06-16T02:09:54.000Z
2018-05-10T16:15:48.000Z
talentmap_api/common/management/commands/load_xml.py
burgwyn/State-TalentMAP-API
1f4f3659c5743ebfd558cd87af381f5460f284b3
[ "CC0-1.0" ]
4
2018-06-13T14:49:27.000Z
2021-06-30T22:29:15.000Z
from django.core.management.base import BaseCommand import logging import re from talentmap_api.common.xml_helpers import XMLloader, strip_extra_spaces, parse_boolean, parse_date, get_nested_tag from talentmap_api.language.models import Language, Proficiency from talentmap_api.position.models import Grade, Skill, Position, CapsuleDescription, SkillCone from talentmap_api.organization.models import Organization, Post, TourOfDuty, Location, Country class Command(BaseCommand): help = 'Loads an XML into a supported file' logger = logging.getLogger(__name__) def __init__(self, *args, **kwargs): super(Command, self).__init__(*args, **kwargs) self.modes = { 'languages': mode_languages, 'proficiencies': mode_proficiencies, 'grades': mode_grades, 'skills': mode_skills, 'organizations': mode_organizations, 'positions': mode_positions, 'tours_of_duty': mode_tour_of_duty, 'posts': mode_post, 'countries': mode_country, 'locations': mode_location, 'capsule_descriptions': mode_capsule_description, 'skill_cone': mode_skill_cone } def add_arguments(self, parser): parser.add_argument('file', nargs=1, type=str, help="The XML file to load") parser.add_argument('type', nargs=1, type=str, choices=self.modes.keys(), help="The type of data in the XML") parser.add_argument('--delete', dest='delete', action='store_true', help='Delete collisions') parser.add_argument('--update', dest='update', action='store_true', help='Update collisions') parser.add_argument('--skippost', dest='skip_post', action='store_true', help='Skip post load functions') def handle(self, *args, **options): model, instance_tag, tag_map, collision_field, post_load_function = self.modes[options['type'][0]]() # Set / update the collision behavior collision_behavior = None if options['delete']: collision_behavior = "delete" elif options['update']: collision_behavior = "update" else: collision_behavior = "skip" loader = XMLloader(model, instance_tag, tag_map, collision_behavior, collision_field) new_ids, updated_ids = loader.create_models_from_xml(options['file'][0]) # Run the post load function, if it exists if callable(post_load_function) and not options['skip_post']: post_load_function(new_ids, updated_ids) self.logger.info(f"XML Load Report\n\tNew: {len(new_ids)}\n\tUpdated: {len(updated_ids)}\t\t") def mode_languages(): model = Language instance_tag = "LANGUAGES:LANGUAGE" collision_field = "code" tag_map = { "LANGUAGES:LANG_CODE": "code", "LANGUAGES:LANG_LONG_DESC": "long_description", "LANGUAGES:LANG_SHORT_DESC": "short_description", "LANGUAGES:LANG_EFFECTIVE_DATE": parse_date("effective_date") } return (model, instance_tag, tag_map, collision_field, None) def mode_proficiencies(): model = Proficiency instance_tag = "LANGUAGE_PROFICIENCY:LANGUAGE_PROFICIENCY" collision_field = "code" tag_map = { "LANGUAGE_PROFICIENCY:LP_CODE": "code", "LANGUAGE_PROFICIENCY:LP_DESC": "description" } return (model, instance_tag, tag_map, collision_field, None) def mode_grades(): model = Grade instance_tag = "GRADES:GRADE" collision_field = "code" tag_map = { "GRADES:GRD_GRADE_CODE": "code" } def post_load_function(new_ids, updated_ids): for pos in Grade.objects.filter(id__in=new_ids + updated_ids): pos.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_skills(): model = Skill instance_tag = "SKILLS:SKILL" collision_field = "code" tag_map = { "SKILLS:SKILL_CODE": "code", "SKILLS:SKILL_DESCRIPTION": "description" } return (model, instance_tag, tag_map, collision_field, None) def mode_organizations(): model = 
Organization instance_tag = "DATA_RECORD" collision_field = "code" tag_map = { "ORG_CODE": "code", "ORG_SHORT_DESC": "short_description", "ORG_LONG_DESC": strip_extra_spaces("long_description"), "ORG_PARENT_ORG_CODE": "_parent_organization_code", "ORG_BUREAU_ORG_CODE": "_parent_bureau_code", "ORG_LOCATION_CODE": "_location_code" } # Update relationships def post_load_function(new_ids, updated_ids): for org in Organization.objects.filter(id__in=new_ids + updated_ids): org.update_relationships() # Regional code setting is done automatically by DOS Webservices, so # we now only need this logic when loading from our sample XML files # Array of regional codes regional_codes = [ "110000", "120000", "130000", "140000", "146000", "150000", "160000" ] if org.code in regional_codes: org.is_regional = True else: org.is_regional = False if org.code == org._parent_bureau_code: org.is_bureau = True org.save() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_positions(): model = Position instance_tag = "POSITIONS:POSITION" collision_field = "_seq_num" tag_map = { "POSITIONS:POS_SEQ_NUM": "_seq_num", "POSITIONS:POS_NUM_TEXT": "position_number", "POSITIONS:POS_TITLE_CODE": "_title_code", "POSITIONS:POS_TITLE_DESC": "title", "POSITIONS:POS_ORG_CODE": "_org_code", "POSITIONS:POS_BUREAU_CODE": "_bureau_code", "POSITIONS:POS_SKILL_CODE": "_skill_code", "POSITIONS:POS_STAFF_PTRN_SKILL_CODE": "_staff_ptrn_skill_code", "POSITIONS:POS_OVERSEAS_IND": parse_boolean("is_overseas", ['O']), "POSITIONS:POS_PAY_PLAN_CODE": "_pay_plan_code", "POSITIONS:POS_STATUS_CODE": "_status_code", "POSITIONS:POS_SERVICE_TYPE_CODE": "_service_type_code", "POSITIONS:POS_GRADE_CODE": "_grade_code", "POSITIONS:POS_POST_CODE": "_post_code", "POSITIONS:POS_LANGUAGE_1_CODE": "_language_1_code", "POSITIONS:POS_LANGUAGE_2_CODE": "_language_2_code", "POSITIONS:POS_LOCATION_CODE": "_location_code", "POSITIONS:POS_LANG_REQ_1_CODE": "_language_req_1_code", "POSITIONS:POS_LANG_REQ_2_CODE": "_language_req_2_code", "POSITIONS:POS_SPEAK_PROFICIENCY_1_CODE": "_language_1_spoken_proficiency_code", "POSITIONS:POS_READ_PROFICIENCY_1_CODE": "_language_1_reading_proficiency_code", "POSITIONS:POS_SPEAK_PROFICIENCY_2_CODE": "_language_2_spoken_proficiency_code", "POSITIONS:POS_READ_PROFICIENCY_2_CODE": "_language_2_reading_proficiency_code", "POSITIONS:POS_CREATE_ID": "_create_id", "POSITIONS:POS_CREATE_DATE": parse_date("create_date"), "POSITIONS:POS_UPDATE_ID": "_update_id", "POSITIONS:POS_UPDATE_DATE": parse_date("update_date"), "POSITIONS:POS_EFFECTIVE_DATE": parse_date("effective_date"), "POSITIONS:POS_JOBCODE_CODE": "_jobcode_code", "POSITIONS:POS_OCC_SERIES_CODE": "_occ_series_code", } def post_load_function(new_ids, updated_ids): for pos in Position.objects.filter(id__in=new_ids + updated_ids): pos.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_tour_of_duty(): model = TourOfDuty instance_tag = "TOUR_OF_DUTIES:TOUR_OF_DUTY" collision_field = "code" tag_map = { "TOUR_OF_DUTIES:TOD_CODE": "code", "TOUR_OF_DUTIES:TOD_SHORT_DESC": "short_description", "TOUR_OF_DUTIES:TOD_DESC_TEXT": lambda instance, item: setattr(instance, "long_description", re.sub('&amp;', '&', item.text).strip()), "TOUR_OF_DUTIES:TOD_MONTHS_NUM": "months" } return (model, instance_tag, tag_map, collision_field, None) def mode_post(): model = Post instance_tag = "BIDPOSTS:BIDDING_TOOL" collision_field = "_location_code" tag_map = { "BIDPOSTS:DSC_CD": "_location_code", "BIDPOSTS:TOD_CODE": 
"_tod_code", "BIDPOSTS:BT_COST_OF_LIVING_ADJUST_NUM": "cost_of_living_adjustment", "BIDPOSTS:BT_DIFFERENTIAL_RATE_NUM": "differential_rate", "BIDPOSTS:BT_REST_RELAXATION_POINT_TEXT": strip_extra_spaces("rest_relaxation_point"), "BIDPOSTS:BT_DANGER_PAY_NUM": "danger_pay", "BIDPOSTS:BT_CONSUMABLE_ALLOWANCE_FLG": parse_boolean("has_consumable_allowance"), "BIDPOSTS:BT_SERVICE_NEEDS_DIFF_FLG": parse_boolean("has_service_needs_differential"), } def post_load_function(new_ids, updated_ids): for loc in Post.objects.filter(id__in=new_ids + updated_ids): loc.update_relationships() return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_country(): model = Country instance_tag = "DATA_RECORD" collision_field = "code" tag_map = { "COUNTRY_CODE": "code", "FULL_NAME": "name", "SHORT_NAME": "short_name", "COUNTRY_CODE_2": "short_code", "LOCATION_PREFIX": "location_prefix" } return (model, instance_tag, tag_map, collision_field, None) def mode_location(): model = Location instance_tag = "location" collision_field = "code" tag_map = { "code": "code", "city": strip_extra_spaces("city"), "state": strip_extra_spaces("state"), "country": "_country" } def post_load_function(new_ids, updated_ids): # Connect new locations to applicable posts for loc in Location.objects.filter(id__in=new_ids + updated_ids): Post.objects.filter(_location_code=loc.code).update(location=loc) return (model, instance_tag, tag_map, collision_field, post_load_function) def mode_capsule_description(): model = CapsuleDescription instance_tag = "position" collision_field = "_pos_seq_num" tag_map = { "POS_SEQ_NUM": "_pos_seq_num", "capsuleDescription": "content", } return (model, instance_tag, tag_map, collision_field, None) def mode_skill_cone(): model = SkillCone instance_tag = "jobCategorySkill" collision_field = None tag_map = { "id": "_id", "name": strip_extra_spaces("name"), "skill": get_nested_tag("_skill_codes", "code"), } return (model, instance_tag, tag_map, collision_field, None)
36.422297
142
0.66914
1,288
10,781
5.18323
0.190217
0.053925
0.047933
0.039844
0.281156
0.226932
0.211803
0.19263
0.172558
0.140803
0
0.007509
0.221779
10,781
295
143
36.545763
0.7882
0.027548
0
0.185345
0
0.00431
0.318633
0.16638
0
0
0
0
0
1
0.086207
false
0
0.030172
0
0.181034
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07b9b8864e1cb2b18a6326e38faad88d4012991a
2,581
py
Python
gluon/tests/test_recfile.py
oscarfonts/web2py
a18e0e489fe7a770c62fca510a4299886b0a9bb7
[ "BSD-3-Clause" ]
null
null
null
gluon/tests/test_recfile.py
oscarfonts/web2py
a18e0e489fe7a770c62fca510a4299886b0a9bb7
[ "BSD-3-Clause" ]
null
null
null
gluon/tests/test_recfile.py
oscarfonts/web2py
a18e0e489fe7a770c62fca510a4299886b0a9bb7
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Unit tests for gluon.recfile
"""

import unittest
import os
import shutil
import uuid

from .fix_path import fix_sys_path

fix_sys_path(__file__)

from gluon import recfile


class TestRecfile(unittest.TestCase):

    def setUp(self):
        os.mkdir('tests')

    def tearDown(self):
        shutil.rmtree('tests')

    def test_generation(self):
        for k in range(10):
            teststring = 'test%s' % k
            filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
            with recfile.open(filename, "w") as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r").read(), teststring)
            is_there = recfile.exists(filename)
            self.assertTrue(is_there)
            recfile.remove(filename)
            is_there = recfile.exists(filename)
            self.assertFalse(is_there)
        for k in range(10):
            teststring = 'test%s' % k
            filename = str(uuid.uuid4()) + '.test'
            with recfile.open(filename, "w", path='tests') as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r", path='tests').read(), teststring)
            is_there = recfile.exists(filename, path='tests')
            self.assertTrue(is_there)
            recfile.remove(filename, path='tests')
            is_there = recfile.exists(filename, path='tests')
            self.assertFalse(is_there)
        for k in range(10):
            teststring = 'test%s' % k
            filename = os.path.join('tests', str(uuid.uuid4()), str(uuid.uuid4()) + '.test')
            with recfile.open(filename, "w") as g:
                g.write(teststring)
            self.assertEqual(recfile.open(filename, "r").read(), teststring)
            is_there = recfile.exists(filename)
            self.assertTrue(is_there)
            recfile.remove(filename)
            is_there = recfile.exists(filename)
            self.assertFalse(is_there)

    def test_existing(self):
        filename = os.path.join('tests', str(uuid.uuid4()) + '.test')
        with open(filename, 'w') as g:
            g.write('this file exists')
        self.assertTrue(recfile.exists(filename))
        self.assertTrue(hasattr(recfile.open(filename, "r"), 'read'))
        recfile.remove(filename, path='tests')
        self.assertFalse(recfile.exists(filename))
        self.assertRaises(IOError, recfile.remove, filename)
        self.assertRaises(IOError, recfile.open, filename, "r")


if __name__ == '__main__':
    unittest.main()
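The third loop in test_generation writes into a directory that does not yet exist, so the behavior under test implies that recfile.open() creates missing parent directories on write. A rough sketch of an open() with that contract (not the real gluon.recfile implementation; recfile_like_open is a hypothetical name):

import os

def recfile_like_open(filename, mode="r", path=None):
    # Sketch of the behavior the tests above exercise: optional base path,
    # and (for writes) creation of missing parent directories.
    if path:
        filename = os.path.join(path, filename)
    if "w" in mode:
        os.makedirs(os.path.dirname(filename) or ".", exist_ok=True)
    return open(filename, mode)

For reads of a missing file this still raises FileNotFoundError, a subclass of IOError/OSError, which is consistent with the assertRaises(IOError, ...) checks in test_existing.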
34.413333
92
0.593181
300
2,581
5
0.22
0.056
0.084
0.08
0.739333
0.613333
0.613333
0.564
0.512
0.485333
0
0.006386
0.271988
2,581
74
93
34.878378
0.791911
0.027509
0
0.534483
0
0
0.052062
0
0
0
0
0
0.241379
1
0.068966
false
0
0.103448
0
0.189655
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07ba0eee7b3f5dcbd7cf03e65b5f80e0ede665d0
1,022
py
Python
configLambdas.py
cfrome77/liquid-stats
7a4d751dea215c94b650beb154a90abce7e1592d
[ "MIT" ]
4
2020-06-21T14:43:40.000Z
2021-12-29T17:02:18.000Z
configLambdas.py
cfrome77/liquid-stats
7a4d751dea215c94b650beb154a90abce7e1592d
[ "MIT" ]
21
2018-09-18T18:42:53.000Z
2022-01-04T05:55:12.000Z
configLambdas.py
cfrome77/liquid-stats
7a4d751dea215c94b650beb154a90abce7e1592d
[ "MIT" ]
null
null
null
import json
import os
import subprocess

from dotenv import load_dotenv
from subprocess import check_output, Popen, PIPE

load_dotenv()

# Accessing variables.
CLIENT_ID = os.environ.get('CLIENT_ID')
CLIENT_SECRET = os.environ.get('CLIENT_SECRET')
USERNAME = os.environ.get('USERNAME')
BUCKET_NAME = os.environ.get('BUCKET_NAME')


def get_lambda_functions():
    function_dict = {}
    res = subprocess.Popen(
        ["aws", "lambda", "list-functions"],
        stdout=subprocess.PIPE
    )
    output = res.communicate()
    function_dict.update(json.loads(output[0]))
    return function_dict['Functions']


lambda_functions = get_lambda_functions()
for lambda_function in lambda_functions:
    function_name = lambda_function['FunctionName']
    subprocess.run([
        "aws", "lambda", "update-function-configuration",
        "--function-name", f"{function_name}",
        "--environment",
        f"Variables={{CLIENT_ID={CLIENT_ID},CLIENT_SECRET={CLIENT_SECRET},USERNAME={USERNAME},BUCKET_NAME={BUCKET_NAME}}}"
    ])
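The same update can be done without shelling out to the AWS CLI. A hedged boto3 equivalent under the same assumptions (the four variables are already loaded from .env; credentials come from the environment); error handling is omitted:

import os
import boto3

client = boto3.client("lambda")
env = {k: os.environ.get(k, "")
       for k in ("CLIENT_ID", "CLIENT_SECRET", "USERNAME", "BUCKET_NAME")}

# Paginate in case the account has more functions than one page returns,
# which the single list-functions call above would silently truncate.
paginator = client.get_paginator("list_functions")
for page in paginator.paginate():
    for fn in page["Functions"]:
        client.update_function_configuration(
            FunctionName=fn["FunctionName"],
            Environment={"Variables": env},
        )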
26.894737
122
0.714286
123
1,022
5.707317
0.341463
0.045584
0.068376
0.051282
0
0
0
0
0
0
0
0.001153
0.151663
1,022
37
123
27.621622
0.808535
0.019569
0
0
0
0
0.277
0.14
0
0
0
0
0
1
0.037037
false
0
0.185185
0
0.259259
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07bb317b5c0941f28a4da7f31413b20f4a8bda28
7,838
py
Python
yolo3/utils.py
gaxu/keras-yolo3
7f6be0fb9a8583401246bfe65d2df2ee40777d72
[ "MIT" ]
null
null
null
yolo3/utils.py
gaxu/keras-yolo3
7f6be0fb9a8583401246bfe65d2df2ee40777d72
[ "MIT" ]
null
null
null
yolo3/utils.py
gaxu/keras-yolo3
7f6be0fb9a8583401246bfe65d2df2ee40777d72
[ "MIT" ]
4
2021-02-25T08:21:15.000Z
2021-02-25T08:56:39.000Z
"""Miscellaneous utility functions.""" from functools import reduce from PIL import Image import numpy as np from matplotlib.colors import rgb_to_hsv, hsv_to_rgb def compose(*funcs): """Compose arbitrarily many functions, evaluated left to right. Reference: https://mathieularose.com/function-composition-in-python/ """ # return lambda x: reduce(lambda v, f: f(v), funcs, x) if funcs: return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs) else: raise ValueError('Composition of empty sequence not supported.') def letterbox_image(image, size): '''resize image with unchanged aspect ratio using padding''' iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image def rand(a=0, b=1): return np.random.rand()*(b-a) + a def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): '''random preprocessing for real-time data augmentation''' line = annotation_line.split() image = Image.open(line[0]) iw, ih = image.size h, w = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) if not random: # resize image scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) dx = (w-nw)//2 dy = (h-nh)//2 image_data=0 if proc_img: image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(image, (dx, dy)) image_data = np.array(new_image)/255. # correct boxes box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:, [0,2]] = box[:, [0,2]]*scale + dx box[:, [1,3]] = box[:, [1,3]]*scale + dy box_data[:len(box)] = box return image_data, box_data # resize image new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter) scale = rand(.25, 2) if new_ar < 1: nh = int(scale*h) nw = int(nh*new_ar) else: nw = int(scale*w) nh = int(nw/new_ar) image = image.resize((nw,nh), Image.BICUBIC) # place image dx = int(rand(0, w-nw)) dy = int(rand(0, h-nh)) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(image, (dx, dy)) image = new_image # flip image or not flip = rand()<.5 if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT) # distort image hue = rand(-hue, hue) sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat) val = rand(1, val) if rand()<.5 else 1/rand(1, val) x = rgb_to_hsv(np.array(image)/255.) 
x[..., 0] += hue x[..., 0][x[..., 0]>1] -= 1 x[..., 0][x[..., 0]<0] += 1 x[..., 1] *= sat x[..., 2] *= val x[x>1] = 1 x[x<0] = 0 image_data = hsv_to_rgb(x) # numpy array, 0 to 1 # correct boxes box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy if flip: box[:, [0,2]] = w - box[:, [2,0]] box[:, 0:2][box[:, 0:2]<0] = 0 box[:, 2][box[:, 2]>w] = w box[:, 3][box[:, 3]>h] = h box_w = box[:, 2] - box[:, 0] box_h = box[:, 3] - box[:, 1] box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box if len(box)>max_boxes: box = box[:max_boxes] box_data[:len(box)] = box return image_data, box_data def get_random_data2(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): '''random preprocessing for real-time data augmentation''' line = annotation_line.split() image = Image.open(line[0]) w, h = image.size #13 14 dx, dy = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) x_min = w x_max = 0 y_min = h y_max = 0 for bbox in box: x_min = min(x_min, bbox[0]) y_min = min(y_min, bbox[1]) x_max = max(x_max, bbox[2]) y_max = max(y_max, bbox[3]) name = bbox[4] # distances from the smallest box enclosing all target boxes to each image edge d_to_left = x_min d_to_right = w - x_max d_to_top = y_min d_to_bottom = h - y_max # randomly expand that minimal region crop_x_min = int(x_min - rand(0, d_to_left)) crop_y_min = int(y_min - rand(0, d_to_top)) crop_x_max = int(x_max + rand(0, d_to_right)) crop_y_max = int(y_max + rand(0, d_to_bottom)) # clamp the crop so it stays inside the image crop_x_min = max(0, crop_x_min) crop_y_min = max(0, crop_y_min) crop_x_max = min(w, crop_x_max) crop_y_max = min(h, crop_y_max) cropped = image.crop((crop_x_min, crop_y_min, crop_x_max, crop_y_max)) # (left, upper, right, lower) new_image = Image.new('RGB', (w,h), (128,128,128)) new_image.paste(cropped, (dx, dy)) # NOTE: dx, dy come straight from input_shape above, so the crop is pasted at that offset rather than at the origin image_data = np.array(new_image)/255.
box_data = np.zeros((max_boxes,5)) if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:,0] = box[:,0]-crop_x_min # x coordinates shift by crop_x_min (the original subtracted crop_y_min here) box[:,1] = box[:,1]-crop_y_min box[:,2] = box[:,2]-crop_x_min box[:,3] = box[:,3]-crop_y_min box_data[:len(box)] = box return image_data, box_data def get_random_data2(annotation_line, input_shape, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True): # NOTE: this second definition shadows the get_random_data2 above import cv2 import random # missing at module level but required by this version line = annotation_line.split() img = cv2.imread(line[0]) h_img, w_img, _ = img.shape w, h = input_shape box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]]) max_bbox = np.concatenate([np.min(box[:, 0:2], axis=0), np.max(box[:, 2:4], axis=0)], axis=-1) # box enclosing all target boxes # distances from that enclosing box to each image edge max_l_trans = max_bbox[0] max_u_trans = max_bbox[1] max_r_trans = w_img - max_bbox[2] max_d_trans = h_img - max_bbox[3] # randomly expand the crop region, up to twice each margin crop_xmin = max(0, int(max_bbox[0] - random.uniform(0, max_l_trans)*2)) crop_ymin = max(0, int(max_bbox[1] - random.uniform(0, max_u_trans)*2)) crop_xmax = max(w_img, int(max_bbox[2] + random.uniform(0, max_r_trans)*2)) # max() never clamps to the image here; the numpy slice below tolerates out-of-range bounds crop_ymax = max(h_img, int(max_bbox[3] + random.uniform(0, max_d_trans)*2)) img = img[crop_ymin : crop_ymax, crop_xmin : crop_xmax] # perform the crop # the image is currently a cv2 BGR array; convert to PIL so paste() can be used image = Image.fromarray(cv2.cvtColor(img,cv2.COLOR_BGR2RGB)) new_image = Image.new('RGB', (w,h), (128,128,128)) # create a grey (w, h) canvas new_image.paste(image, (0, 0)) # paste the PIL image onto the grey canvas img2 = cv2.cvtColor(np.asarray(new_image),cv2.COLOR_RGB2BGR) # convert back to a cv2 BGR array box_data = np.zeros((max_boxes,5)) # at most max_boxes boxes, i.e. shape (20, 5) # shift the boxes by the crop offset and drop any that become degenerate if len(box)>0: np.random.shuffle(box) if len(box)>max_boxes: box = box[:max_boxes] box[:, [0, 2]] = box[:, [0, 2]] - crop_xmin box[:, [1, 3]] = box[:, [1, 3]] - crop_ymin box[:, 2][box[:, 2]>w] = w box[:, 3][box[:, 3]>h] = h box_w = box[:, 2] - box[:, 0] box_h = box[:, 3] - box[:, 1] box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box if len(box)>max_boxes: box = box[:max_boxes] box_data[:len(box)] = box # draw the boxes (debugging helper) # light_blue = (255,200,100) # for boxs in box: # cv2.rectangle(img2,(boxs[0],boxs[1]),(boxs[2],boxs[3]),light_blue,2) # writename=os.path.basename(line[0]) # get the file name # cv2.imshow('My Image', img2) # cv2.waitKey(0) return img2, box_data
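The letterbox math used by letterbox_image() and the non-random branch of get_random_data() is easier to see with concrete numbers; the 1280x720 frame below is illustrative:

# Fit a 1280x720 frame into a 416x416 canvas without distorting it
iw, ih = 1280, 720
w, h = 416, 416
scale = min(w / iw, h / ih)                # min(0.325, 0.5777...) = 0.325
nw, nh = int(iw * scale), int(ih * scale)  # 416 x 234 resized image
dx, dy = (w - nw) // 2, (h - nh) // 2      # (0, 91): grey bars above and below
print(scale, (nw, nh), (dx, dy))

Taking the minimum of the two ratios guarantees both dimensions fit, and the (dx, dy) offsets center the resized image so the grey padding is split evenly.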
32.522822
130
0.578719
1,309
7,838
3.296409
0.148969
0.016686
0.025492
0.032445
0.473233
0.448204
0.422711
0.390267
0.390267
0.377984
0
0.048054
0.245981
7,838
240
131
32.658333
0.682064
0.128477
0
0.379518
0
0
0.009168
0
0
0
0
0
0
1
0.036145
false
0
0.024096
0.006024
0.10241
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07bd8ff47d8f5245f9a21b92c33b949fce1127a8
8,482
py
Python
sportsreference/ncaaf/rankings.py
JosephDErwin/sportsreference
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
[ "MIT" ]
null
null
null
sportsreference/ncaaf/rankings.py
JosephDErwin/sportsreference
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
[ "MIT" ]
null
null
null
sportsreference/ncaaf/rankings.py
JosephDErwin/sportsreference
f026366bec91fdf4bebef48e3a4bfd7c5bfab4bd
[ "MIT" ]
1
2020-07-08T16:05:25.000Z
2020-07-08T16:05:25.000Z
import re

from pyquery import PyQuery as pq

from .. import utils
from .constants import RANKINGS_SCHEME, RANKINGS_URL
from six.moves.urllib.error import HTTPError


class Rankings:
    """
    Get all Associated Press (AP) rankings on a week-by-week basis.

    Grab a list of the rankings published by the Associated Press to easily
    query the hierarchy of teams each week. The results expose the current
    and previous rankings as well as the movement for each team in the list.

    Parameters
    ----------
    year : string (optional)
        A string of the requested year to pull rankings from. Defaults to the
        most recent season.
    """
    def __init__(self, year=None):
        self._rankings = {}

        self._find_rankings(year)

    def _pull_rankings_page(self, year):
        """
        Download the rankings page.

        Download the rankings page for the requested year and create a
        PyQuery object.

        Parameters
        ----------
        year : string
            A string of the requested year to pull rankings from.

        Returns
        -------
        PyQuery object
            Returns a PyQuery object of the rankings HTML page.
        """
        try:
            return pq(RANKINGS_URL % year)
        except HTTPError:
            return None

    def _get_team(self, team):
        """
        Retrieve team's name and abbreviation.

        The team's name and abbreviation are embedded within the 'school_name'
        tag and, in the case of the abbreviation, require special parsing as
        it is located in the middle of a URI. The name and abbreviation are
        returned for the requested school.

        Parameters
        ----------
        team : PyQuery object
            A PyQuery object representing a single row in a table on the
            rankings page.

        Returns
        -------
        tuple (string, string)
            Returns a tuple of two strings where the first string is the
            team's abbreviation, such as 'PURDUE' and the second string is
            the team's name, such as 'Purdue'.
        """
        name_tag = team('td[data-stat="school_name"]')
        abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag('a')))
        abbreviation = re.sub(r'/.*', '', abbreviation)
        name = team('td[data-stat="school_name"] a').text()
        return abbreviation, name

    def _find_rankings(self, year):
        """
        Retrieve the rankings for each week.

        Find and retrieve all AP rankings for the requested year and combine
        them on a per-week basis. Each week contains information about the
        name, abbreviation, rank, movement, and previous rank for each team
        as well as the date and week number the results were published on.

        Parameters
        ----------
        year : string
            A string of the requested year to pull rankings from.
        """
        if not year:
            year = utils._find_year_for_season('ncaaf')
        page = self._pull_rankings_page(year)
        if not page:
            output = ("Can't pull rankings page. Ensure the following URL "
                      "exists: %s" % RANKINGS_URL)
            raise ValueError(output)
        rankings = page('table#ap tbody tr').items()
        weekly_rankings = []
        week = 0
        for team in rankings:
            if 'class="thead"' in str(team):
                self._rankings[int(week)] = weekly_rankings
                weekly_rankings = []
                continue
            abbreviation, name = self._get_team(team)
            rank = utils._parse_field(RANKINGS_SCHEME, team, 'rank')
            week = utils._parse_field(RANKINGS_SCHEME, team, 'week')
            date = utils._parse_field(RANKINGS_SCHEME, team, 'date')
            previous = utils._parse_field(RANKINGS_SCHEME, team, 'previous')
            change = utils._parse_field(RANKINGS_SCHEME, team, 'change')
            if 'decrease' in str(team(RANKINGS_SCHEME['change'])):
                change = int(change) * -1
            elif 'increase' in str(team(RANKINGS_SCHEME['change'])):
                try:
                    change = int(change)
                except ValueError:
                    change = 0
            else:
                change = 0
            rank_details = {
                'abbreviation': abbreviation,
                'name': name,
                'rank': int(rank),
                'week': int(week),
                'date': date,
                'previous': previous,
                'change': change
            }
            weekly_rankings.append(rank_details)
        # Add the final rankings which is not terminated with another header
        # row and hence will not hit the first if statement in the loop above.
        self._rankings[int(week)] = weekly_rankings

    @property
    def current_extended(self):
        """
        Returns a ``list`` of ``dictionaries`` of the most recent AP rankings.
        The list is ordered in terms of the ranking so the #1 team will be in
        the first element and the #25 team will be the last element. Each
        dictionary has the following structure::

            {
                'abbreviation': Team's abbreviation, such as 'PURDUE' (str),
                'name': Team's full name, such as 'Purdue' (str),
                'rank': Team's rank for the current week (int),
                'week': Week number for the results, such as 19 (int),
                'date': Date the rankings were released, such as '2017-03-01'.
                        Can also be 'Final' for the final rankings or
                        'Preseason' for preseason rankings (str),
                'previous': The team's previous rank, if applicable (str),
                'change': The amount the team moved up or down the rankings.
                          Moves up the ladder have a positive number while
                          drops yield a negative number and teams that didn't
                          move have 0 (int)
            }
        """
        latest_week = max(self._rankings.keys())
        ordered_dict = sorted(self._rankings[latest_week],
                              key=lambda k: k['rank'])
        return ordered_dict

    @property
    def current(self):
        """
        Returns a ``dictionary`` of the most recent rankings from the
        Associated Press where each key is a ``string`` of the team's
        abbreviation and each value is an ``int`` of the team's rank for the
        current week.
        """
        rankings_dict = {}
        for team in self.current_extended:
            rankings_dict[team['abbreviation']] = team['rank']
        return rankings_dict

    @property
    def complete(self):
        """
        Returns a ``dictionary`` where each key is a week number as an ``int``
        and each value is a ``list`` of ``dictionaries`` containing the AP
        rankings for each week. Within each list is a dictionary of team
        information such as name, abbreviation, rank, and more. Note that the
        list might not necessarily be in the same order as the rankings.

        The overall dictionary has the following structure::

            {
                week number, ie 16 (int): [
                    {
                        'abbreviation': Team's abbreviation, such as 'PURDUE'
                                        (str),
                        'name': Team's full name, such as 'Purdue' (str),
                        'rank': Team's rank for the current week (int),
                        'week': Week number for the results, such as 16 (int),
                        'date': Date the rankings were released, such as
                                '2017-12-03'. Can also be 'Final' for the
                                final rankings or 'Preseason' for preseason
                                rankings (str),
                        'previous': The team's previous rank, if applicable
                                    (str),
                        'change': The amount the team moved up or down the
                                  rankings. Moves up the ladder have a
                                  positive number while drops yield a
                                  negative number and teams that didn't move
                                  have 0 (int)
                    },
                    ...
                ],
                ...
            }
        """
        return self._rankings
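# A short, hypothetical usage of the class above (requires network access to
# whatever site RANKINGS_URL points at):
rankings = Rankings(year='2017')        # or Rankings() for the latest season
print(rankings.current)                 # e.g. {'PURDUE': 25, ...}
top = rankings.current_extended[0]
print(top['name'], top['rank'])         # the current #1 team
weeks = rankings.complete               # {week_number: [rank_details, ...]}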
39.821596
79
0.549399
992
8,482
4.627016
0.225806
0.015251
0.0122
0.025054
0.359477
0.3122
0.232462
0.226797
0.226797
0.226797
0
0.005807
0.370667
8,482
212
80
40.009434
0.854065
0.534072
0
0.142857
0
0
0.092076
0.017324
0
0
0
0
0
1
0.090909
false
0
0.064935
0
0.246753
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07bd958d061b56e11538171ee991b3269000d33d
5,937
py
Python
ding/hpc_rl/wrapper.py
davide97l/DI-engine
d48c93bcd5c07c29f2ce4ac1b7756b8bc255c423
[ "Apache-2.0" ]
1
2022-03-21T16:15:39.000Z
2022-03-21T16:15:39.000Z
ding/hpc_rl/wrapper.py
jiaruonan/DI-engine
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
[ "Apache-2.0" ]
null
null
null
ding/hpc_rl/wrapper.py
jiaruonan/DI-engine
268d77db3cb54401b2cfc83e2bc3ec87c31e7b83
[ "Apache-2.0" ]
null
null
null
import importlib
from collections import OrderedDict
from functools import wraps

from ditk import logging

import ding

'''
Overview:
    `hpc_wrapper` is the wrapper for functions which are supported by hpc. If a function is wrapped by it, we will
    search for its hpc type and return the function implemented by hpc.
    We will use the following code as a sample to introduce `hpc_wrapper`:
    ```
    @hpc_wrapper(shape_fn=shape_fn_dntd, namedtuple_data=True, include_args=[0,1,2,3],
                 include_kwargs=['data', 'gamma', 'v_min', 'v_max'], is_cls_method=False)
    def dist_nstep_td_error(
            data: namedtuple,
            gamma: float,
            v_min: float,
            v_max: float,
            n_atom: int,
            nstep: int = 1,
    ) -> torch.Tensor:
        ...
    ```
Parameters:
    - shape_fn (:obj:`function`): a function which returns the shape needed by the hpc function. In fact, it
      returns all args that the hpc function needs.
    - namedtuple_data (:obj:`bool`): If True, when the hpc function is called, it is called as
      hpc_function(*namedtuple). If False, namedtuple data keeps its `namedtuple` type.
    - include_args (:obj:`list`): a list of indexes of the args that need to be passed to the hpc function. As
      shown in the sample, include_args=[0,1,2,3] means `data`, `gamma`, `v_min` and `v_max` will be passed to
      the hpc function.
    - include_kwargs (:obj:`list`): a list of keys of the kwargs that need to be passed to the hpc function. As
      shown in the sample, include_kwargs=['data', 'gamma', 'v_min', 'v_max'] means `data`, `gamma`, `v_min`
      and `v_max` will be passed to the hpc function.
    - is_cls_method (:obj:`bool`): If True, the wrapped function is a method of a class, so `self` appears in
      args; we strip `self` from args and use the class name as fn_name. If False, the function is a plain
      function.
Q&A:
    - Q: Do `include_args` and `include_kwargs` need to be set at the same time?
    - A: Yes. Together, `include_args` and `include_kwargs` can deal with every kind of input, such as
      (data, gamma, v_min=v_min, v_max=v_max) and (data, gamma, v_min, v_max).
    - Q: What is `hpc_fns`?
    - A: Here is a typical `hpc_fns`:
      ```
      hpc_fns = {
          'fn_name1': {
              'runtime_name1': hpc_fn1,
              'runtime_name2': hpc_fn2,
              ...
          },
          ...
      }
      ```
      Besides, `per_fn_limit` is the max length of `hpc_fns[fn_name]`. When a new function comes in, the
      oldest function is popped from `hpc_fns[fn_name]`.
'''

hpc_fns = {}
per_fn_limit = 3


def register_runtime_fn(fn_name, runtime_name, shape):
    fn_name_mapping = {
        'gae': ['hpc_rll.rl_utils.gae', 'GAE'],
        'dist_nstep_td_error': ['hpc_rll.rl_utils.td', 'DistNStepTD'],
        'LSTM': ['hpc_rll.torch_utils.network.rnn', 'LSTM'],
        'ppo_error': ['hpc_rll.rl_utils.ppo', 'PPO'],
        'q_nstep_td_error': ['hpc_rll.rl_utils.td', 'QNStepTD'],
        'q_nstep_td_error_with_rescale': ['hpc_rll.rl_utils.td', 'QNStepTDRescale'],
        'ScatterConnection': ['hpc_rll.torch_utils.network.scatter_connection', 'ScatterConnection'],
        'td_lambda_error': ['hpc_rll.rl_utils.td', 'TDLambda'],
        'upgo_loss': ['hpc_rll.rl_utils.upgo', 'UPGO'],
        'vtrace_error': ['hpc_rll.rl_utils.vtrace', 'VTrace'],
    }
    fn_str = fn_name_mapping[fn_name]
    cls = getattr(importlib.import_module(fn_str[0]), fn_str[1])
    hpc_fn = cls(*shape).cuda()
    if fn_name not in hpc_fns:
        hpc_fns[fn_name] = OrderedDict()
    hpc_fns[fn_name][runtime_name] = hpc_fn
    while len(hpc_fns[fn_name]) > per_fn_limit:
        hpc_fns[fn_name].popitem(last=False)
    # print(hpc_fns)
    return hpc_fn


def hpc_wrapper(shape_fn=None, namedtuple_data=False, include_args=[], include_kwargs=[], is_cls_method=False):

    def decorate(fn):

        @wraps(fn)
        def wrapper(*args, **kwargs):
            if ding.enable_hpc_rl:
                shape = shape_fn(args, kwargs)
                if is_cls_method:
                    fn_name = args[0].__class__.__name__
                else:
                    fn_name = fn.__name__
                runtime_name = '_'.join([fn_name] + [str(s) for s in shape])
                if fn_name not in hpc_fns or runtime_name not in hpc_fns[fn_name]:
                    hpc_fn = register_runtime_fn(fn_name, runtime_name, shape)
                else:
                    hpc_fn = hpc_fns[fn_name][runtime_name]
                if is_cls_method:
                    args = args[1:]
                clean_args = []
                for i in include_args:
                    if i < len(args):
                        clean_args.append(args[i])
                nouse_args = list(set(list(range(len(args)))).difference(set(include_args)))
                clean_kwargs = {}
                for k, v in kwargs.items():
                    if k in include_kwargs:
                        if k == 'lambda_':
                            k = 'lambda'
                        clean_kwargs[k] = v
                nouse_kwargs = list(set(kwargs.keys()).difference(set(include_kwargs)))
                if len(nouse_args) > 0 or len(nouse_kwargs) > 0:
                    logging.warn(
                        'in {}, index {} of args are dropped, and keys {} of kwargs are dropped.'.format(
                            runtime_name, nouse_args, nouse_kwargs
                        )
                    )
                if namedtuple_data:
                    data = args[0]  # args[0] is a namedtuple
                    return hpc_fn(*data, *clean_args[1:], **clean_kwargs)
                else:
                    return hpc_fn(*clean_args, **clean_kwargs)
            else:
                return fn(*args, **kwargs)

        return wrapper

    return decorate
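# A hedged sketch of wrapping a function with hpc_wrapper. `shape_fn_example`
# and `my_error` are hypothetical names; a real wrapped function must also
# have an entry in fn_name_mapping inside register_runtime_fn for the hpc
# path to resolve.
import torch

def shape_fn_example(args, kwargs):
    # Return whatever constructor arguments the hpc kernel expects;
    # here (as an assumption): the batch size and feature dim of `data`.
    data = kwargs.get('data', args[0])
    return [data.shape[0], data.shape[1]]

@hpc_wrapper(shape_fn=shape_fn_example, namedtuple_data=False,
             include_args=[0], include_kwargs=['data'], is_cls_method=False)
def my_error(data: torch.Tensor) -> torch.Tensor:
    return data.mean()  # plain PyTorch path, used when ding.enable_hpc_rl is False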
44.30597
120
0.578912
818
5,937
3.96088
0.224939
0.037037
0.022222
0.02963
0.259259
0.163889
0.131173
0.119444
0.062346
0.062346
0
0.005862
0.310426
5,937
133
121
44.639098
0.78554
0.006401
0
0.082192
0
0
0.159641
0.044843
0
0
0
0
0
1
0.054795
false
0
0.082192
0
0.219178
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07bf6145f03382a8bd2f1aee40dd398c31e7d6c2
10,343
py
Python
telethon_generator/parsers/tlobject.py
islam-200555/Telethon
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
[ "MIT" ]
2
2021-01-06T12:49:49.000Z
2021-04-23T16:32:13.000Z
telethon_generator/parsers/tlobject.py
islam-200555/Telethon
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
[ "MIT" ]
null
null
null
telethon_generator/parsers/tlobject.py
islam-200555/Telethon
85103bcf6de8024c902ede98f0b9bf0f7f47a0aa
[ "MIT" ]
null
null
null
import re
from zlib import crc32

from ..utils import snake_to_camel_case

CORE_TYPES = (
    0xbc799737,  # boolFalse#bc799737 = Bool;
    0x997275b5,  # boolTrue#997275b5 = Bool;
    0x3fedd339,  # true#3fedd339 = True;
    0x1cb5c415,  # vector#1cb5c415 {t:Type} # [ t ] = Vector t;
)

# https://github.com/telegramdesktop/tdesktop/blob/4bf66cb6e93f3965b40084771b595e93d0b11bcd/Telegram/SourceFiles/codegen/scheme/codegen_scheme.py#L57-L62
WHITELISTED_MISMATCHING_IDS = {
    # 0 represents any layer
    0: {'ipPortSecret', 'accessPointRule', 'help.configSimple'},
    77: {'channel'},
    78: {'channel'}
}


class TLObject:
    def __init__(self, fullname, object_id, args, result, is_function, layer):
        """
        Initializes a new TLObject, given its properties.

        :param fullname: The fullname of the TL object (namespace.name)
                         The namespace can be omitted.
        :param object_id: The hexadecimal string representing the object ID
        :param args: The arguments, if any, of the TL object
        :param result: The result type of the TL object
        :param is_function: Is the object a function or a type?
        :param layer: The layer this TLObject belongs to.
        """
        # The name can or not have a namespace
        self.fullname = fullname
        if '.' in fullname:
            self.namespace, self.name = fullname.split('.', maxsplit=1)
        else:
            self.namespace, self.name = None, fullname

        self.args = args
        self.result = result
        self.is_function = is_function
        self.id = None
        if object_id is None:
            self.id = self.infer_id()
        else:
            self.id = int(object_id, base=16)
            whitelist = WHITELISTED_MISMATCHING_IDS[0] |\
                WHITELISTED_MISMATCHING_IDS.get(layer, set())
            if self.fullname not in whitelist:
                assert self.id == self.infer_id(),\
                    'Invalid inferred ID for ' + repr(self)

        self.class_name = snake_to_camel_case(
            self.name, suffix='Request' if self.is_function else '')

        self.real_args = list(a for a in self.sorted_args() if not
                              (a.flag_indicator or a.generic_definition))

    def sorted_args(self):
        """Returns the arguments properly sorted and ready to plug-in
           into a Python's method header (i.e., flags and those which
           can be inferred will go last so they can default =None)
        """
        return sorted(self.args,
                      key=lambda x: x.is_flag or x.can_be_inferred)

    def __repr__(self, ignore_id=False):
        if self.id is None or ignore_id:
            hex_id = ''
        else:
            hex_id = '#{:08x}'.format(self.id)

        if self.args:
            args = ' ' + ' '.join([repr(arg) for arg in self.args])
        else:
            args = ''

        return '{}{}{} = {}'.format(self.fullname, hex_id, args, self.result)

    def infer_id(self):
        representation = self.__repr__(ignore_id=True)
        representation = representation\
            .replace(':bytes ', ':string ')\
            .replace('?bytes ', '?string ')\
            .replace('<', ' ').replace('>', '')\
            .replace('{', '').replace('}', '')

        representation = re.sub(
            r' \w+:flags\.\d+\?true',
            r'',
            representation
        )
        return crc32(representation.encode('ascii'))


class TLArg:
    def __init__(self, name, arg_type, generic_definition):
        """
        Initializes a new .tl argument
        :param name: The name of the .tl argument
        :param arg_type: The type of the .tl argument
        :param generic_definition: Is the argument a generic definition?
                                   (i.e. {X:Type})
        """
        self.name = 'is_self' if name == 'self' else name

        # Default values
        self.is_vector = False
        self.is_flag = False
        self.skip_constructor_id = False
        self.flag_index = -1

        # Special case: some types can be inferred, which makes it
        # less annoying to type. Currently the only type that can
        # be inferred is if the name is 'random_id', to which a
        # random ID will be assigned if left as None (the default)
        self.can_be_inferred = name == 'random_id'

        # The type can be an indicator that other arguments will be flags
        if arg_type == '#':
            self.flag_indicator = True
            self.type = None
            self.is_generic = False
        else:
            self.flag_indicator = False
            self.is_generic = arg_type.startswith('!')
            # Strip the exclamation mark always to have only the name
            self.type = arg_type.lstrip('!')

            # The type may be a flag (flags.IDX?REAL_TYPE)
            # Note that 'flags' is NOT the flags name; this
            # is determined by a previous argument
            # However, we assume that the argument will always be called 'flags'
            flag_match = re.match(r'flags.(\d+)\?([\w<>.]+)', self.type)
            if flag_match:
                self.is_flag = True
                self.flag_index = int(flag_match.group(1))
                # Update the type to match the exact type, not the "flagged" one
                self.type = flag_match.group(2)

            # Then check if the type is a Vector<REAL_TYPE>
            vector_match = re.match(r'[Vv]ector<([\w\d.]+)>', self.type)
            if vector_match:
                self.is_vector = True

                # If the type's first letter is not uppercase, then
                # it is a constructor and we use (read/write) its ID
                # as pinpointed on issue #81.
                self.use_vector_id = self.type[0] == 'V'

                # Update the type to match the one inside the vector
                self.type = vector_match.group(1)

            # See use_vector_id. An example of such case is ipPort in
            # help.configSpecial
            if self.type.split('.')[-1][0].islower():
                self.skip_constructor_id = True

            # The name may contain "date" in it, if this is the case and the type is "int",
            # we can safely assume that this should be treated as a "date" object.
            # Note that this is not a valid Telegram object, but it's easier to work with
            if self.type == 'int' and (
                    re.search(r'(\b|_)date\b', name) or
                    name in ('expires', 'expires_at', 'was_online')):
                self.type = 'date'

        self.generic_definition = generic_definition

    def type_hint(self):
        type = self.type
        if '.' in type:
            type = type.split('.')[1]
        result = {
            'int': 'int',
            'long': 'int',
            'int128': 'int',
            'int256': 'int',
            'string': 'str',
            'date': 'Optional[datetime]',  # None date = 0 timestamp
            'bytes': 'bytes',
            'true': 'bool',
        }.get(type, "Type{}".format(type))
        if self.is_vector:
            result = 'List[{}]'.format(result)
        if self.is_flag and type != 'date':
            result = 'Optional[{}]'.format(result)

        return result

    def __str__(self):
        # Find the real type representation by updating it as required
        real_type = self.type
        if self.flag_indicator:
            real_type = '#'

        if self.is_vector:
            if self.use_vector_id:
                real_type = 'Vector<{}>'.format(real_type)
            else:
                real_type = 'vector<{}>'.format(real_type)

        if self.is_generic:
            real_type = '!{}'.format(real_type)

        if self.is_flag:
            real_type = 'flags.{}?{}'.format(self.flag_index, real_type)

        if self.generic_definition:
            return '{{{}:{}}}'.format(self.name, real_type)
        else:
            return '{}:{}'.format(self.name, real_type)

    def __repr__(self):
        return str(self).replace(':date', ':int').replace('?date', '?int')


def _from_line(line, is_function, layer):
    match = re.match(
        r'^([\w.]+)'                     # 'name'
        r'(?:#([0-9a-fA-F]+))?'          # '#optionalcode'
        r'(?:\s{?\w+:[\w\d<>#.?!]+}?)*'  # '{args:.0?type}'
        r'\s=\s'                         # ' = '
        r'([\w\d<>#.?]+);$',             # '<result.type>;'
        line
    )
    if match is None:
        # Probably "vector#1cb5c415 {t:Type} # [ t ] = Vector t;"
        raise ValueError('Cannot parse TLObject {}'.format(line))

    args_match = re.findall(
        r'({)?'
        r'(\w+)'
        r':'
        r'([\w\d<>#.?!]+)'
        r'}?',
        line
    )
    return TLObject(
        fullname=match.group(1),
        object_id=match.group(2),
        result=match.group(3),
        is_function=is_function,
        layer=layer,
        args=[TLArg(name, arg_type, brace != '')
              for brace, name, arg_type in args_match]
    )


def parse_tl(file_path, layer, ignore_core=False):
    """This method yields TLObjects from a given .tl file."""
    with open(file_path, encoding='utf-8') as file:
        is_function = False
        for line in file:
            comment_index = line.find('//')
            if comment_index != -1:
                line = line[:comment_index]

            line = line.strip()
            if not line:
                continue

            match = re.match('---(\w+)---', line)
            if match:
                following_types = match.group(1)
                is_function = following_types == 'functions'
                continue

            try:
                result = _from_line(line, is_function, layer=layer)
                if not ignore_core or result.id not in CORE_TYPES:
                    yield result
            except ValueError as e:
                if 'vector#1cb5c415' not in str(e):
                    raise


def find_layer(file_path):
    """Finds the layer used on the specified scheme.tl file."""
    layer_regex = re.compile(r'^//\s*LAYER\s*(\d+)$')
    with open(file_path, encoding='utf-8') as file:
        for line in file:
            match = layer_regex.match(line)
            if match:
                return int(match.group(1))
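# Hypothetical usage of the parser above; 'scheme.tl' is a placeholder path.
layer = find_layer('scheme.tl')  # may be None if no '// LAYER N' line exists
for obj in parse_tl('scheme.tl', layer, ignore_core=True):
    kind = 'function' if obj.is_function else 'type'
    print('{:08x} {} ({})'.format(obj.id, obj.fullname, kind))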
35.788927
153
0.541332
1,258
10,343
4.321145
0.227345
0.022075
0.00883
0.00883
0.094555
0.066961
0.022443
0.012509
0.012509
0
0
0.019301
0.33878
10,343
288
154
35.913194
0.775552
0.252441
0
0.098958
0
0
0.088208
0.009594
0.005208
0
0.00533
0
0.005208
1
0.057292
false
0
0.015625
0.005208
0.130208
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c02ac3abe7a10db3f40c2457c57bca98b41742
2,469
bzl
Python
dotnet/private/actions/resx_core.bzl
purkhusid/rules_dotnet
934e62d65ed3657be20b2ae3a63e032a2de9ff84
[ "Apache-2.0" ]
143
2016-03-15T20:37:54.000Z
2022-02-25T12:30:08.000Z
dotnet/private/actions/resx_core.bzl
purkhusid/rules_dotnet
934e62d65ed3657be20b2ae3a63e032a2de9ff84
[ "Apache-2.0" ]
176
2016-03-17T13:28:50.000Z
2022-03-30T21:19:24.000Z
dotnet/private/actions/resx_core.bzl
purkhusid/rules_dotnet
934e62d65ed3657be20b2ae3a63e032a2de9ff84
[ "Apache-2.0" ]
79
2016-03-16T11:34:56.000Z
2022-02-04T10:54:00.000Z
"Actions for compiling resx files" load( "@io_bazel_rules_dotnet//dotnet/private:providers.bzl", "DotnetResourceInfo", ) def _make_runner_arglist(dotnet, source, output, resgen): args = dotnet.actions.args() if type(source) == "Target": args.add_all(source.files) else: args.add(source) args.add(output) return args def emit_resx_core( dotnet, name = "", src = None, identifier = None, out = None, customresgen = None): """The function adds an action that compiles a single .resx file into .resources file. Returns [DotnetResourceInfo](api.md#dotnetresourceinfo). Args: dotnet: [DotnetContextInfo](api.md#dotnetcontextinfo). name: name of the file to generate. src: The .resx source file that is transformed into .resources file. Only `.resx` files are permitted. identifier: The logical name for the resource; the name that is used to load the resource. The default is the basename of the file name (no subfolder). out: An alternative name of the output file (if name should not be used). customresgen: custom resgen program to use. Returns: DotnetResourceInfo: [DotnetResourceInfo](api.md#dotnetresourceinfo). """ if name == "" and out == None: fail("either name or out must be set") if not out: result = dotnet.actions.declare_file(name + ".resources") else: result = dotnet.actions.declare_file(out) args = _make_runner_arglist(dotnet, src, result, customresgen.files_to_run.executable.path) # We use the command to extrace shell path and force runfiles creation resolve = dotnet._ctx.resolve_tools(tools = [customresgen]) inputs = src.files.to_list() if type(src) == "Target" else [src] dotnet.actions.run( inputs = inputs + resolve[0].to_list(), tools = customresgen.default_runfiles.files, outputs = [result], executable = customresgen.files_to_run, arguments = [args], env = {"RUNFILES_MANIFEST_FILE": customresgen.files_to_run.runfiles_manifest.path}, mnemonic = "CoreResxCompile", input_manifests = resolve[1], progress_message = ( "Compiling resoources" + dotnet.label.package + ":" + dotnet.label.name ), ) return DotnetResourceInfo( name = name, result = result, identifier = identifier, )
32.92
159
0.651681
291
2,469
5.426117
0.388316
0.032932
0.036099
0.041799
0.037999
0
0
0
0
0
0
0.00108
0.249899
2,469
74
160
33.364865
0.851512
0.332928
0
0.042553
0
0
0.130141
0.045427
0
0
0
0
0
1
0.042553
false
0
0
0
0.085106
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c0bb5052572bdefa7514e11ef8a10e0bb0427e
1,035
py
Python
test/jit/test_modules.py
xiaohanhuang/pytorch
a31aea8eaa99a5ff72b5d002c206cd68d5467a5e
[ "Intel" ]
183
2018-04-06T21:10:36.000Z
2022-03-30T15:05:24.000Z
test/jit/test_modules.py
xiaohanhuang/pytorch
a31aea8eaa99a5ff72b5d002c206cd68d5467a5e
[ "Intel" ]
818
2020-02-07T02:36:44.000Z
2022-03-31T23:49:44.000Z
test/jit/test_modules.py
xiaohanhuang/pytorch
a31aea8eaa99a5ff72b5d002c206cd68d5467a5e
[ "Intel" ]
58
2018-06-05T16:40:18.000Z
2022-03-16T15:37:29.000Z
# Owner(s): ["oncall: jit"]

import torch
import os
import sys

from torch.testing._internal.jit_utils import JitTestCase

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")

class TestModules(JitTestCase):
    def test_script_module_with_constants_list(self):
        """
        Test that a module that has __constants__ set to something
        that is not a set can be scripted.
        """

        # torch.nn.Linear has a __constants__ attribute defined
        # and initialized to a list.
        class Net(torch.nn.Linear):
            x: torch.jit.Final[int]

            def __init__(self):
                super().__init__(5, 10)
                self.x = 0

        self.checkModule(Net(), (torch.randn(5),))
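# For context, a standalone sketch of what this test exercises: scripting a
# subclass of torch.nn.Linear (whose __constants__ is a list, not a set)
# outside the test harness. The class name here is hypothetical.
import torch

class ListConstantsNet(torch.nn.Linear):
    x: torch.jit.Final[int]

    def __init__(self):
        super().__init__(5, 10)
        self.x = 0

scripted = torch.jit.script(ListConstantsNet())  # should compile without error
print(scripted(torch.randn(5)).shape)            # torch.Size([10])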
30.441176
81
0.630918
139
1,035
4.410072
0.568345
0.029364
0.045677
0.04894
0.052202
0
0
0
0
0
0
0.006596
0.267633
1,035
33
82
31.363636
0.802111
0.233816
0
0
0
0
0.145119
0
0
0
0
0
0
1
0.111111
false
0
0.222222
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c22c6dbb3092192d2a4fddcabee0db528c9e22
4,720
py
Python
frontends/PyCDE/test/polynomial.py
fyquah/circt
cee685bf12dbf27a3f2274e08cd1af6874f70baa
[ "Apache-2.0" ]
null
null
null
frontends/PyCDE/test/polynomial.py
fyquah/circt
cee685bf12dbf27a3f2274e08cd1af6874f70baa
[ "Apache-2.0" ]
null
null
null
frontends/PyCDE/test/polynomial.py
fyquah/circt
cee685bf12dbf27a3f2274e08cd1af6874f70baa
[ "Apache-2.0" ]
null
null
null
# RUN: %PYTHON% %s 2>&1 | FileCheck %s

from __future__ import annotations

import mlir

import pycde
from pycde import (Input, Output, Parameter, module, externmodule,
                   generator, types, dim)
from circt.dialects import comb, hw


@module
def PolynomialCompute(coefficients: Coefficients):

  class PolynomialCompute:
    """Module to compute ax^3 + bx^2 + cx + d for design-time coefficients"""
    # Evaluate polynomial for 'x'.
    x = Input(types.i32)
    y = Output(types.int(8 * 4))

    unused_parameter = Parameter(True)

    def __init__(self, name: str):
      """coefficients is in 'd' -> 'a' order."""
      self.instanceName = name

    @staticmethod
    def get_module_name():
      return "PolyComputeForCoeff_" + '_'.join(
          [str(x) for x in coefficients.coeff])

    @generator
    def construct(mod):
      """Implement this module for input 'x'."""

      x = mod.x
      taps = list()
      for power, coeff in enumerate(coefficients.coeff):
        coeffVal = hw.ConstantOp.create(types.i32, coeff)
        if power == 0:
          newPartialSum = coeffVal.result
        else:
          partialSum = taps[-1]
          if power == 1:
            currPow = x
          else:
            x_power = [x for i in range(power)]
            currPow = comb.MulOp.create(*x_power)
          newPartialSum = comb.AddOp.create(
              partialSum, comb.MulOp.create(coeffVal, currPow))

        taps.append(newPartialSum)

      # Final output
      return {"y": taps[-1]}

  return PolynomialCompute


@externmodule("supercooldevice")
class CoolPolynomialCompute:
  x = Input(types.i32)
  y = Output(types.i32)

  def __init__(self, coefficients):
    self.coefficients = coefficients


class Coefficients:

  def __init__(self, coeff):
    self.coeff = coeff


class Polynomial(pycde.System):
  inputs = []
  outputs = [('y', types.i32)]

  def build(self, top):
    i32 = types.i32
    x = hw.ConstantOp.create(i32, 23)

    poly = PolynomialCompute(Coefficients([62, 42, 6]))("example", x=x)
    PolynomialCompute(coefficients=Coefficients([62, 42, 6]))("example2",
                                                              x=poly.y)
    PolynomialCompute(Coefficients([1, 2, 3, 4, 5]))("example2", x=poly.y)

    CoolPolynomialCompute([4, 42], x=x)

    return {"y": poly.y}


poly = Polynomial()

poly.graph()
# CHECK-LABEL: digraph "top"
# CHECK: label="top";
# CHECK: [shape=record,label="{hw.constant\ni32\n\nvalue: 23 : i32}"];

poly.print()
# CHECK-LABEL: hw.module @top() -> (%y: i32)
# CHECK: [[REG0:%.+]] = "pycde.PolynomialCompute"(%c23_i32) {instanceName = "example", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG1:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [62, 42, 6]}, module_name = "PolyComputeForCoeff_62_42_6", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG2:%.+]] = "pycde.PolynomialCompute"([[REG0]]) {instanceName = "example2", opNames = ["x"], parameters = {coefficients = {coeff = [1, 2, 3, 4, 5]}, module_name = "PolyComputeForCoeff_1_2_3_4_5", unused_parameter = true}, resultNames = ["y"]} : (i32) -> i32
# CHECK: [[REG3:%.+]] = "pycde.CoolPolynomialCompute"(%c23_i32) {coefficients = [4, 42], opNames = ["x"], parameters = {}, resultNames = ["y"]} : (i32) -> i32
# CHECK: hw.output [[REG0]] : i32

poly.generate()
poly.print()
# CHECK-LABEL: hw.module @top
# CHECK: %example.y = hw.instance "example" @PolyComputeForCoeff_62_42_6(%c23_i32) {parameters = {}} : (i32) -> i32
# CHECK: %example2.y = hw.instance "example2" @PolyComputeForCoeff_62_42_6(%example.y) {parameters = {}} : (i32) -> i32
# CHECK: %example2.y_0 = hw.instance "example2" @PolyComputeForCoeff_1_2_3_4_5(%example.y) {parameters = {}} : (i32) -> i32
# CHECK: %pycde.CoolPolynomialCompute.y = hw.instance "pycde.CoolPolynomialCompute" @supercooldevice(%c23_i32) {coefficients = [4, 42], parameters = {}} : (i32) -> i32

# CHECK-LABEL: hw.module @PolyComputeForCoeff_62_42_6(%x: i32) -> (%y: i32)
# CHECK: hw.constant 62
# CHECK: hw.constant 42
# CHECK: hw.constant 6

# CHECK-LABEL: hw.module @PolyComputeForCoeff_1_2_3_4_5(%x: i32) -> (%y: i32)
# CHECK: hw.constant 1
# CHECK: hw.constant 2
# CHECK: hw.constant 3
# CHECK: hw.constant 4
# CHECK: hw.constant 5

# CHECK-NOT: hw.module @pycde.PolynomialCompute

print("\n\n=== Verilog ===")
# CHECK-LABEL: === Verilog ===
poly.print_verilog()

# CHECK-LABEL: module PolyComputeForCoeff_62_42_6(
# CHECK: input [31:0] x,
# CHECK: output [31:0] y);
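# For reference, the arithmetic the generator above implements, in plain
# Python (coefficients in 'd' -> 'a' order, matching the docstring):
def eval_poly(coeff, x):
    return sum(c * x ** power for power, c in enumerate(coeff))

print(eval_poly([62, 42, 6], 5))  # 62 + 42*5 + 6*5**2 = 422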
35.223881
272
0.63411
574
4,720
5.095819
0.219512
0.030085
0.017094
0.049231
0.305299
0.248889
0.191111
0.137094
0.122735
0.122735
0
0.055673
0.200847
4,720
133
273
35.488722
0.719777
0.513771
0
0.090909
0
0
0.036032
0
0
0
0
0
0
1
0.106061
false
0
0.075758
0.015152
0.409091
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c24587d23b16e92b579d57a83d8cb4e84073ef
3,430
py
Python
python/examples/service_discovery.py
davidgcameron/arc
9813ef5f45e5089507953239de8fa2248f5ad32c
[ "Apache-2.0" ]
null
null
null
python/examples/service_discovery.py
davidgcameron/arc
9813ef5f45e5089507953239de8fa2248f5ad32c
[ "Apache-2.0" ]
null
null
null
python/examples/service_discovery.py
davidgcameron/arc
9813ef5f45e5089507953239de8fa2248f5ad32c
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python

import arc
import sys
import os

def retrieve(uc, endpoints):
    # The ComputingServiceRetriever needs the UserConfig to know which credentials
    # to use in case of HTTPS connections
    retriever = arc.ComputingServiceRetriever(uc, endpoints)
    # the constructor of the ComputingServiceRetriever returns immediately
    sys.stdout.write('\n')
    sys.stdout.write("ComputingServiceRetriever created with the following endpoints:\n")
    for endpoint in endpoints:
        sys.stdout.write("- %s\n" % endpoint.str())
    # here we want to wait until all the results arrive
    sys.stdout.write("Waiting for the results...\n")
    retriever.wait()
    return retriever

def example():
    # Creating a UserConfig object with the user's proxy
    # and the path of the trusted CA certificates
    uc = arc.UserConfig()
    uc.ProxyPath("/tmp/x509up_u%s" % os.getuid())
    uc.CACertificatesDirectory("/etc/grid-security/certificates")

    # Query two registries (index servers) for Computing Services
    registries = [
        # for index1, we specify that it is an EGIIS service
        arc.Endpoint("index1.nordugrid.org:2135/Mds-Vo-name=NorduGrid,o=grid",
                     arc.Endpoint.REGISTRY, "org.nordugrid.ldapegiis"),
        # for arc-emi.grid.upjs.sk, we don't specify the type (the InterfaceName);
        # we let the system try all possibilities
        arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI",
                     arc.Endpoint.REGISTRY)
    ]

    retriever = retrieve(uc, registries)

    # The retriever acts as a list containing all the discovered ComputingServices:
    sys.stdout.write("Discovered ComputingServices: %s\n"
                     % (", ".join([service.Name for service in retriever])))

    # Get all the ExecutionTargets on these ComputingServices
    targets = retriever.GetExecutionTargets()
    sys.stdout.write("Number of ExecutionTargets on these ComputingServices: %d\n"
                     % len(targets))

    # Query the local infosys (COMPUTINGINFO) of computing elements
    computing_elements = [
        # for piff, we specify that we want to query the LDAP GLUE2 tree
        arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2"),
        # for pgs03, we don't specify the interface; we let the system try all possibilities
        arc.Endpoint("pgs03.grid.upjs.sk", arc.Endpoint.COMPUTINGINFO)
    ]

    retriever2 = retrieve(uc, computing_elements)

    # Get all the ExecutionTargets on these ComputingServices
    targets2 = retriever2.GetExecutionTargets()

    sys.stdout.write("The discovered ExecutionTargets:\n")
    for target in targets2:
        sys.stdout.write("%s\n" % str(target))

    # Query both registries and computing elements at the same time:
    endpoints = [
        arc.Endpoint("arc-emi.grid.upjs.sk/O=Grid/Mds-Vo-Name=ARC-EMI",
                     arc.Endpoint.REGISTRY),
        arc.Endpoint("piff.hep.lu.se", arc.Endpoint.COMPUTINGINFO, "org.nordugrid.ldapglue2")
    ]

    retriever3 = retrieve(uc, endpoints)

    sys.stdout.write("Discovered ComputingServices: %s\n"
                     % (", ".join([service.Name for service in retriever3])))

# wait for all the background threads to finish before we destroy the objects they may use
import atexit

@atexit.register
def wait_exit():
    arc.ThreadInitializer().waitExit()

# arc.Logger.getRootLogger().addDestination(arc.LogStream(sys.stderr))
# arc.Logger.getRootLogger().setThreshold(arc.DEBUG)

# run the example
example()
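# A minimal, hypothetical variation on the queries above: the endpoint URL is
# a placeholder, and network access plus a valid proxy are required. Only
# calls already used in the script above appear here.
uc = arc.UserConfig()
endpoints = [arc.Endpoint("example-ce.example.org", arc.Endpoint.COMPUTINGINFO)]
retriever = retrieve(uc, endpoints)
for service in retriever:
    print(service.Name)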
40.352941
129
0.717201
449
3,430
5.469933
0.365256
0.053746
0.051303
0.017101
0.257736
0.205212
0.205212
0.165309
0.165309
0.165309
0
0.007785
0.176093
3,430
84
130
40.833333
0.861288
0.369679
0
0
0
0.045455
0.270561
0.12757
0
0
0
0
0
1
0.068182
false
0
0.090909
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c2fc0684398b705e83193b8a4ddd1551624694
46,071
py
Python
core/domain/rights_manager.py
netajik/oppia
d3780352d615db7438e010c5aa5eb60588bb7de6
[ "Apache-2.0" ]
null
null
null
core/domain/rights_manager.py
netajik/oppia
d3780352d615db7438e010c5aa5eb60588bb7de6
[ "Apache-2.0" ]
null
null
null
core/domain/rights_manager.py
netajik/oppia
d3780352d615db7438e010c5aa5eb60588bb7de6
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain objects and functions that manage rights for various user actions.""" import logging from constants import constants from core.domain import activity_services from core.domain import role_services from core.domain import subscription_services from core.domain import user_services from core.platform import models import feconf import utils current_user_services = models.Registry.import_current_user_services() (collection_models, exp_models,) = models.Registry.import_models([ models.NAMES.collection, models.NAMES.exploration ]) # IMPORTANT: Ensure that all changes to how these cmds are interpreted preserve # backward-compatibility with previous exploration snapshots in the datastore. # Do not modify the definitions of CMD keys that already exist. CMD_CREATE_NEW = 'create_new' CMD_CHANGE_ROLE = 'change_role' CMD_CHANGE_EXPLORATION_STATUS = 'change_exploration_status' CMD_CHANGE_COLLECTION_STATUS = 'change_collection_status' CMD_CHANGE_PRIVATE_VIEWABILITY = 'change_private_viewability' CMD_RELEASE_OWNERSHIP = 'release_ownership' CMD_UPDATE_FIRST_PUBLISHED_MSEC = 'update_first_published_msec' ACTIVITY_STATUS_PRIVATE = constants.ACTIVITY_STATUS_PRIVATE ACTIVITY_STATUS_PUBLIC = constants.ACTIVITY_STATUS_PUBLIC ROLE_OWNER = 'owner' ROLE_EDITOR = 'editor' ROLE_TRANSLATOR = 'translator' ROLE_VIEWER = 'viewer' ROLE_NONE = 'none' ROLE_ADMIN = 'admin' ROLE_MODERATOR = 'moderator' class ActivityRights(object): """Domain object for the rights/publication status of an activity (an exploration or a collection). """ def __init__( self, exploration_id, owner_ids, editor_ids, translator_ids, viewer_ids, community_owned=False, cloned_from=None, status=ACTIVITY_STATUS_PRIVATE, viewable_if_private=False, first_published_msec=None): self.id = exploration_id self.owner_ids = owner_ids self.editor_ids = editor_ids self.translator_ids = translator_ids self.viewer_ids = viewer_ids self.community_owned = community_owned self.cloned_from = cloned_from self.status = status self.viewable_if_private = viewable_if_private self.first_published_msec = first_published_msec def validate(self): """Validates an ActivityRights object. Raises: utils.ValidationError: if any of the owners, editors, translators and viewers lists overlap, or if a community-owned exploration has owners, editors, translators or viewers specified. 
""" if self.community_owned: if (self.owner_ids or self.editor_ids or self.translator_ids or self.viewer_ids): raise utils.ValidationError( 'Community-owned explorations should have no owners, ' 'editors, translators or viewers specified.') if self.community_owned and self.status == ACTIVITY_STATUS_PRIVATE: raise utils.ValidationError( 'Community-owned explorations cannot be private.') if self.status != ACTIVITY_STATUS_PRIVATE and self.viewer_ids: raise utils.ValidationError( 'Public explorations should have no viewers specified.') owner_editor = set(self.owner_ids).intersection(set(self.editor_ids)) owner_translator = set(self.owner_ids).intersection( set(self.translator_ids)) owner_viewer = set(self.owner_ids).intersection(set(self.viewer_ids)) editor_translator = set(self.editor_ids).intersection( set(self.translator_ids)) editor_viewer = set(self.editor_ids).intersection(set(self.viewer_ids)) translator_viewer = set(self.editor_ids).intersection( set(self.viewer_ids)) if owner_editor: raise utils.ValidationError( 'A user cannot be both an owner and an editor: %s' % owner_editor) if owner_translator: raise utils.ValidationError( 'A user cannot be both an owner and a translator: %s' % owner_translator) if owner_viewer: raise utils.ValidationError( 'A user cannot be both an owner and a viewer: %s' % owner_viewer) if editor_translator: raise utils.ValidationError( 'A user cannot be both an editor and a translator: %s' % editor_translator) if editor_viewer: raise utils.ValidationError( 'A user cannot be both an editor and a viewer: %s' % editor_viewer) if translator_viewer: raise utils.ValidationError( 'A user cannot be both a translator and a viewer: %s' % translator_viewer) def to_dict(self): """Returns a dict suitable for use by the frontend. Returns: dict. A dict version of ActivityRights suitable for use by the frontend. """ if self.community_owned: return { 'cloned_from': self.cloned_from, 'status': self.status, 'community_owned': True, 'owner_names': [], 'editor_names': [], 'translator_names': [], 'viewer_names': [], 'viewable_if_private': self.viewable_if_private, } else: return { 'cloned_from': self.cloned_from, 'status': self.status, 'community_owned': False, 'owner_names': user_services.get_human_readable_user_ids( self.owner_ids), 'editor_names': user_services.get_human_readable_user_ids( self.editor_ids), 'translator_names': user_services.get_human_readable_user_ids( self.translator_ids), 'viewer_names': user_services.get_human_readable_user_ids( self.viewer_ids), 'viewable_if_private': self.viewable_if_private, } def is_owner(self, user_id): """Checks whether given user is owner of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity owner. """ return bool(user_id in self.owner_ids) def is_editor(self, user_id): """Checks whether given user is editor of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity editor. """ return bool(user_id in self.editor_ids) def is_translator(self, user_id): """Checks whether given user is translator of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity translator. """ return bool(user_id in self.translator_ids) def is_viewer(self, user_id): """Checks whether given user is viewer of activity. Args: user_id: str or None. Id of the user. Returns: bool. Whether user is an activity viewer. """ return bool(user_id in self.viewer_ids) def is_published(self): """Checks whether activity is published. Returns: bool. 
Whether activity is published. """ return bool(self.status == ACTIVITY_STATUS_PUBLIC) def is_private(self): """Checks whether activity is private. Returns: bool. Whether activity is private. """ return bool(self.status == ACTIVITY_STATUS_PRIVATE) def get_activity_rights_from_model(activity_rights_model, activity_type): """Constructs an ActivityRights object from the given activity rights model. Args: activity_rights_model: ActivityRightsModel. Activity rights from the datastore. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Returns: ActivityRights. The rights object created from the model. """ return ActivityRights( activity_rights_model.id, activity_rights_model.owner_ids, activity_rights_model.editor_ids, activity_rights_model.translator_ids, activity_rights_model.viewer_ids, community_owned=activity_rights_model.community_owned, cloned_from=( activity_rights_model.cloned_from if activity_type == constants.ACTIVITY_TYPE_EXPLORATION else None), status=activity_rights_model.status, viewable_if_private=activity_rights_model.viewable_if_private, first_published_msec=activity_rights_model.first_published_msec ) def _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds): """Saves an ExplorationRights or CollectionRights domain object to the datastore. Args: committer_id: str. ID of the committer. activity_rights: ActivityRights. The rights object for the given activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION commit_message: str. Descriptive message for the commit. commit_cmds: list(dict). A list of commands describing what kind of commit was done. """ activity_rights.validate() if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: model_cls = exp_models.ExplorationRightsModel elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: model_cls = collection_models.CollectionRightsModel model = model_cls.get(activity_rights.id, strict=False) model.owner_ids = activity_rights.owner_ids model.editor_ids = activity_rights.editor_ids model.viewer_ids = activity_rights.viewer_ids model.translator_ids = activity_rights.translator_ids model.community_owned = activity_rights.community_owned model.status = activity_rights.status model.viewable_if_private = activity_rights.viewable_if_private model.first_published_msec = activity_rights.first_published_msec model.commit(committer_id, commit_message, commit_cmds) def _update_exploration_summary(activity_rights): """Updates the exploration summary for the activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_rights: ActivityRights. The rights object for the given activity. """ # TODO(msl): get rid of inline imports by refactoring code. from core.domain import exp_services exp_services.update_exploration_summary( activity_rights.id, None) def _update_collection_summary(activity_rights): """Updates the collection summary for the given activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_rights: ActivityRights. The rights object for the given activity. 
""" from core.domain import collection_services collection_services.update_collection_summary( activity_rights.id, None) def _update_activity_summary(activity_type, activity_rights): """Updates the activity summary for the given activity associated with the given rights object. The ID of rights object is the same as the ID of associated activity. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_rights: ActivityRights. The rights object for the given activity. """ if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: _update_exploration_summary(activity_rights) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: _update_collection_summary(activity_rights) def update_activity_first_published_msec( activity_type, activity_id, first_published_msec): """Updates the first_published_msec field for the given activity. The caller is responsible for ensuring that this value is not already set before updating it. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_id: str. ID of the activity. first_published_msec: float. First publication time in milliseconds since the Epoch. """ activity_rights = _get_activity_rights(activity_type, activity_id) commit_cmds = [{ 'cmd': CMD_UPDATE_FIRST_PUBLISHED_MSEC, 'old_first_published_msec': activity_rights.first_published_msec, 'new_first_published_msec': first_published_msec }] activity_rights.first_published_msec = first_published_msec _save_activity_rights( feconf.SYSTEM_COMMITTER_ID, activity_rights, activity_type, 'set first published time in msec', commit_cmds) def create_new_exploration_rights(exploration_id, committer_id): """Creates a new exploration rights object and saves it to the datastore. Subscribes the committer to the new exploration. Args: exploration_id: str. ID of the exploration. committer_id: str. ID of the committer. """ exploration_rights = ActivityRights( exploration_id, [committer_id], [], [], []) commit_cmds = [{'cmd': CMD_CREATE_NEW}] exp_models.ExplorationRightsModel( id=exploration_rights.id, owner_ids=exploration_rights.owner_ids, editor_ids=exploration_rights.editor_ids, translator_ids=exploration_rights.translator_ids, viewer_ids=exploration_rights.viewer_ids, community_owned=exploration_rights.community_owned, status=exploration_rights.status, viewable_if_private=exploration_rights.viewable_if_private, first_published_msec=exploration_rights.first_published_msec, ).commit(committer_id, 'Created new exploration', commit_cmds) subscription_services.subscribe_to_exploration( committer_id, exploration_id) def get_exploration_rights(exploration_id, strict=True): """Retrieves the rights for this exploration from the datastore. Args: exploration_id: str. ID of the exploration. strict: bool. Whether to raise an error if there is no exploration matching the given ID. Returns: ActivityRights. The rights object for the given exploration. Raises: EntityNotFoundError. The exploration with ID exploration_id was not found in the datastore. """ model = exp_models.ExplorationRightsModel.get( exploration_id, strict=strict) if model is None: return None return get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_EXPLORATION) def get_multiple_exploration_rights_by_ids(exp_ids): """Returns a list of ActivityRights objects for given exploration ids. Args: exp_ids: list(str). List of exploration ids. Returns: list(ActivityRights or None). 
List of rights object containing ActivityRights object for existing exploration or None. """ exp_rights_models = exp_models.ExplorationRightsModel.get_multi( exp_ids) exp_models_list = [] for model in exp_rights_models: if model is None: exp_models_list.append(None) else: exp_models_list.append( get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_EXPLORATION)) return exp_models_list def is_exploration_private(exploration_id): """Returns whether exploration is private. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is private or not. """ exploration_rights = get_exploration_rights(exploration_id) return exploration_rights.status == ACTIVITY_STATUS_PRIVATE def is_exploration_public(exploration_id): """Returns whether exploration is public. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is public. """ exploration_rights = get_exploration_rights(exploration_id) return exploration_rights.status == ACTIVITY_STATUS_PUBLIC def is_exploration_cloned(exploration_id): """Returns whether the exploration is a clone of another exploration. Args: exploration_id: str. ID of the exploration. Returns: bool. Whether the exploration is a clone of another exploration. """ exploration_rights = get_exploration_rights(exploration_id) return bool(exploration_rights.cloned_from) def create_new_collection_rights(collection_id, committer_id): """Creates a new collection rights object and saves it to the datastore. Subscribes the committer to the new collection. Args: collection_id: str. ID of the collection. committer_id: str. ID of the committer. """ collection_rights = ActivityRights( collection_id, [committer_id], [], [], []) commit_cmds = [{'cmd': CMD_CREATE_NEW}] collection_models.CollectionRightsModel( id=collection_rights.id, owner_ids=collection_rights.owner_ids, editor_ids=collection_rights.editor_ids, translator_ids=collection_rights.translator_ids, viewer_ids=collection_rights.viewer_ids, community_owned=collection_rights.community_owned, status=collection_rights.status, viewable_if_private=collection_rights.viewable_if_private, first_published_msec=collection_rights.first_published_msec ).commit(committer_id, 'Created new collection', commit_cmds) subscription_services.subscribe_to_collection(committer_id, collection_id) def get_collection_rights(collection_id, strict=True): """Retrieves the rights for this collection from the datastore. Args: collection_id: str. ID of the collection. strict: bool. Whether to raise an error if ID is not found. Returns: ActivityRights. The rights object for the collection. Raises: EntityNotFoundError. The collection with ID collection_id is not found in the datastore. """ model = collection_models.CollectionRightsModel.get( collection_id, strict=strict) if model is None: return None return get_activity_rights_from_model( model, constants.ACTIVITY_TYPE_COLLECTION) def get_collection_owner_names(collection_id): """Retrieves the owners for this collection from the datastore. Args: collection_id: str. ID of the collection. Returns: list(str). Human-readable usernames (or truncated email addresses) of owners for this collection. """ collection_rights = get_collection_rights(collection_id) return user_services.get_human_readable_user_ids( collection_rights.owner_ids) def is_collection_private(collection_id): """Returns whether the collection is private. Args: collection_id: str. ID of the collection. Returns: bool. Whether the collection is private. 
""" collection_rights = get_collection_rights(collection_id) return collection_rights.status == ACTIVITY_STATUS_PRIVATE def is_collection_public(collection_id): """Returns whether the collection is public. Args: collection_id: str. ID of the collection. Returns: bool. Whether the collection is public. """ collection_rights = get_collection_rights(collection_id) return collection_rights.status == ACTIVITY_STATUS_PUBLIC def _get_activity_rights(activity_type, activity_id): """Retrieves the rights object for the given activity based on its type. Args: activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION activity_id: str. ID of the activity. Returns: ActivityRights. The rights object associated with the given activity. Raises: Exception. activity_type provided is unknown. """ if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: return get_exploration_rights(activity_id, strict=False) elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: return get_collection_rights(activity_id, strict=False) else: raise Exception( 'Cannot get activity rights for unknown activity type: %s' % ( activity_type)) def check_can_access_activity(user, activity_rights): """Checks whether the user can access given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: AcitivityRights or None. Rights object for the given activity. Returns: bool. Whether the given activity can be accessed by the given user. """ if activity_rights is None: return False elif activity_rights.is_published(): return bool( role_services.ACTION_PLAY_ANY_PUBLIC_ACTIVITY in user.actions) elif activity_rights.is_private(): return bool( (role_services.ACTION_PLAY_ANY_PRIVATE_ACTIVITY in user.actions) or activity_rights.is_viewer(user.user_id) or activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id) or activity_rights.is_translator(user.user_id) or activity_rights.viewable_if_private) def check_can_edit_activity(user, activity_rights): """Checks whether the user can edit given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the given user can edit this activity. """ if activity_rights is None: return False if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False if (activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id)): return True if (activity_rights.community_owned or (role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)): return True if (activity_rights.is_published() and (role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_translate_activity(user, activity_rights): """Checks whether the user can translate given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the given user can translate this activity. 
""" if activity_rights is None: return False if role_services.ACTION_EDIT_OWNED_ACTIVITY not in user.actions: return False if (activity_rights.is_owner(user.user_id) or activity_rights.is_editor(user.user_id) or activity_rights.is_translator(user.user_id)): return True if (activity_rights.community_owned or (role_services.ACTION_EDIT_ANY_ACTIVITY in user.actions)): return True if (activity_rights.is_published() and (role_services.ACTION_EDIT_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_delete_activity(user, activity_rights): """Checks whether the user can delete given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can delete given activity. """ if activity_rights is None: return False if role_services.ACTION_DELETE_ANY_ACTIVITY in user.actions: return True elif (activity_rights.is_private() and (role_services.ACTION_DELETE_OWNED_PRIVATE_ACTIVITY in user.actions) and activity_rights.is_owner(user.user_id)): return True elif (activity_rights.is_published() and (role_services.ACTION_DELETE_ANY_PUBLIC_ACTIVITY in user.actions)): return True return False def check_can_modify_activity_roles(user, activity_rights): """Checks whether the user can modify roles for given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can modify roles for given activity. """ if activity_rights is None: return False if (activity_rights.community_owned or activity_rights.cloned_from): return False if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY in user.actions): return True if (role_services.ACTION_MODIFY_ROLES_FOR_OWNED_ACTIVITY in user.actions): if activity_rights.is_owner(user.user_id): return True return False def check_can_release_ownership(user, activity_rights): """Checks whether the user can release ownership for given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can release ownership for given activity. """ if activity_rights is None: return False if activity_rights.is_private(): return False return check_can_modify_activity_roles( user, activity_rights) def check_can_publish_activity(user, activity_rights): """Checks whether the user can publish given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can publish given activity. """ if activity_rights is None: return False if activity_rights.cloned_from: return False if activity_rights.is_published(): return False if role_services.ACTION_PUBLISH_ANY_ACTIVITY in user.actions: return True if role_services.ACTION_PUBLISH_OWNED_ACTIVITY in user.actions: if activity_rights.is_owner(user.user_id): return True return False def check_can_unpublish_activity(user, activity_rights): """Checks whether the user can unpublish given activity. Args: user: UserActionsInfo. Object having user_id, role and actions for given user. activity_rights: ActivityRights or None. Rights object for the given activity. Returns: bool. Whether the user can unpublish given activity. 
""" if activity_rights is None: return False if activity_rights.community_owned: return False if activity_rights.is_published(): if role_services.ACTION_UNPUBLISH_ANY_PUBLIC_ACTIVITY in user.actions: return True return False def _assign_role( committer, assignee_id, new_role, activity_id, activity_type): """Assigns a new role to the user. Args: committer: UserActionsInfo. UserActionInfo object for the user who is performing the action. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR ROLE_TRANSLATOR ROLE_VIEWER activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to modify a role. Exception. The user already owns the activity. Exception. The user can already edit the activity. Exception. The user can already translate the activity. Exception. The activity is already publicly editable. Exception. The activity is already publicly translatable. Exception. The user can already view the activity. Exception. The activity is already publicly viewable. Exception. The role is invalid. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_modify_activity_roles(committer, activity_rights): logging.error( 'User %s tried to allow user %s to be a(n) %s of activity %s ' 'but was refused permission.' % ( committer_id, assignee_id, new_role, activity_id)) raise Exception( 'UnauthorizedUserException: Could not assign new role.') assignee_username = user_services.get_username(assignee_id) old_role = ROLE_NONE if new_role == ROLE_OWNER: if activity_rights.is_owner(assignee_id): raise Exception('This user already owns this %s.' % activity_type) activity_rights.owner_ids.append(assignee_id) if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER if assignee_id in activity_rights.editor_ids: activity_rights.editor_ids.remove(assignee_id) old_role = ROLE_EDITOR if assignee_id in activity_rights.translator_ids: activity_rights.translator_ids.remove(assignee_id) old_role = ROLE_TRANSLATOR elif new_role == ROLE_EDITOR: if (activity_rights.is_editor(assignee_id) or activity_rights.is_owner(assignee_id)): raise Exception( 'This user already can edit this %s.' % activity_type) if activity_rights.community_owned: raise Exception( 'Community-owned %ss can be edited by anyone.' % activity_type) activity_rights.editor_ids.append(assignee_id) if assignee_id in activity_rights.translator_ids: activity_rights.translator_ids.remove(assignee_id) old_role = ROLE_TRANSLATOR if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER elif new_role == ROLE_TRANSLATOR: if (activity_rights.is_editor(assignee_id) or activity_rights.is_translator(assignee_id) or activity_rights.is_owner(assignee_id)): raise Exception( 'This user already can translate this %s.' % activity_type) if activity_rights.community_owned: raise Exception( 'Community-owned %ss can be translated by anyone.' 
% activity_type) activity_rights.translator_ids.append(assignee_id) if assignee_id in activity_rights.viewer_ids: activity_rights.viewer_ids.remove(assignee_id) old_role = ROLE_VIEWER elif new_role == ROLE_VIEWER: if (activity_rights.is_owner(assignee_id) or activity_rights.is_editor(assignee_id) or activity_rights.is_viewer(assignee_id)): raise Exception( 'This user already can view this %s.' % activity_type) if activity_rights.status != ACTIVITY_STATUS_PRIVATE: raise Exception( 'Public %ss can be viewed by anyone.' % activity_type) activity_rights.viewer_ids.append(assignee_id) else: raise Exception('Invalid role: %s' % new_role) commit_message = 'Changed role of %s from %s to %s' % ( assignee_username, old_role, new_role) commit_cmds = [{ 'cmd': CMD_CHANGE_ROLE, 'assignee_id': assignee_id, 'old_role': old_role, 'new_role': new_role }] _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _release_ownership_of_activity(committer, activity_id, activity_type): """Releases ownership of the given activity to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the user who is performing the action. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have release rights. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_release_ownership(committer, activity_rights): logging.error( 'User %s tried to release ownership of %s %s but was ' 'refused permission.' % (committer_id, activity_type, activity_id)) raise Exception( 'The ownership of this %s cannot be released.' % activity_type) activity_rights.community_owned = True activity_rights.owner_ids = [] activity_rights.editor_ids = [] activity_rights.viewer_ids = [] commit_cmds = [{ 'cmd': CMD_RELEASE_OWNERSHIP, }] _save_activity_rights( committer_id, activity_rights, activity_type, '%s ownership released to the community.' % activity_type, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _change_activity_status( committer_id, activity_id, activity_type, new_status, commit_message): """Changes the status of the given activity. Args: committer_id: str. ID of the user who is performing the update action. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION new_status: str. The new status of the activity. commit_message: str. The human-written commit message for this change.
""" activity_rights = _get_activity_rights(activity_type, activity_id) old_status = activity_rights.status activity_rights.status = new_status if activity_type == constants.ACTIVITY_TYPE_EXPLORATION: cmd_type = CMD_CHANGE_EXPLORATION_STATUS elif activity_type == constants.ACTIVITY_TYPE_COLLECTION: cmd_type = CMD_CHANGE_COLLECTION_STATUS commit_cmds = [{ 'cmd': cmd_type, 'old_status': old_status, 'new_status': new_status }] if new_status != ACTIVITY_STATUS_PRIVATE: activity_rights.viewer_ids = [] if activity_rights.first_published_msec is None: activity_rights.first_published_msec = ( utils.get_current_time_in_millisecs()) _save_activity_rights( committer_id, activity_rights, activity_type, commit_message, commit_cmds) _update_activity_summary(activity_type, activity_rights) def _publish_activity(committer, activity_id, activity_type): """Publishes the given activity. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to publish the activity. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_publish_activity(committer, activity_rights): logging.error( 'User %s tried to publish %s %s but was refused ' 'permission.' % (committer_id, activity_type, activity_id)) raise Exception('This %s cannot be published.' % activity_type) _change_activity_status( committer_id, activity_id, activity_type, ACTIVITY_STATUS_PUBLIC, '%s published.' % activity_type) def _unpublish_activity(committer, activity_id, activity_type): """Unpublishes the given activity. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. activity_id: str. ID of the activity. activity_type: str. The type of activity. Possible values: constants.ACTIVITY_TYPE_EXPLORATION constants.ACTIVITY_TYPE_COLLECTION Raises: Exception. The committer does not have rights to unpublish the activity. """ committer_id = committer.user_id activity_rights = _get_activity_rights(activity_type, activity_id) if not check_can_unpublish_activity(committer, activity_rights): logging.error( 'User %s tried to unpublish %s %s but was refused ' 'permission.' % (committer_id, activity_type, activity_id)) raise Exception('This %s cannot be unpublished.' % activity_type) _change_activity_status( committer_id, activity_id, activity_type, ACTIVITY_STATUS_PRIVATE, '%s unpublished.' % activity_type) activity_services.remove_featured_activity(activity_type, activity_id) # Rights functions for activities. def assign_role_for_exploration( committer, exploration_id, assignee_id, new_role): """Assigns a user to the given role and subscribes the assignee to future exploration updates. The caller should ensure that assignee_id corresponds to a valid user in the system. Args: committer: UserActionsInfo. The UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR ROLE_TRANSLATOR Raises: Exception. This could potentially throw an exception from _assign_role. 
""" _assign_role( committer, assignee_id, new_role, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) if new_role in [ROLE_OWNER, ROLE_EDITOR, ROLE_TRANSLATOR]: subscription_services.subscribe_to_exploration( assignee_id, exploration_id) def release_ownership_of_exploration(committer, exploration_id): """Releases ownership of the given exploration to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _release_ownership_of_activity. """ _release_ownership_of_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) def set_private_viewability_of_exploration( committer, exploration_id, viewable_if_private): """Sets the viewable_if_private attribute for the given exploration's rights object. If viewable_if_private is True, this allows a private exploration to be viewed by anyone with the link. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. viewable_if_private: bool. Whether the exploration should be made viewable (by anyone with the link). Raises: Exception. The committer does not have the permission to perform change action. Exception. If the viewable_if_private property is already as desired. """ committer_id = committer.user_id exploration_rights = get_exploration_rights(exploration_id) # The user who can publish activity can change its private viewability. if not check_can_publish_activity(committer, exploration_rights): logging.error( 'User %s tried to change private viewability of exploration %s ' 'but was refused permission.' % (committer_id, exploration_id)) raise Exception( 'The viewability status of this exploration cannot be changed.') old_viewable_if_private = exploration_rights.viewable_if_private if old_viewable_if_private == viewable_if_private: raise Exception( 'Trying to change viewability status of this exploration to %s, ' 'but that is already the current value.' % viewable_if_private) exploration_rights.viewable_if_private = viewable_if_private commit_cmds = [{ 'cmd': CMD_CHANGE_PRIVATE_VIEWABILITY, 'old_viewable_if_private': old_viewable_if_private, 'new_viewable_if_private': viewable_if_private, }] commit_message = ( 'Made exploration viewable to anyone with the link.' if viewable_if_private else 'Made exploration viewable only to invited playtesters.') _save_activity_rights( committer_id, exploration_rights, constants.ACTIVITY_TYPE_EXPLORATION, commit_message, commit_cmds) _update_exploration_summary(exploration_rights) def publish_exploration(committer, exploration_id): """Publishes the given exploration. It is the responsibility of the caller to check that the exploration is valid prior to publication. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _publish_activity. """ _publish_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) def unpublish_exploration(committer, exploration_id): """Unpublishes the given exploration. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. exploration_id: str. ID of the exploration. Raises: Exception. This could potentially throw an exception from _unpublish_activity. """ _unpublish_activity( committer, exploration_id, constants.ACTIVITY_TYPE_EXPLORATION) # Rights functions for collections. 
def assign_role_for_collection( committer, collection_id, assignee_id, new_role): """Assigns the given user to the given role and subscribes the assignee to future collection updates. The caller should ensure that assignee_id corresponds to a valid user in the system. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. assignee_id: str. ID of the user whose role is being changed. new_role: str. The name of the new role: One of ROLE_OWNER ROLE_EDITOR Raises: Exception. This could potentially throw an exception from _assign_role. """ _assign_role( committer, assignee_id, new_role, collection_id, constants.ACTIVITY_TYPE_COLLECTION) if new_role in [ROLE_OWNER, ROLE_EDITOR]: subscription_services.subscribe_to_collection( assignee_id, collection_id) def release_ownership_of_collection(committer, collection_id): """Releases ownership of the given collection to the community. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _release_ownership_of_activity. """ _release_ownership_of_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) def publish_collection(committer, collection_id): """Publishes the given collection. It is the responsibility of the caller to check that the collection is valid prior to publication. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _publish_activity. """ _publish_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION) def unpublish_collection(committer, collection_id): """Unpublishes the given collection. Args: committer: UserActionsInfo. UserActionsInfo object for the committer. collection_id: str. ID of the collection. Raises: Exception. This could potentially throw an exception from _unpublish_activity. """ _unpublish_activity( committer, collection_id, constants.ACTIVITY_TYPE_COLLECTION)
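Each check_can_* helper above follows the same shape: deny when the rights object is missing, then grant on progressively broader grounds (a role on the activity itself, then a global action on the user). A minimal self-contained sketch of that pattern for publishing, with hypothetical stand-ins for ActivityRights and the role_services action names (the cloned_from check is omitted for brevity):

# Hypothetical stand-ins; the real ActivityRights/UserActionsInfo classes live elsewhere.
class FakeRights:
    def __init__(self, owner_ids, published):
        self.owner_ids = owner_ids
        self.published = published

    def is_published(self):
        return self.published

    def is_owner(self, user_id):
        return user_id in self.owner_ids


def can_publish(user_actions, user_id, rights):
    # Mirrors check_can_publish_activity: a global "publish any" action wins;
    # otherwise the "publish owned" action plus actual ownership is required.
    if rights is None or rights.is_published():
        return False
    if 'PUBLISH_ANY_ACTIVITY' in user_actions:
        return True
    return 'PUBLISH_OWNED_ACTIVITY' in user_actions and rights.is_owner(user_id)


rights = FakeRights(owner_ids=['u1'], published=False)
assert can_publish({'PUBLISH_OWNED_ACTIVITY'}, 'u1', rights)
assert not can_publish({'PUBLISH_OWNED_ACTIVITY'}, 'u2', rights)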
35.303448
80
0.692323
5,531
46,071
5.502802
0.062376
0.074977
0.028289
0.009758
0.693258
0.619135
0.562492
0.493823
0.454429
0.404685
0
0.00026
0.248421
46,071
1,304
81
35.330521
0.878733
0.356081
0
0.365188
0
0.001706
0.084694
0.008021
0
0
0
0.000767
0
1
0.083618
false
0
0.022184
0
0.209898
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c4707b6186eda818ca02fcf8c20fba9ffa25c7
7,024
py
Python
selfdrive/car/toyota/carcontroller.py
aolin480/openpilot
9ac00c3e5e111a05a0bb10018ccd190571dfff4d
[ "MIT" ]
70
2021-06-18T13:04:10.000Z
2022-03-31T13:58:09.000Z
selfdrive/car/toyota/carcontroller.py
aolin480/openpilot
9ac00c3e5e111a05a0bb10018ccd190571dfff4d
[ "MIT" ]
50
2021-06-16T23:52:55.000Z
2022-03-30T14:05:33.000Z
selfdrive/car/toyota/carcontroller.py
aolin480/openpilot
9ac00c3e5e111a05a0bb10018ccd190571dfff4d
[ "MIT" ]
88
2021-06-23T21:33:19.000Z
2022-03-31T07:27:08.000Z
from cereal import car from common.numpy_fast import clip, interp from selfdrive.car import apply_toyota_steer_torque_limits, create_gas_interceptor_command, make_can_msg from selfdrive.car.toyota.toyotacan import create_steer_command, create_ui_command, \ create_accel_command, create_acc_cancel_command, \ create_fcw_command, create_lta_steer_command from selfdrive.car.toyota.values import CAR, STATIC_DSU_MSGS, NO_STOP_TIMER_CAR, TSS2_CAR, \ MIN_ACC_SPEED, PEDAL_TRANSITION, CarControllerParams from opendbc.can.packer import CANPacker from common.op_params import opParams VisualAlert = car.CarControl.HUDControl.VisualAlert class CarController(): def __init__(self, dbc_name, CP, VM): self.last_steer = 0 self.alert_active = False self.last_standstill = False self.standstill_req = False self.steer_rate_limited = False self.standstill_hack = opParams().get('standstill_hack') self.packer = CANPacker(dbc_name) self.gas = 0 self.accel = 0 def update(self, enabled, active, CS, frame, actuators, pcm_cancel_cmd, hud_alert, left_line, right_line, lead, left_lane_depart, right_lane_depart): # gas and brake if CS.CP.enableGasInterceptor and enabled: MAX_INTERCEPTOR_GAS = 0.5 # RAV4 has very sensitive gas pedal if CS.CP.carFingerprint in [CAR.RAV4, CAR.RAV4H, CAR.HIGHLANDER, CAR.HIGHLANDERH]: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.15, 0.3, 0.0]) elif CS.CP.carFingerprint in [CAR.COROLLA]: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.3, 0.4, 0.0]) else: PEDAL_SCALE = interp(CS.out.vEgo, [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION], [0.4, 0.5, 0.0]) # offset for creep and windbrake pedal_offset = interp(CS.out.vEgo, [0.0, 2.3, MIN_ACC_SPEED + PEDAL_TRANSITION], [-.4, 0.0, 0.2]) pedal_command = PEDAL_SCALE * (actuators.accel + pedal_offset) interceptor_gas_cmd = clip(pedal_command, 0., MAX_INTERCEPTOR_GAS) else: interceptor_gas_cmd = 0. pcm_accel_cmd = clip(actuators.accel, CarControllerParams.ACCEL_MIN, CarControllerParams.ACCEL_MAX) # steer torque new_steer = int(round(actuators.steer * CarControllerParams.STEER_MAX)) apply_steer = apply_toyota_steer_torque_limits(new_steer, self.last_steer, CS.out.steeringTorqueEps, CarControllerParams) self.steer_rate_limited = new_steer != apply_steer # Cut steering while we're in a known fault state (2s) if not enabled or CS.steer_state in [9, 25] or abs(CS.out.steeringRateDeg) > 100: apply_steer = 0 apply_steer_req = 0 else: apply_steer_req = 1 # TODO: probably can delete this. CS.pcm_acc_status uses a different signal # than CS.cruiseState.enabled. 
confirm they're not meaningfully different if not enabled and CS.pcm_acc_status: pcm_cancel_cmd = 1 # on entering standstill, send standstill request if CS.out.standstill and not self.last_standstill and CS.CP.carFingerprint not in NO_STOP_TIMER_CAR and not self.standstill_hack: self.standstill_req = True if CS.pcm_acc_status != 8: # pcm entered standstill or it's disabled self.standstill_req = False self.last_steer = apply_steer self.last_standstill = CS.out.standstill can_sends = [] #*** control msgs *** #print("steer {0} {1} {2} {3}".format(apply_steer, min_lim, max_lim, CS.steer_torque_motor) # toyota can trace shows this message at 42Hz, with counter adding alternatively 1 and 2; # sending it at 100Hz seem to allow a higher rate limit, as the rate limit seems imposed # on consecutive messages can_sends.append(create_steer_command(self.packer, apply_steer, apply_steer_req, frame)) if frame % 2 == 0 and CS.CP.carFingerprint in TSS2_CAR: can_sends.append(create_lta_steer_command(self.packer, 0, 0, frame // 2)) # LTA mode. Set ret.steerControlType = car.CarParams.SteerControlType.angle and whitelist 0x191 in the panda # if frame % 2 == 0: # can_sends.append(create_steer_command(self.packer, 0, 0, frame // 2)) # can_sends.append(create_lta_steer_command(self.packer, actuators.steeringAngleDeg, apply_steer_req, frame // 2)) # we can spam can to cancel the system even if we are using lat only control if (frame % 3 == 0 and CS.CP.openpilotLongitudinalControl) or pcm_cancel_cmd: lead = lead or CS.out.vEgo < 12. # at low speed we always assume the lead is present so ACC can be engaged # Lexus IS uses a different cancellation message if pcm_cancel_cmd and CS.CP.carFingerprint in [CAR.LEXUS_IS, CAR.LEXUS_RC]: can_sends.append(create_acc_cancel_command(self.packer)) elif CS.CP.openpilotLongitudinalControl: can_sends.append(create_accel_command(self.packer, pcm_accel_cmd, pcm_cancel_cmd, self.standstill_req, lead, CS.acc_type, CS.distance_btn)) self.accel = pcm_accel_cmd else: can_sends.append(create_accel_command(self.packer, 0, pcm_cancel_cmd, False, lead, CS.acc_type, CS.distance_btn)) if frame % 2 == 0 and CS.CP.enableGasInterceptor and CS.CP.openpilotLongitudinalControl: # send exactly zero if gas cmd is zero. Interceptor will send the max between read value and gas cmd. 
# This prevents unexpected pedal range rescaling can_sends.append(create_gas_interceptor_command(self.packer, interceptor_gas_cmd, frame // 2)) self.gas = interceptor_gas_cmd # ui mesg is at 100Hz but we send asap if: # - there is something to display # - there is something to stop displaying fcw_alert = hud_alert == VisualAlert.fcw steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw] send_ui = False if ((fcw_alert or steer_alert) and not self.alert_active) or \ (not (fcw_alert or steer_alert) and self.alert_active): send_ui = True self.alert_active = not self.alert_active elif pcm_cancel_cmd: # forcing the pcm to disengage causes a bad fault sound so play a good sound instead send_ui = True if (frame % 100 == 0 or send_ui): can_sends.append(create_ui_command(self.packer, steer_alert, pcm_cancel_cmd, left_line, right_line, left_lane_depart, right_lane_depart, enabled)) if frame % 100 == 0 and CS.CP.enableDsu: can_sends.append(create_fcw_command(self.packer, fcw_alert)) # *** static msgs *** for (addr, cars, bus, fr_step, vl) in STATIC_DSU_MSGS: if frame % fr_step == 0 and CS.CP.enableDsu and CS.CP.carFingerprint in cars: can_sends.append(make_can_msg(addr, vl, bus)) new_actuators = actuators.copy() new_actuators.steer = apply_steer / CarControllerParams.STEER_MAX new_actuators.accel = self.accel new_actuators.gas = self.gas return new_actuators, can_sends
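The gas-interceptor block above turns a desired acceleration into a pedal command through a speed-dependent scale and offset. A standalone sketch of that mapping, using numpy.interp in place of common.numpy_fast and the breakpoints from the default (non-RAV4, non-Corolla) branch; MIN_ACC_SPEED and PEDAL_TRANSITION are hypothetical placeholder values here, the real constants live in values.py:

import numpy as np

MIN_ACC_SPEED = 8.5       # m/s, placeholder value
PEDAL_TRANSITION = 2.0    # m/s, placeholder value
MAX_INTERCEPTOR_GAS = 0.5

def interceptor_gas(v_ego, accel):
    # Pedal scale tapers to zero past the ACC transition speed.
    pedal_scale = np.interp(v_ego,
                            [0.0, MIN_ACC_SPEED, MIN_ACC_SPEED + PEDAL_TRANSITION],
                            [0.4, 0.5, 0.0])
    # Offset compensates for creep at low speed and wind/brake drag above it.
    pedal_offset = np.interp(v_ego, [0.0, 2.3, MIN_ACC_SPEED + PEDAL_TRANSITION],
                             [-0.4, 0.0, 0.2])
    return float(np.clip(pedal_scale * (accel + pedal_offset), 0.0, MAX_INTERCEPTOR_GAS))

print(interceptor_gas(v_ego=5.0, accel=0.8))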
48.777778
152
0.716259
1,041
7,024
4.598463
0.246878
0.010863
0.03217
0.04178
0.204512
0.140798
0.115521
0.097974
0.059536
0.040735
0
0.018499
0.199601
7,024
143
153
49.118881
0.832978
0.221669
0
0.087912
0
0
0.002758
0
0
0
0
0.006993
0
1
0.021978
false
0
0.076923
0
0.120879
0.065934
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c6e4237d951b4abd908ba105b7d327ebecef98
484
py
Python
jp.atcoder/abc122/abc122_c/9516079.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-09T03:06:25.000Z
2022-02-09T03:06:25.000Z
jp.atcoder/abc122/abc122_c/9516079.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-05T22:53:18.000Z
2022-02-09T01:29:30.000Z
jp.atcoder/abc122/abc122_c/9516079.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
null
null
null
import sys n, q = map(int, sys.stdin.readline().split()) s = '$' + sys.stdin.readline().rstrip() lr = zip(*[map(int, sys.stdin.read().split())] * 2) def main(): res = [None] * (n + 1); res[0] = 0 prev = '$' for i in range(1, n+1): res[i] = res[i-1] res[i] += (prev == 'A' and s[i] == 'C') & 1 prev = s[i] for l, r in lr: yield res[r] - res[l] if __name__ == '__main__': ans = main() print(*ans, sep='\n')
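The submission above answers each (l, r) query in O(1) from a prefix count of "AC" occurrences; res[r] - res[l] counts exactly the occurrences lying fully inside the 1-indexed substring s[l..r]. The same idea in a compact standalone form:

def ac_prefix(s):
    # pref[i] = number of "AC" occurrences fully inside the first i characters
    pref = [0] * (len(s) + 1)
    for i in range(1, len(s)):
        pref[i + 1] = pref[i] + (s[i - 1] == 'A' and s[i] == 'C')
    return pref

def query(pref, l, r):
    # occurrences of "AC" inside the 1-indexed substring s[l..r]
    return pref[r] - pref[l]

pref = ac_prefix("ACACTACG")
assert query(pref, 3, 7) == 2
assert query(pref, 1, 8) == 3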
23.047619
52
0.452479
77
484
2.74026
0.480519
0.113744
0.085308
0.132701
0
0
0
0
0
0
0
0.024096
0.31405
484
20
53
24.2
0.611446
0
0
0
0
0
0.030108
0
0
0
0
0
0
1
0.0625
false
0
0.0625
0
0.125
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c71ca4f5171ffec7ba2da60f8676b83050c69c
2,262
py
Python
Decoder.py
gokulsg/Attention-is-all-you-need-implementation-from-scratch
f5eb591d169cbef3ef8b066d8d462fee11badc3b
[ "MIT" ]
1
2022-02-19T03:51:38.000Z
2022-02-19T03:51:38.000Z
Decoder.py
gokulsg/Attention-is-all-you-need-implementation-from-scratch
f5eb591d169cbef3ef8b066d8d462fee11badc3b
[ "MIT" ]
null
null
null
Decoder.py
gokulsg/Attention-is-all-you-need-implementation-from-scratch
f5eb591d169cbef3ef8b066d8d462fee11badc3b
[ "MIT" ]
null
null
null
import torch import torch.nn as nn from DecoderLayer import DecoderLayer import math class Decoder(nn.Module): def __init__(self, output_dim, embed_dim, num_layers, num_heads, expand_dim, dropout, device, max_length = 30): super().__init__() self.tok_embedding = nn.Embedding(output_dim, embed_dim) #self.pos_embedding = nn.Embedding(max_length, embed_dim) self.pos_embedding = nn.Embedding.from_pretrained(self.get_positional_encoding(max_length, embed_dim)) self.layers = nn.ModuleList([DecoderLayer(embed_dim, num_heads, expand_dim, dropout) for _ in range(num_layers)]) self.fc_out = nn.Linear(embed_dim, output_dim) self.dropout = nn.Dropout(dropout) self.scale = torch.sqrt(torch.FloatTensor([embed_dim])).to(device) self.device = device def forward(self, trg, enc_src, trg_mask, src_mask): #trg = [batch size, trg len] #enc_src = [batch size, src len, embed dim] #trg_mask = [batch size, 1, trg len, trg len] #src_mask = [batch size, 1, 1, src len] batch_size = trg.shape[0] trg_len = trg.shape[1] pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device) #pos = [batch size, trg len] trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos)) #trg = [batch size, trg len, embed dim] for layer in self.layers: trg = layer(trg, enc_src, trg_mask, src_mask) #trg = [batch size, trg len, embed dim] output = self.fc_out(trg) #output = [batch size, trg len, output dim] return output def get_positional_encoding(self, max_seq_len, embed_dim): pos_enc = torch.zeros(max_seq_len, embed_dim) position = torch.arange(0, max_seq_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)) pos_enc[:, 0::2] = torch.sin(position * div_term) pos_enc[:, 1::2] = torch.cos(position * div_term) return pos_enc
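get_positional_encoding above precomputes the fixed sinusoidal table from Vaswani et al. and hands it to nn.Embedding.from_pretrained, so positions are looked up rather than recomputed. A quick self-contained sanity check of that construction (only torch required):

import math
import torch

def positional_encoding(max_seq_len, embed_dim):
    pos_enc = torch.zeros(max_seq_len, embed_dim)
    position = torch.arange(0, max_seq_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))
    pos_enc[:, 0::2] = torch.sin(position * div_term)  # even columns: sines
    pos_enc[:, 1::2] = torch.cos(position * div_term)  # odd columns: cosines
    return pos_enc

pe = positional_encoding(30, 16)
assert pe.shape == (30, 16)
# Row 0 interleaves sin(0) = 0 and cos(0) = 1.
assert torch.allclose(pe[0, 0::2], torch.zeros(8))
assert torch.allclose(pe[0, 1::2], torch.ones(8))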
42.679245
122
0.597259
304
2,262
4.213816
0.233553
0.087432
0.056206
0.058548
0.232631
0.145199
0.145199
0.064012
0.064012
0.064012
0
0.015
0.292661
2,262
53
123
42.679245
0.785625
0.155615
0
0
0
0
0
0
0
0
0
0
0
1
0.1
false
0
0.133333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c821ec1109c1e7667cac57580d2ee557fe694c
901
py
Python
salt/grains/nxos.py
babs/salt
c536ea716d5308880b244e7980f4b659d86fc104
[ "Apache-2.0" ]
9,425
2015-01-01T05:59:24.000Z
2022-03-31T20:44:05.000Z
salt/grains/nxos.py
babs/salt
c536ea716d5308880b244e7980f4b659d86fc104
[ "Apache-2.0" ]
33,507
2015-01-01T00:19:56.000Z
2022-03-31T23:48:20.000Z
salt/grains/nxos.py
babs/salt
c536ea716d5308880b244e7980f4b659d86fc104
[ "Apache-2.0" ]
5,810
2015-01-01T19:11:45.000Z
2022-03-31T02:37:20.000Z
""" Grains for Cisco NX-OS minions .. versionadded:: 2016.11.0 For documentation on setting up the nxos proxy minion look in the documentation for :mod:`salt.proxy.nxos<salt.proxy.nxos>`. """ import logging import salt.utils.nxos import salt.utils.platform from salt.exceptions import NxosClientError log = logging.getLogger(__name__) __proxyenabled__ = ["nxos"] __virtualname__ = "nxos" def __virtual__(): try: salt.utils.nxos.version_info() except NxosClientError as err: return False, err return __virtualname__ def system_information(proxy=None): if salt.utils.platform.is_proxy(): if proxy is None: return {} if proxy["nxos.initialized"]() is False: return {} return {"nxos": proxy["nxos.grains"]()} else: data = salt.utils.nxos.version_info() return salt.utils.nxos.system_info(data)
21.97561
79
0.675916
113
901
5.168142
0.460177
0.092466
0.089041
0.068493
0.082192
0
0
0
0
0
0
0.009887
0.214206
901
40
80
22.525
0.814972
0.205327
0
0.086957
0
0
0.055085
0
0
0
0
0
0
1
0.086957
false
0
0.173913
0
0.521739
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07c8e1478041c57a7ff67d2397fd305a48517875
3,356
py
Python
tmsproviderapisdk/tms_device.py
tvip/tmsproviderapisdk
f385ddb0d7e87e7a62d1caef3e2c9769e844a4a1
[ "MIT" ]
null
null
null
tmsproviderapisdk/tms_device.py
tvip/tmsproviderapisdk
f385ddb0d7e87e7a62d1caef3e2c9769e844a4a1
[ "MIT" ]
null
null
null
tmsproviderapisdk/tms_device.py
tvip/tmsproviderapisdk
f385ddb0d7e87e7a62d1caef3e2c9769e844a4a1
[ "MIT" ]
null
null
null
from typing import List, Optional, Tuple from tmsproviderapisdk.tms_extended_model import TmsExtendedModel class TmsDevice(TmsExtendedModel): _path_url = "/devices/" def __init__(self, unique_id: str, account: int, device_id: int = None, ipaddr: str = None, mac: str = None, remote_custom_field: str = None, comment: str = None, last_online: str = None, last_fw_ver: str = None, first_online: str = None, use_nat: bool = False, operation_system: str = None, udpxy_addr: str = None, device_type: int = None, provider: int = None): self.unique_id = unique_id self.account = account self.id = device_id self.ipaddr = ipaddr self.mac = mac self.remote_custom_field = remote_custom_field self.comment = comment self.last_online = last_online self.last_fw_ver = last_fw_ver self.first_online = first_online self.use_nat = use_nat self.operation_system = operation_system self.udpxy_addr = udpxy_addr self.device_type = device_type self.provider = provider @staticmethod def _dict_to_object(device_dict: dict) -> object: device = TmsDevice( unique_id=device_dict["unique_id"], device_id=device_dict["id"], ipaddr=device_dict["ipaddr"], mac=device_dict["mac"], remote_custom_field=device_dict["remote_custom_field"], comment=device_dict["comment"], last_online=device_dict["last_online"], last_fw_ver=device_dict["last_fw_ver"], first_online=device_dict["first_online"], use_nat=device_dict["use_nat"], operation_system=device_dict["operation_system"], udpxy_addr=device_dict["udpxy_addr"], device_type=device_dict["device_type"], provider=device_dict["provider"], account=device_dict["account"] ) return device @classmethod def get_list(cls, account: int = None, device_type: int = None, limit: int = 50, provider: int = None, quick_search: str = "", remote_custom_field: str = None, sort: str = "", start: int = 0, unique_id: str = "") -> Optional[Tuple[List[object], int]]: devices = super().get_list(start=start, limit=limit, account=account, device_type=device_type, provider=provider, quick_search=quick_search, remote_custom_field=remote_custom_field, sort=sort, unique_id=unique_id) return devices def __str__(self): return """id:{}, ipaddr:{}, mac:{}, unique_id:{}, remote_custom_field: {}, comment: {}, last_online: {}, \ last_fw_ver: {}, first_online: {}, use_nat: {}, operation_system: {}, \ udpxy_addr: {}, device_type: {}, provider: {}, account: {}""".format( self.id, self.ipaddr, self.mac, self.unique_id, self.remote_custom_field, self.comment, self.last_online, self.last_fw_ver, self.first_online, self.use_nat, self.operation_system, self.udpxy_addr, self.device_type, self.provider, self.account )
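In _dict_to_object above every key matches its constructor parameter except "id", which maps to device_id. The same mapping can be done generically; a sketch with a hypothetical helper and demo class, not part of the SDK:

class Demo:
    def __init__(self, unique_id, account, device_id=None):
        self.unique_id, self.account, self.device_id = unique_id, account, device_id

def from_dict(cls, d):
    # Rename the one mismatched key ("id" -> device_id), forward the rest verbatim.
    kwargs = dict(d)
    kwargs["device_id"] = kwargs.pop("id")
    return cls(**kwargs)

dev = from_dict(Demo, {"unique_id": "stb-01", "account": 42, "id": 7})
assert (dev.unique_id, dev.device_id) == ("stb-01", 7)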
40.926829
120
0.598033
386
3,356
4.867876
0.165803
0.085152
0.090474
0.021288
0.30016
0.081426
0
0
0
0
0
0.001269
0.29559
3,356
81
121
41.432099
0.79357
0
0
0
0
0
0.116508
0
0
0
0
0
0
1
0.056338
false
0
0.028169
0.014085
0.15493
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07ca1045f3d1ae1412959e210adfbad3cf54eff0
7,676
py
Python
fealty/fields.py
eddiejessup/fealty
03745eb98d85bc2a5d08920773ab9c4515462d30
[ "BSD-3-Clause" ]
null
null
null
fealty/fields.py
eddiejessup/fealty
03745eb98d85bc2a5d08920773ab9c4515462d30
[ "BSD-3-Clause" ]
null
null
null
fealty/fields.py
eddiejessup/fealty
03745eb98d85bc2a5d08920773ab9c4515462d30
[ "BSD-3-Clause" ]
null
null
null
""" A class hierarchy relating to fields of all kinds. """ from __future__ import print_function, division import numpy as np from ciabatta.meta import make_repr_str from fealty import lattice, field_numerics, walled_field_numerics class Space(object): def __init__(self, L, dim): self.L = L self.dim = dim @property def L_half(self): return self.L / 2.0 @property def A(self): return self.L ** self.dim def iterate(self, *args, **kwargs): pass def __repr__(self): fs = [('L', self.L), ('dim', self.dim)] return make_repr_str(self, fs) class Field(Space): def __init__(self, L, dim, dx): Space.__init__(self, L, dim) self.M = int(round(self.L / dx)) @property def dx(self): return self.L / self.M @property def A_i(self): return self.M ** self.dim @property def dA(self): return self.dx ** self.dim def density_field(self, r): return density(r, self.L, self.dx) def r_to_i(self, r): return lattice.r_to_i(r, self.L, self.dx) def i_to_r(self, i): return lattice.i_to_r(i, self.L, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx)] return make_repr_str(self, fs) class Scalar(Field): def __init__(self, L, dim, dx, a_0=0.0): Field.__init__(self, L, dim, dx) self.a = np.ones(self.dim * (self.M,), dtype=np.float) * a_0 def grad(self): return _grad(self.a, self.dx) def grad_i(self, r): return _grad_i(self.a, self.r_to_i(r), self.dx) def laplacian(self): return _laplace(self.a, self.dx) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('a_0', self.a_0)] return make_repr_str(self, fs) class Diffusing(Scalar): def __init__(self, L, dim, dx, D, dt, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.D = D self.dt = dt if self.D > self.dx ** 2 / (2.0 * self.dim * self.dt): raise Exception('Unstable diffusion constant') def iterate(self): self.a += self.D * self.laplacian() * self.dt def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) class WalledScalar(Scalar): def __init__(self, L, dim, dx, walls, a_0=0.0): Scalar.__init__(self, L, dim, dx, a_0=a_0) self.walls = walls # Make field zero-valued where obstructed self.a *= np.logical_not(self.walls) def grad(self): return _walled_grad(self.a, self.dx, self.walls) def grad_i(self, r): return _walled_grad_i(self.a, self.r_to_i(r), self.dx, self.walls) def laplacian(self): return _walled_laplace(self.a, self.dx, self.walls) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('a_0', self.a_0)] return make_repr_str(self, fs) # Note, inheritance order matters to get walled grad & laplacian call # (see diamond problem on wikipedia and how python handles it) class WalledDiffusing(WalledScalar, Diffusing): def __init__(self, L, dim, dx, walls, D, dt, a_0=0.0): Diffusing.__init__(self, L, dim, dx, D, dt, a_0=a_0) WalledScalar.__init__(self, L, dim, dx, walls, a_0=a_0) def __repr__(self): fs = [('L', self.L), ('dim', self.dim), ('dx', self.dx), ('walls', self.walls), ('D', self.D), ('dt', self.dt), ('a_0', self.a_0)] return make_repr_str(self, fs) def density(r, L, dx): assert r.ndim == 2 M = int(round(L / dx)) dx = L / M inds = lattice.r_to_i(r, L, dx) f = np.zeros(r.shape[1] * (M,), dtype=np.int) if f.ndim == 1: field_numerics.density_1d(inds, f) elif f.ndim == 2: field_numerics.density_2d(inds, f) elif f.ndim == 3: field_numerics.density_3d(inds, f) else: raise Exception('Density calc not implemented in this dimension') return f / dx ** r.shape[1] def 
_laplace(field, dx): assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: field_numerics.laplace_1d(field, laplace, dx) elif field.ndim == 2: field_numerics.laplace_2d(field, laplace, dx) elif field.ndim == 3: field_numerics.laplace_3d(field, laplace, dx) else: raise Exception('Laplacian not implemented in this dimension') return laplace def _grad_i(field, inds, dx): assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: field_numerics.grad_i_1d(field, inds, grad_i, dx) elif field.ndim == 2: field_numerics.grad_i_2d(field, inds, grad_i, dx) elif field.ndim == 3: field_numerics.grad_i_3d(field, inds, grad_i, dx) else: raise Exception("Grad_i not implemented in this dimension") return grad_i def _grad(field, dx): assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: field_numerics.grad_1d(field, grad, dx) elif field.ndim == 2: field_numerics.grad_2d(field, grad, dx) elif field.ndim == 3: field_numerics.grad_3d(field, grad, dx) else: raise Exception('Grad not implemented in this dimension') return grad def _div(field, dx): assert dx > 0.0 div = np.empty(field.shape[:-1], dtype=field.dtype) if field.ndim == 2: field_numerics.div_1d(field, div, dx) elif field.ndim == 3: field_numerics.div_2d(field, div, dx) elif field.ndim == 4: field_numerics.div_3d(field, div, dx) else: raise Exception('Divergence not implemented in this dimension') return div def _walled_grad(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 grad = np.empty(field.shape + (field.ndim,), dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_1d(field, grad, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_2d(field, grad, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_3d(field, grad, dx, walls) else: raise Exception("Walled grad not implemented in this dimension") return grad def _walled_grad_i(field, inds, dx, walls): assert field.shape == walls.shape assert dx > 0.0 assert inds.ndim == 2 assert field.ndim == inds.shape[1] grad_i = np.empty(inds.shape, dtype=field.dtype) if field.ndim == 1: walled_field_numerics.grad_i_1d(field, inds, grad_i, dx, walls) elif field.ndim == 2: walled_field_numerics.grad_i_2d(field, inds, grad_i, dx, walls) elif field.ndim == 3: walled_field_numerics.grad_i_3d(field, inds, grad_i, dx, walls) else: raise Exception("Walled Grad_i not implemented in this dimension") return grad_i def _walled_laplace(field, dx, walls): assert field.shape == walls.shape assert dx > 0.0 laplace = np.empty_like(field) if field.ndim == 1: walled_field_numerics.laplace_1d(field, laplace, dx, walls) elif field.ndim == 2: walled_field_numerics.laplace_2d(field, laplace, dx, walls) elif field.ndim == 3: walled_field_numerics.laplace_3d(field, laplace, dx, walls) else: raise Exception('Laplacian not implemented in this dimension') return laplace
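Diffusing.__init__ above rejects diffusion constants that violate the explicit-scheme stability bound D <= dx^2 / (2 * dim * dt). A quick standalone check of that bound (plain Python, no compiled field_numerics module needed):

def max_stable_D(dx, dim, dt):
    # Largest diffusion constant the forward-Euler Laplacian update tolerates.
    return dx ** 2 / (2.0 * dim * dt)

dx, dim, dt = 0.1, 2, 0.01
print(max_stable_D(dx, dim, dt))  # 0.25; any larger D raises in Diffusing.__init__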
29.186312
74
0.60839
1,174
7,676
3.777683
0.099659
0.076212
0.032469
0.032469
0.695829
0.632694
0.570688
0.450282
0.416235
0.369335
0
0.01823
0.256774
7,676
262
75
29.29771
0.759159
0.028661
0
0.432161
0
0
0.058421
0
0
0
0
0
0.075377
1
0.180905
false
0.005025
0.020101
0.070352
0.371859
0.005025
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07cb2143ea47874c58c2e1ce2165dfb24f78e2e2
583
py
Python
examples/example_django/example_django/asgi.py
cpascariello/aleph-vm
1b4920bec211ef3bd379e9359f57f06b9308c1a1
[ "MIT" ]
19
2021-04-13T14:01:06.000Z
2022-02-19T00:45:05.000Z
examples/example_django/example_django/asgi.py
cpascariello/aleph-vm
1b4920bec211ef3bd379e9359f57f06b9308c1a1
[ "MIT" ]
65
2021-04-14T09:02:24.000Z
2022-03-29T07:56:03.000Z
examples/example_django/example_django/asgi.py
cpascariello/aleph-vm
1b4920bec211ef3bd379e9359f57f06b9308c1a1
[ "MIT" ]
9
2021-06-01T23:03:28.000Z
2022-02-17T20:24:57.000Z
""" ASGI config for example_django project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_django.settings") application = get_asgi_application() os.system("/usr/bin/python3 /opt/code/manage.py migrate") os.system("/usr/bin/python3 /opt/code/manage.py " "loaddata /opt/code/blog/fixtures/default_articles.json")
26.5
78
0.766724
85
583
5.152941
0.647059
0.047945
0.082192
0.091324
0.164384
0.164384
0.164384
0.164384
0.164384
0
0
0.007692
0.108062
583
21
79
27.761905
0.834615
0.377358
0
0
0
0
0.507042
0.253521
0
0
0
0
0
1
0
false
0
0.285714
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07cebbfa2f7886d8fbfa5e82f77ae6da2ef52744
703
py
Python
src/oncall/messengers/teams_messenger.py
navoday-91/oncall
0a977f06bbf308978d0d2c2b46e0aca23937ca9a
[ "BSD-2-Clause" ]
857
2017-05-03T00:59:10.000Z
2022-03-29T06:45:23.000Z
src/oncall/messengers/teams_messenger.py
navoday-91/oncall
0a977f06bbf308978d0d2c2b46e0aca23937ca9a
[ "BSD-2-Clause" ]
142
2017-05-03T02:00:58.000Z
2022-03-25T20:58:11.000Z
src/oncall/messengers/teams_messenger.py
navoday-91/oncall
0a977f06bbf308978d0d2c2b46e0aca23937ca9a
[ "BSD-2-Clause" ]
218
2017-05-03T02:04:56.000Z
2022-03-25T18:28:04.000Z
import pymsteams import logging from oncall.constants import TEAMS_SUPPORT class teams_messenger(object): supports = frozenset([TEAMS_SUPPORT]) def __init__(self, config): self.webhook = config['webhook'] def send(self, message): heading = message.get("subject") final_message = "User: " + message.get("user") + " Message: " + message.get("body") try: myTeamsMessage = pymsteams.connectorcard(self.webhook) myTeamsMessage.title(str(heading)) myTeamsMessage.text(str(final_message)) myTeamsMessage.send() except Exception: logging.exception("An issue occurred while sending message to teams messenger")
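A minimal usage sketch for the messenger above, assuming the module is importable from the installed oncall package and pymsteams is available; the webhook URL is a placeholder:

from oncall.messengers.teams_messenger import teams_messenger

messenger = teams_messenger({"webhook": "https://example.webhook.office.com/..."})
messenger.send({"subject": "Oncall", "user": "alice", "body": "Shift starts at 09:00"})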
30.565217
91
0.651494
73
703
6.150685
0.547945
0.066815
0
0
0
0
0
0
0
0
0
0
0.243243
703
22
92
31.954545
0.843985
0
0
0
0
0
0.135135
0
0
0
0
0
0
1
0.117647
false
0
0.176471
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07cf43823d8f9a2b976bcf3456b4ba03ebef67d9
11,632
py
Python
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
Laniakea94/BigDL
4d01734086dda893a7f08ba53251dc3c5c8ecfd1
[ "Apache-2.0" ]
3
2021-07-14T01:28:47.000Z
2022-03-02T01:16:32.000Z
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
liangs6212/BigDL
3c89ff7e8bbdc713110536c18099506811cd2b3a
[ "Apache-2.0" ]
null
null
null
python/chronos/src/bigdl/chronos/autots/model/auto_prophet.py
liangs6212/BigDL
3c89ff7e8bbdc713110536c18099506811cd2b3a
[ "Apache-2.0" ]
null
null
null
# + # # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pandas as pd import warnings from bigdl.chronos.model.prophet import ProphetBuilder, ProphetModel from bigdl.chronos.autots.utils import recalculate_n_sampling # - class AutoProphet: def __init__(self, changepoint_prior_scale=None, seasonality_prior_scale=None, holidays_prior_scale=None, seasonality_mode=None, changepoint_range=None, metric='mse', logs_dir="/tmp/auto_prophet_logs", cpus_per_trial=1, name="auto_prophet", remote_dir=None, load_dir=None, **prophet_config ): """ Create an automated Prophet Model. Users need to specify either the exact value or the search space of the Prophet model hyperparameters. For details of the Prophet model hyperparameters, refer to https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning. :param changepoint_prior_scale: Int or hp sampling function from an integer space for hyperparameter changepoint_prior_scale for the Prophet model. For hp sampling, see bigdl.orca.automl.hp for more details. e.g. hp.loguniform(0.001, 0.5). :param seasonality_prior_scale: hyperparameter seasonality_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param holidays_prior_scale: hyperparameter holidays_prior_scale for the Prophet model. e.g. hp.loguniform(0.01, 10). :param seasonality_mode: hyperparameter seasonality_mode for the Prophet model. e.g. hp.choice(['additive', 'multiplicative']). :param changepoint_range: hyperparameter changepoint_range for the Prophet model. e.g. hp.uniform(0.8, 0.95). :param metric: String. The evaluation metric name to optimize. e.g. "mse" :param logs_dir: Local directory to save logs and results. It defaults to "/tmp/auto_prophet_logs" :param cpus_per_trial: Int. Number of cpus for each trial. It defaults to 1. :param name: name of the AutoProphet. It defaults to "auto_prophet" :param remote_dir: String. Remote directory to sync training results and checkpoints. It defaults to None and doesn't take effect while running in local. While running in cluster, it defaults to "hdfs:///tmp/{name}". :param load_dir: Load the ckpt from load_dir. The value defaults to None. :param prophet_config: Other Prophet hyperparameters.
""" if load_dir: self.best_model = ProphetModel() self.best_model.restore(load_dir) try: from bigdl.orca.automl.auto_estimator import AutoEstimator import bigdl.orca.automl.hp as hp self.search_space = { "changepoint_prior_scale": hp.grid_search([0.005, 0.05, 0.1, 0.5]) if changepoint_prior_scale is None else changepoint_prior_scale, "seasonality_prior_scale": hp.grid_search([0.01, 0.1, 1.0, 10.0]) if seasonality_prior_scale is None else seasonality_prior_scale, "holidays_prior_scale": hp.loguniform(0.01, 10) if holidays_prior_scale is None else holidays_prior_scale, "seasonality_mode": hp.choice(['additive', 'multiplicative']) if seasonality_mode is None else seasonality_mode, "changepoint_range": hp.uniform(0.8, 0.95) if changepoint_range is None else changepoint_range } self.search_space.update(prophet_config) # update other configs self.metric = metric model_builder = ProphetBuilder() self.auto_est = AutoEstimator(model_builder=model_builder, logs_dir=logs_dir, resources_per_trial={"cpu": cpus_per_trial}, remote_dir=remote_dir, name=name) except ImportError: warnings.warn("You need to install `bigdl-orca[automl]` to use `fit` function.") def fit(self, data, cross_validation=True, expect_horizon=None, freq=None, metric_threshold=None, n_sampling=16, search_alg=None, search_alg_params=None, scheduler=None, scheduler_params=None, ): """ Automatically fit the model and search for the best hyperparameters. :param data: training data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param cross_validation: bool, if the eval result comes from cross_validation. The value is set to True by default. Setting this option to False to speed up the process. :param expect_horizon: int, validation data will be automatically splited from training data, and expect_horizon is the horizon you may need to use once the mode is fitted. The value defaults to None, where 10% of training data will be taken as the validation data. :param freq: the freqency of the training dataframe. the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliasesDefaulted to None, where an unreliable frequency will be infer implicitly. :param metric_threshold: a trial will be terminated when metric threshold is met :param n_sampling: Number of trials to evaluate in total. Defaults to 16. If hp.grid_search is in search_space, the grid will be run n_sampling of trials and round up n_sampling according to hp.grid_search. If this is -1, (virtually) infinite samples are generated until a stopping condition is met. :param search_alg: str, all supported searcher provided by ray tune (i.e."variant_generator", "random", "ax", "dragonfly", "skopt", "hyperopt", "bayesopt", "bohb", "nevergrad", "optuna", "zoopt" and "sigopt") :param search_alg_params: extra parameters for searcher algorithm besides search_space, metric and searcher mode :param scheduler: str, all supported scheduler provided by ray tune :param scheduler_params: parameters for scheduler """ if expect_horizon is None: expect_horizon = int(0.1*len(data)) if freq is None: assert len(data) >= 2, "The training dataframe should contains more than 2 records." assert pd.api.types.is_datetime64_any_dtype(data["ds"].dtypes), \ "The 'ds' col should be in datetime 64 type, or you need to set `freq` in fit." 
self._freq = data["ds"].iloc[1] - data["ds"].iloc[0] else: self._freq = pd.Timedelta(freq) expect_horizon_str = str(self._freq * expect_horizon) self.search_space.update({"expect_horizon": expect_horizon_str, "cross_validation": cross_validation}) train_data = data if cross_validation else data[:len(data)-expect_horizon] validation_data = None if cross_validation else data[len(data)-expect_horizon:] n_sampling = recalculate_n_sampling(self.search_space, n_sampling) if n_sampling != -1 else -1 self.auto_est.fit(data=train_data, validation_data=validation_data, metric=self.metric, metric_threshold=metric_threshold, n_sampling=n_sampling, search_space=self.search_space, search_alg=search_alg, search_alg_params=search_alg_params, scheduler=scheduler, scheduler_params=scheduler_params ) # use the best config to fit a new prophet model on whole data self.best_model = ProphetBuilder().build(self.auto_est.get_best_config()) self.best_model.model.fit(data) def predict(self, horizon=1, freq="D", ds_data=None): """ Predict using the best model after HPO. :param horizon: the number of steps forward to predict :param freq: the freqency of the predicted dataframe, defaulted to day("D"), the frequency can be anything from the pandas list of frequency strings here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#timeseries-offset-aliases :param ds_data: a dataframe that has 1 column 'ds' indicating date. """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling predict!") return self.best_model.predict(horizon=horizon, freq=freq, ds_data=ds_data) def evaluate(self, data, metrics=['mse']): """ Evaluate using the best model after HPO. :param data: evaluation data, a pandas dataframe with Td rows, and 2 columns, with column 'ds' indicating date and column 'y' indicating value and Td is the time dimension :param metrics: A list contains metrics for test/valid data. """ if data is None: raise ValueError("Input invalid data of None") if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling evaluate!") return self.best_model.evaluate(target=data, metrics=metrics) def save(self, checkpoint_file): """ Save the best model after HPO. :param checkpoint_file: The location you want to save the best model, should be a json file """ if self.best_model.model is None: raise RuntimeError( "You must call fit or restore first before calling save!") self.best_model.save(checkpoint_file) def restore(self, checkpoint_file): """ Restore the best model after HPO. :param checkpoint_file: The checkpoint file location you want to load the best model. """ self.best_model.restore(checkpoint_file) def get_best_model(self): """ Get the best Prophet model. """ return self.best_model.model
48.066116
121
0.616145
1,432
11,632
4.869413
0.242318
0.024523
0.022372
0.012907
0.215402
0.18672
0.165209
0.150294
0.150294
0.125341
0
0.010662
0.314649
11,632
241
122
48.26556
0.864024
0.454178
0
0.069565
0
0.008696
0.10633
0.012091
0
0
0
0
0.017391
1
0.06087
false
0
0.06087
0
0.156522
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07cfa7c073692f7ff81f78defb2e3541ec3803be
13,313
py
Python
nf/flows.py
arita37/normalizing-flows
c9896656bfd2007b0c17b801c0fe068560127301
[ "MIT" ]
1
2019-11-11T05:40:30.000Z
2019-11-11T05:40:30.000Z
nf/flows.py
arita37/normalizing-flows
c9896656bfd2007b0c17b801c0fe068560127301
[ "MIT" ]
null
null
null
nf/flows.py
arita37/normalizing-flows
c9896656bfd2007b0c17b801c0fe068560127301
[ "MIT" ]
null
null
null
import math import numpy as np import scipy as sp import scipy.linalg import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F from nf.utils import unconstrained_RQS # supported non-linearities: note that the function must be invertible functional_derivatives = { torch.tanh: lambda x: 1 - torch.pow(torch.tanh(x), 2), F.leaky_relu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * -0.01, F.elu: lambda x: (x > 0).type(torch.FloatTensor) + \ (x < 0).type(torch.FloatTensor) * torch.exp(x) } class Planar(nn.Module): """ Planar flow. z = f(x) = x + u h(wᵀx + b) [Rezende and Mohamed, 2015] """ def __init__(self, dim, nonlinearity=torch.tanh): super().__init__() self.h = nonlinearity self.w = nn.Parameter(torch.Tensor(dim)) self.u = nn.Parameter(torch.Tensor(dim)) self.b = nn.Parameter(torch.Tensor(1)) self.reset_parameters(dim) def reset_parameters(self, dim): init.uniform_(self.w, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.u, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.b, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. """ if self.h in (F.elu, F.leaky_relu): u = self.u elif self.h == torch.tanh: scal = torch.log(1+torch.exp(self.w @ self.u)) - self.w @ self.u - 1 u = self.u + scal * self.w / torch.norm(self.w) else: raise NotImplementedError("Non-linearity is not supported.") lin = torch.unsqueeze(x @ self.w, 1) + self.b z = x + u * self.h(lin) phi = functional_derivatives[self.h](lin) * self.w log_det = torch.log(torch.abs(1 + phi @ u) + 1e-4) return z, log_det def backward(self, z): raise NotImplementedError("Planar flow has no algebraic inverse.") class Radial(nn.Module): """ Radial flow. z = f(x) = x + β h(α, r)(x − x0) [Rezende and Mohamed 2015] """ def __init__(self, dim): super().__init__() self.x0 = nn.Parameter(torch.Tensor(dim)) self.log_alpha = nn.Parameter(torch.Tensor(1)) self.beta = nn.Parameter(torch.Tensor(1)) self.reset_parameters(dim) def reset_parameters(self, dim): init.uniform_(self.x0, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.log_alpha, -math.sqrt(1/dim), math.sqrt(1/dim)) init.uniform_(self.beta, -math.sqrt(1/dim), math.sqrt(1/dim)) def forward(self, x): """ Given x, returns z and the log-determinant log|df/dx|. """ m, n = x.shape r = torch.norm(x - self.x0) h = 1 / (torch.exp(self.log_alpha) + r) beta = -torch.exp(self.log_alpha) + torch.log(1 + torch.exp(self.beta)) z = x + beta * h * (x - self.x0) log_det = (n - 1) * torch.log(1 + beta * h) + \ torch.log(1 + beta * h - \ beta * r / (torch.exp(self.log_alpha) + r) ** 2) return z, log_det class FCNN(nn.Module): """ Simple fully connected neural network. """ def __init__(self, in_dim, out_dim, hidden_dim): super().__init__() self.network = nn.Sequential( nn.Linear(in_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, hidden_dim), nn.Tanh(), nn.Linear(hidden_dim, out_dim), ) def forward(self, x): return self.network(x) class RealNVP(nn.Module): """ Non-volume preserving flow. [Dinh et. al. 
2017] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.t1 = base_network(dim // 2, dim // 2, hidden_dim) self.s1 = base_network(dim // 2, dim // 2, hidden_dim) self.t2 = base_network(dim // 2, dim // 2, hidden_dim) self.s2 = base_network(dim // 2, dim // 2, hidden_dim) def forward(self, x): lower, upper = x[:,:self.dim // 2], x[:,self.dim // 2:] t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = t1_transformed + upper * torch.exp(s1_transformed) t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = t2_transformed + lower * torch.exp(s2_transformed) z = torch.cat([lower, upper], dim=1) log_det = torch.sum(s1_transformed, dim=1) + \ torch.sum(s2_transformed, dim=1) return z, log_det def backward(self, z): lower, upper = z[:,:self.dim // 2], z[:,self.dim // 2:] t2_transformed = self.t2(upper) s2_transformed = self.s2(upper) lower = (lower - t2_transformed) * torch.exp(-s2_transformed) t1_transformed = self.t1(lower) s1_transformed = self.s1(lower) upper = (upper - t1_transformed) * torch.exp(-s1_transformed) x = torch.cat([lower, upper], dim=1) log_det = torch.sum(-s1_transformed, dim=1) + \ torch.sum(-s2_transformed, dim=1) return x, log_det class MAF(nn.Module): """ Masked auto-regressive flow. [Papamakarios et al. 2018] """ def __init__(self, dim, hidden_dim = 8, base_network=FCNN): super().__init__() self.dim = dim self.layers = nn.ModuleList() self.initial_param = nn.Parameter(torch.Tensor(2)) for i in range(1, dim): self.layers += [base_network(i, 2, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.initial_param, -math.sqrt(0.5), math.sqrt(0.5)) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] z[:, i] = (x[:, i] - mu) / torch.exp(alpha) log_det -= alpha return z.flip(dims=(1,)), log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(z.shape[0]) z = z.flip(dims=(1,)) for i in range(self.dim): if i == 0: mu, alpha = self.initial_param[0], self.initial_param[1] else: out = self.layers[i - 1](x[:, :i]) mu, alpha = out[:, 0], out[:, 1] x[:, i] = mu + torch.exp(alpha) * z[:, i] log_det += alpha return x, log_det class ActNorm(nn.Module): """ ActNorm layer. [Kingma and Dhariwal, 2018.] """ def __init__(self, dim): super().__init__() self.dim = dim self.mu = nn.Parameter(torch.zeros(dim, dtype = torch.float)) self.log_sigma = nn.Parameter(torch.zeros(dim, dtype = torch.float)) def forward(self, x): z = x * torch.exp(self.log_sigma) + self.mu log_det = torch.sum(self.log_sigma) return z, log_det def backward(self, z): x = (z - self.mu) / torch.exp(self.log_sigma) log_det = -torch.sum(self.log_sigma) return x, log_det class OneByOneConv(nn.Module): """ Invertible 1x1 convolution. [Kingma and Dhariwal, 2018.] 
""" def __init__(self, dim): super().__init__() self.dim = dim W, _ = sp.linalg.qr(np.random.randn(dim, dim)) P, L, U = sp.linalg.lu(W) self.P = torch.tensor(P, dtype = torch.float) self.L = nn.Parameter(torch.tensor(L, dtype = torch.float)) self.S = nn.Parameter(torch.tensor(np.diag(U), dtype = torch.float)) self.U = nn.Parameter(torch.triu(torch.tensor(U, dtype = torch.float), diagonal = 1)) self.W_inv = None def forward(self, x): L = torch.tril(self.L, diagonal = -1) + torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) z = x @ self.P @ L @ (U + torch.diag(self.S)) log_det = torch.sum(torch.log(torch.abs(self.S))) return z, log_det def backward(self, z): if not self.W_inv: L = torch.tril(self.L, diagonal = -1) + \ torch.diag(torch.ones(self.dim)) U = torch.triu(self.U, diagonal = 1) W = self.P @ L @ (U + torch.diag(self.S)) self.W_inv = torch.inverse(W) x = z @ self.W_inv log_det = -torch.sum(torch.log(torch.abs(self.S))) return x, log_det class NSF_AR(nn.Module): """ Neural spline flow, auto-regressive. [Durkan et al. 2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.layers = nn.ModuleList() self.init_param = nn.Parameter(torch.Tensor(3 * K - 1)) for i in range(1, dim): self.layers += [base_network(i, 3 * K - 1, hidden_dim)] self.reset_parameters() def reset_parameters(self): init.uniform_(self.init_param, - 1 / 2, 1 / 2) def forward(self, x): z = torch.zeros_like(x) log_det = torch.zeros(z.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) z[:, i], ld = unconstrained_RQS( x[:, i], W, H, D, inverse=False, tail_bound=self.B) log_det += ld return z, log_det def backward(self, z): x = torch.zeros_like(z) log_det = torch.zeros(x.shape[0]) for i in range(self.dim): if i == 0: init_param = self.init_param.expand(x.shape[0], 3 * self.K - 1) W, H, D = torch.split(init_param, self.K, dim = 1) else: out = self.layers[i - 1](x[:, :i]) W, H, D = torch.split(out, self.K, dim = 1) W, H = torch.softmax(W, dim = 1), torch.softmax(H, dim = 1) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) x[:, i], ld = unconstrained_RQS( z[:, i], W, H, D, inverse = True, tail_bound = self.B) log_det += ld return x, log_det class NSF_CL(nn.Module): """ Neural spline flow, coupling layer. [Durkan et al. 
2019] """ def __init__(self, dim, K = 5, B = 3, hidden_dim = 8, base_network = FCNN): super().__init__() self.dim = dim self.K = K self.B = B self.f1 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) self.f2 = base_network(dim // 2, (3 * K - 1) * dim // 2, hidden_dim) def forward(self, x): log_det = torch.zeros(x.shape[0]) lower, upper = x[:, :self.dim // 2], x[:, self.dim // 2:] out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=False, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det def backward(self, z): log_det = torch.zeros(z.shape[0]) lower, upper = z[:, :self.dim // 2], z[:, self.dim // 2:] out = self.f2(upper).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) lower, ld = unconstrained_RQS( lower, W, H, D, inverse=True, tail_bound=self.B) log_det += torch.sum(ld, dim = 1) out = self.f1(lower).reshape(-1, self.dim // 2, 3 * self.K - 1) W, H, D = torch.split(out, self.K, dim = 2) W, H = torch.softmax(W, dim = 2), torch.softmax(H, dim = 2) W, H = 2 * self.B * W, 2 * self.B * H D = F.softplus(D) upper, ld = unconstrained_RQS( upper, W, H, D, inverse = True, tail_bound = self.B) log_det += torch.sum(ld, dim = 1) return torch.cat([lower, upper], dim = 1), log_det
35.312997
80
0.533539
1,962
13,313
3.504587
0.103976
0.031414
0.027196
0.020942
0.718877
0.663031
0.620419
0.612711
0.551338
0.522251
0
0.027957
0.312176
13,313
376
81
35.406915
0.722835
0.053857
0
0.542857
0
0
0.005503
0
0
0
0
0
0
1
0.103571
false
0
0.032143
0.003571
0.221429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07d1d3b232129762c8438710d6931bc551a36122
5,286
py
Python
ioos_qc/config_creator/fx_parser.py
HakaiInstitute/ioos_qc
dfb28ee404a17c8355747b792fba0471093953c4
[ "Apache-2.0" ]
null
null
null
ioos_qc/config_creator/fx_parser.py
HakaiInstitute/ioos_qc
dfb28ee404a17c8355747b792fba0471093953c4
[ "Apache-2.0" ]
null
null
null
ioos_qc/config_creator/fx_parser.py
HakaiInstitute/ioos_qc
dfb28ee404a17c8355747b792fba0471093953c4
[ "Apache-2.0" ]
1
2021-01-20T23:20:06.000Z
2021-01-20T23:20:06.000Z
# module pyparsing.py
#
# Copyright (c) 2003-2019  Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from pyparsing import (
    Literal,
    Word,
    Group,
    Forward,
    alphas,
    alphanums,
    Regex,
    CaselessKeyword,
    Suppress,
    delimitedList,
)
import math
import operator

# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
opn = {
    "+": operator.add,
    "-": operator.sub,
    "*": operator.mul,
    "/": operator.truediv,
    "^": operator.pow,
}

fn = {
    "sin": math.sin,
    "cos": math.cos,
    "tan": math.tan,
    "exp": math.exp,
    "abs": abs,
    "trunc": lambda a: int(a),
    "round": round,
    "sgn": lambda a: -1 if a < -epsilon else 1 if a > epsilon else 0,
}

exprStack = []


def push_first(toks):
    exprStack.append(toks[0])


def push_unary_minus(toks):
    for t in toks:
        if t == "-":
            exprStack.append("unary -")
        else:
            break


def BNF():
    """
    expop   :: '^'
    multop  :: '*' | '/'
    addop   :: '+' | '-'
    integer :: ['+' | '-'] '0'..'9'+
    atom    :: PI | E | real | fn '(' expr ')' | '(' expr ')'
    factor  :: atom [ expop factor ]*
    term    :: factor [ multop factor ]*
    expr    :: term [ addop term ]*
    """
    # use CaselessKeyword for e and pi, to avoid accidentally matching
    # functions that start with 'e' or 'pi' (such as 'exp'); Keyword
    # and CaselessKeyword only match whole words
    e = CaselessKeyword("E")
    pi = CaselessKeyword("PI")
    # fnumber = Combine(Word("+-"+nums, nums) +
    #                   Optional("." + Optional(Word(nums))) +
    #                   Optional(e + Word("+-"+nums, nums)))
    # or use provided pyparsing_common.number, but convert back to str:
    # fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
    fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
    ident = Word(alphas, alphanums + "_$")

    plus, minus, mult, div = map(Literal, "+-*/")
    lpar, rpar = map(Suppress, "()")
    addop = plus | minus
    multop = mult | div
    expop = Literal("^")

    expr = Forward()
    expr_list = delimitedList(Group(expr))
    # add parse action that replaces the function identifier with a
    # (name, number of args) tuple
    fn_call = (ident + lpar - Group(expr_list) + rpar).setParseAction(
        lambda t: t.insert(0, (t.pop(0), len(t[0])))
    )
    atom = (
        addop[...]
        + (
            (fn_call | pi | e | fnumber | ident).setParseAction(push_first)
            | Group(lpar + expr + rpar)
        )
    ).setParseAction(push_unary_minus)

    # by defining exponentiation as "atom [ ^ factor ]..." instead of
    # "atom [ ^ atom ]...", we get right-to-left exponents, instead of
    # left-to-right; that is, 2^3^2 = 2^(3^2), not (2^3)^2.
    factor = Forward()
    factor <<= atom + (expop + factor).setParseAction(push_first)[...]
    term = factor + (multop + factor).setParseAction(push_first)[...]
    expr <<= term + (addop + term).setParseAction(push_first)[...]
    bnf = expr
    return bnf


def evaluate_stack(s, stats):
    op, num_args = s.pop(), 0
    if isinstance(op, tuple):
        op, num_args = op
    if op == "unary -":
        return -evaluate_stack(s, stats)
    if op in "+-*/^":
        # note: operands are pushed onto the stack in reverse order
        op2 = evaluate_stack(s, stats)
        op1 = evaluate_stack(s, stats)
        return opn[op](op1, op2)
    elif op == "PI":
        return math.pi  # 3.1415926535
    elif op == "E":
        return math.e  # 2.718281828
    elif op == "mean":
        return stats['mean']
    elif op == "min":
        return stats['min']
    elif op == "max":
        return stats['max']
    elif op == "std":
        return stats['std']
    elif op in fn:
        # note: args are pushed onto the stack in reverse order
        args = reversed([evaluate_stack(s, stats) for _ in range(num_args)])
        return fn[op](*args)
    elif op[0].isalpha():
        raise Exception("invalid identifier '%s'" % op)
    else:
        return float(op)


def eval_fx(fx, stats):
    """Given fx and stats ('min', 'max', 'mean', 'std') return the result"""
    exprStack[:] = []  # reset the module-level stack between calls
    _ = BNF().parseString(fx, parseAll=True)
    val = evaluate_stack(exprStack[:], stats)
    return val
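A hedged usage sketch for eval_fx above (the stats values are made up; mean, min, max, and std are the identifiers the evaluator resolves from the stats dict):

    stats = {"min": 0.0, "max": 10.0, "mean": 5.0, "std": 2.0}
    print(eval_fx("mean + 2 * std", stats))   # -> 9.0
    print(eval_fx("min - std ^ 2", stats))    # -> -4.0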
31.278107
112
0.600643
682
5,286
4.615836
0.379765
0.027954
0.022236
0.030178
0.031766
0.022236
0.022236
0.022236
0
0
0
0.014545
0.258608
5,286
168
113
31.464286
0.788722
0.439274
0
0.019608
0
0
0.052833
0.012165
0
0
0
0
0
1
0.04902
false
0
0.029412
0
0.196078
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07d2a6693b06381833978cbf344d430f2c2b518b
4,539
py
Python
src/util/util.py
ashleylst/DSDmodel
4276c832e0335539aef2ae2b33e23719957a3f08
[ "MIT" ]
1
2021-07-26T17:30:16.000Z
2021-07-26T17:30:16.000Z
src/util/util.py
ashleylst/DSDmodel
4276c832e0335539aef2ae2b33e23719957a3f08
[ "MIT" ]
null
null
null
src/util/util.py
ashleylst/DSDmodel
4276c832e0335539aef2ae2b33e23719957a3f08
[ "MIT" ]
2
2020-06-29T15:43:22.000Z
2021-05-12T08:25:03.000Z
from itertools import combinations
import copy


def get_reverse(n):
    if n == 1:
        return 0
    else:
        return 1


def get_edge_info(e):
    v = [0 for i in range(2)]
    n = [0 for i in range(2)]
    t = 0
    for x in e:
        v[t], n[t] = x
        t += 1
    return v, n


def sort_e_by_domain(val):
    return val[0][1]


def sort_by_strand(val):
    return val[0][0]


def check_edge_in_tuplelist(edge, tpl):
    for i in tpl:
        if edge in i:
            return True
    return False


def compare(a, b):
    # old-style cmp(): -1, 0, or 1
    return (a > b) - (a < b)


def flip(i):
    # toggle a 0/1 flag
    if i == 0:
        i = 1
    elif i == 1:
        i = 0
    return i


def get_free_domains(limits, blocks, bound):
    limits = sorted(limits)
    interval = limits[1] - limits[0]
    for i in blocks:
        if limits[1] > i > limits[0]:
            tmp = abs(bound - i)
            if tmp < interval:
                interval = tmp
    return interval


def get_combinations(oldlen, newlen, cursor, indexlist):
    combold = list(combinations(indexlist[cursor:oldlen], 2))
    combself = [(i, i) for i in range(0, oldlen)]
    combnew = []
    if oldlen != newlen:
        for i in range(0, oldlen):
            for j in range(oldlen, newlen):
                combnew.append((i, j))
    return combold + combnew + combself


def get_migrate_nodes(edges, indices, startstrand):
    d = []
    for i in indices:
        vi, ni = get_edge_info(edges[i][0])
        if vi[0] == startstrand:
            d.append(ni[0])
        else:
            d.append(ni[1])
    d.sort()
    return d


def check_following_migration(edges, p=0):
    """
    :param edges: list of edges to group
    :param p: which element of each edge to inspect
    :return: list of groups of edge indices whose endpoints advance
        by one domain at a time (consecutive migration steps)
    """
    e = copy.copy(edges)
    visited = [False for _ in e]
    miggroup = []
    cnt = -1

    for i in range(0, len(e)):
        if visited[i]:
            continue
        e[i] = list(e[i])
        e[i][p] = list(e[i][p])
        t1 = sorted(e[i][p], key=lambda tup: tup[0])
        if not visited[i]:
            visited[i] = True
            miggroup.append([i])
            cnt += 1
        for j in range(0, len(e)):
            if j != i and not visited[j]:
                e[j] = list(e[j])
                e[j][p] = list(e[j][p])
                t2 = sorted(e[j][p], key=lambda tup: tup[0])
                if (t2[0][0] != t1[0][0]) or (t2[1][0] != t1[1][0]):
                    continue
                for num in range(0, len(miggroup[cnt])):
                    t1 = sorted(e[miggroup[cnt][num]][p],
                                key=lambda tup: tup[0])
                    if (t1[0][1] + 1 == t2[0][1] and t1[1][1] - 1 == t2[1][1]) \
                            or (t1[0][1] - 1 == t2[0][1] and t1[1][1] + 1 == t2[1][1]):
                        visited[j] = True
                        miggroup[cnt].append(j)
                        break
    return miggroup


def get_absdist(domain1, domain2):
    """
    :param domain1: (strand, position) tuple
    :param domain2: (strand, position) tuple
    :return: absolute distance between the two positions
    """
    return abs(domain1[1] - domain2[1])


def get_closet_domain_to_target(target, domains):
    """
    :param target: domain to measure against
    :param domains: candidate domains
    :return: the candidate closest to target
    """
    closet = 10000
    closetd = ()
    for i in domains:
        dist = get_absdist(i, target)
        if dist < closet:
            closet = dist
            closetd = i
    return closetd


def get_domains_on_2sides(target1, target2, domains1, domains2):
    """
    :param target1: first target domain
    :param target2: second target domain
    :param domains1: domains on one side
    :param domains2: domains on the other side
    :return: the closest domain on each side
    """
    # assumes one of the targets shares a strand with each domain list
    if target1[0] == domains1[0][0]:
        closetd1 = get_closet_domain_to_target(target1, domains1)
    elif target2[0] == domains1[0][0]:
        closetd1 = get_closet_domain_to_target(target2, domains1)

    if target1[0] == domains2[0][0]:
        closetd2 = get_closet_domain_to_target(target1, domains2)
    elif target2[0] == domains2[0][0]:
        closetd2 = get_closet_domain_to_target(target2, domains2)

    return closetd1, closetd2


def get_closest_target(domains, targets):
    """
    :return: the target closest to either end of the sorted domains
    """
    domains = sorted(domains, key=lambda tup: tup[1])
    mindist = 10000
    mint = None
    for t in targets:
        dist = min(get_absdist(t, domains[0]),
                   get_absdist(t, domains[len(domains) - 1]))
        if dist < mindist:
            mindist = dist
            mint = t
    return mint


def check_continuity(a, b):
    for i in a:
        for j in b:
            if i + 1 == j or i - 1 == j:
                return i, j
    return None


def check_bond_existence(d1, d2, l1, l2):
    for i in range(len(l1)):
        if d1 == l1[i] and d2 == l2[i]:
            return True
    return False
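A few hedged, self-checking examples for the helpers above (inputs are illustrative, not from the source):

    assert compare(3, 5) == -1 and compare(5, 5) == 0    # old-style cmp()
    assert flip(0) == 1 and flip(1) == 0                 # toggle a 0/1 flag
    assert check_continuity([1, 4], [2, 9]) == (1, 2)    # first adjacent pair
    assert get_combinations(2, 3, 0, [0, 1]) == \
        [(0, 1), (0, 2), (1, 2), (0, 0), (1, 1)]         # old-old, old-new, self pairs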
22.695
89
0.516193
638
4,539
3.589342
0.181818
0.019214
0.028821
0.028821
0.199127
0.168122
0.116594
0.091703
0.091703
0.091703
0
0.0524
0.348315
4,539
199
90
22.809045
0.721771
0.041199
0
0.061069
0
0
0
0
0
0
0
0
0
1
0.129771
false
0
0.015267
0.022901
0.305344
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07d6951331a7c029a2698e9098ae813a023bef49
10,729
py
Python
ironic/drivers/modules/ilo/raid.py
armohamm/ironic
21093ca886ed736a7a25bf5e71e05d41e132fd2f
[ "Apache-2.0" ]
2
2019-06-17T21:37:53.000Z
2020-07-11T03:58:39.000Z
ironic/drivers/modules/ilo/raid.py
armohamm/ironic
21093ca886ed736a7a25bf5e71e05d41e132fd2f
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
ironic/drivers/modules/ilo/raid.py
armohamm/ironic
21093ca886ed736a7a25bf5e71e05d41e132fd2f
[ "Apache-2.0" ]
6
2019-06-13T12:49:33.000Z
2021-04-17T16:33:19.000Z
# Copyright 2018 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iLO5 RAID specific methods
"""

from ironic_lib import metrics_utils
from oslo_log import log as logging
from oslo_utils import importutils

from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import raid
from ironic.common import states
from ironic.conductor import utils as manager_utils
from ironic import conf
from ironic.drivers import base
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules.ilo import common as ilo_common

LOG = logging.getLogger(__name__)
CONF = conf.CONF
METRICS = metrics_utils.get_metrics_logger(__name__)

ilo_error = importutils.try_import('proliantutils.exception')


class Ilo5RAID(base.RAIDInterface):
    """Implementation of OOB RAIDInterface for iLO5."""

    def get_properties(self):
        """Return the properties of the interface."""
        return ilo_common.REQUIRED_PROPERTIES

    def _set_clean_failed(self, task, msg, exc):
        LOG.error("RAID configuration job failed for node %(node)s. "
                  "Message: '%(message)s'.",
                  {'node': task.node.uuid, 'message': msg})
        task.node.last_error = msg
        task.process_event('fail')

    def _set_driver_internal_true_value(self, task, *keys):
        driver_internal_info = task.node.driver_internal_info
        for key in keys:
            driver_internal_info[key] = True
        task.node.driver_internal_info = driver_internal_info
        task.node.save()

    def _set_driver_internal_false_value(self, task, *keys):
        driver_internal_info = task.node.driver_internal_info
        for key in keys:
            driver_internal_info[key] = False
        task.node.driver_internal_info = driver_internal_info
        task.node.save()

    def _pop_driver_internal_values(self, task, *keys):
        driver_internal_info = task.node.driver_internal_info
        for key in keys:
            driver_internal_info.pop(key, None)
        task.node.driver_internal_info = driver_internal_info
        task.node.save()

    def _prepare_for_read_raid(self, task, raid_step):
        deploy_opts = deploy_utils.build_agent_options(task.node)
        task.driver.boot.prepare_ramdisk(task, deploy_opts)
        manager_utils.node_power_action(task, states.REBOOT)
        if raid_step == 'create_raid':
            self._set_driver_internal_true_value(
                task, 'ilo_raid_create_in_progress')
        else:
            self._set_driver_internal_true_value(
                task, 'ilo_raid_delete_in_progress')
        self._set_driver_internal_true_value(task, 'cleaning_reboot')
        self._set_driver_internal_false_value(task, 'skip_current_clean_step')

    @METRICS.timer('Ilo5RAID.create_configuration')
    @base.clean_step(priority=0, abortable=False, argsinfo={
        'create_root_volume': {
            'description': (
                'This specifies whether to create the root volume. '
                'Defaults to `True`.'
            ),
            'required': False
        },
        'create_nonroot_volumes': {
            'description': (
                'This specifies whether to create the non-root volumes. '
                'Defaults to `True`.'
            ),
            'required': False
        }
    })
    def create_configuration(self, task, create_root_volume=True,
                             create_nonroot_volumes=True):
        """Create a RAID configuration on a bare metal using agent ramdisk.

        This method creates a RAID configuration on the given node.

        :param task: a TaskManager instance.
        :param create_root_volume: If True, a root volume is created
            during RAID configuration. Otherwise, no root volume is
            created. Default is True.
        :param create_nonroot_volumes: If True, non-root volumes are
            created. If False, no non-root volumes are created. Default
            is True.
        :raises: MissingParameterValue, if node.target_raid_config is missing
            or was found to be empty after skipping root volume and/or
            non-root volumes.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        node = task.node
        target_raid_config = raid.filter_target_raid_config(
            node, create_root_volume=create_root_volume,
            create_nonroot_volumes=create_nonroot_volumes)
        driver_internal_info = node.driver_internal_info
        driver_internal_info['target_raid_config'] = target_raid_config
        LOG.debug("Calling OOB RAID create_configuration for node %(node)s "
                  "with the following target RAID configuration: %(target)s",
                  {'node': node.uuid, 'target': target_raid_config})
        ilo_object = ilo_common.get_ilo_object(node)

        try:
            # Raid configuration in progress, checking status
            if not driver_internal_info.get('ilo_raid_create_in_progress'):
                ilo_object.create_raid_configuration(target_raid_config)
                self._prepare_for_read_raid(task, 'create_raid')
                return states.CLEANWAIT
            else:
                # Raid configuration is done, updating raid_config
                raid_conf = (
                    ilo_object.read_raid_configuration(
                        raid_config=target_raid_config))
                if len(raid_conf['logical_disks']):
                    raid.update_raid_info(node, raid_conf)
                    LOG.debug("Node %(uuid)s raid create clean step is done.",
                              {'uuid': node.uuid})
                    self._pop_driver_internal_values(
                        task, 'ilo_raid_create_in_progress',
                        'cleaning_reboot', 'skip_current_clean_step')
                    node.driver_internal_info = driver_internal_info
                    node.save()
                else:
                    # Raid configuration failed
                    msg = "Unable to create raid"
                    self._pop_driver_internal_values(
                        task, 'ilo_raid_create_in_progress',
                        'cleaning_reboot', 'skip_current_clean_step')
                    node.driver_internal_info = driver_internal_info
                    node.save()
                    raise exception.NodeCleaningFailure(
                        "Clean step create_configuration failed "
                        "on node %(node)s with error: %(err)s" %
                        {'node': node.uuid, 'err': msg})
        except ilo_error.IloError as ilo_exception:
            operation = (_("Failed to create raid configuration on node %s")
                         % node.uuid)
            self._pop_driver_internal_values(task,
                                             'ilo_raid_create_in_progress',
                                             'cleaning_reboot',
                                             'skip_current_clean_step')
            node.driver_internal_info = driver_internal_info
            node.save()
            self._set_clean_failed(task, operation, ilo_exception)

    @METRICS.timer('Ilo5RAID.delete_configuration')
    @base.clean_step(priority=0, abortable=False)
    def delete_configuration(self, task):
        """Delete the RAID configuration.

        :param task: a TaskManager instance containing the node to act on.
        :raises: NodeCleaningFailure, on failure to execute step.
        """
        node = task.node
        LOG.debug("OOB RAID delete_configuration invoked for node %s.",
                  node.uuid)
        driver_internal_info = node.driver_internal_info
        ilo_object = ilo_common.get_ilo_object(node)

        try:
            # Raid configuration in progress, checking status
            if not driver_internal_info.get('ilo_raid_delete_in_progress'):
                ilo_object.delete_raid_configuration()
                self._prepare_for_read_raid(task, 'delete_raid')
                return states.CLEANWAIT
            else:
                # Raid configuration is done, updating raid_config
                raid_conf = ilo_object.read_raid_configuration()
                if not len(raid_conf['logical_disks']):
                    node.raid_config = {}
                    LOG.debug("Node %(uuid)s raid delete clean step is done.",
                              {'uuid': node.uuid})
                    self._pop_driver_internal_values(
                        task, 'ilo_raid_delete_in_progress',
                        'cleaning_reboot', 'skip_current_clean_step')
                    node.driver_internal_info = driver_internal_info
                    node.save()
                else:
                    # Raid configuration failed
                    msg = ("Unable to delete these logical disks: %s"
                           % raid_conf['logical_disks'])
                    self._pop_driver_internal_values(
                        task, 'ilo_raid_delete_in_progress',
                        'cleaning_reboot', 'skip_current_clean_step')
                    node.driver_internal_info = driver_internal_info
                    node.save()
                    raise exception.NodeCleaningFailure(
                        "Clean step delete_configuration failed "
                        "on node %(node)s with error: %(err)s" %
                        {'node': node.uuid, 'err': msg})
        except ilo_error.IloLogicalDriveNotFoundError:
            LOG.info("No logical drive found to delete on node %(node)s",
                     {'node': node.uuid})
        except ilo_error.IloError as ilo_exception:
            operation = (_("Failed to delete raid configuration on node %s")
                         % node.uuid)
            self._pop_driver_internal_values(task,
                                             'ilo_raid_delete_in_progress',
                                             'cleaning_reboot',
                                             'skip_current_clean_step')
            node.driver_internal_info = driver_internal_info
            node.save()
            self._set_clean_failed(task, operation, ilo_exception)
45.461864
79
0.609004
1,203
10,729
5.143807
0.184539
0.106335
0.098901
0.049774
0.535068
0.476891
0.456529
0.421623
0.405785
0.392534
0
0.002322
0.317737
10,729
235
80
45.655319
0.843033
0.164321
0
0.47093
0
0
0.188168
0.060751
0
0
0
0
0
1
0.046512
false
0
0.075581
0
0.145349
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07d9159827c315605286d46a4f7de494b7d7489e
3,251
py
Python
examples/dump-properties.py
zachjweiner/pyopencl
4e2e4f3150c331680e6d9e36c59290411e4a0c40
[ "Apache-2.0" ]
7
2020-11-23T09:26:17.000Z
2021-01-26T08:23:30.000Z
examples/dump-properties.py
zachjweiner/pyopencl
4e2e4f3150c331680e6d9e36c59290411e4a0c40
[ "Apache-2.0" ]
null
null
null
examples/dump-properties.py
zachjweiner/pyopencl
4e2e4f3150c331680e6d9e36c59290411e4a0c40
[ "Apache-2.0" ]
1
2020-12-14T05:12:10.000Z
2020-12-14T05:12:10.000Z
import pyopencl as cl
from optparse import OptionParser

parser = OptionParser()
parser.add_option("-s", "--short", action="store_true",
                  help="don't print all device properties")

(options, args) = parser.parse_args()


def print_info(obj, info_cls):
    for info_name in sorted(dir(info_cls)):
        if not info_name.startswith("_") and info_name != "to_string":
            info = getattr(info_cls, info_name)

            try:
                info_value = obj.get_info(info)
            except:
                info_value = "<error>"

            if (info_cls == cl.device_info
                    and info_name == "PARTITION_TYPES_EXT"
                    and isinstance(info_value, list)):
                print("{}: {}".format(info_name, [
                    cl.device_partition_property_ext.to_string(
                        v, "<unknown device partition property %d>")
                    for v in info_value]))
            else:
                try:
                    print(f"{info_name}: {info_value}")
                except:
                    print("%s: <error>" % info_name)


for platform in cl.get_platforms():
    print(75 * "=")
    print(platform)
    print(75 * "=")
    if not options.short:
        print_info(platform, cl.platform_info)

    for device in platform.get_devices():
        if not options.short:
            print(75 * "-")
        print(device)
        if not options.short:
            print(75 * "-")
            print_info(device, cl.device_info)

        ctx = cl.Context([device])
        for mf in [
                cl.mem_flags.READ_ONLY,
                #cl.mem_flags.READ_WRITE,
                #cl.mem_flags.WRITE_ONLY
                ]:
            for itype in [
                    cl.mem_object_type.IMAGE2D,
                    cl.mem_object_type.IMAGE3D
                    ]:
                try:
                    formats = cl.get_supported_image_formats(ctx, mf, itype)
                except:
                    formats = "<error>"
                else:
                    def str_chd_type(chdtype):
                        result = cl.channel_type.to_string(
                            chdtype, "<unknown channel data type %d>")

                        result = result.replace("_INT", "")
                        result = result.replace("UNSIGNED", "U")
                        result = result.replace("SIGNED", "S")
                        result = result.replace("NORM", "N")
                        result = result.replace("FLOAT", "F")

                        return result

                    formats = ", ".join(
                        "{}-{}".format(
                            cl.channel_order.to_string(
                                iform.channel_order,
                                "<unknown channel order 0x%x>"),
                            str_chd_type(iform.channel_data_type))
                        for iform in formats)

                print("{} {} FORMATS: {}\n".format(
                    cl.mem_object_type.to_string(itype),
                    cl.mem_flags.to_string(mf),
                    formats))
        del ctx
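A hedged usage note for the example script above (invocations are illustrative):

    # The script runs at import time; typical invocations:
    #   python dump-properties.py           -> full dump of all platforms/devices
    #   python dump-properties.py --short   -> platform/device names only
    # print_info() is also reusable on its own, e.g.:
    #   import pyopencl as cl
    #   ctx = cl.create_some_context()
    #   print_info(ctx.devices[0], cl.device_info)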
38.702381
83
0.450015
311
3,251
4.488746
0.318328
0.045845
0.068052
0.036533
0.057307
0.041547
0.041547
0
0
0
0
0.006135
0.448477
3,251
83
84
39.168675
0.772448
0.014457
0
0.239437
0
0
0.091818
0
0
0
0
0
0
1
0.028169
false
0
0.028169
0
0.070423
0.197183
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07da573d0a32afbfa2ea160efcad1a4e21eef4c3
42,106
py
Python
interfaces/acados_template/acados_template/acados_ocp_solver.py
jgillis/acados
3119e2dda636a8358fbd52247eb0163a167cbc97
[ "BSD-2-Clause" ]
1
2020-07-30T10:54:35.000Z
2020-07-30T10:54:35.000Z
interfaces/acados_template/acados_template/acados_ocp_solver.py
jgillis/acados
3119e2dda636a8358fbd52247eb0163a167cbc97
[ "BSD-2-Clause" ]
null
null
null
interfaces/acados_template/acados_template/acados_ocp_solver.py
jgillis/acados
3119e2dda636a8358fbd52247eb0163a167cbc97
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: future_fstrings -*- # # Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, # Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, # Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, # Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl # # This file is part of acados. # # The 2-Clause BSD License # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE.; # import sys, os, json import numpy as np from ctypes import * from casadi import CasadiMeta, Function, SX from copy import deepcopy from .generate_c_code_explicit_ode import generate_c_code_explicit_ode from .generate_c_code_implicit_ode import generate_c_code_implicit_ode from .generate_c_code_gnsf import generate_c_code_gnsf from .generate_c_code_constraint import generate_c_code_constraint from .generate_c_code_nls_cost import generate_c_code_nls_cost from .generate_c_code_external_cost import generate_c_code_external_cost from .acados_ocp import AcadosOcp from .acados_model import acados_model_strip_casadi_symbolics from .utils import is_column, is_empty, casadi_length, render_template, acados_class2dict,\ format_class_dict, ocp_check_against_layout, np_array_to_list, make_model_consistent,\ set_up_imported_gnsf_model def make_ocp_dims_consistent(acados_ocp): dims = acados_ocp.dims cost = acados_ocp.cost constraints = acados_ocp.constraints model = acados_ocp.model opts = acados_ocp.solver_options # nx if is_column(model.x): dims.nx = casadi_length(model.x) else: raise Exception('model.x should be column vector!') # nu if is_empty(model.u): dims.nu = 0 else: dims.nu = casadi_length(model.u) # nz if is_empty(model.z): dims.nz = 0 else: dims.nz = casadi_length(model.z) # np if is_empty(model.p): dims.np = 0 else: dims.np = casadi_length(model.p) if acados_ocp.parameter_values.shape[0] != dims.np: raise Exception('inconsistent dimension np, regarding model.p and parameter_values.') ## cost # path if cost.cost_type == 'LINEAR_LS': ny = cost.W.shape[0] if cost.Vx.shape[0] != ny or cost.Vu.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu.' + \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}]\n') if dims.nz != 0 and cost.Vz.shape[0] != ny: raise Exception('inconsistent dimension ny, regarding W, Vx, Vu, Vz.' 
+ \ f'\nGot W[{cost.W.shape}], Vx[{cost.Vx.shape}], Vu[{cost.Vu.shape}], Vz[{cost.Vz.shape}]\n') if cost.Vx.shape[1] != dims.nx and ny != 0: raise Exception('inconsistent dimension: Vx should have nx columns.') if cost.Vu.shape[1] != dims.nu and ny != 0: raise Exception('inconsistent dimension: Vu should have nu columns.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny elif cost.cost_type == 'NONLINEAR_LS': ny = cost.W.shape[0] if is_empty(model.cost_y_expr) and ny != 0: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') elif casadi_length(model.cost_y_expr) != ny: raise Exception('inconsistent dimension ny: regarding W, cost_y_expr.') if cost.yref.shape[0] != ny: raise Exception('inconsistent dimension: regarding W, yref.' + \ f'\nGot W[{cost.W.shape}], yref[{cost.yref.shape}]\n') dims.ny = ny # terminal if cost.cost_type_e == 'LINEAR_LS': ny_e = cost.W_e.shape[0] if cost.Vx_e.shape[0] != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.' + \ f'\nGot W_e[{cost.W_e.shape}], Vx_e[{cost.Vx_e.shape}]') if cost.Vx_e.shape[1] != dims.nx and ny_e != 0: raise Exception('inconsistent dimension: Vx_e should have nx columns.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e elif cost.cost_type_e == 'NONLINEAR_LS': ny_e = cost.W_e.shape[0] if is_empty(model.cost_y_expr_e) and ny_e != 0: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') elif casadi_length(model.cost_y_expr_e) != ny_e: raise Exception('inconsistent dimension ny_e: regarding W_e, cost_y_expr_e.') if cost.yref_e.shape[0] != ny_e: raise Exception('inconsistent dimension: regarding W_e, yref_e.') dims.ny_e = ny_e ## constraints # initial if (constraints.lbx_0 == [] and constraints.ubx_0 == []): dims.nbx_0 = 0 else: this_shape = constraints.lbx_0.shape other_shape = constraints.ubx_0.shape if not this_shape == other_shape: raise Exception('lbx_0, ubx_0 have different shapes!') if not is_column(constraints.lbx_0): raise Exception('lbx_0, ubx_0 must be column vectors!') dims.nbx_0 = constraints.lbx_0.size if all(constraints.lbx_0 == constraints.ubx_0): dims.nbxe_0 = dims.nbx_0 # path nbx = constraints.idxbx.shape[0] if constraints.ubx.shape[0] != nbx or constraints.lbx.shape[0] != nbx: raise Exception('inconsistent dimension nbx, regarding idxbx, ubx, lbx.') else: dims.nbx = nbx nbu = constraints.idxbu.shape[0] if constraints.ubu.shape[0] != nbu or constraints.lbu.shape[0] != nbu: raise Exception('inconsistent dimension nbu, regarding idxbu, ubu, lbu.') else: dims.nbu = nbu ng = constraints.lg.shape[0] if constraints.ug.shape[0] != ng or constraints.C.shape[0] != ng \ or constraints.D.shape[0] != ng: raise Exception('inconsistent dimension ng, regarding lg, ug, C, D.') else: dims.ng = ng if not is_empty(model.con_h_expr): nh = casadi_length(model.con_h_expr) else: nh = 0 if constraints.uh.shape[0] != nh or constraints.lh.shape[0] != nh: raise Exception('inconsistent dimension nh, regarding lh, uh, con_h_expr.') else: dims.nh = nh if is_empty(model.con_phi_expr): dims.nphi = 0 dims.nr = 0 else: dims.nphi = casadi_length(model.con_phi_expr) if is_empty(model.con_r_expr): raise Exception('convex over nonlinear constraints: con_r_expr but con_phi_expr is nonempty') else: dims.nr = casadi_length(model.con_r_expr) # terminal nbx_e = constraints.idxbx_e.shape[0] if 
constraints.ubx_e.shape[0] != nbx_e or constraints.lbx_e.shape[0] != nbx_e: raise Exception('inconsistent dimension nbx_e, regarding idxbx_e, ubx_e, lbx_e.') else: dims.nbx_e = nbx_e ng_e = constraints.lg_e.shape[0] if constraints.ug_e.shape[0] != ng_e or constraints.C_e.shape[0] != ng_e: raise Exception('inconsistent dimension ng_e, regarding_e lg_e, ug_e, C_e.') else: dims.ng_e = ng_e if not is_empty(model.con_h_expr_e): nh_e = casadi_length(model.con_h_expr_e) else: nh_e = 0 if constraints.uh_e.shape[0] != nh_e or constraints.lh_e.shape[0] != nh_e: raise Exception('inconsistent dimension nh_e, regarding lh_e, uh_e, con_h_expr_e.') else: dims.nh_e = nh_e if is_empty(model.con_phi_expr_e): dims.nphi_e = 0 dims.nr_e = 0 else: dims.nphi_e = casadi_length(model.con_phi_expr_e) if is_empty(model.con_r_expr_e): raise Exception('convex over nonlinear constraints: con_r_expr_e but con_phi_expr_e is nonempty') else: dims.nr_e = casadi_length(model.con_r_expr_e) # Slack dimensions nsbx = constraints.idxsbx.shape[0] if is_empty(constraints.lsbx): constraints.lsbx = np.zeros((nsbx,)) elif constraints.lsbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, lsbx.') if is_empty(constraints.usbx): constraints.usbx = np.zeros((nsbx,)) elif constraints.usbx.shape[0] != nsbx: raise Exception('inconsistent dimension nsbx, regarding idxsbx, usbx.') dims.nsbx = nsbx nsbu = constraints.idxsbu.shape[0] if is_empty(constraints.lsbu): constraints.lsbu = np.zeros((nsbu,)) elif constraints.lsbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, lsbu.') if is_empty(constraints.usbu): constraints.usbu = np.zeros((nsbu,)) elif constraints.usbu.shape[0] != nsbu: raise Exception('inconsistent dimension nsbu, regarding idxsbu, usbu.') dims.nsbu = nsbu nsh = constraints.idxsh.shape[0] if is_empty(constraints.lsh): constraints.lsh = np.zeros((nsh,)) elif constraints.lsh.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, lsh.') if is_empty(constraints.ush): constraints.ush = np.zeros((nsh,)) elif constraints.ush.shape[0] != nsh: raise Exception('inconsistent dimension nsh, regarding idxsh, ush.') dims.nsh = nsh nsphi = constraints.idxsphi.shape[0] if is_empty(constraints.lsphi): constraints.lsphi = np.zeros((nsphi,)) elif constraints.lsphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, lsphi.') if is_empty(constraints.usphi): constraints.usphi = np.zeros((nsphi,)) elif constraints.usphi.shape[0] != nsphi: raise Exception('inconsistent dimension nsphi, regarding idxsphi, usphi.') dims.nsphi = nsphi nsg = constraints.idxsg.shape[0] if is_empty(constraints.lsg): constraints.lsg = np.zeros((nsg,)) elif constraints.lsg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, lsg.') if is_empty(constraints.usg): constraints.usg = np.zeros((nsg,)) elif constraints.usg.shape[0] != nsg: raise Exception('inconsistent dimension nsg, regarding idxsg, usg.') dims.nsg = nsg ns = nsbx + nsbu + nsh + nsg + nsphi wrong_field = "" if cost.Zl.shape[0] != ns: wrong_field = "Zl" dim = cost.Zl.shape[0] elif cost.Zu.shape[0] != ns: wrong_field = "Zu" dim = cost.Zu.shape[0] elif cost.zl.shape[0] != ns: wrong_field = "zl" dim = cost.zl.shape[0] elif cost.zu.shape[0] != ns: wrong_field = "zu" dim = cost.zu.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns = {ns} = nsbx + nsbu + nsg + nsh + nsphi.\n\t'\ + f'With nsbx 
= {nsbx}, nsbu = {nsbu}, nsg = {nsg}, nsh = {nsh}, nsphi = {nsphi}') dims.ns = ns nsbx_e = constraints.idxsbx_e.shape[0] if is_empty(constraints.lsbx_e): constraints.lsbx_e = np.zeros((nsbx_e,)) elif constraints.lsbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, lsbx_e.') if is_empty(constraints.usbx_e): constraints.usbx_e = np.zeros((nsbx_e,)) elif constraints.usbx_e.shape[0] != nsbx_e: raise Exception('inconsistent dimension nsbx_e, regarding idxsbx_e, usbx_e.') dims.nsbx_e = nsbx_e nsh_e = constraints.idxsh_e.shape[0] if is_empty(constraints.lsh_e): constraints.lsh_e = np.zeros((nsh_e,)) elif constraints.lsh_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, lsh_e.') if is_empty(constraints.ush_e): constraints.ush_e = np.zeros((nsh_e,)) elif constraints.ush_e.shape[0] != nsh_e: raise Exception('inconsistent dimension nsh_e, regarding idxsh_e, ush_e.') dims.nsh_e = nsh_e nsg_e = constraints.idxsg_e.shape[0] if is_empty(constraints.lsg_e): constraints.lsg_e = np.zeros((nsg_e,)) elif constraints.lsg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, lsg_e.') if is_empty(constraints.usg_e): constraints.usg_e = np.zeros((nsg_e,)) elif constraints.usg_e.shape[0] != nsg_e: raise Exception('inconsistent dimension nsg_e, regarding idxsg_e, usg_e.') dims.nsg_e = nsg_e nsphi_e = constraints.idxsphi_e.shape[0] if is_empty(constraints.lsphi_e): constraints.lsphi_e = np.zeros((nsphi_e,)) elif constraints.lsphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, lsphi_e.') if is_empty(constraints.usphi_e): constraints.usphi_e = np.zeros((nsphi_e,)) elif constraints.usphi_e.shape[0] != nsphi_e: raise Exception('inconsistent dimension nsphi_e, regarding idxsphi_e, usphi_e.') dims.nsphi_e = nsphi_e # terminal ns_e = nsbx_e + nsh_e + nsg_e + nsphi_e wrong_field = "" if cost.Zl_e.shape[0] != ns_e: wrong_field = "Zl_e" dim = cost.Zl_e.shape[0] elif cost.Zu_e.shape[0] != ns_e: wrong_field = "Zu_e" dim = cost.Zu_e.shape[0] elif cost.zl_e.shape[0] != ns_e: wrong_field = "zl_e" dim = cost.zl_e.shape[0] elif cost.zu_e.shape[0] != ns_e: wrong_field = "zu_e" dim = cost.zu_e.shape[0] if wrong_field != "": raise Exception(f'Inconsistent size for field {wrong_field}, with dimension {dim}, \n\t'\ + f'Detected ns_e = {ns_e} = nsbx_e + nsg_e + nsh_e + nsphi_e.\n\t'\ + f'With nsbx_e = {nsbx_e}, nsg_e = {nsg_e}, nsh_e = {nsh_e}, nsphi_e = {nsphi_e}') dims.ns_e = ns_e # discretization if is_empty(opts.time_steps) and is_empty(opts.shooting_nodes): # uniform discretization opts.time_steps = opts.tf / dims.N * np.ones((dims.N,)) elif not is_empty(opts.shooting_nodes): if np.shape(opts.shooting_nodes)[0] != dims.N+1: raise Exception('inconsistent dimension N, regarding shooting_nodes.') time_steps = np.zeros((dims.N,)) for i in range(dims.N): time_steps[i] = opts.shooting_nodes[i+1] - opts.shooting_nodes[i] opts.time_steps = time_steps elif (not is_empty(opts.time_steps)) and (not is_empty(opts.shooting_nodes)): Exception('Please provide either time_steps or shooting_nodes for nonuniform discretization') tf = np.sum(opts.time_steps) if (tf - opts.tf) / tf > 1e-15: raise Exception(f'Inconsistent discretization: {opts.tf}'\ f' = tf != sum(opts.time_steps) = {tf}.') def get_ocp_nlp_layout(): current_module = sys.modules[__name__] acados_path = os.path.dirname(current_module.__file__) with open(acados_path + '/acados_layout.json', 'r') as f: ocp_nlp_layout = json.load(f) 
return ocp_nlp_layout def ocp_formulation_json_dump(acados_ocp, json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() # Copy input ocp object dictionary ocp_nlp_dict = dict(deepcopy(acados_ocp).__dict__) # TODO: maybe make one funciton with formatting for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue # setattr(ocp_nlp, acados_struct, dict(getattr(acados_ocp, acados_struct).__dict__)) # Copy ocp object attributes dictionaries ocp_nlp_dict[acados_struct]=dict(getattr(acados_ocp, acados_struct).__dict__) ocp_nlp_dict = format_class_dict(ocp_nlp_dict) # strip symbolics ocp_nlp_dict['model'] = acados_model_strip_casadi_symbolics(ocp_nlp_dict['model']) # strip shooting_nodes ocp_nlp_dict['solver_options'].pop('shooting_nodes', None) dims_dict = acados_class2dict(acados_ocp.dims) ocp_check_against_layout(ocp_nlp_dict, dims_dict) with open(json_file, 'w') as f: json.dump(ocp_nlp_dict, f, default=np_array_to_list, indent=4, sort_keys=True) def ocp_formulation_json_load(json_file='acados_ocp_nlp.json'): # Load acados_ocp_nlp structure description ocp_layout = get_ocp_nlp_layout() with open(json_file, 'r') as f: ocp_nlp_json = json.load(f) ocp_nlp_dict = json2dict(ocp_nlp_json, ocp_nlp_json['dims']) # Instantiate AcadosOcp object acados_ocp = AcadosOcp() # load class dict acados_ocp.__dict__ = ocp_nlp_dict # laod class attributes dict, dims, constraints, etc for acados_struct, v in ocp_layout.items(): # skip non dict attributes if not isinstance(v, dict): continue acados_attribute = getattr(acados_ocp, acados_struct) acados_attribute.__dict__ = ocp_nlp_dict[acados_struct] setattr(acados_ocp, acados_struct, acados_attribute) return acados_ocp def ocp_generate_external_functions(acados_ocp, model): model = make_model_consistent(model) if acados_ocp.solver_options.integrator_type == 'ERK': # explicit model -- generate C code generate_c_code_explicit_ode(model) elif acados_ocp.solver_options.integrator_type == 'IRK': # implicit model -- generate C code opts = dict(generate_hess=1) generate_c_code_implicit_ode(model, opts) elif acados_ocp.solver_options.integrator_type == 'GNSF': generate_c_code_gnsf(model) else: raise Exception("ocp_generate_external_functions: unknown integrator type.") if acados_ocp.solver_options.hessian_approx == 'EXACT': opts = dict(generate_hess=1) else: opts = dict(generate_hess=0) if acados_ocp.dims.nphi > 0 or acados_ocp.dims.nh > 0: generate_c_code_constraint(model, model.name, False, opts) if acados_ocp.dims.nphi_e > 0 or acados_ocp.dims.nh_e > 0: generate_c_code_constraint(model, model.name, True, opts) # dummy matrices if not acados_ocp.cost.cost_type == 'LINEAR_LS': acados_ocp.cost.Vx = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nx)) acados_ocp.cost.Vu = np.zeros((acados_ocp.dims.ny, acados_ocp.dims.nu)) if not acados_ocp.cost.cost_type_e == 'LINEAR_LS': acados_ocp.cost.Vx_e = np.zeros((acados_ocp.dims.ny_e, acados_ocp.dims.nx)) if acados_ocp.cost.cost_type == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, False) elif acados_ocp.cost.cost_type == 'EXTERNAL': generate_c_code_external_cost(model, False) if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': generate_c_code_nls_cost(model, model.name, True) elif acados_ocp.cost.cost_type_e == 'EXTERNAL': generate_c_code_external_cost(model, True) def ocp_render_templates(acados_ocp, json_file): name = acados_ocp.model.name # setting up loader and environment json_path = '{cwd}/{json_file}'.format( 
cwd=os.getcwd(), json_file=json_file) if not os.path.exists(json_path): raise Exception('{} not found!'.format(json_path)) template_dir = 'c_generated_code/' ## Render templates in_file = 'main.in.c' out_file = 'main_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.c' out_file = 'acados_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver.in.h' out_file = 'acados_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'Makefile.in' out_file = 'Makefile' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_solver_sfun.in.c' out_file = 'acados_solver_sfunction_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'make_sfun.in.m' out_file = 'make_sfun.m' render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.c' out_file = 'acados_sim_solver_{}.c'.format(name) render_template(in_file, out_file, template_dir, json_path) in_file = 'acados_sim_solver.in.h' out_file = 'acados_sim_solver_{}.h'.format(name) render_template(in_file, out_file, template_dir, json_path) ## folder model template_dir = 'c_generated_code/{}_model/'.format(name) in_file = 'model.in.h' out_file = '{}_model.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # constraints on convex over nonlinear function if acados_ocp.constraints.constr_type == 'BGP' and acados_ocp.dims.nphi > 0: # constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_constraint.in.h' out_file = '{}_phi_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal constraints on convex over nonlinear function if acados_ocp.constraints.constr_type_e == 'BGP' and acados_ocp.dims.nphi_e > 0: # terminal constraints on outer function template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'phi_e_constraint.in.h' out_file = '{}_phi_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear constraints if acados_ocp.constraints.constr_type == 'BGH' and acados_ocp.dims.nh > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_constraint.in.h' out_file = '{}_h_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear constraints if acados_ocp.constraints.constr_type_e == 'BGH' and acados_ocp.dims.nh_e > 0: template_dir = 'c_generated_code/{}_constraints/'.format(name) in_file = 'h_e_constraint.in.h' out_file = '{}_h_e_constraint.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # nonlinear cost function if acados_ocp.cost.cost_type == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_fun.in.h' out_file = '{}_cost_y_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # terminal nonlinear cost function if acados_ocp.cost.cost_type_e == 'NONLINEAR_LS': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'cost_y_e_fun.in.h' out_file = '{}_cost_y_e_fun.h'.format(name) render_template(in_file, out_file, template_dir, json_path) # external cost if acados_ocp.cost.cost_type == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost.in.h' out_file = '{}_external_cost.h'.format(name) render_template(in_file, out_file, template_dir, 
json_path) # external cost - terminal if acados_ocp.cost.cost_type_e == 'EXTERNAL': template_dir = 'c_generated_code/{}_cost/'.format(name) in_file = 'external_cost_e.in.h' out_file = '{}_external_cost_e.h'.format(name) render_template(in_file, out_file, template_dir, json_path) class AcadosOcpSolver: """ class to interact with the acados ocp solver C object """ def __init__(self, acados_ocp, json_file='acados_ocp_nlp.json'): self.solver_created = False model = acados_ocp.model # make dims consistent make_ocp_dims_consistent(acados_ocp) if acados_ocp.solver_options.integrator_type == 'GNSF': set_up_imported_gnsf_model(acados_ocp) # set integrator time automatically acados_ocp.solver_options.Tsim = acados_ocp.solver_options.time_steps[0] # generate external functions ocp_generate_external_functions(acados_ocp, model) # dump to json ocp_formulation_json_dump(acados_ocp, json_file) # render templates ocp_render_templates(acados_ocp, json_file) ## Compile solver os.chdir('c_generated_code') os.system('make clean_ocp_shared_lib') os.system('make ocp_shared_lib') os.chdir('..') self.shared_lib_name = 'c_generated_code/libacados_ocp_solver_' + model.name + '.so' # get self.shared_lib = CDLL(self.shared_lib_name) self.shared_lib.acados_create() self.solver_created = True self.shared_lib.acados_get_nlp_opts.restype = c_void_p self.nlp_opts = self.shared_lib.acados_get_nlp_opts() self.shared_lib.acados_get_nlp_dims.restype = c_void_p self.nlp_dims = self.shared_lib.acados_get_nlp_dims() self.shared_lib.acados_get_nlp_config.restype = c_void_p self.nlp_config = self.shared_lib.acados_get_nlp_config() self.shared_lib.acados_get_nlp_out.restype = c_void_p self.nlp_out = self.shared_lib.acados_get_nlp_out() self.shared_lib.acados_get_nlp_in.restype = c_void_p self.nlp_in = self.shared_lib.acados_get_nlp_in() self.shared_lib.acados_get_nlp_solver.restype = c_void_p self.nlp_solver = self.shared_lib.acados_get_nlp_solver() self.acados_ocp = acados_ocp def solve(self): """ solve the ocp with current input """ status = self.shared_lib.acados_solve() return status def get(self, stage_, field_): """ get the last solution of the solver: :param stage: integer corresponding to shooting node :param field_: string in ['x', 'u', 'z', 'pi', 'lam', 't', 'sl', 'su',] .. note:: regarding lam, t: \n the inequalities are internally organized in the following order: \n [ lbu lbx lg lh lphi ubu ubx ug uh uphi; \n lsbu lsbx lsg lsh lsphi usbu usbx usg ush usphi] .. note:: pi: multipliers for dynamics equality constraints \n lam: multipliers for inequalities \n t: slack variables corresponding to evaluation of all inequalities (at the solution) \n sl: slack variables of soft lower inequality constraints \n su: slack variables of soft upper inequality constraints \n """ out_fields = ['x', 'u', 'z', 'pi', 'lam', 't'] mem_fields = ['sl', 'su'] field = field_ field = field.encode('utf-8') if (field_ not in out_fields + mem_fields): raise Exception('AcadosOcpSolver.get(): {} is an invalid argument.\ \n Possible values are {}. 
                Exiting.'.format(field_, out_fields + mem_fields))

        self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p]
        self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int

        dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, stage_, field)

        out = np.ascontiguousarray(np.zeros((dims,)), dtype=np.float64)
        out_data = cast(out.ctypes.data, POINTER(c_double))

        if field_ in out_fields:
            self.shared_lib.ocp_nlp_out_get.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
            self.shared_lib.ocp_nlp_out_get(self.nlp_config, \
                self.nlp_dims, self.nlp_out, stage_, field, out_data)
        elif field_ in mem_fields:
            self.shared_lib.ocp_nlp_get_at_stage.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
            self.shared_lib.ocp_nlp_get_at_stage(self.nlp_config, \
                self.nlp_dims, self.nlp_solver, stage_, field, out_data)

        return out


    def print_statistics(self):
        stat = self.get_stats("statistics")

        if self.acados_ocp.solver_options.nlp_solver_type == 'SQP':
            print('\niter\tres_stat\tres_eq\t\tres_ineq\tres_comp\tqp_stat\tqp_iter')
            if stat.shape[0] > 7:
                print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
            for jj in range(stat.shape[1]):
                print('{:d}\t{:e}\t{:e}\t{:e}\t{:e}\t{:d}\t{:d}'.format(
                    int(stat[0][jj]), stat[1][jj], stat[2][jj],
                    stat[3][jj], stat[4][jj], int(stat[5][jj]), int(stat[6][jj])))
                if stat.shape[0] > 7:
                    print('\t{:e}\t{:e}\t{:e}\t{:e}'.format(
                        stat[7][jj], stat[8][jj], stat[9][jj], stat[10][jj]))
            print('\n')
        elif self.acados_ocp.solver_options.nlp_solver_type == 'SQP_RTI':
            print('\niter\tqp_stat\tqp_iter')
            if stat.shape[0] > 3:
                print('\tqp_res_stat\tqp_res_eq\tqp_res_ineq\tqp_res_comp')
            for jj in range(stat.shape[1]):
                print('{:d}\t{:d}\t{:d}'.format(
                    int(stat[0][jj]), int(stat[1][jj]), int(stat[2][jj])))
                if stat.shape[0] > 3:
                    print('\t{:e}\t{:e}\t{:e}\t{:e}'.format(
                        stat[3][jj], stat[4][jj], stat[5][jj], stat[6][jj]))
            print('\n')

        return


    def get_stats(self, field_):
        """
        Get the information of the last solver call:

            :param field_: string in ['statistics', 'time_tot', 'time_lin', 'time_sim',
                'time_sim_ad', 'time_sim_la', 'time_qp', 'time_qp_solver_call',
                'time_reg', 'sqp_iter']
        """
        fields = ['time_tot',             # total cpu time of previous call
                  'time_lin',             # cpu time for linearization
                  'time_sim',             # cpu time for integrator
                  'time_sim_ad',          # cpu time for integrator contribution of external function calls
                  'time_sim_la',          # cpu time for integrator contribution of linear algebra
                  'time_qp',              # cpu time qp solution
                  'time_qp_solver_call',  # cpu time inside qp solver (without converting the QP)
                  'time_qp_xcond',
                  'time_reg',             # cpu time regularization
                  'sqp_iter',             # number of SQP iterations
                  'statistics',           # table with info about last iteration
                  'stat_m',
                  'stat_n',
                 ]

        field = field_.encode('utf-8')
        if field_ not in fields:
            # report the offending field name, not the list of fields twice
            raise Exception('AcadosOcpSolver.get_stats(): {} is not a valid argument.'
                '\n Possible values are {}.\nExiting.'.format(field_, fields))

        if field_ in ['sqp_iter', 'stat_m', 'stat_n']:
            out = np.ascontiguousarray(np.zeros((1,)), dtype=np.int64)
            out_data = cast(out.ctypes.data, POINTER(c_int64))
        elif field_ == 'statistics':
            sqp_iter = self.get_stats("sqp_iter")
            stat_m = self.get_stats("stat_m")
            stat_n = self.get_stats("stat_n")
            min_size = min([stat_m, sqp_iter + 1])
            out = np.ascontiguousarray(
                np.zeros((stat_n[0] + 1, min_size[0])), dtype=np.float64)
            out_data = cast(out.ctypes.data, POINTER(c_double))
        else:
            out = np.ascontiguousarray(np.zeros((1,)), dtype=np.float64)
            out_data = cast(out.ctypes.data, POINTER(c_double))

        self.shared_lib.ocp_nlp_get.argtypes = [c_void_p, c_void_p, c_char_p, c_void_p]
        self.shared_lib.ocp_nlp_get(self.nlp_config, self.nlp_solver, field, out_data)

        return out


    # Note: this function should not be used anymore; prefer cost_set and constraints_set.
    def set(self, stage_, field_, value_):
        cost_fields = ['y_ref', 'yref']
        constraints_fields = ['lbx', 'ubx', 'lbu', 'ubu']
        out_fields = ['x', 'u', 'pi', 'lam', 't']

        # cast value_ to avoid conversion issues
        value_ = value_.astype(float)

        field = field_.encode('utf-8')
        stage = c_int(stage_)

        # treat parameters separately ('==' instead of 'is' for string comparison)
        if field_ == 'p':
            # three arguments are passed below, including the number of parameters
            self.shared_lib.acados_update_params.argtypes = \
                [c_int, POINTER(c_double), c_int]
            self.shared_lib.acados_update_params.restype = c_int
            value_data = cast(value_.ctypes.data, POINTER(c_double))
            self.shared_lib.acados_update_params(stage, value_data, value_.shape[0])
        else:
            if field_ not in constraints_fields + cost_fields + out_fields:
                raise Exception("AcadosOcpSolver.set(): {} is not a valid argument."
                    "\nPossible values are {}. Exiting.".format(field_,
                    constraints_fields + cost_fields + out_fields + ['p']))

            self.shared_lib.ocp_nlp_dims_get_from_attr.argtypes = \
                [c_void_p, c_void_p, c_void_p, c_int, c_char_p]
            self.shared_lib.ocp_nlp_dims_get_from_attr.restype = c_int

            dims = self.shared_lib.ocp_nlp_dims_get_from_attr(self.nlp_config, \
                self.nlp_dims, self.nlp_out, stage_, field)

            if value_.shape[0] != dims:
                msg = 'AcadosOcpSolver.set(): mismatching dimension for field "{}" '.format(field_)
                msg += 'with dimension {} (you have {})'.format(dims, value_.shape[0])
                raise Exception(msg)

            value_data = cast(value_.ctypes.data, POINTER(c_double))
            value_data_p = cast(value_data, c_void_p)

            if field_ in constraints_fields:
                self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_in, stage, field, value_data_p)
            elif field_ in cost_fields:
                self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_in, stage, field, value_data_p)
            elif field_ in out_fields:
                self.shared_lib.ocp_nlp_out_set.argtypes = \
                    [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
                self.shared_lib.ocp_nlp_out_set(self.nlp_config, \
                    self.nlp_dims, self.nlp_out, stage, field, value_data_p)

        return


    def cost_set(self, stage_, field_, value_):
        """
        Set numerical data in the cost module of the solver:

            :param stage_: integer corresponding to shooting node
            :param field_: string, e.g. 'yref', 'W', 'ext_cost_num_hess'
            :param value_: of appropriate size
        """
        # cast value_ to avoid conversion issues
        value_ = value_.astype(float)

        field = field_.encode('utf-8')
        stage = c_int(stage_)

        self.shared_lib.ocp_nlp_cost_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
        self.shared_lib.ocp_nlp_cost_dims_get_from_attr.restype = c_int

        dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
        dims_data = cast(dims.ctypes.data, POINTER(c_int))

        self.shared_lib.ocp_nlp_cost_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, stage_, field, dims_data)

        value_shape = value_.shape
        if len(value_shape) == 1:
            value_shape = (value_shape[0], 0)

        if value_shape != tuple(dims):
            raise Exception('AcadosOcpSolver.cost_set(): mismatching dimension'
                ' for field "{}" with dimension {} (you have {})'.format(
                    field_, tuple(dims), value_shape))

        value_data = cast(value_.ctypes.data, POINTER(c_double))
        value_data_p = cast(value_data, c_void_p)

        self.shared_lib.ocp_nlp_cost_model_set.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
        self.shared_lib.ocp_nlp_cost_model_set(self.nlp_config, \
            self.nlp_dims, self.nlp_in, stage, field, value_data_p)

        return


    def constraints_set(self, stage_, field_, value_):
        """
        Set numerical data in the constraint module of the solver:

            :param stage_: integer corresponding to shooting node
            :param field_: string, e.g. 'lbx'
            :param value_: of appropriate size
        """
        # cast value_ to avoid conversion issues
        value_ = value_.astype(float)

        field = field_.encode('utf-8')
        stage = c_int(stage_)

        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, POINTER(c_int)]
        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr.restype = c_int

        dims = np.ascontiguousarray(np.zeros((2,)), dtype=np.intc)
        dims_data = cast(dims.ctypes.data, POINTER(c_int))

        self.shared_lib.ocp_nlp_constraint_dims_get_from_attr(self.nlp_config, \
            self.nlp_dims, self.nlp_out, stage_, field, dims_data)

        value_shape = value_.shape
        if len(value_shape) == 1:
            value_shape = (value_shape[0], 0)

        if value_shape != tuple(dims):
            raise Exception('AcadosOcpSolver.constraints_set(): mismatching dimension'
                ' for field "{}" with dimension {} (you have {})'.format(
                    field_, tuple(dims), value_shape))

        value_data = cast(value_.ctypes.data, POINTER(c_double))
        value_data_p = cast(value_data, c_void_p)

        self.shared_lib.ocp_nlp_constraints_model_set.argtypes = \
            [c_void_p, c_void_p, c_void_p, c_int, c_char_p, c_void_p]
        self.shared_lib.ocp_nlp_constraints_model_set(self.nlp_config, \
            self.nlp_dims, self.nlp_in, stage, field, value_data_p)

        return


    def options_set(self, field_, value_):
        """
        Set options of the solver:

            :param field_: string, e.g. 'print_level', 'rti_phase', 'initialize_t_slacks', 'step_length'
            :param value_: of type int, float
        """
        int_fields = ['print_level', 'rti_phase', 'initialize_t_slacks']
        double_fields = ['step_length']
        string_fields = ['globalization']

        if field_ in int_fields:
            if not isinstance(value_, int):
                raise Exception('solver option {} must be of type int. You have {}.'.format(field_, type(value_)))
            value_ctypes = c_int(value_)
        elif field_ in double_fields:
            if not isinstance(value_, float):
                raise Exception('solver option {} must be of type float. You have {}.'.format(field_, type(value_)))
            value_ctypes = c_double(value_)
        elif field_ in string_fields:
            if not isinstance(value_, str):
                raise Exception('solver option {} must be of type str. You have {}.'.format(field_, type(value_)))
            value_ctypes = value_.encode('utf-8')
        else:
            # an unknown option name would otherwise leave value_ctypes undefined
            raise Exception('AcadosOcpSolver.options_set(): {} is not a valid argument'.format(field_))

        if field_ == 'rti_phase':
            if value_ < 0 or value_ > 2:
                raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
                    'take only values 0, 1, 2 for SQP-RTI-type solvers')
            if self.acados_ocp.solver_options.nlp_solver_type != 'SQP_RTI' and value_ > 0:
                raise Exception('AcadosOcpSolver.solve(): argument \'rti_phase\' can '
                    'take only value 0 for SQP-type solvers')

        field = field_.encode('utf-8')

        if field_ in string_fields:
            self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
                [c_void_p, c_void_p, c_char_p, c_char_p]
            self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
                self.nlp_opts, field, value_ctypes)
        else:
            self.shared_lib.ocp_nlp_solver_opts_set.argtypes = \
                [c_void_p, c_void_p, c_char_p, c_void_p]
            self.shared_lib.ocp_nlp_solver_opts_set(self.nlp_config, \
                self.nlp_opts, field, byref(value_ctypes))

        return


    def __del__(self):
        if self.solver_created:
            self.shared_lib.acados_free()
            del self.shared_lib

        # NOTE: the DLL cannot be easily unloaded, see
        # https://stackoverflow.com/questions/359498/how-can-i-unload-a-dll-using-ctypes-in-python
        # while isLoaded(self.shared_lib_name):
        #     dlclose(handle)
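A hedged usage sketch of the setter/getter methods above. It assumes an AcadosOcp object named ocp has already been fully configured, and that yref0 and x0 are numpy arrays of the right sizes (both are placeholders, not defined here):

from acados_template import AcadosOcpSolver

solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')

solver.options_set('print_level', 1)       # int option
solver.cost_set(0, 'yref', yref0)          # stage-wise cost reference (placeholder array)
solver.constraints_set(0, 'lbx', x0)       # e.g. pin the initial state (placeholder array)
solver.constraints_set(0, 'ubx', x0)
status = solver.solve()

x1 = solver.get(1, 'x')                    # primal iterate at shooting node 1
print(solver.get_stats('time_tot'))        # total cpu time of the last call
solver.print_statistics()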
40.525505
175
0.644682
5,968
42,106
4.255027
0.096682
0.020083
0.013468
0.056509
0.648618
0.556313
0.499606
0.439395
0.398126
0.370363
0
0.006798
0.245428
42,106
1,038
176
40.564547
0.792459
0.1244
0
0.27208
0
0.007123
0.162951
0.028801
0
0
0
0.000963
0
1
0.022792
false
0
0.022792
0
0.061254
0.017094
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07da9e40651ed6fdc8a2ae109d81653125f5c378
5,128
py
Python
noise/estimation/PCA/analyticNoiseEstimation_PCA.py
MaikWischow/Camera-Condition-Monitoring
910f9192d6309a6803ab76c346269fa5029c38e6
[ "MIT" ]
3
2021-12-10T14:52:23.000Z
2022-03-22T08:48:08.000Z
noise/estimation/PCA/analyticNoiseEstimation_PCA.py
MaikWischow/Camera-Condition-Monitoring
910f9192d6309a6803ab76c346269fa5029c38e6
[ "MIT" ]
null
null
null
noise/estimation/PCA/analyticNoiseEstimation_PCA.py
MaikWischow/Camera-Condition-Monitoring
910f9192d6309a6803ab76c346269fa5029c38e6
[ "MIT" ]
2
2021-12-13T08:12:52.000Z
2022-01-17T14:00:48.000Z
import numpy as np
import cv2
import sys
import os
import glob


def im2patch(im, pch_size, stride=1):
    '''
    Transform image to patches.
    Input:
        im: 3 x H x W or 1 x H x W image, numpy format
        pch_size: (int, int) tuple or integer
        stride: (int, int) tuple or integer
    '''
    if isinstance(pch_size, tuple):
        pch_H, pch_W = pch_size
    elif isinstance(pch_size, int):
        pch_H = pch_W = pch_size
    else:
        sys.exit('The input of pch_size must be an integer or an int tuple!')

    if isinstance(stride, tuple):
        stride_H, stride_W = stride
    elif isinstance(stride, int):
        stride_H = stride_W = stride
    else:
        sys.exit('The input of stride must be an integer or an int tuple!')

    C, H, W = im.shape
    num_H = len(range(0, H - pch_H + 1, stride_H))
    num_W = len(range(0, W - pch_W + 1, stride_W))
    num_pch = num_H * num_W
    pch = np.zeros((C, pch_H * pch_W, num_pch), dtype=im.dtype)
    kk = 0
    for ii in range(pch_H):
        for jj in range(pch_W):
            temp = im[:, ii:H - pch_H + ii + 1:stride_H, jj:W - pch_W + jj + 1:stride_W]
            pch[:, kk, :] = temp.reshape((C, num_pch))
            kk += 1

    return pch.reshape((C, pch_H, pch_W, num_pch))


def noise_estimate(im, pch_size=8):
    '''
    Implementation of the noise level estimation of the following paper:
    Chen G, Zhu F, Heng P A. An Efficient Statistical Method for Image Noise Level
    Estimation[C]// 2015 IEEE International Conference on Computer Vision (ICCV).
    IEEE Computer Society, 2015.
    Input:
        im: the noisy image, H x W x 3 or H x W numpy tensor, range [0, 1]
        pch_size: patch size
    Output:
        noise_level: the estimated noise level
    '''
    if im.ndim == 3:
        im = im.transpose((2, 0, 1))
    else:
        im = np.expand_dims(im, axis=0)

    # image to patch
    pch = im2patch(im, pch_size, 3)  # C x pch_size x pch_size x num_pch tensor
    num_pch = pch.shape[3]
    pch = pch.reshape((-1, num_pch))  # d x num_pch matrix
    d = pch.shape[0]

    mu = pch.mean(axis=1, keepdims=True)  # d x 1
    X = pch - mu
    sigma_X = np.matmul(X, X.transpose()) / num_pch
    sig_value, _ = np.linalg.eigh(sigma_X)
    sig_value.sort()

    # discard the largest eigenvalues until the remaining ones split evenly
    # around their mean; that mean is the noise variance estimate
    for ii in range(-1, -d - 1, -1):
        tau = np.mean(sig_value[:ii])
        if np.sum(sig_value[:ii] > tau) == np.sum(sig_value[:ii] < tau):
            return np.sqrt(tau)


def run(imgPath, patchSize, internalNumPatches, dirOut, saveResults=True):
    """
    Estimates the standard deviation of (additive white Gaussian) noise of image patches.
    The noise is estimated patch by patch.
    Based on: "An Efficient Statistical Method for Image Noise Level Estimation" (2015)
    :param imgPath: Path to the input image.
    :param patchSize: Image patch size.
    :param internalNumPatches: Internal number of sub-image-patches.
    :param dirOut: Directory where to save the noise estimation results.
    :param saveResults: Whether to save the estimation results or not.
    :return: None
    """
    # Load image
    img = np.array(cv2.imread(imgPath))
    try:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img = img / 255.0
        h, w = img.shape

        psize = min(min(patchSize, h), w)
        psize -= psize % 2
        patch_step = psize
        shift_factor = 2

        # result array; uint8 so that sigma values up to 255 fit (int8 would overflow)
        estimatedNoiseMap = np.zeros([h, w], dtype=np.uint8)

        rangex = range(0, w, patch_step)
        rangey = range(0, h, patch_step)
        for start_x in rangex:
            for start_y in rangey:
                end_x = start_x + psize
                end_y = start_y + psize
                if end_x > w:
                    end_x = w
                    end_x = shift_factor * (end_x // shift_factor)
                    start_x = end_x - psize
                if end_y > h:
                    end_y = h
                    end_y = shift_factor * (end_y // shift_factor)
                    start_y = end_y - psize

                tileM = img[start_y:end_y, start_x:end_x]
                h_, w_ = tileM.shape
                sigma = noise_estimate(tileM, internalNumPatches) * 255.0
                estimatedNoiseMap[start_y:start_y + h_, start_x:start_x + w_] = sigma

        if saveResults:
            if dirOut is not None:
                imgName = imgPath.split(os.sep)[-1].split(".")[0]
                dirOut = os.path.join(dirOut)
                if not os.path.exists(dirOut):
                    os.makedirs(dirOut)
                noiseMapPath = os.path.join(dirOut, imgName + ".npz")
                if not os.path.exists(noiseMapPath):
                    np.savez_compressed(noiseMapPath, estimatedNoiseMap)

        return estimatedNoiseMap
    except Exception:
        return None


# Example
# if __name__ == '__main__':
#     dirIn = r"../../../data/udacity/img/GT"
#     dirOut = r"../../../data/udacity/labels_noise_patchwise/PCA"
#     imgFileEnding = ".jpg"
#     for imgPath in glob.glob(os.path.join(dirIn, "*" + imgFileEnding)):
#         run(imgPath, 128, 8, dirOut)
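A minimal self-check sketch for noise_estimate() (not part of the original file): generate a smooth synthetic image, add Gaussian noise of known strength, and compare the estimate:

import numpy as np

np.random.seed(0)
clean = np.tile(np.linspace(0.2, 0.8, 256), (256, 1))   # smooth gradient image in [0, 1]
for true_sigma in (5.0, 15.0, 25.0):                    # noise std. dev. on the 0-255 scale
    noisy = clean + np.random.randn(256, 256) * (true_sigma / 255.0)
    est = noise_estimate(noisy, pch_size=8)             # may return None if no eigenvalue split is found
    if est is not None:
        print('true sigma: {:5.1f}  estimated: {:5.2f}'.format(true_sigma, est * 255.0))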
34.884354
136
0.576248
738
5,128
3.855014
0.247967
0.029525
0.004218
0.011248
0.15536
0.105448
0.067487
0.056942
0.039367
0
0
0.01911
0.316303
5,128
147
137
34.884354
0.792356
0.284126
0
0.033708
0
0
0.031939
0
0
0
0
0
0
1
0.033708
false
0
0.05618
0
0.134831
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07db7794b6d98e1a7fc4b7e5727cce2c98e1474f
8,162
py
Python
python/paddle/tensor/attribute.py
douch/Paddle
81c40722869935d6e897f4b1aeb6e6f67606188a
[ "Apache-2.0" ]
1
2021-12-27T02:40:41.000Z
2021-12-27T02:40:41.000Z
python/paddle/tensor/attribute.py
LiYuRio/Paddle
dbd6e2df9d074973b7ee177e2d6b96ed2318008e
[ "Apache-2.0" ]
null
null
null
python/paddle/tensor/attribute.py
LiYuRio/Paddle
dbd6e2df9d074973b7ee177e2d6b96ed2318008e
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from ..framework import core
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype

# TODO: define functions to get tensor attributes
from ..fluid.layers import rank  # noqa: F401
from ..fluid.layers import shape  # noqa: F401

import paddle
from paddle import _C_ops
from paddle.static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode

__all__ = []


def _complex_to_real_dtype(dtype):
    if dtype == core.VarDesc.VarType.COMPLEX64:
        return core.VarDesc.VarType.FP32
    elif dtype == core.VarDesc.VarType.COMPLEX128:
        return core.VarDesc.VarType.FP64
    else:
        return dtype


def _real_to_complex_dtype(dtype):
    if dtype == core.VarDesc.VarType.FP32:
        return core.VarDesc.VarType.COMPLEX64
    elif dtype == core.VarDesc.VarType.FP64:
        return core.VarDesc.VarType.COMPLEX128
    else:
        return dtype


def is_complex(x):
    """Return whether x is a tensor of complex data type (complex64 or complex128).

    Args:
        x (Tensor): The input tensor.

    Returns:
        bool: True if the data type of the input is a complex data type, otherwise false.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1 + 2j, 3 + 4j])
            print(paddle.is_complex(x))
            # True

            x = paddle.to_tensor([1.1, 1.2])
            print(paddle.is_complex(x))
            # False

            x = paddle.to_tensor([1, 2, 3])
            print(paddle.is_complex(x))
            # False
    """
    if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
        raise TypeError("Expected Tensor, but received type of x: {}".format(
            type(x)))
    dtype = x.dtype
    is_complex_dtype = (dtype == core.VarDesc.VarType.COMPLEX64 or
                        dtype == core.VarDesc.VarType.COMPLEX128)
    return is_complex_dtype


def is_floating_point(x):
    """
    Returns whether the dtype of `x` is one of paddle.float64, paddle.float32,
    paddle.float16, and paddle.bfloat16.

    Args:
        x (Tensor): The input tensor.

    Returns:
        bool: True if the dtype of `x` is a floating type, otherwise false.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.arange(1., 5., dtype='float32')
            y = paddle.arange(1, 5, dtype='int32')
            print(paddle.is_floating_point(x))
            # True
            print(paddle.is_floating_point(y))
            # False
    """
    if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
        raise TypeError("Expected Tensor, but received type of x: {}".format(
            type(x)))
    dtype = x.dtype
    is_fp_dtype = (dtype == core.VarDesc.VarType.FP32 or
                   dtype == core.VarDesc.VarType.FP64 or
                   dtype == core.VarDesc.VarType.FP16 or
                   dtype == core.VarDesc.VarType.BF16)
    return is_fp_dtype


def is_integer(x):
    """Return whether x is a tensor of integral data type.

    Args:
        x (Tensor): The input tensor.

    Returns:
        bool: True if the data type of the input is an integer data type, otherwise false.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([1 + 2j, 3 + 4j])
            print(paddle.is_integer(x))
            # False

            x = paddle.to_tensor([1.1, 1.2])
            print(paddle.is_integer(x))
            # False

            x = paddle.to_tensor([1, 2, 3])
            print(paddle.is_integer(x))
            # True
    """
    if not isinstance(x, (paddle.Tensor, paddle.static.Variable)):
        raise TypeError("Expected Tensor, but received type of x: {}".format(
            type(x)))
    dtype = x.dtype
    is_int_dtype = (dtype == core.VarDesc.VarType.UINT8 or
                    dtype == core.VarDesc.VarType.INT8 or
                    dtype == core.VarDesc.VarType.INT16 or
                    dtype == core.VarDesc.VarType.INT32 or
                    dtype == core.VarDesc.VarType.INT64)
    return is_int_dtype


def real(x, name=None):
    """
    Returns a new tensor containing real values of the input tensor.

    Args:
        x (Tensor): the input tensor, its data type could be complex64 or complex128.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: a tensor containing real values of the input tensor.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor(
                [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
            # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
            #        [[(1+6j), (2+5j), (3+4j)],
            #         [(4+3j), (5+2j), (6+1j)]])

            real_res = paddle.real(x)
            # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [[1., 2., 3.],
            #         [4., 5., 6.]])

            real_t = x.real()
            # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [[1., 2., 3.],
            #         [4., 5., 6.]])
    """
    if in_dygraph_mode():
        return _C_ops.final_state_real(x)
    if _in_legacy_dygraph():
        return _C_ops.real(x)

    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
    helper = LayerHelper('real', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=_complex_to_real_dtype(helper.input_dtype()))
    helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
    return out


def imag(x, name=None):
    """
    Returns a new tensor containing imaginary values of the input tensor.

    Args:
        x (Tensor): the input tensor, its data type could be complex64 or complex128.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: a tensor containing imaginary values of the input tensor.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor(
                [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]])
            # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
            #        [[(1+6j), (2+5j), (3+4j)],
            #         [(4+3j), (5+2j), (6+1j)]])

            imag_res = paddle.imag(x)
            # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [[6., 5., 4.],
            #         [3., 2., 1.]])

            imag_t = x.imag()
            # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #        [[6., 5., 4.],
            #         [3., 2., 1.]])
    """
    if in_dygraph_mode():
        return _C_ops.final_state_imag(x)
    if _in_legacy_dygraph():
        return _C_ops.imag(x)

    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
    helper = LayerHelper('imag', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=_complex_to_real_dtype(helper.input_dtype()))
    helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
    return out
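A tiny consolidated sketch of the three public checks and the real() mapping (assuming paddle is installed; it simply mirrors the docstring examples above):

import paddle

x = paddle.to_tensor([1 + 2j, 3 + 4j])
print(paddle.is_complex(x), paddle.is_floating_point(x), paddle.is_integer(x))
# True False False
print(paddle.real(x).dtype)  # paddle.float32, i.e. complex64 maps to float32 as in _complex_to_real_dtype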
33.178862
115
0.590787
1,068
8,162
4.39794
0.195693
0.044496
0.072812
0.073451
0.690228
0.589099
0.5693
0.554396
0.499255
0.466042
0
0.037664
0.29086
8,162
245
116
33.314286
0.773842
0.549253
0
0.342105
0
0
0.062075
0
0
0
0
0.004082
0
1
0.092105
false
0
0.131579
0
0.421053
0.013158
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07dedb4b0df0aef6400f3a4ca8f79f22e2a6cabd
2,990
py
Python
appcodec.py
guardhunt/TelemterRC
679f99b317ecc6cbef6e022ae861cde18594f6a0
[ "MIT" ]
null
null
null
appcodec.py
guardhunt/TelemterRC
679f99b317ecc6cbef6e022ae861cde18594f6a0
[ "MIT" ]
null
null
null
appcodec.py
guardhunt/TelemterRC
679f99b317ecc6cbef6e022ae861cde18594f6a0
[ "MIT" ]
null
null
null
import evdev
import time
import struct


class appcodec():
    def __init__(self):
        self.device = evdev.InputDevice("/dev/input/event2")
        self.capabilities = self.device.capabilities(verbose=True)
        self.capaRAW = self.device.capabilities(absinfo=False)
        self.config = {}
        self.state = {}

    def build(self):
        """build state dictionary for controller"""
        # build config dictionary by code and name
        for key, value in self.capabilities.items():
            for element in value:
                if type(element[0]) is tuple:
                    self.config[element[0][1]] = element[0][0]
                elif type(element[0]) is list:
                    self.config[element[1]] = element[0][0]
                elif ("SYN" in str(element[0])) or ("FF" in str(element[0])):
                    pass
                else:
                    self.config[element[1]] = element[0]

        # build state dictionary from raw codes
        for code in self.capaRAW[1]:
            self.state[self.config[code]] = 0
        for code in self.capaRAW[3]:
            self.state[self.config[code]] = 0

        print("waiting for event")
        for event in self.device.read_loop():
            if event.type == evdev.ecodes.EV_KEY or event.type == evdev.ecodes.EV_ABS:
                return self.update_state(event)

    def update_state(self, event):
        self.state[self.config[event.code]] = event.value

        buttons1_state = 0
        buttons1_state = buttons1_state | self.state["BTN_A"]
        buttons1_state = buttons1_state | self.state["BTN_B"] << 1
        buttons1_state = buttons1_state | self.state["BTN_NORTH"] << 2
        buttons1_state = buttons1_state | self.state["BTN_WEST"] << 3

        buttons2_state = 0
        buttons2_state = buttons2_state | self.state["BTN_START"]
        buttons2_state = buttons2_state | self.state["BTN_MODE"] << 1
        buttons2_state = buttons2_state | self.state["BTN_SELECT"] << 2
        buttons2_state = buttons2_state | self.state["BTN_TR"] << 3
        buttons2_state = buttons2_state | self.state["BTN_TL"] << 4

        packet = struct.pack('6h2c',
                             self.state["ABS_X"], self.state["ABS_Y"],
                             self.state["ABS_RX"], self.state["ABS_RY"],
                             self.state["ABS_HAT0X"], self.state["ABS_HAT0Y"],
                             buttons1_state.to_bytes(1, byteorder="big"),
                             buttons2_state.to_bytes(1, byteorder="big"))
        return packet

    def decode(self, packet):
        buttons = []
        state = packet[14:30]
        state = struct.unpack('6h2B2c', state)
        buttons1 = state[8]
        buttons2 = state[9]
        holder1 = '{0:06b}'.format(int.from_bytes(buttons1, byteorder="big"))
        holder2 = '{0:05b}'.format(int.from_bytes(buttons2, byteorder="big"))
        for i in holder1:
            buttons.append(int(i))
        for i in holder2:
            buttons.append(int(i))
        state = list(state[:7]) + buttons
        return state
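A standalone round-trip sketch of the '6h2c' packing used by update_state() above (no controller needed; the axis and button values are made up). Note that decode() unpacks a longer '6h2B2c' frame sliced out of a bigger packet, so this only demonstrates the core struct layout:

import struct

axes = (-32768, 32767, 0, 128, -1, 1)   # ABS_X, ABS_Y, ABS_RX, ABS_RY, ABS_HAT0X, ABS_HAT0Y
buttons1 = 0b1010                       # e.g. BTN_WEST and BTN_B set
buttons2 = 0b10001                      # e.g. BTN_TL and BTN_START set

packet = struct.pack('6h2c', *axes,
                     buttons1.to_bytes(1, byteorder="big"),
                     buttons2.to_bytes(1, byteorder="big"))
print(len(packet))                      # 14: six signed shorts plus two single bytes

unpacked = struct.unpack('6h2c', packet)
print(unpacked[:6])                     # the six axis values
print(int.from_bytes(unpacked[6], "big"), int.from_bytes(unpacked[7], "big"))  # 10 17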
37.375
262
0.585619
374
2,990
4.548128
0.286096
0.100529
0.074074
0.089947
0.349794
0.289242
0.201058
0
0
0
0
0.036968
0.285284
2,990
79
263
37.848101
0.759008
0.038462
0
0.068966
0
0
0.06311
0
0
0
0
0
0
1
0.068966
false
0.017241
0.051724
0
0.172414
0.017241
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07e01a30113159cdf23415b99ef5107294e27bab
606
py
Python
mundo 3/099.py
thiagofreitascarneiro/Curso-de-Python---Curso-em-Video
0342e482780b5a1c6f78cddd51d9bfad785c79fa
[ "MIT" ]
1
2021-08-04T13:21:22.000Z
2021-08-04T13:21:22.000Z
mundo 3/099.py
thiagofreitascarneiro/Curso-de-Python---Curso-em-Video
0342e482780b5a1c6f78cddd51d9bfad785c79fa
[ "MIT" ]
null
null
null
mundo 3/099.py
thiagofreitascarneiro/Curso-de-Python---Curso-em-Video
0342e482780b5a1c6f78cddd51d9bfad785c79fa
[ "MIT" ]
null
null
null
import time


# The * unpacks the parameter, so the function accepts any number of arguments.
def maior(*num):
    contador = maior = 0
    print('Analyzing the given values...')
    for v in num:
        contador = contador + 1
        print(f'{v} ', end='', flush=True)
        time.sleep(0.3)
        if contador == 1:
            maior = v
        elif v > maior:
            maior = v
    print(f'A total of {len(num)} values were given.')
    # report the manually computed maximum (the original printed max(num),
    # which bypassed the computation this exercise is about)
    print(f'The largest value given was {maior}')
    print(30 * '-')


maior(2, 1, 7)
maior(5, 4, 7, 9, 2)
maior(1, 4, 7, 20, 2)
maior(0)
23.307692
76
0.544554
89
606
3.707865
0.539326
0.054545
0
0
0
0
0
0
0
0
0
0.05314
0.316832
606
25
77
24.24
0.743961
0.122112
0
0.1
0
0
0.214015
0
0
0
0
0
0
1
0.05
false
0.05
0.05
0
0.1
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07e0ba1b41406f219565825c5f80d1ee0cfaded5
8,638
py
Python
easyric/tests/test_io_geotiff.py
HowcanoeWang/EasyRIC
a3420bc7b1e0f1013411565cf0e66dd2d2ba5371
[ "MIT" ]
12
2021-01-25T07:11:52.000Z
2022-02-14T11:57:03.000Z
easyric/tests/test_io_geotiff.py
HowcanoeWang/EasyRIC
a3420bc7b1e0f1013411565cf0e66dd2d2ba5371
[ "MIT" ]
null
null
null
easyric/tests/test_io_geotiff.py
HowcanoeWang/EasyRIC
a3420bc7b1e0f1013411565cf0e66dd2d2ba5371
[ "MIT" ]
null
null
null
import pyproj
import pytest
import numpy as np
from easyric.io import geotiff, shp
from skimage.io import imread
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


def test_prase_header_string_width():
    out_dict = geotiff._prase_header_string("* 256 image_width (1H) 13503")
    assert out_dict['width'] == 13503


def test_prase_header_string_length():
    out_dict = geotiff._prase_header_string("* 257 image_length (1H) 19866")
    assert out_dict['length'] == 19866


def test_prase_header_string_scale():
    in_str = "* 33550 model_pixel_scale (3d) (0.0029700000000000004, 0.0029700000000000004, 0"
    out_dict = geotiff._prase_header_string(in_str)
    assert out_dict['scale'] == (0.0029700000000000004, 0.0029700000000000004)


def test_prase_header_string_tie_point():
    in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823,"
    out_dict = geotiff._prase_header_string(in_str)
    assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)

    in_str = "* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 368090.77975000005, 3956071.13823, 0"
    out_dict = geotiff._prase_header_string(in_str)
    assert out_dict['tie_point'] == (368090.77975000005, 3956071.13823)


def test_prase_header_string_nodata():
    out_dict = geotiff._prase_header_string("* 42113 gdal_nodata (7s) b'-10000'")
    assert out_dict['nodata'] == -10000


def test_prase_header_string_proj_normal(capsys):
    in_str = "* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 54N|WGS 84|'"
    out_dict = geotiff._prase_header_string(in_str)

    captured = capsys.readouterr()
    assert f"[io][geotiff][GeoCorrd] Comprehense [{in_str}]" in captured.out
    assert out_dict['proj'] == pyproj.CRS.from_epsg(32654)


def test_prase_header_string_proj_error(capsys):
    # should fail because "WGS 84 / UTM zone 54N" needs to be given in full
    out_dict = geotiff._prase_header_string("* 34737 geo_ascii_params (30s) b'UTM zone 54N|WGS 84|'")

    captured = capsys.readouterr()
    assert '[io][geotiff][GeoCorrd] Generation failed, because [Input is not a CRS: UTM zone 54N]' in captured.out
    assert out_dict['proj'] is None


def test_get_imarray_without_header(capsys):
    pass


def test_get_imarray_with_header(capsys):
    pass


def test_point_query_one_point():
    point = (368023.004, 3955500.669)
    out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', point)
    np.testing.assert_almost_equal(out, np.float32(97.45558), decimal=3)


def test_point_query_numpy_points():
    points = np.asarray([[368022.581, 3955501.054],
                         [368024.032, 3955500.465]])
    out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', points)
    expected = np.asarray([97.624344, 97.59617])
    np.testing.assert_almost_equal(out, expected, decimal=3)


def test_point_query_list_numpy_points():
    points = np.asarray([[368022.581, 3955501.054],
                         [368024.032, 3955500.465]])
    point = np.asarray([[368023.004, 3955500.669]])
    p_list = [point, points]
    expected = [np.asarray([97.45558]), np.asarray([97.624344, 97.59617])]

    out = geotiff.point_query(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif', p_list)

    assert type(expected) == type(out)
    np.testing.assert_almost_equal(expected[0], out[0], decimal=3)
    np.testing.assert_almost_equal(expected[1], out[1], decimal=3)


def test_point_query_wrong_types():
    # [TODO]
    pass


def test_point_query_input_ndarray():
    # [TODO]
    pass


def test_mean_values(capsys):
    mean_ht = geotiff.mean_values(r'file/pix4d.diy/hasu_tanashi_20170525_Ins1RGB_30m_dsm.tif')
    captured = capsys.readouterr()
    # when not converted to float, mean_values == 97.562584
    # assert mean_ht == np.float32(97.562584)
    np.testing.assert_almost_equal(mean_ht, np.float32(97.562584), decimal=3)

    # another case that was not working in a previous version:
    # cannot convert np.nan to int, fixed by astype(float)
    mean_ht = geotiff.mean_values(r'file/tiff_test/2_12.tif')
    captured = capsys.readouterr()
    # the deprecated np.float alias is replaced by the builtin float, which is equivalent here
    np.testing.assert_almost_equal(mean_ht, float(72.31657466298653), decimal=3)


def test_gis2pixel2gis():
    geo_head_txt = """
TIFF file: 200423_G_M600pro_transparent_mosaic_group1.tif, 411 MiB, little endian, bigtiff

Series 0: 31255x19436x4, uint8, YXS, 1 pages, not mem-mappable

Page 0: 31255x19436x4, uint8, 8 bit, rgb, lzw
* 256 image_width (1H) 19436
* 257 image_length (1H) 31255
* 258 bits_per_sample (4H) (8, 8, 8, 8)
* 259 compression (1H) 5
* 262 photometric (1H) 2
* 273 strip_offsets (31255Q) (500650, 501114, 501578, 502042, 502506, 502970, 5
* 277 samples_per_pixel (1H) 4
* 278 rows_per_strip (1H) 1
* 279 strip_byte_counts (31255Q) (464, 464, 464, 464, 464, 464, 464, 464, 464,
* 284 planar_configuration (1H) 1
* 305 software (12s) b'pix4dmapper'
* 317 predictor (1H) 2
* 338 extra_samples (1H) 2
* 339 sample_format (4H) (1, 1, 1, 1)
* 33550 model_pixel_scale (3d) (0.001, 0.001, 0.0)
* 33922 model_tie_point (6d) (0.0, 0.0, 0.0, 484576.70205, 3862285.5109300003,
* 34735 geo_key_directory (32H) (1, 1, 0, 7, 1024, 0, 1, 1, 1025, 0, 1, 1, 1026
* 34737 geo_ascii_params (30s) b'WGS 84 / UTM zone 53N|WGS 84|'
"""
    gis_coord = np.asarray([[484593.67474654, 3862259.42413431],
                            [484593.41064743, 3862259.92582402],
                            [484593.64841806, 3862260.06515117],
                            [484593.93077419, 3862259.55455913],
                            [484593.67474654, 3862259.42413431]])

    header = geotiff._prase_header_string(geo_head_txt)

    expected_pixel = np.asarray([[16972, 26086],
                                 [16708, 25585],
                                 [16946, 25445],
                                 [17228, 25956],
                                 [16972, 26086]])

    pixel_coord = geotiff.geo2pixel(gis_coord, header)
    np.testing.assert_almost_equal(pixel_coord, expected_pixel)

    gis_revert = geotiff.pixel2geo(pixel_coord, header)
    np.testing.assert_almost_equal(gis_revert, gis_coord, decimal=3)


def test_is_roi_type():
    roi1 = np.asarray([[123, 456], [456, 789]])
    roi2 = [roi1, roi1]
    roi_wrong_1 = (123, 345)
    roi_wrong_2 = [123, 456]
    roi_wrong_3 = [[123, 345], [456, 789]]

    roi1_out = geotiff._is_roi_type(roi1)
    assert roi1_out == [roi1]

    roi2_out = geotiff._is_roi_type(roi2)
    assert roi2_out == roi2

    with pytest.raises(TypeError) as errinfo:
        roi_w1_out = geotiff._is_roi_type(roi_wrong_1)
    assert 'Only numpy.ndarray points and list contains numpy.ndarray points are supported' in str(errinfo.value)

    with pytest.raises(TypeError) as errinfo:
        roi_w2_out = geotiff._is_roi_type(roi_wrong_2)
    assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)

    with pytest.raises(TypeError) as errinfo:
        roi_w3_out = geotiff._is_roi_type(roi_wrong_3)
    assert 'Only list contains numpy.ndarray points are supported' in str(errinfo.value)


def test_imarray_clip_2d_rgb_rgba():
    photo_path = 'file/pix4d.diy/photos/DJI_0174.JPG'
    roi = np.asarray([[2251, 1223], [2270, 1270], [2227, 1263], [2251, 1223]])

    fig, ax = plt.subplots(1, 3, figsize=(12, 4))
    # -----------------------------------------------
    imarray_rgb = imread(photo_path)
    assert imarray_rgb.shape == (3456, 4608, 3)
    im_out_rgb, offsets_rgb = geotiff.imarray_clip(imarray_rgb, roi)
    ax[1].imshow(im_out_rgb / 255)
    ax[1].set_title('rgb')
    # -----------------------------------------------
    imarray_2d = rgb2gray(imarray_rgb)
    assert imarray_2d.shape == (3456, 4608)
    im_out_2d, offsets_2d = geotiff.imarray_clip(imarray_2d, roi)
    ax[0].imshow(im_out_2d, cmap='gray')
    ax[0].set_title('gray')
    # -----------------------------------------------
    imarray_rgba = np.dstack((imarray_rgb, np.ones((3456, 4608)) * 255))
    assert imarray_rgba.shape == (3456, 4608, 4)
    im_out_rgba, offsets = geotiff.imarray_clip(imarray_rgba, roi)
    ax[2].imshow(im_out_rgba / 255)
    ax[2].set_title('rgba')

    plt.show()


def test_clip_roi_pixel():
    poly = shp.read_shp2d('file/shp_test/test.shp')
    poly_pixel = geotiff.geo2pixel(poly['0'], geotiff.get_header('file/tiff_test/2_12.tif'))
    imarray, offset = geotiff.clip_roi(poly_pixel, 'file/tiff_test/2_12.tif', is_geo=False)
    assert len(imarray) == 1


def test_clip_roi_geo():
    poly = shp.read_shp2d('file/shp_test/test.shp')
    imarray, offset = geotiff.clip_roi(poly['0'], 'file/tiff_test/2_12.tif', is_geo=True)
    assert len(imarray) == 1
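A quick sanity sketch of the tie-point/scale arithmetic that test_gis2pixel2gis exercises. The helper below is an assumption (it is not easyric's implementation); it just applies the standard GeoTIFF convention and reproduces the first expected_pixel value in the test above:

import numpy as np

tie_point = np.array([484576.70205, 3862285.5109300003])  # geo coords of pixel (0, 0)
scale = 0.001                                             # ground size of one pixel

def geo2pixel_sketch(geo_xy):
    # column: x grows eastwards; row: y grows downwards in image space
    px = (geo_xy[:, 0] - tie_point[0]) / scale
    py = (tie_point[1] - geo_xy[:, 1]) / scale
    return np.stack([px, py], axis=1).astype(int)

gis = np.array([[484593.67474654, 3862259.42413431]])
print(geo2pixel_sketch(gis))  # [[16972 26086]], matching expected_pixel above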
37.232759
117
0.683376
1,266
8,638
4.406003
0.262243
0.025099
0.048763
0.038724
0.459842
0.406597
0.293654
0.239333
0.207601
0.195411
0
0.174127
0.177587
8,638
232
118
37.232759
0.611064
0.04758
0
0.158228
0
0.037975
0.27078
0.064744
0
0
0
0.00431
0.183544
1
0.126582
false
0.025316
0.044304
0
0.170886
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07e0cae8b52445fc39f360d69691eeb8e3e8f35e
9,833
py
Python
src/ebay_rest/api/buy_marketplace_insights/models/item_location.py
matecsaj/ebay_rest
dd23236f39e05636eff222f99df1e3699ce47d4a
[ "MIT" ]
3
2021-12-12T04:28:03.000Z
2022-03-10T03:29:18.000Z
src/ebay_rest/api/buy_marketplace_insights/models/item_location.py
jdavv/ebay_rest
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
[ "MIT" ]
33
2021-06-16T20:44:36.000Z
2022-03-30T14:55:06.000Z
src/ebay_rest/api/buy_marketplace_insights/models/item_location.py
jdavv/ebay_rest
20fc88c6aefdae9ab90f9c1330e79abddcd750cd
[ "MIT" ]
7
2021-06-03T09:30:23.000Z
2022-03-08T19:51:33.000Z
# coding: utf-8

"""
    Marketplace Insights API

    <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> The Marketplace Insights API provides the ability to search for sold items on eBay by keyword, GTIN, category, and product and returns the sales history of those items.  # noqa: E501

    OpenAPI spec version: v1_beta.2.2

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re  # noqa: F401

import six


class ItemLocation(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'address_line1': 'str',
        'address_line2': 'str',
        'city': 'str',
        'country': 'str',
        'county': 'str',
        'postal_code': 'str',
        'state_or_province': 'str'
    }

    attribute_map = {
        'address_line1': 'addressLine1',
        'address_line2': 'addressLine2',
        'city': 'city',
        'country': 'country',
        'county': 'county',
        'postal_code': 'postalCode',
        'state_or_province': 'stateOrProvince'
    }

    def __init__(self, address_line1=None, address_line2=None, city=None, country=None, county=None, postal_code=None, state_or_province=None):  # noqa: E501
        """ItemLocation - a model defined in Swagger"""  # noqa: E501
        self._address_line1 = None
        self._address_line2 = None
        self._city = None
        self._country = None
        self._county = None
        self._postal_code = None
        self._state_or_province = None
        self.discriminator = None
        if address_line1 is not None:
            self.address_line1 = address_line1
        if address_line2 is not None:
            self.address_line2 = address_line2
        if city is not None:
            self.city = city
        if country is not None:
            self.country = country
        if county is not None:
            self.county = county
        if postal_code is not None:
            self.postal_code = postal_code
        if state_or_province is not None:
            self.state_or_province = state_or_province

    @property
    def address_line1(self):
        """Gets the address_line1 of this ItemLocation.  # noqa: E501

        The first line of the street address.  # noqa: E501

        :return: The address_line1 of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._address_line1

    @address_line1.setter
    def address_line1(self, address_line1):
        """Sets the address_line1 of this ItemLocation.

        The first line of the street address.  # noqa: E501

        :param address_line1: The address_line1 of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._address_line1 = address_line1

    @property
    def address_line2(self):
        """Gets the address_line2 of this ItemLocation.  # noqa: E501

        The second line of the street address. This field may contain such values as an apartment or suite number.  # noqa: E501

        :return: The address_line2 of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._address_line2

    @address_line2.setter
    def address_line2(self, address_line2):
        """Sets the address_line2 of this ItemLocation.

        The second line of the street address. This field may contain such values as an apartment or suite number.  # noqa: E501

        :param address_line2: The address_line2 of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._address_line2 = address_line2

    @property
    def city(self):
        """Gets the city of this ItemLocation.  # noqa: E501

        The city in which the item is located.  # noqa: E501

        :return: The city of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._city

    @city.setter
    def city(self, city):
        """Sets the city of this ItemLocation.

        The city in which the item is located.  # noqa: E501

        :param city: The city of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._city = city

    @property
    def country(self):
        """Gets the country of this ItemLocation.  # noqa: E501

        The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a>  # noqa: E501

        :return: The country of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._country

    @country.setter
    def country(self, country):
        """Sets the country of this ItemLocation.

        The two-letter <a href=\"https://www.iso.org/iso-3166-country-codes.html\">ISO 3166</a> standard code that indicates the country in which the item is located. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/marketplace_insights/types/ba:CountryCodeEnum'>eBay API documentation</a>  # noqa: E501

        :param country: The country of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._country = country

    @property
    def county(self):
        """Gets the county of this ItemLocation.  # noqa: E501

        The county in which the item is located.  # noqa: E501

        :return: The county of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._county

    @county.setter
    def county(self, county):
        """Sets the county of this ItemLocation.

        The county in which the item is located.  # noqa: E501

        :param county: The county of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._county = county

    @property
    def postal_code(self):
        """Gets the postal_code of this ItemLocation.  # noqa: E501

        The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span>  # noqa: E501

        :return: The postal_code of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._postal_code

    @postal_code.setter
    def postal_code(self, postal_code):
        """Sets the postal_code of this ItemLocation.

        The postal code (or zip code in US) where the item is located.<br /> <br /><span class=\"tablenote\"> <b> Note: </b>Beginning in late January 2020, the displayed postal code will be masked to all users. Different countries will mask postal/zip codes in slightly different ways, but an example would be <code>951**</code>.</span>  # noqa: E501

        :param postal_code: The postal_code of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._postal_code = postal_code

    @property
    def state_or_province(self):
        """Gets the state_or_province of this ItemLocation.  # noqa: E501

        The state or province in which the item is located.  # noqa: E501

        :return: The state_or_province of this ItemLocation.  # noqa: E501
        :rtype: str
        """
        return self._state_or_province

    @state_or_province.setter
    def state_or_province(self, state_or_province):
        """Sets the state_or_province of this ItemLocation.

        The state or province in which the item is located.  # noqa: E501

        :param state_or_province: The state_or_province of this ItemLocation.  # noqa: E501
        :type: str
        """
        self._state_or_province = state_or_province

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(ItemLocation, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ItemLocation):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
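A short usage sketch of the generated model (assuming the class above is importable; the field values are made up):

loc = ItemLocation(city='San Jose', country='US', postal_code='951**')
print(loc.to_dict()['city'])   # 'San Jose'; unset fields stay None in the dict
print(loc)                     # __repr__ pretty-prints the same dict
print(loc == ItemLocation(city='San Jose', country='US', postal_code='951**'))  # True, via __eq__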
34.992883
442
0.616699
1,256
9,833
4.700637
0.16879
0.051491
0.085366
0.078252
0.571477
0.508977
0.492547
0.445122
0.365346
0.280318
0
0.027718
0.288213
9,833
280
443
35.117857
0.815831
0.485203
0
0.075
0
0
0.059952
0
0
0
0
0
0
1
0.166667
false
0
0.025
0
0.325
0.016667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
07e1c7308c17fb9509fe646f19b12d537b946ccf
502
py
Python
fractionalKnapsack.py
aadishgoel2013/Algos-with-Python
19541607c8ede9a76a8cbbe047e01343080cfd5b
[ "Apache-2.0" ]
6
2017-11-12T05:13:27.000Z
2022-03-13T07:58:31.000Z
fractionalKnapsack.py
aadishgoel2013/Algos-with-Python
19541607c8ede9a76a8cbbe047e01343080cfd5b
[ "Apache-2.0" ]
null
null
null
fractionalKnapsack.py
aadishgoel2013/Algos-with-Python
19541607c8ede9a76a8cbbe047e01343080cfd5b
[ "Apache-2.0" ]
null
null
null
# Fractional Knapsack

wt = [40, 50, 30, 10, 10, 40, 30]
pro = [30, 20, 20, 25, 5, 35, 15]
n = len(wt)
data = [(i, pro[i], wt[i]) for i in range(n)]
bag = 100

# greedy: sort items by profit-to-weight ratio, best first
data.sort(key=lambda x: x[1] / x[2], reverse=True)

profit = 0
ans = []
i = 0
while i < n:
    if data[i][2] <= bag:
        # item fits whole: take it and shrink the remaining capacity
        bag -= data[i][2]
        ans.append(data[i][0])
        profit += data[i][1]
        i += 1
    else:
        break

# take a fraction of the first item that no longer fits
if i < n:
    ans.append(data[i][0])
    profit += (bag * data[i][1]) / data[i][2]

print(profit, ans)
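The same greedy packaged as a reusable function (a sketch added here, not part of the original script); it sorts by profit-to-weight ratio, takes whole items while they fit, then a fraction of the first item that does not:

def fractional_knapsack(weights, profits, capacity):
    order = sorted(range(len(weights)), key=lambda i: profits[i] / weights[i], reverse=True)
    total, taken = 0.0, []
    for i in order:
        if weights[i] <= capacity:
            capacity -= weights[i]
            total += profits[i]
            taken.append((i, 1.0))                    # item taken whole
        else:
            taken.append((i, capacity / weights[i]))  # fractional part of the next-best item
            total += profits[i] * capacity / weights[i]
            break
    return total, taken

print(fractional_knapsack([40, 50, 30, 10, 10, 40, 30], [30, 20, 20, 25, 5, 35, 15], 100))
# (96.66..., [(3, 1.0), (5, 1.0), (0, 1.0), (2, 0.333...)]) -- same result as the script above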
17.310345
49
0.498008
94
502
2.659574
0.414894
0.16
0.072
0.112
0.168
0.168
0
0
0
0
0
0.116667
0.282869
502
28
50
17.928571
0.577778
0.037849
0
0.095238
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.047619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0