hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
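The fields above describe one record per source file: repository metadata (the max_stars_*, max_issues_*, and max_forks_* groups), the raw file text in content, and per-file quality signals (the qsc_* columns). A minimal sketch of loading one such shard and filtering on a few of these signals, assuming the shard is stored as Parquet; the file name and the threshold values are hypothetical, not taken from the dataset:

# Minimal sketch: load a Parquet shard with the schema above and keep
# Python files that pass a few illustrative qsc_* quality filters.
# Assumptions: "data_shard.parquet" is a hypothetical local file name,
# and the thresholds below are examples, not the dataset's own cutoffs.
import pandas as pd

df = pd.read_parquet("data_shard.parquet")  # hypothetical file name

mask = (
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.25)       # drop symbol-heavy files
    & (df["max_line_length"] < 1000)         # drop minified/generated lines
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)  # drop highly repetitive files
)
filtered = df[mask]
print(len(filtered), "of", len(df), "rows kept")
print(filtered[["max_stars_repo_name", "size", "avg_line_length"]].head())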
e5137ca9a23bdb8d2e99e6fe8d556b4318b8b2ca
9,531
py
Python
components/fatfs/fatfsgen_utils/fs_object.py
iPlon-org/esp-idf
a5227db2a75102ca1a17860188c3c352a529a01b
[ "Apache-2.0" ]
5
2021-11-22T06:47:54.000Z
2022-01-04T06:58:43.000Z
components/fatfs/fatfsgen_utils/fs_object.py
iPlon-org/esp-idf
a5227db2a75102ca1a17860188c3c352a529a01b
[ "Apache-2.0" ]
null
null
null
components/fatfs/fatfsgen_utils/fs_object.py
iPlon-org/esp-idf
a5227db2a75102ca1a17860188c3c352a529a01b
[ "Apache-2.0" ]
2
2022-01-05T05:09:13.000Z
2022-02-09T22:32:54.000Z
# SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0

import os
from typing import List, Optional, Tuple

from .entry import Entry
from .exceptions import FatalError, WriteDirectoryException
from .fat import FAT, Cluster
from .fatfs_state import FATFSState
from .utils import required_clusters_count, split_content_into_sectors, split_to_name_and_extension


class File:
    """
    The class File provides API to write into the files. It represents file in the FS.
    """
    ATTR_ARCHIVE = 0x20
    ENTITY_TYPE = ATTR_ARCHIVE

    def __init__(self, name: str, fat: FAT, fatfs_state: FATFSState, entry: Entry, extension: str = '') -> None:
        self.name = name
        self.extension = extension
        self.fatfs_state = fatfs_state
        self.fat = fat
        self.size = 0
        self._first_cluster = None
        self._entry = entry

    @property
    def entry(self) -> Entry:
        return self._entry

    @property
    def first_cluster(self) -> Optional[Cluster]:
        return self._first_cluster

    @first_cluster.setter
    def first_cluster(self, value: Cluster) -> None:
        self._first_cluster = value

    def name_equals(self, name: str, extension: str) -> bool:
        return self.name == name and self.extension == extension

    def write(self, content: str) -> None:
        self.entry.update_content_size(len(content))
        # we assume that the correct amount of clusters is allocated
        current_cluster = self._first_cluster
        for content_part in split_content_into_sectors(content, self.fatfs_state.sector_size):
            content_as_list = content_part.encode()
            if current_cluster is None:
                raise FatalError('No free space left!')
            address = current_cluster.cluster_data_address
            self.fatfs_state.binary_image[address: address + len(content_part)] = content_as_list
            current_cluster = current_cluster.next_cluster


class Directory:
    """
    The Directory class provides API to add files and directories into the directory
    and to find the file according to path and write it.
    """
    ATTR_DIRECTORY = 0x10
    ATTR_ARCHIVE = 0x20
    ENTITY_TYPE = ATTR_DIRECTORY

    def __init__(self, name, fat, fatfs_state, entry=None, cluster=None, size=None, extension='', parent=None):
        # type: (str, FAT, FATFSState, Optional[Entry], Cluster, Optional[int], str, Directory) -> None
        self.name = name
        self.fatfs_state = fatfs_state
        self.extension = extension
        self.fat = fat
        self.size = size or self.fatfs_state.sector_size
        # if directory is root its parent is itself
        self.parent: Directory = parent or self
        self._first_cluster = cluster
        # entries will be initialized after the cluster allocation
        self.entries: List[Entry] = []
        self.entities = []  # type: ignore
        self._entry = entry  # currently not in use (will use later for e.g. modification time, etc.)

    @property
    def is_root(self) -> bool:
        return self.parent is self

    @property
    def first_cluster(self) -> Cluster:
        return self._first_cluster

    @first_cluster.setter
    def first_cluster(self, value: Cluster) -> None:
        self._first_cluster = value

    def name_equals(self, name: str, extension: str) -> bool:
        return self.name == name and self.extension == extension

    def create_entries(self, cluster: Cluster) -> list:
        return [Entry(entry_id=i,
                      parent_dir_entries_address=cluster.cluster_data_address,
                      fatfs_state=self.fatfs_state)
                for i in range(self.size // self.fatfs_state.entry_size)]

    def init_directory(self) -> None:
        self.entries = self.create_entries(self._first_cluster)
        if not self.is_root:
            # the root directory doesn't contain link to itself nor the parent
            free_entry1 = self.find_free_entry() or self.chain_directory()
            free_entry1.allocate_entry(first_cluster_id=self.first_cluster.id,
                                       entity_name='.',
                                       entity_extension='',
                                       entity_type=self.ENTITY_TYPE)
            self.first_cluster = self._first_cluster
            free_entry2 = self.find_free_entry() or self.chain_directory()
            free_entry2.allocate_entry(first_cluster_id=self.parent.first_cluster.id,
                                       entity_name='..',
                                       entity_extension='',
                                       entity_type=self.parent.ENTITY_TYPE)
            self.parent.first_cluster = self.parent.first_cluster

    def lookup_entity(self, object_name: str, extension: str):  # type: ignore
        for entity in self.entities:
            if entity.name == object_name and entity.extension == extension:
                return entity
        return None

    def recursive_search(self, path_as_list, current_dir):  # type: ignore
        name, extension = split_to_name_and_extension(path_as_list[0])
        next_obj = current_dir.lookup_entity(name, extension)
        if next_obj is None:
            raise FileNotFoundError('No such file or directory!')
        if len(path_as_list) == 1 and next_obj.name_equals(name, extension):
            return next_obj
        return self.recursive_search(path_as_list[1:], next_obj)

    def find_free_entry(self) -> Optional[Entry]:
        for entry in self.entries:
            if entry.is_empty:
                return entry
        return None

    def _extend_directory(self) -> None:
        current = self.first_cluster
        while current.next_cluster is not None:
            current = current.next_cluster
        new_cluster = self.fat.find_free_cluster()
        current.set_in_fat(new_cluster.id)
        current.next_cluster = new_cluster
        self.entries += self.create_entries(new_cluster)

    def chain_directory(self) -> Entry:
        self._extend_directory()
        free_entry = self.find_free_entry()
        if free_entry is None:
            raise FatalError('No more space left!')
        return free_entry

    def allocate_object(self, name, entity_type, path_from_root=None, extension=''):
        # type: (str, int, Optional[List[str]], str) -> Tuple[Cluster, Entry, Directory]
        """
        Method finds the target directory in the path and allocates cluster
        (both the record in FAT and cluster in the data region)
        and entry in the specified directory
        """
        free_cluster = self.fat.find_free_cluster()
        target_dir = self if not path_from_root else self.recursive_search(path_from_root, self)
        free_entry = target_dir.find_free_entry() or target_dir.chain_directory()
        free_entry.allocate_entry(first_cluster_id=free_cluster.id,
                                  entity_name=name,
                                  entity_extension=extension,
                                  entity_type=entity_type)
        return free_cluster, free_entry, target_dir

    def new_file(self, name: str, extension: str, path_from_root: Optional[List[str]]) -> None:
        free_cluster, free_entry, target_dir = self.allocate_object(name=name,
                                                                    extension=extension,
                                                                    entity_type=Directory.ATTR_ARCHIVE,
                                                                    path_from_root=path_from_root)
        file = File(name, fat=self.fat, extension=extension, fatfs_state=self.fatfs_state, entry=free_entry)
        file.first_cluster = free_cluster
        target_dir.entities.append(file)

    def new_directory(self, name, parent, path_from_root):
        # type: (str, Directory, Optional[List[str]]) -> None
        free_cluster, free_entry, target_dir = self.allocate_object(name=name,
                                                                    entity_type=Directory.ATTR_DIRECTORY,
                                                                    path_from_root=path_from_root)
        directory = Directory(name=name, fat=self.fat, parent=parent, fatfs_state=self.fatfs_state, entry=free_entry)
        directory.first_cluster = free_cluster
        directory.init_directory()
        target_dir.entities.append(directory)

    def write_to_file(self, path: List[str], content: str) -> None:
        """
        Writes to file existing in the directory structure.

        :param path: path split into the list
        :param content: content as a string to be written into a file
        :returns: None
        :raises WriteDirectoryException: raised is the target object for writing is a directory
        """
        entity_to_write = self.recursive_search(path, self)
        if isinstance(entity_to_write, File):
            clusters_cnt = required_clusters_count(cluster_size=self.fatfs_state.sector_size, content=content)
            self.fat.allocate_chain(entity_to_write.first_cluster, clusters_cnt)
            entity_to_write.write(content)
        else:
            raise WriteDirectoryException(f'`{os.path.join(*path)}` is a directory!')
42.172566
117
0.618193
1,129
9,531
4.977857
0.159433
0.057651
0.034164
0.013523
0.281139
0.21637
0.140569
0.140569
0.127046
0.112456
0
0.003474
0.305319
9,531
225
118
42.36
0.845341
0.135663
0
0.234568
0
0
0.013091
0.002841
0
0
0.001482
0
0
1
0.135802
false
0
0.04321
0.04321
0.314815
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
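The surface statistics in the row above (size, avg_line_length, max_line_length, alphanum_fraction) are derived directly from content. A minimal sketch of how such values can be recomputed, assuming the conventional definitions (size in UTF-8 bytes, line lengths in characters, alphanum_fraction as the share of alphanumeric characters); the original pipeline's exact counting rules may differ slightly:

# Sketch: recompute the basic per-file statistics from a file's text.
# Assumes the conventional definitions of these columns; the dataset's
# own extraction code may count characters or lines slightly differently.
def basic_stats(content: str) -> dict:
    lines = content.splitlines()
    return {
        "size": len(content.encode("utf-8")),  # bytes, like the `size` column
        "avg_line_length": sum(len(l) for l in lines) / max(len(lines), 1),
        "max_line_length": max((len(l) for l in lines), default=0),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

print(basic_stats("import os\nprint(os.getcwd())\n"))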
e513cf10b1e10610d52f4966327c7e9e04459ede
385
py
Python
titanic/migrations/0002_passenger_name.py
azharmunir43/insightish
28c13b87ca1678c27de902b9633af4804d4e77cc
[ "Apache-2.0" ]
null
null
null
titanic/migrations/0002_passenger_name.py
azharmunir43/insightish
28c13b87ca1678c27de902b9633af4804d4e77cc
[ "Apache-2.0" ]
null
null
null
titanic/migrations/0002_passenger_name.py
azharmunir43/insightish
28c13b87ca1678c27de902b9633af4804d4e77cc
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.0 on 2018-03-07 12:12

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('titanic', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='passenger',
            name='name',
            field=models.CharField(default='', max_length=200),
        ),
    ]
20.263158
63
0.587013
40
385
5.575
0.8
0
0
0
0
0
0
0
0
0
0
0.076923
0.290909
385
18
64
21.388889
0.739927
0.111688
0
0
1
0
0.094118
0
0
0
0
0
0
1
0
false
0.083333
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
e514935f4c1b7392f48ec35e4356650b174def56
6,625
py
Python
addons14/calendar_base_booking/models/bookable_mixin.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
1
2021-06-10T14:59:13.000Z
2021-06-10T14:59:13.000Z
addons14/calendar_base_booking/models/bookable_mixin.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
null
null
null
addons14/calendar_base_booking/models/bookable_mixin.py
odoochain/addons_oca
55d456d798aebe16e49b4a6070765f206a8885ca
[ "MIT" ]
1
2021-04-09T09:44:44.000Z
2021-04-09T09:44:44.000Z
# Copyright 2020 Akretion (http://www.akretion.com).
# @author Sébastien BEAU <sebastien.beau@akretion.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).

from collections import defaultdict

from dateutil.relativedelta import relativedelta

from odoo import _, fields, models
from odoo.exceptions import UserError
from odoo.osv import expression

# Concept
# open_slot is the range of time where the ressource can be book
# available_slot is the range of time where the ressource is available for booking
# booked_slot is a slot already booked
# bookable_slot is a slot (with a size if slot_duration) that fit into
# an available slot


class BookableMixin(models.AbstractModel):
    _name = "bookable.mixin"
    _description = "Bookable Mixin"

    slot_duration = fields.Float()
    slot_capacity = fields.Integer()

    def _get_slot_duration(self):
        return self.slot_duration

    def _get_slot_capacity(self):
        return self.slot_capacity

    def _get_booked_slot(self, start, stop):
        domain = self._get_domain(start, stop)
        return self.env["calendar.event"].search(
            expression.AND([domain, [("booking_type", "=", "booked")]])
        )

    def _build_timeline_load(self, start, stop):
        timeline = defaultdict(int)
        timeline.update({start: 0, stop: 0})
        for booked_slot in self._get_booked_slot(start, stop):
            if booked_slot.start < start:
                timeline[start] += 1
            else:
                timeline[booked_slot.start] += 1
            if booked_slot.stop < stop:
                timeline[booked_slot.stop] -= 1
        timeline = list(timeline.items())
        timeline.sort()
        return timeline

    def _get_available_slot(self, start, stop):
        load_timeline = self._build_timeline_load(start, stop)
        load = 0
        slots = []
        slot = None
        capacity = self._get_slot_capacity()
        for dt, load_delta in load_timeline:
            load += load_delta
            if not slot and load < capacity:
                slot = [dt, None]
                slots.append(slot)
            else:
                slot[1] = dt
                if load >= capacity:
                    slot = None
        return slots

    def _prepare_bookable_slot(self, open_slot, start, stop):
        # If need you can inject extra information from the open_slot
        return {"start": start, "stop": stop}

    def _build_bookable_slot(self, open_slot, start, stop):
        bookable_slots = []
        # now we have to care about datetime vs string
        delta = self._get_slot_duration()
        while True:
            slot_stop = start + relativedelta(minutes=delta)
            if slot_stop > stop:
                break
            bookable_slots.append(
                self._prepare_bookable_slot(open_slot, start, slot_stop)
            )
            start += relativedelta(minutes=delta)
        return bookable_slots

    def get_open_slot(self, start, stop):
        domain = self._get_domain(start, stop)
        domain = expression.AND([domain, [("booking_type", "=", "bookable")]])
        return self.env["calendar.event"].search(domain, order="start_date")

    def get_bookable_slot(self, start, stop):
        start = fields.Datetime.to_datetime(start)
        stop = fields.Datetime.to_datetime(stop)
        slots = []
        for open_slot in self.get_open_slot(start, stop):
            for slot_start, slot_stop in self._get_available_slot(
                max(open_slot.start, start), min(open_slot.stop, stop)
            ):
                slots += self._build_bookable_slot(open_slot, slot_start, slot_stop)
        return slots

    def _get_domain_for_current_object(self):
        return [
            ("res_model", "=", self._name),
            ("res_id", "=", self.id),
        ]

    def _get_domain(self, start, stop):
        # be carefull we need to search for every slot (bookable and booked)
        # that exist in the range start/stop
        # This mean that we need the slot
        # - started before and finishing in the range
        # - started and finished in the range
        # - started in the range and fisnish after
        # In an other expression it's
        # - all slot that start in the range
        # - all slot that finish in the range
        domain = self._get_domain_for_current_object()
        return expression.AND(
            [
                domain,
                [
                    "|",
                    "&",
                    ("start", ">=", start),
                    ("start", "<", stop),
                    "&",
                    ("stop", ">", start),
                    ("stop", "<=", stop),
                ],
            ]
        )

    def _check_load(self, start, stop):
        load_timeline = self._build_timeline_load(start, stop)
        capacity = self._get_slot_capacity()
        load = 0
        for _dt, load_delta in load_timeline:
            load += load_delta
            if load > capacity:
                raise UserError(_("The slot is not available anymore"))

    def _prepare_booked_slot(self, vals):
        vals.update(
            {
                "res_model_id": self.env["ir.model"]
                .search([("model", "=", self._name)])
                .id,
                "res_id": self.id,
                "booking_type": "booked",
                "start": fields.Datetime.to_datetime(vals["start"]),
                "stop": fields.Datetime.to_datetime(vals["stop"]),
            }
        )
        return vals

    def _check_duration(self, start, stop):
        duration = (stop - start).total_seconds() / 60.0
        if duration != self._get_slot_duration():
            raise UserError(_("The slot duration is not valid"))

    def _check_on_open_slot(self, start, stop):
        domain = self._get_domain_for_current_object()
        domain = expression.AND(
            [
                domain,
                [
                    ("start", "<=", start),
                    ("stop", ">=", stop),
                ],
            ]
        )
        open_slot = self.env["calendar.event"].search(domain)
        if not open_slot:
            raise UserError(_("The slot is not on a bookable zone"))

    def book_slot(self, vals):
        self.ensure_one()
        vals = self._prepare_booked_slot(vals)
        self._check_on_open_slot(vals["start"], vals["stop"])
        self._check_duration(vals["start"], vals["stop"])
        slot = self.env["calendar.event"].create(vals)
        self._check_load(vals["start"], vals["stop"])
        return slot
34.505208
84
0.570717
764
6,625
4.730366
0.205497
0.059768
0.032374
0.02352
0.306586
0.226065
0.144162
0.111787
0.111787
0.079137
0
0.003801
0.324981
6,625
191
85
34.685864
0.804338
0.135849
0
0.180556
0
0
0.068724
0
0
0
0
0
0
1
0.111111
false
0
0.034722
0.027778
0.270833
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e514aa24f7ecd44a5165bc096f98b796fc4f0c6f
2,432
py
Python
mc/helpers.py
sprout42/binja-i8086
a0131f3c9a57572ae286029cd2c35ef40a7511d7
[ "0BSD" ]
13
2018-08-21T09:58:04.000Z
2021-08-21T16:40:23.000Z
mc/helpers.py
sprout42/binja-i8086
a0131f3c9a57572ae286029cd2c35ef40a7511d7
[ "0BSD" ]
1
2019-07-04T20:11:53.000Z
2019-09-04T19:52:51.000Z
mc/helpers.py
sprout42/binja-i8086
a0131f3c9a57572ae286029cd2c35ef40a7511d7
[ "0BSD" ]
1
2022-02-04T03:22:47.000Z
2022-02-04T03:22:47.000Z
from binaryninja import InstructionTextToken
from binaryninja.enums import InstructionTextTokenType

__all__ = ['fmt_dec', 'fmt_dec_sign', 'fmt_hex', 'fmt_hex2', 'fmt_hex4', 'fmt_hexW', 'fmt_hex_sign']
__all__ += ['fmt_imm', 'fmt_imm_sign', 'fmt_disp', 'fmt_code_rel', 'fmt_code_abs']
__all__ += ['token', 'asm']


def fmt_dec(value):
    return "{:d}".format(value)

def fmt_dec_sign(value):
    return "{:+d}".format(value)

def fmt_hex(value):
    return "{:#x}".format(value)

def fmt_hex2(value):
    return "{:#02x}".format(value)

def fmt_hex4(value):
    return "{:#04x}".format(value)

def fmt_hexW(value, width):
    if width == 1:
        return fmt_hex2(value)
    elif width == 2:
        return fmt_hex4(value)
    else:
        raise ValueError('Invalid width {}'.format(width))

def fmt_hex_sign(value):
    return "{:+#x}".format(value)

def fmt_imm(value):
    if value < 256:
        return fmt_dec(value)
    else:
        return fmt_hex(value)

def fmt_imm_sign(value):
    if abs(value) < 256:
        return fmt_dec_sign(value)
    else:
        return fmt_hex_sign(value)

def fmt_disp(value):
    return fmt_hex(value)

def fmt_code_abs(value):
    return fmt_hex4(value)

def fmt_code_rel(value):
    return fmt_hex_sign(value)

def token(kind, text, *data):
    if kind == 'opcode':
        tokenType = InstructionTextTokenType.OpcodeToken
    elif kind == 'opsep':
        tokenType = InstructionTextTokenType.OperandSeparatorToken
    elif kind == 'instr':
        tokenType = InstructionTextTokenType.InstructionToken
    elif kind == 'text':
        tokenType = InstructionTextTokenType.TextToken
    elif kind == 'reg':
        tokenType = InstructionTextTokenType.RegisterToken
    elif kind == 'int':
        tokenType = InstructionTextTokenType.IntegerToken
    elif kind == 'addr':
        tokenType = InstructionTextTokenType.PossibleAddressToken
    elif kind == 'codeRelAddr':
        tokenType = InstructionTextTokenType.CodeRelativeAddressToken
    elif kind == 'beginMem':
        tokenType = InstructionTextTokenType.BeginMemoryOperandToken
    elif kind == 'endMem':
        tokenType = InstructionTextTokenType.EndMemoryOperandToken
    else:
        raise ValueError('Invalid token kind {}'.format(kind))
    return InstructionTextToken(tokenType, text, *data)

def asm(*parts):
    tokens = []
    for part in parts:
        tokens.append(token(*part))
    return tokens
27.636364
82
0.672286
274
2,432
5.755474
0.248175
0.045656
0.069753
0.06468
0.169943
0.133164
0.073557
0
0
0
0
0.009875
0.208882
2,432
87
83
27.954023
0.809771
0
0
0.142857
0
0
0.101563
0
0
0
0
0
0
1
0.2
false
0
0.028571
0.128571
0.471429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
e516cc75de61891aa46b50bf8464eedef233e194
148
py
Python
beerlists/__init__.py
dmofot/beerlists
deb2ab2416b63a8cb9fe9115b6650fdaa7ba36ca
[ "MIT" ]
null
null
null
beerlists/__init__.py
dmofot/beerlists
deb2ab2416b63a8cb9fe9115b6650fdaa7ba36ca
[ "MIT" ]
2
2017-06-21T17:33:24.000Z
2021-11-15T17:46:49.000Z
beerlists/__init__.py
dmofot/beerlists
deb2ab2416b63a8cb9fe9115b6650fdaa7ba36ca
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

"""Top-level package for beerlists."""

__author__ = """David Todd"""
__email__ = 'dmofot@gmail.com'
__version__ = '0.9.0'
18.5
38
0.628378
19
148
4.263158
0.947368
0
0
0
0
0
0
0
0
0
0
0.031496
0.141892
148
7
39
21.142857
0.606299
0.371622
0
0
0
0
0.356322
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e517fa480acd67dfee5f3aaa95a82cf7997e2c8a
6,551
py
Python
layers/modules/precision_loss.py
laycoding/ssd.pytorch
6b9263d9d59e348398335dc91d59af658f2e8d35
[ "MIT" ]
null
null
null
layers/modules/precision_loss.py
laycoding/ssd.pytorch
6b9263d9d59e348398335dc91d59af658f2e8d35
[ "MIT" ]
null
null
null
layers/modules/precision_loss.py
laycoding/ssd.pytorch
6b9263d9d59e348398335dc91d59af658f2e8d35
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from data import coco as cfg
from ..box_utils import match, log_sum_exp, decode, nms


class PrecisionLoss(nn.Module):
    """SSD Weighted Loss Function
    Compute Targets:
        1) Produce Confidence Target Indices by matching ground truth boxes
           with (default) 'priorboxes' that have jaccard index > threshold parameter
           (default threshold: 0.5).
        2) Produce localization target by 'encoding' variance into offsets of ground
           truth boxes and their matched 'priorboxes'.
        3) Hard negative mining to filter the excessive number of negative examples
           that comes with using a large number of default bounding boxes.
           (default negative:positive ratio 3:1)
    Objective Loss:
        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
        weighted by α which is set to 1 by cross val.
        Args:
            c: class confidences,
            l: predicted boxes,
            g: ground truth boxes
            N: number of matched default boxes
        See: https://arxiv.org/pdf/1512.02325.pdf for more details.
    """

    def __init__(self, num_classes, overlap_thresh, prior_for_matching,
                 bkg_label, top_k, encode_target, nms_thresh, conf_thresh,
                 use_gpu=True):
        super(PrecisionLoss, self).__init__()
        self.use_gpu = use_gpu
        self.num_classes = num_classes
        self.threshold = overlap_thresh
        self.background_label = bkg_label
        self.encode_target = encode_target
        self.use_prior_for_matching = prior_for_matching
        self.variance = cfg['variance']
        self.top_k = top_k
        if nms_thresh <= 0:
            raise ValueError('nms_threshold must be non negative.')
        self.nms_thresh = nms_thresh
        self.softmax = nn.Softmax(dim=-1)
        self.conf_thresh = conf_thresh

    def forward(self, predictions, targets):
        """Multibox Loss
        Args:
            predictions (tuple): A tuple containing loc preds, conf preds,
            and prior boxes from SSD net.
                conf shape: torch.size(batch_size,num_priors,num_classes)
                loc shape: torch.size(batch_size,num_priors,4)
                priors shape: torch.size(num_priors,4)

            targets (tensor): Ground truth boxes and labels for a batch,
                shape: [batch_size,num_objs,5] (last idx is the label).
        """
        loc_data, conf_data, priors = predictions
        # torch.save(loc_data, 'inter/loc_data.pt')
        # torch.save(conf_data, 'inter/conf_data.pt')
        # torch.save(priors, 'inter/priors.pt')
        # torch.save(targets, 'inter/targets.pt')
        num = loc_data.size(0)
        priors = priors[:loc_data.size(1), :]  # confused here, why stuck at loc_data size 1
        num_priors = (priors.size(0))
        # prior_data = priors.view(1, num_priors, 4)
        # print(prior_data.size())
        num_classes = self.num_classes

        # match priors (default boxes) and ground truth boxes
        loc_t = torch.Tensor(num, num_priors, 4)  # [num, num_priors, 4]
        conf_t = torch.LongTensor(num, num_priors)  # [num_priors] top class label for each prior
        for idx in range(num):
            truths = targets[idx][:, :-1].data
            labels = targets[idx][:, -1].data
            defaults = priors.data
            match(self.threshold, truths, defaults, self.variance, labels, loc_t, conf_t, idx)
        if self.use_gpu:
            loc_t = loc_t.cuda()
            conf_t = conf_t.cuda()
        # wrap targets
        loc_t = Variable(loc_t, requires_grad=False)
        conf_t = Variable(conf_t, requires_grad=False)

        conf_preds = self.softmax(conf_data.view(num, num_priors, self.num_classes))
        # print(conf_preds.max()) 0.98
        conf_preds_trans = conf_preds.transpose(2, 1)  # [num, num_classes, num_priors]
        conf_p = torch.zeros(num, num_priors, num_classes).cuda()  # [num, num_priors, num_classes]
        loc_p = torch.zeros(num, num_priors, 4).cuda()
        # Decode predictions into bboxes
        for i in range(num):
            decoded_boxes = decode(loc_data[i], priors, self.variance)
            # For each class, perform nms
            conf_scores = conf_preds_trans[i].clone()
            for cl in range(1, self.num_classes):
                c_mask = conf_scores[cl].gt(self.conf_thresh)
                scores = conf_scores[cl][c_mask]
                if scores.size(0) == 0:
                    continue  # fliter low conf predictions
                l_mask = c_mask.unsqueeze(1).expand_as(decoded_boxes)
                boxes = Variable(decoded_boxes[l_mask].view(-1, 4), requires_grad=False)
                # idx of highest scoring and non-overlapping boxes per class
                # boxes [num_priors(has been flitered), 4] location preds for i'th image
                ids, count = nms(boxes, scores, self.nms_thresh, self.top_k)
                conf_p[i, c_mask, cl] = conf_preds[i, c_mask, cl]  # [num, num_priors, num_classes]
                loc_p[i, l_mask[:, 0].nonzero()[ids][:count]] = loc_data[i, l_mask[:, 0].nonzero()[ids][:count]]  # [num, num_priors, 4]

        # check each result if match the ground truth
        effect_conf = conf_p.sum(2) != 0
        effect_conf_idx = effect_conf.unsqueeze(2).expand_as(conf_p)
        effect_loc_idx = effect_conf.unsqueeze(2).expand_as(loc_t)
        # [num, num_priors, num_classes] binary metric, thousands will be True in million
        # torch.save(conf_preds, 'inter/conf_preds.pt')
        # torch.save(effect_conf, 'inter/effect_conf.pt')
        # torch.save(effect_loc, 'inter/effect_loc.pt')
        # torch.save(conf_p, 'inter/conf_p.pt')
        # torch.save(conf_t, 'inter/conf_t.pt')
        # torch.save(effect_conf, 'inter/effect_conf.pt')
        loss_c = F.cross_entropy(conf_p[effect_conf_idx].view(-1, num_classes),
                                 conf_t[effect_conf].view(-1), size_average=False)
        loss_l = F.smooth_l1_loss(loc_p[effect_loc_idx], loc_t[effect_loc_idx], size_average=False)
        # conf_p [num*num_p, num_classes] conf_t [num*num_p, 1(label)]
        N = effect_conf_idx.data.sum()
        loss_l /= N.float()
        loss_c /= N.float()
        return loss_l, loss_c
47.471014
133
0.615479
909
6,551
4.226623
0.247525
0.042166
0.031234
0.024727
0.112441
0.089276
0.077564
0.019781
0.019781
0
0
0.012343
0.282705
6,551
137
134
47.817518
0.805278
0.412914
0
0
0
0
0.011778
0
0
0
0
0
0
1
0.028571
false
0
0.085714
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e51832dae171e962185a92eae9c69298fd915ba4
545
py
Python
BufferOverflow3/p1.py
Mitsububunu/picoCTF_Code
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
[ "MIT" ]
null
null
null
BufferOverflow3/p1.py
Mitsububunu/picoCTF_Code
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
[ "MIT" ]
null
null
null
BufferOverflow3/p1.py
Mitsububunu/picoCTF_Code
69bc2fda655f68d619d559d8ebcac3f3002e1e9b
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from pwn import *

debug = 0
user = 'mitsububunu'
pw = 'password'

if debug:
    p = process('./vuln')
else:
    s = ssh(host = '2018shell.picoctf.com', user=user, password=pw)
    s.set_working_directory('/problems/buffer-overflow-3_1_2e6726e5326a80f8f5a9c350284e6c7f')
    p = s.process('./vuln')

binary = ELF('./vuln')

canary = '4xV,'

print p.recvuntil('>')
p.sendline('300')
print p.recvuntil('>')
p.sendline('AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' + canary + 'AAAABBBBCCCCDDDD' + p32(binary.symbols['win']))
print p.recvall()
20.961538
105
0.695413
69
545
5.434783
0.681159
0.048
0.08
0.085333
0.128
0
0
0
0
0
0
0.073222
0.122936
545
26
106
20.961538
0.711297
0.036697
0
0.117647
0
0
0.342857
0.219048
0
0
0
0
0
0
null
null
0.117647
0.058824
null
null
0.176471
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
1
0
0
0
0
0
2
e518de7808c648780c618b4d314bc02b24cf5ad5
197
py
Python
urbanairship/reports/__init__.py
kaellis/airship-python-library
d583138a1c12442020a3ae93d35817624ed6afb5
[ "Apache-2.0" ]
null
null
null
urbanairship/reports/__init__.py
kaellis/airship-python-library
d583138a1c12442020a3ae93d35817624ed6afb5
[ "Apache-2.0" ]
null
null
null
urbanairship/reports/__init__.py
kaellis/airship-python-library
d583138a1c12442020a3ae93d35817624ed6afb5
[ "Apache-2.0" ]
null
null
null
from .reports import (
    IndividualResponseStats,
    ResponseList,
    DevicesReport,
    OptInList,
    OptOutList,
    PushList,
    ResponseReportList,
    AppOpensList,
    TimeInAppList,
)
16.416667
28
0.685279
12
197
11.25
1
0
0
0
0
0
0
0
0
0
0
0
0.253807
197
11
29
17.909091
0.918367
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.090909
0
0.090909
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
3
e51b72feb934b5d22f70cc68b5686a57177d55f4
18,195
py
Python
umbrella-sampling_1D_reweight_bs.py
cbatton/umbrella_sampling_pymbar
9133c0079afe916da5f11051052828b67288efb3
[ "MIT" ]
null
null
null
umbrella-sampling_1D_reweight_bs.py
cbatton/umbrella_sampling_pymbar
9133c0079afe916da5f11051052828b67288efb3
[ "MIT" ]
null
null
null
umbrella-sampling_1D_reweight_bs.py
cbatton/umbrella_sampling_pymbar
9133c0079afe916da5f11051052828b67288efb3
[ "MIT" ]
null
null
null
# Example illustrating the application of MBAR to compute a 1D PMF from an umbrella sampling simulation.
#
# The data represents an umbrella sampling simulation for the magnetization of the Ising model
# Adapted from one of the pymbar example scripts for 1D PMFs

import numpy as np  # numerical array library
import pymbar  # multistate Bennett acceptance ratio
import os
from pymbar import timeseries  # timeseries analysis
from pymbar.utils import logsumexp
from glob import glob
from matplotlib.ticker import AutoMinorLocator
from scipy.optimize import brentq
import scipy.signal as signal
from scipy.signal import savgol_filter

kB = 1.0  # Boltzmann constant

# Parameters
temperature = 3.0  # assume a single temperature -- can be overridden with data from param file
N_max = 50000  # maximum number of snapshots/simulation
N_max_ref = 50000  # maximum number of snapshots/simulation
folders_top = glob("*/")  # total number of temperatures
folders_1 = []
curdir = os.getcwd()
for i in range(len(folders_top)):
    os.chdir(curdir+'/'+folders_top[i])
    folders_bottom = glob("*/")
    for j in range(len(folders_bottom)):
        os.chdir(curdir+'/'+folders_top[i]+'/'+folders_bottom[j])
        folders_1.append(os.getcwd())
os.chdir(curdir)
K = len(folders_1)
T_k = np.ones(K,float)*temperature  # inital temperatures are all equal
beta = 1.0 / (kB * temperature)  # inverse temperature of simulations
mag_min = -1580  # min for magnetization
mag_max = 1580  # max for magnetization
mag_nbins = 395  # number of bins for magnetization

# Need to delete ext terms
# Allocate storage for simulation data
N_max = 50000
N_k = np.zeros([K], np.int32)  # N_k[k] is the number of snapshots from umbrella simulation k
K_k = np.zeros([K], np.float64)  # K_1_k[k] is the spring constant 1 for umbrella simulation k
mu_k = np.zeros([K], np.float64)  # mu_k[k] is the chemical potential for umbrella simulation k
mag0_k = np.zeros([K], np.float64)  # mag0_k[k] is the spring center location for umbrella simulation k
mag_kn = np.zeros([K,N_max], np.float64)  # mag_kn[k,n] is the magnetization for snapshot n from umbrella simulation k
u_kn = np.zeros([K,N_max], np.float64)  # u_kn[k,n] is the reduced potential energy without umbrella restraints of snapshot n of umbrella simulation k
g_k = np.zeros([K],np.float32);

# Read in umbrella spring constants and centers.
# Go through directories and read
umbrella_index = 0
for i in range(K):
    infile = open(folders_1[i]+'/param')
    for line in infile:
        line_strip = line.strip()
        if line_strip.startswith('harmon'):
            print(line_strip)
            line_split = line_strip.split()[1]
            K_k[i] = float(line_split)
        if line_strip.startswith('window'):
            print(line_strip)
            line_split = line_strip.split()[1]
            mag0_k[i] = float(line_split)
        if line_strip.startswith('T'):
            print(line_strip)
            line_split = line_strip.split()[1]
            T_k[i] = float(line_split)
        if line_strip.startswith('h_external'):
            print(line_strip)
            line_split = line_strip.split()[1]
            mu_k[i] = float(line_split)

beta_k = 1.0/(kB*T_k)  # beta factor for the different temperatures
print(beta_k)
print(mu_k)
if (np.min(T_k) == np.max(T_k)):
    DifferentTemperatures = False  # if all the temperatures are the same, then we don't have to read in energies.

# Read the simulation data
for i in range(K):
    k = i
    string_base = folders_1[i]
    # Read magnetization data.
    filename_mag = string_base+'/mbar_data.txt'
    print("Reading %s..." % filename_mag)
    infile = open(filename_mag, 'r')
    lines = infile.readlines()
    infile.close()
    # Parse data.
    n = 0
    for line in lines:
        tokens = line.split()
        mag = float(tokens[2])  # Magnetization
        u_kn[k,n] = float(tokens[1]) - float(tokens[0]) + mu_k[k]*mag  # reduced potential energy without umbrella restraint and external field
        mag_kn[k,n] = mag
        n += 1
    N_k[k] = n

    # Compute correlation times for potential energy and magnetization
    # timeseries. If the temperatures differ, use energies to determine samples; otherwise, magnetization
    g_k[k] = timeseries.statisticalInefficiency(mag_kn[k,0:N_k[k]])
    print("Correlation time for set %5d is %10.3f" % (k,g_k[k]))
    indices = timeseries.subsampleCorrelatedData(mag_kn[k,0:N_k[k]], g=g_k[k])
    # Subsample data.
    N_k[k] = len(indices)
    u_kn[k,0:N_k[k]] = u_kn[k,indices]
    mag_kn[k,0:N_k[k]] = mag_kn[k,indices]

N_max = np.max(N_k)  # shorten the array size

# At this point, start diverting from the usual path and allow a method that allows us to perform blocking/bootstrapping analysis
mag_n = mag_kn[0,0:N_k[0]]  # mag_n[k] is the magnetization from some simulation snapshot
u_n = u_kn[0,0:N_k[0]]  # u_n[k] is the potential energy from some snapshot that has mag value mag_n[k]
# Now append values
allN = N_k.sum()
for k in range(1,K):
    mag_n = np.append(mag_n, mag_kn[k,0:N_k[k]])
    u_n = np.append(u_n, u_kn[k,0:N_k[k]])

# Bootstrap time
N_bs = 20  # number of bootstrap samples
N_bs_start = 0  # index to start with outputs
np.random.seed(0)
# Some variable to skip output
# mbar_ref = []
mbar_count = 0
for N_ in range(N_bs_start,N_bs_start+N_bs):
    print("Iteration %d" % (N_))
    f_bs = open('mbar_'+str(N_)+'.txt', 'w')
    print("Iteration %d" % (N_), file=f_bs)
    # Select random samples
    g_reduction = 50
    N_red = np.random.randint(allN, size=allN//g_reduction)
    N_red = np.sort(N_red)
    N_k_red = np.zeros([K], np.int32)
    N_cumsum = np.cumsum(N_k)
    N_cumsum = np.hstack((np.array([0]), N_cumsum))
    # Determine N_k_red by binning
    for i in range(K):
        N_bin = (N_cumsum[i] <= N_red[:]) & (N_red[:] < N_cumsum[i+1])
        N_k_red[i] = N_bin.sum()
    u_n_red = u_n[N_red]
    mag_n_red = mag_n[N_red]
    u_kn_red = np.zeros((K, allN//g_reduction))
    for k in range(K):
        # Compute from umbrella center k
        dmag = mag_n_red[:] - mag0_k[k]
        # Compute energy of samples with respect to umbrella potential k
        u_kn_red[k,:] = beta_k[k]*(u_n_red[:] + (K_k[k]/2.0) * (dmag/1575.0)**2 - mu_k[k]*mag_n_red[:])

    # Construct magnetization bins
    print("Binning data...", file=f_bs)
    delta_mag = (mag_max - mag_min) / float(mag_nbins)
    # compute bin centers
    bin_center_i_mag = np.zeros([mag_nbins], np.float64)
    for i in range(mag_nbins):
        bin_center_i_mag[i] = mag_min + delta_mag/2 + delta_mag * i
    # Bin data
    bin_n = np.zeros([allN//g_reduction], np.int64)+mag_nbins+10
    nbins = 0
    bin_counts = list()
    bin_centers = list()  # bin_centers[i] is a tuple that gives the center of bin i
    for j in range(mag_nbins):
        # Determine which configurations lie in this bin
        in_bin = (bin_center_i_mag[j]-delta_mag/2 <= mag_n_red[:]) & (mag_n_red[:] < bin_center_i_mag[j]+delta_mag/2)
        # Count number of configurations in this bin
        bin_count = in_bin.sum()
        if (bin_count > 0):
            # store bin
            bin_centers.append(bin_center_i_mag[j])
            bin_counts.append( bin_count )
            # assign these conformations to the bin index
            bin_n[np.where(in_bin)[0]] = nbins
            # increment number of bins
            nbins += 1
    # Get total number of things that were binned
    bin_counts_np = np.array(bin_counts)
    bin_count_total = bin_counts_np.sum()
    bin_count_ideal = allN
    # Make array with total combinations of bin_center_i_mag and bin_center_i_mag
    bin_center_possible = np.zeros((mag_nbins,1))
    bin_center_empty = np.zeros((mag_nbins,1))
    for i in range(mag_nbins):
        bin_center_possible[i] = bin_center_i_mag[i]
    # Determine empty bins
    for i in range(nbins):
        for k in range(mag_nbins):
            if((bin_centers[i] == bin_center_i_mag[k])):
                bin_center_empty[k] = 1
    print("%d bins were populated:" % nbins, file=f_bs)
    for i in range(nbins):
        print("bin %5d (%6.5f) %12d conformations" % (i, bin_centers[i], bin_counts[i]), file=f_bs)
    print("%d empty bins" % (mag_nbins-nbins), file=f_bs)
    for j in range(mag_nbins):
        if(bin_center_empty[j] == 0):
            print("bin (%6.5f)" % (bin_center_possible[j]), file=f_bs)
    print("%d / %d data used" % (bin_count_total, bin_count_ideal), file=f_bs)

    # Initialize MBAR.
    print("Running MBAR...", file=f_bs)
    if(mbar_count == 0):
        mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10)
        mbar_ref = mbar.f_k
        mbar_count = mbar_count+1
    else:
        mbar = pymbar.MBAR(u_kn_red, N_k_red, verbose = True, relative_tolerance=1e-10, initial_f_k=mbar_ref)

    print('At reweighting step', file=f_bs)
    # Now have weights, time to have some fun reweighting
    u_n_red_original = u_n_red.copy()
    T_targets_low = np.linspace(2.0,3.0,26)
    T_targets_high = np.linspace(3.025, 3.7, 28)
    T_targets = np.hstack((T_targets_low, T_targets_high))
    low_comp_storage = np.zeros(T_targets.shape)
    high_comp_storage = np.zeros(T_targets.shape)
    mu_1_storage = np.zeros(T_targets.shape)
    mu_2_storage = np.zeros(T_targets.shape)
    mu_storage = np.zeros(T_targets.shape)

    # Compute PMF in unbiased potential (in units of kT) at kT = 1
    (f_i, df_i) = mbar.computePMF(u_n_red, bin_n, nbins)
    # Show free energy and uncertainty of each occupied bin relative to lowest free energy
    print("1D PMF", file=f_bs)
    print("", file=f_bs)
    print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
    for i in range(nbins):
        print('%8d %10.8e %8d %10.10e %10.10e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
    # Write out PMF to file
    f_ = open('free_energy_'+str(mag_nbins)+'_original_'+str(N_)+'.txt', 'w')
    print("PMF (in units of kT)", file=f_)
    print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
    for i in range(nbins):
        print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
    f_.close()

    for j in range(len(T_targets)):
        print("Reweighting at temperature "+str(T_targets[j]), file=f_bs)
        # reweight to temperature of interest
        u_n_red = u_n_red_original.copy()
        beta_reweight = 1.0/(kB*T_targets[j])  # beta factor for the different temperatures
        u_n_red = beta_reweight*u_n_red
        # Compute PMF in unbiased potential (in units of kT) at kT = 1
        (f_i_base, df_i_base) = mbar.computePMF(u_n_red, bin_n, nbins)
        mu_low = -1.0
        mu_high = 1.0
        # Now have mu_low and mu_high, use a bounded method to find mu which causes
        # f_i(comp_low) \approx f_i(comp_high)
        # let's use scipy's minimize_scalar solver for this
        # Have to define a function that we want to operate on
        def free_diff_comp(mu, f_i_base, bin_centers, beta_reweight):
            f_i = f_i_base - beta_reweight*mu*bin_centers
            mid_comp = int(3.0*nbins/4.0)
            f_i_low_comp = f_i[0:mid_comp].min()
            f_i_high_comp = f_i[mid_comp:nbins].min()
            return f_i_high_comp-f_i_low_comp
        print("", file=f_bs)
        print("Finding mu_eq_1", file=f_bs)
        # Find minimum
        mu_eq_1 = brentq(free_diff_comp, a=mu_low, b=mu_high, args=(f_i_base, np.array(bin_centers), beta_reweight))
        mu_1_storage[j] = mu_eq_1
        print("mu_eq_1 %17.17e"%(mu_eq_1), file=f_bs)
        print("", file=f_bs)
        # Now output results
        # Reweight to mu_eq
        f_i = f_i_base.copy()
        f_i = f_i - beta_reweight*mu_eq_1*np.array(bin_centers)
        f_i -= f_i.min()
        # Show free energy and uncertainty of each occupied bin relative to lowest free energy
        print("1D PMF with mu_eq_1", file=f_bs)
        print("", file=f_bs)
        print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_bs)
        for i in range(nbins):
            print('%8d %10.8g %8d %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_bs)
        f_ = open('mu_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("%17.17e"%(mu_eq_1), file=f_)
        f_.close()
        # Write out PMF to file
        f_ = open('pmf_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("PMF with mu_eq_1 (in units of kT)", file=f_)
        print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'f'), file=f_)
        for i in range(nbins):
            print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i]), file=f_)
        f_.close()
        # Write out probability to file
        p_i=np.exp(-f_i-logsumexp(-f_i))
        f_ = open('p_i_eq_1_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("PMF with mu_eq_1 (in units of kT)", file=f_)
        print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
        for i in range(nbins):
            print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
        f_.close()

        # Now do it such that areas under peaks are the same
        def free_diff_comp_area(mu, f_i_base, nbins, bin_centers, beta_reweight):
            f_i = f_i_base - beta_reweight*mu*bin_centers
            p_i=np.exp(-f_i-logsumexp(-f_i))
            # Determine mid_comp
            # Filter f_i to determine where to divide peak
            f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
            f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
            rel_max = signal.argrelmax(f_i_filter_2, order=10)
            # print rel_max
            npeak = nbins//2
            if(len(rel_max[0]) == 0):
                npeak = nbins//2
            else:
                npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
            # As bin size is equal for now, can just do naive sum as equivalent to
            # midpoint rule barring a constant factor
            low_area = np.trapz(p_i[0:npeak], x = bin_centers[0:npeak])
            high_area = np.trapz(p_i[npeak:nbins], x = bin_centers[npeak:nbins])
            return high_area-low_area
        print("", file=f_bs)
        print("Finding mu_eq_2", file=f_bs)
        # Find minimum
        mu_eq_2 = brentq(free_diff_comp_area, a=mu_eq_1-0.05, b=mu_high+0.05, args=(f_i_base, nbins, np.array(bin_centers), beta_reweight))
        mu_2_storage[j] = mu_eq_2
        print("mu_eq_2 %17.17e"%(mu_eq_2), file=f_bs)
        print("", file=f_bs)
        # Now output results
        # Reweight to mu_eq
        f_i = f_i_base.copy()
        f_i = f_i - beta_reweight*mu_eq_2*np.array(bin_centers)
        f_i -= f_i.min()
        # Show free energy and uncertainty of each occupied bin relative to lowest free energy
        print("1D PMF with mu_eq_2", file=f_bs)
        print("", file=f_bs)
        print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_bs)
        for i in range(nbins):
            print('%8d %10.8g %8d %10.8e %10.8e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_bs)
        f_ = open('mu_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("%17.17e"%(mu_eq_2), file=f_)
        f_.close()
        # Write out PMF to file
        f_ = open('pmf_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("PMF with mu_eq_2 (in units of kT)", file=f_)
        print("%8s %6s %8s %10s %10s" % ('bin', 'mass', 'N', 'f', 'df'), file=f_)
        for i in range(nbins):
            print('%8d %10.8g %8d %16.16e %16.16e' % (i, bin_centers[i], bin_counts[i], f_i[i], df_i[i]), file=f_)
        f_.close()
        # Get compositions
        p_i=np.exp(-f_i-logsumexp(-f_i))
        f_ = open('p_i_eq_2_'+str(mag_nbins)+'_'+str(T_targets[j])+'_'+str(N_)+'.txt', 'w')
        print("PMF with mu_eq_1 (in units of kT)", file=f_)
        print("%8s %6s %8s %10s" % ('bin', 'mass', 'N', 'p'), file=f_)
        for i in range(nbins):
            print('%8d %10.8g %8d %16.16e' % (i, bin_centers[i], bin_counts[i], p_i[i]), file=f_)
        f_.close()
        # Determine mid_comp
        f_i_filter = savgol_filter(f_i, window_length=41, polyorder=3)
        f_i_filter_2 = savgol_filter(f_i_filter, window_length=41, polyorder=3)
        rel_max = signal.argrelmax(f_i_filter_2, order=10)
        npeak = nbins//2
        if(len(rel_max[0]) == 0):
            npeak = nbins//2
            print('Weird divergence at %8d' % (j), file=f_bs)
        else:
            npeak = signal.argrelmax(f_i_filter_2, order=10)[0].max()
        bin_centers_np = np.array(bin_centers)
        p_i_mass = bin_centers_np*p_i
        mass_avg = p_i_mass.sum()
        bin_closest = np.abs(bin_centers-mass_avg)
        print("mass_avg %17.17e"%(mass_avg))
        # Now get entry that is closest to value
        mid_comp = np.argmin(bin_closest)
        mid_comp = npeak
        # Take
        low_comp = p_i_mass[0:mid_comp].sum()/p_i[0:mid_comp].sum()
        high_comp = p_i_mass[mid_comp:nbins].sum()/p_i[mid_comp:nbins].sum()
        print(low_comp, high_comp, T_targets[j])
        low_comp_storage[j] = low_comp/1575.0
        high_comp_storage[j] = high_comp/1575.0

    f_ = open('composition_reweight_'+str(N_)+'.txt', 'w')
    print('T phi_low phi_high', end=' ', file=f_)
    print("%10s %10s %10s" % ('T', 'phi_low', 'phi_high'), file=f_)
    for i in range(len(T_targets)):
        print('%16.16e %16.16e %16.16e' % (T_targets[i], low_comp_storage[i], high_comp_storage[i]), file=f_)
    f_.close()
    f_ = open('mu_reweight'+str(N_)+'.txt', 'w')
    print("%10s %10s %10s" % ('T', 'mu_peaks', 'mu_area'), file=f_)
    for i in range(len(T_targets)):
        print('%16.16e %16.16e %16.16e' % (T_targets[i], mu_1_storage[i], mu_2_storage[i]), file=f_)
    f_.close()
    f_bs.close()
42.912736
149
0.62061
3,040
18,195
3.468092
0.129605
0.026558
0.020582
0.01878
0.447975
0.399886
0.355971
0.319738
0.288058
0.257043
0
0.032523
0.242924
18,195
423
150
43.014184
0.732849
0.216928
0
0.277228
0
0
0.0947
0.001484
0
0
0
0
0
1
0.006601
false
0
0.033003
0
0.046205
0.211221
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e51be0d0735b03c3a1c680ed0f0bc082a49173dd
241
py
Python
dictionary.py
Zackyy1/ehitusepood
130157f36be81523e65879e7fc08940c55a4dfa7
[ "MIT" ]
null
null
null
dictionary.py
Zackyy1/ehitusepood
130157f36be81523e65879e7fc08940c55a4dfa7
[ "MIT" ]
null
null
null
dictionary.py
Zackyy1/ehitusepood
130157f36be81523e65879e7fc08940c55a4dfa7
[ "MIT" ]
null
null
null
texts = {
    "browse":"🗂️ Browse categories",
    "orders":"📥 My orders",
    "cart":"🛒 My cart",
    "settings":"⚙ Settings",
    "contact":"📞 Contact us",
    "home":"🏠 Home",
    "contact1":"{Store_name} - {store_phone}",
}
20.083333
47
0.497925
28
241
4.464286
0.714286
0
0
0
0
0
0
0
0
0
0
0.00565
0.26556
241
12
48
20.083333
0.661017
0
0
0
0
0
0.601732
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e51c14edee81c07664d02c94e1ae537e4d6867aa
396
py
Python
django_coverage_plugin/__init__.py
joshuadavidthomas/django_coverage_plugin
c4ec0691906dc0923c494efc9c9236d3aa21be73
[ "Apache-2.0" ]
172
2015-01-03T20:26:42.000Z
2022-02-18T20:38:59.000Z
django_coverage_plugin/__init__.py
joshuadavidthomas/django_coverage_plugin
c4ec0691906dc0923c494efc9c9236d3aa21be73
[ "Apache-2.0" ]
71
2015-01-17T19:22:53.000Z
2022-02-03T09:09:35.000Z
django_coverage_plugin/__init__.py
joshuadavidthomas/django_coverage_plugin
c4ec0691906dc0923c494efc9c9236d3aa21be73
[ "Apache-2.0" ]
31
2015-01-18T14:32:46.000Z
2022-02-18T20:39:07.000Z
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/django_coverage_plugin/blob/master/NOTICE.txt

"""Django Template Coverage Plugin"""

from .plugin import DjangoTemplatePluginException  # noqa
from .plugin import DjangoTemplatePlugin


def coverage_init(reg, options):
    reg.add_file_tracer(DjangoTemplatePlugin(options))
33
86
0.800505
50
396
6.24
0.74
0.089744
0.102564
0
0
0
0
0
0
0
0
0.005587
0.09596
396
11
87
36
0.865922
0.505051
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
3
e51e619a4db0befa87c8923ad6fae00488c53cd0
3,006
py
Python
src/nucleotide/component/windows/translator.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
1
2020-09-04T13:00:04.000Z
2020-09-04T13:00:04.000Z
src/nucleotide/component/windows/translator.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
1
2020-04-10T01:52:32.000Z
2020-04-10T09:11:29.000Z
src/nucleotide/component/windows/translator.py
dmilos/nucleotide
aad5d60508c9e4baf4888069284f2cb5c9fd7c55
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python2

# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import platform
import os

import nucleotide
import nucleotide.component
import nucleotide.component.windows

import nucleotide.component.windows._common
import nucleotide.component.windows._common.translator

import nucleotide.component.windows.mingw
import nucleotide.component.windows.mingw.translator

import nucleotide.component.windows.msvc
import nucleotide.component.windows.msvc.translator

import nucleotide.component.windows.cygwingcc
import nucleotide.component.windows.cygwingcc.translator


## Detect MinGW on Windows
class Translator:
    m_list = []

    def __init__(self):
        self.m_list = []
        if( False == Translator._detect() ):
            return
        I__common = nucleotide.component.windows._common.translator.Translator()
        self.m_list += I__common.get()
        I_mingw = nucleotide.component.windows.mingw.translator.Translator()
        self.m_list += I_mingw.get()
        if( 'Windows' == platform.system() ):
            I_msvc = nucleotide.component.windows.msvc.translator.Translator()
            self.m_list += I_msvc.get()
        if( 'CYGWIN_NT' in platform.system() ):
            I_cygwin = nucleotide.component.windows.mingw.translator.Translator()
            self.m_list += I_cygwin.get()

    def get(self):
        return self.m_list

    def check(self):
        pass

    @staticmethod
    def extend(P_options):
        if( False == Translator._detect() ):
            return
        nucleotide.component.windows._common.translator.Translator.extend(P_options)
        nucleotide.component.windows.mingw.translator.Translator.extend(P_options)
        if( 'Windows' == platform.system() ):
            nucleotide.component.windows.msvc.translator.Translator.extend(P_options)
        if( 'CYGWIN_NT' in platform.system() ):
            nucleotide.component.windows.cygwingcc.translator.Translator.extend(P_options)

    @staticmethod
    def _detect():
        if( 'Windows' == platform.system() ):
            #print( "Platform: " + platform.system() )
            return True
        if( 'CYGWIN_NT' in platform.system() ):
            #print( "Platform: " + platform.system() )
            return True
        print( "Unknown Platform: " + platform.system() )
        return False
33.4
91
0.664338
339
3,006
5.784661
0.306785
0.174401
0.225395
0.146864
0.556859
0.302397
0.114227
0.114227
0.062213
0.062213
0
0.003935
0.239188
3,006
89
92
33.775281
0.85352
0.236194
0
0.269231
0
0
0.030151
0
0
0
0
0
0
1
0.096154
false
0.019231
0.269231
0.019231
0.519231
0.019231
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
2
e51e71629e6870db5d4127796afc6d44a91db669
1,511
py
Python
module1-introduction-to-sql/321_assignment_notes.py
Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases
e164db12684286e50a9e585da475ca34692c55d7
[ "MIT" ]
null
null
null
module1-introduction-to-sql/321_assignment_notes.py
Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases
e164db12684286e50a9e585da475ca34692c55d7
[ "MIT" ]
null
null
null
module1-introduction-to-sql/321_assignment_notes.py
Edudeiko/DS-Unit-3-Sprint-2-SQL-and-Databases
e164db12684286e50a9e585da475ca34692c55d7
[ "MIT" ]
null
null
null
import os

import pandas as pd
import sqlite3

CSV_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.csv")
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "buddymove_holidayiq.db")

connection = sqlite3.connect(DB_FILEPATH)

table_name = "reviews2"

df = pd.read_csv(CSV_FILEPATH)
# assigns a column label "id" for the index column
df.index.rename("id", inplace=True)
df.index += 1  # starts ids at 1 instead of 0
print(df.head())

df.to_sql(table_name, con=connection)

cursor = connection.cursor()
cursor.execute(f"SELECT count(distinct id) as review_count FROM {table_name};")
results = cursor.fetchone()
print(results, "RECORDS")

# Other approach
# conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
# data.to_sql('review', conn, if_exists = 'replace')
# curs = conn.cursor()
# query = "SELECT * FROM review"
# results = curs.execute(query).fetchall()
# print("There are", len(results), "rows")

# ----------------------------------------
# (Stretch) What are the average number of reviews for each category?
conn = sqlite3.connect("buddymove_holidayiq.sqlite3")
curs = conn.cursor()
categories = ['Sports', 'Religious', 'Nature', 'Theatre', 'Shopping', 'Picnic']

query = "SELECT * FROM review"
length = len(curs.execute(query).fetchall())

for item in categories:
    query = f"SELECT SUM({item}) FROM review"
    results = curs.execute(query).fetchall()
    print(f'Average number of reviews for {item} column:', round(results[0][0]/length))
30.836735
95
0.698213
206
1,511
5.004854
0.432039
0.023278
0.046557
0.069835
0.331717
0.28322
0.199806
0.199806
0.110572
0.110572
0
0.009063
0.123759
1,511
48
96
31.479167
0.769637
0.291198
0
0
0
0
0.280718
0.068053
0
0
0
0
0
1
0
false
0
0.12
0
0.12
0.12
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e51e8a5a943efe4c5fabe22a092353ca252b4062
971
py
Python
eyesore/decision_graph/compacting/_1_similar_actions_compacter.py
twizmwazin/hacrs
3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a
[ "BSD-2-Clause" ]
2
2019-11-07T02:55:40.000Z
2021-12-30T01:37:43.000Z
eyesore/decision_graph/compacting/_1_similar_actions_compacter.py
twizmwazin/hacrs
3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a
[ "BSD-2-Clause" ]
null
null
null
eyesore/decision_graph/compacting/_1_similar_actions_compacter.py
twizmwazin/hacrs
3c9386b0fa5f5ea6b93b2bc8b3c4eed6abceec6a
[ "BSD-2-Clause" ]
2
2019-09-27T12:01:50.000Z
2019-10-09T21:39:52.000Z
from .. import ActionsNode
from ..visitor import Visitor


class SimilarActionsCompacter(Visitor):
    def _visit_actions_node(self, node, replacements):
        """
        :param node:
        :type node: ActionsNode
        :return:
        """
        compact_successors = replacements[node.successor]
        #import ipdb
        #ipdb.set_trace()
        assert len(compact_successors) < 2, "The {} visitor returned more than one successor for an ActionNode, this is" \
                                            "not allowed. Got: {}".format(self, compact_successors)
        compact_successor = compact_successors[0]

        if isinstance(compact_successor, ActionsNode) and compact_successor.get_action_type() == node.get_action_type():
            node.actions_info = node.actions_info + compact_successor.actions_info
            node.successor = compact_successor.successor
        else:
            node.successor = compact_successor

        return [node]
37.346154
122
0.642636
99
971
6.090909
0.464646
0.159204
0.043118
0.056385
0
0
0
0
0
0
0
0.002837
0.273944
971
25
123
38.84
0.852482
0.07518
0
0
0
0
0.109813
0
0
0
0
0
0.071429
1
0.071429
false
0
0.142857
0
0.357143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e520edd0d04c2e5662e9df5187c8127d43b40f48
7,416
py
Python
boost-hic.py
CellFateNucOrg/Boost-HiC
637299b0ba41f6511015a6249efb150cf5991804
[ "MIT" ]
null
null
null
boost-hic.py
CellFateNucOrg/Boost-HiC
637299b0ba41f6511015a6249efb150cf5991804
[ "MIT" ]
null
null
null
boost-hic.py
CellFateNucOrg/Boost-HiC
637299b0ba41f6511015a6249efb150cf5991804
[ "MIT" ]
null
null
null
#!/usr/bin/python3 -u
import argparse
import logging
import os
import sys

import h5py
import numpy as np
import pandas as pd

# my own toolkit
import HiCutils
import convert
import utils

DEFAULT_OUTPUT_FOLDER = './boosted/'

logging.basicConfig(level=logging.DEBUG)
logging.getLogger("").setLevel(logging.INFO)
logger = logging.getLogger("Boost-HiC")

p = argparse.ArgumentParser()
p.add_argument("operation", default="boost", choices=["boost", "sample"],
               help="Operation to be executed")
p.add_argument("-m", "--matrixfilename", required=True,
               help="contact map stored in tab separated file as : "
                    "bin_i / bin_j / counts_ij Only no zero values are stored. Contact map are symmetric. "
                    "Alternatively, you can provide a cooler format file (.cool), in this case no --bedfilename is needed.")
p.add_argument("-b", "--bedfilename", help="bed file of genomic coordinate of each bin")
p.add_argument("-c", "--chromosomes", nargs='+',
               help="Which chromosomes to boost, otherwise all chromosomes")
p.add_argument("-o", "--output_prefix", default=None,
               help="Prefix for output files, including the output folder. "
                    f"If not given, it will be in subfolder '{DEFAULT_OUTPUT_FOLDER}' plus basename of the input matrixfilename "
                    "without its file extension.")
p.add_argument("-f", "--format", default="cool", choices=["cool", "hdf5"],
               help="output file format")
p.add_argument("-g", "--genome_assembly", default="ce11",
               help="genome assembly as metadata for .cool file")
p.add_argument("-k", "--keep_filtered_bins", action='store_true',
               help="Whether to keep filtered out bins, otherwise they will be removed from the result matrix. "
                    "Not used yet.")
p.add_argument("-a", "--alpha", default=0.24, type=float,
               help="AFTER a lot of test : 0.24 is always a good and safe compromise, you must use this value")

args = p.parse_args(sys.argv[1:])

# input file
Operation = args.operation
bedfilename = args.bedfilename
matrixfilename = args.matrixfilename
chromosomes = args.chromosomes
format = args.format
keep_filtered_bins = args.keep_filtered_bins
genome_assembly = args.genome_assembly
alpha = args.alpha

if args.output_prefix:
    output_prefix = args.output_prefix
else:
    if not os.path.exists(DEFAULT_OUTPUT_FOLDER):
        os.mkdir(DEFAULT_OUTPUT_FOLDER)
    output_prefix = DEFAULT_OUTPUT_FOLDER + os.path.splitext(os.path.basename(matrixfilename))[0]
    # alternative in the same folder of the input matrix
    # output_prefix = os.path.splitext(matrixfilename)[0]

###

def BoostHiC(amat):
    normmat = HiCutils.SCN(np.copy(amat))
    ff_normmat = HiCutils.fastFloyd(1 / np.power(np.copy(normmat), alpha))
    FFmat = np.power(ff_normmat, -1 / alpha)  # to dist, FF, to contact in one line
    boostedmat = HiCutils.adjustPdS(normmat, FFmat)
    return boostedmat


def Sample(amat, repositoryout):
    percentofsample = [0.1, 1., 10.]
    for j in percentofsample:
        logger.info(f"Value of sample: {j}")
        chrmat_s = np.copy(amat)
        chrmat = HiCutils.downsample_basic(chrmat_s, j)
        fh5 = h5py.File(repositoryout + "inputmat_sampleat_" + str(j) + "_percent.hdf5", "w")
        fh5['data'] = chrmat
        fh5.close()


# ## CODE EXECUTION ## #

# load the data
logger.info("LOADING MATRIX")
if matrixfilename.endswith('.cool'):
    D, total, resolution, D_cooler = convert.loadabsdatafile_cool(matrixfilename)
else:
    D, total, resolution = convert.loadabsdatafile(bedfilename)
    D_cooler = None
print(*D.items(), sep='\n')
print(f'Total bins:{total} resolution:{resolution}')

bins_boosted = pd.DataFrame(columns=['chrom', 'start', 'end'])
pixels_boosted = pd.DataFrame(columns=['bin1_id', 'bin2_id', 'count'])

chroms = chromosomes if chromosomes else D.keys()
bin_offs = 0
for chrom in chroms:
    repositoryout = f'{output_prefix}_{chrom}_'

    if D_cooler:
        basemat = D_cooler.matrix(balance=False).fetch(chrom)
    else:
        beginfend = D[chrom][0]
        endfend = D[chrom][1]
        logger.info(f"Chromosome {chrom} data fend : {beginfend},{endfend}")
        basemat = convert.loadmatrixselected(matrixfilename, beginfend, endfend)

    # matrix filtering
    logger.info("FILTERING")
    bins_num = basemat.shape[0]
    pos_out = HiCutils.get_outliers(basemat)
    utils.savematrixasfilelist3(pos_out, repositoryout + "filteredbin.txt")
    basematfilter = basemat[np.ix_(~pos_out, ~pos_out)]
    basematfilter = np.copy(basematfilter)
    # basematfilter=basematfilter[0:1000,0:1000]
    logger.info(f'len(basemat):{len(basemat)}, len(basematfilter):{len(basematfilter)}')

    if format is None or format == "hdf5":
        fh5 = h5py.File(repositoryout + "inputmat.hdf5", "w")
        fh5['data'] = basemat
        fh5.close()
    if format is None or format == "cool":
        convert.hic_to_cool(basemat, chrom, resolution, repositoryout + "inputmat.cool",
                            genome_assembly=genome_assembly)
    if format is None or format == "hdf5":
        fh5 = h5py.File(repositoryout + "inputmat_filtered.hdf5", "w")
        fh5['data'] = basematfilter
        fh5.close()
    if format is None or format == "cool":
        convert.hic_to_cool(basematfilter, chrom, resolution, repositoryout + "inputmat_filtered.cool",
                            genome_assembly=genome_assembly)

    if Operation == "boost":
        logger.info("Boost Hic")
        boosted = BoostHiC(basematfilter)
        # save
        if format is None or format == "hdf5":
            fh5 = h5py.File(repositoryout + "boostedmat.hdf5", "w")
            fh5['data'] = boosted
            fh5.close()
        if format is None or format == "cool":
            filtered_bins = pos_out if keep_filtered_bins else None
            chrom_bins, chrom_pixels = convert.get_bins_pixels(boosted, chrom, resolution,
                                                               bin_offs=bin_offs, bins_num=bins_num,
                                                               filtered_bins=filtered_bins)
            # save as cool
            cool_file = f"{repositoryout}boosted.cool"
            convert.create_cool(chrom_bins, chrom_pixels, resolution, cool_file,
                                genome_assembly=genome_assembly)
            # collecting all boosted chromosomes in one
            bins_boosted = pd.concat([bins_boosted, chrom_bins])
            pixels_boosted = pd.concat([pixels_boosted, chrom_pixels])
            bin_offs += bins_num
    elif Operation == "sample":
        logger.info("SAMPLING")
        Sample(basematfilter, repositoryout)

if Operation == "boost" and (format is None or format == "cool"):
    # combined file support only for .cool
    repositoryout = output_prefix + (f'_{"_".join(chromosomes)}_' if chromosomes else '_')
    cool_file = f"{repositoryout}boosted{'_kfb' if keep_filtered_bins else ''}.cool"
    convert.create_cool(bins_boosted, pixels_boosted, resolution, cool_file,
                        genome_assembly=genome_assembly)
    cmd = f'cooler balance --cis-only --force {cool_file}'
    logger.info(f'CALL: {cmd}')
    os.system(cmd)
    resolutions = [5000, 10000, 20000, 50000, 100000, 200000, 500000, 1000000]
    resolutions_str = ','.join([str(r) for r in resolutions])
    cmd = f'cooler zoomify -r "{resolutions_str}" {cool_file}'
    logger.info(f'CALL: {cmd}')
    os.system(cmd)
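The BoostHiC function above chains three steps: SCN normalization, an all-pairs shortest-path pass on 1/C^alpha, and the inverse power to get back to contact space. A self-contained numpy sketch of that math, with simplistic stand-ins for the project's own HiCutils.SCN and fastFloyd (the final adjustPdS rescaling is omitted, and this SCN is a naive row/column sum normalization, not necessarily the toolkit's exact variant):

import numpy as np

def scn(mat, n_iter=10):
    # Naive iterative row/column normalization (stand-in for HiCutils.SCN).
    m = mat.astype(float).copy()
    for _ in range(n_iter):
        m /= m.sum(axis=1, keepdims=True)
        m /= m.sum(axis=0, keepdims=True)
    return m

def floyd(dist):
    # Plain O(n^3) Floyd-Warshall all-pairs shortest paths, vectorized per k.
    d = dist.copy()
    for k in range(len(d)):
        d = np.minimum(d, d[:, k:k + 1] + d[k:k + 1, :])
    return d

alpha = 0.24
contacts = np.random.rand(50, 50)
contacts = contacts + contacts.T            # symmetric toy contact map
norm = scn(contacts)
dist = 1.0 / np.power(norm, alpha)          # contacts -> "distances"
boost = np.power(floyd(dist), -1.0 / alpha) # shortest paths -> boosted contacts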
42.136364
129
0.663026
934
7,416
5.130621
0.284797
0.035058
0.022538
0.020451
0.14399
0.116027
0.097245
0.078047
0.078047
0.070952
0
0.018051
0.215615
7,416
175
130
42.377143
0.805742
0.050432
0
0.137681
0
0.014493
0.242023
0.040598
0
0
0
0
0
1
0.014493
false
0
0.072464
0
0.094203
0.014493
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e52143225da56c9f67ad6d80e159ab308dbbde12
5,560
py
Python
tests/test_agents.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
211
2019-02-22T08:07:25.000Z
2022-03-14T10:44:20.000Z
tests/test_agents.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
51
2019-02-08T01:39:49.000Z
2022-02-15T21:21:46.000Z
tests/test_agents.py
fkamrani/adversarial-policies
53e129c2083f6557ddc18dbb39e4e633a2d7ab9b
[ "MIT" ]
41
2019-04-23T05:01:49.000Z
2022-03-16T06:51:19.000Z
import gym
from ilqr import iLQR
import numpy as np
import pytest

from aprl.agents.monte_carlo import (
    MonteCarloParallel,
    MonteCarloSingle,
    MujocoResettableWrapper,
    receding_horizon,
)
from aprl.agents.mujoco_lqr import (
    MujocoFiniteDiffCost,
    MujocoFiniteDiffDynamicsBasic,
    MujocoFiniteDiffDynamicsPerformance,
)

dynamics_list = [MujocoFiniteDiffDynamicsBasic, MujocoFiniteDiffDynamicsPerformance]


@pytest.mark.parametrize("dynamics_cls", dynamics_list)
def test_lqr_mujoco(dynamics_cls):
    """Smoke test for MujocoFiniteDiff{Dynamics,Cost}.

    Jupyter notebook experiments/mujoco_control.ipynb has quantitative results
    attained; for efficiency, we only run for a few iterations here."""
    env = gym.make("Reacher-v2").unwrapped
    env.seed(42)
    env.reset()
    dynamics = dynamics_cls(env)
    cost = MujocoFiniteDiffCost(env)
    N = 10
    ilqr = iLQR(dynamics, cost, N)
    x0 = dynamics.get_state()
    us_init = np.array([env.action_space.sample() for _ in range(N)])
    xs, us = ilqr.fit(x0, us_init, n_iterations=3)
    assert x0.shape == xs[0].shape
    assert xs.shape[0] == N + 1
    assert us.shape == (N, 2)
    assert env.action_space.contains(us[0])


def rollout(env, actions):
    obs, rews, dones, infos = [], [], [], []
    for a in actions:
        ob, rew, done, info = env.step(a)
        obs.append(ob)
        rews.append(rew)
        dones.append(done)
        infos.append(info)
    obs = np.array(obs)
    rews = np.array(rews)
    dones = np.array(dones)
    return obs, rews, dones, infos


def make_mujoco_env(env_name, seed):
    env = gym.make(env_name)
    env = MujocoResettableWrapper(env.unwrapped)
    env.seed(seed)
    env.reset()
    return env


MONTE_CARLO_ENVS = ["Reacher-v2", "HalfCheetah-v2", "Hopper-v2"]


@pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)
def test_mujoco_reset_env(env_name, horizon=10, seed=42):
    env = make_mujoco_env(env_name, seed)
    state = env.get_state()
    actions = [env.action_space.sample() for _ in range(horizon)]
    first_obs, first_rews, first_dones, _first_infos = rollout(env, actions)
    env.set_state(state)
    second_obs, second_rews, second_dones, _second_infos = rollout(env, actions)
    np.testing.assert_almost_equal(second_obs, first_obs, decimal=5)
    np.testing.assert_almost_equal(second_rews, first_rews, decimal=5)
    assert (first_dones == second_dones).all()


def check_monte_carlo(
    kind, score_thresholds, total_horizon, planning_horizon, trajectories, seed=42
):
    def f(env_name):
        # Setup
        env = make_mujoco_env(env_name, seed)
        if kind == "single":
            mc = MonteCarloSingle(env, planning_horizon, trajectories)
        elif kind == "parallel":
            env_fns = [lambda: make_mujoco_env(env_name, seed) for _ in range(2)]
            mc = MonteCarloParallel(env_fns, planning_horizon, trajectories)
        else:  # pragma: no cover
            raise ValueError("Unrecognized kind '{}'".format(kind))
        mc.seed(seed)

        # Check for side-effects
        state = env.get_state()
        _ = mc.best_action(state)
        assert (env.get_state() == state).all(), "Monte Carlo search has side effects"

        # One receding horizon rollout of Monte Carlo search
        total_rew = 0
        prev_done = False
        for i, (a, ob, rew, done, info) in enumerate(receding_horizon(mc, env)):
            assert not prev_done, "should terminate if env returns done"
            prev_done = done
            assert env.action_space.contains(a)
            assert env.observation_space.contains(ob)
            total_rew += rew
            if i >= total_horizon:
                break
        assert i == total_horizon or done

        # Check it does better than random sequences
        random_rews = []
        for i in range(10):
            env.action_space.np_random.seed(seed + i)
            action_seq = [env.action_space.sample() for _ in range(total_horizon)]
            env.set_state(state)
            _, rews, _, _ = rollout(env, action_seq)
            random_rew = sum(rews)
            random_rews.append(random_rew)
            assert total_rew >= random_rew, "random sequence {}".format(i)
        print(
            f"Random actions on {env_name} for {total_horizon} obtains "
            f"mean {np.mean(random_rews)} s.d. {np.std(random_rews)}"
        )

        # Check against pre-defined score threshold
        assert total_rew >= score_thresholds[env_name]

        # Cleanup
        if kind == "parallel":
            mc.close()
            with pytest.raises(BrokenPipeError):
                mc.best_action(state)

    return f


MC_SINGLE_THRESHOLDS = {
    "Reacher-v2": -11,  # tested -9.5, random -17.25 s.d. 1.5
    "HalfCheetah-v2": 19,  # tested 21.6, random -4.2 s.d. 3.7
    "Hopper-v2": 29,  # tested 31.1, random 15.2 s.d. 5.9
}

MC_PARALLEL_THRESHOLDS = {
    "Reacher-v2": -17,  # tested at -15.3; random -25.8 s.d. 1.8
    "HalfCheetah-v2": 33,  # tested at 35.5; random -6.0 s.d. 7.1
    "Hopper-v2": 52,  # tested at 54.7; random 21.1 s.d. 13.2
}

_test_mc_single = check_monte_carlo(
    "single", MC_SINGLE_THRESHOLDS, total_horizon=20, planning_horizon=10, trajectories=100
)
_test_mc_parallel = check_monte_carlo(
    "parallel", MC_PARALLEL_THRESHOLDS, total_horizon=30, planning_horizon=15, trajectories=200
)
test_mc_single = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_single)
test_mc_parallel = pytest.mark.parametrize("env_name", MONTE_CARLO_ENVS)(_test_mc_parallel)
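The receding_horizon generator exercised above implements the standard pattern: plan from the current state, execute only the first action, then replan. A minimal sketch of that loop with a stand-in planner (random actions instead of aprl's MonteCarlo* classes, and a classic-control env so MuJoCo is not required; uses the same old-gym step API as the file):

# Toy receding-horizon loop; the "planner" here is a hypothetical stub.
import gym

env = gym.make("Pendulum-v0")
obs = env.reset()
for _ in range(20):
    action = env.action_space.sample()  # stand-in for mc.best_action(state)
    obs, rew, done, info = env.step(action)
    if done:
        break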
34.534161
95
0.658453
747
5,560
4.697456
0.262383
0.023938
0.023938
0.018239
0.128242
0.112283
0.080365
0.039327
0.027358
0.027358
0
0.02524
0.230396
5,560
160
96
34.75
0.794812
0.107194
0
0.063492
0
0
0.081612
0.008708
0
0
0
0
0.111111
1
0.047619
false
0
0.047619
0
0.119048
0.007937
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e52269e8fc4cd21064d9405f8d3f8808a86630c6
4,219
py
Python
handlers/passport.py
python9339/tornado_ihome
a128f2140749186f177417c76472aded12427e6b
[ "Apache-2.0" ]
null
null
null
handlers/passport.py
python9339/tornado_ihome
a128f2140749186f177417c76472aded12427e6b
[ "Apache-2.0" ]
null
null
null
handlers/passport.py
python9339/tornado_ihome
a128f2140749186f177417c76472aded12427e6b
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# @Time : 20-4-13 4:40 PM
# @File : handlers.py
import logging
import re
import hashlib

import config
from utils.response_code import RET
from utils.session import Session
from basehandler import BaseHandler


class RegisterHandler(BaseHandler):
    def post(self, *args, **kwargs):
        # Extract the JSON data passed in by the front end
        mobile = self.json_args.get('mobile')
        smsCode = self.json_args.get('phoneCode')
        password = self.json_args.get('password')
        password2 = self.json_args.get('password2')

        # Check that no parameter is missing
        if not all((mobile, smsCode, password, password2)):
            return self.write(dict(errorcode=RET.NODATA, errormsg='incomplete parameters'))

        # Check that the phone number is well-formed
        if not re.match(r"^1\d{10}$", mobile):
            return self.write(dict(errorcode=RET.NODATA, errormsg='invalid phone number'))

        # Check that the two passwords match
        if password != password2:
            return self.write(dict(errorcode=RET.PARAMERR, errormsg='the two passwords do not match'))

        # # Check the SMS verification code
        # try:
        #     real_sms_code = self.redis.get('sms_code_%s' % mobile)
        # except Exception as e:
        #     logging.error(e)
        #     return self.write(dict(errorcode=RET.DBERR, errormsg='error querying the SMS verification code'))
        # # Check whether the SMS verification code has expired
        # if not real_sms_code:
        #     return self.write(dict(errorcode=RET.DBERR, errormsg='SMS verification code expired'))
        # # Compare the code entered by the user with the real value
        # if smsCode != real_sms_code:
        #     return self.write(dict(errorcode=RET.DATAERR, errormsg='incorrect SMS verification code'))
        # # Delete the SMS verification code stored in Redis
        # try:
        #     self.redis.delete('sms_code_%s' % mobile)
        # except Exception as e:
        #     logging.error(e)

        # Save the data; duplicate phone numbers are caught by the unique
        # constraint on the mobile column in the database
        password = hashlib.sha256(password + config.passwd_hash_key).hexdigest()
        sql_str = "insert into ih_user_profile(up_name, up_mobile, up_passwd) values (%(name)s, %(mobile)s, %(passwd)s);"
        try:
            user_id = self.db.execute(sql_str, name=mobile, mobile=mobile, passwd=password)
        except Exception as e:
            logging.error(e)
            return self.write(dict(errorcode=RET.DBERR, errormsg='phone number already registered'))

        # Record the user's login state in the session
        session = Session(self)
        session.data['user_id'] = user_id
        session.data['mobile'] = mobile
        session.data['name'] = mobile
        try:
            session.save()
        except Exception as e:
            logging.error(e)
        return self.write(dict(errorcode=RET.OK, errormsg='registration successful'))


class LoginHandler(BaseHandler):
    def post(self, *args, **kwargs):
        # Extract the JSON data passed in by the front end
        mobile = self.json_args.get('mobile')
        password = self.json_args.get('password')

        # Check the parameters
        if not all([mobile, password]):
            return self.write(dict(errorcode=RET.PARAMERR, errormsg="parameter error"))
        if not re.match(r"^1\d{10}$", mobile):
            return self.write(dict(errorcode=RET.DATAERR, errormsg="invalid phone number"))

        # Check whether the password is correct
        res = self.db.get("select up_user_id,up_name,up_passwd from ih_user_profile where up_mobile=%(mobile)s",
                          mobile=mobile)
        password = hashlib.sha256(password + config.passwd_hash_key).hexdigest()
        if res and res["up_passwd"] == unicode(password):  # Python 2: stored hash comes back as unicode
            # Generate the session data and return it to the client
            try:
                self.session = Session(self)
                self.session.data['user_id'] = res['up_user_id']
                self.session.data['name'] = res['up_name']
                self.session.data['mobile'] = mobile
                self.session.save()
            except Exception as e:
                logging.error(e)
            return self.write(dict(errorcode=RET.OK, errormsg="OK"))
        else:
            return self.write(dict(errorcode=RET.DATAERR, errormsg="wrong phone number or password!"))


class CheckLoginHandler(BaseHandler):
    def get(self, *args, **kwargs):
        # If this returns truthy, the session data contains a logged-in user
        if self.get_current_user():
            return self.write(dict(errorcode=RET.OK, errormsg="true",
                                   data={"name": self.session.data['name']}))
        else:
            return self.write(dict(errorcode=RET.USERERR, errormsg="false"))
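Both handlers hash the password as sha256(password + passwd_hash_key) before comparing against the stored value. A minimal reproduction of that check (Python 3 form with explicit .encode(); the original is Python 2, and passwd_hash_key below is a placeholder for whatever config defines):

import hashlib

passwd_hash_key = "example-secret"  # stand-in for config.passwd_hash_key
stored = hashlib.sha256(("hunter2" + passwd_hash_key).encode()).hexdigest()
attempt = hashlib.sha256(("hunter2" + passwd_hash_key).encode()).hexdigest()
assert attempt == stored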
37.008772
121
0.603698
484
4,219
5.169421
0.266529
0.055955
0.083933
0.106315
0.520783
0.503997
0.479217
0.463629
0.33693
0.257794
0
0.008125
0.27068
4,219
113
122
37.336283
0.805005
0.189144
0
0.328125
0
0.015625
0.112293
0.015366
0
0
0
0
0
1
0.046875
false
0.1875
0.109375
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
e524ba293266362fb0efb791ef7088c82a4d7dd1
1,091
py
Python
asana/resources/custom_field_settings.py
FiyaFly/python-asana
ef9e6ff3e82e9f1ca18d526401f524698c7215c7
[ "MIT" ]
266
2015-02-13T18:14:08.000Z
2022-03-29T22:03:33.000Z
asana/resources/custom_field_settings.py
FiyaFly/python-asana
ef9e6ff3e82e9f1ca18d526401f524698c7215c7
[ "MIT" ]
77
2015-02-13T00:22:11.000Z
2022-02-20T07:56:14.000Z
asana/resources/custom_field_settings.py
FiyaFly/python-asana
ef9e6ff3e82e9f1ca18d526401f524698c7215c7
[ "MIT" ]
95
2015-03-18T23:28:57.000Z
2022-02-20T23:28:58.000Z
from .gen.custom_field_settings import _CustomFieldSettings


class CustomFieldSettings(_CustomFieldSettings):
    """Custom Field Settings resource"""

    def find_by_project(self, project, params={}, **options):
        """Returns a list of all of the custom fields settings on a project.

        Parameters
        ----------
        project : {Gid} The ID of the project for which to list custom field settings
        [params] : {Object} Parameters for the request
        """
        path = "/projects/%s/custom_field_settings" % (project)
        return self.client.get_collection(path, params, **options)

    def find_by_portfolio(self, portfolio, params={}, **options):
        """Returns a list of all of the custom fields settings on a portfolio.

        Parameters
        ----------
        portfolio : {Gid} The ID of the portfolio for which to list custom field settings
        [params] : {Object} Parameters for the request
        """
        path = "/portfolios/%s/custom_field_settings" % (portfolio)
        return self.client.get_collection(path, params, **options)
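A plausible usage sketch through the python-asana client; the token and project gid are placeholders, and get_collection typically yields items lazily:

import asana

client = asana.Client.access_token("PERSONAL_ACCESS_TOKEN")
for setting in client.custom_field_settings.find_by_project("12345"):
    print(setting)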
40.407407
89
0.653529
128
1,091
5.460938
0.320313
0.094421
0.16309
0.060086
0.540773
0.503577
0.503577
0.503577
0.37196
0.37196
0
0
0.237397
1,091
26
90
41.961538
0.840144
0.425298
0
0.25
0
0
0.132576
0.132576
0
0
0
0
0
1
0.25
false
0
0.125
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
e525b73b902da2ecc912124afa970b8027a3b324
1,049
py
Python
graphql/app/models/ndb/faction.py
py-in-the-sky/appengine-swapi
824d770cd11e5510b2300d1e248a9474e3fde8c2
[ "MIT" ]
null
null
null
graphql/app/models/ndb/faction.py
py-in-the-sky/appengine-swapi
824d770cd11e5510b2300d1e248a9474e3fde8c2
[ "MIT" ]
null
null
null
graphql/app/models/ndb/faction.py
py-in-the-sky/appengine-swapi
824d770cd11e5510b2300d1e248a9474e3fde8c2
[ "MIT" ]
null
null
null
from google.appengine.ext import ndb

from .character import Character
from .utils import paginated_query


class Faction(ndb.Model):
    name = ndb.StringProperty(required=True)
    description = ndb.TextProperty()
    created = ndb.DateTimeProperty(required=True, auto_now_add=True)
    updated = ndb.DateTimeProperty(required=True, auto_now=True)

    @classmethod
    def get_factions(cls, **kwargs):
        "Return all factions in alphabetical order."
        q = cls.query()
        q_forward = q.order(cls.name)
        q_backward = q.order(-cls.name)
        return paginated_query(q_forward, q_backward, **kwargs)

    def get_characters(self, **kwargs):
        "Return characters in faction in alphabetical order."
        q = Character.query(Character.faction_key == self.key)
        q_forward = q.order(Character.name)
        q_backward = q.order(-Character.name)
        return paginated_query(q_forward, q_backward, **kwargs)

    @classmethod
    def get_by_name(cls, name):
        return cls.query(cls.name == name).get_async()
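Note that get_by_name returns an ndb Future (get_async), so callers resolve it explicitly. A small sketch, with paginated_query's keyword arguments left out since they are project-specific:

future = Faction.get_by_name('Rebels')   # non-blocking: returns an ndb Future
faction = future.get_result()            # blocks until the query completes
if faction is not None:
    page = faction.get_characters()      # pagination kwargs assumed, e.g. first/after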
33.83871
68
0.691134
134
1,049
5.261194
0.328358
0.04539
0.051064
0.059574
0.295035
0.241135
0.133333
0.133333
0.133333
0
0
0
0.204004
1,049
30
69
34.966667
0.844311
0.089609
0
0.166667
0
0
0.088656
0
0
0
0
0
0
1
0.125
false
0
0.125
0.041667
0.583333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
e526d02ced91a04d460abfc609eec486e2c0557a
3,790
py
Python
falcon/request_helpers.py
elishacook/falcon
0a9431ff2fbd60329532c726744a842b7caeeaf9
[ "Apache-2.0" ]
1
2020-07-11T14:38:32.000Z
2020-07-11T14:38:32.000Z
falcon/request_helpers.py
elishacook/falcon
0a9431ff2fbd60329532c726744a842b7caeeaf9
[ "Apache-2.0" ]
null
null
null
falcon/request_helpers.py
elishacook/falcon
0a9431ff2fbd60329532c726744a842b7caeeaf9
[ "Apache-2.0" ]
null
null
null
# Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def header_property(wsgi_name):
    """Creates a read-only header property.

    Args:
        wsgi_name (str): Case-sensitive name of the header as it would
            appear in the WSGI environ dict (i.e., 'HTTP_*')

    Returns:
        A property instance that can be assigned to a class variable.

    """

    def fget(self):
        try:
            return self.env[wsgi_name] or None
        except KeyError:
            return None

    return property(fget)


class Body(object):
    """Wrap wsgi.input streams to make them more robust.

    The socket._fileobject and io.BufferedReader are sometimes used
    to implement wsgi.input. However, app developers are often burned
    by the fact that the read() method for these objects blocks
    indefinitely if either no size is passed, or a size greater than
    the request's content length is passed to the method.

    This class normalizes wsgi.input behavior between WSGI servers
    by implementing non-blocking behavior for the cases mentioned
    above.

    Args:
        stream: Instance of socket._fileobject from environ['wsgi.input']
        stream_len: Expected content length of the stream.

    """

    def __init__(self, stream, stream_len):
        self.stream = stream
        self.stream_len = stream_len

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.stream)

    next = __next__

    def _read(self, size, target):
        """Helper function for proxying reads to the underlying stream.

        Args:
            size (int): Maximum number of bytes/characters to read. Will
                be coerced, if None or -1, to `self.stream_len`. Will
                likewise be coerced if greater than `self.stream_len`, so
                that if the stream doesn't follow standard io semantics,
                the read won't block.
            target (callable): Once `size` has been fixed up, this function
                will be called to actually do the work.

        Returns:
            Data read from the stream, as returned by `target`.

        """
        if size is None or size == -1 or size > self.stream_len:
            size = self.stream_len

        return target(size)

    def read(self, size=None):
        """Read from the stream.

        Args:
            size (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.

        """
        return self._read(size, self.stream.read)

    def readline(self, limit=None):
        """Read a line from the stream.

        Args:
            limit (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.

        """
        return self._read(limit, self.stream.readline)

    def readlines(self, hint=None):
        """Read lines from the stream.

        Args:
            hint (int): Maximum number of bytes/characters to read.
                Defaults to reading until EOF.

        Returns:
            Data read from the stream.

        """
        return self._read(hint, self.stream.readlines)
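A minimal demonstration of the clamping behavior Body provides: a read request larger than the declared content length is coerced down to stream_len instead of being passed through (here on an in-memory stream, so nothing would block either way):

import io

raw = io.BytesIO(b"hello world")
body = Body(raw, stream_len=11)
assert body.read(1024) == b"hello world"  # 1024 is coerced down to 11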
28.931298
75
0.632718
506
3,790
4.66996
0.389328
0.046551
0.03851
0.035971
0.174778
0.174778
0.162928
0.162928
0.162928
0.162928
0
0.003757
0.297625
3,790
130
76
29.153846
0.883922
0.656728
0
0
0
0
0
0
0
0
0
0
0
1
0.346154
false
0
0
0.076923
0.769231
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
2
e52d87ade902887855b10cfb23d4264c5e93b1d3
7,190
py
Python
scripts/msh_process.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
4
2020-08-18T18:33:05.000Z
2021-05-18T23:55:56.000Z
scripts/msh_process.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
null
null
null
scripts/msh_process.py
mou3adb/spread_the_particle
6cc666fded62f07380ed1e3ed52969c436295906
[ "MIT" ]
2
2021-03-03T18:57:06.000Z
2021-05-18T20:43:44.000Z
""" Gmsh format 2.2 """ import numpy as np from flow import Flow from element import Element from element_search import find_neighbors from text.text_flow import write_flow from text.text_elements import write_elements from text.text_geometries import write_geometries #============================================================================== def intIt(l): return np.array([int(e) for e in l]) def floatIt(l): return np.array([float(e) for e in l]) def extract_msh(path_msh): f = open(path_msh, 'r') nodes_X, nodes_Y = [], [] elements = [] line = f.readline() # ... # $Nodes\n # n_nodes # ... while line != '$Nodes\n': line = f.readline() line = f.readline() n_nodes = int(line.strip()) for i in range(n_nodes): # line = id x y z line = f.readline() coord = floatIt(line.strip().split()) nodes_X.append(coord[1]) nodes_Y.append(coord[2]) # ... # $Elements\n # n_elements # ... while line != '$Elements\n': line = f.readline() line = f.readline() n_elements = int(line.strip()) count = 0 for i in range(n_elements): # element_id element_type ... ... nodes_id line = f.readline() coord = intIt(line.strip().split()) element_type = coord[1] if element_type == 9: # 6-node second order triangle count += 1 e = Element(count) e.nodes = np.array(coord[-6:]) elements.append(e) # if element_type == 1: # 2-node line # e.element_type = 1 # e.nodes = coord[-2:] # # elif element_type == 2: # 3-node triangle # e.element_type = 2 # e.nodes = coord[-3:] # # elif element_type == 3: # 4-node quadrangle # e.element_type = 3 # e.nodes = coord[-4:] # # elif element_type == 8: # 3-node second order line # e.element_type = 8 # e.nodes = coord[-3:] # # elif element_type == 9: # 6-node second order triangle # e.element_type = 9 # e.nodes = coord[-6:] # # elif element_type == 10: # 9-node second order quadrangle # e.element_type = 10 # e.nodes = coord[-9:] # # elif element_type == 15: # 1-node point # e.element_type = 15 # e.nodes = coord[-1:] # # elements.append(e) f.close() return np.array(nodes_X), np.array(nodes_Y), np.array(elements) def generate_poiseuille(path_msh, parent_folder): single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh) d = np.max(single_nodes_Y) - np.min(single_nodes_Y) y_middle = np.min(single_nodes_Y) + d/2 n_nodes = len(single_nodes_X) mu = 1e-3 p = 2*mu*single_nodes_X U = d**2/4 - (single_nodes_Y - y_middle)**2 V = np.zeros(n_nodes) nodes_X, nodes_Y = np.array([]), np.array([]) Us, Vs, ps = np.array([]), np.array([]), np.array([]) Nt = 101 times = np.linspace(0, 1, Nt) for t in times: nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y Us = np.vstack([Us, U]) if Us.size else U Vs = np.vstack([Vs, V]) if Vs.size else V ps = np.vstack([ps, p]) if ps.size else p Re, Ur = 1e-3*1*d/mu, np.inf # Reynolds number and reduced velocity are not # defined in the Hagen-Poiseuille problem flow = Flow() flow.Re, flow.Ur = Re, Ur flow.times = times flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y flow.Us, flow.Vs, flow.ps = Us, Vs, ps write_flow(flow, parent_folder + 'flows/poiseuille') find_neighbors(elements) write_elements(elements, parent_folder + 'elements/poiseuille') write_geometries(np.array([]), parent_folder + 'geometries/poiseuille') def generate_periodic(path_msh, parent_folder): single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh) d = np.max(single_nodes_Y) - np.min(single_nodes_Y) Nt = 101 times = np.linspace(0, 1, Nt) period = 0.25 w = 2*np.pi/period # U = U0*cos(wt) with 
U0 = 1 # Navier-Stokes, uniform: # rho dU/dt + 0 = - dp/dx with rho = 1 # dp/dx = rhoU0*w*sin(wt) # p = p0 + rhoU0*w*sin(wt) with p0 = 0 nodes_X, nodes_Y = np.array([]), np.array([]) Us, Vs, ps = np.array([]), np.array([]), np.array([]) for t in times: nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y U = 0*nodes_X + np.cos(w*t) V = 0*nodes_X p = 0*nodes_X + w*np.sin(w*t) Us = np.vstack([Us, U]) if Us.size else U Vs = np.vstack([Vs, V]) if Vs.size else V ps = np.vstack([ps, p]) if ps.size else p Re, Ur = 1*1*d/1e-6, np.inf flow = Flow() flow.Re, flow.Ur = Re, Ur flow.times = times flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y flow.Us, flow.Vs, flow.ps = Us, Vs, ps write_flow(flow, parent_folder + 'flows/periodic') find_neighbors(elements) write_elements(elements, parent_folder + 'elements/periodic') write_geometries(np.array([]), parent_folder + 'geometries/periodic') def generate_inviscid(path_msh, parent_folder): single_nodes_X, single_nodes_Y, elements = extract_msh(path_msh) rs = np.sqrt(single_nodes_X**2 + single_nodes_Y**2) thetas = np.arctan2(single_nodes_Y, single_nodes_X) Ur, Utheta, p = [], [], [] for r, theta in zip(rs, thetas): if r == 0: Ur.append(0) Utheta.append(0) p.append(0) else: Ur.append((1 - (0.5/r)**2)*np.cos(theta)) Utheta.append((1 + (0.5/r)**2)*np.sin(theta)) p.append(2*(0.5/r)**2 * np.cos(2*theta) - (0.5/r)**4) Ur = np.array(Ur) Utheta = np.array(Utheta) p = np.array(p) U = Ur*np.cos(thetas) - Utheta*np.sin(thetas) V = Ur*np.sin(thetas) - Utheta*np.cos(thetas) nodes_X, nodes_Y = np.array([]), np.array([]) Us, Vs, ps = np.array([]), np.array([]), np.array([]) Nt = 101 times = np.linspace(0, 1, Nt) for t in times: nodes_X = np.vstack([nodes_X, single_nodes_X]) if nodes_X.size else single_nodes_X nodes_Y = np.vstack([nodes_Y, single_nodes_Y]) if nodes_Y.size else single_nodes_Y Us = np.vstack([Us, U]) if Us.size else U Vs = np.vstack([Vs, V]) if Vs.size else V ps = np.vstack([ps, p]) if ps.size else p Re, Ur = 1e+6, 0. flow = Flow() flow.Re, flow.Ur = Re, Ur flow.times = times flow.nodes_X, flow.nodes_Y = nodes_X, nodes_Y flow.Us, flow.Vs, flow.ps = Us, Vs, ps write_flow(flow, parent_folder + 'flows/potential') find_neighbors(elements) write_elements(elements, parent_folder + 'elements/potential') write_geometries(np.array([[5,407,404,408,405,409,406,410,6,414,411,415,412,416,413,417]]), parent_folder + 'geometries/potential')
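A quick numerical check of the analytic profile used in generate_poiseuille, U(y) = d²/4 − (y − y_mid)²: it vanishes at both walls and peaks at the centerline. Channel height and wall position below are arbitrary placeholders:

import numpy as np

d, y0 = 1.0, 0.0                   # assumed channel height and lower wall
y = np.linspace(y0, y0 + d, 5)
y_mid = y0 + d / 2
U = d**2 / 4 - (y - y_mid)**2
assert np.isclose(U[0], 0) and np.isclose(U[-1], 0)  # no-slip at both walls
assert U.argmax() == len(y) // 2                     # maximum d^2/4 mid-channel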
28.085938
95
0.577886
1,111
7,190
3.576958
0.139514
0.057373
0.051334
0.030196
0.526422
0.509562
0.501007
0.4615
0.423251
0.376447
0
0.030044
0.268567
7,190
255
96
28.196078
0.725613
0.180807
0
0.434783
0
0
0.030724
0.003605
0
0
0
0
0
1
0.043478
false
0
0.050725
0.014493
0.115942
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5303983f7442fd8fa1385cd72155b552858073b
1,581
py
Python
plugins/module_utils/syntropy.py
SyntropyNet/syntropy-ansible-collection
9ec9020faf81bc1b9d46aef71de88397a2b5d625
[ "MIT" ]
4
2021-01-06T08:24:45.000Z
2021-09-15T20:05:15.000Z
plugins/module_utils/syntropy.py
SyntropyNet/syntropy-ansible-collection
9ec9020faf81bc1b9d46aef71de88397a2b5d625
[ "MIT" ]
null
null
null
plugins/module_utils/syntropy.py
SyntropyNet/syntropy-ansible-collection
9ec9020faf81bc1b9d46aef71de88397a2b5d625
[ "MIT" ]
1
2021-03-30T20:38:03.000Z
2021-03-30T20:38:03.000Z
# Copyright: (c) 2020, Syntropy Network
# MIT License

from __future__ import absolute_import, division, print_function

__metaclass__ = type

import os
import traceback

SDK_IMP_ERR = None
try:
    from syntropy_sdk import ApiClient, ApiKeysApi, AuthApi, Configuration, PlatformApi
    from syntropy_sdk.exceptions import ApiException, SyntropyError
    from syntropy_sdk.models import AccessTokenData
    from syntropy_sdk.utils import (
        MAX_QUERY_FIELD_SIZE,
        BatchedRequest,
        login_with_access_token,
    )
    from syntropynac.configure import configure_network
    from syntropynac.exceptions import ConfigureNetworkError
    from syntropynac.fields import ConfigFields

    HAS_SDK = True
except ImportError:
    HAS_SDK = False
    SDK_IMP_ERR = traceback.format_exc()


class EnvVars:
    API_URL = "SYNTROPY_API_SERVER"
    TOKEN = "SYNTROPY_API_TOKEN"


def get_api_client(api_url=None, api_key=None):
    config = Configuration()
    config.host = api_url if api_url else os.environ.get(EnvVars.API_URL)
    access_token = api_key if api_key else os.environ.get(EnvVars.TOKEN)
    config.api_key["Authorization"] = login_with_access_token(config.host, access_token)
    return ApiClient(config)


def api_getter_builder(T):
    def get(api_url=None, api_key=None, client=None):
        return T(get_api_client(api_url, api_key)) if client is None else T(client)

    return get


if HAS_SDK:
    get_auth_api = api_getter_builder(AuthApi)
    get_api_keys_api = api_getter_builder(ApiKeysApi)
    get_platform_api = api_getter_builder(PlatformApi)
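A small sketch of how these getters are meant to be used: with no explicit URL or key they fall back to the SYNTROPY_* environment variables defined by EnvVars (the values below are placeholders):

import os

os.environ["SYNTROPY_API_SERVER"] = "https://api.example.com"  # placeholder
os.environ["SYNTROPY_API_TOKEN"] = "example-token"             # placeholder
if HAS_SDK:
    platform = get_platform_api()  # builds an ApiClient from the env vars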
29.277778
88
0.762176
215
1,581
5.27907
0.362791
0.037004
0.052863
0.05022
0.102203
0.035242
0
0
0
0
0
0.003058
0.172676
1,581
53
89
29.830189
0.864679
0.030993
0
0
0
0
0.032701
0
0
0
0
0
0
1
0.078947
false
0
0.289474
0.026316
0.526316
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
e5318d57c5b94068601a78c0c8bed490f74a1be5
1,394
py
Python
custard/tests/settings.py
kunitoki/django-custard
3cf3aa5acf84de2f653e96469e2f9c42813df50a
[ "MIT" ]
6
2015-06-15T07:40:26.000Z
2016-06-27T08:01:34.000Z
custard/tests/settings.py
kunitoki/django-custard
3cf3aa5acf84de2f653e96469e2f9c42813df50a
[ "MIT" ]
3
2015-03-11T22:43:01.000Z
2015-06-07T21:50:36.000Z
custard/tests/settings.py
kunitoki/django-custard
3cf3aa5acf84de2f653e96469e2f9c42813df50a
[ "MIT" ]
6
2015-03-11T22:19:57.000Z
2021-03-10T15:40:52.000Z
# Django settings for testproject project.
import os

DIRNAME = os.path.dirname(__file__)

DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PROPAGATE_EXCEPTIONS = True

ADMINS = ()
MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(DIRNAME, 'db.sqlite3'),
        'TEST_NAME': os.path.join(DIRNAME, 'test_db.sqlite3'),
    }
}

TIME_ZONE = 'Europe/Rome'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True

MEDIA_ROOT = ''
MEDIA_URL = ''

SECRET_KEY = 'vaO4Y<g#YRWG8;Md8noiLp>.w(w~q_b=|1`?9<x>0KxA%UB!63'

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'custard.tests.urls'

TEMPLATE_DIRS = ()

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'custard',
    'custard.tests',
)

TEST_RUNNER = 'django.test.runner.DiscoverRunner'

STATIC_URL = '/static/'
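For reference, pointing Django at this settings module before using any of the apps (the module path is inferred from the file location shown above):

import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "custard.tests.settings")
django.setup()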
22.483871
65
0.705165
161
1,394
5.944099
0.546584
0.122257
0.020899
0.029258
0.043887
0
0
0
0
0
0
0.013502
0.149928
1,394
61
66
22.852459
0.794093
0.028694
0
0
0
0.021277
0.504811
0.377498
0
0
0
0
0
1
0
false
0
0.021277
0
0.021277
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e531bfa00c295cacf19737b9ff6143ce8070896d
149
py
Python
Python/desafios/d12.py
prazerfelipe/Python
b1f4c768b1823d34898935bbd037ae43711c79e1
[ "MIT" ]
null
null
null
Python/desafios/d12.py
prazerfelipe/Python
b1f4c768b1823d34898935bbd037ae43711c79e1
[ "MIT" ]
null
null
null
Python/desafios/d12.py
prazerfelipe/Python
b1f4c768b1823d34898935bbd037ae43711c79e1
[ "MIT" ]
null
null
null
p = float(input('Enter the product price: '))
d = p - (p * 5 / 100)
print('the product price is {:.2f} and with a 5% discount it comes to {:.2f}'.format(p, d))
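A quick worked check of the discount arithmetic, without the input prompt:

# For p = 200.00: d = 200 - (200 * 5 / 100) = 200 - 10 = 190.00
p = 200.0
d = p - (p * 5 / 100)
assert d == 190.0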
29.8
82
0.610738
30
149
3.033333
0.666667
0.131868
0.175824
0.32967
0
0
0
0
0
0
0
0.057851
0.187919
149
5
82
29.8
0.694215
0
0
0
0
0
0.566667
0
0
0
0
0
0
1
0
false
0
0
0
0
0.333333
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e531c9c1ac9eaf5cf3f53b6dbc300b0ff9a2f799
238
py
Python
app/modules/dht_sensor/temp_controller.py
bytecode-tech/my-tank
e37dc844fd2801b26710b461f64a6f938a5371db
[ "MIT" ]
1
2020-05-21T04:56:51.000Z
2020-05-21T04:56:51.000Z
app/modules/dht_sensor/temp_controller.py
kandiki/my-tank
e37dc844fd2801b26710b461f64a6f938a5371db
[ "MIT" ]
null
null
null
app/modules/dht_sensor/temp_controller.py
kandiki/my-tank
e37dc844fd2801b26710b461f64a6f938a5371db
[ "MIT" ]
1
2020-04-21T20:24:36.000Z
2020-04-21T20:24:36.000Z
from flask import (Blueprint, request)

from . import temp

temp_controller = Blueprint('temp-controller', __name__, url_prefix='/api/temp')


@temp_controller.route('/', methods=["GET"])
def api_temp_control():
    return temp.read_temp()
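Registering the blueprint on a Flask app exposes GET /api/temp/; the app wiring below is an assumed sketch, and temp.read_temp() comes from the sibling module:

from flask import Flask

app = Flask(__name__)
app.register_blueprint(temp_controller)
# GET http://localhost:5000/api/temp/ would then return the sensor reading.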
26.444444
80
0.739496
31
238
5.354839
0.580645
0.253012
0.216867
0
0
0
0
0
0
0
0
0
0.109244
238
9
81
26.444444
0.783019
0
0
0
0
0
0.117155
0
0
0
0
0
0
1
0.166667
false
0
0.333333
0.166667
0.666667
0.333333
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
6
e5320220c6e0bdc466d30a55af8b5a6073894184
2,753
py
Python
home/utils.py
ryankicks/collection-pipeline
2f4b6f154baba90aad39d490fd1dc170ba7ae4e4
[ "MIT" ]
null
null
null
home/utils.py
ryankicks/collection-pipeline
2f4b6f154baba90aad39d490fd1dc170ba7ae4e4
[ "MIT" ]
null
null
null
home/utils.py
ryankicks/collection-pipeline
2f4b6f154baba90aad39d490fd1dc170ba7ae4e4
[ "MIT" ]
null
null
null
from inspect import stack import logging from time import mktime import pytz from datetime import * from calendar import timegm # from django.http import HttpResponse, HttpResponseRedirect, HttpResponseRedirectBase from django.conf import settings from django.utils import timezone from social.apps.django_app.default.models import UserSocialAuth import twitter from twitter import * EPOCH = 1970 _EPOCH_ORD = date(EPOCH, 1, 1).toordinal() class Tz: # assumes a date, unless you pass date_format, and then assumes it needs to be parsed @staticmethod def convert_to_utc(naive, date_format=None, user_tz=None): if date_format: naive = datetime.strptime (naive, date_format) # if not specified, default to user context if not user_tz: user_tz = timezone.get_current_timezone() local_dt = user_tz.localize(naive, is_dst=None) utc_dt = local_dt.astimezone(pytz.utc) return utc_dt @staticmethod def convert_to_local(dt, user_tz=None): # if not specified, default to user context if not user_tz: user_tz = timezone.get_current_timezone() local_dt = dt.astimezone(user_tz) return local_dt class Logger(): @staticmethod def info(str): LOGGER.info(str) @staticmethod def exception(str): LOGGER.exception(str) class Twitter: @staticmethod def get_twitter(user): from django.conf import settings consumer_key = settings.SOCIAL_AUTH_TWITTER_KEY consumer_secret = settings.SOCIAL_AUTH_TWITTER_SECRET access_token_key = settings.TWITTER_ACCESS_TOKEN access_token_secret = settings.TWITTER_ACCESS_TOKEN_SECRET usa = UserSocialAuth.objects.get(user=user, provider='twitter') if usa: access_token = usa.extra_data['access_token'] if access_token: access_token_key = access_token['oauth_token'] access_token_secret = access_token['oauth_token_secret'] if not access_token_key or not access_token_secret: raise Exception('No user for twitter API call') api = twitter.Api( base_url='https://api.twitter.com/1.1', consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_token_key, access_token_secret=access_token_secret) return api @staticmethod def get_access_tokens(user): usa = UserSocialAuth.objects.get(user=user, provider='twitter') access_token = usa.extra_data['access_token'] return access_token
28.677083
89
0.661097
336
2,753
5.166667
0.285714
0.126728
0.058756
0.034562
0.288594
0.193548
0.193548
0.154378
0.096774
0.096774
0
0.004
0.27352
2,753
95
90
28.978947
0.864
0.091537
0
0.25
0
0
0.048878
0
0
0
0
0
0
1
0.09375
false
0
0.1875
0
0.390625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e53252e0bf00080e9f60852ab7af61c8c73fe549
35
py
Python
modules/2.79/bpy/types/ShaderNodeGroup.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
modules/2.79/bpy/types/ShaderNodeGroup.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
modules/2.79/bpy/types/ShaderNodeGroup.py
cmbasnett/fake-bpy-module
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
[ "MIT" ]
null
null
null
ShaderNodeGroup.interface = None
8.75
32
0.8
3
35
9.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
35
3
33
11.666667
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
4
e533417d7abdd0f090ec7b783eb6b6201bbec4a5
360
py
Python
pypresence/__init__.py
GhostofGoes/pypresence
902f406345213fed4acd10d567f76abc64df5ace
[ "MIT" ]
null
null
null
pypresence/__init__.py
GhostofGoes/pypresence
902f406345213fed4acd10d567f76abc64df5ace
[ "MIT" ]
null
null
null
pypresence/__init__.py
GhostofGoes/pypresence
902f406345213fed4acd10d567f76abc64df5ace
[ "MIT" ]
null
null
null
""" Python RPC Client for Discord ----------------------------- By: qwertyquerty and LewdNeko """ from .baseclient import BaseClient from .client import Client from .presence import Presence from .exceptions import * __title__ = 'pypresence' __author__ = 'qwertyquerty' __copyright__ = 'Copyright 2018 qwertyquerty' __license__ = 'MIT' __version__ = '1.0.9'
21.176471
45
0.708333
38
360
6.184211
0.657895
0
0
0
0
0
0
0
0
0
0
0.022364
0.130556
360
16
46
22.5
0.728435
0.247222
0
0
0
0
0.21673
0
0
0
0
0
0
1
0
false
0
0.444444
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
e533e0071445be02c8ad1c3692ec2dd70b4b8806
2,193
py
Python
test/python/transpiler/test_preset_passmanagers.py
chowington/qiskit-terra
a782c64c736fedd6a541bb45dbf89737a52b7c39
[ "Apache-2.0" ]
null
null
null
test/python/transpiler/test_preset_passmanagers.py
chowington/qiskit-terra
a782c64c736fedd6a541bb45dbf89737a52b7c39
[ "Apache-2.0" ]
null
null
null
test/python/transpiler/test_preset_passmanagers.py
chowington/qiskit-terra
a782c64c736fedd6a541bb45dbf89737a52b7c39
[ "Apache-2.0" ]
1
2019-06-13T08:07:26.000Z
2019-06-13T08:07:26.000Z
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Tests preset pass manager functionalities"""

from qiskit.test import QiskitTestCase
from qiskit.compiler import transpile
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit.test.mock import FakeTenerife, FakeMelbourne, FakeRueschlikon, FakeTokyo


class TestPresetPassManager(QiskitTestCase):
    """Test preset passmanagers work as expected."""

    def test_no_coupling_map(self):
        """Test that coupling_map can be None"""
        q = QuantumRegister(2, name='q')
        test = QuantumCircuit(q)
        test.cz(q[0], q[1])
        for level in [0, 1, 2, 3]:
            with self.subTest(level=level):
                test2 = transpile(test, basis_gates=['u1', 'u2', 'u3', 'cx'],
                                  optimization_level=level)
                self.assertIsInstance(test2, QuantumCircuit)


class TestFakeBackendTranspiling(QiskitTestCase):
    """Test transpiling on mock backends work properly"""

    def setUp(self):
        q = QuantumRegister(2)
        c = ClassicalRegister(2)
        self._circuit = QuantumCircuit(q, c)
        self._circuit.h(q[0])
        self._circuit.cx(q[0], q[1])
        self._circuit.measure(q, c)

    def test_optimization_level(self):
        """Test several backends with all optimization levels"""
        for backend in [FakeTenerife(), FakeMelbourne(), FakeRueschlikon(), FakeTokyo()]:
            for optimization_level in range(4):
                result = transpile(
                    [self._circuit],
                    backend=backend,
                    optimization_level=optimization_level
                )
                self.assertIsInstance(result, QuantumCircuit)
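A minimal standalone version of what these tests exercise: a Bell circuit run through transpile at each preset optimization level (qiskit-terra era API, as used in the file):

from qiskit import QuantumCircuit
from qiskit.compiler import transpile

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
for level in range(4):
    out = transpile(qc, basis_gates=['u1', 'u2', 'u3', 'cx'], optimization_level=level)
    print(level, out.count_ops())  # gate counts typically shrink at higher levels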
35.95082
89
0.653443
260
2,193
5.45
0.488462
0.059986
0.014114
0.06916
0
0
0
0
0
0
0
0.018856
0.250342
2,193
60
90
36.55
0.843066
0.320109
0
0
0
0
0.00619
0
0
0
0
0
0.064516
1
0.096774
false
0.032258
0.129032
0
0.290323
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5342b8791c68216bf30896c7274b41364db27db
4,291
py
Python
scripts/sptk/visualize_spectrogram.py
funcwj/kaldi_enhan
50e4da07c4e7fce7439da9be2b0bb1a0079491c3
[ "Apache-2.0" ]
35
2018-04-02T06:09:26.000Z
2019-02-19T08:27:10.000Z
scripts/sptk/visualize_spectrogram.py
funcwj/kaldi_enhan
50e4da07c4e7fce7439da9be2b0bb1a0079491c3
[ "Apache-2.0" ]
3
2018-11-08T10:21:34.000Z
2019-01-24T02:49:47.000Z
scripts/sptk/visualize_spectrogram.py
funcwj/kaldi_enhan
50e4da07c4e7fce7439da9be2b0bb1a0079491c3
[ "Apache-2.0" ]
17
2018-03-08T06:59:31.000Z
2019-02-19T08:27:41.000Z
#!/usr/bin/env python

# coding=utf-8
# wujian@2020

import argparse
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np

from libs.data_handler import SpectrogramReader
from libs.opts import StftParser
from libs.utils import get_logger

default_font = "Times New Roman"
default_font_size = 10
default_dpi = 200
default_fmt = "jpg"

logger = get_logger(__name__)


def save_figure(key, mat, dest, cmap="jet", hop=256, sr=16000, title=""):
    """
    Save figure to disk
    """

    def sub_plot(ax, mat, num_frames, num_bins, xticks=True, title=""):
        ax.imshow(np.transpose(mat),
                  origin="lower",
                  cmap=cmap,
                  aspect="auto",
                  interpolation="none")
        if xticks:
            xp = np.linspace(0, num_frames - 1, 5)
            ax.set_xticks(xp)
            ax.set_xticklabels([f"{t:.2f}" for t in (xp * hop / sr)],
                               fontproperties=default_font)
            ax.set_xlabel("Time (s)", fontdict={"family": default_font})
        else:
            ax.set_xticks([])
        yp = np.linspace(0, num_bins - 1, 6)
        fs = np.linspace(0, sr / 2, 6) / 1000
        ax.set_yticks(yp)
        ax.set_yticklabels([f"{t:.1f}" for t in fs],
                           fontproperties=default_font)
        ax.set_ylabel("Frequency (kHz)", fontdict={"family": default_font})
        if title:
            ax.set_title(title, fontdict={"family": default_font})

    logger.info(f"Plot TF-mask of utterance {key} to {dest}.{default_fmt}...")
    if mat.ndim == 3:
        N, T, F = mat.shape
    else:
        T, F = mat.shape
        N = 1
    fig, ax = plt.subplots(nrows=N)
    if N != 1:
        ts = title.split(";")
        for i in range(N):
            if len(ts) == N:
                sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1, title=ts[i])
            else:
                sub_plot(ax[i], mat[i], T, F, xticks=i == N - 1)
    else:
        sub_plot(ax, mat, T, F, title=title)
    fig.savefig(f"{dest}.{default_fmt}", dpi=default_dpi, format=default_fmt)
    plt.close(fig)


def run(args):
    cache_dir = Path(args.cache_dir)
    cache_dir.mkdir(parents=True, exist_ok=True)
    stft_kwargs = {
        "frame_len": args.frame_len,
        "frame_hop": args.frame_hop,
        "round_power_of_two": args.round_power_of_two,
        "window": args.window,
        "center": args.center  # false to comparable with kaldi
    }
    reader = SpectrogramReader(args.wav_scp,
                               **stft_kwargs,
                               apply_abs=True,
                               apply_log=True,
                               transpose=True)
    for key, mat in reader:
        if mat.ndim == 3 and args.index >= 0:
            mat = mat[args.index]
        save_figure(key,
                    mat,
                    cache_dir / key.replace(".", "-"),
                    cmap=args.cmap,
                    hop=args.frame_hop,
                    sr=args.sr,
                    title=args.title)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Command to visualize audio spectrogram.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    parser.add_argument("wav_scp", type=str, help="Read specifier of audio")
    parser.add_argument("--sr", type=int, default=16000,
                        help="Sample frequency (Hz)")
    parser.add_argument("--cache-dir", type=str, default="spectrogram",
                        help="Directory to dump spectrograms")
    parser.add_argument("--cmap", choices=["binary", "jet", "hot"], default="jet",
                        help="Colormap used when save figures")
    parser.add_argument("--index", type=int, default=-1,
                        help="Channel index to plot, -1 means all")
    parser.add_argument("--title", type=str, default="",
                        help="Title of the pictures")
    args = parser.parse_args()
    run(args)
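A plausible invocation under the argparse definitions above (the wav.scp path is a placeholder, and the STFT frame flags come from StftParser, not shown here):

# python visualize_spectrogram.py data/wav.scp --cache-dir spec --cmap jet --index 0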
33.523438
78
0.519226
507
4,291
4.242604
0.372781
0.018596
0.04742
0.034868
0.051139
0.023245
0.023245
0.023245
0.023245
0.023245
0
0.017078
0.358658
4,291
127
79
33.787402
0.764535
0.022372
0
0.093458
0
0
0.118295
0.005508
0
0
0
0
0
1
0.028037
false
0
0.065421
0
0.093458
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e535e6dd0129597b74bb4ecff114f37663cccbbf
9,246
py
Python
pyocni/adapters/httpResponse_Formater.py
MarouenMechtri/CNG-Manager
9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7
[ "Apache-2.0" ]
1
2015-02-28T21:26:07.000Z
2015-02-28T21:26:07.000Z
pyocni/adapters/httpResponse_Formater.py
MarouenMechtri/CNG-Manager
9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7
[ "Apache-2.0" ]
null
null
null
pyocni/adapters/httpResponse_Formater.py
MarouenMechtri/CNG-Manager
9535b721e7b832d72fd7bba6d2a29e76a0d4bdb7
[ "Apache-2.0" ]
null
null
null
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Created on Jun 21, 2012

@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""

try:
    import simplejson as json
except ImportError:
    import json

import pyocni.adapters.cnv_toHTTP as extractor
from webob import Response


class To_HTTP_Text_Plain():
    """
    Converts Response data from an application/occi+json object to HTTP text/plain descriptions
    """

    def format_to_text_plain_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories

        Args:
            @param var: JSON categories
        """
        resp = ""
        # dict.has_key() is Python 2 only; the 'in' operator works on both 2 and 3
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "kind") + "\n"
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "mixin") + "\n"
        if 'actions' in var:
            items = var['actions']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "action") + "\n"
        return resp

    def format_to_text_plain_entities(self, var):
        """
        Convert a JSON resource description into a text/plain resource description

        Args:
            @param var: JSON resource description
        """
        response = ""
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                response = response[:-1] + ",\n"
            response = response[:-2]
        if 'links' in var:
            items = var['links']
            response += ",\n"
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                response = response[:-1] + ",\n"
            response = response[:-2]
        return response

    def format_to_text_plain_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "Location: " + item + "\n"
        return locs

    def format_to_text_plain_x_locations(self, var):
        """
        Converts JSON locations into HTTP X-OCCI locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "X-OCCI-Location: " + item + "\n"
        return locs


class To_HTTP_Text_OCCI():
    """
    Converts Response data from an application/occi+json object to HTTP text/occi descriptions
    """

    def format_to_text_occi_categories(self, var):
        """
        Format JSON categories into HTTP text/occi categories

        Args:
            @param var: JSON categories
        """
        resp = Response()
        resp.headers.clear()
        value = ""
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                value = cnv_JSON_category(item, "kind") + ",\n"
                resp.headers.add('Category', value[:-2])
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                value = cnv_JSON_category(item, "mixin") + ",\n"
                resp.headers.add('Category', value[:-2])
        if 'actions' in var:
            items = var['actions']
            for item in items:
                value = cnv_JSON_category(item, "action") + ",\n"
                resp.headers.add('Category', value[:-2])
        return resp.headers

    def format_to_text_occi_entities(self, var):
        """
        Convert a JSON resource description into a text/occi resource description

        Args:
            @param var: JSON resource description
        """
        response = Response()
        response.headers.clear()
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        if 'links' in var:
            items = var['links']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        return response.headers

    def format_to_text_occi_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("Location", locs[:-1])
        return resp.headers

    def format_to_text_x_occi_locations(self, var):
        """
        Converts JSON locations into HTTP X-OCCI locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("X-OCCI-Location", locs[:-1])
        return resp.headers


class To_HTTP_Text_URI_List():
    """
    Converts Response data from an application/occi+json object to HTTP text/uri descriptions
    """

    def __init__(self):
        pass

    def check_for_uri_locations(self, var):
        """
        Checks for the existence of path URIs in a JSON location object

        Args:
            @param var: JSON location object
        """
        resp = ""
        for item in var:
            resp += item + "\n"
        return resp, True


def cnv_JSON_category(category, type):
    """
    Converts a JSON category into an HTTP category

    Args:
        @param category: JSON category
        @param type: Category type = (kind || mixin || action)
    """
    http_cat = extractor.extract_term_from_category(category) + ';'
    http_cat += "scheme=\"" + extractor.extract_scheme_from_category(category) + "\";"
    http_cat += "class=\"" + type + "\";"
    title = extractor.extract_title_from_category(category)
    if title is not None:
        http_cat += "title=\"" + title + "\";"
    rel = extractor.extract_related_from_category(category)
    if rel is not None:
        http_cat += "rel=\"" + rel + "\";"
    attributes = extractor.extract_attributes_from_category(category)
    if attributes is not None:
        http_cat += "attributes=\"" + attributes + "\";"
    actions = extractor.extract_actions_from_category(category)
    if actions is not None:
        http_cat += "actions=\"" + actions + "\";"
    location = extractor.extract_location_from_category(category)
    if location is not None:
        http_cat += "location=\"" + location + "\";"
    return http_cat


def cnv_JSON_Resource(json_object):
    """
    Converts a JSON Resource into an HTTP Resource
    """
    res_cat = list()
    res_links = list()
    res_cat.append(extractor.extract_kind_from_entity(json_object))
    items = extractor.extract_mixin_from_entity(json_object)
    if items is not None:
        res_cat.extend(items)
    var = extractor.extract_attributes_from_entity(json_object)
    if var is not None:
        res_att = var
    else:
        res_att = list()
    items = extractor.extract_internal_link_from_entity(json_object)
    if items is not None:
        res_links.extend(items)
    items = extractor.extract_actions_from_entity(json_object)
    if items is not None:
        res_links.extend(items)
    return res_cat, res_links, res_att
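A minimal sketch of how the text/plain renderer above might be driven. It assumes the pyocni extractor helpers are importable and that categories carry OCCI-style 'term' and 'scheme' fields (both assumptions here, not confirmed by this file alone):

# Illustrative only; depends on pyocni.adapters.cnv_toHTTP being installed.
renderer = To_HTTP_Text_Plain()
sample = {
    'kinds': [
        {'term': 'compute', 'scheme': 'http://schemas.ogf.org/occi/infrastructure#'},
    ]
}
print(renderer.format_to_text_plain_categories(sample))
# Plausible shape of the output (one header-style line per category):
# Category :compute;scheme="http://schemas.ogf.org/occi/infrastructure#";class="kind";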
29.259494
92
0.561865
1,075
9,246
4.693953
0.156279
0.020809
0.026754
0.021799
0.608205
0.530321
0.510107
0.491677
0.480182
0.44134
0
0.004698
0.33236
9,246
315
93
29.352381
0.812733
0.228856
0
0.559524
0
0
0.093403
0.007196
0
0
0
0
0
1
0.071429
false
0.005952
0.029762
0
0.184524
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e536c28016db6252f76243560211157a7504c1af
3,352
py
Python
restapi/endpoints/profile.py
rapydo/http-api
ef0a299173195145303069534d45d446ea4da93a
[ "MIT" ]
8
2018-07-04T09:54:46.000Z
2022-03-17T08:21:06.000Z
restapi/endpoints/profile.py
rapydo/http-api
ef0a299173195145303069534d45d446ea4da93a
[ "MIT" ]
19
2018-04-18T07:24:55.000Z
2022-03-04T01:03:15.000Z
restapi/endpoints/profile.py
rapydo/http-api
ef0a299173195145303069534d45d446ea4da93a
[ "MIT" ]
7
2018-07-03T12:17:50.000Z
2021-05-05T04:33:32.000Z
from typing import Any, Optional

from restapi import decorators
from restapi.endpoints.schemas import NewPassword, profile_output, profile_patch_input
from restapi.exceptions import Unauthorized
from restapi.rest.definition import EndpointResource, Response
from restapi.services.authentication import AuthMissingTOTP, User
from restapi.utilities.globals import mem
from restapi.utilities.logs import log


class Profile(EndpointResource):

    depends_on = ["MAIN_LOGIN_ENABLE", "AUTH_ENABLE"]
    labels = ["profile"]

    @decorators.auth.require()
    @decorators.marshal_with(profile_output(), code=200)
    @decorators.endpoint(
        path="/auth/profile",
        summary="List profile attributes",
        responses={200: "User profile is returned"},
    )
    def get(self, user: User) -> Response:

        data = {
            "uuid": user.uuid,
            "email": user.email,
            "name": user.name,
            "surname": user.surname,
            "isAdmin": self.auth.is_admin(user),
            "isStaff": self.auth.is_staff(user),
            "isCoordinator": self.auth.is_coordinator(user),
            "privacy_accepted": user.privacy_accepted,
            "last_password_change": user.last_password_change,
            "first_login": user.first_login,
            "last_login": user.last_login,
            "is_active": user.is_active,
            "expiration": user.expiration,
            "belongs_to": user.belongs_to,
            # Convert list of Roles into a dict with name: description
            "roles": {role.name: role.description for role in user.roles},
            "two_factor_enabled": self.auth.SECOND_FACTOR_AUTHENTICATION,
        }

        data = mem.customizer.manipulate_profile(ref=self, user=user, data=data)

        return self.response(data)

    @decorators.auth.require()
    @decorators.use_kwargs(NewPassword)
    @decorators.endpoint(
        path="/auth/profile",
        summary="Update user password",
        responses={204: "Password updated"},
    )
    def put(
        self,
        password: str,
        new_password: str,
        password_confirm: str,
        user: User,
        totp_code: Optional[str] = None,
    ) -> Response:

        try:
            self.auth.make_login(user.email, password, totp_code)
        except AuthMissingTOTP:
            raise Unauthorized("Verification code is missing")

        self.auth.change_password(user, password, new_password, password_confirm)
        self.auth.save_user(user)

        return self.empty_response()

    @decorators.auth.require()
    @decorators.use_kwargs(profile_patch_input())
    @decorators.endpoint(
        path="/auth/profile",
        summary="Update profile information",
        responses={204: "Profile updated"},
    )
    def patch(self, user: User, **kwargs: Any) -> Response:
        """Update profile for current user"""

        # mypy correctly raises errors because update_properties is not defined
        # in generic Connector instances, but in this case this is an instance
        # of an auth db and their implementation always contains this method
        self.auth.db.update_properties(user, kwargs)  # type: ignore

        log.info("Profile updated")

        self.auth.save_user(user)

        self.log_event(self.events.modify, user, kwargs)

        return self.empty_response()
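A possible client-side sketch for the three endpoints above, using requests. The base URL and token are assumptions (they depend on how the rapydo service is deployed), not part of this file:

import requests  # hypothetical client; BASE and the bearer token are assumed

BASE = "http://localhost:8080"
headers = {"Authorization": "Bearer <token>"}

# GET /auth/profile returns the marshalled profile dict built in Profile.get
profile = requests.get(f"{BASE}/auth/profile", headers=headers).json()
print(profile.get("email"))

# PUT /auth/profile re-authenticates with the old password before changing it,
# answering 204 on success per the endpoint declaration
requests.put(f"{BASE}/auth/profile", headers=headers, json={
    "password": "old-secret",
    "new_password": "new-secret",
    "password_confirm": "new-secret",
})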
34.204082
86
0.651253
377
3,352
5.657825
0.376658
0.033755
0.029536
0.043601
0.118143
0.099391
0.043132
0
0
0
0
0.004752
0.246718
3,352
97
87
34.556701
0.84
0.091885
0
0.175676
0
0
0.13085
0
0
0
0
0
0
1
0.040541
false
0.135135
0.108108
0
0.22973
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
e5371254ed22fa060db8dd66fc1b046f1463cc0b
928
py
Python
tests/sample/comprehension.py
JarryShaw/walrus
cb748496974c4c8b1a68907d46eff2845db75e84
[ "MIT" ]
4
2020-06-01T17:38:15.000Z
2022-03-26T12:01:16.000Z
tests/sample/comprehension.py
pybpc/walrus
cb748496974c4c8b1a68907d46eff2845db75e84
[ "MIT" ]
23
2020-05-31T09:24:49.000Z
2021-02-22T19:29:23.000Z
tests/sample/comprehension.py
pybpc/walrus
cb748496974c4c8b1a68907d46eff2845db75e84
[ "MIT" ]
1
2019-11-09T00:34:37.000Z
2019-11-09T00:34:37.000Z
comp1 = [x1 := x * x for x in range(10)]
print(comp1)
print(x1)
print(globals().get('x'))


def f2():
    comp2 = [x2 := x ** 3 for x in range(9)]
    print(comp2)
    print(x2)
    print(locals().get('x'))


def f3():
    global x3
    comp3 = [x3 := x ** 4 for x in range(8)]
    print(comp3)
    print(locals().get('x'))


def f4():
    x4 = 0

    def g4():
        nonlocal x4
        comp4 = [x4 := x ** 5 for x in range(7)]
        print(comp4)
        print(locals().get('x'))

    g4()
    print(x4)


def f5():
    comp5 = [[x5 := i for i in range(3)] for j in range(2)]
    print(comp5)
    print(x5)
    print(locals().get('i'))
    print(locals().get('j'))


f2()
print(globals().get('x'))
print(globals().get('x2'))
f3()
print(globals().get('x'))
print(globals().get('x3'))
f4()
print(globals().get('x'))
print(globals().get('x4'))
f5()
print(globals().get('i'))
print(globals().get('j'))
print(globals().get('x5'))
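The behavior this sample probes is PEP 572 scoping: a walrus target inside a comprehension binds in the scope containing the comprehension, while the ordinary loop variable stays local to it. A minimal standalone check of the same point:

squares = [last := n * n for n in range(4)]
print(last)               # 9: the walrus target leaked into the enclosing scope
print('n' in globals())   # False: the comprehension loop variable did not leak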
17.509434
59
0.528017
148
928
3.310811
0.25
0.244898
0.306122
0.089796
0.263265
0.189796
0.189796
0
0
0
0
0.065734
0.229526
928
52
60
17.846154
0.61958
0
0
0.166667
0
0
0.020474
0
0
0
0
0
0
1
0.119048
false
0
0
0
0.119048
0.571429
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
e5377b6da443630c0d016aff8eb0fc9c6d8663e8
3,358
py
Python
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
1
2020-01-22T13:11:23.000Z
2020-01-22T13:11:23.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
12
2020-02-21T07:24:52.000Z
2020-04-14T09:54:32.000Z
venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/authentication/test_auth_approle.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest

from ansible_collections.community.hashi_vault.tests.unit.compat import mock

from ansible_collections.community.hashi_vault.plugins.module_utils._auth_method_approle import (
    HashiVaultAuthMethodApprole,
)

from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
    HashiVaultAuthMethodBase,
    HashiVaultValueError,
)


@pytest.fixture
def option_dict():
    return {
        'auth_method': 'approle',
        'secret_id': None,
        'role_id': None,
        'mount_point': None,
    }


@pytest.fixture
def secret_id():
    return 'opaque'


@pytest.fixture
def role_id():
    return 'fake-role'


@pytest.fixture
def auth_approle(adapter, warner):
    return HashiVaultAuthMethodApprole(adapter, warner)


@pytest.fixture
def approle_login_response(fixture_loader):
    return fixture_loader('approle_login_response.json')


class TestAuthApprole(object):

    def test_auth_approle_is_auth_method_base(self, auth_approle):
        assert isinstance(auth_approle, HashiVaultAuthMethodApprole)
        assert issubclass(HashiVaultAuthMethodApprole, HashiVaultAuthMethodBase)

    def test_auth_approle_validate_direct(self, auth_approle, adapter, role_id):
        adapter.set_option('role_id', role_id)
        auth_approle.validate()

    @pytest.mark.parametrize('opt_patch', [
        {},
        {'secret_id': 'secret_id-only'},
    ])
    def test_auth_approle_validate_xfailures(self, auth_approle, adapter, opt_patch):
        adapter.set_options(**opt_patch)

        with pytest.raises(HashiVaultValueError, match=r'Authentication method approle requires options .*? to be set, but these are missing:'):
            auth_approle.validate()

    @pytest.mark.parametrize('use_token', [True, False], ids=lambda x: 'use_token=%s' % x)
    @pytest.mark.parametrize('mount_point', [None, 'other'], ids=lambda x: 'mount_point=%s' % x)
    def test_auth_approle_authenticate(self, auth_approle, client, adapter, secret_id, role_id,
                                       mount_point, use_token, approle_login_response):
        adapter.set_option('secret_id', secret_id)
        adapter.set_option('role_id', role_id)
        adapter.set_option('mount_point', mount_point)

        expected_login_params = {
            'secret_id': secret_id,
            'role_id': role_id,
            'use_token': use_token,
        }
        if mount_point:
            expected_login_params['mount_point'] = mount_point

        def _set_client_token(*args, **kwargs):
            if kwargs['use_token']:
                client.token = approle_login_response['auth']['client_token']
            return approle_login_response

        with mock.patch.object(client.auth.approle, 'login', side_effect=_set_client_token) as approle_login:
            response = auth_approle.authenticate(client, use_token=use_token)

        approle_login.assert_called_once_with(**expected_login_params)

        assert response['auth']['client_token'] == approle_login_response['auth']['client_token']
        assert (client.token == approle_login_response['auth']['client_token']) is use_token
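The core mocking move in test_auth_approle_authenticate is patch.object with a side_effect, which lets the fake login mutate client state and still record the call. The same mechanics in a minimal standalone form (stdlib only):

from unittest.mock import patch

class Client:
    token = None
    def login(self, **kw):
        return {'auth': 'real'}

c = Client()
with patch.object(c, 'login', side_effect=lambda **kw: {'auth': 'fake'}) as m:
    assert c.login(role_id='r') == {'auth': 'fake'}  # side_effect result is returned
    m.assert_called_once_with(role_id='r')           # and the call was recorded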
33.919192
144
0.712627
405
3,358
5.575309
0.311111
0.068202
0.070859
0.031887
0.251107
0.193534
0.139947
0.139947
0.052259
0
0
0.00327
0.180465
3,358
98
145
34.265306
0.817224
0.047052
0
0.132353
0
0
0.125782
0.008448
0
0
0
0
0.073529
1
0.147059
false
0
0.073529
0.073529
0.323529
0.014706
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e53797255ddf8c7e28dc3a9ceca68b9b6f0d9c22
323
py
Python
main/amy/setup.py
drepetto/alles
bb72c36f1b07f539e73726a91c9538482b4c06cb
[ "MIT" ]
81
2020-03-01T22:18:48.000Z
2022-03-20T16:53:01.000Z
main/amy/setup.py
bwhitman/synthserver
98f824fd5c2056823727884a3bdf8c8a6ff7ced1
[ "MIT" ]
12
2021-04-22T01:21:25.000Z
2021-12-11T18:29:18.000Z
main/amy/setup.py
bwhitman/synthserver
98f824fd5c2056823727884a3bdf8c8a6ff7ced1
[ "MIT" ]
8
2020-04-29T18:26:25.000Z
2021-12-07T22:55:00.000Z
from distutils.core import setup, Extension
import glob
import os

# the c extension module
sources = glob.glob("*.c")

os.environ["CC"] = "gcc"
os.environ["CXX"] = "g++"

extension_mod = Extension("libamy", sources=sources,
                          extra_link_args=["-lsoundio", "-lpthread"])

setup(name="libamy", ext_modules=[extension_mod])
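A possible build-and-import sequence for the extension declared above, assuming the AMY C sources and libsoundio are present in the working directory (both assumptions here):

# Build in place with the standard distutils command:
#     python setup.py build_ext --inplace
# after which the compiled module is importable from Python:
#     import libamy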
26.916667
96
0.708978
44
323
5.090909
0.613636
0.080357
0
0
0
0
0
0
0
0
0
0
0.111455
323
12
97
26.916667
0.780488
0.074303
0
0
0
0
0.147651
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
e53b4e82439a5be972e3455d4b58df9c2ff3541c
826
py
Python
app/model/job_keyword.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
app/model/job_keyword.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
app/model/job_keyword.py
awesome-archive/webspider
072e9944db8fe05cbb47f8ea6d1a327c2a8929b1
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from sqlalchemy import Column
from sqlalchemy.dialects.mysql import INTEGER

from common.db import BaseModel


class JobKeywordModel(BaseModel):
    __tablename__ = 'job_keyword'

    id = Column(INTEGER, primary_key=True, autoincrement=True)
    job_id = Column(INTEGER, doc=u'job id')
    keyword_id = Column(INTEGER, doc=u'keyword id')
    city_id = Column(INTEGER, doc=u'redundant: id of the city the job is in')

    @classmethod
    def list(cls, job_id=None):
        query = cls.session.query(cls)
        if job_id:
            query = query.filter(cls.job_id == job_id)
        return query.all()

    @classmethod
    def add(cls, job_id, keyword_id, city_id):
        job_keyword = cls(job_id=int(job_id), keyword_id=int(keyword_id), city_id=int(city_id))
        cls.session.merge(job_keyword)
        cls.session.commit()
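Sketch of typical use of the two classmethods, assuming BaseModel.session is already bound to a database (that wiring lives in common.db and is not shown in this file):

JobKeywordModel.add(job_id=42, keyword_id=7, city_id=3)
for jk in JobKeywordModel.list(job_id=42):
    print(jk.job_id, jk.keyword_id, jk.city_id)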
29.5
95
0.671913
119
826
4.462185
0.378151
0.07533
0.112994
0.101695
0.107345
0
0
0
0
0
0
0.001531
0.209443
826
27
96
30.592593
0.811639
0.025424
0
0.1
0
0
0.039851
0
0
0
0
0
0
1
0.1
false
0
0.15
0
0.6
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
e53b8de7cefb3da0c77b80958c4124a1178847f8
905
py
Python
ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py
zhangyimi/Research
866f91d9774a38d205d6e9a3b1ee6293748261b3
[ "Apache-2.0" ]
1
2022-03-18T08:32:37.000Z
2022-03-18T08:32:37.000Z
ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py
green9989/Research
94519a72e7936c77f62a31709634b72c09aabf74
[ "Apache-2.0" ]
null
null
null
ST_DM/KDD2022-DuMapper/DME/arch/utils/ll_2_mc.py
green9989/Research
94519a72e7936c77f62a31709634b72c09aabf74
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
File: ll_2_mc.py
func: convert between Mercator and longitude/latitude coordinates
Author: yuwei09(yuwei09@baidu.com)
Date: 2021/07/21
"""
import math

SCALE_S = 20037508.34


def lonLat2Mercator(x, y):
    """Convert longitude/latitude to Mercator coordinates"""
    mx = x * SCALE_S / 180.
    my = math.log(math.tan((90. + y) * math.pi / 360.)) / (math.pi / 180.)
    # scale the projected latitude; the original assigned `y * SCALE_S / 180.`,
    # which discarded the log/tan term above and broke the projection
    my = my * SCALE_S / 180.
    return mx, my


def Mercator2LonLat(x, y):
    """Convert a Mercator point to longitude/latitude coordinates"""
    lx = x / SCALE_S * 180.
    ly = y / SCALE_S * 180.
    ly = 180 / math.pi * (2 * math.atan(math.exp(ly * math.pi / 180.)) - math.pi / 2)
    return lx, ly


if __name__ == '__main__':
    x, y = 12962922.3800, 4832335.0200
    lx, ly = Mercator2LonLat(x, y)
    print(lx, ly)
    # lx, ly = bd09mc_to_bd09ll(x, y)
    # print(lx, ly)
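With the scaling fix above, the two converters are exact inverses of each other (up to floating point). A round-trip check with an illustrative coordinate:

mx, my = lonLat2Mercator(116.404, 39.915)   # roughly Beijing
lon, lat = Mercator2LonLat(mx, my)
assert abs(lon - 116.404) < 1e-6 and abs(lat - 39.915) < 1e-6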
23.205128
85
0.612155
141
905
3.808511
0.510638
0.055866
0.067039
0.037244
0.040968
0
0
0
0
0
0
0.123741
0.232044
905
38
86
23.815789
0.648921
0.362431
0
0
0
0
0.014388
0
0
0
0
0
0
1
0.125
false
0
0.0625
0
0.3125
0.0625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e541c66e26b2dcd462e9c9a22b50ce0b746cca85
7,028
py
Python
process_azure_roles.py
noamsdahan/iam-dataset
da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
[ "MIT" ]
58
2021-06-23T07:12:19.000Z
2022-03-26T14:55:00.000Z
process_azure_roles.py
noamsdahan/iam-dataset
da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
[ "MIT" ]
8
2021-11-01T15:41:19.000Z
2022-02-08T08:04:05.000Z
process_azure_roles.py
noamsdahan/iam-dataset
da640ba65906f3f6091c6cbfdfdc0ca03df83f8f
[ "MIT" ]
4
2021-07-31T03:13:12.000Z
2022-03-22T08:28:08.000Z
import os
import json
import time
import requests
import re

result = {
    'roles': []
}

raw_roles = []
with open("azure/built-in-roles-raw.json", "r") as f:
    raw_roles = json.loads(f.read())

provider_ops = []
with open("azure/provider-operations.json", "r") as f:
    provider_ops = json.loads(f.read())

for raw_role in raw_roles:
    if raw_role['roleType'] != "BuiltInRole":
        continue

    permitted_actions = []
    permitted_data_actions = []
    has_unknown = False
    has_external = False

    for permission in raw_role['permissions']:
        for action in permission['actions']:
            matched = False
            matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
            for provider in provider_ops:
                for operation in provider['operations']:
                    if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                        permitted_actions.append({
                            'name': operation['name'],
                            'description': operation['description'],
                            'displayName': operation['displayName'],
                            'providerName': provider['name'],
                            'providerDisplayName': provider['displayName']
                        })
                        matched = True
                for resource_type in provider['resourceTypes']:
                    for operation in resource_type['operations']:
                        if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                            permitted_actions.append({
                                'name': operation['name'],
                                'description': operation['description'],
                                'displayName': operation['displayName'],
                                'providerName': provider['name'],
                                'providerDisplayName': provider['displayName']
                            })
                            matched = True
            if not action.lower().startswith("microsoft."):
                has_external = True
            if not matched:
                has_unknown = True

    for permission in raw_role['permissions']:
        for action in permission['dataActions']:
            matched = False
            matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
            for provider in provider_ops:
                for operation in provider['operations']:
                    if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                        permitted_data_actions.append({
                            'name': operation['name'],
                            'description': operation['description'],
                            'displayName': operation['displayName'],
                            'providerName': provider['name'],
                            'providerDisplayName': provider['displayName']
                        })
                        matched = True
                for resource_type in provider['resourceTypes']:
                    for operation in resource_type['operations']:
                        if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                            permitted_data_actions.append({
                                'name': operation['name'],
                                'description': operation['description'],
                                'displayName': operation['displayName'],
                                'providerName': provider['name'],
                                'providerDisplayName': provider['displayName']
                            })
                            matched = True
            if not action.lower().startswith("microsoft."):
                has_external = True
            if not matched:
                has_unknown = True

    for permission in raw_role['permissions']:
        for action in permission['notActions']:
            matched = False
            matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
            for provider in provider_ops:
                for operation in provider['operations']:
                    if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                        permitted_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_actions))
                        matched = True
                for resource_type in provider['resourceTypes']:
                    for operation in resource_type['operations']:
                        if not operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                            permitted_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_actions))
                            matched = True
            if not action.lower().startswith("microsoft."):
                has_external = True
            if not matched:
                has_unknown = True

    for permission in raw_role['permissions']:
        for action in permission['notDataActions']:
            matched = False
            matchexpression = "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"
            for provider in provider_ops:
                for operation in provider['operations']:
                    if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                        permitted_data_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_data_actions))
                        matched = True
                for resource_type in provider['resourceTypes']:
                    for operation in resource_type['operations']:
                        if operation['isDataAction'] and re.search(matchexpression.lower(), operation['name'].lower()):
                            permitted_data_actions = list(filter(lambda x: x['name'].lower() != operation['name'].lower(), permitted_data_actions))
                            matched = True
            if not action.lower().startswith("microsoft."):
                has_external = True
            if not matched:
                has_unknown = True

    result['roles'].append({
        'name': raw_role['roleName'],
        'description': raw_role['description'],
        'permittedActions': permitted_actions,
        'permittedDataActions': permitted_data_actions,
        'rawPermissions': raw_role['permissions'],
        'hasUnknown': has_unknown,
        'hasExternal': has_external
    })

with open("azure/built-in-roles.json", "w") as f:
    f.write(json.dumps(result, indent=2, sort_keys=True))
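The four matching blocks above repeat the same wildcard-to-regex translation inline. A standalone sketch of just that translation, reproduced verbatim (including the script's ".{{1}}" replacement for "?", which looks like an escaped-brace leftover):

import re

def action_to_regex(action):
    # same replacements the script applies before matching, anchored at both ends
    return "^" + action.replace(".", "\\.").replace("*", ".*").replace("?", ".{{1}}") + "$"

pattern = action_to_regex("Microsoft.Compute/*/read")
print(bool(re.search(pattern.lower(), "microsoft.compute/disks/read")))  # True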
50.2
147
0.517075
585
7,028
6.104274
0.136752
0.058247
0.060487
0.077289
0.8317
0.8317
0.817698
0.817698
0.817698
0.817698
0
0.0011
0.353159
7,028
139
148
50.561151
0.784426
0
0
0.726563
0
0
0.157371
0.011952
0
0
0
0
0
1
0
false
0
0.039063
0
0.039063
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
e542dba77b736c853c5f81ad6228519c31bfebd5
6,656
py
Python
tests/usecases/test_exchange_rate_interactors.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
8
2021-11-09T16:43:38.000Z
2022-03-25T16:04:26.000Z
tests/usecases/test_exchange_rate_interactors.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
null
null
null
tests/usecases/test_exchange_rate_interactors.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
2
2021-11-16T21:17:31.000Z
2022-02-11T11:15:29.000Z
# coding: utf-8
import datetime
import random
from unittest.mock import Mock

from django.db import reset_queries
import pytest

from src.domain.exchange_rate import CurrencyEntity, CurrencyExchangeRateEntity
from src.usecases.exchange_rate import CurrencyInteractor, CurrencyExchangeRateInteractor
from tests.fixtures import currency, exchange_rate


@pytest.mark.unit
def test_currency_interactor_get(currency):
    currency_repo = Mock()
    currency_repo.get.return_value = currency
    currency_interactor = CurrencyInteractor(currency_repo)
    result = currency_interactor.get(currency.code)
    assert currency_repo.get.called
    assert result.code == currency.code
    assert result.name == currency.name
    assert result.symbol == currency.symbol
    assert CurrencyEntity.to_string(result) == CurrencyEntity.to_string(currency)


@pytest.mark.unit
def test_currency_interactor_get_availables(currency):
    num_of_currencies = random.randint(1, 10)
    currencies_available = [currency for _ in range(num_of_currencies)]
    currency_repo = Mock()
    currency_repo.get_availables.return_value = currencies_available
    currency_interactor = CurrencyInteractor(currency_repo)
    result = currency_interactor.get_availables()
    assert currency_repo.get_availables.called
    assert isinstance(result, list)
    assert len(result) == num_of_currencies
    assert all([isinstance(currency, CurrencyEntity) for currency in result])


@pytest.mark.unit
def test_currency_interactor_save(currency):
    currency_repo = Mock()
    currency_repo.save.return_value = None
    currency_interactor = CurrencyInteractor(currency_repo)
    result = currency_interactor.save(currency)
    assert currency_repo.save.called
    assert result is None


@pytest.mark.unit
def test_currency_interactor_bulk_save(currency):
    currencies = [currency for _ in range(random.randint(1, 10))]
    currency_repo = Mock()
    currency_repo.bulk_save.return_value = None
    currency_interactor = CurrencyInteractor(currency_repo)
    result = currency_interactor.bulk_save(currencies)
    assert currency_repo.bulk_save.called
    assert result is None


@pytest.mark.unit
def test_currency_exchange_rate_interactor_get(exchange_rate):
    exchange_rate_repo = Mock()
    exchange_rate_repo.get.return_value = exchange_rate
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    filter = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'valuation_date': exchange_rate.valuation_date
    }
    result = exchange_rate_interactor.get(**filter)
    assert exchange_rate_repo.get.called
    assert result.source_currency == exchange_rate.source_currency
    assert result.exchanged_currency == exchange_rate.exchanged_currency
    assert result.valuation_date == exchange_rate.valuation_date
    assert result.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        result) == CurrencyExchangeRateEntity.to_string(exchange_rate)


@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_latest(exchange_rate):
    exchange_rate_repo = Mock()
    exchange_rate_repo.get.return_value = exchange_rate
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    filter = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency
    }
    result = exchange_rate_interactor.get_latest(**filter)
    assert exchange_rate_repo.get.called
    assert result.source_currency == exchange_rate.source_currency
    assert result.exchanged_currency == exchange_rate.exchanged_currency
    assert result.valuation_date == datetime.date.today().strftime('%Y-%m-%d')
    assert result.rate_value == exchange_rate.rate_value
    assert CurrencyExchangeRateEntity.to_string(
        result) == CurrencyExchangeRateEntity.to_string(exchange_rate)


@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_rate_series(exchange_rate):
    num_of_rates = random.randint(1, 10)
    rate_series = [round(random.uniform(0.8, 1.2), 6) for _ in range(num_of_rates)]
    exchange_rate_repo = Mock()
    exchange_rate_repo.get_rate_series.return_value = rate_series
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    filter = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'date_from': datetime.date.today() + datetime.timedelta(days=-num_of_rates),
        'date_to': datetime.date.today()
    }
    result = exchange_rate_interactor.get_rate_series(**filter)
    assert exchange_rate_repo.get_rate_series.called
    assert isinstance(result, list)
    assert len(result) == num_of_rates
    assert all([isinstance(rate, float) for rate in result])


@pytest.mark.unit
def test_currency_exchange_rate_interactor_get_time_series(exchange_rate):
    series_length = random.randint(1, 10)
    time_series = [exchange_rate for _ in range(series_length)]
    exchange_rate_repo = Mock()
    exchange_rate_repo.get_time_series.return_value = time_series
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    filter = {
        'source_currency': exchange_rate.source_currency,
        'exchanged_currency': exchange_rate.exchanged_currency,
        'date_from': datetime.date.today() + datetime.timedelta(days=-series_length),
        'date_to': datetime.date.today()
    }
    result = exchange_rate_interactor.get_time_series(**filter)
    assert exchange_rate_repo.get_time_series.called
    assert isinstance(result, list)
    assert len(result) == series_length
    assert all([isinstance(cer, CurrencyExchangeRateEntity) for cer in result])


@pytest.mark.unit
def test_currency_exchange_rate_interactor_save(exchange_rate):
    exchange_rate_repo = Mock()
    exchange_rate_repo.save.return_value = None
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    result = exchange_rate_interactor.save(exchange_rate)
    assert exchange_rate_repo.save.called
    assert result is None


@pytest.mark.unit
def test_currency_exchange_rate_interactor_bulk_save(exchange_rate):
    exchange_rates = [exchange_rate for _ in range(random.randint(1, 10))]
    exchange_rate_repo = Mock()
    exchange_rate_repo.bulk_save.return_value = None
    exchange_rate_interactor = CurrencyExchangeRateInteractor(exchange_rate_repo)
    result = exchange_rate_interactor.bulk_save(exchange_rates)
    assert exchange_rate_repo.bulk_save.called
    assert result is None
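Every test above follows the same clean-architecture pattern: a Mock stands in for the repository port, records the call, and returns a canned entity while the interactor is exercised. The pattern in a minimal standalone form (stdlib only):

from unittest.mock import Mock

repo = Mock()
repo.get.return_value = 'sentinel'   # canned repository answer
assert repo.get('EUR') == 'sentinel' # interactor-style call passes through
assert repo.get.called               # and the collaboration is observable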
41.341615
89
0.783804
810
6,656
6.088889
0.106173
0.180049
0.077859
0.034469
0.763382
0.723033
0.664436
0.627534
0.575831
0.523723
0
0.003667
0.139724
6,656
160
90
41.6
0.857667
0.001953
0
0.470588
0
0
0.028008
0
0
0
0
0
0.272059
1
0.073529
false
0
0.058824
0
0.132353
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e5435081eee984010a042f8f54a44d659b9e9dc8
1,978
py
Python
pypret/io/tests/test_io.py
liam-clink/pypret
c84e954efc12137c6b5ade4fae920d60a15d4875
[ "MIT" ]
36
2019-03-16T18:38:10.000Z
2022-02-15T14:25:30.000Z
pypret/io/tests/test_io.py
liam-clink/pypret
c84e954efc12137c6b5ade4fae920d60a15d4875
[ "MIT" ]
1
2019-06-24T21:32:14.000Z
2019-07-03T12:46:28.000Z
pypret/io/tests/test_io.py
liam-clink/pypret
c84e954efc12137c6b5ade4fae920d60a15d4875
[ "MIT" ]
12
2019-07-23T22:03:55.000Z
2022-01-06T08:50:52.000Z
""" This module tests the io subpackage implementation. Author: Nils Geib, nils.geib@uni-jena.de """ import numpy as np from pypret import io from pprint import pformat from os import remove class IO1(io.IO): x = 1 def squared(self): return self.x * self.x def __repr__(self): return "IO1(x={0})".format(self.x) class Grid(io.IO): _io_store = ['N', 'dx', 'x0'] def __init__(self, N, dx, x0=0.0): # This is _not_ called upon loading from storage self.N = N self.dx = dx self.x0 = x0 self._post_init() def _post_init(self): # this is called upon loading from storage # calculate the grids n = np.arange(self.N) self.x = self.x0 + n * self.dx def __repr__(self): return "TestIO1(N={0}, dx={1}, x0={2})".format( self.N, self.dx, self.x0) def test_io(): # test flat arrays _assert_io(np.arange(5)) _assert_io(np.arange(5, dtype=np.complex128)) # test nested structures of various types _assert_io([{'a': 1.0, 'b': np.uint16(1)}, np.random.rand(10), True, None, "hello", 1231241512354134123412353124, b"bytes"]) _assert_io([[[1]], [[[[1], 2], 3], 4], 5]) # Test custom objects _assert_io(IO1()) _assert_io(Grid(128, 0.23, x0=-2.3)) def _assert_io(x): """ This is slightly hacky: we use pprint to recursively print the objects and compare the resulting strings to make sure they are the same. This only works as pprint sorts the dictionary entries by their keys before printing. This requires custom objects to implement __repr__. """ io.save(x, "test.hdf5") x2 = io.load("test.hdf5") remove("test.hdf5") s1 = pformat(x) s2 = pformat(x2) if s1 != s2: print(s1) print(s2) assert False if __name__ == "__main__": test_io()
26.026316
79
0.577351
285
1,978
3.838596
0.417544
0.051188
0.019196
0.031079
0.082267
0
0
0
0
0
0
0.060345
0.296259
1,978
75
80
26.373333
0.725575
0.280586
0
0.046512
0
0
0.070336
0
0
0
0
0
0.186047
1
0.162791
false
0
0.093023
0.069767
0.418605
0.069767
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e543a1470c327269bc5d0fa3125cb8ab3fe77488
14,682
py
Python
src/main/python/main.py
wong-justin/quick-bible
035db43eca2c811792e32b123fa81f679ac5f168
[ "MIT" ]
null
null
null
src/main/python/main.py
wong-justin/quick-bible
035db43eca2c811792e32b123fa81f679ac5f168
[ "MIT" ]
null
null
null
src/main/python/main.py
wong-justin/quick-bible
035db43eca2c811792e32b123fa81f679ac5f168
[ "MIT" ]
null
null
null
from utils import *
from shared import *
from updating import MyAppContext
from threading import Thread
import re
import sys
import os


class BooksPage(Page, FilterableList):
    '''Lists books from Gen->Rev and connects to next chapters page.
    First page of application.'''

    def __init__(self):
        Page.__init__(self)
        FilterableList.__init__(self)
        self.set_items(BOOK_NAMES)
        # self.set_items([c for c in 'abcdefghijklmnopqrstuvwxyz']) # for testing
        self.itemActivated.connect(self.on_book_selected)

    def on_book_selected(self, book_item):
        # book_item is QtListItem
        book = book_item.text()
        # show content
        if has_chapters(book):
            # go to chapter screen
            self.nav.to(ChaptersPage, state=get_num_chapters(book))
        else:
            # skip to verses screen
            self.nav.to(VersesPage, state=data.bible[book])
            # or get_bible_content(data.curr_scripture.inc(bok))
        # widget cleanup
        self.nav.set_title(data.curr_scripture.inc(book, inplace=True))
        self.searchbox.deactivate()
        self.show_all()  # reset any searches when naving back

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Escape:
            QApplication.exit(2)  # RESTART_EXIT_CODE
        if ctrl_f_event(event):
            self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_whole_bible())
            self.searchbox.deactivate()
        else:
            FilterableList.keyPressEvent(self, event)
        # this is 0th page; don't need nav back


class ChaptersPage(Page, FilterableList):
    '''List of chapter numbers 1->n for given book and connects to next verses page.'''

    def __init__(self):
        Page.__init__(self)
        FilterableList.__init__(self)
        self.itemActivated.connect(self.on_chapter_selected)

    def load_state(self, state):
        num_chapters = state
        self.set_items(range(1, num_chapters+1))

    def on_chapter_selected(self, chapter_item):
        chapter = chapter_item.text()
        data.curr_scripture.inc(chapter, inplace=True)
        # show the content
        verses = get_bible_content(data.curr_scripture)
        self.nav.to(VersesPage, state=verses)
        # widget cleanup
        self.nav.set_title(str(data.curr_scripture))
        self.searchbox.deactivate()
        self.show_all()  # reset any searches when naving back

    def keyPressEvent(self, event):
        if not self.search_is_active() and event.key() == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(data.curr_scripture.dec(inplace=True))
        elif ctrl_f_event(event):
            # book_scripture = data.curr_scripture
            self.nav.to(SearchResultsPage, state=lambda: iter_verses_in_book(data.curr_scripture))
            self.searchbox.deactivate()
        else:
            FilterableList.keyPressEvent(self, event)


class VersesPage(Page, QTextEdit, Filterable):
    '''Formats dict of verses {num: text} into text display.
    Filterable by verse num, isolating and highlighting text.'''

    def __init__(self):
        Page.__init__(self)
        QTextEdit.__init__(self)
        Filterable.__init__(self)
        # style
        self.setReadOnly(True)
        set_font_size(self, 11)

    def load_state(self, state):
        # state = dict of verses in chapter
        self.verses = state
        self.show_all()

    def show_all(self):
        # render
        html = format_to_html(self.verses)
        self.set_html(html)

    def set_html(self, html):
        # wrapping textEdit.setHtml to keep scroll position
        scroll_pos = self.verticalScrollBar().value()
        self.setHtml(html)  # this resets scroll
        self.verticalScrollBar().setValue(scroll_pos)

    def filter_items(self, pattern):
        # highlight verse, given number
        # make sure the verse is there
        if pattern not in self.verses.keys():
            self.show_all()
            return
        n = int(pattern)
        verse = self.verses[str(n)]
        # divide text around verse
        pre_verses = dict_where_keys(self.verses, lambda k: int(k) < n)
        main_verse = {n: verse}
        post_verses = dict_where_keys(self.verses, lambda k: int(k) > n)
        pre, main, post = (format_to_html(vs) for vs in (pre_verses, main_verse, post_verses))
        html = (
            OPACITY_TEMPLATE.format(pre) +
            f' {main} ' +
            OPACITY_TEMPLATE.format(post)
        )
        self.set_html(html)
        # find verse position in text widget
        plain_verse = to_plaintext(main)
        plain_start = self.toPlainText().index(plain_verse)
        c = self.textCursor()
        c.setPosition(plain_start)
        self.setTextCursor(c)
        # scroll to verse position
        rect = self.cursorRect()
        top = rect.top()
        vbar = self.verticalScrollBar()
        vbar.setValue(vbar.value() + top)  # top of verse is top of screen
        if not vbar.value() == vbar.maximum():
            # avoid edge case of last verse: it stays maximum scroll, else hiding last line
            vbar.triggerAction(QAbstractSlider.SliderSingleStepSub)
            # but in general content looks nicer when not pinned to top

    def change_highlighted_scripture(self, diff):
        pattern = self.searchbox.text()
        # allow new highlight from beginning or end
        if pattern == '':
            last_verse = list(self.verses.keys())[-1]
            n = (1 if diff == 1 else last_verse)
        # else make sure a verse is already selected
        elif pattern not in self.verses.keys():
            return
        # make sure new verse within bounds
        else:
            n = int(pattern) + diff
            if str(n) not in self.verses.keys():
                return
        # update searchbox, which triggers new highlight filter and updates user
        self.searchbox.activate(str(n))

    def keyPressEvent(self, event):
        keypress = event.key()
        # nav back when backspacing without searchbox
        if not self.search_is_active() and keypress == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(data.curr_scripture.dec(inplace=True))
            self.verticalScrollBar().setValue(0)  # scroll back to top
        elif event.modifiers() == Qt.ControlModifier:
            # scripture up/down
            if keypress in (Qt.Key_Down, Qt.Key_Up):
                diff = (1 if keypress == Qt.Key_Down else -1)
                self.change_highlighted_scripture(diff)
            # search this chapter
            elif keypress == Qt.Key_F:
                self.nav.to(SearchResultsPage,
                            state=lambda: scriptures_with_verses(data.curr_scripture, self.verses))
                self.searchbox.deactivate()
                self.verticalScrollBar().setValue(0)  # scroll back to top
        # scroll
        elif keypress in (Qt.Key_Down, Qt.Key_Up):
            QTextEdit.keyPressEvent(self, event)
        # keypress goes to searchbox
        else:
            Filterable.keyPressEvent(self, event)


class SearchResultDelegate(QStyledItemDelegate):
    # custom list item rendering,
    # mainly just to format a title and subtitle while looking like default list widget item

    def paint(self, painter, option, index):
        # turns item text into title and subtitle.
        # imitates standard list widget item style on select.
        # title bolded, subtitle beneath.
        # maybe custom eliding for ellipsis on both left and right, focused around match?
        # or at least on right, with match surely in view starting from left
        painter.save()

        item = index.data(Qt.DisplayRole)  # default item data is at role 0
        # custom data was passed into this item, no longer usual type str
        title = str(item['scripture']) + '\n'
        subtitle = '\n' + item['text']

        given_rect = option.rect  # from size hint
        states = option.state  # bitwise OR of QStyle.State_ flags
        if states & QStyle.State_Selected:
            palette = QApplication.palette()
            painter.setPen(palette.color(QPalette.HighlightedText))
            painter.fillRect(given_rect, palette.color(QPalette.Highlight))

        # text inset by small margin
        text_rect = given_rect.adjusted(2, 2, -2, -2)

        # draw title text
        em_font = QFont(option.font)  # copy
        em_font.setWeight(QFont.Bold)
        painter.setFont(em_font)
        painter.drawText(text_rect, option.displayAlignment, title)

        # draw subtitle text
        painter.setFont(option.font)  # back to default font
        # painter.translate(3, 0) # slight indent under title might look nice
        elided_subtitle = QFontMetrics(QFont(option.font)).elidedText(
            subtitle, Qt.ElideRight, text_rect.width())  # , Qt.TextShowMnemonic)
        # elided_subtitle = painter.fontMetrics().elidedText(subtitle, Qt.ElideRight, text_rect.width())#, Qt.TextShowMnemonic)
        painter.drawText(text_rect, option.displayAlignment, elided_subtitle)

        painter.restore()

    def sizeHint(self, option, index):
        # fit to width, creating ellipsis on long text with no need for horiz scroll
        # default height seems to have been n*line_height of str in option.data(Qt.DisplayRole)
        s = QSize()
        font_metrics = QFontMetrics(option.font)
        line_height = font_metrics.height()
        extra = 4  # produces more comfortable line spacing; 'elbow room'
        s.setHeight(2*line_height + extra)  # 1 line for title, subtitle each
        s.setWidth(0)  # don't allow horiz scroll when there's wide items
        return s


class SearchResultsPage(Page, FilterableList):
    '''Searches given verses by regex from searchbox and shows matches in list.'''

    def __init__(self):
        self.default_placeholder_msg = 'search regex:'
        Page.__init__(self)
        FilterableList.__init__(self, placeholder=self.default_placeholder_msg)
        self.setItemDelegate(SearchResultDelegate(self))  # custom rendering of list item
        # self.itemActivated.connect(self.on_result_item_selected)

        # dummy searchbox serves as visual prompt on empty screen
        # gives better communication to user
        self.fake_searchbox = SearchBox(None)
        add_grid_child(self, self.fake_searchbox, Qt.AlignRight | Qt.AlignBottom, grid=self.layout())
        self.fake_searchbox.show()

        # to decrease stalling when doing a large search?
        # self._thread = None
        # batches aren't working/helping, maybe because it's a listwidget instead of listview
        # QListView.setLayoutMode(self, QListView.Batched)
        # self.setBatchSize(5)
        # self.setUniformItemSizes(True) # don't think it's helping
        # maybe implement a list view instead of a list widget?

    def load_state(self, state):
        # state = callable that produces iter of verses in desired scope
        self.verses_iter_factory = state
        scope = str(data.curr_scripture)
        self.nav.set_title('Search ' + scope)
        self.show_all()  # trigger empty search display

    def show_all(self):
        # called when searchbox is empty, which means
        # show placeholder and extra searchbox prompt for user.
        self.clear()
        self.fake_searchbox.show()
        self.placeholder.setText(self.default_placeholder_msg)

    def show_items(self, items):
        # replaced by custom filter_items, so override and do nothing
        return

    # def on_result_item_selected(self, item):
    #     # callback for list widget selection
    #     d = item.data(Qt.DisplayRole)
    #     self.nav.to(SearchedVersePage, state=d['location'])

    def filter_items(self, search_text):
        # show matches of search in a list
        self.fake_searchbox.hide()  # could be showing if this is first char of search
        self.placeholder.setText(self.default_placeholder_msg)  # could be diff if last search was error
        try:
            re.compile(search_text)
        except re.error:
            self.placeholder.setText('invalid regex')
            self.clear()
            return

        self.clear()
        # items = []
        for scripture, verse_text in self.verses_iter_factory():
            match = re.search(search_text, verse_text)
            if match is not None:
                item = QListWidgetItem()  # self)
                item.setData(Qt.DisplayRole, {
                    'scripture': scripture,
                    'text': verse_text.replace('\n', ' '),
                })
                # items.append(item)
                self.addItem(item)
        # for i in items:
        #     self.addItem(i)
        # print(self.item(100).data(0))

        # when finished iter and no matches
        if self.is_empty():
            self.placeholder.setText('no results')
        else:
            self.placeholder.setText('')

    def is_empty(self):
        # return QListWidget.count(self) == 0 # works if you used addItem
        return self.itemAt(0, 0) is None  # works with just making ListItem(self), not having called addItem

    def keyPressEvent(self, event):
        empty_search = not self.search_is_active() or self.searchbox.text() == ''
        if empty_search and event.key() == Qt.Key_Backspace:
            self.nav.back()
            self.nav.set_title(str(data.curr_scripture))
            # self.clear()
        else:
            FilterableList.keyPressEvent(self, event)


class Main(QWidget):
    # outer window shown; wraps child and restores settings from last session

    def __init__(self, child):
        super().__init__()
        layout = MarginGrid()
        layout.addWidget(child, 0, 0)
        self.setLayout(layout)
        child.setParent(self)

        self.settings = QSettings(str(RESOURCE_DIR / 'settings.ini'), QSettings.IniFormat)  # I can specify the location
        # self.settings = QSettings('FastBible', 'FastBible') # saved in some OS specific location
        default = bytes('', encoding='utf-8')
        geometry = self.settings.value('geometry', default)
        self.restoreGeometry(geometry)

    def closeEvent(self, event):
        geometry = self.saveGeometry()
        self.settings.setValue('geometry', geometry)
        super().closeEvent(event)


# --- run

if __name__ == '__main__':
    appctxt = MyAppContext()
    set_theme(appctxt.app)
    init_data()
    main = Main(PageManager(BooksPage, ChaptersPage, VersesPage, SearchResultsPage))
    main.show()
    main.setWindowTitle('Bible')
    # exit_code = appctxt.app.exec_()
    # sys.exit(exit_code)
    appctxt.app.run()
36.431762
136
0.63697
1,783
14,682
5.102075
0.261357
0.012312
0.022425
0.016159
0.217874
0.196548
0.141365
0.122348
0.092888
0.062218
0
0.003364
0.271216
14,682
402
137
36.522388
0.846822
0.288721
0
0.266094
0
0
0.012614
0
0
0
0
0
0
1
0.107296
false
0
0.030043
0.008584
0.193133
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e54424cb8a2e93ad7659fa0e0f3c23ae851505b8
5,458
py
Python
armada/tests/unit/utils/test_lint.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
null
null
null
armada/tests/unit/utils/test_lint.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
null
null
null
armada/tests/unit/utils/test_lint.py
sktelecom-oslab/armada
ebc71ff8eca7ecf0560493d5cdafc14e34c783c9
[ "Apache-2.0" ]
2
2018-05-28T13:00:42.000Z
2021-09-02T07:28:59.000Z
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import yaml
import os

from armada.utils import lint


class LintTestCase(unittest.TestCase):

    def setUp(self):
        self.basepath = os.path.join(os.path.dirname(__file__))

    def test_lint_armada_yaml_pass(self):
        template = '{}/templates/valid_armada_document.yaml'.format(
            self.basepath)
        document = yaml.safe_load_all(open(template).read())
        resp = lint.validate_armada_documents(document)
        self.assertTrue(resp)

    def test_lint_armada_manifest_no_groups(self):
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
          schema: metadata/Document/v1
          name: example-manifest
        data:
          release_prefix: example
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_armada_documents(document)

    def test_lint_validate_manifest_pass(self):
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
          schema: metadata/Document/v1
          name: example-manifest
        data:
          release_prefix: example
          chart_groups:
            - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_manifest_document(document))

    def test_lint_validate_manifest_no_prefix(self):
        template_manifest = """
        schema: armada/Manifest/v1
        metadata:
          schema: metadata/Document/v1
          name: example-manifest
        data:
          chart_groups:
            - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_manifest_document(document)

    def test_lint_validate_group_pass(self):
        template_manifest = """
        schema: armada/ChartGroup/v1
        metadata:
          schema: metadata/Document/v1
          name: example-manifest
        data:
          description: this is sample
          chart_group:
            - example-group
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_chart_group_document(document))

    def test_lint_validate_group_no_chart_group(self):
        template_manifest = """
        schema: armada/ChartGroup/v1
        metadata:
          schema: metadata/Document/v1
          name: example-manifest
        data:
          description: this is sample
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_chart_group_document(document)

    def test_lint_validate_chart_pass(self):
        template_manifest = """
        schema: armada/Chart/v1
        metadata:
          schema: metadata/Document/v1
          name: example-chart
        data:
          name: keystone
          release: keystone
          namespace: undercloud
          timeout: 100
          install:
            no_hooks: false
          upgrade:
            no_hooks: false
          values: {}
          source:
            type: git
            location: git://github.com/example/example
            subpath: example-chart
            reference: master
          dependencies:
            - dep-chart
        """
        document = yaml.safe_load_all(template_manifest)
        self.assertTrue(lint.validate_chart_document(document))

    def test_lint_validate_chart_no_release(self):
        template_manifest = """
        schema: armada/Chart/v1
        metadata:
          schema: metadata/Document/v1
          name: example-chart
        data:
          name: keystone
          namespace: undercloud
          timeout: 100
          install:
            no_hooks: false
          upgrade:
            no_hooks: false
          values: {}
          source:
            type: git
            location: git://github.com/example/example
            subpath: example-chart
            reference: master
          dependencies:
            - dep-chart
        """
        document = yaml.safe_load_all(template_manifest)
        with self.assertRaises(Exception):
            lint.validate_chart_document(document)

    def test_lint_validate_manifest_url(self):
        value = 'url'
        assert lint.validate_manifest_url(value) is False

        value = 'https://raw.githubusercontent.com/att-comdev/' \
                'armada/master/examples/simple.yaml'
        assert lint.validate_manifest_url(value) is True

    def test_lint_validate_manifest_filepath(self):
        value = 'filepath'
        assert lint.validate_manifest_filepath(value) is False

        value = '{}/templates/valid_armada_document.yaml'.format(
            self.basepath)
        assert lint.validate_manifest_filepath(value) is True
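The lint helpers receive the result of yaml.safe_load_all, which is a lazy iterator of documents rather than a single dict. A standalone illustration of that API with a two-document stream:

import yaml

docs = yaml.safe_load_all("schema: armada/Manifest/v1\n---\nschema: armada/Chart/v1\n")
print([d['schema'] for d in docs])  # ['armada/Manifest/v1', 'armada/Chart/v1']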
32.295858
74
0.622206
581
5,458
5.641997
0.253012
0.073215
0.033557
0.04881
0.733069
0.702563
0.688225
0.630262
0.599756
0.552166
0
0.007339
0.301026
5,458
168
75
32.488095
0.8519
0.101869
0
0.721429
0
0
0.485065
0.036007
0
0
0
0
0.085714
1
0.078571
false
0.028571
0.028571
0
0.114286
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
e545cf0e67826a43917bc1e5665f0de69c08ad62
278
py
Python
scripts/rename_fq.py
jodyphelan/pathogenseq
2e04190f25063d722ef653e819b94eb66407ea8d
[ "MIT" ]
null
null
null
scripts/rename_fq.py
jodyphelan/pathogenseq
2e04190f25063d722ef653e819b94eb66407ea8d
[ "MIT" ]
null
null
null
scripts/rename_fq.py
jodyphelan/pathogenseq
2e04190f25063d722ef653e819b94eb66407ea8d
[ "MIT" ]
1
2018-05-11T14:54:51.000Z
2018-05-11T14:54:51.000Z
#! /usr/bin/env python
import csv
import sys
import os
import pathogenseq.files as psf

infile = sys.argv[1]

for row in csv.DictReader(open(infile)):
    f1 = "%s.fastq.gz" % row["Barcode"]
    f2 = "%s.%s.fastq.gz" % (row["Name"], row["Barcode"])
    psf.filecheck(f1)
    os.rename(f1, f2)
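A worked example of what the loop does, using the column names it reads:

# Given a sample sheet like:
#
#     Name,Barcode
#     sampleA,BC01
#
# the script renames BC01.fastq.gz to sampleA.BC01.fastq.gz, after
# psf.filecheck has confirmed the input file exists.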
21.384615
53
0.672662
48
278
3.895833
0.604167
0.064171
0.085562
0.117647
0
0
0
0
0
0
0
0.024896
0.133094
278
12
54
23.166667
0.751037
0.07554
0
0
0
0
0.167969
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
e54612725dff063fe507222226db0fb8875e240a
4,100
py
Python
synapse/tools/cryo/cat.py
vertexmc/synapse
bd1f8ab1abcbaac20dc9afb9ad385cf831278ada
[ "Apache-2.0" ]
null
null
null
synapse/tools/cryo/cat.py
vertexmc/synapse
bd1f8ab1abcbaac20dc9afb9ad385cf831278ada
[ "Apache-2.0" ]
4
2017-10-03T21:50:40.000Z
2017-11-20T15:49:38.000Z
synapse/tools/cryo/cat.py
vertexmc/synapse
bd1f8ab1abcbaac20dc9afb9ad385cf831278ada
[ "Apache-2.0" ]
null
null
null
import sys
import json
import pprint
import argparse
import logging

import synapse.common as s_common
import synapse.cryotank as s_cryotank

import synapse.lib.cell as s_cell
import synapse.lib.output as s_output
import synapse.lib.msgpack as s_msgpack

logger = logging.getLogger(__name__)


def _except_wrap(it, error_str_func):
    '''
    Wrap an iterator and adds a bit of context to the exception message
    '''
    item_no = 0
    while True:
        item_no += 1
        try:
            yield next(it)
        except StopIteration:
            return
        except Exception as e:
            extra_context = error_str_func(item_no)
            e.args = (extra_context + ': ' + str(e.args[0]), ) + e.args[1:]
            raise


def main(argv, outp=s_output.stdout):

    pars = argparse.ArgumentParser(prog='cryo.cat', description='display data items from a cryo cell')
    pars.add_argument('cryocell', help='The cell descriptor and cryo tank path (cell://<host:port>/<name>).')
    pars.add_argument('--list', default=False, action='store_true', help='List tanks in the remote cell and return')
    pars.add_argument('--offset', default=0, type=int, help='Begin at offset index')
    pars.add_argument('--size', default=10, type=int, help='How many items to display')
    pars.add_argument('--timeout', default=10, type=int, help='The network timeout setting')
    pars.add_argument('--authfile', help='Path to your auth file for the remote cell')
    group = pars.add_mutually_exclusive_group()
    group.add_argument('--jsonl', action='store_true', help='Input/Output items in jsonl format')
    group.add_argument('--msgpack', action='store_true', help='Input/Output items in msgpack format')
    pars.add_argument('--verbose', '-v', default=False, action='store_true', help='Verbose output')
    pars.add_argument('--ingest', '-i', default=False, action='store_true',
                      help='Reverses direction: feeds cryotank from stdin in msgpack or jsonl format')
    pars.add_argument('--omit-offset', default=False, action='store_true',
                      help="Don't output offsets of objects. This is recommended to be used when jsonl/msgpack"
                           " output is used.")

    opts = pars.parse_args(argv)

    if opts.verbose:
        logger.setLevel(logging.INFO)

    if not opts.authfile:
        logger.error('Currently requires --authfile until neuron protocol is supported')
        return 1

    if opts.ingest and not opts.jsonl and not opts.msgpack:
        logger.error('Must specify exactly one of --jsonl or --msgpack if --ingest is specified')
        return 1

    authpath = s_common.genpath(opts.authfile)

    auth = s_msgpack.loadfile(authpath)

    netw, path = opts.cryocell[7:].split('/', 1)
    host, portstr = netw.split(':')

    addr = (host, int(portstr))
    logger.info('connecting to: %r', addr)

    cuser = s_cell.CellUser(auth)
    with cuser.open(addr, timeout=opts.timeout) as sess:
        cryo = s_cryotank.CryoClient(sess)

        if opts.list:
            for name, info in cryo.list(timeout=opts.timeout):
                outp.printf('%s: %r' % (name, info))
            return 0

        if opts.ingest:
            if opts.msgpack:
                fd = sys.stdin.buffer
                item_it = _except_wrap(s_msgpack.iterfd(fd), lambda x: 'Error parsing item %d' % x)
            else:
                fd = sys.stdin
                item_it = _except_wrap((json.loads(s) for s in fd), lambda x: ('Failure parsing line %d of input' % x))
            cryo.puts(path, item_it)
        else:
            for item in cryo.slice(path, opts.offset, opts.size, opts.timeout):
                i = item[1] if opts.omit_offset else item
                if opts.jsonl:
                    outp.printf(json.dumps(i, sort_keys=True))
                elif opts.msgpack:
                    sys.stdout.write(s_msgpack.en(i))
                else:
                    outp.printf(pprint.pformat(i))

    return 0


if __name__ == '__main__':  # pragma: no cover
    logging.basicConfig()
    sys.exit(main(sys.argv[1:]))
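The _except_wrap helper above is self-contained, so its behavior can be demonstrated without any cryo cell. A quick standalone check of the added context:

def bad_items():
    yield 1
    raise ValueError("boom")

it = _except_wrap(bad_items(), lambda n: 'Error parsing item %d' % n)
print(next(it))   # 1
next(it)          # raises ValueError: Error parsing item 2: boom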
38.679245
119
0.628537
557
4,100
4.509874
0.332136
0.048169
0.053742
0.045382
0.094745
0.078822
0.029459
0.029459
0
0
0
0.005552
0.253171
4,100
105
120
39.047619
0.814827
0.020976
0
0.084337
0
0
0.224857
0.006988
0
0
0
0
0
1
0.024096
false
0
0.120482
0
0.204819
0.048193
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e546ebdb04dff83307e0ea85b193a4c434f9cc11
3,905
py
Python
bdd100k/eval/lane_test.py
bdd100k/bdd100k
c8b54044038d2a03dcb10dcc6d9aef361639ffec
[ "BSD-3-Clause" ]
193
2020-09-22T09:48:17.000Z
2022-03-31T20:49:24.000Z
bdd100k/eval/lane_test.py
bdd100k/bdd100k
c8b54044038d2a03dcb10dcc6d9aef361639ffec
[ "BSD-3-Clause" ]
60
2020-09-28T15:44:40.000Z
2022-03-31T07:58:58.000Z
bdd100k/eval/lane_test.py
bdd100k/bdd100k
c8b54044038d2a03dcb10dcc6d9aef361639ffec
[ "BSD-3-Clause" ]
41
2020-09-27T02:52:20.000Z
2022-02-21T03:33:39.000Z
"""Test cases for lane.py.""" import os import unittest import numpy as np from ..common.utils import list_files from .lane import ( eval_lane_per_threshold, evaluate_lane_marking, get_foreground, get_lane_class, sub_task_funcs, ) class TestGetLaneClass(unittest.TestCase): """Test cases for the lane specific channel extraction.""" def test_partialled_classes(self) -> None: """Check the function that partial get_lane_class.""" for num in range(255): byte = np.array(num, dtype=np.uint8) if num & 8: self.assertTrue(get_lane_class(byte, 1, 3, 1)) else: self.assertTrue(get_lane_class(byte, 0, 3, 1)) self.assertTrue(get_foreground(byte)) if num & (1 << 5): self.assertTrue(sub_task_funcs["direction"](byte, 1)) else: self.assertTrue(sub_task_funcs["direction"](byte, 0)) if num & (1 << 4): self.assertTrue(sub_task_funcs["style"](byte, 1)) else: self.assertTrue(sub_task_funcs["style"](byte, 0)) class TestEvalLanePerThreshold(unittest.TestCase): """Test cases for the per image per threshold lane marking evaluation.""" def test_two_parallel_lines(self) -> None: """Check the correctness of the function in general cases.""" a = np.zeros((10, 10), dtype=bool) b = np.zeros((10, 10), dtype=bool) a[3, 3:7] = True b[7, 3:7] = True for radius in [1, 2, 3]: self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 0.0) for radius in [4, 5, 6]: self.assertAlmostEqual(eval_lane_per_threshold(a, b, radius), 1.0) def test_two_vertical_lines(self) -> None: """Check the correctness of the function in general cases.""" a = np.zeros((10, 10), dtype=bool) b = np.zeros((10, 10), dtype=bool) a[3, 3:6] = True b[5:8, 7] = True self.assertAlmostEqual(eval_lane_per_threshold(a, b, 2), 0.0) self.assertAlmostEqual(eval_lane_per_threshold(a, b, 3), 1 / 3) self.assertAlmostEqual(eval_lane_per_threshold(a, b, 4), 2 / 3) self.assertAlmostEqual(eval_lane_per_threshold(a, b, 5), 1.0) class TestEvaluateLaneMarking(unittest.TestCase): """Test cases for the evaluate_lane_marking function.""" def test_mock_cases(self) -> None: """Check the peformance of the mock case.""" cur_dir = os.path.dirname(os.path.abspath(__file__)) gt_dir = "{}/testcases/lane/gts".format(cur_dir) res_dir = "{}/testcases/lane/res".format(cur_dir) result = evaluate_lane_marking( list_files(gt_dir, ".png", with_prefix=True), list_files(res_dir, ".png", with_prefix=True), nproc=1, ) data_frame = result.pd_frame() data_arr = data_frame.to_numpy() gt_data_arr = np.array( [ [70.53328267, 80.9831119, 100.0], [100.0, 100.0, 100.0], [70.53328267, 80.9831119, 100.0], [100.0, 100.0, 100.0], [99.82147748, 100.0, 100.0], [100.0, 100.0, 100.0], [100.0, 100.0, 100.0], [75.33066961, 79.34917317, 100.0], [71.02916505, 86.25984707, 100.0], [100.0, 100.0, 100.0], [96.43828133, 100.0, 100.0], [94.79621737, 100.0, 100.0], [85.26664133, 90.49155595, 100.0], [85.26664133, 90.49155595, 100.0], [92.17697636, 95.70112753, 100.0], [87.57008634, 92.22807981, 100.0], ] ) data_arr = data_frame.to_numpy() self.assertTrue(np.isclose(data_arr, gt_data_arr).all()) if __name__ == "__main__": unittest.main()
35.5
78
0.568246
522
3,905
4.070881
0.270115
0.054588
0.059294
0.067765
0.494118
0.475294
0.381647
0.340235
0.234824
0.188235
0
0.136248
0.297055
3,905
109
79
35.825688
0.637887
0.100896
0
0.216867
0
0
0.024784
0.012104
0
0
0
0
0.168675
1
0.048193
false
0
0.060241
0
0.144578
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e547362978e6693dbe91b5bc878d2ea775c9f025
1,561
py
Python
comp_modules/Quicksort.py
SpyrosMouselinos/CompSciStructs
e45743cc4fdf103d4cc0b48d62086226e1cbe726
[ "Apache-2.0" ]
null
null
null
comp_modules/Quicksort.py
SpyrosMouselinos/CompSciStructs
e45743cc4fdf103d4cc0b48d62086226e1cbe726
[ "Apache-2.0" ]
null
null
null
comp_modules/Quicksort.py
SpyrosMouselinos/CompSciStructs
e45743cc4fdf103d4cc0b48d62086226e1cbe726
[ "Apache-2.0" ]
null
null
null
import random


def partition(array, low, high):
    # Hoare partition scheme with the middle element as pivot
    pivot = array[(low + high) // 2]
    left = low - 1
    right = high + 1
    while True:
        left += 1
        while array[left] < pivot:
            left += 1
        right -= 1
        while array[right] > pivot:
            right -= 1
        if left >= right:
            return right
        array[left], array[right] = array[right], array[left]


def quicksort(array, low, high):
    if low < high:
        pivot = partition(array, low, high)
        quicksort(array=array, low=low, high=pivot)
        quicksort(array=array, low=pivot+1, high=high)
    return


def randomized_partition(array, low, high):
    # same Hoare scheme, but with a uniformly random pivot element
    pivot = array[random.randint(low, high)]
    left = low - 1
    right = high + 1
    while True:
        left += 1
        while array[left] < pivot:
            left += 1
        right -= 1
        while array[right] > pivot:
            right -= 1
        if left >= right:
            return right
        array[left], array[right] = array[right], array[left]


def randomized_quicksort(array, low, high):
    if low < high:
        pivot = randomized_partition(array, low, high)
        # note: the recursive calls reuse the deterministic quicksort,
        # so only the top-level split is randomized
        quicksort(array=array, low=low, high=pivot)
        quicksort(array=array, low=pivot+1, high=high)
    return


def qsort(array, mode='normal'):
    if mode == 'normal':
        quicksort(array, 0, len(array) - 1)
    elif mode == 'randomized':
        randomized_quicksort(array, 0, len(array) - 1)
    return array
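Usage sketch for the public entry point: both modes sort in place and return the same list object.

data = [5, 3, 8, 1, 9, 2]
print(qsort(data))                          # [1, 2, 3, 5, 8, 9]
print(qsort([3, 1, 2], mode='randomized'))  # [1, 2, 3]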
22.623188
61
0.53171
184
1,561
4.48913
0.13587
0.101695
0.101695
0.101695
0.849879
0.825666
0.692494
0.692494
0.607748
0.607748
0
0.018738
0.350416
1,561
69
62
22.623188
0.795858
0
0
0.680851
0
0
0.014085
0
0
0
0
0
0
1
0.106383
false
0
0.021277
0
0.234043
0
0
0
0
null
0
0
0
1
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
e547b6a838dfd62eee61bf8e0dfa8ecb24093404
18,522
py
Python
backend/generate_z3.py
uwplse/stng
ce12c2c079516df873382a5aa3c18c407833d130
[ "MIT" ]
14
2017-03-07T00:14:33.000Z
2022-02-09T00:59:22.000Z
backend/generate_z3.py
uwplse/stng
ce12c2c079516df873382a5aa3c18c407833d130
[ "MIT" ]
11
2016-11-22T13:14:55.000Z
2021-12-14T00:56:51.000Z
backend/generate_z3.py
uwplse/stng
ce12c2c079516df873382a5aa3c18c407833d130
[ "MIT" ]
6
2016-11-07T13:38:45.000Z
2021-04-04T12:13:31.000Z
import logging

from stencil_ir import *
from verify import *
from assertion_to_z3 import *
import generate_sketch
import asp.codegen.ast_tools as ast_tools


def loop_key(node):
    import hashlib
    return hashlib.sha224(tree_to_str(node)).hexdigest()[0:10]


class Z3Generator(object):
    """
    Generates a Z3 script, with the parsed postcondition from the output of
    Sketch.  The output of this class is a script ready to send to Z3 for
    verification.
    """
    def __init__(self, program, inputs, loopvars, invariant):
        """
        program is the AST of the loop nest to process.
        inputs is a dict mapping names to (Sketch) types (most importantly for arrays).
        invariant is a dict mapping generated function names from sketch to
        strings that can be parsed by parse_ir
        """
        self.program = program
        self.inputs = inputs
        self.loopvars = loopvars
        self.loopvar_mins = {}
        self.loopvar_maxs = {}
        self.set_maxs_and_mins()
        logging.debug("Preprocessing, invariant is %s", invariant)
        self.synthesized_invariant = self.process_invariants(invariant)
        logging.debug("Synthesized invariant: %s", self.synthesized_invariant)
        self.out_array = generate_sketch.OutputArrayFinder().get_output_arrays(program)
        self.containing_loop_invs = {}

    def process_invariants(self, invariant):
        """
        Take strings in the invariant dict and convert into Z3 syntax.
        """
        from backend_halide import ToHalide
        import parse_ir
        ret = {}
        for inv_key in invariant.keys():
            ir = parse_ir.parse_expression(invariant[inv_key])
            logging.debug("loopvars are %s", self.loopvars)
            if "gen" in inv_key:
                converted_invariant = ToZ3(ir, self.loopvars, None, False, invariant, self.inputs).to_str()
                ret[inv_key] = converted_invariant
            else:
                ret[inv_key] = tree_to_str(ir)
        logging.debug("Processed invariants: %s", ret)
        return ret

    def generate(self):
        """
        Top-level.  Generates an entire Z3 script for the given program and inputs.
        """
        # first, we generate the invariant & postcondition call
        postcondition = CallExp(VarNode("postcondition"),
                                [VarNode(x) for x in self.get_params_without_types()] +
                                map(lambda x: VarNode(x), self.get_loopvars()) +
                                map(lambda x: VarNode(x + "_p"), self.get_loopvars()))

        new_invariant_signatures = self.generate_invariant_func_signatures()
        for x in new_invariant_signatures.keys():
            logging.debug("inv: %s", tree_to_str(new_invariant_signatures[x]))

        # get verification conditions
        logging.debug("invariant signatures: %s",
                      [tree_to_str(new_invariant_signatures[x]) for x in new_invariant_signatures.keys()])
        wpc = WeakestPrecondition(self.program, postcondition, [],
                                  invariant_call=new_invariant_signatures)
        conds = wpc.get()
        additional_conds = wpc.additional_conditions

        from generate_sketch import RHSInvariantReplacer
        conds = RHSInvariantReplacer(self.get_loopvars()).visit(conds)
        additional_conds = map(RHSInvariantReplacer(self.get_loopvars()).visit, additional_conds)

        # translate verification conditions to Z3
        logging.debug("Translating the following VCs: %s %s",
                      tree_to_str(conds),
                      '\n\n'.join([tree_to_str(x) for x in additional_conds]))
        vc = ToZ3(conds, self.get_loopvars(), additional_conds, True,
                  self.synthesized_invariant, self.inputs).to_str()

        # put it all together
        ret = self.generate_invariant_funcs()
        ret += self.generate_postcon_func()
        ret += self.generate_constants() + "\n\n"
        ret += self.generate_assumptions()
        ret += self.generate_signature() + vc + "))\n\n"
        ret += "(assert (not main))\n(check-sat)\n(get-model)\n"
        return ret

    def generate_invariant_func_signatures(self):
        """
        Generates signatures for each invariant function into a dict keyed by
        a hash of the loop body.
        """
        class InvGenLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self, inputs, loopvars, params_without_types):
                super(InvGenLoopVisitor, self).__init__()
                self.invariants = {}
                self.invariant_names_to_loops = {}  # dict from names to loops
                self.inputs = inputs
                self.loopvars = loopvars
                self.params_without_types = params_without_types

            def visit_Block(self, node):
                map(self.visit, node.body)

            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.invariants[key] = CallExp(VarNode(invariant_name),
                                               [VarNode(x) for x in self.params_without_types] +
                                               map(lambda x: VarNode(x), self.loopvars))
                self.invariant_names_to_loops[invariant_name] = node
                self.visit(node.body)

        visitor = InvGenLoopVisitor(self.inputs, self.get_loopvars(), self.get_params_without_types())
        visitor.visit(self.program)
        self.invariant_names_to_loops = visitor.invariant_names_to_loops
        return visitor.invariants

    def generate_invariant_funcs(self):
        """
        Generates the Z3 function for the invariant.
        """
        self.find_dependent_loopvars()
        self.find_loopvar_nesting()
        self.find_output_nesting()
        from mako.template import Template
        inv_template = Template(filename="templates/z3/invariant.2.z3.mako", format_exceptions=True)
        ret = ""
        #for looplevel in range(len(self.get_loopvars())):
            #var = self.get_loopvars()[looplevel]
            #ret += inv_template.render(name="I_"+var,
                                       #looplevel=looplevel,
                                       #loopvar_maxs=self.loopvar_maxs,
                                       #loopvar_mins=self.loopvar_mins,
                                       #parameters=self.get_params(),
                                       #call_params=self.get_params_without_types(),
                                       #outarray=self.get_out_array(),
                                       #synthesized_invariant=self.get_synthesized_invariant_rhs(),
                                       #loopvar=self.get_loopvars(),
                                       #dependent_loopvars=self.dependent_loopvars,
                                       #loopvar_nesting=self.loopvar_nesting,
                                       #output_nesting=self.output_nesting)
        for invariant in self.invariant_names_to_loops.keys():
            #FIXME
            looplevel = 0
            node = self.invariant_names_to_loops[invariant]
            thiskey = loop_key(node)
            var = node.iter_var.name
            containing_loop_invs = self.get_containing_loop_invs(node)
            # we need to also know which loops this loop contains
            thisloopcontains = self.get_loops_contained_by(node)
            ret += inv_template.render(name=invariant,
                                       synthesized_invariant=self.get_synthesized_invariant_rhs(),
                                       looplevel=looplevel,
                                       output_nesting=self.output_nesting,
                                       containing_loop_invs=containing_loop_invs,
                                       parameters=self.get_params(),
                                       int_params=[x[0] for x in self.inputs if x[1] == "int"] + self.get_loopvars(),
                                       call_params=self.get_params_without_types(),
                                       outarray=self.get_out_array(),
                                       thisloopvar=var,
                                       thiskey=thiskey,
                                       thisloopcontains=thisloopcontains,
                                       loopvar=self.get_loopvars(),
                                       mins=self.loopvar_mins,
                                       maxs=self.loopvar_maxs,
                                       loopvar_nesting=self.loopvar_nesting,
                                       dependent_loopvars=self.dependent_loopvars)
        return ret

    def generate_postcon_func(self):
        """
        Generate the Z3 function for the postcondition.
        """
        from mako.template import Template
        pcon_template = Template(filename="templates/z3/postcondition.z3.mako")
        return pcon_template.render(parameters=self.get_params(),
                                    call_params=self.get_params_without_types(),
                                    loopvar_maxs=self.loopvar_maxs,
                                    loopvar_mins=self.loopvar_mins,
                                    outarray=self.get_out_array(),
                                    synthesized_invariant=self.get_synthesized_invariant_rhs(),
                                    loopvar=self.get_loopvars())

    def generate_constants(self):
        """
        Generates declarations for constants at the top-level of the script.
        """
        all_params = [(x, "Int") for x in self.get_loopvars()]  #+ [(x+"_to_check", "Int") for x in self.get_loopvars()]
        all_params += [(x + "_p", "Int") for x in self.get_loopvars()] + self.get_params()
        ret = "\n".join(["(declare-const %s %s)" % (x[0], x[1]) for x in all_params])
        return ret

    def get_params(self):
        """
        Returns a list of tuples of (name, type) for each input.
        """
        def is_arr(tp):
            return "[" in tp[1]

        def convert_type(tp):
            translation_dict = {"double": "Real", "int": "Int"}
            return translation_dict[tp.split()[0]]

        def convert_type_array(tp):
            scalar_tp = convert_type(tp.split("[")[0] + " ")
            ret = ""
            dim = len(tp.split("*"))
            for x in range(dim):
                ret += "(Array Int "
            ret += scalar_tp
            for x in range(dim):
                ret += ")"
            return ret

        def is_float(tp):
            return tp[1] == "double" or tp[1] == "float"

        arrs = filter(is_arr, self.inputs)
        non_arrs = filter(lambda x: not is_arr(x) and not is_float(x), self.inputs)
        floats = filter(is_float, self.inputs)
        return ([(x[0], convert_type(x[1])) for x in floats] +
                [(x[0], "%s" % convert_type_array(x[1])) for x in arrs] +
                [(x[0], convert_type(x[1])) for x in non_arrs])

    def generate_signature(self):
        """
        Generate the signature for the main Z3 function.
        """
        return "(define-fun main () Bool\n(and \n"

    def generate_assumptions(self):
        """
        Generates the necessary assumptions.  Right now, it generates, for a
        loopvar `i`, lower and upper bounds for `i` and `i_valp`.  For arrays
        of the type `T[N]` it generates bounds for `N` such that it is
        greater than 3.
        """
        import asp.codegen.ast_tools
        import re
        ret = ""
        for x in self.get_loopvars():
            ret += "(assert (> (- %s %s) 1))" % (self.loopvar_maxs[x], self.loopvar_mins[x]) + "\n"
        return ret

    def get_params_without_types(self):
        #return ', '.join(["%s" % (x[0]) for x in self.inputs])
        return [x[0] for x in self.get_params()]

    def get_out_array(self):
        return self.out_array

    def get_loopvars(self):
        return self.loopvars

    def get_synthesized_invariant_rhs(self):
        #return "(select b (+ i_to_check 1))"
        #return "(select b (+ (- i_to_check 1) (* j_to_check N)))"
        return self.synthesized_invariant

    def set_maxs_and_mins(self):
        for x in self.get_loopvars():
            maxfinder = generate_sketch.MaxFinder(x)
            maxfinder.visit(self.program)
            initfinder = generate_sketch.InitFinder(x)
            initfinder.visit(self.program)
            self.loopvar_mins[x] = ToZ3(initfinder.init, None, None).to_str()
            self.loopvar_maxs[x] = ToZ3(maxfinder.maximum, None, None).to_str()

    def replace_idx_vars(self, tree):
        """
        Given an expression, replace the loopvariables `x` with `x_to_check`.
        """
        import asp.codegen.ast_tools as ast_tools
        import grammar
        import copy
        tree_copy = copy.deepcopy(tree)

        class IdxReplacer(ast_tools.NodeTransformer):
            def __init__(self, loopvars):
                self.loopvars = loopvars

            def visit_VarNode(self, node):
                if node.name in self.loopvars:
                    return grammar.VarNode(node.name + "_to_check")
                else:
                    return node

        return IdxReplacer(self.get_loopvars()).visit(tree_copy)

    def find_dependent_loopvars(self):
        """
        For each output array, find which loopvars it depends on.
        """
        class DependenceFinder(ast_tools.NodeVisitor):
            def __init__(self, outputs, loopvars):
                super(DependenceFinder, self).__init__()
                self.outputs = outputs
                self.loopvars = loopvars
                self.dependences = {}
                for x in self.outputs:
                    self.dependences[x] = []
                self.in_lhs = False
                self.in_arr_access = None

            def visit_Block(self, node):
                map(self.visit, node.body)

            def visit_AssignExp(self, node):
                self.in_lhs = True
                self.visit(node.lval)
                self.in_lhs = False
                self.visit(node.rval)

            def visit_ArrExp(self, node):
                if self.in_lhs:
                    self.in_arr_access = node.name.name
                self.visit(node.loc)
                self.in_arr_access = None

            def visit_VarNode(self, node):
                if self.in_lhs and self.in_arr_access and node.name in self.loopvars:
                    self.dependences[self.in_arr_access].append(node.name)

        df = DependenceFinder(self.get_out_array(), self.loopvars)
        df.visit(self.program)
        logging.debug("Dependent loop vars: %s", df.dependences)
        self.dependent_loopvars = df.dependences

    def find_loopvar_nesting(self):
        """
        Find the nesting structure for the loops.  Returns
        loop->[containing loops] dict.
        """
        self.loopvar_nesting = {}
        for lv in self.get_loopvars():
            self.loopvar_nesting[lv] = []
        for inv in self.invariant_names_to_loops.keys():
            node = self.invariant_names_to_loops[inv]
            thisnodevar = node.iter_var.name
            for x in self.get_containing_loop_invs(node):
                logging.debug("%s contained by %s", thisnodevar, x[1].iter_var.name)
                self.loopvar_nesting[thisnodevar].append(x[1].iter_var.name)

    def find_output_nesting(self):
        """
        Creates a structure to map from output->innermost loop.
        """
        class OutputNestFinder(ast_tools.NodeVisitor):
            def __init__(self, outputs):
                self.outputs = outputs
                self.cur_loopvar = None
                self.output_nesting = {}

            def visit_Block(self, node):
                map(self.visit, node.body)

            def visit_WhileLoop(self, node):
                old_loopvar = self.cur_loopvar
                self.cur_loopvar = node.iter_var.name
                self.visit(node.body)
                self.cur_loopvar = old_loopvar

            def visit_AssignExp(self, node):
                if self.cur_loopvar and isinstance(node.lval, ArrExp):
                    self.output_nesting[node.lval.name.name] = self.cur_loopvar

        onf = OutputNestFinder(self.get_out_array())
        onf.visit(self.program)
        logging.debug("Output nesting: %s", onf.output_nesting)
        self.output_nesting = onf.output_nesting

    def get_containing_loop_invs(self, node):
        """
        Return a list of (invariant function name, node) that correspond to
        the loops outside a given loop.
        """
        class ContainingLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self):
                super(ContainingLoopVisitor, self).__init__()
                self.containing_loops = {}
                self.current_outerloops = []

            def visit_Block(self, node):
                # need to do this sequentially
                for n in node.body:
                    self.visit(n)

            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.containing_loops[invariant_name] = self.current_outerloops[:]
                self.current_outerloops.append((invariant_name, node))
                self.visit(node.body)
                self.current_outerloops.pop()

        if not self.containing_loop_invs:
            visitor = ContainingLoopVisitor()
            visitor.visit(self.program)
            self.containing_loop_invs = visitor.containing_loops
            logging.debug("Containing loops: %s", visitor.containing_loops)

        key = loop_key(node)
        invariant_name = "I_%s_%s" % (node.iter_var.name, key)
        return self.containing_loop_invs[invariant_name]

    def get_loops_contained_by(self, node):
        """
        Return a list of (invariant function name, node) that correspond to
        the loops contained by node.
        """
        class ContainedLoopVisitor(asp.codegen.ast_tools.NodeVisitor):
            def __init__(self):
                super(ContainedLoopVisitor, self).__init__()
                self.contained_loops = []

            def visit_Block(self, node):
                map(self.visit, node.body)

            def visit_WhileLoop(self, node):
                key = loop_key(node)
                invariant_name = "I_%s_%s" % (node.iter_var.name, key)
                self.contained_loops.append((invariant_name, node))
                self.visit(node.body)

        visitor = ContainedLoopVisitor()
        visitor.visit(node.body)
        return visitor.contained_loops
43.275701
166
0.576774
2,089
18,522
4.89325
0.137865
0.027392
0.012326
0.011739
0.367638
0.246821
0.207983
0.156329
0.126785
0.106046
0
0.003938
0.32815
18,522
427
167
43.377049
0.817502
0.137134
0
0.242215
1
0
0.035933
0.006451
0
0
0
0.002342
0.010381
1
0.15917
false
0
0.058824
0.020761
0.321799
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e548c969786feae43c81dceea46b23eaaf132846
2,991
py
Python
user_program/old/firmware_tester.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
78
2022-02-07T16:48:11.000Z
2022-03-31T12:25:35.000Z
user_program/old/firmware_tester.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
1
2022-02-26T20:16:08.000Z
2022-02-26T20:24:04.000Z
user_program/old/firmware_tester.py
dekuNukem/USB4VC
66c4f0b4a4acd7cec6654ea0dd4da026edf5d24c
[ "MIT" ]
1
2022-02-24T03:34:15.000Z
2022-02-24T03:34:15.000Z
import os
import sys
import time

import spidev
import RPi.GPIO as GPIO

PBOARD_RESET_PIN = 25
PBOARD_BOOT0_PIN = 12
SLAVE_REQ_PIN = 16

GPIO.setmode(GPIO.BCM)
GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
GPIO.setup(SLAVE_REQ_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

is_dfu = False

def enter_dfu():
    # RESET LOW: Enter reset
    GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
    GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
    time.sleep(0.05)
    # BOOT0 HIGH: Boot into DFU mode
    GPIO.setup(PBOARD_BOOT0_PIN, GPIO.OUT)
    GPIO.output(PBOARD_BOOT0_PIN, GPIO.HIGH)
    time.sleep(0.05)
    # Release RESET, BOOT0 still HIGH, STM32 now in DFU mode
    GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
    time.sleep(1)

def exit_dfu():
    # Release BOOT0
    GPIO.setup(PBOARD_BOOT0_PIN, GPIO.IN)
    # Activate RESET
    GPIO.setup(PBOARD_RESET_PIN, GPIO.OUT)
    GPIO.output(PBOARD_RESET_PIN, GPIO.LOW)
    time.sleep(0.05)
    # Release RESET, BOOT0 is LOW, STM32 boots in normal mode
    GPIO.setup(PBOARD_RESET_PIN, GPIO.IN)
    time.sleep(0.2)

def flash_firmware(fw_path):
    for x in range(5):
        print(f"----------------- {fw_path.split('/')[-1]} -----------------")
    enter_dfu()
    if is_dfu:
        exit_code = os.system(f'sudo dfu-util --device ,0483:df11 -a 0 -D {fw_path}') >> 8
    else:
        exit_code = os.system(f'sudo stm32flash -w {fw_path} -a 0x3b /dev/i2c-1') >> 8
    exit_dfu()
    if exit_code != 0:
        for x in range(5):
            print("!!!!!!!!!!!!!!!!! TEST FLASH FAILED !!!!!!!!!!!!!!!!!")
        exit()

if len(sys.argv) < 3:
    print(__file__ + ' payload_fw test_fw')
    exit()

os.system("clear")

pcard_spi = spidev.SpiDev(0, 0)
pcard_spi.max_speed_hz = 2000000

payload_fw_path = sys.argv[1]
test_fw_path = sys.argv[2]

if '.dfu' in payload_fw_path.lower() or '.dfu' in test_fw_path.lower():
    is_dfu = True

flash_firmware(test_fw_path)

req_result = []
for x in range(10):
    req_result.append(GPIO.input(SLAVE_REQ_PIN))
    time.sleep(0.1)
print(req_result)
if 0 not in req_result or 1 not in req_result or req_result.count(0) <= 3 or req_result.count(1) <= 3:
    for x in range(5):
        print("!!!!!!!!!!!!!!!!! SLAVE REQ ERROR !!!!!!!!!!!!!!!!!")
    exit()

while 1:
    if len(input("Press enter to continue\n")) == 0:
        break

flash_firmware(payload_fw_path)

SPI_MOSI_MAGIC = 0xde
SPI_MOSI_MSG_TYPE_INFO_REQUEST = 1

nop_spi_msg_template = [SPI_MOSI_MAGIC] + [0]*31
info_request_spi_msg_template = [SPI_MOSI_MAGIC, 0, SPI_MOSI_MSG_TYPE_INFO_REQUEST] + [0]*29

this_msg = list(info_request_spi_msg_template)
pcard_spi.xfer(this_msg)
time.sleep(0.1)
response = pcard_spi.xfer(list(nop_spi_msg_template))
time.sleep(0.1)
print(response)

if response[0] != 205:
    for x in range(5):
        print("!!!!!!!!!!!!!!!!! WRONG RESPONSE !!!!!!!!!!!!!!!!!")
else:
    print("----------------- OK OK OK OK OK OK -----------------")
    print("----------------- OK OK OK OK OK OK -----------------")
27.694444
102
0.638248
482
2,991
3.721992
0.261411
0.046823
0.06243
0.070234
0.438127
0.371795
0.256968
0.133779
0.133779
0.133779
0
0.037866
0.17887
2,991
108
103
27.694444
0.69259
0.064527
0
0.317073
0
0
0.170129
0.008596
0
0
0.002865
0
0
1
0.036585
false
0
0.060976
0
0.097561
0.109756
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e54bb2a421e7b64f44e6913ef7732630a953e801
8,394
py
Python
dataset/text.py
scfrank/deep-generative-lm
70067fcda82aa035bba805ce6c2709097166a7a4
[ "MIT" ]
null
null
null
dataset/text.py
scfrank/deep-generative-lm
70067fcda82aa035bba805ce6c2709097166a7a4
[ "MIT" ]
null
null
null
dataset/text.py
scfrank/deep-generative-lm
70067fcda82aa035bba805ce6c2709097166a7a4
[ "MIT" ]
null
null
null
""" Text datatset iterators, as an extension of the PyTorch Dataset class. class SimpleTextData(): reads a text file line by line up to a specified sequence length. class SimpleTextDataSplit(): extends SimpleTextData() by splitting the data in train and val sets. class TextDataPadded(): extends SimpleTextData() by padding the text up to the specified sequence length. """ import os.path as osp import sys import numpy as np import torch from torch.utils.data import Dataset # We include the path of the toplevel package in the system path so we can always use absolute imports within the package. toplevel_path = osp.abspath(osp.join(osp.dirname(__file__), "..")) if toplevel_path not in sys.path: sys.path.insert(1, toplevel_path) from util.error import InvalidLengthError # noqa: E402 __author__ = "Tom Pelsmaeker" __copyright__ = "Copyright 2020" class SimpleTextData(Dataset): """Dataset of text that reads the first N tokens from each line in the given textfile as data. Args: file(str): name of the file containing the text data already converted to indices. seq_len(int): maximum length of sequences. Longer sequences will be cut at this length. """ def __init__(self, file, seq_len): if seq_len == 0: self._seq_len = len(max(open(file, "r"), key=len).split()) else: self._seq_len = seq_len self._data = [ line.split()[: self._seq_len] for line in open(file, "r") if line != "\n" ] self._data_len = len(self._data) def __len__(self): return self._data_len def __getitem__(self, idx): return torch.LongTensor(self._data[idx]) class TextDataSplit(SimpleTextData): """Dataset of text that allows a train/validation split from a single file. Extends SimpleTextData(). Args: file(str): name of the file containing the text data already converted to indices. seq_len(int): maximum length of sequences. Longer sequences will be cut at this length. train(bool): True when training, False when testing. """ def __init__(self, file, seq_len, train): super().__init__(file, seq_len) if train: self._data = self._data[: int(self.data.shape[0] * 0.9), :] else: self._data = self._data[int(self.data.shape[0] * 0.9) :, :] self._data_len = self.data.shape[0] class TextDataUnPadded(SimpleTextData): """ Dataset of text that prepares sequences for padding, but does not pad them yet. Extends SimpleTextData(). Args: file(str): name of the file containing the text data already converted to indices. seq_len(int): maximum length of sequences. shorter sequences will be padded to this length. pad_token(int): token that is appended to sentences shorter than seq_len. """ def __init__(self, file, seq_len, pad_token): super().__init__(file, seq_len) # This class also provides reversed sequences that are needed in certain generative model training self._reverse_data = [ line.split()[: self._seq_len][::-1] for line in open(file, "r") if line != "\n" ] self._pad_token = pad_token def __getitem__(self, idx): return self._data[idx], self._reverse_data[idx], self._pad_token class TextDataPadded(TextDataUnPadded): """ Dataset of text that pads sequences up to the specified sequence length. Extends TextDataUnPadded(). Args: file(str): name of the file containing the text data already converted to indices. seq_len(int): maximum length of sequences. shorter sequences will be padded to this length. pad_token(int): token that is appended to sentences shorter than seq_len. 
""" def __init__(self, file, seq_len, pad_token): super().__init__(file, seq_len, pad_token) self._seq_lens = [] for line in self._data: self._seq_lens.append(len(line)) if len(line) < self._seq_len: line.extend([pad_token] * (self._seq_len - len(line))) for reverse_line in self._reverse_data: if len(reverse_line) < self._seq_len: reverse_line.extend([pad_token] * (self._seq_len - len(reverse_line))) self._seq_lens = torch.LongTensor(self._seq_lens) self._data = torch.from_numpy(np.array(self._data, dtype=np.int64)) self._reverse_data = torch.from_numpy( np.array(self._reverse_data, dtype=np.int64) ) self._mask = 1.0 - (self._data == pad_token).float() def __getitem__(self, idx): return ( self._data[idx], self._seq_lens[idx], self._mask[idx], self._reverse_data[idx], ) def sort_collate(batch): """Custom collate_fn for DataLoaders, sorts data based on sequence lengths. Note that it is assumed that the variable on which to sort will be in the second position of the input tuples. Args: batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e a list of length batch_size of tuples, where each tuple contains the variables of the DataSet at a single index. Returns: list of tensors: the batch of data, with a tensor of length batch_size per variable in the DataSet, sorted according to the second variable which is assumed to be length information. The list contains [data, lengths, ...]. Raises: InvalidLengthError: if the input has less than two variables per index. """ if len(batch[0]) < 2: raise InvalidLengthError( "Batch needs to contain at least data (batch[0]) and lengths (batch[1])." ) # Unpack batch from list of tuples [(x_i, y_i, ...), ...] to list of tensors [x, y, ...] batch = [torch.stack([b[i] for b in batch]) for i in range(len(batch[0]))] # Get lengths from second tensor in batch and sort all batch data based on those lengths _, indices = torch.sort(batch[1], descending=True) batch = [data[indices] for data in batch] return batch def sort_pad_collate(batch): """Custom collate_fn for DataLoaders, pads data and sorts based on sequence lengths. This collate function works together with the TextDataUnPadded Dataset, that provides a batch of data in the correct format for this function to pad and sort. Args: batch(list of tuples): a batch of data provided by a DataLoader given a Dataset, i.e a list of length batch_size of tuples, where each tuple contains the variables of the DataSet at a single index. Each tuple must contain (data_i, reversed_data_i, pad_token). Returns: list of tensors: the batch of data, with a tensor of length batch_size per variable in the DataSet, sorted according to the second variable which is assumed to be length information. The list contains: [data, lengths, mask, reversed data]. Raises: InvalidLengthError: if the input does not have three variables per index. """ if len(batch[0]) != 3: raise InvalidLengthError( "Batch needs to contain data (batch[0]), reverse_data (batch[1]) and pad_token (batch[2])." ) # Unpack batch from list of tuples [(x_i, y_i, ...), ...] to list of lists [x, y, ...] 
batch = [[b[i] for b in batch] for i in range(len(batch[0]))] # Pad tensors x_len = torch.tensor([len(line) for line in batch[0]]) max_len = x_len.max().item() pad_token = batch[2][0] for line in batch[0]: if len(line) < max_len: line.extend([pad_token] * (max_len - len(line))) for line in batch[1]: if len(line) < max_len: line.extend([pad_token] * (max_len - len(line))) # Store data tensors in correct format and order batch[0] = torch.from_numpy(np.array(batch[0], dtype=np.int64)) batch.append(torch.from_numpy(np.array(batch[1], dtype=np.int64))) # Store length and mask in correct format and order batch[1] = x_len batch[2] = 1.0 - (batch[0] == pad_token).float() # Get lengths from second tensor in batch and sort all batch data based on those lengths _, indices = torch.sort(batch[1], descending=True) batch = [data[indices] for data in batch] return batch
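A hedged usage sketch tying TextDataUnPadded and sort_pad_collate together ("corpus.txt" is a hypothetical file of space-separated token indices, one sentence per line):

from torch.utils.data import DataLoader

dataset = TextDataUnPadded("corpus.txt", seq_len=50, pad_token=0)
loader = DataLoader(dataset, batch_size=16, collate_fn=sort_pad_collate)
for data, lengths, mask, reverse_data in loader:
    # each batch comes back padded to its own max length, masked,
    # and sorted by descending sequence length
    print(data.shape, lengths[0].item())
    break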
38.861111
122
0.65928
1,220
8,394
4.382787
0.186066
0.025809
0.014962
0.012717
0.594352
0.535814
0.465682
0.429026
0.41743
0.403217
0
0.008381
0.246605
8,394
215
123
39.04186
0.837128
0.481296
0
0.212121
0
0.010101
0.047827
0
0
0
0
0
0
1
0.10101
false
0
0.060606
0.040404
0.262626
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e54c9a30192e6b4af7abb9251e624d83d9672e92
2,938
py
Python
object_detection/pytorch/demo/webcam.py
lamyiowce/training
da4c959b5a7b65091b850872cdd4014d768c087c
[ "Apache-2.0" ]
567
2018-09-13T05:07:49.000Z
2020-11-23T11:52:11.000Z
object_detection/pytorch/demo/webcam.py
lamyiowce/training
da4c959b5a7b65091b850872cdd4014d768c087c
[ "Apache-2.0" ]
222
2018-09-14T10:15:39.000Z
2020-11-20T22:21:09.000Z
object_detection/pytorch/demo/webcam.py
ltechkorea/mlperf-training
498b945dd914573bdbf7a871eaeebd9388b60b76
[ "Apache-2.0" ]
279
2018-09-16T12:40:29.000Z
2020-11-17T14:22:52.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import argparse
import time

import cv2

from maskrcnn_benchmark.config import cfg
from predictor import COCODemo


def main():
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Webcam Demo")
    parser.add_argument(
        "--config-file",
        default="../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.7,
        help="Minimum score for the prediction to be shown",
    )
    parser.add_argument(
        "--min-image-size",
        type=int,
        default=224,
        help="Smallest size of the image to feed to the model. "
             "Model was trained with 800, which gives best results",
    )
    parser.add_argument(
        "--show-mask-heatmaps",
        dest="show_mask_heatmaps",
        help="Show a heatmap probability for the top masks-per-dim masks",
        action="store_true",
    )
    parser.add_argument(
        "--masks-per-dim",
        type=int,
        default=2,
        help="Number of heatmaps per dimension to show",
    )
    parser.add_argument(
        "opts",
        help="Modify model config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    # load config from file and command-line arguments
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # prepare object that handles inference plus adds predictions on top of image
    coco_demo = COCODemo(
        cfg,
        confidence_threshold=args.confidence_threshold,
        show_mask_heatmaps=args.show_mask_heatmaps,
        masks_per_dim=args.masks_per_dim,
        min_image_size=args.min_image_size,
    )

    cam = cv2.VideoCapture(0)
    while True:
        start_time = time.time()
        ret_val, img = cam.read()
        composite = coco_demo.run_on_opencv_image(img)
        print("Time: {:.2f} s / img".format(time.time() - start_time))
        cv2.imshow("COCO detections", composite)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
31.255319
88
0.663036
387
2,938
4.896641
0.509044
0.031662
0.053826
0.016887
0
0
0
0
0
0
0
0.014831
0.242682
2,938
93
89
31.591398
0.836854
0.26855
0
0.119403
0
0
0.266542
0.036133
0
0
0
0
0
1
0.014925
false
0
0.074627
0
0.089552
0.014925
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e54d49eead1974109248b8035d8a201f05e5a2c9
1,886
py
Python
os_xenapi/client/host_agent.py
jonarrien/openstack-xenapi
8961dcbdecfbe7c3e3b5d05e71011c3a70d273a6
[ "Apache-2.0" ]
null
null
null
os_xenapi/client/host_agent.py
jonarrien/openstack-xenapi
8961dcbdecfbe7c3e3b5d05e71011c3a70d273a6
[ "Apache-2.0" ]
null
null
null
os_xenapi/client/host_agent.py
jonarrien/openstack-xenapi
8961dcbdecfbe7c3e3b5d05e71011c3a70d273a6
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Citrix Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def version(session, uuid, dom_id, timeout):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout}
    return session.call_plugin('agent.py', 'version', args)


def key_init(session, uuid, dom_id, timeout, pub=''):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout, 'pub': pub}
    return session.call_plugin('agent.py', 'key_init', args)


def agent_update(session, uuid, dom_id, timeout, url='', md5sum=''):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout,
            'url': url, 'md5sum': md5sum}
    return session.call_plugin('agent.py', 'agentupdate', args)


def password(session, uuid, dom_id, timeout, enc_pass=''):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout,
            'enc_pass': enc_pass}
    return session.call_plugin('agent.py', 'password', args)


def inject_file(session, uuid, dom_id, timeout, b64_path='', b64_contents=''):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout,
            'b64_path': b64_path, 'b64_contents': b64_contents}
    return session.call_plugin('agent.py', 'inject_file', args)


def reset_network(session, uuid, dom_id, timeout):
    args = {'id': uuid, 'dom_id': dom_id, 'timeout': timeout}
    return session.call_plugin('agent.py', 'resetnetwork', args)
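Every helper above wraps the same session.call_plugin(plugin, fn, args) contract. A hedged illustration with a stub session (FakeSession is hypothetical and exists only to show the call shape; the real session comes from os_xenapi):

class FakeSession(object):
    def call_plugin(self, plugin, fn, args):
        # Record the call instead of talking to a XenServer host.
        print('call_plugin(%r, %r, %r)' % (plugin, fn, args))
        return 'ok'

session = FakeSession()
version(session, uuid='vm-uuid', dom_id='1', timeout=30)
# -> call_plugin('agent.py', 'version', {'id': 'vm-uuid', 'dom_id': '1', 'timeout': 30})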
39.291667
78
0.671262
267
1,886
4.595506
0.344569
0.07335
0.08802
0.07824
0.425428
0.350448
0.252649
0.252649
0.252649
0.141809
0
0.015023
0.188229
1,886
47
79
40.12766
0.786414
0.306999
0
0.272727
0
0
0.181748
0
0
0
0
0
0
1
0.272727
false
0.136364
0
0
0.545455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
3
e55136ee5d85881c01e65dc049c23752a163d827
8,328
py
Python
changes/api/build_details.py
bowlofstew/changes
ebd393520e0fdb07c240a8d4e8747281b6186e28
[ "Apache-2.0" ]
null
null
null
changes/api/build_details.py
bowlofstew/changes
ebd393520e0fdb07c240a8d4e8747281b6186e28
[ "Apache-2.0" ]
null
null
null
changes/api/build_details.py
bowlofstew/changes
ebd393520e0fdb07c240a8d4e8747281b6186e28
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import

from collections import defaultdict
from flask_restful.reqparse import RequestParser
from itertools import groupby
from sqlalchemy.orm import contains_eager, joinedload, subqueryload_all
from uuid import UUID

from changes.api.base import APIView
from changes.api.serializer.models.testcase import TestCaseWithOriginCrumbler
from changes.config import db
from changes.constants import Result, Status
from changes.models import (
    Build, BuildPriority, Source, Event, FailureReason, Job, TestCase,
    BuildSeen, User
)
from changes.utils.originfinder import find_failure_origins


def find_changed_tests(current_build, previous_build, limit=25):
    current_job_ids = [j.id.hex for j in current_build.jobs]
    previous_job_ids = [j.id.hex for j in previous_build.jobs]

    if not (current_job_ids and previous_job_ids):
        return []

    current_job_clause = ', '.join(
        ':c_job_id_%s' % i for i in range(len(current_job_ids))
    )
    previous_job_clause = ', '.join(
        ':p_job_id_%s' % i for i in range(len(previous_job_ids))
    )

    params = {}
    for idx, job_id in enumerate(current_job_ids):
        params['c_job_id_%s' % idx] = job_id
    for idx, job_id in enumerate(previous_job_ids):
        params['p_job_id_%s' % idx] = job_id

    # find all tests that have appeared in one job but not the other
    # we have to build this query up manually as sqlalchemy doesn't support
    # the FULL OUTER JOIN clause
    query = """
        SELECT c.id AS c_id, p.id AS p_id
        FROM (
            SELECT label_sha, id
            FROM test
            WHERE job_id IN (%(current_job_clause)s)
        ) as c
        FULL OUTER JOIN (
            SELECT label_sha, id
            FROM test
            WHERE job_id IN (%(previous_job_clause)s)
        ) as p
        ON c.label_sha = p.label_sha
        WHERE (c.id IS NULL OR p.id IS NULL)
    """ % {
        'current_job_clause': current_job_clause,
        'previous_job_clause': previous_job_clause
    }

    total = db.session.query(
        'count'
    ).from_statement(
        'SELECT COUNT(*) FROM (%s) as a' % (query,)
    ).params(**params).scalar()

    if not total:
        return {
            'total': 0,
            'changes': [],
        }

    results = db.session.query(
        'c_id', 'p_id'
    ).from_statement(
        '%s LIMIT %d' % (query, limit)
    ).params(**params)

    all_test_ids = set()
    for c_id, p_id in results:
        if c_id:
            all_test_ids.add(c_id)
        else:
            all_test_ids.add(p_id)

    test_map = dict(
        (t.id, t) for t in TestCase.query.filter(
            TestCase.id.in_(all_test_ids),
        ).options(
            joinedload('job', innerjoin=True),
        )
    )

    diff = []
    for c_id, p_id in results:
        if p_id:
            diff.append(('-', test_map[UUID(p_id)]))
        else:
            diff.append(('+', test_map[UUID(c_id)]))

    return {
        'total': total,
        'changes': sorted(diff, key=lambda x: (x[1].package, x[1].name)),
    }


def get_failure_reasons(build):
    from changes.buildfailures import registry

    rows = FailureReason.query.filter(
        FailureReason.build_id == build.id,
    )

    failure_reasons = []
    for row in rows:
        failure_reasons.append({
            'id': row.reason,
            'reason': registry[row.reason].get_html_label(build),
            'step_id': row.step_id,
            'job_id': row.job_id,
            'data': dict(row.data or {}),
        })
    return failure_reasons


def get_parents_last_builds(build):
    # A patch has only one parent, while a revision can have more.
    parents = []  # default so a build with neither patch nor revision falls through cleanly
    if build.source.patch:
        parents = [build.source.patch.parent_revision_sha]
    elif build.source.revision:
        parents = build.source.revision.parents

    if parents:
        parent_builds = list(Build.query.filter(
            Build.project == build.project,
            Build.status == Status.finished,
            Build.id != build.id,
            Source.patch_id == None,  # NOQA
        ).join(
            Source, Build.source_id == Source.id,
        ).options(
            contains_eager('source').joinedload('revision'),
        ).filter(
            Source.revision_sha.in_(parents)
        ).order_by(Build.date_created.desc()))
        if parent_builds:
            # This returns a list with the last build of each revision.
            return [
                list(builds)[0] for sha, builds in groupby(
                    parent_builds, lambda rev: rev.source.revision_sha
                )
            ]
    return []


class BuildDetailsAPIView(APIView):
    post_parser = RequestParser()
    post_parser.add_argument('priority', choices=BuildPriority._member_names_)

    def get(self, build_id):
        build = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            joinedload('source').joinedload('revision'),
            subqueryload_all('stats'),
        ).get(build_id)
        if build is None:
            return '', 404

        try:
            most_recent_run = Build.query.filter(
                Build.project == build.project,
                Build.date_created < build.date_created,
                Build.status == Status.finished,
                Build.id != build.id,
                Source.patch_id == None,  # NOQA
            ).join(
                Source, Build.source_id == Source.id,
            ).options(
                contains_eager('source').joinedload('revision'),
                joinedload('author'),
            ).order_by(Build.date_created.desc())[0]
        except IndexError:
            most_recent_run = None

        jobs = list(Job.query.filter(
            Job.build_id == build.id,
        ))

        # identify failures
        test_failures = TestCase.query.options(
            joinedload('job', innerjoin=True),
        ).filter(
            TestCase.job_id.in_([j.id for j in jobs]),
            TestCase.result == Result.failed,
        ).order_by(TestCase.name.asc())
        num_test_failures = test_failures.count()
        test_failures = test_failures[:25]

        failures_by_job = defaultdict(list)
        for failure in test_failures:
            failures_by_job[failure.job].append(failure)

        failure_origins = find_failure_origins(
            build, test_failures)
        for test_failure in test_failures:
            test_failure.origin = failure_origins.get(test_failure)

        # identify added/removed tests
        if most_recent_run and build.status == Status.finished:
            changed_tests = find_changed_tests(build, most_recent_run)
        else:
            changed_tests = []

        seen_by = list(User.query.join(
            BuildSeen, BuildSeen.user_id == User.id,
        ).filter(
            BuildSeen.build_id == build.id,
        ))

        extended_serializers = {
            TestCase: TestCaseWithOriginCrumbler(),
        }

        event_list = list(Event.query.filter(
            Event.item_id == build.id,
        ).order_by(Event.date_created.desc()))

        context = self.serialize(build)
        context.update({
            'jobs': jobs,
            'seenBy': seen_by,
            'events': event_list,
            'failures': get_failure_reasons(build),
            'testFailures': {
                'total': num_test_failures,
                'tests': self.serialize(test_failures, extended_serializers),
            },
            'testChanges': self.serialize(changed_tests, extended_serializers),
            'parents': self.serialize(get_parents_last_builds(build)),
        })

        return self.respond(context)

    def post(self, build_id):
        build = Build.query.options(
            joinedload('project', innerjoin=True),
            joinedload('author'),
            joinedload('source').joinedload('revision'),
        ).get(build_id)
        if build is None:
            return '', 404

        args = self.post_parser.parse_args()

        if args.priority is not None:
            build.priority = BuildPriority[args.priority]
        db.session.add(build)

        context = self.serialize(build)
        return self.respond(context, serialize=False)
31.074627
79
0.587416
975
8,328
4.809231
0.213333
0.022393
0.017914
0.014929
0.259117
0.204948
0.178076
0.178076
0.143741
0.121135
0
0.002604
0.308237
8,328
267
80
31.191011
0.811317
0.040226
0
0.266055
0
0
0.099587
0.006138
0
0
0
0
0
1
0.022936
false
0
0.059633
0
0.137615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e55245ae534826a12c1057928c01b0d967155c85
633
py
Python
test/test_one_or_greater.py
kant/stream-daemon
729bc576b74dcd9f1e2021a2433d176d33c413c9
[ "MIT" ]
2
2016-06-06T22:50:21.000Z
2018-01-17T16:14:05.000Z
test/test_one_or_greater.py
kant/stream-daemon
729bc576b74dcd9f1e2021a2433d176d33c413c9
[ "MIT" ]
null
null
null
test/test_one_or_greater.py
kant/stream-daemon
729bc576b74dcd9f1e2021a2433d176d33c413c9
[ "MIT" ]
1
2018-08-27T19:57:03.000Z
2018-08-27T19:57:03.000Z
import unittest

from Monitor import five_or_greater


class MockProject(object):
    def __init__(self, message_count, keyword_counts):
        self.message_count = message_count
        self.keyword_counts = keyword_counts


class TestOneOrGreater(unittest.TestCase):
    def test_some_above_some_below(self):
        total = 1000
        sample_dataset = {
            "keep1": 1000,
            "keep2": 800,
            "not1": 5,
            "keep3": 100,
            "not2": 1,
        }
        project = MockProject(total, sample_dataset)
        self.assertEqual(five_or_greater(project), ["keep1", "keep2", "keep3"])
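five_or_greater itself lives in Monitor and is not shown here. A plausible sketch consistent with this test (the 5%-of-total cutoff is an inference from the fixture, where 100/1000 is kept and 5/1000 is not; the real implementation may differ):

def five_or_greater_sketch(project):
    # Keep keywords whose count is at least 5% of the total message count.
    cutoff = 0.05 * project.message_count
    return sorted(k for k, v in project.keyword_counts.items() if v >= cutoff)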
30.142857
81
0.616114
67
633
5.522388
0.567164
0.097297
0.07027
0
0
0
0
0
0
0
0
0.052863
0.28278
633
21
81
30.142857
0.762115
0
0
0
0
0
0.059937
0
0
0
0
0
0.055556
1
0.111111
false
0
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e552b7215e51626970b737861ed04a252b5cb791
1,293
py
Python
biblepaycentral/masternodes/migrations/0001_initial.py
Lichtsucher/biblepaycentral
0575c9c6851bd87d35d8cda9840301ee510698e8
[ "MIT" ]
3
2018-03-18T22:36:20.000Z
2020-02-13T15:52:25.000Z
biblepaycentral/masternodes/migrations/0001_initial.py
Lichtsucher/biblepaycentral
0575c9c6851bd87d35d8cda9840301ee510698e8
[ "MIT" ]
null
null
null
biblepaycentral/masternodes/migrations/0001_initial.py
Lichtsucher/biblepaycentral
0575c9c6851bd87d35d8cda9840301ee510698e8
[ "MIT" ]
1
2018-10-16T10:51:11.000Z
2018-10-16T10:51:11.000Z
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-20 16:32
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Masternode',
            fields=[
                ('txid', models.CharField(editable=False, max_length=64, primary_key=True, serialize=False)),
                ('address', models.CharField(max_length=64)),
                ('inserted_at', models.DateTimeField(auto_now_add=True)),
                ('last_seen_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=30)),
                ('version', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='MasternodeHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('txid', models.CharField(max_length=64)),
                ('inserted_at', models.DateTimeField(auto_now_add=True)),
                ('status', models.CharField(max_length=30)),
                ('version', models.IntegerField()),
            ],
        ),
    ]
34.026316
114
0.572312
126
1,293
5.68254
0.47619
0.104749
0.100559
0.134078
0.47905
0.400838
0.400838
0.400838
0.400838
0.400838
0
0.030635
0.293117
1,293
37
115
34.945946
0.752735
0.053364
0
0.482759
1
0
0.086814
0
0
0
0
0
0
1
0
false
0
0.068966
0
0.206897
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e552e80c6cecaff697d61ab26de1caa136a85aa7
100
py
Python
taobaotmcpy/__init__.py
baocaixiong/taobao-tmc-python
7fea2e481f303aae172606bcc9e17d3ce5500abe
[ "MIT" ]
3
2015-09-17T14:35:45.000Z
2017-01-23T13:18:48.000Z
taobaotmcpy/__init__.py
baocaixiong/taobao-tmc-python
7fea2e481f303aae172606bcc9e17d3ce5500abe
[ "MIT" ]
null
null
null
taobaotmcpy/__init__.py
baocaixiong/taobao-tmc-python
7fea2e481f303aae172606bcc9e17d3ce5500abe
[ "MIT" ]
7
2016-03-29T07:02:50.000Z
2020-06-01T07:37:18.000Z
# coding: utf8
__author__ = 'baocaixiong'
__all__ = ['TmcClient']

from tmcclient import TmcClient
14.285714
31
0.75
10
100
6.7
0.8
0
0
0
0
0
0
0
0
0
0
0.011765
0.15
100
6
32
16.666667
0.776471
0.12
0
0
0
0
0.232558
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
3
e554f5b6e49841dbafd67f8647e1ce20f5fc6e43
17
py
Python
test.py
HayatoNagasaki/HayatoNagasaki.github.io
98ca109f327ad2e079b2143d46487ed60209c83d
[ "MIT" ]
1
2019-07-11T01:06:01.000Z
2019-07-11T01:06:01.000Z
test.py
HayatoNagasaki/HayatoNagasaki.github.io
98ca109f327ad2e079b2143d46487ed60209c83d
[ "MIT" ]
1
2019-07-08T05:28:59.000Z
2021-01-14T10:44:49.000Z
test.py
HayatoNagasaki/HayatoNagasaki.github.io
98ca109f327ad2e079b2143d46487ed60209c83d
[ "MIT" ]
null
null
null
print("test.py")
8.5
16
0.647059
3
17
3.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.058824
17
1
17
17
0.6875
0
0
0
0
0
0.411765
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
e559a7fb012d10180f481f20a5e14d31f9279b77
2,368
py
Python
helper.py
HackerLion123/CNN_for_Fashion
25eb3fc449cf9ee2f1591e3c137a11915f0ee199
[ "MIT" ]
null
null
null
helper.py
HackerLion123/CNN_for_Fashion
25eb3fc449cf9ee2f1591e3c137a11915f0ee199
[ "MIT" ]
null
null
null
helper.py
HackerLion123/CNN_for_Fashion
25eb3fc449cf9ee2f1591e3c137a11915f0ee199
[ "MIT" ]
null
null
null
import multiprocessing
import os
import sys

import numpy as np
import tensorflow as tf
from tensorflow import gfile
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer


def resize(img, shape):
    return tf.image.resize(img, shape)


def load_mnist():
    pass


def preprocess_input(path):
    with Image.open(path) as img:
        img = np.array(img, np.float32)
        img = img / 255
        img = create_dataset(np.array([img]), np.array([[0] * 10], np.float32), 1)
    return img


def create_dataset(images, labels, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    dataset = dataset.shuffle(len(images))
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(1)
    return dataset


def load_data(train_path, batch_size=1, test_path=None):
    train_images, train_labels = get_data(train_path)
    if test_path:
        test_images, test_labels = get_data(test_path)
    else:
        train_images, test_images, train_labels, test_labels = train_test_split(
            train_images, train_labels, shuffle=True, test_size=0.04)
    val_images, test_images, val_labels, test_labels = train_test_split(
        test_images, test_labels, shuffle=False, test_size=0.5)
    print(val_images[1].shape)
    print(len(val_labels))
    train = create_dataset(train_images, train_labels, batch_size)
    val = create_dataset(val_images, val_labels, len(val_images))
    test = create_dataset(test_images, test_labels, batch_size)
    del train_images, train_labels, test_images, test_labels, val_images, val_labels
    return train, val, test


def get_data(data_path):
    images, labels = [], []
    # classes = os.listdir(path)
    # cur_dir = os.getcwd()
    # os.chdir(path)
    for cls in os.listdir(data_path):
        path = os.path.join(data_path, cls)
        for img_path in os.listdir(path):
            try:
                with Image.open(os.path.join(path, img_path)) as img:
                    images.append(np.array(img))
                    labels.append(cls)
            except Exception as e:
                pass
    images = [image / 255 for image in images]
    encoder = LabelBinarizer()
    encoder.fit(labels)
    labels = encoder.transform(labels)
    labels = labels.astype(np.float32)
    # os.chdir(cur_dir)
    print(len(images))
    print(len(labels))
    return np.array(images, np.float32), np.array(labels)


def get_batch():
    pass


def plot():
    pass


def stack_plot():
    pass


if __name__ == '__main__':
    images, labels, _ = load_data("data/notMNIST_small")
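A hedged smoke test of create_dataset on fake inputs (the 28x28x1 shape and one-hot labels are assumptions for illustration; the real arrays come from get_data):

fake_images = np.zeros((4, 28, 28, 1), np.float32)
fake_labels = np.eye(10, dtype=np.float32)[[0, 1, 2, 3]]
for batch_x, batch_y in create_dataset(fake_images, fake_labels, batch_size=2):
    print(batch_x.shape, batch_y.shape)  # (2, 28, 28, 1) (2, 10)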
26.606742
132
0.752956
367
2,368
4.632153
0.239782
0.041176
0.05
0.051765
0.035294
0.035294
0
0
0
0
0
0.012658
0.132601
2,368
89
133
26.606742
0.814995
0.034206
0
0.075758
0
0
0.011827
0
0
0
0
0
0
1
0.136364
false
0.075758
0.136364
0.015152
0.348485
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
e55aa2f041ab96556aa76a0a7df9e2eb922247e5
1,570
py
Python
row.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
null
null
null
row.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
9
2015-10-30T12:46:53.000Z
2015-11-25T03:27:49.000Z
row.py
txt/se4dm
c38c742039eaa7a15730eb655c4eed067c8a5409
[ "Unlicense" ]
2
2018-06-22T15:23:44.000Z
2020-11-05T01:47:54.000Z
from __future__ import print_function, division

import sys
sys.dont_write_bytecode = True

"""
# Rows
"""

from lib import *


class Row:
    n = -1

    def __init__(i, t):
        Row.n = i.n = Row.n + 1
        i.t, i.dists = t, {}

    def dist(j, k):
        if j.n == k.n:
            return 0
        if j.n > k.n:
            return k.dist(j)
        key = (j.n, k.n)
        if key not in j.dists:
            j.dists[key] = dist(j.t, j, k)  # the row's table lives on j.t
        return j.dists[key]

    def furthest(j, lst=None, best=-1, better=gt):
        lst = lst or j.t.rows
        out = j
        for k in lst:
            tmp = dist(j.t, j, k)
            if tmp and better(tmp, best):
                out, best = k, tmp
        return best

    def closest(j, lst=None):
        return j.furthest(lst, best=1e32, better=lt)

    def knn(i, k=1, lst=None):
        lst = lst or i.t.rows
        out = {}
        for r1 in lst:
            # all (distance, row) pairs from r1, then keep the k nearest
            all = [(dist(i.t, r1, r2), r2) for r2 in lst]
            out[r1] = sorted(all)[:k]
        return out


def dist(t, j, k):
    def colxy(cols, xs, ys):
        for col in cols:
            x = xs[col.pos]
            y = ys[col.pos]
            if x == "?" and y == "?":
                continue
            yield col, x, y

    def far(col, x, y):
        y = col.norm(y)
        x = 0 if y > 0.5 else 1
        return x, y
    #---------
    n = all = 0
    for col, x, y in colxy(t.indep.syms, j, k):
        if x == "?" or y == "?":
            n += 1
            all += 1
        else:
            inc = 0 if x == y else 1
            n += 1
            all += inc
    for col, x, y in colxy(t.indep.nums, j, k):
        if x == "?":
            x, y = far(col, x, y)
        elif y == "?":
            y, x = far(col, y, x)
        else:
            x, y = col.norm(x), col.norm(y)
        n += 1
        all += (x - y) ** 2
    return all ** 0.5 / n ** 0.5
22.112676
50
0.499363
302
1,570
2.559603
0.235099
0.023286
0.020699
0.015524
0.093144
0.072445
0
0
0
0
0
0.028999
0.319108
1,570
70
51
22.428571
0.694107
0.005732
0
0.081967
0
0
0.003886
0
0
0
0
0
0
1
0.131148
false
0
0.04918
0.016393
0.311475
0.016393
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e55cd024add940dff887d317c65342a61070e10c
306
py
Python
hyperparams.py
nce3xin/spam
908421d5cf2dd103e2a7044bf1c8586aaf5f2ada
[ "MIT" ]
1
2019-03-13T10:49:25.000Z
2019-03-13T10:49:25.000Z
hyperparams.py
nce3xin/spam
908421d5cf2dd103e2a7044bf1c8586aaf5f2ada
[ "MIT" ]
null
null
null
hyperparams.py
nce3xin/spam
908421d5cf2dd103e2a7044bf1c8586aaf5f2ada
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Mon Jul 9 11:17:24 2018 @author: nce3xin """ seed_num=1 learning_rate=1e-3 #epochs=109 #epochs=90 epochs=20 batch_size=16 log_interval=1 no_cuda=False MODEL='LSTM' cnn_out_dims=25 CNN_mapping=False normalization=False standard_scale=False min_max_scaler=False
11.769231
35
0.754902
54
306
4.074074
0.851852
0
0
0
0
0
0
0
0
0
0
0.104089
0.120915
306
26
36
11.769231
0.713755
0.310458
0
0
0
0
0.019802
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e55d3600b75d26796ee6898a2f70d9baa125034f
1,091
py
Python
elkm1_lib/settings.py
stevemcquaid/elkm1
843092329208a3a2bafdedc0a8984a6f20626480
[ "MIT" ]
null
null
null
elkm1_lib/settings.py
stevemcquaid/elkm1
843092329208a3a2bafdedc0a8984a6f20626480
[ "MIT" ]
null
null
null
elkm1_lib/settings.py
stevemcquaid/elkm1
843092329208a3a2bafdedc0a8984a6f20626480
[ "MIT" ]
null
null
null
"""Definition of an ElkM1 Custom Value""" from .const import Max, TextDescriptions from .elements import Element, Elements from .message import add_message_handler, cp_encode, cw_encode class Setting(Element): """Class representing an Custom Value""" def __init__(self, index, elk): super().__init__(index, elk) self.value_format = 0 self.value = None def set(self, value): """(Helper) Set custom value.""" self._elk.send(cw_encode(self._index, value, self.value_format)) class Settings(Elements): """Handling for multiple custom values""" def __init__(self, elk): super().__init__(elk, Setting, Max.SETTINGS.value) add_message_handler('CR', self._cr_handler) def sync(self): """Retrieve custom values from ElkM1""" self.elk.send(cp_encode()) self.get_descriptions(TextDescriptions.SETTING.value) def _cr_handler(self, index, value, value_format): custom_value = self.elements[index] custom_value.value_format = value_format custom_value.value = value
32.088235
72
0.679193
137
1,091
5.124088
0.321168
0.094017
0.048433
0.062678
0
0
0
0
0
0
0
0.003476
0.208983
1,091
33
73
33.060606
0.809965
0.153071
0
0
0
0
0.002227
0
0
0
0
0
0
1
0.238095
false
0
0.142857
0
0.47619
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
e55fd1183bbea7baae5e8cb8798441212de67d2a
286
py
Python
rxbp/indexed/init/initindexedflowable.py
MichaelSchneeberger/rx_backpressure
16173827498bf1bbee3344933cb9efbfd19699f5
[ "Apache-2.0" ]
24
2018-11-22T21:04:49.000Z
2021-11-08T11:18:09.000Z
rxbp/indexed/init/initindexedflowable.py
MichaelSchneeberger/rx_backpressure
16173827498bf1bbee3344933cb9efbfd19699f5
[ "Apache-2.0" ]
1
2019-02-06T15:58:46.000Z
2019-02-12T20:31:50.000Z
rxbp/indexed/init/initindexedflowable.py
MichaelSchneeberger/rx_backpressure
16173827498bf1bbee3344933cb9efbfd19699f5
[ "Apache-2.0" ]
1
2021-01-26T12:41:37.000Z
2021-01-26T12:41:37.000Z
from rxbp.indexed.impl.indexedflowableimpl import IndexedFlowableImpl
from rxbp.indexed.mixins.indexedflowablemixin import IndexedFlowableMixin


def init_indexed_flowable(
        underlying: IndexedFlowableMixin,
):
    return IndexedFlowableImpl(
        underlying=underlying
    )
26
73
0.800699
24
286
9.458333
0.541667
0.070485
0.132159
0
0
0
0
0
0
0
0
0
0.15035
286
10
74
28.6
0.934156
0
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.25
0.125
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
2
e560c0065add902aec8b6f052dd6151fd1ebe484
291
py
Python
src/__init__.py
drkostas/COSC525-Project2
a33c786621e6047b0a586c7c3a3b5b85cb51fd6d
[ "Apache-2.0" ]
null
null
null
src/__init__.py
drkostas/COSC525-Project2
a33c786621e6047b0a586c7c3a3b5b85cb51fd6d
[ "Apache-2.0" ]
null
null
null
src/__init__.py
drkostas/COSC525-Project2
a33c786621e6047b0a586c7c3a3b5b85cb51fd6d
[ "Apache-2.0" ]
null
null
null
"""Top-level package for COSC525-Project2.""" from .neuron import * from .fully_connected_layer import * from .convolutional_layer import * from .max_pooling_layer import * from .flatten_layer import * from .neural_network import * from .data_generator import * from .tensor_files import *
26.454545
45
0.786942
39
291
5.641026
0.564103
0.318182
0.272727
0
0
0
0
0
0
0
0
0.015748
0.127148
291
10
46
29.1
0.850394
0.134021
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e560f9f2e17600df62f3bea76144c341a81a3cc7
7,581
py
Python
source/02_ssd_large/lib/model.py
toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles
af2e0db16281fb997a9bd5149c478095128a627e
[ "MIT" ]
24
2019-11-28T05:54:58.000Z
2021-06-14T07:38:30.000Z
source/03_ssd_small/lib/model.py
toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles
af2e0db16281fb997a9bd5149c478095128a627e
[ "MIT" ]
null
null
null
source/03_ssd_small/lib/model.py
toshi-k/kaggle-3d-object-detection-for-autonomous-vehicles
af2e0db16281fb997a9bd5149c478095128a627e
[ "MIT" ]
5
2019-12-06T05:59:32.000Z
2021-09-16T13:30:29.000Z
import math
from pathlib import Path

import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import models
from PIL import Image

from lib.default_box import dbox_params
from lib.visualize import Visualizer
from common import numpy2pil


def set_batch_norm_eval(model):
    bn_count = 0
    bn_training = 0
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm.BatchNorm2d):
            if module.training:
                bn_training += 1
                module.eval()
            bn_count += 1
            module.weight.requires_grad = False
            module.bias.requires_grad = False
    print('{} BN modules are set to eval'.format(bn_count))


class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.num_classes = 10
        self.output_channel = self.num_classes + 7
        resnet34 = models.resnet34(pretrained=True)
        self.resnet34_main = nn.Sequential(
            resnet34.conv1, resnet34.bn1, resnet34.relu, resnet34.maxpool,
            resnet34.layer1, resnet34.layer2, resnet34.layer3
        )
        self.conv_ex1 = resnet34.layer4
        self.conv_ex2 = nn.Sequential(
            nn.Conv2d(512, 256, kernel_size=1, padding=0, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        )
        self.conv_up2 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=3, padding=1, stride=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 512, kernel_size=2, padding=0, stride=2),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True)
        )
        # self.conv_ex3 = nn.Sequential(nn.Conv2d(512, 128, kernel_size=1, padding=0, stride=1),
        #                               nn.ReLU(inplace=True),
        #                               nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2),
        #                               nn.ReLU(inplace=True)
        #                               )
        # self.ex0_intermediate = nn.Conv2d(256, 4 * self.output_channel, kernel_size=3, padding=1, stride=1)
        self.ex1_intermediate = nn.Sequential(
            nn.Conv2d(1024, 512, kernel_size=3, padding=1, stride=1),
            nn.Softplus(),
            nn.Conv2d(512, 4 * self.output_channel, kernel_size=1, padding=0, stride=1)
        )
        # self.ex2_intermediate = nn.Conv2d(512, 4 * self.output_channel, kernel_size=3, padding=1, stride=1)
        # self.ex3_intermediate = nn.Conv2d(256, 32, kernel_size=3, padding=1, stride=1)

    @staticmethod
    def header(h, img_size):
        batch_size = len(h)
        step = img_size / h.shape[-1]
        points = np.arange(step / 2 - 0.5, img_size, step, dtype=np.float32)
        assignment, x, y, length, width, z, height, rotate = torch.split(
            h, [10, 1, 1, 1, 1, 1, 1, 1], dim=2)
        x_points = np.tile(points.reshape(1, 1, 1, h.shape[-1], 1),
                           (batch_size, len(dbox_params), 1, 1, h.shape[-1]))
        y_points = np.tile(points.reshape(1, 1, 1, 1, h.shape[-1]),
                           (batch_size, len(dbox_params), 1, h.shape[-1], 1))
        rotate_vars = dbox_params['rotate_vars'].values
        rotate_vars = np.tile(rotate_vars.reshape(1, len(rotate_vars), 1, 1, 1),
                              (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        length_shifts = dbox_params['length_shifts'].values
        length_shifts = np.tile(length_shifts.reshape(1, len(length_shifts), 1, 1, 1),
                                (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        width_shifts = dbox_params['width_shifts'].values
        width_shifts = np.tile(width_shifts.reshape(1, len(width_shifts), 1, 1, 1),
                               (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        height_shifts = dbox_params['height_shifts'].values
        height_shifts = np.tile(height_shifts.reshape(1, len(height_shifts), 1, 1, 1),
                                (batch_size, 1, 1, h.shape[-1], h.shape[-1]))
        assignment = torch.softmax(assignment, dim=2)
        # [batch_size, dbox, channel, x, y]
        x_abs = torch.tanh(x) * step + torch.from_numpy(x_points).cuda()
        y_abs = torch.tanh(y) * step + torch.from_numpy(y_points).cuda()
        z_abs = z + 1010.0
        length_abs = torch.exp(length * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(length_shifts).cuda() + 1
        width_abs = torch.exp(width * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(width_shifts).cuda() + 1
        height_abs = torch.exp(height * 0.1 + math.log2(step) / 1.5) * torch.from_numpy(height_shifts).cuda() + 1
        rotate_abs = torch.atan(rotate) + torch.from_numpy(rotate_vars).cuda()
        return torch.cat([assignment, x_abs, y_abs, length_abs, width_abs, z_abs, height_abs, rotate_abs], dim=2)

    def forward_main(self, x):
        list_output = list()
        main_out = self.resnet34_main.forward(x)
        ex1_down = F.relu(self.conv_ex1(main_out))
        ex2_down = self.conv_ex2(ex1_down)
        ex1_up = self.conv_up2(ex2_down)
        ex1_out = torch.cat([ex1_down, ex1_up], 1)
        ex1_branch = self.ex1_intermediate(ex1_out)  # 24x24
        list_output.append(ex1_branch)
        return list_output

    def forward(self, x):
        list_output = list()
        list_main = self.forward_main(x)
        for out in list_main:
            size = out.shape[-1]
            h = self.header(out.reshape(-1, 4, self.output_channel, size, size), img_size=x.shape[-1])
            list_output.append(h.reshape(-1, 4 * self.output_channel, size, size))
        return list_output


def build_model():
    model = Model()
    model.cuda()
    return model


if __name__ == '__main__':
    dir_debug = Path('_debug')
    dir_debug.mkdir(exist_ok=True)

    model = build_model()
    print(model)
    viz = Visualizer('colors.json')

    # 768 x 768
    in_arr1 = np.zeros((2, 3, 768, 768), dtype=np.float32)
    in_tensor1 = torch.from_numpy(in_arr1)
    out_vars1 = model.forward(in_tensor1.cuda())
    [print(out_var.shape) for out_var in out_vars1]
    out_var_numpy1 = [tensor.cpu().data.numpy() for tensor in out_vars1]
    out_var_numpy_batch1 = [[tensor[b, :, :, :] for tensor in out_var_numpy1] for b in range(2)]
    img = viz.draw_predicted_boxes(out_var_numpy_batch1[0], dbox_params, img_size=in_arr1.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_1-0.png')
    img = viz.draw_predicted_boxes(out_var_numpy_batch1[1], dbox_params, img_size=in_arr1.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_1-1.png')

    # 1024 x 1024
    in_arr2 = np.zeros((2, 3, 1024, 1024), dtype=np.float32)
    in_tensor2 = torch.from_numpy(in_arr2)
    out_vars2 = model.forward(in_tensor2.cuda())
    [print(out_var.shape) for out_var in out_vars2]
    out_var_numpy2 = [tensor.cpu().data.numpy() for tensor in out_vars2]
    out_var_numpy_batch2 = [[tensor[b, :, :, :] for tensor in out_var_numpy2] for b in range(2)]
    img = viz.draw_predicted_boxes(out_var_numpy_batch2[0], dbox_params, img_size=in_arr2.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_2-0.png')
    img = viz.draw_predicted_boxes(out_var_numpy_batch2[1], dbox_params, img_size=in_arr2.shape[-1])
    numpy2pil(img).save(dir_debug / 'sample_2-1.png')
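The `dbox_params` object imported from `lib.default_box` is not shown in this file; from its usage (`len(dbox_params)`, column access such as `dbox_params['rotate_vars'].values`, and the reshape to 4 boxes per cell in `forward`) it behaves like a pandas DataFrame with one row per default box. A minimal stand-in for running `Model.header` in isolation, purely an assumption and not the project's real values:

# Hypothetical stand-in for lib.default_box.dbox_params (illustrative values only).
import numpy as np
import pandas as pd

dbox_params = pd.DataFrame({
    'rotate_vars':   [0.0, np.pi / 2, 0.0, np.pi / 2],  # base rotation per default box
    'length_shifts': [1.0, 1.0, 2.0, 2.0],              # multiplicative length prior
    'width_shifts':  [1.0, 1.0, 0.5, 0.5],              # multiplicative width prior
    'height_shifts': [1.0, 1.0, 1.0, 1.0],              # multiplicative height prior
})
assert len(dbox_params) == 4  # forward() reshapes the head to 4 boxes per grid cell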
34.616438
118
0.61298
1,092
7,581
4.044872
0.165751
0.012225
0.020602
0.021734
0.396423
0.35952
0.347295
0.313335
0.238397
0.221417
0
0.065341
0.25709
7,581
218
119
34.775229
0.718928
0.086136
0
0.115108
0
0
0.022993
0
0
0
0
0
0
1
0.043165
false
0
0.086331
0
0.165468
0.028777
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5616dbad551125f0ff82bbdd7078f807585a1f9
2,560
py
Python
tests/datastructures_tests/physical_data_tests.py
czbiohub/reconstruct-order
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
6
2019-10-30T23:00:01.000Z
2021-03-02T19:09:07.000Z
tests/datastructures_tests/physical_data_tests.py
czbiohub/ReconstructOrder
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
14
2019-07-08T22:51:29.000Z
2019-07-13T15:44:01.000Z
tests/datastructures_tests/physical_data_tests.py
mehta-lab/reconstruct-order
e729ae3871aea0a5ec2d42744a9448c7f0a93037
[ "Unlicense" ]
2
2020-05-02T23:28:36.000Z
2020-07-16T23:46:46.000Z
import numpy as np
import pytest, os
from numpy.testing import assert_array_equal
from ReconstructOrder.datastructures.physical_data import PhysicalData


def test_basic_constructor_nparray():
    """
    test assignment using numpy arrays
    """
    phys = PhysicalData()
    phys.I_trans = np.ones((512, 512))
    phys.polarization = 2 * np.ones((512, 512))
    phys.retard = 3 * np.ones((512, 512))
    phys.depolarization = 4 * np.ones((512, 512))
    phys.azimuth = 5 * np.ones((512, 512))
    phys.azimuth_degree = 6 * np.ones((512, 512))
    phys.azimuth_vector = 7 * np.ones((512, 512))

    assert_array_equal(phys.I_trans, np.ones((512, 512)))
    assert_array_equal(phys.polarization, 2*np.ones((512, 512)))
    assert_array_equal(phys.retard, 3*np.ones((512, 512)))
    assert_array_equal(phys.depolarization, 4*np.ones((512, 512)))
    assert_array_equal(phys.azimuth, 5*np.ones((512, 512)))
    assert_array_equal(phys.azimuth_degree, 6*np.ones((512, 512)))
    assert_array_equal(phys.azimuth_vector, 7*np.ones((512, 512)))


def test_basic_constructor_memap(setup_temp_data):
    """
    test assignment using memory mapped files
    """
    mm = setup_temp_data
    phys = PhysicalData()
    phys.I_trans = mm
    phys.polarization = 2 * mm
    phys.retard = 3 * mm
    phys.depolarization = 4 * mm
    phys.azimuth = 5 * mm
    phys.azimuth_degree = 6 * mm
    phys.azimuth_vector = 7 * mm

    assert_array_equal(phys.I_trans, mm)
    assert_array_equal(phys.polarization, 2*mm)
    assert_array_equal(phys.retard, 3*mm)
    assert_array_equal(phys.depolarization, 4*mm)
    assert_array_equal(phys.azimuth, 5*mm)
    assert_array_equal(phys.azimuth_degree, 6*mm)
    assert_array_equal(phys.azimuth_vector, 7*mm)


def test_instances():
    """
    test instance attributes
    """
    phs1 = PhysicalData()
    phs2 = PhysicalData()
    with pytest.raises(AssertionError):
        assert(phs1 == phs2)
    with pytest.raises(AssertionError):
        phs1.retard = 1
        phs2.retard = 2
        assert(phs1.retard == phs2.retard)


def test_private_access(setup_physical_data):
    """
    test that private attributes are not accessible
    """
    phys = setup_physical_data
    with pytest.raises(AttributeError):
        print(phys.__I_trans)
        print(phys.__retard)


# ==== Attribute assignment ==========

def test_assignment(setup_physical_data):
    """
    test exception handling of improper assignment
    """
    phys = setup_physical_data
    with pytest.raises(TypeError):
        phys.incorrect_attribute = 1
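The fixtures `setup_temp_data` and `setup_physical_data` are defined outside this file (presumably in a conftest.py). A hedged sketch of what they plausibly provide, inferred only from how these tests use them (a 512x512 array-like and a populated PhysicalData); the repo's actual fixtures may differ:

# Hypothetical conftest.py sketch -- inferred from usage, not the repo's real fixtures.
import numpy as np
import pytest
from ReconstructOrder.datastructures.physical_data import PhysicalData


@pytest.fixture
def setup_temp_data(tmp_path):
    # A memory-mapped 512x512 float array, matching the shapes used in the tests.
    mm = np.memmap(tmp_path / "temp.dat", dtype=np.float32, mode="w+", shape=(512, 512))
    mm[:] = 1.0
    return mm


@pytest.fixture
def setup_physical_data():
    phys = PhysicalData()
    phys.I_trans = np.ones((512, 512))
    phys.retard = 2 * np.ones((512, 512))
    return phys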
28.444444
70
0.679688
344
2,560
4.851744
0.209302
0.098862
0.143799
0.100659
0.525464
0.47154
0.37867
0.191132
0.070102
0
0
0.058077
0.199609
2,560
90
71
28.444444
0.756467
0.091406
0
0.109091
0
0
0
0
0
0
0
0
0.345455
1
0.090909
false
0
0.072727
0
0.163636
0.036364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5620e85ec34ab2ff5817e8825c91c57685d44ba
6,349
py
Python
2019/14.py
IsaacG/Advent-of-Code
1e970c6a4abc4a2025f7c70323e70aee64d0bc21
[ "MIT" ]
3
2020-12-19T09:01:03.000Z
2021-12-16T13:05:03.000Z
2019/14.py
IsaacG/Advent-of-Code
1e970c6a4abc4a2025f7c70323e70aee64d0bc21
[ "MIT" ]
null
null
null
2019/14.py
IsaacG/Advent-of-Code
1e970c6a4abc4a2025f7c70323e70aee64d0bc21
[ "MIT" ]
null
null
null
#!/bin/python
"""Day 14: Space Stoichiometry.

Handle chemical reactions, converting ORE to FUEL.
"""

import collections
import math
import typer
from typing import Dict, List, Set, Tuple

import data
from lib import aoc

SAMPLE = data.D14
TRILLION = int(1e12)


class Reaction:
  """Wrapper around a single reaction."""

  def __init__(self, product: Tuple[int, str], reactants: List[Tuple[int, str]]):
    self._reactants = reactants
    self.product_amt, self.product = product
    self.reactants = {r[1] for r in self._reactants}

  def needed(self, count: int) -> Tuple[List[Tuple[int, str]], int]:
    """Calculate how much of each reactant is needed to make `count` product.

    Returns the reactants needed and the amount of product produced.
    """
    factor = math.ceil(count / self.product_amt)
    return [(factor * c, e) for c, e in self._reactants], factor * self.product_amt


class Day14(aoc.Challenge):

  TESTS = (
    aoc.TestCase(inputs=SAMPLE[0], part=1, want=165),
    aoc.TestCase(inputs=SAMPLE[1], part=1, want=13312),
    aoc.TestCase(inputs=SAMPLE[2], part=1, want=180697),
    aoc.TestCase(inputs=SAMPLE[3], part=1, want=2210736),
    aoc.TestCase(inputs=SAMPLE[1], part=2, want=82892753),
    aoc.TestCase(inputs=SAMPLE[2], part=2, want=5586022),
    aoc.TestCase(inputs=SAMPLE[3], part=2, want=460664),
  )

  def part1(self, reactions: Dict[str, Reaction]) -> int:
    """Calculate how much ore is needed for 1 unit of fuel."""
    return self.ore_per_fuel(reactions, 1)

  def part2(self, reactions: Dict[str, Reaction]) -> int:
    """Determine how much fuel can be made with 1e12 ore.

    Use the `ore_per_fuel()` function to binary search from 0 to 2e12 / ore_per_fuel(1).
    """
    low, high = 1, 2 * TRILLION // self.ore_per_fuel(reactions, 1)
    while (high - low) > 1:
      mid = (low + high) // 2
      ore = self.ore_per_fuel(reactions, mid)
      if ore == TRILLION:
        # Unlikely to occur but it doesn't hurt to be safe.
        return mid
      elif ore > TRILLION:
        high = mid
      else:
        low = mid
    return low

  def part2_via_reactions(self, reactions: Dict[str, Reaction]) -> int:
    """Solve part2 by actually running reactions until we run out of ore."""
    # Track inventory of products as we run reactions and have leftovers.
    inventory = {product: 0 for product in reactions}
    inventory['ORE'] = TRILLION

    def react(product: str, amount: int, inv: Dict[str, int]) -> bool:
      """Run a reaction to produce `amount` of `product` using mutable inventory `inv`.

      Returns a bool indicating if we can actually pull off the reaction.
      On False, `inv` is a bit trashed.
      """

      def _react(product, amount):
        """Closure on `inv` to avoid passing it around."""
        # If we do not have enough ore and are trying to produce some, this reaction fails.
        if product == 'ORE':
          return False
        needs, gets = reactions[product].needed(amount)
        # Produce all the needed reactants to run the reaction.
        # Some reactants might use up others to be formed, hence the loop.
        while any(inv[reactant] < uses for uses, reactant in needs):
          for uses, reactant in needs:
            if inv[reactant] >= uses:
              continue
            # We need more of this reactant. Try to produce it. Mutates `inv`.
            short = uses - inv[reactant]
            if not _react(reactant, short):
              return False
        # Mutate `inv` and run the reaction. Use up reactants, produce product.
        for uses, reactant in needs:
          inv[reactant] -= uses
        inv[product] += gets
        return True

      return _react(product, amount)

    # Try to produce fuel in large quantities at first.
    # Reduce reaction size as they fail.
    volume = TRILLION // self.part1(reactions)
    while True:
      # Since failed reactions mutate the inventory, first see if they will work
      # on a copy. Then actually update the inventory.
      if react('FUEL', volume, inventory.copy()):
        react('FUEL', volume, inventory)
      else:
        # Failed to produce 1 fuel. We are at the end.
        if volume == 1:
          return inventory['FUEL']
        volume = volume // 2 or 1

  def ore_per_fuel(self, reactions: Dict[str, Reaction], fuel: int) -> int:
    """Calculate how much ore is required to produce `fuel` units of fuel."""
    _dependencies = {'ORE': set()}  # type: Dict[str, Set[str]]

    def dependencies(product: str) -> Set[str]:
      """Compute *all* reactants (recursively) involved in producing `product`."""
      # Cache results for dynamic programming.
      if product not in _dependencies:
        # Collect all reactants ... recursively.
        deps = set(reactions[product].reactants)
        for reactant in list(deps):
          deps.update(dependencies(reactant))
        _dependencies[product] = deps
      return _dependencies[product]

    # Iteratively resolve all products to the reactants needed to produce them.
    # Stop when we get down to just ore.
    want = collections.defaultdict(int)
    want['FUEL'] = fuel
    while list(want.keys()) != ['ORE']:
      # Find all products which are not also reactants of other products.
      # If a product is also a reactant, we may need more of it so it cannot yet be solved.
      products = {r for r in want.keys() if not any(r in dependencies(other) for other in want)}
      for product in products:
        # Add all the required reactants to the want list and remove the product.
        for amount, reactant in reactions[product].needed(want[product])[0]:
          want[reactant] += amount
        del want[product]
    return want['ORE']

  def parse_input(self, puzzle_input: str) -> Dict[str, Reaction]:
    """Build a dictionary of material produced to Reaction."""
    reactions = {}  # type: Dict[str, Reaction]

    def to_tuple(pair: str) -> Tuple[int, str]:
      a, b = pair.split()
      return (int(a), b)

    for line in puzzle_input.split('\n'):
      reactants, product = line.split('=>')
      reaction = Reaction(
        to_tuple(product),
        [to_tuple(p) for p in reactants.split(', ')],
      )
      reactions[reaction.product] = reaction
    return reactions


if __name__ == '__main__':
  typer.run(Day14().run)

# vim:ts=2:sw=2:expandtab
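The rounding behaviour of `Reaction.needed` is the crux of both parts: reactions only run in whole batches, so asking for 7 of a product made 10 at a time still consumes a full batch of reactants. A tiny illustrative check (the reaction here is made up, not puzzle input):

# Illustrative only: one reaction "10 ORE => 10 A", not real puzzle input.
r = Reaction((10, 'A'), [(10, 'ORE')])
needs, produced = r.needed(7)
print(needs, produced)  # [(10, 'ORE')] 10 -- rounded up to a whole reaction batch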
35.668539
96
0.642778
877
6,349
4.602052
0.279361
0.013875
0.029485
0.039891
0.1167
0.087711
0
0
0
0
0
0.020088
0.247283
6,349
177
97
35.870057
0.82444
0.335486
0
0.059406
0
0
0.010968
0
0
0
0
0
0
1
0.108911
false
0
0.059406
0
0.326733
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5636f16a4be081479c9bb8479ea7b652ed01784
530
py
Python
src/pynauty/tests/test_autgrp.py
sammorley-short/pynauty-1
852ee738174179c242913ff2afa8b47715d0947b
[ "Apache-2.0" ]
16
2021-02-05T10:15:57.000Z
2022-03-07T21:51:09.000Z
src/pynauty/tests/test_autgrp.py
sammorley-short/pynauty-1
852ee738174179c242913ff2afa8b47715d0947b
[ "Apache-2.0" ]
20
2021-01-31T11:48:56.000Z
2022-01-25T15:16:05.000Z
src/pynauty/tests/test_autgrp.py
sammorley-short/pynauty-1
852ee738174179c242913ff2afa8b47715d0947b
[ "Apache-2.0" ]
6
2021-02-18T11:55:17.000Z
2021-08-21T03:24:58.000Z
#!/usr/bin/env python

import sys
from pynauty import autgrp, Version
import pytest

# List of graphs for testing
#
# Structure:
#   [[name, Graph, numorbit, grpsize, generators]]
#
# numorbit, grpsize, generators were calculated by dreadnaut
#

def test_autgrp(graph):
    gname, g, numorbit, grpsize, gens = graph
    print(Version())
    print('%-17s ...' % gname, end=' ')
    sys.stdout.flush()
    generators, order, o2, orbits, orbit_no = autgrp(g)
    assert generators == gens and orbit_no == numorbit and order == grpsize
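The `graph` argument must be supplied by a parametrized fixture that is not part of this file. A hedged sketch of what one entry could look like, using pynauty's documented `Graph` constructor; the expected-generators value is left as a placeholder because the exact permutations depend on nauty's output:

# Hypothetical parametrization sketch -- the repo's real graph list is built elsewhere.
import pytest
from pynauty import Graph

# A 4-cycle: its automorphism group is dihedral of order 8; all vertices lie in one orbit.
c4 = Graph(number_of_vertices=4, directed=False, adjacency_dict={0: [1, 3], 1: [2], 2: [3]})

EXPECTED_GENS = None  # fill in from a dreadnaut run; exact permutation lists depend on nauty


@pytest.fixture(params=[('cycle-4', c4, 1, 8.0, EXPECTED_GENS)])
def graph(request):
    # Yields [name, Graph, numorbit, grpsize, generators] tuples to test_autgrp.
    return request.param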
24.090909
75
0.683019
68
530
5.279412
0.632353
0.125348
0.139276
0
0
0
0
0
0
0
0
0.006993
0.190566
530
21
76
25.238095
0.829837
0.309434
0
0
0
0
0.027933
0
0
0
0
0
0.1
1
0.1
false
0
0.3
0
0.4
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e563d88bca3ef7c6f932060d4826c5924fd546b6
3,284
py
Python
easytrader/pazq_remote_trader.py
CHiSec/nextrader
28b9becf23f6e40d36f5bda0515ac383913899d6
[ "MIT" ]
null
null
null
easytrader/pazq_remote_trader.py
CHiSec/nextrader
28b9becf23f6e40d36f5bda0515ac383913899d6
[ "MIT" ]
null
null
null
easytrader/pazq_remote_trader.py
CHiSec/nextrader
28b9becf23f6e40d36f5bda0515ac383913899d6
[ "MIT" ]
null
null
null
import abc
import requests
import time
import json

from easytrader.utils.misc import file2dict


class IRemoteTrader(abc.ABC):
    @abc.abstractmethod
    def prepare(
        self,
        config_path=None,
        user=None,
        password=None,
        token=None,
        address=None,
    ):
        pass

    @property
    @abc.abstractmethod
    def balance(self):
        pass

    @property
    @abc.abstractmethod
    def position(self):
        pass

    @property
    @abc.abstractmethod
    def today_trades(self):
        pass

    @property
    @abc.abstractmethod
    def today_entrusts(self):
        pass

    @abc.abstractmethod
    def buy(self, stock_id, price: float, amount: int):
        pass

    @abc.abstractmethod
    def sell(self, stock_id, price: float, amount: int):
        pass

    @abc.abstractmethod
    def cancel_entrust(self, entrust_no: str):
        pass


class PAZQRemoteTrader(IRemoteTrader):
    def _api_get(self, func_name: str):
        try:
            return requests.get(
                self.address + func_name,
                timeout=self.timeout,
                headers={'trader-token': self.token}
            ).json()
        except Exception as e:
            print(e)
            return {'status': 'fail', 'msg': 'Network error.'}

    def _api_post(self, func_name: str, params: dict):
        try:
            return requests.post(
                self.address + func_name,
                timeout=self.timeout,
                headers={'trader-token': self.token},
                params=params
            ).json()
        except Exception as e:
            return {'status': 'fail', 'msg': 'Network error.', 'info': e}

    def prepare(
        self,
        config_path=None,
        user=None,
        password=None,
        token=None,
        address=None,
        timeout=5,
    ):
        if config_path is not None:
            account = file2dict(config_path)
            token = account['token']
            #user = account['user']
            #password = account['password']
            address = account['address']
            timeout = account['timeout']
        self.token = token
        self.user = user
        self.password = password
        self.address = address
        self.timeout = timeout
        return self._api_get('prepare')

    @property
    def balance(self):
        return self._api_get("balance")

    @property
    def position(self):
        return self._api_get("position")

    @property
    def today_trades(self):
        return self._api_get("today_trades")

    @property
    def today_entrusts(self):
        return self._api_get("today_entrusts")

    def buy(self, stock_id, price: float, amount: int):
        return self._api_post("buy", {'stock_id': stock_id, 'price': price, 'amount': amount})

    def sell(self, stock_id, price: float, amount: int):
        return self._api_post("sell", {'stock_id': stock_id, 'price': price, 'amount': amount})

    def cancel_entrust(self, entrust_no: str):
        data = self._api_post("cancel_entrust", {'entrust_no': entrust_no})
        if data['status'] == 'success' and 'success' not in data['data']['message']:
            data['status'] = 'fail'
        return data
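A hedged usage sketch of the trader class; the token and address are placeholders and assume a remote trader service exposing the endpoints used above (note `address` must end with '/' since it is concatenated directly with the endpoint name):

# Illustrative only -- placeholder token/URL, not real credentials.
trader = PAZQRemoteTrader()
trader.prepare(token='YOUR-TOKEN', address='http://127.0.0.1:8888/', timeout=5)
print(trader.balance)                         # GET <address>balance
resp = trader.buy('000001', price=10.5, amount=100)
if resp.get('status') != 'fail':
    print(resp)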
26.063492
95
0.563033
356
3,284
5.05618
0.213483
0.075556
0.088889
0.044444
0.538333
0.474444
0.426667
0.313333
0.313333
0.258889
0
0.001356
0.326431
3,284
125
96
26.272
0.812387
0.015834
0
0.621359
0
0
0.080186
0
0
0
0
0
0
1
0.174757
false
0.106796
0.048544
0.058252
0.359223
0.009709
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
e565c470b23648889679a52fd97863eab35ec86e
3,496
py
Python
Game/images.py
mrElnekave/Hallow-Valley
6c3ba0dc3932839941a00362da0212850b2b20a6
[ "MIT" ]
null
null
null
Game/images.py
mrElnekave/Hallow-Valley
6c3ba0dc3932839941a00362da0212850b2b20a6
[ "MIT" ]
null
null
null
Game/images.py
mrElnekave/Hallow-Valley
6c3ba0dc3932839941a00362da0212850b2b20a6
[ "MIT" ]
null
null
null
import pygame, constants, copy

# pygame.init()
pygame.display.set_mode(constants.default_size)

current_path = constants.current_path + "Pixel Images\\"


def load_img(path, colorkey=(255, 255, 255)):
    img = pygame.image.load(current_path + path).convert()
    img.set_colorkey(colorkey)
    return img


def create_path(path: str):
    """
    :param path: the relative path from the pixel images folder
    :return: the relative path from the root of the project
    """
    return current_path + path


def darken_except(pic, pos):
    dark_picture = obscure(pic, (0, 0, 0), 200)
    pygame.draw.circle(dark_picture, (255, 255, 255), pos, 20)
    dark_picture.set_colorkey((255, 255, 255))
    pic.blit(dark_picture, (0, 0))


def switch_base():
    global menu_base
    if menu_base == menu_base_dark:
        menu_base = menu_base_clear
    else:
        menu_base = menu_base_dark


def obscure(pic, color, alpha):
    overlay = pygame.Surface(pic.get_size())
    overlay.fill(color)
    overlay.set_alpha(alpha)
    return overlay


# intro
small_bolt = load_img("small_bolt.png", (0, 0, 0))
medium_bolt = load_img("medium_bolt.png", (0, 0, 0))
large_bolt = load_img("large_bolt.png", (0, 0, 0))

clearCloud = pygame.image.load(create_path("Clear Clouds.png"))
stormCloud = pygame.image.load(create_path("Storm Clouds.png"))

mountain_range_height = 200
menu_base = pygame.transform.scale(load_img("main_menu.png"), constants.size)
mountain_1 = load_img("Title Screen Mountain.png", (0, 0, 0))
mountain_2 = load_img("Title Screen Mountain 2.png", (0, 0, 0))
mountain_3 = load_img("Title Screen Mountain 3.png", (0, 0, 0))
pygame.draw.rect(menu_base, (139, 195, 74),
                 pygame.Rect((0, mountain_range_height + mountain_1.get_height() - 20), menu_base.get_size()))
menu_base.blit(mountain_1, (-20, mountain_range_height))
menu_base.blit(mountain_2, (200, mountain_range_height))
menu_base.blit(mountain_3, (120, mountain_range_height))

menu_base_clear = copy.copy(menu_base)
menu_base = menu_base_clear
menu_base_clear.blit(pygame.transform.scale(clearCloud, (60, 20)), (15, 20))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (70, 30)), (70, 40))
menu_base_clear.blit(clearCloud, (120, 0))
menu_base_clear.blit(pygame.transform.scale(clearCloud, (79, 30)), (250, 30))
menu_base_clear.blit(clearCloud, (275, 0))

menu_base_dark = copy.copy(menu_base)
dark_picture = obscure(menu_base_dark, (0, 0, 0), 200)
# drawing on all the lightnings
menu_base_dark.blit(dark_picture, (0, 0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (60, 20)), (15, 20))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (70, 30)), (70, 40))
menu_base_dark.blit(stormCloud, (120, 0))
menu_base_dark.blit(pygame.transform.scale(stormCloud, (79, 30)), (250, 30))
menu_base_dark.blit(stormCloud, (275, 0))
menu_base_dark.blit(small_bolt, (40, 40))
menu_base_dark.blit(small_bolt, (200, 50))
menu_base_dark.blit(medium_bolt, (100, 70))
menu_base_dark.blit(medium_bolt, (350, 10))
menu_base_dark.blit(medium_bolt, (150, 20))
menu_base_dark.blit(medium_bolt, (300, 60))

# map and notifs
demo_map = pygame.image.load(create_path("Demo Map.png")).convert()
demo_map = pygame.transform.scale(demo_map, (360, 360))
demo_mask = demo_map.copy()
demo_mask.fill((0, 0, 0))
simple_map = pygame.image.load(create_path("Simple Map.png")).convert()

# 150 by 150
lava = pygame.image.load(create_path("Lava.png"))
poison = pygame.image.load(create_path("Poison Lake.png"))
cactus = pygame.image.load(create_path("Cactus1.png"))
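Because this module loads and composes its surfaces at import time, typical use is simply to import it once a display exists. A hedged sketch, assuming the repo's `constants` module and image assets are available on disk:

# Illustrative only -- assumes the game's constants module and assets exist.
import pygame
import images  # importing triggers the loads and menu composition above

screen = pygame.display.get_surface()
screen.blit(images.menu_base, (0, 0))
images.darken_except(images.menu_base, pos=(200, 150))  # spotlight at (200, 150)
pygame.display.flip()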
36.041237
136
0.735984
564
3,496
4.324468
0.207447
0.121361
0.083641
0.078721
0.472325
0.266913
0.147191
0.115211
0.03854
0
0
0.066065
0.112414
3,496
96
137
36.416667
0.719948
0.05492
0
0.028986
0
0
0.073476
0
0
0
0
0
0
1
0.072464
false
0.014493
0.014493
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5669614b0e617bd22827739713131fdd9270edc
2,030
py
Python
examples/old-examples/Kivy/window_coordinates.py
asnt/moderngl
b39cedd8cf216c34e43371b4aec822f6084f0f79
[ "MIT" ]
916
2019-03-11T19:15:20.000Z
2022-03-31T19:22:16.000Z
examples/old-examples/Kivy/window_coordinates.py
asnt/moderngl
b39cedd8cf216c34e43371b4aec822f6084f0f79
[ "MIT" ]
218
2019-03-11T06:05:52.000Z
2022-03-30T16:59:22.000Z
examples/old-examples/Kivy/window_coordinates.py
asnt/moderngl
b39cedd8cf216c34e43371b4aec822f6084f0f79
[ "MIT" ]
110
2019-04-06T18:32:24.000Z
2022-03-21T20:30:47.000Z
import struct

import ModernGL

from kivy.app import App
from kivy.core.window import Window
from kivy.graphics import Callback
from kivy.uix.widget import Widget


class CustomWidget(Widget):
    def __init__(self, **kwargs):
        super(CustomWidget, self).__init__(**kwargs)

        with self.canvas:
            self.ctx = ModernGL.create_context()

            self.prog = self.ctx.program([
                self.ctx.vertex_shader('''
                    #version 330

                    uniform vec2 WindowSize;

                    in vec2 in_vert;
                    in vec3 in_color;

                    out vec3 v_color;

                    void main() {
                        v_color = in_color;
                        gl_Position = vec4(in_vert / WindowSize * 2.0, 0.0, 1.0);
                    }
                '''),
                self.ctx.fragment_shader('''
                    #version 330

                    in vec3 v_color;
                    out vec4 f_color;

                    void main() {
                        f_color = vec4(v_color, 1.0);
                    }
                '''),
            ])

            self.window_size = self.prog.uniforms['WindowSize']

            self.vbo = self.ctx.buffer(struct.pack(
                '15f',
                0.0, 100.0, 1.0, 0.0, 0.0,
                -86.0, -50.0, 0.0, 1.0, 0.0,
                86.0, -50.0, 0.0, 0.0, 1.0,
            ))

            self.vao = self.ctx.simple_vertex_array(self.prog, self.vbo, ['in_vert', 'in_color'])

            Callback(self.draw)

    def draw(self, *args):
        self.width, self.height = Window.size
        self.ctx.viewport = (0, 0, self.width, self.height)
        self.ctx.clear(0.9, 0.9, 0.9)
        self.ctx.enable(ModernGL.BLEND)
        self.window_size.value = (self.width, self.height)
        self.vao.render()

    def ask_update(self, *args):
        self.canvas.ask_update()


class MainApp(App):
    def build(self):
        return CustomWidget()


if __name__ == '__main__':
    MainApp().run()
26.363636
97
0.484729
238
2,030
3.97479
0.336134
0.033827
0.028541
0.016913
0.086681
0.023256
0.023256
0.023256
0.023256
0
0
0.055738
0.399015
2,030
76
98
26.710526
0.719672
0
0
0.109091
0
0
0.328571
0
0
0
0
0
0
0
null
null
0
0.109091
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
e566b297876dff7f7531c8e89abb6a0cb5ac2b19
3,589
py
Python
shogun/records/generic.py
menpo/shogun
013baff88cb495e8f4314826865ec7e332859636
[ "MIT", "BSD-3-Clause" ]
1
2020-11-26T21:04:20.000Z
2020-11-26T21:04:20.000Z
shogun/records/generic.py
menpo/shogun
013baff88cb495e8f4314826865ec7e332859636
[ "MIT", "BSD-3-Clause" ]
null
null
null
shogun/records/generic.py
menpo/shogun
013baff88cb495e8f4314826865ec7e332859636
[ "MIT", "BSD-3-Clause" ]
null
null
null
import inspect
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Mapping, Optional, Type, TypeVar

from .error import NotARecordClass

T = TypeVar("T")
FieldType = TypeVar("FieldType")


class DatargsParams:
    def __init__(self, parser: Optional[Mapping[str, Any]] = None):
        self.parser = parser or {}


class RecordField(Generic[FieldType, T], metaclass=ABCMeta):
    """
    Abstract base class for fields of dataclasses or attrs classes.
    """

    field: FieldType

    def __init__(self, field):
        self.field = field

    @abstractmethod
    def is_required(self) -> bool:
        """
        Return whether field is required.
        """
        pass

    @property
    @abstractmethod
    def default(self) -> T:
        pass

    @property
    @abstractmethod
    def converter(self):
        pass

    @property
    @abstractmethod
    def name(self) -> str:
        pass

    @property
    @abstractmethod
    def type(self) -> Type[T]:
        pass

    @property
    @abstractmethod
    def metadata(self) -> Mapping[str, Any]:
        pass

    def has_default(self) -> bool:
        """
        Helper method to indicate whether a field has a default value.
        Used to make intention clearer in call sites.
        """
        return not self.is_required()


class RecordClass(Generic[FieldType], metaclass=ABCMeta):
    """
    Abstract base class for dataclasses or attrs classes.
    """

    # The name of the attribute that holds field definitions
    fields_attribute: str = "__invalid__"
    # The type to wrap fields with
    field_wrapper_type: Type[RecordField]

    _implementors: List[Type["RecordClass"]] = []

    def __init_subclass__(cls, **kwargs) -> None:
        super().__init_subclass__()
        if not inspect.isabstract(cls):
            cls._implementors.append(cls)

    def __init__(self, cls) -> None:
        self.cls: type = cls

    @property
    def datargs_params(self) -> DatargsParams:
        return getattr(self.cls, "__datargs_params__", DatargsParams())

    @property
    def parser_params(self) -> Mapping[str, Any]:
        return self.datargs_params.parser

    @property
    def name(self) -> str:
        return self.cls.__name__

    @abstractmethod
    def fields_dict(self) -> Mapping[str, RecordField]:
        """
        Returns a mapping of field names to field wrapper classes.
        """
        pass

    @classmethod
    def can_wrap_class(cls, potential_record_class) -> bool:
        """
        Returns whether this class is the appropriate implementation for
        wrapping `potential_record_class`.
        """
        return getattr(potential_record_class, cls.fields_attribute, None) is not None

    @classmethod
    def wrap_class(cls, record_class) -> "RecordClass":
        """
        Wrap `record_class` with the appropriate wrapper.
        """
        for candidate in cls._implementors:
            if candidate.can_wrap_class(record_class):
                return candidate(record_class)
        if getattr(record_class, "__attrs_attrs__", None) is not None:
            raise NotARecordClass(
                f"can't accept '{record_class.__name__}' because it is an attrs class and attrs is not installed"
            )
        raise NotARecordClass(
            f"class '{record_class.__name__}' is not a dataclass nor an attrs class"
        )

    @classmethod
    def get_field(cls, field: FieldType) -> RecordField:
        """
        Wrap field with field classes with a uniform interface.
        """
        return cls.field_wrapper_type(field)
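The registry pattern in `__init_subclass__`/`wrap_class` is easiest to see with a concrete implementor. A hedged sketch of a dataclass-backed subclass; the repo's real implementation almost certainly differs in detail:

# Hypothetical dataclass implementor -- a sketch, not the repo's actual subclass.
import dataclasses
from typing import Mapping


class DataclassField(RecordField):
    def is_required(self) -> bool:
        return self.field.default is dataclasses.MISSING

    @property
    def default(self):
        return self.field.default

    @property
    def converter(self):
        return None

    @property
    def name(self) -> str:
        return self.field.name

    @property
    def type(self):
        return self.field.type

    @property
    def metadata(self) -> Mapping:
        return self.field.metadata


class DataclassRecord(RecordClass):
    # Dataclasses mark themselves with __dataclass_fields__, so can_wrap_class matches them.
    fields_attribute = "__dataclass_fields__"
    field_wrapper_type = DataclassField

    def fields_dict(self):
        return {f.name: self.get_field(f) for f in dataclasses.fields(self.cls)}


@dataclasses.dataclass
class Point:
    x: int
    y: int = 0


wrapped = RecordClass.wrap_class(Point)          # dispatches to DataclassRecord via the registry
print(wrapped.fields_dict()["y"].has_default())  # True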
26.783582
113
0.633881
403
3,589
5.439206
0.272953
0.050182
0.059307
0.06615
0.060219
0.032847
0
0
0
0
0
0
0.275007
3,589
133
114
26.984962
0.842429
0.169964
0
0.371795
0
0
0.085327
0.017851
0
0
0
0
0
1
0.230769
false
0.089744
0.051282
0.038462
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
e566f80f2af89ff3cebc3584e47b99d358ead339
481
py
Python
example/get_concurrent.py
sojin-project/scrape-academy
5a18f5b497a6b3b85049ec1a4451b6a333e84353
[ "MIT" ]
null
null
null
example/get_concurrent.py
sojin-project/scrape-academy
5a18f5b497a6b3b85049ec1a4451b6a333e84353
[ "MIT" ]
null
null
null
example/get_concurrent.py
sojin-project/scrape-academy
5a18f5b497a6b3b85049ec1a4451b6a333e84353
[ "MIT" ]
null
null
null
# type: ignore
import asyncio

from scrapeacademy import context, run


async def get_concurrent(url):
    # Get the same page 10 times simultaneously
    tasks = [context.get(url) for _ in range(10)]
    n = 1
    while tasks:
        done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        for result in done:
            print(f"done #{n}", result.result()[:10])
            n += 1
    print("done")


run(get_concurrent("https://www.python.jp/"))
20.913043
84
0.632017
66
481
4.530303
0.606061
0.086957
0.026756
0
0
0
0
0
0
0
0
0.022099
0.247401
481
22
85
21.863636
0.803867
0.108108
0
0
0
0
0.08216
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e56779436b35301359589f1c68b90f43fe79050c
395
py
Python
leetcode/maximum_difference_between_node_and_ancestor/maximum_difference_between_node_and_ancestor.py
sagasu/python-algorithms
d630777a3f17823165e4d72ab780ede7b10df752
[ "MIT" ]
null
null
null
leetcode/maximum_difference_between_node_and_ancestor/maximum_difference_between_node_and_ancestor.py
sagasu/python-algorithms
d630777a3f17823165e4d72ab780ede7b10df752
[ "MIT" ]
null
null
null
leetcode/maximum_difference_between_node_and_ancestor/maximum_difference_between_node_and_ancestor.py
sagasu/python-algorithms
d630777a3f17823165e4d72ab780ede7b10df752
[ "MIT" ]
null
null
null
from typing import Optional


# Definition for a binary tree node, as provided by LeetCode.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def maxAncestorDiff(self, root: Optional[TreeNode]) -> int:
        def dfs(root, mn, mx):
            if not root:
                return 0
            res = max(abs(root.val - mn), abs(root.val - mx))
            mn, mx = min(mn, root.val), max(mx, root.val)
            return max(res, dfs(root.left, mn, mx), dfs(root.right, mn, mx))

        return dfs(root, root.val, root.val)
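A quick sanity check of the recursion on a small tree (values chosen for illustration):

# Example tree [8, 3, 10, 1, 6]: the largest |ancestor - node| difference is |8 - 1| = 7.
root = TreeNode(8, TreeNode(3, TreeNode(1), TreeNode(6)), TreeNode(10))
print(Solution().maxAncestorDiff(root))  # 7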
43.888889
76
0.546835
59
395
3.661017
0.389831
0.194444
0.092593
0
0
0
0
0
0
0
0
0.003636
0.303797
395
9
77
43.888889
0.781818
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0
0
0.625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
3
e568447e5a2058ff096a7764cac780efbf11101d
213
py
Python
edx_6.00.1x_python/ex7_7.py
TimothyDJones/learn-python
687239572bee5e5c94bebb6b175b3fba4d47600e
[ "MIT" ]
null
null
null
edx_6.00.1x_python/ex7_7.py
TimothyDJones/learn-python
687239572bee5e5c94bebb6b175b3fba4d47600e
[ "MIT" ]
null
null
null
edx_6.00.1x_python/ex7_7.py
TimothyDJones/learn-python
687239572bee5e5c94bebb6b175b3fba4d47600e
[ "MIT" ]
null
null
null
# ex7_7.py


def f(n):
    """
    n: integer, n >= 0.
    """
    if n == 0:
        return 1
    else:
        return n * f(n-1)


if __name__ == "__main__":
    print(f(0))
    print(f(1))
    print(f(3))
    print(f(5))
12.529412
26
0.43662
36
213
2.333333
0.472222
0.285714
0
0
0
0
0
0
0
0
0
0.072464
0.352113
213
16
27
13.3125
0.536232
0.13615
0
0
0
0
0.047059
0
0
0
0
0
0
1
0.1
false
0
0
0
0.3
0.4
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e568882780ef0ac998bdbf653d401680333f8d0d
919
py
Python
zeus/factories/revision.py
conrad-kronos/zeus
ddb6bc313e51fb22222b30822b82d76f37dbbd35
[ "Apache-2.0" ]
221
2017-07-03T17:29:21.000Z
2021-12-07T19:56:59.000Z
zeus/factories/revision.py
conrad-kronos/zeus
ddb6bc313e51fb22222b30822b82d76f37dbbd35
[ "Apache-2.0" ]
298
2017-07-04T18:08:14.000Z
2022-03-03T22:24:51.000Z
zeus/factories/revision.py
conrad-kronos/zeus
ddb6bc313e51fb22222b30822b82d76f37dbbd35
[ "Apache-2.0" ]
24
2017-07-15T13:46:45.000Z
2020-08-16T16:14:45.000Z
import factory
import factory.faker

from datetime import timedelta

from faker import Factory

faker = Factory.create()

from zeus import models
from zeus.config import db
from zeus.utils import timezone

from .base import ModelFactory


class RevisionFactory(ModelFactory):
    sha = factory.Faker("sha1")
    repository = factory.SubFactory("zeus.factories.RepositoryFactory")
    repository_id = factory.SelfAttribute("repository.id")
    message = factory.LazyAttribute(
        lambda o: "{}\n\n{}".format(faker.sentence(), faker.sentence())
    )
    date_created = factory.LazyAttribute(
        lambda o: timezone.now() - timedelta(minutes=30)
    )

    @factory.post_generation
    def authors(self, create, extracted, **kwargs):
        if not create:
            return
        if extracted:
            self.authors = extracted
            db.session.flush()

    class Meta:
        model = models.Revision
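A hedged sketch of how a factory_boy factory like this is typically used in tests; `some_author` is a placeholder for a real `models.Author`-style instance, and an active database session is assumed:

# Illustrative usage -- assumes an app/db context such as zeus's test fixtures provide.
rev = RevisionFactory()                        # persisted Revision with generated sha/message
built = RevisionFactory.build()                # instantiated only; post_generation 'create' is False
rev2 = RevisionFactory(authors=[some_author])  # routed into the authors() hook above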
23.564103
71
0.68444
102
919
6.137255
0.509804
0.0623
0.057508
0.086262
0
0
0
0
0
0
0
0.004184
0.219804
919
38
72
24.184211
0.868898
0
0
0
0
0
0.062024
0.03482
0
0
0
0
0
1
0.035714
false
0
0.285714
0
0.607143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
e569092e20bd1bfebd7bbcdc409a0332fd58245a
18,701
py
Python
xga/xspec/run.py
DavidT3/XGA
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
[ "BSD-3-Clause" ]
12
2020-05-16T09:45:45.000Z
2022-02-14T14:41:46.000Z
xga/xspec/run.py
DavidT3/XGA
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
[ "BSD-3-Clause" ]
684
2020-05-28T08:52:09.000Z
2022-03-31T10:56:24.000Z
xga/xspec/run.py
DavidT3/XGA
cde51c3f29f98b5f1e981fb6d327c04072b0ba38
[ "BSD-3-Clause" ]
2
2022-02-04T10:55:55.000Z
2022-02-04T11:30:56.000Z
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner (david.turner@sussex.ac.uk) 09/06/2021, 16:34. Copyright (c) David J Turner

import os
import warnings
from functools import wraps
# from multiprocessing.dummy import Pool
from multiprocessing import Pool
from subprocess import Popen, PIPE, TimeoutExpired
from typing import Tuple, Union

import fitsio
import pandas as pd
from fitsio import FITS
from tqdm import tqdm

from .. import XSPEC_VERSION
from ..exceptions import XSPECFitError, MultipleMatchError, NoMatchFoundError, XSPECNotFoundError
from ..samples.base import BaseSample
from ..sources import BaseSource


def execute_cmd(x_script: str, out_file: str, src: str, run_type: str, timeout: float) \
        -> Tuple[Union[FITS, str], str, bool, list, list]:
    """
    This function is called for the local compute option. It will run the supplied XSPEC script, then
    parse the output for errors and check that the expected output file has been created.

    :param str x_script: The path to an XSPEC script to be run.
    :param str out_file: The expected path for the output file of that XSPEC script.
    :param str src: A string representation of the source object that this fit is associated with.
    :param str run_type: A flag that tells this function what type of run this is; e.g. fit or conv_factors.
    :param float timeout: The length of time (in seconds) which the XSPEC script is allowed to run for
        before being killed.
    :return: FITS object of the results, string repr of the source associated with this fit, boolean
        variable describing if this fit can be used, list of any errors found, list of any warnings found.
    :rtype: Tuple[Union[FITS, str], str, bool, list, list]
    """
    if XSPEC_VERSION is None:
        raise XSPECNotFoundError("There is no XSPEC installation detectable on this machine.")

    # We assume the output will be usable to start with
    usable = True

    cmd = "xspec - {}".format(x_script)
    # I add exec to the beginning to make sure that the command inherits the same process ID as the
    #  shell, which allows the timeout to kill the XSPEC run rather than the shell process. Entirely
    #  thanks to slayton on
    #  https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true
    xspec_proc = Popen("exec " + cmd, shell=True, stdout=PIPE, stderr=PIPE)

    # This makes sure the process is killed if it does timeout
    try:
        out, err = xspec_proc.communicate(timeout=timeout)
    except TimeoutExpired:
        xspec_proc.kill()
        out, err = xspec_proc.communicate()
        # Need to infer the name of the source to supply it in the warning
        source_name = x_script.split('/')[-1].split("_")[0]
        warnings.warn("An XSPEC fit for {} has timed out".format(source_name))
        usable = False

    out = out.decode("UTF-8").split("\n")
    err = err.decode("UTF-8").split("\n")

    err_out_lines = [line.split("***Error: ")[-1] for line in out if "***Error" in line]
    warn_out_lines = [line.split("***Warning: ")[-1] for line in out if "***Warning" in line]
    err_err_lines = [line.split("***Error: ")[-1] for line in err if "***Error" in line]
    warn_err_lines = [line.split("***Warning: ")[-1] for line in err if "***Warning" in line]

    if usable and len(err_out_lines) == 0 and len(err_err_lines) == 0:
        usable = True
    else:
        usable = False

    error = err_out_lines + err_err_lines
    warn = warn_out_lines + warn_err_lines

    if os.path.exists(out_file + "_info.csv") and run_type == "fit":
        # The original version of the xga_output.tcl script output everything as one nice neat fits file
        #  but life is full of extraordinary inconveniences and for some reason it didn't work if called
        #  from a Jupyter Notebook. So now I'm going to smoosh all the csv outputs into one fits.
        results = pd.read_csv(out_file + "_results.csv", header="infer")
        # This is the csv with the fit results in, creates new fits file and adds in
        fitsio.write(out_file + ".fits", results.to_records(index=False), extname="results", clobber=True)
        del results

        # The information about individual spectra, exposure times, luminosities etc.
        spec_info = pd.read_csv(out_file + "_info.csv", header="infer")
        # Gets added into the existing file
        fitsio.write(out_file + ".fits", spec_info.to_records(index=False), extname="spec_info")
        del spec_info

        # This finds all of the matching spectrum plot csvs that were generated
        rel_path = "/".join(out_file.split('/')[0:-1])
        # This is mostly just used to find how many files there are
        spec_tabs = [rel_path + "/" + sp for sp in os.listdir(rel_path)
                     if "{}_spec".format(out_file) in rel_path + "/" + sp]
        for spec_i in range(1, len(spec_tabs)+1):
            # Loop through and redefine names like this to ensure they're in the right order
            spec_plot = pd.read_csv(out_file + "_spec{}.csv".format(spec_i), header="infer")
            # Adds all the plot tables into the existing fits file in the right order
            fitsio.write(out_file + ".fits", spec_plot.to_records(index=False), extname="plot{}".format(spec_i))
            del spec_plot

        # This reads in the fits we just made
        with FITS(out_file + ".fits") as res_tables:
            tab_names = [tab.get_extname() for tab in res_tables]
            if "results" not in tab_names or "spec_info" not in tab_names:
                usable = False
        # I'm going to try returning the file path as that should be pickleable
        res_tables = out_file + ".fits"
    elif os.path.exists(out_file) and run_type == "conv_factors":
        res_tables = out_file
        usable = True
    else:
        res_tables = None
        usable = False

    return res_tables, src, usable, error, warn


def xspec_call(xspec_func):
    """
    This is used as a decorator for functions that produce XSPEC scripts. Depending on the system
    that XGA is running on (and whether the user requests parallel execution), the method of executing
    the XSPEC commands will change. This supports multi-threading.
    :return:
    """

    @wraps(xspec_func)
    def wrapper(*args, **kwargs):
        # The first argument of all of these XSPEC functions will be the source object (or a list of),
        #  so rather than return them from the XSPEC model function I'll just access them like this.
        if isinstance(args[0], BaseSource):
            sources = [args[0]]
        elif isinstance(args[0], (list, BaseSample)):
            sources = args[0]
        else:
            raise TypeError("Please pass a source object, or a list of source objects.")

        # This is the output from whatever function this is a decorator for
        # First return is a list of paths of XSPEC scripts to execute, second is the expected output
        #  paths, and 3rd is the number of cores to use.
        # run_type describes the type of XSPEC script being run, for instance a fit or a fakeit run
        #  to measure countrate to luminosity conversion constants
        script_list, paths, cores, run_type, src_inds, radii, timeout = xspec_func(*args, **kwargs)
        src_lookup = {repr(src): src_ind for src_ind, src in enumerate(sources)}
        rel_src_repr = [repr(sources[src_ind]) for src_ind in src_inds]

        # Make sure the timeout is converted to seconds, then just stored as a float
        timeout = timeout.to('second').value

        # This is what the returned information from the execute command gets stored in before being
        #  parceled out to source and spectrum objects
        results = {s: [] for s in src_lookup}
        if run_type == "fit":
            desc = "Running XSPEC Fits"
        elif run_type == "conv_factors":
            desc = "Running XSPEC Simulations"

        if len(script_list) > 0:
            # This mode runs the XSPEC locally in a multiprocessing pool.
            with tqdm(total=len(script_list), desc=desc) as fit, Pool(cores) as pool:
                def callback(results_in):
                    """
                    Callback function for the apply_async pool method, gets called when a task
                    finishes and something is returned.
                    """
                    nonlocal fit  # The progress bar will need updating
                    nonlocal results  # The dictionary the command call results are added to
                    if results_in[0] is None:
                        fit.update(1)
                        return
                    else:
                        res_fits, rel_src, successful, err_list, warn_list = results_in
                        results[rel_src].append([res_fits, successful, err_list, warn_list])
                        fit.update(1)

                for s_ind, s in enumerate(script_list):
                    pth = paths[s_ind]
                    src = rel_src_repr[s_ind]
                    pool.apply_async(execute_cmd, args=(s, pth, src, run_type, timeout), callback=callback)
                pool.close()  # No more tasks can be added to the pool
                pool.join()  # Joins the pool, the code will only move on once the pool is empty.

        elif len(script_list) == 0:
            warnings.warn("All XSPEC operations had already been run.")

        # Now we assign the fit results to source objects
        for src_repr in results:
            # Made this lookup list earlier, using string representations of source objects.
            #  Finds the ind of the list of sources that we should add these results to
            ind = src_lookup[src_repr]
            s = sources[ind]

            # This flag tells this method if the current set of fits are part of an annular spectra or not
            ann_fit = False
            ann_results = {}
            ann_lums = {}
            ann_obs_order = {}

            for res_set in results[src_repr]:
                if len(res_set) != 0 and res_set[1] and run_type == "fit":
                    with FITS(res_set[0]) as res_table:
                        global_results = res_table["RESULTS"][0]
                        model = global_results["MODEL"].strip(" ")

                        # Just define this to check if this is an annular fit or not
                        first_key = res_table["SPEC_INFO"][0]["SPEC_PATH"].strip(" ").split("/")[-1].split('ra')[-1]
                        first_key = first_key.split('_spec.fits')[0]
                        if "_ident" in first_key:
                            ann_fit = True

                        inst_lums = {}
                        obs_order = []
                        for line_ind, line in enumerate(res_table["SPEC_INFO"]):
                            sp_info = line["SPEC_PATH"].strip(" ").split("/")[-1].split("_")
                            # Want to derive the spectra storage key from the file name, this strips off
                            #  some unnecessary info
                            sp_key = line["SPEC_PATH"].strip(" ").split("/")[-1].split('ra')[-1].split('_spec.fits')[0]

                            # If its not an AnnularSpectra fit then we can just fetch the spectrum from
                            #  the source the normal way
                            if not ann_fit:
                                # This adds ra back on, and removes any ident information if it is there
                                sp_key = 'ra' + sp_key
                                # Finds the appropriate matching spectrum object for the current table line
                                spec = s.get_products("spectrum", sp_info[0], sp_info[1], extra_key=sp_key)[0]
                            else:
                                obs_order.append([sp_info[0], sp_info[1]])
                                ann_id = int(sp_key.split("_ident")[-1].split("_")[1])
                                sp_key = 'ra' + sp_key.split('_ident')[0]
                                first_part = sp_key.split('ri')[0]
                                second_part = "_" + "_".join(sp_key.split('ro')[-1].split("_")[1:])

                                ann_sp_key = first_part + "ar" + "_".join(radii[ind].value.astype(str)) + second_part
                                ann_specs = s.get_products("combined_spectrum", extra_key=ann_sp_key)
                                if len(ann_specs) > 1:
                                    raise MultipleMatchError("I have found multiple matches for that AnnularSpectra, "
                                                             "this is the developers fault, not yours.")
                                elif len(ann_specs) == 0:
                                    raise NoMatchFoundError("Somehow I haven't found the AnnularSpectra that you "
                                                            "fitted, this is the developers fault, not yours")
                                else:
                                    ann_spec = ann_specs[0]
                                    spec = ann_spec.get_spectra(ann_id, sp_info[0], sp_info[1])

                            # Adds information from this fit to the spectrum object.
                            spec.add_fit_data(str(model), line, res_table["PLOT" + str(line_ind + 1)])

                            # The add_fit_data method formats the luminosities nicely, so we grab them
                            #  back out to help grab the luminosity needed to pass to the source object
                            #  'add_fit_data' method
                            processed_lums = spec.get_luminosities(model)
                            if spec.instrument not in inst_lums:
                                inst_lums[spec.instrument] = processed_lums

                        # Ideally the luminosity reported in the source object will be a PN lum, but its
                        #  not impossible that a PN value won't be available. - it shouldn't matter much,
                        #  lums across the cameras are consistent
                        if "pn" in inst_lums:
                            chosen_lums = inst_lums["pn"]
                        # mos2 generally better than mos1, as mos1 has CCD damage after a certain point
                        #  in its life
                        elif "mos2" in inst_lums:
                            chosen_lums = inst_lums["mos2"]
                        else:
                            chosen_lums = inst_lums["mos1"]

                        if ann_fit:
                            ann_results[spec.annulus_ident] = global_results
                            ann_lums[spec.annulus_ident] = chosen_lums
                            ann_obs_order[spec.annulus_ident] = obs_order
                        elif not ann_fit:
                            # Push global fit results, luminosities etc. into the corresponding source object.
                            s.add_fit_data(model, global_results, chosen_lums, sp_key)

                elif len(res_set) != 0 and res_set[1] and run_type == "conv_factors":
                    res_table = pd.read_csv(res_set[0], dtype={"lo_en": str, "hi_en": str})
                    # Gets the model name from the file name of the output results table
                    model = res_set[0].split("_")[-3]

                    # We can infer the storage key from the name of the results table, just makes it
                    #  easier to grab the correct spectra
                    storage_key = res_set[0].split('/')[-1].split(s.name)[-1][1:].split(model)[0][:-1]

                    # Grabs the ObsID+instrument combinations from the headers of the csv. Makes sure
                    #  they are unique by going to a set (because there will be two columns for each
                    #  ObsID+Instrument, rate and Lx). First two columns are skipped because they are
                    #  energy limits
                    combos = list(set([c.split("_")[1] for c in res_table.columns[2:]]))
                    # Getting the spectra for each column, then assigning rates and lums
                    for comb in combos:
                        spec = s.get_products("spectrum", comb[:10], comb[10:], extra_key=storage_key)[0]
                        spec.add_conv_factors(res_table["lo_en"].values, res_table["hi_en"].values,
                                              res_table["rate_{}".format(comb)].values,
                                              res_table["Lx_{}".format(comb)].values, model)

                elif len(res_set) != 0 and not res_set[1]:
                    for err in res_set[2]:
                        raise XSPECFitError(err)

            if ann_fit:
                # We fetch the annular spectra object that we just fitted, searching by using the set ID
                #  of the last spectra that was opened in the loop
                ann_spec = s.get_annular_spectra(set_id=spec.set_ident)
                try:
                    ann_spec.add_fit_data(model, ann_results, ann_lums, ann_obs_order)

                    # The most likely reason for running XSPEC fits to a profile is to create a temp.
                    #  profile, so we check whether constant*tbabs*apec (single_temp_apec function) has
                    #  been run and if so generate a Tx profile automatically
                    if model == "constant*tbabs*apec":
                        temp_prof = ann_spec.generate_profile(model, 'kT', 'keV')
                        s.update_products(temp_prof)

                        # Normalisation profiles can be useful for many things, so we generate them too
                        norm_prof = ann_spec.generate_profile(model, 'norm', 'cm^-5')
                        s.update_products(norm_prof)

                        if 'Abundanc' in ann_spec.get_results(0, 'constant*tbabs*apec'):
                            met_prof = ann_spec.generate_profile(model, 'Abundanc', '')
                            s.update_products(met_prof)
                    else:
                        raise NotImplementedError("How have you even managed to fit this model to a profile?! "
                                                  "Its not supported yet.")
                except ValueError:
                    warnings.warn("{src} annular spectra profile fit was not successful".format(src=ann_spec.src_name))

        # If only one source was passed, turn it back into a source object rather than a source
        #  object in a list.
        if len(sources) == 1:
            sources = sources[0]

        return sources

    return wrapper
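The decorator's contract is implicit: any function wrapped with `@xspec_call` must return the seven values unpacked in `wrapper`. A hedged skeleton showing only that return contract; the function name, paths, and values are placeholders, not XGA's real fit functions:

# Hypothetical skeleton -- shows only the return contract @xspec_call expects.
from astropy.units import Quantity


@xspec_call
def my_fit(sources):
    script_list = ['/path/to/src0_fit.xcm']  # XSPEC scripts to execute
    paths = ['/path/to/src0_results']        # expected output stems for each script
    cores = 4                                # size of the multiprocessing Pool
    run_type = "fit"                         # or "conv_factors"
    src_inds = [0]                           # which source each script belongs to
    radii = None                             # annuli radii, only used for annular fits
    timeout = Quantity(1, 'hour')            # astropy Quantity; converted via .to('second')
    return script_list, paths, cores, run_type, src_inds, radii, timeout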
55.492582
119
0.579006
2,465
18,701
4.260852
0.223124
0.009997
0.004665
0.003808
0.124155
0.08112
0.048177
0.028373
0.005713
0.005713
0
0.008347
0.340142
18,701
336
120
55.657738
0.842788
0.331587
0
0.107143
1
0
0.092035
0
0.005102
0
0
0
0
1
0.020408
false
0.005102
0.071429
0
0.112245
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e56946e13d2d2d1c51e541739f896030848cdd8a
76,894
py
Python
utils/utils_df_nn.py
Lifelong-ML/LASEM
c4ec052c850e37f54bc3e6faf6b988a4c5239f10
[ "MIT" ]
8
2021-07-06T14:35:50.000Z
2022-03-03T08:45:13.000Z
utils/utils_df_nn.py
Lifelong-ML/LASEM
c4ec052c850e37f54bc3e6faf6b988a4c5239f10
[ "MIT" ]
null
null
null
utils/utils_df_nn.py
Lifelong-ML/LASEM
c4ec052c850e37f54bc3e6faf6b988a4c5239f10
[ "MIT" ]
1
2021-07-09T09:26:11.000Z
2021-07-09T09:26:11.000Z
import numpy as np import tensorflow as tf from utils.utils import * from utils.utils_nn import * ########################################################### ##### functions to generate parameter ##### ########################################################### #### function to generate knowledge-base parameters for ELLA_tensorfactor layer def new_ELLA_KB_param(shape, layer_number, task_number, reg_type, init_tensor=None, trainable=True): #kb_name = 'KB_'+str(layer_number)+'_'+str(task_number) kb_name = 'KB_'+str(layer_number) if init_tensor is None: param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, trainable=trainable) elif type(init_tensor) == np.ndarray: param_to_return = tf.get_variable(name=kb_name, shape=shape, dtype=tf.float32, regularizer=reg_type, initializer=tf.constant_initializer(init_tensor), trainable=trainable) else: param_to_return = init_tensor return param_to_return #### function to generate task-specific parameters for ELLA_tensorfactor layer def new_ELLA_cnn_deconv_TS_param(shape, layer_number, task_number, reg_type): ts_w_name, ts_b_name, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_Convb0_'+str(layer_number)+'_'+str(task_number) return [tf.get_variable(name=ts_w_name, shape=shape[0], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_b_name, shape=shape[1], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_p_name, shape=shape[2], dtype=tf.float32, regularizer=reg_type)] #### function to generate task-specific parameters for ELLA_tensorfactor layer def new_ELLA_cnn_deconv_tensordot_TS_param(shape, layer_number, task_number, reg_type, init_tensor, trainable): ts_w_name, ts_b_name, ts_k_name, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_ConvW1_'+str(layer_number)+'_'+str(task_number), 'TS_Convb0_'+str(layer_number)+'_'+str(task_number) params_to_return, params_name = [], [ts_w_name, ts_b_name, ts_k_name, ts_p_name] for i, (t, n) in enumerate(zip(init_tensor, params_name)): if t is None: params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable)) elif type(t) == np.ndarray: params_to_return.append(tf.get_variable(name=n, shape=shape[i], dtype=tf.float32, regularizer=reg_type if trainable and i<3 else None, trainable=trainable, initializer=tf.constant_initializer(t))) else: params_to_return.append(t) return params_to_return #### function to generate task-specific parameters for ELLA_tensorfactor layer def new_ELLA_cnn_deconv_tensordot_TS_param2(shape, layer_number, task_number, reg_type): ts_w_name, ts_b_name, ts_k_name, ts_k_name2, ts_p_name = 'TS_DeconvW0_'+str(layer_number)+'_'+str(task_number), 'TS_Deconvb0_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_W1_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_W2_'+str(layer_number)+'_'+str(task_number), 'TS_tdot_b0_'+str(layer_number)+'_'+str(task_number) return [tf.get_variable(name=ts_w_name, shape=shape[0], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_b_name, shape=shape[1], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_k_name, shape=shape[2], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_k_name2, shape=shape[3], dtype=tf.float32, regularizer=reg_type), tf.get_variable(name=ts_p_name, shape=shape[4], dtype=tf.float32, 
regularizer=reg_type)]


###############################################################
#####  functions for adding ELLA network (CNN/Deconv ver) #####
###############################################################

#### function to generate convolutional layer with shared knowledge base
####   KB_size        : [filter_height(and width), num_of_channel]
####   TS_size        : deconv_filter_height(and width)
####   TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"

    with tf.name_scope('ELLA_cdnn_KB'):
        if KB_param is None:
            ## KB \in R^{1 \times h \times w \times c}
            KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type)
        if TS_param is None:
            ## TS1 : Deconv W \in R^{h \times w \times ch_in*ch_out \times c}
            ## TS2 : Deconv bias \in R^{ch_in*ch_out}
            ## TS3 : Conv bias \in R^{ch_out}
            TS_param = new_ELLA_cnn_deconv_TS_param([[TS_size, TS_size, ch_size[0]*ch_size[1], KB_size[1]], [1, 1, 1, ch_size[0]*ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type)

    with tf.name_scope('ELLA_cdnn_TS'):
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], ch_size[0]*ch_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
        W, b = tf.reshape(para_tmp, k_size+ch_size), TS_param[2]

    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
    return layer_eqn, [KB_param], TS_param, [W, b]


#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    _num_TS_param_per_layer = 3

    ## first element : make new KB&TS / second element : make new TS / third element : not make new para
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None)]
    if control_flag[1]:
        TS_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params = []

    layers_for_skip, next_skip_connect = [net_input], None
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1

            if layer_cnt == skip_connect_out:
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None

            ## choose the layer input and the reused KB/TS parameters according to control_flag
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            KB_param_in = None if control_flag[0] else KB_params[layer_cnt]
            TS_param_in = TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)] if control_flag[2] else None
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[layer_cnt], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)

            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            if control_flag[1]:
                TS_params = TS_params + TS_para_tmp
            elif control_flag[0]:
                KB_params = KB_params + KB_para_tmp
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None

        #### flattening output
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]

        #### add dropout layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)


#### function to generate network of cnn->ffnn
def new_ELLA_cnn_deconv_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    ## add CNN layers
    cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)

    ## add fc layers
    #fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
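## ----------------------------------------------------------------------
## Illustrative sketch of the parameter generation used above: a small
## shared KB tensor is expanded into a full conv kernel by a transposed
## convolution, then reshaped to (k, k, ch_in, ch_out). All sizes below
## are hypothetical placeholders (KB 3x3x8, kernel 5x5, ch_in=2,
## ch_out=16, deconv stride 2, so that (3-1)*2+1 == 5); it assumes only
## this module's TensorFlow 1.x import.
## ----------------------------------------------------------------------
def _dfcnn_deconv_kernel_sketch():
    KB = tf.zeros([1, 3, 3, 8])                           ## shared KB: 1 x h x w x c
    deconv_W = tf.zeros([3, 3, 2*16, 8])                  ## task-specific deconv filter
    gen = tf.nn.conv2d_transpose(KB, deconv_W, [1, 5, 5, 2*16], strides=[1, 2, 2, 1])
    return tf.reshape(gen, [5, 5, 2, 16])                 ## conv kernel: k x k x ch_in x ch_out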
###########################################################################
#####  functions for adding ELLA network (CNN/Deconv & Tensordot ver) #####
###########################################################################

####   KB_size        : [filter_height(and width), num_of_channel]
####   TS_size        : [deconv_filter_height(and width), deconv_filter_channel]
####   TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None, highway_connect_type=0, highway_W=None, highway_b=None, trainable=True, trainable_KB=True):
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"

    with tf.name_scope('ELLA_cdnn_KB'):
        ## KB \in R^{1 \times h \times w \times c}
        KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable_KB)

    ## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
    ## TS2 : Deconv bias \in R^{kb_c_out}
    ## TS3 : tensor W \in R^{kb_c_out \times ch_in \times ch_out}
    ## TS4 : Conv bias \in R^{ch_out}
    TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)

    with tf.name_scope('DFCNN_param_gen'):
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
        W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
        b = TS_param[3]

    ## HighwayNet's skip connection
    highway_params, gate = [], None
    if highway_connect_type > 0:
        with tf.name_scope('highway_connection'):
            if highway_connect_type == 1:
                x = layer_input
                if highway_W is None:
                    highway_W = new_weight([k_size[0], k_size[1], ch_size[0], ch_size[1]])
                if highway_b is None:
                    highway_b = new_bias([ch_size[1]], init_val=-2.0)
                gate, _ = new_cnn_layer(x, k_size+ch_size, stride_size=stride_size, activation_fn=None, weight=highway_W, bias=highway_b, padding_type=padding_type, max_pooling=False)
            elif highway_connect_type == 2:
                x = tf.reshape(layer_input, [-1, int(layer_input.shape[1]*layer_input.shape[2]*layer_input.shape[3])])
                if highway_W is None:
                    highway_W = new_weight([int(x.shape[1]), 1])
                if highway_b is None:
                    highway_b = new_bias([1], init_val=-2.0)
                gate = tf.broadcast_to(tf.stack([tf.stack([tf.matmul(x, highway_W) + highway_b], axis=2)], axis=3), layer_input.get_shape())
            gate = tf.nn.sigmoid(gate)
            highway_params = [highway_W, highway_b]

    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input, highway_connect_type=highway_connect_type, highway_gate=gate)
    return layer_eqn, [KB_param], TS_param, [W, b], highway_params


#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    _num_TS_param_per_layer = 4

    ## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
    if control_flag[1]:
        TS_params = []
    elif control_flag[3]:
        KB_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params = []

    layers_for_skip, next_skip_connect = [net_input], None
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1

            if layer_cnt == skip_connect_out:
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None

            ## choose the layer input and the reused KB/TS parameters according to control_flag
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            KB_param_in = None if (control_flag[0] or control_flag[3]) else KB_params[layer_cnt]
            TS_param_in = None if (control_flag[0] or control_flag[1]) else TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)]
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, _ = new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)

            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            if control_flag[1]:
                TS_params = TS_params + TS_para_tmp
            elif control_flag[3]:
                KB_params = KB_params + KB_para_tmp
            elif control_flag[0]:
                KB_params = KB_params + KB_para_tmp
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None

        #### flattening output
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]

        #### add dropout layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)


#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    ## add CNN layers
    cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_tensordot_net(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)

    ## add fc layers
    #fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
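## ----------------------------------------------------------------------
## Illustrative sketch of the tensordot variant above: the deconv output
## keeps a compact channel axis (size c), and a per-task third-order
## tensor maps c -> (ch_in, ch_out). Sizes are hypothetical placeholders;
## only this module's TensorFlow 1.x import is assumed.
## ----------------------------------------------------------------------
def _dfcnn_tensordot_kernel_sketch():
    gen = tf.zeros([5, 5, 32])                 ## generated filter stack: k x k x c
    TS3 = tf.zeros([32, 2, 16])                ## per-task tensor: c x ch_in x ch_out
    return tf.tensordot(gen, TS3, [[2], [0]])  ## conv kernel: 5 x 5 x 2 x 16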
###########################################################################
##### functions for adding ELLA network (CNN/Deconv & Tensordot ver2) #####
###########################################################################

####   KB_size        : [filter_height(and width), num_of_channel0, num_of_channel1]
####   TS_size        : [deconv_filter_height(and width), deconv_filter_channel]
####   TS_stride_size : [stride_in_height, stride_in_width]
def new_ELLA_cnn_deconv_tensordot_layer2(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_size=None, skip_connect_input=None):
    assert (k_size[0] == k_size[1] and k_size[0] == (KB_size[0]-1)*TS_stride_size[0]+1), "CNN kernel size does not match the output size of Deconv from KB"

    with tf.name_scope('ELLA_cdnn_KB'):
        if KB_param is None:
            ## KB \in R^{d \times h \times w \times c}
            KB_param = new_ELLA_KB_param([KB_size[1], KB_size[0], KB_size[0], KB_size[2]], layer_num, task_num, KB_reg_type)
        if TS_param is None:
            ## TS1 : Deconv W \in R^{h \times w \times kb_c_out \times c}
            ## TS2 : Deconv bias \in R^{kb_c_out}
            ## TS3 : tensor W \in R^{d \times ch_in}
            ## TS4 : tensor W \in R^{kb_c_out \times ch_out}
            ## TS5 : Conv bias \in R^{ch_out}
            TS_param = new_ELLA_cnn_deconv_tensordot_TS_param2([[TS_size[0], TS_size[0], TS_size[1], KB_size[2]], [1, 1, 1, TS_size[1]], [KB_size[1], ch_size[0]], [TS_size[1], ch_size[1]], [1, 1, 1, ch_size[1]]], layer_num, task_num, TS_reg_type)

    with tf.name_scope('ELLA_cdnn_TS'):
        para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [KB_size[1], k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
        if para_activation_fn is not None:
            para_tmp = para_activation_fn(para_tmp)
        para_tmp = tf.tensordot(para_tmp, TS_param[2], [[0], [0]])
        W = tf.tensordot(para_tmp, TS_param[3], [[2], [0]])
        b = TS_param[4]

    layer_eqn, _ = new_cnn_layer(layer_input, k_size+ch_size, stride_size=stride_size, activation_fn=activation_fn, weight=W, bias=b, padding_type=padding_type, max_pooling=max_pool, pool_size=pool_size, skip_connect_input=skip_connect_input)
    return layer_eqn, [KB_param], TS_param, [W, b]


#### function to generate network of convolutional layers with shared knowledge base
def new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, KB_sizes, TS_sizes, TS_stride_sizes, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_params=None, TS_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, input_size=[0, 0], task_index=0, skip_connections=[]):
    _num_TS_param_per_layer = 5

    ## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
    control_flag = [(KB_params is None and TS_params is None), (not (KB_params is None) and (TS_params is None)), not (KB_params is None or TS_params is None), ((KB_params is None) and not (TS_params is None))]
    if control_flag[1]:
        TS_params = []
    elif control_flag[3]:
        KB_params = []
    elif control_flag[0]:
        KB_params, TS_params = [], []
    cnn_gen_params = []

    layers_for_skip, next_skip_connect = [net_input], None
    with tf.name_scope('ELLA_cdnn_net'):
        layers = []
        for layer_cnt in range(len(k_sizes)//2):
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1

            if layer_cnt == skip_connect_out:
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None

            ## choose the layer input and the reused KB/TS parameters according to control_flag
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            KB_param_in = None if (control_flag[0] or control_flag[3]) else KB_params[layer_cnt]
            TS_param_in = None if (control_flag[0] or control_flag[1]) else TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)]
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp = new_ELLA_cnn_deconv_tensordot_layer2(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], KB_sizes[3*layer_cnt:3*(layer_cnt+1)], TS_sizes[2*layer_cnt:2*(layer_cnt+1)], TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=para_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input)

            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            if control_flag[1]:
                TS_params = TS_params + TS_para_tmp
            elif control_flag[3]:
                KB_params = KB_params + KB_para_tmp
            elif control_flag[0]:
                KB_params = KB_params + KB_para_tmp
                TS_params = TS_params + TS_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None

        #### flattening output
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]

        #### add dropout layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, KB_params, TS_params, cnn_gen_params, output_dim)


#### function to generate network of cnn (with shared KB through deconv)-> simple ffnn
def new_ELLA_cnn_deconv_tensordot_fc_net2(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[]):
    ## add CNN layers
    cnn_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_output_dim = new_ELLA_cnn_deconv_tensordot_net2(net_input, k_sizes, ch_sizes, stride_sizes, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_params=cnn_KB_params, TS_params=cnn_TS_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, input_size=input_size, task_index=task_index, skip_connections=skip_connections)

    ## add fc layers
    #fc_model, fc_params = new_fc_net(cnn_model[-1], [cnn_output_dim[0]]+fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net')
    return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, fc_params)
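## ----------------------------------------------------------------------
## Illustrative sketch of the ver2 double contraction above: the deconv
## output carries an extra leading axis d, which is contracted against a
## d -> ch_in matrix and then a c -> ch_out matrix. Sizes below are
## hypothetical placeholders; only this module's TF 1.x import is assumed.
## ----------------------------------------------------------------------
def _dfcnn_ver2_kernel_sketch():
    gen = tf.zeros([4, 5, 5, 32])              ## generated stack: d x k x k x c
    TS3 = tf.zeros([4, 2])                     ## d -> ch_in
    TS4 = tf.zeros([32, 16])                   ## c -> ch_out
    tmp = tf.tensordot(gen, TS3, [[0], [0]])   ## -> [5, 5, 32, 2]
    return tf.tensordot(tmp, TS4, [[2], [0]])  ## -> [5, 5, 2, 16]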
##############################################################################################################
####   functions for Conv-FC nets whose conv layers are freely set to shared across tasks by DeconvFactor ####
##############################################################################################################
def new_ELLA_flexible_cnn_deconv_tensordot_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, cnn_sharing, cnn_KB_sizes, cnn_TS_sizes, cnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, cnn_para_activation_fn=tf.nn.relu, cnn_KB_params=None, cnn_TS_params=None, cnn_params=None, fc_activation_fn=tf.nn.relu, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, input_size=[0, 0], output_type=None, task_index=0, skip_connections=[], highway_connect_type=0, cnn_highway_params=None, trainable=True, trainable_KB=True):
    _num_TS_param_per_layer = 4

    num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(cnn_sharing), len(cnn_KB_sizes)//2, len(cnn_TS_sizes)//2, len(cnn_TS_stride_sizes)//2]
    assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Parameters related to conv layers are wrong!"
    num_conv_layers = num_conv_layers[0]

    '''
    if cnn_KB_params is not None:
        assert (len(cnn_KB_params) == 1), "Given init value of KB (last layer) is wrong!"
    if cnn_TS_params is not None:
        assert (len(cnn_TS_params) == 4), "Given init value of TS (last layer) is wrong!"
    '''

    ## add CNN layers
    ## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
    control_flag = [(cnn_KB_params is None and cnn_TS_params is None), (not (cnn_KB_params is None) and (cnn_TS_params is None)), not (cnn_KB_params is None or cnn_TS_params is None), ((cnn_KB_params is None) and not (cnn_TS_params is None))]
    if control_flag[1]:
        cnn_TS_params = []
    elif control_flag[3]:
        cnn_KB_params = []
    elif control_flag[0]:
        cnn_KB_params, cnn_TS_params = [], []
    cnn_gen_params = []

    if cnn_params is None:
        cnn_params = [None for _ in range(2*num_conv_layers)]

    layers_for_skip, next_skip_connect = [net_input], None
    with tf.name_scope('Hybrid_DFCNN'):
        cnn_model, cnn_params_to_return, cnn_highway_params_to_return = [], [], []
        cnn_KB_to_return, cnn_TS_to_return = [], []
        for layer_cnt in range(num_conv_layers):
            KB_para_tmp, TS_para_tmp, para_tmp = [None], [None for _ in range(_num_TS_param_per_layer)], [None, None]
            highway_para_tmp = [None, None] if cnn_highway_params is None else cnn_highway_params[2*layer_cnt:2*(layer_cnt+1)]
            cnn_gen_para_tmp = [None, None]

            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1

            if layer_cnt == skip_connect_out:
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None

            ## build either a shared (DF-CNN) layer or a task-private conv layer for this position,
            ## reusing KB/TS parameters according to control_flag
            layer_input = net_input if layer_cnt == 0 else cnn_model[layer_cnt-1]
            if cnn_sharing[layer_cnt]:
                KB_param_in = None if (control_flag[0] or control_flag[3]) else cnn_KB_params[layer_cnt]
                TS_param_in = None if (control_flag[0] or control_flag[1]) else cnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)]
                layer_tmp, KB_para_tmp, TS_para_tmp, cnn_gen_para_tmp, highway_para_tmp = new_ELLA_cnn_deconv_tensordot_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], cnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], cnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=cnn_activation_fn, para_activation_fn=cnn_para_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, highway_connect_type=highway_connect_type, highway_W=highway_para_tmp[0], highway_b=highway_para_tmp[1], trainable=trainable, trainable_KB=trainable_KB)
            else:
                layer_tmp, para_tmp = new_cnn_layer(layer_input=layer_input, k_size=k_sizes[2*layer_cnt:2*(layer_cnt+1)]+ch_sizes[layer_cnt:layer_cnt+2], stride_size=[1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], activation_fn=cnn_activation_fn, weight=cnn_params[2*layer_cnt], bias=cnn_params[2*layer_cnt+1], padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], skip_connect_input=processed_skip_connect_input, trainable=trainable)

            cnn_model.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            cnn_KB_to_return = cnn_KB_to_return + KB_para_tmp
            cnn_TS_to_return = cnn_TS_to_return + TS_para_tmp
            cnn_params_to_return = cnn_params_to_return + para_tmp
            cnn_gen_params = cnn_gen_params + cnn_gen_para_tmp
            cnn_highway_params_to_return = cnn_highway_params_to_return + highway_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None

        #### flattening output
        output_dim = [int(cnn_model[-1].shape[1]*cnn_model[-1].shape[2]*cnn_model[-1].shape[3])]
        cnn_model.append(tf.reshape(cnn_model[-1], [-1, output_dim[0]]))

        #### add dropout layer
        if dropout:
            cnn_model.append(tf.nn.dropout(cnn_model[-1], dropout_prob))

    ## add fc layers
    fc_model, fc_params = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, tensorboard_name_scope='fc_net', trainable=trainable)

    #return (cnn_model+fc_model, cnn_KB_params, cnn_TS_params, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
    return (cnn_model+fc_model, cnn_KB_to_return, cnn_TS_to_return, cnn_gen_params, cnn_params_to_return, cnn_highway_params_to_return, fc_params)
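## ----------------------------------------------------------------------
## Usage note for the hybrid builder above: cnn_sharing is a per-layer
## list of booleans; True routes that conv layer through the shared
## DF-CNN factorization (KB + task-specific mapping), False builds an
## ordinary task-private conv layer. For example, a hypothetical
## four-layer net sharing only its first two layers would pass
## cnn_sharing=[True, True, False, False].
## ----------------------------------------------------------------------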
#### function to generate DARTS-based network for selective sharing on DF-CNN
def new_darts_dfcnn_layer(layer_input, k_size, ch_size, stride_size, KB_size, TS_size, TS_stride_size, layer_num, task_num, activation_fn=tf.nn.relu, para_activation_fn=tf.nn.relu, KB_param=None, TS_param=None, conv_param=None, select_param=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pooling=False, pool_size=None, trainable=True, skip_connect_input=None, name_scope='darts_dfcnn_layer', use_numpy_var_in_graph=False):
    with tf.name_scope(name_scope):
        ## init DF-CNN KB params
        if KB_param is None or (type(KB_param) == np.ndarray and not use_numpy_var_in_graph):
            KB_param = new_ELLA_KB_param([1, KB_size[0], KB_size[0], KB_size[1]], layer_num, task_num, KB_reg_type, KB_param, trainable=trainable)
        ## init DF-CNN task-specific mapping params
        if TS_param is None or (type(TS_param) == np.ndarray and not use_numpy_var_in_graph):
            TS_param = new_ELLA_cnn_deconv_tensordot_TS_param([[TS_size[0], TS_size[0], TS_size[1], KB_size[1]], [1, 1, 1, TS_size[1]], [TS_size[1], ch_size[0], ch_size[1]], [ch_size[1]]], layer_num, task_num, TS_reg_type, [None, None, None, None] if TS_param is None else TS_param, trainable=trainable)
        ## init task-specific conv params
        if conv_param is None:
            conv_param = [new_weight(shape=k_size+ch_size, trainable=trainable), new_bias(shape=[ch_size[-1]], trainable=trainable)]
        else:
            if conv_param[0] is None or (type(conv_param[0]) == np.ndarray and not use_numpy_var_in_graph):
                conv_param[0] = new_weight(shape=k_size+ch_size, init_tensor=conv_param[0], trainable=trainable)
            if conv_param[1] is None or (type(conv_param[1]) == np.ndarray and not use_numpy_var_in_graph):
                conv_param[1] = new_bias(shape=[ch_size[-1]], init_tensor=conv_param[1], trainable=trainable)
        ## init DARTS-selection params
        if select_param is None:
            select_param = new_weight(shape=[2], init_tensor=np.zeros(2, dtype=np.float32), trainable=trainable)
        elif (type(select_param) == np.ndarray) and not use_numpy_var_in_graph:
            select_param = new_weight(shape=[2], init_tensor=select_param, trainable=trainable)

        with tf.name_scope('DFCNN_param_gen'):
            para_tmp = tf.add(tf.nn.conv2d_transpose(KB_param, TS_param[0], [1, k_size[0], k_size[1], TS_size[1]], strides=[1, TS_stride_size[0], TS_stride_size[1], 1]), TS_param[1])
            para_tmp = tf.reshape(para_tmp, [k_size[0], k_size[1], TS_size[1]])
            if para_activation_fn is not None:
                para_tmp = para_activation_fn(para_tmp)
            W = tf.tensordot(para_tmp, TS_param[2], [[2], [0]])
            b = TS_param[3]

        mixing_weight = tf.reshape(tf.nn.softmax(select_param), [2, 1])
        shared_conv_layer = tf.nn.conv2d(layer_input, W, strides=stride_size, padding=padding_type) + b
        TS_conv_layer = tf.nn.conv2d(layer_input, conv_param[0], strides=stride_size, padding=padding_type) + conv_param[1]
        if skip_connect_input is not None:
            shape1, shape2 = shared_conv_layer.get_shape().as_list(), skip_connect_input.get_shape().as_list()
            assert (len(shape1) == len(shape2)), "Shape of layer's output and input of skip connection do not match!"
            assert (all([(x==y) for (x, y) in zip(shape1, shape2)])), "Shape of layer's output and input of skip connection do NOT match!"
            shared_conv_layer = shared_conv_layer + skip_connect_input
            TS_conv_layer = TS_conv_layer + skip_connect_input
        if not (activation_fn is None):
            shared_conv_layer = activation_fn(shared_conv_layer)
            TS_conv_layer = activation_fn(TS_conv_layer)
        mixed_conv_temp = tf.tensordot(tf.stack([TS_conv_layer, shared_conv_layer], axis=4), mixing_weight, axes=[[4], [0]])
        conv_layer = tf.reshape(mixed_conv_temp, mixed_conv_temp.get_shape()[0:-1])

        if max_pooling and (pool_size[1] > 1 or pool_size[2] > 1):
            layer = tf.nn.max_pool(conv_layer, ksize=pool_size, strides=pool_size, padding=padding_type)
        else:
            layer = conv_layer
    return (layer, [KB_param], TS_param, conv_param, [select_param])
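## ----------------------------------------------------------------------
## Illustrative sketch of the DARTS-style mixing inside the layer above:
## a softmax over two architecture weights blends the task-specific and
## shared conv outputs. Shapes are hypothetical placeholders; only this
## module's TensorFlow 1.x import is assumed.
## ----------------------------------------------------------------------
def _darts_mixing_sketch():
    select_param = tf.zeros([2])                          ## architecture weights
    mixing_weight = tf.reshape(tf.nn.softmax(select_param), [2, 1])
    TS_out = tf.zeros([1, 8, 8, 16])                      ## task-specific conv output
    shared_out = tf.zeros([1, 8, 8, 16])                  ## DF-CNN (shared) conv output
    mixed = tf.tensordot(tf.stack([TS_out, shared_out], axis=4), mixing_weight, axes=[[4], [0]])
    return tf.reshape(mixed, mixed.get_shape()[0:-1])     ## drop the trailing size-1 axis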
def new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, flat_output=False, trainable=True, task_index=0, skip_connections=[], use_numpy_var_in_graph=False):
    _num_TS_param_per_layer = 4

    num_conv_layers = [len(k_sizes)//2, len(ch_sizes)-1, len(stride_sizes)//2, len(dfcnn_KB_sizes)//2, len(dfcnn_TS_sizes)//2, len(dfcnn_TS_stride_sizes)//2]
    assert (all([(num_conv_layers[i]==num_conv_layers[i+1]) for i in range(len(num_conv_layers)-1)])), "Parameters related to conv layers are wrong!"
    num_conv_layers = num_conv_layers[0]

    ## first element : make new KB&TS / second element : make new TS / third element : not make new para / fourth element : make new KB
    control_flag = [(dfcnn_KB_params is None and dfcnn_TS_params is None), (not (dfcnn_KB_params is None) and (dfcnn_TS_params is None)), not (dfcnn_KB_params is None or dfcnn_TS_params is None), ((dfcnn_KB_params is None) and not (dfcnn_TS_params is None))]

    if cnn_TS_params is None:
        cnn_TS_params = [None for _ in range(2*num_conv_layers)]
    else:
        assert(len(cnn_TS_params) == 2*num_conv_layers), "Check given parameters!"
    if select_params is None:
        select_params = [None for _ in range(num_conv_layers)]

    layers_for_skip, next_skip_connect = [net_input], None
    layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return = [], [], [], [], []
    with tf.name_scope('DARTS_DFCNN_net'):
        for layer_cnt in range(num_conv_layers):
            ## keep a pending skip connection until its output layer, as the other net builders do
            next_skip_connect = skip_connections.pop(0) if (len(skip_connections) > 0 and next_skip_connect is None) else next_skip_connect
            if next_skip_connect is not None:
                skip_connect_in, skip_connect_out = next_skip_connect
                assert (skip_connect_in > -1 and skip_connect_out > -1), "Given skip connection has error (try connecting non-existing layer)"
            else:
                skip_connect_in, skip_connect_out = -1, -1

            if layer_cnt == skip_connect_out:
                processed_skip_connect_input = layers_for_skip[skip_connect_in]
                for layer_cnt_tmp in range(skip_connect_in, skip_connect_out):
                    if max_pool and (pool_sizes[2*layer_cnt_tmp]>1 or pool_sizes[2*layer_cnt_tmp+1]>1):
                        processed_skip_connect_input = tf.nn.max_pool(processed_skip_connect_input, ksize=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], strides=[1]+pool_sizes[2*layer_cnt_tmp:2*(layer_cnt_tmp+1)]+[1], padding=padding_type)
            else:
                processed_skip_connect_input = None

            ## choose the layer input and the reused KB/TS parameters according to control_flag
            layer_input = net_input if layer_cnt == 0 else layers[layer_cnt-1]
            KB_param_in = None if (control_flag[0] or control_flag[3]) else dfcnn_KB_params[layer_cnt]
            TS_param_in = None if (control_flag[0] or control_flag[1]) else dfcnn_TS_params[_num_TS_param_per_layer*layer_cnt:_num_TS_param_per_layer*(layer_cnt+1)]
            layer_tmp, KB_para_tmp, TS_para_tmp, cnn_TS_para_tmp, select_para_tmp = new_darts_dfcnn_layer(layer_input, k_sizes[2*layer_cnt:2*(layer_cnt+1)], ch_sizes[layer_cnt:layer_cnt+2], [1]+stride_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], dfcnn_KB_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_sizes[2*layer_cnt:2*(layer_cnt+1)], dfcnn_TS_stride_sizes[2*layer_cnt:2*(layer_cnt+1)], layer_cnt, task_index, activation_fn=activation_fn, para_activation_fn=dfcnn_TS_activation_fn, KB_param=KB_param_in, TS_param=TS_param_in, conv_param=cnn_TS_params[2*layer_cnt:2*(layer_cnt+1)], select_param=select_params[layer_cnt], KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pooling=max_pool, pool_size=[1]+pool_sizes[2*layer_cnt:2*(layer_cnt+1)]+[1], trainable=trainable, skip_connect_input=processed_skip_connect_input, use_numpy_var_in_graph=use_numpy_var_in_graph)

            layers.append(layer_tmp)
            layers_for_skip.append(layer_tmp)
            dfcnn_shared_params_return = dfcnn_shared_params_return + KB_para_tmp
            dfcnn_TS_params_return = dfcnn_TS_params_return + TS_para_tmp
            cnn_TS_params_return = cnn_TS_params_return + cnn_TS_para_tmp
            select_params_return = select_params_return + select_para_tmp
            if layer_cnt == skip_connect_out:
                next_skip_connect = None

        #### flattening output
        if flat_output:
            output_dim = [int(layers[-1].shape[1]*layers[-1].shape[2]*layers[-1].shape[3])]
            layers.append(tf.reshape(layers[-1], [-1, output_dim[0]]))
        else:
            output_dim = layers[-1].shape[1:]

        #### add dropout layer
        if dropout:
            layers.append(tf.nn.dropout(layers[-1], dropout_prob))
    return (layers, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, select_params_return, output_dim)


def new_darts_dfcnn_fc_net(net_input, k_sizes, ch_sizes, stride_sizes, fc_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, cnn_activation_fn=tf.nn.relu, dfcnn_TS_activation_fn=tf.nn.relu, fc_activation_fn=tf.nn.relu, dfcnn_KB_params=None, dfcnn_TS_params=None, cnn_TS_params=None, select_params=None, fc_params=None, KB_reg_type=None, TS_reg_type=None, padding_type='SAME', max_pool=False, pool_sizes=None, dropout=False, dropout_prob=None, output_type=None, trainable=True, task_index=0, skip_connections=[], use_numpy_var_in_graph=False):
    cnn_model, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, cnn_select_params_return, cnn_output_dim = new_darts_dfcnn_net(net_input, k_sizes, ch_sizes, stride_sizes, dfcnn_KB_sizes, dfcnn_TS_sizes, dfcnn_TS_stride_sizes, activation_fn=cnn_activation_fn, dfcnn_TS_activation_fn=dfcnn_TS_activation_fn, dfcnn_KB_params=dfcnn_KB_params, dfcnn_TS_params=dfcnn_TS_params, cnn_TS_params=cnn_TS_params, select_params=select_params, KB_reg_type=KB_reg_type, TS_reg_type=TS_reg_type, padding_type=padding_type, max_pool=max_pool, pool_sizes=pool_sizes, dropout=dropout, dropout_prob=dropout_prob, flat_output=True, trainable=trainable, task_index=task_index, skip_connections=skip_connections, use_numpy_var_in_graph=use_numpy_var_in_graph)
    fc_model, fc_params_return = new_fc_net(cnn_model[-1], fc_sizes, activation_fn=fc_activation_fn, params=fc_params, output_type=output_type, use_numpy_var_in_graph=use_numpy_var_in_graph)
    return (cnn_model+fc_model, dfcnn_shared_params_return, dfcnn_TS_params_return, cnn_TS_params_return, cnn_select_params_return, fc_params_return)
116.154079
1,000
0.734986
13,174
76,894
3.849552
0.01837
0.11973
0.087313
0.066254
0.936566
0.925011
0.911287
0.90403
0.892929
0.887073
0
0.023474
0.136292
76,894
661
1,001
116.329803
0.74013
0.053801
0
0.599147
0
0
0.01676
0
0
0
0
0
0.027719
1
0.036247
false
0
0.008529
0
0.081023
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
1
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
8
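For orientation, a hedged sketch of how the `new_darts_dfcnn_fc_net` builder above might be called. Every size below is illustrative and inferred only from the per-layer slicing in the code (two entries per conv layer in `k_sizes`, `stride_sizes`, the DF-CNN size lists and `pool_sizes`; `len(ch_sizes)` is one more than the number of conv layers); none of it is taken from the original repository:

# Hypothetical two-conv-layer invocation (TF1-style graph code; all sizes illustrative).
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
layers, kb_p, ts_p, conv_p, sel_p, fc_p = new_darts_dfcnn_fc_net(
    x,
    k_sizes=[3, 3, 3, 3], ch_sizes=[3, 32, 64], stride_sizes=[1, 1, 1, 1],
    fc_sizes=[128, 10],
    dfcnn_KB_sizes=[3, 12, 3, 24], dfcnn_TS_sizes=[3, 16, 3, 32],
    dfcnn_TS_stride_sizes=[1, 1, 1, 1],
    max_pool=True, pool_sizes=[2, 2, 2, 2],
    task_index=0)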
e569a5dbd4c731524441ab30a896814a5ca98109
22,303
py
Python
cogs/device.py
quiprr/AutoTSS
8d78db17ed5a7f6200955689bfb7580b7eba7183
[ "MIT" ]
null
null
null
cogs/device.py
quiprr/AutoTSS
8d78db17ed5a7f6200955689bfb7580b7eba7183
[ "MIT" ]
null
null
null
cogs/device.py
quiprr/AutoTSS
8d78db17ed5a7f6200955689bfb7580b7eba7183
[ "MIT" ]
null
null
null
from aioify import aioify from discord.ext import commands import aiofiles import aiohttp import aiosqlite import asyncio import discord import json import shutil class Device(commands.Cog): def __init__(self, bot): self.bot = bot self.shutil = aioify(shutil, name='shutil') self.utils = self.bot.get_cog('Utils') @commands.group(name='device', invoke_without_command=True) @commands.guild_only() async def device_cmd(self, ctx: commands.Context) -> None: prefix = await self.utils.get_prefix(ctx.guild.id) embed = discord.Embed(title='Device Commands') embed.add_field(name='Add a device', value=f'`{prefix}device add`', inline=False) embed.add_field(name='Remove a device', value=f'`{prefix}device remove`', inline=False) embed.add_field(name='List your devices', value=f'`{prefix}device list`', inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await ctx.send(embed=embed) @device_cmd.command(name='add') @commands.guild_only() @commands.max_concurrency(1, per=commands.BucketType.user) async def add_device(self, ctx: commands.Context) -> None: prefix = await self.utils.get_prefix(ctx.guild.id) timeout_embed = discord.Embed(title='Add Device', description='No response given in 1 minute, cancelling.') cancelled_embed = discord.Embed(title='Add Device', description='Cancelled.') for embed in (timeout_embed, cancelled_embed): embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) max_devices = 10 #TODO: Export this option to a separate config file async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor: try: devices = json.loads((await cursor.fetchone())[0]) except TypeError: devices = list() await db.execute('INSERT INTO autotss(user, devices, enabled) VALUES(?,?,?)', (ctx.author.id, json.dumps(devices), True)) await db.commit() if len(devices) > max_devices and await ctx.bot.is_owner(ctx.author) == False: # Error out if you attempt to add over 'max_devices' devices, and if you're not the owner of the bot embed = discord.Embed(title='Error', description=f'You cannot add over {max_devices} devices to AutoTSS.') await ctx.send(embed=embed) return device = dict() async with aiohttp.ClientSession() as session: for x in range(4): # Loop that gets all of the required information to save blobs with from the user descriptions = ( 'Enter a name for your device', "Enter your device's identifier (e.g. `iPhone6,1`)", "Enter your device's ECID (hex)", "Enter your device's Board Config (e.g. `n51ap`). \ This value ends in `ap`, and can be found with [System Info](https://arx8x.github.io/depictions/systeminfo.html) \ under the `Platform` section, or by running `gssc | grep HWModelStr` in a terminal on your iOS device." 
) embed = discord.Embed(title='Add Device', description='\n'.join((descriptions[x], 'Type `cancel` to cancel.'))) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) if x == 0: message = await ctx.send(embed=embed) else: await message.edit(embed=embed) # Wait for a response from the user, and error out if the user takes over 1 minute to respond try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) if x == 0: answer = response.content # Don't make the device's name lowercase else: answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return # Delete the message try: await response.delete() except discord.errors.NotFound: pass if answer.lower() == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return # Make sure given information is valid if x == 0: device['name'] = answer name_check = await self.utils.check_name(device['name'], ctx.author.id) if name_check != True: embed = discord.Embed(title='Error', description = f"Device name `{device['name']}` is not valid.") if name_check == 0: embed.description += " A device's name must be between 4 and 20 characters." elif name_check == -1: embed.description += " You cannot use a device's name more than once." embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return elif x == 1: device['identifier'] = 'P'.join(answer.split('p')) if await self.utils.check_identifier(session, device['identifier']) is False: embed = discord.Embed(title='Error', description=f"Device Identifier `{device['identifier']}` is not valid.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return elif x == 2: if answer.startswith('0x'): device['ecid'] = answer[2:] else: device['ecid'] = answer ecid_check = await self.utils.check_ecid(device['ecid'], ctx.author.id) if ecid_check != True: embed = discord.Embed(title='Error', description=f"Device ECID `{device['ecid']}` is not valid.") embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 5 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png')) if ecid_check == -1: embed.description += ' This ECID has already been added to AutoTSS.' 
await message.edit(embed=embed) embed.description = embed.description.replace(f"`{device['ecid']}` ", '') embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await asyncio.sleep(5) await message.edit(embed=embed) return else: device['boardconfig'] = answer if await self.utils.check_boardconfig(session, device['identifier'], device['boardconfig']) is False: embed = discord.Embed(title='Error', description=f"Device boardconfig `{device['boardconfig']}` is not valid.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return cpid = await self.utils.get_cpid(session, device['identifier'], device['boardconfig']) generator_description = [ 'Would you like to save blobs with a custom generator?', '*If being ran on A12+ devices, you **will** need to provide a matching apnonce for SHSH blobs to be saved correctly.*', 'Guide for jailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-web#getting-generator-and-apnonce-jailbroken-a12-only)', 'Guide for nonjailbroken A12+ devices: [Click here](https://ios.cfw.guide/tss-computer#get-your-device-specific-apnonce-and-generator)', 'This value is hexadecimal, 16 characters long, and begins with `0x`.' ] embed = discord.Embed(title='Add Device', description='\n'.join(generator_description)) # Ask the user if they'd like to save blobs with a custom generator embed.add_field(name='Options', value='Type **yes** to add a custom generator, **cancel** to cancel adding this device, or anything else to skip.', inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'yes': embed = discord.Embed(title='Add Device', description='Please enter the custom generator you wish to save blobs with.\nType `cancel` to cancel.') embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['generator'] = answer if await self.utils.check_generator(device['generator']) is False: embed = discord.Embed(title='Error', description=f"Device Generator `{device['generator']}` is not valid.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return elif answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['generator'] = None apnonce_description = [ 'Would you like to save blobs with a custom apnonce?', ] if device['generator'] is not None: apnonce_description.append(f"This custom apnonce MUST match with your custom generator `{device['generator']}`, or else your SHSH blobs **will be invalid**.") if cpid >= 32800: if 
len(apnonce_description) == 2: a12_apnonce_desc = 'This also MUST be done for your device, or else your SHSH blobs **will be invalid**. More info \ [here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).' else: a12_apnonce_desc = 'This MUST be done for your device, or else your SHSH blobs **will be invalid**. More info \ [here](https://www.reddit.com/r/jailbreak/comments/f5wm6l/tutorial_repost_easiest_way_to_save_a12_blobs/).' apnonce_description.append(a12_apnonce_desc) apnonce_description.append('NOTE: This is **NOT** the same as your **generator**, which is hex, begins with `0x`, and is 16 characters long.') embed = discord.Embed(title='Add Device', description='\n'.join(apnonce_description)) # Ask the user if they'd like to save blobs with a custom ApNonce embed.add_field(name='Options', value='Type **yes** to add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'yes': embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.') embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['apnonce'] = answer if await self.utils.check_apnonce(cpid, device['apnonce']) is False: embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return elif answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['apnonce'] = None if 32800 <= cpid < 35072 and device['apnonce'] is None: # If A12+ and no apnonce was specified embed = discord.Embed(title='Add Device') apnonce_warning = ( 'You are attempting to add an A12+ device while choosing to not specify a custom apnonce.', 'This will save **non-working SHSH blobs**.', 'Are you sure you want to do this?' 
) embed.add_field(name='Warning', value='\n'.join(apnonce_warning), inline=False) embed.add_field(name='Options', value='Type **yes** to go back and add a custom apnonce, **cancel** to cancel adding this device, or anything else to skip.', inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'yes': embed = discord.Embed(title='Add Device', description='Please enter the custom apnonce you wish to save blobs with.\nType `cancel` to cancel.') embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['apnonce'] = answer if await self.utils.check_apnonce(device['apnonce']) is False: embed = discord.Embed(title='Error', description=f"Device ApNonce `{device['apnonce']}` is not valid.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) return elif answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return else: device['apnonce'] = None device['saved_blobs'] = list() # Add device information into the database devices.append(device) async with aiosqlite.connect('Data/autotss.db') as db: await db.execute('UPDATE autotss SET devices = ? 
WHERE user = ?', (json.dumps(devices), ctx.author.id)) await db.commit() embed = discord.Embed(title='Add Device', description=f"Device `{device['name']}` added successfully!") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) await self.utils.update_device_count() @device_cmd.command(name='remove') @commands.guild_only() @commands.max_concurrency(1, per=commands.BucketType.user) async def remove_device(self, ctx: commands.Context) -> None: prefix = await self.utils.get_prefix(ctx.guild.id) cancelled_embed = discord.Embed(title='Remove Device', description='Cancelled.') invalid_embed = discord.Embed(title='Error', description='Invalid input given.') timeout_embed = discord.Embed(title='Remove Device', description='No response given in 1 minute, cancelling.') for x in (cancelled_embed, invalid_embed, timeout_embed): x.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor: try: devices = json.loads((await cursor.fetchone())[0]) except TypeError: devices = list() if len(devices) == 0: embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.') await ctx.send(embed=embed) return embed = discord.Embed(title='Remove Device', description="Choose the number of the device you'd like to remove.\nType `cancel` to cancel.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) for x in range(len(devices)): device_info = [ f"Name: `{devices[x]['name']}`", f"Device Identifier: `{devices[x]['identifier']}`", f"Boardconfig: `{devices[x]['boardconfig']}`" ] if devices[x]['apnonce'] is not None: device_info.append(f"Custom ApNonce: `{devices[x]['apnonce']}`") embed.add_field(name=x + 1, value='\n'.join(device_info), inline=False) message = await ctx.send(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except: pass if answer == 'cancel' or answer.startswith(prefix): await message.edit(embed=cancelled_embed) return try: num = int(answer) - 1 except: await message.edit(embed=invalid_embed) return if num not in range(len(devices)): await message.edit(embed=invalid_embed) return embed = discord.Embed(title='Remove Device', description=f"Are you **absolutely sure** you want to delete `{devices[num]['name']}`?") embed.add_field(name='Options', value='Type **yes** to delete your device & blobs from AutoTSS, or anything else to cancel.', inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) try: response = await self.bot.wait_for('message', check=lambda message: message.author == ctx.author, timeout=60) answer = response.content.lower() except asyncio.exceptions.TimeoutError: await message.edit(embed=timeout_embed) return try: await response.delete() except discord.errors.NotFound: pass if answer == 'yes': embed = discord.Embed(title='Remove Device', description='Removing device...') embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) async with 
aiofiles.tempfile.TemporaryDirectory() as tmpdir: url = await self.utils.backup_blobs(tmpdir, devices[num]['ecid']) if url is None: embed = discord.Embed(title='Remove Device', description=f"Device `{devices[num]['name']}` removed.") embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) else: await self.shutil.rmtree(f"Data/Blobs/{devices[num]['ecid']}") embed = discord.Embed(title='Remove Device') embed.description = f"Blobs from `{devices[num]['name']}`: [Click here]({url})" embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) try: await ctx.author.send(embed=embed) embed.description = f"Device `{devices[num]['name']}` removed." await message.edit(embed=embed) except: embed.description = f"Device `{devices[num]['name']}` removed.\nBlobs from `{devices[num]['name']}`: [Click here]({url})" embed.set_footer( text=f'{ctx.author.display_name} | This message will automatically be deleted in 15 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png') ) await message.edit(embed=embed) await asyncio.sleep(15) await ctx.message.delete() await message.delete() devices.pop(num) async with aiosqlite.connect('Data/autotss.db') as db: await db.execute('UPDATE autotss SET devices = ? WHERE user = ?', (json.dumps(devices), ctx.author.id)) await db.commit() await message.edit(embed=embed) await self.utils.update_device_count() else: await message.edit(embed=cancelled_embed) @device_cmd.command(name='list') @commands.guild_only() async def list_devices(self, ctx: commands.Context) -> None: async with aiosqlite.connect('Data/autotss.db') as db, db.execute('SELECT devices from autotss WHERE user = ?', (ctx.author.id,)) as cursor: try: devices = json.loads((await cursor.fetchone())[0]) except TypeError: devices = list() if len(devices) == 0: embed = discord.Embed(title='Error', description='You have no devices added to AutoTSS.') await ctx.send(embed=embed) return embed = discord.Embed(title=f"{ctx.author.display_name}'s Devices") for device in devices: device_info = [ f"Device Identifier: `{device['identifier']}`", f"ECID: ||`{device['ecid']}`||", f"Boardconfig: `{device['boardconfig']}`" ] if device['generator'] is not None: device_info.append(f"Custom generator: `{device['generator']}`") if device['apnonce'] is not None: device_info.append(f"Custom ApNonce: `{device['apnonce']}`") embed.add_field(name=f"`{device['name']}`", value='\n'.join(device_info), inline=False) embed.set_footer(text=f'{ctx.author.display_name} | This message will be censored in 10 seconds to protect your ECID(s).', icon_url=ctx.author.avatar_url_as(static_format='png')) message = await ctx.send(embed=embed) await asyncio.sleep(10) for x in range(len(embed.fields)): field_values = [value for value in embed.fields[x].value.split('\n') if 'ECID' not in value] embed.set_field_at(index=x, name=embed.fields[x].name, value='\n'.join(field_values), inline=False) embed.set_footer(text=ctx.author.display_name, icon_url=ctx.author.avatar_url_as(static_format='png')) await message.edit(embed=embed) def setup(bot): bot.add_cog(Device(bot))
40.922936
183
0.706407
3,143
22,303
4.916322
0.106586
0.043101
0.044525
0.058439
0.736992
0.696026
0.666192
0.639658
0.622314
0.591639
0
0.005824
0.160875
22,303
544
184
40.998162
0.819824
0.027978
0
0.602353
0
0.042353
0.227478
0.02949
0
0
0
0.001838
0
1
0.004706
false
0.021176
0.021176
0
0.096471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
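The `def setup(bot)` hook at the end of cogs/device.py above is discord.py's standard extension entry point, so the cog is presumably registered from the bot's main module roughly as follows; the prefix and token are placeholders, not values from the repository:

# Hypothetical bootstrap for the Device cog (discord.py 1.x style).
import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='b!', intents=discord.Intents.default())
bot.load_extension('cogs.device')   # calls setup(bot), registering the Device cog
bot.run('DISCORD_BOT_TOKEN')        # placeholder token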
e569cd589ab6a45a901427f44fecdf26b339de4e
3,725
py
Python
applications/AdminSItes/controllers/sites.py
pigaov10/prodam.site.admin
9105c092a76fb7158085a08cd4d30d99b6e02ba9
[ "BSD-3-Clause" ]
null
null
null
applications/AdminSItes/controllers/sites.py
pigaov10/prodam.site.admin
9105c092a76fb7158085a08cd4d30d99b6e02ba9
[ "BSD-3-Clause" ]
null
null
null
applications/AdminSItes/controllers/sites.py
pigaov10/prodam.site.admin
9105c092a76fb7158085a08cd4d30d99b6e02ba9
[ "BSD-3-Clause" ]
null
null
null
### SITE PLONE
import os, sys
import xml.etree.cElementTree as tree_element_first

@auth.requires_membership('admin')
def add():
    """
    Generates the skeleton of a Plone site.
    @dir /sites/prodam.gerenciador.<project_name>/src/prodam/gerenciador/<project_name>/
    """
    if request.post_vars.site:
        site = request.post_vars.site
        folders = ['sites', '/viewlets', '/browser', '/src', '/templates']
        try:
            project_name = "prodam.gerenciador." + str(site)
            path = folders[0]
            path += "/" + str(project_name)
            os.mkdir(path, 0o755)  # Plone site name directory
            path += folders[3]
            os.mkdir(path, 0o755)  # src directory
            # split the site name on '.'
            directories = project_name.split(".")
            for directory in directories:
                path += "/" + directory
                os.mkdir(path, 0o755)
            # profiles
            profiles = path
            os.mkdir(profiles + "/profiles", 0o755)
            os.mkdir(profiles + "/profiles/default/", 0o755)
            add_profile_file(path)
            # browser
            path += folders[2]
            os.mkdir(path, 0o755)
            # viewlets
            path += folders[1]
            os.mkdir(path, 0o755)
            # zcml file
            add_configure_file(path, project_name)
            # python configure file
            add_viewlets_file(path)
            # templates
            path += folders[4]
            os.mkdir(path, 0o755)
            # pt file
            add_template_file(path)
        except Exception:
            raise HTTP(500, T('An error occurred...'))
    else:
        site = None
    lista = os.listdir("sites/")
    return dict(lista=lista, site=site)

def add_configure_file(path, project_name, param1='IPortalHeader', param2='IProdamPortal'):
    """Generates the ZCML file."""
    # node that configures the namespaces
    configure = tree_element_first.Element('configure')
    configure.set('xmlns', 'http://namespaces.zope.org/zope')
    configure.set('xmlns:browser', 'http://namespaces.zope.org/browser')
    configure.set('i18n_domain', 'prodam.portal')
    # node that configures the Plone viewlets
    browser = tree_element_first.SubElement(configure, "browser:viewlet")
    browser.set("name", "plone.logo")
    browser.set("manager", "plone.app.layout.viewlets.interfaces." + param1)
    browser.set("class", ".logo.LogoViewlet")
    browser.set("permission", "zope2.View")
    browser.set("layer", project_name + ".interfaces." + param2)
    tree = tree_element_first.ElementTree(configure)
    configure_name = "/configure.zcml"
    indent(configure)
    tree.write(path + configure_name, encoding="utf-8")

def add_template_file(path):
    """Generates the alerta.pt template."""
    text = open('sites/components/alerta.pt', 'r').read()
    file_name = "alerta.pt"
    file = open(path + "/" + file_name, "a+")
    file.write(text)
    file.close()

def add_profile_file(path):
    """Generates the viewlets.xml profile configuration."""
    # node that configures the namespaces
    xml_object = tree_element_first.Element('object')
    tree = tree_element_first.ElementTree(xml_object)
    configure_name = "/profiles/default/viewlets.xml"
    indent(xml_object)
    tree.write(path + configure_name, encoding="utf-8")

def add_viewlets_file(path):
    """Generates viewlets.py."""
    file_name = "viewlets.py"
    file = open(path + "/" + file_name, "a+")
    file.close()

def indent(elem, level=0):
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:
            indent(elem, level+1)
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i

def testando():
    users = db(db.t_tbl_components).select()
    return dict(users=users)
27.389706
88
0.682953
506
3,725
4.922925
0.284585
0.028904
0.038539
0.03613
0.242473
0.196708
0.143717
0.082698
0.082698
0.082698
0
0.016537
0.172081
3,725
136
89
27.389706
0.79118
0.074899
0
0.223529
0
0
0.167712
0.030703
0
0
0
0.036765
0
0
null
null
0
0.023529
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
1
0
0
0
0
0
0
0
0
2
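A hedged smoke test for the ZCML generator in sites.py above; the /tmp path is an assumption, and in web2py this controller code would normally run inside the application context where `auth`, `request` and `T` exist:

# Hypothetical check of add_configure_file's output.
add_configure_file('/tmp', 'prodam.gerenciador.demo')
print(open('/tmp/configure.zcml').read())
# Expect a <configure> root carrying the zope/browser namespace attributes and
# one <browser:viewlet> child wired to plone.logo and .logo.LogoViewlet.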
e56a51d213bf7149b8e6be6f0bd4f017978c2a3f
1,365
py
Python
fraktal_kocha_obiektowy.py
dkosztowniak/krzywaKocha
1ece53d0fda51565eedd7e5427a82e72e019a21d
[ "MIT" ]
null
null
null
fraktal_kocha_obiektowy.py
dkosztowniak/krzywaKocha
1ece53d0fda51565eedd7e5427a82e72e019a21d
[ "MIT" ]
null
null
null
fraktal_kocha_obiektowy.py
dkosztowniak/krzywaKocha
1ece53d0fda51565eedd7e5427a82e72e019a21d
[ "MIT" ]
null
null
null
import turtle

class fraktalKocha(turtle.Turtle):
    def __init__(self):
        super().__init__(shape='classic', visible=False)

    def krzywaKocha(self, d, n):
        self.pendown()
        if n == 0:
            self.forward(d)
        else:
            self.krzywaKocha(d/3, n-1)
            self.left(60)
            self.krzywaKocha(d/3, n-1)
            self.right(120)
            self.krzywaKocha(d/3, n-1)
            self.left(60)
            self.krzywaKocha(d/3, n-1)
        self.penup()

    def platekKocha(self, d, n):
        for i in range(3):
            self.krzywaKocha(d, n)
            self.right(120)

kolory = ('#ffbd20', '#20bd20', '#ff3c00', '#f000ff', '#004aff')
xPlatek = (-400, -400, 200, 200, -100)
yPlatek = (-50, 250, 250, -50, 150)

f = fraktalKocha()
turtle.title('Krzywa Kocha')
f.home()
f.speed(0)  # 0..10; 0 is fastest
f.penup()
f.pensize(2)
f.clear()

for n in range(5):  # legend
    f.pencolor(kolory[n])
    f.goto(-450 + (turtle.window_width()//5)*n, -380)
    f.write('n = ', True, align="left", font=("Arial", 12, "normal"))
    f.write(n, True, align="left", font=("Arial", 12, "normal"))

f.goto(-480, -350)
for n in range(5):
    f.pencolor(kolory[n])
    f.krzywaKocha(turtle.window_width()//5, n)

for n in range(5):
    f.pencolor(kolory[n])
    f.goto(xPlatek[n], yPlatek[n])
    f.platekKocha(200, n)
24.375
69
0.556044
199
1,365
3.763819
0.371859
0.100134
0.106809
0.090788
0.416555
0.3498
0.316422
0.316422
0.316422
0.316422
0
0.091535
0.255678
1,365
56
70
24.375
0.645669
0.021245
0
0.318182
0
0
0.065967
0
0
0
0
0
0
1
0.068182
false
0
0.022727
0
0.113636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
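A quick sanity check on the recursion in fraktal_kocha_obiektowy.py above, not part of the original script: each level replaces every segment with four segments one third as long, so the drawn length grows by a factor of 4/3 per level.

# Segment count and total drawn length of krzywaKocha(d, n), per level.
d = 200
for n in range(5):
    print(n, 4 ** n, round(d * (4 / 3) ** n, 1))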
e56b278c08c9a210c958ffc52ff78cb97e76e231
718
py
Python
scripts/metadata_comparison/metadata_comparison/lib/digester_keys.py
leipzig/cromwell
e46adf05cc96343c4b4b9f081e68160b7f178ded
[ "Apache-2.0", "BSD-3-Clause" ]
850
2015-05-17T12:45:42.000Z
2022-03-31T08:36:54.000Z
scripts/metadata_comparison/metadata_comparison/lib/digester_keys.py
leipzig/cromwell
e46adf05cc96343c4b4b9f081e68160b7f178ded
[ "Apache-2.0", "BSD-3-Clause" ]
5,136
2015-05-06T14:58:16.000Z
2022-03-31T15:44:37.000Z
scripts/metadata_comparison/metadata_comparison/lib/digester_keys.py
leipzig/cromwell
e46adf05cc96343c4b4b9f081e68160b7f178ded
[ "Apache-2.0", "BSD-3-Clause" ]
363
2015-11-18T13:15:45.000Z
2022-03-31T08:05:56.000Z
Attempt = "attempt"
CromwellAdditionalTotalTimeSeconds = "cromwellAdditionalTotalTimeSeconds"
CromwellEnd = "cromwellEnd"
CromwellStart = "cromwellStart"
CromwellTotalTimeSeconds = "cromwellTotalTimeSeconds"
DelocalizationTimeSeconds = "delocalizationTimeSeconds"
Disks = "disks"
DockerImagePullTimeSeconds = "dockerImagePullTimeSeconds"
LocalizationTimeSeconds = "localizationTimeSeconds"
MachineType = "machineType"
OperationId = "operationId"
OtherTimeSeconds = "otherTimeSeconds"
PapiCreate = "papiCreate"
PapiEnd = "papiEnd"
PapiStart = "papiStart"
PapiTotalTimeSeconds = "papiTotalTimeSeconds"
ShardIndex = "shardIndex"
StartupTimeSeconds = "startupTimeSeconds"
UserCommandTimeSeconds = "userCommandTimeSeconds"
35.9
73
0.841226
38
718
15.894737
0.5
0
0
0
0
0
0
0
0
0
0
0
0.079387
718
19
74
37.789474
0.913767
0
0
0
0
0
0.420613
0.214485
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
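These are plain string constants, so a digester presumably uses them as dictionary keys when assembling per-call-attempt records; a hedged illustration with invented values:

# Hypothetical digest entry keyed by the constants above.
digest_entry = {
    Attempt: 1,
    ShardIndex: -1,
    MachineType: "n1-standard-1",
    CromwellTotalTimeSeconds: 123.4,
}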
e56d517d2e7a6f8ec101aff647f99c75f46d671c
8,765
py
Python
VPR_Techniques/HybridNet.py
oravus/VPR-Bench
80ff8d1f18e3cd102af94b96b6fe9bd0041257cc
[ "MIT" ]
30
2021-04-24T12:25:50.000Z
2022-03-23T05:28:19.000Z
VPR_Techniques/HybridNet.py
UQEKV/VPR-Bench
5b3c6170a1c3f6137aba371468302efd612de66d
[ "MIT" ]
5
2021-06-28T00:48:24.000Z
2022-03-04T16:05:57.000Z
VPR_Techniques/HybridNet.py
UQEKV/VPR-Bench
5b3c6170a1c3f6137aba371468302efd612de66d
[ "MIT" ]
7
2021-04-16T06:46:05.000Z
2022-03-23T05:28:09.000Z
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 5 15:34:18 2020

@author: mubariz
"""
import caffe
import numpy as np
import os

# Spatial pyramid regions (row_start, row_end, col_start, col_end) over the
# conv5 feature map: the full map (S=1), then 2x2, 3x3 and 4x4 grids (S=2..4)
# in row-major order, matching the 30 hand-written slices of the original file.
_REGIONS = [(0, None, 0, None)]
for _bounds in (((0, 6), (7, 12)),
                ((0, 4), (5, 8), (9, 12)),
                ((0, 3), (4, 6), (7, 9), (10, 12))):
    for _r0, _r1 in _bounds:
        for _c0, _c1 in _bounds:
            _REGIONS.append((_r0, _r1, _c0, _c1))


def _make_net_and_transformer():
    """Load the HybridNet caffe model and its preprocessing transformer."""
    base = str(os.path.abspath(os.curdir)) + '/VPR_Techniques/HybridNet/'
    mean_npy = np.load(base + 'hybridnet_mean.npy')
    print('Mean Array Shape:' + str(mean_npy.shape))
    net = caffe.Net(base + 'deploy.prototxt', base + 'HybridNet.caffemodel', caffe.TEST)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    print(net.blobs['data'].data.shape)
    transformer.set_transpose('data', (2, 0, 1))     # move image channels to outermost dimension
    transformer.set_mean('data', mean_npy)           # subtract the dataset-mean value in each channel
    transformer.set_raw_scale('data', 255)           # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR
    return net, transformer


def _local_max_pool(features):
    """Max-pool each of the 256 conv5 channels over the 30 pyramid regions."""
    local = np.zeros((256, 30))
    for i in range(256):
        for j, (r0, r1, c0, c1) in enumerate(_REGIONS):
            local[i, j] = np.max(features[i, r0:r1, c0:c1])
    return local


def _describe(net, transformer, image):
    """Run one image through the net and spatially pool its conv5 features."""
    image = image / 255.
    image = image[:, :, (2, 1, 0)]
    transformed = transformer.preprocess('data', image)
    net.blobs['data'].data[...] = transformed.copy()
    net.forward()
    features = np.asarray(net.blobs['conv5'].data)[1, :, :, :].copy()
    return _local_max_pool(features)


def compute_map_features(ref_map):
    net, transformer = _make_net_and_transformer()
    ref_features = []
    for image_reference in ref_map:
        # The None guard now runs before any arithmetic; the original divided
        # by 255 first, which would have raised on a None entry.
        features_ref_local = np.zeros((256, 30))
        if image_reference is not None:
            features_ref_local = _describe(net, transformer, image_reference)
        ref_features.append(features_ref_local)
    print('Reference images descriptors computed!')
    return ref_features


def compute_query_desc(image_query):
    net, transformer = _make_net_and_transformer()
    features_query_local = np.zeros((256, 30))
    if image_query is not None:
        features_query_local = _describe(net, transformer, image_query)
    return features_query_local


def perform_VPR(features_query_local, ref_map_features):
    total_Ref_Images = len(ref_map_features)
    confusion_vector = np.zeros(total_Ref_Images)
    for j in range(total_Ref_Images):
        # L1 distance turned into a similarity score in [0, 1]-ish range.
        match_score = 1 - (np.sum(abs(np.subtract(features_query_local, ref_map_features[j]))) / (256 * 256))
        confusion_vector[j] = match_score
    return np.amax(confusion_vector), np.argmax(confusion_vector), confusion_vector

50.66474
189
0.610496
1,382
8,765
3.664978
0.10275
0.174531
0.153998
0.100691
0.835341
0.807305
0.546101
0.279171
0.226654
0.226654
0
0.073255
0.236851
8,765
172
190
50.959302
0.683959
0.074843
0
0.224138
0
0
0.051485
0.032426
0
0
0
0
0
1
0.025862
false
0
0.025862
0
0.077586
0.043103
0
0
0
null
0
0
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
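A hedged end-to-end driver for the three HybridNet functions above; loading images with OpenCV (as HxWx3 uint8 BGR arrays) is an assumption about the caller, since this file never loads images itself, and the paths are placeholders:

# Hypothetical VPR driver.
import cv2

ref_map = [cv2.imread(p) for p in ('ref0.jpg', 'ref1.jpg')]
ref_features = compute_map_features(ref_map)
query_desc = compute_query_desc(cv2.imread('query.jpg'))
best_score, best_index, scores = perform_VPR(query_desc, ref_features)
print('best match:', best_index, 'score:', best_score)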
e56e6882dba09fa5e87e1ace9bbb92be2582bd23
7,228
py
Python
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
47
2021-02-15T23:02:36.000Z
2022-03-04T21:30:03.000Z
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
7
2021-02-19T20:00:08.000Z
2022-01-14T10:51:12.000Z
adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_ble_radio.py
jacoblb64/pico_rgb_keypad_hid
3251ca6a98ef86d9f98c54f639c4d61810601a0b
[ "MIT" ]
14
2021-02-20T17:40:56.000Z
2022-01-01T19:53:38.000Z
# SPDX-FileCopyrightText: 2019 Nicholas H. Tollervey for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_radio`
================================================================================

Simple byte and string based inter-device communication via BLE.

* Author(s): Nicholas H.Tollervey for Adafruit Industries

**Hardware:**

Adafruit Feather nRF52840 Express <https://www.adafruit.com/product/4062>
Adafruit Circuit Playground Bluefruit <https://www.adafruit.com/product/4333>

**Software and Dependencies:**

* Adafruit CircuitPython firmware for the supported boards:
  https://github.com/adafruit/circuitpython/releases
"""
import time
import struct
from micropython import const
from adafruit_ble import BLERadio
from adafruit_ble.advertising import Advertisement, LazyObjectField
from adafruit_ble.advertising.standard import ManufacturerData

__version__ = "0.3.3"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Radio.git"

#: Maximum length of a message (in bytes).
MAX_LENGTH = 248

#: Amount of time to advertise a message (in seconds).
AD_DURATION = 0.5

_MANUFACTURING_DATA_ADT = const(0xFF)
_ADAFRUIT_COMPANY_ID = const(0x0822)
_RADIO_DATA_ID = const(0x0001)  # TODO: check this isn't already taken.


class _RadioAdvertisement(Advertisement):
    """Broadcast arbitrary bytes as a radio message."""

    match_prefixes = (struct.pack("<BH", 0xFF, _ADAFRUIT_COMPANY_ID),)
    manufacturer_data = LazyObjectField(
        ManufacturerData,
        "manufacturer_data",
        advertising_data_type=_MANUFACTURING_DATA_ADT,
        company_id=_ADAFRUIT_COMPANY_ID,
        key_encoding="<H",
    )

    @classmethod
    def matches(cls, entry):
        """Checks for ID matches"""
        if len(entry.advertisement_bytes) < 6:
            return False
        # Check the key position within the manufacturer data. We already know
        # prefix matches so we don't need to check it twice.
        return (
            struct.unpack_from("<H", entry.advertisement_bytes, 5)[0] == _RADIO_DATA_ID
        )

    @property
    def msg(self):
        """Raw radio data"""
        if _RADIO_DATA_ID not in self.manufacturer_data.data:
            return b""
        return self.manufacturer_data.data[_RADIO_DATA_ID]

    @msg.setter
    def msg(self, value):
        self.manufacturer_data.data[_RADIO_DATA_ID] = value


class Radio:
    """
    Represents a connection through which one can send or receive strings
    and bytes. The radio can be tuned to a specific channel upon
    initialisation or via the `configure` method.
    """

    def __init__(self, **args):
        """
        Takes the same configuration arguments as the `configure` method.
        """
        # For BLE related operations.
        self.ble = BLERadio()
        # The uid for outgoing message. Incremented by one on each send, up to
        # 255 when it's reset to 0.
        self.uid = 0
        # Contains timestamped message metadata to mitigate report of
        # receiving of duplicate messages within AD_DURATION time frame.
        self.msg_pool = set()
        # Handle user related configuration.
        self.configure(**args)

    def configure(self, channel=42):
        """
        Set configuration values for the radio.

        :param int channel: The channel (0-255) the radio is listening /
            broadcasting on.
        """
        if -1 < channel < 256:
            self._channel = channel
        else:
            raise ValueError("Channel must be in range 0-255")

    def send(self, message):
        """
        Send a message string on the channel to which the radio is
        broadcasting.

        :param str message: The message string to broadcast.
        """
        return self.send_bytes(message.encode("utf-8"))

    def send_bytes(self, message):
        """
        Send bytes on the channel to which the radio is broadcasting.

        :param bytes message: The bytes to broadcast.
        """
        # Ensure length of message.
        if len(message) > MAX_LENGTH:
            raise ValueError("Message too long (max length = {})".format(MAX_LENGTH))
        advertisement = _RadioAdvertisement()
        # Concatenate the bytes that make up the advertised message.
        advertisement.msg = struct.pack("<BB", self._channel, self.uid) + message
        self.uid = (self.uid + 1) % 255
        # Advertise (block) for AD_DURATION period of time.
        self.ble.start_advertising(advertisement)
        time.sleep(AD_DURATION)
        self.ble.stop_advertising()

    def receive(self):
        """
        Returns a message received on the channel on which the radio is
        listening.

        :return: A string representation of the received message, or else
            None.
        """
        msg = self.receive_full()
        if msg:
            return msg[0].decode("utf-8").replace("\x00", "")
        return None

    def receive_full(self):
        """
        Returns a tuple containing three values representing a message
        received on the channel on which the radio is listening. If no
        message was received then `None` is returned.

        The three values in the tuple represent:

        * the bytes received.
        * the RSSI (signal strength: 0 = max, -255 = min).
        * a microsecond timestamp: the value returned by time.monotonic() when
          the message was received.

        :return: A tuple representation of the received message, or else None.
        """
        try:
            for entry in self.ble.start_scan(
                _RadioAdvertisement, minimum_rssi=-255, timeout=1, extended=True
            ):
                # Extract channel and unique message ID bytes.
                chan, uid = struct.unpack("<BB", entry.msg[:2])
                if chan == self._channel:
                    now = time.monotonic()
                    addr = entry.address.address_bytes
                    # Ensure this message isn't a duplicate. Message metadata
                    # is a tuple of (now, chan, uid, addr), to (mostly)
                    # uniquely identify a specific message in a certain time
                    # window.
                    expired_metadata = set()
                    duplicate = False
                    for msg_metadata in self.msg_pool:
                        if msg_metadata[0] < now - AD_DURATION:
                            # Ignore expired entries and mark for removal.
                            expired_metadata.add(msg_metadata)
                        elif (chan, uid, addr) == msg_metadata[1:]:
                            # Ignore matched messages to avoid duplication.
                            duplicate = True
                    # Remove expired entries.
                    self.msg_pool = self.msg_pool - expired_metadata
                    if not duplicate:
                        # Add new message's metadata to the msg_pool and
                        # return it as a result.
                        self.msg_pool.add((now, chan, uid, addr))
                        msg = entry.msg[2:]
                        return (msg, entry.rssi, now)
        finally:
            self.ble.stop_scan()
        return None
35.258537
87
0.606253
844
7,228
5.07346
0.325829
0.013078
0.012844
0.014012
0.113965
0.101822
0.083606
0.067258
0.046707
0.046707
0
0.01587
0.302573
7,228
204
88
35.431373
0.833565
0.417128
0
0.022727
0
0
0.045595
0
0
0
0.005152
0.004902
0
1
0.102273
false
0
0.068182
0
0.318182
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
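A minimal usage sketch for the `Radio` class above, on two boards tuned to the same channel; the channel value is arbitrary:

# On board A: broadcast a message for AD_DURATION seconds.
radio = Radio(channel=7)
radio.send("ping")

# On board B: poll until something arrives on channel 7.
radio = Radio(channel=7)
while True:
    msg = radio.receive()   # str, or None if nothing was heard this scan
    if msg is not None:
        print("received:", msg)
        break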
e56ec1987b2697fa228af32ab9806f8ffcfab331
536
py
Python
01-algorithmic-design-and-techniques/week-2/fibonacci-fast.py
andrewnachtigal/UCSD-Algorithms
25acae36752e37fab74b8e331db554af704ccf4c
[ "MIT" ]
null
null
null
01-algorithmic-design-and-techniques/week-2/fibonacci-fast.py
andrewnachtigal/UCSD-Algorithms
25acae36752e37fab74b8e331db554af704ccf4c
[ "MIT" ]
null
null
null
01-algorithmic-design-and-techniques/week-2/fibonacci-fast.py
andrewnachtigal/UCSD-Algorithms
25acae36752e37fab74b8e331db554af704ccf4c
[ "MIT" ]
null
null
null
#!/usr/bin/python
'''FibonacciList(n)
create an array F[0... n]
F[0] <- 0
F[1] <- 1
for i from 2 to n:
    F[i] <- F[i-1] + F[i-2]
return F[n]
'''

def fibonacci_recurs(n):
    if (n <= 1):
        return n
    else:
        return fibonacci_recurs(n - 1) + fibonacci_recurs(n - 2)

def fibonacci_iter(n):
    fib = []
    fib.append(0)
    fib.append(1)
    for i in range(2, n + 1):
        fib.append(fib[i - 1] + fib[i - 2])
    return fib[n]

if __name__ == "__main__":
    print(fibonacci_iter(10))
16.242424
64
0.516791
89
536
2.966292
0.348315
0.022727
0.181818
0
0
0
0
0
0
0
0
0.051213
0.307836
536
32
65
16.75
0.660377
0.259328
0
0
0
0
0.021918
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.357143
0.071429
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
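The file is named fibonacci-fast.py, yet both functions above are at best linear (and the recursive one exponential). For contrast, a fast-doubling variant, not part of the original file, computes F(n) in O(log n) steps using the identities F(2k) = F(k)(2F(k+1) - F(k)) and F(2k+1) = F(k)^2 + F(k+1)^2:

def fibonacci_fast(n):
    """O(log n) Fibonacci via fast doubling; returns F(n)."""
    def fib_pair(k):            # returns (F(k), F(k+1))
        if k == 0:
            return (0, 1)
        a, b = fib_pair(k // 2)
        c = a * (2 * b - a)     # F(2m) where m = k // 2
        d = a * a + b * b       # F(2m+1)
        if k % 2 == 0:
            return (c, d)
        return (d, c + d)
    return fib_pair(n)[0]

assert fibonacci_fast(10) == fibonacci_iter(10) == 55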
e56ec44fd01f43bc003d160541bb7a0ae0b9a6ae
1,715
py
Python
pynYNAB/schema/catalog.py
rienafairefr/pynYNAB
d5fc0749618409c6bb01cc2b93832cc59d780eaa
[ "MIT" ]
82
2017-02-21T11:07:24.000Z
2022-03-20T21:56:17.000Z
pynYNAB/schema/catalog.py
rienafairefr/pynYNAB
d5fc0749618409c6bb01cc2b93832cc59d780eaa
[ "MIT" ]
37
2017-02-19T10:28:03.000Z
2021-01-23T07:44:06.000Z
pynYNAB/schema/catalog.py
rienafairefr/pynYNAB
d5fc0749618409c6bb01cc2b93832cc59d780eaa
[ "MIT" ]
13
2017-03-07T10:08:59.000Z
2018-05-11T04:53:25.000Z
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship

from pynYNAB.schema.Entity import Entity, Base
from pynYNAB.schema.types import ArrayType


class CatalogEntity(Entity):
    @declared_attr
    def parent_id(self):
        return Column(ForeignKey('catalog.id'))

    @declared_attr
    def parent(self):
        return relationship('Catalog')


class CatalogBudget(Base, CatalogEntity):
    budget_name = Column(String)
    created_at = Column(DateTime)


class User(Base, CatalogEntity):
    username = Column(String)
    trial_expires_on = Column(String)
    email = Column(String)
    feature_flags = Column(ArrayType)
    is_subscribed = Column(Boolean)


class UserSetting(Base, CatalogEntity):
    setting_name = Column(String)
    user_id = Column(ForeignKey('user.id'))
    user = relationship('User', foreign_keys=user_id, backref='settings')
    setting_value = Column(String)


class UserBudget(Base, CatalogEntity):
    budget_id = Column(ForeignKey('catalogbudget.id'))
    budget = relationship('CatalogBudget')
    user_id = Column(ForeignKey('user.id'))
    user = relationship('User', foreign_keys=user_id, backref='budgets')
    permissions = Column(String)


class BudgetVersion(Base, CatalogEntity):
    date_format = Column(String)
    last_accessed_on = Column(String)
    currency_format = Column(String)
    budget_id = Column(ForeignKey('catalogbudget.id'))
    budget = relationship('CatalogBudget', foreign_keys=budget_id)
    version_name = Column(String)
    source = Column(String)
28.583333
73
0.744606
197
1,715
6.345178
0.309645
0.1152
0.08
0.0336
0.2272
0.2272
0.2272
0.2272
0.2272
0.1152
0
0
0.161516
1,715
59
74
29.067797
0.869263
0
0
0.136364
0
0
0.065306
0
0
0
0
0
0
1
0.045455
false
0
0.204545
0.045455
0.954545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
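A hedged smoke test for the declarative models above; the in-memory SQLite URL is an assumption, and it presumes the rest of the schema (e.g. the `Catalog` entity these models reference) has been imported so `create_all` can resolve the foreign keys:

# Hypothetical throwaway-database check.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite://')        # in-memory, discarded afterwards
Base.metadata.create_all(engine)           # emits catalogbudget, user, ... tables
session = sessionmaker(bind=engine)()
session.add(CatalogBudget(budget_name='demo'))
session.commit()
print(session.query(CatalogBudget).one().budget_name)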
e56f6d2a048e2089110b635b9fc2860c2724c363
13,775
py
Python
layers/MultiWaveletCorrelation.py
MAZiqing/FEDformer
7914d39df829494a8172afb9676982c3789d491d
[ "MIT" ]
7
2022-02-20T13:03:25.000Z
2022-03-30T09:27:38.000Z
layers/MultiWaveletCorrelation.py
MAZiqing/FEDformer
7914d39df829494a8172afb9676982c3789d491d
[ "MIT" ]
null
null
null
layers/MultiWaveletCorrelation.py
MAZiqing/FEDformer
7914d39df829494a8172afb9676982c3789d491d
[ "MIT" ]
4
2022-03-05T09:09:28.000Z
2022-03-21T08:46:23.000Z
import torch import numpy as np import torch.nn as nn import torch.nn.functional as F from torch import Tensor from typing import List, Tuple import math from functools import partial from einops import rearrange, reduce, repeat from torch import nn, einsum, diagonal from math import log2, ceil import pdb from utils.masking import LocalMask from layers.utils import get_filter device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class MultiWaveletTransform(nn.Module): """ 1D multiwavelet block. """ def __init__(self, ich=1, k=8, alpha=16, c=128, nCZ=1, L=0, base='legendre', attention_dropout=0.1): super(MultiWaveletTransform, self).__init__() print('base', base) self.k = k self.c = c self.L = L self.nCZ = nCZ self.Lk0 = nn.Linear(ich, c * k) self.Lk1 = nn.Linear(c * k, ich) self.ich = ich self.MWT_CZ = nn.ModuleList(MWT_CZ1d(k, alpha, L, c, base) for i in range(nCZ)) def forward(self, queries, keys, values, attn_mask): B, L, H, E = queries.shape _, S, _, D = values.shape if L > S: zeros = torch.zeros_like(queries[:, :(L - S), :]).float() values = torch.cat([values, zeros], dim=1) keys = torch.cat([keys, zeros], dim=1) else: values = values[:, :L, :, :] keys = keys[:, :L, :, :] values = values.view(B, L, -1) V = self.Lk0(values).view(B, L, self.c, -1) for i in range(self.nCZ): V = self.MWT_CZ[i](V) if i < self.nCZ - 1: V = F.relu(V) V = self.Lk1(V.view(B, L, -1)) V = V.view(B, L, -1, D) return (V.contiguous(), None) class MultiWaveletCross(nn.Module): """ 1D Multiwavelet Cross Attention layer. """ def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes, c=64, k=8, ich=512, L=0, base='legendre', mode_select_method='random', initializer=None, activation='tanh', **kwargs): super(MultiWaveletCross, self).__init__() print('base', base) self.c = c self.k = k self.L = L H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k) H0r = H0 @ PHI0 G0r = G0 @ PHI0 H1r = H1 @ PHI1 G1r = G1 @ PHI1 H0r[np.abs(H0r) < 1e-8] = 0 H1r[np.abs(H1r) < 1e-8] = 0 G0r[np.abs(G0r) < 1e-8] = 0 G1r[np.abs(G1r) < 1e-8] = 0 self.max_item = 3 self.attn1 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q, seq_len_kv=seq_len_kv, modes=modes, activation=activation, mode_select_method=mode_select_method) self.attn2 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q, seq_len_kv=seq_len_kv, modes=modes, activation=activation, mode_select_method=mode_select_method) self.attn3 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q, seq_len_kv=seq_len_kv, modes=modes, activation=activation, mode_select_method=mode_select_method) self.attn4 = FourierCrossAttentionW(in_channels=in_channels, out_channels=out_channels, seq_len_q=seq_len_q, seq_len_kv=seq_len_kv, modes=modes, activation=activation, mode_select_method=mode_select_method) self.T0 = nn.Linear(k, k) self.register_buffer('ec_s', torch.Tensor( np.concatenate((H0.T, H1.T), axis=0))) self.register_buffer('ec_d', torch.Tensor( np.concatenate((G0.T, G1.T), axis=0))) self.register_buffer('rc_e', torch.Tensor( np.concatenate((H0r, G0r), axis=0))) self.register_buffer('rc_o', torch.Tensor( np.concatenate((H1r, G1r), axis=0))) self.Lk = nn.Linear(ich, c * k) self.Lq = nn.Linear(ich, c * k) self.Lv = nn.Linear(ich, c * k) self.out = nn.Linear(c * k, ich) self.modes1 = modes def forward(self, q, k, v, mask=None): B, N, H, E = q.shape # (B, N, H, E) torch.Size([3, 768, 8, 2]) _, S, _, _ = k.shape # (B, S, H, E) torch.Size([3, 96, 
8, 2]) q = q.view(q.shape[0], q.shape[1], -1) k = k.view(k.shape[0], k.shape[1], -1) v = v.view(v.shape[0], v.shape[1], -1) q = self.Lq(q) q = q.view(q.shape[0], q.shape[1], self.c, self.k) k = self.Lk(k) k = k.view(k.shape[0], k.shape[1], self.c, self.k) v = self.Lv(v) v = v.view(v.shape[0], v.shape[1], self.c, self.k) if N > S: zeros = torch.zeros_like(q[:, :(N - S), :]).float() v = torch.cat([v, zeros], dim=1) k = torch.cat([k, zeros], dim=1) else: v = v[:, :N, :, :] k = k[:, :N, :, :] ns = math.floor(np.log2(N)) nl = pow(2, math.ceil(np.log2(N))) extra_q = q[:, 0:nl - N, :, :] extra_k = k[:, 0:nl - N, :, :] extra_v = v[:, 0:nl - N, :, :] q = torch.cat([q, extra_q], 1) k = torch.cat([k, extra_k], 1) v = torch.cat([v, extra_v], 1) Ud_q = torch.jit.annotate(List[Tuple[Tensor]], []) Ud_k = torch.jit.annotate(List[Tuple[Tensor]], []) Ud_v = torch.jit.annotate(List[Tuple[Tensor]], []) Us_q = torch.jit.annotate(List[Tensor], []) Us_k = torch.jit.annotate(List[Tensor], []) Us_v = torch.jit.annotate(List[Tensor], []) Ud = torch.jit.annotate(List[Tensor], []) Us = torch.jit.annotate(List[Tensor], []) # decompose for i in range(ns - self.L): # print('q shape',q.shape) d, q = self.wavelet_transform(q) Ud_q += [tuple([d, q])] Us_q += [d] for i in range(ns - self.L): d, k = self.wavelet_transform(k) Ud_k += [tuple([d, k])] Us_k += [d] for i in range(ns - self.L): d, v = self.wavelet_transform(v) Ud_v += [tuple([d, v])] Us_v += [d] for i in range(ns - self.L): dk, sk = Ud_k[i], Us_k[i] dq, sq = Ud_q[i], Us_q[i] dv, sv = Ud_v[i], Us_v[i] Ud += [self.attn1(dq[0], dk[0], dv[0], mask)[0] + self.attn2(dq[1], dk[1], dv[1], mask)[0]] Us += [self.attn3(sq, sk, sv, mask)[0]] v = self.attn4(q, k, v, mask)[0] # reconstruct for i in range(ns - 1 - self.L, -1, -1): v = v + Us[i] v = torch.cat((v, Ud[i]), -1) v = self.evenOdd(v) v = self.out(v[:, :N, :, :].contiguous().view(B, N, -1)) return (v.contiguous(), None) def wavelet_transform(self, x): xa = torch.cat([x[:, ::2, :, :], x[:, 1::2, :, :], ], -1) d = torch.matmul(xa, self.ec_d) s = torch.matmul(xa, self.ec_s) return d, s def evenOdd(self, x): B, N, c, ich = x.shape # (B, N, c, k) assert ich == 2 * self.k x_e = torch.matmul(x, self.rc_e) x_o = torch.matmul(x, self.rc_o) x = torch.zeros(B, N * 2, c, self.k, device=x.device) x[..., ::2, :, :] = x_e x[..., 1::2, :, :] = x_o return x class FourierCrossAttentionW(nn.Module): def __init__(self, in_channels, out_channels, seq_len_q, seq_len_kv, modes=16, activation='tanh', mode_select_method='random'): super(FourierCrossAttentionW, self).__init__() print('corss fourier correlation used!') self.in_channels = in_channels self.out_channels = out_channels self.modes1 = modes self.activation = activation def forward(self, q, k, v, mask): B, L, E, H = q.shape xq = q.permute(0, 3, 2, 1) # size = [B, H, E, L] torch.Size([3, 8, 64, 512]) xk = k.permute(0, 3, 2, 1) xv = v.permute(0, 3, 2, 1) self.index_q = list(range(0, min(int(L // 2), self.modes1))) self.index_k_v = list(range(0, min(int(xv.shape[3] // 2), self.modes1))) # Compute Fourier coefficients xq_ft_ = torch.zeros(B, H, E, len(self.index_q), device=xq.device, dtype=torch.cfloat) xq_ft = torch.fft.rfft(xq, dim=-1) for i, j in enumerate(self.index_q): xq_ft_[:, :, :, i] = xq_ft[:, :, :, j] xk_ft_ = torch.zeros(B, H, E, len(self.index_k_v), device=xq.device, dtype=torch.cfloat) xk_ft = torch.fft.rfft(xk, dim=-1) for i, j in enumerate(self.index_k_v): xk_ft_[:, :, :, i] = xk_ft[:, :, :, j] xqk_ft = (torch.einsum("bhex,bhey->bhxy", xq_ft_, xk_ft_)) if self.activation == 
'tanh': xqk_ft = xqk_ft.tanh() elif self.activation == 'softmax': xqk_ft = torch.softmax(abs(xqk_ft), dim=-1) xqk_ft = torch.complex(xqk_ft, torch.zeros_like(xqk_ft)) else: raise Exception('{} actiation function is not implemented'.format(self.activation)) xqkv_ft = torch.einsum("bhxy,bhey->bhex", xqk_ft, xk_ft_) xqkvw = xqkv_ft out_ft = torch.zeros(B, H, E, L // 2 + 1, device=xq.device, dtype=torch.cfloat) for i, j in enumerate(self.index_q): out_ft[:, :, :, j] = xqkvw[:, :, :, i] out = torch.fft.irfft(out_ft / self.in_channels / self.out_channels, n=xq.size(-1)).permute(0, 3, 2, 1) # size = [B, L, H, E] return (out, None) class sparseKernelFT1d(nn.Module): def __init__(self, k, alpha, c=1, nl=1, initializer=None, **kwargs): super(sparseKernelFT1d, self).__init__() self.modes1 = alpha self.scale = (1 / (c * k * c * k)) self.weights1 = nn.Parameter(self.scale * torch.rand(c * k, c * k, self.modes1, dtype=torch.cfloat)) self.weights1.requires_grad = True self.k = k def compl_mul1d(self, x, weights): # (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x) return torch.einsum("bix,iox->box", x, weights) def forward(self, x): B, N, c, k = x.shape # (B, N, c, k) x = x.view(B, N, -1) x = x.permute(0, 2, 1) x_fft = torch.fft.rfft(x) # Multiply relevant Fourier modes l = min(self.modes1, N // 2 + 1) # l = N//2+1 out_ft = torch.zeros(B, c * k, N // 2 + 1, device=x.device, dtype=torch.cfloat) out_ft[:, :, :l] = self.compl_mul1d(x_fft[:, :, :l], self.weights1[:, :, :l]) x = torch.fft.irfft(out_ft, n=N) x = x.permute(0, 2, 1).view(B, N, c, k) return x # ## class MWT_CZ1d(nn.Module): def __init__(self, k=3, alpha=64, L=0, c=1, base='legendre', initializer=None, **kwargs): super(MWT_CZ1d, self).__init__() self.k = k self.L = L H0, H1, G0, G1, PHI0, PHI1 = get_filter(base, k) H0r = H0 @ PHI0 G0r = G0 @ PHI0 H1r = H1 @ PHI1 G1r = G1 @ PHI1 H0r[np.abs(H0r) < 1e-8] = 0 H1r[np.abs(H1r) < 1e-8] = 0 G0r[np.abs(G0r) < 1e-8] = 0 G1r[np.abs(G1r) < 1e-8] = 0 self.max_item = 3 self.A = sparseKernelFT1d(k, alpha, c) self.B = sparseKernelFT1d(k, alpha, c) self.C = sparseKernelFT1d(k, alpha, c) self.T0 = nn.Linear(k, k) self.register_buffer('ec_s', torch.Tensor( np.concatenate((H0.T, H1.T), axis=0))) self.register_buffer('ec_d', torch.Tensor( np.concatenate((G0.T, G1.T), axis=0))) self.register_buffer('rc_e', torch.Tensor( np.concatenate((H0r, G0r), axis=0))) self.register_buffer('rc_o', torch.Tensor( np.concatenate((H1r, G1r), axis=0))) def forward(self, x): B, N, c, k = x.shape # (B, N, k) ns = math.floor(np.log2(N)) nl = pow(2, math.ceil(np.log2(N))) extra_x = x[:, 0:nl - N, :, :] x = torch.cat([x, extra_x], 1) Ud = torch.jit.annotate(List[Tensor], []) Us = torch.jit.annotate(List[Tensor], []) # decompose for i in range(ns - self.L): # print('x shape',x.shape) d, x = self.wavelet_transform(x) Ud += [self.A(d) + self.B(x)] Us += [self.C(d)] x = self.T0(x) # coarsest scale transform # reconstruct for i in range(ns - 1 - self.L, -1, -1): x = x + Us[i] x = torch.cat((x, Ud[i]), -1) x = self.evenOdd(x) x = x[:, :N, :, :] return x def wavelet_transform(self, x): xa = torch.cat([x[:, ::2, :, :], x[:, 1::2, :, :], ], -1) d = torch.matmul(xa, self.ec_d) s = torch.matmul(xa, self.ec_s) return d, s def evenOdd(self, x): B, N, c, ich = x.shape # (B, N, c, k) assert ich == 2 * self.k x_e = torch.matmul(x, self.rc_e) x_o = torch.matmul(x, self.rc_o) x = torch.zeros(B, N * 2, c, self.k, device=x.device) x[..., ::2, :, :] = x_e x[..., 1::2, :, :] = x_o return x
36.345646
116
0.503739
2,008
13,775
3.323705
0.118028
0.01798
0.031316
0.014984
0.550195
0.488762
0.422835
0.401858
0.394966
0.35421
0
0.03286
0.335027
13,775
379
117
36.345646
0.695742
0.039637
0
0.419672
0
0
0.016315
0
0
0
0
0
0.006557
1
0.04918
false
0
0.045902
0.003279
0.144262
0.009836
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e56f87f0384ecbf57f48e8eac0641bcfd48082b7
5,636
py
Python
perrec/cbr.py
Tbabm/PerRec
1f711d70df8354156b37857719db0559876be08c
[ "MIT" ]
3
2019-07-24T12:03:24.000Z
2019-08-28T14:42:51.000Z
perrec/cbr.py
Tbabm/PerRec
1f711d70df8354156b37857719db0559876be08c
[ "MIT" ]
null
null
null
perrec/cbr.py
Tbabm/PerRec
1f711d70df8354156b37857719db0559876be08c
[ "MIT" ]
null
null
null
# encoding=utf-8

import os

import fire
import numpy as np
from scipy.sparse.csr import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.model_selection import cross_validate
from sklearn.preprocessing import normalize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

from .common.similarities import SIM_FUNCTIONS
from .common.dataset import prepare_shuffled_dataset
from .common.scorers import map_scorer, trr_scorer, nr_scorer
from .executor import BaseExecutor

SCORING = {
    'MAP': map_scorer,
    'TRR': trr_scorer,
    'NR': nr_scorer
}


def do_nothing_tokenizer(tokens):
    return tokens


class PerRecCBR(BaseEstimator):
    """CBR component for recommending permission lists

    Input: A list of used apis.
    Output: The ranked permission list of the app.
    """

    def __init__(self, sim_func="cosine"):
        if callable(sim_func):
            self.sim_func = sim_func
        else:
            self.sim_func = SIM_FUNCTIONS.get(sim_func, None)
        if not self.sim_func:
            raise ValueError("Error sim_func: " + str(sim_func))

    @staticmethod
    def build_perm_docs(perm_vectors, api_vectors):
        """Build permission profiles

        Args:
            perm_vectors (Matrix): app perm vectors
            api_vectors (Matrix): app api vectors
        """
        perm_docs = []
        # for each column of the permission vectors (i.e., each permission)
        for col in perm_vectors.T:
            # find the apps which require this permission
            if isinstance(col, csr_matrix):
                col = col.toarray().reshape(-1, )
            apps = np.where(col == 1)
            # find the api vectors of such apps
            cur_api_vectors = api_vectors[apps].toarray()
            # construct permission doc
            cur_perm_doc = cur_api_vectors.sum(axis=0)
            perm_docs.append(cur_perm_doc)
        return np.array(perm_docs)

    def fit(self, X, y):
        """Build the profiles for training permissions

        Args:
            X (List(List(API))): The api lists of the training apps.
            y (List(List(Perm))): The permission lists of all apps

        Returns:
            self object: return self
        """
        # Steps:
        # 1. build permission doc
        # 2. calculate the tfidf vector for each permission doc as the profiles of permissions
        # 3. build API CountVectorizer
        self.api_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
                                               preprocessor=None, lowercase=False)
        self.train_api_vectors_ = self.api_vectorizer_.fit_transform(X)
        self.perm_vectorizer_ = CountVectorizer(binary=True, tokenizer=do_nothing_tokenizer,
                                                preprocessor=None, lowercase=False)
        self.train_perm_vectors_ = self.perm_vectorizer_.fit_transform(y)
        self.perm_list_ = self.perm_vectorizer_.get_feature_names()

        # build permission doc
        self.perm_docs_ = self.build_perm_docs(self.train_perm_vectors_, self.train_api_vectors_)
        # idf = log(total_num / num) + 1
        self.tfidf_transformer_ = TfidfTransformer(norm="l1", use_idf=True, smooth_idf=False)
        tfidf_matrix = self.tfidf_transformer_.fit_transform(self.perm_docs_)
        self.perm_profiles_ = normalize(tfidf_matrix, norm='l2', axis=1)
        return self  # scikit-learn convention, as the docstring promises

    def transform(self, X, *fit_params):
        """Recommend permissions for new apps

        Args:
            X (List(List(API))): A list of apps for testing.

        Returns:
            Perms (List(List(Permission))): The ranked permission lists recommended for input apps
        """
        # rank the permissions
        # construct app profiles (api vectors)
        test_api_vectors = self.api_vectorizer_.transform(X)
        # calculate the similarities between API vector and permission profiles
        # test_num * perm_num
        similarities = self.sim_func(test_api_vectors, self.perm_profiles_)
        perm_scores = normalize(similarities, norm="l1", axis=1)
        # for fusion
        self.perm_scores_ = perm_scores
        sorted_perm_index = np.argsort(-1.0 * perm_scores, 1)
        # each row: perm_i, perm_j, perm_k (sorted)
        return np.take(self.perm_list_, sorted_perm_index)

    def predict(self, X):
        return self.transform(X)


class CBR(BaseExecutor):
    def __init__(self, dataset, scoring, **kwargs):
        super().__init__("CBR", dataset, scoring)
        self.sim_func = kwargs.get("sim_func", "cosine")
        self.smooth_idf = kwargs.get("smooth_idf", True)

    def get_result_file(self, data_dir):
        file_name = "_".join([self.name, self.sim_func, str(self.smooth_idf)])
        return os.path.join(data_dir, file_name + ".json")

    def construct_estimator(self):
        return PerRecCBR(sim_func=self.sim_func)

    def run(self):
        api_lists = self.dataset.extract_api_lists()
        perm_lists = self.dataset.extract_perm_lists()
        estimator = self.construct_estimator()
        scores = cross_validate(estimator, api_lists, perm_lists, scoring=self.scoring,
                                cv=10, n_jobs=-1, verbose=1, return_train_score=False)
        return scores


def main(sim_func="cosine"):
    dataset = prepare_shuffled_dataset()
    scoring = SCORING
    executor = CBR(dataset, scoring, sim_func=sim_func)
    scores = executor.run()
    print(scores['test_MAP'].mean())


if __name__ == "__main__":
    fire.Fire({
        'main': main
    })
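# --- Editor's note: a minimal usage sketch, not part of the original file.
# The API/permission lists are hypothetical toy data, and it assumes cosine
# similarity is registered in SIM_FUNCTIONS under "cosine", as the default
# sim_func argument suggests.
#
#     apis = [['android.net.Uri', 'java.net.URL'], ['android.hardware.Camera']]
#     perms = [['INTERNET'], ['CAMERA']]
#     cbr = PerRecCBR(sim_func="cosine")
#     cbr.fit(apis, perms)
#     print(cbr.predict([['java.net.URL']]))  # one ranked permission list per input app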
36.836601
98
0.649929
703
5,636
4.958748
0.271693
0.036145
0.025244
0.008032
0.104131
0.057946
0.057946
0.057946
0.057946
0.057946
0
0.004802
0.261001
5,636
152
99
37.078947
0.832173
0.225515
0
0.023256
0
0
0.022228
0
0
0
0
0
0
1
0.127907
false
0
0.139535
0.034884
0.372093
0.011628
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e57008133545852ca9c37cb6b1fc3ddad68b824d
675
py
Python
registry/migrations/0041_auto_20210505_1210.py
not-vibhu/aerobridge
2edcd5e75782663184ac57da8145427613f4ef2b
[ "Apache-2.0" ]
null
null
null
registry/migrations/0041_auto_20210505_1210.py
not-vibhu/aerobridge
2edcd5e75782663184ac57da8145427613f4ef2b
[ "Apache-2.0" ]
null
null
null
registry/migrations/0041_auto_20210505_1210.py
not-vibhu/aerobridge
2edcd5e75782663184ac57da8145427613f4ef2b
[ "Apache-2.0" ]
null
null
null
# Generated by Django 3.2 on 2021-05-05 12:10

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('registry', '0040_auto_20210216_1404'),
    ]

    operations = [
        migrations.AlterField(
            model_name='firmware',
            name='binary_file_url',
            field=models.URLField(help_text='Enter a url from where the firmware can be downloaded'),
        ),
        migrations.AlterField(
            model_name='firmware',
            name='public_key',
            field=models.TextField(help_text='Enter a SHA / Digest or public key to test used to secure the firmware'),
        ),
    ]
28.125
119
0.619259
79
675
5.164557
0.658228
0.098039
0.122549
0.142157
0.20098
0.20098
0
0
0
0
0
0.061983
0.282963
675
23
120
29.347826
0.780992
0.063704
0
0.352941
1
0
0.309524
0.036508
0
0
0
0
0
1
0
false
0
0.058824
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e57010f041e3598f7f9d0eae8d7344c471cefd8d
847
py
Python
topic_modeling.py
francescoferretto/Investigating-relationships-between-Twitter-latent-topics-and-political-orientation
c6f1ff05ac8960885f26a153d7c0c8ecf1364a6a
[ "MIT" ]
null
null
null
topic_modeling.py
francescoferretto/Investigating-relationships-between-Twitter-latent-topics-and-political-orientation
c6f1ff05ac8960885f26a153d7c0c8ecf1364a6a
[ "MIT" ]
null
null
null
topic_modeling.py
francescoferretto/Investigating-relationships-between-Twitter-latent-topics-and-political-orientation
c6f1ff05ac8960885f26a153d7c0c8ecf1364a6a
[ "MIT" ]
null
null
null
import csv
import sys
import json
import pandas as pd
import numpy as np  # needed below for np.zeros (missing in the original)
import os
import logging
import operator
import collections

# Configure the path
os.chdir('/home/emi/unipd/Sartori_CBSD/project/cbsdproject')

accounts = pd.read_excel('LabelledAccounts.xlsx', skiprows=1)


def get_party_label(votes, porcentage=0.75):
    porcentage = len(votes) * porcentage
    porcentage = int(porcentage)
    max_party, max_votes = collections.Counter(votes).most_common(1)[0]
    # the original tested max_votes == porcentage, which rejected unanimous votes;
    # at-least-threshold agreement is what the comment below describes
    if max_votes >= porcentage:
        return max_party
    else:
        return ''


# Fix a threshold for the political party labelling
# (we suggest repeating the analysis with 100% agreement of the five judges
# and with 75% agreement of the five judges)
accounts['party_threshold'] = pd.Series(np.zeros(len(accounts)))  # np.zeros() needs a shape

data = pd.read_csv('Tweets/Massimogazza_tweets.csv')
data.head()
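# --- Editor's note: a small worked example, not part of the original file,
# illustrating the threshold logic above. With five judges and porcentage=0.75,
# int(5 * 0.75) == 3, so a party needs at least 3 of the 5 votes:
#
#     get_party_label(['L', 'L', 'L', 'R', 'R'])   # -> 'L' (3 votes >= 3)
#     get_party_label(['L', 'L', 'R', 'R', 'C'])   # -> ''  (2 votes < 3)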
26.46875
82
0.744982
123
847
5.03252
0.577236
0.072698
0.042003
0.051696
0.084006
0.084006
0
0
0
0
0
0.015449
0.159386
847
31
83
27.322581
0.853933
0.237308
0
0
0
0
0.177847
0.154446
0
0
0
0
0
1
0.047619
false
0
0.380952
0
0.52381
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
e570191797fda76257f543ceca066c63d6087a58
1,410
py
Python
examples/tournament/tournament.py
gavento/orco
07e90bf87246f4577c8e3653b34474a69cc5338e
[ "MIT" ]
null
null
null
examples/tournament/tournament.py
gavento/orco
07e90bf87246f4577c8e3653b34474a69cc5338e
[ "MIT" ]
null
null
null
examples/tournament/tournament.py
gavento/orco
07e90bf87246f4577c8e3653b34474a69cc5338e
[ "MIT" ]
null
null
null
import itertools
import random

import orco


# Function that trains "players"
@orco.builder()
def train_player(config):
    # We will simulate trained players by a dictionary with a "strength" key
    return {"strength": random.randint(0, 10)}


# Build function for "games"
@orco.builder()
def play_game(config):
    player1 = train_player(config["player1"])
    player2 = train_player(config["player2"])
    yield

    # Simulation of playing a game between two players:
    # they just throw k-sided dice, where k is the strength of the player.
    # The difference of the two throws is the result.
    r1 = random.randint(0, player1.value["strength"] * 2)
    r2 = random.randint(0, player2.value["strength"] * 2)
    return r1 - r2


# Build function for a tournament, return score for each player
@orco.builder()
def play_tournament(config):
    # For evaluating a tournament, we need to know the results of games between
    # each pair of its players.
    games = [
        play_game({"player1": p1, "player2": p2})
        for (p1, p2) in itertools.product(config["players"], config["players"])
    ]
    yield

    score = {}
    for game in games:
        player1 = game.config["player1"]
        player2 = game.config["player2"]
        score.setdefault(player1, 0)
        score.setdefault(player2, 0)
        score[player1] += game.value
        score[player2] -= game.value
    return score


orco.run_cli()
26.603774
79
0.664539
188
1,410
4.946809
0.388298
0.035484
0.045161
0.03871
0
0
0
0
0
0
0
0.030247
0.226241
1,410
52
80
27.115385
0.822181
0.313475
0
0.16129
0
0
0.083507
0
0
0
0
0
0
1
0.096774
false
0
0.096774
0.032258
0.290323
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e57024f83b49dd4e2de2007ee984d24ae347e3b1
2,546
py
Python
scripts/injection_ROI_visualization.py
karimi-ali/brainrender
04be6a05fdfdd22424c4c499f4563943436faf6f
[ "BSD-3-Clause" ]
null
null
null
scripts/injection_ROI_visualization.py
karimi-ali/brainrender
04be6a05fdfdd22424c4c499f4563943436faf6f
[ "BSD-3-Clause" ]
null
null
null
scripts/injection_ROI_visualization.py
karimi-ali/brainrender
04be6a05fdfdd22424c4c499f4563943436faf6f
[ "BSD-3-Clause" ]
null
null
null
import os
from pathlib import Path

import brainrender
from brainrender import Scene, actor, Animation
from rich import color, print
from myterial import orange
from vedo import Volume, io, load, show
import numpy as np
import pandas as pd

import util

# path names and roi names
paths = util.get_paths()
roi_names = util.roi_names()

print(f"[{orange}]Running example: {Path(__file__).name}")

# Create a brainrender scene
scene = Scene(title="Injection ROIs", atlas_name='allen_mouse_10um')

# injection site meshes
mesh_names = [os.path.join(paths['data'], 'meshes', f'{roi}.obj') for roi in roi_names]
meshes = [load(cur_name) for cur_name in mesh_names]

# overlapping atlas rois
csv_names_atlas = [os.path.join(paths['data'], 'csv_acronyms', f'{roi}.csv') for roi in roi_names]
csv_atlas_acronym = [pd.read_csv(name) for name in csv_names_atlas]

colors = ['#6DB546', '#C30017', '#9D9D9C']
alpha_rois = 0.6
for cur_idx, cur_mesh in enumerate(meshes):
    # Create the injection site actors
    cur_actor = actor.Actor(cur_mesh, name=roi_names[cur_idx], color=colors[cur_idx], alpha=alpha_rois)
    scene.add(cur_actor)
    scene.add_silhouette(cur_actor)

    # Overlapping atlas
    cur_overlapping_acronyms = list(csv_atlas_acronym[cur_idx]["acronym_keepSingleChild"])
    # scene.add_brain_region(*cur_overlapping_acronyms,
    #                        alpha=0.2,
    #                        color=colors[cur_idx],
    #                        hemisphere='right')

# Render and save screen shots
screen_shot_dir = os.path.join(paths['data'], 'screen_shots_no_region')
os.makedirs(screen_shot_dir, exist_ok=True)
camera_names = list(brainrender.camera.cameras.keys())
zoom_vals = [2.0, 0.8, 1.0, 1.0, 1.0, 1.0]
for idx, c in enumerate(camera_names):
    scene.render(camera=c, zoom=zoom_vals[idx], interactive=False)
    scene.screenshot(name=os.path.join(screen_shot_dir, f'{c}_alpha_{alpha_rois}.png'))

# Animation
animate_flag = True
if animate_flag:
    anim = Animation(scene, screen_shot_dir, "ROI_inj_animation", size="6480x4200")

    # Specify camera position and zoom at some key frames;
    # each key frame defines the scene's state after n seconds have passed
    anim.add_keyframe(0, camera="top", zoom=0.3)
    anim.add_keyframe(5, camera="sagittal", zoom=1.0)
    anim.add_keyframe(9, camera="frontal", zoom=1.0)
    anim.add_keyframe(
        10,
        camera="frontal",
    )

    # Make videos
    anim.make_video(duration=10, fps=10)
36.371429
98
0.688138
376
2,546
4.460106
0.364362
0.028623
0.023852
0.026834
0.082886
0.029815
0
0
0
0
0
0.025892
0.195994
2,546
70
99
36.371429
0.793356
0.193637
0
0
0
0
0.131992
0.045142
0
0
0
0
0
1
0
false
0
0.217391
0
0.217391
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e57240c96406c5bb3b032eec708032b091e297a7
7,664
py
Python
scripts/wk/exe.py
2Shirt/WizardK
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
[ "MIT" ]
null
null
null
scripts/wk/exe.py
2Shirt/WizardK
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
[ "MIT" ]
178
2017-11-17T19:14:31.000Z
2021-12-15T07:43:29.000Z
scripts/wk/exe.py
2Shirt/WizardK
82a2e7f85c80a52f892c1553e7a45ec0174e7bc6
[ "MIT" ]
1
2017-11-17T19:32:36.000Z
2017-11-17T19:32:36.000Z
"""WizardKit: Execution functions""" #vim: sts=2 sw=2 ts=2 import json import logging import os import re import subprocess import time from threading import Thread from queue import Queue, Empty import psutil # STATIC VARIABLES LOG = logging.getLogger(__name__) # Classes class NonBlockingStreamReader(): """Class to allow non-blocking reads from a stream.""" # pylint: disable=too-few-public-methods # Credits: ## https://gist.github.com/EyalAr/7915597 ## https://stackoverflow.com/a/4896288 def __init__(self, stream): self.stream = stream self.queue = Queue() def populate_queue(stream, queue): """Collect lines from stream and put them in queue.""" while not stream.closed: try: line = stream.read(1) except ValueError: # Assuming the stream was closed line = None if line: queue.put(line) self.thread = start_thread( populate_queue, args=(self.stream, self.queue), ) def stop(self): """Stop reading from input stream.""" self.stream.close() def read(self, timeout=None): """Read from queue if possible, returns item from queue.""" try: return self.queue.get(block=timeout is not None, timeout=timeout) except Empty: return None def save_to_file(self, proc, out_path): """Continuously save output to file while proc is running.""" LOG.debug('Saving process %s output to %s', proc, out_path) while proc.poll() is None: out = b'' out_bytes = b'' while out is not None: out = self.read(0.1) if out: out_bytes += out with open(out_path, 'a', encoding='utf-8') as _f: _f.write(out_bytes.decode('utf-8', errors='ignore')) # Close stream to prevent 100% CPU usage self.stream.close() # Functions def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs): """Build kwargs for use by subprocess functions, returns dict. Specifically subprocess.run() and subprocess.Popen(). NOTE: If no encoding specified then UTF-8 will be used. """ LOG.debug( 'cmd: %s, minimized: %s, pipe: %s, shell: %s, kwargs: %s', cmd, minimized, pipe, shell, kwargs, ) cmd_kwargs = { 'args': cmd, 'shell': shell, } # Strip sudo if appropriate if cmd[0] == 'sudo': if os.name == 'posix' and os.geteuid() == 0: # pylint: disable=no-member cmd.pop(0) # Add additional kwargs if applicable for key in 'check cwd encoding errors stderr stdin stdout'.split(): if key in kwargs: cmd_kwargs[key] = kwargs[key] # Default to UTF-8 encoding if not ('encoding' in cmd_kwargs or 'errors' in cmd_kwargs): cmd_kwargs['encoding'] = 'utf-8' cmd_kwargs['errors'] = 'ignore' # Start minimized if minimized: startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = 6 cmd_kwargs['startupinfo'] = startupinfo # Pipe output if pipe: cmd_kwargs['stderr'] = subprocess.PIPE cmd_kwargs['stdout'] = subprocess.PIPE # Done LOG.debug('cmd_kwargs: %s', cmd_kwargs) return cmd_kwargs def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'): """Capture JSON content from cmd output, returns dict. If the data can't be decoded then either an exception is raised or an empty dict is returned depending on errors. 
""" LOG.debug('Loading JSON data from cmd: %s', cmd) json_data = {} try: proc = run_program(cmd, check=check, encoding=encoding, errors=errors) json_data = json.loads(proc.stdout) except (subprocess.CalledProcessError, json.decoder.JSONDecodeError): if errors != 'ignore': raise return json_data def get_procs(name, exact=True, try_again=True): """Get process object(s) based on name, returns list of proc objects.""" LOG.debug('name: %s, exact: %s', name, exact) processes = [] regex = f'^{name}$' if exact else name # Iterate over all processes for proc in psutil.process_iter(): if re.search(regex, proc.name(), re.IGNORECASE): processes.append(proc) # Try again? if not processes and try_again: time.sleep(1) processes = get_procs(name, exact, try_again=False) # Done return processes def kill_procs(name, exact=True, force=False, timeout=30): """Kill all processes matching name (case-insensitively). NOTE: Under Posix systems this will send SIGINT to allow processes to gracefully exit. If force is True then it will wait until timeout specified and then send SIGKILL to any processes still alive. """ LOG.debug( 'name: %s, exact: %s, force: %s, timeout: %s', name, exact, force, timeout, ) target_procs = get_procs(name, exact=exact) for proc in target_procs: proc.terminate() # Force kill if necesary if force: results = psutil.wait_procs(target_procs, timeout=timeout) for proc in results[1]: # Alive processes proc.kill() def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs): """Run program and return a subprocess.Popen object.""" LOG.debug( 'cmd: %s, minimized: %s, pipe: %s, shell: %s', cmd, minimized, pipe, shell, ) LOG.debug('kwargs: %s', kwargs) cmd_kwargs = build_cmd_kwargs( cmd, minimized=minimized, pipe=pipe, shell=shell, **kwargs) try: # pylint: disable=consider-using-with proc = subprocess.Popen(**cmd_kwargs) except FileNotFoundError: LOG.error('Command not found: %s', cmd) raise LOG.debug('proc: %s', proc) # Done return proc def run_program(cmd, check=True, pipe=True, shell=False, **kwargs): # pylint: disable=subprocess-run-check """Run program and return a subprocess.CompletedProcess object.""" LOG.debug( 'cmd: %s, check: %s, pipe: %s, shell: %s', cmd, check, pipe, shell, ) LOG.debug('kwargs: %s', kwargs) cmd_kwargs = build_cmd_kwargs( cmd, check=check, pipe=pipe, shell=shell, **kwargs) try: proc = subprocess.run(**cmd_kwargs) except FileNotFoundError: LOG.error('Command not found: %s', cmd) raise LOG.debug('proc: %s', proc) # Done return proc def start_thread(function, args=None, daemon=True): """Run function as thread in background, returns Thread object.""" LOG.debug( 'Starting background thread for function: %s, args: %s, daemon: %s', function, args, daemon, ) args = args if args else [] thread = Thread(target=function, args=args, daemon=daemon) thread.start() return thread def stop_process(proc, graceful=True): """Stop process. NOTES: proc should be a subprocess.Popen obj. If graceful is True then a SIGTERM is sent before SIGKILL. 
""" # Graceful exit if graceful: if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member run_program(['sudo', 'kill', str(proc.pid)], check=False) else: proc.terminate() time.sleep(2) # Force exit if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member run_program(['sudo', 'kill', '-9', str(proc.pid)], check=False) else: proc.kill() def wait_for_procs(name, exact=True, timeout=None): """Wait for all process matching name.""" LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout) target_procs = get_procs(name, exact=exact) procs = psutil.wait_procs(target_procs, timeout=timeout) # Raise exception if necessary if procs[1]: # Alive processes raise psutil.TimeoutExpired(name=name, seconds=timeout) if __name__ == '__main__': print("This file is not meant to be called directly.")
26.797203
78
0.662578
1,059
7,664
4.716714
0.245515
0.034234
0.016817
0.013614
0.226026
0.191391
0.153554
0.126326
0.11031
0.11031
0
0.006956
0.212161
7,664
285
79
26.891228
0.820305
0.255219
0
0.261628
0
0.005814
0.125022
0
0
0
0
0
0
1
0.081395
false
0
0.052326
0
0.186047
0.005814
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e572bb9c99a87131d6b55a9f4ae160708893aa34
808
py
Python
ch03/question_5.py
dhrey112/IntroToPython_Deitel
7ecbff931d05c467ad64da0bd829f79fedf729ba
[ "MIT" ]
null
null
null
ch03/question_5.py
dhrey112/IntroToPython_Deitel
7ecbff931d05c467ad64da0bd829f79fedf729ba
[ "MIT" ]
null
null
null
ch03/question_5.py
dhrey112/IntroToPython_Deitel
7ecbff931d05c467ad64da0bd829f79fedf729ba
[ "MIT" ]
null
null
null
# TODO: 5 (if…else Statements) Reimplement the script of Fig. 2.1 using three
# if…else statements rather than six if statements. [Hint: For example,
# think of == and != as “opposite” tests.]

print('Enter two integers and I will tell you',
      'the relationships they satisfy.')

# read first integer
number1 = int(input('Enter first integer: '))

# read second integer
number2 = int(input('Enter second integer: '))

if number1 == number2:
    print(number1, 'is equal to', number2)
else:
    print(number1, 'is not equal to', number2)

if number1 < number2:
    print(number1, 'is less than', number2)
else:
    # the opposite of < is >= (the original printed 'is greater than' here,
    # which is wrong when the numbers are equal)
    print(number1, 'is greater than or equal to', number2)

if number1 <= number2:
    print(number1, 'is less than or equal to', number2)
else:
    # the opposite of <= is >
    print(number1, 'is greater than', number2)
27.862069
76
0.690594
118
808
4.779661
0.432203
0.12766
0.148936
0.111702
0.441489
0.409574
0.356383
0.159574
0.159574
0
0
0.035222
0.191832
808
28
77
28.857143
0.819296
0.279703
0
0.1875
0
0
0.375
0
0
0
0
0.035714
0
1
0
false
0
0
0
0
0.4375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
1
0
2
e5738670ee63aa457dda6f798c0a759c27cefdc5
7,815
py
Python
src/libnbnotify/plugins/bus.py
webnull/nbnotify
54f7d0db0656053680466537aeba35f348147830
[ "Python-2.0", "OLDAP-2.7" ]
1
2015-12-03T06:41:23.000Z
2015-12-03T06:41:23.000Z
src/libnbnotify/plugins/bus.py
webnull/nbnotify
54f7d0db0656053680466537aeba35f348147830
[ "Python-2.0", "OLDAP-2.7" ]
2
2019-03-02T08:02:34.000Z
2019-03-02T08:02:47.000Z
src/libnbnotify/plugins/bus.py
webnull/nbnotify
54f7d0db0656053680466537aeba35f348147830
[ "Python-2.0", "OLDAP-2.7" ]
null
null
null
#-*- coding: utf-8 -*-
import libnbnotify
import socket
import ssl
import json
import asyncore
import re
import sys
from threading import Thread
import string
import random
import os
import BaseHTTPServer, SimpleHTTPServer

PluginInfo = {'Requirements' : { 'OS' : 'All'},
              'API': 2,
              'Authors': 'webnull',
              'domain': '',
              'type': 'extension',
              'isPlugin': False,
              'Description': 'Remote control through sockets'}

app = ""

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

class SocketInterface(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """ Very simple socket interface """

    def log_message(self, format, *args):
        return False

    def ping(self, data=''):
        return "pong"

    def getConfigAndEntries(self, data=''):
        """ Returns all configuration variables and links """
        return [self.app.configGetSection('links'), self.app.Config.Config]

    def getAllEntries(self, data=''):
        """ Returns all links from database """
        return self.app.configGetSection('links')

    def notifyNewData(self, data):
        """ Create new notification from data """
        content = data['data']
        title = data['title']
        icon = data['icon']
        pageID = data['pageid']
        self.app.notifyNewData(content, title, icon, pageID)

    def configSetKey(self, data):
        """ Set configuration key """
        Section = data['section']
        Option = data['option']
        Value = data['value']
        return self.app.configSetKey(Section, Option, Value)

    def saveConfiguration(self, data=''):
        """ Force save configuration to file """
        return self.app.saveConfiguration()

    def configGetSection(self, data):
        """ Returns section as dictionary

            Args:
               Section - name of section of ini file ([section] header)

            Returns:
               Dictionary - on success
               False - on failure
        """
        return self.app.configGetSection(data)

    def configGetKey(self, data):
        """ Returns value of Section->Value configuration variable

            Args:
               Section - name of section of ini file ([section] header)
               Key - variable name

            Returns:
               False - when section or key does not exist
               False - when value of variable is "false" or "False" or just False
               string value - value of variable
        """
        Section = data['section']
        Key = data['key']
        return self.app.configGetKey(Section, Key)

    def addPage(self, link):
        """ Add page to database, return True if added successfully """
        return self.app.addPage(link)

    def setType(self, data):
        """ Set specified extension to handle specified link

            Return md5 hash of link on success
        """
        Link = data['link']
        Type = data['type']
        return self.app.setType(Link, Type)

    def removePage(self, pageID):
        """ Remove page with specified pageID """
        return self.app.removePage(pageID)

    def loadCommentsFromDB(self, data=''):
        """ Reload comments cache from SQLite database """
        return self.app.loadCommentsFromDB()

    def configCheckChanges(self, data=''):
        """ Reload configuration if changed """
        return self.app.configCheckChanges()

    def togglePlugin(self, data):
        """ Activate or deactivate plugin

            Plugin - name of plugin
            Toggle - True or False
        """
        Plugin = data['name']
        Toggle = data['toggle']
        if Toggle == True:
            return self.app.togglePlugin(Plugin, 'activate')
        return self.app.togglePlugin(Plugin, 'deactivate')

    def do_POST(self):
        contentLen = int(self.headers.getheader('content-length'))
        postBody = self.rfile.read(contentLen)

        # response
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(self.handle_read(postBody))

    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write("Hello world.")

    def handle_read(self, data):
        global app
        self.app = app

        if data:
            if data == "ping":
                return "pong"

            try:
                #if t == False:
                #    return "Error: Cannot parse HTTP request, "+str(t)+", "+str(jsonData)

                if data == False:
                    # the original appended str(jsonData) here, a name that is never defined
                    return "Error: Cannot parse HTTP request, empty request"

                text = json.loads(data)

                if text['function'] == "handle_read" or text['function'] == "__init__" or text['function'] == "httpRequestParser":
                    return "Error: Function not available"

                if hasattr(self, text['function']):
                    # NOTE: dispatches by name taken from the request body;
                    # exec() on client-supplied input is unsafe outside localhost
                    exec("r = str(self."+text['function']+"(text['data']))")
                else:
                    r = "Error: Function not found"

                self.app.Logging.output("Socket::GET="+str(text['function'])+"&addr="+str(self.client_address[0]), "debug", False)

                # send response
                return json.dumps({'response': r})
            except Exception as e:
                self.app.Logging.output("SubgetSocketInterface: Cannot parse json data, is the client bugged? "+str(e), "warning", True)
                return "Error: "+str(e)

class SocketServer:
    """ Very simple connections listener """

    host = "127.0.0.1"
    port = 9954

    def __init__(self, host, port):
        self.host = host
        self.port = port

    def serve(self):
        httpd = BaseHTTPServer.HTTPServer((self.host, self.port), SocketInterface)
        httpd.serve_forever()

class PluginMain(libnbnotify.Plugin):
    name = "bus"
    host = "127.0.0.1"
    port = 9954
    bus = ""

    def _pluginInit(self):
        #self.initSSL()
        global app
        app = self.app

        self.host = str(self.app.Config.getKey("bus_socket", "host", "127.0.0.1"))

        if self.app.Config.getKey("bus_socket", "port") == False:
            self.app.Config.setKey("bus_socket", "port", 9954)
        else:
            try:
                self.port = int(self.app.Config.getKey("bus_socket", "port"))
            except ValueError:
                self.port = 9954
                self.app.Config.setKey("bus_socket", "port", 9954)

        if self.app.cli == False:
            self.startServer()
            return True
        else:
            return False

    #def initSSL(self):
    #    path = os.path.expanduser("~/.nbnotify/ssl")
    #    # create ssl directory
    #    if not os.path.isdir(path):
    #        os.mkdir(path)
    #    if not os.path.isfile(path+"/private.pem"):
    #        passwd = id_generator(size=32)
    #        self.app.Logging.output("Cannot find SSL cert, creating new one...", "debug", True)
    #        os.system("openssl genrsa -out "+path+"/private.pem 1024")
    #        os.system("openssl rsa -in "+path+"/private.pem -pubout > "+path+"/public.pem")

    def startServer(self):
        try:
            self.app.Logging.output("Socket server is running on "+str(self.host)+":"+str(self.port), "debug", False)
            self.bus = SocketServer(self.host, self.port)
            self.thread = Thread(target=self.bus.serve)
            self.thread.setDaemon(True)
            self.thread.start()
        except Exception as e:
            self.app.Logging.output("Only one instance of nbnotify is allowed, "+str(e), "debug", False)
            sys.exit(0)
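# --- Editor's note: an illustrative request, not part of the original file.
# handle_read() above expects a JSON body of the form
#     {"function": "<method name>", "data": <argument>}
# so a Python 2 client could call the ping endpoint like this, assuming the
# default host/port from PluginMain:
#
#     import json, urllib2
#     body = json.dumps({'function': 'ping', 'data': ''})
#     print(urllib2.urlopen('http://127.0.0.1:9954/', body).read())
#     # -> {"response": "pong"}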
29.490566
183
0.572105
863
7,815
5.144844
0.290846
0.044144
0.038063
0.022523
0.179279
0.136036
0.12973
0.09009
0.056757
0.056757
0
0.010277
0.302751
7,815
264
184
29.602273
0.804551
0.209853
0
0.191176
0
0
0.134382
0.003761
0
0
0
0
0
1
0.169118
false
0
0.088235
0.022059
0.492647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e574327e0656040a8425a4febb6293a932d17cd0
3,738
py
Python
main.py
Sujay-Paul/Clara-Music-Bot-Telegram
deb4623185e2b6d09f55e65c4e738c49e22ee1dc
[ "MIT" ]
1
2022-01-11T16:43:57.000Z
2022-01-11T16:43:57.000Z
main.py
Sujay-Paul/Clara-Music-Bot-Telegram
deb4623185e2b6d09f55e65c4e738c49e22ee1dc
[ "MIT" ]
1
2021-10-01T17:01:48.000Z
2021-10-01T17:01:48.000Z
main.py
Sujay-Paul/Clara-Music-Bot-Telegram
deb4623185e2b6d09f55e65c4e738c49e22ee1dc
[ "MIT" ]
1
2021-10-01T16:59:49.000Z
2021-10-01T16:59:49.000Z
from pyrogram import Client, filters
from pyrogram.types import (
    InlineKeyboardButton,
    InlineKeyboardMarkup
)
import youtube_dl
from youtube_search import YoutubeSearch
import requests
import json
import os
import time  # needed for the retry sleep below

with open('./config.json', 'r') as config:
    data = json.load(config)
    bot_token = data['token']
    api_id = data['api_id']
    api_hash = data['api_hash']

bot = Client(
    'Clara',
    bot_token=bot_token,
    api_id=api_id,
    api_hash=api_hash
)


# Convert hh:mm:ss to seconds
def time_to_seconds(time_str):  # renamed from 'time' to avoid shadowing the time module
    stringt = str(time_str)
    return sum(int(x) * 60 ** i for i, x in enumerate(reversed(stringt.split(':'))))


@bot.on_message(filters.command(['start']))
def start(client, message):
    help_text = f'👋 Hello @{message.from_user.username}\n I\'m Clara, developed by Shambo, I can download songs from YouTube. Type /a song name\n e.g - `/a tokyo drift`'
    message.reply_text(
        text=help_text,
        quote=False,
        reply_markup=InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton('Github', url='https://github.com/typhonshambo'),
                ]
            ]
        )
    )


@bot.on_message(filters.command(['a']))
def a(client, message):
    query = ''
    for i in message.command[1:]:
        query += ' ' + str(i)
    print(query)
    m = message.reply('🔎 Searching the song...')
    ydl_opts = {"format": "bestaudio[ext=m4a]"}
    try:
        results = []
        count = 0
        while len(results) == 0 and count < 6:
            if count > 0:
                time.sleep(1)  # the original called os.times.sleep(1), which does not exist
            results = YoutubeSearch(query, max_results=1).to_dict()
            count += 1
        try:
            link = f"https://youtube.com{results[0]['url_suffix']}"
            # print(results)
            title = results[0]["title"]
            thumbnail = results[0]["thumbnails"][0]
            duration = results[0]["duration"]

            # UNCOMMENT THIS IF YOU WANT A LIMIT ON DURATION. CHANGE 1800 TO YOUR OWN
            # PREFERRED DURATION AND EDIT THE MESSAGE (30 minutes cap) LIMIT IN SECONDS
            if time_to_seconds(duration) >= 1800:  # duration limit
                m.edit("Exceeded video duration limit : 30 mins")
                return

            views = results[0]["views"]
            thumb_name = f'thumb{message.message_id}.jpg'
            thumb = requests.get(thumbnail, allow_redirects=True)
            open(thumb_name, 'wb').write(thumb.content)
        except Exception as e:
            print(e)
            m.edit('Found nothing. Try changing the spelling a little.')
            return
    except Exception as e:
        m.edit(
            "✖️ Found Nothing. Sorry.\n\nTry another keyword or maybe spell it properly."
        )
        print(str(e))
        return

    m.edit("⏬ Downloading.")
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            info_dict = ydl.extract_info(link, download=False)
            audio_file = ydl.prepare_filename(info_dict)
            ydl.process_info(info_dict)
        rep = f'🎧 **Title**: [{title[:35]}]({link})\n⏳ **Duration**: `{duration}`\n👁‍🗨 **Views**: `{views}`'
        secmul, dur, dur_arr = 1, 0, duration.split(':')
        for i in range(len(dur_arr) - 1, -1, -1):
            dur += (int(dur_arr[i]) * secmul)
            secmul *= 60
        message.reply_audio(audio_file, caption=rep, parse_mode='md', quote=False,
                            title=title, duration=dur, thumb=thumb_name)
        m.delete()
    except Exception as e:
        m.edit('❌ Error')
        print(e)

    try:
        os.remove(audio_file)
        os.remove(thumb_name)
    except Exception as e:
        print(e)


bot.run()
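# --- Editor's note: a small worked example, not part of the original file,
# for the hh:mm:ss conversion above:
#
#     time_to_seconds('4:05')      # -> 5 + 4 * 60             = 245
#     time_to_seconds('1:02:03')   # -> 3 + 2 * 60 + 1 * 3600  = 3723
#
# so the 30-minute cap in the /a handler corresponds to 1800 seconds.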
31.948718
169
0.581594
480
3,738
4.44375
0.391667
0.022504
0.03188
0.033755
0.108767
0.084388
0.040319
0.040319
0.040319
0
0
0.014667
0.288657
3,738
116
170
32.224138
0.783377
0.069288
0
0.142857
0
0
0.15985
0.024482
0
0
0
0
0
1
0.030612
false
0
0.071429
0
0.142857
0.05102
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e575ae4d7ce1c8acba084bc5319b860ff622e79d
12,371
py
Python
main.py
dzinghan/Bouncing-Ball-Simulation
b476af0df40cdd76a9d1256d95de1393748e9edc
[ "MIT" ]
null
null
null
main.py
dzinghan/Bouncing-Ball-Simulation
b476af0df40cdd76a9d1256d95de1393748e9edc
[ "MIT" ]
null
null
null
main.py
dzinghan/Bouncing-Ball-Simulation
b476af0df40cdd76a9d1256d95de1393748e9edc
[ "MIT" ]
null
null
null
'''
Bouncing Ball Simulation

This is an implementation of a bouncing ball simulation using mainly the Tkinter
library in Python. It includes physics and mechanics-related concepts such as
gravity, air resistance, and collision.

Before the start of the simulation, the program prompts the user to enter a value
for gravity and air density. If you do not want to enter a value, please click on
cancel or the window's exit button and the default value is going to be applied
(9.8 m/s^2 for gravity and 1.225 kg/m^3 for air density). If a vacuum setting is
preferred, please enter 0 for both windows.

by Jing Han Sun
Updated September 21, 2020
'''

import tkinter as tk
from tkinter import simpledialog
import random
import math
import sys


class Visual(tk.Tk):
    '''This is the main class that will run the simulation'''

    # define width and height for window
    HEIGHT = 500
    WIDTH = 500

    # define a list of colors for the balls
    colors = ['#FF4325', '#E72020',                                   # red
              '#FF9333',                                              # orange
              '#FEFA5F',                                              # yellow
              '#89F45E', '#9DFFA7', '#278A2A',                        # green
              '#6A8EFF', '#A8E5F9', '#1FFBF8', '#3253F4', '#2A438B',  # blue
              '#67419E', '#C280FF', '#E12FE1', '#F1BFFC',             # purple
              '#FCBFE9', '#FC22A0'                                    # pink
              ]

    def __init__(self, argv):
        super().__init__()

        # create canvas
        self.canvas = tk.Canvas(self, width=self.WIDTH, height=self.HEIGHT, bg='white')
        self.canvas.pack()
        self.update()

        # window title
        self.title('Bouncing Balls')

        # add label
        self.label = tk.Label(self, text='Welcome!')
        self.label.pack()

        # add quit button; pass the method itself (the original used
        # command=self.quit(), which calls quit immediately and binds None)
        self.button = tk.Button(self, text="Quit", fg='red', command=self.quit)
        self.button.configure(width=10, activebackground="#33B5E5", relief=tk.FLAT)
        # self.button_window = self.canvas.create_window(10, 10, anchor=tk.NW, window=self.button)
        self.button.pack()
        self.update()

        # create dictionary to store info about circles (radius, dir_x, dir_y)
        self.circles_id = {}

        # ask the user to enter a value for gravity
        gravity = simpledialog.askfloat("Input", "Please enter a value for gravity (e.g.: 9.8)")
        if gravity is None:
            # use Earth's gravitational constant if no value is entered
            gravity = 9.8

        air_density = simpledialog.askfloat("Input", "Please enter a value for air density (e.g.: 1.225)")
        if air_density is None:
            # use the air density at STP if no value is entered
            air_density = 1.225

        for i in range(6):
            # set up a random radius
            radius = random.randint(20, 30)

            # set up a random initial center for each circle
            cx = random.randint(radius + 10, self.WIDTH - radius - 10)
            cy = random.randint(radius + 10, self.HEIGHT - radius - 10)

            # set up a random initial direction for each circle
            dir_x = random.randint(-10, 10)
            dir_y = random.randint(-10, 10)

            # create the circle
            ids = self.canvas.create_oval(cx - radius, cy - radius, cx + radius, cy + radius,
                                          fill=random.choice(self.colors), outline='black')

            # fill each list for each ball's characteristics
            # circles_id = {ids: [radius, dir_x, dir_y]}
            self.circles_id[ids] = [radius, dir_x, dir_y]

        # boolean that returns true if 2 balls overlap
        self.overlaps = False

        # actual animation
        while True:
            self.move_circles()
            # if it hits a wall
            self.bounce()
            self.collision()
            self.gravity(gravity)
            self.air_resistance(air_density)

    def center(self, circle):
        '''Get the center coordinates of a given ball'''
        x0, y0, x1, y1 = self.canvas.coords(circle)
        x = (x0 + x1) / 2
        y = (y0 + y1) / 2
        return x, y

    def distance(self, circle1, circle2):
        '''Get the distance between the centers of 2 given balls'''
        x1, y1 = self.center(circle1)
        x2, y2 = self.center(circle2)
        return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

    def theta(self, x, y):
        '''Get the angle in radians (between 0 and 2pi) of a ball's movement
        using its x and y directions'''
        # first and fourth quadrant
        if x > 0:
            if y > 0:
                return math.atan(y / x)
            else:
                return math.atan(y / x) + 2 * math.pi
        # second and third quadrant
        elif x < 0:
            return math.atan(y / x) + math.pi
        # x = 0 is undefined for arctan
        else:
            if y > 0:
                return math.pi / 2
            else:
                return 3 * math.pi / 2

    def overlap(self):
        '''Return True if 2 balls overlap in the canvas'''
        for circle1 in self.circles_id:
            for circle2 in self.circles_id:
                if circle1 != circle2 and \
                        self.distance(circle1, circle2) <= \
                        (self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]):
                    self.overlaps = True
        return self.overlaps

    def move_circles(self):
        '''Movement of the balls in the frame using the generated direction for each ball'''
        for i in self.circles_id:
            dir_x = self.circles_id.get(i)[1]
            dir_y = self.circles_id.get(i)[2]
            self.canvas.move(i, dir_x, dir_y)
            self.canvas.update()

    def bounce(self):
        '''When a ball hits one of the 4 borders of the window, it bounces off
        according to its initial hit angle'''
        # x and y directions for a given ball
        for i in self.circles_id:
            dir_x = self.circles_id.get(i)[1]
            dir_y = self.circles_id.get(i)[2]

            # retrieve the initial coordinates of the ball
            x0, y0, x1, y1 = self.canvas.coords(i)

            # if it hits the left or right wall, reverse the x direction
            if x0 <= 10 or x1 >= self.WIDTH - 10:
                dir_x = -dir_x
                # update the x direction in the direction list to continue moving
                self.circles_id.get(i)[1] = dir_x
                self.canvas.move(i, dir_x, dir_y)
                self.canvas.update()

            # if it hits the top or bottom wall, reverse the y direction
            if y0 <= 10 or y1 >= self.HEIGHT - 10:
                dir_y = -dir_y
                # update the y direction in the direction list to continue moving
                self.circles_id.get(i)[2] = dir_y
                self.canvas.move(i, dir_x, dir_y)
                self.canvas.update()

    def collision(self):
        '''Check for collisions between 2 balls in the canvas.
        When 2 balls collide, they will bounce away as an elastic collision
        while conserving the momentum of the system involved'''
        for circle1 in self.circles_id:
            for circle2 in self.circles_id:
                # check if the distance between 2 distinct balls is smaller than
                # the sum of their radii; if yes, it means collision
                # (give a bit of slack to avoid a bug when overlapping)
                if -12 < self.distance(circle1, circle2) - \
                        (self.circles_id.get(circle1)[0] + self.circles_id.get(circle2)[0]) <= 0 \
                        and circle1 != circle2:
                    # define initial x and y directions
                    x1 = self.circles_id.get(circle1)[1]
                    y1 = self.circles_id.get(circle1)[2]
                    x2 = self.circles_id.get(circle2)[1]
                    y2 = self.circles_id.get(circle2)[2]

                    # assume each ball weighs its radius squared with density pi^-1
                    m1 = (self.circles_id.get(circle1)[0]) ** 2
                    m2 = (self.circles_id.get(circle2)[0]) ** 2

                    # define initial speeds using the x and y directions
                    v1 = math.sqrt(x1 ** 2 + y1 ** 2)
                    v2 = math.sqrt(x2 ** 2 + y2 ** 2)

                    # define initial movement angles
                    theta1 = self.theta(x1, y1)
                    theta2 = self.theta(x2, y2)

                    # define the contact angle of the balls right before collision
                    phi = theta2 - theta1

                    # pi = pf (conservation of momentum)
                    # calculate the final x and y velocities after the collision
                    # source for the formula: https://en.wikipedia.org/wiki/Elastic_collision
                    x1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
                        * (math.cos(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.cos(phi + math.pi / 2)
                    y1 = ((v1 * math.cos(theta1 - phi) * (m1 - m2)) + 2 * m2 * v2 * math.cos(theta2 - phi)) \
                        * (math.sin(phi) / (m1 + m2)) + v1 * math.sin(theta1 - phi) * math.sin(phi + math.pi / 2)
                    x2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
                        * (math.cos(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.cos(phi + math.pi / 2)
                    y2 = ((v2 * math.cos(theta2 - phi) * (m2 - m1)) + 2 * m1 * v1 * math.cos(theta1 - phi)) \
                        * (math.sin(phi) / (m1 + m2)) + v2 * math.sin(theta2 - phi) * math.sin(phi + math.pi / 2)

                    # update the circles dictionary so they continue moving after the collision
                    self.circles_id.get(circle1)[1] = x1
                    self.circles_id.get(circle1)[2] = y1
                    self.circles_id.get(circle2)[1] = x2
                    self.circles_id.get(circle2)[2] = y2

                    self.canvas.move(circle1, x1, y1)
                    self.canvas.move(circle2, x2, y2)
                    self.canvas.update()

                    # avoid pushing the ball out of the canvas when the collision
                    # happens near the canvas border
                    self.bounce()

    def gravity(self, a):
        '''Adds some gravity to the balls which attracts them to the ground'''
        for i in self.circles_id:
            vy = self.circles_id.get(i)[2]
            # kinematic equation (vf = vi + a * t) to apply the acceleration to the velocity
            vy = vy + a / 5
            # update the y velocity after applying gravity
            self.circles_id.get(i)[2] = vy
            # avoid pushing the ball out of the canvas near the canvas border
            self.bounce()

    def air_resistance(self, air_density):
        '''Adds some air resistance to the balls'''
        for i in self.circles_id:
            vx = self.circles_id.get(i)[1]
            vy = self.circles_id.get(i)[2]
            m = (self.circles_id.get(i)[0]) ** 2 / 1000
            cd = 1.05  # drag coefficient of a cube
            area = (self.circles_id.get(i)[0] / 1000) ** 2 * math.pi

            # calculate the air resistance (magnitude only; the sign of the
            # velocity is not taken into account in this simulation)
            # source for the formula: https://www.softschools.com/formulas/physics/air_resistance_formula/85/
            fx = (air_density * cd * area * vx ** 2) / 2
            fy = (air_density * cd * area * vy ** 2) / 2

            # calculate the acceleration
            ax = fx / m
            ay = fy / m

            # kinematic equation (vf = vi + a * t) to apply the acceleration to the velocity
            vx = vx + ax / 5
            vy = vy + ay / 5

            # update the x and y velocities after applying air resistance
            self.circles_id.get(i)[1] = vx
            self.circles_id.get(i)[2] = vy

            # avoid pushing the ball out of the canvas near the canvas border
            self.bounce()

    def drag(self):
        # pass a callable that accepts the event argument (the original bound
        # self.move_circles(), which calls the method once and binds None)
        self.canvas.bind('<B1-Motion>', lambda event: self.move_circles())


if __name__ == '__main__':
    Visual(sys.argv[1:]).mainloop()
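# --- Editor's note: a quick sanity check, not part of the original file, for the
# elastic-collision formula used in collision(). For a head-on hit (phi == 0,
# theta1 == theta2 == 0) the expressions reduce to the textbook 1-D result:
#
#     v1' = (v1 * (m1 - m2) + 2 * m2 * v2) / (m1 + m2)
#     v2' = (v2 * (m2 - m1) + 2 * m1 * v1) / (m1 + m2)
#
# and momentum is conserved: m1 * v1' + m2 * v2' == m1 * v1 + m2 * v2.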
38.780564
120
0.547167
1,660
12,371
4.019277
0.219277
0.052608
0.074041
0.067146
0.384742
0.346673
0.293615
0.279227
0.235911
0.218525
0
0.045098
0.351144
12,371
318
121
38.902516
0.786097
0.325681
0
0.209877
0
0
0.035906
0
0
0
0
0
0
1
0.067901
false
0
0.030864
0
0.17284
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e575dbc2852de3f51f4fc99d4e2297f4d5034e48
1,935
py
Python
examples/validation/core/06_vuetify_components.py
Kitware/trame
41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f
[ "Apache-2.0" ]
42
2021-09-24T22:10:32.000Z
2022-03-30T19:39:25.000Z
examples/validation/core/06_vuetify_components.py
Kitware/trame
41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f
[ "Apache-2.0" ]
31
2021-10-01T21:19:56.000Z
2022-03-04T00:14:28.000Z
examples/validation/core/06_vuetify_components.py
Kitware/trame
41c4d62e7a6f5dba41fd9305b314c87fa8ed7b6f
[ "Apache-2.0" ]
7
2021-11-17T16:12:06.000Z
2022-03-26T21:08:40.000Z
from trame.app import get_server
from trame.widgets import vtk, trame, vuetify
from trame.ui.vuetify import SinglePageLayout

# -----------------------------------------------------------------------------
# Trame setup
# -----------------------------------------------------------------------------

server = get_server()
state, ctrl = server.state, server.controller


def reset_resolution():
    state.resolution = 6


# -----------------------------------------------------------------------------
# UI setup
# -----------------------------------------------------------------------------

layout = SinglePageLayout(server)

with layout:
    # Validate client life cycle
    trame.LifeCycleMonitor(events=("['created']",))

    layout.icon.click = ctrl.reset_camera
    layout.title.set_text("Cone")
    layout.toolbar.dense = True

    # Toolbar
    with layout.toolbar as toolbar:
        vuetify.VSpacer()
        vuetify.VSlider(
            hide_details=True,
            v_model=("resolution", 6),
            max=60,
            min=3,
            step=1,
            style="max-width: 300px;",
        )
        vuetify.VSwitch(
            hide_details=True,
            v_model=("$vuetify.theme.dark",),
        )
        with vuetify.VBtn(icon=True, click=reset_resolution):
            vuetify.VIcon("mdi-undo")

    with layout.content:
        with vuetify.VContainer(fluid=True, classes="pa-0 fill-height"):
            with vtk.VtkView() as view:
                ctrl.reset_camera = view.reset_camera
                with vtk.VtkGeometryRepresentation():
                    vtk.VtkAlgorithm(
                        vtkClass="vtkConeSource", state=("{ resolution }",)
                    )

# -----------------------------------------------------------------------------
# start server
# -----------------------------------------------------------------------------

if __name__ == "__main__":
    server.start()
29.769231
79
0.447028
155
1,935
5.451613
0.509677
0.031953
0.035503
0.03787
0.049704
0
0
0
0
0
0
0.006702
0.228941
1,935
64
80
30.234375
0.559651
0.277003
0
0.051282
0
0
0.086518
0
0
0
0
0
0
1
0.025641
false
0
0.076923
0
0.102564
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e5770a81b9b82cc0a7f14946858d355e97381b6c
752
py
Python
migrations/versions/4d998c6ec630_nobadges.py
Togohogo1/tag-dh
e6903a87b8e491d84d3dcee02912238e6a3cabbe
[ "MIT" ]
4
2020-05-05T01:36:54.000Z
2021-03-13T21:05:47.000Z
migrations/versions/4d998c6ec630_nobadges.py
Togohogo1/tag-dh
e6903a87b8e491d84d3dcee02912238e6a3cabbe
[ "MIT" ]
1
2020-05-23T05:48:18.000Z
2020-05-23T05:48:18.000Z
migrations/versions/4d998c6ec630_nobadges.py
Togohogo1/tag-dh
e6903a87b8e491d84d3dcee02912238e6a3cabbe
[ "MIT" ]
1
2020-05-23T05:41:24.000Z
2020-05-23T05:41:24.000Z
"""nobadges Revision ID: 4d998c6ec630 Revises: 7950a35f5dbd Create Date: 2020-05-04 11:55:22.475532 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '4d998c6ec630' down_revision = '7950a35f5dbd' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('account') as batch_op: batch_op.drop_column('badges') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### with op.batch_alter_table('account') as batch_op: batch_op.add_column(sa.Column('badges', sa.TEXT(), nullable=True)) # ### end Alembic commands ###
24.258065
74
0.695479
97
752
5.257732
0.536082
0.054902
0.082353
0.090196
0.337255
0.337255
0.337255
0.337255
0.337255
0.337255
0
0.081037
0.179521
752
30
75
25.066667
0.745543
0.385638
0
0.166667
0
0
0.117647
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e577c049fff9107639adbdf73d533e3bceafb55b
323
py
Python
app/migrations/0003_remove_opinions_idea_opinion.py
sergane13/WorkingPlace
51f3550dbfa077cdfc7dd1a5421f787ad5bd84b1
[ "MIT" ]
1
2021-12-10T17:58:09.000Z
2021-12-10T17:58:09.000Z
app/migrations/0003_remove_opinions_idea_opinion.py
sergane13/IdeaTool-testSite
51f3550dbfa077cdfc7dd1a5421f787ad5bd84b1
[ "MIT" ]
null
null
null
app/migrations/0003_remove_opinions_idea_opinion.py
sergane13/IdeaTool-testSite
51f3550dbfa077cdfc7dd1a5421f787ad5bd84b1
[ "MIT" ]
null
null
null
# Generated by Django 2.1.15 on 2021-01-26 08:59

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('app', '0002_opinions'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='opinions',
            name='idea_opinion',
        ),
    ]
17.944444
48
0.585139
34
323
5.470588
0.823529
0
0
0
0
0
0
0
0
0
0
0.088496
0.30031
323
17
49
19
0.734513
0.142415
0
0
1
0
0.130909
0
0
0
0
0
0
1
0
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
e579720eabdbde95c5e282c52355449ab7cbf297
325
bzl
Python
library.bzl
tintor/mono
396edd39e45f536cac91b1fa6524f019244e4549
[ "Apache-2.0" ]
1
2020-09-27T05:07:20.000Z
2020-09-27T05:07:20.000Z
library.bzl
tintor/mono
396edd39e45f536cac91b1fa6524f019244e4549
[ "Apache-2.0" ]
null
null
null
library.bzl
tintor/mono
396edd39e45f536cac91b1fa6524f019244e4549
[ "Apache-2.0" ]
null
null
null
def library(name, hdrs=[], srcs=[], deps=[], test_deps=[]):
    native.cc_library(
        name = name,
        hdrs = [name + ".h"] + hdrs,
        srcs = srcs,
        deps = deps,
    )
    native.cc_test(
        name = name + "_test",
        srcs = [name + "_test.cc"],
        deps = test_deps + [":" + name, "//:catch"],
        args = ["-d=yes"],
    )
21.666667
59
0.489231
39
325
3.923077
0.358974
0.143791
0.156863
0
0
0
0
0
0
0
0
0
0.276923
325
14
60
23.214286
0.651064
0
0
0
0
0
0.092308
0
0
0
0
0
0
1
0.076923
false
0
0
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
e57aa1c123fef768bea75c45c3ceece7bb3f0440
859
py
Python
dxgi_values.py
ouroborus/dds_loader
2b28d5f58c79473e040a1f755305bf26cfe28d47
[ "MIT" ]
null
null
null
dxgi_values.py
ouroborus/dds_loader
2b28d5f58c79473e040a1f755305bf26cfe28d47
[ "MIT" ]
null
null
null
dxgi_values.py
ouroborus/dds_loader
2b28d5f58c79473e040a1f755305bf26cfe28d47
[ "MIT" ]
null
null
null
# BPP and block sizes for all DXGI formats.
# Since DXGI formats are enumerated from 0 onward there is no need for a dictionary;
# if some formats are not suited for storing, the value is set to 0.
# Sizes are in BYTES.
dxgi_pixel_or_block_size = [
    0,
    16, 16, 16, 16,
    12, 12, 12, 12,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    1, 1, 1, 1, 1, 1,
    0,           # DXGI_FORMAT_R1_UNORM ehm >.< ( TODO )
    4, 4, 4,
    8, 8, 8,     # BC1
    16, 16, 16,  # BC2
    16, 16, 16,  # BC3
    8, 8, 8,     # BC4
    16, 16, 16,  # BC5
    2, 2,
    4, 4, 4, 4, 4, 4, 4,
    16, 16, 16,  # BC6
    16, 16, 16,  # BC7
    # TODO Complete the rest
]

dxgi_compressed_formats = [
    70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
    82, 83, 84, 94, 95, 96, 97, 98, 99
]
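# --- Editor's note: a minimal usage sketch, not part of the original file,
# showing how the two tables above can be combined. For block-compressed
# formats the size entry is bytes per 4x4 block; otherwise it is bytes per
# pixel. DXGI_FORMAT_BC1_UNORM (71) is used as the example value.
#
#     def surface_size(dxgi_format, width, height):
#         size = dxgi_pixel_or_block_size[dxgi_format]
#         if dxgi_format in dxgi_compressed_formats:
#             blocks_w = (width + 3) // 4
#             blocks_h = (height + 3) // 4
#             return blocks_w * blocks_h * size
#         return width * height * size
#
#     surface_size(71, 256, 256)   # BC1: 64 * 64 blocks * 8 bytes = 32768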
23.216216
81
0.527357
189
859
2.349206
0.407407
0.144144
0.195946
0.234234
0.144144
0.130631
0.130631
0.114865
0.114865
0.114865
0
0.293132
0.305006
859
37
82
23.216216
0.450586
0.355064
0
0.483871
0
0
0
0
0
0
0
0.027027
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
5