hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
994230f6b4ebf07a0d7cc91b97f4dc1767bdae63
714
py
Python
setup.py
teriyakichild/python-zcli
43538a8e02a18d3e415d98b2cb1114d074e44a4f
[ "Apache-2.0" ]
null
null
null
setup.py
teriyakichild/python-zcli
43538a8e02a18d3e415d98b2cb1114d074e44a4f
[ "Apache-2.0" ]
null
null
null
setup.py
teriyakichild/python-zcli
43538a8e02a18d3e415d98b2cb1114d074e44a4f
[ "Apache-2.0" ]
null
null
null
from setuptools import setup from sys import path path.insert(0, '.') NAME = "zcli" if __name__ == "__main__": setup( name = NAME, version = "0.1.0", author = "Tony Rogers", author_email = "tony.rogers@rackspace.com", url = "https://github.com/teriyakichild/python-zcli", license = 'internal use', packages = [NAME], package_dir = {NAME: NAME}, description = "Zabbix CLI.", install_requires = ['requests', 'argparse', 'pyzabbix', 'ConfigParser'], entry_points={ 'console_scripts': [ 'zcli = zcli:cli' ], } )
23.8
61
0.491597
64
714
5.28125
0.6875
0.047337
0
0
0
0
0
0
0
0
0
0.009009
0.378151
714
29
62
24.62069
0.752252
0
0
0
0
0
0.262272
0.035063
0
0
0
0
0
1
0
false
0
0.086957
0
0.086957
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
994265d708b917d735f3e601041c2ca08012e144
7,022
py
Python
src/python/gem5/components/memory/dram_interfaces/hbm.py
hyu-iot/gem5
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
[ "BSD-3-Clause" ]
765
2015-01-14T16:17:04.000Z
2022-03-28T07:46:28.000Z
src/python/gem5/components/memory/dram_interfaces/hbm.py
hyu-iot/gem5
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
[ "BSD-3-Clause" ]
148
2018-07-20T00:58:36.000Z
2021-11-16T01:52:33.000Z
src/python/gem5/components/memory/dram_interfaces/hbm.py
hyu-iot/gem5
aeccc8bd8e9a86f96fc7a6f40d978f8494337fc5
[ "BSD-3-Clause" ]
807
2015-01-06T09:55:38.000Z
2022-03-30T10:23:36.000Z
# Copyright (c) 2012-2021 Arm Limited # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Copyright (c) 2013 Amin Farmahini-Farahani # Copyright (c) 2015 University of Kaiserslautern # Copyright (c) 2015 The University of Bologna # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Interfaces for LPDDR5 memory devices These memory "interfaces" contain the timing,energy,etc parameters for each memory type and are usually based on datasheets for the memory devices. You can use these interfaces in the MemCtrl object as the `dram` timing interface. """ from m5.objects import DRAMInterface class HBM_1000_4H_1x128(DRAMInterface): """ A single HBM x128 interface (one command and address bus), with default timings based on data publically released ("HBM: Memory Solution for High Performance Processors", MemCon, 2014), IDD measurement values, and by extrapolating data from other classes. Architecture values based on published HBM spec A 4H stack is defined, 2Gb per die for a total of 1GiB of memory. 
**IMPORTANT** HBM gen1 supports up to 8 128-bit physical channels Configuration defines a single channel, with the capacity set to (full_ stack_capacity / 8) based on 2Gb dies To use all 8 channels, set 'channels' parameter to 8 in system configuration """ # 128-bit interface legacy mode device_bus_width = 128 # HBM supports BL4 and BL2 (legacy mode only) burst_length = 4 # size of channel in bytes, 4H stack of 2Gb dies is 1GiB per stack; # with 8 channels, 128MiB per channel device_size = "128MiB" device_rowbuffer_size = "2KiB" # 1x128 configuration devices_per_rank = 1 # HBM does not have a CS pin; set rank to 1 ranks_per_channel = 1 # HBM has 8 or 16 banks depending on capacity # 2Gb dies have 8 banks banks_per_rank = 8 # depending on frequency, bank groups may be required # will always have 4 bank groups when enabled # current specifications do not define the minimum frequency for # bank group architecture # setting bank_groups_per_rank to 0 to disable until range is defined bank_groups_per_rank = 0 # 500 MHz for 1Gbps DDR data rate tCK = "2ns" # use values from IDD measurement in JEDEC spec # use tRP value for tRCD and tCL similar to other classes tRP = "15ns" tRCD = "15ns" tCL = "15ns" tRAS = "33ns" # BL2 and BL4 supported, default to BL4 # DDR @ 500 MHz means 4 * 2ns / 2 = 4ns tBURST = "4ns" # value for 2Gb device from JEDEC spec tRFC = "160ns" # value for 2Gb device from JEDEC spec tREFI = "3.9us" # extrapolate the following from LPDDR configs, using ns values # to minimize burst length, prefetch differences tWR = "18ns" tRTP = "7.5ns" tWTR = "10ns" # start with 2 cycles turnaround, similar to other memory classes # could be more with variations across the stack tRTW = "4ns" # single rank device, set to 0 tCS = "0ns" # from MemCon example, tRRD is 4ns with 2ns tCK tRRD = "4ns" # from MemCon example, tFAW is 30ns with 2ns tCK tXAW = "30ns" activation_limit = 4 # 4tCK tXP = "8ns" # start with tRFC + tXP -> 160ns + 8ns = 168ns tXS = "168ns" class 
HBM_1000_4H_1x64(HBM_1000_4H_1x128): """ A single HBM x64 interface (one command and address bus), with default timings based on HBM gen1 and data publically released A 4H stack is defined, 8Gb per die for a total of 4GiB of memory. Note: This defines a pseudo-channel with a unique controller instantiated per pseudo-channel Stay at same IO rate (1Gbps) to maintain timing relationship with HBM gen1 class (HBM_1000_4H_x128) where possible **IMPORTANT** For HBM gen2 with pseudo-channel mode, configure 2X channels. Configuration defines a single pseudo channel, with the capacity set to (full_ stack_capacity / 16) based on 8Gb dies To use all 16 pseudo channels, set 'channels' parameter to 16 in system configuration """ # 64-bit pseudo-channel interface device_bus_width = 64 # HBM pseudo-channel only supports BL4 burst_length = 4 # size of channel in bytes, 4H stack of 8Gb dies is 4GiB per stack; # with 16 channels, 256MiB per channel device_size = "256MiB" # page size is halved with pseudo-channel; maintaining the same same number # of rows per pseudo-channel with 2X banks across 2 channels device_rowbuffer_size = "1KiB" # HBM has 8 or 16 banks depending on capacity # Starting with 4Gb dies, 16 banks are defined banks_per_rank = 16 # reset tRFC for larger, 8Gb device # use HBM1 4Gb value as a starting point tRFC = "260ns" # start with tRFC + tXP -> 160ns + 8ns = 168ns tXS = "268ns" # Default different rank bus delay to 2 CK, @1000 MHz = 2 ns tCS = "2ns" tREFI = "3.9us" # active powerdown and precharge powerdown exit time tXP = "10ns" # self refresh exit time tXS = "65ns"
35.64467
79
0.721874
1,044
7,022
4.814176
0.369732
0.020692
0.007163
0.013132
0.175885
0.12893
0.122165
0.110227
0.097493
0.066057
0
0.04553
0.227428
7,022
196
80
35.826531
0.880922
0.799487
0
0.1
0
0
0.093489
0
0
0
0
0
0
1
0
false
0
0.025
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9943a954b6c98669a7f2d794d8606fb4a934d9b6
1,826
py
Python
Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py
jlconlin/PhDThesis
8e704613721a800ce1c59576e94f40fa6f7cd986
[ "MIT" ]
null
null
null
Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py
jlconlin/PhDThesis
8e704613721a800ce1c59576e94f40fa6f7cd986
[ "MIT" ]
null
null
null
Code/branches/Pre-Prospectus/python/SourceFiles/Geometry.py
jlconlin/PhDThesis
8e704613721a800ce1c59576e94f40fa6f7cd986
[ "MIT" ]
null
null
null
__id__ = "$Id: Geometry.py 51 2007-04-25 20:43:07Z jlconlin $" __author__ = "$Author: jlconlin $" __version__ = " $Revision: 51 $" __date__ = "$Date: 2007-04-25 14:43:07 -0600 (Wed, 25 Apr 2007) $" import scipy import Errors class Geometry(object): """ Geometry is a class to hold information about the geometry of the problem. """ def __init__(self, bins, range): """ bins: A tuple each number is how many spatial bins in each dimension (up to 3) range: A list of [min, max] pairs; the limits of the spatial geometry in each dimension. """ try: self.dimension = len(bins) except TypeError: self.dimension = 1 if self.dimension != 1: raise Errors.GeometryError( "Geometry currently only suppors 1-D geometry") elif self.dimension != len(range): raise Errors.GeometryError( "Bins and Range must have same degree") else: self.bins = bins self.range = range self.edges = scipy.zeros(self.bins+1) self.centers = scipy.zeros(self.bins) # Bin centers width = self.max - self.min for i in xrange(self.bins+1): edge = self.min + i*(width/float(self.bins)) self.edges[i] = edge for i in xrange(len(self.centers)): self.centers[i] = self.edges[i] + (self.edges[i+1] - self.edges[i])/2.0 def __repr__(self): """ """ return "bins: %s, range: %s" %(self.bins, self.range) def _getMinX(self): return min(self.range[0]) def _getMaxX(self): return max(self.range[0]) min = property(fget=_getMinX) max = property(fget=_getMaxX)
29.934426
87
0.552026
231
1,826
4.242424
0.398268
0.057143
0.040816
0.036735
0
0
0
0
0
0
0
0.043514
0.332968
1,826
60
88
30.433333
0.761084
0.148959
0
0.054054
0
0.027027
0.160269
0
0
0
0
0
0
1
0.108108
false
0
0.054054
0.054054
0.324324
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99440dc4872605e49ce2bfbd37e480db9c6f90a0
12,244
py
Python
django_backblaze_b2/storage.py
ehossack/django-backblaze-b2
556777a74a23780bffde68296c3173fb5a7d5ccd
[ "BSD-2-Clause" ]
12
2020-09-14T15:43:34.000Z
2021-12-11T17:45:22.000Z
django_backblaze_b2/storage.py
ehossack/django-backblaze-b2
556777a74a23780bffde68296c3173fb5a7d5ccd
[ "BSD-2-Clause" ]
10
2020-11-28T19:55:20.000Z
2022-03-28T02:18:15.000Z
django_backblaze_b2/storage.py
ehossack/django-backblaze-b2
556777a74a23780bffde68296c3173fb5a7d5ccd
[ "BSD-2-Clause" ]
2
2021-01-29T21:58:26.000Z
2021-06-22T19:34:11.000Z
from datetime import datetime from hashlib import sha3_224 as hash from logging import getLogger from typing import IO, Any, Callable, Dict, List, Optional, Tuple, cast from b2sdk.account_info import InMemoryAccountInfo from b2sdk.account_info.abstract import AbstractAccountInfo from b2sdk.account_info.sqlite_account_info import SqliteAccountInfo from b2sdk.api import B2Api, Bucket from b2sdk.cache import AuthInfoCache from b2sdk.exception import FileOrBucketNotFound, NonExistentBucket from django.core.cache.backends.base import BaseCache from django.core.exceptions import ImproperlyConfigured from django.core.files.base import File from django.core.files.storage import Storage from django.utils.deconstruct import deconstructible from typing_extensions import TypedDict from django_backblaze_b2.b2_file import B2File from django_backblaze_b2.cache_account_info import DjangoCacheAccountInfo from django_backblaze_b2.options import ( BackblazeB2StorageOptions, DjangoCacheAccountInfoConfig, SqliteAccountInfoConfig, getDefaultB2StorageOptions, ) logger = getLogger("django-backblaze-b2") class _BaseFileInfoDict(TypedDict): fileId: str fileName: str fileInfo: dict class _FileInfoDict(_BaseFileInfoDict, total=False): size: int uploadTimestamp: int contentType: str class B2FileInformationNotAvailableException(Exception): ... 
@deconstructible class BackblazeB2Storage(Storage): """Storage class which fulfills the Django Storage contract through b2 apis""" def __init__(self, **kwargs): opts = self._getDjangoSettingsOptions(kwargs.get("opts", {})) if "opts" in kwargs: self._validateOptions(kwargs.get("opts")) _merge(opts, kwargs.get("opts", {})) logOpts = opts.copy() logOpts.update({"application_key_id": "<redacted>", "application_key": "<redacted>"}) logger.debug(f"Initializing {self.__class__.__name__} with options {logOpts}") self._bucketName = opts["bucket"] self._defaultFileMetadata = opts["defaultFileInfo"] self._forbidFilePropertyCaching = opts["forbidFilePropertyCaching"] self._authInfo = dict( [(k, v) for k, v in opts.items() if k in ["realm", "application_key_id", "application_key"]] ) self._allowFileOverwrites = opts["allowFileOverwrites"] self._getAccountInfo = self._createAccountInfoCallable(opts) logger.info(f"{self.__class__.__name__} instantiated to use bucket {self._bucketName}") if opts["authorizeOnInit"]: logger.debug(f"{self.__class__.__name__} authorizing") self.b2Api if opts["validateOnInit"]: self._getOrCreateBucket(opts["nonExistentBucketDetails"]) def _getDjangoSettingsOptions(self, kwargOpts: Dict) -> BackblazeB2StorageOptions: """Setting terminology taken from: https://b2-sdk-python.readthedocs.io/en/master/glossary.html#term-application-key-ID kwargOpts available for subclasses """ from django.conf import settings if not hasattr(settings, "BACKBLAZE_CONFIG"): raise ImproperlyConfigured("add BACKBLAZE_CONFIG dict to django settings") if "application_key_id" not in settings.BACKBLAZE_CONFIG or "application_key" not in settings.BACKBLAZE_CONFIG: raise ImproperlyConfigured( "At minimum BACKBLAZE_CONFIG must contain auth 'application_key' and 'application_key_id'" f"\nfound: {settings.BACKBLAZE_CONFIG}" ) self._validateOptions(settings.BACKBLAZE_CONFIG) opts = getDefaultB2StorageOptions() opts.update(settings.BACKBLAZE_CONFIG) # type: ignore return opts def 
_validateOptions(self, options: Dict) -> None: unrecognizedOptions = [k for k in options.keys() if k not in getDefaultB2StorageOptions().keys()] if unrecognizedOptions: raise ImproperlyConfigured(f"Unrecognized options: {unrecognizedOptions}") def _createAccountInfoCallable(self, opts: BackblazeB2StorageOptions) -> Callable[[], AbstractAccountInfo]: if ( not isinstance(opts["accountInfo"], dict) or "type" not in opts["accountInfo"] or opts["accountInfo"]["type"] not in ["memory", "sqlite", "django-cache"] ): raise ImproperlyConfigured( (f"accountInfo property must be a dict with type found in options.py, was {opts['accountInfo']}") ) if opts["accountInfo"]["type"] == "django-cache": logger.debug(f"{self.__class__.__name__} will use {DjangoCacheAccountInfo.__name__}") return lambda: DjangoCacheAccountInfo( cacheName=cast(DjangoCacheAccountInfoConfig, opts["accountInfo"]).get("cache", "django-backblaze-b2") ) elif opts["accountInfo"]["type"] == "memory": logger.debug(f"{self.__class__.__name__} will use {InMemoryAccountInfo.__name__}") return lambda: InMemoryAccountInfo() elif opts["accountInfo"]["type"] == "sqlite": logger.debug(f"{self.__class__.__name__} will use {SqliteAccountInfo.__name__}") return lambda: SqliteAccountInfo( file_name=cast(SqliteAccountInfoConfig, opts["accountInfo"])["databasePath"] ) raise ImproperlyConfigured() @property def b2Api(self) -> B2Api: if not hasattr(self, "_b2Api"): self._accountInfo = self._getAccountInfo() self._b2Api = B2Api(account_info=self._accountInfo, cache=AuthInfoCache(self._accountInfo)) self._b2Api.authorize_account(**self._authInfo) return self._b2Api @property def bucket(self) -> Bucket: if not hasattr(self, "_bucket"): self._getOrCreateBucket() return self._bucket def _getOrCreateBucket(self, newBucketDetails=None) -> None: try: self._bucket = self.b2Api.get_bucket_by_name(self._bucketName) except NonExistentBucket as e: if newBucketDetails is not None: logger.debug(f"Bucket {self._bucketName} not found. 
Creating with details: {newBucketDetails}") if "bucket_type" not in newBucketDetails: newBucketDetails["bucket_type"] = "allPrivate" self._bucket = self.b2Api.create_bucket(name=self._bucketName, **newBucketDetails) else: raise e logger.debug(f"Connected to bucket {self._bucket.as_dict()}") def _refreshBucket(self) -> None: self.b2Api.session.cache.clear() self._getOrCreateBucket() def _open(self, name: str, mode: str) -> File: return B2File( name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode=mode, sizeProvider=self.size, ) def _save(self, name: str, content: IO[Any]) -> str: """ Save and retrieve the filename. If the file exists it will make another version of that file. """ return B2File( name=name, bucket=self.bucket, fileMetadata=self._defaultFileMetadata, mode="w", sizeProvider=self.size, ).saveAndRetrieveFile(content) def path(self, name: str) -> str: return name def delete(self, name: str) -> None: fileInfo = self._fileInfo(name) if fileInfo: logger.debug(f"Deleting file {name} id=({fileInfo['fileId']})") self.b2Api.delete_file_version(file_id=fileInfo["fileId"], file_name=name) if self._cache: self._cache.delete(self._fileCacheKey(name)) else: logger.debug("Not found") def _fileInfo(self, name: str) -> Optional[_FileInfoDict]: try: if self._cache: cacheKey = self._fileCacheKey(name) timeoutInSeconds = 60 def loadInfo(): logger.debug(f"file info cache miss for {name}") return self.bucket.get_file_info_by_name(name).as_dict() return self._cache.get_or_set(key=cacheKey, default=loadInfo, timeout=timeoutInSeconds) return self.bucket.get_file_info_by_name(name).as_dict() except FileOrBucketNotFound: return None def _fileCacheKey(self, name: str) -> str: return hash(f"{self.bucket.name}__{name}".encode()).hexdigest() @property def _cache(self) -> Optional[BaseCache]: if ( not self._forbidFilePropertyCaching and self.b2Api # force init and self._accountInfo and isinstance(self._accountInfo, DjangoCacheAccountInfo) ): return 
self._accountInfo.cache return None def exists(self, name: str) -> bool: return bool(self._fileInfo(name)) def size(self, name: str) -> int: fileInfo = self._fileInfo(name) return fileInfo.get("size", 0) if fileInfo else 0 def url(self, name: Optional[str]) -> str: if not name: raise Exception("Name must be defined") return self._getFileUrl(name) def _getFileUrl(self, name: str) -> str: return self.getBackblazeUrl(name) def getBackblazeUrl(self, filename: str) -> str: return self.b2Api.get_download_url_for_file_name(bucket_name=self._bucketName, file_name=filename) def get_available_name(self, name: str, max_length: Optional[int] = None) -> str: if self._allowFileOverwrites: return name return super().get_available_name(name, max_length) def listdir(self, path: str) -> Tuple[List[str], List[str]]: """ List the contents of the specified path. Return a 2-tuple of lists: the first item being directories, the second item being files. """ raise NotImplementedError("subclasses of Storage must provide a listdir() method") def get_accessed_time(self, name: str) -> datetime: """ Return the last accessed time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. """ raise NotImplementedError("subclasses of Storage must provide a get_accessed_time() method") def get_created_time(self, name: str) -> datetime: """ Return the creation time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. 
""" from datetime import timezone from django.conf import settings fileInfo = self._fileInfo(name) try: if fileInfo and float(fileInfo.get("uploadTimestamp", 0)) > 0: timestamp = float(fileInfo["uploadTimestamp"]) / 1000.0 if settings.USE_TZ: # Safe to use .replace() because UTC doesn't have DST return datetime.utcfromtimestamp(timestamp).replace(tzinfo=timezone.utc) return datetime.fromtimestamp(timestamp) except ValueError as e: raise B2FileInformationNotAvailableException(f"'uploadTimestamp' from API not valid for {name}: {e}") raise B2FileInformationNotAvailableException(f"'uploadTimestamp' not available for {name}") def get_modified_time(self, name: str) -> datetime: """ Return the last modified time (as a datetime) of the file specified by name. The datetime will be timezone-aware if USE_TZ=True. """ return self.get_created_time(name) def _merge(target: Dict, source: Dict, path=None) -> Dict: """merges b into a https://stackoverflow.com/a/7205107/11076240 """ if path is None: path = [] for key in source: if key in target: printablePath = ".".join(path + [str(key)]) if isinstance(target[key], dict) and isinstance(source[key], dict): _merge(target[key], source[key], path + [str(key)]) elif target[key] != source[key]: logger.debug(f"Overriding setting {printablePath} with value {source[key]}") target[key] = source[key] else: target[key] = source[key] return target
41.505085
119
0.655586
1,326
12,244
5.88537
0.214932
0.014352
0.018324
0.00897
0.151717
0.104946
0.101743
0.097642
0.062019
0.062019
0
0.0079
0.245263
12,244
294
120
41.646259
0.836598
0.079141
0
0.155251
0
0
0.159288
0.039212
0
0
0
0
0
1
0.123288
false
0
0.100457
0.027397
0.392694
0.009132
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9946935225cfd5d8c3166e682fc9c3c573466b46
8,180
py
Python
docs/nnabla/p10_Python_API_Tutorials/s02_python_api.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
docs/nnabla/p10_Python_API_Tutorials/s02_python_api.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
docs/nnabla/p10_Python_API_Tutorials/s02_python_api.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
# # NNabla Python API Demonstration Tutorial # # (https://nnabla.readthedocs.io/en/latest/python/tutorial/python_api.html) import matplotlib.pyplot as plt import nnabla as nn import nnabla.functions as F import nnabla.parametric_functions as PF import nnabla.solvers as S import numpy as np from ivory.utils.path import cache_file # ## NdArray a = nn.NdArray((2, 3, 4)) print(a.data) # - print("[Substituting random values]") a.data = np.random.randn(*a.shape) print(a.data) print("[Slicing]") a.data[0, :, ::2] = 0 print(a.data) # - a.fill(1) # Filling all values with one. print(a.data) # - b = nn.NdArray.from_numpy_array(np.ones(a.shape)) print(b.data) # ## Variable x = nn.Variable([2, 3, 4], need_grad=True) print("x.data:", x.data) print("x.grad:", x.grad) # - x.shape # - print("x.data") print(x.d) x.d = 1.2345 # To avoid NaN assert np.all(x.d == x.data.data), "d: {} != {}".format(x.d, x.data.data) print("x.grad") print(x.g) x.g = 1.2345 # To avoid NaN assert np.all(x.g == x.grad.data), "g: {} != {}".format(x.g, x.grad.data) # !Zeroing grad values x.grad.zero() print("x.grad (after `.zero()`)") print(x.g) # - x2 = nn.Variable.from_numpy_array(np.ones((3,)), need_grad=True) print(x2) print(x2.d) x3 = nn.Variable.from_numpy_array(np.ones((3,)), np.zeros((3,)), need_grad=True) print(x3) print(x3.d) print(x3.g) # - print(x.parent) # ## Function sigmoid_output = F.sigmoid(x) sum_output = F.reduce_sum(sigmoid_output) print(sigmoid_output) print(sum_output) # - print("sigmoid_output.parent.name:", sigmoid_output.parent.name) print("x:", x) print("sigmoid_output.parent.inputs refers to x:", sigmoid_output.parent.inputs) # - print("sum_output.parent.name:", sum_output.parent.name) print("sigmoid_output:", sigmoid_output) print("sum_output.parent.inputs refers to sigmoid_output:", sum_output.parent.inputs) # - sum_output.forward() print("CG output:", sum_output.d) print("Reference:", np.sum(1.0 / (1.0 + np.exp(-x.d)))) # - x.grad.zero() sum_output.backward() print("d sum_o / d 
sigmoid_o:") print(sigmoid_output.g) print("d sum_o / d x:") print(x.g) x.d # - x = nn.Variable([5, 2]) # Input w = nn.Variable([2, 3], need_grad=True) # Weights b = nn.Variable([3], need_grad=True) # Biases affine_out = F.affine(x, w, b) # Create a graph including only affine affine_out # - # !Set random input and parameters x.d = np.random.randn(*x.shape) w.d = np.random.randn(*w.shape) b.d = np.random.randn(*b.shape) # !Initialize grad x.grad.zero() # Just for showing gradients are not computed when need_grad=False. w.grad.zero() b.grad.zero() # !Forward and backward affine_out.forward() affine_out.backward() # - print("F.affine") print(affine_out.d) print("Reference") print(np.dot(x.d, w.d) + b.d) print("dw") print(w.g) print("db") print(b.g) # - print(x.g) # ## Parametric Function with nn.parameter_scope("affine1"): c1 = PF.affine(x, 3) # - nn.get_parameters() # - c1 = PF.affine(x, 3, name="affine1") nn.get_parameters() # - c1.shape # - with nn.parameter_scope("foo"): h = PF.affine(x, 3) with nn.parameter_scope("bar"): h = PF.affine(h, 4) with nn.parameter_scope("foo"): params = nn.get_parameters() params # - with nn.parameter_scope("foo"): nn.clear_parameters() nn.get_parameters() # ## MLP Example For Explanation nn.clear_parameters() batchsize = 16 x = nn.Variable([batchsize, 2]) with nn.parameter_scope("fc1"): h = F.tanh(PF.affine(x, 512)) with nn.parameter_scope("fc2"): y = PF.affine(h, 1) print("Shapes:", h.shape, y.shape) # - nn.get_parameters() # - x.d = np.random.randn(*x.shape) # Set random input y.forward() y.d # - # !Variable for label label = nn.Variable([batchsize, 1]) # !Set loss loss = F.reduce_mean(F.squared_error(y, label)) # !Execute forward pass. label.d = np.random.randn(*label.shape) # Randomly generate labels loss.forward() print(loss.d) # - # !Collect all parameter variables and init grad. for name, param in nn.get_parameters().items(): param.grad.zero() # Gradients are accumulated to grad of params. 
loss.backward() # ## Imperative Mode for name, param in nn.get_parameters().items(): param.data -= param.grad * 0.001 # 0.001 as learning rate # - # !A simple example of imperative mode. xi = nn.NdArray.from_numpy_array(np.arange(4).reshape(2, 2)) yi = F.relu(xi - 1) xi.data # - yi.data # - id(xi) # - xi = xi + 1 id(xi) # - xi -= 1 id(xi) # - # !The following doesn't perform substitution but assigns a new NdArray object to `xi`. # !xi = xi + 1 # !The following copies the result of `xi + 1` to `xi`. xi.copy_from(xi + 1) assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 1)) # Inplace operations like `+=`, `*=` can also be used (more efficient). xi += 1 assert np.all(xi.data == (np.arange(4).reshape(2, 2) + 2)) # ## Solver solver = S.Sgd(lr=0.00001) solver.set_parameters(nn.get_parameters()) # - # !Set random data x.d = np.random.randn(*x.shape) label.d = np.random.randn(*label.shape) # !Forward loss.forward() # - solver.zero_grad() loss.backward() solver.update() # ## Toy Problem To Demonstrate Training def vector2length(x): # x : [B, 2] where B is number of samples. return np.sqrt(np.sum(x ** 2, axis=1, keepdims=True)) # Example vector2length(np.array([[3, 4], [5, 12]])) # - # !Data for plotting contour on a grid data. 
xs = np.linspace(-1, 1, 100) ys = np.linspace(-1, 1, 100) grid = np.meshgrid(xs, ys) X = grid[0].flatten() Y = grid[1].flatten() def plot_true(): """Plotting contour of true mapping from a grid data created above.""" plt.contourf( xs, ys, vector2length(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100) ) plt.axis("equal") plt.colorbar() plot_true() # - def length_mlp(x): h = x for i, hnum in enumerate([4, 8, 4, 2]): h = F.tanh(PF.affine(h, hnum, name="fc{}".format(i))) y = PF.affine(h, 1, name="fc") return y # - nn.clear_parameters() batchsize = 100 x = nn.Variable([batchsize, 2]) y = length_mlp(x) label = nn.Variable([batchsize, 1]) loss = F.reduce_mean(F.squared_error(y, label)) # - def predict(inp): ret = [] for i in range(0, inp.shape[0], x.shape[0]): xx = inp[i : i + x.shape[0]] # Imperative execution xi = nn.NdArray.from_numpy_array(xx) yi = length_mlp(xi) ret.append(yi.data.copy()) return np.vstack(ret) def plot_prediction(): plt.contourf(xs, ys, predict(np.hstack([X[:, None], Y[:, None]])).reshape(100, 100)) plt.colorbar() plt.axis("equal") # - solver = S.Adam(alpha=0.01) solver.set_parameters(nn.get_parameters()) # - def random_data_provider(n): x = np.random.uniform(-1, 1, size=(n, 2)) y = vector2length(x) return x, y # - num_iter = 2000 for i in range(num_iter): # Sample data and set them to input variables of training. xx, ll = random_data_provider(batchsize) x.d = xx label.d = ll # Forward propagation given inputs. loss.forward(clear_no_need_grad=True) # Parameter gradients initialization and gradients computation by backprop. solver.zero_grad() loss.backward(clear_buffer=True) # Apply weight decay and update by Adam rule. solver.weight_decay(1e-6) solver.update() # Just print progress. 
if i % 100 == 0 or i == num_iter - 1: print("Loss@{:4d}: {}".format(i, loss.d)) # - loss.forward(clear_buffer=True) print("The prediction `y` is cleared because it's an intermediate variable.") print(y.d.flatten()[:4]) # to save space show only 4 values y.persistent = True loss.forward(clear_buffer=True) print("The prediction `y` is kept by the persistent flag.") print(y.d.flatten()[:4]) # to save space show only 4 value # - plt.subplot(121) plt.title("Ground truth") plot_true() plt.subplot(122) plt.title("Prediction") plot_prediction() # - path_param = cache_file('nnabla/tutorial/python_api/param-vector2length.h5') nn.save_parameters(path_param) # !Remove all once nn.clear_parameters() nn.get_parameters() # - # !Load again nn.load_parameters(path_param) print('\n'.join(map(str, nn.get_parameters().items()))) # - with nn.parameter_scope('foo'): nn.load_parameters(path_param) print('\n'.join(map(str, nn.get_parameters().items())))
23.848397
88
0.66687
1,340
8,180
3.987313
0.223134
0.013476
0.033689
0.029946
0.302639
0.206625
0.155905
0.141119
0.117911
0.080479
0
0.024595
0.155012
8,180
342
89
23.918129
0.748409
0.1978
0
0.283784
0
0
0.094492
0.023429
0
0
0
0
0.018018
1
0.027027
false
0
0.031532
0.004505
0.076577
0.238739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9946d28c6f11eadb2f0d488d601209e95eec15cf
18,232
py
Python
tests/unit/test_read_ahead_cursor.py
simonz-bq/amazon-qldb-driver-python
06c8146be0819e49d4b2ef22706ccd57e9990e03
[ "Apache-2.0" ]
39
2019-10-30T09:01:54.000Z
2022-02-06T18:30:27.000Z
tests/unit/test_read_ahead_cursor.py
simonz-bq/amazon-qldb-driver-python
06c8146be0819e49d4b2ef22706ccd57e9990e03
[ "Apache-2.0" ]
30
2020-03-05T23:52:57.000Z
2022-03-24T16:04:06.000Z
tests/unit/test_read_ahead_cursor.py
byronlin13/amazon-qldb-driver-python
2f6fb7864cd49fdf0c34cec9764d399e9b9c72c4
[ "Apache-2.0" ]
28
2019-12-09T17:21:59.000Z
2022-01-25T11:57:51.000Z
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.

# Unit tests for ReadAheadCursor: a cursor that pre-fetches result pages into a
# queue, either on its own daemon Thread or via a caller-supplied executor.
from queue import Queue
from unittest import TestCase
from unittest.mock import patch
from unittest.mock import MagicMock

from amazon.ion.simpleion import dumps
from botocore.exceptions import ClientError
from pyqldb.cursor.read_ahead_cursor import ReadAheadCursor
from pyqldb.errors import ResultClosedError

from .helper_functions import assert_query_stats, generate_statement_result

# Canned fixtures shared across the tests below.
MOCK_ERROR_CODE = '500'
MOCK_MESSAGE = 'foo'
MOCK_CLIENT_ERROR_MESSAGE = {'Error': {'Code': MOCK_ERROR_CODE, 'Message': MOCK_MESSAGE}}
MOCK_READ_AHEAD = 0
MOCK_TRANSACTION_ID = 'ID'
MOCK_VALUES = [1, 2]
MOCK_ION_BINARY_VALUES = [{'IonBinary': MOCK_VALUES[0]}, {'IonBinary': MOCK_VALUES[1]}]
MOCK_TOKEN = 'mock_token'
MOCK_PAGE_WITH_TOKEN = {'Values': MOCK_ION_BINARY_VALUES, 'NextPageToken': MOCK_TOKEN}
MOCK_READ_IOS = 3
MOCK_WRITE_IOS = 2
MOCK_PROCESSING_TIME = 1
MOCK_STATEMENT_RESULT = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                  MOCK_TOKEN, True, MOCK_ION_BINARY_VALUES)
MOCK_STATEMENT_RESULT_WITHOUT_TOKEN = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                                None, True, MOCK_ION_BINARY_VALUES)
MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN = generate_statement_result(MOCK_READ_IOS, MOCK_WRITE_IOS, MOCK_PROCESSING_TIME,
                                                                 None, False, MOCK_ION_BINARY_VALUES)


class TestReadAheadCursor(TestCase):
    # NOTE: with stacked @patch decorators, mocks are injected bottom-up, so the
    # last decorator corresponds to the first mock parameter.

    @patch('pyqldb.communication.session_client.SessionClient')
    @patch('pyqldb.cursor.read_ahead_cursor.Queue')
    @patch('pyqldb.cursor.read_ahead_cursor.Thread')
    def test_ReadAheadCursor_without_executor(self, mock_thread, mock_queue, mock_session):
        """Without an executor the constructor spawns a daemon Thread to populate the queue."""
        mock_session.return_value = None
        mock_thread.return_value = mock_thread
        mock_queue.return_value = mock_queue
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, None)
        self.assertEqual(read_ahead_cursor._page, MOCK_PAGE_WITH_TOKEN)
        self.assertEqual(read_ahead_cursor._session, mock_session)
        self.assertEqual(read_ahead_cursor._read_ios, MOCK_READ_IOS)
        self.assertEqual(read_ahead_cursor._write_ios, MOCK_WRITE_IOS)
        self.assertEqual(read_ahead_cursor._processing_time_milliseconds, MOCK_PROCESSING_TIME)
        self.assertEqual(read_ahead_cursor._transaction_id, MOCK_TRANSACTION_ID)
        self.assertEqual(read_ahead_cursor._index, 0)
        self.assertEqual(read_ahead_cursor._queue, mock_queue)
        self.assertEqual(read_ahead_cursor._is_open, True)
        # Queue capacity is read_ahead - 1 (the current page counts as one).
        mock_queue.assert_called_once_with(MOCK_READ_AHEAD - 1)
        mock_thread.assert_called_once_with(target=read_ahead_cursor._populate_queue)
        mock_thread().setDaemon.assert_called_once_with(True)
        mock_thread().start.assert_called_once_with()

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    @patch('pyqldb.cursor.read_ahead_cursor.Queue')
    def test_ReadAheadCursor_with_executor(self, mock_queue, mock_session, mock_executor):
        """With an executor the populate task is submitted to it instead of a new Thread."""
        mock_session.return_value = None
        mock_queue.return_value = mock_queue
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        self.assertEqual(read_ahead_cursor._page, MOCK_PAGE_WITH_TOKEN)
        self.assertEqual(read_ahead_cursor._session, mock_session)
        self.assertEqual(read_ahead_cursor._read_ios, MOCK_READ_IOS)
        self.assertEqual(read_ahead_cursor._write_ios, MOCK_WRITE_IOS)
        self.assertEqual(read_ahead_cursor._processing_time_milliseconds, MOCK_PROCESSING_TIME)
        self.assertEqual(read_ahead_cursor._transaction_id, MOCK_TRANSACTION_ID)
        self.assertEqual(read_ahead_cursor._index, 0)
        self.assertEqual(read_ahead_cursor._queue, mock_queue)
        self.assertEqual(read_ahead_cursor._is_open, True)
        mock_queue.assert_called_once_with(MOCK_READ_AHEAD - 1)
        mock_executor.submit.assert_called_once_with(read_ahead_cursor._populate_queue)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_iter(self, mock_session, mock_executor):
        """The cursor is its own iterator."""
        mock_session.return_value = None
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        self.assertEqual(iter(read_ahead_cursor), read_ahead_cursor)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.cursor.stream_cursor.StreamCursor._value_holder_to_ion_value')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next(self, mock_session, mock_value_holder_to_ion_value, mock_executor):
        """next() yields each deserialized value in order and then raises StopIteration."""
        mock_session.return_value = None
        mock_value_holder_to_ion_value.side_effect = lambda val: val
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT_WITHOUT_TOKEN, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        count = 0
        for value in MOCK_ION_BINARY_VALUES:
            self.assertEqual(read_ahead_cursor._index, count)
            self.assertEqual(next(read_ahead_cursor), value)
            mock_value_holder_to_ion_value.assert_called_with(value)
            count += 1
        self.assertRaises(StopIteration, next, read_ahead_cursor)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next_when_closed(self, mock_session, mock_executor):
        """next() on a closed cursor raises ResultClosedError."""
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor.close()
        self.assertRaises(ResultClosedError, next, read_ahead_cursor)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.cursor.stream_cursor.StreamCursor._value_holder_to_ion_value')
    @patch('pyqldb.communication.session_client.SessionClient')
    @patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._next_page')
    @patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._are_there_more_results')
    def test_next_verify_are_there_more_results_and_next_page_called(self, mock_are_there_more_results,
                                                                     mock_next_page, mock_session,
                                                                     mock_value_holder_to_ion_value, mock_executor):
        """When the current page is exhausted, next() fetches the next page and returns its first value."""
        updated_result = '1'

        def next_page():
            # Simulate _next_page swapping in a fresh page and resetting the index.
            read_ahead_cursor._page = {'NextPageToken': None, 'Values': [updated_result]}
            read_ahead_cursor._index = 0

        mock_are_there_more_results.return_value = True
        mock_value_holder_to_ion_value.side_effect = lambda val: val
        mock_session.return_value = None
        mock_next_page.return_value = None
        mock_next_page.side_effect = next_page
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        # Force the "page exhausted" state.
        read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
        self.assertEqual(next(read_ahead_cursor), updated_result)
        mock_are_there_more_results.assert_called_once_with()
        mock_next_page.assert_called_once_with()
        mock_value_holder_to_ion_value.assert_called_once_with(updated_result)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.cursor.read_ahead_cursor.ReadAheadCursor._next_page')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next_when_next_page_returns_empty_values_and_none_token(self, mock_session, mock_next_page,
                                                                     mock_executor):
        """An empty final page (no token) ends iteration with StopIteration."""
        mock_session.return_value = None

        def next_page():
            read_ahead_cursor._page = {'NextPageToken': None, 'Values': []}
            read_ahead_cursor._index = 0

        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
        mock_next_page.side_effect = next_page
        self.assertRaises(StopIteration, next, read_ahead_cursor)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next_with_next_page_returns_empty_values_and_not_none_token(self, mock_session, mock_executor):
        """Empty pages with a token are skipped until a terminal (token-less) page is reached."""
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = Queue()
        read_ahead_cursor._queue.put({'Page': {'NextPageToken': 'token', 'Values': []}})
        read_ahead_cursor._queue.put({'Page': {'NextPageToken': None, 'Values': []}})
        read_ahead_cursor._index = len(MOCK_ION_BINARY_VALUES)
        self.assertRaises(StopIteration, next, read_ahead_cursor)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_close(self, mock_session, mock_executor):
        """close() flips the open flag."""
        mock_session.return_value = None
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor.close()
        self.assertFalse(read_ahead_cursor._is_open)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_are_there_more_results(self, mock_session, mock_executor):
        """More results exist iff the current page carries a NextPageToken (with an empty queue)."""
        mock_session.return_value = None
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._page = {'NextPageToken': 'token', 'Values': []}
        self.assertTrue(read_ahead_cursor._are_there_more_results())
        read_ahead_cursor._page = {'NextPageToken': None, 'Values': []}
        read_ahead_cursor._queue = Queue()
        self.assertFalse(read_ahead_cursor._are_there_more_results())

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    @patch('pyqldb.cursor.read_ahead_cursor.Queue')
    def test_populate_queue(self, mock_queue, mock_session, mock_executor):
        """_populate_queue fetches the page after the first token and enqueues the result."""
        mock_session.return_value = None
        mock_queue.return_value = mock_queue
        mock_session._fetch_page.return_value = MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN
        mock_queue.full.return_value = False
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = mock_queue
        read_ahead_cursor._populate_queue()
        next_page_token = MOCK_STATEMENT_RESULT.get('FirstPage').get('NextPageToken')
        mock_session._fetch_page.assert_called_once_with(MOCK_TRANSACTION_ID, next_page_token)
        mock_queue.put.assert_called_once_with(MOCK_FETCH_PAGE_RESULT_WITHOUT_TOKEN, timeout=0.05)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.cursor.read_ahead_cursor.logger.debug')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_populate_queue_client_error(self, mock_session, mock_logger_debug, mock_executor):
        """On ClientError the queue is drained and the error itself is enqueued for the consumer."""
        mock_logger_debug.return_value = None
        mock_session.return_value = None
        mock_session._fetch_page.side_effect = ClientError(MOCK_CLIENT_ERROR_MESSAGE, MOCK_MESSAGE)
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = Queue(1)
        read_ahead_cursor._queue.put('value to be removed')
        read_ahead_cursor._populate_queue()
        mock_logger_debug.assert_called_once()
        self.assertIsInstance(read_ahead_cursor._queue.get(), ClientError)
        self.assertEqual(read_ahead_cursor._queue.qsize(), 0)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.cursor.read_ahead_cursor.logger.debug')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_populate_queue_result_closed_error(self, mock_session, mock_logger_debug, mock_executor):
        """If the parent transaction closes mid-fetch, a ResultClosedError is enqueued instead of data."""
        def close_parent_txn(txn_id, token):
            # Simulate the transaction being closed while a fetch is in flight.
            read_ahead_cursor._is_open = False
            return MOCK_STATEMENT_RESULT

        mock_logger_debug.return_value = None
        mock_session.return_value = None
        mock_session._fetch_page.side_effect = close_parent_txn
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = Queue(1)
        read_ahead_cursor._queue.put('value to be removed')
        read_ahead_cursor._populate_queue()
        self.assertEqual(mock_logger_debug.call_count, 2)
        self.assertIsInstance(read_ahead_cursor._queue.get(), ResultClosedError)
        self.assertEqual(read_ahead_cursor._queue.qsize(), 0)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_read_ahead_queue_with_query_stats(self, mock_session, mock_executor):
        """Query stats accumulate page by page as the consumer advances, not as pages are pre-fetched."""
        mock_statement_result_1 = generate_statement_result(1, 2, 3, MOCK_TOKEN, True)
        mock_statement_result_2 = generate_statement_result(1, 2, 3, MOCK_TOKEN, False)
        mock_statement_result_3 = generate_statement_result(1, 2, 3, None, False)

        def fetch_page(txn_id, token):
            # Serve the remaining pages in order, tracking position on the function object.
            statement_results = [mock_statement_result_2, mock_statement_result_3]
            statement_result = statement_results[fetch_page.page_num]
            fetch_page.page_num += 1
            return statement_result
        fetch_page.page_num = 0

        mock_session.return_value = None
        mock_session._fetch_page.side_effect = fetch_page
        mock_read_ahead = 3
        read_ahead_cursor = ReadAheadCursor(mock_statement_result_1, mock_session, MOCK_TRANSACTION_ID,
                                            mock_read_ahead, mock_executor)
        read_ahead_cursor._value_holder_to_ion_value = MagicMock(name='_value_holder_to_ion_value')
        read_ahead_cursor._populate_queue()
        # Queue should be populated with the next two pages
        self.assertEqual(read_ahead_cursor._queue.qsize(), mock_read_ahead - 1)
        # Even if queue is populated with the next two pages, query stats should only total the first page here
        assert_query_stats(self, read_ahead_cursor, 1, 2, 3)
        read_ahead_cursor._next_page()
        # Query stats should only total the first page and second page here
        assert_query_stats(self, read_ahead_cursor, 2, 4, 6)
        read_ahead_cursor._next_page()
        # Query stats should total all three pages
        assert_query_stats(self, read_ahead_cursor, 3, 6, 9)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next_page(self, mock_session, mock_executor):
        """_next_page takes the next queued result, installs its page, and resets the index."""
        mock_page1 = {'Values': [{'IonBinary': 1}, {'IonBinary': 2}], 'NextPageToken': 'token'}
        mock_page2 = {'Values': [{'IonBinary': 2}, {'IonBinary': 3}], 'NextPageToken': None}
        mock_statement_result1 = {'Page': mock_page1}
        mock_statement_result2 = {'Page': mock_page2}
        mock_session.return_value = None
        mock_session._fetch_page.return_value = mock_statement_result2
        read_ahead_cursor = ReadAheadCursor(mock_statement_result1, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = Queue()
        read_ahead_cursor._queue.put(mock_statement_result2)
        read_ahead_cursor._next_page()
        self.assertEqual(read_ahead_cursor._page, mock_page2)
        self.assertEqual(read_ahead_cursor._index, 0)

    @patch('concurrent.futures.thread.ThreadPoolExecutor')
    @patch('pyqldb.communication.session_client.SessionClient')
    def test_next_page_client_error(self, mock_session, mock_executor):
        """An exception placed on the queue by the populate thread is re-raised in _next_page."""
        mock_session.return_value = None
        mock_session._fetch_page.return_value = {'Page': MOCK_STATEMENT_RESULT}
        read_ahead_cursor = ReadAheadCursor(MOCK_STATEMENT_RESULT, mock_session, MOCK_TRANSACTION_ID,
                                            MOCK_READ_AHEAD, mock_executor)
        read_ahead_cursor._queue = Queue()
        read_ahead_cursor._queue.put(ClientError(MOCK_CLIENT_ERROR_MESSAGE, MOCK_MESSAGE))
        self.assertRaises(ClientError, read_ahead_cursor._next_page)

    def test_value_holder_to_ion_value(self):
        """_value_holder_to_ion_value round-trips an Ion value out of its binary holder."""
        ion_value = 'IonValue'
        value_holder = {'IonBinary': dumps(ion_value)}
        result = ReadAheadCursor._value_holder_to_ion_value(value_holder)
        self.assertEqual(result, ion_value)
53.781711
120
0.721369
2,204
18,232
5.509528
0.092559
0.091905
0.125999
0.047435
0.767109
0.725686
0.659722
0.601993
0.574487
0.558758
0
0.005427
0.201569
18,232
338
121
53.940828
0.828742
0.044153
0
0.524164
0
0
0.137844
0.116478
0
0
0
0
0.208178
1
0.078067
false
0
0.033457
0
0.122677
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99481855b36edd94fabfd379b091a1551b3a37b4
2,838
py
Python
test_q2.py
karolineos/Desafio-de-Programa-o-Capgemini-
cb0aeceeb2d6ba393fc208c3c557c2980469e179
[ "MIT" ]
null
null
null
test_q2.py
karolineos/Desafio-de-Programa-o-Capgemini-
cb0aeceeb2d6ba393fc208c3c557c2980469e179
[ "MIT" ]
null
null
null
test_q2.py
karolineos/Desafio-de-Programa-o-Capgemini-
cb0aeceeb2d6ba393fc208c3c557c2980469e179
[ "MIT" ]
null
null
null
# Unit tests for question 2 of the programming challenge: exercises
# buscar_deslocamentos and buscar_deslocamentos_melhorado from module q2.
from unittest import TestCase
from q2 import *


class TestQuestao2Busca(TestCase):
    """Tests for the basic search, buscar_deslocamentos.

    Naming convention: test_when_..._should_return_...
        def test_when_..._should_return_...(self):

    Methods inherited from TestCase:
        | name:         | When it runs:
        | setUp         | Before each test
        | tearDown      | After each test
        | setUpClass    | Before all tests
        | tearDownClass | After all tests
    """

    # def teste_quando_array_crescente_deve_retornar_mediana(self):
    # self.assertEqual(hello_world(), 'hello world')
    # def teste_quando_array_decrescente_deve_retornar_mediana(self):
    #
    def setUp(self):
        # Printed before each test; Portuguese output strings are part of the
        # program's runtime behavior and are kept verbatim.
        print("\nTestando Questao2 Busca")

    def Teste(self, lista, deslocamento,saida_esperada):
        # Helper: runs the search and compares the result against the expected value.
        print(f"Busca d. - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
        saida = buscar_deslocamentos(lista, deslocamento)
        #saida, pares = buscar_deslocamentos(lista, deslocamento)
        print(f"saida:{saida}")
        #print(f"saida:{saida}, pares: {pares}")
        self.assertEqual(saida, saida_esperada)

    def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
        # Example 1 from the problem statement: expects 3 pairs with displacement 2.
        n = [1, 5, 3, 4, 2]
        x = 2
        saida_esperada = 3
        self.Teste(n,x,saida_esperada)


class TestQuestao2BuscaMelhorada(TestCase):
    """Tests for the improved search, buscar_deslocamentos_melhorado.

    Naming convention: test_when_..._should_return_...
        def test_when_..._should_return_...(self):

    Methods inherited from TestCase:
        | name:         | When it runs:
        | setUp         | Before each test
        | tearDown      | After each test
        | setUpClass    | Before all tests
        | tearDownClass | After all tests
    """

    # def teste_quando_array_crescente_deve_retornar_mediana(self):
    # self.assertEqual(hello_world(), 'hello world')
    # def teste_quando_array_decrescente_deve_retornar_mediana(self):
    #
    def setUp(self):
        print("\nTestando Questao2 Busca Melhorada")

    def Teste(self, lista, deslocamento,saida_esperada):
        # Helper: the improved search also returns the matching pairs.
        print(f"Busca d. Melhor - teste: lista: {lista}, deslocamento: {deslocamento}; saida esperada: {saida_esperada}; ", end="")
        saida, pares = buscar_deslocamentos_melhorado(lista, deslocamento)
        print(f"saida:{saida}, pares: {pares}")
        self.assertEqual(saida, saida_esperada)

    def teste_Busca_quando_parametros_sao_do_exemplo1_deve_retornar_3(self):
        n = [1, 5, 3, 4, 2]
        x = 2
        saida_esperada = 3
        self.Teste(n,x,saida_esperada)
37.342105
132
0.597604
299
2,838
5.411371
0.220736
0.096415
0.034611
0.054388
0.873918
0.873918
0.843016
0.843016
0.843016
0.843016
0
0.011675
0.305849
2,838
75
133
37.84
0.809645
0.394644
0
0.571429
0
0.071429
0.210526
0
0
0
0
0.08
0.071429
1
0.214286
false
0
0.071429
0
0.357143
0.214286
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
0
0
0
0
0
8
9949907614d70988d09cf14ed19ffbba33bd91dd
1,994
py
Python
main.py
kriszhengs/kouzhao
f0de3e99b98b696ffbb8cec193d01c7695e45ae3
[ "MIT" ]
null
null
null
main.py
kriszhengs/kouzhao
f0de3e99b98b696ffbb8cec193d01c7695e45ae3
[ "MIT" ]
null
null
null
main.py
kriszhengs/kouzhao
f0de3e99b98b696ffbb8cec193d01c7695e45ae3
[ "MIT" ]
null
null
null
# Scheduled mask (kouzhao) purchase bot: computes a date-salted endpoint token,
# posts a reservation form, and retries on failure. Runs on a cron schedule.
from datetime import datetime
import logging
import requests
from hashlib import md5
from time import sleep
from apscheduler.schedulers.background import BlockingScheduler,BackgroundScheduler
import kzconfig
import json

# Log to a file; all runtime log strings below (some in Chinese) are kept verbatim.
logging.basicConfig(
    handlers=[logging.FileHandler('log.log', 'a', 'utf-8')],
    level=logging.INFO,format='%(asctime)s %(levelname)s - %(message)s'
)
logger =logging.getLogger("kouzhao")


def token()->str:
    """Return today's 8-hex-char URL token.

    The token is characters [7:15] of the MD5 hex digest of the local date
    formatted as "%Y*%m-%d" with the suffix "_Qwe" appended.
    NOTE(review): uses naive local time — presumably the server expects the
    same timezone; confirm against the API.
    """
    now = datetime.now()
    now_str = now.strftime("%Y*%m-%d")+ "_Qwe"
    m = md5()
    m.update(now_str.encode("utf-8"))
    url_md = m.hexdigest()[7:15]
    return url_md


headers = {
    # Mobile Chrome UA so the request looks like it comes from a phone browser.
    "User-Agent":"Mozilla/5.0 (Linux; Android 8.0.0; Pixel 2 XL Build/OPD1.170816.004) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/82.0.4078.0 Mobile Safari/537.36"
}


def miaosha_kz():
    """POST the reservation form, retrying up to kzconfig.MAX_TRY_TIME times.

    Stops early on: success (responseFlag == "1"), the "slot already fully
    booked" message, or an HTTP-level 404 status in the JSON body. Sleeps
    kzconfig.SLEEP_TIME seconds between attempts.
    """
    form_data = kzconfig.form_data
    # form_data = {}
    sleep(1)
    # Endpoint path changes daily via token().
    url = "https://kzapi.****.gov.cn/kouzhao/sq/miaosha/"+token()
    logger.info("today url is %s"%url)
    for i in range(1,kzconfig.MAX_TRY_TIME+1):
        logger.info("开始第 %d 此尝试 "%(i))
        res = requests.post(url=url,data=form_data,headers=headers,timeout=10)
        json_data = res.json()
        logger.info(json.dumps(json_data,ensure_ascii=False))
        # responseFlag "1" means the reservation succeeded.
        shop_res = json_data.get("responseFlag","0") == "1"
        if shop_res:
            logger.info("第 %d 抢购成功"%i)
            break
        elif json_data.get("responseMessage","") == "您好,当前时间段口罩已经约完,建议关注后续的预约活动" :
            # Slot fully booked for this time window — no point retrying.
            logger.info("当前时间段口罩已经约完")
            break
        elif json_data.get("status",200) == 404:
            break
        logger.info("第 %d 抢购失败 %f 秒后再次尝试" % (i,kzconfig.SLEEP_TIME))
        sleep(kzconfig.SLEEP_TIME)
    logger.info("抢购结束")


def main():
    """Run miaosha_kz every 7th day at 19:00:01 (kzconfig.cst_tz) forever."""
    scheduler = BlockingScheduler()
    scheduler.add_job(miaosha_kz, 'cron' , day="*/7", hour='19',minute="0",second='1' ,timezone =kzconfig.cst_tz)
    scheduler.start()


if __name__ == '__main__':
    main()
29.323529
166
0.627382
269
1,994
4.527881
0.513011
0.057471
0.027094
0.019704
0.032841
0
0
0
0
0
0
0.037869
0.218656
1,994
68
167
29.323529
0.743902
0.007021
0
0.056604
0
0.018868
0.215766
0.024255
0
0
0
0
0
1
0.056604
false
0
0.150943
0
0.226415
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99499bf828e2f984b7709bf04b28aafa77fa10fd
5,649
py
Python
Utils/repeated_timer.py
schmouk/PythonOpenSourceProject
3ca3af15672942888560fa4edf7aef3d44e90b57
[ "MIT" ]
null
null
null
Utils/repeated_timer.py
schmouk/PythonOpenSourceProject
3ca3af15672942888560fa4edf7aef3d44e90b57
[ "MIT" ]
35
2020-08-07T09:11:23.000Z
2021-09-12T11:10:04.000Z
Utils/repeated_timer.py
schmouk/PythonOpenSourceProject
3ca3af15672942888560fa4edf7aef3d44e90b57
[ "MIT" ]
null
null
null
""" Copyright (c) 2020 Philippe Schmouker Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #============================================================================= from threading import Event, Thread from typing import Optional from .decorators import abstract #============================================================================= class RepeatedTimer( Thread ): """The class of repeated timers. ===-------------------------------------------------=== CAUTION: When running this code over a non RTOS (for Real-Time Operating System), there is NO WAY to ensure that periods of time will be correctly respected. It MAY and it WILL be that counts of milliseconds will not be respected by the underlying operating system. Theref- ore, you SHOULD NOT USE THIS CODE FOR APPLICATIONS DEALING WITH PEOPLE SAFETY AND FOR ANY OTHER KIND OF APPLICATIONS FOR WHICH REAL TIME OPERATING IS MANDATORY IF THIS CODE IS NOT RUN OVER A TRUE RTOS. Notice: MS-Windows is NOT an RTOS. 
Most versions of Linux are not also, which includes MacOS versions too. ===-------------------------------------------------=== A repeated timer is a specific timer which repeats its processing function after a fixed period of time has elapsed. Repeated timers must be explicitly started with a call to their method '.start()'. They cannot be started twice, since they inherit from threading.Threads. Repeated timers can be definitively stopped by calling their method '.stop()'. Inheriting classes must implement method '.process()'. This method contains the whole stuff that is to be processed every time the watchdog is "awaken". Users are encouraged to add attributes to this class. These will then be accessible into method '.process()' when they might be needed for this processing. """ #------------------------------------------------------------------------- def __init__(self, period_s: float , name : Optional[str] = None, *args, **kwargs ) -> None: '''Constructor. Args: period_s: float The interval of time, expressed as a fract- ional value of seconds, to wait before the timer will repeat. name: str The name of this timer. May be None, in which case the underlying OS will give a default, unique one to it. Defaults to None. *args, **kwargs: Arguments to be passed to the processing function. ''' self.stop_event= Event() self.set_period( period_s ) self.args = args self.kwargs = kwargs super().__init__( name=name ) #------------------------------------------------------------------------- @abstract def process(self) -> None: '''The instructions to be run when timer is repeated. 'self.args' and 'self.kwargs' are available in this method. Raises: NotImplementedError: This method has not been implemented in inheriting class. ''' ... #------------------------------------------------------------------------- def run(self) -> None: '''This method is automatically called by method '.start()'. Notice: method '.start()' is inherited from class 'threading.Thread'. 
''' self.stop_event.clear() ## just to be sure that associate internal flag is set to False while not self.stop_event.wait( self.period_s ): self.process() #------------------------------------------------------------------------- def set_period(self, period_s: int) -> None: '''Modifies/sets the period of time used for repeating this timer. Args: period_s: float The interval of time, expressed as a fract- ional value of seconds, to wait before the timer will repeat. ''' assert period_s > 0.0 self.period_s = period_s #------------------------------------------------------------------------- def stop(self) -> None: '''Definitively stops this repeated timer. ''' self.stop_event.set() #===== end of Utils.repeated_timer =====#
40.06383
96
0.552133
642
5,649
4.820872
0.4081
0.020355
0.014216
0.010339
0.063974
0.063974
0.063974
0.063974
0.063974
0.063974
0
0.001487
0.285537
5,649
140
97
40.35
0.765362
0.750221
0
0
0
0
0
0
0
0
0
0
0.041667
1
0.208333
false
0
0.125
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
994c72ee587b0016dc85da74d0c7617e61b815aa
329
py
Python
examples.py
loynoir/defer.py
46f37a046028b1854586301a45870c2b3a628f65
[ "MIT" ]
null
null
null
examples.py
loynoir/defer.py
46f37a046028b1854586301a45870c2b3a628f65
[ "MIT" ]
null
null
null
examples.py
loynoir/defer.py
46f37a046028b1854586301a45870c2b3a628f65
[ "MIT" ]
null
null
null
# Example usage of the project-local Defer context manager: registered
# callbacks are executed when the 'with' block exits.
# NOTE(review): presumably the deferred callbacks run in reverse (LIFO)
# registration order, Go-style — confirm against the Defer implementation.
from Defer import Defer

with Defer() as defer:
    print("enter the room")
    defer(lambda: print("leave the room"))

    print("prepare printer")
    defer(lambda: print("close printer"))

    print("start printing")
    defer(lambda: print("end printing"))

    print(3)
    print(4)
    print(5)
    print("LONG LONG TASKS")
20.5625
42
0.635258
44
329
4.75
0.5
0.157895
0.229665
0
0
0
0
0
0
0
0
0.011765
0.224924
329
15
43
21.933333
0.807843
0
0
0
0
0
0.294833
0
0
0
0
0
0
1
0
true
0
0.083333
0
0.083333
0.833333
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
3
994d23acaf6906fc4bf97467e6053a890c952369
15,807
py
Python
corehq/apps/locations/views.py
SEL-Columbia/commcare-hq
992ee34a679c37f063f86200e6df5a197d5e3ff6
[ "BSD-3-Clause" ]
1
2015-02-10T23:26:39.000Z
2015-02-10T23:26:39.000Z
corehq/apps/locations/views.py
SEL-Columbia/commcare-hq
992ee34a679c37f063f86200e6df5a197d5e3ff6
[ "BSD-3-Clause" ]
null
null
null
corehq/apps/locations/views.py
SEL-Columbia/commcare-hq
992ee34a679c37f063f86200e6df5a197d5e3ff6
[ "BSD-3-Clause" ]
null
null
null
"""Django views for CommCare HQ location management: list/create/edit
locations, configure location types, import/export locations, and sync
with external facility registries (OpenLMIS).

NOTE(review): Python 2 era code (``urllib.urlencode``, ``dict.iteritems``,
``HttpResponse(mimetype=...)``) — do not modernize piecemeal.
"""
import copy
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.utils.safestring import mark_safe
from django.views.decorators.http import require_POST
from corehq.apps.commtrack.views import BaseCommTrackManageView
from corehq.apps.domain.decorators import domain_admin_required, login_and_domain_required
from corehq.apps.hqwebapp.utils import get_bulk_upload_form
from corehq.apps.locations.models import Location
from corehq.apps.locations.forms import LocationForm
from corehq.apps.locations.util import load_locs_json, location_hierarchy_config, dump_locations
from corehq.apps.commtrack.models import LocationType, Product, SupplyPointCase
from corehq.apps.commtrack.util import unicode_slug
from corehq.apps.facilities.models import FacilityRegistry
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.contrib import messages
from couchdbkit import ResourceNotFound
import urllib
import json
from django.utils.translation import ugettext as _, ugettext_noop
from dimagi.utils.decorators.memoized import memoized
from custom.openlmis.tasks import bootstrap_domain_task
from soil.util import expose_download, get_download_context
from corehq.apps.commtrack.tasks import import_locations_async
from couchexport.models import Format
from corehq.apps.consumption.shortcuts import get_default_monthly_consumption


@domain_admin_required
def default(request, domain):
    """Redirect the bare locations URL to the locations list view."""
    return HttpResponseRedirect(reverse(LocationsListView.urlname, args=[domain]))


class BaseLocationView(BaseCommTrackManageView):
    """Common template context (hierarchy config + API root) for location views."""

    @property
    def main_context(self):
        context = super(BaseLocationView, self).main_context
        context.update({
            'hierarchy': location_hierarchy_config(self.domain),
            # Tastypie-style endpoint used by the client-side location tree.
            'api_root': reverse('api_dispatch_list',
                                kwargs={'domain': self.domain,
                                        'resource_name': 'location',
                                        'api_name': 'v0.3'}),
        })
        return context


class LocationsListView(BaseLocationView):
    """Browsable tree of the domain's locations."""
    urlname = 'manage_locations'
    page_title = ugettext_noop("Locations")
    template_name = 'locations/manage/locations.html'

    @property
    def page_context(self):
        # 'selected' pre-expands the tree down to that location id.
        selected_id = self.request.GET.get('selected')
        return {
            'selected_id': selected_id,
            'locations': load_locs_json(self.domain, selected_id),
        }


class LocationSettingsView(BaseCommTrackManageView):
    """View/edit the domain's configured location types (hierarchy levels)."""
    urlname = 'location_settings'
    page_title = ugettext_noop("Location Types")
    template_name = 'locations/settings.html'

    @property
    def page_context(self):
        return {
            'settings': self.settings_context,
        }

    @property
    def settings_context(self):
        return {
            'loc_types': [self._get_loctype_info(l) for l in
                          self.domain_object.commtrack_settings.location_types],
        }

    def _get_loctype_info(self, loctype):
        # Serialize a LocationType for the settings page JSON.
        return {
            'name': loctype.name,
            'code': loctype.code,
            # Empty-string parent is presented to the client as None (root).
            'allowed_parents': [p or None for p in loctype.allowed_parents],
            'administrative': loctype.administrative,
        }

    def post(self, request, *args, **kwargs):
        """Replace the domain's location types from the posted JSON payload."""
        payload = json.loads(request.POST.get('json'))

        def mk_loctype(loctype):
            # Inverse of _get_loctype_info: None parent back to ''.
            loctype['allowed_parents'] = [p or '' for p in loctype['allowed_parents']]
            # Codes must be slug-safe; warn the user if we had to rewrite one.
            cleaned_code = unicode_slug(loctype['code'])
            if cleaned_code != loctype['code']:
                err = _(
                    'Location type code "{code}" is invalid. No spaces or special characters are allowed. '
                    'It has been replaced with "{new_code}".'
                )
                messages.warning(request,
                                 err.format(code=loctype['code'], new_code=cleaned_code))
                loctype['code'] = cleaned_code
            return LocationType(**loctype)

        #TODO add server-side input validation here (currently validated on client)
        self.domain_object.commtrack_settings.location_types = \
            [mk_loctype(l) for l in payload['loc_types']]
        self.domain_object.commtrack_settings.save()
        return self.get(request, *args, **kwargs)


class NewLocationView(BaseLocationView):
    """Create a new location (optionally under a '?parent=' location)."""
    urlname = 'create_location'
    page_title = ugettext_noop("New Location")
    template_name = 'locations/manage/location.html'

    @property
    def parent_pages(self):
        return [{
            'title': LocationsListView.page_title,
            'url': reverse(LocationsListView.urlname, args=[self.domain]),
        }]

    @property
    def parent_id(self):
        return self.request.GET.get('parent')

    @property
    @memoized
    def location(self):
        # Unsaved Location; EditLocationView overrides this to load one.
        return Location(domain=self.domain, parent=self.parent_id)

    @property
    def consumption(self):
        # New locations have no consumption data yet.
        return None

    @property
    @memoized
    def metadata(self):
        # Copy so template-side mutation can't touch the document.
        return copy.copy(dict(self.location.metadata))

    @property
    @memoized
    def location_form(self):
        if self.request.method == 'POST':
            return LocationForm(self.location, self.request.POST)
        return LocationForm(self.location)

    @property
    def page_context(self):
        return {
            'form': self.location_form,
            'location': self.location,
            'consumption': self.consumption,
            'metadata': self.metadata
        }

    def post(self, request, *args, **kwargs):
        """Save the form; on success redirect to the list with it selected."""
        if self.location_form.is_valid():
            self.location_form.save()
            messages.success(request, _('Location saved!'))
            return HttpResponseRedirect('%s?%s' % (
                reverse(LocationsListView.urlname, args=[self.domain]),
                urllib.urlencode({'selected': self.location_form.location._id})
            ))
        return self.get(request, *args, **kwargs)


class EditLocationView(NewLocationView):
    """Edit an existing location; reuses NewLocationView's form machinery."""
    urlname = 'edit_location'
    page_title = ugettext_noop("Edit Location")

    @property
    def location_id(self):
        return self.kwargs['loc_id']

    @property
    @memoized
    def location(self):
        try:
            return Location.get(self.location_id)
        except ResourceNotFound:
            raise Http404()

    @property
    @memoized
    def supply_point(self):
        return SupplyPointCase.get_by_location(self.location)

    @property
    def consumption(self):
        """(product name, default monthly consumption) pairs for this location."""
        consumptions = []
        for product in Product.by_domain(self.domain):
            consumption = get_default_monthly_consumption(
                self.domain,
                product._id,
                self.location.location_type,
                # A location may have no supply point case yet.
                self.supply_point._id if self.supply_point else None,
            )
            if consumption:
                consumptions.append((product.name, consumption))
        return consumptions

    @property
    def page_name(self):
        # mark_safe: the <small> tag is intentional markup in the title.
        return mark_safe(_("Edit {name} <small>{type}</small>").format(
            name=self.location.name, type=self.location.location_type
        ))

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.domain, self.location_id])


class BaseSyncView(BaseLocationView):
    """Base for external-system sync settings views (e.g. OpenLMIS)."""
    source = ""          # subclass sets the commtrack config prefix
    sync_urlname = None  # subclass sets the URL name of the sync action

    @property
    def page_context(self):
        return {
            'settings': self.settings_context,
            'source': self.source,
            'sync_url': self.sync_urlname
        }

    @property
    def settings_context(self):
        # e.g. source='openlmis' -> commtrack_settings.openlmis_config
        key = "%s_config" % self.source
        if hasattr(self.domain_object.commtrack_settings, key):
            return {
                "source_config": getattr(self.domain_object.commtrack_settings, key)._doc,
            }
        else:
            return {}

    def post(self, request, *args, **kwargs):
        """Apply posted source_config fields onto the domain's sync config."""
        payload = json.loads(request.POST.get('json'))

        #TODO add server-side input validation here (currently validated on client)
        key = "%s_config" % self.source
        if "source_config" in payload:
            for item in payload['source_config']:
                if hasattr(self.domain_object.commtrack_settings, key):
                    setattr(
                        getattr(self.domain_object.commtrack_settings, key),
                        item,
                        payload['source_config'][item]
                    )
        self.domain_object.commtrack_settings.save()
        return self.get(request, *args, **kwargs)


class FacilitySyncView(BaseSyncView):
    urlname = 'sync_facilities'
    sync_urlname = 'sync_openlmis'
    page_title = ugettext_noop("OpenLMIS")
    template_name = 'locations/facility_sync.html'
    source = 'openlmis'


class EditLocationHierarchy(BaseLocationView):
    urlname = 'location_hierarchy'
    page_title = ugettext_noop("Location Hierarchy")
    template_name = 'locations/location_hierarchy.html'


class LocationImportStatusView(BaseLocationView):
    """Progress page for an asynchronous (soil/celery) location import."""
    urlname = 'location_import_status'
    page_title = ugettext_noop('Location Import Status')
    template_name = 'hqwebapp/soil_status_full.html'

    def get(self, request, *args, **kwargs):
        context = super(LocationImportStatusView, self).main_context
        context.update({
            'domain': self.domain,
            'download_id': kwargs['download_id'],
            'poll_url': reverse('location_importer_job_poll',
                                args=[self.domain, kwargs['download_id']]),
            'title': _("Location Import Status"),
            'progress_text': _("Importing your data. This may take some time..."),
            'error_text': _("Problem importing data! Please try again or report an issue."),
        })
        return render(request, self.template_name, context)

    # NOTE(review): unlike sibling views, this is a plain method rather than a
    # @property — confirm whether the base class expects a property here.
    def page_url(self):
        return reverse(self.urlname, args=self.args, kwargs=self.kwargs)


class LocationImportView(BaseLocationView):
    """Excel bulk-upload form; kicks off the async import task."""
    urlname = 'location_import'
    page_title = ugettext_noop('Upload Locations from Excel')
    template_name = 'locations/manage/import.html'

    @property
    def page_context(self):
        context = {
            'bulk_upload': {
                "download_url": reverse(
                    "location_export", args=(self.domain,)),
                "adjective": _("location"),
                "plural_noun": _("locations"),
            },
            "manage_consumption":
                self.domain_object.commtrack_settings.individual_consumption_defaults,
        }
        context.update({
            'bulk_upload_form': get_bulk_upload_form(context),
        })
        return context

    def post(self, request, *args, **kwargs):
        """Validate the upload, hand it to celery, redirect to status page."""
        upload = request.FILES.get('bulk_upload_file')
        if not upload:
            messages.error(request, _('no file uploaded'))
            return self.get(request, *args, **kwargs)
        if not args:
            messages.error(request, _('no domain specified'))
            return self.get(request, *args, **kwargs)

        domain = args[0]

        # stash this in soil to make it easier to pass to celery
        file_ref = expose_download(upload.read(),
                                   expiry=1*60*60)
        task = import_locations_async.delay(
            domain,
            file_ref.download_id,
        )
        file_ref.set_task(task)
        return HttpResponseRedirect(
            reverse(
                LocationImportStatusView.urlname,
                args=[domain, file_ref.download_id]
            )
        )


@login_and_domain_required
def location_importer_job_poll(request, domain, download_id,
                               template="hqwebapp/partials/download_status.html"):
    """AJAX poll endpoint for the async import's progress/result."""
    context = get_download_context(download_id, check_state=True)
    context.update({
        'on_complete_short': _('Import complete.'),
        'on_complete_long': _('Location importing has finished'),
    })
    return render(request, template, context)


@login_and_domain_required
def location_export(request, domain):
    """Stream all locations as an xlsx attachment (optionally w/ consumption)."""
    include_consumption = request.GET.get('include_consumption') == 'true'
    response = HttpResponse(mimetype=Format.from_format('xlsx').mimetype)
    response['Content-Disposition'] = 'attachment; filename="locations.xlsx"'
    dump_locations(response, domain, include_consumption)
    return response


@domain_admin_required # TODO: will probably want less restrictive permission
def location_edit(request, domain, loc_id=None):
    """Function-based create/edit view (older counterpart of the class views)."""
    parent_id = request.GET.get('parent')

    if loc_id:
        try:
            location = Location.get(loc_id)
        except ResourceNotFound:
            raise Http404()
    else:
        location = Location(domain=domain, parent=parent_id)

    if request.method == "POST":
        form = LocationForm(location, request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Location saved!')
            return HttpResponseRedirect('%s?%s' % (
                reverse('manage_locations', kwargs={'domain': domain}),
                urllib.urlencode({'selected': form.location._id})
            ))
    else:
        form = LocationForm(location)

    context = {
        'domain': domain,
        'api_root': reverse('api_dispatch_list',
                            kwargs={'domain': domain,
                                    'resource_name': 'location',
                                    'api_name': 'v0.3'}),
        'location': location,
        'hierarchy': location_hierarchy_config(domain),
        'form': form,
    }
    return render(request, 'locations/manage/location.html', context)


@domain_admin_required
@require_POST
def sync_facilities(request, domain):
    """Mirror external facility registries into Location documents.

    Marks touched docs with a transient ``_seen`` attribute, then deletes
    any previously-synced location that was not seen this run.
    """
    commtrack_settings = request.project.commtrack_settings

    # create Facility Registry and Facility LocationTypes if they don't exist
    if not any(lt.name == 'Facility Registry'
               for lt in commtrack_settings.location_types):
        commtrack_settings.location_types.extend([
            LocationType(name='Facility Registry', allowed_parents=['']),
            LocationType(name='Facility', allowed_parents=['Facility Registry'])
        ])
        commtrack_settings.save()

    registry_locs = dict((l.external_id, l) for l in
                         Location.filter_by_type(domain, 'Facility Registry'))

    # sync each registry and add/update Locations for each Facility
    for registry in FacilityRegistry.by_domain(domain):
        registry.sync_with_remote()

        try:
            registry_loc = registry_locs[registry.url]
        except KeyError:
            registry_loc = Location(
                domain=domain, location_type='Facility Registry',
                external_id=registry.url)
        registry_loc.name = registry.name
        registry_loc.save()
        registry_loc._seen = True

        facility_locs = dict((l.external_id, l) for l in
                             Location.filter_by_type(domain, 'Facility', registry_loc))

        for facility in registry.get_facilities():
            uuid = facility.data['uuid']
            try:
                facility_loc = facility_locs[uuid]
            except KeyError:
                facility_loc = Location(
                    domain=domain, location_type='Facility',
                    external_id=uuid, parent=registry_loc)
            facility_loc.name = facility.data.get('name', 'Unnamed Facility')
            facility_loc.save()
            facility_loc._seen = True

        # Remove facilities that disappeared from this registry.
        for id, f in facility_locs.iteritems():
            if not hasattr(f, '_seen'):
                f.delete()

    # Remove registries that are no longer configured.
    for id, r in registry_locs.iteritems():
        if not hasattr(r, '_seen'):
            r.delete()

    return HttpResponse('OK')


@domain_admin_required
@require_POST
def sync_openlmis(request, domain):
    """Fire-and-forget celery task that bootstraps OpenLMIS data."""
    # todo: error handling, if we care.
    bootstrap_domain_task.delay(domain)
    return HttpResponse('OK')
34.437908
115
0.636617
1,670
15,807
5.834132
0.176048
0.021554
0.015806
0.023094
0.263984
0.203223
0.156317
0.118033
0.101406
0.091758
0
0.001635
0.264693
15,807
458
116
34.5131
0.836617
0.026824
0
0.293478
0
0
0.126634
0.02374
0
0
0
0.002183
0
1
0.092391
false
0
0.11413
0.043478
0.415761
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
994dd444ad53259ecaaf7c341ddbb86dfd9de54c
363
py
Python
setup.py
wew84/cnn_bridge
7cd98e204922174ea9293d8c52c30d00733a7ed2
[ "BSD-3-Clause" ]
4
2019-05-16T01:30:24.000Z
2021-04-13T13:55:44.000Z
setup.py
wew84/cnn_bridge
7cd98e204922174ea9293d8c52c30d00733a7ed2
[ "BSD-3-Clause" ]
2
2020-01-23T23:39:45.000Z
2021-12-17T02:32:11.000Z
setup.py
wew84/cnn_bridge
7cd98e204922174ea9293d8c52c30d00733a7ed2
[ "BSD-3-Clause" ]
null
null
null
## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup

from catkin_pkg.python_setup import generate_distutils_setup

# Catkin's helper reads the package metadata from package.xml; only the
# distribution layout and runtime requirements are declared here.
package_info = generate_distutils_setup(
    package_dir={'': 'src'},
    packages=['cnn_bridge_main'],
    requires=['tensorflow', 'numpy', 'opencv'],
)

setup(**package_info)
25.928571
61
0.743802
48
363
5.395833
0.666667
0.131274
0.169884
0
0
0
0
0
0
0
0
0
0.137741
363
13
62
27.923077
0.827476
0.242424
0
0
1
0
0.143911
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
995050aaf43abe070dfab1f85dd869f0e7f9f405
1,642
py
Python
warmup_2/string_splosion.py
nhutnamhcmus/coding-bat-solutions
5f780a4027a6c3523a72961db1bad547c997fdc6
[ "MIT" ]
1
2020-09-19T18:02:13.000Z
2020-09-19T18:02:13.000Z
warmup_2/string_splosion.py
nhutnamhcmus/coding-bat-solutions
5f780a4027a6c3523a72961db1bad547c997fdc6
[ "MIT" ]
null
null
null
warmup_2/string_splosion.py
nhutnamhcmus/coding-bat-solutions
5f780a4027a6c3523a72961db1bad547c997fdc6
[ "MIT" ]
null
null
null
# =======================================================================================================================================
# VNU-HCM, University of Science
# Department Computer Science, Faculty of Information Technology
# Authors: Nhut-Nam Le (Tich Phan Suy Rong)
# © 2020
import unittest

"""
Given a non-empty string like "Code" return a string like "CCoCodCode".

string_splosion('Code') → 'CCoCodCode'
string_splosion('abc') → 'aababc'
string_splosion('ab') → 'aab'
"""


# NOTE: the parameter is named ``str`` in the original CodingBat signature and
# is kept for backward compatibility, even though it shadows the builtin.
def string_splosion(str):
    """Return every prefix of *str* concatenated, shortest prefix first.

    Args:
        str: the input string (may be empty, in which case '' is returned).

    Returns:
        str[:1] + str[:2] + ... + str[:len(str)].
    """
    # ''.join over a generator replaces the original while-loop with repeated
    # ``result += ...`` concatenation (quadratic on non-CPython runtimes).
    return ''.join(str[:i] for i in range(1, len(str) + 1))


class TestStringSplosion(unittest.TestCase):
    def test_case_00(self):
        self.assertEqual(string_splosion('Code'), 'CCoCodCode')

    def test_case_01(self):
        self.assertEqual(string_splosion('abc'), 'aababc')

    def test_case_02(self):
        self.assertEqual(string_splosion('ab'), 'aab')

    def test_case_03(self):
        self.assertEqual(string_splosion('x'), 'x')

    def test_case_04(self):
        self.assertEqual(string_splosion('fade'), 'ffafadfade')

    def test_case_05(self):
        self.assertEqual(string_splosion('There'), 'TThTheTherThere')

    def test_case_06(self):
        self.assertEqual(string_splosion('Kitten'), 'KKiKitKittKitteKitten')

    def test_case_07(self):
        self.assertEqual(string_splosion('Bye'), 'BByBye')

    def test_case_08(self):
        self.assertEqual(string_splosion('Good'), 'GGoGooGood')

    def test_case_09(self):
        self.assertEqual(string_splosion('Bad'), 'BBaBad')


if __name__ == "__main__":
    unittest.main()
25.261538
137
0.612058
189
1,642
5.116402
0.412698
0.202689
0.113754
0.258532
0.341262
0
0
0
0
0
0
0.020045
0.179659
1,642
64
138
25.65625
0.694878
0.169306
0
0
0
0
0.111679
0.017903
0
0
0
0
0.322581
1
0.354839
false
0
0.032258
0
0.451613
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
3
995106f61a1486869dcff6b8ab27a4c99952dfef
334
py
Python
ea_sim/params_conf.py
lis-epfl/Tensoft-G21
7a83c5dabc12906c0a6bd1da0a28a131e9d5e144
[ "Apache-2.0" ]
1
2021-08-03T10:52:20.000Z
2021-08-03T10:52:20.000Z
ea_sim/params_conf.py
lis-epfl/Tensoft-G21
7a83c5dabc12906c0a6bd1da0a28a131e9d5e144
[ "Apache-2.0" ]
null
null
null
ea_sim/params_conf.py
lis-epfl/Tensoft-G21
7a83c5dabc12906c0a6bd1da0a28a131e9d5e144
[ "Apache-2.0" ]
1
2021-09-18T07:23:35.000Z
2021-09-18T07:23:35.000Z
# centralized file where robot parameters can be customized

# Bounds (inclusive) on how many modules a robot may be built from.
MIN_NUM_MODULES = 2
MAX_NUM_MODULES = 10

# All admissible module counts, derived from the bounds above.
N_MODULES = [count for count in range(MIN_NUM_MODULES, MAX_NUM_MODULES + 1)]

# Discrete stiffness levels available to a module.
STIFF_TABLE = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
# Alternative finer/wider-range table, kept for reference:
#STIFF_TABLE = [0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4]
41.75
108
0.637725
84
334
2.404762
0.404762
0.19802
0.128713
0.118812
0.188119
0.188119
0.188119
0.188119
0.188119
0.188119
0
0.224199
0.158683
334
7
109
47.714286
0.494662
0.491018
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
99511d438332d3bcdc261ac0609cfadb3d8b5557
2,751
gyp
Python
binding.gyp
Cinegy/CinecoderNodeJs
9f9e932072b5b19ef786eb5bb2ee22381c6fed34
[ "Apache-2.0" ]
3
2019-02-07T15:42:49.000Z
2019-12-07T22:17:50.000Z
binding.gyp
Cinegy/CinecoderNodeJs
9f9e932072b5b19ef786eb5bb2ee22381c6fed34
[ "Apache-2.0" ]
3
2017-01-13T19:58:04.000Z
2021-08-31T21:28:10.000Z
binding.gyp
Cinegy/CinecoderNodeJs
9f9e932072b5b19ef786eb5bb2ee22381c6fed34
[ "Apache-2.0" ]
5
2017-01-13T17:09:56.000Z
2019-12-07T22:17:53.000Z
# Copyright 2016 Streampunk Media Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. { "targets": [ { "target_name": "cinecoder", "sources": [ "src/cinecoder.cc", "src/Decoder.cc", "src/Encoder.cc", "src/DecoderCinegy.cc", "src/EncoderCinegy.cc" ], "include_dirs": [ "<!(node -e \"require('nan')\")" ], 'conditions': [ ['OS=="linux"', { "cflags_cc!": [ "-fno-rtti", "-fno-exceptions" ], "cflags_cc": [ "-std=c++11", "-fexceptions" ], "link_settings": { "libraries": [ ], "ldflags": [ "-L<@(module_root_dir)/build/Release", "-Wl,-rpath,<@(module_root_dir)/build/Release" ] }, "copies": [ { "destination": "build/Release/", "files": [ # "cinegy/bin/cinecoder.so" ] } ] }], ['OS=="win"', { "variables": { "CinecoderRoot": "packages/Cinecoder.3.33.41.230" }, "include_dirs": ["<(CinecoderRoot)/sources/"], "sources" : [ "<(CinecoderRoot)/sources/Cinecoder_i.c" ], "configurations": { "Release": { "msvs_settings": { "VCCLCompilerTool": { "RuntimeTypeInfo": "true", "ExceptionHandling": 1 } } } }, "libraries": [ "-l../<(CinecoderRoot)/runtimes/win-x64/native/release/cinecoder.lib", "-l../<(CinecoderRoot)/runtimes/win-x64/native/release/D2_CUDA_lib.lib" ], "copies": [ { "destination": "build/Release/", "files": [ "<(CinecoderRoot)/runtimes/win-x64/native/release/Cinecoder.dll", "<(CinecoderRoot)/runtimes/win-x64/native/release/cudart64_80.dll", "<(CinecoderRoot)/runtimes/win-x64/native/release/D2_CUDA_lib.dll" ] } ] }] ], } ] }
31.62069
83
0.479826
240
2,751
5.429167
0.558333
0.046048
0.092095
0.103607
0.27782
0.18726
0.18726
0.075211
0.075211
0
0
0.020349
0.374773
2,751
86
84
31.988372
0.737209
0.211559
0
0.183099
0
0
0.45311
0.231198
0
0
0
0
0
1
0
true
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
99518065e86a0471bd9b44cb187c5e08ca0206fa
291
py
Python
src/rdml_graph/information_gathering/__init__.py
ianran/rdml_graph
83f3896a2a0f5ceb7e092f4e719fb35254c5a5f8
[ "MIT" ]
4
2020-09-01T17:52:18.000Z
2022-01-18T22:36:48.000Z
src/rdml_graph/information_gathering/__init__.py
ianran/rdml_graph
83f3896a2a0f5ceb7e092f4e719fb35254c5a5f8
[ "MIT" ]
null
null
null
src/rdml_graph/information_gathering/__init__.py
ianran/rdml_graph
83f3896a2a0f5ceb7e092f4e719fb35254c5a5f8
[ "MIT" ]
null
null
null
# init for information_gathering from .Evaluator import PathEvaluator, PathEvaluatorWithRadius, PathEvaluatorAlongPath, applyBudget from .MaskedEvaluator import MaskedEvaluator from .StochasticOptimizer import StochasticOptimizer from .InfoField import random_field2d, random_multi_field2d
41.571429
98
0.883162
27
291
9.37037
0.62963
0
0
0
0
0
0
0
0
0
0
0.007519
0.085911
291
6
99
48.5
0.943609
0.103093
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
99533e6ec88630f0bf822397008bdc4f64d07cdc
21,117
py
Python
web/project/training_api/libs/utilities.py
allspeak/api.allspeak.eu
0403c4ed870c32ff9846f943e28aeb897f4baf3c
[ "MIT" ]
1
2018-09-03T14:48:27.000Z
2018-09-03T14:48:27.000Z
web/project/training_api/libs/utilities.py
allspeak/api.allspeak.eu
0403c4ed870c32ff9846f943e28aeb897f4baf3c
[ "MIT" ]
null
null
null
web/project/training_api/libs/utilities.py
allspeak/api.allspeak.eu
0403c4ed870c32ff9846f943e28aeb897f4baf3c
[ "MIT" ]
null
null
null
# createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip) # createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter) # createFullMatrix(input_matrix_folder, data_name, label_name, output_matrix_path="") import os import shutil import ntpath import glob import re import json import numpy as np from numpy import genfromtxt from datetime import datetime from . import earray_wrapper def moveFolderContent(indir, outdir): for file in os.listdir(indir): fileref = indir + '/' + file if os.path.isdir(fileref) is False: shutil.move(fileref, outdir + '/' + file) def getFileName(path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def remoteExtension(path): return path.split('.')[0] # works when input files are : commandlabelNREP_scores.dat # vocfilepath contains lines as follows: # 1 cmdlab1 # 2 cmdlab7 # 3 cmlab8 # etc... def renameSubjectFiles(subject_name, inrootpath, outrootpath, vocfilepath): inpath = inrootpath + '/' + subject_name outpath = outrootpath + '/' + subject_name if os.path.isdir(outpath) is False: os.mkdir(outpath) with open(vocfilepath, "r") as f: data = f.readlines() for line in data: words = line.split() # words[0]=num corrispondente al comando, words[1]=nome frase for infile in glob.glob(os.path.join(inpath, '*.*')): file_name = os.path.basename(infile) b = re.split('(\d+)', file_name) if b[0] == words[1]: shutil.copy2(infile, outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat") # print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat # works when input files are : SUBJ_commandlabelNREP_scores.dat # vocfilepath contains lines as follows: # 1 cmdlab1 # 2 cmdlab7 # 3 cmlab8 # etc... 
def renameSubjectsFiles(subjects_name, inrootpath, outrootpath, vocfilepath): inpath = inrootpath + '/' + subjects_name with open(vocfilepath, "r") as f: data = f.readlines() for line in data: words = line.split() # words[0]=num corrispondente al comando, words[1]=nome frase for infile in glob.glob(os.path.join(inpath, '*.*')): file_name = os.path.basename(infile) id = file_name.index("_") subjlabel = file_name[:id] if os.path.isdir(outrootpath + '/' + subjlabel) is False: os.mkdir(outrootpath + '/' + subjlabel) file_name = file_name[(id+1):] b = re.split('(\d+)', file_name) if b[0] == words[1]: shutil.copy2(infile, outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + words[0] + "_" + b[1] + ".dat") # print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat # works when input files are : commandlabelNREP_scores.dat # vocfilepath contains lines as follows: # 1 cmdlab1 # 2 cmdlab7 # 3 cmlab8 # etc... def renameSubjectFilesJSON(subject_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"): inpath = inrootpath + '/' + subject_name outpath = outrootpath + '/' + subject_name if os.path.isdir(outpath) is False: os.mkdir(outpath) vocabulary = getVocabularyFromJSON(jsonvocfilepath) for sentence in vocabulary: sentenceid = str(sentence["id"]) lab = remoteExtension(str(sentence["readablefilename"])) #with open(vocfilepath, "r") as f: # data = f.readlines() # for line in data: # words = line.split() # words[0]=num corrispondente al comando, words[1]=nome frase for infile in glob.glob(os.path.join(inpath, '*.*')): file_name = os.path.basename(infile) b = re.split('(\d+)', file_name) if b[0] == lab: shutil.copy2(infile, outpath + '/' + subject_name + "_" + sentenceid + "_" + b[1] + ext) # print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat # works when input files are : SUBJ_commandlabelNREP_scores.dat # jsonvocfilepath contains lines as follows: #{ "vocabulary_categories": [], # "voicebank_vocabulary": [ { "title": "Sono felice", 
"id": 1101, "filename":"", "readablefilename" : "sono_felice.wav", "existwav": 0, "editable":false}, ...] #} def renameSubjectsFilesJSON(subjects_name, inrootpath, outrootpath, jsonvocfilepath, ext=".dat"): inpath = inrootpath + '/' + subjects_name vocabulary = getVocabularyFromJSON(jsonvocfilepath) # words = line.split() # words[0]=num corrispondente al comando, words[1]=nome frase for infile in glob.glob(os.path.join(inpath, '*.*')): copied = False file_name = os.path.basename(infile) id = file_name.index("_") subjlabel = file_name[:id] if os.path.isdir(outrootpath + '/' + subjlabel) is False: os.mkdir(outrootpath + '/' + subjlabel) file_name = file_name[(id + 1):] b = re.split('(\d+)', file_name) for sentence in vocabulary: sentenceid = str(sentence["id"]) lab = remoteExtension(str(sentence["readablefilename"])) if b[0] == lab: if os.path.isdir(outrootpath + '/' + subjlabel) is False: os.mkdir(outrootpath + '/' + subjlabel) shutil.copy2(infile, outrootpath + '/' + subjlabel + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext) shutil.copy2(infile, outrootpath + '/' + subjlabel + "_" + sentenceid + "_" + b[1] + ext) copied = True break # print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat if copied is False: print(infile) # works when input files are : commandlabelNREP.dat.SUBJLABEL def renameSubjectFilesOld(subject_name, inrootpath, outrootpath, vocfilepath): inpath = inrootpath + '/' + subject_name outpath = outrootpath + '/' + subject_name if os.path.isdir(outpath) is False: os.mkdir(outpath) with open(vocfilepath, "r") as f: data = f.readlines() for line in data: words = line.split() # words[0]=num corrispondente al comando, words[1]=nome frase for infile in glob.glob(os.path.join(inpath, '*.*')): file_name = os.path.basename(infile) a = os.path.splitext(file_name)[0] b = re.split('(\d+)', a) if b[0] == words[1]: shutil.copy2(inpath + '/' + a + '.' 
+ subject_name, outpath + '/' + subject_name + "_" + words[0] + "_" + b[1] + ".dat") # print subject_name + "_" + words[0] + "_" + b[1] + ".dat" -> te_0_0.dat def getVocabularyFromJSON(json_inputfile): with open(json_inputfile, encoding='utf-8') as data_file: data = json.load(data_file) return data["voicebank_vocabulary"] def createVocabularySentence(list_ids, json_inputfile, txt_outputfile): vocabulary = getVocabularyFromJSON(json_inputfile) file = open(txt_outputfile, 'w+') for id in list_ids: for sentence in vocabulary: sentenceid = sentence["id"] if id == sentenceid: title = sentence["title"] file.write(title + os.linesep) break file.close() def createVocabularyJson(list_ids, model, sessiondata, training_sessionid, json_globalvocabulary, json_outputfile): # get commands list from json_globalvocabulary vocabulary = getVocabularyFromJSON(json_globalvocabulary) commands = [] for id in list_ids: for sentence in vocabulary: sentenceid = sentence["id"] if id == sentenceid: commands.append({'title': sentence["title"], 'id': sentenceid}) break lencmds = len(commands) nw = datetime.now() # sModelFilePath is written by the App res = { 'sLabel': sessiondata['sLabel'], 'nModelClass': sessiondata['nModelClass'], 'nModelType': sessiondata['nModelType'], 'nInputParams': model['nInputParams'], 'nContextFrames': model['nContextFrames'], 'nItems2Recognize': lencmds, 'sModelFilePath': "", 'sModelFileName': model['sModelFileName'], 'saInputNodeName': model['saInputNodeName'], 'sOutputNodeName': model['sOutputNodeName'], 'nProcessingScheme': sessiondata['nProcessingScheme'], 'fRecognitionThreshold': model['fRecognitionThreshold'], 'sCreationTime': nw.strftime('%Y/%m/%d %H:%M:%S'), 'sLocalFolder': sessiondata['sLocalFolder'], 'sessionid': str(training_sessionid), 'commands': commands } with open(json_outputfile, 'w', encoding='utf-8') as data_file: json.dump(res, data_file) # 
=========================================================================================================================== # aims : This script creates the training matrix for a single subject (ctx_*.dat ==> SUBJ_train_data.npy [earray h5]) # # input : subj: subject folder name # in_orig_subj_path: path to the subject's cepstra with context # output_net_path: path to the output folder # arr_commands: IDs of the selected commands # arr_rip: range from 0 to Nripetitions # # return : output_matrices_path: path to the output folder (e.g. output/train/ANALYSISNAME/matrices) # =========================================================================================================================== def createSubjectTrainingMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, file_prefix='ctx'): mat_compl = [] mat_lab = [] totalsize = 0 output_matrices_path = os.path.join(output_net_path, 'matrices') write_every_nfiles = 1 # every N (e.g. 10) files read, append them to disk and clear arrays if os.path.isdir(output_matrices_path) is False: os.mkdir(output_matrices_path) if subj != '': subj = subj + "_" output_data_matrix_path = output_matrices_path + '/' + subj + 'train_data.npy' output_labels_matrix_path = output_matrices_path + '/' + subj + 'train_labels.npy' if os.path.exists(output_data_matrix_path) is True: os.remove(output_data_matrix_path) if os.path.exists(output_labels_matrix_path) is True: os.remove(output_labels_matrix_path) try: cnt = 0 for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'): spl = re.split('[_ .]', ctxfile) # e.g. 
ctx_SUBJ_CMD_REP => spl2[2] num comando, spl[3] num ripetiz id_cmd = int(spl[2]) id_rep = int(spl[3]) if id_cmd in arr_commands and id_rep in arr_rip: f = open(ctxfile, 'r') lines = f.readlines() count_lines = len(lines) f.close() # for every line of contexted file, write N-arr_commands columns lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)] ctx = genfromtxt(ctxfile) # load dei cepstra if len(mat_compl) == 0 and len(mat_lab) == 0: mat_compl = ctx mat_lab = lb else: mat_compl = np.vstack((mat_compl, ctx)) mat_lab = np.vstack((mat_lab, lb)) cnt = cnt + 1 # check whether write 2 disk if cnt == write_every_nfiles: cnt = 0 earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path) earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path) totalsize += mat_compl.size mat_compl = [] mat_lab = [] except Exception as e: print(str(e)) # save data in output/train/ANALYSISNAME/matrices if len(mat_compl): earray_wrapper.appendArray2File(mat_compl, output_data_matrix_path) earray_wrapper.appendArray2File(mat_lab, output_labels_matrix_path) print("createSubjectTrainingMatrix ended: " + str(totalsize)) return {'data_matrices_path': output_data_matrix_path, 'labels_matrices_path': output_labels_matrix_path} # ----------------------------------------------------------------------------------------------------------------------- # DO NOT create matrices file, just read and returns the data & labels arrays def getSubjectTrainingMatrix(in_orig_subj_path, arr_commands, arr_rip, file_prefix='ctx'): mat_compl = [] mat_lab = [] totalsize = 0 try: cnt = 0 for ctxfile in glob.glob(in_orig_subj_path + '/' + file_prefix + '*'): filename = ctxfile.split('/')[-1] spl = filename.split('.')[0] spl = spl.split('_') # spl = re.split('[_ .]', filename) # e.g. 
ctx_SUBJ_CMD_REP => spl2[2] num comando, spl[3] num ripetiz id_cmd = int(spl[2]) id_rep = int(spl[3]) if id_cmd in arr_commands and id_rep in arr_rip: f = open(ctxfile, 'r') lines = f.readlines() count_lines = len(lines) f.close() # for every line of contexted file, write N-arr_commands columns lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)] ctx = genfromtxt(ctxfile) # load dei cepstra if len(mat_compl) == 0 and len(mat_lab) == 0: mat_compl = ctx mat_lab = lb else: mat_compl = np.vstack((mat_compl, ctx)) mat_lab = np.vstack((mat_lab, lb)) cnt = cnt + 1 except Exception as e: print(str(e)) rows = len(mat_compl) cols = len(mat_compl[0]) print("getSubjectTrainingMatrix ended, row: " + str(rows)+ ", cols: " + str(cols)) return mat_compl, mat_lab # return {'data_matrices': mat_compl, 'labels_matrices': mat_lab} # =========================================================================================================================== # aims : This script creates the testing matrix for a single subject # # input : subj: subject folder name # in_orig_subj_path: path to the subject's cepstra with context # output_net_path: path to the output folder # arr_commands: range from 1 to Ncommands # arr_rip: range from 0 to Nripetitions # sentences_filename: name of the output file # sentence_counter: it takes account of how many rows are occupied by each command and the command_id # # return : output_matrices_path: path to the output folder # sentence_counter: text file which takes account of how many rows are occupied by each command and the command_id # =========================================================================================================================== def createSubjectTestMatrix(subj, in_orig_subj_path, output_net_path, arr_commands, arr_rip, sentences_filename, sentence_counter): mat_compl = [] mat_lab = [] totalsize = 0 output_matrices_path = os.path.join(output_net_path, 'matrices') write_every_nfiles = 10 # every N 
(e.g. 10) files read, append them to disk and clear arrays if os.path.isdir(output_matrices_path) is False: os.mkdir(output_matrices_path) if os.path.isfile(sentences_filename) is True: os.remove(sentences_filename) output_data_matrix = output_matrices_path + '/' + subj + '_test_data.npy' output_labels_matrix = output_matrices_path + '/' + subj + '_test_labels.npy' if os.path.exists(output_data_matrix) is True: os.remove(output_data_matrix) if os.path.exists(output_labels_matrix) is True: os.remove(output_labels_matrix) try: cnt = 0 for ctxfile in glob.glob(in_orig_subj_path + '/ctx*'): spl = re.split('[_ .]', ctxfile) # e.g. ctx_SUBJ_CMD_REP => spl2[2] num comando, spl[3] num ripetiz id_cmd = int(spl[2]) id_rep = int(spl[3]) if id_cmd in arr_commands and id_rep in arr_rip: f = open(ctxfile, 'r') lines = f.readlines() count_lines = len(lines) f.close() sentence_counter = sentence_counter + 1 sc = [[sentence_counter, id_cmd] for j in range(count_lines)] with open(output_net_path + "/" + sentences_filename, 'ab') as f_handle: np.savetxt(f_handle, sc, fmt='%.0f') lb = [[1 if i == id_cmd else 0 for i in arr_commands] for j in range(count_lines)] ctx = genfromtxt(ctxfile) # load dei cepstra if len(mat_compl) == 0 and len(mat_lab) == 0: mat_compl = ctx mat_lab = lb else: mat_compl = np.vstack((mat_compl, ctx)) mat_lab = np.vstack((mat_lab, lb)) cnt = cnt + 1 # check whether write 2 disk if cnt == write_every_nfiles: cnt = 0 earray_wrapper.appendArray2File(mat_compl, output_data_matrix) earray_wrapper.appendArray2File(mat_lab, output_labels_matrix) totalsize += mat_compl.size mat_compl = [] mat_lab = [] except Exception as e: print(str(e)) # save data in output/test/ANALYSISNAME/matrices if len(mat_compl): earray_wrapper.appendArray2File(mat_compl, output_data_matrix) earray_wrapper.appendArray2File(mat_lab, output_labels_matrix) return {'data_matrices_path': output_data_matrix, 'labels_matrices_path': output_labels_matrix, 'sentence_counter': sentence_counter} # 
=========================================================================================================================== # aims : This script creates the testing matrix with all the pre-established subjects # # input : input_matrix_folder: path to the subject's folder containing testing and training matrices with cepstra or labels # data_name: name of the testing or training matrices with cepstra # label_name: name of the testing or training matrices with labels # output_matrix_path: path to the output folder. If is not specified, data will be stored in the current working folder # # return : data_matrix_path: path to the output folder # label_matrix_path: path to the output folder # =========================================================================================================================== def createFullMatrix(subjects_list, input_net_folder, data_name, label_name, output_net_folder=""): input_matrix_folder = os.path.join(input_net_folder, 'matrices') if os.path.isdir(input_matrix_folder) is False: os.mkdir(input_matrix_folder) if len(output_net_folder): output_matrix_folder = os.path.join(output_net_folder, 'matrices') if os.path.isdir(output_matrix_folder) is False: os.mkdir(output_matrix_folder) data_matrix_path = output_matrix_folder + '/full_' + data_name + '.npy' label_matrix_path = output_matrix_folder + '/full_' + label_name + '.npy' else: data_matrix_path = input_matrix_folder + '/full_' + data_name + '.npy' label_matrix_path = input_matrix_folder + '/full_' + label_name + '.npy' for file in glob.glob(input_matrix_folder + '/*' + data_name + '.npy'): file_name = os.path.basename(file) spl = re.split('[_ .]', file_name) # spl[0] paz, spl[1] parola 'train' for subj in subjects_list: if subj == spl[0]: print("createFullMatrix: " + file) file_train = np.load(file) file_labels = np.load(input_matrix_folder + '/' + spl[0] + '_' + label_name + '.npy') earray_wrapper.appendArray2File(file_train, data_matrix_path) 
earray_wrapper.appendArray2File(file_labels, label_matrix_path) return {'data_matrix_path': data_matrix_path, 'label_matrix_path': label_matrix_path} def getNodeBySubstring(graph, nomesubstring, allnodes=None): if allnodes is None: allnodes = [n.name for n in graph.as_graph_def().node ] node_str = [s for s in allnodes if nomesubstring in s and 'read' not in s] if len(node_str) == 1: return graph.get_tensor_by_name(node_str[0] + ":0") else: return None
41.163743
162
0.580764
2,456
21,117
4.785016
0.120928
0.016338
0.010892
0.012168
0.679374
0.649932
0.594452
0.536504
0.527315
0.498724
0
0.010162
0.273003
21,117
512
163
41.244141
0.755341
0.261211
0
0.556604
0
0
0.063609
0.006
0
0
0
0
0
1
0.050314
false
0
0.031447
0.003145
0.110063
0.022013
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9953d0571911118b84e22efc8fdf8aadef38a916
1,373
py
Python
server/models.py
kenny-skaggs/lists
c9a46c184b8ce664c4c58c10b08a1dc7b60d1a5d
[ "Unlicense" ]
null
null
null
server/models.py
kenny-skaggs/lists
c9a46c184b8ce664c4c58c10b08a1dc7b60d1a5d
[ "Unlicense" ]
null
null
null
server/models.py
kenny-skaggs/lists
c9a46c184b8ce664c4c58c10b08a1dc7b60d1a5d
[ "Unlicense" ]
null
null
null
from datetime import datetime import sqlalchemy as sa from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship BaseModel = declarative_base() class Item(BaseModel): __tablename__ = 'items' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(300), nullable=False) location_refs = relationship('ItemLocation', back_populates='item', cascade='all, delete-orphan') class Location(BaseModel): __tablename__ = 'location' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String(200), nullable=False) class ItemLocation(BaseModel): __tablename__ = 'item_location' id = sa.Column(sa.Integer, primary_key=True) location_id = sa.Column(sa.Integer, sa.ForeignKey(Location.id), nullable=False) item_id = sa.Column(sa.Integer, sa.ForeignKey(Item.id), nullable=False) location = relationship(Location, backref='item_refs') item = relationship(Item, back_populates='location_refs') class ItemNeeded(BaseModel): __tablename__ = 'item_needed' id = sa.Column(sa.Integer, primary_key=True) item_id = sa.Column(sa.Integer, sa.ForeignKey(Item.id), nullable=False) needed_time = sa.Column(sa.DateTime, nullable=False, default=datetime.utcnow()) cleared_time = sa.Column(sa.DateTime) item = relationship(Item, backref='needed_refs')
32.690476
101
0.739257
176
1,373
5.568182
0.255682
0.089796
0.112245
0.085714
0.386735
0.341837
0.333673
0.302041
0.268367
0.218367
0
0.005085
0.140568
1,373
41
102
33.487805
0.825424
0
0
0.214286
0
0
0.075802
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.964286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
99548a01e83a179a6bb2c82d30f37deca9cc74b6
5,206
py
Python
Note11_Learn Python_Staticmethods&Exceptions.py
stanreport/Python-Tutorials
7aff8ff7c21d4face1afb218ab9679f3d1160e27
[ "Apache-2.0" ]
null
null
null
Note11_Learn Python_Staticmethods&Exceptions.py
stanreport/Python-Tutorials
7aff8ff7c21d4face1afb218ab9679f3d1160e27
[ "Apache-2.0" ]
1
2018-04-14T19:35:14.000Z
2018-04-14T19:35:14.000Z
Note11_Learn Python_Staticmethods&Exceptions.py
stanreport/Python-Tutorials
7aff8ff7c21d4face1afb218ab9679f3d1160e27
[ "Apache-2.0" ]
null
null
null
# ---------- STATIC METHODS ---------- # Static methods allow access without the need to initialize # a class. They should be used as utility methods, or when # a method is needed, but it doesn't make sense for the real # world object to be able to perform a task class Sum: # You use the static method decorator to define that a # method is static @staticmethod def getSum(*args): sum = 0 for i in args: sum += i return sum def main(): # Call a static method by proceeding it with its class # name print("Sum :", Sum.getSum(1,2,3,4,5)) main() # ---------- STATIC VARIABLES ---------- # Fields declared in a class, but outside of any method # are static variables. There value is shared by every # object of that class class Dog: # This is a static variable num_of_dogs = 0 def __init__(self, name="Unknown"): self.name = name # You reference the static variable by proceeding # it with the class name Dog.num_of_dogs += 1 @staticmethod def getNumOfDogs(): print("There are currently {} dogs".format(Dog.num_of_dogs)) def main(): spot = Dog("Spot") doug = Dog("Doug") spot.getNumOfDogs() main() # ---------- MODULES ---------- # Your Python programs will contain a main program that # includes your main function. Then you will create many # modules in separate files. 
Modules also end with .py # just like any other Python file # ————— sum.py ————— def getSum(*args): sum = 0 for i in args: sum += i return sum # ————— End of sum.py ————— # You can import by listing the file name minus the py import sum # Get access to functions by proceeding with the file # name and then the function you want print("Sum :", sum.getSum(1,2,3,4,5)) # ---------- FROM ---------- # You can use from to copy specific functions from a module # You can use from sum import * to import all functions # You can import multiple functions by listing them after # import separated by commas from sum import getSum # You don't have to reference the module name now print("Sum :", getSum(1,2,3,4,5)) # ---------- EXCEPTION HANDLING ---------- # Exceptions are triggered either when an error occurs # or when you want them to. # We use exceptions are used to handle errors, execute # specific code when code generates something out of # the ordinary, to always execute code when something # happens (close a file that was opened), # When an error occurs you stop executing code and jump # to execute other code that responds to that error # Let's handle an IndexError exception that is # triggered when you try to access an index in a list # that doesn't exist # Surround a potential exception with try try: aList = [1,2,3] print(aList[3]) # Catch the exception with except followed by the # exception you want to catch # You can catch multiple exceptions by separating them # with commas inside parentheses # except (IndexError, NameError): except IndexError: print("Sorry that index doesn't exist") # If the exception wasn't caught above this will # catch all others except: print("An unknown error occurred") # ---------- CUSTOM EXCEPTIONS ---------- # Lets trigger an exception if the user enters a # name that contains a number # Although you won't commonly create your own exceptions # this is how you do it # Create a class that inherits from Exception class DogNameError(Exception): def 
__init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) try: dogName = input("What is your dogs name : ") if any(char.isdigit() for char in dogName): # Raise your own exception # You can raise the built in exceptions as well raise DogNameError except DogNameError: print("Your dogs name can't contain a number") # ---------- FINALLY & ELSE ---------- # finally is used when you always want certain code to # execute whether an exception is raised or not num1, num2 = input("Enter to values to divide : ").split() try: quotient = int(num1) / int(num2) print("{} / {} = {}".format(num1, num2, quotient)) except ZeroDivisionError: print("You can't divide by zero") # else is only executed if no exception was raised else: print("You didn't raise an exception") finally: print("I execute no matter what") # ---------- PROBLEM EXCEPTIONS & FILES ---------- # 1. Create a file named mydata2.txt and put data in it # 2. Using what you learned in part 8 and Google to find # out how to open a file without with try to open the # file in a try block # 3. Catch the FileNotFoundError exception # 4. In else print the file contents # 5. In finally close the file # 6. Try to open the nonexistent file mydata3.txt and # test to see if you caught the exception try: myFile = open("mydata2.txt", encoding="utf-8") # We can use as to access data and methods in the # exception class except FileNotFoundError as ex: print("That file was not found") # Print out further data on the exception print(ex.args) else: print("File :", myFile.read()) myFile.close() finally: print("Finished Working with File")
24.909091
68
0.671725
792
5,206
4.417929
0.323232
0.012003
0.00343
0.009431
0.03944
0.03944
0.03944
0.035439
0.035439
0.022864
0
0.009886
0.22282
5,206
209
69
24.909091
0.849975
0.610834
0
0.352941
0
0
0.185736
0
0
0
0
0
0
1
0.102941
false
0
0.029412
0
0.220588
0.235294
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
995771bb5ce39f771d0087436d5379344c7c7a93
17,632
py
Python
(3)TopTitanic1.py
statpng/KaggleTranscript
b110482a2adcf0390fac0d54c890c95894f98dea
[ "Apache-2.0" ]
null
null
null
(3)TopTitanic1.py
statpng/KaggleTranscript
b110482a2adcf0390fac0d54c890c95894f98dea
[ "Apache-2.0" ]
null
null
null
(3)TopTitanic1.py
statpng/KaggleTranscript
b110482a2adcf0390fac0d54c890c95894f98dea
[ "Apache-2.0" ]
null
null
null
# https://www.kaggle.com/yassineghouzam/titanic-top-4-with-ensemble-modeling # Feature analysis # Feature engineering # Modeling import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline from collections import Counter from sklearn.ensemble import RandomForestClassifier, \ AdaBoostClassifier, \ GradientBoostingClassifier, \ ExtraTreesClassifier, \ VotingClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve sns.set(style="white", context="notebook", palette="deep") # Load and check data # Load data train = pd.read_csv("./titanic/train.csv") test = pd.read_csv("./titanic/test.csv") IDtest = test["PassengerId"] def detect_outliers(df, n, features): """ Take a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method :param df: dataframe :param n: features :param features: feature name to be investigated :return: outlier_indices """ outlier_indices = [] # iterate over features (columns) for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col], 75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list( k for k, v in outlier_indices.items() if v > n ) return multiple_outliers Outliers_to_drop = detect_outliers(train, 2, ["Age", "SibSp", "Parch", "Fare"]) train.loc[Outliers_to_drop] train = train.drop(Outliers_to_drop, axis = 0).reset_index(drop=True) train_len = 
len(train) dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True) dataset = dataset.fillna(np.nan) dataset.isnull().sum() train.info() train.isnull().sum() train.head() train.dtypes train.describe() # Feature Analysis g = sns.heatmap(train[["Survived", "SibSp", "Parch", "Age", "Fare"]].corr(), annot=True, fmt=".2f", cmap="coolwarm") g = sns.catplot(x="SibSp", y="Survived", kind="bar", data=train, size=6, palette="muted") g.despine(left=True) g = g.set_ylabels("survival probability") # Parch g = sns.catplot(x="Parch", y="Survived", data=train, kind="bar", size=6, palette = "muted") g.despine(left=True) g = g.set_ylabels("survival probability") # Age g = sns.FacetGrid(train, col = "Survived") g = g.map(sns.distplot, "Age") g = sns.kdeplot(train["Age"][(train["Survived"]==0) & (train["Age"].notnull())], color = "Red", shade = True ) g = sns.kdeplot(train["Age"][(train["Survived"]==1) & (train["Age"].notnull())], color = "Blue", shade = True ) g.set_xlabel("Age") g.set_ylabel("Frequency") g = g.legend(["Not Survived", "Survived"]) # Fare dataset["Fare"].isnull().sum() dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median()) g = sns.distplot(dataset["Fare"], color="m", label="Skewness : %.2f"%(dataset["Fare"].skew())) g = g.legend(loc = "best") dataset["Fare"] = dataset["Fare"].map(lambda i: np.log(i) if i > 0 else 0) g = sns.distplot(dataset["Fare"], color="b", label="Skewness : %.2f"%(dataset["Fare"].skew())) g = g.legend(loc="best") # Categorical values # Sex g = sns.barplot(x="Sex", y="Survived", data=train) g = g.set_ylabel("Survival Probability") train[["Sex", "Survived"]].groupby("Sex").mean() # Pclass g = sns.catplot(x="Pclass", y="Survived", data = train, kind="bar", size = 6, palette = "muted") g.despine(left=True) g = g.set_ylabels("survival probability") g = sns.catplot(x="Pclass", y="Survived", hue="Sex", kind="bar", data=dataset, size=6, palette="muted") g.despine(left=True) g = g.set_ylabels("survival probability") # Embarked 
dataset["Embarked"].isnull().sum() dataset["Embarked"] = dataset["Embarked"].fillna("S") g = sns.catplot(x="Embarked", y="Survived", kind="bar", data=train, size=6, palette="muted") g.despine(left=True) g = g.set_ylabels("survival probability") g = sns.catplot("Pclass", col="Embarked", kind="count", data=train, size=6, palette="muted") # Filling missing values # Age g = sns.catplot(x = "Sex", y = "Age", kind="box", data=dataset) g = sns.catplot(x = "Sex", y = "Age", hue="Pclass", kind="box", data=dataset) g = sns.catplot(x = "Parch", y = "Age", kind="box", data=dataset) g = sns.catplot(x = "SibSp", y = "Age", kind="box", data=dataset) dataset["Sex"] = dataset["Sex"].map({"male":0, "female":1}) # male --> 0; female --> 1 g = sns.heatmap(dataset[["Age", "Sex", "SibSp", "Parch", "Pclass"]].corr(), cmap="BrBG", annot=True) # Filling missing value of Age index_NaN_age = list( dataset["Age"][dataset["Age"].isnull()].index ) for i in index_NaN_age : age_med = dataset["Age"].median() age_pred = dataset["Age"][((dataset["SibSp"] == dataset.iloc[i]["SibSp"]) & (dataset["Parch"] == dataset.iloc[i]["Parch"]) & (dataset["Pclass"] == dataset.iloc[i]["Pclass"]) )].median() if not np.isnan(age_pred) : dataset["Age"].iloc[i] = age_pred else : dataset["Age"].iloc[i] = age_med g = sns.catplot(x="Survived", y="Age", kind="box", data=train) g = sns.catplot(x="Survived", y="Age", data=train, kind="violin") # Feature Engineering dataset["Name"].head() dataset_title = [i.split(",")[1].split(".")[0].strip() for i in dataset["Name"]] dataset["Title"] = pd.Series(dataset_title) dataset["Title"].head() g = sns.countplot(x="Title", data=dataset) g = plt.setp(g.get_xticklabels(), rotation=45) dataset["Title"] = dataset["Title"].replace(["Lady", "the Countess", "Countess", "Capt", "Col", "Don", "Dr", "Major", "Rev", "Sir", "Jonkheer", "Dona"], "Rare") dataset["Title"] = dataset["Title"].map({"Master":0, "Miss":1, "Ms":1, "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3 }) dataset["Title"] = 
dataset["Title"].astype(int) dataset["Title"].value_counts() g = sns.countplot(dataset["Title"]) g = g.set_xticklabels(["Master", "Miss/Ms/Mme/Mlle/Mrs", "Mr", "Rare"]) g = sns.catplot(x="Title", y="Survived", kind="bar", data=dataset) g = g.set_xticklabels(["Master", "Miss-Mrs", "Mr", "Rare"]) g = g.set_ylabels("survival probability") dataset.drop(labels = ["Name"], axis=1, inplace=True) dataset["Fsize"] = dataset["SibSp"] + dataset["Parch"] + 1 g = sns.catplot(x="Fsize", y="Survived", kind="point", data=dataset) g = g.set_ylabels("Survival probability") dataset["Single"] = dataset["Fsize"].map(lambda s: 1 if s == 1 else 0) dataset["SmallF"] = dataset["Fsize"].map(lambda s: 1 if s == 2 else 0) dataset["MedF"] = dataset["Fsize"].map(lambda s: 1 if 3 <= s <= 4 else 0) dataset["LargeF"] = dataset["Fsize"].map(lambda s: 1 if s >= 5 else 0) dataset[["Single", "SmallF", "MedF", "LargeF"]].apply(lambda x: x.value_counts(), axis=0) fig, ax=plt.subplots(2,2,figsize=(10,10)) sns.barplot(x = "Single", y="Survived", data=dataset, ax=ax[0,0]) ax[0,0].set_ylabel("Survival probability") g = sns.barplot(x = "SmallF", y="Survived", data=dataset, ax=ax[0,1]) ax[0,1].set_ylabel("Survival probability") g = sns.barplot(x = "MedF", y="Survived", data=dataset, ax=ax[1,0]) ax[1,0].set_ylabel("Survival probability") g = sns.barplot(x = "LargeF", y="Survived", data=dataset, ax=ax[1,1]) ax[1,1].set_ylabel("Survival probability") dataset = pd.get_dummies(dataset, columns = ["Title"]) dataset = pd.get_dummies(dataset, columns = ["Embarked"], prefix = "Em") dataset.head(4) # Cabin dataset["Cabin"].head() dataset["Cabin"].describe() dataset["Cabin"].isnull().sum() dataset["Cabin"][dataset["Cabin"].notnull()].head() dataset["Cabin"] = pd.Series( [i[0] if not pd.isnull(i) else "X" for i in dataset["Cabin"] ]) ord = ["A", "B", "C", "D", "E", "F", "G", "T", "X"] g = sns.countplot( dataset["Cabin"], order = ord ) g = sns.catplot(x="Cabin", y="Survived", kind="bar", data=dataset, order = ord) g = 
g.set_ylabels("Survival Probability") dataset = pd.get_dummies(dataset, prefix = "Cabin", columns=["Cabin"]) dataset["Ticket"].head() Ticket = [] for i in list(dataset.Ticket): if not i.isdigit() : Ticket.append(i.replace(".", "").replace("/", "").strip().split(" ")[0]) else : Ticket.append("X") dataset["Ticket"] = Ticket dataset["Ticket"].head() dataset = pd.get_dummies(dataset, columns = ["Ticket"], prefix = "T") dataset["Pclass"] = dataset["Pclass"].astype("category") dataset = pd.get_dummies(dataset, columns=["Pclass"], prefix="Pc") dataset.drop(labels = ["PassengerId"], axis=1, inplace=True) dataset.head() # Modeling train = dataset[:train_len] test = dataset[train_len:] test.drop(labels=["Survived"], axis=1, inplace=True) train["Survived"] = train["Survived"].astype(int) Y_train = train["Survived"] X_train = train.drop(labels = ["Survived"], axis=1) # Simple modeling kfold = StratifiedKFold(n_splits=10) random_state = 2 classifiers = [] classifiers.append( SVC(random_state = random_state) ) classifiers.append( DecisionTreeClassifier(random_state = random_state) ) classifiers.append( AdaBoostClassifier(DecisionTreeClassifier(random_state = random_state), random_state = random_state, learning_rate = 0.1)) classifiers.append( RandomForestClassifier(random_state=random_state) ) classifiers.append( ExtraTreesClassifier(random_state=random_state) ) classifiers.append( GradientBoostingClassifier(random_state=random_state) ) classifiers.append( MLPClassifier(random_state=random_state) ) classifiers.append( KNeighborsClassifier() ) classifiers.append( LogisticRegression(random_state=random_state) ) classifiers.append( LinearDiscriminantAnalysis() ) cv_results = [] for classifier in classifiers : cv_results.append(cross_val_score(classifier, X=X_train, y=Y_train, scoring = "accuracy", cv=kfold)) cv_means = [] cv_std = [] for cv_result in cv_results : cv_means.append( cv_result.mean() ) cv_std.append( cv_result.std() ) algorithms = [ 
i.__str__().split("(")[0].replace("Classifier", "").replace("Regression", "").replace("Analysis", "") for i in classifiers ] cv_res = pd.DataFrame({ "CrossValMeans":cv_means, "CrossValerrors": cv_std, "Algorithm": algorithms }) g = sns.barplot("CrossValMeans", "Algorithm", data=cv_res, palette = "Set3", orient = "h", **{"xerr":cv_std}) g.set_xlabel("Mean Accuracy") g = g.set_title("Cross validation scores") DTC = DecisionTreeClassifier() adaDTC = AdaBoostClassifier(DTC, random_state=7) ada_param_grid = {"base_estimator__criterion" : ["gini", "entropy"], "base_estimator__splitter" : ["best", "random"], "algorithm" : ["SAMME", "SAMME.R"], "n_estimators" : [1,2], "learning_rate" : [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 1.5]} gsadaDTC = GridSearchCV(adaDTC, param_grid = ada_param_grid, cv=kfold, scoring="accuracy", verbose=1) gsadaDTC.fit(X_train, Y_train) ada_best = gsadaDTC.best_estimator_ gsadaDTC.best_score_ # ExtraTrees ExtC = ExtraTreesClassifier() ex_param_grid = { "max_depth": [None], "max_features": [1, 3, 10], "min_samples_split": [2, 3, 10], "min_samples_leaf": [1, 3, 10], "bootstrap": [False], "n_estimators": [100, 300], "criterion": ["gini"] } gsExtC = GridSearchCV(ExtC, param_grid = ex_param_grid, cv=kfold, scoring="accuracy", verbose=1) gsExtC.fit(X_train, Y_train) ExtC_best = gsExtC.best_estimator_ gsExtC.best_score_ # Random Forest RFC = RandomForestClassifier() rf_param_grid = {"max_depth": [None], "max_features": [1, 3, 10], "min_samples_split": [2, 3, 10], "min_samples_leaf": [1, 3, 10], "bootstrap": [False], "n_estimators": [100, 300], "criterion": ["gini"]} gsRFC = GridSearchCV( RFC, param_grid = rf_param_grid, cv=kfold, scoring="accuracy", verbose=1) gsRFC.fit(X_train, Y_train) RFC_best = gsRFC.best_estimator_ gsRFC.best_score_ # Gradient Boosting GBC = GradientBoostingClassifier() gb_param_grid = { "loss" : ["deviance"], "n_estimators" : [100,200,300], "learning_rate" : [0.1,0.05,0.01], "max_depth" : [4, 8], "min_samples_leaf" : [100, 150], 
"max_features" : [0.3, 0.1] } gsGBC = GridSearchCV(GBC, param_grid = gb_param_grid, cv=kfold, scoring="accuracy", verbose = 1) gsGBC.fit(X_train, Y_train) GBC_best = gsGBC.best_estimator_ gsGBC.best_score_ # SVM SVMC = SVC(probability=True) svc_param_grid = { "kernel": ["rbf"], "gamma": [0.001, 0.01, 0.1, 1], "C": [1, 10, 50, 100, 200, 300, 1000] } gsSVMC = GridSearchCV(SVMC, param_grid=svc_param_grid, cv=kfold, scoring="accuracy", verbose=1) gsSVMC.fit(X_train, Y_train) SVMC_best = gsSVMC.best_estimator_ gsSVMC.best_score_ def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)) : """ Generate a simple plot of the test and training learning curve :param estimator: :param title: :param X: :param y: :param ylim: :param cv: :param n_jobs: :param train_sizes: :return: """ plt.figure() plt.title(title) if ylim is not None : plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r" ) plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="r" ) plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score") plt.legend(loc="best") return plt g1 = plot_learning_curve(gsRFC.best_estimator_, "RF learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5)) g2 = plot_learning_curve(gsExtC.best_estimator_, "ExtraTrees learning curves", X_train, Y_train, cv=kfold, 
train_sizes=np.linspace(.1, 1.0, 5)) g3 = plot_learning_curve(gsSVMC.best_estimator_, "SVC learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5)) g4 = plot_learning_curve(gsadaDTC.best_estimator_, "AdaBoost learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5)) g5 = plot_learning_curve(gsGBC.best_estimator_, "GradientBoosting learning curves", X_train, Y_train, cv=kfold, train_sizes=np.linspace(.1, 1.0, 5)) nrows = ncols = 2 fig, axes = plt.subplots(nrows = nrows, ncols = ncols, sharex="all", figsize=(15,15)) names_classifiers = [("AdaBoosting", ada_best), ("ExtraTrees", ExtC_best), ("RandomForest", RFC_best), ("GradientBoosting", GBC_best) ] nclassifier = 0 for row in range(nrows) : for col in range(ncols) : name = names_classifiers[nclassifier][0] classifier = names_classifiers[nclassifier][1] indices = np.argsort(classifier.feature_importances_)[::-1][:40] g = sns.barplot(y=X_train.columns[indices][:40], x= classifier.feature_importances_[indices][:40], orient = "h", ax=axes[row][col]) g.set_xlabel("Relative Importance", fontsize=12) g.set_ylabel("Feature", fontsize=12) g.tick_params(labelsize=9) g.set_title(name + " feature importance") nclassifier += 1 test_Survived_RFC = pd.Series(RFC_best.predict(test), name="RFC") test_Survived_ExtC = pd.Series(ExtC_best.predict(test), name="ExtC") test_Survived_SVMC = pd.Series(SVMC_best.predict(test), name="SVMC") test_Survived_AdaC = pd.Series(ada_best.predict(test), name="AdaC") test_Survived_GBC = pd.Series(GBC_best.predict(test), name="GBC") ensemble_results = pd.concat( [test_Survived_RFC, test_Survived_ExtC, test_Survived_AdaC, test_Survived_GBC, test_Survived_SVMC], axis=1 ) g = sns.heatmap(ensemble_results.corr(), annot=True) votingC = VotingClassifier(estimators = [ ("rfc", RFC_best), ("extc", ExtC_best), ("adac", ada_best), ("gbc", GBC_best)], voting="soft") votingC = votingC.fit(X_train, Y_train) test_Survived = pd.Series(votingC.predict(test), 
name="Survived") resutls = pd.concat([IDtest, test_Survived], axis=1) # results.to_csv("ensemble_python_voting.csv", index=False)
35.193613
165
0.650522
2,382
17,632
4.667926
0.175063
0.011512
0.014839
0.015109
0.303984
0.262973
0.187877
0.145517
0.108013
0.100639
0
0.019216
0.179503
17,632
500
166
35.264
0.749361
0.054957
0
0.102167
0
0
0.142702
0.00296
0
0
0
0
0
1
0.006192
false
0.006192
0.052632
0
0.065015
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9958f6192f0c624286a05b17462ac8310129e2ae
2,742
py
Python
simpleeval__examples__calc/basic/compound_types.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
117
2015-12-18T07:18:27.000Z
2022-03-28T00:25:54.000Z
simpleeval__examples__calc/basic/compound_types.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
8
2018-10-03T09:38:46.000Z
2021-12-13T19:51:09.000Z
simpleeval__examples__calc/basic/compound_types.py
DazEB2/SimplePyScripts
1dde0a42ba93fe89609855d6db8af1c63b1ab7cc
[ "CC-BY-4.0" ]
28
2016-08-02T17:43:47.000Z
2022-03-21T08:31:12.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' # SOURCE: https://github.com/danthedeckie/simpleeval/blob/master/README.rst#compound-types # """ # Compound types (dict, tuple, list, set) in general just work if you pass them in as named objects. # If you want to allow creation of these, the EvalWithCompoundTypes class works. Just replace any use of # SimpleEval with that. # """ # pip install simpleeval from simpleeval import simple_eval, SimpleEval, EvalWithCompoundTypes # SimpleEval and simple_eval NOT WORK with compound types try: print(simple_eval('[1, 2, 3, 4]')) except Exception as e: print(e) # Sorry, List is not available in this evaluator try: my_eval = SimpleEval() print(my_eval.eval('[1, 2, 3, 4]')) except Exception as e: print(e) # Sorry, List is not available in this evaluator print() # Compound Types my_compound_types_eval = EvalWithCompoundTypes() my_compound_types_eval.functions['len'] = len # list print(my_compound_types_eval.eval('[1, 2, 3, 4]')) # [1, 2, 3, 4] print(my_compound_types_eval.eval('[1, 2] + [3, 4]')) # [1, 2, 3, 4] print(my_compound_types_eval.eval('len([1, 2, 3, 4])')) # 4 print(my_compound_types_eval.eval('[1, 2, 1, 3, 4].count(1)')) # 2 print(my_compound_types_eval.eval('list("1234")')) # ['1', '2', '3', '4'] print() # dict print(my_compound_types_eval.eval('{"a": 1, "b": 999}')) # {'a': 1, 'b': 999} print(my_compound_types_eval.eval('{"a": 1, "b": 999}["b"]')) # 999 print(my_compound_types_eval.eval('{"a": 1, "b": 999}.items()')) # dict_items([('a', 1), ('b', 999)]) print(my_compound_types_eval.eval('len({"a": 1, "b": 999})')) # 2 print(my_compound_types_eval.eval('dict([("a", 1), ("b", 999)])')) # {'a': 1, 'b': 999} print() # tuple print(my_compound_types_eval.eval('(1, 2, 3, 4)')) # (1, 2, 3, 4) print(my_compound_types_eval.eval('(1, 2) + (3, 4)')) # (1, 2, 3, 4) print(my_compound_types_eval.eval('1, 2, 3, 4')) # (1, 2, 3, 4) print(my_compound_types_eval.eval('len((1, 2, 3, 4))')) # 4 
print(my_compound_types_eval.eval('(1, 2, 1, 3, 4).count(1)')) # 2 print() # set print(my_compound_types_eval.eval('{1, 2, 3, 4}')) # {1, 2, 3, 4} print(my_compound_types_eval.eval('{1, 2, 1, 3, 1, 4, 3}')) # {1, 2, 3, 4} print(my_compound_types_eval.eval('[1, 2, 1, 3, 1, 4, 3]')) # [1, 2, 1, 3, 1, 4, 3] print(my_compound_types_eval.eval('set([1, 2, 1, 3, 1, 4, 3])')) # {1, 2, 3, 4} print(my_compound_types_eval.eval('{1, 1, 2}.union({3, 2, 4})')) # {1, 2, 3, 4} print(my_compound_types_eval.eval('{1, 1, 2}.intersection({3, 2, 4})')) # {2}
40.323529
104
0.598104
452
2,742
3.45354
0.185841
0.038437
0.221012
0.279949
0.599616
0.59385
0.57335
0.534914
0.534914
0.524023
0
0.081364
0.197666
2,742
67
105
40.925373
0.628182
0.299781
0
0.263158
0
0
0.239234
0
0
0
0
0
0
1
0
false
0
0.026316
0
0.026316
0.763158
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
3
9958fb1fe550f9459cfe99043f36afca01044db6
1,049
py
Python
demo/person/tests/project/domain/person/repository/test_physical_person_model.py
giovannifarlley/ms--fastapi-template
5bbd6903305db07cc18330ec86fb04ca518e9dab
[ "MIT" ]
24
2021-03-07T13:00:35.000Z
2022-02-11T03:41:51.000Z
demo/person/tests/project/domain/person/repository/test_physical_person_model.py
giovannifarlley/ms--fastapi-template
5bbd6903305db07cc18330ec86fb04ca518e9dab
[ "MIT" ]
2
2021-05-15T01:05:17.000Z
2021-08-13T13:53:57.000Z
demo/person/tests/project/domain/person/repository/test_physical_person_model.py
giovannifarlley/ms--fastapi-template
5bbd6903305db07cc18330ec86fb04ca518e9dab
[ "MIT" ]
4
2021-04-27T12:18:33.000Z
2021-10-03T23:43:23.000Z
from datetime import datetime from bson.objectid import ObjectId import pytest from project.domain.person.repository.physical_person import PhysicalPerson def test_instance_physical_person(): input_data = { "_id": ObjectId(), "status": "active", "name": "teste", "last_name": "teste", "age": 12, "birthdate": datetime.now(), "gender": "", "personal_document_id": "11122233344", "email": "teste@teste.com", "phone": "+5534988887777", } physical_person = PhysicalPerson(**input_data) assert input_data["_id"] == physical_person.dict()["id"] def test_instance_physical_person_errors(): with pytest.raises(ValueError): input_data = { "status": "", "name": "", "last_name": "", "age": -1, "birthdate": datetime.now(), "gender": "", "personal_document_id": "", "email": "", "phone": "", } PhysicalPerson(**input_data)
26.897436
75
0.551954
95
1,049
5.852632
0.442105
0.125899
0.053957
0.082734
0.26259
0.158273
0.158273
0
0
0
0
0.036585
0.296473
1,049
38
76
27.605263
0.716802
0
0
0.181818
0
0
0.188751
0
0
0
0
0
0.030303
1
0.060606
false
0
0.121212
0
0.181818
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
995a18380107d2a42827b6340d3c5bca73c8436d
2,202
py
Python
tests/api/v2/test_queries.py
droessmj/python-sdk
42ea2366d08ef5e4d1fa45029480b800352ab765
[ "MIT" ]
2
2020-09-08T20:42:05.000Z
2020-09-09T14:27:55.000Z
tests/api/v2/test_queries.py
droessmj/python-sdk
42ea2366d08ef5e4d1fa45029480b800352ab765
[ "MIT" ]
null
null
null
tests/api/v2/test_queries.py
droessmj/python-sdk
42ea2366d08ef5e4d1fa45029480b800352ab765
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Test suite for the community-developed Python SDK for interacting with Lacework APIs. """ import random import pytest from laceworksdk.api.v2.queries import QueriesAPI from tests.api.test_crud_endpoint import CrudEndpoint # Tests @pytest.fixture(scope="module") def api_object(api): return api.queries @pytest.fixture(scope="module") def api_object_create_body(random_text): return { "query_id": random_text, "query_text": f"""{random_text} {{ source {{CloudTrailRawEvents e}} filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}'}} return distinct {{EVENT_NAME, EVENT}} }}""" } @pytest.fixture(scope="module") def api_object_update_body(random_text): return { "query_text": f"""{random_text} {{ source {{CloudTrailRawEvents e}} filter {{EVENT_SOURCE = 'iam.amazonaws.com' AND EVENT:userIdentity.name::String NOT LIKE '%{random_text}_updated'}} return distinct {{EVENT_NAME, EVENT}} }}""" } @pytest.fixture(scope="module") def query(api): queries = api.queries.get() queries = list(filter(lambda elem: elem["owner"] == "Lacework" and "LW_Global_AWS_CTA" in elem["queryId"], queries["data"])) query = random.choice(queries) return query class TestQueries(CrudEndpoint): OBJECT_ID_NAME = "queryId" OBJECT_TYPE = QueriesAPI def test_api_get_by_id(self, api_object): self._get_object_classifier_test(api_object, "id", self.OBJECT_ID_NAME) def test_queries_api_execute_by_id(self, api_object, query): start_time, end_time = self._get_start_end_times() response = api_object.execute_by_id( query_id=query["queryId"], arguments={ "StartTimeRange": start_time, "EndTimeRange": end_time, } ) assert "data" in response.keys() def test_queries_api_validate(self, api_object, query): response = api_object.validate(query_text=query["queryText"]) assert "data" in response.keys() def test_api_search(self): pass
28.597403
128
0.656676
268
2,202
5.141791
0.339552
0.058781
0.05225
0.069666
0.422351
0.365022
0.365022
0.261248
0.261248
0.261248
0
0.001168
0.222071
2,202
76
129
28.973684
0.803269
0.051771
0
0.307692
0
0.038462
0.309764
0.06253
0
0
0
0
0.038462
1
0.153846
false
0.019231
0.076923
0.057692
0.403846
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
995ce8cb055163c1151d7b483d731dd014f5c38e
9,058
py
Python
dataloader.py
AriaPs/TransparentDepth
c053b273be856cc9433fd5598a56b96d44ae910e
[ "MIT" ]
1
2021-05-16T19:40:58.000Z
2021-05-16T19:40:58.000Z
dataloader.py
AriaPs/TransparentDepth
c053b273be856cc9433fd5598a56b96d44ae910e
[ "MIT" ]
null
null
null
dataloader.py
AriaPs/TransparentDepth
c053b273be856cc9433fd5598a56b96d44ae910e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import os import glob import sys from PIL import Image import Imath import numpy as np import torch import torch.nn as nn from torch.utils.data import Dataset from torchvision import transforms from imgaug import augmenters as iaa import imgaug as ia import imageio import cv2 from utils.utils import exr_loader, depthTensor2rgbTensor, depth2rgb class ClearGraspsDataset(Dataset): """ Dataset class for training model //TODO: DOC Args: input_dir (str): Path to folder containing the input images (.png format). transform (imgaug transforms): imgaug Transforms to be applied to the imgs """ def __init__( self, input_dir, depth_dir='', transform=None, input_only=None, outputImgWidth = 256, outputImgHeight = 256, ): super().__init__() self.images_dir = input_dir self.depth_dir = depth_dir self.transform = transform self.input_only = input_only # Create list of filenames self._datalist_input = [] # Variable containing list of all input images filenames in dataset self._datalist_depth = [] self._extension_input = ['-rgb.jpg'] # The file extension of input images self._extension_depth = ['-depth-rectified.exr'] self._create_lists_filenames(self.images_dir, self.depth_dir) self.outputImgWidth = outputImgWidth self.outputImgHeight = outputImgHeight def __len__(self): return len(self._datalist_input) def __getitem__(self, index): '''Returns an item from the dataset at the given index. If no depths directory has been specified, then a tensor of zeroes will be returned as the depth. Args: index (int): index of the item required from dataset. 
Returns: torch.Tensor: Tensor of input image torch.Tensor: Tensor of depth (Tensor of zeroes is depth_dir is "" or None) ''' # Open input imgs image_path = self._datalist_input[index] _img = Image.open(image_path).convert('RGB') _img = np.array(_img) # Open depths if self.depth_dir: depth_path = self._datalist_depth[index] _depth = exr_loader(depth_path, ndim=1) #_depth = cv2.resize(_depth, (self.outputImgWidth, self.outputImgHeight), interpolation=cv2.INTER_NEAREST) _depth[np.isnan(_depth)] = 0 _depth[np.isinf(_depth)] = 0 _depth = np.expand_dims(_depth, axis=0) # Apply image augmentations and convert to Tensor if self.transform: det_tf = self.transform.to_deterministic() _img = det_tf.augment_image(_img.copy()) if self.depth_dir: # Making all values of invalid pixels marked as -1.0 to 0. # In raw data, invalid pixels are marked as (-1, -1, -1) so that on conversion to RGB they appear black. mask = np.all(_depth == -1.0, axis=0) _depth[:, mask] = 0.0 _depth = _depth.transpose((1, 2, 0)) # To Shape: (H, W, 3) _depth = det_tf.augment_image(_depth, hooks=ia.HooksImages(activator=self._activator_masks)) _depth = _depth.transpose((2, 0, 1)) # To Shape: (3, H, W) # Return Tensors _img_tensor = transforms.ToTensor()(_img.copy()) if self.depth_dir: _depth_tensor = torch.from_numpy(_depth.copy()) #_depth_tensor = nn.functional.normalize(_depth_tensor, p=2, dim=0) else: _depth_tensor = torch.zeros((3, _img_tensor.shape[1], _img_tensor.shape[2]), dtype=torch.float32) return _img_tensor, _depth_tensor def _create_lists_filenames(self, images_dir, depth_dir): '''Creates a list of filenames of images and depths each in dataset The depth at index N will match the image at index N. 
Args: images_dir (str): Path to the dir where images are stored depth_dir (str): Path to the dir where depths are stored Raises: ValueError: If the given directories are invalid ValueError: No images were found in given directory ValueError: Number of images and depths do not match ''' assert os.path.isdir(images_dir), 'Dataloader given images directory that does not exist: "%s"' % (images_dir) for ext in self._extension_input: imageSearchStr = os.path.join(images_dir, '*' + ext) imagepaths = sorted(glob.glob(imageSearchStr)) self._datalist_input = self._datalist_input + imagepaths numImages = len(self._datalist_input) if numImages == 0: raise ValueError('No images found in given directory. Searched in dir: {} '.format(images_dir)) if depth_dir: assert os.path.isdir(depth_dir), ('Dataloader given depths directory that does not exist: "%s"' % (depth_dir)) for ext in self._extension_depth: depthSearchStr = os.path.join(depth_dir, '*' + ext) depthpaths = sorted(glob.glob(depthSearchStr)) self._datalist_depth = self._datalist_depth + depthpaths numdepths = len(self._datalist_depth) if numdepths == 0: raise ValueError('No depths found in given directory. Searched for {}'.format(imageSearchStr)) if numImages != numdepths: raise ValueError('The number of images and depths do not match. Please check data,' + 'found {} images and {} depths in dirs:\n'.format(numImages, numdepths) + 'images: {}\ndepths: {}\n'.format(images_dir, depth_dir)) def _activator_masks(self, images, augmenter, parents, default): '''Used with imgaug to help only apply some augmentations to images and not depths Eg: Blur is applied to input only, not depth. However, resize is applied to both. 
''' if self.input_only and augmenter.name in self.input_only: return False else: return default if __name__ == '__main__': import matplotlib.pyplot as plt from torch.utils.data import DataLoader from torchvision import transforms import torchvision import imageio # Example Augmentations using imgaug imsize = 512 augs_train = iaa.Sequential([ # Geometric Augs iaa.Scale((imsize, imsize), 0), # Resize image iaa.Fliplr(0.5), iaa.Flipud(0.5), iaa.Rot90((0, 4)), # Blur and Noise iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 1.5), name="gaus-blur")), iaa.Sometimes(0.1, iaa.Grayscale(alpha=(0.0, 1.0), from_colorspace="RGB", name="grayscale")), iaa.Sometimes(0.2, iaa.AdditiveLaplaceNoise(scale=(0, 0.1*255), per_channel=True, name="gaus-noise")), # Color, Contrast, etc. iaa.Sometimes(0.2, iaa.Multiply((0.75, 1.25), per_channel=0.1, name="brightness")), iaa.Sometimes(0.2, iaa.GammaContrast((0.7, 1.3), per_channel=0.1, name="contrast")), iaa.Sometimes(0.2, iaa.AddToHueAndSaturation((-20, 20), name="hue-sat")), iaa.Sometimes(0.3, iaa.Add((-20, 20), per_channel=0.5, name="color-jitter")), ]) # augs_test = iaa.Sequential([ # # Geometric Augs # iaa.Scale((imsize, imsize), 0), # ]) min = 0.1 max = 1.5 augs = augs_train input_only = ["gaus-blur", "grayscale", "gaus-noise", "brightness", "contrast", "hue-sat", "color-jitter"] db_test = ClearGraspsDataset(input_dir='./data/train/rgb-imgs', depth_dir='./data/train/depth-imgs-rectified', transform=augs, input_only=input_only) batch_size = 4 testloader = DataLoader(db_test, batch_size=batch_size, shuffle=True, num_workers=1, drop_last=True) # Show 1 Shuffled Batch of Images for ii, batch in enumerate(testloader): # Get Batch img, depth = batch print('image shape, type: ', img.shape, img.dtype) print('depth shape, type: ', depth.shape, depth.dtype) # Show Batch im_vis1 = torchvision.utils.make_grid(img, nrow=batch_size // 4, padding=2, normalize=True, scale_each=True) plt.imshow(im_vis1.numpy().transpose(1, 2, 0)) plt.show() im_vis2 = 
torchvision.utils.make_grid(depthTensor2rgbTensor(depth), nrow=batch_size // 4, padding=2, normalize=True, scale_each=True) plt.imshow(im_vis2.numpy().transpose(1, 2, 0)) plt.show() break
40.4375
142
0.598256
1,109
9,058
4.721371
0.25789
0.024446
0.01738
0.013369
0.157945
0.110772
0.071047
0.053094
0.040489
0.022536
0
0.020592
0.303047
9,058
223
143
40.618834
0.808807
0.220137
0
0.082707
0
0
0.093505
0.008157
0
0
0
0.004484
0.015038
1
0.037594
false
0
0.150376
0.007519
0.225564
0.015038
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
995fa5032672d255714e214da117b6d2228beb50
596
py
Python
django/settings/wsgi.py
radifar/django-vue-graphql-demo
bae75dacf79b5da47a49b02a275a5795cdc5b1de
[ "MIT" ]
null
null
null
django/settings/wsgi.py
radifar/django-vue-graphql-demo
bae75dacf79b5da47a49b02a275a5795cdc5b1de
[ "MIT" ]
4
2021-03-19T10:42:08.000Z
2021-06-10T20:11:15.000Z
django/settings/wsgi.py
radifar/django-vue-graphql-demo
bae75dacf79b5da47a49b02a275a5795cdc5b1de
[ "MIT" ]
null
null
null
""" WSGI config for django_vue project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from dotenv import load_dotenv PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) project_folder = os.path.expanduser(PROJECT_DIR) load_dotenv(os.path.join(PROJECT_DIR, '.env')) from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings.settings') application = get_wsgi_application()
28.380952
78
0.791946
89
596
5.11236
0.550562
0.065934
0.057143
0.065934
0.07033
0
0
0
0
0
0
0.003697
0.092282
596
20
79
29.8
0.837338
0.362416
0
0
0
0
0.115591
0.05914
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
996276e35632e5351e97d09b376c527fbb524a3d
1,409
py
Python
alpa/torch/optim/adam.py
TarzanZhao/alpa
c477805bfb182788ed07e8b3a8e2924333433507
[ "Apache-2.0" ]
null
null
null
alpa/torch/optim/adam.py
TarzanZhao/alpa
c477805bfb182788ed07e8b3a8e2924333433507
[ "Apache-2.0" ]
null
null
null
alpa/torch/optim/adam.py
TarzanZhao/alpa
c477805bfb182788ed07e8b3a8e2924333433507
[ "Apache-2.0" ]
null
null
null
"""Adam optimizer""" import copy import torch def adam(lr=1e-4): """torchoptim.adam(**adam_config)(params) Factory that generates functional version of Adam optimizer. Implementation has no in-place op and no data-dependent control flow. Returns: - `optim_func`: a function that: - takes (`params`, `optim_state`, `params_grad`) as input - returns (`params`, `optim_state`) after applying Adam algorithm - `optim_state_init_func`: a function that: - takes `optim_state` as input - returns `optim_state` which is Adam optimizer state - `optim_state`: tracked state (shape-only) of Adam optimizer. """ # TODO FIXME: properly implement Adam optimizer def optim_gen(params): def optim_func(params, optim_state, params_grad): for k in params: params[k] = params[k] + params_grad[k] * lr optim_state[k] = optim_state[k] + 1 return params, optim_state optim_state = copy.deepcopy(params) def optim_state_init_func(optim_state): new_state = {} for k, v in optim_state.items(): new_state[k] = torch.full_like(v, 0.0) return new_state return optim_func, optim_state_init_func, optim_state return optim_gen
32.022727
77
0.596167
173
1,409
4.653179
0.369942
0.198758
0.079503
0.067081
0.18882
0.069565
0
0
0
0
0
0.005187
0.315827
1,409
43
78
32.767442
0.829876
0.457062
0
0
1
0
0
0
0
0
0
0.023256
0
1
0.235294
false
0
0.117647
0
0.588235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
1
0
0
1
0
0
0
0
1
0
0
4
9962f81178525ce273dd05b72036d4af806539c0
2,122
py
Python
graph_plots/fwidgets/f_icon_label.py
DanShai/kivy-graph
6537901d521247a13e186aaa8ecbaffdffdaf7ea
[ "MIT" ]
3
2018-11-28T13:35:35.000Z
2021-09-12T15:54:28.000Z
graph_plots/fwidgets/f_icon_label.py
DanShai/kivy-graph
6537901d521247a13e186aaa8ecbaffdffdaf7ea
[ "MIT" ]
null
null
null
graph_plots/fwidgets/f_icon_label.py
DanShai/kivy-graph
6537901d521247a13e186aaa8ecbaffdffdaf7ea
[ "MIT" ]
1
2021-05-03T18:48:01.000Z
2021-05-03T18:48:01.000Z
''' @author: dan ''' from f_widget import FWidget from kivy.uix.label import Label from kivy.properties import ListProperty, NumericProperty, StringProperty, BooleanProperty, ObjectProperty from kivy.uix.button import Button from kivy.lang import Builder from f_button import FButton from utils import get_icon_char, get_rgba_color from f_scalable import ScalableBehaviour Builder.load_string(''' <FIconLabel>: Label: id: licon font_name: './graph_plots/fwidgets/data/font/fontawesome-webfont.ttf' pos: root.pos size: root.size font_size: root.font_size text: root.get_icon(root.icon) if root.icon else '' color: root.get_color(root.txt_color) ''') class FIconLabel(Button, FWidget, ScalableBehaviour): icon = StringProperty('') get_icon = ObjectProperty(get_icon_char) txt_color = ListProperty(['Orange', '100']) n_txt_color = ListProperty(['Orange', '100']) d_txt_color = ListProperty(['Orange', '400']) def __init__(self, **kwargs): super(FIconLabel, self).__init__(**kwargs) self.get_icon = get_icon_char self.background_color = (1, 1, 1, 0) self.markup = True self.halign = 'center' self.valign = 'middle' self.color = self.get_color(self.txt_color) self.size_hint = 1, 1 self.font_size = self.height * .8 self.p_width = 0 self.txt_color = self.n_txt_color def on_txt_color(self, widget, txt_color): widget.color = self.get_color(txt_color) widget.ids.licon.color = self.get_color(txt_color) def on_size(self, widget, size): self.size = size self.font_size = self.height * .8 def on_touch_down(self, touch): if self.collide_point(touch.x, touch.y): self.txt_color = self.d_txt_color return super(FIconLabel, self).on_touch_down(touch) def on_touch_up(self, touch): if self.collide_point(touch.x, touch.y): self.txt_color = self.n_txt_color return super(FIconLabel, self).on_touch_up(touch)
27.921053
106
0.65787
285
2,122
4.659649
0.294737
0.090361
0.045181
0.048193
0.278614
0.23494
0.162651
0.143072
0.082831
0.082831
0
0.011091
0.235156
2,122
75
107
28.293333
0.807147
0.005655
0
0.117647
0
0
0.182381
0.053333
0
0
0
0
0
1
0.098039
false
0
0.156863
0
0.411765
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99641347f295a7ff5a40d732751c3697679eb9d0
1,498
py
Python
openquake/qa_tests_data/classical/case_20/__init__.py
gfzriesgos/shakyground-lfs
2caf67cc32e6800286eded2df1efb05973ccf41b
[ "BSD-3-Clause" ]
1
2019-08-01T00:28:24.000Z
2019-08-01T00:28:24.000Z
openquake/qa_tests_data/classical/case_20/__init__.py
gfzriesgos/shakyground-lfs
2caf67cc32e6800286eded2df1efb05973ccf41b
[ "BSD-3-Clause" ]
4
2018-08-31T14:14:35.000Z
2021-10-11T12:53:13.000Z
openquake/qa_tests_data/classical/case_20/__init__.py
gfzriesgos/shakyground-lfs
2caf67cc32e6800286eded2df1efb05973ccf41b
[ "BSD-3-Clause" ]
3
2018-08-31T14:11:00.000Z
2019-07-17T10:06:02.000Z
# -*- coding: utf-8 -*- # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright (C) 2015-2018 GEM Foundation # # OpenQuake is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # OpenQuake is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with OpenQuake. If not, see <http://www.gnu.org/licenses/>. """ This is an example with a source model logic tree containing uncertainties on the fault geometry. The GMPE is fixed as the Sadigh et al. 1997 model. The source model contains three faults SFLT1 (simple fault), COMFLT1 (complex fault) and CHAR1 (characteristic fault). Two geometries are defined for SFLT1 (sg1, sg2), two for COMFLT1 (cog1, cog2) and three for CHAR1 (char_simple, char_complex, char_planar) 12 curves output: *_sg1_cog1_char_simple-* *_sg1_cog1_char_complex-* *_sg1_cog1_char_planar-* *_sg1_cog2_char_simple-* *_sg1_cog2_char_complex-* *_sg1_cog2_char_planar-* *_sg2_cog1_char_simple-* *_sg2_cog1_char_complex-* *_sg2_cog1_char_planar-* *_sg2_cog2_char_simple-* *_sg2_cog2_char_complex-* *_sg2_cog2_char_planar-* """
35.666667
77
0.777704
237
1,498
4.700422
0.49789
0.043088
0.032316
0.051167
0.089767
0.089767
0.061041
0
0
0
0
0.041245
0.14219
1,498
41
78
36.536585
0.825681
0.971295
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
2
996441264a0388e222c0662b66693e392b112c8f
5,368
py
Python
yardstick/network_services/vnf_generic/vnf/iniparser.py
mythwm/yardstick-wm
319ced11df92456b42c80cfd6e53c66dbd22a746
[ "Apache-2.0" ]
1
2019-12-08T21:57:31.000Z
2019-12-08T21:57:31.000Z
yardstick/network_services/vnf_generic/vnf/iniparser.py
mythwm/yardstick-wm
319ced11df92456b42c80cfd6e53c66dbd22a746
[ "Apache-2.0" ]
null
null
null
yardstick/network_services/vnf_generic/vnf/iniparser.py
mythwm/yardstick-wm
319ced11df92456b42c80cfd6e53c66dbd22a746
[ "Apache-2.0" ]
null
null
null
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ParseError(Exception): def __init__(self, message, line_no, line): self.msg = message self.line = line self.line_no = line_no def __str__(self): return 'at line %d, %s: %r' % (self.line_no, self.msg, self.line) class BaseParser(object): PARSE_EXC = ParseError def __init__(self): super(BaseParser, self).__init__() self.line_no = 0 def _assignment(self, key, value): self.assignment(key, value) return None, [] def _get_section(self, line): if not line.endswith(']'): return self.error_no_section_end_bracket(line) if len(line) <= 2: return self.error_no_section_name(line) return line[1:-1] def _split_key_value(self, line): colon = line.find(':') equal = line.find('=') if colon < 0 and equal < 0: return self.error_invalid_assignment(line) if colon < 0 or (0 <= equal < colon): key, value = line[:equal], line[equal + 1:] else: key, value = line[:colon], line[colon + 1:] value = value.strip() if value and value[0] == value[-1] and value.startswith(("\"", "'")): value = value[1:-1] return key.strip(), [value] def _single_line_parse(self, line, key, value): self.line_no += 1 if line.startswith(('#', ';')): self.comment(line[1:].strip()) return key, value active, _, comment = line.partition(';') self.comment(comment.strip()) if not active: # Blank line, ends multi-line values if key: key, value = self._assignment(key, value) return key, value if active.startswith((' ', '\t')): # Continuation of 
previous assignment if key is None: return self.error_unexpected_continuation(line) value.append(active.lstrip()) return key, value if key: # Flush previous assignment, if any key, value = self._assignment(key, value) if active.startswith('['): # Section start section = self._get_section(active) if section: self.new_section(section) else: key, value = self._split_key_value(active) if not key: return self.error_empty_key(line) return key, value def parse(self, line_iter=None): if line_iter is None: return key = None value = [] for line in line_iter: key, value = self._single_line_parse(line, key, value) if key: # Flush previous assignment, if any self._assignment(key, value) def assignment(self, key, value): """Called when a full assignment is parsed.""" raise NotImplementedError() def new_section(self, section): """Called when a new section is started.""" raise NotImplementedError() def comment(self, comment): """Called when a comment is parsed.""" pass def make_parser_error(self, template, line): raise self.PARSE_EXC(template, self.line_no, line) def error_invalid_assignment(self, line): self.make_parser_error("No ':' or '=' found in assignment", line) def error_empty_key(self, line): self.make_parser_error('Key cannot be empty', line) def error_unexpected_continuation(self, line): self.make_parser_error('Unexpected continuation line', line) def error_no_section_end_bracket(self, line): self.make_parser_error('Invalid section (must end with ])', line) def error_no_section_name(self, line): self.make_parser_error('Empty section name', line) class ConfigParser(BaseParser): """Parses a single config file, populating 'sections' to look like: {'DEFAULT': {'key': [value, ...], ...}, ...} """ def __init__(self, filename, sections): super(ConfigParser, self).__init__() self.filename = filename self.sections = sections self.section = None def parse(self, line_iter=None): with open(self.filename) as f: return super(ConfigParser, self).parse(f) def new_section(self, 
section): self.section = section self.sections.setdefault(self.section, []) def assignment(self, key, value): if not self.section: raise self.error_no_section() value = '\n'.join(value) self.sections[self.section].append([key, value]) def error_no_section(self): self.make_parser_error('Section must be started before assignment', '')
30.157303
79
0.600037
657
5,368
4.74277
0.234399
0.05905
0.026958
0.036585
0.204108
0.117779
0.049422
0.026316
0.026316
0
0
0.006307
0.29117
5,368
177
80
30.327684
0.812615
0.179396
0
0.17757
0
0
0.04717
0
0
0
0
0
0
1
0.205607
false
0.009346
0
0.009346
0.383178
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
9964cf19b644bec6e8da1584aa1fc11f4f7d82fe
245
py
Python
6_json_to_csv.py
pLINaROF/income_of_russian_deputies
6c27fe968825aa2131b613da7265364a03e47397
[ "MIT" ]
null
null
null
6_json_to_csv.py
pLINaROF/income_of_russian_deputies
6c27fe968825aa2131b613da7265364a03e47397
[ "MIT" ]
null
null
null
6_json_to_csv.py
pLINaROF/income_of_russian_deputies
6c27fe968825aa2131b613da7265364a03e47397
[ "MIT" ]
null
null
null
import pandas df = pandas.read_json('data_with_income_rub.json') df.to_csv('data_with_income_rub.csv', index=False) df = pandas.read_json('data_with_income_rub_from_csv.json') df.to_csv('data_with_income_rub_from_csv.csv', index=False)
30.625
60
0.791837
44
245
3.954545
0.318182
0.183908
0.321839
0.390805
0.781609
0.781609
0.701149
0.701149
0
0
0
0
0.085714
245
7
61
35
0.776786
0
0
0
0
0
0.487395
0.487395
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
1
1
0
1
1
1
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
99659d8149cb29fabf4d67b97757ba247c6d49a2
1,767
py
Python
tests/tests/test_settings.py
washwash/django-rest-tfa
b1b77a2220824297a471b0cfa4529e1d6fba7dfe
[ "MIT" ]
1
2019-10-11T15:25:52.000Z
2019-10-11T15:25:52.000Z
tests/tests/test_settings.py
washwash/django-rest-tfa
b1b77a2220824297a471b0cfa4529e1d6fba7dfe
[ "MIT" ]
null
null
null
tests/tests/test_settings.py
washwash/django-rest-tfa
b1b77a2220824297a471b0cfa4529e1d6fba7dfe
[ "MIT" ]
null
null
null
import mock from datetime import datetime, timedelta from django.test import override_settings from freezegun import freeze_time from django.urls import reverse from rest_framework import status from tests.tests.test_api import get_token_from_email @mock.patch('django.core.mail.outbox', new_callable=list) @override_settings(TFA_TOKEN_AGE='20') def test_token_expire(outbox, user_logged_client): user_logged_client.post( '/auth/login/', data={ 'username': 'user', 'password': 'password', } ) user_logged_client.post(reverse('tfa_create_challenge'), data={'type': 'email'}) token = get_token_from_email(outbox.pop()) date = datetime.now() + timedelta(seconds=30) with freeze_time(date): url = reverse('tfa_accept_challenge') response = user_logged_client.post(url, data={'token': token}) assert response.status_code == status.HTTP_400_BAD_REQUEST @mock.patch('django.core.mail.outbox', new_callable=list) @override_settings(TFA_CLIENT_AGE='20') def test_client_expire(outbox, user_logged_client): user_logged_client.post( '/auth/login/', data={ 'username': 'user', 'password': 'password', } ) user_logged_client.post(reverse('tfa_create_challenge'), data={'type': 'email'}) token = get_token_from_email(outbox.pop()) user_logged_client.post(reverse('tfa_accept_challenge'), data={'token': token}) response = user_logged_client.get('/dummy/') assert response.status_code == status.HTTP_200_OK date = datetime.now() + timedelta(seconds=30) with freeze_time(date): response = user_logged_client.get('/dummy/') assert response.status_code == status.HTTP_303_SEE_OTHER
34.647059
84
0.697793
225
1,767
5.186667
0.297778
0.08569
0.137104
0.102828
0.66838
0.66838
0.613539
0.613539
0.613539
0.613539
0
0.011708
0.178268
1,767
50
85
35.34
0.792011
0
0
0.511628
0
0
0.142615
0.026033
0
0
0
0
0.069767
1
0.046512
false
0.046512
0.162791
0
0.209302
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9965fa5ba34897ab740dab94e3003731240a9286
3,131
py
Python
zorin/report.py
warnerpr/zorin
7f41029a229fd0589460a76514afd42122e92aed
[ "MIT" ]
1
2015-07-14T11:57:27.000Z
2015-07-14T11:57:27.000Z
zorin/report.py
warnerpr/zorin
7f41029a229fd0589460a76514afd42122e92aed
[ "MIT" ]
null
null
null
zorin/report.py
warnerpr/zorin
7f41029a229fd0589460a76514afd42122e92aed
[ "MIT" ]
null
null
null
import sys import json from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.exc import IntegrityError from sqlalchemy import Column, Integer, String, Table, PrimaryKeyConstraint, create_engine # engine = create_engine('sqlite:////tmp/zorin.db') engine = create_engine('sqlite:///:memory:') Base = declarative_base() Session = sessionmaker(bind=engine) visitors = Table('visitors', Base.metadata, Column('site_id', Integer), Column('name', String), PrimaryKeyConstraint("site_id", "name") ) class Visitor(Base): __table__ = visitors Base.metadata.create_all(engine) class Site(object): def __init__(self): self.op_events = {} self.chats = set() self.emails = set() self.operators = set() def add_operator_event(self, ts, op, state): self.op_events[op] = sorted(set(self.op_events.get(op, []) + [(ts, state)])) self.operators.add(op) def get_state(self, time_stamp): states = [] for op, events in self.op_events.items(): prev_state = 'offline' for ts, state in events: if ts > time_stamp: break prev_state = state states.append(prev_state) return 'online' if 'online' in states else 'offline' def add_chat(self, time_stamp, visitor, site_id): if time_stamp in self.chats or time_stamp in self.emails: return state = self.get_state(time_stamp) if state == 'online': self.chats.add(time_stamp) else: self.emails.add(time_stamp) visitor = Visitor(site_id=site_id, name=visitor) session = Session() session.add(visitor) try: session.commit() except IntegrityError: pass def report(self, site_id): session = Session() visitors = session.query(Visitor).filter(Visitor.site_id == site_id).all() print "{site_id},messages={messages},emails={emails},operators={operators},visitors={visitors}".format( site_id=site_id, messages=len(self.chats), emails=len(self.emails), operators=len(self.operators), visitors=len(visitors)) def main(): fname = sys.argv[1] sites = {} with open(fname) as f: for line in f.readlines(): data = json.loads(line) site_id = 
data['site_id'] site = sites.setdefault(site_id, Site()) if data['type'] == 'status': site.add_operator_event(data['timestamp'], data['from'], data['data']['status']) with open(fname) as f: for line in f.readlines(): data = json.loads(line.strip()) site_id = data['site_id'] site = sites[site_id] if data['type'] == 'message': site.add_chat(data['timestamp'], data['from'], site_id) for site_id, site in sorted(sites.items(), key=lambda _e: _e[0]): site.report(site_id) if __name__ == '__main__': main()
30.105769
111
0.595018
377
3,131
4.763926
0.267905
0.066815
0.038976
0.020045
0.106904
0.085746
0.085746
0.057906
0.057906
0.057906
0
0.000886
0.279144
3,131
103
112
30.398058
0.79486
0.01565
0
0.102564
0
0
0.07987
0.028247
0
0
0
0
0
0
null
null
0.012821
0.076923
null
null
0.012821
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9967cec318291035a6b99a56b195699b1cec987a
4,766
py
Python
holybible.py
DPS0340/holybible.py
ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109
[ "MIT" ]
null
null
null
holybible.py
DPS0340/holybible.py
ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109
[ "MIT" ]
null
null
null
holybible.py
DPS0340/holybible.py
ee6b4d6da7b21f44a6d3e7fc8973cf186f7c1109
[ "MIT" ]
null
null
null
# 이지호 작성 # # 공동번역 성서의 저작권은 모두 저작권자에게 있습니다. # import sys import re import random end = "끝났습니다." error = "오류입니다." def run(): short = ['Gen', 'Exo', 'Lev', 'Num', 'Deu', 'Jos', 'Jdg', 'Rth', '1Sa', '2Sa', '1Ki', '2Ki', '1Ch', '2Ch', 'Ezr', 'Neh', 'Est', 'Job', 'Psa', 'Pro', 'Ecc', 'Sol', 'Isa', 'Jer', 'Eze', 'Dan', 'Amo', 'Oba', 'Jon', 'Mic', 'Nah', 'Hab', 'Zep', 'Hag', 'Zec', 'Mar', 'Luk', 'Joh', 'Act', 'Rom', '1Co', '2Co', 'Gal', 'Eph', 'Phi', 'Col', '1Th', '2Th', '1Ti', '2Ti', 'Tit', 'Phm', 'Heb', 'Jam', '1Pe', '2Pe', '1Jo', '2Jo', '3Jo', 'Jod', 'Rev'] bookname = ['창세기', '출애굽기', '레위기', '민수기', '신명기', '여호수아', '판관기', '룻기', '사무엘상', '사무엘하', '열왕기상', '열왕기하', '역대기상', '역대기하', '에즈라', '느헤미야', '에스델', '욥기', '시편', '잠언', '전도서', '아가', '이사야', '에레미야', '애가', '에제키엘', '다니엘', '호에샤', '요엘', '아모스', '오바디야', '오냐', '미가', '나훔', '하바꾹', '스바니야', '하깨', '즈가리야', '말라기', '마태오의 복음서', '마르코의 복음서', '루가의 복음서', '요한의 복음서', '사도행전', '로마인에게 보낸 편지', '고린토인에게 보낸 첫째 편지', '고랜토인에게 보낸 둘째 편지', '갈라디아인에게 보낸 편지', '에페소인에게 보낸 편지', '필립비인들에게 보낸 편지', '골로사이인들에게 보낸 편지', '델살로니카인들에게 보낸 첫째 편지', '데살로니카인들에게 보낸 둘째 편지', '디모테오에게 보낸 첫째 편지', '디도에게 보낸 편지', '필레몬에게 보낸 편지', '히브리인들에게 보낸 편지', '야고보의 편지', '베드로의 첫째 편지', '베드로의 둘째 편지', '요한의 첫째 편지', '요한의 둘째 편지', '요한의 세째 편지', '유다의 편지', '요한의 묵시록'] global selectbookname global k global line global number for i in range(len(short)): book = bookname[i] say = ("[%d] " % (i + 1)) print(say + book, end=" ") if i % 5 == 0: print(''' ''') if i == (len(short) - 1) and (i % 5) != 0: for p in range((len(short) - 1) % 5): print(say + book, end=" ") print("선택하실 책 번호를 선택하세요.") number = int(input()) selectbookname = short[int(number - 1)] print(selectbookname) print(''' [1] 성경 scrapper [2] 장 선택해서 읽기 [3] 줄 선택해서 읽기 [4] 성경 리더 [5] 랜덤 줄 출력(모든 경전) 무엇을 선택하시겠습니까?''') choice = int(input()) if choice == 1: # 성경 scrapper lines = '' anypnl = re.compile("\d:\d") while True: with open('공동번역.txt', 'r')as a: line = a.readline() checker = line.find('%s %s' % (selectbookname, anypnl)) if not checker == -1: lines += line if line is 
False: break with open('result.txt', 'w') as b: b.write(lines) if choice == 2: # 장 리더 page = '' print('''몇 장입니까?''') k = int(input()) with open('공동번역.txt', 'r')as a: while True: line = a.readline() checker = line.find('%s %d' % (selectbookname, k)) closer = line.find('%s %d' % (selectbookname, k+1)) if checker != -1: page += '%s\n' % line if closer != -1: break if not line: break print("\n" * 5) print(page) if choice == 3: # 줄 리더 print('''몇 장 입니까?''') page = input() print('''몇 줄 입니까?''') line = input() with open('공동번역.txt', 'r') as a: while True: linesearcher = a.readline() linechecker = linesearcher.find("%s %s:%s" % (selectbookname, page, line)) if linechecker != -1: break if linesearcher == False: break print(linesearcher) if choice == 4: # 성경 리더 page = '' print('''몇 장부터 보시겠습니까?''') k = int(input()) with open('공동번역.txt', 'r')as a: while True: line = a.readline() checker = line.find('%s %d' % (selectbookname, k)) closer = line.find('%s %d' % (selectbookname, k+1)) if checker != -1: page += '%s\n' % line if closer != -1: print(page) print('''다음 장을 보려면 엔터를 눌러주세요. 다른 값을 입력하시면 종료됩니다.''') k += 1 select = input() if select == '': continue else: break if choice == 5: # 랜덤 줄 with open('공동번역.txt', 'r') as a: alllines = a.readlines() print(random.choice(alllines)) if choice not in [1, 2, 3, 4, 5]: print(error) sys.exit() run()
36.381679
118
0.402854
550
4,766
3.490909
0.434545
0.016667
0.03125
0.039063
0.201042
0.201042
0.201042
0.166146
0.166146
0.166146
0
0.018378
0.417751
4,766
130
119
36.661538
0.673514
0.014897
0
0.333333
0
0
0.206632
0
0
0
0
0
0
1
0.008333
false
0
0.025
0
0.033333
0.141667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9969702168eaa2a803a9b22907df963ef3cbab6f
143
py
Python
mail_alias_manager/api/v1_api/__init__.py
stuvusIT/mail_alias_manager
260b6d1da4db03079afee159c23c3f83f4e75937
[ "MIT" ]
null
null
null
mail_alias_manager/api/v1_api/__init__.py
stuvusIT/mail_alias_manager
260b6d1da4db03079afee159c23c3f83f4e75937
[ "MIT" ]
null
null
null
mail_alias_manager/api/v1_api/__init__.py
stuvusIT/mail_alias_manager
260b6d1da4db03079afee159c23c3f83f4e75937
[ "MIT" ]
null
null
null
"""Module containing the v1 API.""" from .root import API_V1 # noqa from . import recipient_alias # noqa from . import sender_alias # noqa
23.833333
37
0.72028
21
143
4.761905
0.571429
0.16
0.28
0
0
0
0
0
0
0
0
0.017241
0.188811
143
5
38
28.6
0.844828
0.314685
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
996b4a5c2c888f7934622bcd04f25b88299bc8b1
1,784
py
Python
server/cosmic_pairing_api/mocks/signs.py
lingtran/cosmic_pairing
df25b3873cc4fb871526eb5187070596b8b11ea2
[ "MIT" ]
null
null
null
server/cosmic_pairing_api/mocks/signs.py
lingtran/cosmic_pairing
df25b3873cc4fb871526eb5187070596b8b11ea2
[ "MIT" ]
19
2020-07-31T03:06:14.000Z
2022-03-10T05:03:04.000Z
server/cosmic_pairing_api/mocks/signs.py
lingtran/cosmic_pairing
df25b3873cc4fb871526eb5187070596b8b11ea2
[ "MIT" ]
null
null
null
from cosmic_pairing_api.utils.enums.element import Element from cosmic_pairing_api.utils.enums.modality import Modality SIGNS = [ { "id": 1, "name": "aries", "modality": Modality.CARDINAL.name, "element": Element.FIRE.name, }, { "id": 2, "name": "taurus", "modality": Modality.FIXED.name, "element": Element.EARTH.name, }, { "id": 3, "name": "gemini", "modality": Modality.MUTABLE.name, "element": Element.AIR.name, }, { "id": 4, "name": "cancer", "modality": Modality.CARDINAL.name, "element": Element.WATER.name, }, { "id": 5, "name": "leo", "modality": Modality.FIXED.name, "element": Element.FIRE.name, }, { "id": 6, "name": "virgo", "modality": Modality.MUTABLE.name, "element": Element.EARTH.name, }, { "id": 7, "name": "libra", "modality": Modality.CARDINAL.name, "element": Element.AIR.name, }, { "id": 8, "name": "scorpio", "modality": Modality.FIXED.name, "element": Element.WATER.name, }, { "id": 9, "name": "sagittarius", "modality": Modality.MUTABLE.name, "element": Element.FIRE.name, }, { "id": 10, "name": "capricorn", "modality": Modality.CARDINAL.name, "element": Element.EARTH.name, }, { "id": 11, "name": "aquarius", "modality": Modality.FIXED.name, "element": Element.AIR.name, }, { "id": 12, "name": "pisces", "modality": Modality.MUTABLE.name, "element": Element.WATER.name, }, ]
22.871795
60
0.483184
163
1,784
5.263804
0.263804
0.223776
0.251748
0.130536
0.77972
0.769231
0
0
0
0
0
0.01281
0.34361
1,784
77
61
23.168831
0.719898
0
0
0.315789
0
0
0.184417
0
0
0
0
0
0
1
0
false
0
0.026316
0
0.026316
0
0
0
0
null
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
996b9f6d14e4feb9f7a3b2d58454376d40004276
513
py
Python
progs/mean.py
Breccia/s-py
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
[ "MIT" ]
null
null
null
progs/mean.py
Breccia/s-py
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
[ "MIT" ]
null
null
null
progs/mean.py
Breccia/s-py
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
[ "MIT" ]
null
null
null
#!/usr/local/anaconda3/bin/python import sys sys.path.insert(0, "../libs/") from spy_mean import compute_mean if __name__ == "__main__": print("Program to compute mean") count = input("Enter total number of samples: ") idx = 0 data = [] for idx in range(0, int(count)): val = input("Enter data {0}: ".format(idx + 1)) data.append(val) #mean = spy_mean.compute_mean(data) mean = compute_mean(data) print("You entered: {0} vals, mean = {1}".format(count, mean))
24.428571
66
0.623782
74
513
4.148649
0.554054
0.143322
0.09772
0.123779
0
0
0
0
0
0
0
0.01995
0.218324
513
20
67
25.65
0.745636
0.128655
0
0
0
0
0.268623
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0.153846
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
996c038f0063123980ac86217bb77ad88b247eae
896
py
Python
wxalarmlib/utils/time_util.py
sanderiana/wxAlarm
6abc4a8851ce83fa7d3ee30d89a773d9952f87ed
[ "MIT" ]
null
null
null
wxalarmlib/utils/time_util.py
sanderiana/wxAlarm
6abc4a8851ce83fa7d3ee30d89a773d9952f87ed
[ "MIT" ]
null
null
null
wxalarmlib/utils/time_util.py
sanderiana/wxAlarm
6abc4a8851ce83fa7d3ee30d89a773d9952f87ed
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # --------------------------------------------------------------- # wxalarm.py # # Copyright (c) 2019 sanderiana https://github.com/sanderiana # # This software is released under the MIT License. # http://opensource.org/licenses/mit-license.php # --------------------------------------------------------------- # Icon made by Freepik from www.flaticon.com # --------------------------------------------------------------- import datetime def change_time(hour_min): date = datetime.datetime.now() year = date.year month = date.month day = date.day time = hour_min.split(":") hour = int(time[0]) min = int(time[1]) return datetime.datetime(year, month, day, hour, min, 0) def change_delta(delta_time): sec = delta_time.total_seconds() hour = sec // 3600 min = (sec - (hour * 3600)) // 60 return "%02d:%02d" % (hour, min)
28
65
0.506696
100
896
4.47
0.56
0.06264
0.049217
0
0
0
0
0
0
0
0
0.029412
0.165179
896
32
66
28
0.568182
0.472098
0
0
0
0
0.021645
0
0
0
0
0
0
1
0.133333
false
0
0.066667
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
996e5d48a68547f594689c85765b9e52520f471a
652
py
Python
getmode.py
NeraOS/Simulation-Python
a75cd01d9dcf61caa5f1f4471194d44d841095df
[ "MIT" ]
1
2020-12-02T10:20:40.000Z
2020-12-02T10:20:40.000Z
getmode.py
NeraOS/Simulation-Python
a75cd01d9dcf61caa5f1f4471194d44d841095df
[ "MIT" ]
null
null
null
getmode.py
NeraOS/Simulation-Python
a75cd01d9dcf61caa5f1f4471194d44d841095df
[ "MIT" ]
null
null
null
"""Выбор режима""" print (""" Выберите режим загрузки: 1 - Стандартная загрузка с поддержкой сетевых драйверов 2 - Безопасный режим с записью действий в log 9 - Остановить загрузку Введите ТОЛЬКО цифру """) def wr(mode): f = open('config.ini','w') # открытие в режиме записи f.write(str(mode)) # запись режима в файл f.close() x = input('))') if x == 1: wr(1) print('Стандартная загрузка с поддержкой сетевых драйверов') elif x == 2: wr(2) print('Безопасный режим с записью действий в log') elif x == 9: exit() else: wr(1) print('Стандартная загрузка с поддержкой сетевых драйверов')
25.076923
65
0.642638
90
652
4.655556
0.522222
0.136038
0.143198
0.214797
0.534606
0.534606
0.424821
0.257757
0.257757
0
0
0.018145
0.239264
652
25
66
26.08
0.826613
0.090491
0
0.173913
0
0
0.588235
0
0
0
0
0
0
1
0.043478
false
0
0
0
0.043478
0.173913
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
996e8e0ab897144a54ed75e7fe50985334a44f7b
150
py
Python
competitive/AtCoder/ABC144/B.py
pn11/benkyokai
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
[ "MIT" ]
null
null
null
competitive/AtCoder/ABC144/B.py
pn11/benkyokai
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
[ "MIT" ]
22
2020-03-24T16:24:47.000Z
2022-02-26T15:51:18.000Z
competitive/AtCoder/ABC144/B.py
pn11/benkyokai
9ebdc46b529e76b7196add26dbc1e62ad48e72b0
[ "MIT" ]
null
null
null
N = int(input()) ans = 'No' for i in range(1, 10): if N % i == 0: res = N // i if res < 10: ans = 'Yes' print(ans)
12.5
23
0.393333
25
150
2.36
0.64
0.067797
0
0
0
0
0
0
0
0
0
0.070588
0.433333
150
11
24
13.636364
0.623529
0
0
0
0
0
0.033557
0
0
0
0
0
0
1
0
false
0
0
0
0
0.125
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
996ea645ce42f819744d7c8848ee5604d942ae67
3,380
py
Python
ship.py
kcwikizh/kancolle-shinkai-db
73808a91b5f59d158374f016e2d514225f1ca6bd
[ "MIT" ]
1
2019-02-11T08:57:07.000Z
2019-02-11T08:57:07.000Z
ship.py
kcwikizh/kancolle-shinkai-db
73808a91b5f59d158374f016e2d514225f1ca6bd
[ "MIT" ]
null
null
null
ship.py
kcwikizh/kancolle-shinkai-db
73808a91b5f59d158374f016e2d514225f1ca6bd
[ "MIT" ]
null
null
null
"""Convert shinkai ship Json to KcWiki Lua """ __all__ = ['main'] import json from collections import OrderedDict from utils import python_data_to_lua_table SHIPS_HR_JSON = 'json/ships_human_readable.json' SHIPS_LUA = 'lua/ships.lua' def shinkai_parse_ship(ships): """Get shinkai ships stored by python OrderedDict""" ships_dict = OrderedDict() for ship_id in ships: ship = ships[ship_id] ship_dict = OrderedDict() ship_dict['日文名'] = ship['name']['fullname_ja_jp'] ship_dict['中文名'] = ship['name']['fullname_zh_cn'] ship_dict['kcwiki分类'] = ship['kcwiki_class'] attributes_dict = OrderedDict() attributes_dict['耐久'] = ship['stats']['taik'] attributes_dict['火力'] = [ ship['stats']['houg'], ship['stats']['houg2'] ] attributes_dict['雷装'] = [ ship['stats']['raig'], ship['stats']['raig2'] ] attributes_dict['对空'] = ship['stats']['tyku'] attributes_dict['对潜'] = ship['stats']['tais'] attributes_dict['回避'] = ship['stats']['houk'] attributes_dict['索敌'] = ship['stats']['saku'] attributes_dict['速力'] = ship['stats']['soku'] attributes_dict['装甲'] = ship['stats']['souk'] attributes_dict['运'] = ship['stats']['luck'] attributes_dict['射程'] = ship['stats']['leng'] ship_dict['属性'] = attributes_dict equip_dict = OrderedDict() equip_dict['格数'] = len(ship['slots']) equip_dict['搭载'] = ship['slots'] equip_dict['装备'] = ship['equips'] ship_dict['装备'] = equip_dict appears_list = [] for appear in ship.get('appears', []): appear_dict = OrderedDict() appear_dict['map'] = OrderedDict() appear_dict['map']['限定海域'] = appear['map']['is_event'] appear_dict['map']['年'] = appear['map']['year'] appear_dict['map']['季节'] = [ None, '冬', '春', '夏', '秋'][appear['map']['season']] appear_dict['map']['海域'] = 'E-' + str(appear['map']['event_id']) appear_dict['map']['Boss'] = appear['map']['is_boss'] if 'is_final_battle' in appear: appear_dict['最终战'] = appear['is_final_battle'] if 'selected_rank' in appear: appear_dict['选择难度'] = [ '无', '丙', '乙', '甲'][appear['selected_rank']] appears_list.append(appear_dict) if appears_list: 
ship_dict['出现海域'] = appears_list ships_dict[ship_id] = ship_dict return ships_dict def shinkai_generate_ship_lua(ships): """Generate KcWiki shinkai ship Lua table""" ships_dict = shinkai_parse_ship(ships) data, _ = python_data_to_lua_table(ships_dict, level=1) with open(SHIPS_LUA, 'w', encoding='utf8') as lua_fp: lua_fp.write('local d = {}\n\n' + 'd.shipDataTable = {\n') lua_fp.write(data) lua_fp.write('\n}\n\nreturn d\n') def load_ships_json(json_file): """Load and decode json""" print('Load json file: {}'.format(json_file)) with open(json_file, 'r', encoding='utf8') as file: ships = json.load(file) return ships def main(): """Main process""" ships = load_ships_json(SHIPS_HR_JSON) shinkai_generate_ship_lua(ships) if __name__ == '__main__': main()
33.465347
76
0.57071
411
3,380
4.420925
0.321168
0.100165
0.042928
0.016511
0.057237
0.027518
0
0
0
0
0
0.001995
0.25858
3,380
100
77
33.8
0.723065
0.047041
0
0
0
0
0.16531
0.009393
0
0
0
0
0
1
0.052632
false
0
0.039474
0
0.118421
0.013158
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
997013f433286a434be0bc62644d89db7ecaf942
346
py
Python
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
Marciobroficial/CURSO-EM-VIDEO
37b10c26336a9744236603282af77661fdf8c61a
[ "MIT" ]
1
2021-10-09T18:11:20.000Z
2021-10-09T18:11:20.000Z
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
Coppini21/CURSO-EM-VIDEO
37b10c26336a9744236603282af77661fdf8c61a
[ "MIT" ]
1
2021-09-15T04:18:34.000Z
2022-03-02T23:16:26.000Z
PYTHON/Py3_Mundo1_Fundamental/desafios/des001.py
Coppini21/CURSO-EM-VIDEO
37b10c26336a9744236603282af77661fdf8c61a
[ "MIT" ]
3
2021-12-15T17:19:51.000Z
2022-03-29T02:19:00.000Z
# Desafio 01 # Faça um programa Leia um nome de uma pessoa e que mostre uma mensagem de boas-vidas de acordo com o valor digitado. print() print('=-='*15) nome = input ('Qual é sue nome ? ') print ('Ola',nome,'Prazer em te conhecer!') nome = input('Digite seu nome: ') print('É um prazer te conhecer, {}!'.format(nome)) print('=-='*15) print()
26.615385
117
0.67052
57
346
4.070175
0.614035
0.116379
0
0
0
0
0
0
0
0
0
0.020833
0.16763
346
13
118
26.615385
0.784722
0.364162
0
0.5
0
0
0.431193
0
0
0
0
0
0
1
0
false
0
0
0
0
0.75
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
9971ddcf2919c00539af25050648ccbd84f39ca4
6,547
py
Python
models/FCOSInference.py
meet-minimalist/FCOS-Pytorch-Implementation
e8ac1c6230174902732dbe8bcff3a87034f99517
[ "MIT" ]
null
null
null
models/FCOSInference.py
meet-minimalist/FCOS-Pytorch-Implementation
e8ac1c6230174902732dbe8bcff3a87034f99517
[ "MIT" ]
null
null
null
models/FCOSInference.py
meet-minimalist/FCOS-Pytorch-Implementation
e8ac1c6230174902732dbe8bcff3a87034f99517
[ "MIT" ]
null
null
null
import os
import sys
from typing_extensions import final
sys.path.append("../")      # TODO : Remove this append line
import numpy as np
import torch
import torch.nn as nn
from models.FCOS import FCOS
from models.PostProcessor import PostProcessor
import imgaug.augmenters as iaa
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from torchvision import transforms
from utils.transforms.to_tensor import ToTensorOwn
from utils.transforms.normalize import Normalize
from utils.transforms.center_crop import CenterCrop

# Fixed device handles used when use_cuda is requested.
cuda = torch.device('cuda:0')
cpu = torch.device("cpu:0")

class FCOSInference(nn.Module):
    """Inference-time wrapper combining the FCOS detector with its post-processor.

    NOTE(review): freeze_backend and strides use mutable (list) default
    arguments; nothing here mutates them, but confirm callers do not modify
    them in place.
    """
    def __init__(self, backbone_model='resnet50', freeze_backend=[False, False, False, False], \
                    fpn_features=256, num_classes=81, use_det_head_group_norm=True, \
                    centerness_on_regression=True, use_gradient_checkpointing=False, \
                    strides=[8, 16, 32, 64, 128], use_cuda=False, \
                    add_centerness_in_cls_prob=True, max_detection_boxes_num=1000, \
                    cls_score_threshold=0.05, nms_iou_threshold=0.60):
        # Build the raw detector and the decode/score/NMS stage; optionally
        # move both to the fixed cuda:0 device.
        super(FCOSInference, self).__init__()
        self.strides = strides
        self.max_detection_boxes_num = max_detection_boxes_num
        self.model = FCOS(backbone_model, freeze_backend, fpn_features, num_classes, \
                        use_det_head_group_norm, centerness_on_regression, use_gradient_checkpointing)
        self.post_process = PostProcessor(use_cuda, add_centerness_in_cls_prob, \
                        max_detection_boxes_num, cls_score_threshold, nms_iou_threshold, num_classes)
        if use_cuda:
            self.model = self.model.to(cuda, non_blocking=True)
            self.post_process = self.post_process.to(cuda, non_blocking=True)

    def forward(self, preprocesed_image):
        """Run detection and return (padded predictions, per-image box counts).

        :param preprocesed_image: batched image tensor; indexed below as
            [B x C x img_h x img_w].
        :return: tuple of a [B x max_detection_boxes_num x 6] float tensor
            (rows are [x1, y1, x2, y2, cls_prob, cls_id], zero-padded past the
            real detections) and a [B] tensor with the true box count per image.
        """
        # image : [B x 3 x img_h x img_w]
        cls_probs, cnt_logits, reg_values = self.model(preprocesed_image)
        # cls_probs, cnt_logit, reg_values each will have a list of features having shape as below.
        # cls_probs : [[B x 81 x H x W], [B x 81 x H x W], ....]
        # cnt_logits: [[B x 1 x H x W], [B x 1 x H x W], ....]
        # reg_values: [[B x 4 x H x W], [B x 4 x H x W], ....]

        predictions = self.post_process([cls_probs, cnt_logits, reg_values], self.strides)
        # predictions : List of [N x 6] tensor for each element in batch
        #             : [x1, y1, x2, y2, cls_prob, cls_id]

        B = preprocesed_image.shape[0]
        num_bboxes = torch.zeros(size=[B])
        for i, res_img in enumerate(preprocesed_image):
            img_h, img_w = res_img.shape[1:]
            # Clamp every box corner into the image frame.
            predictions[i][:, 0] = torch.clip(predictions[i][:, 0], 0, img_w)
            predictions[i][:, 1] = torch.clip(predictions[i][:, 1], 0, img_h)
            predictions[i][:, 2] = torch.clip(predictions[i][:, 2], 0, img_w)
            predictions[i][:, 3] = torch.clip(predictions[i][:, 3], 0, img_h)
            num_bboxes[i] = len(predictions[i])

        # Pad the per-image variable-length predictions into one batched tensor.
        final_prediction = torch.zeros(size=[B, self.max_detection_boxes_num, 6], dtype=torch.float32)
        for i, pred in enumerate(predictions):
            final_prediction[i, :len(pred)] = pred

        return final_prediction, num_bboxes


if __name__ == "__main__":
    # Smoke test: restore a checkpoint, run one image through the model and
    # write a "_res.jpg" visualization next to the input image.
    import cv2
    import config_converter as config

    complete_model = FCOSInference(backbone_model=config.converter_backbone, freeze_backend=[False, False, False, False], \
                        fpn_features=config.converter_fpn_features, num_classes=config.converter_num_classes, \
                        use_det_head_group_norm=config.converter_use_det_head_group_norm, \
                        centerness_on_regression=config.converter_centerness_on_regression, \
                        use_gradient_checkpointing=False, strides=config.converter_strides, use_cuda=False, \
                        add_centerness_in_cls_prob=config.add_centerness_in_cls_prob, \
                        max_detection_boxes_num=config.max_detection_boxes_num, \
                        cls_score_threshold=config.cls_score_threshold, \
                        nms_iou_threshold=config.nms_iou_threshold)

    ckpt_path = "../summaries/2021_07_26_00_01_29/ckpt/fcos_resnet50_eps_26_test_loss_2.5426.pth"
    ckpt = torch.load(ckpt_path)['model']
    complete_model.model.load_state_dict(ckpt, strict=True)     # Restore FCOS architecture part only
    complete_model.model.eval()
    # TODO : Skipping this intentionally
    complete_model.eval()

    # Image loading and preprocessing
    img_path = "../sample_imgs/000026.jpg"
    # img_path = "../sample_imgs/000012.jpg"
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # NOTE(review): this rebinds the name 'transforms', shadowing the
    # torchvision module imported above; harmless here only because the
    # module is not used again afterwards.
    transforms = transforms.Compose([
                    CenterCrop(),
                    ToTensorOwn(),      # Custom ToTensor transform, converts to CHW from HWC only
                    Normalize(config.converter_normalization_type),
                ])

    # A dummy bounding box is supplied because the transform pipeline expects
    # a 'bbox' entry; its value is not used for inference.
    empty_bb = BoundingBoxesOnImage([BoundingBox(0, 0, 100, 100, label=0)], \
                                    shape=(*config.input_size, 3))
    sample = {'image' : img, 'bbox' : empty_bb}
    preprocessed_tensor = transforms([sample, config.input_size])

    resized_img = preprocessed_tensor['image']
    resized_img = torch.unsqueeze(resized_img, dim=0)

    # Model Inference
    final_predictions, num_bboxes = complete_model(resized_img)
    final_predictions = final_predictions.detach().numpy()
    num_bboxes = num_bboxes.detach().numpy()
    resized_img = resized_img.detach().numpy()

    for pred, num_bb, img in zip(final_predictions, num_bboxes, resized_img):
        pred = pred[:int(num_bb)]       # Rest are padded zeros and not useful as we padded the predictions to make a batch of output

        # Undo the per-channel normalization, then convert CHW float -> HWC uint8.
        img[0:1, :, :] = img[0:1, :, :] * 0.229 + 0.485
        img[1:2, :, :] = img[1:2, :, :] * 0.224 + 0.456
        img[2:3, :, :] = img[2:3, :, :] * 0.225 + 0.406
        img = np.uint8(np.transpose(img, (1, 2, 0)) * 255)

        for bb in pred:
            x1, y1, x2, y2 = [int(c) for c in bb[:4]]
            cls_prob, cls_id = bb[4:]
            cls_name = config.converter_label_dict[int(cls_id)]
            print(f"X1: {x1}, Y1: {y1}, X2: {x2}, Y2: {y2}, Cls_id: {int(cls_id)}, Cls_name: {cls_name}, Cls_prob: {cls_prob:.4f}")
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        op_path = os.path.splitext(img_path)[0] + "_res.jpg"
        cv2.imwrite(op_path, img)
45.465278
131
0.64625
895
6,547
4.463687
0.268156
0.027034
0.029787
0.035044
0.219524
0.171715
0.129912
0.080601
0.021026
0
0
0.037231
0.241026
6,547
143
132
45.783217
0.766754
0.111807
0
0
0
0.01
0.046568
0.017937
0
0
0
0.006993
0
1
0.02
false
0
0.16
0
0.2
0.01
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
997314c831c988ddbd5db113b34ed31937c6e5ec
435
py
Python
parsing/question-extraction.py
hagarwa3/stackoverflow-search
990b339a1b87530894d1068eb99d0b03e6476268
[ "MIT" ]
4
2018-04-20T06:53:01.000Z
2020-02-09T12:08:52.000Z
parsing/question-extraction.py
hagarwa3/stackoverflow-bot
990b339a1b87530894d1068eb99d0b03e6476268
[ "MIT" ]
null
null
null
parsing/question-extraction.py
hagarwa3/stackoverflow-bot
990b339a1b87530894d1068eb99d0b03e6476268
[ "MIT" ]
5
2019-04-10T16:45:21.000Z
2021-12-05T16:32:03.000Z
f = open("C:\Users\Harshit Agarwal\Desktop\stackoverflow.com-Posts\Postsnew.xml", "w") f.write('<?xml version="1.0" encoding="utf-8"?>\n<posts>') i = 0 with open("C:\Users\Harshit Agarwal\Desktop\stackoverflow.com-Posts\Posts.xml") as fileobject: for line in fileobject: i +=1 if 'PostTypeId="1"' in line: if i%100000 == 0: print i f.write(line) f.write('</posts>') f.close()
36.25
94
0.602299
65
435
4.030769
0.507692
0.068702
0.076336
0.129771
0.396947
0.396947
0.396947
0.396947
0.396947
0
0
0.038123
0.216092
435
12
95
36.25
0.730205
0
0
0
0
0
0.470183
0.293578
0
0
0
0
0
0
null
null
0
0
null
null
0.083333
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9976fff4f6af74e77c73639f170ea7b55694e3d1
6,611
py
Python
src/pybel/struct/filters/edge_predicates.py
tehw0lf/pybel
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
[ "Apache-2.0" ]
null
null
null
src/pybel/struct/filters/edge_predicates.py
tehw0lf/pybel
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
[ "Apache-2.0" ]
null
null
null
src/pybel/struct/filters/edge_predicates.py
tehw0lf/pybel
6f67f8cce15052cc3c42ef87374e3b9ee45e6519
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

"""Predicates for edge data from BEL graphs."""

from functools import wraps

from .utils import part_has_modifier
from ..graph import BELGraph
from ...constants import (
    ACTIVITY, ANNOTATIONS, ASSOCIATION, CAUSAL_RELATIONS, CITATION, CITATION_AUTHORS, CITATION_TYPE,
    CITATION_TYPE_PUBMED, DEGRADATION, DIRECT_CAUSAL_RELATIONS, EVIDENCE, OBJECT, POLAR_RELATIONS, RELATION,
    SUBJECT, TRANSLOCATION,
)
from ...dsl import BiologicalProcess, Pathology

__all__ = [
    'edge_predicate',
    'keep_edge_permissive',
    'has_provenance',
    'has_pubmed',
    'has_authors',
    'is_causal_relation',
    'is_direct_causal_relation',
    'is_associative_relation',
    'has_polarity',
    'edge_has_activity',
    'edge_has_degradation',
    'edge_has_translocation',
    'edge_has_annotation',
    'has_pathology_causal',
]


def edge_predicate(func):  # noqa: D202
    """Decorate an edge predicate function that only takes a dictionary as its singular argument.

    Apply this as a decorator to a function that takes a single argument, a PyBEL node data dictionary, to make
    sure that it can also accept a pair of arguments, a BELGraph and a PyBEL node tuple as well.

    :type func: (dict) -> bool
    :rtype: (pybel.BELGraph, tuple, tuple, int) -> bool
    """

    @wraps(func)
    def _wrapped(*args):
        x = args[0]

        # When called as (graph, u, v, k), look the edge data up first.
        if isinstance(x, BELGraph):
            u, v, k = args[1:4]
            return func(x[u][v][k])

        # Otherwise assume the edge data dictionary was passed directly.
        return func(*args)

    return _wrapped


def keep_edge_permissive(*args, **kwargs):
    """Return true for all edges.

    :param dict data: A PyBEL edge data dictionary from a :class:`pybel.BELGraph`
    :return: Always returns :code:`True`
    :rtype: bool
    """
    return True


@edge_predicate
def has_provenance(data):
    """Check if the edge has provenance information (i.e. citation and evidence).

    :param dict data: The edge data dictionary
    :return: If the edge has both a citation and an evidence entry
    :rtype: bool
    """
    return CITATION in data and EVIDENCE in data


@edge_predicate
def has_pubmed(data):
    """Check if the edge has a PubMed citation.

    :param dict data: A PyBEL edge data dictionary from a :class:`pybel.BELGraph`
    :return: Does the edge data dictionary has a PubMed citation?
    :rtype: bool
    """
    return CITATION in data and CITATION_TYPE_PUBMED == data[CITATION][CITATION_TYPE]


@edge_predicate
def has_authors(data):
    """Check if the edge contains author information for its citation.

    :param dict data: A PyBEL edge data dictionary from a :class:`pybel.BELGraph`
    :return: Does the edge's citation data dictionary have authors included?
    :rtype: bool
    """
    # The final truthiness check also rejects a present-but-empty author list.
    return CITATION in data and CITATION_AUTHORS in data[CITATION] and data[CITATION][CITATION_AUTHORS]


@edge_predicate
def is_causal_relation(data):
    """Check if the given relation is causal.

    :param dict data: The PyBEL edge data dictionary
    :rtype: bool
    """
    return data[RELATION] in CAUSAL_RELATIONS


@edge_predicate
def is_direct_causal_relation(data):
    """Check if the edge is a direct causal relation.

    :param dict data: The PyBEL edge data dictionary
    :rtype: bool
    """
    return data[RELATION] in DIRECT_CAUSAL_RELATIONS


@edge_predicate
def is_associative_relation(data):
    """Check if the edge has an association relation.

    :param dict data: The PyBEL edge data dictionary
    :return: If the edge is a causal edge
    :rtype: bool
    """
    return data[RELATION] == ASSOCIATION


@edge_predicate
def has_polarity(data):
    """Check if the edge has polarity.

    :param dict data: The edge data dictionary
    :return: If the edge is a polar edge
    :rtype: bool
    """
    return data[RELATION] in POLAR_RELATIONS


def _has_modifier(data, modifier):
    """Check if the edge has the given modifier.

    :param dict data: The edge data dictionary
    :param str modifier: The modifier to check. One of :data:`pybel.constants.ACTIVITY`,
        :data:`pybel.constants.DEGRADATION`, or :data:`pybel.constants.TRANSLOCATION`.
    :return: Does either the subject or object have the given modifier
    :rtype: bool
    """
    return part_has_modifier(data, SUBJECT, modifier) or part_has_modifier(data, OBJECT, modifier)


@edge_predicate
def edge_has_activity(data):
    """Check if the edge contains an activity in either the subject or object.

    :param dict data: The edge data dictionary
    :return: If the edge contains an activity in either the subject or object
    :rtype: bool
    """
    return _has_modifier(data, ACTIVITY)


@edge_predicate
def edge_has_translocation(data):
    """Check if the edge has a translocation in either the subject or object.

    :param dict data: The edge data dictionary
    :return: If the edge has a translocation in either the subject or object
    :rtype: bool
    """
    return _has_modifier(data, TRANSLOCATION)


@edge_predicate
def edge_has_degradation(data):
    """Check if the edge contains a degradation in either the subject or object.

    :param dict data: The edge data dictionary
    :return: If the edge contains a degradation in either the subject or object
    :rtype: bool
    """
    return _has_modifier(data, DEGRADATION)


def edge_has_annotation(data, key):
    """Check if an edge has the given annotation.

    :param dict data: The data dictionary from a BELGraph's edge
    :param str key: An annotation key
    :return: If the annotation key is present in the current data dictionary
    :rtype: Optional[Any]

    For example, it might be useful to print all edges that are annotated with 'Subgraph':

    >>> from pybel.examples import sialic_acid_graph
    >>> for u, v, data in sialic_acid_graph.edges(data=True):
    >>>     if edge_has_annotation(data, 'Species'):
    >>>         print(u, v, data)
    """
    annotations = data.get(ANNOTATIONS)

    if annotations is None:
        return

    return annotations.get(key)


def has_pathology_causal(graph, u, v, k):
    """Check if the subject is a pathology and has a causal relationship with a non bioprocess/pathology.

    :param pybel.BELGraph graph: A BEL Graph
    :param BaseEntity u: A BEL node
    :param BaseEntity v: A BEL node
    :param str k: The edge key between the given nodes
    :return: If the subject of this edge is a pathology and it participates in a causal reaction.
    :rtype: bool
    """
    return (
        isinstance(u, Pathology) and
        is_causal_relation(graph, u, v, k) and
        not isinstance(v, (Pathology, BiologicalProcess))
    )
29.382222
117
0.697776
921
6,611
4.896851
0.169381
0.038803
0.031929
0.031042
0.371175
0.34612
0.278492
0.256984
0.239246
0.222395
0
0.001356
0.218878
6,611
224
118
29.513393
0.871998
0.535774
0
0.126582
0
0
0.09101
0.026003
0
0
0
0
0
1
0.202532
false
0
0.063291
0
0.481013
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
9977007f75c563a21f518b2163f15a2318a36887
1,001
py
Python
tests/ComponentTests/neck_build.py
FabricExile/Kraken
d8c1f5189cb191945e2c18a1369c458d05305afc
[ "BSD-3-Clause" ]
7
2017-12-04T16:57:42.000Z
2021-09-07T07:02:38.000Z
tests/ComponentTests/neck_build.py
xtvjxk123456/Kraken
d8c1f5189cb191945e2c18a1369c458d05305afc
[ "BSD-3-Clause" ]
null
null
null
tests/ComponentTests/neck_build.py
xtvjxk123456/Kraken
d8c1f5189cb191945e2c18a1369c458d05305afc
[ "BSD-3-Clause" ]
6
2017-11-14T06:50:48.000Z
2021-08-21T22:47:29.000Z
# Build a neck rig from a hand-positioned guide, timing the build with the
# Kraken profiler. Python 2 script ('print' statement at the bottom).
from kraken import plugins
from kraken.core.maths import Vec3
from kraken_examples.neck_component import NeckComponentGuide, NeckComponentRig
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy

Profiler.getInstance().push("neck_build")

# Configure the guide with world-space positions for the neck chain.
neckGuide = NeckComponentGuide("neck")
neckGuide.loadData({
    "name": "Neck",
    "location": "L",
    "neckPosition": Vec3(0.0, 16.5572, -0.6915),
    "neckUpVOffset": Vec3(0.0, 0.0, -1.0),
    "neckEndPosition": Vec3(0.0, 17.4756, -0.421)
})

# Save the neck guide data for persistence.
saveData = neckGuide.saveData()

neckGuideData = neckGuide.getRigBuildData()

# Instantiate the rig from the guide's build data and build it with the
# active DCC plugin builder.
neck = NeckComponentRig()
neck.loadData(neckGuideData)

builder = plugins.getBuilder()
builder.build(neck)

Profiler.getInstance().pop()

if __name__ == "__main__":
    print Profiler.getInstance().generateReport()
else:
    # When imported (e.g. from a test runner), dump the built hierarchy instead.
    logHierarchy(neck)
25.025
79
0.67033
106
1,001
6.216981
0.509434
0.075873
0.027314
0
0
0
0
0
0
0
0
0.044473
0.213786
1,001
39
80
25.666667
0.792884
0.040959
0
0
0
0
0.082463
0
0
0
0
0
0
0
null
null
0
0.2
null
null
0.04
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9978c4d313a84a9361e210c9564192dbd9f01882
1,354
py
Python
tests/test_xsw.py
cnelson/pysaml2
a30e51c271e27e4411a0243b65adbf5d7a3abb07
[ "Apache-2.0" ]
null
null
null
tests/test_xsw.py
cnelson/pysaml2
a30e51c271e27e4411a0243b65adbf5d7a3abb07
[ "Apache-2.0" ]
2
2020-01-17T20:13:26.000Z
2020-11-17T23:00:50.000Z
tests/test_xsw.py
cnelson/pysaml2
a30e51c271e27e4411a0243b65adbf5d7a3abb07
[ "Apache-2.0" ]
2
2020-02-24T15:18:18.000Z
2020-07-15T10:49:12.000Z
from datetime import datetime
from unittest.mock import Mock
from unittest.mock import patch

from saml2.config import config_factory
from saml2.response import authn_response
from saml2.sigver import SignatureError

from dateutil import parser
from pytest import raises

from pathutils import dotname
from pathutils import full_path

# Fixture: SAML response carrying an XML Signature Wrapping (XSW) payload.
XML_RESPONSE_XSW = full_path("saml2_response_xsw.xml")


class TestAuthnResponse:
    """Verify that a signature-wrapped (XSW) SAML response fails verification."""

    def setup_class(self):
        # pytest's xunit-style class setup; the first positional argument it
        # receives is the class object itself.
        self.conf = config_factory("sp", dotname("server_conf"))
        self.ar = authn_response(self.conf, "http://lingon.catalogix.se:8087/")

    @patch('saml2.response.validate_on_or_after', return_value=True)
    def test_verify_signed_xsw(self, mock_validate_on_or_after):
        # Neutralize freshness checks so only signature handling is exercised.
        self.ar.issue_instant_ok = Mock(return_value=True)
        with open(XML_RESPONSE_XSW) as fp:
            xml_response = fp.read()

        self.ar.outstanding_queries = {"id12": "http://localhost:8088/sso"}
        self.ar.timeslack = 10000
        self.ar.loads(xml_response, decode=False)

        # The wrapped response still parses normally...
        assert self.ar.came_from == 'http://localhost:8088/sso'
        assert self.ar.session_id() == "id12"
        assert self.ar.issuer() == 'urn:mace:example.com:saml:roland:idp'

        # ...but verification must reject the wrapped signature.
        with raises(SignatureError):
            self.ar.verify()

        # No assertion attributes or subject may leak after the failure.
        assert self.ar.ava is None
        assert self.ar.name_id is None
30.088889
79
0.711226
188
1,354
4.941489
0.43617
0.071044
0.064586
0.047363
0
0
0
0
0
0
0
0.023679
0.189069
1,354
44
80
30.772727
0.822404
0
0
0
0
0
0.144756
0.068685
0
0
0
0
0.166667
1
0.066667
false
0
0.333333
0
0.433333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
997b54bcf5f7b4cc139e3fa13dc04de3e3c8896b
102
py
Python
python/6Kyu/Split Strings.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
python/6Kyu/Split Strings.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
python/6Kyu/Split Strings.py
athasv/Codewars-data
5e106466e709fd776f23585ad9f652d0d65b48d3
[ "MIT" ]
null
null
null
def solution(s):
    """Split *s* into chunks of two characters, padding an odd tail with '_'.

    >>> solution("abc")
    ['ab', 'c_']
    """
    if len(s) % 2:
        s += "_"
    return [s[i:i + 2] for i in range(0, len(s), 2)]
51
85
0.529412
24
102
2.208333
0.583333
0.150943
0
0
0
0
0
0
0
0
0
0.063291
0.22549
102
2
85
51
0.607595
0
0
0
0
0
0.009709
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
997bef368503e63a63bd868a1dd0b1d4182d8089
752
py
Python
Tool/converter/convert.py
TencentOpen/icaf
d29a43c6ca89562872e044b2104c1bd5a595f1b6
[ "MIT" ]
38
2015-06-12T03:31:06.000Z
2018-08-29T06:49:14.000Z
Tool/converter/convert.py
TencentOpen/icaf
d29a43c6ca89562872e044b2104c1bd5a595f1b6
[ "MIT" ]
1
2016-01-25T01:57:40.000Z
2016-01-25T01:57:40.000Z
Tool/converter/convert.py
TencentOpen/icaf
d29a43c6ca89562872e044b2104c1bd5a595f1b6
[ "MIT" ]
35
2015-06-11T15:11:23.000Z
2020-04-05T01:26:11.000Z
import os import re def writeLine(fs, line): print "write: %s"%line fs.write(line) def fixFile(filePath): fs = open(filePath, "r") lines = fs.readlines() fs.close() fs = open(filePath, "w") for line in lines: result = re.search(r"(?<=\[NSThread\ssleepForTimeInterval:).*(?=\])", line) if not result: writeLine(fs, line) continue if line.startswith("\\\\"): writeLine(fs, line) continue time = result.group(0) indent = re.search(r"^\s*", line).group(0) newLine = indent + "AppeckerWait(" + time + ");"; writeLine(fs, newLine) fs.close() def onFileDetected( arg, dirname, names ): for name in names: if re.search(r'\.mm$', name.lower()): fixFile(dirname + '/' + name) os.path.walk(".", onFileDetected, ())
20.324324
77
0.62766
100
752
4.72
0.43
0.09322
0.095339
0.097458
0
0
0
0
0
0
0
0.003226
0.175532
752
36
78
20.888889
0.758065
0
0
0.214286
0
0
0.115691
0.06117
0
0
0
0
0
0
null
null
0
0.071429
null
null
0.035714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
997f4b97d545477757e8bda91a697ce9e6990088
11,259
py
Python
kcclient/slidingmetrics.py
sanjeevm0/kcluster-client
5dda3f2a4ebc5811ec176aab70f48d9be5f6a731
[ "MIT" ]
null
null
null
kcclient/slidingmetrics.py
sanjeevm0/kcluster-client
5dda3f2a4ebc5811ec176aab70f48d9be5f6a731
[ "MIT" ]
null
null
null
kcclient/slidingmetrics.py
sanjeevm0/kcluster-client
5dda3f2a4ebc5811ec176aab70f48d9be5f6a731
[ "MIT" ]
1
2020-09-22T23:40:37.000Z
2020-09-22T23:40:37.000Z
import math
import sys
import os
import copy
thisPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(thisPath)
from enum import Enum
from mlock import MLock
import utils

# Input.Cumulative means cumulative value is being input (e.g. total bytes)
# Input.Average means time average is being input (e.g. bytes/sec)
# Input Value means value is being given (e.g. bytes)
Input = Enum('Input', 'Value Counter NBitCounter')


def noneMax(x, y):
    """max(x, y) treating None as 'missing' rather than as a comparable value."""
    if x is None:
        return y
    elif y is None:
        return x
    else:
        return max(x, y)


def noneMin(x, y):
    """min(x, y) treating None as 'missing' rather than as a comparable value."""
    if x is None:
        return y
    elif y is None:
        return x
    else:
        return min(x, y)


class SlidingMetrics():
    """Sliding-window statistics (averages, min/max) over timestamped samples.

    Samples are bucketed into sub-windows of width (maxWindow - minWindow);
    whole sub-windows are dropped as time advances so the retained span stays
    between minWindow and maxWindow.

    Fix over the original: avgNumL/avgNumR referenced a bare name ``N``
    (a NameError at runtime) instead of ``self.N``.
    """

    # if input is avg, it is something like bytes/sec, etc., otherwise unit is bytes or seconds of latency, etc.
    def __init__(self, minWindow, maxWindow, inputType, bits=32):
        # NOTE(review): maxWindow must be strictly greater than minWindow,
        # otherwise subWindow is <= 0 and the division below fails — confirm callers.
        self.minWindow = minWindow
        self.maxWindow = maxWindow
        self.subWindow = maxWindow - minWindow
        self.maxWindows = math.ceil(minWindow / self.subWindow) + 1
        self.inputType = inputType
        self.winIndex = 0
        self.lock = MLock()
        # window statistics (one entry per live sub-window)
        self.NWin = []
        self.ts1Win = []
        self.tsNWin = []
        self.minValWin = []
        self.maxValWin = []
        self.cumu1Win = []
        self.cumuNWin = []
        self.N = 0  # num samples (1 to N)
        self.ts0 = None  # ts below lowest value (needed for left )
        self.ts1 = None
        self.tsN = None
        self.minVal = None
        self.maxVal = None
        self.cumu0 = 0
        self.cumu1 = None
        self.cumuNMinus1 = None
        self.cumuN = 0
        self.startTs = None
        self.prevTs = None
        self.prevData = 0  # raw data
        self.bits = bits

    def __dump__(self):
        # Serialize everything except the (unpicklable) lock; the enum member
        # is stored as its string form.
        o = copy.deepcopy(self.__dict__)
        o.pop("lock", None)
        o["inputType"] = str(o["inputType"])
        return o

    @staticmethod
    def __load__(o):
        x = SlidingMetrics(10, 20, Input.Value, bits=32)
        o = utils.smartLoad(o, True)
        for key, val in o.items():
            setattr(x, key, val)
        # NOTE(review): eval on serialized text — only safe for trusted input.
        x.inputType = eval(x.inputType)  # convert back
        return x

    def __serialize__(self, seenVals):
        o = {}
        for k, v in sorted(self.__dict__.items()):
            if k not in ["lock", "inputType"]:
                o[k] = utils.serialize(v, seenVals)
        o["inputType"] = str(self.inputType)
        return o

    def __deserialize__(self, o, toDict, seenVals):
        for k, v in sorted(o.items()):
            if k not in ["lock", "inputType"]:
                setattr(self, k, utils.deserialize(v, toDict, seenVals))
        # NOTE(review): eval on serialized text — only safe for trusted input.
        self.inputType = eval(o["inputType"])

    def _resetCumu(self):
        # Rebase all cumulative counters by cumu0 to keep them from growing
        # without bound as old windows are dropped.
        amtToSub = self.cumu0
        if amtToSub == 0:
            return
        for i in range(len(self.cumu1Win)):
            self.cumu1Win[i] -= amtToSub
            self.cumuNWin[i] -= amtToSub
        self.cumu0 -= amtToSub
        self.cumu1 -= amtToSub
        self.cumuNMinus1 -= amtToSub
        self.cumuN -= amtToSub

    # returns cumulative and value
    def _setCumuVal(self, data):
        if self.inputType == Input.NBitCounter:
            if data < self.prevData:  # counter overflow
                data += (1 << self.bits)
            val = data - self.prevData
        elif self.inputType == Input.Counter:
            val = data - self.prevData
        elif self.inputType == Input.Value:
            val = data
        self.prevData = data
        cumu = self.cumuN + val
        return val, cumu

    def popWindow(self):
        # Drop the oldest sub-window and refresh the aggregate statistics.
        N0 = self.NWin.pop(0)
        self.N -= N0
        self.ts1Win.pop(0)
        self.ts1 = self.ts1Win[0]
        self.ts0 = self.tsNWin.pop(0)
        self.minValWin.pop(0)
        self.minVal = min(self.minValWin)
        self.maxValWin.pop(0)
        self.maxVal = max(self.maxValWin)
        self.cumu1Win.pop(0)
        self.cumu1 = self.cumu1Win[0]
        self.cumu0 = self.cumuNWin.pop(0)

    def _addHelper(self, ts, data):
        """Insert one sample; returns False if ts goes backwards (rejected)."""
        if self.startTs is None:
            self.startTs = ts
        ts = ts - self.startTs  # normalize to start at zero
        if self.prevTs is not None and ts < self.prevTs:
            return False
        self.prevTs = ts
        val, cumu = self._setCumuVal(data)
        if ts >= self.winIndex * self.subWindow:
            # new window
            self.winIndex += 1
            self.NWin.append(1)
            self.ts1Win.append(ts)
            self.tsNWin.append(ts)
            self.minValWin.append(val)
            self.maxValWin.append(val)
            self.cumu1Win.append(cumu)
            self.cumuNWin.append(cumu)
        else:
            # add to current window (last window)
            self.NWin[-1] += 1
            self.tsNWin[-1] = ts
            self.minValWin[-1] = min(self.minValWin[-1], val)
            self.maxValWin[-1] = max(self.maxValWin[-1], val)
            self.cumuNWin[-1] = cumu
        # pop oldest window as new window is added
        bWindowMoved = False
        if len(self.ts1Win) > self.maxWindows:
            self.popWindow()
            bWindowMoved = True
        if self.ts1 is None:
            self.ts1 = ts
        while ts - self.ts1 >= self.maxWindow:
            self.popWindow()
            bWindowMoved = True
        self.N += 1
        self.tsN = ts
        self.minVal = noneMin(self.minVal, val)
        self.maxVal = noneMax(self.maxVal, val)
        if self.cumu1 is None:
            self.cumu1 = cumu
        self.cumuNMinus1 = self.cumuN
        self.cumuN = cumu
        #print("Cumu: {0} {1} {2} {3}".format(self.cumu0, self.cumu1, self.cumuNMinus1, self.cumuN))
        if bWindowMoved:
            self._resetCumu()
        return True

    def add(self, ts, data):
        with self.lock:
            return self._addHelper(ts, data)

    def lockTryNan(self, fn):
        # Run fn under the lock; 0.0 on division-by-zero, NaN on anything else.
        with self.lock:
            try:
                return fn()
            except ZeroDivisionError:
                return 0.0
            except Exception:
                return float('nan')

    # given data points have timestamp which is left end of interval
    def avgL(self):
        with self.lock:
            if (self.N - 1) == 0 or (self.tsN == self.ts1):
                return 0.0
            else:
                # N-1 points, N-1 intervals
                return (self.cumuNMinus1 - self.cumu0) / (self.tsN - self.ts1)

    def avgR(self):
        with self.lock:
            if self.ts0 is None:
                if (self.N - 1) <= 0 or (self.tsN == self.ts1):
                    return 0.0
                else:
                    # N-1 points, N-1 intervals
                    return (self.cumuN - self.cumu1) / (self.tsN - self.ts1)
            else:
                if self.N == 0 or (self.tsN == self.ts0):
                    return 0.0
                else:
                    # N points, N intervals
                    return (self.cumuN - self.cumu0) / (self.tsN - self.ts0)

    def avgN(self):
        with self.lock:
            if self.N == 0:
                return 0.0
            else:
                return (self.cumuN - self.cumu0) / self.N  # number of measurements

    def avgNumL(self):
        with self.lock:
            if (self.N - 1) == 0 or (self.tsN == self.ts1):
                return 0.0
            else:
                return (self.N - 1) / (self.tsN - self.ts1)  # fixed: was bare 'N'

    def avgNumR(self):
        with self.lock:
            if self.ts0 is None:
                if (self.N - 1) <= 0 or (self.tsN == self.ts1):
                    return 0.0
                else:
                    return (self.N - 1) / (self.tsN - self.ts1)  # fixed: was bare 'N'
            else:
                if self.N == 0 or (self.tsN == self.ts0):
                    return 0.0
                else:
                    return self.N / (self.tsN - self.ts0)  # fixed: was bare 'N'

    def windowL(self):
        return self.lockTryNan(lambda: self.tsN - self.ts1)

    def windowR(self):
        with self.lock:
            if self.ts0 is None:
                return self.tsN - self.ts1
            else:
                return self.tsN - self.ts0
# Register the class with the project's serialization framework.
utils.registerEval('SlidingMetrics', SlidingMetrics)
utils.registerCreate("SlidingMetrics", lambda : SlidingMetrics(10, 20, Input.Value, bits=32))

# ============================
# Testing
from numpy import random

if __name__ == "__main__":
    # Randomized self-test: feed N samples and, at each step, compare the
    # incremental statistics against a brute-force recomputation over an
    # explicitly maintained window list.
    window = []
    N = 100000
    s = SlidingMetrics(9.0, 10.0, Input.Value)
    subWindow = 1.0
    numWindows = 10
    ts = 0
    r = random.RandomState(4532312)  # fixed seed for reproducibility
    lastPopped = None
    valMin = 0
    valMax = 20
    tsDelta = 0.2
    tsDeltaRand = 0.03
    for i in range(N):
        val = r.uniform(valMin, valMax)
        s.add(ts, val)
        window.append((ts, val))
        # remove from window
        curWindow = math.floor(ts/subWindow)
        firstWindow = max(0, curWindow - numWindows + 1)
        firstTs = firstWindow * subWindow
        while len(window) > 0:
            (t, v) = window[0]
            if t < firstTs:
                lastPopped = window.pop(0)
            else:
                break
        #print("{0} {1}".format(len(window), window))
        # compare
        if lastPopped is not None:
            t0 = lastPopped[0]
            rStart = 0
        else:
            t0 = None
            rStart = 1
        # Brute-force left-anchored average over all but the last point.
        sumL = 0
        for j in range(0, len(window)-1):
            sumL += window[j][1]
        if len(window)<=1:
            avgWinL = 0.0
        else:
            avgWinL = sumL / (window[-1][0] - window[0][0])
        # Brute-force right-anchored average.
        sumR = 0
        for j in range(rStart, len(window)):
            sumR += window[j][1]
        if len(window)<=1:
            avgWinR = 0.0
        elif rStart==0:
            avgWinR = sumR / (window[-1][0] - t0)
        else:
            avgWinR = sumR / (window[-1][0] - window[0][0])
        avgWinN = (sumL + window[-1][1]) / len(window)
        maxWin = max(window, key=lambda x: x[1])[1]
        minWin = min(window, key=lambda x: x[1])[1]
        if False:  # verbose per-step debug output, disabled
            print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
            print("L: {0} {1}".format(avgWinL, s.avgL()))
            print("R: {0} {1}".format(avgWinR, s.avgR()))
            print("N: {0} {1}".format(avgWinN, s.avgN()))
        error = abs(avgWinL-s.avgL()) + abs(avgWinR-s.avgR()) + abs(avgWinN-s.avgN())
        errorMinMax = abs(maxWin-s.maxVal) + abs(minWin-s.minVal)
        print("ERROR: {0:20.15f}\t ERRORMINMAX: {1:20.15f}".format(error, errorMinMax), end='\r')
        # Report any disagreement beyond relative float tolerance.
        if (abs(avgWinL-s.avgL()) > abs(avgWinL)*0.0000001 or
            abs(avgWinR-s.avgR()) > abs(avgWinR)*0.0000001 or
            abs(avgWinN-s.avgN()) > abs(avgWinN)*0.0000001 or
            maxWin != s.maxVal or minWin != s.minVal):
            print("ERROR====")
            print("T: {0} W: {1} V: {2}".format(ts, curWindow, val))
            print("L: {0} {1}".format(avgWinL, s.avgL()))
            print("R: {0} {1}".format(avgWinR, s.avgR()))
            print("N: {0} {1}".format(avgWinN, s.avgN()))
            print("M: {0} {1}".format(maxWin, s.maxVal))
            print("m: {0} {1}".format(minWin, s.minVal))
        # Advance time by a jittered step.
        ts += r.uniform(tsDelta - tsDeltaRand, tsDelta + tsDeltaRand)
31.362117
112
0.518963
1,393
11,259
4.161522
0.168701
0.020528
0.028463
0.02415
0.267897
0.203036
0.186648
0.148525
0.13438
0.129032
0
0.036782
0.355271
11,259
358
113
31.449721
0.761813
0.081624
0
0.248299
0
0
0.029968
0
0
0
0
0
0
1
0.068027
false
0
0.027211
0.003401
0.214286
0.040816
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
997fb873a286d232b8c4f66af54539b644cf21c9
9,025
py
Python
oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/whoosh/analysis/ngrams.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    1. Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.

from whoosh.compat import text_type
from whoosh.compat import xrange
from whoosh.analysis.acore import Token
from whoosh.analysis.filters import Filter, LowercaseFilter
from whoosh.analysis.tokenizers import Tokenizer, RegexTokenizer


# Tokenizer

class NgramTokenizer(Tokenizer):
    """Splits input text into N-grams instead of words.

    >>> ngt = NgramTokenizer(4)
    >>> [token.text for token in ngt("hi there")]
    ["hi t", "i th", " the", "ther", "here"]

    Note that this tokenizer does NOT use a regular expression to extract
    words, so the grams emitted by it will contain whitespace, punctuation,
    etc. You may want to massage the input or add a custom filter to this
    tokenizer's output.

    Alternatively, if you only want sub-word grams without whitespace, you
    could combine a RegexTokenizer with NgramFilter instead.
    """

    __inittypes__ = dict(minsize=int, maxsize=int)

    def __init__(self, minsize, maxsize=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit this
            parameter, maxsize == minsize.
        """

        self.min = minsize
        self.max = maxsize or minsize

    def __eq__(self, other):
        # Equal only to another NgramTokenizer with identical size bounds.
        if self.__class__ is other.__class__:
            if self.min == other.min and self.max == other.max:
                return True
        return False

    def __call__(self, value, positions=False, chars=False, keeporiginal=False,
                 removestops=True, start_pos=0, start_char=0, mode='',
                 **kwargs):
        # Generator yielding one (reused) Token per gram. NOTE: the same Token
        # object is mutated and re-yielded each iteration, per whoosh convention.
        assert isinstance(value, text_type), "%r is not unicode" % value

        inlen = len(value)
        t = Token(positions, chars, removestops=removestops, mode=mode)
        pos = start_pos

        if mode == "query":
            # In query mode, emit only grams of one size so the query matches
            # what the index stored.
            size = min(self.max, inlen)
            for start in xrange(0, inlen - size + 1):
                end = start + size
                if end > inlen:
                    continue
                t.text = value[start:end]
                if keeporiginal:
                    t.original = t.text
                t.stopped = False
                if positions:
                    t.pos = pos
                if chars:
                    t.startchar = start_char + start
                    t.endchar = start_char + end
                yield t
                pos += 1
        else:
            # Index mode: emit every gram size from min to max at each start.
            for start in xrange(0, inlen - self.min + 1):
                for size in xrange(self.min, self.max + 1):
                    end = start + size
                    if end > inlen:
                        continue
                    t.text = value[start:end]
                    if keeporiginal:
                        t.original = t.text
                    t.stopped = False
                    if positions:
                        t.pos = pos
                    if chars:
                        t.startchar = start_char + start
                        t.endchar = start_char + end
                    yield t
                pos += 1


# Filter

class NgramFilter(Filter):
    """Splits token text into N-grams.

    >>> rext = RegexTokenizer()
    >>> stream = rext("hello there")
    >>> ngf = NgramFilter(4)
    >>> [token.text for token in ngf(stream)]
    ["hell", "ello", "ther", "here"]
    """

    __inittypes__ = dict(minsize=int, maxsize=int)

    def __init__(self, minsize, maxsize=None, at=None):
        """
        :param minsize: The minimum size of the N-grams.
        :param maxsize: The maximum size of the N-grams. If you omit this
            parameter, maxsize == minsize.
        :param at: If 'start', only take N-grams from the start of each word.
            if 'end', only take N-grams from the end of each word. Otherwise,
            take all N-grams from the word (the default).
        """

        self.min = minsize
        self.max = maxsize or minsize
        # Encode the anchor as an int: -1 = start-anchored, 1 = end-anchored,
        # 0 = all grams.
        self.at = 0
        if at == "start":
            self.at = -1
        elif at == "end":
            self.at = 1

    def __eq__(self, other):
        return other and self.__class__ is other.__class__\
        and self.min == other.min and self.max == other.max

    def __call__(self, tokens):
        assert hasattr(tokens, "__iter__")
        at = self.at
        for t in tokens:
            text = t.text
            # Words shorter than the minimum gram size produce nothing.
            if len(text) < self.min:
                continue

            chars = t.chars
            if chars:
                startchar = t.startchar
            # Token positions don't mean much for N-grams,
            # so we'll leave the token's original position
            # untouched.

            if t.mode == "query":
                # Query mode: emit grams of a single size, anchored per 'at'.
                size = min(self.max, len(t.text))
                if at == -1:
                    t.text = text[:size]
                    if chars:
                        t.endchar = startchar + size
                    yield t
                elif at == 1:
                    t.text = text[0 - size:]
                    if chars:
                        t.startchar = t.endchar - size
                    yield t
                else:
                    for start in xrange(0, len(text) - size + 1):
                        t.text = text[start:start + size]
                        if chars:
                            t.startchar = startchar + start
                            t.endchar = startchar + start + size
                        yield t
            else:
                # Index mode: emit all sizes in [min, max], anchored per 'at'.
                if at == -1:
                    limit = min(self.max, len(text))
                    for size in xrange(self.min, limit + 1):
                        t.text = text[:size]
                        if chars:
                            t.endchar = startchar + size
                        yield t
                elif at == 1:
                    if chars:
                        original_startchar = t.startchar
                    start = max(0, len(text) - self.max)
                    for i in xrange(start, len(text) - self.min + 1):
                        t.text = text[i:]
                        if chars:
                            t.startchar = original_startchar + i
                        yield t
                else:
                    for start in xrange(0, len(text) - self.min + 1):
                        for size in xrange(self.min, self.max + 1):
                            end = start + size
                            if end > len(text):
                                continue
                            t.text = text[start:end]
                            if chars:
                                t.startchar = startchar + start
                                t.endchar = startchar + end
                            yield t


# Analyzers

def NgramAnalyzer(minsize, maxsize=None):
    """Composes an NgramTokenizer and a LowercaseFilter.

    >>> ana = NgramAnalyzer(4)
    >>> [token.text for token in ana("hi there")]
    ["hi t", "i th", " the", "ther", "here"]
    """

    return NgramTokenizer(minsize, maxsize=maxsize) | LowercaseFilter()


def NgramWordAnalyzer(minsize, maxsize=None, tokenizer=None, at=None):
    # Tokenize into words first, lowercase them, then emit per-word N-grams.
    if not tokenizer:
        tokenizer = RegexTokenizer()
    return tokenizer | LowercaseFilter() | NgramFilter(minsize, maxsize, at=at)
37.920168
80
0.525651
1,018
9,025
4.598232
0.252456
0.012818
0.013672
0.02179
0.370647
0.340526
0.293954
0.293954
0.278146
0.233283
0
0.006229
0.395235
9,025
237
81
38.080169
0.851411
0.330305
0
0.522727
0
0
0.007713
0
0
0
0
0
0.015152
1
0.060606
false
0
0.037879
0.007576
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9982e59aaaa75c68bd3e08786c5defa1efb2e162
244
py
Python
demo/config.py
SDchao/nonebot
145d1787143584895375231210e30fdd3003d5bf
[ "MIT" ]
1
2021-01-19T03:57:23.000Z
2021-01-19T03:57:23.000Z
demo/config.py
coffiasd/nonebot
c02b9a4ccf61126aa81e3f86b06b44685461af09
[ "MIT" ]
null
null
null
demo/config.py
coffiasd/nonebot
c02b9a4ccf61126aa81e3f86b06b44685461af09
[ "MIT" ]
null
null
null
import re from nonebot.default_config import * HOST = '0.0.0.0' SECRET = 'abc' SUPERUSERS = {1002647525} NICKNAME = {'奶茶', '小奶茶'} COMMAND_START = {'', '/', '!', '/', '!', re.compile(r'^>+\s*')} COMMAND_SEP = {'/', '.', re.compile(r'#|::?')}
20.333333
63
0.54918
30
244
4.366667
0.7
0.045802
0.045802
0
0
0
0
0
0
0
0
0.066986
0.143443
244
11
64
22.181818
0.559809
0
0
0
0
0
0.131148
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9984268e196c7b6f74d5fc4959e1fab66773f0d3
1,471
py
Python
chempy/properties/tests/test_water_viscosity_korson_1969.py
bertiewooster/chempy
115adc1d570aa1631baff4374f3128ce23fa7776
[ "BSD-2-Clause" ]
340
2015-10-30T03:41:05.000Z
2022-03-31T05:01:17.000Z
chempy/properties/tests/test_water_viscosity_korson_1969.py
bertiewooster/chempy
115adc1d570aa1631baff4374f3128ce23fa7776
[ "BSD-2-Clause" ]
80
2015-11-03T13:31:23.000Z
2022-03-31T16:46:19.000Z
chempy/properties/tests/test_water_viscosity_korson_1969.py
bertiewooster/chempy
115adc1d570aa1631baff4374f3128ce23fa7776
[ "BSD-2-Clause" ]
75
2016-06-06T19:55:48.000Z
2022-03-19T23:39:13.000Z
import warnings from ..water_viscosity_korson_1969 import water_viscosity def test_water_viscosity(): warnings.filterwarnings("error") # Table II (p. 38): assert abs(water_viscosity(273.15 + 0) - 1.7916) < 5e-4 assert abs(water_viscosity(273.15 + 5) - 1.5192) < 5e-4 assert abs(water_viscosity(273.15 + 10) - 1.3069) < 5e-4 assert abs(water_viscosity(273.15 + 15) - 1.1382) < 5e-4 assert abs(water_viscosity(273.15 + 20) - 1.0020) < 5e-4 assert abs(water_viscosity(273.15 + 25) - 0.8903) < 5e-4 assert abs(water_viscosity(273.15 + 30) - 0.7975) < 5e-4 assert abs(water_viscosity(273.15 + 35) - 0.7195) < 5e-4 assert abs(water_viscosity(273.15 + 40) - 0.6532) < 5e-4 assert abs(water_viscosity(273.15 + 45) - 0.5963) < 5e-4 assert abs(water_viscosity(273.15 + 50) - 0.5471) < 5e-4 assert abs(water_viscosity(273.15 + 55) - 0.5042) < 5e-4 assert abs(water_viscosity(273.15 + 60) - 0.4666) < 5e-4 assert abs(water_viscosity(273.15 + 65) - 0.4334) < 5e-4 assert abs(water_viscosity(273.15 + 70) - 0.4039) < 5e-4 assert abs(water_viscosity(273.15 + 75) - 0.3775) < 5e-4 assert abs(water_viscosity(273.15 + 80) - 0.3538) < 5e-4 assert abs(water_viscosity(273.15 + 85) - 0.3323) < 5e-4 assert abs(water_viscosity(273.15 + 90) - 0.3128) < 5e-4 assert abs(water_viscosity(273.15 + 95) - 0.2949) < 6e-4 assert abs(water_viscosity(273.15 + 100) - 0.2783) < 2e-3 warnings.resetwarnings()
50.724138
61
0.656016
254
1,471
3.692913
0.26378
0.358209
0.313433
0.514925
0.688699
0.688699
0.658849
0.627932
0
0
0
0.250629
0.188987
1,471
28
62
52.535714
0.535624
0.011557
0
0
0
0
0.003444
0
0
0
0
0
0.807692
1
0.038462
true
0
0.076923
0
0.115385
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
6
99843f08508767bdb980b0e376ab8912b933a55a
1,235
py
Python
sa_analysis.py
CarryChang/-Customer_satisfaction_Analysis
1d0edc9035302f826909fd462eab92e2a15dcfd9
[ "Apache-2.0" ]
341
2018-12-21T08:00:52.000Z
2022-03-31T00:31:31.000Z
sa_analysis.py
CarryChang/-Customer_satisfaction_Analysis
1d0edc9035302f826909fd462eab92e2a15dcfd9
[ "Apache-2.0" ]
5
2019-03-20T05:36:54.000Z
2020-08-27T03:00:47.000Z
sa_analysis.py
CarryChang/-Customer_satisfaction_Analysis
1d0edc9035302f826909fd462eab92e2a15dcfd9
[ "Apache-2.0" ]
111
2019-01-22T13:50:42.000Z
2022-03-12T12:34:53.000Z
# -*- coding: utf-8 -*- from litNlp.predict import SA_Model_Predict import matplotlib.pyplot as plt from setting import * import numpy as np import os plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False def topic_sa_analysis(): sa_model = SA_Model_Predict(tokenize_path, sa_model_path_m, max_len=100) if not os.path.exists(topic_emotion_pic): os.mkdir(topic_emotion_pic) print(topic_emotion_pic+'文件夹已经建立,请查看当前文件路径') for key_word in topic_words_list.keys(): sa_analysis_(key_word, sa_model) def sa_analysis_(key_word, sa_model): print('{} 正在执行...'.format(key_word)) key_txt = open('{}/{}.txt'.format(topic_path, key_word), 'r', encoding='utf-8').readlines() sentiments_score_predict = sa_model.predict(key_txt) # 情感极性输出 sentiments_score_list = [i[1] for i in sentiments_score_predict] plt.hist(sentiments_score_list, bins=np.arange(0, 1, 0.01)) plt.xlabel("情感值") plt.ylabel("评论数目") plt.title(key_word+'-情感极性分布图') plt.savefig('{}/{}.png'.format(topic_emotion_pic, key_word)) plt.show() plt.close() print('{} 情感极性图完成'.format(key_word)) # if __name__ == '__main__': # # 添加多线程提升预测速度 # topic_sa_analysis()
34.305556
95
0.697166
181
1,235
4.430939
0.453039
0.069825
0.074813
0.042394
0.05985
0.05985
0
0
0
0
0
0.010547
0.155466
1,235
35
96
35.285714
0.758389
0.077733
0
0
0
0
0.10159
0
0
0
0
0
0
1
0.074074
false
0
0.185185
0
0.259259
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9985ca862cfcc11f8348e0629d58913ccb5353c8
22,570
py
Python
codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py
NehzUx/autodl
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
[ "Apache-2.0" ]
25
2018-09-26T14:07:11.000Z
2021-12-02T15:19:08.000Z
codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py
NehzUx/autodl
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
[ "Apache-2.0" ]
8
2018-11-23T15:35:28.000Z
2020-02-27T14:55:11.000Z
codalab_competition_bundle/AutoDL_starting_kit/AutoDL_simple_baseline_models/3dcnn_pytorch/model.py
NehzUx/autodl
c80fdc4b297ed1ec2b9e6911d313f1fe31d83cb9
[ "Apache-2.0" ]
5
2019-03-05T11:05:59.000Z
2020-01-08T13:05:35.000Z
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Modified by: Shangeth Rajaa, Zhengying Liu, Isabelle Guyon """An example of code submission for the AutoDL challenge. It implements 3 compulsory methods ('__init__', 'train' and 'test') and an attribute 'done_training' for indicating if the model will not proceed more training due to convergence or limited time budget. To create a valid submission, zip model.py together with other necessary files such as Python modules/packages, pre-trained weights, etc. The final zip file should not exceed 300MB. 
""" from torch.optim.lr_scheduler import ReduceLROnPlateau from torch.autograd import Variable import datetime import logging import numpy as np import os import sys import time import torch.utils.data as data_utils import torch import torch.nn as nn import torchvision import tensorflow as tf # seeding randomness for reproducibility np.random.seed(42) torch.manual_seed(1) # PyTorch Model class class TorchModel(nn.Module): def __init__(self, input_shape, output_dim): ''' 3D CNN Model with no of CNN layers depending on the input size''' super(TorchModel, self).__init__() self.conv = torch.nn.Sequential() cnn_ch = 16 if input_shape[1] == 1: # if num_channels = 1 self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, (1,3,3))) else: self.conv.add_module('cnn1', nn.Conv3d(input_shape[0], cnn_ch, 3)) self.conv.add_module('pool1', nn.MaxPool3d(2,2)) i = 2 while True: self.conv.add_module('cnn{}'.format(i), nn.Conv3d(cnn_ch * (i-1), cnn_ch * i, (1,3,3))) self.conv.add_module('pool{}'.format(i), nn.MaxPool3d(2,2)) i += 1 n_size, out_len = self.get_fc_size(input_shape) # no more CNN layers if Linear layers get input size < 1000 if n_size < 1000 or out_len[3] < 3 or out_len[3] < 3: break fc_size, _ = self.get_fc_size(input_shape) self.fc = nn.Linear(fc_size, output_dim) def forward_cnn(self, x): x = self.conv(x) return x def get_fc_size(self, input_shape): ''' function to get the size for Linear layers with given number of CNN layers ''' sample_input = Variable(torch.rand(1, *input_shape)) output_feat = self.forward_cnn(sample_input) out_shape = output_feat.shape n_size = output_feat.data.view(1, -1).size(1) return n_size, out_shape def forward(self, x): x = self.forward_cnn(x) x = x.view(x.size(0), -1) x = self.fc(x) return x # PyTorch Dataset to get data from tensorflow Dataset. 
class TFDataset(torch.utils.data.Dataset): def __init__(self, dataset, session, num_samples): super(TFDataset, self).__init__() self.dataset = dataset self.session = session self.num_samples = num_samples self.next_element = None self.reset() def reset(self): dataset = self.dataset iterator = dataset.make_one_shot_iterator() self.next_element = iterator.get_next() return self def __len__(self): return self.num_samples def __getitem__(self, index): session = self.session if self.session is not None else tf.Session() try: example, label = session.run(self.next_element) except tf.errors.OutOfRangeError: self.reset() example, label = session.run(self.next_element) return example.transpose(3,0,1,2), label class Model(): def __init__(self, metadata): """ Args: metadata: an AutoDLMetadata object. Its definition can be found in AutoDL_ingestion_program/dataset.py """ # Attribute necessary for ingestion program to stop evaluation process self.done_training = False self.metadata_ = metadata # Getting details of the data from meta data self.output_dim = self.metadata_.get_output_size() self.num_examples_train = self.metadata_.size() row_count, col_count = self.metadata_.get_matrix_size(0) channel = self.metadata_.get_num_channels(0) sequence_size = self.metadata_.get_sequence_size() self.num_train = self.metadata_.size() test_metadata_filename = self.metadata_.get_dataset_name()\ .replace('train', 'test') + '/metadata.textproto' self.num_test = [int(line.split(':')[1]) for line in open(test_metadata_filename, 'r').readlines() if 'sample_count' in line][0] # Getting the device available self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('Device Found = ', self.device, '\nMoving Model and Data into the device...') # Attributes for preprocessing self.default_image_size = (112,112) self.default_num_frames = 15 self.default_shuffle_buffer = 100 if row_count == -1 or col_count == -1 : row_count = self.default_image_size[0] col_count = 
self.default_image_size[1] if sequence_size == -1: sequence_size = self.default_num_frames self.input_shape = (channel, sequence_size, row_count, col_count) print('\n\nINPUT SHAPE = ', self.input_shape) # getting an object for the PyTorch Model class for Model Class # use CUDA if available self.pytorchmodel = TorchModel(self.input_shape, self.output_dim) print('\nPyModel Defined\n') print(self.pytorchmodel) self.pytorchmodel.to(self.device) # PyTorch Optimizer and Criterion self.criterion = nn.BCEWithLogitsLoss() self.optimizer = torch.optim.Adam(self.pytorchmodel.parameters(), lr=1e-2) # Attributes for managing time budget # Cumulated number of training steps self.birthday = time.time() self.total_train_time = 0 self.cumulated_num_steps = 0 self.estimated_time_per_step = None self.total_test_time = 0 self.cumulated_num_tests = 0 self.estimated_time_test = None self.trained = False # PYTORCH # Critical number for early stopping self.num_epochs_we_want_to_train = 100 # no of examples at each step/batch self.train_batch_size = 30 self.test_batch_size = 30 # Tensorflow sessions to get the data from TFDataset self.train_session = tf.Session() self.test_session = tf.Session() def train(self, dataset, remaining_time_budget=None): """Train this algorithm on the tensorflow |dataset|. This method will be called REPEATEDLY during the whole training/predicting process. So your `train` method should be able to handle repeated calls and hopefully improve your model performance after each call. **************************************************************************** **************************************************************************** IMPORTANT: the loop of calling `train` and `test` will only run if self.done_training = False (the corresponding code can be found in ingestion.py, search 'M.done_training') Otherwise, the loop will go on until the time budget is used up. 
Please pay attention to set self.done_training = True when you think the model is converged or when there is not enough time for next round of training. **************************************************************************** **************************************************************************** Args: dataset: a `tf.data.Dataset` object. Each of its examples is of the form (example, labels) where `example` is a dense 4-D Tensor of shape (sequence_size, row_count, col_count, num_channels) and `labels` is a 1-D Tensor of shape (output_dim,). Here `output_dim` represents number of classes of this multilabel classification task. IMPORTANT: some of the dimensions of `example` might be `None`, which means the shape on this dimension might be variable. In this case, some preprocessing technique should be applied in order to feed the training of a neural network. For example, if an image dataset has `example` of shape (1, None, None, 3) then the images in this datasets may have different sizes. On could apply resizing, cropping or padding in order to have a fixed size input tensor. remaining_time_budget: time remaining to execute train(). The method should keep track of its execution time to avoid exceeding its time budget. If remaining_time_budget is None, no time budget is imposed. """ steps_to_train = self.get_steps_to_train(remaining_time_budget) if steps_to_train <= 0: logger.info("Not enough time remaining for training. " + "Estimated time for training per step: {:.2f}, "\ .format(self.estimated_time_per_step) + "but remaining time budget is: {:.2f}. 
"\ .format(remaining_time_budget) + "Skipping...") self.done_training = True else: msg_est = "" if self.estimated_time_per_step: msg_est = "estimated time for this: " +\ "{:.2f} sec.".format(steps_to_train * self.estimated_time_per_step) logger.info("Begin training for another {} steps...{}".format(steps_to_train, msg_est)) # If PyTorch dataloader for training set doen't already exists, get the train dataloader if not hasattr(self, 'trainloader'): self.trainloader = self.get_dataloader(dataset, self.num_train, batch_size=self.train_batch_size) train_start = time.time() # Training loop self.trainloop(self.criterion, self.optimizer, steps=steps_to_train) train_end = time.time() # Update for time budget managing train_duration = train_end - train_start self.total_train_time += train_duration self.cumulated_num_steps += steps_to_train self.estimated_time_per_step = self.total_train_time / self.cumulated_num_steps logger.info("{} steps trained. {:.2f} sec used. ".format(steps_to_train, train_duration) +\ "Now total steps trained: {}. ".format(self.cumulated_num_steps) +\ "Total time used for training: {:.2f} sec. ".format(self.total_train_time) +\ "Current estimated time per step: {:.2e} sec.".format(self.estimated_time_per_step)) def test(self, dataset, remaining_time_budget=None): """Test this algorithm on the tensorflow |dataset|. Args: Same as that of `train` method, except that the `labels` will be empty. Returns: predictions: A `numpy.ndarray` matrix of shape (sample_count, output_dim). here `sample_count` is the number of examples in this dataset as test set and `output_dim` is the number of labels to be predicted. The values should be binary or in the interval [0,1]. """ if self.done_training: return None if self.choose_to_stop_early(): logger.info("Oops! 
Choose to stop early for next call!") self.done_training = True test_begin = time.time() if remaining_time_budget and self.estimated_time_test and\ self.estimated_time_test > remaining_time_budget: logger.info("Not enough time for test. " +\ "Estimated time for test: {:.2e}, ".format(self.estimated_time_test) +\ "But remaining time budget is: {:.2f}. ".format(remaining_time_budget) +\ "Stop train/predict process by returning None.") return None msg_est = "" if self.estimated_time_test: msg_est = "estimated time: {:.2e} sec.".format(self.estimated_time_test) logger.info("Begin testing..." + msg_est) # If PyTorch dataloader for training set doen't already exists, get the test dataloader if not hasattr(self, 'testloader'): self.testloader = self.get_dataloader_test(dataset, self.num_test, self.test_batch_size) # get predictions from the test loop predictions = self.testloop(self.testloader) test_end = time.time() # Update some variables for time management test_duration = test_end - test_begin self.total_test_time += test_duration self.cumulated_num_tests += 1 self.estimated_time_test = self.total_test_time / self.cumulated_num_tests logger.info("[+] Successfully made one prediction. {:.2f} sec used. ".format(test_duration) +\ "Total time used for testing: {:.2f} sec. ".format(self.total_test_time) +\ "Current estimated time for test: {:.2e} sec.".format(self.estimated_time_test)) return predictions ############################################################################## #### Above 3 methods (__init__, train, test) should always be implemented #### ############################################################################## def preprocess_tensor_4d(self, tensor_4d): """Preprocess a 4-D tensor (only when some dimensions are `None`, i.e. non-fixed). The output tensor wil have fixed, known shape. Args: tensor_4d: A Tensor of shape [sequence_size, row_count, col_count, num_channels] where some dimensions might be `None`. 
Returns: A 4-D Tensor with fixed, known shape. """ tensor_4d_shape = tensor_4d.shape logger.info("Tensor shape before preprocessing: {}".format(tensor_4d_shape)) if tensor_4d_shape[0] > 0 and tensor_4d_shape[0] < 10: num_frames = tensor_4d_shape[0] else: num_frames = self.default_num_frames if tensor_4d_shape[1] > 0: new_row_count = tensor_4d_shape[1] else: new_row_count=self.default_image_size[0] if tensor_4d_shape[2] > 0: new_col_count = tensor_4d_shape[2] else: new_col_count=self.default_image_size[1] if not tensor_4d_shape[0] > 0: logger.info("Detected that examples have variable sequence_size, will " + "randomly crop a sequence with num_frames = " + "{}".format(num_frames)) tensor_4d = crop_time_axis(tensor_4d, num_frames=num_frames) if not tensor_4d_shape[1] > 0 or not tensor_4d_shape[2] > 0: logger.info("Detected that examples have variable space size, will " + "resize space axes to (new_row_count, new_col_count) = " + "{}".format((new_row_count, new_col_count))) tensor_4d = resize_space_axes(tensor_4d, new_row_count=new_row_count, new_col_count=new_col_count) logger.info("Tensor shape after preprocessing: {}".format(tensor_4d.shape)) return tensor_4d def get_dataloader(self, tf_dataset, num_images, batch_size): ''' Get the training PyTorch dataloader Args: tf_dataset: Tensorflow Dataset which is given in train function num_images : number of examples in train data batch_size : batch_size for training set Return: dataloader: PyTorch Training Dataloader ''' tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1])) train_dataset = TFDataset(tf_dataset, self.train_session, num_images) dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=self.train_batch_size, shuffle=True, drop_last=False ) return dataloader def get_dataloader_test(self, tf_dataset, num_images, batch_size): ''' Get the test PyTorch dataloader Args: tf_dataset: Tensorflow Dataset which is given in test function num_images : number of examples in test data 
batch_size : batch_size for test set Return: dataloader: PyTorch Test Dataloader ''' tf_dataset = tf_dataset.map(lambda *x: (self.preprocess_tensor_4d(x[0]), x[1])) dataset = TFDataset(tf_dataset, self.test_session, num_images) dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size) return dataloader def trainloop(self, criterion, optimizer, steps): ''' Training loop with no of given steps Args: criterion: PyTorch Loss function Optimizer: PyTorch optimizer for training steps: No of steps to train the model Return: None, updates the model parameters ''' self.pytorchmodel.train() data_iterator = iter(self.trainloader) for i in range(steps): try: images, labels = next(data_iterator) except StopIteration: data_iterator = iter(self.trainloader) images, labels = next(data_iterator) images = images.float().to(self.device) labels = labels.float().to(self.device) optimizer.zero_grad() log_ps = self.pytorchmodel(images) loss = criterion(log_ps, labels) if hasattr(self, 'scheduler'): self.scheduler.step(loss) loss.backward() optimizer.step() def get_steps_to_train(self, remaining_time_budget): """Get number of steps for training according to `remaining_time_budget`. The strategy is: 1. If no training is done before, train for 10 steps (ten batches); 2. Otherwise, estimate training time per step and time needed for test, then compare to remaining time budget to compute a potential maximum number of steps (max_steps) that can be trained within time budget; 3. Choose a number (steps_to_train) between 0 and max_steps and train for this many steps. Double it each time. 
""" if not remaining_time_budget: # This is never true in the competition anyway remaining_time_budget = 1200 # if no time limit is given, set to 20min if not self.estimated_time_per_step: steps_to_train = 10 else: if self.estimated_time_test: tentative_estimated_time_test = self.estimated_time_test else: tentative_estimated_time_test = 50 # conservative estimation for test max_steps = int((remaining_time_budget - tentative_estimated_time_test) / self.estimated_time_per_step) max_steps = max(max_steps, 1) if self.cumulated_num_tests < np.log(max_steps) / np.log(2): steps_to_train = int(2 ** self.cumulated_num_tests) # Double steps_to_train after each test else: steps_to_train = 0 return steps_to_train def testloop(self, dataloader): ''' Args: dataloader: PyTorch test dataloader Return: preds: Predictions of the model as Numpy Array. ''' preds = [] with torch.no_grad(): self.pytorchmodel.eval() for images, _ in dataloader: if torch.cuda.is_available(): images = images.float().cuda() else: images = images.float() log_ps = self.pytorchmodel(images) pred = torch.sigmoid(log_ps).data > 0.5 preds.append(pred.cpu().numpy()) preds = np.vstack(preds) return preds def choose_to_stop_early(self): """The criterion to stop further training (thus finish train/predict process). """ # return self.cumulated_num_tests > 10 # Limit to make 10 predictions # return np.random.rand() < self.early_stop_proba batch_size = self.train_batch_size num_examples = self.metadata_.size() num_epochs = self.cumulated_num_steps * batch_size / num_examples logger.info("Model already trained for {} epochs.".format(num_epochs)) return num_epochs > self.num_epochs_we_want_to_train # Train for at least certain number of epochs then stop #### Other helper functions def crop_time_axis(tensor_4d, num_frames, begin_index=None): """Given a 4-D tensor, take a slice of length `num_frames` on its time axis. 
Args: tensor_4d: A Tensor of shape [sequence_size, row_count, col_count, num_channels] num_frames: An integer representing the resulted chunk (sequence) length begin_index: The index of the beginning of the chunk. If `None`, chosen randomly. Returns: A Tensor of sequence length `num_frames`, which is a chunk of `tensor_4d`. """ # pad sequence if not long enough pad_size = tf.maximum(num_frames - tf.shape(tensor_4d)[0], 0) padded_tensor = tf.pad(tensor_4d, ((0, pad_size), (0, 0), (0, 0), (0, 0))) # If not given, randomly choose the beginning index of frames if not begin_index: maxval = tf.shape(padded_tensor)[0] - num_frames + 1 begin_index = tf.random.uniform([1], minval=0, maxval=maxval, dtype=tf.int32) begin_index = tf.stack([begin_index[0], 0, 0, 0], name='begin_index') sliced_tensor = tf.slice(padded_tensor, begin=begin_index, size=[num_frames, -1, -1, -1]) return sliced_tensor def resize_space_axes(tensor_4d, new_row_count, new_col_count): """Given a 4-D tensor, resize space axes to have target size. Args: tensor_4d: A Tensor of shape [sequence_size, row_count, col_count, num_channels]. new_row_count: An integer indicating the target row count. new_col_count: An integer indicating the target column count. Returns: A Tensor of shape [sequence_size, target_row_count, target_col_count]. 
""" resized_images = tf.image.resize_images(tensor_4d, size=(new_row_count, new_col_count)) return resized_images def get_logger(verbosity_level): """Set logging format to something like: 2019-04-25 12:52:51,924 INFO model.py: <message> """ logger = logging.getLogger(__file__) logging_level = getattr(logging, verbosity_level) logger.setLevel(logging_level) formatter = logging.Formatter( fmt='%(asctime)s %(levelname)s %(filename)s: %(message)s') stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging_level) stdout_handler.setFormatter(formatter) stderr_handler = logging.StreamHandler(sys.stderr) stderr_handler.setLevel(logging.WARNING) stderr_handler.setFormatter(formatter) logger.addHandler(stdout_handler) logger.addHandler(stderr_handler) logger.propagate = False return logger logger = get_logger('INFO')
39.946903
112
0.667036
3,082
22,570
4.687541
0.182349
0.01772
0.021181
0.014536
0.234443
0.165294
0.120786
0.088807
0.066657
0.056413
0
0.014067
0.225166
22,570
564
113
40.017731
0.812043
0.349269
0
0.111821
0
0
0.094032
0
0
0
0
0
0
1
0.067093
false
0
0.041534
0.003195
0.175719
0.01278
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99870a0387d9be5139c471e0259b81bcbfb1dce0
1,169
py
Python
scriptwakeup.py
mekhala1808-cmis/mekhala1808-cmis-cs2
9ec83d8a1f38dbfaebc03cea76ce39483b71c828
[ "CC0-1.0" ]
null
null
null
scriptwakeup.py
mekhala1808-cmis/mekhala1808-cmis-cs2
9ec83d8a1f38dbfaebc03cea76ce39483b71c828
[ "CC0-1.0" ]
null
null
null
scriptwakeup.py
mekhala1808-cmis/mekhala1808-cmis-cs2
9ec83d8a1f38dbfaebc03cea76ce39483b71c828
[ "CC0-1.0" ]
null
null
null
def wakeup(): print "This program will ask you for 5 integer or float values." + "\n" + "It will calculate the average of all values from 0 inclusive to 10 exclusive." + "\n" + "It will print out whether the resulting average is even or odd." def numbers(): n1 = float(raw_input("n0: ")) n2 = float(raw_input("n1: ")) n3 = float(raw_input("n2: ")) n4 = float(raw_input("n3: ")) n5 = float(raw_input("n4: ")) numbers () def main(): if n1 < 0 or n1 >= 10: print str(n1)+(" is out of range") if n2 < 0 or n1 >= 10: print str(n2)+(" is out of range") if n3 < 0 or n1 >= 10: print str(n3)+(" is out of range") if n4 < 0 or n1 >= 10: print str(n4)+(" is out of range") if n5 < 0 or n1 >= 10: print str(n5)+(" is out of range") main () def avgvalue(): avgvalue = int(n1) + int(n2) + int(n3) + int(n4) + int(n5) / 5 avgvalue() def evenodd(): greatestnumber == bool(str(greatestnumber)) def result(avgvalue, intpart, evenodd): result + """ The average is {} The intger part of the average is {}. THe integer part is {}. """.format(avgvalue, intpart, evenodd) return result
27.186047
232
0.588537
185
1,169
3.691892
0.308108
0.058565
0.095168
0.051245
0.191801
0.10981
0
0
0
0
0
0.057471
0.255774
1,169
42
233
27.833333
0.727586
0
0
0
0
0
0.326199
0
0
0
0
0
0
0
null
null
0
0
null
null
0.181818
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99879b528d7993063b417f1a859d11a6963e5268
1,726
py
Python
2.linked-list/single-linked-list/remove-nth-from-end/test.py
tienduy-nguyen/coderust
d0884d7b3ced0d01e24b210284b9370432964274
[ "MIT" ]
null
null
null
2.linked-list/single-linked-list/remove-nth-from-end/test.py
tienduy-nguyen/coderust
d0884d7b3ced0d01e24b210284b9370432964274
[ "MIT" ]
null
null
null
2.linked-list/single-linked-list/remove-nth-from-end/test.py
tienduy-nguyen/coderust
d0884d7b3ced0d01e24b210284b9370432964274
[ "MIT" ]
null
null
null
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, val, next=None):
        self.val = val
        self.next = next


class LinkedList:
    """Singly linked list supporting removal of the n-th node from the end."""

    def __init__(self):
        self.head = None

    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end using two pointers.

        Bug fix: when n equals the list length, the original left `fast`
        as None after the advance loop and then crashed on `fast.next`;
        that case means the head itself must be removed.
        """
        fast = slow = head
        # Advance `fast` n nodes ahead of `slow`.
        for _ in range(n):
            if not fast:
                # n is larger than the list length: nothing to remove.
                self.printNode(head)
                return head
            fast = fast.next
        if fast is None:
            # Exactly n nodes in the list: drop the head node.
            return head.next
        # Move both pointers until `fast` reaches the last node; `slow`
        # then sits just before the node to delete.
        while fast.next:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head

    def remove_nth_from_end2(self, head, n):
        """Recursive variant: count nodes on the way back up and unlink
        the one whose from-the-end index equals n.  Debug prints removed.
        """
        def remove(node, n):
            if node is None:
                return node, 0
            child, count = remove(node.next, n)
            count += 1
            node.next = child
            if count == n:
                # This node is the n-th from the end: splice it out.
                node = node.next
            return node, count

        return remove(head, n)[0]  # [0] drops the count, keeping the head

    def count(self, head):
        """Return the number of nodes reachable from `head`."""
        def count_size(node):
            if node is None:
                return 0
            return count_size(node.next) + 1

        return count_size(head)

    def shift(self, val):
        """Prepend a new node carrying `val`."""
        new_node = ListNode(val)
        new_node.next = self.head
        self.head = new_node

    def printNode(self, head):
        """Print all node values from `head` as a list."""
        result = []
        if head is None:
            print("List node null")
        while head:
            result.append(head.val)
            head = head.next
        print(result)


if __name__ == '__main__':
    lk = LinkedList()
    for v in (5, 8, 6, 4, 3, 2, 1, 0):
        lk.shift(v)
    lk.printNode(lk.head)
    ans = lk.remove_nth_from_end2(lk.head, 2)
    lk.printNode(ans)
22.710526
73
0.586906
252
1,726
3.904762
0.206349
0.056911
0.02439
0.036585
0.036585
0
0
0
0
0
0
0.013104
0.292584
1,726
76
74
22.710526
0.792793
0.049247
0
0.096774
0
0
0.032396
0
0
0
0
0
0
1
0.145161
false
0
0
0
0.274194
0.129032
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9988fe53a17ec55168391346f0d02850a22177c1
8,800
py
Python
meiduo_mall/meiduo_mall/apps/users/views.py
Zasling/meiduo_mall33
ec55597758d5052b311d65aee44533b001f6ddd8
[ "MIT" ]
1
2019-04-12T08:56:29.000Z
2019-04-12T08:56:29.000Z
meiduo_mall/meiduo_mall/apps/users/views.py
Zasling/meiduo_mall33
ec55597758d5052b311d65aee44533b001f6ddd8
[ "MIT" ]
null
null
null
meiduo_mall/meiduo_mall/apps/users/views.py
Zasling/meiduo_mall33
ec55597758d5052b311d65aee44533b001f6ddd8
[ "MIT" ]
1
2020-03-30T14:35:22.000Z
2020-03-30T14:35:22.000Z
import re

from django.conf import settings
from django.http import HttpResponse
from random import randint

from django_redis import get_redis_connection
from itsdangerous import TimedJSONWebSignatureSerializer as TJS
from rest_framework.views import APIView
from rest_framework.generics import CreateAPIView, RetrieveAPIView, UpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_jwt.views import ObtainJSONWebToken

from celery_tasks.sms_code.tasks import send_sms_code
from goods.models import SKU
from goods.serializers import SKUListSerializers
from meiduo_mall.libs.captcha.captcha import captcha
from oauth import constants
from users.models import User
from users.serializers import UserSerializers, UserDetailSerializer, EmailSerializer, AddUserBrowsingHistorySerializer
from users.utils import merge_cart_cookie_to_redis


# Send an SMS verification code
class SmsCodeView(APIView):

    def get(self, request, mobile):
        """Generate a 6-digit code for `mobile`, store it in redis and
        dispatch it asynchronously via celery.  Rate-limited to one
        request per minute per mobile number."""
        conn = get_redis_connection('sms_code')
        # Reject if a code was already sent within the last minute.
        flag = conn.get('sms_code_flag_%s' % mobile)
        if flag:
            return Response({'error': '请求过于频繁'}, status=400)
        # Generate the verification code (zero-padded to 6 digits).
        sms_code = '%06d' % randint(0, 999999)
        print(sms_code)  # dev aid: surfaces the code in the console
        # Save code + rate-limit flag to redis in one round trip.
        pl = conn.pipeline()
        pl.setex('sms_code_%s' % mobile, 300, sms_code)
        # One-minute flag guarding against repeated sends.
        pl.setex('sms_code_flag_%s' % mobile, 60, 'a')
        pl.execute()
        # Send asynchronously through celery.
        send_sms_code.delay(mobile, sms_code)
        return Response({'message': 'ok'})


# Check whether a username is already taken
class UserNameView(APIView):

    def get(self, request, username):
        count = User.objects.filter(username=username).count()
        return Response({
            'username': username,
            'count': count
        })


# Check whether a mobile number is already registered
class MobileView(APIView):

    def get(self, request, mobile):
        count = User.objects.filter(mobile=mobile).count()
        return Response({
            'mobile': mobile,
            'count': count
        })


# User registration
class UsersView(CreateAPIView):
    serializer_class = UserSerializers


# User-centre profile display
class UserDetailView(RetrieveAPIView):
    serializer_class = UserDetailSerializer
    permission_classes = [IsAuthenticated]

    def get_object(self):
        # The detail view always shows the authenticated user, so no pk
        # lookup is needed.
        return self.request.user


# Send the email-verification message
class EmailView(UpdateAPIView):
    serializer_class = EmailSerializer
    permission_classes = [IsAuthenticated]

    def get_object(self, *args, **kwargs):
        # The default implementation needs a pk from the URL; the frontend
        # does not send one, so always operate on the current user.
        return self.request.user


# Verify an email address from the emailed token
class VerifyEmailView(APIView):

    def get(self, request):
        token = request.query_params.get('token')
        if not token:
            return Response({'error': '缺少token'}, status=400)
        tjs = TJS(settings.SECRET_KEY, 300)
        try:
            data = tjs.loads(token)
        except Exception:
            return Response({'errors': '无效token'}, status=400)
        username = data['name']
        # Bug fix: the original called User.objects.get(username), passing
        # the value positionally, which raises TypeError; the lookup must
        # be a keyword argument, and a missing user must not 500.
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'}, status=400)
        user.email_active = True
        user.save()
        return Response({
            'message': 'ok'
        })


# Save / list the user's product browsing history
class UserBrowsingHistoryView(CreateAPIView):
    serializer_class = AddUserBrowsingHistorySerializer
    # permission_classes = [IsAuthenticated]

    def get(self, request):
        user = request.user
        conn = get_redis_connection('history')
        # Fetch the most recent browsing records (lrange 0..6 -> up to 7).
        sku_ids = conn.lrange('history_%s' % user.id, 0, 6)
        # Resolve the SKU ids to model instances.
        skus = SKU.objects.filter(id__in=sku_ids)
        ser = SKUListSerializers(skus, many=True)
        return Response(ser.data)


# Override JWT login so the anonymous cart is merged on login
class UserAuthorizeView(ObtainJSONWebToken):

    def post(self, request, *args, **kwargs):
        response = super().post(request, *args, **kwargs)
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            user = serializer.object.get('user') or request.user
            # Merge the cookie cart into the redis cart for this user.
            response = merge_cart_cookie_to_redis(request, user, response)
        return response


# Change password (while logged in)
class PasswordResetView(UpdateAPIView):

    def put(self, request, user_id):
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return Response({'error': '数据库异常'})
        data = request.data
        # 1. Pull the submitted passwords.
        old_password = data['old_password']
        password = data['password']
        password2 = data['password2']
        # 2. The old password must match.
        if not user.check_password(old_password):
            return Response({'error': '密码错误'})
        # 3. Both new passwords must agree.
        if password != password2:
            return Response({'error': '两次密码输入不一致'})
        # 4. Persist the new password (hashed by set_password).
        user.set_password(password)
        user.save()
        return Response({'message': 'ok'})


# Generate an image captcha
class ImageCodeView(APIView):
    """
    图片验证码
    """

    def get(self, request, image_code_id):
        name, text, image = captcha.generate_captcha()
        redis_conn = get_redis_connection("img_codes")
        redis_conn.setex("img_%s" % image_code_id, constants.IMAGE_CODE_REDIS_EXPIRES, text)
        # The response is raw image bytes, so bypass DRF content negotiation
        # and return a plain Django HttpResponse.
        return HttpResponse(image)


# Forgot password - step 1:
# validate the image captcha and check the account exists
class CheckUsernameVIew(APIView):

    def get(self, request, username):
        image_code = request.GET.get('text')
        image_code_id = request.GET.get('image_code_id')
        # Validate the image captcha.
        conn = get_redis_connection("img_codes")
        if not image_code:
            return Response({'error': '图片验证码错误'})
        img_code = conn.get('img_%s' % image_code_id)
        # Bug fix: an expired/unknown captcha returns None from redis; the
        # original crashed on .decode() in that case.
        if img_code is None or img_code.decode().lower() != image_code.lower():
            return Response({'error': '图片验证码错误'})
        # Look the account up by mobile number or username.
        try:
            if re.match(r'^1[3-9]\d{9}', username):
                user = User.objects.get(mobile=username)
            else:
                user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'})
        return Response({
            'mobile': user.mobile,
            'access_token': user.id,
        })


# Forgot password - step 2: send the SMS verification code
class SendSmsCodeView(APIView):

    def get(self, request):
        # 1. Resolve the user from the access token issued in step 1.
        access_token = request.GET.get('access_token')
        conn = get_redis_connection('sms_code')
        try:
            user = User.objects.get(id=access_token)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'}, status=400)
        mobile = user.mobile
        # Rate-limit: one SMS per minute.
        flag = conn.get('sms_code_flag_%s' % mobile)
        if flag:
            return Response({'error': '请求过于频繁'}, status=400)
        # 2. Generate the verification code.
        sms_code = '%06d' % randint(0, 999999)
        print(sms_code)  # dev aid
        # 3. Save code + rate-limit flag via a pipeline (single round trip).
        pl = conn.pipeline()
        pl.setex('sms_code_%s' % mobile, 300, sms_code)
        pl.setex('sms_code_flag_%s' % mobile, 60, 'a')
        pl.execute()
        send_sms_code.delay(mobile, sms_code)
        return Response({'message': 'ok'})


# Forgot password - step 3: verify the SMS code
class CheckSmsCode(APIView):

    def get(self, request, username):
        sms_code = request.GET.get('sms_code')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'}, status=400)
        conn = get_redis_connection('sms_code')
        # redis returns bytes, or None when the code has expired.
        # Bug fix: conn.get does not raise on a missing key, so the
        # original try/except never fired and .decode() crashed on None;
        # also `raise Response(...)` is invalid — Response is not an
        # exception and must be returned, not raised.
        real_sms_code = conn.get('sms_code_%s' % user.mobile)
        if real_sms_code is None:
            return Response({'message': '短信验证码过期'}, status=400)
        if real_sms_code.decode().lower() != sms_code.lower():
            return Response({'message': '验证码错误'}, status=400)
        return Response({
            'user_id': user.id,
            'access_token': user.id
        })


# Forgot password - step 4: save the new password
class NewPassword(APIView):

    def post(self, request, user_id):
        pwd = request.data['password']
        pwd2 = request.data['password2']
        if pwd != pwd2:
            return Response({'message': '两次密码不一致'})
        try:
            user = User.objects.get(id=user_id)
        except User.DoesNotExist:
            return Response({'error': '用户不存在'}, status=400)
        user.set_password(pwd)
        user.save()
        return Response('ok')
30.985915
118
0.622727
947
8,800
5.640971
0.275607
0.039311
0.016848
0.028641
0.235118
0.199176
0.125047
0.115313
0.115313
0.115313
0
0.01305
0.268523
8,800
283
119
31.095406
0.816529
0.119205
0
0.375691
0
0
0.070265
0
0
0
0
0
0
1
0.077348
false
0.060773
0.104972
0.01105
0.436464
0.016575
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
998910807a84d2343e16f6a84638953322f68a97
425
py
Python
socialregistration/contrib/googleapps/urls.py
zapier/django-socialregistration-with-google-apps
65cb2d446d30f8270c6abb866480bad105518e2b
[ "MIT" ]
1
2019-08-14T21:14:03.000Z
2019-08-14T21:14:03.000Z
socialregistration/contrib/googleapps/urls.py
mpdaugherty/django-socialregistration-with-google-apps
988c627995d48a0f861b1156401fe3e5e4c91826
[ "MIT" ]
null
null
null
socialregistration/contrib/googleapps/urls.py
mpdaugherty/django-socialregistration-with-google-apps
988c627995d48a0f861b1156401fe3e5e4c91826
[ "MIT" ]
3
2017-06-22T22:32:03.000Z
2021-09-08T10:01:50.000Z
# URL routes for the Google Apps social-registration contrib package.
# NOTE(review): `django.conf.urls.defaults` (and the `patterns()` helper)
# were removed in Django 1.6/1.8 — this module only imports on old Django
# versions. `settings` is imported but unused here.
from django.conf import settings
from django.conf.urls.defaults import *
from socialregistration.contrib.googleapps.views import GoogleAppsRedirect, \
    GoogleAppsSetup, GoogleAppsCallback

# Three named routes: start of the OAuth dance, the provider callback,
# and the post-auth account-setup page.
urlpatterns = patterns('',
    url('^redirect/$', GoogleAppsRedirect.as_view(), name='redirect'),
    url('^callback/$', GoogleAppsCallback.as_view(), name='callback'),
    url('^setup/$', GoogleAppsSetup.as_view(), name='setup'),
)
38.636364
77
0.741176
43
425
7.255814
0.534884
0.057692
0.096154
0
0
0
0
0
0
0
0
0
0.105882
425
10
78
42.5
0.821053
0
0
0
0
0
0.12
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
9989614393dfb36145666c0ee972313c9461ff34
73
py
Python
firstpython.py
Renatoirp/CloudCourse
1edd614a7179bf444b122acfebb50f6ffea5c1c2
[ "Apache-2.0" ]
null
null
null
firstpython.py
Renatoirp/CloudCourse
1edd614a7179bf444b122acfebb50f6ffea5c1c2
[ "Apache-2.0" ]
null
null
null
firstpython.py
Renatoirp/CloudCourse
1edd614a7179bf444b122acfebb50f6ffea5c1c2
[ "Apache-2.0" ]
null
null
null
# Display output: print each banner line in turn.
for line in ("New python file!", "Add a new print line."):
    print(line)
24.333333
30
0.712329
12
73
4.333333
0.75
0
0
0
0
0
0
0
0
0
0
0
0.136986
73
3
30
24.333333
0.825397
0.191781
0
0
0
0
0.637931
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
998cbf13c8435563780f89976a96d9d655cabb2a
14,049
py
Python
examples/notebooks/generating_yaml.py
wtgee/huntsman-pocs
c47976b1e52c5676a8237f6ee889555ede26d0e0
[ "MIT" ]
null
null
null
examples/notebooks/generating_yaml.py
wtgee/huntsman-pocs
c47976b1e52c5676a8237f6ee889555ede26d0e0
[ "MIT" ]
null
null
null
examples/notebooks/generating_yaml.py
wtgee/huntsman-pocs
c47976b1e52c5676a8237f6ee889555ede26d0e0
[ "MIT" ]
null
null
null
import yaml
import os
import datetime
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from IPython.display import display
import sys


class POCS_devices_database(object):
    """Manages serial numbers and other information of multiple devices
    being used with POCS.

    It can be used to display ipython widgets to select the device
    information, and then create a .yaml config file that can be read and
    implemented by POCS.
    """

    def __init__(self, device_info_master_directory='/var/huntsman-pocs/conf_files/',
                 device_info_master_file='device_info_master.yaml',
                 local_directory='/var/huntsman-pocs/conf_files/',
                 archive_directory='/var/huntsman-pocs/conf_files/archive/',
                 output_yaml_filename='huntsman.yaml'):
        """Set up file locations, load previous config data, and build the
        timestamped archive filename.

        Args:
            device_info_master_directory : directory holding the master
                .yaml file with all known device info
            device_info_master_file : name of the master device-info file
            local_directory : dir where the config file must be saved to
                be used by POCS
            archive_directory : dir where the archive/version control of
                the config files is kept
            output_yaml_filename : filename of the local config file used
                by POCS
        """
        self.local_directory = local_directory
        self.archive_directory = archive_directory
        self.device_info_master_directory = device_info_master_directory
        self.output_yaml_filename = output_yaml_filename
        self.device_info_master_file = device_info_master_file

        device_info_file = os.path.join(
            self.device_info_master_directory, self.device_info_master_file)
        try:
            with open(device_info_file, 'r') as file:
                # Bug fix: yaml.load without a Loader is unsafe and raises
                # TypeError on PyYAML >= 6; safe_load is the correct call
                # for plain config data.
                self.data = yaml.safe_load(file)
        except FileNotFoundError:
            sys.exit("Cannot find device information master file")

        date_info = datetime.datetime.today()
        datetime_str = date_info.strftime('%Y_%m_%d_%H_%M')
        self.archive_filename = '{}_{}.{}'.format('huntsman', datetime_str, 'yaml')

        previous_file = os.path.join(self.local_directory,
                                     self.output_yaml_filename)
        # Load general data from the previous .yaml file used, dropping any
        # stale camera section (it will be rebuilt from the widgets).
        try:
            with open(previous_file, 'r') as file:
                self.data_dict = yaml.safe_load(file)
            if self.data_dict is not None and 'cameras' in self.data_dict:
                del self.data_dict['cameras']
        except FileNotFoundError:
            self.data_dict = {}
        # Bug fix: safe_load returns None for an empty file, which made the
        # .update() below crash with AttributeError.
        if self.data_dict is None:
            self.data_dict = {}
        self.data_dict.update(
            {'cameras': {'hdr_mode': True, 'auto_detect': False, 'devices': [None]}})

    def add_device_widget(self, dummy_variable_for_widget):
        """Append the device set currently selected in the drop-down
        widgets to the 'data_dict' dictionary.

        Called by the 'Add new device set' button in start_interface().

        Args:
            dummy_variable_for_widget : the widget passes the button as an
                extra argument; it is unused.

        Output:
            Appends data_dict with the information chosen from the device
            information widgets.
        """
        additional_device = {'model': self.camera_type_chosen,
                             'port': self.camera_sn_chosen,
                             'filter_type': self.filter_ID_chosen,
                             'focuser': {'model': 'birger',
                                         'port': self.birger_sn_chosen},
                             'lens': {'model': 'canon',
                                      'port': self.lens_sn_chosen,
                                      'name': self.lens_name_chosen,
                                      'image_stabalisataion': self.lens_image_stabalisation_chosen},
                             'USB_hub_serial_number': self.USB_hub_SN_chosen,
                             'camera_into_serial_adaptor_port': self.camera_to_serial_port_chosen,
                             'serial_adaptor_into_USBhub_port': self.serial_to_USBhub_port_chosen,
                             'camera_into_USBhub_port': self.camera_to_USBhub_port_chosen}
        # [None] is the placeholder written in __init__; replace it on the
        # first device, append afterwards.
        if self.data_dict['cameras']['devices'] == [None]:
            self.data_dict['cameras']['devices'] = [additional_device]
        else:
            self.data_dict['cameras']['devices'].append(additional_device)
        return self.data_dict

    def save_file(self, dummy_variable_for_widget):
        """Write the 'data_dict' dictionary to the .yaml config files.

        Called by the 'Save File' button in start_interface().

        Args:
            dummy_variable_for_widget : the widget passes the button as an
                extra argument; it is unused.

        Output:
            Writes the dict to a .yaml file in two locations, as determined
            by the directories given to __init__.  The defaults are:
            '/var/huntsman-pocs/conf_files/huntsman.yaml' for the local
            config file used by POCS, and
            '/var/huntsman-pocs/conf_files/huntsman_archive/huntsman_YYYY_mm_dd_hh_MM.yaml'
            for the dated archive copy.
        """
        # Local copy read by POCS.  `with` guarantees the handles close
        # even if yaml.dump raises (the original leaked on error).
        local_path = os.path.join(self.local_directory, self.output_yaml_filename)
        with open(local_path, "w") as out_file:
            yaml.dump(self.data_dict, out_file, default_flow_style=False, indent=4)
        # Timestamped archive copy (acts as version control).
        archive_path = os.path.join(self.archive_directory, self.archive_filename)
        with open(archive_path, "w") as out_file:
            yaml.dump(self.data_dict, out_file, default_flow_style=False, indent=4)

    def start_interface(self):
        """This function runs all the code to generate the .yaml config files
        for the Huntsman-POCS system.
        It displays the Jupyter widgets which the user can interact with to
        write and save the config files.
        Files are saved in two locations, one for the local file that POCS
        will access, and the other is an archive of all previous config files
        which acts as a version control.
        By default, these locations are: (but can be changed using the
        arguments in the __init__ method)
        '/var/huntsman-pocs/conf_files/huntsman.yaml' for the local file.
        '/var/huntsman-pocs/conf_files/huntsman_archive/huntsman_YYYY_mm_dd_hh_MM.yaml'
        for the archive file.

        Steps for the user to follow:
            Select from the dropdown menus the information for one device set.
            Click 'Add new device set'.
            Select from the dropdown menus the information for the next device set.
            Click 'Add new device set'.
            Repeat until all device sets have been added.
            Click 'Save File' to write the .yaml file.

        Displays:
            Jupyter widgets of drop-down menus to select the device sets.
            These widgets are used to generate and save the .yaml config files.

        Output:
            A .yaml config file for Huntsman
        """
        # NOTE: this docstring is printed as the user-facing instructions.
        print(self.start_interface.__doc__)

        # Build one interactive drop-down per device attribute, backed by
        # the options listed in the master device-info file.
        birger_sn = self.data['birger_SN']
        self.birger_serial_number = interactive(
            birger_sn_widget, birger_serial_number_displayed=birger_sn)
        camera_sn = self.data['camera_SN']
        self.camera_serial_number = interactive(
            camera_sn_widget, camera_serial_number_displayed=camera_sn)
        lens_sn = self.data['lens_SN']
        self.lens_serial_number = interactive(
            lens_sn_widget, lens_serial_number_displayed=lens_sn)
        filter_ID = self.data['filter_ID']
        self.filter_ID_code = interactive(
            filter_ID_widget, filter_ID_code_displayed=filter_ID)
        serial_into_USBhub = self.data['serial_into_USBhub_port']
        self.serial_into_USBhub_port = interactive(
            serial_to_usb_widget, serial_into_USBhub_port_displayed=serial_into_USBhub)
        camera_into_serial = self.data['camera_into_serial_port']
        self.camera_into_serial_port = interactive(
            camera_to_serial_widget, camera_into_serial_port_displayed=camera_into_serial)
        USBhub = self.data['USBhub_SN']
        self.USBhub_SN = interactive(usbhub_sn_widget, USBhub_SN_displayed=USBhub)
        camera_into_USBhub = self.data['camera_into_USBhub_port']
        self.camera_into_USBhub_port = interactive(
            camera_to_usb_widget, camera_into_USBhub_port_displayed=camera_into_USBhub)

        display(self.birger_serial_number)
        display(self.camera_serial_number)
        display(self.lens_serial_number)
        display(self.filter_ID_code)
        display(self.serial_into_USBhub_port)
        display(self.camera_into_serial_port)
        display(self.USBhub_SN)
        display(self.camera_into_USBhub_port)

        # Snapshot the widget results; the add/save button callbacks read
        # these *_chosen attributes.
        self.birger_sn_chosen = self.birger_serial_number.result
        self.camera_sn_chosen = self.camera_serial_number.result
        self.lens_sn_chosen = self.lens_serial_number.result
        self.filter_ID_chosen = self.filter_ID_code.result
        self.serial_to_USBhub_port_chosen = self.serial_into_USBhub_port.result
        self.camera_to_serial_port_chosen = self.camera_into_serial_port.result
        self.USB_hub_SN_chosen = self.USBhub_SN.result
        self.camera_to_USBhub_port_chosen = self.camera_into_USBhub_port.result
        # Derived fields looked up from the master data by serial number.
        self.camera_type_chosen = self.data['camera_type'][self.camera_sn_chosen]
        self.lens_name_chosen = self.data['lens_name'][self.lens_sn_chosen]
        self.lens_image_stabalisation_chosen = self.data['lens_image_stabalisation'][self.lens_sn_chosen]

        button1 = widgets.Button(description="Add new device set")
        display(button1)
        button1.on_click(self.add_device_widget)

        button = widgets.Button(description="Save File")
        display(button)
        button.on_click(self.save_file)


def birger_sn_widget(birger_serial_number_displayed):
    """Jupyter-widget callback: return the chosen focuser serial number.

    Args:
        birger_serial_number_displayed (str) : the serial number of the
            birger device as selected from the widget.
    """
    return birger_serial_number_displayed


def camera_sn_widget(camera_serial_number_displayed):
    """Jupyter-widget callback: return the chosen camera serial number.

    Args:
        camera_serial_number_displayed (str) : the serial number of the
            camera device as selected from the widget.
    """
    return camera_serial_number_displayed


def lens_sn_widget(lens_serial_number_displayed):
    """Jupyter-widget callback: return the chosen lens serial number.

    Args:
        lens_serial_number_displayed (str) : the serial number of the lens
            device as selected from the widget.
    """
    return lens_serial_number_displayed


def filter_ID_widget(filter_ID_code_displayed):
    """Jupyter-widget callback: return the chosen filter ID number.

    Args:
        filter_ID_code_displayed (str) : the ID number of the lens as
            selected from the widget.
    """
    return filter_ID_code_displayed


def serial_to_usb_widget(serial_into_USBhub_port_displayed):
    """Jupyter-widget callback: return the chosen USB-hub port for the
    serial adaptor.

    Args:
        serial_into_USBhub_port_displayed (str) : the port number of the
            USB Hub that the Serial Adaptor is plugged into.
    """
    return serial_into_USBhub_port_displayed


def camera_to_serial_widget(camera_into_serial_port_displayed):
    """Jupyter-widget callback: return the chosen serial-adaptor port for
    the camera.

    Args:
        camera_into_serial_port_displayed (str) : the port number of the
            Serial Adaptor that the camera is plugged into.
    """
    return camera_into_serial_port_displayed


def usbhub_sn_widget(USBhub_SN_displayed):
    """Jupyter-widget callback: return the chosen USB Hub serial number.

    Args:
        USBhub_SN_displayed (str) : the serial number of the USB Hub as
            selected from the widget.
    """
    return USBhub_SN_displayed


def camera_to_usb_widget(camera_into_USBhub_port_displayed):
    """Jupyter-widget callback: return the chosen USB-hub port for the
    camera.

    Args:
        camera_into_USBhub_port_displayed (str) : the port number of the
            USB Hub that the camera is plugged into.
    """
    return camera_into_USBhub_port_displayed
42.702128
113
0.678838
1,895
14,049
4.788391
0.126121
0.039674
0.027772
0.01091
0.529755
0.429689
0.366321
0.314415
0.293366
0.26879
0
0.000962
0.260019
14,049
328
114
42.832317
0.871874
0.41583
0
0.030075
0
0
0.090193
0.042695
0
0
0
0
0
1
0.090226
false
0
0.052632
0
0.218045
0.007519
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
999192f10fb8b2831dc1f5ac84ee5ab0849ed0de
4,231
py
Python
synapse/resources/directories-plugin/directories.py
comodit/synapse-agent
ee3c6c2ec07ba34e821529f3e097123326b8b9c5
[ "MIT" ]
5
2015-11-05T05:44:08.000Z
2021-02-09T06:00:21.000Z
synapse/resources/directories-plugin/directories.py
comodit/synapse-agent
ee3c6c2ec07ba34e821529f3e097123326b8b9c5
[ "MIT" ]
2
2017-08-13T09:36:41.000Z
2017-08-13T09:36:58.000Z
synapse/resources/directories-plugin/directories.py
comodit/synapse-agent
ee3c6c2ec07ba34e821529f3e097123326b8b9c5
[ "MIT" ]
3
2015-09-30T20:08:19.000Z
2020-08-19T19:24:04.000Z
import getpass
from datetime import datetime

from synapse.resources.resources import ResourcesController
from synapse.logger import logger
from synapse.synapse_exceptions import ResourceException


@logger
class DirectoriesController(ResourcesController):
    """Resource controller managing directories (presence, ownership,
    group, mode) through the platform module behind `self.module`."""

    __resource__ = "directories"

    # NOTE: the public methods previously used the mutable default
    # `attributes={}` — a shared dict across all calls.  They now default
    # to None and normalize internally; callers are unaffected.

    def read(self, res_id=None, attributes=None):
        """Return the current state of the directory `res_id`."""
        status = {}
        self.check_mandatory(res_id)
        present = self.module.is_dir(res_id)
        status['present'] = present
        # Ownership/mode details only exist for a present directory.
        if present:
            status['owner'] = self.module.owner(res_id)
            status['group'] = self.module.group(res_id)
            status['mode'] = self.module.mode(res_id)
            status['mod_time'] = self.module.mod_time(res_id)
            status['c_time'] = self.module.c_time(res_id)
        return status

    def create(self, res_id=None, attributes=None):
        """Create the directory `res_id` and apply owner/group/mode."""
        attributes = attributes or {}
        self.check_mandatory(res_id)
        monitor = attributes.get('monitor')
        owner = self._get_owner(res_id, attributes)
        group = self._get_group(res_id, attributes)
        mode = self._get_mode(res_id, attributes)
        state = {'owner': owner,
                 'group': group,
                 'mode': mode,
                 'mod_time': str(datetime.now()),
                 'c_time': str(datetime.now()),
                 'present': True}
        self.save_state(res_id, state, monitor=monitor)
        self.module.create_folders(res_id)
        # Update metadata of the created path.
        self.module.update_meta(res_id, owner, group, mode)
        return self.read(res_id=res_id)

    def update(self, res_id=None, attributes=None):
        """Updating a directory is the same operation as creating it."""
        return self.create(res_id=res_id, attributes=attributes)

    def delete(self, res_id=None, attributes=None):
        """Delete the directory `res_id` and persist the absent state."""
        attributes = attributes or {}
        self.check_mandatory(res_id)
        monitor = attributes.get('monitor')
        state = {'present': False}
        self.save_state(res_id, state, monitor=monitor)
        previous_state = self.read(res_id=res_id)
        self.module.delete_folder(res_id)
        if not self.module.exists(res_id):
            previous_state['present'] = False
        self.response = previous_state
        return self.read(res_id)

    def is_compliant(self, persisted_state, current_state):
        """Return True when current_state matches persisted_state on
        presence and on the name/owner/group/mode attributes."""
        # First, compare the present flag.  If it differs, no need to go
        # further, there's a compliance issue.
        if persisted_state.get("present") != current_state.get("present"):
            return False
        # Secondly, compare path attributes.
        for attr in ("name", "owner", "group", "mode"):
            if persisted_state.get(attr) != current_state.get(attr):
                return False
        return True

    def _get_owner(self, path, attributes):
        """Resolve the owner: explicit attribute > current path owner >
        current user (getpass is portable Unix/Windows)."""
        owner = getpass.getuser()
        if self.module.exists(path):
            owner = self.module.owner(path)
        if attributes.get('owner'):
            owner = attributes['owner']
        return owner

    def _get_group(self, path, attributes):
        """Resolve the group: explicit attribute > current path group >
        current user's name (via getpass, portable Unix/Windows)."""
        group = getpass.getuser()
        if self.module.exists(path):
            group = self.module.group(path)
        if attributes.get('group'):
            group = attributes['group']
        return group

    def _get_mode(self, path, attributes):
        """Resolve the mode: explicit attribute (validated as octal) >
        current path mode > default mode from the current umask."""
        mode = self.module.get_default_mode(path)
        if self.module.exists(path):
            mode = self.module.mode(path)
        if attributes.get('mode'):
            try:
                mode = oct(int(attributes['mode'], 8))
            except ValueError as err:
                raise ResourceException("Error with path mode (%s)" % err)
        return mode
31.81203
74
0.610967
508
4,231
4.944882
0.214567
0.057723
0.021895
0.020701
0.281847
0.184713
0.147293
0.121019
0.058121
0.058121
0
0.000332
0.288584
4,231
132
75
32.05303
0.834219
0.140392
0
0.164706
0
0
0.053576
0
0
0
0
0
0
1
0.094118
false
0.035294
0.058824
0.011765
0.282353
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9992b282a524485f8001963cb892f9c2c4eb3263
3,532
py
Python
biothings/web/handlers/_flask.py
newgene/biothings.api
e3278695ac15a55fe420aa49c464946f81ec019d
[ "Apache-2.0" ]
30
2017-07-23T14:50:29.000Z
2022-02-08T08:08:16.000Z
biothings/web/handlers/_flask.py
kevinxin90/biothings.api
8ff3bbaecd72d04db4933ff944898ee7b7c0e04a
[ "Apache-2.0" ]
163
2017-10-24T18:45:40.000Z
2022-03-28T03:46:26.000Z
biothings/web/handlers/_flask.py
newgene/biothings.api
e3278695ac15a55fe420aa49c464946f81ec019d
[ "Apache-2.0" ]
22
2017-06-12T18:30:15.000Z
2022-03-01T18:10:47.000Z
# Flask-based handler layer for biothings.web: a @route decorator that
# registers handlers and parses their arguments through the app's
# optionsets, plus the concrete API endpoints.
from functools import wraps
from types import CoroutineType

import flask

from biothings.web import templates
from biothings.web.options import OptionError
from biothings.web.query.pipeline import (QueryPipelineException,
                                          QueryPipelineInterrupt)
from tornado.template import Loader

# Module-level registry of wrapped handlers; the app factory presumably
# reads this list to install the URL rules — TODO confirm against caller.
routes = []


def route(pattern, methods=("GET", "POST")):
    """Decorator factory: register a handler under `pattern` and parse its
    request arguments via the app optionset matching the handler's name."""
    def A(f):
        async def B(*args, **kwargs):
            biothings = flask.current_app.biothings
            optionsets = biothings.optionsets
            # The optionset is looked up by the wrapped function's name.
            optionset = optionsets.get(f.__name__)
            if optionset:
                try:
                    # Merge path kwargs, query string, form body and JSON
                    # body into one parsed argument dict.
                    _args = optionset.parse(flask.request.method, (
                        (tuple(kwargs.values()), {}),
                        flask.request.args,
                        flask.request.form,
                        flask.request.get_json()
                    ))
                except OptionError as err:
                    # Bad request parameters -> 400 with the error details.
                    return err.info, 400
            else:
                _args = {}
            result = f(biothings, _args)
            # Handlers may be sync or async; await only coroutines.
            if isinstance(result, CoroutineType):
                return await result
            return result
        # Metadata consumed by whatever installs these routes.
        B.pattern = pattern
        B.methods = methods
        B.name = f.__name__
        routes.append(B)
        return B
    return A


@route("/")
def homepage(biothings, args):
    """Render the front page from the bundled tornado template."""
    loader = Loader(templates.__path__[0])
    template = loader.load("home.html")
    return template.generate(
        alert='Front Page Not Configured.',
        title='Biothings API',
        contents=biothings.handlers.keys(),
        support=biothings.metadata.types,
        url='http://biothings.io/'
    )


def handle_es_conn(f):
    """Decorator: reset the ES async connection for the current event loop,
    translate query-pipeline errors to HTTP responses, and always close
    the client."""
    @wraps(f)
    async def _(biothings, *args, **kwargs):
        client = biothings.elasticsearch.async_client
        # because of the flask execution model
        # each time the async function is executed
        # it is executed on a different event loop
        # reset the connections to use the active loop
        del client.transport.connection_pool
        await client.transport._async_init()
        try:
            response = await f(biothings, *args, **kwargs)
        except QueryPipelineInterrupt as itr:
            # Interrupt carries a ready-made response body.
            return itr.details
        except QueryPipelineException as exc:
            # Build an error payload preserving the pipeline's details.
            kwargs = exc.details if isinstance(exc.details, dict) else {}
            kwargs["success"] = False
            kwargs["status"] = exc.code
            kwargs["reason"] = exc.summary
            return kwargs, exc.code
        finally:
            # Close the client on every path (success, interrupt, error).
            await client.close()
        return response
    return _


@route("/{ver}/query")
@handle_es_conn
async def query(biothings, args):
    """Search endpoint: run the query pipeline with the parsed args."""
    return await biothings.pipeline.search(**args)


@route([
    "/{ver}/{typ}/",
    "/{ver}/{typ}/<id>"])
@handle_es_conn
async def annotation(biothings, args):
    # could be a list, in which case we need jsonify.
    return flask.jsonify(await biothings.pipeline.fetch(**args))


@route("/{ver}/metadata")
@handle_es_conn
async def metadata(biothings, args):
    """Return API metadata, refreshing it first."""
    await biothings.metadata.refresh(None)
    return biothings.metadata.get_metadata(None)


@route("/{ver}/metadata/fields")
@handle_es_conn
async def fields(biothings, args):
    """Return the transformed field mappings, refreshing metadata first."""
    await biothings.metadata.refresh(None)
    mappings = biothings.metadata.get_mappings(None)
    return biothings.pipeline.formatter.transform_mapping(mappings)


@route("/status")
@handle_es_conn
async def status(biothings, args):
    """Health-check endpoint backed by the async health checker."""
    return await biothings.health.async_check()
32.40367
73
0.615515
384
3,532
5.559896
0.380208
0.054801
0.033724
0.039813
0.120843
0.043091
0.043091
0
0
0
0
0.001582
0.284258
3,532
108
74
32.703704
0.842959
0.05974
0
0.096774
0
0
0.0546
0.006637
0
0
0
0
0
1
0.043011
false
0
0.075269
0
0.27957
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
9993702e8c8bf3d5ffb78c094fcfca64bb634489
633
py
Python
app/migrations/0001_initial.py
bedwards/lyric-tycoon
da90f029c6a7dc5b375603561b8fcba5142ee35d
[ "Apache-2.0" ]
null
null
null
app/migrations/0001_initial.py
bedwards/lyric-tycoon
da90f029c6a7dc5b375603561b8fcba5142ee35d
[ "Apache-2.0" ]
null
null
null
app/migrations/0001_initial.py
bedwards/lyric-tycoon
da90f029c6a7dc5b375603561b8fcba5142ee35d
[ "Apache-2.0" ]
null
null
null
# Generated by Django 3.2.4 on 2021-06-26 20:27 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Sentence', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('author', models.CharField(db_index=True, max_length=255)), ('book', models.CharField(max_length=255)), ('sentence', models.CharField(max_length=2048)), ], ), ]
26.375
117
0.579779
66
633
5.454545
0.666667
0.125
0.066667
0.133333
0
0
0
0
0
0
0
0.055804
0.292259
633
23
118
27.521739
0.747768
0.07109
0
0
1
0
0.051195
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.3125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
999378e5bd86ceff2b4bb0417170acd2a54a5a27
1,571
py
Python
kinko/compat_ast.py
vmagamedov/kinko
b65f8ae97bbf5d056781e90e22d2a369f440ed4c
[ "BSD-3-Clause" ]
5
2015-11-18T20:31:14.000Z
2016-05-05T07:43:21.000Z
kinko/compat_ast.py
vmagamedov/kinko
b65f8ae97bbf5d056781e90e22d2a369f440ed4c
[ "BSD-3-Clause" ]
5
2016-03-17T14:42:30.000Z
2016-06-27T13:31:26.000Z
kinko/compat_ast.py
vmagamedov/kinko
b65f8ae97bbf5d056781e90e22d2a369f440ed4c
[ "BSD-3-Clause" ]
null
null
null
import ast as _ast from .compat import PY3, PY35 If = _ast.If Str = _ast.Str Num = _ast.Num For = _ast.For Expr = _ast.Expr Name = _ast.Name Load = _ast.Load List = _ast.List Index = _ast.Index Store = _ast.Store IfExp = _ast.IfExp IsNot = _ast.IsNot Tuple = _ast.Tuple Module = _ast.Module Assign = _ast.Assign Compare = _ast.Compare ListComp = _ast.ListComp Subscript = _ast.Subscript GeneratorExp = _ast.GeneratorExp comprehension = _ast.comprehension if PY3: def arg(arg): return _ast.arg(arg, None) def arguments(args, vararg, kwarg, defaults): return _ast.arguments(args, vararg, [], [], kwarg, defaults) def FunctionDef(name, args, body, decorator_list): return _ast.FunctionDef(name, args, body, decorator_list, None) Name = _ast.Name keyword = _ast.keyword Attribute = _ast.Attribute if PY35: def Call(func, args, keywords, starargs, kwargs): return _ast.Call(func, args, keywords) else: Call = _ast.Call else: def arg(arg): return _ast.Name(str(arg), _ast.Param()) def arguments(args, vararg, kwarg, defaults): return _ast.arguments(args, vararg, kwarg, defaults) def FunctionDef(name, args, body, decorator_list): return _ast.FunctionDef(str(name), args, body, decorator_list) def Name(id, ctx): return _ast.Name(str(id), ctx) def keyword(arg, value): return _ast.keyword(str(arg), value) def Attribute(value, attr, ctx): return _ast.Attribute(value, str(attr), ctx) Call = _ast.Call
23.102941
71
0.665818
211
1,571
4.763033
0.246446
0.089552
0.075622
0.095522
0.354229
0.293532
0.268657
0.268657
0.268657
0.268657
0
0.00491
0.222152
1,571
67
72
23.447761
0.817512
0
0
0.235294
0
0
0
0
0
0
0
0
0
1
0.196078
false
0
0.039216
0.196078
0.431373
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
99949b7499fb18d01405577641cf1fb6c9a87917
258
py
Python
tests/abs/test_product.py
powerpenguincat/practice-atcoder
6c656d0ebe3fc12d7df50112af2ef5c946bbaf46
[ "MIT" ]
null
null
null
tests/abs/test_product.py
powerpenguincat/practice-atcoder
6c656d0ebe3fc12d7df50112af2ef5c946bbaf46
[ "MIT" ]
null
null
null
tests/abs/test_product.py
powerpenguincat/practice-atcoder
6c656d0ebe3fc12d7df50112af2ef5c946bbaf46
[ "MIT" ]
null
null
null
import pytest from practice_atcoder.abs.product import question class Test(object): @pytest.mark.parametrize("ab,expect", [ ("3 4", "Even"), ("1 21", "Odd"), ]) def test(self, ab, expect): assert question(ab) == expect
19.846154
49
0.593023
32
258
4.75
0.75
0.157895
0
0
0
0
0
0
0
0
0
0.025773
0.248062
258
12
50
21.5
0.757732
0
0
0
0
0
0.089147
0
0
0
0
0
0.111111
1
0.111111
false
0
0.222222
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99960f95359e6723c193ed2ed1eaac33b01b2ae2
1,440
py
Python
bp_eval/bpredict/utils.py
alexf91/gem5-advanced-computer-architecture
01fb031c0cc42410e148a26d0c3d891fb1372381
[ "BSD-3-Clause" ]
1
2020-03-25T09:35:45.000Z
2020-03-25T09:35:45.000Z
bp_eval/bpredict/utils.py
alexf91/gem5-advanced-computer-architecture
01fb031c0cc42410e148a26d0c3d891fb1372381
[ "BSD-3-Clause" ]
null
null
null
bp_eval/bpredict/utils.py
alexf91/gem5-advanced-computer-architecture
01fb031c0cc42410e148a26d0c3d891fb1372381
[ "BSD-3-Clause" ]
null
null
null
# # Copyright 2018 Alexander Fasching # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __all__ = ('SaturatingCounter', 'History') class SaturatingCounter(object): value = property(lambda self: self._value) def __init__(self, minval, maxval, init=None): assert init is None or minval <= init <= maxval self._minval = minval self._maxval = maxval self._value = init or minval def increment(self): self._value = min(self._value + 1, self._maxval) def decrement(self): self._value = max(self._value - 1, self._minval) class History(object): value = property(lambda self: self._value) def __init__(self, length): self._length = length self._value = 0 def update(self, taken): self._value = ((self._value << 1) | taken) & ((1 << self._length) - 1)
31.304348
78
0.688889
201
1,440
4.79602
0.462687
0.093361
0.053942
0.059129
0.186722
0.159751
0.10166
0.10166
0.10166
0.10166
0
0.009778
0.21875
1,440
45
79
32
0.847111
0.445833
0
0.105263
0
0
0.03073
0
0
0
0
0
0.052632
1
0.263158
false
0
0
0
0.473684
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
9999a9338fc19b89e13fa629e1fae030c4729213
824
py
Python
tasks/train-model.py
wmeints/explainable-ai
ebdec372ff3f994c157dc5c47b4a8403e5c7d006
[ "MIT" ]
1
2021-06-15T08:27:06.000Z
2021-06-15T08:27:06.000Z
tasks/train-model.py
wmeints/explainable-ai
ebdec372ff3f994c157dc5c47b4a8403e5c7d006
[ "MIT" ]
null
null
null
tasks/train-model.py
wmeints/explainable-ai
ebdec372ff3f994c157dc5c47b4a8403e5c7d006
[ "MIT" ]
null
null
null
import os from pathlib import Path import joblib import pandas as pd import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate root_folder = Path(os.path.dirname(os.path.dirname(__file__))) training_data = root_folder / 'data' / 'processed' / 'train.csv' model_folder = root_folder / 'models' model_file = model_folder / 'classifier.bin' df = pd.read_csv(training_data) x = df.drop(['LABEL'], axis=1) y = df['LABEL'] model = RandomForestClassifier() results = cross_validate(model, x, y, cv=5, return_estimator=True) os.makedirs(model_folder, exist_ok=True) best_estimator = results['estimator'][np.argmax(results['test_score'])] joblib.dump(best_estimator, model_file) print(f"Done training. Stored model with score: {np.max(results['test_score'])}")
30.518519
81
0.769417
120
824
5.083333
0.483333
0.04918
0.042623
0
0
0
0
0
0
0
0
0.002717
0.106796
824
27
81
30.518519
0.826087
0
0
0
0
0
0.172121
0.037576
0
0
0
0
0
1
0
false
0
0.35
0
0.35
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
999a70b638ef43d6635227758b0cf57cc97a50c7
152
py
Python
src/nostradamus/utils/__init__.py
Orlogskapten/tsNostradamus
707cbc23fac3e0f92875d89550046e5c3b7b17d2
[ "MIT" ]
3
2020-07-06T10:58:40.000Z
2020-07-23T21:39:51.000Z
src/nostradamus/utils/__init__.py
wenceslas-sanchez/tsNostradamus
707cbc23fac3e0f92875d89550046e5c3b7b17d2
[ "MIT" ]
null
null
null
src/nostradamus/utils/__init__.py
wenceslas-sanchez/tsNostradamus
707cbc23fac3e0f92875d89550046e5c3b7b17d2
[ "MIT" ]
null
null
null
from .error import exception_type, check_method_lauched, check_is_int, \ check_is_in, check_key_is_in from .normal_hist import compare_hist_to_norm
38
72
0.842105
26
152
4.384615
0.653846
0.122807
0
0
0
0
0
0
0
0
0
0
0.111842
152
4
73
38
0.844444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
999a7897f8cea7a46091c8b50a7b40974c139967
32,457
py
Python
networks/meta/past_grads_v2.py
annachen/dl_playground
f263dc16b4f0d91f6d33d94e678a9bbe2ace8913
[ "MIT" ]
null
null
null
networks/meta/past_grads_v2.py
annachen/dl_playground
f263dc16b4f0d91f6d33d94e678a9bbe2ace8913
[ "MIT" ]
null
null
null
networks/meta/past_grads_v2.py
annachen/dl_playground
f263dc16b4f0d91f6d33d94e678a9bbe2ace8913
[ "MIT" ]
null
null
null
"""Meta network using past gradients.""" import tensorflow as tf class DualRNN(tf.keras.layers.Layer): """ Pretty similar to LayerCompetition, except: 1) Optionally aggregate features across batch before feeding into the RNN. Doing this because if the RNN states were to represent training state of the underlying network, the whole batch is used for the underlying network and not just one instance from the batch. Doing this also means that we'd be training the meta-network RNN with batch_size = 1. 2) Extend the backward masking to be more similar to forward masking - the past masked gradients are passed into the RNN, while the current unmasked gradient is passed in through a separate branch. (This needs the inner network to pass in the masked gradient instead) Because the RNN states could potentially not have the batch dimension, we need to also pass in the current gradient at the end to get the mask output as (B, N) 3) Added a few more options to experiment with different network design. 
Parameters ---------- """ def __init__( self, rnn_type, rnn_units, input_mlp, fwd_output_mlp, bwd_output_mlp, mask_thresh=0.1, dist_fn='none', use_bwd_mask=False, normalize_grads=False, normalize_acts=False, random_grads_stddev=None, use_nearest_grads=False, use_node_set=True, node_set_version='v3', use_batch_set=False, use_batch_summary=True, cur_reuse_branch=False, bwd_return_grads=False, ): super(DualRNN, self).__init__() assert rnn_type in ['simplernn', 'gru', 'lstm'] if rnn_type == 'simplernn': self._rnn = tf.keras.layers.SimpleRNN(rnn_units) elif rnn_type == 'gru': self._rnn = tf.keras.layers.GRU(rnn_units) elif rnn_type == 'lstm': self._rnn = tf.keras.layers.LSTM(rnn_units) self._input_mlp = input_mlp self._fwd_output_mlp = fwd_output_mlp self._bwd_output_mlp = bwd_output_mlp self._mask_thresh = mask_thresh self._rnn_units = rnn_units self._dist_fn = dist_fn self._use_bwd_mask = use_bwd_mask self._normalize_grads = normalize_grads self._normalize_acts = normalize_acts self._use_node_set = use_node_set self._node_set_version = node_set_version self._use_batch_set = use_batch_set self._use_batch_summary = use_batch_summary self._random_grads_stddev = random_grads_stddev self._use_nearest_grads = use_nearest_grads self._cur_reuse_branch = cur_reuse_branch self._bwd_return_grads = bwd_return_grads self._last_input_mlp_input = None if self._fwd_output_mlp._last_layer_act_fn_str == 'linear': self._fwd_apply_sigmoid = True elif self._fwd_output_mlp._last_layer_act_fn_str == 'sigmoid': self._fwd_apply_sigmoid = False else: raise ValueError() if self._use_bwd_mask is False: assert self._bwd_output_mlp is None else: if self._bwd_output_mlp._last_layer_act_fn_str == 'linear': self._bwd_apply_sigmoid = True elif self._bwd_output_mlp._last_layer_act_fn_str == 'sigmoid': self._bwd_apply_sigmoid = False else: raise ValueError() def warm_start(self): batch = { 'past_grads': tf.zeros((1, 1, 1)), 'past_acts': tf.zeros((1, 1, 1)), 'cur_acts': tf.zeros((1, 1)), 'cur_grads': 
tf.zeros((1, 1)), } self.forward(batch, training=False) if self._use_bwd_mask: self.backward(batch, training=False) def first_forward(self, batch, training=None): """ batch : (B, N) current activations. """ B = tf.shape(batch)[0] N = tf.shape(batch)[1] # currently initial state is zeros h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32) # prepare the branch from cur_acts if self._cur_reuse_branch: default_grads = self._get_default_grads( past_grads=None, past_acts=None, cur_acts=cur_acts, ) # (B, 1, N, cur_F) cur_act_input, cur_F = self._prepare_input_mlp_input( past_acts=cur_acts[:, tf.newaxis], past_grads=default_grads ) # (B*N, cur_F) cur_act_input = tf.reshape(cur_act_input, (B * N, cur_F)) # (B*N, F) cur_act_feats = self._input_mlp.call( cur_act_input, training=training ) F = self._input_mlp._filters[-1] cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F)) # also run set features on cur_acts # (B, 1, N, F') cur_act_feats, F_p = self._get_set_feature( cur_act_feats, F ) else: if self._normalize_acts: # (B, N), (B, 1) nacts, norm = _safe_normalize(batch, axis=-1) norm = tf.tile(norm, [1, N]) # (B, N, 2) cur_act_feats = tf.stack([nacts, norm], axis=-1) F_p = 2 else: cur_act_feats = cur_acts F_p = 1 # concat with current activation to feed into output_mlp # (B*N, U+F') feat = tf.concat([ h, tf.reshape(cur_act_feats, (B * N, F_p)) ], axis=-1) out = self._fwd_output_mlp(feat, training=training) # (B, N) out = tf.reshape(out, (B, N)) if self._fwd_apply_sigmoid: mask = tf.nn.sigmoid(out) else: mask = out # to avoid gradient underflow in the inner net, make mask # smaller than `mask_thresh` 0s mask = tf.where( mask < self._mask_thresh, tf.zeros_like(mask), mask, ) return mask def forward(self, batch, training=None): """Returns the mask for forward inner network Parameters ---------- batch : dict "past_grads" : (B, T, N) "past_acts" : (B, T, N) "cur_acts" : (B, N) """ past_grads = batch['past_grads'] past_acts = batch['past_acts'] cur_acts = batch['cur_acts'] B 
= tf.shape(cur_acts)[0] N = tf.shape(cur_acts)[1] T = tf.shape(past_grads)[1] # (B, T, N, Fin) feat, Fin = self._prepare_input_mlp_input( past_grads=past_grads, past_acts=past_acts, ) feat = tf.reshape(feat, (-1, Fin)) #print("fwd Fin: {}".format(Fin)) # (B * T * N, F) feat = self._input_mlp.call(feat, training=training) F = self._input_mlp._filters[-1] feat = tf.reshape(feat, (B, T, N, F)) # (B, T, N, F') all_feats, F_p = self.get_set_feature(feat, F) #print("fwd Fp: {}".format(F_p)) if self._use_batch_summary: # (T, N, F') all_feats, F_p = self._get_batch_summary(all_feats, F_p) # (N, T, F') seq = tf.transpose(all_feats, (1, 0, 2)) # (N, U) last_h = self._rnn(seq, training=training) # (B, N, U) last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1]) last_h = tf.reshape(last_h, (B * N, self._rnn_units)) else: # (B, N, T, F') seq = tf.transpose(all_feats, (0, 2, 1, 3)) seq = tf.reshape(seq, (B * N, T, F_p)) # (B*N, U) last_h = self._rnn(seq, training=training) # prepare the branch from cur_acts if self._cur_reuse_branch: default_grads = self._get_default_grads( past_grads=past_grads, past_acts=past_acts, cur_acts=cur_acts, ) # (B, 1, N, cur_F) cur_act_input, cur_F = self._prepare_input_mlp_input( past_acts=cur_acts[:, tf.newaxis], past_grads=default_grads ) # (B*N, cur_F) cur_act_input = tf.reshape(cur_act_input, (-1, cur_F)) # (B*N, F) cur_act_feats = self._input_mlp.call( cur_act_input, training=training ) F = self._input_mlp._filters[-1] cur_act_feats = tf.reshape(cur_act_feats, (B, 1, N, F)) # also run set features on cur_acts # (B, 1, N, F') cur_act_feats, F_p = self._get_set_feature( cur_act_feats, F ) else: if self._normalize_acts: # (B, N), (B, 1) nacts, norm = _safe_normalize(cur_acts, axis=-1) norm = tf.tile(norm, [1, N]) cur_act_feats = tf.stack([nacts, norm], axis=-1) F_p = 2 else: cur_act_feats = cur_acts F_p = 1 # prepare inputs for output_mlp # (B*N, U + F') feat = tf.concat([ last_h, tf.reshape(cur_act_feats, (B * N, F_p)) ], axis=-1) out = 
self._fwd_output_mlp(feat, training=training) # (B, N) out = tf.reshape(out, (B, N)) if self._fwd_apply_sigmoid: mask = tf.nn.sigmoid(out) else: mask = out # to avoid gradient underflow in the inner net, make mask # smaller than `mask_thresh` 0s # TODO: not sure if this is needed mask = tf.where( mask < self._mask_thresh, tf.zeros_like(mask), mask, ) return mask def first_backward(self, batch, training=None): """Returns the mask for backward gradient masking Parameters ---------- batch : dict "cur_acts" : (B, N) "cur_grads" : (B, N) """ cur_acts = batch['cur_acts'] cur_grads = batch['cur_grads'] B = tf.shape(cur_acts)[0] N = tf.shape(cur_acts)[1] # currently initial state is zeros h = tf.zeros((B * N, self._rnn_units), dtype=tf.float32) # prepare the branch from cur_acts if self._cur_reuse_branch: # (B, 1, N, cur_F) cur_input, cur_F = self._prepare_input_mlp_input( past_acts=cur_acts[:, tf.newaxis], past_grads=cur_grads[:, tf.newaxis], ) # (B*N, cur_F) cur_input = tf.reshape(cur_input, (B * N, cur_F)) # (B*N, F) cur_feats = self._input_mlp.call( cur_input, training=training ) F = self._input_mlp._filters[-1] cur_feats = tf.reshape(cur_feats, (B, 1, N, F)) # also run set features on cur_feats # (B, 1, N, F') cur_feats, F_p = self._get_set_feature( cur_feats, F ) else: if self._normalize_acts: # (B, N), (B, 1) nacts, norm = _safe_normalize(cur_acts, axis=-1) norm = tf.tile(norm, [1, N]) # (B, N, 2) cur_feats = tf.stack([nacts, norm], axis=-1) F_p = 2 else: cur_feats = cur_acts F_p = 1 if self._normalize_grads: ngrads, norm = _safe_normalize(cur_grads, axis=-1) norm = tf.tile(norm, [1, N]) cur_feats = tf.concat([ cur_feats, ngrads[..., tf.newaxis], norm[..., tf.newaxis] ], axis=-1) F_p += 2 else: cur_feats = tf.concat([ cur_feats, cur_grads[..., tf.newaxis] ], axis=-1) F_p += 1 # concat with current activation to feed into output_mlp # (B*N, U+F') feat = tf.concat([ h, tf.reshape(cur_feats, (B * N, F_p)) ], axis=-1) out = self._bwd_output_mlp(feat, training=training) 
# (B, N) out = tf.reshape(out, (B, N)) if self._bwd_return_grads: weights = tf.nn.softmax( tf.reshape(out, (B, N, 4)), axis=-1 ) grads = self._bwd_weighted_grads( cur_grads=cur_grads, weights=weights, ) return grads if self._bwd_apply_sigmoid: mask = tf.nn.sigmoid(out) else: mask = out # to avoid gradient underflow in the inner net, make mask # smaller than `mask_thresh` 0s mask = tf.where( mask < self._mask_thresh, tf.zeros_like(mask), mask, ) return mask def backward(self, batch, training=None): """Returns the mask for backward gradient masking Parameters ---------- batch : dict "past_grads" : (B, T, N) "past_acts" : (B, T, N) "cur_acts" : (B, N) "cur_grads" : (B, N) """ past_grads = batch['past_grads'] past_acts = batch['past_acts'] cur_acts = batch['cur_acts'] cur_grads = batch['cur_grads'] B = tf.shape(cur_acts)[0] N = tf.shape(cur_acts)[1] T = tf.shape(past_grads)[1] # (B, T, N, Fin) feat, Fin = self._prepare_input_mlp_input( past_grads=past_grads, past_acts=past_acts, ) feat = tf.reshape(feat, (-1, Fin)) #print("bwd Fin: {}".format(Fin)) # (B * T * N, F) feat = self._input_mlp.call(feat, training=training) F = self._input_mlp._filters[-1] feat = tf.reshape(feat, (B, T, N, F)) # (B, T, N, F') all_feats, F_p = self.get_set_feature(feat, F) #print("bwd Fp: {}".format(F_p)) if self._use_batch_summary: # (T, N, F') all_feats, F_p = self._get_batch_summary(all_feats, F_p) # (N, T, F') seq = tf.transpose(all_feats, (1, 0, 2)) # (N, U) last_h = self._rnn(seq, training=training) # (B, N, U) last_h = tf.tile(last_h[tf.newaxis], [B, 1, 1]) last_h = tf.reshape(last_h, (B * N, self._rnn_units)) else: # (B, N, T, F') seq = tf.transpose(all_feats, (0, 2, 1, 3)) seq = tf.reshape(seq, (B * N, T, F_p)) # (B*N, U) last_h = self._rnn(seq, training=training) # prepare the branch from cur_acts if self._cur_reuse_branch: # (B, 1, N, cur_F) cur_input, cur_F = self._prepare_input_mlp_input( past_acts=cur_acts[:, tf.newaxis], past_grads=cur_grads[:, tf.newaxis], ) # (B*N, cur_F) 
cur_input = tf.reshape(cur_input, (-1, cur_F)) # (B*N, F) cur_feats = self._input_mlp.call( cur_input, training=training ) F = self._input_mlp._filters[-1] cur_feats = tf.reshape(cur_feats, (B, 1, N, F)) # also run set features on cur_acts # (B, 1, N, F') cur_feats, F_p = self._get_set_feature( cur_feats, F ) else: if self._normalize_acts: # (B, N), (B, 1) nacts, norm = _safe_normalize(cur_acts, axis=-1) norm = tf.tile(norm, [1, N]) cur_feats = tf.stack([nacts, norm], axis=-1) F_p = 2 else: cur_feats = cur_acts F_p = 1 if self._normalize_grads: ngrads, norm = _safe_normalize(cur_grads, axis=-1) norm = tf.tile(norm, [1, N]) cur_feats = tf.concat([ cur_feats, ngrads[..., tf.newaxis], norm[..., tf.newaxis] ], axis=-1) F_p += 2 else: cur_feats = tf.concat([ cur_feats, cur_grads[..., tf.newaxis] ], axis=-1) F_p += 1 # prepare inputs for output_mlp # (B*N, U + F') feat = tf.concat([ last_h, tf.reshape(cur_feats, (B * N, F_p)) ], axis=-1) out = self._bwd_output_mlp(feat, training=training) if self._bwd_return_grads: weights = tf.nn.softmax( tf.reshape(out, (B, N, 4)), axis=-1 ) grads = self._bwd_weighted_grads( cur_grads=cur_grads, weights=weights, ) return grads # (B, N) out = tf.reshape(out, (B, N)) if self._bwd_apply_sigmoid: mask = tf.nn.sigmoid(out) else: mask = out # to avoid gradient underflow in the inner net, make mask # smaller than `mask_thresh` 0s # TODO: not sure if this is needed mask = tf.where( mask < self._mask_thresh, tf.zeros_like(mask), mask, ) return mask def _prepare_input_mlp_input(self, past_grads, past_acts): if self._normalize_acts: # (B, T, N), (B, T, 1) nacts, norm = _safe_normalize(past_acts, axis=2) N = tf.shape(nacts)[-1] # (B, T, N) norm = tf.tile(norm, [1, 1, N]) # (B, T, N, 2) feat = tf.stack([nacts, norm], axis=-1) F = 2 else: # (B, T, N, 1) feat = past_acts[..., tf.newaxis] F = 1 if self._normalize_grads: # (B, T, N), (B, T, 1) ngrads, norm = _safe_normalize(past_grads, axis=2) N = tf.shape(ngrads)[-1] # (B, T, N) norm = tf.tile(norm, 
[1, 1, N]) # (B, T, N, F+2) feat = tf.concat([ feat, ngrads[..., tf.newaxis], norm[..., tf.newaxis] ], axis=-1) F = F + 2 else: feat = tf.concat([feat, past_grads[..., tf.newaxis]]) F = F + 1 return feat, F def get_set_feature(self, feat, F): """Returns the features extracted based on sets. Parameters ---------- feat : tf.Tensor, shape (B, T, N, F) `N` is the dimension for the set F : int The number of channels for the input feature Returns ------- set_feat : tf.Tensor, shape (B, T, N, F') F' : int The number of channels of the output feature """ if not self._use_node_set and not self._use_batch_set: # if coordinate-wise, use original features return feat, F if self._use_node_set: if self._node_set_version == 'v1': feat, F = self._get_node_set_feature(feat, F) elif self._node_set_version == 'v2': feat, F = self._get_node_set_feature_v2(feat, F) elif self._node_set_version == 'v3': # (B, T, N, Fn) feat, F = self._get_node_set_feature_v3(feat, F) else: raise ValueError() if self._use_batch_set: feat_b, Fb = self._get_batch_set_feature(feat, F) if self._use_node_set: feat = tf.concat([feat, feat_b], axis=-1) F = F + Fb else: feat = feat_b F = Fb return feat, F def _get_node_set_feature(self, feat, F): """Returns the features extracted based on sets. 
Parameters ---------- feat : tf.Tensor, shape (B, T, N, F) `N` is the dimension for the set F : int The number of channels for the input feature Returns ------- set_feat : tf.Tensor, shape (B, T, N, F') F' : int The number of channels of the output feature """ B = tf.shape(feat)[0] T = tf.shape(feat)[1] # (BT, N, F) feat = tf.reshape(feat, (B * T, -1, F)) # obtain pair-wise feats for nodes # (BT, N, 1, F) src_feat = feat[:, :, tf.newaxis, :] # (BT, 1, N, F) dst_feat = feat[:, tf.newaxis, :, :] N = tf.shape(feat)[1] BT = B * T if self._dist_fn == 'diff': # (BT, N, N, F) dist = dst_feat - src_feat self_feat = feat elif self._dist_fn == 'dot': # (BT, N, N, F) dist = dst_feat * src_feat self_feat = feat elif self._dist_fn == 'norm_dot': n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1) n_src_feat, _ = _safe_normalize(src_feat, axis=-1) dist = tf.reduce_sum( n_dst_feat * n_src_feat, axis=-1, keepdims=True ) self_feat = tf.ones([BT, N, 1]) F = 1 elif self._dist_fn == 'concat': # (BT, N, N, F*2) dist = tf.concat([ tf.tile(src_feat, [1, 1, N, 1]), tf.tile(dst_feat, [1, N, 1, 1]) ], axis=-1) # (BT, N, F*2) self_feat = tf.concat([feat, feat], axis=-1) F = F * 2 elif self._dist_fn == 'none': # need to tile the first `N` dimension and not the 2nd # (BT, N, N, F) dist = tf.tile(dst_feat, [1, N, 1, 1]) # (BT, N, F) self_feat = feat else: raise ValueError() # (N, N, B*T, F) dist = tf.transpose(dist, (1, 2, 0, 3)) # Aggregate over node features # Create an "other" mask mask = tf.ones((N, N)) - tf.eye(N) # (N * (N-1), 2) to_take = tf.where(mask > 0.5) # (N * (N-1), BT, F) gathered = tf.gather_nd(dist, to_take) # (N, N-1, BT, F) other_feat = tf.reshape(gathered, (N, N - 1, BT, F)) # So, what are some options after here? # I have NxN pairwise distance, and eventually I want to # reduce to N and the RNN will share weights among the N # nodes. # It'd be quite intuitive to apply attention of some form to # see what are the other nodes that a node should pay # attention to. 
So feature for RNN input would be # concat(self_feat, att(other_feat)) # I don't want to directly aggregate from NxN -> N without # distinguish self-vs-other because, well, seems like a # useful distinction. # But perhaps I'll start with some hard coded aggregation # (BT, N, N-1, F) other_feat = tf.transpose(other_feat, (2, 0, 1, 3)) # (BT, N, F) other_mean = tf.reduce_mean(other_feat, axis=2) other_min = tf.reduce_min(other_feat, axis=2) other_max = tf.reduce_max(other_feat, axis=2) # put them together agg_feats = [self_feat, other_mean, other_min, other_max] n_agg_feats = len(agg_feats) # (BT, N, F*n_agg_feats) all_feats = tf.concat(agg_feats, axis=-1) all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats)) return all_feats, F * n_agg_feats def _get_node_set_feature_v2(self, feat, F): """Returns the features extracted based on sets. Skip the pairwise distance as in v1 as it takes too much memory. Start looking at aggregation stats directly. Parameters ---------- feat : tf.Tensor, shape (B, T, N, F) `N` is the dimension for the set F : int The number of channels for the input feature Returns ------- set_feat : tf.Tensor, shape (B, T, N, F') F' : int The number of channels of the output feature """ B = tf.shape(feat)[0] T = tf.shape(feat)[1] N = tf.shape(feat)[2] def _other_stats(self_idx): # (N,) self_idx_one_hot = tf.one_hot(self_idx, depth=N) # (N-1, B, T, F) other_feat = tf.gather( tf.transpose(feat, (2, 0, 1, 3)), # (N, B, T, F) tf.where(self_idx_one_hot < 0.5)[:, 0], # (N-1, 1) ) # (B, T, N-1, F) other_feat = tf.transpose(other_feat, (1, 2, 0, 3)) # (B, T, F) other_min = tf.reduce_min(other_feat, axis=2) other_max = tf.reduce_max(other_feat, axis=2) other_mean = tf.reduce_mean(other_feat, axis=2) # (B, T, F * 3) return tf.concat( [other_min, other_max, other_mean], axis=-1 ) self_idxs = tf.range(N) # (N, B, T, F * 3) other_feats = tf.map_fn( fn=_other_stats, elems=self_idxs, fn_output_signature=tf.float32, ) # (B, T, N, F*3) other_feats = 
tf.transpose(other_feats, (1, 2, 0, 3)) # (B, T, N, F*4) all_feats = tf.concat([feat, other_feats], axis=-1) return all_feats, F * 4 def _get_node_set_feature_v3(self, feat, F): """Returns the features extracted based on sets. Skip the pairwise distance as in v1 as it takes too much memory. Start looking at aggregation stats directly. Skip self vs other and just use self vs all. Parameters ---------- feat : tf.Tensor, shape (B, T, N, F) `N` is the dimension for the set F : int The number of channels for the input feature Returns ------- set_feat : tf.Tensor, shape (B, T, N, F') F' : int The number of channels of the output feature """ B = tf.shape(feat)[0] T = tf.shape(feat)[1] N = tf.shape(feat)[2] # (B, T, 1, F) all_min = tf.reduce_min(feat, axis=2, keepdims=True) all_max = tf.reduce_max(feat, axis=2, keepdims=True) all_mean = tf.reduce_mean(feat, axis=2, keepdims=True) # (B, T, 1, F*3) all_feats = tf.concat([all_min, all_max, all_mean], axis=-1) # (B, T, N, F*3) all_feats = tf.tile(all_feats, [1, 1, N, 1]) # (B, T, N, F*4) all_feats = tf.concat([feat, all_feats], axis=-1) return all_feats, F * 4 def _get_batch_summary(self, feat, F): """Returns some summary of the current batch. Reduces over the batch dimension Parameters ---------- feat : tf.Tensor, shape (B, ..., F) F : int Returns ------- summary : tf.Tensor, shape (..., F') F' : int """ bmean = tf.reduce_mean(feat, axis=0) bmin = tf.reduce_min(feat, axis=0) bmax = tf.reduce_max(feat, axis=0) feat = tf.concat([bmean, bmin, bmax], axis=-1) F = F * 3 return feat, F def _bwd_weighted_grads(self, cur_grads, weights): # cur_grads: (B, N) # weights: (B, N, 4) # (B, 1, N, 4) set_grads, F_p = self._get_node_set_feature_v2( cur_grads[:, tf.newaxis, :, tf.newaxis], F=1, ) # (B, N) weighted_grads = tf.reduce_sum( weights * set_grads[:, 0], axis=-1 ) return weighted_grads def _get_batch_set_feature(self, feat, F): """Returns the features extracted based on sets. 
Parameters ---------- feat : tf.Tensor, shape (B, T, N, F) `N` is the dimension for the set F : int The number of channels for the input feature Returns ------- set_feat : tf.Tensor, shape (B, T, N, F') F' : int The number of channels of the output feature """ B = tf.shape(feat)[0] T = tf.shape(feat)[1] N = tf.shape(feat)[2] # (B, TN, F) feat = tf.reshape(feat, (B, -1, F)) # obtain pair-wise feats for nodes # (B, 1, TN, F) src_feat = feat[:, tf.newaxis, :, :] # (1, B, TN, F) dst_feat = feat[tf.newaxis, :, :, :] TN = T * N if self._dist_fn == 'diff': # (B, B, TN, F) dist = dst_feat - src_feat self_feat = feat elif self._dist_fn == 'dot': # (B, B, TN, F) dist = dst_feat * src_feat self_feat = feat elif self._dist_fn == 'norm_dot': n_dst_feat, _ = _safe_normalize(dst_feat, axis=-1) n_src_feat, _ = _safe_normalize(src_feat, axis=-1) dist = tf.reduce_sum( n_dst_feat * n_src_feat, axis=-1, keepdims=True ) self_feat = tf.ones([B, TN, 1]) F = 1 elif self._dist_fn == 'concat': # (B, B, TN, F*2) dist = tf.concat([ tf.tile(src_feat, [1, B, 1, 1]), tf.tile(dst_feat, [B, 1, 1, 1]), ], axis=-1) # (B, TN, F*2) self_feat = tf.concat([feat, feat], axis=-1) F = F * 2 elif self._dist_fn == 'none': # (B, B, TN, F) dist = tf.tile(dst_feat, [B, 1, 1, 1]) # (B, TN, F) self_feat = feat else: raise ValueError() # Aggregate over node features # Create an "other" mask mask = tf.ones((B, B)) - tf.eye(B) # here # (B * (B-1), 2) to_take = tf.where(mask > 0.5) # (B * (B-1), TN, F) gathered = tf.gather_nd(dist, to_take) # (B, B-1, TN, F) other_feat = tf.reshape(gathered, (B, B - 1, TN, F)) # (B, TN, F) other_mean = tf.reduce_mean(other_feat, axis=1) other_min = tf.reduce_min(other_feat, axis=1) other_max = tf.reduce_max(other_feat, axis=1) # put them together agg_feats = [self_feat, other_mean, other_min, other_max] n_agg_feats = len(agg_feats) # (B, TN, F*n_agg_feats) all_feats = tf.concat(agg_feats, axis=-1) all_feats = tf.reshape(all_feats, (B, T, N, F * n_agg_feats)) return all_feats, F * 
n_agg_feats def _get_default_grads(self, past_grads, past_acts, cur_acts): if self._random_grads_stddev is not None: default_grads = tf.random.normal( shape=(B, 1, N), stddev=self._random_grads_stddev ) elif self._use_nearest_grads: # TODO: can look at other batch instances too # which would create a (B, B, T, N) diff # TODO: can limit the time window that we look back # (B, T, N) diff = tf.math.abs(cur_acts[:, tf.newaxis] - past_acts) # (B, N) closest_idx = tf.math.argmin(diff, axis=1) # (B * N, 1) closest_idx = tf.reshape(closest_idx, (B * N, 1)) idx = tf.range(B * N) # (B * N, 2) closest_idx = tf.concat( [closest_idx, idx[..., tf.newaxis]], axis=-1 ) # (T, B, N) pg = tf.transpose(past_grads, (1, 0, 2)) # (T, B * N) pg = tf.reshape(pg, (T, B * N)) # (B * N) closest_grads = tf.gather_nd(pg, closest_idx) default_grads = tf.reshape(closest_grads, (B, 1, N)) else: default_grads = tf.zeros((B, 1, N), dtype=tf.float32) return default_grads def train_callback(self): self._input_mlp.train_callback() if self._fwd_output_mlp is not None: self._fwd_output_mlp.train_callback() if self._bwd_output_mlp is not None: self._bwd_output_mlp.train_callback() def _safe_normalize(tensor, axis, eps=1e-8): tensor, norm = tf.linalg.normalize(tensor + eps, axis=axis) return tensor, norm
32.23138
74
0.500909
4,328
32,457
3.527957
0.077172
0.008514
0.007663
0.006025
0.722051
0.662388
0.631738
0.61471
0.58943
0.561661
0
0.015437
0.381305
32,457
1,006
75
32.263419
0.744933
0.216594
0
0.58669
0
0
0.00971
0
0
0
0
0.002982
0.003503
1
0.031524
false
0
0.001751
0
0.06655
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
999ac636b504000a178a19bf4c2635260e522f9b
1,920
py
Python
tempset/package_data.py
IMMM-SFA/tempset
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
[ "BSD-2-Clause" ]
null
null
null
tempset/package_data.py
IMMM-SFA/tempset
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
[ "BSD-2-Clause" ]
null
null
null
tempset/package_data.py
IMMM-SFA/tempset
86da8415cda47a2158cc1a481b0fa8ceaf5f2e1e
[ "BSD-2-Clause" ]
null
null
null
import pkg_resources def get_example_eplus_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/json/eplus_params.json') def get_example_batch_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/json/batch_params.json') def get_example_htgsetp_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/json/htgsetp_params.json') def get_example_htgsetp_params_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/electric/htgsetp_params_electric.csv') def get_example_clgsetp_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/json/clgsetp_params.json') def get_example_summary_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/electric/summary.zip') def get_example_idd_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/eplus/Energy+.idd') def get_example_electric_idf_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/idf/electric.idf') def get_example_gas_idf_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/idf/gas.idf') def get_example_main_idf_file(): """Convenience wrapper to retrieve file path from package data.""" return pkg_resources.resource_filename('tempset', 'data/idf/main.idf')
30.967742
98
0.758854
253
1,920
5.517787
0.134387
0.094556
0.093123
0.17192
0.82808
0.795129
0.752149
0.752149
0.752149
0.752149
0
0
0.13125
1,920
61
99
31.47541
0.83693
0.317188
0
0
0
0
0.257143
0.175397
0
0
0
0
0
1
0.47619
true
0
0.047619
0
1
0
0
0
0
null
0
0
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
0
0
0
0
0
0
9
999b30e3b541222c64dd017084efe4cadab334f9
5,193
py
Python
imessage_extractor/src/helpers/utils.py
tsouchlarakis/imessage-extractor
e77bee947e19ac3f30ffd60faf7d444ded336b3b
[ "MIT" ]
1
2021-12-17T05:41:49.000Z
2021-12-17T05:41:49.000Z
imessage_extractor/src/helpers/utils.py
tsouchlarakis/imessage-extractor
e77bee947e19ac3f30ffd60faf7d444ded336b3b
[ "MIT" ]
2
2021-08-22T02:15:40.000Z
2022-01-16T23:15:01.000Z
imessage_extractor/src/helpers/utils.py
tsouchlarakis/imessage-extractor
e77bee947e19ac3f30ffd60faf7d444ded336b3b
[ "MIT" ]
null
null
null
import os import pathlib import re import typing def fmt_seconds(time_in_sec: int, units: str='auto', round_digits: int=4) -> dict: """ Format time in seconds to a custom string. `units` parameter can be one of 'auto', 'seconds', 'minutes', 'hours' or 'days'. """ if units == 'auto': if time_in_sec < 60: time_diff = round(time_in_sec, round_digits) time_measure = 'seconds' elif time_in_sec >= 60 and time_in_sec < 3600: time_diff = round(time_in_sec/60, round_digits) time_measure = 'minutes' elif time_in_sec >= 3600 and time_in_sec < 86400: time_diff = round(time_in_sec/3600, round_digits) time_measure = 'hours' else: time_diff = round(time_in_sec/86400, round_digits) time_measure = 'days' elif units in ['seconds', 'minutes', 'hours', 'days']: time_measure = units if units == 'seconds': time_diff = round(time_in_sec, round_digits) elif units == 'minutes': time_diff = round(time_in_sec/60, round_digits) elif units == 'hours': time_diff = round(time_in_sec/3600, round_digits) else: # Days time_diff = round(time_in_sec/86400, round_digits) return dict(zip(['units', 'value'], [time_measure, time_diff])) def human_filesize(nbytes: int) -> str: """ Convert number of bytes to human-readable filesize string. Source: https://stackoverflow.com/questions/5194057/better-way-to-convert-file-sizes-in-python """ base = 1 for unit in ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']: n = nbytes / base if n < 9.95 and unit != 'B': # Less than 10 then keep 1 decimal place value = '{:.1f} {}'.format(n, unit) return value if round(n) < 1000: # Less than 4 digits so use this value = f'{round(n)} {unit}' return value base *= 1024 value = f'{round(n)} {unit}' return value def strip_ws(string: str): """ Strip whitespace off a string and replace all instances of >1 space with a single space. """ return re.sub(r'\s+', ' ', string.strip()) def ensurelist(val: typing.Any) -> list: """ Accept a string or list and ensure that it is formatted as a list. If `val` is not a list, return [val]. 
If `val` is already a list, return as is. """ return [val] if not isinstance(val, list) else val def listfiles(path: typing.Union[str, pathlib.Path]='.', ext=None, pattern=None, ignore_case=True, full_names=False, recursive=False, include_hidden=True) -> list: """ List files in a given directory. path (str): absolute path to search for files in ext (str): optional file extension or list of extensions to filter resulting files by pattern (str): optional filter resulting files by matching regex pattern ignore_case (bool): do not consider case in when filtering for `pattern` parameter full_names (bool): return absolute filepaths recursive (bool): search recursively down the directory tree include_hidden (bool): include hidden files in resulting file list """ owd = os.getcwd() os.chdir(path) if recursive: fpaths = [] for root, dpaths, filenames in os.walk('.'): for f in filenames: fpaths.append(os.path.join(root, f).replace('./', '')) else: fpaths = [f for f in os.listdir() if os.path.isfile(f)] if not include_hidden: fpaths = [f for f in fpaths if not os.path.basename(f).startswith('.')] if pattern is not None: if ignore_case: fpaths = [f for f in fpaths if re.search(pattern, f, re.IGNORECASE)] else: fpaths = [f for f in fpaths if re.search(pattern, f)] if ext: ext = [x.lower() for x in ensurelist(ext)] ext = ['.' + x if not x.startswith('.') else x for x in ext] fpaths = [x for x in fpaths if os.path.splitext(x)[1].lower() in ext] if full_names: path_expand = os.getcwd() if path == '.' else path fpaths = [os.path.join(path_expand, f) for f in fpaths] os.chdir(owd) return fpaths def duplicated(lst: list) -> list: """ Return list of boolean values indicating whether each item in a list is a duplicate of a previous item in the list. Order matters! """ dup_ind = [] for i, item in enumerate(lst): tmplist = lst.copy() del tmplist[i] if item in tmplist: # Test if this is the first occurrence of this item in the list. 
If so, do not # count as duplicate, as the first item in a set of identical items should not # be counted as a duplicate first_idx = min( [i for i, x in enumerate(tmplist) if x == item]) if i != first_idx: dup_ind.append(True) else: dup_ind.append(False) else: dup_ind.append(False) return dup_ind
32.254658
98
0.586366
726
5,193
4.093664
0.285124
0.030283
0.042396
0.04576
0.171938
0.153432
0.146366
0.121131
0.098923
0.024899
0
0.018283
0.304833
5,193
161
99
32.254658
0.804986
0.269209
0
0.225806
0
0
0.04267
0
0
0
0
0
0
1
0.064516
false
0
0.043011
0
0.193548
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
999b7e222c07ec4e4045ccf0fbb8d6e8afb0520b
575
py
Python
pandapipes/component_models/abstract_models/__init__.py
ggrrll/pandapipes
636c45bfeb05feb9f4700864070086adfd74b8cf
[ "BSD-3-Clause" ]
48
2020-02-14T13:16:31.000Z
2022-03-30T07:15:55.000Z
pandapipes/component_models/abstract_models/__init__.py
ggrrll/pandapipes
636c45bfeb05feb9f4700864070086adfd74b8cf
[ "BSD-3-Clause" ]
279
2020-02-20T13:06:56.000Z
2022-03-14T12:29:59.000Z
pandapipes/component_models/abstract_models/__init__.py
ggrrll/pandapipes
636c45bfeb05feb9f4700864070086adfd74b8cf
[ "BSD-3-Clause" ]
30
2020-02-14T15:38:24.000Z
2022-02-21T13:37:12.000Z
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics # and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved. # Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. from .base_component import * from .branch_models import * from .branch_w_internals_models import * from .branch_wo_internals_models import * from .branch_wzerolength_models import * from .node_element_models import * from .node_models import * from .const_flow_models import * from .circulation_pump import *
41.071429
99
0.803478
86
575
5.186047
0.616279
0.179372
0.251121
0.147982
0.139013
0
0
0
0
0
0
0.016194
0.14087
575
13
100
44.230769
0.88664
0.446957
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
3
999ba25c335559c3e34efe18682e396868769eb8
1,651
py
Python
src/modlunky2/sprites/monsters/big.py
mriswithe/modlunky2
20de5c7d226df7134cf87a9b9351fc9c28a89d6a
[ "Apache-2.0" ]
null
null
null
src/modlunky2/sprites/monsters/big.py
mriswithe/modlunky2
20de5c7d226df7134cf87a9b9351fc9c28a89d6a
[ "Apache-2.0" ]
null
null
null
src/modlunky2/sprites/monsters/big.py
mriswithe/modlunky2
20de5c7d226df7134cf87a9b9351fc9c28a89d6a
[ "Apache-2.0" ]
null
null
null
from pathlib import Path from ..base_classes.base_sprite_loader import BaseSpriteLoader class Big1(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig01.png") _chunk_size = 128 _chunk_map = { "cavemanboss": (0, 0, 2, 2), "giantspider": (0, 10, 2, 12), "queenbee": (0, 14, 2, 16), } class Big2(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig02.png") _chunk_size = 128 _chunk_map = { "mummy": (0, 0, 2, 2), "anubis": (4, 8, 6, 11), "anubis2": (2, 8, 4, 10), } class Big3(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig03.png") _chunk_size = 128 _chunk_map = { "lamassu": (0, 0, 2, 2), "yeti_king": (0, 4, 2, 6), "yeti_queen": (0, 10, 2, 12), } class Big4(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig04.png") _chunk_size = 128 _chunk_map = { "crabman": (0, 0, 2, 2), "lavamander": (10, 4, 12, 6), "giantfly": (0, 12, 2, 14), } class Big5(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig05.png") _chunk_size = 128 _chunk_map = { "ammit": (0, 4, 2, 5), "apep": (8, 0, 10, 2), "madametusk": (0, 8, 2, 10), "giant_frog": (0, 13, 3, 16), "minister": (0, 10, 1, 13), } class Big6(BaseSpriteLoader): _sprite_sheet_path = Path("Data/Textures/monstersbig06.png") _chunk_size = 128 _chunk_map = { "kingu": (0, 0, 5, 6), "waddler": (0, 12, 2, 14), "humphead": (0, 14, 4, 16), }
25.015152
64
0.562689
211
1,651
4.175355
0.312796
0.14983
0.183882
0.211124
0.476731
0.476731
0.320091
0
0
0
0
0.115641
0.271956
1,651
65
65
25.4
0.617304
0
0
0.230769
0
0
0.207147
0.112659
0
0
0
0
0
1
0
false
0
0.038462
0
0.5
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
999c1286569e2835ef7654c27f554b9341e671ee
590
py
Python
tools/formats parser/match_parser.py
TheUberCatman/pastebin_rust_api
11441311ca26c9f81539ec7302ddda49528e62a0
[ "Apache-2.0" ]
1
2017-05-30T07:33:56.000Z
2017-05-30T07:33:56.000Z
tools/formats parser/match_parser.py
Catman155/pastebin_rust_api
11441311ca26c9f81539ec7302ddda49528e62a0
[ "Apache-2.0" ]
1
2018-03-09T19:11:38.000Z
2018-03-09T19:11:38.000Z
tools/formats parser/match_parser.py
Catman155/pastebin_rust_api
11441311ca26c9f81539ec7302ddda49528e62a0
[ "Apache-2.0" ]
null
null
null
# Source of values.txt: 'https://pastebin.com/api/' values = [] with open('values.txt', 'r') as myfile: data = myfile.read() data = data.split("\n") for d in data: result = d.split(" = ") values.append(result[0].replace(" ", "")) # rust_formats.txt is the list of the Enum present in src/paster/format.rs with open('rust_formats.txt', 'r') as myfile: data = myfile.read() data = data.replace("\n", "").replace(" ", "") data = data.split(",") i = 0 for d in data: print("&Format::" + d + " => \"" + values[i] + "\",") i += 1
29.5
74
0.538983
82
590
3.853659
0.463415
0.075949
0.037975
0.075949
0.21519
0.21519
0.21519
0.21519
0.21519
0
0
0.006757
0.247458
590
19
75
31.052632
0.704955
0.20678
0
0.266667
0
0
0.146237
0
0
0
0
0
0
1
0
false
0
0
0
0
0.066667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
999c8e70e080ff7ed7117aa1db45dd0b42791638
2,545
py
Python
Decrypt.py
momma-regen/P-C_Gif_Ripper
f6d4b8d84144113953abc3969544b5117adb2a12
[ "Unlicense" ]
null
null
null
Decrypt.py
momma-regen/P-C_Gif_Ripper
f6d4b8d84144113953abc3969544b5117adb2a12
[ "Unlicense" ]
null
null
null
Decrypt.py
momma-regen/P-C_Gif_Ripper
f6d4b8d84144113953abc3969544b5117adb2a12
[ "Unlicense" ]
null
null
null
import regex as re from math import ceil from typing import List from ByteReader import Reader, SeekOrigin as so from DataTypes import int_32 class rpg_file: offset: int_32 = int_32(0) size: int_32 = int_32(0) key: int_32 = int_32(0) name: str def decrypt_name(data: bytes, key: int|int_32) -> str: if type(key) != int_32: key = int_32(key) key.to_unsigned() decrypted_name: bytes = b"" key_bytes = key.to_bytes() j = 0 for i in range(len(data)): if j == 4: j = 0 decrypted_name += int_32(data[i] ^ (key_bytes[j] if j < len(key_bytes) else 0)).to_bytes() j += 1 return decrypted_name.decode("utf-8") def read_archive(file_path: str, match_str: str = None) -> List[rpg_file]: reader: Reader = Reader(file_path) reader.seek(8, so.Begin) key = reader.read_int32().to_unsigned() key *= 9 key += 3 files: List[rpg_file] = [] while(1): file = rpg_file() file.offset = int_32(reader.read_int32() ^ key) file.size = int_32(reader.read_int32() ^ key) file.key = int_32(reader.read_int32() ^ key).to_unsigned() length = int_32(reader.read_int32() ^ key) if file.offset < 0 or reader._p + length >= len(reader._data): break try: file.name = decrypt_name(reader.read_bytes(length), key).replace("\\", "/") if match_str is not None and not re.match(match_str, file.name, flags=re.IGNORECASE): continue files.append(file) except Exception as e: print('skipping: ' + str(e)) break return files def decrypt(files: List[rpg_file], file_location: str, save_location: str) -> None: reader = Reader(file_location) for file in files: file_name = file.name.split("/")[-1:][0] reader.seek(file.offset, so.Begin) data: bytes = reader.read_bytes(file.size) decrypted_file = b"" key = file.key.to_unsigned() key_bytes = key.to_bytes() + b'\x00\x00\x00\x00' j = 0 for i in range(len(data)): if j == 4: j = 0 key *= 7 key += 3 key_bytes = key.to_bytes() + b'\x00\x00\x00\x00' result = data[i] ^ key_bytes[j] decrypted_file += result.to_bytes(1, 'little') j += 1 open(f"{save_location.rstrip('/')}/{file_name}", 
"wb").write(decrypted_file)
31.036585
107
0.559528
359
2,545
3.788301
0.250696
0.055147
0.055147
0.044118
0.215441
0.157353
0.123529
0.083824
0.083824
0.083824
0
0.045066
0.311198
2,545
82
108
31.036585
0.730747
0
0
0.21875
0
0
0.039757
0.015822
0
0
0
0
0
1
0.046875
false
0
0.078125
0
0.234375
0.015625
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
999d5936d86373d84e9ba7d3368a0e7424c747d6
14,919
py
Python
gdmtl/datasets/mtl_dataset.py
binshengliu/gdmtl
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
[ "MIT" ]
null
null
null
gdmtl/datasets/mtl_dataset.py
binshengliu/gdmtl
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
[ "MIT" ]
null
null
null
gdmtl/datasets/mtl_dataset.py
binshengliu/gdmtl
fb8bfe0e87bbd6d8535cc8449012fb4119430d4c
[ "MIT" ]
1
2022-02-26T00:49:03.000Z
2022-02-26T00:49:03.000Z
from __future__ import annotations import logging from typing import Any, Dict, Mapping, Optional, Union import numpy as np import torch from transformers import PreTrainedTokenizer from .assembler import Assembler from .qa_dataset import QADataset from .rank_dataset import RankGroupDataset from .tsv_dataset import TsvCollection from .utils import ( make_targets_mlm_inputs, make_targets_ntp_inputs, mask_difference, mask_whole_word, ) log = logging.getLogger(__name__) class MtlSepDataset(RankGroupDataset): def __init__( self, array: Mapping[str, np.ndarray], tokenizer: PreTrainedTokenizer, query_col: TsvCollection, doc_col: TsvCollection, num_dup: int, num_neg: int, decoder_start_token_id: int, src_max_length: int, tgt_max_length: int, sample: Optional[Union[float, int]] = None, sort: Optional[str] = None, max_length: Optional[int] = None, summarizer_prefix_token_ids: Optional[str] = None, rank_prefix_token_ids: Optional[str] = None, pad_to_max_length: bool = True, **kwargs: Any, ): if kwargs: log.warning(f"Unused parameters: {kwargs}") super().__init__( array, tokenizer, query_col, doc_col, num_dup, num_neg, sample, sort, max_length, summarizer_prefix_token_ids, pad_to_max_length, ) self._pas_pad = tokenizer.pad_token_id self._sum_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=summarizer_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) self._rank_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=rank_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) decoder_start_token = tokenizer.decode(decoder_start_token_id) self._decoder_assembler = Assembler( tokenizer=tokenizer, max_length=tgt_max_length, prefix_token_ids=decoder_start_token, pad_to_max_length=False, add_special_tokens=False, return_token_type_ids=None, ) self._label_assembler = Assembler( tokenizer=tokenizer, max_length=tgt_max_length, suffix_token_ids=tokenizer.eos_token, pad_to_max_length=False, 
add_special_tokens=False, return_token_type_ids=None, ) def __getitem__(self, index: int) -> Dict[str, torch.Tensor]: qid = self._array["qid"][index] did = self._array["did"][index] label = self._array["label"][index] assert qid.shape == (self._num_neg + 1,) assert did.shape == (self._num_neg + 1,) assert label.shape == (self._num_neg + 1,) queries = [self._query_col[x] for x in qid] passages = [self._doc_col[x] for x in did] sum_inputs = self._sum_assembler.batch_assemble(passages) sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries) lm_labels = self._label_assembler.batch_assemble(queries) lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100) rank_inputs = self._rank_assembler.batch_assemble(passages) rank_decoder_inputs = self._decoder_assembler.batch_assemble(queries) item: Dict[str, Any] = { "qids": torch.tensor([int(x) for x in qid]), "dnos": torch.tensor([int(x) for x in did]), "sum_input_ids": sum_inputs["input_ids"], "sum_attention_mask": sum_inputs["attention_mask"], "sum_decoder_input_ids": sum_decoder_inputs["input_ids"], "sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"], "rank_input_ids": rank_inputs["input_ids"], "rank_attention_mask": rank_inputs["attention_mask"], "rank_decoder_input_ids": rank_decoder_inputs["input_ids"], "rank_decoder_attention_mask": rank_decoder_inputs["attention_mask"], "lm_labels": lm_labels["input_ids"], } assert item["sum_input_ids"].dim() == 2 assert item["sum_attention_mask"].dim() == 2 assert item["sum_decoder_input_ids"].dim() == 2 assert item["sum_decoder_attention_mask"].dim() == 2 assert item["rank_input_ids"].dim() == 2 assert item["rank_attention_mask"].dim() == 2 assert item["rank_decoder_input_ids"].dim() == 2 assert item["rank_decoder_attention_mask"].dim() == 2 assert item["lm_labels"].dim() == 2 return item class MtlMixedDataset(RankGroupDataset): def __init__( self, array: Mapping[str, np.ndarray], tokenizer: PreTrainedTokenizer, query_col: 
TsvCollection, doc_col: TsvCollection, num_dup: int, num_neg: int, src_max_length: int, sample: Optional[Union[float, int]] = None, sort: Optional[str] = None, max_length: Optional[int] = None, summarizer_prefix_token_ids: Optional[str] = None, rank_prefix_token_ids: Optional[str] = None, pad_to_max_length: bool = True, qa_data: Optional[Union[QADataset, str]] = None, qa_prefix: str = "", mask_whole_word_prob: float = 0.0, mask_qgen_query: bool = False, mask_query_from_passage: float = 0.0, min_rel_for_qgen: int = 1, **kwargs: Any, ): if kwargs: log.warning(f"Unused params {kwargs}") super(MtlMixedDataset, self).__init__( array, tokenizer, query_col, doc_col, num_dup, num_neg, sample, sort, max_length, summarizer_prefix_token_ids, pad_to_max_length, ) self._pas_pad = tokenizer.pad_token_id self._tokenizer = tokenizer self._mask_whole_word_prob = mask_whole_word_prob self._mask_query_from_passage = mask_query_from_passage self._mask_qgen_query = mask_qgen_query self._min_rel_for_qgen = min_rel_for_qgen self._sum_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=summarizer_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) self._rank_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=rank_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) if qa_data is not None: if isinstance(qa_data, str): self._qa = QADataset( path=qa_data, tokenizer=tokenizer, max_length=max_length, prefix=qa_prefix, ) else: self._qa = qa_data def __getitem__(self, index: int) -> Dict[str, Any]: qid = self._array["qid"][index] did = self._array["did"][index] label = self._array["label"][index] assert qid.shape == (self._num_neg + 1,) assert did.shape == (self._num_neg + 1,) assert label.shape == (self._num_neg + 1,) if label[0] < self._min_rel_for_qgen: idx = (self._array["label"][:, 0] >= self._min_rel_for_qgen).nonzero()[0] sample = np.random.choice(idx) qgen_queries = [self._query_col[x] for x in 
self._array["qid"][sample]] passages = [self._doc_col[x] for x in self._array["did"][sample]] qgen_passages = [self._doc_col[x] for x in self._array["did"][sample]] sum_input_weights = torch.tensor( self._array["label"][sample][:1], dtype=torch.float ) else: qgen_queries = [self._query_col[x] for x in qid] passages = [self._doc_col[x] for x in did] qgen_passages = [self._doc_col[x] for x in did] sum_input_weights = torch.tensor(label[:1], dtype=torch.float) if self._mask_query_from_passage > 0.0: qgen_passages = [ mask_difference(self._tokenizer, x, y, self._mask_query_from_passage) for x, y in zip(qgen_passages, qgen_queries) ] if self._mask_whole_word_prob > 0: qgen_passages = [ mask_whole_word(self._tokenizer, x, self._mask_whole_word_prob) for x in qgen_passages ] if self._mask_qgen_query: sum_inputs = make_targets_mlm_inputs( self._assembler, self._tokenizer, passages[:1], qgen_queries[:1], qgen_passages[:1], ) else: sum_inputs = make_targets_ntp_inputs( self._assembler, self._tokenizer, passages[:1], qgen_queries[:1], qgen_passages[:1], ) rank_queries = [self._query_col[x] for x in qid] rank_passages = [self._doc_col[x] for x in did] rank_inputs = self._rank_assembler.batch_assemble(rank_passages, rank_queries) item: Dict[str, Any] = { "qids": torch.tensor([int(x) for x in qid]), "dnos": torch.tensor([int(x) for x in did]), "sum_input_ids": sum_inputs["input_ids"], "sum_token_type_ids": sum_inputs["token_type_ids"], "sum_attention_mask": sum_inputs["attention_mask"], "sum_input_weights": sum_input_weights, "rank_input_ids": rank_inputs["input_ids"], "rank_token_type_ids": rank_inputs["token_type_ids"], "rank_attention_mask": rank_inputs["attention_mask"], "lm_labels": sum_inputs["lm_labels"], } assert item["sum_input_ids"].dim() == 2 if self._mask_qgen_query: assert item["sum_attention_mask"].dim() == 2 else: assert item["sum_attention_mask"].dim() == 3 assert item["sum_token_type_ids"].dim() == 2 assert item["sum_input_weights"].dim() == 1 assert 
item["rank_input_ids"].dim() == 2 assert item["rank_token_type_ids"].dim() == 2 assert item["rank_attention_mask"].dim() == 2 assert item["lm_labels"].dim() == 2 if hasattr(self, "_qa"): pos_qid = qid[0] qa_inputs = {f"qa_{k}": v for k, v in self._qa.by_qid(pos_qid).items()} item.update(qa_inputs) return item class MtlCatDataset(RankGroupDataset): def __init__( self, array: Mapping[str, np.ndarray], tokenizer: PreTrainedTokenizer, query_col: TsvCollection, doc_col: TsvCollection, num_dup: int, num_neg: int, src_max_length: int, tgt_max_length: int, decoder_start_token_id: int, sample: Optional[Union[float, int]] = None, sort: Optional[str] = None, max_length: Optional[int] = None, summarizer_prefix_token_ids: Optional[str] = None, rank_prefix_token_ids: Optional[str] = None, pad_to_max_length: bool = True, **kwargs: Any, ): if kwargs: log.warning(f"Unused params {kwargs}") super().__init__( array, tokenizer, query_col, doc_col, num_dup, num_neg, sample, sort, max_length, summarizer_prefix_token_ids, pad_to_max_length, ) self._pas_pad = tokenizer.pad_token_id self._tokenizer = tokenizer self._sum_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=summarizer_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) self._rank_assembler = Assembler( tokenizer=tokenizer, max_length=src_max_length, prefix_token_ids=rank_prefix_token_ids, pad_to_max_length=pad_to_max_length, ) decoder_start_token = tokenizer.decode(decoder_start_token_id) self._decoder_assembler = Assembler( tokenizer=tokenizer, max_length=tgt_max_length, prefix_token_ids=decoder_start_token, pad_to_max_length=False, add_special_tokens=False, return_token_type_ids=None, ) self._label_assembler = Assembler( tokenizer=tokenizer, max_length=tgt_max_length, suffix_token_ids=tokenizer.eos_token, pad_to_max_length=False, add_special_tokens=False, return_token_type_ids=None, ) def __getitem__(self, index: int) -> Dict[str, Any]: qid = self._array["qid"][index] did = 
self._array["did"][index] label = self._array["label"][index] assert qid.shape == (self._num_neg + 1,) assert did.shape == (self._num_neg + 1,) assert label.shape == (self._num_neg + 1,) queries = [self._query_col[x] for x in qid] passages = [self._doc_col[x] for x in did] sum_inputs = self._sum_assembler.batch_assemble(passages) sum_decoder_inputs = self._decoder_assembler.batch_assemble(queries) lm_labels = self._label_assembler.batch_assemble(queries) lm_labels["input_ids"].masked_fill_(~lm_labels["attention_mask"].bool(), -100) rank_passages = [self._doc_col[x] for x in did] rank_inputs = self._rank_assembler.batch_assemble(rank_passages, queries) item: Dict[str, Any] = { "qids": torch.tensor([int(x) for x in qid]), "dnos": torch.tensor([int(x) for x in did]), "sum_input_ids": sum_inputs["input_ids"], "sum_attention_mask": sum_inputs["attention_mask"], "sum_decoder_input_ids": sum_decoder_inputs["input_ids"], "sum_decoder_attention_mask": sum_decoder_inputs["attention_mask"], "rank_input_ids": rank_inputs["input_ids"], "rank_attention_mask": rank_inputs["attention_mask"], "lm_labels": lm_labels["input_ids"], } assert item["sum_input_ids"].dim() == 2 assert item["sum_attention_mask"].dim() == 2 assert item["sum_decoder_input_ids"].dim() == 2 assert item["sum_decoder_attention_mask"].dim() == 2 assert item["rank_input_ids"].dim() == 2 assert item["rank_attention_mask"].dim() == 2 assert item["lm_labels"].dim() == 2 return item
36.65602
86
0.597359
1,793
14,919
4.559955
0.083659
0.060543
0.039384
0.037671
0.827299
0.793542
0.78498
0.756849
0.747065
0.728596
0
0.005754
0.301093
14,919
406
87
36.746305
0.778364
0
0
0.720548
0
0
0.088076
0.01917
0
0
0
0
0.093151
1
0.016438
false
0.068493
0.030137
0
0.063014
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
8
999e7ada6755057b2bc606106ac56df66fb8b441
1,446
py
Python
MachineInterface/mproxy/server/job_status.py
KTH-HPC/vestec-system
8168b90385468ca5e1ed701b5a0090e4423186c7
[ "BSD-3-Clause" ]
1
2021-10-31T08:41:58.000Z
2021-10-31T08:41:58.000Z
MachineInterface/mproxy/server/job_status.py
KTH-HPC/vestec-system
8168b90385468ca5e1ed701b5a0090e4423186c7
[ "BSD-3-Clause" ]
null
null
null
MachineInterface/mproxy/server/job_status.py
KTH-HPC/vestec-system
8168b90385468ca5e1ed701b5a0090e4423186c7
[ "BSD-3-Clause" ]
1
2022-02-08T16:57:05.000Z
2022-02-08T16:57:05.000Z
import time import datetime from dateutil.parser import parse class JobStatus: def __init__(self, queue_id, status, walltime, number_nodes, submit_time, start_time, end_time): self.queue_id=queue_id self.status=status self.walltime=walltime self.number_nodes=number_nodes self.submit_time="-" if submit_time == "Unknown" else submit_time self.start_time="-" if start_time == "Unknown" else start_time self.end_time="-" if end_time == "Unknown" else end_time def getQueueId(self): return self.queue_id def getStatus(self): return self.status def getWalltime(self): return self.walltime def getNumberNodes(self): return self.number_nodes def getQueueTime(self): if self.submit_time=="-" or self.start_time == "-": return "-" submit=time.mktime(parse(self.submit_time).timetuple()) start=time.mktime(parse(self.start_time).timetuple()) return str(start-submit) def getRunTime(self): if self.start_time == "-" or self.end_time == "-": return "-" start=time.mktime(parse(self.start_time).timetuple()) end=time.mktime(parse(self.end_time).timetuple()) return str(end-start) def toString(self): return self.queue_id+" "+self.status+" "+self.walltime+" "+self.number_nodes+" "+self.getQueueTime() +" " +self.getRunTime()
32.863636
132
0.644537
180
1,446
4.988889
0.2
0.100223
0.072383
0.084633
0.140312
0.093541
0.093541
0.093541
0
0
0
0
0.230982
1,446
44
133
32.863636
0.807554
0
0
0.117647
0
0
0.024188
0
0
0
0
0
0
1
0.235294
false
0
0.088235
0.147059
0.617647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
2
99a126cb801c61ecd90be2fe3d5f2ec97ac26d6d
1,308
py
Python
Process_Threads/mul_threading.py
CrazyBBer/Python-Learn-Sample
3bd0694327db6c662c6cc3bdf91c6261daa4b6cf
[ "MIT" ]
2
2020-05-02T11:24:37.000Z
2020-05-02T13:49:18.000Z
Process_Threads/mul_threading.py
crazybber/pythontrip
062ba71dfe6729ecc606eff7260b1c39497b6456
[ "MIT" ]
null
null
null
Process_Threads/mul_threading.py
crazybber/pythontrip
062ba71dfe6729ecc606eff7260b1c39497b6456
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding utf-8 -*- __Author__='eamon' 'threading multithreading ' import time,threading def loop(): print('thread %s is running ...' % threading.current_thread().name) n=0 while n <5: n+=1 print('thread %s >> %s ' %(threading.current_thread().name,n)) time.sleep(1) print('thread %s ended.' % threading.current_thread().name) def testThread(): print('thread %s is running..' % threading.current_thread().name) t=threading.Thread(target=loop,name='LoopThread') t.start() t.join() print('thread % s ended.' % threading.current_thread().name) # testThread() balance =0 def change_it(n): global balance balance =balance +n balance =balance -n lock = threading.Lock() def run_thread(n): for i in range(100000): lock.acquire() try: change_it(n) finally: lock.release() def testMultiThreadDanger(): t1= threading.Thread(target=run_thread,args=(5,)) t2= threading.Thread(target=run_thread,args=(8,)) t1.start() t2.start() t1.join() t2.join() print(balance) # testMultiThreadDanger() import threading,multiprocessing def loop(): x=0 while True: x=x^1 def testRunfullCPU(): print('cpu num:',multiprocessing.cpu_count()) for i in range(multiprocessing.cpu_count()): t=threading.Thread(target=loop) t.start()
17.917808
68
0.682722
181
1,308
4.845304
0.337017
0.062714
0.068415
0.148233
0.374002
0.282782
0.205245
0.205245
0.107184
0
0
0.020739
0.152141
1,308
72
69
18.166667
0.770063
0.060398
0
0.125
0
0
0.116735
0
0
0
0
0
0
1
0.145833
false
0
0.041667
0
0.1875
0.145833
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a1309fdf682660b44c9cdb76eca07c595e6924
3,024
py
Python
tools/lcm_logger.py
GTLIDAR/drake-schunk-driver
65230aac6a5f5f4906c5ce2aa33f5469a0800342
[ "BSD-3-Clause" ]
2
2020-02-16T02:18:38.000Z
2020-02-16T02:18:46.000Z
tools/lcm_logger.py
GTLIDAR/drake-schunk-driver
65230aac6a5f5f4906c5ce2aa33f5469a0800342
[ "BSD-3-Clause" ]
3
2018-04-02T20:54:23.000Z
2019-02-08T18:34:16.000Z
tools/lcm_logger.py
GTLIDAR/drake-schunk-driver
65230aac6a5f5f4906c5ce2aa33f5469a0800342
[ "BSD-3-Clause" ]
3
2018-04-02T20:47:17.000Z
2021-03-19T16:08:11.000Z
#!/usr/bin/env python import argparse import sys import lcm_adapter as lcm if len(sys.argv) < 2: print """The first argument to this command must be the directory of the generated python files for LCM messages.""" sys.exit(1) MESSAGE_CLASSES = lcm.get_all_lcm_message_classes(sys.argv[1]) def set_up_subscriptions(lcm_connection, args): """Sets up all of the LCMSubscription objects for the channels indicated in the arguments.""" if args.format == "lcm": lcm_logger = lcm.EventLog(args.logfile, mode='w', overwrite=True) else: # Deliberately leaked; we'll let the GC handle the close() for us. logfile = (sys.stdout if args.logfile is None else open(args.logfile, 'w')) write_csv_headers(logfile) def handle_message(channel, message): decoded = try_decode(message) if decoded is None: print "Received unreadable message on channel", channel return if args.format == "lcm": lcm_logger.write_event(decoded.timestamp, channel, message) elif args.format == "csv": fields = ([channel, type(decoded).__name__] + [getattr(decoded, slot) for slot in sorted(decoded.__slots__)]) logfile.write(",".join(['"%s"' % f for f in fields]) + "\n") elif args.format == "pretty": lcm.debug_print_msg(decoded, logfile) else: assert False for channel in args.channel: print "subscribing to channel", channel lcm_connection.subscribe(channel, handle_message) def try_decode(message): """Try to decode the message with each known message class; return the first successful decode, or None.""" for c in MESSAGE_CLASSES: try: return c.decode(message) except ValueError: pass # The message was probably of a different type. 
return None def write_csv_headers(logfile): """Write header lines in the CSV file with the schema of the messages involved.""" for c in MESSAGE_CLASSES: header_prefix = ["", c.__name__] header_elements = sorted(c.__slots__) logfile.write(",".join( ['"%s"' % h for h in (header_prefix + header_elements)]) + "\n") def main(argv): parser = argparse.ArgumentParser(description='Log some local LCM traffic.') parser.add_argument( '-l', '--logfile', metavar='LOGFILE', type=str, default=None, help="File name for lcm log; default is stdout.") parser.add_argument( '-f', '--format', default='lcm', choices=('lcm', 'csv', 'pretty'), help="Log format: 'lcm' (binary), 'csv', or 'pretty' (human-readable)") parser.add_argument('channel', nargs="+", metavar='CHANNEL') args = parser.parse_args(argv[2:]) lcm_connection = lcm.LCM() set_up_subscriptions(lcm_connection, args) while True: lcm_connection.handle() if __name__ == "__main__": main(sys.argv)
35.576471
79
0.627976
383
3,024
4.796345
0.37859
0.035384
0.027763
0.022863
0.109962
0.064235
0
0
0
0
0
0.001779
0.256283
3,024
84
80
36
0.815029
0.04332
0
0.126984
1
0.015873
0.156009
0
0
0
0
0
0.015873
0
null
null
0.015873
0.047619
null
null
0.063492
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
2
99a1822f416a16569175d3648ee1cf50474498cd
941
py
Python
challenges/merge_sort/merge_sort.py
nastinsk/python-data-structures-and-algorithms
505b26a70fb846f6e9d0681bbe4f77e3797acf2d
[ "MIT" ]
null
null
null
challenges/merge_sort/merge_sort.py
nastinsk/python-data-structures-and-algorithms
505b26a70fb846f6e9d0681bbe4f77e3797acf2d
[ "MIT" ]
null
null
null
challenges/merge_sort/merge_sort.py
nastinsk/python-data-structures-and-algorithms
505b26a70fb846f6e9d0681bbe4f77e3797acf2d
[ "MIT" ]
3
2020-05-31T03:25:49.000Z
2020-12-05T21:03:13.000Z
def merge_sort(lst): """function to prvide a merge sort on the given list, calles recursively """ n = len(lst) if n > 1: mid = n//2 left = lst[: mid] right = lst[mid:] # sort the left side merge_sort(left) # sort the right side merge_sort(right) # merge the sorted left and right sides together merge(left, right, lst) def merge(left, right, lst): """function to merge left sublist and rightsublist to the list in proper order""" i = 0 j = 0 k = 0 while i < len(left) and j < len(right): if left[i] <= right[j]: lst[k] = left[i] i += 1 else: lst[k] = right[j] j += 1 k += 1 if i == len(left): for el in right[j:]: lst[k] = el k += 1 else: for el in left[i:]: lst[k] = el k +=1
18.82
86
0.4644
134
941
3.238806
0.298507
0.082949
0.059908
0.078341
0.036866
0
0
0
0
0
0
0.018416
0.422954
941
49
87
19.204082
0.780847
0.248672
0
0.241379
0
0
0
0
0
0
0
0
0
1
0.068966
false
0
0
0
0.068966
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a1c1776fe99e2a906834405a696a3f2033b0ed
572
py
Python
e_learning/api/models.py
Aaditya1978/Accessible-E-Learning-Platform
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
[ "MIT" ]
3
2021-07-15T06:09:08.000Z
2022-02-01T13:47:03.000Z
e_learning/api/models.py
Aaditya1978/Accessible-E-Learning-Platform
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
[ "MIT" ]
null
null
null
e_learning/api/models.py
Aaditya1978/Accessible-E-Learning-Platform
bf846b6f7e3aaca3d7f7ecd0a83a5c4dfc595f6d
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class Teacher(models.Model): first_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255) email = models.EmailField() org_name = models.CharField(max_length=255) password = models.CharField(max_length=255) class Student(models.Model): first_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255) email = models.EmailField() class_code = models.CharField(max_length=255) password = models.CharField(max_length=255)
33.647059
49
0.748252
77
572
5.376623
0.311688
0.289855
0.347826
0.463768
0.806763
0.806763
0.797101
0.797101
0.797101
0.797101
0
0.049281
0.148601
572
16
50
35.75
0.800821
0.041958
0
0.615385
0
0
0
0
0
0
0
0
0
1
0
false
0.153846
0.076923
0
1
0
0
0
0
null
1
1
1
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
10
99a4c98326e7361f9a182bd46371aea1ad73b400
4,401
py
Python
array_str_problems/zero_matrix.py
UPstartDeveloper/Problem_Solving_Practice
bd61333b3b056e82a94297e02bc05a17552e3496
[ "MIT" ]
null
null
null
array_str_problems/zero_matrix.py
UPstartDeveloper/Problem_Solving_Practice
bd61333b3b056e82a94297e02bc05a17552e3496
[ "MIT" ]
null
null
null
array_str_problems/zero_matrix.py
UPstartDeveloper/Problem_Solving_Practice
bd61333b3b056e82a94297e02bc05a17552e3496
[ "MIT" ]
null
null
null
""" Zero Matrix: Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column are set to O. Clarifying Questions and Assumptions: - so we have a rectangular matrix? yes - just integers? yes - and what are the inputs to the function? - are we given the indicies of a single element ---> use a helper function - or are we given the entire matrix, and expected to do this over the whole matrix? yes - is the input mutable? no ---> otherwise it'll be ambiguous about - which rows and cols to "zeroify" as the function goes on - are we guaranteed to have at least 1 row with at least 1 element? no - are we allowed to use NumPy? no, you don't really need to - what is the return value --> a matrix? Intuition: - traverse the 2D matrix Approach Ideas: test input = [ [0, 5 ,6, 7, 3, 1, -5], [8, 8, 0, 6, 0, 2, 4], [5, 0, 3, 6, 7, 3, -3] ] ====> [ [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0] ] --------------------------- [ [0, 5 ,6, 7, 3, 1, -5], [8, 8, 0, 6, 0, 2, 4], [5, 5, 3, 6, 7, 3, -3] ] rows = 0, 1 cols = 0, 2, 3 ====> [ [0, 5 ,6, 7, 3, 1, -5], [8, 8, 0, 6, 0, 2, 4], [5, 5, 3, 6, 7, 3, -3] ] zeroes = [ (0, 0), (1, 2), (1, 4) ] 1. Brute Force - Start with 0's, Try to Keep Elements - make a MxN matrix of all zeroes - check rows - if the corresponding row in the input contains a 0, leave the output as is - otherwise, copy over the row 2. 
Brute Force idea 2 --> can be in-place or out of place - record locations of all the 0s - iterate back over the array - if we hit one of those locations from before, "zeroify" that row and column - return the output Edge Cases: - empty array (check for that) """ from typing import List def find_zeroes(matrix): rows, cols = set(), set() for row_ndx, row in enumerate(matrix): for col_ndx, element in enumerate(row): # only add the location if it's in a unique row and column if element == 0: if row_ndx not in rows: rows.add(row_ndx) if col_ndx not in cols: cols.add(col_ndx) return rows, cols def zeroify_row(matrix, zero_row_ndx): # zeroify the matrix row for col_ndx in range(len(matrix[zero_row_ndx])): matrix[zero_row_ndx][col_ndx] = 0 def zeroify_col(matrix, zero_col_ndx): # zeroify the matrix column for row_ndx in range(len(matrix)): matrix[row_ndx][zero_col_ndx] = 0 def zero_matrix(matrix: List[List[int]]) -> List[List[int]]: """ Input: [ 0 1 2 3 4 5 6 > 0 [0, 0 ,0, 0, 0, 0, 0], > 1 [0, 8, 0, 6, 0, 2, 4], > 2 [0, 5, 3, 6, 7, 3, -3] ] locations = [ (0, 0), (1, 2), (1, 4), ] ROW_LENGTH = 7 rndx row cndx e 0 [0, 5 ,6, 7, 3, 1, -5], 0 0 1 5 2 6 3 7 4 3 5 1 6 -5 zrndx cndx zcndx 0 0 0 1 2 3 4 5 6 Big O: Time: O(MxN) Space: O(M + N) Improvements: - remember the rows and cols we've already marked for zeroifying: TODO: ---> IN PLACE --> 1. First pass: edit the cols in top row to be zero, if they contain zero edit the row vals in left col to be zero, if their rows contain zero 2. Second pass: just check the top row and left col top row: zeroify the col left col: zeroify the row """ # - record locations of all the rows and cols to zeroify rows, cols = find_zeroes(matrix) # MxN iterations # "zeroify" the rows for row_ndx in rows: # M zeroify_row(matrix, row_ndx) # N # "zeroify" the columns if len(cols) < len(matrix): for col_ndx in cols: # N zeroify_col(matrix, col_ndx) # M # - return the output return matrix
25.736842
80
0.498523
670
4,401
3.226866
0.258209
0.031452
0.037465
0.044403
0.120722
0.061055
0.049954
0.043478
0.036078
0.036078
0
0.067598
0.398319
4,401
170
81
25.888235
0.748867
0.735287
0
0
0
0
0
0
0
0
0
0.005882
0
1
0.16
false
0
0.04
0
0.28
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a63044e63f7d7aad2a8fc043b98abc40e94cd5
2,316
py
Python
arc/utility_functions/batch_generator.py
stalhabukhari/ARC
a5efc44c3af0714e07a60204cc7c3a8ca19ef20e
[ "MIT" ]
null
null
null
arc/utility_functions/batch_generator.py
stalhabukhari/ARC
a5efc44c3af0714e07a60204cc7c3a8ca19ef20e
[ "MIT" ]
null
null
null
arc/utility_functions/batch_generator.py
stalhabukhari/ARC
a5efc44c3af0714e07a60204cc7c3a8ca19ef20e
[ "MIT" ]
1
2022-03-18T10:55:57.000Z
2022-03-18T10:55:57.000Z
""" batch_generator.py """ import os, random import numpy as np from PIL import Image import tensorflow as tf from tensorflow.keras.preprocessing.image import img_to_array, load_img from tensorflow.keras.utils import to_categorical as tocat_fn Image.LOAD_TRUNCATED_IMAGES = True class BatchGenerator(tf.keras.utils.Sequence): def __init__(self, data_list, label_list, batch_size, image_size=(150, 150), aug_flag=False): self.data_list = data_list self.label_list = label_list self.batch_size = batch_size self.image_size = image_size self.aug_flag = aug_flag self.total_images = len(self.data_list) self.indices = np.arange(self.total_images) self.num_batches = int(np.ceil(self.total_images/self.batch_size)) #self.on_epoch_end() def __len__(self): """ iterations per epoch """ return self.num_batches def on_epoch_end(self): random.shuffle(self.indices) def __getitem__(self, index): """ return batch of (data, label) pairs """ batch_x, batch_y = [], [] batch_indices = self.indices[index*self.batch_size:min((index+1)*self.batch_size, self.total_images)] for loop in batch_indices: loaded_image = img_to_array((load_img(os.path.join( self.data_list[loop]))).resize(self.image_size, Image.ANTIALIAS)) loaded_label = tocat_fn(self.label_list[loop], 100) if self.aug_flag: loaded_image = self._random_rotate(loaded_image) batch_x.append(loaded_image) batch_y.append(loaded_label) return (np.asarray(batch_x, dtype=np.float32), np.asarray(batch_y, dtype=np.uint8)) def _random_augment(self, image): if np.random.uniform(-1, 1) > 0: return self._random_rotate(image) else: return self._random_brightness_distort(image) @staticmethod def _random_rotate(image): angle_multiplier = np.random.randint(3) return np.rot90(image, angle_multiplier) @staticmethod def _random_brightness_distort(image): noise_shift = np.random.normal(0., .05, image.shape) noise_scale = np.random.normal(1., .01, image.shape) return (image + noise_shift) * noise_scale
33.085714
109
0.664076
312
2,316
4.637821
0.317308
0.037319
0.033172
0.01935
0.023497
0
0
0
0
0
0
0.014077
0.233161
2,316
69
110
33.565217
0.800676
0.041883
0
0.041667
0
0
0
0
0
0
0
0
0
1
0.145833
false
0
0.125
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a671af86b1c2e857176e3f38a594911c18081a
337
py
Python
setup.py
Alexander-Sovetsky/python-image
a8ea11423b453938a8804537b836d919eb8e5b93
[ "MIT" ]
null
null
null
setup.py
Alexander-Sovetsky/python-image
a8ea11423b453938a8804537b836d919eb8e5b93
[ "MIT" ]
null
null
null
setup.py
Alexander-Sovetsky/python-image
a8ea11423b453938a8804537b836d919eb8e5b93
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages with open('README.rst') as f: readme = f.read() with open('LICENSE') as f: license = f.read() setup( name='python-image', version='0.1.0', description='project for OCT', long_description=readme, author='Alexander Sovetsky', license=license, packages=find_packages(exclude=('tests', 'docs')) )
22.466667
49
0.738872
49
337
5.020408
0.632653
0.097561
0
0
0
0
0
0
0
0
0
0.009836
0.094955
337
14
50
24.071429
0.796721
0
0
0
0
0
0.225519
0
0
0
0
0
0
0
null
null
0
0.071429
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99a7afbbb47204c705de2e7ed32e0ae9dbea9c39
62
py
Python
hello_world.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
hello_world.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
hello_world.py
deepti-anand/hello-world
494fc16c416dc8fe25600d5830052343b70f2070
[ "Apache-2.0" ]
null
null
null
print("hi") print("Hello World") #print #diff print("hellooo")
20.666667
33
0.693548
9
62
4.777778
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.080645
62
3
34
20.666667
0.754386
0.16129
0
0
0
0
0.392157
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
4
99a891aedae6992a71d2251dfc4ff9348ce3d420
1,481
py
Python
Examen_02_sim01/p4/p4.py
GuidoZalles/Computacion_para_Ingenieria
2afc06784760ca07786396374397a32ac2c19a27
[ "Apache-2.0" ]
null
null
null
Examen_02_sim01/p4/p4.py
GuidoZalles/Computacion_para_Ingenieria
2afc06784760ca07786396374397a32ac2c19a27
[ "Apache-2.0" ]
null
null
null
Examen_02_sim01/p4/p4.py
GuidoZalles/Computacion_para_Ingenieria
2afc06784760ca07786396374397a32ac2c19a27
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Tue Feb 15 19:25:58 2022 @author: User """ class person: def __init__(self, name, phone_number, email_addres, addres): self.name = name self.phone_number = phone_number self.email_addres = email_addres self.addres = addres def purchaseParkingPass(): pass class student(person): def __init__(self, name, phone_number, email_addres, student_number, average_mark,addres): person.__init__(self, name, phone_number, email_addres, addres) self.student_number = student_number self.average_mark = average_mark def isElegibleToEnroll(): pass def getSeminarsTaken(): pass class professor(person): def __init__(self, name, phone_number, email_addres, salary, addres): person.__init__(self, name, phone_number, email_addres, addres) self.salary = salary class addres: def __init__(self, street, city, state, postal_code, country): self.street = street self.city = city self.state = state self.postal_code = postal_code self.country = country def validate(): print("validado") def outputAsLabel(): pass direc = addres("santa Fe", "La Paz", "021","Bolivia") profe = professor("Guido", "60584523", "zallesguido@gmail.com", "8000 bs", direc)
23.140625
94
0.602296
163
1,481
5.184049
0.355828
0.091124
0.071006
0.100592
0.297041
0.297041
0.297041
0.297041
0.297041
0.132544
0
0.026949
0.298447
1,481
64
95
23.140625
0.786333
0.049291
0
0.176471
0
0
0.052106
0.014989
0
0
0
0
0
1
0.264706
false
0.147059
0
0
0.382353
0.029412
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
2
99a8aebcdbc0b31659e37d72e454787e67305614
663
py
Python
names.py
EggSquishIt/mcserver
f9e98f100f7d1e4b9d4fc306ca33255619d5504f
[ "MIT" ]
3
2020-08-29T13:33:30.000Z
2020-10-03T15:40:30.000Z
names.py
EggSquishIt/mcserver
f9e98f100f7d1e4b9d4fc306ca33255619d5504f
[ "MIT" ]
3
2020-10-10T17:06:19.000Z
2020-11-14T15:21:26.000Z
names.py
EggSquishIt/mcserver
f9e98f100f7d1e4b9d4fc306ca33255619d5504f
[ "MIT" ]
1
2020-10-10T13:09:27.000Z
2020-10-10T13:09:27.000Z
import random vowels = [ "a", "au", "o", "e", "i", "u", ] prefixes = [ "b", "c", "d", "f", "g", "gh", "h", "k", "l", "m", "n", "p", "qu", "r", "s", "t", "v", "w", "x", "y", "z" ] suffixes = [ "b", "c", "cc", "ck", "d", "dd", "f", "g", "gh", "h", "i", "k", "l", "ll", "m", "n", "p", "r", "rr", "s", "t", "tt", "v", "w", "x", "y", "z" ] def generate_name(): result = "" length = random.randint(3, 15) while len(result) < length: result = result + random.choice(prefixes) + random.choice(vowels) + random.choice(suffixes) return result def proper_case(string): return string[:1].upper() + string[1:]
8.84
93
0.435897
95
663
3.021053
0.578947
0.125436
0.027875
0.034843
0.034843
0
0
0
0
0
0
0.01002
0.24736
663
74
94
8.959459
0.56513
0
0
0.608696
0
0
0.096531
0
0
0
0
0
0
1
0.028986
false
0
0.014493
0.014493
0.072464
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a8f1f1c93af08f3ec765e04d13a2b8d33dbbbd
84
py
Python
hello.py
bingle2400/cs3240-labdemo
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
[ "MIT" ]
null
null
null
hello.py
bingle2400/cs3240-labdemo
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
[ "MIT" ]
null
null
null
hello.py
bingle2400/cs3240-labdemo
5dca6fd8f4c510aa5b06d7dd17e5204e0cc13ffc
[ "MIT" ]
null
null
null
from helper import reeting if __name__ == "__main__": reeting("hello")
9.333333
26
0.630952
9
84
5
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.261905
84
8
27
10.5
0.725806
0
0
0
0
0
0.158537
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
4
99a903a06e260b2c7198a42a0a29f263e277358e
245
py
Python
attempt.py
hoshen20-meet/meet2018y1lab6
68e70de443eba980b1de8b865eea8337aa82e6d3
[ "MIT" ]
null
null
null
attempt.py
hoshen20-meet/meet2018y1lab6
68e70de443eba980b1de8b865eea8337aa82e6d3
[ "MIT" ]
null
null
null
attempt.py
hoshen20-meet/meet2018y1lab6
68e70de443eba980b1de8b865eea8337aa82e6d3
[ "MIT" ]
null
null
null
import turtle colors = ['green','blue','orange', 'red'] turtle.speed(900) for i in range(99999999): turtle.pencolor(colors[i%4]) turtle.bgcolor('black') turtle.forward(i) turtle.degrees() turtle.right(70)
13.611111
41
0.608163
31
245
4.806452
0.709677
0
0
0
0
0
0
0
0
0
0
0.074074
0.228571
245
17
42
14.411765
0.714286
0
0
0
0
0
0.094262
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99a9c7e2da4c504b1d30c8fa7fb339aa5d8ceae5
4,444
py
Python
nr_common/image_utils/image_utils_caffe.py
nitred/nr-common
f251e76fe10cb46f609583922d485013f5cba92b
[ "MIT" ]
null
null
null
nr_common/image_utils/image_utils_caffe.py
nitred/nr-common
f251e76fe10cb46f609583922d485013f5cba92b
[ "MIT" ]
1
2018-01-07T19:03:35.000Z
2018-01-07T19:03:35.000Z
nr_common/image_utils/image_utils_caffe.py
nitred/nr-common
f251e76fe10cb46f609583922d485013f5cba92b
[ "MIT" ]
1
2018-09-20T02:31:18.000Z
2018-09-20T02:31:18.000Z
"""Utility functions.""" import numpy as np def caffe_load_image(image_filename): """Load image using caffe.io.load_image. This is to maintain shape expectation across the caffe library. Args: image_filename (str): String filename. Returns: numpy.ndarray: an image with the following properties: shape: [Height, Width, Channels] channel_order: RGB scale: [0, 1] dtype: np.float32 """ import caffe return caffe.io.load_image(image_filename, color=True) def caffe_load_image_batch(image_filenames, batch_size=None): """Load image using caffe.io.load_image. This is to maintain shape expectation across the caffe library. Args: image_filename (list of str): List of string filenames. batch_size (int): If batch_size is None, then all filenames are read. Otherwise only the first `batch_size` number of filenames are read. Returns: numpy.ndarray: an image with the following properties: shape: [batch_size, Height, Width, Channels] channel_order: RGB scale: [0, 1] dtype: np.float32 """ if batch_size is None: batch_size = len(image_filenames) image_batch = [caffe_load_image(image_filename) for image_filename in image_filenames[:batch_size]] image_batch = np.array(image_batch) # converting list into numpy array return image_batch # TODO (nitred): LRU cache def get_caffe_transformer(net_input_shape, mean_bgr_255=None): """Transform a batch of images which were loaded by caffe.io.load_image. Transformations: - mean subtraction (if mean provided) - transposes data to become [Channels x Height x Width] - swaps channels to convert RGB to BGR - scales the data to [0., 255.] Args: net_input_shape (numpy.ndarray): The expected 4-dimensional shape of the network. The first dimension i.e. the batch_size doesn't really matter. Usually the expected shape is [BATCH_SIZE, Height, Width, Channels] mean_bgr_255 (numpy.ndarray): 1-dimensional array of means. Channel order should be BGR and scale should be [0., 255.] Returns: caffe.io.Transformer: With all standard transformations set. 
""" import caffe transformer = caffe.io.Transformer({'data': net_input_shape}) if mean_bgr_255 is not None: transformer.set_mean('data', mean_bgr_255) transformer.set_transpose('data', (2, 0, 1)) transformer.set_channel_swap('data', (2, 1, 0)) transformer.set_raw_scale('data', 255.0) return transformer def caffe_transform_batch(X, net_input_shape, mean_bgr_255=None): """Transform a batch of images which were loaded by caffe.io.load_image. Transformations: - mean subtraction (if mean provided) - transposes data to become [Channels x Height x Width] - swaps channels to convert RGB to BGR - scales the data to [0., 255.] Args: X (numpy.ndarray): A batch of images of shape [BATCH_SIZE, Height, Width, RGB-Channels]. Can be obtained by using `caffe_utils.caffe_load_image`. """ transformer = get_caffe_transformer(net_input_shape, mean_bgr_255) transformed_batch = np.array([transformer.preprocess('data', image) for image in X]) return transformed_batch def caffe_load_network_with_input_batch(net, X, mean_bgr_255=None, net_input_blob_name='data'): """Load the network with the input batch `inplace`. Args: net (caffe.Network): The network to load the input batch. X (numpy.ndarray): A batch of images of shape [BATCH_SIZE, Height, Width, RGB-Channels]. Can be obtained by using `caffe_utils.caffe_load_image`. mean_bgr_255 (numpy.ndarray): 1-dimensional array of means. Channel order should be BGR and scale should be [0., 255.] net_input_blob_name (str): The input blob name of the network. Default blob name is "data". Returns: net: The network is loaded with input inplace but it's returned anyway. """ net_input_shape = net.blobs[net_input_blob_name].data.shape transformed_batch = caffe_transform_batch(X, net_input_shape, mean_bgr_255) net.blobs[net_input_blob_name].reshape(*transformed_batch.shape) net.blobs[net_input_blob_name].data[...] = transformed_batch return net
37.982906
103
0.685419
624
4,444
4.695513
0.214744
0.03686
0.030717
0.027304
0.554608
0.505119
0.496928
0.496928
0.474403
0.453584
0
0.018741
0.231548
4,444
116
104
38.310345
0.839239
0.60126
0
0.068966
0
0
0.018482
0
0
0
0
0.008621
0
1
0.172414
false
0
0.103448
0
0.448276
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99aa9d14b3d5ad7bbef547b6bdc0baea743dd41e
1,183
py
Python
ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py
block1o1/CryptoPredicted
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
[ "MIT" ]
4
2021-10-14T21:22:25.000Z
2022-03-12T19:58:48.000Z
ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py
inevolin/CryptoPredicted
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
[ "MIT" ]
null
null
null
ENV/lib/python3.5/site-packages/pyrogram/session/internals/msg_id.py
inevolin/CryptoPredicted
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
[ "MIT" ]
1
2022-03-15T22:52:53.000Z
2022-03-15T22:52:53.000Z
# Pyrogram - Telegram MTProto API Client Library for Python # Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance> # # This file is part of Pyrogram. # # Pyrogram is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Pyrogram. If not, see <http://www.gnu.org/licenses/>. from threading import Lock from time import time class MsgId: last_time = 0 offset = 0 lock = Lock() def __new__(cls) -> int: with cls.lock: now = time() cls.offset = cls.offset + 4 if now == cls.last_time else 0 msg_id = int(now * 2 ** 32) + cls.offset cls.last_time = now return msg_id
32.861111
74
0.690617
181
1,183
4.464088
0.585635
0.018564
0.044554
0.070545
0.123762
0.123762
0.084158
0
0
0
0
0.017738
0.237532
1,183
35
75
33.8
0.878049
0.651733
0
0
0
0
0
0
0
0
0
0
0
1
0.076923
false
0
0.153846
0
0.615385
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
99ab42eab96ab3c16d101146330c97b44958c97a
66
py
Python
configs/garbage/test_lr0.1_1x.py
zhanggefan/mmdet-yolov4
fe544b1a94c24e8abdb13bc2e4ca664854aa86a7
[ "Apache-2.0" ]
16
2021-01-21T06:52:28.000Z
2022-03-30T09:17:16.000Z
configs/garbage/test_lr0.1_1x.py
zhanggefan/mmdet-yolov4
fe544b1a94c24e8abdb13bc2e4ca664854aa86a7
[ "Apache-2.0" ]
2
2021-06-18T14:32:34.000Z
2021-09-13T15:52:16.000Z
configs/garbage/test_lr0.1_1x.py
zhanggefan/mmdet-yolov4
fe544b1a94c24e8abdb13bc2e4ca664854aa86a7
[ "Apache-2.0" ]
8
2020-12-22T21:54:10.000Z
2022-03-30T09:17:15.000Z
_base_ = './config_base/garbage_ddp.py' optimizer = dict(lr=0.1)
16.5
39
0.712121
11
66
3.909091
0.909091
0
0
0
0
0
0
0
0
0
0
0.033898
0.106061
66
3
40
22
0.694915
0
0
0
0
0
0.424242
0.424242
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
41d445f8f3d6e55aedb38945121914b577aa660c
1,967
py
Python
CNN using tensorflow.py
Highcourtdurai/Deep-learning
b9aed4f0973709ce407006311cef28a7a183787f
[ "Apache-2.0" ]
null
null
null
CNN using tensorflow.py
Highcourtdurai/Deep-learning
b9aed4f0973709ce407006311cef28a7a183787f
[ "Apache-2.0" ]
null
null
null
CNN using tensorflow.py
Highcourtdurai/Deep-learning
b9aed4f0973709ce407006311cef28a7a183787f
[ "Apache-2.0" ]
null
null
null
import tensorflow as tf import numpy as np import matplotlib.pyplot as plt #Fasion mnist=data of accesories like boats,dresses,bags etc fashion_mnist=tf.keras.datasets.fashion_mnist (train_images,train_labels),(test_images,test_labels)=fashion_mnist.load_data() print(train_images.shape) print(train_labels.shape) print(test_images.shape) print(test_labels.shape) plt.imshow(train_images[4]) plt.show() model=tf.keras.Sequential() model.add(tf.keras.layers.Flatten(input_shape=(28,28))) model.add(tf.keras.layers.Dense(units=120,activation="relu")) model.add(tf.keras.layers.Dense(units=10,activation="softmax")) # model.compile(optimizer=tf.keras.optimizers.Adam(0.01),loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),metrics=["accuracy"]) # model.fit(train_images,train_labels,epochs=20,batch_size=500) def cross_entropy(y_pred,y_true): return tf.reduce_mean(tf.keras.losses.SparseCategoricalCrossentropy(y_true,y_pred)) def accuracy(y_pred,y_true): correct_prediction=tf.equal(tf.cast(y_pred,tf.int64),tf.cast(y_true,tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) optimizer=tf.optimizers.Adam() def train_step(x,y): with tf.GradientTape() as tape: pred=tf.argmax(model.predict(x),axis=1) loss=cross_entropy(pred,y) trainable_variables=model.trainable_variables gradients=tape.gradient(loss,trainable_variables) optimizer.apply_gradients(zip(gradients,trainable_variables)) return pred,loss train_data=tf.data.Dataset.from_tensor_slices((train_images,train_labels)) train_data=train_data.repeat().shuffle(100).batch(32).prefetch(1) for epoch in range(20): for step,(batch_x,batch_y) in enumerate(train_data.take(train_images.shape[0]//32),1): pred,loss=train_step(batch_x,batch_y) acc=accuracy(pred,batch_y) print(acc,loss)
27.704225
142
0.734621
289
1,967
4.816609
0.384083
0.04023
0.034483
0.047414
0.111351
0.04454
0.04454
0
0
0
0
0.021829
0.138282
1,967
70
143
28.1
0.79941
0.132689
0
0
0
0
0.006778
0
0
0
0
0
0
1
0.083333
false
0
0.083333
0.027778
0.25
0.138889
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41d44b79dc2869fa41ba2410af3f958c1f765b2a
1,489
py
Python
Assignment-2/visualization.py
LuciFR1809/DAA-Assignments
0f2faaf2f545cb81da8c86bdd370646694c2c756
[ "BSD-3-Clause" ]
null
null
null
Assignment-2/visualization.py
LuciFR1809/DAA-Assignments
0f2faaf2f545cb81da8c86bdd370646694c2c756
[ "BSD-3-Clause" ]
null
null
null
Assignment-2/visualization.py
LuciFR1809/DAA-Assignments
0f2faaf2f545cb81da8c86bdd370646694c2c756
[ "BSD-3-Clause" ]
null
null
null
## # @file visualization.py # @brief Python file for visualization of the testcase. # Contains the driver code for reading the file and plotting it. # # @authors Kumar Pranjal 2018A7PS0163H # @authors Ashna Swaika 2018A7PS0027H # @authors Abhishek Bapna 2018A7PS0184H # @authors Ashish Verma 2018A7PS0009H # Importing required modules from sys import argv import matplotlib.pyplot as plt import numpy as np # Program starts here if __name__ == '__main__': fname = f'autotestcase/{argv[1]}' fname2 = f'{fname}_line.txt' X = [] Y = [] # Plotting the points with open(fname, 'r', encoding='utf8') as f: lines = f.readlines() for line in lines: x, y = list(map(float, line.split())) X.append(x) Y.append(y) plt.plot(X, Y, '--.', color='red', linewidth=0.5) # Plotting the partitions with open(fname2, 'r', encoding='utf8') as f: lines = f.readlines() err_tot = 0 for line in lines: cost, err, m, c, xmin, xmax = list(map(float, line.split())) if m == float('inf'): continue x = np.linspace(xmin, xmax, 1000) plt.plot(x, m*x+c, c=np.random.rand(3,), linewidth=3,label='y = %.3f x + %.3f' % (m, c)) err_tot += err plt.legend() # Displaying and saving the plot plt.title('Cost : %.3f Error : %.3f'%(cost,err_tot)) plt.savefig(f'{fname}.png') plt.show() exit(0)
30.387755
100
0.584285
203
1,489
4.226601
0.507389
0.009324
0.030303
0.034965
0.121212
0.072261
0.072261
0.072261
0
0
0
0.051258
0.279382
1,489
48
101
31.020833
0.748369
0.274681
0
0.133333
0
0
0.111842
0.020677
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41d4b3a8528e092d2bed2da155c8fbd1a073ab7d
869
py
Python
test/test_signal.py
dabercro/dynamo-consistency
f37dfebe781a833e9ae30869d8f57be79b4f583c
[ "MIT" ]
null
null
null
test/test_signal.py
dabercro/dynamo-consistency
f37dfebe781a833e9ae30869d8f57be79b4f583c
[ "MIT" ]
1
2018-02-20T21:21:14.000Z
2018-02-20T21:21:14.000Z
test/test_signal.py
dabercro/dynamo-consistency
f37dfebe781a833e9ae30869d8f57be79b4f583c
[ "MIT" ]
2
2018-06-25T11:27:45.000Z
2021-05-13T20:32:36.000Z
#! /usr/bin/env python # This is to allow operators to disable a site and separately # kill the process without un-disabling the site import unittest from dynamo_consistency import signaling from dynamo_consistency import summary from dynamo_consistency import main from dynamo_consistency import picker import base class TestSignaling(base.TestSimple): def test_signaling(self): site = picker.pick_site() main.main(site) summary.unlock_site(site) self.assertEqual(summary.get_status(site), summary.READY) signaling.halt(2, 'dummy') self.assertEqual(summary.get_status(site), summary.HALT) summary.set_status(site, summary.DISABLED) signaling.halt(2, 'dummy') self.assertEqual(summary.get_status(site), summary.DISABLED) if __name__ == '__main__': unittest.main(argv=base.ARGS)
25.558824
68
0.728423
112
869
5.482143
0.455357
0.089577
0.136808
0.175896
0.267101
0.267101
0.267101
0.198697
0.198697
0.198697
0
0.002821
0.18412
869
33
69
26.333333
0.863188
0.147296
0
0.105263
0
0
0.02439
0
0
0
0
0
0.157895
1
0.052632
false
0
0.315789
0
0.421053
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
41d4ddc8906709d7184306ba4ee3569b358c67bc
2,359
py
Python
scripts/oscar_exclusive_data_format.py
codemechanic/oscar-sysex-grammar
88b841ee7d54a859df6de9bfa5c7a4f87bae3afd
[ "MIT" ]
null
null
null
scripts/oscar_exclusive_data_format.py
codemechanic/oscar-sysex-grammar
88b841ee7d54a859df6de9bfa5c7a4f87bae3afd
[ "MIT" ]
null
null
null
scripts/oscar_exclusive_data_format.py
codemechanic/oscar-sysex-grammar
88b841ee7d54a859df6de9bfa5c7a4f87bae3afd
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# This script is intended to be run from within the OSCar MIDI Sysex Grammar file
#
# OSCar Exclusive Data Format
#
# 2 MIDI bytes contain one byte of data
# first byte contains low 4 bit nibble
# second byte contains high 4 bit nibble
#
# 0               8               16
# +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
# |0|0|0|0|l|l|l|l| |0|0|0|0|h|h|h|h|
# +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
# \______/ \______/ \______/ \______/
#  4 zero   4 low    4 zero   4 high


def parseByteRange(element, byteView, bitPos, bitLength, results):
    """Decode one data byte spread across two MIDI sysex bytes.

    The low nibble sits in bits 4-7 of the first byte, the high nibble in
    bits 4-7 of the second; bit 0 of each byte must be zero for the pair to
    be a valid encoding.

    Returns the number of source bytes consumed: 2 on success, 0 otherwise.
    """
    processedBytes = 0
    # Guard bit of the first byte must be zero.
    initialBitLow = byteView.readUnsignedIntBits(bitPos, 1, ENDIAN_BIG)
    if initialBitLow == 0:
        # Guard bit of the second byte must be zero too.
        initialBitHigh = byteView.readUnsignedIntBits(bitPos + 8, 1, ENDIAN_BIG)
        if initialBitHigh == 0:
            # combine high and low nibbles from two bytes into one byte
            low = byteView.readUnsignedIntBits(bitPos + 4, 4, ENDIAN_BIG)
            high = byteView.readUnsignedIntBits(bitPos + 12, 4, ENDIAN_BIG)
            result = (high << 4) | low
            # return value to results
            value = Value()
            value.setString(str(result))
            results.addElement(element, 2, 0, value)
            processedBytes = 2
    return processedBytes


def fillByteRange(value, byteArray, bitPos, bitLength):
    """Encode one user-edited byte (0-255) into two MIDI sysex bytes.

    Writes the low nibble into the first byte and the high nibble into the
    second byte, each padded with four zero bits.
    """
    if bitLength < 16:
        # FIX: was a Python 2 `print` statement while the rest of the file
        # uses the print() function form.
        print("Not enough space for OSCar Exclusive Data Format, 16 bits needed")
    # get number edited by user
    number = value.getUnsigned()
    # FIX: the shift/mask operators were HTML-escaped in the original
    # ("&gt;&gt;" / "&amp;"); restore the intended nibble split.
    high, low = number >> 4, number & 0x0F
    # verbose flag
    verbose = False
    # verbose info
    if verbose:
        print("Input value: " + str(number))
        print("byteArray length: " + str(byteArray.getLength()))
        print("bitPos: " + str(bitPos))
        print("bitLength: " + str(bitLength))
        # FIX: int(str(number), 16) re-parsed a decimal value as hex
        # (e.g. 16 would print as 0x16); format the number directly.
        numHex = '0x{:02X}'.format(number)
        print("Input value hex: " + str(numHex))
        # number in binary
        numBinary = '{0:08b}'.format(number)
        print("Input value binary: " + str(numBinary))
        # number high and low nibbles
        print("Input value binary (low nibble): " + str('{0:04b}'.format(low)))
        print("Input value binary (high nibble): " + str('{0:04b}'.format(high)))
    if number < 256:
        # low nibble first, then high nibble, one MIDI byte each
        byteArray.writeUnsignedIntBits(low, bitPos, 8, ENDIAN_BIG)
        byteArray.writeUnsignedIntBits(high, bitPos + 8, 8, ENDIAN_BIG)
    else:
        print("Input value out of range (0-255). Value not updated.")
29.123457
81
0.660449
311
2,359
4.913183
0.360129
0.007853
0.058901
0.04123
0.024869
0
0
0
0
0
0
0.033801
0.184824
2,359
80
82
29.4875
0.76079
0.275964
0
0
0
0
0.183099
0
0
0
0.002449
0
0
0
null
null
0
0
null
null
0.277778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
41d5649317c085f8985b97148521c85447459436
38
py
Python
yeelight/version.py
kmohrf/python-yeelight
483019c074556b4c3d2f665398f0fc308afd6274
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
yeelight/version.py
kmohrf/python-yeelight
483019c074556b4c3d2f665398f0fc308afd6274
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
yeelight/version.py
kmohrf/python-yeelight
483019c074556b4c3d2f665398f0fc308afd6274
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
# flake8: noqa __version__ = "0.5.0"
9.5
21
0.631579
6
38
3.333333
0.833333
0
0
0
0
0
0
0
0
0
0
0.129032
0.184211
38
3
22
12.666667
0.516129
0.315789
0
0
0
0
0.208333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
41d697a0f4888c1996ea9e1ef9843309eaf50ff0
2,744
py
Python
tremana/analysis/transformations.py
s-weigand/tremana
98a8a546c79ce4f248b3955da21374edfdd61dee
[ "Apache-2.0" ]
1
2022-03-07T02:52:25.000Z
2022-03-07T02:52:25.000Z
tremana/analysis/transformations.py
s-weigand/tremana
98a8a546c79ce4f248b3955da21374edfdd61dee
[ "Apache-2.0" ]
9
2021-04-26T07:08:27.000Z
2022-03-28T07:23:31.000Z
tremana/analysis/transformations.py
s-weigand/tremana
98a8a546c79ce4f248b3955da21374edfdd61dee
[ "Apache-2.0" ]
null
null
null
"""Transformations to be used on tremor accelerometry data (e.g.: FFT).""" from __future__ import annotations from typing import Iterable import numpy as np import pandas as pd from scipy.signal import periodogram def fft_spectra( input_dataframe: pd.DataFrame, columns: Iterable[str] | None = None, sampling_rate: int | float = 128, norm=False, ): """Calculate the FFT of accelerometry data. Parameters ---------- input_dataframe : pd.DataFrame Dataframe containing accelerometry data. columns : Iterable[str], optional Columns co calculate the FFT for, by default None which results in all columns to be used sampling_rate : int, optional Number of sample per second, by default 128 norm : bool, optional Whether to normalize the the data to 1 or not, by default False Returns ------- pd.DataFrame FFT spectra of the accelerometry data. """ n_samples = input_dataframe.shape[0] freq = np.fft.fftfreq(n_samples, d=1 / sampling_rate) fft_results = {} if columns is None: columns = input_dataframe.columns for column in columns: fft_vals = 2 / n_samples * np.abs(np.fft.fft(input_dataframe[column])) if norm: fft_vals /= fft_vals.max() fft_results[column] = fft_vals fft_df = pd.DataFrame(fft_results, index=freq) return fft_df.iloc[freq >= 0, :] def power_density_spectra( input_dataframe: pd.DataFrame, columns: Iterable[str] | None = None, sampling_rate: int | float = 128, norm=False, ): """Calculate the power density spectra of accelerometry data. Compared to the FFT the resulting values are FFT[-freq]*FFT[freq] with freq>=0. Parameters ---------- input_dataframe : pd.DataFrame Dataframe containing accelerometry data. columns : Iterable[str], optional Columns co calculate the FFT for, by default None which results in all columns to be used sampling_rate : int, optional Number of sample per second, by default 128 norm : bool, optional Whether to normalize the the data to 1 or not, by default False Returns ------- pd.DataFrame Power density spectra accelerometry data. 
""" pds_results = {} if columns is None: columns = input_dataframe.columns for column in columns: frequency, power_density = periodogram(input_dataframe[column], sampling_rate) if norm: power_density /= power_density.max() else: power_density *= 2 / (input_dataframe.shape[0] / sampling_rate) pds_results[column] = power_density return pd.DataFrame(pds_results, index=frequency)
31.181818
86
0.662901
354
2,744
5.014124
0.259887
0.078873
0.036056
0.056338
0.541972
0.541972
0.541972
0.541972
0.541972
0.541972
0
0.010254
0.253644
2,744
87
87
31.54023
0.856445
0.43586
0
0.45
0
0
0
0
0
0
0
0
0
1
0.05
false
0
0.125
0
0.225
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41d69abf160b8ce1e4074dd51d9496b0510b87af
12,633
py
Python
Prod_CV_NLP_API/flask/app.py
micintron/computer_vission_OCR
1fdd521b334f6e5958958ccf816341531b783a21
[ "CNRI-Python" ]
1
2021-02-25T09:52:46.000Z
2021-02-25T09:52:46.000Z
Prod_CV_NLP_API/flask/app.py
micintron/computer_vission_OCR
1fdd521b334f6e5958958ccf816341531b783a21
[ "CNRI-Python" ]
null
null
null
Prod_CV_NLP_API/flask/app.py
micintron/computer_vission_OCR
1fdd521b334f6e5958958ccf816341531b783a21
[ "CNRI-Python" ]
null
null
null
""" API to grab text content from images ID's and pdf's. Endpoints --------- * GET /: root: shows api info to new users on run * POST /: convert_pdf_to_image: converts a pdf doc to an image for processing * POST /: passport: extracts target text based information from pasport * POST /: image: extracts target text based information from jpg or png image USAGE ----- Run local: run app.py in virtual env after installing the requirments files You should then be able to navigate to localhost:5000 if you see message API if operational """ import os import json import logging from flask import Flask, request, make_response, jsonify from werkzeug.utils import secure_filename from passporteye.mrz.image import MRZPipeline from passporteye import read_mrz from pdfUtil import pdf_to_png try: from PIL import Image except ImportError: import Image import pytesseract import cv2 import numpy as np import re from random import * from flask_cors import CORS# CORS allows cross origin requests from web browsers from extract_image_data import * from nlp_ops import sentiment_analysis_score from nlpbot import NLPBot from scanner import scan_barcode_image #new addtions #%pip install easyocr import easyocr reader = easyocr.Reader(['es', 'en'], gpu=False) # for running locally #UPLOAD_FOLDER = 'uploads' #EDIT_FOLDER = 'edit' # for docker build UPLOAD_FOLDER = '/uploads' EDIT_FOLDER = '/edit' MAXIMUM_IMAGE_ROTATIONS = 3 app = Flask(__name__) log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' logging.basicConfig(level=logging.INFO, format=log_fmt) #Endpoint Routes @app.route('/') def root(): """Get and return root text response from API Parameters ---------- None Returns ------- None """ return 'Welcome ! 
The endpoint for images is at <b>/passport</b>, <b>/image</b> or <b>/barcode</b> the key is imagefile , The EndPoint of pdfs is <b>pdf</b> and the key is pdf' @app.route('/pdf', methods=['POST']) def convert_pdf_to_image(): """Post a pdf file for conversion to image format for data extraction Parameters ---------- None Returns ------- png image converted from orginal pdf """ # Get PDF file from request and save to local directory pdfFile = request.files.get('pdf', None) if not pdfFile: return make_response("Missing file parameter", 400) filename = secure_filename(pdfFile.filename) full_path = os.path.join(UPLOAD_FOLDER, filename) pdfFile.save(full_path) # Convert PDF file to image png_path_array = pdf_to_png(full_path) # Convert image to text text_array = [] for png_path in png_path_array: converted_text = image_to_string(png_path) text_array.append(converted_text) return jsonify(text_array) @app.route('/passport', methods=['POST']) def passport(): """Post a passport image file for text data to be extracted Parameters ---------- None Returns ------- json format - text data feilds extracted from the passport """ imagefile = request.files.get('imagefile', None) if not imagefile: return make_response("Missing file parameter", 400) mrz, full_content = get_image_content(imagefile) if mrz is None: return make_response("Can not read image", 400) mrz_data = mrz.to_dict() all_infos = {} all_infos['last_name'] = mrz_data['surname'].upper() all_infos['first_name'] = mrz_data['names'].upper() all_infos['country_code'] = mrz_data['country'] all_infos['country'] = get_country_name(all_infos['country_code']) all_infos['nationality'] = get_country_name(mrz_data['nationality']) all_infos['number'] = mrz_data['number'] all_infos['sex'] = mrz_data['sex'] # all_infos['full_text'] = full_content valid_score = mrz_data['valid_score'] # Trying to extract full name if all_infos['last_name'] in full_content: splitted_fulltext = full_content.split("\n") for w in splitted_fulltext: if 
all_infos['last_name'] in w: all_infos['last_name'] = w continue splitted_firstname = all_infos['first_name'].split(" ") if splitted_firstname[0] in full_content: splitted_fulltext = full_content.split("\n") for w in splitted_fulltext: if splitted_firstname[0] in w: all_infos['first_name'] = clean_name(w) continue #clean out text all_infos['last_name'] = all_infos['last_name'].replace('>','') all_infos['last_name'] = all_infos['last_name'].replace('<','') all_infos['last_name'] = all_infos['last_name'].replace('$','') #fix sex if misidentified s = all_infos['sex'].upper() s = s.strip() if(s != 'M' and s !='F'): i = randint(0, 1) if(i ==0): s ='M' else: s='F' all_infos['sex'] = s return jsonify(all_infos) @app.route('/image', methods=['POST']) def image(): """Post an image file for text data to be extracted Parameters ---------- None Returns ------- json format - text data extracted from the image png or jpg """ imagefile = request.files.get('imagefile', None) if not imagefile: return make_response("Missing file parameter", 400) filename = secure_filename(imagefile.filename) full_path = os.path.join(UPLOAD_FOLDER, filename) imagefile.save(full_path) text = '' try: # Convert image to text im = cv2.imread(full_path) imC = clean_image(im) text = pytesseract.image_to_string(imC, lang ='eng') if text == "": text = pytesseract.image_to_string(im, lang ='eng') # logging.info('full image content = %s' %(full_content)) except: text = 'Error : Can Not Read the current Image' return jsonify(text) @app.route('/nlpbot', methods=['POST']) def nlpbot(): """Post a pdf, text, vtt or other file and get a summary back Parameters ---------- None Returns ------- json format - text data extracted from the image png or jpg """ # Get PDF file from request and save to local directory pdfFile = request.files.get('pdf', None) if not pdfFile: return make_response("Missing file parameter", 400) filename = secure_filename(pdfFile.filename) full_path = os.path.join(UPLOAD_FOLDER, filename) 
pdfFile.save(full_path) nlpbot = NLPBot(infile_path=full_path) nlpbot.summarize() result = {"original_text": nlpbot.text, "summary_text": nlpbot.final_text} return jsonify(result) @app.route('/nlp_sa', methods=['POST']) def nlp_sa(): """Post a list of text and get sentiment analysis reports back on the data Parameters ---------- None Returns ------- json format - text data and response report scores """ #extract from json responnse - {"words":["list of words"]} data = request.json words = data["words"] result = sentiment_analysis_score(words) return jsonify(result) @app.route('/barcode', methods=['POST']) def barcode(): """Post a barcode image file for text data to be extracted Parameters ---------- imagefile Returns ------- json format - text data extracted from the image png or jpg """ imagefile = request.files.get('imagefile', None) if not imagefile: return make_response("Missing file parameter", 400) filename = secure_filename(imagefile.filename) full_path = os.path.join(UPLOAD_FOLDER, filename) imagefile.save(full_path) text = '' try: # Convert image to text text = scan_barcode_image(full_path) except: return make_response("Error processing image", 500) return jsonify(text) @app.route('/drivers_license', methods=['POST']) def drivers_license(): """Post an image file for text data to be extracted Parameters ---------- None Returns ------- json format - text data extracted from the image png or jpg example - {"name":"JANICE ANN","address":"123 MAIN STREET, AARRISBURG, PA 17101-0000","state":"Pennsylvana", "class":"A","sex":"F","height":"5'-06\"","eyes":"BRO","dob":"08/04/1975","exp":"08/05/2023"} """ imagefile = request.files.get('imagefile', None) text = '' if not imagefile: return make_response("Missing file parameter", 400) try: # Convert DL to text img = adjust_image(imagefile) text = reader.readtext(img, detail=0) parcetext={} other_info =[] #parce out data i = -1 for x in text: try: x = str(x).upper() x = str(x).replace('$','S') i+=1 s = x.split(":") 
if(len(s)>1): s=s[1] else: s=x if 'DL' in x: parcetext['DLN']=s continue if 'CLASS' in x: parcetext['CLASS']=s continue if 'SEX' in x: parcetext['SEX']=s continue if 'HGT' in x: parcetext['HGT']=s continue if 'WGT' in x: parcetext['WGT']=s continue if 'EXP' in x: parcetext['EXP']=s continue if 'EYE' in x: parcetext['EYES']=s continue if 'ISS' in x: parcetext['ISS']=s if len(x)<7: parcetext['ISS']=s+" "+ text[i-1] continue if 'DOB' in x or 'D0B'in x: parcetext['DOB']=s continue if 'DD' in x or '00:'in x: parcetext['DD']=s continue if 'DUPS' in x: parcetext['DUPS']=s continue if(len(x)>0): other_info.append(x) except: continue parcetext['personal_info'] =other_info except: parcetext = 'Error : Can Not Read the current Image' return jsonify(parcetext) @app.route('/drivers_license_raw', methods=['POST']) def drivers_license_raw(): """Post an image file for text data to be extracted Parameters ---------- None Returns ------- json format - text data extracted from the image png or jpg """ imagefile = request.files.get('imagefile', None) text = '' if not imagefile: return make_response("Missing file parameter", 400) try: # Convert DL to text img = adjust_image(imagefile) text = reader.readtext(img, detail=0) except: text = 'Error : Can Not Read the current Image' return jsonify(text) @app.route('/simple_summary', methods=['POST']) def simple_summary(): """Post a list of text and get sentiment analysis reports back on the data Parameters ---------- None Returns ------- json format - text data and response report scores """ #extract from json response - {"text": "Text to be summarized"} data = request.json nlpbot = NLPBot(text=data["text"]) nlpbot.summarize() result = {"original_text": nlpbot.text, "summary_text": nlpbot.final_text} return jsonify(result) @app.route('/ner', methods=['POST']) def ner(): """Post a list of text and get sentiment analysis reports back on the data Parameters ---------- None Returns ------- json format - text data and response report scores """ 
#extract from json response - {"text": "Text for Named Entity Recognition"} data = request.json nlpbot = NLPBot(text=data["text"]) nlpbot.ner() result = {"original_text": nlpbot.text, "ner_text": nlpbot.tags} return jsonify(result) if __name__ == "__main__": CORS(app) app.run(host="0.0.0.0", debug=True)
27.887417
181
0.576269
1,539
12,633
4.607537
0.191033
0.027077
0.018615
0.022564
0.483571
0.447187
0.422084
0.422084
0.422084
0.39811
0
0.009807
0.305866
12,633
452
182
27.949115
0.798837
0.256155
0
0.382979
0
0.004255
0.135544
0
0
0
0
0
0
1
0.046809
false
0.021277
0.093617
0
0.225532
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
41d72f12694e72053874661915e1331274883431
5,540
py
Python
py/umpire/server/service/multicast_unittest.py
arccode/factory
a1b0fccd68987d8cd9c89710adc3c04b868347ec
[ "BSD-3-Clause" ]
3
2022-01-06T16:52:52.000Z
2022-03-07T11:30:47.000Z
py/umpire/server/service/multicast_unittest.py
arccode/factory
a1b0fccd68987d8cd9c89710adc3c04b868347ec
[ "BSD-3-Clause" ]
null
null
null
py/umpire/server/service/multicast_unittest.py
arccode/factory
a1b0fccd68987d8cd9c89710adc3c04b868347ec
[ "BSD-3-Clause" ]
1
2021-10-24T01:47:22.000Z
2021-10-24T01:47:22.000Z
#!/usr/bin/env python3
#
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unit tests for the Umpire multicast service config generation."""

import os
import unittest
from unittest import mock

from cros.factory.umpire.server.service import multicast
from cros.factory.utils import json_utils


# Port handed to GenerateConfig; expected outputs in testdata assume it.
DEFAULT_PORT = 8080
TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')


def TestData(filename):
  """Return the absolute path of a file under the testdata directory."""
  return os.path.join(TESTDATA_DIR, filename)


class GenerateConfigTest(unittest.TestCase):
  """Tests MulticastService.GenerateConfig against golden JSON files."""

  def setUp(self):
    # Shared example payload fed to every GenerateConfig call.
    self.payload = json_utils.LoadFile(
        os.path.join(TESTDATA_DIR, 'example_payload.json'))

  def testEnableAll(self):
    """All components enabled; compare against the golden config."""
    _SERVICE_CONFIG_ENABLE_ALL = {
        'mgroup': '224.1.2.3',
        'server_ip': '192.168.1.1',
        'required_components': {
            "release_image": True,
            "test_image": True,
            "toolkit": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_ENABLE_ALL, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_enable_all.json'))
    self.assertEqual(generated_config, expected_config)

  def testEnableToolkit(self):
    """Only the toolkit component enabled."""
    _SERVICE_CONFIG_ENABLE_TOOLKIT = {
        'mgroup': '224.1.2.3',
        'server_ip': '192.168.1.1',
        'required_components': {
            "release_image": False,
            "test_image": False,
            "toolkit": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_ENABLE_TOOLKIT, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_enable_toolkit.json'))
    self.assertEqual(generated_config, expected_config)

  def testDefaultValues(self):
    """Omitted mgroup/server_ip fall back to service defaults."""
    # Enable one component here to test default mgroup value.
    _SERVICE_CONFIG_DEFAULT_VALUES = {
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_DEFAULT_VALUES, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_default_values.json'))
    self.assertEqual(generated_config, expected_config)

  def testNoServerIp(self):
    """Test when `server_ip` is assigned, but `mgroup` is not given."""
    _SERVICE_CONFIG_NO_SERVER_IP = {
        'mgroup': '224.1.2.3',
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_NO_SERVER_IP, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_no_server_ip.json'))
    self.assertEqual(generated_config, expected_config)

  def testAutoAssignMgroup(self):
    """Test auto assigning `mgroup` from server_ip."""
    _SERVICE_CONFIG_AUTO_ASSIGN_MGROUP = {
        'server_ip': '192.168.12.34',
        'required_components': {
            "test_image": True
        }
    }
    generated_config = multicast.MulticastService.GenerateConfig(
        _SERVICE_CONFIG_AUTO_ASSIGN_MGROUP, self.payload, DEFAULT_PORT)
    expected_config = json_utils.LoadFile(
        TestData('mcast_config_auto_assign_mgroup.json'))
    self.assertEqual(generated_config, expected_config)

  def testBadMgroup(self):
    """A malformed mgroup string must be rejected with AssertionError."""
    _SERVICE_CONFIG_BAD_MGROUP = {
        'mgroup': '123456',
        'required_components': {
            "test_image": True
        }
    }
    with self.assertRaises(AssertionError):
      multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_MGROUP,
                                                self.payload, DEFAULT_PORT)

  def testAutoAssignMgroupWithBadServerIp(self):
    """A malformed server_ip cannot be turned into an mgroup."""
    _SERVICE_CONFIG_BAD_SERVER_IP = {
        'server_ip': '123456',
        'required_components': {
            "test_image": True
        }
    }
    # Raised by the `.group()` call from a None object returned by `re.search`.
    with self.assertRaises(AttributeError):
      multicast.MulticastService.GenerateConfig(_SERVICE_CONFIG_BAD_SERVER_IP,
                                                self.payload, DEFAULT_PORT)


class MulticastServiceTest(unittest.TestCase):
  """Tests the service's process-creation side effects (mocked env)."""

  _DUMMY_MCAST_CONFIG = {
      'dummy_key': 'dummy_value'
  }
  _FAKE_UMPIRE_CONFIG = {
      'services': {
          'multicast': {}
      }
  }
  _FAKE_UMPIRE_BASE_DIR = 'umpire_base_dir'
  _FAKE_MCAST_RESOURCE_NAME = 'multicast.32d4f1f4ba53b174acc8aa0a68fb53bd.json'

  @mock.patch('cros.factory.utils.file_utils.ForceSymlink')
  @mock.patch(multicast.__name__ + '.MulticastService.GenerateConfig')
  def testCreateProcesses(self, mock_generate_config, mock_force_sym_link):
    """CreateProcesses stores the config blob and symlinks it in place."""
    mock_generate_config.return_value = self._DUMMY_MCAST_CONFIG
    mock_env = mock.MagicMock()
    mock_env.base_dir = self._FAKE_UMPIRE_BASE_DIR
    mock_env.AddConfigFromBlob.return_value = self._FAKE_MCAST_RESOURCE_NAME

    ret = multicast.MulticastService().CreateProcesses(self._FAKE_UMPIRE_CONFIG,
                                                       mock_env)

    # The multicast service spawns no umpire processes of its own.
    self.assertEqual(ret, [])
    mock_env.AddConfigFromBlob.assert_called_once_with(
        json_utils.DumpStr(self._DUMMY_MCAST_CONFIG, pretty=True),
        'multicast_config')
    mock_force_sym_link.assert_called_once_with(
        os.path.join('resources', self._FAKE_MCAST_RESOURCE_NAME),
        os.path.join(self._FAKE_UMPIRE_BASE_DIR, 'multicast_config.json'))


if __name__ == '__main__':
  unittest.main()
34.197531
80
0.687545
609
5,540
5.881773
0.249589
0.05081
0.076214
0.089894
0.485483
0.398381
0.336404
0.336404
0.265215
0.265215
0
0.018672
0.216968
5,540
161
81
34.409938
0.807054
0.075271
0
0.283465
0
0
0.15472
0.058754
0
0
0
0
0.07874
1
0.07874
false
0
0.03937
0.007874
0.173228
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0