hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
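The fields above describe one record per source file: repo metadata in three variants (max stars, max issues, max forks), the raw file `content`, and derived quality-signal columns. A minimal sketch of loading and filtering rows with this schema using pandas, assuming the records are stored in a Parquet shard (the filename data.parquet is hypothetical):

# Minimal sketch, assuming the rows above live in a Parquet shard;
# the filename "data.parquet" is hypothetical.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Keep small, comment-bearing Python files using the schema's signal columns.
mask = (
    (df["lang"] == "Python")
    & (df["size"] < 10_000)
    & (df["qsc_code_frac_chars_comments_quality_signal"] > 0.05)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())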
d86a7610b205b3e4cc679d4d420398b2a7ef8963
4,872
py
Python
TorPool/tor_method.py
SUN-PEI-YUAN/TorPool
6e77e3b4c0b1e370e6b9417e94dc358b1d25f365
[ "MIT" ]
1
2022-01-29T22:24:07.000Z
2022-01-29T22:24:07.000Z
TorPool/tor_method.py
SUN-PEI-YUAN/TorPool
6e77e3b4c0b1e370e6b9417e94dc358b1d25f365
[ "MIT" ]
null
null
null
TorPool/tor_method.py
SUN-PEI-YUAN/TorPool
6e77e3b4c0b1e370e6b9417e94dc358b1d25f365
[ "MIT" ]
null
null
null
# coding: utf-8
import subprocess
import shutil
import sys
import os


class TorMethod(object):
    '''Builds a tor proxy server.

    ::: Parameters :::
    torrc_dir: location where the torrc file is stored (required)
    tordata_dir: the DataDirectory entry inside the torrc (required)
    __process: handle of the tor process, available via TorMethod.get_process
    __torname: name of the torrc file and data directory, available via TorMethod.get_toruuid
    __torrcfile: path of the torrc file, available via TorMethod.get_torrcpath
    __tordatafile: the DataDirectory entry inside the torrc, available via TorMethod.get_torrcpath
    __socksport: Tor opens a SOCKS proxy on port [socksport]
    __controlport: The port on which Tor will listen for local connections from
        Tor controller applications, as documented in control-spec.txt.
    '''

    def __init__(self, torrc_dir, tordata_dir, hashedcontrolpassword):
        from . import _TOR_EXE
        if sys.platform == 'win32':
            self.__tor_exe = _TOR_EXE
        else:
            self.__tor_exe = os.popen('which tor').read().rstrip('\n')
            if self.__tor_exe == '':
                error_msg = (
                    "'Tor client' is not installed. Please install the tor client first!\n"
                    "\t If your system is Debian or Ubuntu, please execute 'sudo apt install tor -y'.\n"
                    "\t If your system is macOS, please install Homebrew and execute 'brew install tor'.\n"
                )
                raise OSError(error_msg)
        self.torrc_dir = torrc_dir
        self.tordata_dir = tordata_dir
        self.hashedcontrolpassword = hashedcontrolpassword
        import uuid
        self.__process = None
        self.__torname = str(uuid.uuid4())
        self.__torrcfile = os.path.join(self.torrc_dir, self.__torname + '.conf')
        self.__tordatafile = os.path.join(self.tordata_dir, self.__torname)
        self.__socksport = None
        self.__controlport = None
        self.__hashed = self.__tor_hashpasswd()
        if os.path.exists(self.torrc_dir):
            shutil.rmtree(self.torrc_dir)
        os.makedirs(self.torrc_dir)
        if os.path.exists(self.tordata_dir):
            shutil.rmtree(self.tordata_dir)
        os.makedirs(self.tordata_dir)

    @property
    def get_status(self):
        if self.__process is None:
            pid = None
        else:
            pid = self.__process.pid
        return {
            'tor_exe': self.__tor_exe,
            'socksport': self.__socksport,
            'process': pid,
            'tor_uuid': self.__torname,
            'torrc_path': self.__torrcfile,
            'torrcdata_path': self.__tordatafile,
        }

    def __tor_hashpasswd(self):
        process = subprocess.Popen(self.__tor_exe + ' --hash-password ' + str(self.hashedcontrolpassword),
                                   shell=True, stdout=subprocess.PIPE)
        return str(process.stdout.readline().decode('utf-8')).rstrip('\n')

    def get_free_port(self):
        '''Find an idle port.'''
        from socket import socket
        port = None
        with socket() as s:
            s.bind(('', 0))
            port = s.getsockname()[1]
        return port

    def make_torrc(self):
        '''Write out the torrc file.'''
        if not os.path.exists(self.torrc_dir):
            os.makedirs(self.torrc_dir)
        if not os.path.exists(self.tordata_dir):
            os.makedirs(self.tordata_dir)
        with open(self.__torrcfile, 'w') as f:
            torrc = self.torrc()
            f.write(torrc)

    def torrc(self):
        '''torrc template.'''
        if self.__socksport is None:
            self.__socksport = self.get_free_port()
        if self.__controlport is None:
            self.__controlport = self.get_free_port()
        torrc_file = (
            'HashedControlPassword {hashedcontrolpassword}\n'
            'SocksPort {socksport}\n'
            'ControlPort {controlport}\n'
            'DataDirectory {tordatafile}\n'
        )
        return torrc_file.format(
            hashedcontrolpassword=self.__hashed,
            socksport=self.__socksport,
            controlport=self.__controlport,
            tordatafile=self.__tordatafile
        )

    def start_tor(self):
        '''Start tor (killing any process previously started here first).'''
        if self.__process is not None:
            self.__process.kill()
        process = subprocess.Popen(self.__tor_exe + ' -f ' + self.__torrcfile, shell=True)
        self.__process = process

    def restart_tor(self):
        '''If the proxy has been blocked, kill the process and run tor again.'''
        self.__process.kill()
        self.start_tor()

    def kill_process(self):
        '''Kill the tor process started by this package.'''
        self.__process.kill()
        os.remove(self.__torrcfile)
        shutil.rmtree(self.__tordatafile)
        self.__process = None

    def kill_all_tor(self):
        '''Kill every tor process on the system.'''
        if sys.platform == 'win32':
            os.system('TASKKILL /F /IM tor.exe /T')
        else:
            os.system('killall -9 tor')
        self.pool = []
36.088889
142
0.594212
535
4,872
5.127103
0.28785
0.029165
0.034998
0.023332
0.13416
0.118848
0.053956
0.053956
0.026249
0
0
0.002933
0.300082
4,872
135
143
36.088889
0.801466
0.129105
0
0.142857
0
0.019048
0.12503
0.011082
0
0
0
0
0
1
0.095238
false
0.066667
0.066667
0
0.209524
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
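The TorMethod class in the TorPool/tor_method.py row above is driven by three calls: construct, write the torrc, start tor. A minimal usage sketch, assuming a tor client is installed on the current platform; the directory paths and password are hypothetical:

# Hypothetical usage sketch for TorMethod; paths and password are made up,
# and a working tor client is assumed to be on PATH.
tor = TorMethod(torrc_dir='/tmp/torrc', tordata_dir='/tmp/tordata',
                hashedcontrolpassword='my-secret')
tor.make_torrc()       # writes HashedControlPassword/SocksPort/ControlPort/DataDirectory
tor.start_tor()        # launches `tor -f <torrc>` as a subprocess
print(tor.get_status)  # dict with pid, socks port and file paths
tor.kill_process()     # stops tor and removes the generated files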
d86e898eaf189c08d400028fbb8d2971795df000
5,107
py
Python
core/management/commands/gendoc.py
klebed/esdc-ce
2c9e4591f344247d345a83880ba86777bb794460
[ "Apache-2.0" ]
97
2016-11-15T14:44:23.000Z
2022-03-13T18:09:15.000Z
core/management/commands/gendoc.py
klebed/esdc-ce
2c9e4591f344247d345a83880ba86777bb794460
[ "Apache-2.0" ]
334
2016-11-17T19:56:57.000Z
2022-03-18T10:45:53.000Z
core/management/commands/gendoc.py
klebed/esdc-ce
2c9e4591f344247d345a83880ba86777bb794460
[ "Apache-2.0" ]
33
2017-01-02T16:04:13.000Z
2022-02-07T19:20:24.000Z
import os
import re
import shutil

from ._base import DanubeCloudCommand, CommandOption, CommandError, lcd


class Command(DanubeCloudCommand):
    help = 'Generate documentation files displayed in GUI.'
    DOC_REPO = 'https://github.com/erigones/esdc-docs.git'
    DOC_TMP_DIR = '/var/tmp/esdc-docs'

    options = (
        CommandOption('--api', '--api-only', action='store_true', dest='api_only', default=False,
                      help='Generate only the API documentation.'),
        CommandOption('--user-guide', '--user-guide-only', action='store_true', dest='user_guide_only',
                      default=False, help='Generate only the User Guide.'),
    )

    def gendoc_api(self):
        """Generate API documentation"""
        with lcd(self.PROJECT_DIR):
            doc_dir = self._path(self.PROJECT_DIR, 'doc', 'api')
            doc_dst = self._path(self.PROJECT_DIR, 'api', 'static', 'api', 'doc')
            bin_dst = self._path(self.PROJECT_DIR, 'api', 'static', 'api', 'bin')

            # Build sphinx docs
            with lcd(doc_dir):
                self.local('make esdc-clean; make esdc ESDOCDIR="%s"' % doc_dst)

            # Create es script suitable for download
            es_src = self._path(self.PROJECT_DIR, 'bin', 'es')
            es_dst = self._path(bin_dst, 'es')
            es_current = os.path.join(self.settings.PROJECT_DIR, 'var', 'www', 'static', 'api', 'bin', 'es')
            api_url = "API_URL = '%s'" % (self.settings.SITE_LINK + '/api')

            if os.path.isfile(es_current):
                with open(es_current, 'r') as es0:
                    for line in es0:
                        if line.startswith("API_URL = '"):
                            api_url = line
                            break

            with open(es_src) as es1:
                with os.fdopen(os.open(es_dst, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644), 'w') as es2:
                    es2.write(es1.read().replace("API_URL = 'http://127.0.0.1:8000/api'", api_url))

            # Copy es_bash_completion.sh to download location
            es_bc_src = self._path(doc_dir, 'es_bash_completion.sh')
            self.local('cp %s %s' % (es_bc_src, bin_dst))

            self.display('API documentation built successfully.', color='green')

    def gendoc_user_guide(self, fallback_branch='master'):
        """Generate user guide"""
        doc_dst = self._path(self.PROJECT_DIR, 'gui', 'static', 'user-guide')

        with lcd(self.PROJECT_DIR):
            try:
                branch = self.get_git_version()[0]  # Git tag or branch name
            except CommandError:
                self.display('Could not determine our branch or tag', color='yellow')
                branch = fallback_branch
                self.display('Falling back to "%s" branch' % branch, color='yellow')
            else:
                self.display('We are on branch "%s"' % branch)

            if self._path_exists(self.DOC_TMP_DIR, 'user-guide', 'conf.py'):
                existing_repo = True
                self.display('%s already exists in %s' % (self.DOC_REPO, self.DOC_TMP_DIR), color='yellow')
                with lcd(self.DOC_TMP_DIR):
                    self.local('git fetch')
                    self.display('%s has been successfully updated.' % self.DOC_REPO, color='green')
            else:
                if self._path_exists(self.DOC_TMP_DIR):
                    self.display('Removing stale %s' % self.DOC_TMP_DIR, color='yellow')
                    shutil.rmtree(self.DOC_TMP_DIR)
                existing_repo = False
                self.local('git clone %s %s' % (self.DOC_REPO, self.DOC_TMP_DIR))
                self.display('%s has been successfully cloned.' % self.DOC_TMP_DIR, color='green')

            with lcd(self.DOC_TMP_DIR):
                if self.local('git checkout %s' % branch, raise_on_error=False) != 0:
                    self.display('Could not checkout esdc-docs branch "%s"' % branch, color='yellow')
                    branch = fallback_branch
                    self.display('Falling back to "%s" branch' % branch, color='yellow')
                    self.local('git checkout %s' % branch)
                self.display('Checked out esdc-docs branch "%s"' % branch, color='green')

                # If the branch is not a tag name, then we need to merge/pull
                if existing_repo and not re.search('^v[0-9]', branch):
                    self.local('git merge --ff-only origin/%s' % branch)
                    self.display('Merged esdc-docs branch "%s"' % branch, color='green')

            # Build sphinx docs
            with lcd(self._path(self.DOC_TMP_DIR, 'user-guide')):
                self.local('make esdc-clean; make esdc ESDOCDIR="%s"' % doc_dst)

            self.display('User guide built successfully.', color='green')

    def handle(self, api_only=False, user_guide_only=False, **options):
        if api_only and user_guide_only:
            pass
        elif api_only:
            self.gendoc_api()
            return
        elif user_guide_only:
            self.gendoc_user_guide()
            return

        self.gendoc_api()
        self.display('\n\n', stderr=True)
        self.gendoc_user_guide()
44.798246
118
0.577247
658
5,107
4.305471
0.261398
0.047653
0.034945
0.045888
0.390046
0.298976
0.209319
0.151783
0.11366
0.08754
0
0.006639
0.292148
5,107
113
119
45.19469
0.77704
0.049344
0
0.209302
1
0
0.22608
0.00434
0
0
0
0
0
1
0.034884
false
0.011628
0.046512
0
0.162791
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
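Since the Command class in the gendoc.py row above is a Django-style management command, it can be exercised programmatically as well as from the shell. A minimal sketch, assuming the esdc-ce project's Django settings are configured; the keyword names mirror the dest values in the options tuple:

# Hypothetical invocation sketch, assuming a configured Django environment
# for the esdc-ce project; call_command is Django's standard entry point.
from django.core.management import call_command

call_command('gendoc', api_only=True)         # API docs only
call_command('gendoc', user_guide_only=True)  # User Guide only
call_command('gendoc')                        # both, as in handle()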
d8723e31987ced651ec6ee7cac0b7d24f592d4fe
1,332
py
Python
analytics_management/models.py
mattiolato98/reservation-ninja
0e50b218dd9d90f134868bade2ec2934283c12b5
[ "MIT" ]
1
2022-03-10T11:34:14.000Z
2022-03-10T11:34:14.000Z
analytics_management/models.py
mattiolato98/reservation-ninja
0e50b218dd9d90f134868bade2ec2934283c12b5
[ "MIT" ]
null
null
null
analytics_management/models.py
mattiolato98/reservation-ninja
0e50b218dd9d90f134868bade2ec2934283c12b5
[ "MIT" ]
null
null
null
from django.contrib.auth import get_user_model
from django.db import models


class Log(models.Model):
    """Model that describes a Log object; it contains information about daily executions."""
    execution_time = models.FloatField()
    users = models.IntegerField()
    lessons = models.IntegerField()
    date = models.DateField(auto_now_add=True)

    def __str__(self):
        return f"{self.date}"

    @property
    def average_user_execution_time(self):
        return self.execution_time / self.users if self.users > 0 else 0

    @property
    def average_lesson_execution_time(self):
        """Average execution time of a single lesson.

        Returns:
            float: the resulting average time
        """
        return self.execution_time / self.lessons if self.lessons > 0 else 0


class Feedback(models.Model):
    """Model that describes a user's feedback on the daily reservations."""
    user = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, related_name='feedbacks', null=True)
    ok = models.BooleanField()
    date = models.DateField(auto_now_add=True)

    def __str__(self):
        return f'{self.user.username} {self.ok}'


class Stats(models.Model):
    unsubscribed_users = models.IntegerField(default=0)
27.75
110
0.680931
170
1,332
5.164706
0.405882
0.088838
0.077449
0.045558
0.250569
0.189066
0.123007
0.123007
0.123007
0.123007
0
0.004854
0.226727
1,332
47
111
28.340426
0.847573
0.201952
0
0.26087
0
0
0.050968
0
0
0
0
0
0
1
0.173913
false
0
0.086957
0.130435
0.913043
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
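The two Log properties in the models.py row above guard against division by zero by returning 0 when the denominator is 0. A minimal sketch of their behaviour on unsaved instances, assuming the reservation-ninja Django settings are configured so the module imports:

# Hypothetical sketch; assumes Django is configured so the app imports.
from analytics_management.models import Log

log = Log(execution_time=120.0, users=40, lessons=200)
print(log.average_user_execution_time)    # 120.0 / 40 = 3.0
print(log.average_lesson_execution_time)  # 120.0 / 200 = 0.6
print(Log(execution_time=5.0, users=0, lessons=0).average_user_execution_time)  # 0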
d877c087af72ca345542775e075546a6694a0d12
1,422
py
Python
examples/s2_extensions.py
ChubV/oop-di
3de449e3c209529bd2554187aa9857bf729841b8
[ "MIT" ]
null
null
null
examples/s2_extensions.py
ChubV/oop-di
3de449e3c209529bd2554187aa9857bf729841b8
[ "MIT" ]
null
null
null
examples/s2_extensions.py
ChubV/oop-di
3de449e3c209529bd2554187aa9857bf729841b8
[ "MIT" ]
null
null
null
from abc import ABC, abstractmethod

from oop_di import ContainerDefinition, Extension


# #############Mailer bounded context###############
class MailerInterface(ABC):
    @abstractmethod
    def send_mail(self):
        ...


class Mailer(MailerInterface):
    def __init__(self, from_email):
        self.from_email = from_email

    def send_mail(self):
        print(f"Sending from {self.from_email}...")
        print("Sent")


class MailExtension(Extension):
    def define(self):
        self.add_param("from_email", "test@example.com")
        self.add_named_service(MailerInterface, Mailer)


# ############Product bounded context###########
class ProductService:
    def __init__(self, mailer: MailerInterface):
        self.mailer = mailer

    def process_product(self):
        print("processing product")
        self.mailer.send_mail()


class ProductExtension(Extension):
    def define(self):
        self.add_service(ProductService)


# #################Application
container_definition = ContainerDefinition()
container_definition.add_extension(ProductExtension())
container_definition.add_extension(MailExtension())
container = container_definition.compile()


@container.inject()
def process_product_endpoint(something, *, product_service: ProductService):
    print(something)
    product_service.process_product()


process_product_endpoint("doing something before calling product service")
22.571429
76
0.699015
145
1,422
6.613793
0.324138
0.046924
0.040667
0.031283
0.06048
0.06048
0
0
0
0
0
0
0.164557
1,422
62
77
22.935484
0.807239
0.040788
0
0.117647
0
0
0.098297
0
0
0
0
0
0
1
0.235294
false
0
0.058824
0
0.441176
0.117647
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
d881534818a726ed66374fd1b791859515909369
2,122
py
Python
cmsplugin_svg/migrations/0001_initial.py
parthenon/cmsplugin-svg
bb89705002cd3557f074f3f063a3ec251ca0a605
[ "BSD-3-Clause" ]
null
null
null
cmsplugin_svg/migrations/0001_initial.py
parthenon/cmsplugin-svg
bb89705002cd3557f074f3f063a3ec251ca0a605
[ "BSD-3-Clause" ]
null
null
null
cmsplugin_svg/migrations/0001_initial.py
parthenon/cmsplugin-svg
bb89705002cd3557f074f3f063a3ec251ca0a605
[ "BSD-3-Clause" ]
null
null
null
# Generated by Django 3.1.13 on 2021-08-03 22:33

from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('filer', '0012_file_mime_type'),
        ('cms', '0022_auto_20180620_1551'),
    ]

    operations = [
        migrations.CreateModel(
            name='SvgImage',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='cms.cmsplugin')),
                ('label', models.CharField(blank=True, help_text='Optional label for this plugin.', max_length=128, verbose_name='label')),
                ('id_name', models.CharField(blank=True, max_length=50, verbose_name='id name')),
                ('tag_type', models.CharField(blank=True, choices=[('', ''), ('figure', 'figure')], max_length=50, null=True, verbose_name='tag Type')),
                ('additional_class_names', models.TextField(blank=True, help_text='Comma separated list of additional classes to apply to tag_type', verbose_name='additional classes')),
                ('alignment', models.CharField(blank=True, choices=[('left', 'left'), ('right', 'right'), ('center', 'center')], max_length=10, null=True, verbose_name='image alignment')),
                ('width', models.PositiveIntegerField(blank=True, null=True, verbose_name='width')),
                ('height', models.PositiveIntegerField(blank=True, null=True, verbose_name='height')),
                ('caption_text', models.CharField(blank=True, max_length=255, null=True, verbose_name='caption text')),
                ('alt_text', models.CharField(blank=True, max_length=255, null=True, verbose_name='alt text')),
                ('svg_image', filer.fields.file.FilerFileField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='filer.file', verbose_name='file')),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
55.842105
192
0.633836
246
2,122
5.308943
0.394309
0.084227
0.091884
0.11026
0.29173
0.244257
0.173047
0.173047
0.090352
0.090352
0
0.030448
0.21065
2,122
38
193
55.842105
0.749254
0.021678
0
0
1
0
0.207711
0.021687
0
0
0
0
0
1
0
false
0
0.096774
0
0.225806
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d881b477a22ed1f78da11df06776a0e9cf84193c
302
py
Python
5-gui.py
theseana/pesteh
1125dc1055e3b8466c3c539c4afc2149d663dd46
[ "MIT" ]
1
2022-01-16T00:33:57.000Z
2022-01-16T00:33:57.000Z
5-gui.py
theseana/pesteh
1125dc1055e3b8466c3c539c4afc2149d663dd46
[ "MIT" ]
null
null
null
5-gui.py
theseana/pesteh
1125dc1055e3b8466c3c539c4afc2149d663dd46
[ "MIT" ]
null
null
null
from tkinter import *

root = Tk()
root.config(bg='yellow')

l1 = Label(root, text='Hello World!', bg='magenta')
l1.pack(side=LEFT)

b1 = Button(root, text='Click Me Please!', bg='cyan')
b1.pack(side=LEFT)

l2 = Label(root, text='Ta-Da!', bg='green')
l2.pack(side=LEFT)

root.mainloop()
18.875
54
0.629139
48
302
3.958333
0.583333
0.126316
0.189474
0
0
0
0
0
0
0
0
0.02381
0.165563
302
15
55
20.133333
0.730159
0
0
0
0
0
0.195122
0
0
0
0
0
0
1
0
false
0
0.1
0
0.1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d883b9402b2e431043b91f8f29e4ee4248eaa1ba
20,375
py
Python
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
7
2021-11-16T04:05:42.000Z
2022-02-19T21:14:29.000Z
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
12
2020-02-21T07:24:52.000Z
2020-04-14T09:54:32.000Z
venv/lib/python3.6/site-packages/ansible_collections/google/cloud/plugins/modules/gcp_redis_instance.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
1
2022-03-01T05:43:07.000Z
2022-03-01T05:43:07.000Z
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
#     ***     AUTO GENERATED CODE    ***    AUTO GENERATED CODE     ***
#
# ----------------------------------------------------------------------------
#
#     This file is automatically generated by Magic Modules and manual
#     changes will be clobbered when the file is regenerated.
#
#     Please read more about how to change this file at
#     https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------

from __future__ import absolute_import, division, print_function

__metaclass__ = type

################################################################################
# Documentation
################################################################################

ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}

DOCUMENTATION = '''
---
module: gcp_redis_instance
description:
- A Google Cloud Redis instance.
short_description: Creates a GCP Instance
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
  state:
    description:
    - Whether the given object should exist in GCP
    choices:
    - present
    - absent
    default: present
    type: str
  alternative_location_id:
    description:
    - Only applicable to STANDARD_HA tier which protects the instance against zonal
      failures by provisioning it across two zones.
    - If provided, it must be a different zone from the one provided in [locationId].
    required: false
    type: str
  auth_enabled:
    description:
    - Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If
      set to "true" AUTH is enabled on the instance.
    - Default value is "false" meaning AUTH is disabled.
    required: false
    default: 'false'
    type: bool
  authorized_network:
    description:
    - The full name of the Google Compute Engine network to which the instance is
      connected. If left unspecified, the default network will be used.
    required: false
    type: str
  connect_mode:
    description:
    - The connection mode of the Redis instance.
    - 'Some valid choices include: "DIRECT_PEERING", "PRIVATE_SERVICE_ACCESS"'
    required: false
    default: DIRECT_PEERING
    type: str
  display_name:
    description:
    - An arbitrary and optional user-provided name for the instance.
    required: false
    type: str
  labels:
    description:
    - Resource labels to represent user provided metadata.
    required: false
    type: dict
  redis_configs:
    description:
    - Redis configuration parameters, according to U(http://redis.io/topics/config).
    - 'Please check Memorystore documentation for the list of supported parameters:
      U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs)
      .'
    required: false
    type: dict
  location_id:
    description:
    - The zone where the instance will be provisioned. If not provided, the service
      will choose a zone for the instance. For STANDARD_HA tier, instances will be
      created across two zones for protection against zonal failures. If [alternativeLocationId]
      is also provided, it must be different from [locationId].
    required: false
    type: str
  name:
    description:
    - The ID of the instance or a fully qualified identifier for the instance.
    required: true
    type: str
  memory_size_gb:
    description:
    - Redis memory size in GiB.
    required: true
    type: int
  redis_version:
    description:
    - 'The version of Redis software. If not provided, latest supported version will
      be used. Currently, the supported values are: - REDIS_5_0 for Redis 5.0 compatibility
      - REDIS_4_0 for Redis 4.0 compatibility - REDIS_3_2 for Redis 3.2 compatibility
      .'
    required: false
    type: str
  reserved_ip_range:
    description:
    - The CIDR range of internal addresses that are reserved for this instance. If
      not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29
      or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets
      in an authorized network.
    required: false
    type: str
  tier:
    description:
    - 'The service tier of the instance. Must be one of these values: - BASIC: standalone
      instance - STANDARD_HA: highly available primary/replica instances .'
    - 'Some valid choices include: "BASIC", "STANDARD_HA"'
    required: false
    default: BASIC
    type: str
  region:
    description:
    - The name of the Redis region of the instance.
    required: true
    type: str
  project:
    description:
    - The Google Cloud Platform project to use.
    type: str
  auth_kind:
    description:
    - The type of credential used.
    type: str
    required: true
    choices:
    - application
    - machineaccount
    - serviceaccount
  service_account_contents:
    description:
    - The contents of a Service Account JSON file, either in a dictionary or as a
      JSON string that represents it.
    type: jsonarg
  service_account_file:
    description:
    - The path of a Service Account JSON file if serviceaccount is selected as type.
    type: path
  service_account_email:
    description:
    - An optional service account email address if machineaccount is selected and
      the user does not wish to use the default email.
    type: str
  scopes:
    description:
    - Array of scopes to be used
    type: list
    elements: str
  env_type:
    description:
    - Specifies which Ansible environment you're running this module within.
    - This should not be set unless you know what you're doing.
    - This only alters the User Agent string for any API requests.
    type: str
notes:
- 'API Reference: U(https://cloud.google.com/memorystore/docs/redis/reference/rest/)'
- 'Official Documentation: U(https://cloud.google.com/memorystore/docs/redis/)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
  env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
  env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''

EXAMPLES = '''
- name: create a network
  google.cloud.gcp_compute_network:
    name: network-instance
    project: "{{ gcp_project }}"
    auth_kind: "{{ gcp_cred_kind }}"
    service_account_file: "{{ gcp_cred_file }}"
    state: present
  register: network

- name: create a instance
  google.cloud.gcp_redis_instance:
    name: instance37
    tier: STANDARD_HA
    memory_size_gb: 1
    region: us-central1
    location_id: us-central1-a
    redis_version: REDIS_3_2
    display_name: Ansible Test Instance
    reserved_ip_range: 192.168.0.0/29
    labels:
      my_key: my_val
      other_key: other_val
    project: test_project
    auth_kind: serviceaccount
    service_account_file: "/tmp/auth.pem"
    state: present
'''

RETURN = '''
alternativeLocationId:
  description:
  - Only applicable to STANDARD_HA tier which protects the instance against zonal
    failures by provisioning it across two zones.
  - If provided, it must be a different zone from the one provided in [locationId].
  returned: success
  type: str
authEnabled:
  description:
  - Optional. Indicates whether OSS Redis AUTH is enabled for the instance. If set
    to "true" AUTH is enabled on the instance.
  - Default value is "false" meaning AUTH is disabled.
  returned: success
  type: bool
authorizedNetwork:
  description:
  - The full name of the Google Compute Engine network to which the instance is connected.
    If left unspecified, the default network will be used.
  returned: success
  type: str
connectMode:
  description:
  - The connection mode of the Redis instance.
  returned: success
  type: str
createTime:
  description:
  - The time the instance was created in RFC3339 UTC "Zulu" format, accurate to nanoseconds.
  returned: success
  type: str
currentLocationId:
  description:
  - The current zone where the Redis endpoint is placed.
  - For Basic Tier instances, this will always be the same as the [locationId] provided
    by the user at creation time. For Standard Tier instances, this can be either
    [locationId] or [alternativeLocationId] and can change after a failover event.
  returned: success
  type: str
displayName:
  description:
  - An arbitrary and optional user-provided name for the instance.
  returned: success
  type: str
host:
  description:
  - Hostname or IP address of the exposed Redis endpoint used by clients to connect
    to the service.
  returned: success
  type: str
labels:
  description:
  - Resource labels to represent user provided metadata.
  returned: success
  type: dict
redisConfigs:
  description:
  - Redis configuration parameters, according to U(http://redis.io/topics/config).
  - 'Please check Memorystore documentation for the list of supported parameters:
    U(https://cloud.google.com/memorystore/docs/redis/reference/rest/v1/projects.locations.instances#Instance.FIELDS.redis_configs)
    .'
  returned: success
  type: dict
locationId:
  description:
  - The zone where the instance will be provisioned. If not provided, the service
    will choose a zone for the instance. For STANDARD_HA tier, instances will be
    created across two zones for protection against zonal failures. If [alternativeLocationId]
    is also provided, it must be different from [locationId].
  returned: success
  type: str
name:
  description:
  - The ID of the instance or a fully qualified identifier for the instance.
  returned: success
  type: str
memorySizeGb:
  description:
  - Redis memory size in GiB.
  returned: success
  type: int
port:
  description:
  - The port number of the exposed Redis endpoint.
  returned: success
  type: int
persistenceIamIdentity:
  description:
  - Output only. Cloud IAM identity used by import / export operations to transfer
    data to/from Cloud Storage. Format is "serviceAccount:".
  - The value may change over time for a given instance so should be checked before
    each import/export operation.
  returned: success
  type: str
redisVersion:
  description:
  - 'The version of Redis software. If not provided, latest supported version will
    be used. Currently, the supported values are: - REDIS_5_0 for Redis 5.0 compatibility
    - REDIS_4_0 for Redis 4.0 compatibility - REDIS_3_2 for Redis 3.2 compatibility
    .'
  returned: success
  type: str
reservedIpRange:
  description:
  - The CIDR range of internal addresses that are reserved for this instance. If
    not provided, the service will choose an unused /29 block, for example, 10.0.0.0/29
    or 192.168.0.0/29. Ranges must be unique and non-overlapping with existing subnets
    in an authorized network.
  returned: success
  type: str
tier:
  description:
  - 'The service tier of the instance. Must be one of these values: - BASIC: standalone
    instance - STANDARD_HA: highly available primary/replica instances .'
  returned: success
  type: str
region:
  description:
  - The name of the Redis region of the instance.
  returned: success
  type: str
'''

################################################################################
# Imports
################################################################################

from ansible_collections.google.cloud.plugins.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time

################################################################################
# Main
################################################################################


def main():
    """Main function"""

    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            alternative_location_id=dict(type='str'),
            auth_enabled=dict(type='bool'),
            authorized_network=dict(type='str'),
            connect_mode=dict(default='DIRECT_PEERING', type='str'),
            display_name=dict(type='str'),
            labels=dict(type='dict'),
            redis_configs=dict(type='dict'),
            location_id=dict(type='str'),
            name=dict(required=True, type='str'),
            memory_size_gb=dict(required=True, type='int'),
            redis_version=dict(type='str'),
            reserved_ip_range=dict(type='str'),
            tier=dict(default='BASIC', type='str'),
            region=dict(required=True, type='str'),
        )
    )

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    state = module.params['state']

    fetch = fetch_resource(module, self_link(module))
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), fetch)
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, create_link(module))
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)


def create(module, link):
    auth = GcpSession(module, 'redis')
    return wait_for_operation(module, auth.post(link, resource_to_request(module)))


def update(module, link, fetch):
    auth = GcpSession(module, 'redis')
    params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
    request = resource_to_request(module)
    del request['name']
    return wait_for_operation(module, auth.patch(link, request, params=params))


def updateMask(request, response):
    update_mask = []
    if request.get('authEnabled') != response.get('authEnabled'):
        update_mask.append('authEnabled')
    if request.get('displayName') != response.get('displayName'):
        update_mask.append('displayName')
    if request.get('labels') != response.get('labels'):
        update_mask.append('labels')
    if request.get('redisConfigs') != response.get('redisConfigs'):
        update_mask.append('redisConfigs')
    if request.get('memorySizeGb') != response.get('memorySizeGb'):
        update_mask.append('memorySizeGb')
    return ','.join(update_mask)


def delete(module, link):
    auth = GcpSession(module, 'redis')
    return wait_for_operation(module, auth.delete(link))


def resource_to_request(module):
    request = {
        u'alternativeLocationId': module.params.get('alternative_location_id'),
        u'authEnabled': module.params.get('auth_enabled'),
        u'authorizedNetwork': module.params.get('authorized_network'),
        u'connectMode': module.params.get('connect_mode'),
        u'displayName': module.params.get('display_name'),
        u'labels': module.params.get('labels'),
        u'redisConfigs': module.params.get('redis_configs'),
        u'locationId': module.params.get('location_id'),
        u'name': module.params.get('name'),
        u'memorySizeGb': module.params.get('memory_size_gb'),
        u'redisVersion': module.params.get('redis_version'),
        u'reservedIpRange': module.params.get('reserved_ip_range'),
        u'tier': module.params.get('tier'),
    }
    return_vals = {}
    for k, v in request.items():
        if v or v is False:
            return_vals[k] = v

    return return_vals


def fetch_resource(module, link, allow_not_found=True):
    auth = GcpSession(module, 'redis')
    return return_if_object(module, auth.get(link), allow_not_found)


def self_link(module):
    return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances/{name}".format(**module.params)


def collection(module):
    return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances".format(**module.params)


def create_link(module):
    return "https://redis.googleapis.com/v1/projects/{project}/locations/{region}/instances?instanceId={name}".format(**module.params)


def return_if_object(module, response, allow_not_found=False):
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result


def is_different(module, response):
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v

    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v

    return GcpRequest(request_vals) != GcpRequest(response_vals)


# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    return {
        u'alternativeLocationId': module.params.get('alternative_location_id'),
        u'authEnabled': response.get(u'authEnabled'),
        u'authorizedNetwork': module.params.get('authorized_network'),
        u'connectMode': module.params.get('connect_mode'),
        u'createTime': response.get(u'createTime'),
        u'currentLocationId': response.get(u'currentLocationId'),
        u'displayName': response.get(u'displayName'),
        u'host': response.get(u'host'),
        u'labels': response.get(u'labels'),
        u'redisConfigs': response.get(u'redisConfigs'),
        u'locationId': module.params.get('location_id'),
        u'name': module.params.get('name'),
        u'memorySizeGb': response.get(u'memorySizeGb'),
        u'port': response.get(u'port'),
        u'persistenceIamIdentity': response.get(u'persistenceIamIdentity'),
        u'redisVersion': module.params.get('redis_version'),
        u'reservedIpRange': module.params.get('reserved_ip_range'),
        u'tier': module.params.get('tier'),
    }


def async_op_url(module, extra_data=None):
    if extra_data is None:
        extra_data = {}
    url = "https://redis.googleapis.com/v1/{op_id}"
    combined = extra_data.copy()
    combined.update(module.params)
    return url.format(**combined)


def wait_for_operation(module, response):
    op_result = return_if_object(module, response)
    if op_result is None:
        return {}

    status = navigate_hash(op_result, ['done'])
    wait_done = wait_for_completion(status, op_result, module)
    raise_if_errors(wait_done, ['error'], module)
    return navigate_hash(wait_done, ['response'])


def wait_for_completion(status, op_result, module):
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while not status:
        raise_if_errors(op_result, ['error'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, False)
        status = navigate_hash(op_result, ['done'])

    return op_result


def raise_if_errors(response, err_path, module):
    errors = navigate_hash(response, err_path)
    if errors is not None:
        module.fail_json(msg=errors)


if __name__ == '__main__':
    main()
34.533898
147
0.672442
2,552
20,375
5.256661
0.174373
0.020872
0.023481
0.022959
0.495043
0.434141
0.405442
0.377861
0.357585
0.349981
0
0.007179
0.200098
20,375
589
148
34.59253
0.815928
0.046822
0
0.455466
0
0.020243
0.646854
0.035431
0
0
0
0
0
1
0.034413
false
0
0.012146
0.008097
0.082996
0.002024
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
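The updateMask helper in the gcp_redis_instance.py row above computes the comma-separated field list Google APIs expect in a PATCH request by diffing the desired request against the current response. A minimal standalone sketch of the same diffing idea (the helper name diff_mask is hypothetical):

# Standalone sketch of the updateMask diffing idea used above.
def diff_mask(request, response, fields):
    return ','.join(f for f in fields if request.get(f) != response.get(f))

print(diff_mask({'displayName': 'a', 'labels': {}},
                {'displayName': 'b', 'labels': {}},
                ['displayName', 'labels']))  # -> displayName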
d884d18fcb8bab9fdae1894792a48243cc8a96d8
690
py
Python
tests/test_blogs.py
MichelAtieno/Personal-Blog
16657391b968e644b99fa0dde5d5a443881698da
[ "Unlicense" ]
null
null
null
tests/test_blogs.py
MichelAtieno/Personal-Blog
16657391b968e644b99fa0dde5d5a443881698da
[ "Unlicense" ]
null
null
null
tests/test_blogs.py
MichelAtieno/Personal-Blog
16657391b968e644b99fa0dde5d5a443881698da
[ "Unlicense" ]
null
null
null
import unittest

from app.models import BlogPost
from app import db


class CommentTest(unittest.TestCase):
    def setUp(self):
        self.new_blog = BlogPost(title='New Blog', blog_post='This is the content')

    def tearDown(self):
        db.session.delete(self.new_blog)
        db.session.commit()

    def test_instance(self):
        self.assertTrue(isinstance(self.new_blog, BlogPost))

    def test_check_instance_variables(self):
        self.assertEquals(self.new_blog.title, 'New Blog')
        self.assertEquals(self.new_blog.blog_post, 'This is the content')

    def test_save_blog(self):
        self.new_blog.save_blog()
        self.assertTrue(len(BlogPost.query.all()) > 0)
28.75
82
0.7
96
690
4.875
0.385417
0.119658
0.141026
0.064103
0.245727
0.145299
0.145299
0.145299
0.145299
0
0
0.001786
0.188406
690
24
83
28.75
0.833929
0
0
0
0
0
0.078148
0
0
0
0
0
0.235294
1
0.294118
false
0
0.176471
0
0.529412
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
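The CommentTest case in the tests/test_blogs.py row above is a plain unittest.TestCase, so it can be run without a test command. A minimal runner sketch, assuming the Personal-Blog app package is importable (the tests.test_blogs import path depends on the project layout):

# Hypothetical runner sketch; assumes the Flask app package imports.
import unittest
from tests.test_blogs import CommentTest

suite = unittest.TestLoader().loadTestsFromTestCase(CommentTest)
unittest.TextTestRunner(verbosity=2).run(suite)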
d885bcf7c2809e705cb09676abf33bf651c041a9
603
py
Python
numero_parole.py
mecroby/test_pi_learning
5e32b768968b523445578f8dc33dd720930c72e7
[ "Unlicense" ]
null
null
null
numero_parole.py
mecroby/test_pi_learning
5e32b768968b523445578f8dc33dd720930c72e7
[ "Unlicense" ]
null
null
null
numero_parole.py
mecroby/test_pi_learning
5e32b768968b523445578f8dc33dd720930c72e7
[ "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 15 20:33:49 2017

@author: roby
"""

# Given a number n, print the n most frequently used words
import sys
from collections import Counter

try:
    num_words = int(sys.argv[1])
except (IndexError, ValueError):
    print("usage: nomefile.py numero_parole")
    sys.exit(1)

counter = Counter(word.lower()
                  for line in sys.stdin
                  for word in line.strip().split()
                  if word)

for word, count in counter.most_common(num_words):
    sys.stdout.write(str(count))
    sys.stdout.write("\t")
    sys.stdout.write(word)
    sys.stdout.write("\n")
22.333333
94
0.630182
91
603
4.131868
0.626374
0.095745
0.148936
0
0
0
0
0
0
0
0
0.032751
0.240464
603
26
95
23.192308
0.78821
0.129353
0
0
0
0
0.082192
0
0
0
0
0
0
0
null
null
0
0.153846
null
null
0.076923
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
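numero_parole.py above reads stdin and prints the n most frequent words. The Counter.most_common pattern it relies on can be exercised in-process; a minimal sketch with made-up input text:

# Sketch of the Counter.most_common pattern used in numero_parole.py.
from collections import Counter

text = "the cat and the dog and the bird"
counter = Counter(w.lower() for w in text.split())
print(counter.most_common(2))  # [('the', 3), ('and', 2)]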
d88a2d7db32e6f798da168a5ca1a0ee451bd035f
3,973
py
Python
nodes/swagger_server/models/job_simulator_opts.py
rdbox-intec/r2s2_for_rostest
42b70d3ba72cdae08e9fd5fcdce9ddaeca37297f
[ "MIT" ]
null
null
null
nodes/swagger_server/models/job_simulator_opts.py
rdbox-intec/r2s2_for_rostest
42b70d3ba72cdae08e9fd5fcdce9ddaeca37297f
[ "MIT" ]
null
null
null
nodes/swagger_server/models/job_simulator_opts.py
rdbox-intec/r2s2_for_rostest
42b70d3ba72cdae08e9fd5fcdce9ddaeca37297f
[ "MIT" ]
null
null
null
# coding: utf-8

from __future__ import absolute_import
from datetime import date, datetime  # noqa: F401

from typing import List, Dict  # noqa: F401

from swagger_server.models.base_model_ import Model
from swagger_server import util


class JobSimulatorOpts(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, display_no: int=None, screent_no: int=None, vnc_password: str=None, no_vnc_port: int=None):  # noqa: E501
        """JobSimulatorOpts - a model defined in Swagger

        :param display_no: The display_no of this JobSimulatorOpts.  # noqa: E501
        :type display_no: int
        :param screent_no: The screent_no of this JobSimulatorOpts.  # noqa: E501
        :type screent_no: int
        :param vnc_password: The vnc_password of this JobSimulatorOpts.  # noqa: E501
        :type vnc_password: str
        :param no_vnc_port: The no_vnc_port of this JobSimulatorOpts.  # noqa: E501
        :type no_vnc_port: int
        """
        self.swagger_types = {
            'display_no': int,
            'screent_no': int,
            'vnc_password': str,
            'no_vnc_port': int
        }

        self.attribute_map = {
            'display_no': 'display_no',
            'screent_no': 'screent_no',
            'vnc_password': 'vnc_password',
            'no_vnc_port': 'no_vnc_port'
        }

        self._display_no = display_no
        self._screent_no = screent_no
        self._vnc_password = vnc_password
        self._no_vnc_port = no_vnc_port

    @classmethod
    def from_dict(cls, dikt) -> 'JobSimulatorOpts':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Job_simulator_opts of this JobSimulatorOpts.  # noqa: E501
        :rtype: JobSimulatorOpts
        """
        return util.deserialize_model(dikt, cls)

    @property
    def display_no(self) -> int:
        """Gets the display_no of this JobSimulatorOpts.

        :return: The display_no of this JobSimulatorOpts.
        :rtype: int
        """
        return self._display_no

    @display_no.setter
    def display_no(self, display_no: int):
        """Sets the display_no of this JobSimulatorOpts.

        :param display_no: The display_no of this JobSimulatorOpts.
        :type display_no: int
        """
        self._display_no = display_no

    @property
    def screent_no(self) -> int:
        """Gets the screent_no of this JobSimulatorOpts.

        :return: The screent_no of this JobSimulatorOpts.
        :rtype: int
        """
        return self._screent_no

    @screent_no.setter
    def screent_no(self, screent_no: int):
        """Sets the screent_no of this JobSimulatorOpts.

        :param screent_no: The screent_no of this JobSimulatorOpts.
        :type screent_no: int
        """
        self._screent_no = screent_no

    @property
    def vnc_password(self) -> str:
        """Gets the vnc_password of this JobSimulatorOpts.

        :return: The vnc_password of this JobSimulatorOpts.
        :rtype: str
        """
        return self._vnc_password

    @vnc_password.setter
    def vnc_password(self, vnc_password: str):
        """Sets the vnc_password of this JobSimulatorOpts.

        :param vnc_password: The vnc_password of this JobSimulatorOpts.
        :type vnc_password: str
        """
        self._vnc_password = vnc_password

    @property
    def no_vnc_port(self) -> int:
        """Gets the no_vnc_port of this JobSimulatorOpts.

        :return: The no_vnc_port of this JobSimulatorOpts.
        :rtype: int
        """
        return self._no_vnc_port

    @no_vnc_port.setter
    def no_vnc_port(self, no_vnc_port: int):
        """Sets the no_vnc_port of this JobSimulatorOpts.

        :param no_vnc_port: The no_vnc_port of this JobSimulatorOpts.
        :type no_vnc_port: int
        """
        self._no_vnc_port = no_vnc_port
27.783217
128
0.63403
500
3,973
4.756
0.146
0.04836
0.083263
0.100925
0.571068
0.433978
0.286796
0.200168
0.164844
0.04037
0
0.008806
0.285427
3,973
142
129
27.978873
0.828813
0.435691
0
0.24
0
0
0.077957
0
0
0
0
0
0
1
0.2
false
0.18
0.1
0
0.42
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
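The generated model in the job_simulator_opts.py row above stores values in private attributes behind typed properties, with from_dict delegating to swagger_server.util.deserialize_model. A minimal construction sketch, assuming the generated swagger_server package is importable; the values are hypothetical:

# Hypothetical sketch; assumes the generated swagger_server package imports.
from swagger_server.models.job_simulator_opts import JobSimulatorOpts

opts = JobSimulatorOpts(display_no=1, screent_no=0,
                        vnc_password='secret', no_vnc_port=6080)
opts.no_vnc_port = 6081  # assignment goes through the property setter
print(opts.display_no, opts.no_vnc_port)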
d88b2dca04f46637500ddd52a9ae3a5f2b3f87ce
43,776
py
Python
code/CCU004-2-run-models-[4].py
BHFDSC/CCU004_02
81c645d9877addfb500374c53689f13be6b56726
[ "Apache-2.0" ]
null
null
null
code/CCU004-2-run-models-[4].py
BHFDSC/CCU004_02
81c645d9877addfb500374c53689f13be6b56726
[ "Apache-2.0" ]
null
null
null
code/CCU004-2-run-models-[4].py
BHFDSC/CCU004_02
81c645d9877addfb500374c53689f13be6b56726
[ "Apache-2.0" ]
2
2022-01-04T17:04:45.000Z
2022-02-02T10:17:56.000Z
# Databricks notebook source
# MAGIC %md
# MAGIC **Description** This notebook runs the model analysis pipeline for CCU004-2
# MAGIC
# MAGIC **Project(s)** CCU004-2 - A nationwide deep learning pipeline to predict stroke and COVID-19 death in atrial fibrillation
# MAGIC
# MAGIC **Author(s)** Alex Handy
# MAGIC
# MAGIC **Reviewer(s)** Chris Tomlinson, Hiu Yan (Samantha) Ip
# MAGIC
# MAGIC **Date last updated** 24-01-2022

# COMMAND ----------

# MAGIC %run /Workspaces/dars_nic_391419_j3w9t_collab/CCU004/CCU004_2/CCU004-2-global-helper-functions

# COMMAND ----------

# set upfront parameters

# ALL SCENARIOS
outcomes = ["stroke", "covid_death"]
max_seq_lens = [100]
sample_ratios = [1]
runs = [1,2,3]
input_run_date = "240122"
output_run_date = "240122"

scenarios = len(outcomes) * len(max_seq_lens) * len(sample_ratios) * len(runs)
print(scenarios)

SUB_GROUPS = ["female", "male", "lt_65", "gte_65", "white", "asian_or_asian_british", "black_or_black_british", "mixed", "other_ethnic_groups"]

# COMMAND ----------

# helper functions and packages
from datetime import datetime
import math

%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve, roc_auc_score, confusion_matrix, recall_score, precision_score, accuracy_score
import time
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
from torch import Tensor
from torch.utils.data import dataset
from typing import Tuple
import xgboost as xgb

# to stop setting copy warning output - consider reviewing in refactoring
pd.set_option('chained', None)


## MACHINE LEARNING METHODS

def create_ml_features(x, feature_list, codelist, target_field, outcome):
    entry = {}
    for code in codelist:
        if code in x[target_field]:
            entry[code] = 1
        else:
            entry[code] = 0
    if outcome == "stroke":
        entry["age_at_af_diagnosis"] = x["age_at_af_diagnosis"]
    else:
        entry["age_at_cohort_start"] = x["age_at_cohort_start"]
    entry["female"] = x["female"]
    entry["white"] = x["white"]
    entry["asian_or_asian_british"] = x["asian_or_asian_british"]
    entry["black_or_black_british"] = x["black_or_black_british"]
    entry["mixed"] = x["mixed"]
    entry["other_ethnic_groups"] = x["other_ethnic_groups"]
    entry[outcome] = x[outcome]
    feature_list.append(entry)


def calc_ml_metrics(prediction, target):
    try:
        tn, fp, fn, tp = confusion_matrix(target, prediction).ravel()
        accuracy = (tp + tn) / (tp + tn + fp + fn)
        auc = roc_auc_score(target, prediction)
        sensitivity = tp / (tp + fn)
        specificity = tn / (tn + fp)
        precision = tp / (tp + fp)
    except ValueError:
        print("Predicting all one class")
        accuracy = 0
        auc = 0
        sensitivity = 0
        specificity = 0
        precision = 0
    return accuracy, auc, sensitivity, specificity, precision


def evaluate_ml_models(models, x_train, x_val, y_train, y_val, cohort_test_sub_non_n, all_codes_non_n,
                       num_static_features, outcome, summary_data, summary_data_sub, sub_groups):
    for i, model in enumerate(models):
        entry = {}
        md = model.fit(x_train, y_train)
        pred = md.predict(x_val)
        if isinstance(model, sklearn.linear_model._logistic.LogisticRegression):
            model_name = "Logistic Regression"
        elif isinstance(model, sklearn.ensemble._forest.RandomForestClassifier):
            model_name = "Random Forest"
        elif isinstance(model, xgb.XGBRegressor):
            model_name = "XG Boost"
            pred = [ 1 if p >= 0.5 else 0 for p in pred ]
        else:
            print("Incorrect model type")

        print("Validation sample results")
        accuracy, auc, sensitivity, specificity, precision = calc_ml_metrics(pred, y_val)
        print("Model:", model_name)
        print("Accuracy (val): ", accuracy)
        print("Auc (val): ", auc)
        print("Sensitivity (val):", sensitivity)
        print("Specificity (val):", specificity)
        print("Precision (val):", precision)

        print("Test sample results - whole group")
        pred_test = md.predict(cohort_test_sub_non_n.iloc[:, :(len(all_codes_non_n)+num_static_features)])
        y_test = cohort_test_sub_non_n[outcome]
        if isinstance(model, xgb.XGBRegressor):
            pred_test = [ 1 if p >= 0.5 else 0 for p in pred_test ]
        accuracy_test, auc_test, sensitivity_test, specificity_test, precision_test = calc_ml_metrics(pred_test, y_test)
        entry["model"] = model_name
        entry["accuracy"] = accuracy_test
        entry["auc"] = auc_test
        entry["sensitivity"] = sensitivity_test
        entry["specificity"] = specificity_test
        entry["precision"] = precision_test
        print("Accuracy (test): ", accuracy_test)
        print("Auc (test): ", auc_test)
        print("Sensitivity (test):", sensitivity_test)
        print("Specificity (test):", specificity_test)
        print("Precision (test):", precision_test)
        summary_data.append(entry)

        print("Test sample results - sub groups")
        entry_sub = {}
        entry_sub["model"] = model_name
        if outcome == "stroke":
            age_col = "age_at_af_diagnosis"
        else:
            age_col = "age_at_cohort_start"
        for sub_group in sub_groups:
            if sub_group == "male":
                sub_group_test_df = cohort_test_sub_non_n[cohort_test_sub_non_n["female"] == 0]
            elif sub_group == "gte_65":
                sub_group_test_df = cohort_test_sub_non_n[cohort_test_sub_non_n[age_col] >= 65]
            elif sub_group == "lt_65":
                sub_group_test_df = cohort_test_sub_non_n[cohort_test_sub_non_n[age_col] < 65]
            else:
                sub_group_test_df = cohort_test_sub_non_n[cohort_test_sub_non_n[sub_group] == 1]
            pred_test_sub = md.predict(sub_group_test_df.iloc[:, :(len(all_codes_non_n)+num_static_features)])
            y_test_sub = sub_group_test_df[outcome]
            if isinstance(model, xgb.XGBRegressor):
                pred_test_sub = [ 1 if p >= 0.5 else 0 for p in pred_test_sub ]
            accuracy_test_sub, auc_test_sub, sensitivity_test_sub, specificity_test_sub, precision_test_sub = calc_ml_metrics(pred_test_sub, y_test_sub)
            entry_sub[str("accuracy" + "_" + sub_group)] = accuracy_test_sub
            entry_sub[str("auc" + "_" + sub_group)] = auc_test_sub
            entry_sub[str("sensitivity" + "_" + sub_group)] = sensitivity_test_sub
            entry_sub[str("specificity" + "_" + sub_group)] = specificity_test_sub
            entry_sub[str("precision" + "_" + sub_group)] = precision_test_sub
        summary_data_sub.append(entry_sub)


## DEEP LEARNING METHODS

# data preparation
def lookup_embeddings(x, code_to_ix, target_field):
    med_hist_entry = []
    for code in x[target_field]:
        emb = code_to_ix[code]
        med_hist_entry.append(emb)
    return med_hist_entry


def add_label(x, target_field):
    if x[target_field] == 1:
        return target_field
    else:
        return "No " + target_field


def create_dl_features(cohort_train_sub, cohort_test_sub, code_to_ix_nn, TARGET_FIELD, STATIC_FEATURES, outcome, run):
    # NOTE: ASSUMES SET PADDING IDX TO ZERO IN EMBEDDING LAYER
    seq_features_nn = cohort_train_sub.apply(lookup_embeddings, args=(code_to_ix_nn, TARGET_FIELD), axis=1)
    seq_features_nn_tn = [ torch.tensor(seq) for seq in seq_features_nn ]
    seq_features_nn_tn_pd = pad_sequence(seq_features_nn_tn, batch_first=True)

    # add static features
    static_df = cohort_train_sub[STATIC_FEATURES]
    static_features_nn = torch.tensor([ row for row in static_df.values ])

    # generate outcome labels for training data
    OUTCOME_CAT = outcome + "_cat"
    cohort_train_sub[OUTCOME_CAT] = cohort_train_sub.apply(add_label, args=(outcome,), axis=1)
    all_categories = list(cohort_train_sub[OUTCOME_CAT].unique())
    all_categories.sort()  # CHECK SORT SO LABELLING MAKES SENSE FOR OUTCOMES e.g. 0=No stroke, 1=stroke
    print("DL categories: ", all_categories)
    n_categories = len(all_categories)
    labels_nn = cohort_train_sub[OUTCOME_CAT].apply(lambda x: all_categories.index(x))
    labels_nn_tn = torch.tensor(labels_nn)

    # create train, validation and test dataset
    x_seq_train, x_seq_val, x_static_train, x_static_val, y_train, y_val = train_test_split(
        seq_features_nn_tn_pd, static_features_nn, labels_nn_tn, test_size=0.2, random_state=run)

    # test sample
    seq_features_test_nn = cohort_test_sub.apply(lookup_embeddings, args=(code_to_ix_nn, TARGET_FIELD), axis=1)
    seq_features_test_nn_tn = [ torch.tensor(seq) for seq in seq_features_test_nn ]
    seq_features_test_nn_tn_pd = pad_sequence(seq_features_test_nn_tn, batch_first=True)
    x_seq_test = seq_features_test_nn_tn_pd

    static_df_test = cohort_test_sub[STATIC_FEATURES]
    static_features_test_nn = torch.tensor([ row for row in static_df_test.values ])
    x_static_test = static_features_test_nn

    cohort_test_sub[OUTCOME_CAT] = cohort_test_sub.apply(add_label, args=(outcome,), axis=1)
    labels_test_nn = cohort_test_sub[OUTCOME_CAT].apply(lambda x: all_categories.index(x))
    labels_test_nn_tn = torch.tensor(labels_test_nn)
    y_test = labels_test_nn_tn

    print("x seq train", x_seq_train.size())
    print("x static train", x_static_train.size())
    print("y train", y_train.size())
    print("x seq val", x_seq_val.size())
    print("x static val", x_static_val.size())
    print("y val", y_val.size())
    print("x seq test", x_seq_test.size())
    print("x static test", x_static_test.size())
    print("y test", y_test.size())

    return x_seq_train, x_seq_val, x_static_train, x_static_val, y_train, y_val, x_seq_test, x_static_test, y_test, all_categories, n_categories, OUTCOME_CAT


def create_dl_sub_sample(cohort_test_sub, sub_group, code_to_ix_nn, TARGET_FIELD, STATIC_FEATURES, outcome):
    if outcome == "stroke":
        age_col = "age_at_af_diagnosis"
    else:
        age_col = "age_at_cohort_start"
    if sub_group == "male":
        cohort_test_sub_sample_df = cohort_test_sub[cohort_test_sub["female"] == 0]
    elif sub_group == "gte_65":
        cohort_test_sub_sample_df = cohort_test_sub[cohort_test_sub[age_col] >= 65]
    elif sub_group == "lt_65":
        cohort_test_sub_sample_df = cohort_test_sub[cohort_test_sub[age_col] < 65]
    else:
        cohort_test_sub_sample_df = cohort_test_sub[cohort_test_sub[sub_group] == 1]

    seq_features_test_nn_sub = cohort_test_sub_sample_df.apply(lookup_embeddings, args=(code_to_ix_nn, TARGET_FIELD), axis=1)
    seq_features_test_nn_tn_sub = [ torch.tensor(seq) for seq in seq_features_test_nn_sub ]
    seq_features_test_nn_tn_pd_sub = pad_sequence(seq_features_test_nn_tn_sub, batch_first=True)
    x_seq_test_sub = seq_features_test_nn_tn_pd_sub

    static_df_test_sub = cohort_test_sub_sample_df[STATIC_FEATURES]
    static_features_test_nn_sub = torch.tensor([ row for row in static_df_test_sub.values ])
    x_static_test_sub = static_features_test_nn_sub

    OUTCOME_CAT = outcome + "_cat"
    cohort_test_sub_sample_df[OUTCOME_CAT] = cohort_test_sub_sample_df.apply(add_label, args=(outcome,), axis=1)
    labels_test_nn_sub = cohort_test_sub_sample_df[OUTCOME_CAT].apply(lambda x: all_categories.index(x))
    labels_test_nn_tn_sub = torch.tensor(labels_test_nn_sub.values)
    y_test_sub = labels_test_nn_tn_sub

    return x_seq_test_sub, x_static_test_sub, y_test_sub


def create_dl_batches(batch_size, val_batch_size, x_seq_train,
                      x_static_train, y_train, x_seq_val, x_static_val, y_val):
    train_data_nn = torch.utils.data.TensorDataset(x_seq_train, x_static_train, y_train)
    val_data_nn = torch.utils.data.TensorDataset(x_seq_val, x_static_val, y_val)
    train_loader_nn = torch.utils.data.DataLoader(train_data_nn, shuffle=True, batch_size=batch_size, drop_last=True)
    val_loader_nn = torch.utils.data.DataLoader(val_data_nn, shuffle=False, batch_size=val_batch_size, drop_last=True)
    return train_loader_nn, val_loader_nn


# training and evaluation
def categoryFromOutput(output, all_categories):
    top_n, top_i = output.topk(1)
    category_i = top_i[0].item()
    return all_categories[category_i]


def get_pred_label(output):
    top_n, top_i = output.topk(1)
    category_i = top_i[0].item()
    return category_i


def calc_dl_metrics(prediction, target, all_categories):
    predictions = []
    targets = []
    for i, sample in enumerate(prediction):
        pred_label = categoryFromOutput(sample, all_categories)
        pred = get_pred_label(sample)
        target_label = target[i].item()
        predictions.append(get_pred_label(sample))
        targets.append(target[i].item())
    try:
        tn, fp, fn, tp = confusion_matrix(targets, predictions).ravel()
        accuracy = accuracy_score(targets, predictions)
        auc = roc_auc_score(targets, predictions)
        sensitivity = recall_score(targets, predictions)
        specificity = tn / (tn + fp)
        precision = precision_score(targets, predictions, zero_division=0)
    except ValueError:
        print("Predicting all one class")
        accuracy = 0
        auc = 0
        sensitivity = 0
        specificity = 0
        precision = 0
    return accuracy, auc, sensitivity, specificity, precision


def run_dl_training_and_evaluation(net, opt, criterion, summary_data, max_seq_len, all_categories, epochs,
                                   train_loader_nn, val_batch_size, val_loader_nn, x_seq_test, x_static_test,
                                   y_test, cohort_test_sub, sub_groups, code_to_ix_nn, TARGET_FIELD,
                                   STATIC_FEATURES, outcome, summary_data_sub):
    losses_train = []
    accs_train = []
    aucs_train = []
    sens_train = []
    specs_train = []
    precs_train = []
    accs_val = []
    aucs_val = []
    sens_val = []
    specs_val = []
    precs_val = []
    accs_test = []
    aucs_test = []
    sens_test = []
    specs_test = []
    precs_test = []
    sub_group_res = []

    net_start = time.time()
    print(net.model_name, " started ", datetime.fromtimestamp(net_start))

    # loop through epochs
    for e in range(1, epochs + 1):
        net.train()
        epoch_start = time.time()
        print("Epoch: ", str(e), " started ", datetime.fromtimestamp(epoch_start))

        # setup evaluation metrics
        epoch_loss = 0
        epoch_acc_train = 0
        epoch_auc_train = 0
        epoch_sen_train = 0
        epoch_spec_train = 0
        epoch_prec_train = 0
        epoch_acc_val = 0
        epoch_auc_val = 0
        epoch_sen_val = 0
        epoch_spec_val = 0
        epoch_prec_val = 0
        epoch_sub_group_res = {}

        if net.model_name == "LSTM":
            # initialize hidden layers
            h = net.init_hidden(batch_size)

        # training batches
        for batch_index, batch in enumerate(train_loader_nn):
            x_batch_seq = batch[0]
            x_batch_static = batch[1]
            y_batch = batch[2]

            if net.model_name == "LSTM":
                # generates hidden layer input for lstm
                h = tuple([l.data for l in h])
            elif net.model_name == "Transformer":
                # generates [max_seq_len, max_seq_len] square for transformer
                src_mask = generate_square_subsequent_mask(max_seq_len)
            else:
                print("Error, model type not available")

            # zero the gradient
            opt.zero_grad()

            # predict the output
            if net.model_name == "LSTM":
                y_batch_pred = net(x_batch_seq, x_batch_static, h)
            elif net.model_name == "Transformer":
                y_batch_pred = net(x_batch_seq, x_batch_static, src_mask)
            else:
                print("Error, model type not available")

            # calculate the loss
            loss = criterion(y_batch_pred, y_batch)

            # calculate evaluation metrics
            accuracy_train,
auc_train, sensitivity_train, specificity_train, precision_train = calc_dl_metrics(y_batch_pred, y_batch, all_categories) #compute the gradient loss.backward() #update the weights opt.step() epoch_loss += loss.item() epoch_acc_train += accuracy_train epoch_auc_train += auc_train epoch_sen_train += sensitivity_train epoch_spec_train += specificity_train epoch_prec_train += precision_train #validation and test net.eval() with torch.no_grad(): #validation if net.model_name == "LSTM": h_val = net.init_hidden(val_batch_size) for batch_val_index, batch_val in enumerate(val_loader_nn): x_batch_seq_val = batch_val[0] x_batch_static_val = batch_val[1] y_batch_val = batch_val[2] if net.model_name == "LSTM": #generates hidden layer input for lstm h_val = tuple([l_v.data for l_v in h_val]) y_batch_pred_val = net(x_batch_seq_val, x_batch_static_val, h_val) elif net.model_name == "Transformer": #generates [max_seq_len, max_seq_len] square for transformer src_mask_val = generate_square_subsequent_mask(max_seq_len) y_batch_pred_val = net(x_batch_seq_val, x_batch_static_val, src_mask_val) else: print("Error, model type not available") accuracy_val, auc_val, sensitivity_val, specificity_val, precision_val = calc_dl_metrics(y_batch_pred_val, y_batch_val, all_categories) epoch_acc_val += accuracy_val epoch_auc_val += auc_val epoch_sen_val += sensitivity_val epoch_spec_val += specificity_val epoch_prec_val += precision_val #test the output - whole group if net.model_name == "LSTM": #test batch prep lstm h_test = net.init_hidden(len(x_seq_test)) h_test = tuple([l_t.data for l_t in h_test]) y_pred_test = net(x_seq_test, x_static_test, h_test) elif net.model_name == "Transformer": #test batch prep transformer src_mask_test = generate_square_subsequent_mask(max_seq_len) y_pred_test = net(x_seq_test, x_static_test, src_mask_test) else: print("Error, model type not available") accuracy_test, auc_test, sensitivity_test, specificity_test, precision_test = calc_dl_metrics(y_pred_test, y_test, all_categories) #test the output - sub groups epoch_sub_group_res["model"] = net.model_name for sub_group in sub_groups: x_seq_test_sub, x_static_test_sub, y_test_sub = create_dl_sub_sample(cohort_test_sub, sub_group, code_to_ix_nn, TARGET_FIELD, STATIC_FEATURES, outcome) if net.model_name == "LSTM": #test batch prep lstm h_test_sub = net.init_hidden(len(x_seq_test_sub)) h_test_sub = tuple([l_t.data for l_t in h_test_sub]) y_pred_test_sub = net(x_seq_test_sub, x_static_test_sub, h_test_sub) elif net.model_name == "Transformer": #test batch prep transformer #NOTE: in sub groups, there is greater possibility that sample does not have an individual with max seq len (e.g. 
black british error) so mask with size of longest length mask_len = x_seq_test_sub.size()[1] src_mask_test_sub = generate_square_subsequent_mask(mask_len) y_pred_test_sub = net(x_seq_test_sub, x_static_test_sub, src_mask_test_sub) else: print("Error, model type not available") accuracy_test_sub, auc_test_sub, sensitivity_test_sub, specificity_test_sub, precision_test_sub = calc_dl_metrics(y_pred_test_sub, y_test_sub, all_categories) epoch_sub_group_res[str("accuracy" + "_" + sub_group)] = accuracy_test_sub epoch_sub_group_res[str("auc" + "_" + sub_group)] = auc_test_sub epoch_sub_group_res[str("sensitivity" + "_" + sub_group)] = sensitivity_test_sub epoch_sub_group_res[str("specificity" + "_" + sub_group)] = specificity_test_sub epoch_sub_group_res[str("precision" + "_" + sub_group)] = precision_test_sub #accumulate metrics at epoch level (for charts and model reporting) reported_loss_train = epoch_loss / len(train_loader_nn) reported_acc_train = epoch_acc_train / len(train_loader_nn) reported_auc_train = epoch_auc_train / len(train_loader_nn) reported_sen_train = epoch_sen_train / len(train_loader_nn) reported_spec_train = epoch_spec_train / len(train_loader_nn) reported_prec_train = epoch_prec_train / len(train_loader_nn) reported_acc_val = epoch_acc_val / len(val_loader_nn) reported_auc_val = epoch_auc_val / len(val_loader_nn) reported_sen_val = epoch_sen_val / len(val_loader_nn) reported_spec_val = epoch_spec_val / len(val_loader_nn) reported_prec_val = epoch_prec_val / len(val_loader_nn) losses_train.append(epoch_loss) accs_train.append(reported_acc_train) aucs_train.append(reported_auc_train) sens_train.append(reported_sen_train) specs_train.append(reported_spec_train) precs_train.append(reported_prec_train) accs_val.append(reported_acc_val) aucs_val.append(reported_auc_val) sens_val.append(reported_sen_val) specs_val.append(reported_spec_val) precs_val.append(reported_prec_val) accs_test.append(accuracy_test) aucs_test.append(auc_test) sens_test.append(sensitivity_test) specs_test.append(specificity_test) precs_test.append(precision_test) sub_group_res.append(epoch_sub_group_res) epoch_end = time.time() print("Epoch: " + str(e) + " completed in %s seconds" % ( round(epoch_end - epoch_start,2) ) ) #present epoch outputs print("Epoch: " + str(e) + " | Training Loss: " + str(round(reported_loss_train, 3)) + " | Training Accuracy: " + str(round(reported_acc_train, 3)) + " | Training AUC: " + str(round(reported_auc_train, 3))) print("Epoch: " + str(e) + " | Training Sensitivity: " + str(round(reported_sen_train, 3)) + " | Training Specificity: " + str(round(reported_spec_train, 3)) + " | Training Precision: " + str(round(reported_prec_train, 3))) print("Epoch: " + str(e) + "| Validation Accuracy: " + str(round(reported_acc_val, 3)) + " | Validation AUC: " + str(round(reported_auc_val, 3))) print("Epoch: " + str(e) + " | Validation Sensitivity: " + str(round(reported_sen_val, 3)) + " | Validation Specificity: " + str(round(reported_spec_val, 3)) + " | Validation Precision: " + str(round(reported_prec_val, 3))) print("Epoch: " + str(e) + "| Test Accuracy: " + str(round(accuracy_test, 3)) + " | Test AUC: " + str(round(auc_test, 3))) print("Epoch: " + str(e) + " | Test Sensitivity: " + str(round(sensitivity_test, 3)) + " | Test Specificity: " + str(round(specificity_test, 3)) + " | Test Precision: " + str(round(precision_test, 3))) net_end = time.time() print("Training completed in %s minutes" % ( round(net_end - net_start,2) / 60) ) print("Get the summary results") max_auc_val = 
max(aucs_val) print("Max auc val", max_auc_val) max_auc_epoch_idx = aucs_val.index(max_auc_val) print("Max auc epoch", max_auc_epoch_idx) #load into summary data entry = {} entry["model"] = net.model_name entry["accuracy"] = accs_test[max_auc_epoch_idx] entry["auc"] = aucs_test[max_auc_epoch_idx] entry["sensitivity"] = sens_test[max_auc_epoch_idx] entry["specificity"] = specs_test[max_auc_epoch_idx] entry["precision"] = precs_test[max_auc_epoch_idx] summary_data.append(entry) summary_data_sub_entry = sub_group_res[max_auc_epoch_idx] summary_data_sub.append(summary_data_sub_entry) ##CHADSVASC def list_medcodes(codelist_column_df): codelist = [item.code for item in codelist_column_df.select('code').collect()] return codelist def load_chads_codelists(components): for comp in components: spark.sql(f"""CREATE OR REPLACE GLOBAL TEMP VIEW {comp}_codelist AS SELECT * FROM dars_nic_391419_j3w9t_collab.ccu020_20210816_2020_01_01_codelists WHERE codelist = '{comp}_chads' """) comp_table = 'global_temp.' + comp + '_codelist' comp_codelist = spark.table(comp_table) comp_codelist_py = list_medcodes(comp_codelist) component_codelists.append(comp_codelist_py) def create_features_chads(x, feature_list, codelists, outcome): entry = {} #populate component fields for idx, codelist in enumerate(codelists): if idx == 0: comp_name = "vascular_disease" elif idx == 1: comp_name = "congestive_heart_failure" elif idx == 2: comp_name = "diabetes" else: comp_name = "hypertension" #NOTE: this field is different than ML and DL models which use most recent 100 codes as did not want to artificially disadvantage chadsvasc that does not use high dimensional sequence data for code in x["med_hist_uniq"]: if code in codelist: entry[comp_name] = 1 break else: entry[comp_name] = 0 if outcome == "stroke": entry["age"] = x["age_at_af_diagnosis"] else: entry["age"] = x["age_at_cohort_start"] entry["female"] = x["female"] entry["white"] = x["white"] entry["asian_or_asian_british"] = x["asian_or_asian_british"] entry["black_or_black_british"] = x["black_or_black_british"] entry["mixed"] = x["mixed"] entry["other_ethnic_groups"] = x["other_ethnic_groups"] entry[outcome] = x[outcome] feature_list.append(entry) def create_chads_score(x): if x["age"] >=75: age = 2 elif (x["age"] >=65) & (x["age"] <75): age = 1 else: age = 0 score = (x["vascular_disease"] + x["congestive_heart_failure"] + x["diabetes"] + x["hypertension"] + age + x["female"]) return score def run_chads_evaluation(cohort_test_sub_chads, outcome, summary_data, summary_data_sub, sub_groups): #whole group metrics y = cohort_test_sub_chads[outcome].values pred = cohort_test_sub_chads["pred_chads2"].values accuracy, auc, sensitivity, specificity, precision = calc_ml_metrics(pred, y) print("Accuracy: ", accuracy) print("Auc : ", auc) print("Sensitivity:", sensitivity) print("Specificity:", specificity) print("Precision:", precision) #load into summary data entry = {} entry["model"] = "CHA2DS2-VASc >=2" entry["accuracy"] = accuracy entry["auc"] = auc entry["sensitivity"] = sensitivity entry["specificity"] = specificity entry["precision"] = precision summary_data.append(entry) #sub group metrics entry_sub = {} entry_sub["model"] = "CHA2DS2-VASc >=2" #NOTE: different age interface for chads as the age parameter is already adjusted for stroke vs covid death in chads features (opportunity for tidying) for sub_group in sub_groups: if sub_group == "male": sub_group_test_df = cohort_test_sub_chads[cohort_test_sub_chads["female"] == 0] elif sub_group == "gte_65": sub_group_test_df = 
cohort_test_sub_chads[cohort_test_sub_chads["age"] >=65] elif sub_group == "lt_65": sub_group_test_df = cohort_test_sub_chads[cohort_test_sub_chads["age"] <65] else: sub_group_test_df = cohort_test_sub_chads[cohort_test_sub_chads[sub_group] == 1] y_test_sub = sub_group_test_df[outcome].values pred_test_sub = sub_group_test_df["pred_chads2"].values accuracy_test_sub, auc_test_sub, sensitivity_test_sub, specificity_test_sub, precision_test_sub = calc_ml_metrics(pred_test_sub, y_test_sub) entry_sub[str("accuracy" + "_" + sub_group)] = accuracy_test_sub entry_sub[str("auc" + "_" + sub_group)] = auc_test_sub entry_sub[str("sensitivity" + "_" + sub_group)] = sensitivity_test_sub entry_sub[str("specificity" + "_" + sub_group)] = specificity_test_sub entry_sub[str("precision" + "_" + sub_group)] = precision_test_sub summary_data_sub.append(entry_sub) # COMMAND ---------- #LSTM model class class MyLSTM(nn.Module): def __init__(self, output_size, vocab_size, embedding_dim, hidden_dim, n_layers, static_features_n, fc1_dim, dropout=0.2): super(MyLSTM, self).__init__() self.model_name = "LSTM" self.embeddings = nn.Embedding(vocab_size, embedding_dim, padding_idx=0) self.output_size = output_size self.n_layers = n_layers self.hidden_dim = hidden_dim self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, batch_first=True) self.fc_static = nn.Linear(static_features_n, hidden_dim) self.fc1 = nn.Linear((hidden_dim*2),fc1_dim) self.fc_out = nn.Linear(fc1_dim, output_size) self.softmax = nn.LogSoftmax(dim=1) self.dropout = nn.Dropout(dropout) def forward(self, seq_batch, static_batch, hidden): embeds = self.embeddings(seq_batch) lstm_out, (ht, ct) = self.lstm(embeds, hidden) lstm_ht = lstm_out[:,-1,:] lstm_ht_drop = self.dropout(lstm_ht) static = F.relu(self.fc_static(static_batch.float())) comb = torch.cat([lstm_ht_drop, static], dim=1) comb_drop = self.dropout(comb) fc1_out = F.relu(self.fc1(comb_drop)) out = self.fc_out(fc1_out) out = self.softmax(out) return out def init_hidden(self, batch_size): weight = next(self.parameters()).data #initializes hidden state and cell state hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) return hidden # COMMAND ---------- class MyTransformer(nn.Module): def __init__(self, output_size, vocab_size, embedding_dim, hidden_dim, n_head, n_layers, n_static_features, static_dim, combo_dim, fc_int_dim, dropout = 0.2): super().__init__() self.model_name = "Transformer" self.pos_encoder = PositionalEncoding(embedding_dim, dropout) encoder_layers = TransformerEncoderLayer(embedding_dim, n_head, hidden_dim, dropout, batch_first=True) self.transformer_encoder = TransformerEncoder(encoder_layers, n_layers) self.encoder = nn.Embedding(vocab_size, embedding_dim) self.fc_static = nn.Linear(n_static_features, static_dim) self.fc_int = nn.Linear(combo_dim, fc_int_dim) self.embedding_dim = embedding_dim self.decoder = nn.Linear(fc_int_dim, output_size) self.dropout = nn.Dropout(dropout) self.softmax = nn.LogSoftmax(dim=1) self.init_weights() def init_weights(self): initrange = 0.1 self.encoder.weight.data.uniform_(-initrange, initrange) self.decoder.bias.data.zero_() self.decoder.weight.data.uniform_(-initrange, initrange) def forward(self, src_seq, src_static, src_mask): src_1 = self.encoder(src_seq) * math.sqrt(self.embedding_dim) src_2 = self.pos_encoder(src_1) seq_output = self.transformer_encoder(src_2, src_mask) seq_output_drop = self.dropout(seq_output) seq_sum_output = 
seq_output_drop.sum(dim=1) # pool over the time dimension static_output_1 = F.relu(self.fc_static(src_static.float())) comb_output_1 = torch.cat([static_output_1, seq_sum_output], dim=1) comb_output_2 = F.relu(self.fc_int(comb_output_1)) comb_output_2_drop = self.dropout(comb_output_2) decoder_output = self.decoder(comb_output_2_drop) output = self.softmax(decoder_output) return output def generate_square_subsequent_mask(sz): return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1) class PositionalEncoding(nn.Module): def __init__(self, embedding_dim, dropout = 0.2, max_len = 5000): super().__init__() self.dropout = nn.Dropout(p=dropout) position = torch.arange(max_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, embedding_dim, 2) * (-math.log(10000.0) / embedding_dim)) pe = torch.zeros(max_len, 1, embedding_dim) pe[:, 0, 0::2] = torch.sin(position * div_term) pe[:, 0, 1::2] = torch.cos(position * div_term) self.register_buffer('pe', pe) def forward(self, x): #permute to change from seq_len first to batch size first pos_add = self.pe[:x.size(1)].permute(1,0,2) x = x + pos_add return self.dropout(x) # COMMAND ---------- #main script start = time.time() print("Script started ", datetime.fromtimestamp(start)) for outcome in outcomes: print("Outcome: ", outcome) #define static features if outcome == "stroke": STATIC_FEATURES = ["age_at_af_diagnosis", "female", "white", "asian_or_asian_british", "black_or_black_british", "mixed", "other_ethnic_groups"] else: STATIC_FEATURES = ["age_at_cohort_start", "female", "white", "asian_or_asian_british", "black_or_black_british", "mixed", "other_ethnic_groups"] NUM_STATIC_FEATURES = len(STATIC_FEATURES) print("Number of static features", NUM_STATIC_FEATURES) for run in runs: print("Run: ", run) #load the test table print("Load the test table for: ", outcome, " and run ", run) cohort_test_sub_py_export_table_name = "ccu004_2_cohort_" + outcome + "_seq_len_all_run_" + str(run) + "_test_sub_" + input_run_date cohort_test_sub_py = spark.table("dars_nic_391419_j3w9t_collab." + cohort_test_sub_py_export_table_name) cohort_test_sub = cohort_test_sub_py.toPandas() print("Test sub rows", len(cohort_test_sub)) for sample_ratio in sample_ratios: print("Sample ratio: ", sample_ratio) #load the train table print("Load the train table for: ", outcome, " and run ", run, "and sample ratio ", sample_ratio) cohort_train_sub_py_export_table_name = "ccu004_2_cohort_" + outcome + "_seq_len_all_sr_" + str(sample_ratio) + "_run_" + str(run) + "_train_sub_" + input_run_date cohort_train_sub_py = spark.table("dars_nic_391419_j3w9t_collab." 
+ cohort_train_sub_py_export_table_name) cohort_train_sub = cohort_train_sub_py.toPandas() print("Train sub rows", len(cohort_train_sub)) for max_seq_len in max_seq_lens: print("Max seq len: ", max_seq_len) #setup summary data for each scenario print("Setup summary data for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, "and max seq len ", max_seq_len) summary_data = [] summary_data_sub = [] #define max seq len field if max_seq_len == 60: TARGET_FIELD = "med_hist_target_60" else: TARGET_FIELD = "med_hist_target_100" print("Target field: ", TARGET_FIELD) #create universal vocab of medical codes training_codes_non_n = [item for sublist in cohort_train_sub[TARGET_FIELD].values for item in sublist] training_codelist_non_n = list(set(training_codes_non_n)) print("Codelist vocab length training non-neural", len(training_codelist_non_n)) test_codes_non_n = [item for sublist in cohort_test_sub[TARGET_FIELD].values for item in sublist] test_codelist_non_n = list(set(test_codes_non_n)) print("Codelist vocab length test non-neural", len(test_codelist_non_n)) all_codes_non_n = list(set(training_codelist_non_n + test_codelist_non_n)) print("Codelist vocab length non-neural", len(all_codes_non_n)) # use same approach to cover training and test sub samples all_codes_nn = all_codes_non_n print("Codelist vocab length neural nets", len(all_codes_nn)) code_to_ix_nn = {code: i+1 for i, code in enumerate(all_codes_nn)} #create features for ml models print("Create ml features") start_ml = time.time() print("ML started ", datetime.fromtimestamp(start_ml)) train_features_non_n = [] test_features_non_n = [] cohort_train_sub.apply(create_ml_features, args=(train_features_non_n,all_codes_non_n, TARGET_FIELD, outcome), axis=1) cohort_test_sub.apply(create_ml_features, args=(test_features_non_n,all_codes_non_n, TARGET_FIELD, outcome), axis=1) cohort_train_sub_non_n = pd.DataFrame(train_features_non_n) cohort_test_sub_non_n = pd.DataFrame(test_features_non_n) print("Check dimensions of ml features") print("Ml train features shape: ", cohort_train_sub_non_n.shape) print("Ml test features shape: ", cohort_test_sub_non_n.shape) #train, evaluate and report on ml models ml_models = [LogisticRegression(max_iter=3000, random_state=run), RandomForestClassifier(random_state=run),xgb.XGBRegressor(objective="binary:logistic", random_state=run)] x_train, x_val, y_train, y_val = train_test_split(cohort_train_sub_non_n.iloc[:, :(len(all_codes_non_n)+NUM_STATIC_FEATURES)], cohort_train_sub_non_n[outcome], test_size=0.20, random_state=run) evaluate_ml_models(ml_models, x_train, x_val, y_train, y_val, cohort_test_sub_non_n, all_codes_non_n, NUM_STATIC_FEATURES, outcome, summary_data, summary_data_sub, SUB_GROUPS) #NOTE: aim to free up memory here cohort_train_sub_non_n = None cohort_test_sub_non_n = None summary_data_df = pd.DataFrame(summary_data) print("Summary data after ML models for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_df) summary_data_sub_df = pd.DataFrame(summary_data_sub) print("Summary data for sub groups after ML models for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_sub_df) end_ml = time.time() print("ML completed in %s minutes" % ( round(end_ml - start_ml,2) / 60) ) #setup features for dl models print("Create dl features") start_dl = time.time() x_seq_train, x_seq_val, x_static_train, x_static_val, y_train, y_val, x_seq_test, x_static_test, y_test, 
all_categories, n_categories, OUTCOME_CAT = create_dl_features(cohort_train_sub, cohort_test_sub, code_to_ix_nn, TARGET_FIELD, STATIC_FEATURES, outcome, run) #train, evaluate and report on dl models batch_size = 64 val_batch_size = len(y_val) train_loader_nn, val_loader_nn = create_dl_batches(batch_size, val_batch_size, x_seq_train, x_static_train, y_train, x_seq_val, x_static_val, y_val) #setup dl model parameters output_size = n_categories vocab_size = len(all_codes_nn)+1 embedding_dim = 200 hidden_dim = 128 n_layers = 2 static_features_n = NUM_STATIC_FEATURES dropout = 0.2 fc1_dim = 64 # lstm n_head = 2 # number of heads in nn.MultiheadAttention transformer static_dim = 64 # dimension for the transformer static data feedforward layer combo_dim = (static_dim + embedding_dim) #dimension for feedforward layer after concatenation in transformer fc_int_dim = int((combo_dim / 2)) #dimension for feedforward layer prior to decoder in transformer #setup training parameters epochs = 10 learning_rate = 0.001 iterations = int((len(y_train) / batch_size) * epochs) print("Number of iterations: ", iterations) lstm = MyLSTM(output_size, vocab_size, embedding_dim, hidden_dim, n_layers, static_features_n, fc1_dim, dropout) transformer = MyTransformer(output_size, vocab_size, embedding_dim, hidden_dim, n_head, n_layers, static_features_n, static_dim, combo_dim, fc_int_dim, dropout) nets = [lstm, transformer] #nets = [transformer] for net in nets: print(net.model_name) opt = optim.Adam(net.parameters(), lr=learning_rate) criterion = nn.NLLLoss() run_dl_training_and_evaluation(net, opt, criterion, summary_data, max_seq_len, all_categories, epochs, train_loader_nn, val_batch_size, val_loader_nn, x_seq_test, x_static_test, y_test, cohort_test_sub, SUB_GROUPS, code_to_ix_nn, TARGET_FIELD, STATIC_FEATURES, outcome, summary_data_sub) summary_data_df = pd.DataFrame(summary_data) print("Summary data after DL models for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_df) summary_data_sub_df = pd.DataFrame(summary_data_sub) print("Summary data for sub groups after DL models for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_sub_df) end_dl = time.time() print("DL completed in %s minutes" % ( round(end_dl - start_dl,2) / 60) ) #setup features for chadsvasc print("Create chadsvasc baseline") chads_components = ["vascular_disease", "congestive_heart_failure", "diabetes", "hypertension"] component_codelists = [] load_chads_codelists(chads_components) train_features_chads = [] test_features_chads = [] cohort_train_sub.apply(create_features_chads, args=(train_features_chads,component_codelists, outcome), axis=1) cohort_test_sub.apply(create_features_chads, args=(test_features_chads,component_codelists, outcome), axis=1) cohort_train_sub_chads = pd.DataFrame(train_features_chads) cohort_test_sub_chads = pd.DataFrame(test_features_chads) cohort_train_sub_chads["chads_score"] = cohort_train_sub_chads.apply(create_chads_score, axis=1) cohort_test_sub_chads["chads_score"] = cohort_test_sub_chads.apply(create_chads_score, axis=1) cohort_train_sub_chads["pred_chads2"] = np.where(cohort_train_sub_chads["chads_score"] >=2, 1, 0) cohort_test_sub_chads["pred_chads2"] = np.where(cohort_test_sub_chads["chads_score"] >=2, 1, 0) #evaluate and report on chadsvasc run_chads_evaluation(cohort_test_sub_chads, outcome, summary_data, summary_data_sub, SUB_GROUPS) #save summary tables summary_data_df = 
pd.DataFrame(summary_data) print("Final summary data table for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_df) summary_data_sub_df = pd.DataFrame(summary_data_sub) print("Final summary data table for sub groups for ", outcome, " and run ", run, " and sample ratio ", sample_ratio, " and max seq len ", max_seq_len, "\n", summary_data_sub_df) summary_data_py = spark.createDataFrame(summary_data_df) summary_data_py_table_name = "ccu004_2_cohort_" + outcome + "_seq_len_" + str(max_seq_len) + "_sr_" + str(sample_ratio) + "_run_" + str(run) + "_summary_data_" + output_run_date create_table_pyspark(summary_data_py, summary_data_py_table_name) summary_data_sub_py = spark.createDataFrame(summary_data_sub_df) summary_data_sub_py_table_name = "ccu004_2_cohort_" + outcome + "_seq_len_" + str(max_seq_len) + "_sr_" + str(sample_ratio) + "_run_" + str(run) + "_summary_data_sub_" + output_run_date create_table_pyspark(summary_data_sub_py, summary_data_sub_py_table_name) end = time.time() print("Script completed in %s minutes" % ( round(end - start,2) / 60) )
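As a quick sanity check of the additive CHA2DS2-VASc rule above, here is a minimal sketch; the toy_patient dict is hypothetical, and create_chads_score is the function defined in this file:

# Minimal sketch (hypothetical toy row) of the additive rule used above:
# 1 point per comorbidity flag, 2 points for age >= 75, 1 for 65-74, 1 for female sex.
toy_patient = {"age": 70, "female": 1, "vascular_disease": 0,
               "congestive_heart_failure": 1, "diabetes": 0, "hypertension": 1}
score = create_chads_score(toy_patient)   # 1 (age 65-74) + 1 (female) + 1 (CHF) + 1 (HTN) = 4
pred_chads2 = 1 if score >= 2 else 0      # same threshold as np.where(... >= 2, 1, 0) above
print(score, pred_chads2)                 # -> 4 1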
42.295652
297
0.689053
6,228
43,776
4.466121
0.087508
0.035484
0.032249
0.011001
0.516268
0.440302
0.3762
0.336257
0.297825
0.283696
0
0.010938
0.210595
43,776
1,034
298
42.336557
0.793964
0.075429
0
0.191667
0
0
0.112787
0.012458
0
0
0
0
0
0
null
null
0
0.029167
null
null
0.118056
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d88dbbafb09f9e5620ebb0c0a2726af886fe10c5
25,165
py
Python
hanibal/crm_gestion_faces/report/cheques_gir_no_cob_reporte.py
Christian-Castro/castro_odoo8
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
[ "Unlicense" ]
null
null
null
hanibal/crm_gestion_faces/report/cheques_gir_no_cob_reporte.py
Christian-Castro/castro_odoo8
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
[ "Unlicense" ]
null
null
null
hanibal/crm_gestion_faces/report/cheques_gir_no_cob_reporte.py
Christian-Castro/castro_odoo8
8247fdb20aa39e043b6fa0c4d0af509462ab3e00
[ "Unlicense" ]
null
null
null
# -*- encoding: utf-8 -*-
from openerp.report import report_sxw
import openerp.pooler


class conciliacion_bancaria_c(report_sxw.rml_parse):
    ESTADOS = {
        'draft': 'Borrador',
        'proforma': 'Pro-Forma',
        'posted': 'Contabilizado',
        'cancel': 'Cancelado',
        'open': 'Abierto',
        'confirmed': 'Confirmado'
    }
    ESTADO = {
        'no': 'Movimientos',
        'estado_ch': 'Custodios VL',
        'estado_ch_otros': 'Custodios Otros',
        'estado_ch_rise': 'Custodios RISE'
    }

    def __init__(self, cr, uid, name, context=None):
        if context is None:
            context = {}
        super(conciliacion_bancaria_c, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'cuentas': self._cuentas,
            'get_cont': self._get_cont,
            'resultante': self._resultante,
            'resultante_todo': self._resultante_todo,
            'proyectos': self._proyectos,
            'get_desde': self._get_desde,
            'get_hasta': self._get_hasta,
            'get_desdeu': self._get_desdeu,
            'get_hastau': self._get_hastau,
            'get_corte': self._get_corte,
            'get_retorno': self._get_retorno,
            'valores': self._valor,
            'get_saldo': self._get_salini,
            'reset_total_cheques': self._reset_total_cheques,
            'suma_total': self._suma_total,
            'convert': self._convert,
            'convierto': self._convierto,
            'get_cuenta': self._get_cuenta,
            'get_banco': self._get_banco,
            'deposito': self._deposito,
            'conver': self._convertir_estado,
            'tra': self._tra,
            'total_ch': self._total_ch,
            'total_che': self._total_che,
            'reset_t_ch': self._reset_t_ch,
            'total_cheques': self._total_cheques,
            'total_reporte': self._total,
            'saldo_final': self._saldo_final,
        })

    lineas = {}

    def _cuentas(self, data):
        banco = False
        if data.get('form', False) and data['form'].get('bank', False):
            banco = data['form']['bank'][0]
        cuenta = False
        if data.get('form', False) and data['form'].get('cuenta', False):
            cuenta = data['form']['cuenta'][0]
        estado = False
        if data.get('form', False) and data['form'].get('estado', False):
            estado = data['form']['estado']
        tipodiario = False
        if data.get('form', False) and data['form'].get('tipo_diario', False):
            tipodiario = data['form']['tipo_diario'][0]
        confirmados = False
        if data.get('form', False) and data['form'].get('confirmados', False):
            confirmados = data['form']['confirmados']
        desde = False
        if data.get('form', False) and data['form'].get('desde', False):
            desde = data['form']['desde']
        hasta = False
        if data.get('form', False) and data['form'].get('hasta', False):
            hasta = data['form']['hasta']
        desdeu = False
        if data.get('form', False) and data['form'].get('desdeu', False):
            desdeu = data['form']['desdeu']
        hastau = False
        if data.get('form', False) and data['form'].get('hastau', False):
            hastau = data['form']['hastau']
        parametros = []
        param = []
        if banco:
            parametros.append("b.id = %s")
            param.append(banco)
        if confirmados:
            if confirmados == 'si':
                parametros.append("v.ver_banco = true")
            else:
                if confirmados == 'no':
                    parametros.append("v.ver_banco = false")
                else:
                    parametros.append("v.ver_banco is not null ")
        if cuenta:
            parametros.append("c.id = %s")
            param.append(cuenta)
        if tipodiario:
            parametros.append("m.id = %s")
            param.append(tipodiario)
        if estado:
            parametros.append("v.state = %s")
            param.append(estado)
        if desde and hasta:
            parametros.append("v.date between %s and %s ")
            param.append(desde)
            param.append(hasta)
        elif desde:
            parametros.append(" v.date >= %s ")
            param.append(desde)
        elif hasta:
            parametros.append("v.date <= %s ")
            param.append(hasta)
        if desdeu and hastau:
            parametros.append(" v.fecha_cobro between %s and %s ")
            param.append(desdeu)
            param.append(hastau)
        elif desdeu:
            parametros.append(" v.fecha_cobro >= %s ")
            param.append(desdeu)
        elif hastau:
            parametros.append(" v.fecha_cobro <= %s ")
            param.append(hastau)
        principal = """ select distinct v.ver_banco as verificacion , count (*) as contador
            from account_voucher as v
            left join payment_mode as m on m.journal = v.journal_id
            left join account_journal as j on j.id = v.journal_id
            left join res_partner_bank as c on c.id = m.bank_id
            left join res_bank as b on b.id = c.bank
            left join res_partner as p on p.id = v.partner_id """
        groupby = "\n group by v.ver_banco "
        where = "\n where "
        query = ''
        if (not parametros) or (len(parametros) == 0):
            query = principal + groupby
        else:
            i = 0
            for g in parametros:
                if i == 0:
                    where = where + g
                else:
                    where = where + ' and ' + g
                i = 1
            query = principal + where + groupby
        param = tuple(param)
        self.cr.execute(query, param)
        lineas = self.cr.dictfetchall()
        return lineas

    def _resultante(self, data, confirmado_id):
        banco = False
        if data.get('form', False) and data['form'].get('bank', False):
            banco = data['form']['bank'][0]
        cuenta = False
        if data.get('form', False) and data['form'].get('cuenta', False):
            cuenta = data['form']['cuenta'][0]
        tipodiario = False
        if data.get('form', False) and data['form'].get('tipo_diario', False):
            tipodiario = data['form']['tipo_diario'][0]
        desde = False
        if data.get('form', False) and data['form'].get('desde', False):
            desde = data['form']['desde']
        hasta = False
        if data.get('form', False) and data['form'].get('hasta', False):
            hasta = data['form']['hasta']
        desdeu = False
        if data.get('form', False) and data['form'].get('desdeu', False):
            desdeu = data['form']['desdeu']
        hastau = False
        if data.get('form', False) and data['form'].get('hastau', False):
            hastau = data['form']['hastau']
        estado = False
        if data.get('form', False) and data['form'].get('estado', False):
            estado = data['form']['estado']
        parametros = []
        param = []
        if desde and hasta:
            parametros.append(" date between %s and %s ")
            param.append(desde)
            param.append(hasta)
        elif desde:
            parametros.append(" date >= %s ")
            param.append(desde)
        elif hasta:
            parametros.append(" date <= %s ")
            param.append(hasta)
        #---------------------------------------------
        if desdeu and hastau:
            parametros.append(" fec_giro between %s and %s ")
            param.append(desdeu)
            param.append(hastau)
        elif desdeu:
            parametros.append(" fec_giro >= %s ")
            param.append(desdeu)
        elif hastau:
            parametros.append(" fec_giro <= %s ")
            param.append(hastau)
        if estado:
            parametros.append(" state = %s ")
            param.append(estado)
        if banco:
            parametros.append("b_id = %s")
            param.append(banco)
        if cuenta:
            parametros.append("c_id = %s")
            param.append(cuenta)
        if tipodiario:
            parametros.append("m_id = %s")
            param.append(tipodiario)
        #orderby = "\n ORDER BY 10,5,11 "
        #principal = """ select distinct estado_ch from conciliacion_bancaria """
        #where = "\n where ver_banco = " + str(confirmado_id) + ' '
        #for g in parametros:
        #    where = where + ' and ' + g
        #query = principal + where + str('order by 1 desc')
        #self.cr.execute(query, param)
        #lineas = self.cr.dictfetchall()
        print confirmado_id, '<-------------'
        principal = """ select * from conciliacion_bancaria where estado_ch = 't' """
        where = "\n and ver_banco = " + str(confirmado_id) + ' '
        for g in parametros:
            where = where + ' and ' + g
        query = principal + where + ' ' + str('order by 1 desc')
        self.cr.execute(query, param)
        lineas = self.cr.dictfetchall()
        principal = """ select * from conciliacion_bancaria where estado_ch_rise = 't' """
        where = "\n and ver_banco = " + str(confirmado_id) + ' '
        for g in parametros:
            where = where + ' and ' + g
        query = principal + where + ' ' + str('order by 1 desc')
        self.cr.execute(query, param)
        lineas_a = self.cr.dictfetchall()
        principal = """ select * from conciliacion_bancaria where estado_ch_otros = 't' """
        where = "\n and ver_banco = " + str(confirmado_id) + ' '
        for g in parametros:
            where = where + ' and ' + g
        query = principal + where + ' ' + str('order by 1 desc')
        self.cr.execute(query, param)
        lineas_b = self.cr.dictfetchall()
        principal = """ select * from conciliacion_bancaria
            where estado_ch_rise = 'f' and estado_ch_otros = 'f' and estado_ch = 'f' """
        where = "\n and ver_banco = " + str(confirmado_id) + ' '
        for g in parametros:
            where = where + ' and ' + g
        query = principal + where + ' ' + str('order by 1 desc')
        self.cr.execute(query, param)
        lineas_c = self.cr.dictfetchall()
        dct = []
        if len(lineas) > 0:
            r = {'custodio': 'estado_ch'}
            dct.append(r)
        if len(lineas_a) > 0:
            s = {'custodio': 'estado_ch_rise'}
            dct.append(s)
        if len(lineas_b) > 0:
            t = {'custodio': 'estado_ch_otros'}
            dct.append(t)
        if len(lineas_c) > 0:
            u = {'custodio': 'no'}
            dct.append(u)
        return dct

    #----------------------------------------------------------------------------------
    def _resultante_todo(self, data, confirmado_id, estados_cheques):
        if estados_cheques == 'no':
            estados_cheques = "estado_ch = 'f' and estado_ch_rise = 'f' and estado_ch_otros = 'f' "
        if estados_cheques == 'estado_ch':
            estados_cheques = "estado_ch = 't' "
        if estados_cheques == 'estado_ch_otros':
            estados_cheques = "estado_ch_otros = 't' "
        if estados_cheques == 'estado_ch_rise':
            estados_cheques = "estado_ch_rise = 't' "
        banco = False
        if data.get('form', False) and data['form'].get('bank', False):
            banco = data['form']['bank'][0]
        cuenta = False
        if data.get('form', False) and data['form'].get('cuenta', False):
            cuenta = data['form']['cuenta'][0]
        tipodiario = False
        if data.get('form', False) and data['form'].get('tipo_diario', False):
            tipodiario = data['form']['tipo_diario'][0]
        desde = False
        if data.get('form', False) and data['form'].get('desde', False):
            desde = data['form']['desde']
        hasta = False
        if data.get('form', False) and data['form'].get('hasta', False):
            hasta = data['form']['hasta']
        desdeu = False
        if data.get('form', False) and data['form'].get('desdeu', False):
            desdeu = data['form']['desdeu']
        hastau = False
        if data.get('form', False) and data['form'].get('hastau', False):
            hastau = data['form']['hastau']
        estado = False
        if data.get('form', False) and data['form'].get('estado', False):
            estado = data['form']['estado']
        parametros = []
        param = []
        if desde and hasta:
            parametros.append(" date between %s and %s ")
            param.append(desde)
            param.append(hasta)
        elif desde:
            parametros.append(" date >= %s ")
            param.append(desde)
        elif hasta:
            parametros.append(" date <= %s ")
            param.append(hasta)
        if desdeu and hastau:
            parametros.append(" fec_giro between %s and %s ")
            param.append(desdeu)
            param.append(hastau)
        elif desdeu:
            parametros.append(" fec_giro >= %s ")
            param.append(desdeu)
        elif hastau:
            parametros.append(" fec_giro <= %s ")
            param.append(hastau)
        if estado:
            parametros.append(" state = %s ")
            param.append(estado)
        if banco:
            parametros.append("b_id = %s")
            param.append(banco)
        if cuenta:
            parametros.append("c_id = %s")
            param.append(cuenta)
        if tipodiario:
            parametros.append("m_id = %s")
            param.append(tipodiario)
        orderby = "\n ORDER BY 10,5,11 "
        principal = """ select * from conciliacion_bancaria """
        where = "\n where ver_banco = '" + str(confirmado_id) + "' and " + str(estados_cheques) + " "
        for g in parametros:
            where = where + ' and ' + g
        query = principal + where + orderby
        self.cr.execute(query, param)
        lineas = self.cr.dictfetchall()
        return lineas

    #----------------------------------------------------------------------------------
    def _proyectos(self, cheque_id):
        query = """ SELECT analytics_id as id
            from account_invoice_line
            where invoice_id in (
                select id from account_invoice where replace(number,'/','') in (
                    select ref from account_move_line where id in (
                        select move_line_id from account_voucher_line where voucher_id = %s
                    )
                )
            )
            group by analytics_id """
        self.cr.execute(query, (cheque_id,))
        res = self.cr.dictfetchall()
        mod_nom = []
        c = len(res)
        i = 0
        while i < c:
            nom_u = str('' + self._consulta_nombre(res[i]['id']))
            mod_nom.append(nom_u)
            i += 1
        TEXT = str("//".join(mod_nom))
        return TEXT

    def _consulta_nombre(self, pr_id):
        self.cr.execute("select name from account_analytic_plan_instance where id = %s ", (pr_id,))
        nom = self.cr.dictfetchall()
        return nom[0]['name']

    #----------------------------------------------------------------------------------
    def _deposito(self, data):
        banco = False
        if data.get('form', False) and data['form'].get('bank', False):
            banco = data['form']['bank'][0]
        cuenta = False
        if data.get('form', False) and data['form'].get('cuenta', False):
            cuenta = data['form']['cuenta'][0]
        corte = False
        if data.get('form', False) and data['form'].get('f_corte', False):
            corte = data['form']['f_corte']
        parametros = []
        param = []
        if corte:
            parametros.append(" registrobanco <= %s ")
            param.append(corte)
        if banco:
            parametros.append("b_id = %s")
            param.append(banco)
        if cuenta:
            parametros.append("c_id = %s")
            param.append(cuenta)
        ejecutar = """ CREATE OR REPLACE VIEW conciliacion_bancaria AS
            SELECT v.id AS ninterno, p.name AS proveedor, v.amount AS total,
                v.ver_banco AS verificacion, v.veri_fecha AS fechaverifi,
                v.ver_regbanco AS registrobanco, v.number AS numero,
                v.pospago AS posfechado, v.amount * (-1)::numeric AS valor,
                v.date AS movi, v.fecha_cobro as fec_giro,
                'CHEQUES'::character varying AS tipomovimiento, v.state AS estado,
                p.id as p_id, b.id as b_id, c.id as c_id, m.id as m_id,
                v.ver_banco as ver_banco, v.date date, v.state state,
                coalesce(v.estado_ch,'f') estado_ch,
                coalesce(v.estado_ch_otros,'f') estado_ch_otros,
                coalesce(v.estado_ch_rise,'f') estado_ch_rise
            FROM account_voucher v
            JOIN payment_mode m ON m.journal = v.journal_id
            JOIN res_partner_bank c ON c.id = m.bank_id
            JOIN res_bank b ON b.id = c.bank
            JOIN res_partner p ON p.id = v.partner_id
            where v.state in ('posted','draft') and number not like '%/%' and number not like '%B%'
            UNION all
            SELECT v.id AS ninterno, COALESCE(p.name, ' - '::character varying) AS proveedor,
                det.amount AS total, true AS verificacion, det.date AS fechaverifi,
                det.date AS registrobanco, det.ref AS numero, false AS posfechado,
                det.amount AS valor, det.date AS movi, det.date as fec_giro,
                upper(det.name::text) AS tipomovimiento, v.state AS estado,
                p.id AS p_id, b.id AS b_id, c.id AS c_id, m.id AS m_id,
                'True' AS ver_banco, v.date date, v.state as state,
                'f' as estado_ch, 'f' as estado_ch_otros, 'f' as estado_ch_rise
            FROM account_bank_statement v
            LEFT JOIN account_journal tdiario ON tdiario.id = v.journal_id
            LEFT JOIN account_bank_statement_line det ON det.statement_id = v.id
            LEFT JOIN res_partner p ON p.id = det.partner_id
            JOIN payment_mode m ON m.journal = tdiario.id
            JOIN res_partner_bank c ON m.bank_id = c.id
            JOIN res_bank b ON b.id = c.bank
            where v.state in ('open','confirm') """
        query = ''
        if parametros:
            query = ejecutar
        else:
            query = ejecutar
        self.cr.execute(query)
        principal = """ select sum(valor) as saldoini from conciliacion_bancaria """
        principalr = """ select ( sum(total) - sum(total) ) as saldoini from conciliacion_bancaria """
        where = "\n where ver_banco = true "
        query = ''
        if banco and cuenta and not corte:
            query = principalr
        else:
            i = 0
            for g in parametros:
                if i == 0:
                    where = where + ' and ' + g
                else:
                    where = where + ' and ' + g
                i = 1
            query = principal + where
        param = tuple(param)
        self.cr.execute(query, param)
        lineas = self.cr.dictfetchall()
        return lineas

    def _get_banco(self, data):
        if data.get('form', False) and data['form'].get('bank', False):
            id = data['form']['bank'][0]
            return openerp.pooler.get_pool(self.cr.dbname).get('res.bank').browse(self.cr, self.uid, id).name
        return False

    def _get_cont(self, est):
        if est == 'posted':
            val = 'SI'
        else:
            val = 'NO'
        return val

    def _get_cuenta(self, data):
        if data.get('form', False) and data['form'].get('cuenta', False):
            id = data['form']['cuenta'][0]
            return openerp.pooler.get_pool(self.cr.dbname).get('res.partner.bank').browse(self.cr, self.uid, id).acc_number
        return False

    def _get_desde(self, data):
        if data.get('form', False) and data['form'].get('desde', False):
            return data['form']['desde']
        return False

    def _get_hasta(self, data):
        if data.get('form', False) and data['form'].get('hasta', False):
            return data['form']['hasta']
        return False

    #------------------------------------------------------------
    def _get_desdeu(self, data):
        if data.get('form', False) and data['form'].get('desdeu', False):
            return data['form']['desdeu']
        return False

    def _get_hastau(self, data):
        if data.get('form', False) and data['form'].get('hastau', False):
            return data['form']['hastau']
        return False

    def _get_corte(self, data):
        if data.get('form', False) and data['form'].get('f_corte', False):
            return data['form']['f_corte']
        return False

    def _convert(self, estado):
        if not estado:
            valor = 'No'
        else:
            valor = 'SI'
        return valor

    def _convierto(self, estados):
        if not estados:
            valores = 'no Confirmados '
        else:
            valores = 'Confirmados'
        return valores

    def _convertir_estado(self, tipo):
        if self.ESTADOS.has_key(tipo):
            return self.ESTADOS[tipo]
        return 'Otros'

    def _tra(self, tipo):
        if self.ESTADO.has_key(tipo):
            return self.ESTADO[tipo]

    total_cheques = 0.00
    total = 0.00
    total_saldo = 0.00
    saldo = 0.00
    pri = 0
    seg = 0
    suma = 0
    sumado = 0.00
    total_c = 0.0

    def _valor(self, valor, cheque, cheque_a, cheque_b):
        if str(cheque) == 'False' and str(cheque_a) == 'False' and str(cheque_b) == 'False':
            self.total_cheques = self.total_cheques + valor

    def _total_cheques(self):
        return self.total_cheques

    def _reset_total_cheques(self):
        self.total_cheques = 0.0

    def _suma_total(self, valor):
        self.total = self.total - valor

    def _total(self):
        return self.total

    def _get_salini(self, valor):
        self.total_saldo = valor

    def _saldo_final(self):
        self.saldo = self.total_saldo - self.total
        return self.saldo

    def _get_retorno(self, valor):
        self.suma += valor
        return self.suma

    def _total_che(self, val):
        self.total_c = self.total_c + val

    def _total_ch(self):
        return self.total_c

    def _reset_t_ch(self):
        self.total_c = 0.0


report_sxw.report_sxw(
    'report.conciliacion.bancaria',
    'rt.conciliacion.bancaria',
    'addons/rt_verificacionbancaria/report/cheques_gir_no_cob_reporte.rml',
    parser=conciliacion_bancaria_c, header=False)
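The wizard-form lookups in this report all repeat the same data['form'].get(...) pattern; a small helper like the following (hypothetical, not part of the original module) would express it once:

# Hedged sketch: one helper in place of the repeated form lookups above.
def _form_value(data, key, index=None):
    form = data.get('form') or {}
    value = form.get(key) or False
    if value and index is not None:
        return value[index]   # many2one fields arrive as (id, name) tuples; [0] is the id
    return value

# e.g. banco = _form_value(data, 'bank', 0); desde = _form_value(data, 'desde')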
32.85248
137
0.476972
2,680
25,165
4.340672
0.08694
0.048139
0.027078
0.039113
0.589616
0.551105
0.534944
0.512078
0.492908
0.484828
0
0.004472
0.395788
25,165
765
138
32.895425
0.760605
0.027657
0
0.47331
0
0.001779
0.296045
0.020287
0
0
0
0
0
0
null
null
0
0.003559
null
null
0.001779
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d88fd615e27e9bedcbbc112337a612f942bcf04c
400
py
Python
images/migrations/0002_auto_20180226_2100.py
andykimchris/Unsplash
dc0395a30ad983ea4620c30889cdb4b0ef6d707e
[ "MIT" ]
null
null
null
images/migrations/0002_auto_20180226_2100.py
andykimchris/Unsplash
dc0395a30ad983ea4620c30889cdb4b0ef6d707e
[ "MIT" ]
9
2019-08-06T01:57:09.000Z
2021-09-16T16:04:04.000Z
images/migrations/0002_auto_20180226_2100.py
andykimchris/Unsplash
dc0395a30ad983ea4620c30889cdb4b0ef6d707e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-02-26 18:00
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('images', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='gallery',
            options={'ordering': ['image']},
        ),
    ]
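For context, a migration like this is what makemigrations emits when only a model's Meta options change; assuming the app's Gallery model has an image field, the corresponding model state would look roughly like the sketch below (the field type and upload path are assumptions):

# Hypothetical model state that would produce the AlterModelOptions above:
from django.db import models

class Gallery(models.Model):
    image = models.ImageField(upload_to='gallery/')  # field type assumed

    class Meta:
        ordering = ['image']  # mirrored by options={'ordering': ['image']}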
20
48
0.6
41
400
5.707317
0.829268
0
0
0
0
0
0
0
0
0
0
0.071672
0.2675
400
19
49
21.052632
0.726962
0.17
0
0
1
0
0.115502
0
0
0
0
0
0
1
0
false
0
0.166667
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d894c6184e52a7c8ac071f8f156fcf53e4231961
6,277
py
Python
pythonLoops.py
SandraCoburn/python-code-challenges
52ca026c02a45cadc890d01fc095d39d42b36d4c
[ "MIT" ]
null
null
null
pythonLoops.py
SandraCoburn/python-code-challenges
52ca026c02a45cadc890d01fc095d39d42b36d4c
[ "MIT" ]
null
null
null
pythonLoops.py
SandraCoburn/python-code-challenges
52ca026c02a45cadc890d01fc095d39d42b36d4c
[ "MIT" ]
null
null
null
'''
We can use two types of loops in Python, a for loop and a while loop.
A for loop iterates over a given sequence (iterator expression).
A while loop repeats as long as a boolean context evaluates to True.

The break statement terminates the loop containing it. Control of the program flows to
the statement immediately after the body of the loop. If the break statement is inside a
nested loop (a loop inside another loop), the break statement will only terminate the
innermost loop.

You can use the continue statement to skip the rest of the code inside a loop for the
current iteration only. The loop does not terminate entirely but continues with the
next iteration.
'''
# Accessing the index in "for" loops
ints = [8, 23, 45, 12, 78]
for i, n in enumerate(ints):
    print(f"item #{i} = {n}")

for x in range(5):
    print(x)  # 0,1,2,3,4

for x in range(2, 7):
    print("starts at two stops before 7", x)  # 2,3,4,5,6

for x in range(1, 8, 2):
    print("starts at 1, before 8 by 2", x)  # 1,3,5,7

# while loops to print the same values as the for loops above
count = 0
while count < 5:
    print(count)
    count += 1

count = 2
while count < 7:
    print("starts at two stops before 7", count)
    count += 1

count = 1
while count < 8:
    print("starts at 1, before 8 by 2", count)
    count += 2

# You can use a break statement to exit a for loop or a while loop
count = 0
while True:
    print(count)
    count += 1
    if count >= 5:
        break

# You can use a continue statement to skip the current block but not exit the loop entirely
for x in range(8):
    # if x is even, skip this block and do not print
    if x % 2 == 0:
        continue
    print(x)

"""
Write Python code below to loop through and print out all the odd numbers from the
numbers list in the same order they are received. Don't print any numbers that come
after 600 in the sequence.
"""
numbers_list = [
    951, 402, 984, 651, 360, 69, 408, 319, 601, 485,
    980, 507, 725, 547, 544, 615, 83, 165, 141, 501,
    263, 617, 865, 575, 219, 390, 984, 592, 236, 105,
    942, 941, 386, 462, 47, 418, 907, 344, 236, 375,
    823, 566, 597, 978, 328, 615, 953, 345, 399, 162,
    758, 219, 918, 237, 412, 566, 826, 248, 866, 950,
    626, 949, 687, 217, 815, 67, 104, 58, 512, 24,
    892, 894, 767, 553, 81, 379, 843, 831, 445, 742,
    717, 958, 609, 842, 451, 688, 753, 854, 685, 93,
    857, 440, 380, 126, 721, 328, 753, 470, 743, 527
]
for num in numbers_list:
    if num % 2 != 0 and num < 600:
        print("odds", num)

import string
print("whitespace", string.whitespace)

# Basic types
my_int = 3
print(float(my_int))  # 3.0

# modulo operator
my_remainder = 9 % 4
print(my_remainder)  # 1

# exponentiation operator
two_squared = 2 ** 2
print(two_squared)  # 4
two_cubed = 2 ** 3
print(two_cubed)  # 8

# Using the multiplication operator to create a new string that repeats the original sequence:
my_string = "Python"
repeated = my_string * 3
print(repeated)  # PythonPythonPython

my_list = [1, 2, 3]
repeated_list = my_list * 3
print(repeated_list)  # [1, 2, 3, 1, 2, 3, 1, 2, 3]

a = object()
b = object()
a_list = [a] * 5
b_list = [b] * 5
combined_a_and_b = a_list + b_list
print(len(combined_a_and_b))  # 10

# To format a string in Python, you use the % operator to format a set of stored
# variables in a tuple. You also include argument specifiers in your string with
# special symbols like %s and %d
name = "Sandra"
formatted_string = "hello, %s!" % name
print(formatted_string)  # hello, Sandra!

# If you have more than one argument specifier, you need to enclose your arguments in a tuple:
name2 = "Jackie"
year = 2021
print("Hey %s! It's the year %d." % (name2, year))  # Hey Jackie! It's the year 2021.

# Any object that is not a string can also be formatted using the %s operator
my_sample_list = [1, 2, 3]
print("my_list: %s" % my_sample_list)  # my_list: [1, 2, 3]

'''
Common argument specifiers:
- %s - String (or any object with a string representation)
- %d - Integers
- %f - Floating point numbers
- %.<number of digits>f - Floating point numbers with a fixed amount of digits to the right of the dot
- %x/%X - Integers in hexadecimal (lowercase/uppercase)
'''
product_name = "bananas"
price = 1.23
product_id = 123456
print("%s (id: %d) are currently $%.2f." % (product_name, product_id, price))
# bananas (id: 123456) are currently $1.23.

# The len() function prints out the number of characters in the string
my_string_sample = "Hello, World"
print(len(my_string_sample))  # 12

# The index() method prints out the index of the substring argument's first occurrence
print(my_string_sample.index("o"))  # 4
print(my_string_sample.index(", W"))  # 5

# The count() method returns the number of occurrences of the substring argument
print(my_string_sample.count("ll"))  # 1
print(my_string_sample.count('o'))  # 2

# To slice a string, you can use this syntax: [start:stop:step].
# To reverse the string's order, you can set the step value to -1
print(my_string_sample[3:7])  # lo,
print(my_string_sample[3:7:2])  # start at index 3, stop before 7, skipping every 2 letters -> l,
print(my_string_sample[::-1])  # dlroW ,olleH

# uppercase and lowercase
print(my_string_sample.upper())  # HELLO, WORLD
print(my_string_sample.lower())  # hello, world

# starts with / ends with
print(my_string_sample.startswith("H"))  # True
print(my_string_sample.endswith("W"))  # False

# Split string: the split() method allows you to split up a string into a list.
# The default separator is any whitespace
print(my_string_sample.split(" "))  # ['Hello,', 'World']
print(my_string_sample.split(", "))  # ['Hello', 'World']
print(my_string_sample.split("l"))  # ['He', '', 'o, Wor', 'd']

# Anytime you have an iterable object (like a list) you can check if a specific item
# exists inside that iterable by using the `in` operator
years = [2018, 2019, 2020, 2021]
year = 2020
if year in years:
    print("%s is in the years collection" % year)  # 2020 is in the years collection

# If we want to determine whether two objects are actually the same instance in memory,
# we use the `is` operator instead of the value comparison operator ==
d = [1, 2, 3, 4, 5]
e = [1, 2, 3, 4, 5]
print(d == e)  # True because d and e have the same value
print(d is e)  # False because d and e reference two different list objects

x = [1, 2, 3]
y = x
print(x == y)  # True because x and y have the same value
print(x is y)  # True because x and y reference the same list object
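The %-formatting shown above is the older style; for comparison, the same bananas example written with str.format and an f-string (both standard Python, no new behavior):

product_name, price, product_id = "bananas", 1.23, 123456
print("%s (id: %d) are currently $%.2f." % (product_name, product_id, price))
print("{} (id: {}) are currently ${:.2f}.".format(product_name, product_id, price))
print(f"{product_name} (id: {product_id}) are currently ${price:.2f}.")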
33.388298
492
0.70368
1,130
6,277
3.846903
0.326549
0.033126
0.05153
0.061192
0.122383
0.071774
0.045549
0.045549
0.021164
0.021164
0
0.091636
0.18289
6,277
188
493
33.388298
0.755898
0.451968
0
0.090909
0
0
0.102048
0
0
0
0
0
0
1
0
false
0
0.010101
0
0.010101
0.424242
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
d896b92786c756abe5bfc350a2fe42f3f303203a
1,322
py
Python
hashdist/formats/templated_stream.py
krafczyk/hashdist
a322a66d4bcd4b989a6a163cd2569f3e71995f60
[ "BSD-3-Clause" ]
67
2015-01-21T14:16:20.000Z
2022-03-31T23:21:09.000Z
hashdist/formats/templated_stream.py
dannygriffin000/hashdist
a322a66d4bcd4b989a6a163cd2569f3e71995f60
[ "BSD-3-Clause" ]
77
2015-01-01T00:38:55.000Z
2020-06-15T22:04:42.000Z
hashdist/formats/templated_stream.py
dannygriffin000/hashdist
a322a66d4bcd4b989a6a163cd2569f3e71995f60
[ "BSD-3-Clause" ]
20
2015-01-22T16:17:49.000Z
2021-02-11T21:35:25.000Z
""" A simple stream constructor that constructs a Stream by evaluating parameter substitutions from a dictionary parameters. Finds tokens of the form \{\{([a-zA-Z_-][\w-]*)\}\} and replaces {{var}} with the contents of gettattr(parameters, var) in the new stream. """ import re from StringIO import StringIO class TemplatedStream(StringIO): """ StringIO stream that expands template parameters of the form {{var}} """ dbrace_re = re.compile(r'\{\{([a-zA-Z_][\w-]*)\}\}') def __init__(self, stream, parameters): """ Create a TemplatedStream by populating variables from the parameters mapping. Silently passes matching strings that do not have a corresponding key defined in parameters as empty strings. """ StringIO.__init__(self) def dbrace_expand(match): if match.group(1) in parameters: # we may occassionally be handed non-string object in # parameters. Just convert them to string, they will # be re-run through the YAML parser anyway. return str(parameters[match.group(1)]) else: return '' for line in stream: self.write(self.dbrace_re.sub(dbrace_expand, line)) self.seek(0)
30.045455
79
0.618003
161
1,322
4.987578
0.565217
0.044832
0.022416
0.012453
0
0
0
0
0
0
0
0.003181
0.286687
1,322
43
80
30.744186
0.848356
0.503782
0
0
0
0
0.04223
0.04223
0
0
0
0
0
1
0.142857
false
0
0.142857
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
d899d8df0f282d6997185cd206095a224300208b
3,483
py
Python
rl_agent/scripts/agents/test/gym_ddpg_server.py
alejodosr/drl-landing-iros
d2a221ef06332d36398c2a27008dfc4276d92af8
[ "MIT" ]
8
2018-07-12T11:12:36.000Z
2021-07-12T13:37:51.000Z
rl_agent/scripts/agents/test/gym_ddpg_server.py
alejodosr/drl-landing-iros
d2a221ef06332d36398c2a27008dfc4276d92af8
[ "MIT" ]
3
2018-11-13T22:40:12.000Z
2020-12-11T12:13:46.000Z
rl_agent/scripts/agents/test/gym_ddpg_server.py
alejodosr/drl-landing-iros
d2a221ef06332d36398c2a27008dfc4276d92af8
[ "MIT" ]
7
2018-07-12T11:12:39.000Z
2021-03-09T21:52:30.000Z
import filter_env
import rospy
from rl_agent_environment_communication.srv import *
import cv2
from cv_bridge import CvBridge
import gym
import numpy as np

ENV_NAME = 'LunarLanderContinuous-v2'
#ENV_NAME = 'Pendulum-v0'
DEBUG_SERVICES_MODE = False

env = filter_env.makeFilteredEnv(gym.make(ENV_NAME))
#env = gym.wrappers.Monitor(env, 'experiments/' + ENV_NAME, force=True)
state = env.reset()


def handle_environment_reset(req):
    print 'Resetting Env...'
    state = env.reset()
    resp = ResetEnvSrvResponse()
    resp.state = state
    return resp


def handle_environment_render(req):
    print 'Rendering Env...'
    image_state = env.render(mode='rgb_array')
    print 'ENV RENDERED!'
    #~ size = 400, 600, 3
    #~ image_state = np.zeros(size, dtype=np.uint8)
    #~ print 'image_state (type): ', type(image_state)
    #~ cv2.imshow('image_state', image_state)
    #~ cv2.waitKey(1)
    print 'Creating CvBridge object...'
    cv_bridge_obj = CvBridge()
    img_msg = cv_bridge_obj.cv2_to_imgmsg(image_state, "bgr8")
    print 'Sending response...'
    resp = RenderEnvSrvResponse()
    resp.img = img_msg
    return resp


def handle_environment_step(req):
    #env.render()
    action_req = np.array(req.action)
    if DEBUG_SERVICES_MODE:
        print '++++++++++ Requested Action INFO ++++++++++'
        print 'action (type): ', type(action_req)
        print 'action (shape): ', action_req.shape
        print 'action (length): ', len(action_req)
        print 'action (dim): ', action_req.ndim
        print 'action (values): ', action_req
    next_state, reward, done, _ = env.step(action_req)
    size = 5, 5, 3
    img = np.zeros(size, dtype=np.uint8)
    cv_bridge_obj = CvBridge()
    img_msg = cv_bridge_obj.cv2_to_imgmsg(img, "bgr8")
    resp = AgentSrvResponse()
    resp.reward = reward
    resp.obs_real = np.array(next_state)
    resp.terminal_state = done
    resp.img = img_msg
    return resp


def handle_environment_dimensionality(req):
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    state_dim_low = env.observation_space.low
    state_dim_high = env.observation_space.high
    action_dim_low = env.action_space.low
    action_dim_high = env.action_space.high
    if DEBUG_SERVICES_MODE:
        print '**** state_dim: ', state_dim
        print '**** action_dim: ', action_dim
        print '**** state_dim_low: ', state_dim_low
        print '**** state_dim_low (type): ', type(state_dim_low)
        print '**** state_dim_high: ', state_dim_high
        print '**** action_dim_low: ', action_dim_low
        print '**** action_dim_low (type): ', type(action_dim_low)
        print '**** action_dim_high: ', action_dim_high
    resp = EnvDimensionalitySrvResponse()
    resp.state_dim_lowdim = state_dim
    resp.state_dim_img = np.array([5, 5, 3], dtype=np.int)
    resp.state_min = state_dim_low
    resp.state_max = state_dim_high
    resp.action_dim = action_dim
    resp.action_min = action_dim_low
    resp.action_max = action_dim_high
    resp.num_iterations = env.spec.timestep_limit
    return resp


def environment_server():
    rospy.init_node('environment_step_server')
    s1 = rospy.Service('environment_step', AgentSrv, handle_environment_step)
    s2 = rospy.Service('environment_dimensionality', EnvDimensionalitySrv, handle_environment_dimensionality)
    s3 = rospy.Service('environment_reset', ResetEnvSrv, handle_environment_reset)
    s4 = rospy.Service('environment_render', RenderEnvSrv, handle_environment_render)
    print "Ready to step the Environment..."
    rospy.spin()


def main():
    environment_server()


if __name__ == '__main__':
    main()
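On the other side of these services, a client would look roughly like the sketch below; the service names match the registrations above, but the request/response fields are inferred from the handlers, so the actual .srv definitions may differ:

# Hedged client sketch (Python 2, matching the server's style)
import rospy
from rl_agent_environment_communication.srv import AgentSrv, ResetEnvSrv

rospy.init_node('environment_client')
rospy.wait_for_service('environment_step')
reset_env = rospy.ServiceProxy('environment_reset', ResetEnvSrv)
step_env = rospy.ServiceProxy('environment_step', AgentSrv)
state = reset_env().state
resp = step_env([0.0, 0.0])   # action vector; its size comes from environment_dimensionality
print resp.reward, resp.terminal_state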
27.210938
109
0.729543
483
3,483
4.950311
0.254658
0.060226
0.027604
0.023839
0.168967
0.136345
0.075282
0.075282
0.075282
0.039314
0
0.01042
0.145851
3,483
127
110
27.425197
0.793277
0.079242
0
0.139535
0
0
0.176673
0.022827
0
0
0
0
0
0
null
null
0
0.081395
null
null
0.232558
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d89b06fcef8d37cf29e8426902af9fd2fae016ee
807
py
Python
game/characters/enemy.py
FilippoLeone/pypega
8dd3eee22dcac063d5de430a4c1e3e34a5cc5b85
[ "MIT" ]
4
2020-05-10T09:20:15.000Z
2021-03-11T22:59:04.000Z
game/characters/enemy.py
FilippoLeone/pypega
8dd3eee22dcac063d5de430a4c1e3e34a5cc5b85
[ "MIT" ]
null
null
null
game/characters/enemy.py
FilippoLeone/pypega
8dd3eee22dcac063d5de430a4c1e3e34a5cc5b85
[ "MIT" ]
1
2020-09-14T02:39:02.000Z
2020-09-14T02:39:02.000Z
import pyxel
import constants as c
import random


class Gachi:
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.x_side = [-16, 16, 16, 16, 16, 16]
        self.y_side = [16, -16, 16, 16, 16, 16, 16, 16, 16, 16]
        self.hp = c.gachi_hp

    def draw(self):
        pyxel.blt(self.x, self.y, 0, 16, 48,
                  self.x_side[random.randint(0, len(self.x_side) - 1)],
                  self.y_side[random.randint(0, len(self.y_side) - 1)], 0)

    def draw_hp_bar(self):
        pyxel.text(17, 10, self.get_hp(), 8)
        pyxel.blt(-2, 10, 0, 48, 48, 16, 16, 0)
        pyxel.blt(-18, 10, 0, 48+16, 48, 16, 16, 0)
        pyxel.blt(-34, 10, 0, 48+32, 48, 16, 16, 0)
        pyxel.blt(-50, 10, 0, 48+48, 48, 16, 16, 0)

    def get_hp(self):
        return (f"{self.hp}")
29.888889
96
0.519207
147
807
2.748299
0.244898
0.178218
0.178218
0.19802
0.381188
0.35396
0.118812
0.049505
0.049505
0
0
0.192171
0.303594
807
27
97
29.888889
0.52669
0
0
0
0
0
0.011139
0
0
0
0
0
0
1
0.181818
false
0
0.136364
0.045455
0.409091
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8a04a5bb0fbfb255a9d56aeb1bca04c5479f958
302
py
Python
scripts/load_raw_data_into_db.py
netoferraz/acordaos-tcu
67088d87d3ace7f85b8628955db7cf3ecdfaac45
[ "MIT" ]
21
2019-09-02T20:42:30.000Z
2021-09-14T09:54:04.000Z
scripts/load_raw_data_into_db.py
netoferraz/acordaos-tcu
67088d87d3ace7f85b8628955db7cf3ecdfaac45
[ "MIT" ]
2
2021-06-02T00:20:39.000Z
2021-12-13T20:14:30.000Z
scripts/load_raw_data_into_db.py
netoferraz/acordaos-tcu
67088d87d3ace7f85b8628955db7cf3ecdfaac45
[ "MIT" ]
2
2019-09-03T13:55:34.000Z
2020-12-08T22:09:49.000Z
from scripts.funcs import initiate_db, load_csv_into_db, load_json_into_db

conn, cur = initiate_db("./db/acordaos-download.db")
#years = list(range(1992, 2000))
filename = "./data/api/raw/2018_2019.json"
load_json_into_db(filename, cursor=cur)
#load_csv_into_db(years, cur)
conn.commit()
conn.close()
30.2
74
0.774834
51
302
4.294118
0.54902
0.109589
0.100457
0.118721
0
0
0
0
0
0
0
0.057554
0.07947
302
9
75
33.555556
0.730216
0.195364
0
0
0
0
0.224066
0.224066
0
0
0
0
0
1
0
false
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8a517012e48b5bb958e754dbf1e564082718cef
2,759
py
Python
tools/hippydebug.py
jweinraub/hippyvm
09c7643aaa1c4ade566e8681abd2543f12bf874c
[ "MIT" ]
289
2015-01-01T15:36:55.000Z
2022-03-27T00:22:27.000Z
tools/hippydebug.py
jweinraub/hippyvm
09c7643aaa1c4ade566e8681abd2543f12bf874c
[ "MIT" ]
26
2015-01-21T16:34:41.000Z
2020-08-26T15:12:54.000Z
tools/hippydebug.py
jweinraub/hippyvm
09c7643aaa1c4ade566e8681abd2543f12bf874c
[ "MIT" ]
35
2015-01-05T12:09:41.000Z
2022-03-16T09:30:16.000Z
#!/usr/bin/env python
"""Hippy debugger.

Usage: hippydebug.py [debugger_options] ../hippy-c args...

(There are no debugger_options so far.)
"""

import sys, os, signal
import getopt
import subprocess

sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from hippy.debugger import Connection, Message


def run_interactive(read_fd, write_fd):
    import readline   # for raw_input() below
    con = Connection(read_fd, write_fd)
    last_command = ''
    while True:
        try:
            msg = con.read()
        except EOFError:
            break
        if msg.command == '>':
            line = raw_input('> ')
            if not line:      # Ctrl-D
                break
            line = line.strip()
            if not line:
                line = last_command
            else:
                last_command = line
            lst = line.split(" ", 1)
            if len(lst) == 1:
                con.write(Message(lst[0], None))
            else:
                con.write(Message(lst[0], [lst[1]]))
        else:
            print msg.command, " ".join(msg.args)
            con.write(Message(".", None))


def reopen_terminal():
    f = open("/dev/tty", "r+", 0)
    sys.stdin = sys.stdout = sys.stderr = f
    os.dup2(f.fileno(), 0)
    os.dup2(f.fileno(), 1)
    os.dup2(f.fileno(), 2)


def printable_process_status(status):
    if os.WIFEXITED(status):
        return 'exit code %s' % (os.WEXITSTATUS(status),)
    elif os.WIFSIGNALED(status):
        return 'terminated by signal %s' % (os.WTERMSIG(status),)
    else:
        return 'unknown exit status 0x%x' % (status,)


def main(hippy_command, *hippy_args):
    read_fd1, write_fd1 = os.pipe()
    read_fd2, write_fd2 = os.pipe()
    child_pid = os.fork()
    if child_pid == 0:
        # in the child
        os.close(read_fd1)
        os.close(write_fd2)
        hippy_command_list = [
            hippy_command,
            '--debugger_pipes', str(read_fd2), str(write_fd1),
        ] + list(hippy_args)
        os.execvp(hippy_command, hippy_command_list)
        # this point never reached
    os.close(read_fd2)
    os.close(write_fd1)
    try:
        reopen_terminal()
        print >> sys.stderr, 'Hippy Debugger'
        run_interactive(read_fd1, write_fd2)
    finally:
        os.kill(child_pid, signal.SIGQUIT)
        print >> sys.stderr, 'Hippy finished:',
        _, status = os.waitpid(child_pid, 0)
        print >> sys.stderr, printable_process_status(status)


if __name__ == '__main__':
    options, args = getopt.getopt(sys.argv[1:], '', [])
    if not args:
        print >> sys.stderr, __doc__
        sys.exit(1)
    if not os.path.isfile(args[0]):
        print >> sys.stderr, '%s: No such file' % (args[0],)
        sys.exit(1)
    main(*args)
29.042105
79
0.573759
356
2,759
4.272472
0.351124
0.035503
0.046022
0.025641
0.08547
0
0
0
0
0
0
0.016854
0.290323
2,759
94
80
29.351064
0.759959
0.031533
0
0.157895
0
0
0.056604
0
0
0
0
0
0
0
null
null
0
0.065789
null
null
0.092105
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8a71387fda8430f0022fe54e931176d01f5e1cf
2,380
py
Python
brewerslab-orig-master/pitmButtonv2.py
allena29/brewerslabng
f47e671971436b7af806b54f6019c5b185d7d194
[ "Apache-2.0" ]
1
2020-04-12T10:08:10.000Z
2020-04-12T10:08:10.000Z
brewerslab-orig-master/pitmButtonv2.py
allena29/brewerslabng
f47e671971436b7af806b54f6019c5b185d7d194
[ "Apache-2.0" ]
2
2021-12-13T20:09:45.000Z
2022-03-08T21:09:57.000Z
brewerslab-orig-master/pitmButtonv2.py
allena29/brewerslabng
f47e671971436b7af806b54f6019c5b185d7d194
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
import os
import sys
import threading
import time

from pitmMcastOperations import pitmMcast
from pitmLogHandler import pitmLogHandler
from gpiotools2 import gpiotools2
from pitmCfg import pitmCfg


class pitmButton:

    """
    pitmButton manages the toggling of mode switches.

    These switches may be
     - Physical, which are read from GPIO
     - or fake buttons (ipc/manualswitch_<NAME>

    In both cases if a physical button is on or the fakebutton is set
    the status is written to an ipc/<NAME> file and broadcast via multicast
    """

    def __init__(self, rpi=True):
        self.cfg = pitmCfg()
        self.groot = pitmLogHandler()
        if rpi:
            self.gpio = gpiotools2()
        self.doMonitoring = False

    def _check_a_single_button(self, button):
        if os.path.exists('ipc/manualswitch_%s' % (button)):
            return True
        return self.gpio.input(button)

    def _set_ipc_flag(self, button):
        o = open("ipc/%s" % (button), "w")
        o.close()

    def _remove_ipc_flag(self, button):
        try:
            os.remove("ipc/%s" % (button))
        except:
            pass

    def _build_button_control_message(self):
        controlMessage = {}
        controlMessage['_operation'] = 'button'
        controlMessage['_checksum'] = " "
        controlMessage['_button'] = {}
        for button in ['swHlt', 'swFerm', 'swSparge', 'swMash', 'swBoil', 'swPump']:
            button_state = self._check_a_single_button(button)
            if button_state:
                self._set_ipc_flag(button)
            else:
                self._remove_ipc_flag(button)
            controlMessage['_button'][button] = button_state
        return controlMessage

    def broadcastButtonResult(self):
        print "advertising our Button capabilities"
        mcastHandler = pitmMcast()
        while 1:
            controlMessage = self._build_button_control_message()
            mcastHandler.send_mcast_message(controlMessage, self.cfg.mcastButtonPort, 'button')
            time.sleep(1)


if __name__ == '__main__':
    buttonController = pitmButton()

    broadcastResult = threading.Thread(target=buttonController.broadcastButtonResult)
    broadcastResult.daemon = True
    broadcastResult.start()

    while 1:
        time.sleep(5)
27.674419
95
0.62521
251
2,380
5.7251
0.462151
0.019485
0.016701
0.025052
0
0
0
0
0
0
0
0.004115
0.285294
2,380
85
96
28
0.840682
0.006723
0
0.037037
0
0
0.097345
0
0
0
0
0
0
0
null
null
0.018519
0.148148
null
null
0.018519
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8a8be7176c225f7e17fbe31e29722d7b21628c6
1,167
py
Python
muddery/mappings/event_action_set.py
noahzaozao/muddery
294da6fb73cb04c62e5ba6eefe49b595ca76832a
[ "BSD-3-Clause" ]
null
null
null
muddery/mappings/event_action_set.py
noahzaozao/muddery
294da6fb73cb04c62e5ba6eefe49b595ca76832a
[ "BSD-3-Clause" ]
null
null
null
muddery/mappings/event_action_set.py
noahzaozao/muddery
294da6fb73cb04c62e5ba6eefe49b595ca76832a
[ "BSD-3-Clause" ]
null
null
null
""" All available event actions. """ from __future__ import print_function from django.conf import settings from evennia.utils import logger from muddery.utils.exception import MudderyError from muddery.utils.utils import classes_in_path from muddery.events.base_event_action import BaseEventAction class EventActionSet(object): """ All available event triggers. """ def __init__(self): self.dict = {} self.load() def load(self): """ Add all event actions from the path. """ # load classes for cls in classes_in_path(settings.PATH_EVENT_ACTION_BASE, BaseEventAction): key = cls.key if self.dict.has_key(key): logger.log_infomsg("Event action %s is replaced by %s." % (key, cls)) self.dict[key] = cls() def get(self, key): """ Get the function of the event action. """ action = self.dict.get(key, None) if action: return action.func def all(self): """ Get all event types. """ return self.dict.keys() EVENT_ACTION_SET = EventActionSet()
22.882353
85
0.608398
141
1,167
4.879433
0.390071
0.079942
0.049419
0
0
0
0
0
0
0
0
0
0.29563
1,167
50
86
23.34
0.836983
0.143959
0
0
0
0
0.037486
0
0
0
0
0
0
1
0.173913
false
0
0.26087
0
0.565217
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
d8a9231416df4329b68705f67027a39e5c572589
3,401
py
Python
bcml4pheno/ttbarzp.py
sheride/bcml4pheno
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
[ "Apache-2.0" ]
null
null
null
bcml4pheno/ttbarzp.py
sheride/bcml4pheno
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
[ "Apache-2.0" ]
null
null
null
bcml4pheno/ttbarzp.py
sheride/bcml4pheno
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: ttbarzp.ipynb (unless otherwise specified).

__all__ = ['get_elijah_ttbarzp_cs', 'get_manuel_ttbarzp_cs', 'import47Ddata', 'get47Dfeatures']

# Cell
import numpy as np
import tensorflow as tf

# Cell
def get_elijah_ttbarzp_cs():
    r"""
    Contains cross section information produced by Elijah for
    $pp \to t\overline{t} \; Z'$ collider phenomenology.

    Returns list containing signal masses, signal cross sections
    (for those masses, in pb), and background cross sections (also in pb)
    """
    # Z' masses (GeV) for which Elijah created signal samples
    elijah_masses = [10, 50, 100, 200, 350, 500, 1000, 2000, 5000]
    # signal cross sections (pb)
    elijah_sig_css = [9.801, 0.5445, 0.1442, 0.03622, 0.009998, 0.003802,
                      0.0003936, 2.034e-05, 2.748e-08]
    # background cross sections (pb)
    elijah_bg_css = [0.106, 0.0117, 5.58]
    return [elijah_masses, elijah_sig_css, elijah_bg_css]

# Cell
def get_manuel_ttbarzp_cs():
    r"""
    Contains cross section information produced through MadGraph by Manuel
    for collider phenomenology regarding the semihadronic, semileptonic
    $pp \to t\overline{t} \; Z', Z' \to b\overline{b}$ channel
    """
    # Z' masses (GeV) for which I (Elijah) created signal samples
    manuel_masses = [350, 500, 750, 1000, 2000, 3000, 4000]
    # signal cross sections (pb)
    manuel_sig_css = [0.001395, 0.0007823, 0.0003429, 0.0001692, 1.808e-05,
                      1.325e-06, 4.456e-07]
    # background cross sections (pb)
    manuel_bg_css = [0.1339, 0.01187, 5.603]
    return [manuel_masses, manuel_sig_css, manuel_bg_css]

# Cell
def import47Ddata(name):
    r"""
    Imports `name.npy` file containing 47-dimensional data for training

    Available files:

    - bgh.npy (Standard Model background 1, $pp \to t\overline{t}h$)
    - bg4t.npy (Standard Model background 2, $pp \to t\overline{t}t\overline{t}$)
    - bgnoh.npy (Standard Model background 3, $pp \to t\overline{t} \; \setminus \; h$)
    - sig350G.npy ($Z'$ signal, $m_{Z'} = 350$ GeV)
    - sig500G.npy ($Z'$ signal, $m_{Z'} = 500$ GeV)
    - sig1T.npy ($Z'$ signal, $m_{Z'} = 1$ TeV)
    - sig2T.npy ($Z'$ signal, $m_{Z'} = 2$ TeV)
    - sig4T.npy ($Z'$ signal, $m_{Z'} = 4$ TeV)
    """
    if name[-4:] == '.npy':
        name = name[:-4]
    url = 'https://storage.googleapis.com/ttbarzp/47dim/'
    try:
        path = tf.keras.utils.get_file(f'{name}.npy', url + name + '.npy')
        data = np.load(path)
        return data
    except:
        print(f"{name}.npy doesn't appear to exist")

# Cell
def get47Dfeatures():
    """
    Returns list containing the names of the 47 features found in the data
    accessible through `ttbarzp.import47Ddata()`
    """
    return [
        'pT b1', 'pT b2', 'pT b3', 'pT b4',
        'sdEta b1 b2', 'sdEta b1 b3', 'sdEta b1 b4',
        'sdEta b2 b3', 'sdEta b2 b4', 'sdEta b3 b4',
        'sdPhi b1 b2', 'sdPhi b1 b3', 'sdPhi b1 b4',
        'sdPhi b2 b3', 'sdPhi b2 b4', 'sdPhi b3 b4',
        'dR b1 b2', 'dR b1 b3', 'dR b1 b4',
        'dR b2 b3', 'dR b2 b4', 'dR b3 b4',
        'MET', 'pT l', 'MT l MET',
        'M b1 b2', 'M b1 b3', 'M b1 b4', 'M b2 b3', 'M b2 b4', 'M b3 b4',
        'MT b1 l MET', 'MT b2 l MET', 'MT b3 l MET', 'MT b4 l MET',
        'M j1 j2', 'pT j1', 'pT j2', 'dR j1 j2',
        'dR b1 l', 'dR b2 l', 'dR b3 l', 'dR b4 l',
        'sdPhi b1 l', 'sdPhi b2 l', 'sdPhi b3 l', 'sdPhi b4 l']
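# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# Pairs a documented data file with the feature names; the file name comes from
# the import47Ddata() docstring above. The rows-by-features layout assumed in
# the final check is an assumption, not documented by the source.
#
# sig = import47Ddata('sig1T')    # downloads sig1T.npy via tf.keras.utils.get_file
# cols = get47Dfeatures()         # 47 feature names, assumed to match column order
# assert sig.shape[1] == len(cols)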
41.987654
121
0.614525
540
3,401
3.792593
0.32037
0.026367
0.029297
0.031738
0.129883
0.0625
0.047852
0.047852
0
0
0
0.112132
0.231697
3,401
81
122
41.987654
0.671642
0.426051
0
0
1
0
0.306178
0.022963
0
0
0
0
0
1
0.105263
false
0
0.105263
0
0.315789
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8ae73d38d5c3ddf758f8b307b86f0cea7f183a9
444
py
Python
aiohttp_admin2/controllers/types.py
Arfey/aiohttp_admin2
2b3782389ec9e25809635811b76ef8111b27d8ba
[ "MIT" ]
12
2021-10-15T11:48:12.000Z
2022-03-24T07:31:43.000Z
aiohttp_admin2/controllers/types.py
Arfey/aiohttp_admin2
2b3782389ec9e25809635811b76ef8111b27d8ba
[ "MIT" ]
2
2021-12-29T16:31:05.000Z
2021-12-30T00:50:40.000Z
aiohttp_admin2/controllers/types.py
Arfey/aiohttp_admin2
2b3782389ec9e25809635811b76ef8111b27d8ba
[ "MIT" ]
null
null
null
import typing as t

__all__ = ["Cell", "ListObject", ]


class Cell(t.NamedTuple):
    """Field data representation for html template"""
    value: t.Any
    url: t.Tuple[str, t.Dict[str, t.Union[str, int]]]
    is_safe: bool = False


class ListObject(t.NamedTuple):
    rows: t.List[t.List[Cell]]
    has_next: bool
    has_prev: bool
    count: t.Optional[int]
    active_page: t.Optional[int]
    per_page: int
    next_id: t.Optional[int]
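# --- Editor's note: hypothetical usage sketch, not part of the original file. ---
# How these NamedTuples are likely meant to be populated; the route name,
# params, and values below are made up for illustration.
#
# row = [Cell(value="Alice", url=("user_detail", {"id": 1})),
#        Cell(value="<b>admin</b>", url=("user_detail", {"id": 1}), is_safe=True)]
# page = ListObject(rows=[row], has_next=False, has_prev=False,
#                   count=1, active_page=1, per_page=25, next_id=None)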
21.142857
53
0.650901
68
444
4.102941
0.558824
0.096774
0.129032
0
0
0
0
0
0
0
0
0
0.209459
444
20
54
22.2
0.794872
0.096847
0
0
0
0
0.035443
0
0
0
0
0
0
1
0
false
0
0.071429
0
0.928571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
d8b1ac767220107dcae2af420dd45336ec49a8d3
9,548
py
Python
sppas/sppas/src/ui/phoenix/page_files/refstreectrl.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
sppas/sppas/src/ui/phoenix/page_files/refstreectrl.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
sppas/sppas/src/ui/phoenix/page_files/refstreectrl.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
# -*- coding: UTF-8 -*-
"""
    ..
        ---------------------------------------------------------------------
         ___   __    __    __    ___
        /     |  \  |  \  |  \  /              the automatic
        \__   |__/  |__/  |___| \__             annotation and
           \  |     |     |   |    \             analysis
        ___/  |     |     |   | ___/              of speech

        http://www.sppas.org/

        Use of this software is governed by the GNU Public License, version 3.

        SPPAS is free software: you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation, either version 3 of the License, or
        (at your option) any later version.

        SPPAS is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with SPPAS. If not, see <http://www.gnu.org/licenses/>.

        This banner notice must not be removed.

        ---------------------------------------------------------------------

    src.ui.phoenix.page_files.refstreectrl.py
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

"""

import logging
import wx
import wx.dataview

from .basectrls import BaseTreeViewCtrl
from .refsviewmodel import ReferencesTreeViewModel

# ----------------------------------------------------------------------------
# Control to store the data matching the model
# ----------------------------------------------------------------------------


class ReferencesTreeViewCtrl(BaseTreeViewCtrl):
    """A control to display references in a tree-spreadsheet style.

    :author:       Brigitte Bigi
    :organization: Laboratoire Parole et Langage, Aix-en-Provence, France
    :contact:      contact@sppas.org
    :license:      GPL, v3
    :copyright:    Copyright (C) 2011-2019  Brigitte Bigi

    Columns of this class are defined by the model and created by the
    constructor. No parent nor children will have the possibility to
    Append/Insert/Prepend/Delete columns: such methods are disabled.

    """

    def __init__(self, parent, name=wx.PanelNameStr):
        """Constructor of the ReferencesTreeViewCtrl.

        :param parent: (wx.Window)

        """
        super(ReferencesTreeViewCtrl, self).__init__(parent, name)

        # Create an instance of our model and associate to the view.
        self._model = ReferencesTreeViewModel()
        self.AssociateModel(self._model)
        self._model.DecRef()

        # Create the columns that the model wants in the view.
        for i in range(self._model.GetColumnCount()):
            col = BaseTreeViewCtrl._create_column(self._model, i)
            if i == self._model.GetExpanderColumn():
                self.SetExpanderColumn(col)
            wx.dataview.DataViewCtrl.AppendColumn(self, col)

        # Bind events.
        # Used to remember the expand/collapse status of items after a refresh.
        self.Bind(wx.dataview.EVT_DATAVIEW_ITEM_EXPANDED, self._on_item_expanded)
        self.Bind(wx.dataview.EVT_DATAVIEW_ITEM_COLLAPSED, self._on_item_collapsed)
        self.Bind(wx.dataview.EVT_DATAVIEW_ITEM_ACTIVATED, self._on_item_activated)
        self.Bind(wx.dataview.EVT_DATAVIEW_SELECTION_CHANGED, self._on_item_selection_changed)
        self.Bind(wx.dataview.EVT_DATAVIEW_ITEM_EDITING_DONE, self._on_item_edited)

    # ------------------------------------------------------------------------
    # Public methods
    # ------------------------------------------------------------------------

    def get_data(self):
        """Return the data of the model."""
        return self._model.get_data()

    # ------------------------------------------------------------------------

    def set_data(self, data):
        self._model.set_data(data)
        self.__refresh()

    # ------------------------------------------------------------------------

    def GetCheckedRefs(self):
        """Return checked references."""
        return self._model.get_checked_refs()

    # ------------------------------------------------------------------------

    def HasCheckedRefs(self):
        """Return True if at least one reference is checked."""
        return self._model.has_checked_refs()

    # ------------------------------------------------------------------------

    def CreateRef(self, ref_name, ref_type):
        """Create a new reference and add it into the tree.

        :param ref_name: (str)
        :param ref_type: (str) One of the accepted types of references
        :raise: Exception

        """
        item = self._model.create_ref(ref_name, ref_type)
        if item is None:
            raise Exception("An unexpected error occurred.")
        logging.info('Reference created successfully: {:s}, {:d}'
                     ''.format(ref_name, ref_type))

    # ------------------------------------------------------------------------

    def AddRefs(self, entries):
        """Add a list of references into the model.

        :param entries: (str) List of references.

        """
        nb = self._model.add_refs(entries)
        if nb > 0:
            logging.debug('Added {:d} references in the data.'.format(len(entries)))
            self.__refresh()
        return nb

    # ------------------------------------------------------------------------

    def RemoveCheckedRefs(self):
        """Remove all checked references."""
        nb = self._model.remove_checked_refs()
        if nb > 0:
            logging.info('Removed {:d} references.'.format(nb))
            self.__refresh()
        return nb

    # ------------------------------------------------------------------------

    def RemoveAttribute(self, identifier):
        """Remove an attribute from the checked references.

        :param identifier: (str)
        :returns: Number of references in which the attribute was removed.

        """
        nb = self._model.remove_attribute(identifier)
        logging.info('Identifier {:s} removed of {:d} references.'
                     ''.format(identifier, nb))
        if nb > 0:
            self.__refresh()
        return nb

    # ------------------------------------------------------------------------

    def EditAttribute(self, identifier, value, att_type, description):
        """Add or modify an attribute into the checked references.

        :param identifier: (str)
        :param value: (str)
        :param att_type: (str)
        :param description: (str)
        :returns: Number of references in which the attribute was added.

        """
        nb = self._model.edit_attribute(identifier, value, att_type, description)
        logging.info('Identifier {:s} added into {:d} references.'
                     ''.format(identifier, nb))
        if nb > 0:
            self.__refresh()
        return nb

    # ------------------------------------------------------------------------

    def update_data(self):
        """Overridden. Update the currently displayed data."""
        self._model.update()
        self.__refresh()

    # ------------------------------------------------------------------------
    # Callbacks to events
    # ------------------------------------------------------------------------

    def _on_item_expanded(self, evt):
        """Happens when the user checked the 1st column of the tree.

        We have to update the corresponding object 'expand' value to True.

        """
        self._model.expand(True, evt.GetItem())

    # ------------------------------------------------------------------------

    def _on_item_collapsed(self, evt):
        """Happens when the user checked the 1st column of the tree.

        We have to update the corresponding object 'expand' value to False.

        """
        self._model.expand(False, evt.GetItem())

    # ------------------------------------------------------------------------

    def _on_item_activated(self, event):
        """Happens when the user activated a cell (double-click).

        This event is triggered by double clicking an item or pressing some
        special key (usually "Enter") when it is focused.

        """
        self._model.change_value(event.GetItem())

    # ------------------------------------------------------------------------

    def _on_item_selection_changed(self, event):
        """Happens when the user simple-click a cell."""
        self._model.change_value(event.GetItem())

    # ------------------------------------------------------------------------

    def _on_item_edited(self, event):
        """Happens when the user modified the content of an editable cell.

        Notice that on MacOS, the event.GetValue() method returns None, so
        that the value can not be changed in that way. Use SetValue() of the
        model instead.

        """
        if wx.Platform != "__WXMAC__":
            self._model.change_value(event.GetItem(), event.GetColumn(), event.GetValue())

    # ------------------------------------------------------------------------

    def __refresh(self):
        for item in self._model.get_expanded_items(True):
            self.Expand(item)
        for item in self._model.get_expanded_items(False):
            self.Collapse(item)
36.723077
94
0.506912
934
9,548
5.008565
0.307281
0.04425
0.010688
0.019239
0.252886
0.207567
0.149637
0.121419
0.106883
0.089354
0
0.002506
0.247801
9,548
259
95
36.864865
0.648844
0.529849
0
0.216867
0
0
0.055986
0
0
0
0
0
0
1
0.204819
false
0
0.060241
0
0.361446
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
d8bd9a8ceac50742b31415693499379e4a5d5377
8,540
py
Python
RecoBTag/PerformanceDB/python/measure/Btag_btagTtbarWp0612.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
RecoBTag/PerformanceDB/python/measure/Btag_btagTtbarWp0612.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
RecoBTag/PerformanceDB/python/measure/Btag_btagTtbarWp0612.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms

BtagPerformanceESProducer_TTBARWPBTAGCSVL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGCSVL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGCSVLtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGCSVLwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGCSVM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGCSVM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGCSVMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGCSVMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGCSVT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGCSVT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGCSVTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGCSVTwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJPL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJPLtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJPLwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJPM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJPMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJPMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJPT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJPTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJPTwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPLtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPLwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPTwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPLtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPLwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGJBPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGJBPT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGJBPTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGJBPTwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGSSVHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGSSVHEM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGSSVHEMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHEMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGSSVHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGSSVHET'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGSSVHETtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHETwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGSSVHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGSSVHPT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGSSVHPTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGSSVHPTwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHEL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHEL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHELtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHELwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHEM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHEM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHEMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHEMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHET = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHET'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHETtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHETwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHPL = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHPL'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHPLtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPLwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHPM = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHPM'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHPMtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPMwp_v8_offline')
)

BtagPerformanceESProducer_TTBARWPBTAGTCHPT = cms.ESProducer("BtagPerformanceESProducer",
# this is what it makes available
    ComponentName = cms.string('TTBARWPBTAGTCHPT'),
# this is where it gets the payload from
    PayloadName = cms.string('BTagTTBARWPBTAGTCHPTtable_v8_offline'),
    WorkingPointName = cms.string('BTagTTBARWPBTAGTCHPTwp_v8_offline')
)
57.315436
89
0.709133
741
8,540
8.031039
0.106613
0.095278
0.134095
0.14821
0.719039
0.633339
0.633339
0.633339
0.633339
0.633339
0
0.0063
0.219321
8,540
148
90
57.702703
0.886306
0.286885
0
0.226415
0
0
0.379063
0.325054
0
0
0
0
0
1
0
false
0
0.009434
0
0.009434
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8c3cd712d592f39d12428d51a6c41a7fc38afb8
9,316
py
Python
qurkexp/join/pair-results.py
marcua/qurk_experiments
453c207ff50e730aefb6e1118e0f93e33babdb0b
[ "BSD-3-Clause" ]
1
2015-09-30T00:09:06.000Z
2015-09-30T00:09:06.000Z
qurkexp/join/pair-results.py
marcua/qurk_experiments
453c207ff50e730aefb6e1118e0f93e33babdb0b
[ "BSD-3-Clause" ]
null
null
null
qurkexp/join/pair-results.py
marcua/qurk_experiments
453c207ff50e730aefb6e1118e0f93e33babdb0b
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
import sys, os
ROOT = os.path.abspath('%s/../..' % os.path.abspath(os.path.dirname(__file__)))
sys.path.append(ROOT)
os.environ['DJANGO_SETTINGS_MODULE'] = 'qurkexp.settings'
from django.core.management import setup_environ
from django.conf import settings

from qurkexp.join.models import *
from qurkexp.join.gal import getbtjoindata, getjoindata, run_gal
from qurkexp.hitlayer.models import HitLayer
from scipy import stats

#batch = (sys.argv[1] == "batch")
#num_to_compare = int(sys.argv[2])

#run_name = "joinpairs-actual-4"    # match 6x6, 5 assignments each, 1 cent
#run_name = "joinpairs-30-2"        # match 30x30, 5 assignments each, 1 cent
#run_name = "joinpairs-30-5"        # match 30x30, 5 assignments each, 1 cent
#run_name = "joinpairs-20-2"        # match 20x20, 5 assignments each, 1 cent
#run_name = "joinpairs-20-4"        # match 20x20, 5 assignments each, 1 cent
run_name = "joinpairs-15-1"         # match 15x15, 5 assignments each, 1 cent

run_groups = [
#    [False, "joinpairs-30-5",],
#    [False, "joinpairs-30-2",],
    [False, "joinpairs-30-2", "joinpairs-30-5",],
#    [False, "joinpairs-20-2",],
#    [False, "joinpairs-20-4",],
#    [False, "joinpairs-20-2", "joinpairs-20-4",],
#    [False, "joinpairs-15-1",],

#    [True, "30-10-naive-ordered-1",],   # match 30x30, batch size 10, 5 assignments each, 1 cent
#    [True, "30-10-naive-ordered-20",],  # match 30x30, batch size 10, 5 assignments each, 1 cent
#    [True, "30-10-naive-ordered-1", "30-10-naive-ordered-20",],

#    [True, "30-5-naive-ordered-1",],    # match 30x30, batch size 5, 5 assignments each, 1 cent
#    [True, "30-5-naive-ordered-20",],   # match 30x30, batch size 5, 5 assignments each, 1 cent
#    [True, "30-5-naive-ordered-1", "30-5-naive-ordered-20",],

#    [True, "30-3-naive-ordered-1",],    # match 30x30, batch size 3, 5 assignments each, 1 cent
#    [True, "30-3-naive-ordered-20",],   # match 30x30, batch size 3, 5 assignments each, 1 cent
#    [True, "30-3-naive-ordered-1", "30-3-naive-ordered-20",],

#    [True, "20-1-naive-ordered-3",],    # match 20x20, batch size 3, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-4",],    # match 20x20, batch size 3, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-3", "20-1-naive-ordered-4",],

#    [True, "20-1-naive-ordered-1-ACTUALLYSMART",],  # match 20x20, batch size 1, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-2-ACTUALLYSMART",],  # match 20x20, batch size 1, 5 assignments each, 1 cent
#    [True, "20-1-naive-ordered-1-ACTUALLYSMART", "20-1-naive-ordered-2-ACTUALLYSMART",],

#    [True, "8-2-smart-ordered-1",],     # match 8x8, batch size 2, 5 assignments each, 1 cent (bad join interface taint?)
#    [True, "30-5-smart-ordered-1",],    # match 30x30, batch size 5, 5 assignments each, 1 cent (bad join interface taint?)
#    "30-2-smart-ordered-1",             # match 30x30, batch size 2, 5 assignments each, 1 cent (bad join interface taint?)
#    "20-1-smart-ordered-1",             # match 20x20, batch size 1, 5 assignments each, 1 cent (bad join interface taint?)

#    [True, "30-3-smart-ordered-1",],    # match 30x30, batch size 3, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-3-smart-ordered-2",],    # match 30x30, batch size 3, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-3-smart-ordered-1", "30-3-smart-ordered-2"],

#    [True, "20-1-smart-ordered-3",],    # match 20x20, batch size 1, 5 assignments each, 1 cent (fixed UI taint for IE8)

#    [True, "30-2-smart-ordered-2",],    # match 30x30, batch size 2, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-2-smart-ordered-3",],    # match 30x30, batch size 2, 5 assignments each, 1 cent (fixed UI taint for IE8)
#    [True, "30-2-smart-ordered-2", "30-2-smart-ordered-3"],
]


def update_matches(ismatch, foundtrue, fn, fp, tn, tp):
    if ismatch and foundtrue:
        tp += 1
    elif ismatch and not foundtrue:
        fn += 1
#        fn_group.append("%d_%d %d %d" % (left, right, true_count, false_count))
    elif not ismatch and foundtrue:
        fp += 1
#        fp_group.append("%d_%d %d %d" % (left, right, true_count, false_count))
    elif not ismatch and not foundtrue:
        tn += 1
    return (fn, fp, tn, tp)


def main(batch, run_names):
    if batch:
        pairs = BPPair.objects.filter(bpbatch__experiment__run_name__in = run_names)
    else:
        pairs = Pair.objects.filter(run_name__in = run_names)
#    if num_to_compare > 0:
#        pairs = pairs.filter(left__lte = num_to_compare).filter(right__lte = num_to_compare)
    print "num pairs", pairs.count()

    #print "Turker histogram"
    turkers = {}
    for pair in pairs:
        if batch:
            resps = pair.bprespans_set.all()
        else:
            resps = pair.pairresp_set.all()
        for resp in resps:
            if batch:
                wid = resp.bprm.wid
            else:
                wid = resp.wid
            if wid not in turkers:
                turkers[wid] = [0.0, 0.0, [], []]
#            weight = num_to_compare if (pair.left == pair.right) else 1
#            weight = 30 if (pair.left == pair.right) else 1
            turkers[wid][0] += 1#weight#1
            turkers[wid][1] += (1 if ((pair.left == pair.right) == resp.same) else 0)#*weight
            turkers[wid][2].append(resp.same)

    if batch:
        worker_resps = BPRespAns.objects.filter(bprm__wid__in = turkers.keys()).filter(bprm__batch__experiment__run_name__in = run_names).order_by('bprm__submit_time')
    else:
        worker_resps = PairResp.objects.filter(wid__in = turkers.keys()).filter(pair__run_name__in = run_names).order_by('submit_time')
    for resp in worker_resps:
        if batch:
            arr = turkers[resp.bprm.wid][3]
        else:
            arr = turkers[resp.wid][3]
        actualres = resp.pair.left == resp.pair.right
        if actualres:
            if resp.same == actualres:
                arr.append('a')
            else:
                arr.append('_')
        else:
            if resp.same == actualres:
                arr.append('b')
            else:
                arr.append('-')

    lturkers = list(turkers.items())
    lturkers.sort(lambda x, y: x[1][0] < y[1][0] and -1 or 1)
    for k, v in lturkers:
        #print k,v[0],v[1]/v[0], "".join(v[3])
        pass#print '%5f, %d, %s' % ((v[1]/v[0]), len(v[3]), "".join(v[3]))
    xs = [v[1]/v[0] for k, v in lturkers]
    print "len xs", len(xs)
    ys = [len(v[3]) for k, v in lturkers]
    print "len ys", len(ys)
    (s, i, r, p, std) = stats.linregress(ys, xs)
    print "regression---slope %f, intercept %f, R^2 %f, p %f" % (s, i, r*r, p)

    #print "Accuracy printout"
    pair_counts = {}
    for pair in pairs:
        counts = pair_counts.get((pair.left, pair.right), {})
        if batch:
            resps = pair.bprespans_set.all()
        else:
            resps = pair.pairresp_set.all()
        for resp in resps:
            if batch:
                wid = resp.bprm.wid
            else:
                wid = resp.wid
#            if turkers[wid][1] / turkers[wid][0] < 0.6:
#                continue
            if resp.same:
                counts["true_count"] = counts.get("true_count", 0) + 1
            else:
                counts["false_count"] = counts.get("false_count", 0) + 1
        pair_counts[(pair.left, pair.right)] = counts

    if batch:
        data = getbtjoindata(run_names)
        exptype = "btjoin"
    else:
        data = getjoindata(run_names)
        exptype = "join"
    gal_w, gal_res = run_gal(exptype, data)
    gal_res = dict(gal_res)

#    fp_group = []
#    fn_group = []
    (fn_mv, fp_mv, tn_mv, tp_mv, fn_g, fp_g, tn_g, tp_g) = [0.0]*8
#    tc_count = 0
    for (left, right), counts in pair_counts.items():
        (true_count, false_count) = (counts.get("true_count", 0), counts.get("false_count", 0))
        foundtrue_mv = true_count > false_count
        gal_dict = dict(gal_res["%d_%d" % (left, right)])
        if gal_dict["True"] > .8:
            foundtrue_g = True
        else:
            foundtrue_g = False
        ismatch = (left == right)
#        if ismatch:
#            tc_count += counts.get("true_count", 0)
        (fn_mv, fp_mv, tn_mv, tp_mv) = update_matches(ismatch, foundtrue_mv, fn_mv, fp_mv, tn_mv, tp_mv)
        (fn_g, fp_g, tn_g, tp_g) = update_matches(ismatch, foundtrue_g, fn_g, fp_g, tn_g, tp_g)
#        if not(ismatch and false_count == 0) and not(not ismatch and true_count == 0) and ismatch != foundtrue:
#            print left, right, ismatch, true_count, false_count, foundtrue

#    print "true pos, true neg, false pos, false neg"
    print "%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d" % ("+".join(run_names), fn_mv, fp_mv, tn_mv, tp_mv, fn_g, fp_g, tn_g, tp_g)
    # tc_count is only accumulated in the commented-out block above, so
    # printing it would raise a NameError; the print is commented out to match.
#    print "tc_count", tc_count
#    fp_group.sort()
#    fn_group.sort()
    #print "fp_group", fp_group
    #print "fn_group", fn_group


if __name__ == "__main__":
    print "runs\tfn_mv\tfp_mv\ttn_mv\ttp_mv\tfn_g\tfp_g\ttn_g\ttp_g"
    for runs in run_groups:
        main(runs[0], runs[1:])
46.348259
167
0.584908
1,387
9,316
3.794521
0.129056
0.057002
0.076002
0.080752
0.545886
0.480144
0.424093
0.380581
0.375261
0.362721
0
0.061132
0.264277
9,316
200
168
46.58
0.706741
0.484006
0
0.29661
0
0.008475
0.077429
0.023759
0
0
0
0
0
0
null
null
0.008475
0.059322
null
null
0.059322
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8c54062ff8343ff9ddf5caf6e861fe863f75af6
8,057
py
Python
plasmapy/utils/tests/test_checks.py
ludoro/PlasmaPy
69712cb40b8b588400301edfd6925d41d2f13eac
[ "BSD-2-Clause-Patent", "BSD-3-Clause" ]
1
2020-04-28T23:04:41.000Z
2020-04-28T23:04:41.000Z
plasmapy/utils/tests/test_checks.py
ludoro/PlasmaPy
69712cb40b8b588400301edfd6925d41d2f13eac
[ "BSD-2-Clause-Patent", "BSD-3-Clause" ]
null
null
null
plasmapy/utils/tests/test_checks.py
ludoro/PlasmaPy
69712cb40b8b588400301edfd6925d41d2f13eac
[ "BSD-2-Clause-Patent", "BSD-3-Clause" ]
null
null
null
"""Tests for methods relating to quantities.""" import numpy as np from astropy import units as u import pytest from ...constants import c from ..checks import ( _check_quantity, _check_relativistic, check_relativistic, check_quantity ) # (value, units, error) quantity_error_examples_default = [ # exceptions associated with the units keyword (5*u.T, 5*u.T, TypeError), (5*u.T, 5, TypeError), (5*u.T, [u.T, 1], TypeError), (5*u.T, [1, u.m], TypeError), (u.T, u.J, TypeError), (5.0, u.m, UserWarning), (3*u.m/u.s, u.m, u.UnitConversionError), (5j*u.K, u.K, ValueError), ] # (value, units, can_be_negative, can_be_complex, can_be_inf, error) quantity_error_examples_non_default = [ (-5*u.K, u.K, False, False, True, ValueError), (np.inf*u.K, u.K, True, False, False, ValueError) ] # (value, units) quantity_valid_examples_default = [ # check basic functionality (5*u.T, u.T), (3*u.m/u.s, u.m/u.s), (3*u.m/u.s, [u.m/u.s]), (3*u.m/u.s**2, [u.m/u.s, u.m/(u.s**2)]), (3*u.km/u.yr, u.m/u.s), # check temperature in units of energy per particle (e.g., eV) (5*u.eV, u.K), (5*u.K, u.eV), (5*u.keV, [u.m, u.K]), # check keywords relating to numerical values (np.inf*u.T, u.T) ] # (value, units, can_be_negative, can_be_complex, can_be_inf) quantity_valid_examples_non_default = [ (5j*u.m, u.m, True, True, True) ] # Tests for _check_quantity @pytest.mark.parametrize( "value, units, can_be_negative, can_be_complex, can_be_inf, error", quantity_error_examples_non_default) def test__check_quantity_errors_non_default( value, units, can_be_negative, can_be_complex, can_be_inf, error): with pytest.raises(error): _check_quantity(value, 'arg', 'funcname', units, can_be_negative=can_be_negative, can_be_complex=can_be_complex, can_be_inf=can_be_inf) @pytest.mark.parametrize( "value, units, error", quantity_error_examples_default) def test__check_quantity_errors_default(value, units, error): with pytest.raises(error): _check_quantity(value, 'arg', 'funcname', units) @pytest.mark.parametrize( "value, units, can_be_negative, can_be_complex, can_be_inf", quantity_valid_examples_non_default) def test__check_quantity_non_default( value, units, can_be_negative, can_be_complex, can_be_inf): _check_quantity(value, 'arg', 'funcname', units, can_be_negative=can_be_negative, can_be_complex=can_be_complex, can_be_inf=can_be_inf) @pytest.mark.parametrize("value, units", quantity_valid_examples_default) def test__check_quantity_default(value, units): _check_quantity(value, 'arg', 'funcname', units) # Tests for check_quantity decorator @pytest.mark.parametrize( "value, units, error", quantity_error_examples_default) def test_check_quantity_decorator_errors_default(value, units, error): @check_quantity({ "x": {"units": units} }) def func(x): return x with pytest.raises(error): func(value) @pytest.mark.parametrize( "value, units, can_be_negative, can_be_complex, can_be_inf, error", quantity_error_examples_non_default) def test_check_quantity_decorator_errors_non_default( value, units, can_be_negative, can_be_complex, can_be_inf, error): @check_quantity({ "x": {"units": units, "can_be_negative": can_be_negative, "can_be_complex": can_be_complex, "can_be_inf": can_be_inf} }) def func(x): return x with pytest.raises(error): func(value) @pytest.mark.parametrize("value, units", quantity_valid_examples_default) def test_check_quantity_decorator_default(value, units): @check_quantity({ "x": {"units": units} }) def func(x): return x func(value) @pytest.mark.parametrize( "value, units, can_be_negative, can_be_complex, can_be_inf", 
quantity_valid_examples_non_default) def test_check_quantity_decorator_non_default( value, units, can_be_negative, can_be_complex, can_be_inf): @check_quantity({ "x": {"units": units, "can_be_negative": can_be_negative, "can_be_complex": can_be_complex, "can_be_inf": can_be_inf} }) def func(x): return x func(value) def test_check_quantity_decorator_missing_validated_params(): @check_quantity({ "x": {"units": u.m}, "y": {"units": u.s} }) def func(x): return x with pytest.raises(TypeError) as e: func(1*u.m) assert "Call to func is missing validated params y" == str(e.value) def test_check_quantity_decorator_two_args_default(): @check_quantity({ "x": {"units": u.m}, "y": {"units": u.s} }) def func(x, y): return x/y func(1*u.m, 1*u.s) def test_check_quantity_decorator_two_args_not_default(): @check_quantity({ "x": {"units": u.m, "can_be_negative": False}, "y": {"units": u.s} }) def func(x, y): return x/y with pytest.raises(ValueError): func(-1*u.m, 2*u.s) def test_check_quantity_decorator_two_args_one_kwargs_default(): @check_quantity({ "x": {"units": u.m}, "y": {"units": u.s}, "z": {"units": u.eV} }) def func(x, y, another, z=10*u.eV): return x*y*z func(1*u.m, 1*u.s, 10*u.T) def test_check_quantity_decorator_two_args_one_kwargs_not_default(): @check_quantity({ "x": {"units": u.m}, "y": {"units": u.s, "can_be_negative": False}, "z": {"units": u.eV, "can_be_inf": False} }) def func(x, y, z=10*u.eV): return x*y*z with pytest.raises(ValueError): func(1*u.m, 1*u.s, z=np.inf*u.eV) # (speed, betafrac) non_relativistic_speed_examples = [ (0*u.m/u.s, 0.1), (0.099999*c, 0.1), (-0.09*c, 0.1), (5*u.AA/u.Gyr, 0.1) ] # (speed, betafrac, error) relativisitc_error_examples = [ (0.11*c, 0.1, UserWarning), (1.0*c, 0.1, UserWarning), (1.1*c, 0.1, UserWarning), (np.inf*u.cm/u.s, 0.1, UserWarning), (-0.11*c, 0.1, UserWarning), (-1.0*c, 0.1, UserWarning), (-1.1*c, 0.1, UserWarning), (-np.inf*u.cm/u.s, 0.1, UserWarning), (2997924581*u.cm/u.s, 0.1, UserWarning), (0.02*c, 0.01, UserWarning), (u.m/u.s, 0.1, TypeError), (51513.35, 0.1, TypeError), (5*u.m, 0.1, u.UnitConversionError), (np.nan*u.m/u.s, 0.1, ValueError) ] # Tests for _check_relativistic @pytest.mark.parametrize("speed, betafrac", non_relativistic_speed_examples) def test__check_relativisitc_valid(speed, betafrac): _check_relativistic(speed, 'f', betafrac=betafrac) @pytest.mark.parametrize("speed, betafrac, error", relativisitc_error_examples) def test__check_relativistic_errors(speed, betafrac, error): with pytest.raises(error): _check_relativistic(speed, 'f', betafrac=betafrac) # Tests for check_relativistic decorator @pytest.mark.parametrize("speed, betafrac", non_relativistic_speed_examples) def test_check_relativistic_decorator(speed, betafrac): @check_relativistic(betafrac=betafrac) def speed_func(): return speed speed_func() @pytest.mark.parametrize( "speed", [item[0] for item in non_relativistic_speed_examples]) def test_check_relativistic_decorator_no_args(speed): @check_relativistic def speed_func(): return speed speed_func() @pytest.mark.parametrize( "speed", [item[0] for item in non_relativistic_speed_examples]) def test_check_relativistic_decorator_no_args_parentheses(speed): @check_relativistic() def speed_func(): return speed speed_func() @pytest.mark.parametrize("speed, betafrac, error", relativisitc_error_examples) def test_check_relativistic_decorator_errors(speed, betafrac, error): @check_relativistic(betafrac=betafrac) def speed_func(): return speed with pytest.raises(error): speed_func()
26.767442
79
0.655827
1,183
8,057
4.19273
0.099746
0.05746
0.052419
0.058065
0.775
0.758871
0.691935
0.665121
0.623589
0.575202
0
0.019467
0.203053
8,057
300
80
26.856667
0.752998
0.068884
0
0.557143
0
0
0.092209
0
0
0
0
0
0.004762
1
0.152381
false
0
0.02381
0.061905
0.238095
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8d163fba6f3d79bbb87bf34a4ae8f44106e768f
324
py
Python
Python3/Tornado/apps/pg/PG_Deposit/test/test_request.py
youngqqcn/QBlockChainNotes
85122049024dc5555705bf016312491a51966621
[ "MIT" ]
24
2018-11-01T03:36:43.000Z
2022-03-28T08:20:30.000Z
Python3/Tornado/apps/pg/PG_Deposit/test/test_request.py
songning4/QBlockChainNotes
d65ede073f5a20f728f41cc6850409693820cdb1
[ "MIT" ]
57
2019-12-04T08:26:47.000Z
2022-03-08T07:35:15.000Z
Python3/Tornado/apps/pg/PG_Deposit/test/test_request.py
youngqqcn/QBlockChainNotes
85122049024dc5555705bf016312491a51966621
[ "MIT" ]
11
2019-01-04T08:41:57.000Z
2022-03-16T03:51:36.000Z
#!coding:utf8

#author:yqq
#date:2020/8/14 0014 19:26
#description:

import requests


def main():
    url = 'http://htdf2020-test01.orientwalt.cn:1317/block_detail/1009408'
    r = requests.get(url=url)
    r.encoding = 'utf8'
    print(r.text)
    pass


if __name__ == '__main__':
    main()
12.96
75
0.595679
42
324
4.380952
0.809524
0
0
0
0
0
0
0
0
0
0
0.142259
0.262346
324
25
76
12.96
0.627615
0.182099
0
0
0
0
0.310924
0
0
0
0
0
0
1
0.111111
false
0.111111
0.111111
0
0.222222
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
d8d4111b5c5d9efbe753fdccc4f3bae97104b8fb
401
py
Python
test_project/test_project/settings_pytest.py
mpasternak/django-reciprocity
2bffe1ae6025675ae96bb9420e1f69cf48b414c6
[ "MIT" ]
1
2019-12-09T11:23:51.000Z
2019-12-09T11:23:51.000Z
test_project/test_project/settings_pytest.py
mpasternak/django-reciprocity
2bffe1ae6025675ae96bb9420e1f69cf48b414c6
[ "MIT" ]
7
2019-03-01T18:13:40.000Z
2022-02-12T14:44:51.000Z
test_project/test_project/settings_pytest.py
mpasternak/django-reciprocity
2bffe1ae6025675ae96bb9420e1f69cf48b414c6
[ "MIT" ]
null
null
null
# Settings for testing with included docker-compose and pytest

from .settings import *  # noqa

# Subscribe from remote selenium container to docker-compose nginx container
NGINX_PUSH_STREAM_PUB_HOST = "localhost"
NGINX_PUSH_STREAM_PUB_PORT = "9080"

# Subscribe from local TravisCI machine to docker-compose nginx container
NGINX_PUSH_STREAM_SUB_HOST = "webserver"
NGINX_PUSH_STREAM_SUB_PORT = "80"
33.416667
76
0.820449
57
401
5.491228
0.54386
0.115016
0.191693
0.127796
0.28115
0.28115
0.28115
0.28115
0
0
0
0.017143
0.127182
401
11
77
36.454545
0.877143
0.528678
0
0
0
0
0.130435
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8d857f5c61e30e7fbb6c701c98c28a407e889a3
2,184
py
Python
UnitGenerServer.py
SachithS/UnitGener
165912afd050e6bc20ac988291c3311e4d351c8f
[ "MIT" ]
null
null
null
UnitGenerServer.py
SachithS/UnitGener
165912afd050e6bc20ac988291c3311e4d351c8f
[ "MIT" ]
null
null
null
UnitGenerServer.py
SachithS/UnitGener
165912afd050e6bc20ac988291c3311e4d351c8f
[ "MIT" ]
null
null
null
""" UnitGenerCore.py - Server of the UnitGener This file is responsible for creating the routes and the server of the UnitGener core module. Will create all the needed routes with params and initiate the server. @author Sachith Senarathne @version 1.0 @maintainer Sachith Senarathne @copyright Copyright 2017, The UnitGener Project @license MIT @version 1.0 @email sachith.senarathnes@gmail.com @status Development """ from flask import Flask, Response from flask import request from pystruct.learners import FrankWolfeSSVM from pystruct.models import GraphCRF import urllib from tokenizer import FunctionTokenizer as fT from crfmodels import CRFPredictor as crf from crfmodels import AssertionPredictor as ap app = Flask(__name__) tokenizer = fT.FunctionTokenizer() crfpredictor = crf.CRFPredictor() assert_pre = ap.AssertionPredictor() model = GraphCRF(directed=True, inference_method="max-product") ssvm = FrankWolfeSSVM(model=model, C=.1, max_iter=10) @app.route('/status') def unitgener_status(): print ssvm return "Hello form UnitGener" @app.route('/generate', methods=['POST']) def get_unit_generated(): print request.data _js_function = urllib.unquote_plus(urllib.unquote_plus(request.data)) print _js_function # processed_function = process_function(_js_function) line_f = _js_function.replace('/n', " ") raw_tokens = tokenizer.init_processing_function(line_f) tr_sets = crfpredictor.generate_type1_prediction(raw_tokens) r_assert = ssvm.predict(tr_sets[0][0:1]) unit_test = assert_pre.unit_test_assembler(r_assert, raw_tokens, 2) response = Response(str(unit_test)) response.headers["content-type"] = "text/plain" return response if __name__ == '__main__': result = tokenizer.read_process_file() train_sets = crfpredictor.generate_type1_prediction(result) ssvm.fit(train_sets[0], train_sets[1]) result_assert = ssvm.predict(train_sets[0][0:1]) test = assert_pre.unit_test_assembler(result_assert, result, 2) for f in test: print f print result_assert app.run() def process_function(_js_function): pass
28.736842
96
0.742216
288
2,184
5.402778
0.440972
0.032134
0.014139
0.025707
0.088689
0.03856
0
0
0
0
0
0.012686
0.169872
2,184
75
97
29.12
0.84556
0.023352
0
0
0
0
0.050663
0
0
0
0
0
0.162791
0
null
null
0.023256
0.186047
null
null
0.116279
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8d8d2b359e3e55295ef066e6f3edd2c7fb3f2d5
288
py
Python
setup.py
Califrais/lights
cb4e3a0cbe64606f071ed02c06a9fc5db2681c1d
[ "MIT" ]
6
2021-01-15T14:23:33.000Z
2022-02-01T12:25:24.000Z
setup.py
Califrais/lights
cb4e3a0cbe64606f071ed02c06a9fc5db2681c1d
[ "MIT" ]
11
2020-12-18T13:16:34.000Z
2021-11-02T08:27:02.000Z
setup.py
Califrais/lights
cb4e3a0cbe64606f071ed02c06a9fc5db2681c1d
[ "MIT" ]
1
2021-08-12T23:07:07.000Z
2021-08-12T23:07:07.000Z
from setuptools import setup

setup(
    name='lights',
    version='0.1',
    author="Van-Tuan Nguyen",
    description="lights is a generalized joint model for high-dimensional multivariate longitudinal data and censored durations",
    url="https://github.com/Califrais/lights",
)
28.8
129
0.71875
36
288
5.75
0.944444
0
0
0
0
0
0
0
0
0
0
0.008403
0.173611
288
10
130
28.8
0.861345
0
0
0
0
0
0.584775
0
0
0
0
0
0
1
0
true
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
d8da595c175dc5992171e47a8fd68be6e3fb96a1
6,909
py
Python
pycg/machinery/imports.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
121
2020-12-16T20:31:37.000Z
2022-03-21T20:32:43.000Z
pycg/machinery/imports.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
24
2021-03-13T00:04:00.000Z
2022-03-21T17:28:11.000Z
pycg/machinery/imports.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
19
2021-03-23T10:58:47.000Z
2022-03-24T19:46:50.000Z
#
# Copyright (c) 2020 Vitalis Salis.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import ast
import os
import importlib
import copy

from pycg import utils


def get_custom_loader(ig_obj):
    """
    Closure which returns a custom loader
    that modifies an ImportManager object
    """
    class CustomLoader(importlib.abc.SourceLoader):
        def __init__(self, fullname, path):
            self.fullname = fullname
            self.path = path

            ig_obj.create_edge(self.fullname)
            if not ig_obj.get_node(self.fullname):
                ig_obj.create_node(self.fullname)
                ig_obj.set_filepath(self.fullname, self.path)

        def get_filename(self, fullname):
            return self.path

        def get_data(self, filename):
            return ""

    return CustomLoader


class ImportManager(object):
    def __init__(self):
        self.import_graph = dict()
        self.current_module = ""
        self.input_file = ""
        self.mod_dir = None
        self.old_path_hooks = None
        self.old_path = None

    def set_pkg(self, input_pkg):
        self.mod_dir = input_pkg

    def get_mod_dir(self):
        return self.mod_dir

    def get_node(self, name):
        if name in self.import_graph:
            return self.import_graph[name]

    def create_node(self, name):
        if not name or not isinstance(name, str):
            raise ImportManagerError("Invalid node name")
        if self.get_node(name):
            raise ImportManagerError("Can't create a node a second time")

        self.import_graph[name] = {"filename": "", "imports": set()}
        return self.import_graph[name]

    def create_edge(self, dest):
        if not dest or not isinstance(dest, str):
            raise ImportManagerError("Invalid node name")

        node = self.get_node(self._get_module_path())
        if not node:
            raise ImportManagerError("Can't add edge to a non existing node")
        node["imports"].add(dest)

    def _clear_caches(self):
        importlib.invalidate_caches()
        sys.path_importer_cache.clear()
        # TODO: maybe not do that since it empties the whole cache
        for name in self.import_graph:
            if name in sys.modules:
                del sys.modules[name]

    def _get_module_path(self):
        return self.current_module

    def set_current_mod(self, name, fname):
        self.current_module = name
        self.input_file = os.path.abspath(fname)

    def get_filepath(self, modname):
        if modname in self.import_graph:
            return self.import_graph[modname]["filename"]

    def set_filepath(self, node_name, filename):
        if not filename or not isinstance(filename, str):
            raise ImportManagerError("Invalid node name")

        node = self.get_node(node_name)
        if not node:
            raise ImportManagerError("Node does not exist")
        node["filename"] = os.path.abspath(filename)

    def get_imports(self, modname):
        if not modname in self.import_graph:
            return []
        return self.import_graph[modname]["imports"]

    def _is_init_file(self):
        return self.input_file.endswith("__init__.py")

    def _handle_import_level(self, name, level):
        # add a dot for each level
        package = self._get_module_path().split(".")
        if level > len(package):
            raise ImportError("Attempting import beyond top level package")

        mod_name = ("." * level) + name
        # When an __init__ file is analyzed, then the module name doesn't contain
        # the __init__ part in it, so special care must be taken for levels.
        if self._is_init_file() and level >= 1:
            if level != 1:
                level -= 1
                package = package[:-level]
        else:
            package = package[:-level]

        return mod_name, ".".join(package)

    def _do_import(self, mod_name, package):
        if mod_name in sys.modules:
            self.create_edge(mod_name)
            return sys.modules[mod_name]

        return importlib.import_module(mod_name, package=package)

    def handle_import(self, name, level):
        # We currently don't support builtin modules because they're frozen.
        # Add an edge and continue.
        # TODO: identify a way to include frozen modules
        root = name.split(".")[0]
        if root in sys.builtin_module_names:
            self.create_edge(root)
            return

        # Import the module
        try:
            mod_name, package = self._handle_import_level(name, level)
        except ImportError:
            return

        parent = ".".join(mod_name.split(".")[:-1])
        parent_name = ".".join(name.split(".")[:-1])
        combos = [(mod_name, package),
                  (parent, package),
                  (utils.join_ns(package, name), ""),
                  (utils.join_ns(package, parent_name), "")]

        mod = None
        for mn, pkg in combos:
            try:
                mod = self._do_import(mn, pkg)
                break
            except:
                continue

        if not mod:
            return

        if not hasattr(mod, "__file__") or not mod.__file__:
            return
        if self.mod_dir not in mod.__file__:
            return

        fname = mod.__file__
        if fname.endswith("__init__.py"):
            fname = os.path.split(fname)[0]

        return utils.to_mod_name(
            os.path.relpath(fname, self.mod_dir))

    def get_import_graph(self):
        return self.import_graph

    def install_hooks(self):
        loader = get_custom_loader(self)
        self.old_path_hooks = copy.deepcopy(sys.path_hooks)
        self.old_path = copy.deepcopy(sys.path)

        loader_details = loader, importlib.machinery.all_suffixes()
        sys.path_hooks.insert(0, importlib.machinery.FileFinder.path_hook(loader_details))
        sys.path.insert(0, os.path.abspath(self.mod_dir))

        self._clear_caches()

    def remove_hooks(self):
        sys.path_hooks = self.old_path_hooks
        sys.path = self.old_path

        self._clear_caches()


class ImportManagerError(Exception):
    pass
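For context, a minimal usage sketch of the ImportManager above. The package path and module name are made up for illustration, and the import path pycg.machinery.imports is an assumption about where this file lives in the pycg tree; only methods defined in the class itself are used.

# Sketch only: ./mypkg and mypkg/main.py are hypothetical.
from pycg.machinery.imports import ImportManager  # assumed module path

manager = ImportManager()
manager.set_pkg("./mypkg")                         # root dir imports are resolved against
manager.create_node("mypkg.main")                  # register the module under analysis
manager.set_current_mod("mypkg.main", "./mypkg/main.py")
manager.install_hooks()                            # route imports through CustomLoader
manager.handle_import("os.path", 0)                # records an edge for "import os.path"
manager.remove_hooks()
print(manager.get_import_graph())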
31.262443
90
0.625561
887
6,909
4.678692
0.251409
0.031807
0.039759
0.025301
0.134217
0.086506
0.056627
0.045301
0.026988
0.026988
0
0.003439
0.284412
6,909
220
91
31.404545
0.835963
0.179766
0
0.126761
0
0
0.047153
0
0
0
0
0.004545
0
1
0.161972
false
0.007042
0.288732
0.042254
0.619718
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
d8e3d77e68c95fabda4facfab3e43eb49416a652
923
py
Python
mit_d3m/db.py
micahjsmith/mit-d3m
a8138b2cc6329545bf3204f47cb39fe8faf7a44f
[ "MIT" ]
6
2018-11-26T09:48:49.000Z
2019-06-15T15:49:00.000Z
mit_d3m/db.py
micahjsmith/mit-d3m
a8138b2cc6329545bf3204f47cb39fe8faf7a44f
[ "MIT" ]
12
2019-01-21T19:07:33.000Z
2020-05-24T19:06:37.000Z
mit_d3m/db.py
micahjsmith/mit-d3m
a8138b2cc6329545bf3204f47cb39fe8faf7a44f
[ "MIT" ]
6
2018-11-26T09:48:52.000Z
2020-02-20T11:46:54.000Z
# -*- coding: utf-8 -*-

import getpass
import json
import logging

from pymongo import MongoClient

LOGGER = logging.getLogger(__name__)


def get_db(database=None, config=None, **kwargs):
    if config:
        with open(config, 'r') as f:
            config = json.load(f)
    else:
        config = kwargs

    host = config.get('host', 'localhost')
    port = config.get('port', 27017)
    user = config.get('user')
    password = config.get('password')
    database = database or config.get('database', 'test')
    auth_database = config.get('auth_database', 'admin')

    if user and not password:
        password = getpass.getpass(prompt='Please insert database password: ')

    client = MongoClient(
        host=host,
        port=port,
        username=user,
        password=password,
        authSource=auth_database
    )

    LOGGER.info("Setting up a MongoClient %s", client)

    return client[database]
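A short usage sketch for get_db above. The connection values are placeholders and a reachable MongoDB server is assumed; the collection call is standard pymongo.

# Sketch only: placeholder host/database, no real credentials.
db = get_db(database='experiments', host='localhost', port=27017)
print(db.name)                                   # -> 'experiments'
count = db['results'].count_documents({})        # plain pymongo collection query

The same settings could equally come from a JSON file passed via the config argument, since get_db merges kwargs and file-based config into one dict.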
23.075
78
0.633803
109
923
5.293578
0.477064
0.093588
0
0
0
0
0
0
0
0
0
0.008621
0.245937
923
39
79
23.666667
0.820402
0.022752
0
0
0
0
0.133333
0
0
0
0
0
0
1
0.035714
false
0.178571
0.142857
0
0.214286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
d8e3de05ad69c6353ea7726191b1fe77d3b073d2
53,019
py
Python
src/StandAlone/inputs/MPM/Arenisca/Arenisca3/AreniscaTestSuite_PostProc.py
abagusetty/Uintah
fa1bf819664fa6f09c5a7cd076870a40816d35c9
[ "MIT" ]
3
2020-06-10T08:21:31.000Z
2020-06-23T18:33:16.000Z
src/StandAlone/inputs/MPM/Arenisca/Arenisca3/AreniscaTestSuite_PostProc.py
abagusetty/Uintah
fa1bf819664fa6f09c5a7cd076870a40816d35c9
[ "MIT" ]
null
null
null
src/StandAlone/inputs/MPM/Arenisca/Arenisca3/AreniscaTestSuite_PostProc.py
abagusetty/Uintah
fa1bf819664fa6f09c5a7cd076870a40816d35c9
[ "MIT" ]
2
2019-12-30T05:48:30.000Z
2020-02-12T16:24:16.000Z
#! /usr/bin/env python # -*- coding: utf-8 -*- import os import math import tempfile import numpy as np import subprocess as sub_proc #Plotting stuff below from matplotlib import rc import matplotlib.pyplot as plt from matplotlib import ticker SHOW_ON_MAKE = False #Useful constants sqrtThree = np.sqrt(3.0) twoThirds = 2.0/3.0 threeHalves = 3.0/2.0 #Set matplotlib defaults to desired values #Set the legend to best fit fontSize = 16 markers = None plt.rcParams['legend.loc']='best' #Set font size plt.rcParams['mathtext.it'] = 'serif:bold' plt.rcParams['mathtext.rm'] = 'serif:bold' plt.rcParams['mathtext.sf'] = 'serif:bold' plt.rcParams['font.size']=fontSize plt.rcParams['font.weight']='bold' plt.rcParams['axes.labelsize']='medium' #plt.rcParams['axes.labelweight']='bold' plt.rcParams['legend.fontsize']='medium' #Set linewidth lineWidth = 2 plt.rcParams['lines.linewidth']=lineWidth #Set markersize plt.rcParams['lines.markersize'] = 8 #Set padding for tick labels and size plt.rcParams['xtick.major.pad'] = 12 plt.rcParams['ytick.major.pad'] = 8 plt.rcParams['xtick.major.size'] = 6 plt.rcParams['xtick.minor.size'] = 3 plt.rcParams['ytick.major.size'] = 6 plt.rcParams['ytick.minor.size'] = 3 #resolution plt.rcParams['figure.dpi']=120 font = {'family' : 'serif', 'weight' : 'bold', 'size' : fontSize} rc('font', **font) rc('text', usetex=True) def savePNG(name,size='1920x1080'): res = float(plt.rcParams['figure.dpi']) #Add Check for file already existing as name.png if size == '640x480': size = [640/res,480/res] if size == '1080x768': size = [1080/res,768/res] if size == '1152x768': size = [1152/res,768/res] if size == '1280x854': size = [1280/res,854/res] if size == '1280x960': size = [1280/res,960/res] if size == '1920x1080': size = [1920/res,1080/res] #set the figure size for saving plt.gcf().set_size_inches(size[0],size[1]) #save at speciified resolution plt.savefig(name+'.png', bbox_inches=0, dpi=plt.rcParams['figure.dpi']) def str_to_mathbf(string): #Only works with single spaces no leading space string = string.split() return_string = '' for elem in string: elem = r'$\mathbf{'+elem+'}$' return_string+=elem+' ' return return_string[0:-1] def sign(x,y): if y>=0: return abs(x) else: return -abs(x) def sigma_iso(sigma): return (np.trace(sigma)/3.0)*np.eye(3) def sigma_dev(sigma): return sigma-sigma_iso(sigma) def sigma_I1(sigma): return sigma.trace() def sigma_J2(sigma): return 0.5*np.dot(sigma_dev(sigma),sigma_dev(sigma)).trace() def sigma_J3(sigma): return (1/3.0)*np.dot(np.dot(sigma_dev(sigma),sigma_dev(sigma)),sigma_dev(sigma)).trace() def sigma_mag(sigma): #Returns the magnitude of a second-rank tensor #return np.linalg.norm(sigma) return np.sqrt(DblDot(sigma,sigma)) def DblDot(x,y):#Returns the double inner product of two second-rank tensors val=0 for i in range(0,3): for j in range(0,3): val=val+(x[i][j]*y[i][j]) return val def sigma_tau(sigma): #return sign(np.sqrt(sigma_J2(sigma)),sigma_J3(sigma)) return sign(np.sqrt(sigma_J2(sigma)),sigma_J3(sigma)) def get_ps_and_qs(sigmas): ps = [] qs = [] for sigma in sigmas: qs.append(sign(sqrtThree*np.sqrt(sigma_J2(sigma)),sigma_J3(sigma))) ps.append(sigma_I1(sigma)/3.0) return ps,qs def get_pStress(uda_path): NAN_FAIL = False #Extract stress history print "Extracting stress history..." 
args = ["partextract","-partvar","p.stress",uda_path] F_stress = tempfile.TemporaryFile() #F_stress = open("./tempStressFileOut.txt","w+") #open(os.path.split(uda_path)[0]+'/stressHistory.dat',"w+") tmp = sub_proc.Popen(args,stdout=F_stress,stderr=sub_proc.PIPE) dummy = tmp.wait() print('Done.') #Read file back in F_stress.seek(0) times = [] sigmas = [] for line in F_stress: line = line.strip().split() times.append(float(line[0])) S11 = np.float64(line[4]) S12 = np.float64(line[5]) S13 = np.float64(line[6]) S21 = np.float64(line[7]) S22 = np.float64(line[8]) S23 = np.float64(line[9]) S31 = np.float64(line[10]) S32 = np.float64(line[11]) S33 = np.float64(line[12]) sigmas.append(np.array([[S11,S12,S13],[S21,S22,S23],[S31,S32,S33]])) for i in range(3): for j in range(3): if np.isnan(sigmas[-1][i][j]): NAN_FAIL = True F_stress.close() if NAN_FAIL: print "\nERROR: 'nan's found reading in stress. Will not plot correctly" return times,sigmas def get_pDeformationMeasure(uda_path): NAN_FAIL = False #Extract stress history print "Extracting deformation history..." args = ["partextract","-partvar","p.deformationMeasure",uda_path] F_defMes = tempfile.TemporaryFile() #open(os.path.split(uda_path)[0]+'/stressHistory.dat',"w+") tmp = sub_proc.Popen(args,stdout=F_defMes,stderr=sub_proc.PIPE) dummy = tmp.wait() print('Done.') #Read file back in F_defMes.seek(0) times = [] Fs = [] for line in F_defMes: line = line.strip().split() times.append(float(line[0])) F11 = np.float64(line[4]) F12 = np.float64(line[5]) F13 = np.float64(line[6]) F21 = np.float64(line[7]) F22 = np.float64(line[8]) F23 = np.float64(line[9]) F31 = np.float64(line[10]) F32 = np.float64(line[11]) F33 = np.float64(line[12]) Fs.append(np.array([[F11,F12,F13],[F21,F22,F23],[F31,F32,F33]])) for i in range(3): for j in range(3): if np.isnan(Fs[-1][i][j]): NAN_FAIL = True F_defMes.close() if NAN_FAIL: print "\nERROR: 'nan's found reading in stress. Will not plot correctly" return times,Fs def get_epsilons(uda_path): #Assumes no shear strains times,Fs = get_pDeformationMeasure(uda_path) epsils = [] for F in Fs: epsils.append(np.array([[np.log(F[0][0]),0,0],[0,np.log(F[1][1]),0],[0,0,np.log(F[2][2])]])) return times,epsils def get_pKappa(uda_path): #Extract stress history print "Extracting kappa history..." args = ["partextract","-partvar","p.kappa",uda_path] F_kappa = tempfile.TemporaryFile() #open(os.path.split(uda_path)[0]+'/kappaHistory.dat',"w+") tmp = sub_proc.Popen(args,stdout=F_kappa,stderr=sub_proc.PIPE) dummy = tmp.wait() print('Done.') #Read file back in F_kappa.seek(0) times = [] kappas = [] for line in F_kappa: line = line.strip().split() times.append(float(line[0])) kappas.append(float(line[4])) F_kappa.close() return times,kappas def get_pPlasticStrainVol(uda_path): FAIL_NAN = False #Extract stress history print "Extracting plasticStrainVol history..." args = ["partextract","-partvar","p.evp",uda_path] F_plasticStrainVol = tempfile.TemporaryFile() #open(os.path.split(uda_path)[0]+'/plasticStrainVolHistory.dat',"w+") tmp = sub_proc.Popen(args,stdout=F_plasticStrainVol,stderr=sub_proc.PIPE) dummy = tmp.wait() print('Done.') #Read file back in F_plasticStrainVol.seek(0) times = [] plasticStrainVol = [] for line in F_plasticStrainVol: line = line.strip().split() times.append(float(line[0])) plasticStrainVol.append(np.float64(line[4])) if np.isnan(plasticStrainVol[-1]): FAIL_NAN = True F_plasticStrainVol.close() if FAIL_NAN: print "\ERROR: 'nan' encountered while retrieving p.evp, will not plot correctly." 
return times,plasticStrainVol def get_pElasticStrainVol(uda_path): FAIL_NAN = False #Extract elastic strain history print "Extracting elasticStrainVol history..." args = ["partextract","-partvar","p.eve",uda_path] F_elasticStrainVol = tempfile.TemporaryFile() #open(os.path.split(uda_path)[0]+'/elasticStrainVolHistory.dat',"w+") tmp = sub_proc.Popen(args,stdout=F_elasticStrainVol,stderr=sub_proc.PIPE) dummy = tmp.wait() print('Done.') #Read file back in F_elasticStrainVol.seek(0) times = [] elasticStrainVol = [] for line in F_elasticStrainVol: line = line.strip().split() times.append(float(line[0])) elasticStrainVol.append(np.float64(line[4])) if np.isnan(elasticStrainVol[-1]): FAIL_NAN = True F_elasticStrainVol.close() if FAIL_NAN: print "\ERROR: 'nan' encountered while retrieving p.eve, will not plot correctly." return times,elasticStrainVol def get_totalStrainVol(uda_path): times,plasticStrainVol = get_pPlasticStrainVol(uda_path) times,elasticStrainVol = get_pElasticStrainVol(uda_path) print 'num plastic : ',len(plasticStrainVol) print 'num elastic : ',len(elasticStrainVol) totalStrainVol = np.array(plasticStrainVol)+np.array(elasticStrainVol) return times,totalStrainVol def get_defTable(uda_path,working_dir): #Determine the defTable file try: ups_file = os.path.abspath(uda_path)+'/input.xml.orig' F = open(ups_file,"r") except: ups_file = os.path.abspath(uda_path)+'/input.xml' F = open(ups_file,"r") for line in F: if '<PrescribedDeformationFile>' in line and '</PrescribedDeformationFile>' in line: def_file = line.split('<PrescribedDeformationFile>')[1].split('</PrescribedDeformationFile>')[0].strip() F.close() #Assumes the input deck and uda share the same parent folder. def_file = working_dir+'/'+def_file F = open(def_file,'r') times = [] Fs = [] for line in F: line = line.strip().split() times.append(float(line[0])) Fs.append(np.array([[float(line[1]),float(line[2]),float(line[3])], [float(line[4]),float(line[5]),float(line[6])], [float(line[7]),float(line[8]),float(line[9])]])) F.close() return times,Fs def exp_fmt(x,loc): tmp = format(x,'1.2e').split('e') lead = tmp[0] exp = str(int(tmp[1])) if exp=='0' and lead=='0.00': return r'$\mathbf{0.00}$' else: if int(exp)<10 and int(exp)>0: exp = '+0'+exp elif int(exp)>-10 and int(exp)<0: exp = '-0'+exp.split('-')[1] elif int(exp)>10: exp = '+'+exp return r'$\mathbf{'+lead+r'\cdot{}10^{'+exp+'}}$' def eqShear_vs_meanStress(xs,ys,Xlims=False,Ylims=False,LINE_LABEL='Uintah',GRID=True): ax1 = plt.subplot(111) plt.plot(np.array(xs),np.array(ys),'-r',label=LINE_LABEL) plt.xlabel(str_to_mathbf('Mean Stress, p (Pa)')) plt.ylabel(str_to_mathbf('Equivalent Shear Stress, q, (Pa)')) formatter_int = ticker.FormatStrFormatter('$\mathbf{%g}$') formatter_exp = ticker.FuncFormatter(exp_fmt) ax1.xaxis.set_major_formatter(formatter_exp) ax1.yaxis.set_major_formatter(formatter_exp) if Xlims: ax1.set_xlim(Xlims[0],Xlims[1]) if Ylims: ax1.set_ylim(Ylims[0],Ylims[1]) if GRID: plt.grid(True) return ax1 def get_yield_surface(uda_path): #Reads in FSLOPE, FSLOPE_p, PEAKI1, CR, and P0 #WILL ONLY WORK FOR SINGLE ELEMENT TESTS OR DECKS #HAVING ONLY ONE ARENISCA SPECIFICATION try: ups_file = os.path.abspath(uda_path)+'/input.xml.orig' F_ups = open(ups_file,"r") except: ups_file = os.path.abspath(uda_path)+'/input.xml' F_ups = open(ups_file,"r") check_lines = False already_read = False material_dict = {} for line in F_ups: if '<constitutive_model' in line and 'type' in line and '"Arenisca3"' in line and not(already_read): check_lines = True if check_lines and 
not(already_read): if '<B0>' in line: material_dict['B0'] = float(line.split('<B0>')[1].split('</B0>')[0].strip()) if '<G0>' in line: material_dict['G0'] = float(line.split('<G0>')[1].split('</G0>')[0].strip()) if '<FSLOPE>' in line: material_dict['FSLOPE'] = float(line.split('<FSLOPE>')[1].split('</FSLOPE>')[0].strip()) if '<PEAKI1>' in line: material_dict['PEAKI1'] = float(line.split('<PEAKI1>')[1].split('</PEAKI1>')[0].strip()) if '<STREN>' in line: material_dict['STREN'] = float(line.split('<STREN>')[1].split('</STREN>')[0].strip()) if '<YSLOPE>' in line: material_dict['YSLOPE'] = float(line.split('<YSLOPE>')[1].split('</YSLOPE>')[0].strip()) if '<CR>' in line: material_dict['CR'] = float(line.split('<CR>')[1].split('</CR>')[0].strip()) if '<p0_crush_curve>' in line: material_dict['P0'] = float(line.split('<p0_crush_curve>')[1].split('</p0_crush_curve>')[0].strip()) if '<p1_crush_curve>' in line: material_dict['P1'] = float(line.split('<p1_crush_curve>')[1].split('</p1_crush_curve>')[0].strip()) if '<p3_crush_curve>' in line: material_dict['P3'] = float(line.split('<p3_crush_curve>')[1].split('</p3_crush_curve>')[0].strip()) if '<fluid_B0>' in line: material_dict['fluid_B0'] = float(line.split('<fluid_B0>')[1].split('</fluid_B0>')[0].strip()) if '<fluid_pressure_initial>' in line: material_dict['P_f0'] = float(line.split('<fluid_pressure_initial>')[1].split('</fluid_pressure_initial>')[0].strip()) if '<subcycling_characteristic_number>' in line: material_dict['subcycling char num'] = float(line.split('<subcycling_characteristic_number>')[1].split('</subcycling_characteristic_number>')[0].strip()) if '<T1_rate_dependence>' in line: material_dict['T1'] = float(line.split('<T1_rate_dependence>')[1].split('</T1_rate_dependence>')[0].strip()) if '<T2_rate_dependence>' in line: material_dict['T2'] = float(line.split('<T2_rate_dependence>')[1].split('</T2_rate_dependence>')[0].strip()) if '</constitutive_model>' in line: already_read = True check_lines = False F_ups.close() PRINTOUT = False if PRINTOUT: print '--Material Specification--' for key in material_dict: print key,':',material_dict[key] #tmp_string = r'$\mathbf{\underline{Material}}$'+' '+r'$\mathbf{\underline{Properties:}}$'+'\n' tmp_string = r'$\mathbf{\underline{Material\phantom{1}Properties:}}$'+'\n' key_list = material_dict.keys() key_list.sort() for key in key_list: if '_' in key: tmp = key.split('_') tmp = str_to_mathbf(tmp[0]+'_'+'{'+tmp[1]+'}') tmp_string += tmp+str_to_mathbf(' = ')+str_to_mathbf(format(material_dict[key],'1.3e'))+'\n' else: tmp = key if key == 'subcycling char num': tmp_string += str_to_mathbf(tmp+' = '+format(material_dict[key],'4.1f'))+'\n' else: tmp_string += str_to_mathbf(tmp+' = '+format(material_dict[key],'1.3e'))+'\n' material_dict['material string'] = tmp_string[0:-1] if PRINTOUT: print tmp_string return material_dict def get_kappa(PEAKI1,P0,CR): PEAKI1,P0,CR kappa = PEAKI1-CR*(PEAKI1-P0) return kappa def get_rs(nPoints,FSLOPE,PEAKI1,P0,CR): kappa = get_kappa(PEAKI1,P0,CR) I1s = np.linspace(PEAKI1,P0,nPoints) rs = [] for I1 in I1s: inner_root = (1.0-(pow(kappa-I1,2.0)/pow(kappa-P0,2.0))) r = FSLOPE*(I1-PEAKI1)*np.sqrt(2.0*inner_root) rs.append(r) return I1s,rs def I1_to_zbar(I1s): sqrt_3 = np.sqrt(3.0) if type(I1s) in [list,np.ndarray]: zbars = [] for I1 in I1s: zbars.append(-I1/sqrt_3) return zbars elif type(I1s) in [int,float,np.float64]: return -I1s/sqrt_3 else: print '\nERROR: cannot compute zbar from I1. 
Invalid type.\n\ttype(I1)\t:\t',type(I1s) return None def plot_crush_curve(uda_path,I1lims=[-10000,0]): nPoints = 500 material_dict = get_yield_surface(uda_path) P0 = material_dict['P0'] P1 = material_dict['P1'] P3 = material_dict['P3'] # Analytical solution for porosity vs. X for piece-wise crush curve # compression I1sC = np.linspace(I1lims[0],P0,nPoints) porosityC = 1-np.exp(-P3*np.exp(P1*(I1sC-P0))) plt.plot(I1sC,porosityC,'--g',linewidth=lineWidth+1,label='Analytical crush curve - Compression') plt.hold(True) # tension I1sT = np.linspace(P0,I1lims[1],nPoints) porosityT = 1-np.exp(-(I1sT/P0)**(P0*P1*P3)-P3+1) plt.plot(I1sT,porosityT,'--b',linewidth=lineWidth+1,label='Analytical crush curve - Tension') def plot_yield_surface_OLD(uda_path): nPoints = 500 material_dict = get_yield_surface(uda_path) FSLOPE = material_dict['FSLOPE'] #FSLOPE_p = material_dict['FSLOPE'] PEAKI1 = material_dict['PEAKI1'] CR = material_dict['CR'] P0 = material_dict['P0'] I1s,rs = get_rs(nPoints,FSLOPE,PEAKI1,P0,CR) zbars = I1_to_zbar(I1s) #WTF? for i in range(len(rs)): rs[i] = -rs[i] #print zbars #print rs plt.plot(np.array(I1s)/3.0,rs,'--k',linewidth=lineWidth+1,label='Initial Yield Surface') plt.plot(np.array(I1s)/3.0,-np.array(rs),'--k',linewidth=lineWidth+1) def J2VM(epsil_dot,dt,sig_Beg,K,G,tau_y): #J2 plasticity Von misses material model for 3D #Inputs: epsil_dot, dt, sig_Beg, K, G, tau_y #Outputs: epsil_Elastic_dot, epsil_Plastic_dot, sig_End #Initialize the trial stress state sig_Trial = sig_Beg+((2*G*sigma_dev(epsil_dot))+3*K*sigma_iso(epsil_dot))*dt #Determine if this is below, on, or above the yeild surface test = sigma_mag(sigma_dev(sig_Trial))/(np.sqrt(2.0)*tau_y) if test<=1: #Stress state is elastic sig_End = sig_Trial epsil_Plastic_dot = np.zeros((3,3)) epsil_Elastic_dot = epsil_dot elif test>1: #Stress state elastic-plastic sig_End = (sigma_dev(sig_Trial)/test)#+sigma_iso(sig_Trial) #Evaluate the consistent stress rate #sig_dot = (sig_End-sig_Beg)/test #Apply hookes law to get the elastic strain rate #epsil_Elastic_dot = sigma_dev(sig_dot)/(2*G)# + sigma_iso(sig_dot)/(3*K) #Apply strain rate decomposition relationship to get plastic strain rate #epsil_Plastic_dot = epsil_dot-epsil_Elastic_dot #Determine the equivalent stress and equivalent plastic strain rate #sig_Eq = np.sqrt(3/2)*sigma_mag(sigma_dev(sig_End)) #epsil_Plastic_dot_Eq = np.sqrt(3/2)*sigma_mag(sigma_dev(epsil_Plastic_dot)) #ans={'Elastic dot':epsil_Elastic_dot,'Plastic dot':epsil_Plastic_dot,'Stress State':sig_End} return sig_End def defTable_to_J2Solution(def_times,Fs,bulk_mod,shear_mod,tau_yield,num_substeps=1000): #Assumes: print 'Solving for analytical solution...' 
analytical_epsils = [np.array([[0,0,0],[0,0,0],[0,0,0]])] analytical_sigmas = [np.array([[0,0,0],[0,0,0],[0,0,0]])] analytical_times = [def_times[0]] epsils = [] for F in Fs: epsils.append(np.array([[np.log(sum(F[0])),0,0],[0,np.log(sum(F[1])),0],[0,0,np.log(sum(F[2]))]])) for leg in range(len(def_times)-1): t_start = def_times[leg] leg_delT = def_times[leg+1]-t_start leg_sub_delT = float(leg_delT)/float(num_substeps) leg_del_epsil = (epsils[leg+1]-epsils[leg]) leg_epsil_dot = leg_del_epsil/leg_delT for i in range(num_substeps): t_now = t_start+float(i)*leg_sub_delT analytical_times.append(t_now) analytical_sigmas.append(J2VM(leg_epsil_dot,leg_sub_delT,analytical_sigmas[-1],bulk_mod,shear_mod,tau_yield)) analytical_epsils.append(analytical_epsils[-1]+(leg_epsil_dot*leg_sub_delT)) analytical_epsils.append(analytical_epsils[-1]+(leg_epsil_dot*leg_sub_delT)) analytical_sigmas.append(J2VM(leg_epsil_dot,leg_sub_delT,analytical_sigmas[-1],bulk_mod,shear_mod,tau_yield)) analytical_times.append(def_times[-1]) print 'Done.' return analytical_times,analytical_sigmas,analytical_epsils def J2_at_Yield(uda_path): material_dict = get_yield_surface(uda_path) B0 = material_dict['B0'] G0 = material_dict['G0'] FSLOPE = material_dict['FSLOPE'] #FSLOPE_p = material_dict['FSLOPE'] PEAKI1 = material_dict['PEAKI1'] CR = material_dict['CR'] P0 = material_dict['P0'] P1 = material_dict['P1'] P3 = material_dict['P3'] fluid_B0 = material_dict['fluid_B0'] Pf0 = material_dict['P_f0'] subcyc_char_num = material_dict['subcycling char num'] #hardening_const = material_dict['hardening_constant'] kappa_initial = get_kappa(PEAKI1,P0,CR) I1 = 0 I1_plus3Pf0 = I1+3.0*Pf0 if I1_plus3Pf0 >= kappa_initial and I1_plus3Pf0<= PEAKI1: J2 = (FSLOPE**2)*((I1-PEAKI1+3.0*Pf0)**2) elif I1_plus3Pf0 >= P0 and I1_plus3Pf0 < kappa_initial: J2 = ((FSLOPE**2)*((I1-PEAKI1+3.0*Pf0)**2))*(1.0-((I1+CR*FSLOPE*I1-P0-CR*FSLOPE*PEAKI1+3.0*Pf0+3.0*CR*FSLOPE*Pf0)**2/((CR**2)*(FSLOPE**2)*(P0-PEAKI1)**2))) else: J2 = 0.0 return J2 def plot_yield_surface(uda_path,PLOT_TYPE='J2_vs_I1'): num_points = 500 material_dict = get_yield_surface(uda_path) B0 = material_dict['B0'] G0 = material_dict['G0'] FSLOPE = material_dict['FSLOPE'] PEAKI1 = material_dict['PEAKI1'] CR = material_dict['CR'] P0 = material_dict['P0'] P1 = material_dict['P1'] P3 = material_dict['P3'] fluid_B0 = material_dict['fluid_B0'] Pf0 = material_dict['P_f0'] kappa_initial = get_kappa(PEAKI1,P0,CR) I1s = np.linspace(P0-3.0*Pf0,PEAKI1-3.0*Pf0,num_points) #print 'Region 1:: ','I1 >= kappa initial-3.0*Pf0 : ',kappa_initial-3.0*Pf0,' ','I1 <= PEAKI1-3*Pf0 : ',PEAKI1-3.0*Pf0 #print 'Region 2:: ','I1 >= P0-3*Pf0 : ',P0-3.0*Pf0,' ','I1 < kappa_initial-3*Pf0 : ',kappa_initial-3.0*Pf0 #print 'Region 3:: Not Region 1 or 2' #J2 versus I1 J2s = [] PLOT = True for I1 in I1s: I1_plus3Pf0 = I1+3.0*Pf0 if I1_plus3Pf0 >= kappa_initial and I1_plus3Pf0<= PEAKI1: J2 = (FSLOPE**2)*((I1-PEAKI1+3.0*Pf0)**2) elif I1_plus3Pf0 >= P0 and I1_plus3Pf0 < kappa_initial: Ff = FSLOPE*(PEAKI1-I1_plus3Pf0) fc = np.sqrt(1.0 - ( (kappa_initial-I1_plus3Pf0)/(kappa_initial-P0) )**2) J2 = (Ff*fc)**2 else: J2 = 0.0 J2s.append(J2) if PLOT_TYPE == 'J2_vs_I1': xs = I1s ys = np.array(J2s) elif PLOT_TYPE == 'sqrtJ2_vs_I1': xs = I1s ys = np.sqrt(np.array(J2s)) elif PLOT_TYPE == 'r_vs_z': xs = np.array(I1s)/np.sqrt(3.0) ys = np.sqrt(2.0*np.array(J2s)) elif PLOT_TYPE == 'q_vs_I1': xs = I1s ys = np.sqrt(3.0*np.array(J2s)) elif PLOT_TYPE == 'q_vs_p': xs = np.array(I1s)/3.0 ys = np.sqrt(3.0*np.array(J2s)) else: PLOT = False print '\nError: invalid 
plot type specified for initial yield surface plot.\n\tPLOT_TYPE:',PLOT_TYPE if PLOT: plt.plot(xs,ys,'--k',linewidth=lineWidth+1,label='Initial Yield Surface') plt.plot(xs,-ys,'--k',linewidth=lineWidth+1) def test_yield_surface(uda_path): plot_yield_surface_2(uda_path,'J2_vs_I1') plt.show() plot_yield_surface_2(uda_path,'sqrtJ2_vs_I1') plt.show() plot_yield_surface_2(uda_path,'r_vs_z') plt.show() plot_yield_surface_2(uda_path,'q_vs_I1') plt.show() plot_yield_surface_2(uda_path,'q_vs_p') plt.show() ### ---------- # Test Methods Below ### ---------- def test01_postProc(uda_path,save_path,**kwargs): print "Post Processing Test: 01 - Uniaxial Compression With Rotation" times,sigmas = get_pStress(uda_path) material_dict = get_yield_surface(uda_path) Sxx = [] Syy = [] for sigma in sigmas: Sxx.append(sigma[0][0]) Syy.append(sigma[1][1]) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') plt.figure(1) plt.clf() plt.subplots_adjust(right=0.75) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') #Syy ax2 = plt.subplot(212) #without rotation plt.plot([0,1],[0,0],'-b') #simulation results plt.plot(times,Syy,'-r') #guide line plt.plot([0,1],[0,-60],'--g') #labels and limits ax2.set_ylim(-70,10) plt.grid(True) ax2.xaxis.set_major_formatter(formatter) ax2.yaxis.set_major_formatter(formatter) plt.ylabel(str_to_mathbf('\sigma_{yy} (Pa)')) plt.xlabel(str_to_mathbf('Time (s)')) #Sxx ax1 = plt.subplot(211,sharex=ax2,sharey=ax2) plt.setp(ax1.get_xticklabels(), visible=False) #without rotation plt.plot([0,1],[0,-60],'-b',label='No rotation') #simulation results plt.plot(times,Sxx,'-r',label='Uintah') #guide lines plt.plot([0,1],[0,0],'--g',label='Guide lines') #labels ax1.set_ylim(-70,10) plt.grid(True) ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) ax1.set_yticks([0,-20,-40,-60]) plt.ylabel(str_to_mathbf('\sigma_{xx} (Pa)')) plt.title('AreniscaTest 01:\nUniaxial Compression With Rotation') plt.legend() savePNG(save_path+'/Test01_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test02_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 02 - Vertex Treatment" times,sigmas = get_pStress(uda_path) ps,qs = get_ps_and_qs(sigmas) Sxx = [] Syy = [] Szz = [] for sigma in sigmas: Sxx.append(sigma[0][0]) Syy.append(sigma[1][1]) Szz.append(sigma[2][2]) #Analytical Solutions #Drucker-Prager constants r0 = 50.0 z0 = 50.0*sqrtThree #Solution From Brannon Leelavanichkul paper analytical_times = [0,1,threeHalves,2.0,5.0/2.0,3.0] analytical_S11 = np.array([0,-850.0/3.0,(-50.0/3.0)*(9.0+4.0*np.sqrt(6.0)),(-50.0/3.0)*(9.0+4.0*np.sqrt(6.0)),(50.0/3.0)*(2.0*np.sqrt(6)-3.0),160.0*np.sqrt(twoThirds)-110.0]) analytical_S22 = np.array([0,-850.0/3.0,(50.0/3.0)*(2.0*np.sqrt(6.0)-9.0),(50.0/3.0)*(2.0*np.sqrt(6.0)-9.0),(-50.0/3.0)*(3.0+np.sqrt(6.0)),(-10.0/3.0)*(33.0+8.0*np.sqrt(6.0))]) analytical_S33 = np.array([0,-850.0/3.0,(50.0/3.0)*(2.0*np.sqrt(6.0)-9.0),(50.0/3.0)*(2.0*np.sqrt(6.0)-9.0),(-50.0/3.0)*(3.0+np.sqrt(6.0)),(-10.0/3.0)*(33.0+8.0*np.sqrt(6.0))]) analytical_mean = (analytical_S11+analytical_S22+analytical_S33)/3.0 analytical_I1 = analytical_S11+analytical_S22+analytical_S33 tmp = (1.0/3.0)*analytical_I1 analytical_s1 = analytical_S11-tmp analytical_s2 = analytical_S22-tmp analytical_s3 = analytical_S33-tmp analytical_J2 = (1.0/2.0)*(pow(analytical_s1,2)+pow(analytical_s2,2)+pow(analytical_s3,2)) analytical_J3 = 
(1.0/3.0)*(pow(analytical_s1,3)+pow(analytical_s2,3)+pow(analytical_s3,3)) analytical_z = analytical_I1/sqrtThree analytical_q = [] for idx,J2 in enumerate(analytical_J2): J3 = analytical_J3[idx] analytical_q.append(sign(sqrtThree*np.sqrt(J2),J3)) #Drucker-Prager yield surface yield_zs = np.array([z0,min(analytical_z)]) yield_rs = r0/z0*((get_yield_surface(uda_path)['PEAKI1']/sqrtThree)-yield_zs) yield_ps = yield_zs*(sqrtThree/3.0) yield_qs = yield_rs*np.sqrt(threeHalves) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a plt.figure(1) plt.clf() plt.subplot(111) plt.subplots_adjust(right=0.75) material_dict = get_yield_surface(uda_path) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') plt.plot(analytical_mean,analytical_q,'-g',linewidth=lineWidth+1,label='Analytical') plt.plot(yield_ps,yield_qs,'--k',linewidth=lineWidth+2,label='Yield surface') plt.plot(yield_ps,-yield_qs,'--k',linewidth=lineWidth+2) eqShear_vs_meanStress(ps,qs,(-300,60),(-300,300)) plt.title('AreniscaTest 02:\nVertex Treatment (plot a)') plt.legend() savePNG(save_path+'/Test02_verificationPlot_a','1280x960') ##Plot b plt.figure(2) plt.clf() plt.subplots_adjust(right=0.75) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') endT = max(times) #Sigma zz ax3 = plt.subplot(313) plt.plot(analytical_times,analytical_S33,'-g',linewidth=lineWidth+2) plt.plot(times,np.array(Szz),'-r') #Add Yield Surface #Add Analytical plt.xlabel(str_to_mathbf('Time (s)')) plt.ylabel(str_to_mathbf('\sigma_{zz} (Pa)')) ax3.yaxis.set_major_formatter(formatter) ax3.set_xlim(0,endT) ax3.set_ylim(-300,100) ax3.set_yticks([-300,-200,-100,0,100]) plt.grid(True) #Sigma xx ax1 = plt.subplot(311,sharex=ax3) plt.plot(analytical_times,analytical_S11,'-g',linewidth=lineWidth+2,label='Analytical') plt.plot(times,np.array(Sxx),'-r',label='Uintah') #Add Yield Surface #Add Analytical plt.legend() plt.setp(ax1.get_xticklabels(), visible=False) plt.ylabel(str_to_mathbf('\sigma_{xx} (Pa)')) plt.title('AreniscaTest 02:\nVertex Treatment (plot b)') ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) ax1.set_xlim(0,endT) ax1.set_ylim(-400,100) ax1.set_yticks([-400,-300,-200,-100,0,100]) plt.grid(True) #Sigma yy ax2 = plt.subplot(312,sharex=ax3) plt.plot(analytical_times,analytical_S22,'-g',linewidth=lineWidth+2) plt.plot(times,np.array(Syy),'-r') #Add Yield Surface #Add Analytical plt.setp(ax2.get_xticklabels(), visible=False) plt.ylabel(str_to_mathbf('\sigma_{yy} (Pa)')) ax2.yaxis.set_major_formatter(formatter) ax2.set_xlim(0,endT) ax2.set_ylim(-300,100) ax2.set_yticks([-300,-200,-100,0,100]) plt.grid(True) savePNG(save_path+'/Test02_verificationPlot_b','1280x960') if SHOW_ON_MAKE: plt.show() def test03_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 03 - Uniaxial Strain Without Hardening" times,sigmas = get_pStress(uda_path) ps,qs = get_ps_and_qs(sigmas) material_dict = get_yield_surface(uda_path) PEAKI1 = material_dict['PEAKI1'] J2Yield = J2_at_Yield(uda_path) q_yield = np.sqrt(3.0*J2Yield) #print 'J2Yield : ',J2Yield #print 'q_yield : ',q_yield ###PLOTTING Xlims = (-450,50) Ylims = (-100,100) formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) material_dict = get_yield_surface(uda_path) param_text = material_dict['material string'] 
plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(ps,qs,Xlims,Ylims,) plt.title('AreniscaTest 03:\nUniaxial Strain Without Hardening') plt.plot(Xlims,(q_yield,q_yield),'--k',linewidth=lineWidth+1,label='Initial yield surface') plt.plot(Xlims,(-q_yield,-q_yield),'--k',linewidth=lineWidth+1) ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) plt.legend() savePNG(save_path+'/Test03_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test04_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 04 - Curved Yield Surface" times,sigmas = get_pStress(uda_path) ps,qs = get_ps_and_qs(sigmas) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) material_dict = get_yield_surface(uda_path) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(ps,qs,(-700,300),(-200,200)) plt.title('AreniscaTest 04:\nCurved Yield Surface') plot_yield_surface(uda_path,'q_vs_p') ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) #Add Analytical plt.legend() savePNG(save_path+'/Test04_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test05_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 05 - Hydrostatic Compression Fixed Cap" times,sigmas = get_pStress(uda_path) ps,qs = get_ps_and_qs(sigmas) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) material_dict = get_yield_surface(uda_path) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(ps,qs,(-700,300),(-200,200)) plt.title('AreniscaTest 05:\nHydrostatic Compression Fixed Cap') plot_yield_surface(uda_path,'q_vs_p') ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) #Add Analytical plt.legend() savePNG(save_path+'/Test05_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test06_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 06 - Uniaxial Strain Cap Evolution" times,sigmas = get_pStress(uda_path) ps,qs = get_ps_and_qs(sigmas) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) material_dict = get_yield_surface(uda_path) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(ps,qs,(-800,300),(-200,200)) plt.title('AreniscaTest 06:\nUniaxial Strain Cap Evolution') plot_yield_surface(uda_path,'q_vs_p') ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) #Add Analytical plt.legend() savePNG(save_path+'/Test06_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test07_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 07 - Hydrostatic Compression with Fixed Cap" times,sigmas = get_pStress(uda_path) I1s = [] for sigma in sigmas: I1s.append(sigma_I1(sigma)) times,plasticStrainVol = get_pPlasticStrainVol(uda_path) material_dict = get_yield_surface(uda_path) P3 = material_dict['P3'] porosity = 1-np.exp(-(P3+np.array(plasticStrainVol))) ###PLOTTING formatter = 
ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a I1lims = (-8000,0) plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') ax1=eqShear_vs_meanStress(I1s,porosity,I1lims,(0,0.6)) plt.title('AreniscaTest 07:\nHydrostatic Compression with Fixed Cap') plt.ylabel(str_to_mathbf('Porosity')) plt.xlabel(str_to_mathbf('I_{1}:first invariant of stress tensor (Pa)')) plot_crush_curve(uda_path,I1lims) #ax1.set_xticks([-9000,-7000,-5000,-3000,-1000,0]) ax1.set_xticks([-8000,-6000,-4000,-2000,0]) ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) plt.legend() savePNG(save_path+'/Test07_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() def test08_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 08 - Loading/Unloading" times,sigmas = get_pStress(uda_path) I1s = [] ps = [] for sigma in sigmas: I1s.append(sigma_I1(sigma)) ps.append(sigma_I1(sigma)/3.0) times,plasticStrainVol = get_pPlasticStrainVol(uda_path) times,elasticStrainVol = get_pElasticStrainVol(uda_path) totalStrainVol = np.array(elasticStrainVol)+np.array(plasticStrainVol) material_dict = get_yield_surface(uda_path) P3 = material_dict['P3'] porosity = 1-np.exp(-(P3+np.array(plasticStrainVol))) ###PLOTTING int_formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') exp_formatter = ticker.FuncFormatter(exp_fmt) ##Plot a plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') ax1=eqShear_vs_meanStress(times,-np.array(ps),(0,3.5),(-500,2000)) plt.title('AreniscaTest 08:\nLoading/Unloading (plot a)') plt.ylabel(str_to_mathbf('Pressure (Pa)')) plt.xlabel(str_to_mathbf('Time (s)')) ax1.xaxis.set_major_formatter(int_formatter) ax1.yaxis.set_major_formatter(exp_formatter) ax1.tick_params(axis='both',labelsize='small') savePNG(save_path+'/Test08_verificationPlot_a','1280x960') ##Plot b plt.figure(2) plt.clf() ax2 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') ax1=eqShear_vs_meanStress(times,totalStrainVol,(0,3.5),(-0.8,0.8)) plt.title('AreniscaTest 08:\nLoading/Unloading (plot b)') plt.ylabel(str_to_mathbf('Total Volumetric Strain, \epsilon_{v}')) plt.xlabel(str_to_mathbf('Time (s)')) ax2.xaxis.set_major_formatter(int_formatter) ax2.yaxis.set_major_formatter(int_formatter) ax2.tick_params(axis='both',labelsize='small') savePNG(save_path+'/Test08_verificationPlot_b','1280x960') ##Plot c I1lims = (-10000,0) plt.figure(3) plt.clf() ax3 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(I1s,porosity,I1lims,(0,1.25)) plt.title('AreniscaTest 08:\nLoading/Unloading (plot c)') plt.ylabel(str_to_mathbf('Porosity')) plt.xlabel(str_to_mathbf('I_{1}:first invariant of stress tensor (Pa)')) plot_crush_curve(uda_path,I1lims) #ax1.set_xticks([-9000,-7000,-5000,-3000,-1000,0]) ax3.set_xticks([-10000,-7500,-5000,-2500,0,1000]) ax3.set_yticks([0,0.2,0.4,0.6,0.8,1.0]) ax3.xaxis.set_major_formatter(exp_formatter) ax3.yaxis.set_major_formatter(int_formatter) ax3.tick_params(axis='both',labelsize='small') plt.legend() 
savePNG(save_path+'/Test08_verificationPlot_c','1280x960') if SHOW_ON_MAKE: plt.show() def test09_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 09 - Fluid Filled Pore Space" times,sigmas = get_pStress(uda_path) I1s = [] ps = [] for sigma in sigmas: I1s.append(sigma_I1(sigma)) ps.append(sigma_I1(sigma)/3.0) times,plasticStrainVol = get_pPlasticStrainVol(uda_path) times,elasticStrainVol = get_pElasticStrainVol(uda_path) totalStrainVol = np.array(elasticStrainVol)+np.array(plasticStrainVol) material_dict = get_yield_surface(uda_path) P3 = material_dict['P3'] porosity = 1-np.exp(-(P3+np.array(plasticStrainVol))) ###PLOTTING int_formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') exp_formatter = ticker.FuncFormatter(exp_fmt) ##Plot a plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') ax1=eqShear_vs_meanStress(times,-np.array(ps),(0,3.5),(-500,2000)) plt.title('AreniscaTest 09:\nFluid EFfects (plot a)') plt.ylabel(str_to_mathbf('Pressure (Pa)')) plt.xlabel(str_to_mathbf('Time (s)')) ax1.xaxis.set_major_formatter(int_formatter) ax1.yaxis.set_major_formatter(exp_formatter) ax1.tick_params(axis='both',labelsize='small') savePNG(save_path+'/Test09_verificationPlot_a','1280x960') ##Plot b plt.figure(2) plt.clf() ax2 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') ax1=eqShear_vs_meanStress(times,totalStrainVol,(0,3.5),(-0.8,0.8)) plt.title('AreniscaTest 09:\nFluid EFfects (plot b)') plt.ylabel(str_to_mathbf('Total Volumetric Strain, \epsilon_{v}')) plt.xlabel(str_to_mathbf('Time (s)')) ax2.xaxis.set_major_formatter(int_formatter) ax2.yaxis.set_major_formatter(int_formatter) ax2.tick_params(axis='both',labelsize='small') savePNG(save_path+'/Test09_verificationPlot_b','1280x960') ##Plot c I1lims = (-10000,0) plt.figure(3) plt.clf() ax3 = plt.subplot(111) plt.subplots_adjust(right=0.75,left=0.15) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(I1s,porosity,I1lims,(0,1.25)) plt.title('AreniscaTest 09:\nFluid EFfects (plot c)') plt.ylabel(str_to_mathbf('Porosity')) plt.xlabel(str_to_mathbf('I_{1}:first invariant of stress tensor (Pa)')) plot_crush_curve(uda_path,I1lims) #ax1.set_xticks([-9000,-7000,-5000,-3000,-1000,0]) ax3.set_xticks([-10000,-7500,-5000,-2500,0,1000]) ax3.set_yticks([0,0.2,0.4,0.6,0.8,1.0]) ax3.xaxis.set_major_formatter(exp_formatter) ax3.yaxis.set_major_formatter(int_formatter) ax3.tick_params(axis='both',labelsize='small') plt.legend() savePNG(save_path+'/Test09_verificationPlot_c','1280x960') if SHOW_ON_MAKE: plt.show() def test10_postProc(uda_path,save_path,**kwargs): if 'WORKING_PATH' in kwargs: working_dir = kwargs['WORKING_PATH'] #Extract stress history print "Post Processing Test: 10 - Transient Stress Eigenvalues with Constant Eigenvectors" times,sigmas = get_pStress(uda_path) Sxx = [] Syy = [] Szz = [] for sigma in sigmas: Sxx.append(sigma[0][0]) Syy.append(sigma[1][1]) Szz.append(sigma[2][2]) #Analytical solution material_dict = get_yield_surface(uda_path) def_times,Fs = get_defTable(uda_path,working_dir) tau_yield = material_dict['PEAKI1']/1e10 bulk_mod = material_dict['B0'] shear_mod = material_dict['G0'] 
analytical_times,analytical_sigmas,epsils=defTable_to_J2Solution(def_times,Fs,bulk_mod,shear_mod,tau_yield,num_substeps=10) analytical_Sxx = [] analytical_Syy = [] analytical_Szz = [] for sigma in analytical_sigmas: analytical_Sxx.append(sigma[0][0]) analytical_Syy.append(sigma[1][1]) analytical_Szz.append(sigma[2][2]) ###PLOTTING plt.figure(1) plt.clf() ax1 = plt.subplot(111) if BIG_FIGURE: plt.subplots_adjust(right=0.75) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') else: plt.subplots_adjust(left=0.15,top=0.96,bottom=0.15,right=0.96) #analytical solution plt.plot(analytical_times,np.array(analytical_Sxx)/1e6,':r',linewidth=lineWidth+2,label=str_to_mathbf('Analytical \sigma_{xx}')) plt.plot(analytical_times,np.array(analytical_Syy)/1e6,'--g',linewidth=lineWidth+2,label=str_to_mathbf('Analytical \sigma_{yy}')) plt.plot(analytical_times,np.array(analytical_Szz)/1e6,'-.b',linewidth=lineWidth+2,label=str_to_mathbf('Analytical \sigma_{zz}')) #simulation results plt.plot(times,np.array(Sxx)/1e6,'-r',label=str_to_mathbf('Uintah \sigma_{xx}')) plt.plot(times,np.array(Syy)/1e6,'-g',label=str_to_mathbf('Uintah \sigma_{yy}')) plt.plot(times,np.array(Szz)/1e6,'-b',label=str_to_mathbf('Uintah \sigma_{zz}')) ax1.set_xlim(0,2.25) ax1.xaxis.set_major_formatter(formatter_int) ax1.yaxis.set_major_formatter(formatter_int) #labels plt.grid(True) plt.xlabel(str_to_mathbf('Time (s)')) plt.ylabel(str_to_mathbf('Stress (MPa)')) if BIG_FIGURE: plt.legend(loc='upper right', bbox_to_anchor=(1.38,1.12)) plt.title('AreniscaTest 10:\nTransient Stress Eigenvalues with Constant Eigenvectors') saveIMG(save_path+'/Test10_verificationPlot','1280x960') else: tmp = plt.rcParams['legend.fontsize'] plt.rcParams['legend.fontsize']='x-small' plt.legend(loc=7) savePNG(save_path+'/Test10_verificationPlot','640x480') plt.rcParams['legend.fontsize']=tmp if SHOW_ON_MAKE: plt.show() else: print '\nERROR: need working directory to post process this problem' def test11_postProc(uda_path,save_path,**kwargs): if 'WORKING_PATH' in kwargs: working_dir = kwargs['WORKING_PATH'] #Extract stress and strain history print "Post Processing Test: 11 - Uniaxial Strain J2 Plasticity" times,sigmas = get_pStress(uda_path) times,epsils = get_epsilons(uda_path) exx = [] eyy = [] ezz = [] for epsil in epsils: exx.append(epsil[0][0]) eyy.append(epsil[1][1]) ezz.append(epsil[2][2]) Sxx = [] Syy = [] Szz = [] for sigma in sigmas: Sxx.append(sigma[0][0]) Syy.append(sigma[1][1]) Szz.append(sigma[2][2]) #Analytical solution material_dict = get_yield_surface(uda_path) def_times,Fs = get_defTable(uda_path,working_dir) tau_yield = material_dict['PEAKI1']*material_dict['FSLOPE'] #tau_yield = material_dict['PEAKI1'] bulk_mod = material_dict['B0'] shear_mod = material_dict['G0'] analytical_times,analytical_sigmas,epsils=defTable_to_J2Solution(def_times,Fs,bulk_mod,shear_mod,tau_yield,num_substeps=1000) analytical_e11 = [] analytical_e22 = [] analytical_e33 = [] for epsil in epsils: analytical_e11.append(epsil[0][0]) analytical_e22.append(epsil[1][1]) analytical_e33.append(epsil[2][2]) analytical_Sxx = [] analytical_Syy = [] analytical_Szz = [] for sigma in analytical_sigmas: analytical_Sxx.append(sigma[0][0]) analytical_Syy.append(sigma[1][1]) analytical_Szz.append(sigma[2][2]) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) ax1.xaxis.set_major_formatter(formatter) 
ax1.yaxis.set_major_formatter(formatter) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') plt.title('AreniscaTest 11:\nUniaxial Strain J2 Plasticity (plot a)') plt.plot(np.array(analytical_e11),np.array(analytical_Sxx)/1e6,'--g',linewidth=lineWidth+1,label=str_to_mathbf('Analytical')) plt.plot(np.array(exx),np.array(Sxx)/1e6,'-r',label=str_to_mathbf('Uintah')) plt.xlabel(str_to_mathbf('\epsilon_{A}')) plt.ylabel(str_to_mathbf('\sigma_{A} (Mpa)')) plt.legend() savePNG(save_path+'/Test11_verificationPlot_a','1280x960') plt.figure(2) plt.clf() ax2 = plt.subplot(111) plt.subplots_adjust(right=0.75) ax2.xaxis.set_major_formatter(formatter) ax2.yaxis.set_major_formatter(formatter) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') plt.title('AreniscaTest 11:\nUniaxial Strain J2 Plasticity (plot b)') plt.plot(np.array(analytical_e11),np.array(analytical_Syy)/1e6,'--g',linewidth=lineWidth+1,label=str_to_mathbf('Analytical')) plt.plot(np.array(exx),np.array(Syy)/1e6,'-r',label=str_to_mathbf('Uintah')) plt.xlabel(str_to_mathbf('\epsilon_{A}')) plt.ylabel(str_to_mathbf('\sigma_{L} (Mpa)')) plt.legend() savePNG(save_path+'/Test11_verificationPlot_b','1280x960') plt.figure(3) plt.clf() ax3 = plt.subplot(111) plt.subplots_adjust(right=0.75) ax3.xaxis.set_major_formatter(formatter) ax3.yaxis.set_major_formatter(formatter) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') plt.title('AreniscaTest 11:\nUniaxial Strain J2 Plasticity (plot c)') plt.plot(analytical_times,np.array(analytical_e11),'-g',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \epsilon_{xx}')) plt.plot(analytical_times,np.array(analytical_e22),'-r',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \epsilon_{yy}')) plt.plot(analytical_times,np.array(analytical_e33),'-b',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \epsilon_{zz}')) plt.legend() plt.xlabel(str_to_mathbf('Time (s)')) plt.ylabel(str_to_mathbf('\epsilon')) savePNG(save_path+'/Test11_verificationPlot_c','1280x960') plt.figure(4) plt.clf() ax4 = plt.subplot(111) plt.subplots_adjust(right=0.75) ax4.xaxis.set_major_formatter(formatter) ax4.yaxis.set_major_formatter(formatter) param_text = material_dict['material string'] plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') plt.title('AreniscaTest 11:\nUniaxial Strain J2 Plasticity (plot d)') plt.plot(analytical_times,np.array(analytical_Sxx)/1e6,'-g',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \sigma_{xx}')) plt.plot(analytical_times,np.array(analytical_Syy)/1e6,'-r',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \sigma_{yy}')) plt.plot(analytical_times,np.array(analytical_Szz)/1e6,'-b',linewidth=lineWidth+1,label=str_to_mathbf('Analytical \sigma_{zz}')) plt.legend() plt.xlabel(str_to_mathbf('Time (s)')) plt.ylabel(str_to_mathbf('\sigma (Mpa)')) savePNG(save_path+'/Test11_verificationPlot_d','1280x960') if SHOW_ON_MAKE: plt.show() else: print '\nERROR: need working directory to post process this problem' def test12_postProc(uda_path,save_path,**kwargs): #Extract stress history print "Post Processing Test: 12 - Nonlinear Elasticity" times,sigmas = get_pStress(uda_path) pressure = [] for sigma in sigmas: pressure.append(-sigma_I1(sigma)/3.0) times,plasticStrainVol = get_pPlasticStrainVol(uda_path) times,elasticStrainVol = get_pElasticStrainVol(uda_path) totalStrainVol = 
-np.array(elasticStrainVol)-np.array(plasticStrainVol) ###PLOTTING formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') ##Plot a evlims = (0.0,0025) plt.figure(1) plt.clf() ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) #param_text = material_dict['material string'] #plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') #ax1=eqShear_vs_meanStress(I1s,porosity,I1lims,(0,0.6)) plt.plot(totalStrainVol,pressure,'-b',label='Arenisca') plt.title('AreniscaTest 12:\nNonlinear Elasticity') plt.ylabel(str_to_mathbf('p: pressure (Pa)')) plt.xlabel(str_to_mathbf('ev: compressive volumetric strain')) ax1.set_xticks([0,0.005,0.010,0.015,0.020,0.025]) ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) plt.legend() savePNG(save_path+'/Test12_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() else: print '\nERROR: need working directory to post process this problem' def test13_postProc(uda_path,save_path,**kwargs): COLORS = ['Black','Blue','Magenta','Red','Green'] if 'WORKING_PATH' in kwargs: working_dir = kwargs['WORKING_PATH'] #Plot Constants Xlims = (-450,50) Ylims = (-100,100) formatter = ticker.FormatStrFormatter('$\mathbf{%g}$') plt.figure(1) plt.hold(True) plt.clf() material_dict = get_yield_surface(uda_path) PEAKI1 = material_dict['PEAKI1'] FSLOPE = material_dict['FSLOPE'] #STREN = material_dict['STREN'] STREN = PEAKI1*FSLOPE T1 = material_dict['T1'] T2 = material_dict['T2'] def_times,Fs = get_defTable(uda_path,working_dir) A = Fs[1][0][0] #As = Fs[10][0][0] K = material_dict['B0'] G = material_dict['G0'] C = K+(4.0/3.0)*G Y = STREN*1.732 YS = STREN #uniaxial strain (unscaled) analytical_exx = [0.0, (Y/(2.0*G)), np.log(A), ] analytical_Sxx=[0.0, (C*Y)/(2.0*G), ((C-K)*Y)/(2*G)+K*np.log(A), ] #uniaxial strain (scaled) #analytical_exx = np.array([0.0, #(Y/(2.0*G)), #np.log(A), #np.log(A)-(Y)/(G), #0.0 #])/(Y/(2.0*G)) #analytical_Sxx = np.array([0.0, #(C*Y)/(2.0*G), #((C-K)*Y)/(2*G)+K*np.log(A), #K*np.log(A)-((C+K)*Y)/(2*G), #(K-C)*Y/(2*G) #])/((C*Y)/(2.0*G)) #pure shear (unscaled) #analytical_exx = np.array([0.0, # (YS/(2.0*G)), # np.log(As), # ]) #analytical_Sxx = np.array([0.0, # (YS), # (YS), # ]) #Extract stress history print "Post Processing Test: 13 " times,sigmas = get_pStress(uda_path) times,epsils = get_epsilons(uda_path) exx = [] eyy = [] ezz = [] exy = [] for epsil in epsils: exx.append(epsil[0][0]) eyy.append(epsil[1][1]) ezz.append(epsil[2][2]) exy.append(epsil[0][1]) Sxx = [] Syy = [] Szz = [] Sxy = [] for sigma in sigmas: Sxx.append(sigma[0][0]) Syy.append(sigma[1][1]) Szz.append(sigma[2][2]) Sxy.append(sigma[0][1]) scaled_exx = ((2.0*G)/Y)*np.array(exx) scaled_Sxx = ((2.0*G)/(C*Y))*np.array(Sxx) scaled_Syy = ((2.0*G)/(C*Y))*np.array(Syy) #S = np.array(Sxx) - np.array(Syy) S = np.array(Sxx) #E = np.array(exy) ###PLOTTING ax1 = plt.subplot(111) plt.subplots_adjust(right=0.75) #param_text = material_dict['material string'] #plt.figtext(0.77,0.70,param_text,ha='left',va='top',size='x-small') eqShear_vs_meanStress(exx,S,LINE_LABEL = 'T1='+format(T1,'1.3e')+' T2='+format(T2,'1.3e')) #eqShear_vs_meanStress(E,S,LINE_LABEL = 'T1='+format(T1,'1.3e')+' T2='+format(T2,'1.3e'),COLOR=COLORS[idx]) plt.plot(analytical_exx,analytical_Sxx,'--',color='Red',label='Analytical solution for rate independent case.') plt.title('AreniscaTest 13:') plt.ylabel(str_to_mathbf('\sigma_{xx}')) plt.xlabel(str_to_mathbf('\epsilon_{xx}')) #plt.ylabel(str_to_mathbf('\sigma_{xy}')) #plt.xlabel(str_to_mathbf('\epsilon_{xy}')) 
ax1.xaxis.set_major_formatter(formatter) ax1.yaxis.set_major_formatter(formatter) plt.legend() savePNG(save_path+'/Test13_verificationPlot','1280x960') if SHOW_ON_MAKE: plt.show() else: print '\nERROR: need working directory to post process this problem'
35.607119
178
0.679304
8,104
53,019
4.270854
0.08465
0.038832
0.019387
0.025541
0.662708
0.615007
0.570454
0.541389
0.524689
0.471382
0
0.055805
0.152681
53,019
1,488
179
35.631048
0.714629
0.096833
0
0.496575
0
0.000856
0.15687
0.021867
0
0
0
0
0
0
null
null
0
0.006849
null
null
0.034247
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8f1098b74f0861cbf64404f75ae544f49b9969a
1,617
py
Python
src/products/migrations/0006_ShippableFullName.py
denkasyanov/education-backend
c796b6f2f1cc1cd09f83cab2ca0cc45344906ef5
[ "MIT" ]
151
2020-04-21T09:58:57.000Z
2021-09-12T09:01:21.000Z
src/products/migrations/0006_ShippableFullName.py
tlgtaa/education-backend
86f8af315f9cff2c1fd19406899d593fc0852124
[ "MIT" ]
163
2020-05-29T20:52:00.000Z
2021-09-11T12:44:56.000Z
src/products/migrations/0006_ShippableFullName.py
boochamoocha/education-backend
c6ffb0c00bc066c8f1e0a8c0ffe4d0215c7c416a
[ "MIT" ]
39
2020-04-21T12:28:16.000Z
2021-09-12T15:33:47.000Z
# Generated by Django 2.2.7 on 2019-11-15 21:48

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('products', '0005_ClickMeeetingRoomURL'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='full_name',
            field=models.CharField(default='', help_text='Билет на мастер-класс о TDD или «запись курсов кройки и шитья»', max_length=255, verbose_name='Full name for letters'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='record',
            name='full_name',
            field=models.CharField(default='', help_text='«Запись мастер-класса о TDD»', max_length=255, verbose_name='Full name for letters'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='course',
            name='name_genitive',
            field=models.CharField(help_text='«мастер-класса о TDD». К примеру для записей.', max_length=255, verbose_name='Genitive name'),
        ),
        migrations.AlterField(
            model_name='course',
            name='name_receipt',
            field=models.CharField(help_text='«посещение мастер-класса по TDD» или «Доступ к записи курсов кройки и шитья»', max_length=255, verbose_name='Name for receipts'),
        ),
        migrations.AlterField(
            model_name='record',
            name='name_receipt',
            field=models.CharField(help_text='«Доступ к записи курсов кройки и шитья»', max_length=255, verbose_name='Name for receipts'),
        ),
    ]
39.439024
177
0.622758
198
1,617
5.005051
0.348485
0.045409
0.100908
0.095863
0.643794
0.587286
0.558022
0.489405
0.489405
0.306761
0
0.0285
0.262214
1,617
40
178
40.425
0.792121
0.027829
0
0.617647
1
0
0.291083
0.015924
0
0
0
0
0
1
0
false
0
0.029412
0
0.117647
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
d8f21d41b8c07b30108c551d5f9b8476610f53e3
2,286
py
Python
convert_xml_to_xls.py
CharlesBuy/pyxmlspreadsheet
9ac2064c5c44a3800c3ff8892292d12fbd57d27b
[ "MIT" ]
null
null
null
convert_xml_to_xls.py
CharlesBuy/pyxmlspreadsheet
9ac2064c5c44a3800c3ff8892292d12fbd57d27b
[ "MIT" ]
null
null
null
convert_xml_to_xls.py
CharlesBuy/pyxmlspreadsheet
9ac2064c5c44a3800c3ff8892292d12fbd57d27b
[ "MIT" ]
null
null
null
#
# - Very simple code to convert from office spreadsheet to xls
#
# Sneldev.com
#

import sys
import xml.etree.ElementTree as ET
import xlwt
from dateutil.parser import parse


class xml_workbook():
    # hard code a tag here
    _tag_prefix = "{urn:schemas-microsoft-com:office:spreadsheet}"

    def _wt(self, key):
        return self._tag_prefix + key

    def __init__(self, path):
        tree = ET.parse(path)
        self.root = tree.getroot()

    def get_worksheets(self):
        for ws in self.root.findall(self._wt('Worksheet')):
            yield ws.findall(self._wt('Table'))[0]

    def get_rows(self, ws):
        return ws.findall(self._wt('Row'))

    def get_cells(self, row):
        def create_cell(c):
            data = c.findall(self._wt('Data'))[0]
            text = data.text
            data_type = data.get(self._wt('Type'), "String")
            return {'text': text, 'type': data_type}
        cells = row.findall(self._wt('Cell'))
        return [create_cell(c) for c in cells]


# This small class converts an xml cell to a text and a style
# that can be put into an xlwt worksheet
class cell_converter():
    def __init__(self):
        self.cell_convert = {
            "String": lambda value: value,
            "Number": lambda value: float(value),
            "DateTime": lambda value: value and parse(value),
        }
        number_style = xlwt.XFStyle()
        number_style.num_format_str = "0.00"
        date_style = xlwt.XFStyle()
        date_style.num_format_str = 'dd/mm/yyyy'
        self.cell_styles = {
            "DateTime": date_style,
            "Number": number_style,
        }
        self.default_format = xlwt.XFStyle()

    def get_text(self, cell):
        return self.cell_convert[cell['type']](cell['text'])

    def get_style(self, cell):
        return self.cell_styles.get(cell['type'], self.default_format)


def convert_xml_spreadsheet_to_xls(in_path, out_path):
    wb_o = xlwt.Workbook(encoding="UTF-8")
    wb = xml_workbook(in_path)
    conv = cell_converter()
    for ws_idx, ws in enumerate(wb.get_worksheets()):
        ws_o = wb_o.add_sheet("Converted Data%d" % ws_idx)
        for row_idx, row in enumerate(wb.get_rows(ws)):
            for cell_idx, cell in enumerate(wb.get_cells(row)):
                ws_o.write(row_idx, cell_idx, conv.get_text(cell), conv.get_style(cell))
    wb_o.save(out_path)


if __name__ == '__main__':
    if (len(sys.argv) < 3):
        print "me.py input_spreadsheet.xml output.xls"
        sys.exit()
    convert_xml_spreadsheet_to_xls(sys.argv[1], sys.argv[2])
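A usage sketch for the converter above; the file names are placeholders. Note the file itself is Python 2 (print statement in __main__), so it would be run with a Python 2 interpreter.

# Sketch: convert a SpreadsheetML file to a legacy .xls workbook.
# CLI equivalent: python convert_xml_to_xls.py report.xml report.xls
convert_xml_spreadsheet_to_xls("report.xml", "report.xls")  # hypothetical paths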
26.275862
76
0.701225
367
2,286
4.133515
0.316076
0.023731
0.042848
0.031641
0.063283
0
0
0
0
0
0
0.004678
0.158355
2,286
86
77
26.581395
0.783784
0.08049
0
0
0
0
0.103448
0.032088
0
0
0
0
0
0
null
null
0
0.067797
null
null
0.016949
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
d8ff6bd51bd1471d74da2d3d069253e4211178d6
861
py
Python
AutoRecon-main/autorecon/default-plugins/nmap-oracle.py
Nano-Techx/nano-tool
6907cbaa3251ba9e47ab634b6e1a656002b3672b
[ "Apache-2.0" ]
null
null
null
AutoRecon-main/autorecon/default-plugins/nmap-oracle.py
Nano-Techx/nano-tool
6907cbaa3251ba9e47ab634b6e1a656002b3672b
[ "Apache-2.0" ]
null
null
null
AutoRecon-main/autorecon/default-plugins/nmap-oracle.py
Nano-Techx/nano-tool
6907cbaa3251ba9e47ab634b6e1a656002b3672b
[ "Apache-2.0" ]
null
null
null
from autorecon.plugins import ServiceScan


class NmapOracle(ServiceScan):

    def __init__(self):
        super().__init__()
        self.name = "Nmap Oracle"
        self.tags = ['default', 'safe', 'databases']

    def configure(self):
        self.match_service_name('^oracle')

    def manual(self, service, plugin_was_run):
        service.add_manual_command('Brute-force SIDs using Nmap:', 'nmap {nmap_extra} -sV -p {port} --script="banner,oracle-sid-brute" -oN "{scandir}/{protocol}_{port}_oracle_sid-brute_nmap.txt" -oX "{scandir}/xml/{protocol}_{port}_oracle_sid-brute_nmap.xml" {address}')

    async def run(self, service):
        await service.execute('nmap {nmap_extra} -sV -p {port} --script="banner,(oracle* or ssl*) and not (brute or broadcast or dos or external or fuzzer)" -oN "{scandir}/{protocol}_{port}_oracle_nmap.txt" -oX "{scandir}/xml/{protocol}_{port}_oracle_nmap.xml" {address}')
47.833333
266
0.727062
123
861
4.845528
0.455285
0.080537
0.120805
0.050336
0.375839
0.315436
0.251678
0.251678
0.127517
0
0
0
0.10453
861
17
267
50.647059
0.773022
0
0
0
0
0.166667
0.586527
0.310105
0
0
0
0
0
1
0.25
false
0
0.083333
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b01580378afa92e53e9be55b04e4282dd5ebf24
2,128
py
Python
sweden_crs_transformations/transformation/_transform_strategy_from_sweref99_or_rt90_to_wgs84_and_then_to_real_target.py
TomasJohansson/sweden_crs_transformations_4python
d4c0ae1bab2e0f505f3e6f948c1b31b3bf397f04
[ "MIT" ]
1
2021-10-02T20:05:59.000Z
2021-10-02T20:05:59.000Z
sweden_crs_transformations/transformation/_transform_strategy_from_sweref99_or_rt90_to_wgs84_and_then_to_real_target.py
TomasJohansson/sweden_crs_transformations_4python
d4c0ae1bab2e0f505f3e6f948c1b31b3bf397f04
[ "MIT" ]
null
null
null
sweden_crs_transformations/transformation/_transform_strategy_from_sweref99_or_rt90_to_wgs84_and_then_to_real_target.py
TomasJohansson/sweden_crs_transformations_4python
d4c0ae1bab2e0f505f3e6f948c1b31b3bf397f04
[ "MIT" ]
null
null
null
""" | Copyright (c) Tomas Johansson , http://www.programmerare.com | The code in this library is licensed with MIT. | The library is based on the C#.NET library 'sweden_crs_transformations_4net' (https://github.com/TomasJohansson/sweden_crs_transformations_4net) | which in turn is based on 'MightyLittleGeodesy' (https://github.com/bjornsallarp/MightyLittleGeodesy/) | which is also released with MIT. | License information about 'sweden_crs_transformations_4python' and 'MightyLittleGeodesy': | https://github.com/TomasJohansson/sweden_crs_transformations_4python/blob/python_SwedenCrsTransformations/LICENSE | For more information see the webpage below. | https://github.com/TomasJohansson/sweden_crs_transformations_4python """ from sweden_crs_transformations.crs_coordinate import CrsCoordinate from sweden_crs_transformations.crs_projection import CrsProjection from sweden_crs_transformations.transformation._transform_strategy import _TransformStrategy class _TransFormStrategy_From_Sweref99OrRT90_to_WGS84_andThenToRealTarget(_TransformStrategy): # Precondition: sourceCoordinate must be CRS SWEREF99 or RT90, and the target too def transform(self, source_coordinate: CrsCoordinate, final_target_crs_projection: CrsProjection ) -> CrsCoordinate: from sweden_crs_transformations.transformation._transformer import _Transformer source_coordinate_projection: CrsProjection = source_coordinate.get_crs_projection() if (not ( (source_coordinate_projection.is_sweref99() or source_coordinate_projection.is_rt90()) and (final_target_crs_projection.is_sweref99() or final_target_crs_projection.is_rt90()) )): _Transformer._throwExceptionMessage(source_coordinate.get_crs_projection(), final_target_crs_projection) intermediate_crs_projection = CrsProjection.WGS84 intermediate_wgs84_coordinate = _Transformer.transform(source_coordinate, intermediate_crs_projection) return _Transformer.transform(intermediate_wgs84_coordinate, final_target_crs_projection)
60.8
146
0.795113
230
2,128
7
0.356522
0.080745
0.134161
0.074534
0.284472
0.10559
0.10559
0.073292
0
0
0
0.015873
0.141447
2,128
34
147
62.588235
0.864806
0.037124
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.210526
null
null
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
2b066e4df5230c4aa951723634c68a3d9f727def
1,430
py
Python
vibromaf/signal/spectrum.py
hofbi/vibromaf
7678042d18fa3b4ab006283bdbd1b1cc6d84e822
[ "MIT" ]
1
2022-03-11T19:56:59.000Z
2022-03-11T19:56:59.000Z
vibromaf/signal/spectrum.py
hofbi/vibromaf
7678042d18fa3b4ab006283bdbd1b1cc6d84e822
[ "MIT" ]
null
null
null
vibromaf/signal/spectrum.py
hofbi/vibromaf
7678042d18fa3b4ab006283bdbd1b1cc6d84e822
[ "MIT" ]
null
null
null
"""Spectrum module""" import numpy as np from scipy.stats import norm def pow2db(power: np.array) -> np.array: """ Convert power to decibels https://de.mathworks.com/help/signal/ref/pow2db.html """ return 10.0 * np.log10(power) def db2pow(decibel: np.array) -> np.array: """ Convert decibel to power https://de.mathworks.com/help/signal/ref/db2pow.html """ return np.power(10.0, decibel / 10.0) def mag2db(power: np.array) -> np.array: """ Convert magnitude to decibels https://de.mathworks.com/help/signal/ref/mag2db.html """ return 2 * pow2db(power) def signal_energy(signal: np.array) -> np.array: """Calculate the signal energy""" return np.sum(np.square(signal, dtype=np.float64)) def compute_normalized_spectral_difference( reference_spectrum: np.array, distorted_spectrum: np.array ) -> np.array: """Compute the normalized difference of two spectra""" difference = np.sum( np.abs(db2pow(reference_spectrum) - db2pow(distorted_spectrum)), axis=1 ) return pow2db( difference / (np.sum(np.abs(db2pow(reference_spectrum)), axis=1) + np.finfo(float).eps) ) def compute_spectral_support(spectrum: np.array, scale: float = 12) -> np.array: """Compute the spectral support of perceptual spectrum using a normal distribution cdf""" return np.apply_along_axis(norm.cdf, 1, spectrum, scale=scale)
27.5
93
0.678322
196
1,430
4.882653
0.341837
0.095089
0.047022
0.073145
0.287356
0.265413
0.211076
0.177638
0.087774
0
0
0.025795
0.186713
1,430
51
94
28.039216
0.797077
0.290909
0
0
0
0
0
0
0
0
0
0
0
1
0.272727
false
0
0.090909
0
0.636364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b0823806306e95de80b37510219b6b55f6cacc5
1,484
py
Python
src/condor_tests/ornithology/io.py
sridish123/htcondor
481d975fd8602242f6a052aab04e20b0b560db89
[ "Apache-2.0" ]
217
2015-01-08T04:49:42.000Z
2022-03-27T10:11:58.000Z
src/condor_tests/ornithology/io.py
sridish123/htcondor
481d975fd8602242f6a052aab04e20b0b560db89
[ "Apache-2.0" ]
185
2015-05-03T13:26:31.000Z
2022-03-28T03:08:59.000Z
src/condor_tests/ornithology/io.py
sridish123/htcondor
481d975fd8602242f6a052aab04e20b0b560db89
[ "Apache-2.0" ]
133
2015-02-11T09:17:45.000Z
2022-03-31T07:28:54.000Z
# Copyright 2019 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import textwrap
from pathlib import Path

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


# TODO: does this way of doing permissions work on Windows?
def write_file(path: Path, text: str, permissions: int = 0o777) -> Path:
    """
    Write the given ``text`` to a new file at the given ``path``, stomping
    anything that might exist there.

    Parameters
    ----------
    path
        The path to write to.
    text
        The text to write.
    permissions
        The permissions to give the file.

    Returns
    -------
    path : pathlib.Path
        The path the file was written to (as an absolute path).
    """
    path = Path(path).absolute()
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(textwrap.dedent(text))
    path.chmod(permissions)
    return path
29.68
74
0.703504
210
1,484
4.938095
0.547619
0.057859
0.025072
0.030858
0
0
0
0
0
0
0
0.010239
0.210243
1,484
49
75
30.285714
0.874573
0.679919
0
0
0
0
0
0
0
0
0
0.020408
0
1
0.090909
false
0
0.272727
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
2b121fd3a533e1e023e1b1b4e531b0bbc5c02d6c
621
py
Python
main.py
m-zakeri/ADAFEST
d13f73682aecded34b4e8fa203435e56dd7a280a
[ "MIT" ]
2
2022-01-04T14:47:35.000Z
2022-02-23T07:14:11.000Z
main.py
m-zakeri/ADAFEST
d13f73682aecded34b4e8fa203435e56dd7a280a
[ "MIT" ]
1
2021-03-20T07:25:30.000Z
2021-03-20T07:25:30.000Z
main.py
m-zakeri/ADAFEST
d13f73682aecded34b4e8fa203435e56dd7a280a
[ "MIT" ]
1
2022-02-23T07:14:13.000Z
2022-02-23T07:14:13.000Z
""" The main module of DAFEST project. ADAFEST is an abbreviation: 'A Data-Driven Approach to Estimating / Evaluating Software Testability' The full version of source code will be available as soon as the relevant paper(s) are published. """ class Main(): """Welcome to project ADAFEST This file contains the main script """ @classmethod def print_welcome(cls, name) -> None: """ Print welcome message :param name: :return: """ print(f'Welcome to the project {name}.') # Main driver if __name__ == '__main__': Main.print_welcome('ADAFEST')
20.7
101
0.648953
78
621
5.038462
0.653846
0.091603
0
0
0
0
0
0
0
0
0
0
0.256039
621
29
102
21.413793
0.850649
0.571659
0
0
0
0
0.220588
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.333333
0.5
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
2b143c83842d129d2bd45478b75903d5f38b5a19
2,652
py
Python
src/gruenbeck/requests/__init__.py
xancoder/gruenbeck
f6d6e8a27156d56017227a5ed3b79e22eacd8cda
[ "MIT" ]
null
null
null
src/gruenbeck/requests/__init__.py
xancoder/gruenbeck
f6d6e8a27156d56017227a5ed3b79e22eacd8cda
[ "MIT" ]
null
null
null
src/gruenbeck/requests/__init__.py
xancoder/gruenbeck
f6d6e8a27156d56017227a5ed3b79e22eacd8cda
[ "MIT" ]
null
null
null
import random
import xml.etree.ElementTree

import requests


def get_data(host, parameters):
    result_data = {}
    url = f'http://{host}/mux_http'
    request_id = random.randint(4000, 6000)
    payload_header = f'id={request_id}&show='
    data = '|'.join(parameters)
    payload = f'{payload_header}{data}~'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response_data = requests.post(url, data=payload, headers=headers)

    try:
        root = xml.etree.ElementTree.fromstring(response_data.text)
    except xml.etree.ElementTree.ParseError as err:
        raise ValueError(f"[-] failed to parse xml: {err}")

    for child in root:
        if child.tag == 'code':
            continue
        result_data.update({
            child.tag: child.text
        })

    return result_data


if __name__ == '__main__':
    host_device = 'softliq-sc-ae-85-48'
    input_data = {
        'D_Y_2_1': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'gestern'},
        'D_Y_2_2': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 2 Tagen'},
        'D_Y_2_3': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 3 Tagen'},
        'D_Y_2_4': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 4 Tagen'},
        'D_Y_2_5': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 5 Tagen'},
        'D_Y_2_6': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 6 Tagen'},
        'D_Y_2_7': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 7 Tagen'},
        'D_Y_2_8': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 8 Tagen'},
        'D_Y_2_9': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 9 Tagen'},
        'D_Y_2_10': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 10 Tagen'},
        'D_Y_2_11': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 11 Tagen'},
        'D_Y_2_12': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 12 Tagen'},
        'D_Y_2_13': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 13 Tagen'},
        'D_Y_2_14': {'access': 'read', 'device': '', 'value': 'Int', 'unit': '[l]', 'code': '', 'note': 'vor 14 Tagen'},
    }
    result = get_data(host_device, input_data)
    print(result)
47.357143
120
0.519608
345
2,652
3.802899
0.269565
0.021341
0.032012
0.224085
0.424543
0.424543
0.424543
0.424543
0.424543
0.396341
0
0.029929
0.206259
2,652
55
121
48.218182
0.593349
0
0
0
0
0
0.368401
0.029035
0
0
0
0
0
1
0.023256
false
0
0.069767
0
0.116279
0.023256
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b1fa39d20f7750128e2a06b8ef41f905c26ad90
4,000
py
Python
scripts/run_scripts/main.py
ECP-copa/CabanaPIC
ac83e086cad3b6a3307abec26fb276ce7307143e
[ "BSD-3-Clause" ]
10
2019-07-30T18:52:55.000Z
2021-02-08T07:22:33.000Z
scripts/run_scripts/main.py
ECP-copa/CabanaPIC
ac83e086cad3b6a3307abec26fb276ce7307143e
[ "BSD-3-Clause" ]
28
2019-04-16T21:15:34.000Z
2021-02-08T20:16:44.000Z
scripts/run_scripts/main.py
ECP-copa/CabanaPIC
ac83e086cad3b6a3307abec26fb276ce7307143e
[ "BSD-3-Clause" ]
2
2019-05-08T17:30:01.000Z
2019-06-27T16:12:15.000Z
from git import Repo
import subprocess
import os, shutil


# I use this later to lazily generate an error with a message
class CustomError(Exception):
    pass


repo_path = "../../"
r = Repo(repo_path)
repo_heads = r.heads  # or it's alias: r.branches
repo_heads_names = [h.name for h in repo_heads]

#kokkos_src = '/Users/bird/kokkos/'
#kokkos_install = '/Users/bird/kokkos/build/install'
#cabana_install = '/Users/bird/Cabana/build/build/install'  # not a typo, it's in a dumb path

#platforms = ["Serial", "CPU", "GPU", "UVM"]
platforms = ["Serial", "CPU", "GPU"]
#platforms = ["CPU", "GPU"]
#platforms = ["GPU"]
#platforms = ["CPU"]

CXX = "g++"
#arch = 'Volta70'
arch = 'Kepler35'

subprocess.check_call(['./timing_lib.sh'])

this_build_dir = 'build'
kokkos_dirs = {}
cabana_dirs = {}

home_dir = os.environ['HOME']

# Build Dependencies
# TODO: make this configurable
kokkos_root = os.path.join(home_dir, 'kokkos')
cabana_root = os.path.join(home_dir, 'Cabana')

# Check we can find Kokkos and Cabana
if not os.path.isdir(kokkos_root):
    raise CustomError("Can't find kokkos")
if not os.path.isdir(cabana_root):
    raise CustomError("Can't find Cabana")


# Copy Kokkos and Cabana to be inside this dir
def copy_and_overwrite(from_path, to_path):
    if os.path.exists(to_path):
        shutil.rmtree(to_path)
    shutil.copytree(from_path, to_path)


def copy_if_safe(from_path, to_path):
    if not os.path.isdir(to_path):
        shutil.copytree(from_path, to_path)


# only copy if they don't exist already
kokkos_new = os.path.join(this_build_dir, 'kokkos')
copy_if_safe(kokkos_root, kokkos_new)
cabana_new = os.path.join(this_build_dir, 'cabana')
copy_if_safe(cabana_root, cabana_new)

# Build Dependencies
for plat in platforms:
    install_dir = "build-" + plat
    # Do Build
    print("build_kokkos.sh " + CXX + " " + kokkos_new + " " + install_dir + " " + plat + " " + arch)
    subprocess.check_call(['./build_kokkos.sh', CXX, kokkos_new, install_dir, plat, arch])
    print("./build_cabana.sh " + " " + CXX + " " + os.path.join(kokkos_new, install_dir, 'install') + " " + cabana_new + " " + install_dir + " " + plat)
    subprocess.check_call(['./build_cabana.sh', CXX, os.path.join(kokkos_new, install_dir, 'install'), cabana_new, install_dir, plat])
    # Save dirs, relative to root
    cabana_dirs[plat] = install_dir
    kokkos_dirs[plat] = install_dir

# Iterate over *local* git branches
for branch in repo_heads_names:
    print("Working on branch " + branch)
    for plat in platforms:
        print(plat)
        # TODO: throughout these scripts we assume ./instal is the install dir! abstract it.
        cabana_install = os.path.join(cabana_dirs[plat], 'install')
        kokkos_install = os.path.join(kokkos_dirs[plat], 'install')

        # For each repo, check it out into a new folder and build it
        #clone_path = './' + branch
        clone_path = os.path.join('./', this_build_dir, branch)
        print("!!!! WORKING ON " + clone_path)

        # look to see if the folder already exists:
        if not os.path.isdir(clone_path):
            # if it does... delete it (!)
            #print("Deleting " + clone_path)
            # We need to delete where it will build only one platforms worth,
            # or hoist the clone
            #shutil.rmtree(clone_path + build??)
            # OR if it does... skip
            #continue
            # clone it
            cloned = Repo.clone_from(
                repo_path,
                clone_path,
                branch=branch
            )

        pwd = os.getcwd()
        kokkos_full_path = os.path.join(pwd, kokkos_new, kokkos_install)
        cabana_full_path = os.path.join(pwd, cabana_new, cabana_install)
        print("kk full path " + kokkos_full_path)
        print("./build_and_run.sh " + clone_path + " g++ " + kokkos_full_path + " " + cabana_full_path + " " + plat)
        subprocess.check_call(['./build_and_run.sh', clone_path, "g++", kokkos_full_path, cabana_full_path, plat])
32.258065
150
0.6525
569
4,000
4.383128
0.240773
0.038492
0.044106
0.017642
0.303929
0.251002
0.186047
0.165998
0.138733
0.138733
0
0.001278
0.2175
4,000
123
151
32.520325
0.795527
0.2665
0
0.064516
1
0
0.109655
0
0
0
0
0.00813
0
1
0.032258
false
0.016129
0.048387
0
0.096774
0.112903
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b221e9fcd75b4225d8486366160ca6b79866358
566
py
Python
misago/misago/socialauth/admin/tests/test_providers_list.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
2
2021-03-06T21:06:13.000Z
2021-03-09T15:05:12.000Z
misago/misago/socialauth/admin/tests/test_providers_list.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
null
null
null
misago/misago/socialauth/admin/tests/test_providers_list.py
vascoalramos/misago-deployment
20226072138403108046c0afad9d99eb4163cedc
[ "MIT" ]
null
null
null
from django.urls import reverse

admin_link = reverse("misago:admin:settings:socialauth:index")


def test_providers_list_renders(admin_client):
    response = admin_client.get(admin_link)
    assert response.status_code == 200


def test_providers_list_renders_with_active_provider(admin_client, provider):
    response = admin_client.get(admin_link)
    assert response.status_code == 200


def test_providers_list_renders_with_disabled_provider(admin_client, disabled_provider):
    response = admin_client.get(admin_link)
    assert response.status_code == 200
28.3
88
0.805654
76
566
5.618421
0.355263
0.154567
0.112412
0.140515
0.653396
0.590164
0.590164
0.590164
0.590164
0.590164
0
0.018072
0.120141
566
19
89
29.789474
0.839357
0
0
0.545455
0
0
0.067138
0.067138
0
0
0
0
0.272727
1
0.272727
false
0
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b2a2cb2f442f0c61a738971056437e441c211fd
340
py
Python
project_name/urls.py
hotbaby/django-project-skeleton
41ee89fa96e0df26157d5aea92ce9bcf731c0e13
[ "MIT" ]
1
2019-01-11T10:10:43.000Z
2019-01-11T10:10:43.000Z
project_name/urls.py
hotbaby/django-project-skeleton
41ee89fa96e0df26157d5aea92ce9bcf731c0e13
[ "MIT" ]
3
2018-12-18T12:15:28.000Z
2020-06-05T19:38:46.000Z
project_name/urls.py
hotbaby/django-project-skeleton
41ee89fa96e0df26157d5aea92ce9bcf731c0e13
[ "MIT" ]
null
null
null
"""{{ project_name }} URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ """ # Django imports from django.conf.urls import include, url urlpatterns = [ # Examples: # url(r'^blog/', include('blog.urls', namespace='blog')), ]
26.153846
77
0.694118
45
340
5.222222
0.777778
0
0
0
0
0
0
0
0
0
0
0.010381
0.15
340
12
78
28.333333
0.802768
0.758824
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
2b32f528d435bacb92f58da33b3b9a36bf562ce4
449
py
Python
Problema 2/Problema2.py
ItaloCcosccoAlvis/ADA-UNSA---Lab2
00b221f66d1a2149c83b4eb79bbedf3b35a1a2a9
[ "MIT" ]
null
null
null
Problema 2/Problema2.py
ItaloCcosccoAlvis/ADA-UNSA---Lab2
00b221f66d1a2149c83b4eb79bbedf3b35a1a2a9
[ "MIT" ]
null
null
null
Problema 2/Problema2.py
ItaloCcosccoAlvis/ADA-UNSA---Lab2
00b221f66d1a2149c83b4eb79bbedf3b35a1a2a9
[ "MIT" ]
1
2021-11-01T17:22:48.000Z
2021-11-01T17:22:48.000Z
# Problem 2
# Generate a reversed list of n numbers and then search for an element


def generateList(size):
    return list(range(size, 0, -1))


def searchInvertArray(array, element):
    for i in range(0, len(array)):
        if array[i] == element:
            return True
    return False


# Test cases
test1 = generateList(18)
test2 = generateList(89)
print(searchInvertArray(test1, 78))
print(searchInvertArray(test2, 15))
24.944444
74
0.681514
59
449
5.186441
0.694915
0.143791
0
0
0
0
0
0
0
0
0
0.045714
0.22049
449
18
75
24.944444
0.828571
0.218263
0
0
1
0
0
0
0
0
0
0
0
1
0.181818
false
0
0
0.090909
0.454545
0.181818
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b3da381c58b4f82260a14be2bde2c3527be2594
442
py
Python
src/rmi/bibliotheque.py
e-yuzo/distributed-systems-for-fun
54e605265a4d78656aba815184b869b96227d3a9
[ "Apache-2.0" ]
null
null
null
src/rmi/bibliotheque.py
e-yuzo/distributed-systems-for-fun
54e605265a4d78656aba815184b869b96227d3a9
[ "Apache-2.0" ]
null
null
null
src/rmi/bibliotheque.py
e-yuzo/distributed-systems-for-fun
54e605265a4d78656aba815184b869b96227d3a9
[ "Apache-2.0" ]
null
null
null
from __future__ import print_function
import Pyro4


@Pyro4.expose
class Bibliotheque(object):
    books = []

    def __init__(self):
        self.books = []

    def add(self, book):
        self.books.append(book)
        print("added")

    def rm(self, book):
        for b in self.books:
            if b.title == book.title:
                self.books.remove(b)
                print("removed")

    def ls(self):
        return self.books
19.217391
37
0.561086
54
442
4.425926
0.518519
0.188285
0
0
0
0
0
0
0
0
0
0.006711
0.325792
442
23
38
19.217391
0.795302
0
0
0
0
0
0.027088
0
0
0
0
0
0
1
0.235294
false
0
0.117647
0.058824
0.529412
0.176471
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b3e182cdb9374aefd88508287e3f0812c0864ba
634
py
Python
onepanman_api/migrations/0014_auto_20200319_1740.py
Capstone-onepanman/api-server
1a5174fbc441d2718f3963863590f634ba2014e1
[ "MIT" ]
null
null
null
onepanman_api/migrations/0014_auto_20200319_1740.py
Capstone-onepanman/api-server
1a5174fbc441d2718f3963863590f634ba2014e1
[ "MIT" ]
12
2020-03-24T18:09:30.000Z
2022-03-12T00:15:07.000Z
onepanman_api/migrations/0014_auto_20200319_1740.py
Capstone-onepanman/api-server
1a5174fbc441d2718f3963863590f634ba2014e1
[ "MIT" ]
null
null
null
# Generated by Django 2.2.10 on 2020-03-19 08:40

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('onepanman_api', '0013_auto_20200319_1714'),
    ]

    operations = [
        migrations.AlterField(
            model_name='problem',
            name='rule',
            field=models.TextField(db_column='RULE', default='{"obj_num": ,"placement" : , "action" : , "ending": ,}', verbose_name='문제 규칙'),
        ),
        migrations.DeleteModel(
            name='ProblemRuleInfo',
        ),
        migrations.DeleteModel(
            name='RuleInfo',
        ),
    ]
25.36
141
0.570978
61
634
5.803279
0.754098
0.118644
0.141243
0
0
0
0
0
0
0
0
0.071111
0.290221
634
24
142
26.416667
0.715556
0.072555
0
0.277778
1
0
0.226962
0.039249
0
0
0
0
0
1
0
false
0
0.055556
0
0.222222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b4181e01e4e874885f0067383123bf864197817
7,231
py
Python
cinderlib/tests/unit/objects/test_snapshot.py
Akrog/cinderlib
6481cd9a34744f80bdba130fe9089f1b8b7cb327
[ "Apache-2.0" ]
15
2018-01-04T13:46:59.000Z
2020-07-06T13:27:57.000Z
cinderlib/tests/unit/objects/test_snapshot.py
Akrog/cinderlib
6481cd9a34744f80bdba130fe9089f1b8b7cb327
[ "Apache-2.0" ]
12
2018-06-13T10:57:55.000Z
2019-04-04T09:31:44.000Z
cinderlib/tests/unit/objects/test_snapshot.py
Akrog/cinderlib
6481cd9a34744f80bdba130fe9089f1b8b7cb327
[ "Apache-2.0" ]
7
2018-03-12T22:41:30.000Z
2019-01-17T23:16:40.000Z
# Copyright (c) 2018, Red Hat, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import mock

from cinderlib import exception
from cinderlib import objects
from cinderlib.tests.unit import base


class TestSnapshot(base.BaseTest):
    def setUp(self):
        super(TestSnapshot, self).setUp()
        self.vol = objects.Volume(self.backend_name, size=10,
                                  extra_specs={'e': 'v'},
                                  qos_specs={'q': 'qv'})
        self.snap = objects.Snapshot(self.vol, name='my_snap',
                                     description='my_desc')
        self.vol._snapshots.append(self.snap)
        self.vol._ovo.snapshots.objects.append(self.snap._ovo)

    def test_init_from_volume(self):
        self.assertIsNotNone(self.snap.id)
        self.assertEqual(self.backend, self.snap.backend)
        self.assertEqual('my_snap', self.snap.name)
        self.assertEqual('my_snap', self.snap.display_name)
        self.assertEqual('my_desc', self.snap.description)
        self.assertEqual(self.vol.user_id, self.snap.user_id)
        self.assertEqual(self.vol.project_id, self.snap.project_id)
        self.assertEqual(self.vol.id, self.snap.volume_id)
        self.assertEqual(self.vol.size, self.snap.volume_size)
        self.assertEqual(self.vol._ovo, self.snap._ovo.volume)
        self.assertEqual(self.vol.volume_type_id, self.snap.volume_type_id)
        self.assertEqual(self.vol, self.snap.volume)

    def test_init_from_ovo(self):
        snap2 = objects.Snapshot(None, __ovo=self.snap._ovo)
        self.assertEqual(self.snap.backend, snap2.backend)
        self.assertEqual(self.snap._ovo, snap2._ovo)
        self.assertEqual(self.vol, self.snap.volume)

    def test_create(self):
        update_vol = {'provider_id': 'provider_id'}
        self.backend.driver.create_snapshot.return_value = update_vol
        self.snap.create()
        self.assertEqual('available', self.snap.status)
        self.assertEqual('provider_id', self.snap.provider_id)
        self.backend.driver.create_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.persistence.set_snapshot.assert_called_once_with(self.snap)

    def test_create_error(self):
        self.backend.driver.create_snapshot.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.create()
        self.assertEqual(self.snap, assert_context.exception.resource)
        self.backend.driver.create_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.assertEqual('error', self.snap.status)
        self.persistence.set_snapshot.assert_called_once_with(self.snap)

    def test_delete(self):
        with mock.patch.object(
                self.vol, '_snapshot_removed',
                wraps=self.vol._snapshot_removed) as snap_removed_mock:
            self.snap.delete()
        snap_removed_mock.assert_called_once_with(self.snap)
        self.backend.driver.delete_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.persistence.delete_snapshot.assert_called_once_with(self.snap)
        self.assertEqual([], self.vol.snapshots)
        self.assertEqual([], self.vol._ovo.snapshots.objects)
        self.assertEqual('deleted', self.snap._ovo.status)

    @mock.patch('cinderlib.objects.Volume._snapshot_removed')
    def test_delete_error(self, snap_removed_mock):
        self.backend.driver.delete_snapshot.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.delete()
        self.assertEqual(self.snap, assert_context.exception.resource)
        self.backend.driver.delete_snapshot.assert_called_once_with(
            self.snap._ovo)
        snap_removed_mock.assert_not_called()
        self.persistence.delete_snapshot.assert_not_called()
        self.assertEqual([self.snap], self.vol.snapshots)
        self.assertEqual([self.snap._ovo], self.vol._ovo.snapshots.objects)
        self.assertEqual('error_deleting', self.snap._ovo.status)

    def test_create_volume(self):
        create_mock = self.backend.driver.create_volume_from_snapshot
        create_mock.return_value = None
        vol2 = self.snap.create_volume(name='new_name', description='new_desc')
        create_mock.assert_called_once_with(vol2._ovo, self.snap._ovo)
        self.assertEqual('available', vol2.status)
        self.assertEqual(1, len(self.backend._volumes))
        self.assertEqual(vol2, self.backend._volumes[0])
        self.persistence.set_volume.assert_called_once_with(vol2)
        self.assertEqual(self.vol.id, self.vol.volume_type_id)
        self.assertNotEqual(self.vol.id, vol2.id)
        self.assertEqual(vol2.id, vol2.volume_type_id)
        self.assertEqual(self.vol.volume_type.extra_specs,
                         vol2.volume_type.extra_specs)
        self.assertEqual(self.vol.volume_type.qos_specs.specs,
                         vol2.volume_type.qos_specs.specs)

    def test_create_volume_error(self):
        create_mock = self.backend.driver.create_volume_from_snapshot
        create_mock.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.create_volume()
        self.assertEqual(1, len(self.backend._volumes_inflight))
        vol2 = list(self.backend._volumes_inflight.values())[0]
        self.assertEqual(vol2, assert_context.exception.resource)
        create_mock.assert_called_once_with(vol2, self.snap._ovo)
        self.assertEqual('error', vol2.status)
        self.persistence.set_volume.assert_called_once_with(mock.ANY)

    def test_get_by_id(self):
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.return_value = [mock.sentinel.snap]
        res = objects.Snapshot.get_by_id(mock.sentinel.snap_id)
        mock_get_snaps.assert_called_once_with(
            snapshot_id=mock.sentinel.snap_id)
        self.assertEqual(mock.sentinel.snap, res)

    def test_get_by_id_not_found(self):
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.return_value = None
        self.assertRaises(exception.SnapshotNotFound,
                          objects.Snapshot.get_by_id,
                          mock.sentinel.snap_id)
        mock_get_snaps.assert_called_once_with(
            snapshot_id=mock.sentinel.snap_id)

    def test_get_by_name(self):
        res = objects.Snapshot.get_by_name(mock.sentinel.name)
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.assert_called_once_with(
            snapshot_name=mock.sentinel.name)
        self.assertEqual(mock_get_snaps.return_value, res)
47.261438
79
0.696584
935
7,231
5.136898
0.168984
0.074953
0.079117
0.062461
0.550281
0.47512
0.394337
0.320841
0.287945
0.27004
0
0.005383
0.203568
7,231
152
80
47.572368
0.828616
0.083529
0
0.254098
0
0
0.029794
0.006352
0
0
0
0
0.491803
1
0.098361
false
0
0.032787
0
0.139344
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
2b4db1ded88b7c952c097270d8ab5f711e96a0de
627
py
Python
python/tvm/tensor_graph/testing/datasets/mnist.py
QinHan-Erin/AMOS
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
[ "Apache-2.0" ]
22
2022-03-18T07:29:31.000Z
2022-03-23T14:54:32.000Z
python/tvm/tensor_graph/testing/datasets/mnist.py
QinHan-Erin/AMOS
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
[ "Apache-2.0" ]
null
null
null
python/tvm/tensor_graph/testing/datasets/mnist.py
QinHan-Erin/AMOS
634bf48edf4015e4a69a8c32d49b96bce2b5f16f
[ "Apache-2.0" ]
2
2022-03-18T08:26:34.000Z
2022-03-20T06:02:48.000Z
import torch
import torchvision
from torchvision import transforms


def load_mnist_dataset(train_batch_size, test_batch_size=1):
    train_set = torchvision.datasets.MNIST(".", train=True,
                                           transform=transforms.Compose([transforms.ToTensor()]),
                                           download=True)
    test_set = torchvision.datasets.MNIST(".", train=False,
                                          transform=transforms.Compose([transforms.ToTensor()]),
                                          download=True)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=train_batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=test_batch_size, shuffle=False)
    return train_loader, test_loader
52.25
129
0.795853
82
627
5.841463
0.341463
0.112735
0.058455
0.075157
0.584551
0.23382
0.23382
0
0
0
0
0.001757
0.092504
627
11
130
57
0.84007
0
0
0
0
0
0.00319
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.555556
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
2b53d1c33b0ce215b092396a108e1d3388123180
6,028
py
Python
MLE.py
tutan0558/Stevens-FE800-Group16-18Spring
8683fe4e1b7971403d3c5db16ebf5ba5a0f75af4
[ "BSD-2-Clause" ]
null
null
null
MLE.py
tutan0558/Stevens-FE800-Group16-18Spring
8683fe4e1b7971403d3c5db16ebf5ba5a0f75af4
[ "BSD-2-Clause" ]
null
null
null
MLE.py
tutan0558/Stevens-FE800-Group16-18Spring
8683fe4e1b7971403d3c5db16ebf5ba5a0f75af4
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 21 18:22:32 2018

@author: 79127
"""

import numpy as np
from scipy.optimize import minimize, bisect
import math
import pandas as pd
import matplotlib.pyplot as plt

# minimize?  (IPython help lookup)


def LL(params, data):
    '''
    params is a ndarray, [mean, variance, skew, kurt]
    '''
    mean = params[0]
    sig = params[1]
    skew = params[2]
    kurt = params[3]
    x = data
    norm = 1/(np.sqrt(2*np.pi)*sig) * np.exp(-(x-mean)**2 / (2*sig**2))
    H3 = ((x - mean)/sig)**3 - 3*((x - mean)/sig)
    H4 = ((x - mean)/sig)**4 - 6*((x - mean)/sig)**2 + 3
    temp1 = 1 + skew/(6*sig**3)*H3 + (kurt-3)/(24*sig**4)*H4
    temp2 = 1 + skew**2/(6*sig**3) + (kurt-3)**2/(24*sig**4)
    f = norm*temp1**2/temp2
    ll = np.sum(np.log(f))
    return -ll


def LL(params, data):
    # Params
    mean = params[0]
    sig = params[1]
    skew = params[2]
    kurt = params[3]
    # Standardize data
    x = (data - mean) / sig
    # Compose PDF
    norm = 1 / (np.sqrt(2*np.pi)) * np.exp(-x**2 / 2)
    temp1 = 1 + skew/6 * (x**3 - 3*x) + (kurt-3)/24 * (x**4 - 6*x**2 + 3)
    temp2 = 1 + skew**2 / 6 + (kurt-3)**2 / 24
    f = norm * temp1**2 / temp2
    # Log Maximum Likelihood Function
    ll = np.sum(np.log(f))
    return -ll


def MY_GC(x, params):
    mean = params[0]
    sig = params[1]
    skew = params[2]
    kurt = params[3]
    norm = 1 / (np.sqrt(2*np.pi)) * np.exp(-x**2 / 2)
    temp1 = 1 + skew/6 * (x**3 - 3*x) + (kurt-3)/24 * (x**4 - 6*x**2 + 3)
    temp2 = 1 + skew**2 / 6 + (kurt-3)**2 / 24
    f = norm * temp1**2 / temp2
    return f


MY_GC(1, MLE_result)

x = np.linspace(-7, 7, 10000)
plt.figure(figsize=(15, 10))
plt.title('Gram-Charlier With Positive Constraints')
plt.plot(x, MY_GC(x, MLE_result))

SPY100 = pd.read_clipboard()
SPY100.rename(columns={'V1': 'SPY_Positive'}, inplace=True)
SPY100.SPY_Positive = SPY100.SPY_Positive/100
SPY100.plot(secondary_y=('SPY'),)
SPY100.hist(bins=30, figsize=(15, 10))
SPY100.hist(bins=30, figsize=(7, 10))
SPY100.info()
SPY100
SPY100.info()
SPY100Var1 = SPY100Var1.reset_index()
Var_100Day.reset_index(inplace=True)
SPY100 = SPY100.join(SPY100Var1.SPY)
SPY100.info()
SPY100.SPY_Positive = SPY100.SPY_Positive/100

mean, sig, skew, kurt = np.array([0, 1, 1, 4])
LL(np.array([1, 1, 1, 3]), spy)
spy = np.log(data.SPY / data.SPY.shift(1))
x = spy

MLE_result = minimize(LL, x0=np.array([0, 1, 1, 4]), args=spy, method='L-BFGS-B',
                      bounds=((-0.5, 0.5), (0.5, 1.1), (-200, 200), (0, 10)))['x']
minimize(LL, x0=np.array([0, 1, 1, 4]), args=spy, method='L-BFGS-B',
         bounds=((-0.5, 0.5), (0.5, 1.1), (-200, 200), (0, 10)))


def Positive(x, params):
    mean = params[0]
    sig = params[1]
    skew = params[2]
    kurt = params[3]
    norm = 1/(np.sqrt(2*np.pi)*sig) * np.exp(-(x-mean)**2 / (2*sig**2))
    H3 = ((x - mean)/sig)**3 - 3*((x - mean)/sig)
    H4 = ((x - mean)/sig)**4 - 6*((x - mean)/sig)**2 + 3
    temp1 = 1 + skew/(6*sig**3)*H3 + (kurt-3)/(24*sig**4)*H4
    temp2 = 1 + skew**2/(6*sig**3) + (kurt-3)**2/(24*sig**4)
    return norm*temp1**2/temp2


x = 5
mean = MLE_result[0]
sig = MLE_result[1]
skew = MLE_result[2]
kurt = MLE_result[3]
norm = 1/(np.sqrt(2*np.pi)*sig) * np.exp(-(x-mean)**2 / (2*sig**2))
H3 = ((data - mean)/sig)**3 - 3*((data - mean)/sig)
H4 = ((data - mean)/sig)**4 - 6*((data - mean)/sig)**2 + 3
temp1 = 1 + skew/(6*sig**3)*H3 + (kurt-3)/(24*sig**4)*H4
temp2 = 1 + skew**2/(6*sig**3) + (kurt-3)**2/(24*sig**4)
norm*temp1**2/temp2

Positive(1, MLE_result)
x = np.linspace(-0.03, 0.03, 100000)
plt.plot(x, Positive(x, MLE_result))

ss = pd.read_csv('ss.csv')
ss


def integrate(b):
    x = np.linspace(-10, b, 1000000)
    fx = Positive(x, params)
    area = np.sum(fx)*(b+10)/1000000
    return area - 0.05


Positive(2, params)
integrate(2)
params = np.array(ss.iloc[1, 1:5])
bisect(integrate, -20, -2)
params[1]

for i in range(11):
    params = np.array(ss.iloc[i, 1:5])
    print(bisect(integrate, -20, 2))


def LL(params, data):
    # Params
    skew = params[0]
    kurt = params[1]
    # Standardize data
    # Compose PDF
    norm = 1 / (np.sqrt(2*np.pi)) * np.exp(-x**2 / 2)
    temp1 = 1 + skew/6 * (x**3 - 3*x) + (kurt-3)/24 * (x**4 - 6*x**2 + 3)
    temp2 = 1 + skew**2 / 6 + (kurt-3)**2 / 24
    f = norm * temp1**2 / temp2
    # Log Maximum Likelihood Function
    ll = np.sum(np.log(f))
    return -ll


skew, kurt = minimize(LL, x0=np.array([-1, 4]), args=spy, method='L-BFGS-B',
                      bounds=((-200, 200), (0, 10)))['x']
skew, kurt = minimize(LL, x0=np.array([-1, 4]), args=spy[:50], method='L-BFGS-B')['x']


def GC_Positive(x, skew, kurt):
    norm = 1 / (np.sqrt(2*np.pi)) * np.exp(-x**2 / 2)
    temp1 = 1 + skew/6 * (x**3 - 3*x) + (kurt-3)/24 * (x**4 - 6*x**2 + 3)
    temp2 = 1 + skew**2 / 6 + (kurt-3)**2 / 24
    return norm * temp1**2 / temp2


x = np.linspace(-5, 5, 100000)
plt.figure(figsize=(15, 10))
plt.plot(x, GC_Positive(x, skew, kurt))

area = 0
sets = np.linspace(-5, 5, 100000)
i = 0
while abs(area - 0.05) >= 0.001:
    a, b = GC_Positive(sets[i], skew, kurt), GC_Positive(sets[i+1], skew, kurt)
    area += (a+b)*(1/10000)
    i += 1


def GC_VaR(data):
    # Normalize Data
    x = (data - np.mean(data)) / np.std(data)
    # Log-Likelyhood Estimation
    skew, kurt = minimize(LL, x0=np.array([-1, 4]), args=x, method='L-BFGS-B')['x']
    # Compute VaR
    area = 0
    sets = np.linspace(-5, 5, 100000)
    i = 0
    while abs(area - 0.05) >= 0.001:
        a, b = GC_Positive(sets[i], skew, kurt), GC_Positive(sets[i+1], skew, kurt)
        area += (a+b)*(1/10000)
        i += 1
    return a


GC_SPY_50Day = spy.rolling(1900).apply(GC_VaR)
Log_Return = np.log(data/data.shift(1))
GC_SPY_100Day = pd.DataFrame()
for ETF in data.columns:
    GC_SPY_100Day[ETF] =
24.806584
144
0.532349
1,035
6,028
3.063768
0.143961
0.033113
0.020183
0.024283
0.626616
0.548723
0.503627
0.48029
0.48029
0.466099
0
0.121057
0.253152
6,028
243
145
24.806584
0.583296
0.034837
0
0.52027
0
0
0.019485
0
0
0
0
0
0
0
null
null
0
0.027027
null
null
0.006757
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
2b554d39cace94bec033a5b3f6828f64cc59bedf
450
py
Python
setup.py
Nathaniel-Rodriguez/reservoirlib
367261d17fa762375ae40b4cc2ffab1de2113858
[ "MIT" ]
4
2018-07-01T20:17:08.000Z
2020-04-19T06:56:43.000Z
setup.py
Nathaniel-Rodriguez/reservoirlib
367261d17fa762375ae40b4cc2ffab1de2113858
[ "MIT" ]
null
null
null
setup.py
Nathaniel-Rodriguez/reservoirlib
367261d17fa762375ae40b4cc2ffab1de2113858
[ "MIT" ]
5
2019-08-20T02:04:54.000Z
2021-08-16T22:11:38.000Z
from setuptools import setup

setup(name='reservoirlib',
      version='0.1',
      description='Python 3 library that provides utilities for creating and'
                  ' training reservoir computers.',
      author='Nathaniel Rodriguez',
      packages=['reservoirlib'],
      url='https://github.com/Nathaniel-Rodriguez/reservoirlib.git',
      install_requires=[
          'numpy',
          'scipy'
      ],
      include_package_data=True)
30
77
0.633333
44
450
6.409091
0.886364
0.12766
0
0
0
0
0
0
0
0
0
0.008955
0.255556
450
14
78
32.142857
0.832836
0
0
0
0
0
0.44
0
0
0
0
0
0
1
0
true
0
0.076923
0
0.076923
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
2b59f33e04abb7e7a0a838142a38b316dd787aec
2,349
py
Python
Python/Deep Learning/Krish Naik/How to Select how many hidden layer and neurons in a neural network.py
omkarsutar1255/Python-Data
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
[ "CC-BY-3.0" ]
null
null
null
Python/Deep Learning/Krish Naik/How to Select how many hidden layer and neurons in a neural network.py
omkarsutar1255/Python-Data
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
[ "CC-BY-3.0" ]
null
null
null
Python/Deep Learning/Krish Naik/How to Select how many hidden layer and neurons in a neural network.py
omkarsutar1255/Python-Data
169d0c54b23d9dd5a7f1aea41ab385121c3b3c63
[ "CC-BY-3.0" ]
null
null
null
# todo: How to Select how many hidden layer and neurons in a neural network

# Importing the libraries
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Activation

# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
dataset.head()

# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features=[1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV


def create_model(layers, activation):
    model = Sequential()
    for i, nodes in enumerate(layers):
        if i == 0:
            model.add(Dense(nodes, input_dim=X_train.shape[1]))
            model.add(Activation(activation))
        else:
            model.add(Dense(nodes))
            model.add(Activation(activation))
    model.add(Dense(1))  # Note: no activation beyond this point
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model


model = KerasClassifier(build_fn=create_model, verbose=0)
print(model)

layers = [[20], [40, 20], [45, 30, 15]]
activations = ['sigmoid', 'relu']
param_grid = dict(layers=layers, activation=activations, batch_size=[128, 256], epochs=[30])
grid = GridSearchCV(estimator=model, param_grid=param_grid)

grid_result = grid.fit(X_train, y_train)

print([grid_result.best_score_, grid_result.best_params_])

pred_y = grid.predict(X_test)
y_pred = (pred_y > 0.5)
print(y_pred)

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)

from sklearn.metrics import accuracy_score
score = accuracy_score(y_test, y_pred)
print(score)
30.506494
92
0.747126
340
2,349
4.973529
0.382353
0.03903
0.030751
0.026611
0.10408
0.049675
0
0
0
0
0
0.021782
0.14006
2,349
76
93
30.907895
0.815347
0.108983
0
0.038462
0
0
0.029257
0
0
0
0
0.013158
0
1
0.019231
false
0
0.192308
0
0.230769
0.096154
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
2b604002fb5b796426d2078ad7992b9e321f4415
804
py
Python
A3C/agent.py
gungui98/deeprl-a3c-ai2thor
5a2699f0a4bc5fe8cd54b0f38b898e023d163e29
[ "MIT" ]
1
2020-12-14T13:11:24.000Z
2020-12-14T13:11:24.000Z
A3C/agent.py
gungui98/deeprl-a3c-ai2thor
5a2699f0a4bc5fe8cd54b0f38b898e023d163e29
[ "MIT" ]
13
2020-01-28T22:42:44.000Z
2022-03-11T23:47:10.000Z
A3C/agent.py
gungui98/deeprl-a3c-ai2thor
5a2699f0a4bc5fe8cd54b0f38b898e023d163e29
[ "MIT" ]
2
2019-06-26T05:03:12.000Z
2021-03-29T08:26:07.000Z
import numpy as np
from keras.optimizers import RMSprop


class Agent:
    """ Agent Generic Class
    """

    def __init__(self, inp_dim, out_dim, lr, tau=0.001):
        self.inp_dim = inp_dim
        self.out_dim = out_dim
        self.tau = tau
        self.rms_optimizer = RMSprop(lr=lr, epsilon=0.1, rho=0.99)

    def fit(self, inp, targ):
        """ Perform one epoch of training
        """
        self.model.fit(self.reshape(inp), targ, epochs=1, verbose=0)

    def predict(self, inp):
        """ Critic Value Prediction
        """
        return self.model.predict(self.reshape(inp))

    def reshape(self, x):
        if len(x.shape) < 4 and len(self.inp_dim) > 2:
            return np.expand_dims(x, axis=0)
        elif len(x.shape) < 2:
            return np.expand_dims(x, axis=0)
        else:
            return x
28.714286
87
0.599502
122
804
3.836066
0.45082
0.074786
0.064103
0.064103
0.106838
0.106838
0.106838
0.106838
0
0
0
0.02735
0.272388
804
27
88
29.777778
0.77265
0.108209
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.125
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b6092497777272f3c9eff71d04f360c6510a763
12,676
py
Python
genestack_client/scripts/genestack_user_setup.py
genestack/python-client
083eb0508dc99c7575ba7f115595f2535f007583
[ "MIT" ]
2
2017-08-30T22:32:59.000Z
2021-07-20T10:08:23.000Z
genestack_client/scripts/genestack_user_setup.py
genestack/python-client
083eb0508dc99c7575ba7f115595f2535f007583
[ "MIT" ]
58
2015-10-19T08:36:00.000Z
2020-12-07T13:48:17.000Z
genestack_client/scripts/genestack_user_setup.py
genestack/python-client
083eb0508dc99c7575ba7f115595f2535f007583
[ "MIT" ]
6
2015-10-21T21:43:45.000Z
2021-01-06T20:33:53.000Z
#!python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import *
import os
import re
import sys
from argparse import ArgumentParser
from getpass import getpass
from operator import attrgetter

from genestack_client import GenestackAuthenticationException
from genestack_client.genestack_shell import Command, GenestackShell
from genestack_client.settings import DEFAULT_HOST, User, config
from genestack_client.utils import interactive_select


def input_host():
    host = input('host [%s]: ' % DEFAULT_HOST).strip()
    return host or DEFAULT_HOST


def validate_alias(alias):
    expression = re.compile('[a-zA-Z0-9_@\-]+$')
    return bool(alias and expression.match(alias))


def input_alias(existing):
    print('Please input alias. (Alias can contain: letters (a-z, A-Z), '
          'digits (0-9), at-sign (@), underscore (_), hyphen (-))')
    while True:
        alias = input('alias: ').strip()
        if not alias:
            print('Alias cannot be empty')
            continue
        if not validate_alias(alias):
            print('Restricted symbols message')
            continue
        if alias in existing:
            print('Alias must be unique')
            continue
        return alias


def create_user_from_input(host, alias):
    """
    Ask credentials interactively and return user that can login to platform.

    :param host: server host
    :type host: basestring
    :param alias: user alias
    :type alias: basestring
    :return: user
    :rtype: User
    """
    by_token = 'by token'
    items = [by_token, 'by email and password']
    use_token = interactive_select(items, 'Select authentication') == by_token
    if use_token:
        return create_user_from_token(host, alias=alias)
    else:
        return create_user_from_input_email_and_password(host, alias=alias)


def create_user_from_input_email_and_password(host, alias=None):
    """
    Ask email and password, check that it is possible to login with
    these credentials, and return user.

    :param host: server host
    :type host: basestring
    :param alias: user alias
    :type alias: basestring
    :return: user
    :rtype: User
    """
    print('Specify email and password for host: "%s"' % host, end=' ')
    if alias:
        print('and alias: "%s"' % alias)
    else:
        print()
    user_login = None
    while True:
        if user_login:
            res = input('Please specify your user login (email) [%s]: ' % user_login).strip()
            if res:
                user_login = res
        else:
            user_login = input('Please specify your user login (email): ').strip()
            if not user_login:
                print('Login cannot be empty')
                continue
        user_password = getpass('Please specify your password for %s: ' % user_login)
        if not user_password:
            print('Password cannot be empty')
            continue
        if not user_login or not user_password:
            print()
            continue
        user = User(user_login, host=host, password=user_password, alias=alias)
        try:
            user.get_connection()
            break
        except GenestackAuthenticationException:
            print('Your username or password was incorrect, please try again')
    return user


def create_user_from_token(host, alias=None):
    print('Host: %s' % host)
    msg = 'Please specify Genestack API token%s: '
    with_alias = '' if not alias else ' for "%s"' % alias
    msg = msg % with_alias
    while True:
        token = getpass(msg)
        if not token:
            print('Token cannot be empty')
            continue
        user = User(email=None, host=host, password=None, alias=alias, token=token)
        try:
            user.get_connection()
            break
        except GenestackAuthenticationException:
            print('Could not login with given token, please try again')
    return user


def check_config():
    config_path = config.get_settings_file()
    if not os.path.exists(config_path):
        print('You do not seem to have a config file yet. '
              'Please run `genestack-user-setup init`. Exiting')
        exit(1)


class AddUser(Command):
    COMMAND = 'add'
    DESCRIPTION = 'Add new user.'
    OFFLINE = True

    def run(self):
        alias = input_alias(config.users.keys())
        host = input_host()
        user = create_user_from_input(host, alias)
        config.add_user(user)
        print('User "%s" has been created' % user.alias)


def select_user(users, selected=None):
    """
    Choose user from users stored in config.

    :param users:
    :param selected:
    :return:
    :rtype: User
    """
    user_list = sorted(users.values(), key=lambda x: x.alias)
    return interactive_select(user_list, 'Select user',
                              to_string=attrgetter('alias'), selected=selected)


class ChangePassword(Command):
    COMMAND = 'change-password'
    DESCRIPTION = 'Change password for user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>',
                            help='Alias for user to change password', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users)
        if not user.email:
            print('User without email could be authorized only by token')
            return
        while True:
            user.password = getpass('Input password for %s: ' % user.alias.encode('utf-8'))
            try:
                user.get_connection()
                break
            except GenestackAuthenticationException:
                continue
        config.change_password(user.alias, user.password)
        print('Password has been changed successfully')


class ChangeToken(Command):
    COMMAND = 'change-token'
    DESCRIPTION = 'Change token for user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>',
                            help='Alias for user to change token for', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users)
        new_user = create_user_from_token(user.host, alias=user.alias)
        user.token = new_user.token
        config.change_token(user.alias, user.token)
        print('Token has been changed successfully')


class SetDefault(Command):
    COMMAND = 'default'
    DESCRIPTION = 'Set default user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>',
                            help='Alias for user to change password', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users, selected=config.default_user)
        if user.alias != config.default_user.alias:
            config.set_default_user(user)
            print('Default user has been set to "%s"' % user.alias)
        else:
            print('Default user has not been changed')


class Remove(Command):
    COMMAND = 'remove'
    DESCRIPTION = 'Remove user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>',
                            help='Alias for user to change password', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            user = select_user(users)
        if user.alias == config.default_user.alias:
            print('Cannot delete default user')
            return
        config.remove_user(user)
        print('"%s" has been removed from config' % user.alias)


class RenameUser(Command):
    COMMAND = 'rename'
    DESCRIPTION = 'Rename user.'
    OFFLINE = True

    def update_parser(self, parent):
        parent.add_argument('alias', metavar='<alias>', help='Alias to be renamed', nargs='?')
        parent.add_argument('new_alias', metavar='<new_alias>', help='New alias', nargs='?')

    def run(self):
        check_config()
        users = config.users
        user = users.get(self.args.alias)
        if not user:
            print('Select user to rename')
            user = select_user(users)
        if not self.args.new_alias or not validate_alias(self.args.new_alias):
            print('Enter new alias')
            new_alias = input_alias(users.keys())
        else:
            new_alias = self.args.new_alias
        new_user = User(email=user.email, alias=new_alias, host=user.host,
                        password=user.password, token=user.token)
        config.add_user(new_user, save=False)
        if user.alias == config.default_user.alias:
            config.set_default_user(new_user, save=False)
        config.remove_user(user)
        print('"%s" alias changed to "%s"' % (user.alias, new_user.alias))


class List(Command):
    COMMAND = 'list'
    DESCRIPTION = 'List all users.'
    OFFLINE = True

    def run(self):
        check_config()
        users = sorted(config.users.items())
        default_user_alias = config.default_user and config.default_user.alias
        for key, user in users:
            print()
            print('%s%s:' % (key, ' (default)' if default_user_alias == key else ''))
            print(' %-10s%s' % ('email', user.email))
            print(' %-10s%s' % ('host', user.host))


class Path(Command):
    COMMAND = 'path'
    DESCRIPTION = 'Show path to configuration file.'
    OFFLINE = True

    def run(self):
        print(config.get_settings_file())


class Init(Command):
    COMMAND = 'init'
    DESCRIPTION = 'Create default settings.'
    OFFLINE = True

    def get_command_parser(self, parser=None):
        parser = parser or ArgumentParser(description=self.DESCRIPTION)
        parser.description = self.DESCRIPTION
        group = parser.add_argument_group('command arguments')
        self.update_parser(group)
        group.add_argument('-H', '--host', default=DEFAULT_HOST,
                           help='Genestack host, '
                                'change it to connect somewhere else than %s' % DEFAULT_HOST,
                           metavar='<host>')
        return parser

    def run(self):
        """
        Create config file if it is not present.

        Catching ``KeyboardInterrupt`` and ``EOFError`` is required here
        for the case when this command is run for the first time and in shell mode.
        If we don't quit here, the shell will continue execution and ask for
        credentials once more.
        """
        # Hardcoded alias that created for the first user only.
        # Normal usecase is when user have single account and don't care about alias name.
        # Advanced users can rename alias.
        default_alias = 'Default'
        try:
            config_path = config.get_settings_file()
            if os.path.exists(config_path):
                print('A config file already exists at %s' % config_path)
                return
            print('If you do not have a Genestack account, you need to create one first')
            user = create_user_from_input(self.args.host, default_alias)
            config.add_user(user)  # adding first user make him default.
            print('Config file at "%s" has been created successfully' % config_path)
        except (KeyboardInterrupt, EOFError):
            sys.stdout.flush()
            sys.stderr.write('\nError: Init is not finished\n')
            exit(1)


class UserManagement(GenestackShell):
    DESCRIPTION = 'Genestack user management application.'
    COMMAND_LIST = [
        Init, List, AddUser, SetDefault, ChangePassword, ChangeToken, Path,
        Remove, RenameUser
    ]
    intro = "User setup shell.\nType 'help' for list of available commands.\n\n"
    prompt = 'user_setup> '

    def set_shell_user(self, args):
        config_path = config.get_settings_file()
        if not os.path.exists(config_path):
            print('No config file was found, creating one interactively')
            self.process_command(Init(), ['--host', args.host or DEFAULT_HOST], False)
            args.host = None  # do not provide host for future use of arguments


def main():
    shell = UserManagement()
    shell.cmdloop()


if __name__ == '__main__':
    main()
31.376238
108
0.620069
1,535
12,676
4.991531
0.170033
0.023493
0.010572
0.014096
0.32746
0.283738
0.23597
0.209997
0.192247
0.180762
0
0.001316
0.28053
12,676
403
109
31.454094
0.838816
0.083465
0
0.352941
0
0
0.185835
0.001834
0
0
0
0
0
1
0.086505
false
0.083045
0.058824
0
0.32872
0.124567
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2b619974b791b3dc319a188eb0b8ea38e14618c6
386
py
Python
build_scripts/install_deps.py
kamranrad1993/ngfx
eeef60e3419a88371a97e8bc3109d2b35b82cc89
[ "Apache-2.0", "MIT-0", "MIT" ]
12
2021-04-03T16:50:22.000Z
2022-03-18T07:14:14.000Z
build_scripts/install_deps.py
kamranrad1993/ngfx
eeef60e3419a88371a97e8bc3109d2b35b82cc89
[ "Apache-2.0", "MIT-0", "MIT" ]
6
2021-05-06T21:02:19.000Z
2022-02-14T11:57:27.000Z
build_scripts/install_deps.py
kamranrad1993/ngfx
eeef60e3419a88371a97e8bc3109d2b35b82cc89
[ "Apache-2.0", "MIT-0", "MIT" ]
5
2021-06-11T20:15:37.000Z
2022-03-18T07:14:21.000Z
from common import *

os.chdir(EXTERNAL_DIR)

PKGS = env('PKGS', 'all')

ninja_template = read_file(f'{SCRIPT_DIR}/install_deps_{OS_LOWER_CASE}.ninja.in')
write_file(f'install_deps_{OS_LOWER_CASE}.ninja', ninja_template.format(**ENV_PARAMS))
for key, val in ENV_PARAMS.items():
    print(f'{key} = {val}')
shell(f'ninja -v -j 1 -f install_deps_{OS_LOWER_CASE}.ninja {PKGS}')
32.166667
87
0.715026
64
386
4
0.515625
0.128906
0.152344
0.210938
0.324219
0.324219
0.21875
0
0
0
0
0.00295
0.121762
386
11
88
35.090909
0.752212
0
0
0
0
0
0.432
0.314667
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.125
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b632497a6ea6fdf0bb92e774fc99e0e6b5fa507
957
py
Python
en16931/tests/test_tax.py
invinet/python-en16931
f6671f86e8d578c3c82a48134426f89ec13b160c
[ "Apache-2.0" ]
9
2018-07-09T10:34:27.000Z
2021-10-13T20:11:04.000Z
en16931/tests/test_tax.py
invinet/python-en16931
f6671f86e8d578c3c82a48134426f89ec13b160c
[ "Apache-2.0" ]
null
null
null
en16931/tests/test_tax.py
invinet/python-en16931
f6671f86e8d578c3c82a48134426f89ec13b160c
[ "Apache-2.0" ]
1
2022-02-07T15:30:53.000Z
2022-02-07T15:30:53.000Z
import pytest

# Hashable lives in collections.abc; importing it from collections was
# deprecated and removed in Python 3.10.
from collections.abc import Hashable

from en16931.tax import Tax


class TestTaxes:

    def test_initialization(self):
        t = Tax(0.21, "S", "IVA")
        assert t

    def test_hashable(self):
        t = Tax(0.21, "S", "IVA")
        assert isinstance(t, Hashable)

    def test_percent_less_than_one(self):
        t = Tax(0.21, "S", "IVA")
        assert t.percent == 0.21

    def test_percent_more_than_one(self):
        t = Tax(21, "S", "IVA")
        assert t.percent == 0.21

    def test_percent_string(self):
        t = Tax("21", "S", "IVA")
        assert t.percent == 0.21

    def test_cmp_with_None(self):
        t = Tax("21", "S", "IVA")
        assert not (t == None)

    def test_value_error_bad_percent(self):
        with pytest.raises(ValueError):
            t = Tax("asdf", "S", "IVA")

    def test_value_error_bad_category(self):
        with pytest.raises(ValueError):
            t = Tax("21", "asd", "IVA")
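A minimal usage sketch of the behaviour these tests pin down (assuming the en16931 package is installed; per the tests above, Tax normalizes percentages given as numbers or strings above 1 down to a fraction):

from en16931.tax import Tax

tax = Tax(21, "S", "IVA")
print(tax.percent)                     # 0.21 (integer input normalized to a fraction)
print(Tax("21", "S", "IVA").percent)   # 0.21 (string input parsed the same way)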
23.925
44
0.572623
136
957
3.860294
0.279412
0.106667
0.091429
0.137143
0.598095
0.495238
0.495238
0.327619
0.287619
0.228571
0
0.045322
0.285266
957
39
45
24.538462
0.722222
0
0
0.357143
0
0
0.045977
0
0
0
0
0
0.214286
1
0.285714
false
0
0.107143
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b6800c9e35b30d919c5fb6ac7e188c956250fd2
1,338
py
Python
qgisserver/migrations/0012_auto_20190305_1011.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
5
2018-06-07T12:54:35.000Z
2022-01-14T10:38:38.000Z
qgisserver/migrations/0012_auto_20190305_1011.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
140
2018-06-18T10:27:28.000Z
2022-03-23T09:53:15.000Z
qgisserver/migrations/0012_auto_20190305_1011.py
aroiginfraplan/giscube-admin
b7f3131b0186f847f3902df97f982cb288b16a49
[ "BSD-3-Clause" ]
1
2021-04-13T11:20:54.000Z
2021-04-13T11:20:54.000Z
# Generated by Django 2.1.7 on 2019-03-05 10:11

from django.db import migrations, models

import qgisserver.models


class Migration(migrations.Migration):

    dependencies = [
        ('qgisserver', '0011_auto_20180803_0824'),
    ]

    operations = [
        migrations.AlterField(
            model_name='service',
            name='visibility',
            field=models.CharField(choices=[('private', 'Private'), ('public', 'Public')], default='private', max_length=10),
        ),
        migrations.AlterField(
            model_name='service',
            name='wms_buffer_enabled',
            field=models.BooleanField(default=False, verbose_name='buffer enabled'),
        ),
        migrations.AlterField(
            model_name='service',
            name='wms_buffer_size',
            field=models.CharField(blank=True, help_text='Buffer around tiles, e.g. 64,64', max_length=12, null=True, validators=[qgisserver.models.validate_integer_pair], verbose_name='buffer size'),
        ),
        migrations.AlterField(
            model_name='service',
            name='wms_tile_sizes',
            field=models.TextField(blank=True, help_text='Integer pairs in different lines e.g.<br/>256,256<br/>512,512', null=True, validators=[qgisserver.models.validate_integer_pair_list], verbose_name='tile sizes'),
        ),
    ]
38.228571
219
0.637519
150
1,338
5.52
0.48
0.096618
0.120773
0.140097
0.346618
0.346618
0.298309
0.246377
0
0
0
0.049659
0.232436
1,338
34
220
39.352941
0.756573
0.033632
0
0.428571
1
0
0.215337
0.039504
0
0
0
0
0
1
0
false
0
0.071429
0
0.178571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b6933c7fbf6d07d10feccd70de52addfefb38ae
658
py
Python
python/tests/structural/test_decorator.py
harkhuang/designpatterns
dfd6623976410882753913498158dcb0ea70c1d2
[ "Apache-2.0" ]
null
null
null
python/tests/structural/test_decorator.py
harkhuang/designpatterns
dfd6623976410882753913498158dcb0ea70c1d2
[ "Apache-2.0" ]
null
null
null
python/tests/structural/test_decorator.py
harkhuang/designpatterns
dfd6623976410882753913498158dcb0ea70c1d2
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest

from patterns.structural.decorator import TextTag, BoldWrapper, ItalicWrapper


class TestTextWrapping(unittest.TestCase):
    def setUp(self):
        self.raw_string = TextTag('raw but not cruel')

    def test_italic(self):
        self.assertEqual(ItalicWrapper(self.raw_string).render(),
                         '<i>raw but not cruel</i>')

    def test_bold(self):
        self.assertEqual(BoldWrapper(self.raw_string).render(),
                         '<b>raw but not cruel</b>')

    def test_mixed_bold_and_italic(self):
        self.assertEqual(BoldWrapper(ItalicWrapper(self.raw_string)).render(),
                         '<b><i>raw but not cruel</i></b>')
34.631579
113
0.700608
89
658
5.067416
0.404494
0.070953
0.115299
0.124169
0.259424
0.070953
0
0
0
0
0
0.001789
0.150456
658
18
114
36.555556
0.805009
0.06383
0
0
0
0
0.156352
0
0
0
0
0
0.272727
1
0.363636
false
0
0.181818
0
0.636364
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
2b6da2f0c9b5f0cefbd0075526e9eb021fc084e9
2,092
py
Python
dosu/utils.py
tsandrini/DoSU
312afb1c1dccaf5088b8d5818adf08d5406076ae
[ "MIT" ]
null
null
null
dosu/utils.py
tsandrini/DoSU
312afb1c1dccaf5088b8d5818adf08d5406076ae
[ "MIT" ]
2
2017-02-07T08:23:34.000Z
2017-09-09T08:46:25.000Z
dosu/utils.py
tsandrini/dosu
312afb1c1dccaf5088b8d5818adf08d5406076ae
[ "MIT" ]
null
null
null
""" __/\\\\\\\\\\\\______________________/\\\\\\\\\\\____/\\\________/\\\_ _\/\\\////////\\\__________________/\\\/////////\\\_\/\\\_______\/\\\_ _\/\\\______\//\\\________________\//\\\______\///__\/\\\_______\/\\\_ _\/\\\_______\/\\\_____/\\\\\______\////\\\_________\/\\\_______\/\\\_ _\/\\\_______\/\\\___/\\\///\\\_______\////\\\______\/\\\_______\/\\\_ _\/\\\_______\/\\\__/\\\__\//\\\_________\////\\\___\/\\\_______\/\\\_ _\/\\\_______/\\\__\//\\\__/\\\___/\\\______\//\\\__\//\\\______/\\\__ _\/\\\\\\\\\\\\/____\///\\\\\/___\///\\\\\\\\\\\/____\///\\\\\\\\\/___ _\////////////________\/////_______\///////////________\/////////_____ Created by Tomáš Sandrini """ import yaml import os from .settings import HOME class Config: config_paths = ( HOME + '/.config/dosu/config.yml', HOME + '/.dosu.yml', ) def __init__(self): self.config = self.load_raw_config() self.subjects = self.load_subjects() def load_raw_config(self): for config_path in self.config_paths: try: with open(config_path, 'r') as ymlfile: return yaml.load(ymlfile) except IOError as e: continue else: return None def load_subjects(self): if not self.config: return None base = self.get('general.root_dir') if not base: print ("DoSU root dir is not defined in config file") sys.exit(2) return set([name for name in os.listdir(base) if os.path.isdir(base + '/' + name)]) def get(self, key, fallback=None): """ Gets a cached value by its key using dotted notation """ try: tmp = self.config for fragment in key.split('.'): tmp = tmp[fragment] return tmp except KeyError as e: return fallback if fallback != None else key def load_file(path): with open(path, 'r') as f: data = f.read() return data config = Config()
27.168831
91
0.498566
175
2,092
4.274286
0.411429
0.053476
0.034759
0.045455
0
0
0
0
0
0
0
0.000653
0.268164
2,092
76
92
27.526316
0.487916
0.32696
0
0.097561
0
0
0.074216
0.018363
0
0
0
0
0
1
0.121951
false
0
0.073171
0
0.414634
0.02439
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
2b6dbaaeee834ce10b296c5ec51a9014d0fbab68
2,113
py
Python
mod/bad_images.py
lokal-profil/isfdb_site
0ce20d6347849926d4eda961ea9249c31519eea5
[ "BSD-3-Clause" ]
null
null
null
mod/bad_images.py
lokal-profil/isfdb_site
0ce20d6347849926d4eda961ea9249c31519eea5
[ "BSD-3-Clause" ]
null
null
null
mod/bad_images.py
lokal-profil/isfdb_site
0ce20d6347849926d4eda961ea9249c31519eea5
[ "BSD-3-Clause" ]
null
null
null
#!_PYTHONLOC
#
#     (C) COPYRIGHT 2014-2021   Ahasuerus
#     ALL RIGHTS RESERVED
#
#     The copyright notice above does not evidence any actual or
#     intended publication of such source code.
#
#     Version: $Revision$
#     Date: $Date$

# NOTE: legacy Python 2 CGI script (print statements, not print())

from isfdb import *
from common import *
from isfdblib import *
from SQLparsing import *
from library import *


def PrintTableHeaders():
    print '<table class="generic_table">'
    print '<tr class="generic_table_header">'
    for column in ('#', 'Publication', 'Suspect URL', 'Click Once Resolved'):
        print '<th>%s</th>' % column
    print '</tr>'


def PrintPubRecord(count, pub_id, url, pub_title, bgcolor):
    if bgcolor:
        print '<tr align=left class="table1">'
    else:
        print '<tr align=left class="table2">'
    print '<td>%d</td>' % (count)
    print '<td>%s</td>' % ISFDBLink('pl.cgi', pub_id, pub_title)
    print '<td>%s</td>' % (url)
    print '<td>%s</td>' % ISFDBLink('mod/resolve_bad_url.cgi', pub_id, 'Click Once Resolved')
    print '</tr>'


if __name__ == '__main__':
    PrintPreMod('Publications with Suspect Images')
    PrintNavBar()

    query = """select bad_images.pub_id, bad_images.image_url, pubs.pub_title
               from bad_images, pubs
               where pubs.pub_id=bad_images.pub_id
               order by pubs.pub_title"""
    db.query(query)
    result = db.store_result()
    num = result.num_rows()
    if num:
        PrintTableHeaders()
        record = result.fetch_row()
        bgcolor = 1
        count = 1
        while record:
            pub_id = record[0][0]
            url = record[0][1]
            pub_title = record[0][2]
            PrintPubRecord(count, pub_id, url, pub_title, bgcolor)
            record = result.fetch_row()
            bgcolor ^= 1
            count += 1
        print '</table>'
    else:
        print '<h2>No publications with bad images found</h2>'
    PrintPostMod(0)
30.185714
97
0.54567
242
2,113
4.61157
0.421488
0.035842
0.021505
0.026882
0.207885
0.136201
0.136201
0.136201
0
0
0
0.01637
0.335069
2,113
69
98
30.623188
0.777936
0.10885
0
0.125
0
0
0.300587
0.066204
0
0
0
0
0
0
null
null
0
0.104167
null
null
0.270833
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
2b7661631e26d7c909ec01b827d25a4ce6c56fb8
1,096
py
Python
nabu/postprocessing/reconstructors/weighted_kmeans.py
Darleen2019/Nabu-MSSS
5e862cbf846d45b8a317f87588533f3fde9f0726
[ "MIT" ]
18
2017-10-16T13:12:46.000Z
2022-02-15T01:20:00.000Z
nabu/postprocessing/reconstructors/weighted_kmeans.py
Darleen2019/Nabu-MSSS
5e862cbf846d45b8a317f87588533f3fde9f0726
[ "MIT" ]
null
null
null
nabu/postprocessing/reconstructors/weighted_kmeans.py
Darleen2019/Nabu-MSSS
5e862cbf846d45b8a317f87588533f3fde9f0726
[ "MIT" ]
9
2017-10-03T18:10:10.000Z
2020-11-13T08:26:31.000Z
# Based on: https://towardsdatascience.com/clustering-the-us-population-observation-weighted-k-means-f4d58b370002

import random
import numpy as np
import scipy.spatial


def distance(p1, p2):
    # Fixed: np.linalg.norm(p1, p2) passes p2 as the `ord` argument;
    # the Euclidean distance is the norm of the difference vector.
    return np.linalg.norm(p1 - p2)


def cluster_centroids(data, weights, clusters, k):
    results = []
    for i in range(k):
        results.append(
            np.average(data[clusters == i], weights=weights[clusters == i], axis=0))
    return np.array(results)


def kmeans(data, weights, k, steps=20):
    if np.shape(data)[0] != np.shape(weights)[0]:
        print("Dimension data and weights don't match")
    # Forgy initialization method: choose k data points randomly.
    centroids = data[np.random.choice(np.arange(len(data)), k, False)]
    for _ in range(max(steps, 1)):
        sqdists = scipy.spatial.distance.cdist(centroids, data, 'euclidean')
        # Index of the closest centroid to each data point.
        clusters = np.argmin(sqdists, axis=0)
        new_centroids = cluster_centroids(data, weights, clusters, k)
        if np.array_equal(new_centroids, centroids):
            break
        centroids = new_centroids
    return clusters, centroids
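A minimal driver for the weighted k-means above (a sketch, assuming the kmeans function from this record is in scope; with random initialization an empty cluster is possible, in which case np.average would fail):

import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 2))            # 100 points in 2-D
weights = rng.uniform(0.5, 2.0, size=100)   # per-point observation weights

clusters, centroids = kmeans(data, weights, k=3)
print(centroids.shape)   # (3, 2): one weighted centroid per cluster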
29.621622
113
0.720803
157
1,096
4.987261
0.509554
0.066411
0.051086
0.068966
0.091954
0.091954
0
0
0
0
0
0.021575
0.154197
1,096
36
114
30.444444
0.823085
0.201642
0
0
0
0
0.053961
0
0
0
0
0
0
0
null
null
0
0.136364
null
null
0.045455
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
2b7ab663452a80c4be262712173532cb7f06dc77
2,339
py
Python
Web.PY/client-post.py
Phoebus-Ma/Python-Helper
d880729f0bbfbc2b1503602fd74c9177ecd4e970
[ "MIT" ]
null
null
null
Web.PY/client-post.py
Phoebus-Ma/Python-Helper
d880729f0bbfbc2b1503602fd74c9177ecd4e970
[ "MIT" ]
null
null
null
Web.PY/client-post.py
Phoebus-Ma/Python-Helper
d880729f0bbfbc2b1503602fd74c9177ecd4e970
[ "MIT" ]
null
null
null
###
# Python http post example.
#
# License - MIT.
###

import os

# pip install requests.
import requests

# pip install lxml
# pip install beautifulsoup4
from bs4 import BeautifulSoup


# login github class.
class login_github():
    # {
    # Initialization function.
    def __init__(self):
        # {
        # Chromium core browser user agent.
        self._headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36'
        }

        self._login_page = 'https://github.com/login'
        self._session_page = 'https://github.com/session'

        self._session = requests.Session()
        # }

    # Close session.
    def close(self):
        # {
        self._session.close()
        # }

    # Get html data.
    def datas(self, url_path):
        # {
        datas = self._session.get(url=url_path, headers=self._headers)
        return datas
        # }

    # Http Get.
    def get(self):
        # {
        html = requests.get(url=self._login_page, headers=self._headers)
        soup = BeautifulSoup(html.text, 'lxml')
        tokens = soup.find_all('input', type="hidden")[1]
        attrs = tokens.attrs['value']
        return attrs
        # }

    # Http Post.
    def post(self, Username, Password):
        # {
        data = {
            'commit': 'Sign in',
            'utf8': ' ✓',
            'authenticity_token': self.get(),
            'login': Username,
            'password': Password,
            'webauthn-support': ' supported'
        }

        # Post.
        res = self._session.post(
            url=self._session_page,
            data=data,
            headers=self._headers
        )

        print(res.status_code)
        # }
    # }


# Main function.
def main():
    # {
    test_page = 'https://github.com/torvalds/linux'

    print('Login Github !')
    Username = input('Username or email address: ')
    Password = input('Password: ')

    login = login_github()

    # login.
    login.post(Username, Password)

    # get data.
    datas = login.datas(test_page)
    with open('test.html', 'wb') as fd:
        fd.write(datas.content)

    # close.
    login.close()
    # }


# Program entry.
if '__main__' == __name__:
    main()
20.163793
128
0.535271
246
2,339
4.943089
0.430894
0.054276
0.037007
0.044408
0
0
0
0
0
0
0
0.020566
0.334758
2,339
115
129
20.33913
0.760283
0.138948
0
0
0
0.019608
0.188956
0
0
0
0
0
0
1
0.117647
false
0.078431
0.058824
0
0.235294
0.039216
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2b7b4342609278d1a046c18ba64c0b30f5c732af
7,772
py
Python
wqxlib/wqx_v3_0/BiologicalHabitatCollectionInformation.py
FlippingBinary/wqxlib-python
5aa1d41384928f1faca47d5984485e2efa93174c
[ "MIT" ]
null
null
null
wqxlib/wqx_v3_0/BiologicalHabitatCollectionInformation.py
FlippingBinary/wqxlib-python
5aa1d41384928f1faca47d5984485e2efa93174c
[ "MIT" ]
null
null
null
wqxlib/wqx_v3_0/BiologicalHabitatCollectionInformation.py
FlippingBinary/wqxlib-python
5aa1d41384928f1faca47d5984485e2efa93174c
[ "MIT" ]
null
null
null
from yattag import Doc

from .CollectionEffort import CollectionEffort
from .MeasureCompact import MeasureCompact
from .NetInformation import NetInformation
from .SimpleContent import CollectionDescriptionText, PassCount


class BiologicalHabitatCollectionInformation:
    """
    Allows for the reporting of biological habitat sample collection information.
    """

    __collectionDuration: MeasureCompact
    __collectionArea: MeasureCompact
    __collectionEffort: CollectionEffort
    __reachLengthMeasure: MeasureCompact
    __reachWidthMeasure: MeasureCompact
    __collectionDescriptionText: CollectionDescriptionText
    __passCount: PassCount
    __netInformation: NetInformation

    def __init__(
        self,
        o: dict = None,
        *,
        collectionDuration: MeasureCompact = None,
        collectionArea: MeasureCompact = None,
        collectionEffort: CollectionEffort = None,
        reachLengthMeasure: MeasureCompact = None,
        reachWidthMeasure: MeasureCompact = None,
        collectionDescriptionText: CollectionDescriptionText = None,
        passCount: PassCount = None,
        netInformation: NetInformation = None
    ):
        if isinstance(o, BiologicalHabitatCollectionInformation):
            # Assign attributes from objects without typechecking
            self.__collectionDuration = o.collectionDuration
            self.__collectionArea = o.collectionArea
            self.__collectionEffort = o.collectionEffort
            self.__reachLengthMeasure = o.reachLengthMeasure
            self.__reachWidthMeasure = o.reachWidthMeasure
            self.__collectionDescriptionText = o.collectionDescriptionText
            self.__passCount = o.passCount
            self.__netInformation = o.netInformation
        elif isinstance(o, dict):
            # Assign attributes from dictionary with typechecking
            self.collectionDuration = o.get("collectionDuration")
            self.collectionArea = o.get("collectionArea")
            self.collectionEffort = o.get("collectionEffort")
            self.reachLengthMeasure = o.get("reachLengthMeasure")
            self.reachWidthMeasure = o.get("reachWidthMeasure")
            self.collectionDescriptionText = o.get("collectionDescriptionText")
            self.passCount = o.get("passCount")
            self.netInformation = o.get("netInformation")
        else:
            # Assign attributes from named keywords with typechecking
            self.collectionDuration = collectionDuration
            self.collectionArea = collectionArea
            self.collectionEffort = collectionEffort
            self.reachLengthMeasure = reachLengthMeasure
            self.reachWidthMeasure = reachWidthMeasure
            self.collectionDescriptionText = collectionDescriptionText
            self.passCount = passCount
            self.netInformation = netInformation

    @property
    def collectionDuration(self) -> MeasureCompact:
        """
        The length of time a collection procedure or protocol was performed (e.g.
        total energized time for electrofishing, or total time kick net used).
        """
        return self.__collectionDuration

    @collectionDuration.setter
    def collectionDuration(self, val: MeasureCompact) -> None:
        """
        The length of time a collection procedure or protocol was performed (e.g.
        total energized time for electrofishing, or total time kick net used).
        """
        self.__collectionDuration = None if val is None else MeasureCompact(val)

    @property
    def collectionArea(self) -> MeasureCompact:
        """
        The area of a collection procedure or protocol was performed (e.g. total
        area coverage for electrofishing, or total area kick net used).
        """
        return self.__collectionArea

    @collectionArea.setter
    def collectionArea(self, val: MeasureCompact) -> None:
        """
        The area of a collection procedure or protocol was performed (e.g. total
        area coverage for electrofishing, or total area kick net used).
        """
        self.__collectionArea = None if val is None else MeasureCompact(val)

    @property
    def collectionEffort(self) -> CollectionEffort:
        return self.__collectionEffort

    @collectionEffort.setter
    def collectionEffort(self, val: CollectionEffort) -> None:
        self.__collectionEffort = None if val is None else CollectionEffort(val)

    @property
    def reachLengthMeasure(self) -> MeasureCompact:
        """
        A measurement of the water body length distance in which the procedure or
        protocol was performed.
        """
        return self.__reachLengthMeasure

    @reachLengthMeasure.setter
    def reachLengthMeasure(self, val: MeasureCompact) -> None:
        """
        A measurement of the water body length distance in which the procedure or
        protocol was performed.
        """
        self.__reachLengthMeasure = None if val is None else MeasureCompact(val)

    @property
    def reachWidthMeasure(self) -> MeasureCompact:
        """
        A measurement of the reach width during collection procedures.
        """
        return self.__reachWidthMeasure

    @reachWidthMeasure.setter
    def reachWidthMeasure(self, val: MeasureCompact) -> None:
        """
        A measurement of the reach width during collection procedures.
        """
        self.__reachWidthMeasure = None if val is None else MeasureCompact(val)

    @property
    def collectionDescriptionText(self) -> CollectionDescriptionText:
        return self.__collectionDescriptionText

    @collectionDescriptionText.setter
    def collectionDescriptionText(self, val: CollectionDescriptionText) -> None:
        self.__collectionDescriptionText = (
            None if val is None else CollectionDescriptionText(val)
        )

    @property
    def passCount(self) -> PassCount:
        return self.__passCount

    @passCount.setter
    def passCount(self, val: PassCount) -> None:
        self.__passCount = None if val is None else PassCount(val)

    @property
    def netInformation(self) -> NetInformation:
        return self.__netInformation

    @netInformation.setter
    def netInformation(self, val: NetInformation) -> None:
        self.__netInformation = None if val is None else NetInformation(val)

    def generateXML(self, name: str = "BiologicalHabitatCollectionInformation") -> str:
        doc = Doc()
        asis = doc.asis
        line = doc.line
        tag = doc.tag

        with tag(name):
            if self.__collectionDuration is not None:
                asis(self.__collectionDuration.generateXML("CollectionDuration"))
            if self.__collectionArea is not None:
                asis(self.__collectionArea.generateXML("CollectionArea"))
            if self.__collectionEffort is not None:
                asis(self.__collectionEffort.generateXML("CollectionEffort"))
            if self.__reachLengthMeasure is not None:
                asis(self.__reachLengthMeasure.generateXML("ReachLengthMeasure"))
            if self.__reachWidthMeasure is not None:
                asis(self.__reachWidthMeasure.generateXML("ReachWidthMeasure"))
            if self.__collectionDescriptionText is not None:
                line("CollectionDescriptionText", self.__collectionDescriptionText)
            if self.__passCount is not None:
                line("PassCount", self.__passCount)
            if self.__netInformation is not None:
                asis(self.__netInformation.generateXML("NetInformation"))

        return doc.getvalue()
41.340426
88
0.665208
665
7,772
7.62406
0.130827
0.010651
0.014201
0.017357
0.244576
0.210651
0.18856
0.18856
0.178698
0.157791
0
0
0.270458
7,772
187
89
41.561497
0.89418
0.144879
0
0.062992
0
0
0.048317
0.014173
0
0
0
0
0
1
0.141732
false
0.102362
0.03937
0.031496
0.322835
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2b7b76037a4e898e16254c3e5645a41b057d31df
2,434
py
Python
Python/Search/Search-1-Billion-Users.py
sethmh82/SethDevelopment
08f3bd22923b652f9d676ffa2af3dc037eed6d73
[ "MIT" ]
null
null
null
Python/Search/Search-1-Billion-Users.py
sethmh82/SethDevelopment
08f3bd22923b652f9d676ffa2af3dc037eed6d73
[ "MIT" ]
null
null
null
Python/Search/Search-1-Billion-Users.py
sethmh82/SethDevelopment
08f3bd22923b652f9d676ffa2af3dc037eed6d73
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 29 15:50:49 2020

@author: SethHarden
"""

import math
# Add any extra import statements you may need here

"""
We have N different apps with different user growth rates.
At a given time t, measured in days, the number of users using an app is g ** t
(t can be a float). G = growth rate.

Apps launch at the same time and a user can only use 1 app at a time.

We want to know:
  - the total users when you add together the number of users from each app
  - after how many full days we will have 1 billion total users across the N apps

Constraints: 1.0 < growth rate < 2.0 and 1 <= N <= 1,000.
"""

# Add any helper functions you may need here

billion = 1000000000  # target user count (lower boundary of the search)

# Total users across all apps after t days; O(n) in the number of apps.
def getDays(arr, t):
    days_passed = 0
    for g in arr:
        days_passed += (g ** t)
    return days_passed

# Binary search for the first day with >= 1 billion users; O(log n) iterations.
def boundry(arr, low, high):
    while low < high:
        mid = low + (high - low) // 2
        if getDays(arr, mid) < billion:
            low = mid + 1
        else:
            high = mid
    return high

def getBillionUsersDay(growthRates):
    i = 1
    app_users = getDays(growthRates, i)  # we inherit the results
    if app_users >= billion:
        return 1  # return 1 day if we're already over

    # Double the horizon until we overshoot, then binary search in [i // 2, i].
    while app_users < billion:
        i *= 2
        app_users = getDays(growthRates, i)

    print(growthRates)
    print(boundry(growthRates, i // 2, i))
    return boundry(growthRates, i // 2, i)


# These are the tests we use to determine if the solution is correct.
# You can add your own at the bottom, but they are otherwise not editable!
def printInteger(n):
    print('[', n, ']', sep='', end='')

test_case_number = 1

def check(expected, output):
    global test_case_number
    result = False
    if expected == output:
        result = True
    rightTick = '\u2713'
    wrongTick = '\u2717'
    if result:
        print(rightTick, 'Test #', test_case_number, sep='')
    else:
        print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')
        printInteger(expected)
        print(' Your output: ', end='')
        printInteger(output)
        print()
    test_case_number += 1

if __name__ == "__main__":
    test_1 = [1.1, 1.2, 1.3]
    expected_1 = 79
    output_1 = getBillionUsersDay(test_1)
    check(expected_1, output_1)

    test_2 = [1.01, 1.02]
    expected_2 = 1047
    output_2 = getBillionUsersDay(test_2)
    check(expected_2, output_2)

    # Add your own test cases here
22.962264
82
0.670501
391
2,434
4.079284
0.398977
0.037618
0.043887
0.017555
0.060188
0
0
0
0
0
0
0.043985
0.224733
2,434
106
83
22.962264
0.801272
0.208299
0
0.074074
0
0
0.040972
0
0
0
0
0
0
1
0.092593
false
0.055556
0.018519
0
0.185185
0.185185
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
990d999fd69482a15fa5ce17aef975e84d69d8f0
1,833
py
Python
app/grandchallenge/workstations/urls.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
1
2021-02-09T10:30:44.000Z
2021-02-09T10:30:44.000Z
app/grandchallenge/workstations/urls.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
app/grandchallenge/workstations/urls.py
njmhendrix/grand-challenge.org
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
[ "Apache-2.0" ]
null
null
null
from django.urls import path

from grandchallenge.workstations.views import (
    SessionCreate,
    WorkstationCreate,
    WorkstationDetail,
    WorkstationEditorsUpdate,
    WorkstationImageCreate,
    WorkstationImageDetail,
    WorkstationImageUpdate,
    WorkstationList,
    WorkstationUpdate,
    WorkstationUsersAutocomplete,
    WorkstationUsersUpdate,
)

app_name = "workstations"

urlpatterns = [
    path("", WorkstationList.as_view(), name="list"),
    path(
        "users-autocomplete/",
        WorkstationUsersAutocomplete.as_view(),
        name="users-autocomplete",
    ),
    path("create/", WorkstationCreate.as_view(), name="create"),
    # TODO - add region
    path(
        "sessions/create/",
        SessionCreate.as_view(),
        name="default-session-create",
    ),
    path(
        "<slug>/sessions/create/",
        SessionCreate.as_view(),
        name="workstation-session-create",
    ),
    path(
        "<slug>/<uuid:pk>/sessions/create/",
        SessionCreate.as_view(),
        name="workstation-image-session-create",
    ),
    path(
        "<slug>/editors/update/",
        WorkstationEditorsUpdate.as_view(),
        name="editors-update",
    ),
    path(
        "<slug>/users/update/",
        WorkstationUsersUpdate.as_view(),
        name="users-update",
    ),
    path("<slug>/", WorkstationDetail.as_view(), name="detail"),
    path("<slug>/update/", WorkstationUpdate.as_view(), name="update"),
    path(
        "<slug>/images/create/",
        WorkstationImageCreate.as_view(),
        name="image-create",
    ),
    path(
        "<slug>/images/<uuid:pk>/",
        WorkstationImageDetail.as_view(),
        name="image-detail",
    ),
    path(
        "<slug>/images/<uuid:pk>/update/",
        WorkstationImageUpdate.as_view(),
        name="image-update",
    ),
]
25.816901
71
0.608838
153
1,833
7.202614
0.281046
0.07078
0.117967
0.078947
0.156987
0.12069
0.087114
0
0
0
0
0
0.242226
1,833
70
72
26.185714
0.793377
0.009274
0
0.318182
0
0
0.237596
0.128997
0
0
0
0.014286
0
1
0
false
0
0.030303
0
0.030303
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
990e9f8c1c95014b70321c97ccabc51b75a24bab
8,829
py
Python
Project10- Bank Marketing.py
vaibhav162/Banking-Marketing-Project
1255338b0a844a6a662e3b1887e9cef1a9edc834
[ "Unlicense" ]
null
null
null
Project10- Bank Marketing.py
vaibhav162/Banking-Marketing-Project
1255338b0a844a6a662e3b1887e9cef1a9edc834
[ "Unlicense" ]
null
null
null
Project10- Bank Marketing.py
vaibhav162/Banking-Marketing-Project
1255338b0a844a6a662e3b1887e9cef1a9edc834
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python
# coding: utf-8

# # Importing Libraries and Dataset

# In[1]:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns


# In[2]:

bank = pd.read_csv(r"C:\Users\shruti\Desktop\Decodr\Project\Decodr Project\Bank marketing project\bank.csv", delimiter=";")


# In[3]:

bank.head()


# In[4]:

bank.tail()


# In[5]:

# Renaming "y" column to "deposit"
bank.rename(columns={"y": "deposit"}, inplace=True)


# In[6]:

bank.head()


# # Data Exploration

# In[7]:

# To get total number of rows
print("Bank Marketing Dataset contains {rows} rows.".format(rows=len(bank)))


# In[8]:

# To get percentage of missing values in each column
missing_values = bank.isnull().mean() * 100
missing_values.sum()


# ### Categorical Columns Exploration

# In[9]:

cat_columns = ['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'poutcome']

fig, axs = plt.subplots(3, 3, sharex=False, sharey=False, figsize=(10, 8))

counter = 0
for cat_column in cat_columns:
    value_counts = bank[cat_column].value_counts()

    trace_x = counter // 3
    trace_y = counter % 3
    x_pos = np.arange(0, len(value_counts))

    axs[trace_x, trace_y].bar(x_pos, value_counts.values, tick_label=value_counts.index)
    axs[trace_x, trace_y].set_title(cat_column)

    for tick in axs[trace_x, trace_y].get_xticklabels():
        tick.set_rotation(90)

    counter += 1

plt.show()


# ### Numerical Columns Exploration

# In[10]:

num_columns = ['balance', 'day', 'duration', 'campaign', 'pdays', 'previous']

fig, axs = plt.subplots(2, 3, sharex=False, sharey=False, figsize=(10, 8))

counter = 0
for num_column in num_columns:
    trace_x = counter // 3
    trace_y = counter % 3

    # NOTE: this cell reuses x_pos/value_counts left over from the previous
    # (categorical) cell, so every panel shows the same bars; this looks like
    # an oversight in the original notebook, preserved here as-is.
    axs[trace_x, trace_y].bar(x_pos, value_counts.values, tick_label=value_counts.index)
    axs[trace_x, trace_y].set_title(num_column)

    counter += 1

plt.show()


# In[11]:

bank[["pdays", "campaign", "previous"]].describe()


# In[12]:

len(bank[bank["pdays"] > 400]) / len(bank) * 100


# In[13]:

len(bank[bank["campaign"] > 34]) / len(bank) * 100


# In[14]:

len(bank[bank["previous"] > 34]) / len(bank) * 100


# ## Analysis of Categorical columns

# In[15]:

value_counts = bank["deposit"].value_counts()
value_counts.plot.bar(title="Deposit value counts")


# In[16]:

# Plotting Deposit Vs Jobs
j_bank = pd.DataFrame()
j_bank["yes"] = bank[bank["deposit"] == "yes"]["job"].value_counts()
j_bank["no"] = bank[bank["deposit"] == "no"]["job"].value_counts()
j_bank.plot.bar(title="Job & Deposit")


# In[17]:

# Plotting Deposit Vs Marital Status
j_bank = pd.DataFrame()
j_bank["yes"] = bank[bank["deposit"] == "yes"]["marital"].value_counts()
j_bank["no"] = bank[bank["deposit"] == "no"]["marital"].value_counts()
j_bank.plot.bar(title="Marital Status & Deposit")


# In[18]:

# Plotting Deposit Vs Education
j_bank = pd.DataFrame()
j_bank["yes"] = bank[bank["deposit"] == "yes"]["education"].value_counts()
j_bank["no"] = bank[bank["deposit"] == "no"]["education"].value_counts()
j_bank.plot.bar(title="Education & Deposit")


# In[19]:

# Plotting Deposit Vs Contact
j_bank = pd.DataFrame()
j_bank["yes"] = bank[bank["deposit"] == "yes"]["contact"].value_counts()
j_bank["no"] = bank[bank["deposit"] == "no"]["contact"].value_counts()
j_bank.plot.bar(title="Contact & Deposit")


# ## Analysis of Numeric columns

# In[20]:

# Balance & Deposit
b_bank = pd.DataFrame()
b_bank['balance_yes'] = (bank[bank['deposit'] == 'yes'][['deposit', 'balance']].describe())['balance']
b_bank['balance_no'] = (bank[bank['deposit'] == 'no'][['deposit', 'balance']].describe())['balance']
b_bank


# In[21]:

b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title="Balance & Deposit Statistics")


# In[22]:

# Age & Deposit
b_bank = pd.DataFrame()
b_bank['age_yes'] = (bank[bank['deposit'] == 'yes'][['deposit', 'age']].describe())['age']
b_bank['age_no'] = (bank[bank['deposit'] == 'no'][['deposit', 'age']].describe())['age']
b_bank


# In[23]:

b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title="Age & Deposit Statistics")


# In[24]:

# Campaign & Deposit
b_bank = pd.DataFrame()
b_bank['campaign_yes'] = (bank[bank['deposit'] == 'yes'][['deposit', 'campaign']].describe())['campaign']
b_bank['campaign_no'] = (bank[bank['deposit'] == 'no'][['deposit', 'campaign']].describe())['campaign']
b_bank


# In[25]:

b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title="Campaign & Deposit Statistics")


# In[26]:

# Previous Campaign & Deposit
b_bank = pd.DataFrame()
b_bank['previous_yes'] = (bank[bank['deposit'] == 'yes'][['deposit', 'previous']].describe())['previous']
b_bank['previous_no'] = (bank[bank['deposit'] == 'no'][['deposit', 'previous']].describe())['previous']
b_bank


# In[27]:

b_bank.drop(["count", "25%", "50%", "75%"]).plot.bar(title="Previous Campaign & Deposit Statistics")


# # Data Cleaning

# In[28]:

def get_dummy_from_bool(row, column_name):
    """Returns 0 if value in column_name is no, returns 1 if value in column_name is yes"""
    return 1 if row[column_name] == "yes" else 0

def get_correct_values(row, column_name, threshold, bank):
    """Returns mean value if value in column_name is above threshold"""
    if row[column_name] <= threshold:
        return row[column_name]
    else:
        mean = bank[bank[column_name] <= threshold][column_name].mean()
        return mean

def clean_data(bank):
    '''
    INPUT
    df - pandas dataframe containing bank marketing campaign dataset

    OUTPUT
    df - cleaned dataset:
    1. columns with 'yes' and 'no' values are converted into boolean variables;
    2. categorical columns are converted into dummy variables;
    3. drop irrelevant columns;
    4. impute incorrect values
    '''
    cleaned_bank = bank.copy()

    # Converting columns containing 'yes' and 'no' values to boolean variables and drop original columns
    bool_columns = ['default', 'housing', 'loan', 'deposit']
    for bool_col in bool_columns:
        cleaned_bank[bool_col + '_bool'] = bank.apply(lambda row: get_dummy_from_bool(row, bool_col), axis=1)
    cleaned_bank = cleaned_bank.drop(columns=bool_columns)

    # Converting categorical columns to dummies
    cat_columns = ['job', 'marital', 'education', 'contact', 'month', 'poutcome']
    for col in cat_columns:
        cleaned_bank = pd.concat([cleaned_bank.drop(col, axis=1),
                                  pd.get_dummies(cleaned_bank[col], prefix=col, prefix_sep='_',
                                                 drop_first=True, dummy_na=False)], axis=1)

    # Dropping irrelevant columns
    cleaned_bank = cleaned_bank.drop(columns=['pdays'])

    # Imputing incorrect values and drop original columns
    cleaned_bank['campaign_cleaned'] = bank.apply(lambda row: get_correct_values(row, 'campaign', 34, cleaned_bank), axis=1)
    cleaned_bank['previous_cleaned'] = bank.apply(lambda row: get_correct_values(row, 'previous', 34, cleaned_bank), axis=1)
    cleaned_bank = cleaned_bank.drop(columns=['campaign', 'previous'])

    return cleaned_bank


# In[29]:

cleaned_bank = clean_data(bank)
cleaned_bank.head()


# # Predicting Campaign Model

# ### Classification Model

# In[30]:

X = cleaned_bank.drop(columns="deposit_bool")
y = cleaned_bank[["deposit_bool"]]


# In[31]:

TEST_SIZE = 0.3
RAND_STATE = 42


# In[41]:

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=RAND_STATE)


# In[42]:

# pip install xgboost
# (shell/notebook command; a bare `pip install xgboost` line is a syntax
# error in a plain .py script, so it is commented out here)


# In[43]:

import xgboost
import warnings

xgb = xgboost.XGBClassifier(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,
                            colsample_bytree=1, max_depth=7)


# In[44]:

xgb.fit(X_train, y_train.squeeze().values)


# In[45]:

y_train_preds = xgb.predict(X_train)
y_test_preds = xgb.predict(X_test)


# In[47]:

from sklearn.metrics import accuracy_score
print("XGB accuracy score for train data : %.3f and for test data : %.3f" % (
    accuracy_score(y_train, y_train_preds),
    accuracy_score(y_test, y_test_preds)))


# # Get Feature Importance from Trained Model

# In[50]:

headers = ["name", "score"]
values = sorted(zip(X_train.columns, xgb.feature_importances_), key=lambda x: x[1] * -1)
xgb_feature_importances_ = pd.DataFrame(values, columns=headers)
xgb_feature_importances_


# In[52]:

x_pos = np.arange(0, len(xgb_feature_importances_))
plt.figure(figsize=(10, 8))
plt.bar(x_pos, xgb_feature_importances_["score"])
plt.xticks(x_pos, xgb_feature_importances_["name"])
plt.xticks(rotation=90)
plt.title("Feature Importance (XGB)")
plt.show()


# In[ ]:
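A tiny, self-contained illustration of the yes/no to 0/1 conversion used in the cleaning step above (a sketch using the record's own get_dummy_from_bool; the DataFrame here is invented for demonstration):

import pandas as pd

df = pd.DataFrame({'deposit': ['yes', 'no', 'yes']})
df['deposit_bool'] = df.apply(lambda row: get_dummy_from_bool(row, 'deposit'), axis=1)
print(df['deposit_bool'].tolist())  # [1, 0, 1]
# The same result could be had without apply: df['deposit'].map({'yes': 1, 'no': 0})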
19.930023
123
0.654094
1,235
8,829
4.501215
0.211336
0.043533
0.043173
0.025904
0.356719
0.315165
0.215866
0.17809
0.141392
0.100378
0
0.02429
0.174652
8,829
442
124
19.975113
0.738576
0.131725
0
0.204545
0
0.007576
0.181793
0.006351
0
0
0
0
0
0
null
null
0
0.113636
null
null
0.015152
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
990ee382aab29a71c3680acb076db25c10304e41
829
py
Python
backend/recotem/recotem/api/serializers/project.py
codelibs/recotem
383ccdd6e1e9feb59bc3adb2543c00b08277317a
[ "Apache-2.0" ]
7
2021-05-15T05:43:36.000Z
2022-01-06T16:08:06.000Z
backend/recotem/recotem/api/serializers/project.py
codelibs/recotem
383ccdd6e1e9feb59bc3adb2543c00b08277317a
[ "Apache-2.0" ]
5
2021-09-25T13:30:38.000Z
2022-01-09T12:59:03.000Z
backend/recotem/recotem/api/serializers/project.py
codelibs/recotem
383ccdd6e1e9feb59bc3adb2543c00b08277317a
[ "Apache-2.0" ]
1
2021-11-02T12:49:06.000Z
2021-11-02T12:49:06.000Z
from rest_framework import serializers

from recotem.api.models import Project, TrainingData


class ProjectSerializer(serializers.ModelSerializer):
    class Meta:
        model = Project
        fields = "__all__"


class TrainingDataForSummarySerializer(serializers.ModelSerializer):
    n_parameter_tuning_jobs = serializers.IntegerField(
        source="parametertuningjob_set.count"
    )
    n_trained_models = serializers.IntegerField(source="trainedmodel_set.count")

    class Meta:
        model = TrainingData
        fields = ["id", "n_parameter_tuning_jobs", "n_trained_models"]


class ProjectSummarySerializer(serializers.Serializer):
    n_data = serializers.IntegerField()
    n_complete_jobs = serializers.IntegerField()
    n_models = serializers.IntegerField()
    ins_datetime = serializers.DateTimeField()
29.607143
80
0.756333
79
829
7.658228
0.468354
0.190083
0.046281
0.066116
0
0
0
0
0
0
0
0
0.165259
829
27
81
30.703704
0.874277
0
0
0.105263
0
0
0.118215
0.088058
0
0
0
0
0
1
0
false
0
0.105263
0
0.684211
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
9915ead35e605389439f3f75b603bcbfca8137f3
401
py
Python
Leetcode/Move Zeroes/Move Zeroes.py
rahil-1407/Data-Structure-and-Algorithms
ea3eb9849aeb2716ef5812a0b5621a28120b1880
[ "MIT" ]
51
2021-01-14T04:05:55.000Z
2022-01-25T11:25:37.000Z
Leetcode/Move Zeroes/Move Zeroes.py
rahil-1407/Data-Structure-and-Algorithms
ea3eb9849aeb2716ef5812a0b5621a28120b1880
[ "MIT" ]
638
2020-12-27T18:49:53.000Z
2021-11-21T05:22:52.000Z
Leetcode/Move Zeroes/Move Zeroes.py
rahil-1407/Data-Structure-and-Algorithms
ea3eb9849aeb2716ef5812a0b5621a28120b1880
[ "MIT" ]
124
2021-01-30T06:40:20.000Z
2021-11-21T15:14:40.000Z
from typing import List  # needed for the List[int] annotation outside LeetCode


class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        # List comprehension to keep only the indices of non-zero numbers
        non_zeros = [i for i in range(len(nums)) if nums[i] != 0]
        nz = len(non_zeros)
        # edit the list in place: non-zero numbers first ...
        nums[:nz] = [nums[i] for i in non_zeros]
        # ... then add zeroes at the end
        nums[nz:] = [0] * (len(nums) - nz)
57.285714
126
0.598504
67
401
3.537313
0.507463
0.101266
0.042194
0.059072
0
0
0
0
0
0
0
0.006944
0.281796
401
7
127
57.285714
0.815972
0.326683
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9918396b139801392f0a7cf201d6cd0639fc9b91
4,629
py
Python
sandbox/test/test_misc.py
yingted/pysandbox
cb20c202459fc1b22a81e879c0efafc66e1ddd8a
[ "BSD-2-Clause-FreeBSD" ]
1
2019-02-05T13:13:09.000Z
2019-02-05T13:13:09.000Z
sandbox/test/test_misc.py
yingted/pysandbox
cb20c202459fc1b22a81e879c0efafc66e1ddd8a
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
sandbox/test/test_misc.py
yingted/pysandbox
cb20c202459fc1b22a81e879c0efafc66e1ddd8a
[ "BSD-2-Clause-FreeBSD" ]
null
null
null
from __future__ import with_statement

from sandbox import Sandbox, SandboxError, SandboxConfig, Timeout
from sandbox.test import createSandbox, createSandboxConfig, SkipTest
from sandbox.test.tools import capture_stdout

def test_valid_code():
    def valid_code():
        assert 1+2 == 3
    createSandbox().call(valid_code)

def test_exit():
    def exit_noarg():
        try:
            exit()
        except SandboxError as err:
            assert str(err) == "exit() function blocked by the sandbox"
        else:
            assert False
    createSandbox().call(exit_noarg)

    config = createSandboxConfig("exit")
    def exit_1():
        try:
            exit(1)
        except SystemExit as err:
            assert err.args[0] == 1
        else:
            assert False

        import sys
        try:
            sys.exit("bye")
        except SystemExit as err:
            assert err.args[0] == "bye"
        else:
            assert False
    Sandbox(config).call(exit_1)

def test_sytem_exit():
    def system_exit_denied():
        try:
            raise SystemExit()
        except NameError as err:
            assert str(err) == "global name 'SystemExit' is not defined"
        except:
            assert False
    createSandbox().call(system_exit_denied)

    config = createSandboxConfig("exit")
    def system_exit_allowed():
        try:
            raise SystemExit()
        except SystemExit:
            pass
        else:
            assert False
    Sandbox(config).call(system_exit_allowed)

    try:
        raise SystemExit()
    except SystemExit:
        pass
    else:
        assert False

def test_stdout():
    import sys
    config = createSandboxConfig(disable_debug=True)
    with capture_stdout() as stdout:
        def print_denied():
            print "Hello Sandbox 1"
        try:
            Sandbox(config).call(print_denied)
        except SandboxError:
            pass
        else:
            assert False

        def print_allowed():
            print "Hello Sandbox 2"
        config2 = createSandboxConfig('stdout')
        Sandbox(config2).call(print_allowed)

        print "Hello Sandbox 3"

        sys.stdout.flush()
        stdout.seek(0)
        output = stdout.read()
        assert output == "Hello Sandbox 2\nHello Sandbox 3\n"

def test_traceback():
    def check_frame_filename():
        import sys
        frame = sys._getframe(1)
        frame_code = frame.f_code
        frame_filename = frame_code.co_filename
        # it may end with .py or .pyc
        assert __file__.startswith(frame_filename)

    config = createSandboxConfig('traceback')
    config.allowModule('sys', '_getframe')
    Sandbox(config).call(check_frame_filename)

    check_frame_filename()

def test_regex():
    def check_regex():
        import re
        assert re.escape('+') == '\\+'
        assert re.match('a+', 'aaaa').group(0) == 'aaaa'
        # FIXME: Remove this workaround: list(...)
        assert list(re.findall('.', 'abc')) == ['a', 'b', 'c']
        assert re.search('a+', 'aaaa').group(0) == 'aaaa'
        # FIXME: Remove this workaround: list(...)
        assert list(re.split(' +', 'a b c')) == ['a', 'b', 'c']
        assert re.sub('a+', '#', 'a b aa c') == '# b # c'

    sandbox = createSandbox('regex')
    sandbox.call(check_regex)

    check_regex()

def test_timeout_while_1():
    if not createSandboxConfig.use_subprocess:
        raise SkipTest("timeout is only supported with subprocess")

    def denial_of_service():
        while 1:
            pass
    config = createSandboxConfig()
    config.timeout = 0.1
    try:
        Sandbox(config).call(denial_of_service)
    except Timeout:
        pass
    else:
        assert False

def test_timeout_cpu_intensive():
    if not createSandboxConfig.use_subprocess:
        raise SkipTest("timeout is only supported with subprocess")

    def denial_of_service():
        sum(2**x for x in range(100000))
    config = createSandboxConfig()
    config.timeout = 0.1
    try:
        Sandbox(config).call(denial_of_service)
    except Timeout:
        pass
    else:
        assert False

def test_crash():
    if not createSandboxConfig.use_subprocess:
        raise SkipTest("catching a crash is only supported with subprocess")

    def crash():
        import _sandbox
        _sandbox._test_crash()
    config = createSandboxConfig()
    config.allowSafeModule("_sandbox", "_test_crash")
    sand = Sandbox(config)
    try:
        sand.call(crash)
    except SandboxError as err:
        assert str(err) == 'subprocess killed by signal 11', str(err)
    else:
        assert False
26.301136
76
0.600994
522
4,629
5.176245
0.243295
0.040711
0.049963
0.035159
0.409326
0.357513
0.324944
0.280533
0.254626
0.254626
0
0.010753
0.296824
4,629
175
77
26.451429
0.819355
0.023763
0
0.454545
0
0
0.098582
0
0
0
0
0.005714
0.167832
0
null
null
0.041958
0.062937
null
null
0.048951
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
9929ca378f85f069f43b01b2e6b9a5f840475d81
466
py
Python
setup.py
spyoungtech/commander
891172d31dc93b9e34c5e38097159141c863d75e
[ "MIT" ]
3
2018-08-14T20:42:43.000Z
2020-03-04T05:18:47.000Z
setup.py
spyoungtech/commander
891172d31dc93b9e34c5e38097159141c863d75e
[ "MIT" ]
2
2020-07-08T12:30:08.000Z
2022-02-02T14:59:23.000Z
setup.py
spyoungtech/commander
891172d31dc93b9e34c5e38097159141c863d75e
[ "MIT" ]
null
null
null
from setuptools import setup

setup(
    name='voice-commander',
    version='0.0.2a',
    packages=['voice_commander'],
    install_requires=['fuzzywuzzy', 'fuzzywuzzy[speedup]', 'keyboard', 'easygui', 'pyaudio', 'SpeechRecognition'],
    url='https://github.com/spyoungtech/voice-commander',
    license='MIT',
    author='Spencer Young',
    author_email='spencer.young@spyoung.coom',
    description='cross-platform voice-activation hooks and keyboard macros'
)
33.285714
114
0.708155
51
466
6.411765
0.764706
0.12844
0
0
0
0
0
0
0
0
0
0.007444
0.135193
466
13
115
35.846154
0.80397
0
0
0
0
0
0.534335
0.055794
0
0
0
0
0
1
0
true
0
0.083333
0
0.083333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
99323e8a12cde588dd2bc1c30a2dbda9df176374
7,804
py
Python
neatest/genome.py
goktug97/NEATEST
f35f355fd896c8f9ab88d411752324ebcc836d71
[ "MIT" ]
13
2021-09-25T19:52:38.000Z
2021-09-28T09:42:22.000Z
neatest/genome.py
goktug97/NEATEST
f35f355fd896c8f9ab88d411752324ebcc836d71
[ "MIT" ]
null
null
null
neatest/genome.py
goktug97/NEATEST
f35f355fd896c8f9ab88d411752324ebcc836d71
[ "MIT" ]
null
null
null
from typing import List
import math
import os

from .connection import Connection, GeneRate, Weight
from .node import Node, NodeType, group_nodes
from .version import VERSION

import cloudpickle  # type: ignore

try:
    disable_mpi = os.environ.get('NEATEST_DISABLE_MPI')
    if disable_mpi and disable_mpi != '0':
        raise ImportError
    from mpi4py import MPI  # type: ignore
except ImportError:
    from .MPI import MPI
    MPI = MPI()


class Genome(object):
    def __init__(self, nodes: List[Node], connections: List[Connection]):
        self.connections = connections
        self.nodes = nodes
        self.version = VERSION
        grouped_nodes = group_nodes(self.nodes, 'type')
        self.input_size = len(grouped_nodes[0])
        self.output_size = len(grouped_nodes[-1])
        self.outputs = grouped_nodes[-1]

    def __call__(self, inputs: List[float]) -> List[float]:
        self.nodes.sort(key=lambda x: x.depth)
        for node in self.nodes:
            value = 0.0
            if node.type == NodeType.INPUT:
                value += inputs[node.id]
            elif node.type == NodeType.BIAS:
                continue
            for connection in node.inputs:
                if connection.enabled:
                    value += connection.in_node.value * connection.weight.value
            node.value = node.activation(value)
        return [node.value for node in self.outputs]

    def copy(self):
        connections: List[Connection] = []
        nodes: List[Node] = []
        for idx in range(len(self.connections)):
            connection = self.connections[idx]
            in_node = Node(connection.in_node.id,
                           connection.in_node.type,
                           connection.in_node.activation,
                           depth=connection.in_node.depth)
            out_node = Node(connection.out_node.id,
                            connection.out_node.type,
                            connection.out_node.activation,
                            depth=connection.out_node.depth)
            nodes_dict = dict(zip(nodes, range(len(nodes))))
            if in_node not in nodes_dict:
                nodes.append(in_node)
                nodes_dict[in_node] = len(nodes)-1
            if out_node not in nodes_dict:
                nodes.append(out_node)
                nodes_dict[out_node] = len(nodes)-1
            new_connection = Connection(nodes[nodes_dict[in_node]],
                                        nodes[nodes_dict[out_node]],
                                        innovation=connection.innovation,
                                        dominant_gene_rate=connection.dominant_gene_rate,
                                        weight=connection.weight)
            new_connection.enabled = connection.enabled
            connections.append(new_connection)
        new_genome = Genome(nodes, connections)
        return new_genome

    def deepcopy(self):
        connections: List[Connection] = []
        nodes: List[Node] = []
        for idx in range(len(self.connections)):
            connection = self.connections[idx]
            in_node = Node(connection.in_node.id,
                           connection.in_node.type,
                           connection.in_node.activation,
                           depth=connection.in_node.depth)
            out_node = Node(connection.out_node.id,
                            connection.out_node.type,
                            connection.out_node.activation,
                            depth=connection.out_node.depth)
            nodes_dict = dict(zip(nodes, range(len(nodes))))
            if in_node not in nodes_dict:
                nodes.append(in_node)
                nodes_dict[in_node] = len(nodes)-1
            if out_node not in nodes_dict:
                nodes.append(out_node)
                nodes_dict[out_node] = len(nodes)-1
            new_connection = Connection(nodes[nodes_dict[in_node]],
                                        nodes[nodes_dict[out_node]],
                                        innovation=connection.innovation,
                                        dominant_gene_rate=GeneRate(
                                            connection.dominant_gene_rate.value),
                                        weight=Weight(connection.weight.value))
            new_connection.enabled = connection.enabled
            connections.append(new_connection)
        new_genome = Genome(nodes, connections)
        return new_genome

    def draw(self, node_radius: float = 0.05,
             vertical_distance: float = 0.25,
             horizontal_distance: float = 0.25) -> None:
        draw_genome(self, node_radius, vertical_distance, horizontal_distance)

    def save(self, filename: str) -> None:
        if MPI.COMM_WORLD.rank == 0:
            with open(filename, 'wb') as output:
                cloudpickle.dump(self, output)

    @classmethod
    def load(cls, filename: str) -> 'Genome':
        print(f"\033[33;1mLoading: {filename}\033[0m")
        with open(filename, 'rb') as f:
            genome = cloudpickle.load(f)
        if genome.version != VERSION:
            print("\033[31;1mWarning: Genome version mismatch!\n"
                  f"Current Version: {VERSION.major}.{VERSION.minor}.{VERSION.patch}\n"
                  "Checkpoint Version:"
                  f" {genome.version.major}.{genome.version.minor}."
                  f"{genome.version.patch}\033[0m")
        return genome

    def __str__(self):
        string = ''
        string = f'{string}NODES:\n'
        for node in self.nodes:
            string = f'{string}{node}\n'
        string = f'{string}\n\nCONNECTIONS:\n'
        for connection in self.connections:
            string = f'{string}{connection}\n'
        return string


def draw_genome(genome: Genome,
                node_radius: float = 0.05,
                vertical_distance: float = 0.25,
                horizontal_distance: float = 0.25) -> None:
    '''Draw the genome to a matplotlib figure but do not show it.'''
    import matplotlib.pyplot as plt  # type: ignore
    import matplotlib.patches as patches  # type: ignore
    plt.gcf().canvas.set_window_title('float')
    positions = {}
    node_groups = group_nodes(genome.nodes, 'depth')
    for group_idx, nodes in enumerate(node_groups):
        y_position = -vertical_distance * (len(nodes)-1)/2
        for i, node in enumerate(nodes):
            positions[f'{node.id}'] = (group_idx * horizontal_distance,
                                       y_position + i*vertical_distance)
            circle = plt.Circle(positions[f'{node.id}'],
                                node_radius, color='r', fill=False)
            plt.gcf().gca().text(*positions[f'{node.id}'], node.id,
                                 horizontalalignment='center',
                                 verticalalignment='center',
                                 fontsize=10.0)
            plt.gcf().gca().add_artist(circle)
    for connection in genome.connections:
        if connection.enabled:
            node1_x = positions[f'{connection.in_node.id}'][0]
            node2_x = positions[f'{connection.out_node.id}'][0]
            node1_y = positions[f'{connection.in_node.id}'][1]
            node2_y = positions[f'{connection.out_node.id}'][1]
            angle = math.atan2(node2_x - node1_x, node2_y - node1_y)
            x_adjustment = node_radius * math.sin(angle)
            y_adjustment = node_radius * math.cos(angle)
            arrow = patches.FancyArrowPatch(
                (node1_x + x_adjustment, node1_y + y_adjustment),
                (node2_x - x_adjustment, node2_y - y_adjustment),
                arrowstyle="Simple,tail_width=0.5,head_width=3,head_length=5",
                color="k", antialiased=True)
            plt.gcf().gca().add_patch(arrow)
    plt.axis('scaled')
43.597765
89
0.563813
871
7,804
4.881745
0.202067
0.031044
0.045155
0.016933
0.407338
0.389464
0.362653
0.362653
0.362653
0.362653
0
0.014649
0.335213
7,804
178
90
43.842697
0.804934
0.014223
0
0.35
1
0
0.072228
0.040864
0
0
0
0
0
1
0.05625
false
0
0.08125
0
0.175
0.0125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
993374093a84570c0c1ef8f2f30f5090353300ca
558
py
Python
release/stubs.min/System/Windows/Forms/__init___parts/FormClosedEventArgs.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
release/stubs.min/System/Windows/Forms/__init___parts/FormClosedEventArgs.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
release/stubs.min/System/Windows/Forms/__init___parts/FormClosedEventArgs.py
YKato521/ironpython-stubs
b1f7c580de48528490b3ee5791b04898be95a9ae
[ "MIT" ]
null
null
null
class FormClosedEventArgs(EventArgs):
    """
    Provides data for the System.Windows.Forms.Form.FormClosed event.

    FormClosedEventArgs(closeReason: CloseReason)
    """

    @staticmethod
    def __new__(self, closeReason):
        """ __new__(cls: type,closeReason: CloseReason) """
        pass

    CloseReason = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Gets a value that indicates why the form was closed.

    Get: CloseReason(self: FormClosedEventArgs) -> CloseReason
    """
20.666667
71
0.645161
54
558
6.518519
0.648148
0.085227
0
0
0
0
0
0
0
0
0
0
0.252688
558
26
72
21.461538
0.844125
0.283154
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0.142857
0
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9939051042bf55dc56637d71a73ebeb04bd0f880
1,085
py
Python
doc/source/EXAMPLES/mu_reproj_interact.py
kapteyn-astro/kapteyn
f12332cfd567c7c0da40628dcfc7b297971ee636
[ "BSD-3-Clause" ]
3
2016-04-28T08:55:33.000Z
2018-07-23T18:35:58.000Z
doc/source/EXAMPLES/mu_reproj_interact.py
kapteyn-astro/kapteyn
f12332cfd567c7c0da40628dcfc7b297971ee636
[ "BSD-3-Clause" ]
2
2020-07-23T12:28:37.000Z
2021-07-13T18:26:06.000Z
doc/source/EXAMPLES/mu_reproj_interact.py
kapteyn-astro/kapteyn
f12332cfd567c7c0da40628dcfc7b297971ee636
[ "BSD-3-Clause" ]
3
2017-05-03T14:01:08.000Z
2020-07-23T12:23:28.000Z
from kapteyn import maputils
from matplotlib import pyplot as plt
import numpy

# Read first image as base
Basefits = maputils.FITSimage(promptfie=maputils.prompt_fitsfile)
print(type(Basefits), isinstance(Basefits, maputils.FITSimage))

# Get data from a second image. This is the data that
# should be reprojected to fit the header of Basefits.
Secondfits = maputils.FITSimage(promptfie=maputils.prompt_fitsfile)
#Secondfits.set_imageaxes(promptfie=maputils.prompt_imageaxes)
#Secondfits.set_limits(promptfie=maputils.prompt_box)

# Now we want to overlay the data of this Base fits object onto
# the wcs of the second fits object. This is done with the
# reproject_to() method of the first FITSimage object (the data object)
# with the second FITSimage object as parameter. This results in a new fits file.
#Reprojfits = Basefits.reproject_to(Secondfits.hdr, plimlo=(2,1), plimhi=(2,1))
#Reprojfits = Basefits.reproject_to(Secondfits.hdr, pxlim=(100,400), pylim=(100,400))
Reprojfits = Basefits.reproject_to(Secondfits.hdr)
Reprojfits.writetofits("reproj.fits", clobber=True)
43.4
86
0.795392
158
1,085
5.398734
0.455696
0.079719
0.107855
0.101993
0.260258
0.260258
0
0
0
0
0
0.016719
0.117972
1,085
24
87
45.208333
0.874608
0.623041
0
0
0
0
0.027778
0
0
0
0
0
0
1
0
false
0
0.375
0
0.375
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
9939f19eab14d0c7a3b637eb8dcb7c8e88f38637
381
py
Python
python/Container With Most Water.py
kuwarkapur/Hacktoberfest-2022
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
[ "MIT" ]
1
2021-12-03T09:23:41.000Z
2021-12-03T09:23:41.000Z
python/Container With Most Water.py
kuwarkapur/Hacktoberfest-2022
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
[ "MIT" ]
null
null
null
python/Container With Most Water.py
kuwarkapur/Hacktoberfest-2022
efaafeba5ce51d8d2e2d94c6326cc20bff946f17
[ "MIT" ]
null
null
null
from typing import List  # needed for the List[int] annotation outside LeetCode


class Solution:
    def maxArea(self, height: List[int]) -> int:
        i = 0
        j = len(height) - 1
        res = 0
        area = 0
        while i < j:
            area = min(height[i], height[j]) * (j - i)
            # print(area)
            res = max(res, area)
            if height[i] < height[j]:
                i += 1
            else:
                j -= 1
        return res
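A hypothetical driver for the two-pointer solution above, using the classic LeetCode example:

print(Solution().maxArea([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49 (lines of height 8 and 7, width 7)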
23.8125
49
0.393701
48
381
3.125
0.458333
0.093333
0.173333
0.186667
0
0
0
0
0
0
0
0.030151
0.47769
381
15
50
25.4
0.723618
0.028871
0
0
0
0
0
0
0
0
0
0
0
1
0.071429
false
0
0
0
0.214286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
993e1a819f6f487b24a2d7f90f46158e52218a76
584
py
Python
lib/hyperparams.py
J-Moravec/pairtree
91cbba628b78aea31034efb080976fdb47d83976
[ "MIT" ]
15
2021-01-19T21:13:50.000Z
2022-02-02T00:01:33.000Z
lib/hyperparams.py
J-Moravec/pairtree
91cbba628b78aea31034efb080976fdb47d83976
[ "MIT" ]
17
2020-11-25T09:41:03.000Z
2022-03-28T04:52:14.000Z
lib/hyperparams.py
J-Moravec/pairtree
91cbba628b78aea31034efb080976fdb47d83976
[ "MIT" ]
6
2021-01-01T06:00:31.000Z
2021-06-29T15:03:11.000Z
explanations = {
    'gamma': '''
        Proportion of tree modifications that should use mutrel-informed choice
        for node to move, rather than uniform choice
    ''',
    'zeta': '''
        Proportion of tree modifications that should use mutrel-informed choice
        for destination to move node to, rather than uniform choice
    ''',
    'iota': '''
        Probability of initializing with mutrel-informed tree rather than fully
        branching tree when beginning chain
    ''',
}

defaults = {
    'gamma': 0.7,
    'zeta': 0.7,
    'iota': 0.7,
}

assert set(explanations.keys()) == set(defaults.keys())
23.36
79
0.674658
74
584
5.324324
0.486486
0.106599
0.081218
0.147208
0.329949
0.329949
0.329949
0.329949
0.329949
0.329949
0
0.013015
0.210616
584
24
80
24.333333
0.841649
0
0
0.2
0
0
0.712329
0
0
0
0
0
0.05
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9943e8ba1fe715b4c9d83b3026d9f901028d6a2e
359
py
Python
app/database/seed/seeds/real_madrid/team.py
batistado/FlaskFootball
4cd57edca35a7ce9864201f7ae0fc8af55a2724f
[ "MIT" ]
null
null
null
app/database/seed/seeds/real_madrid/team.py
batistado/FlaskFootball
4cd57edca35a7ce9864201f7ae0fc8af55a2724f
[ "MIT" ]
null
null
null
app/database/seed/seeds/real_madrid/team.py
batistado/FlaskFootball
4cd57edca35a7ce9864201f7ae0fc8af55a2724f
[ "MIT" ]
null
null
null
import os

import app.database.seed.seed_helper as helper
from app.translation.deserializer import Deserializer
from app.extensions import db

real_madrid = {
    'name': 'Real Madrid CF',
    'players': helper.read_csv_file(os.path.join(os.path.dirname(__file__), 'players.csv')),
}

team = Deserializer().deserialize_team(real_madrid)
db.session.add(team)
23.933333
92
0.763231
51
359
5.176471
0.529412
0.113636
0
0
0
0
0
0
0
0
0
0
0.114206
359
14
93
25.642857
0.830189
0
0
0
0
0
0.100279
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
9946ecb403a384000ba7abec0b673e218cfb79aa
436
py
Python
examples/loop.py
letmaik/exhaust
7c3b4f1dfab95192968b0bcb49bbb5574e1ac83b
[ "MIT" ]
1
2021-12-04T21:37:41.000Z
2021-12-04T21:37:41.000Z
examples/loop.py
letmaik/exhaust
7c3b4f1dfab95192968b0bcb49bbb5574e1ac83b
[ "MIT" ]
null
null
null
examples/loop.py
letmaik/exhaust
7c3b4f1dfab95192968b0bcb49bbb5574e1ac83b
[ "MIT" ]
null
null
null
# This example shows how a space can be modelled with loops.

import exhaust

def generate_numbers(state: exhaust.State):
    numbers = []
    for _ in range(5):
        numbers.append(state.randint(1, 5))
    return numbers

for numbers in exhaust.space(generate_numbers):
    print(numbers)

# Output:
# [1, 1, 1, 1, 1]
# [1, 1, 1, 1, 2]
# [1, 1, 1, 1, 3]
# [1, 1, 1, 1, 4]
# [1, 1, 1, 1, 5]
# [1, 1, 1, 2, 1]
# ...
# [5, 5, 5, 5, 5]
18.956522
60
0.568807
77
436
3.181818
0.38961
0.155102
0.171429
0.146939
0.065306
0.036735
0.036735
0.036735
0
0
0
0.115502
0.245413
436
22
61
19.818182
0.629179
0.417431
0
0
1
0
0
0
0
0
0
0
0
1
0.125
false
0
0.125
0
0.375
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
994a38b1a44b5485284d61f6f94e7e736096f6bb
488
py
Python
app/api/v1/models/meetups_model.py
MRichardN/Questioner-api
2697ccc976c6f4896246f7f817aab7d12f1d606c
[ "MIT" ]
null
null
null
app/api/v1/models/meetups_model.py
MRichardN/Questioner-api
2697ccc976c6f4896246f7f817aab7d12f1d606c
[ "MIT" ]
2
2019-01-08T06:58:47.000Z
2019-01-08T08:46:16.000Z
app/api/v1/models/meetups_model.py
MRichardN/Questioner-api
2697ccc976c6f4896246f7f817aab7d12f1d606c
[ "MIT" ]
null
null
null
from datetime import datetime

from ..utils.utils import idGenerator
from .base_model import Model

meetups = []


class Meetup(Model):
    """ This class represents the meetup model."""

    def __init__(self):
        super().__init__(meetups)

    def save(self, data):
        """ Save a meetup."""
        data['id'] = idGenerator(self.collection)
        data['createdOn'] = datetime.now()
        data['happeningOn'] = datetime.now()
        return super().save(data)
17.428571
50
0.612705
54
488
5.37037
0.481481
0.075862
0
0
0
0
0
0
0
0
0
0
0.254098
488
27
51
18.074074
0.796703
0.110656
0
0
0
0
0.052632
0
0
0
0
0
0
1
0.166667
false
0
0.25
0
0.583333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
994c3e8c1a0f1cd2339af26e2dce6708adab8045
469
py
Python
FootageOverview/FootageManager/migrations/0003_footage_staticpath.py
nylser/FootageOverview
921e003550ba445d5a3308dee231a2d92e642b01
[ "Unlicense" ]
null
null
null
FootageOverview/FootageManager/migrations/0003_footage_staticpath.py
nylser/FootageOverview
921e003550ba445d5a3308dee231a2d92e642b01
[ "Unlicense" ]
null
null
null
FootageOverview/FootageManager/migrations/0003_footage_staticpath.py
nylser/FootageOverview
921e003550ba445d5a3308dee231a2d92e642b01
[ "Unlicense" ]
null
null
null
# Generated by Django 2.1.1 on 2018-09-20 07:56

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('FootageManager', '0002_footage_length'),
    ]

    operations = [
        migrations.AddField(
            model_name='footage',
            name='staticpath',
            field=models.CharField(default='', max_length=200, verbose_name='static_path'),
            preserve_default=False,
        ),
    ]
23.45
91
0.616205
49
469
5.755102
0.77551
0
0
0
0
0
0
0
0
0
0
0.06414
0.268657
469
19
92
24.684211
0.758017
0.095949
0
0
1
0
0.14455
0
0
0
0
0
0
1
0
false
0
0.076923
0
0.307692
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99528f1bcb117738e7d9a6979b7ae04eff5afc1b
429
py
Python
equinox/__init__.py
marcelroed/equinox
3804a8d60217bde685bee0a893a7bd55b1e63c26
[ "Apache-2.0" ]
null
null
null
equinox/__init__.py
marcelroed/equinox
3804a8d60217bde685bee0a893a7bd55b1e63c26
[ "Apache-2.0" ]
null
null
null
equinox/__init__.py
marcelroed/equinox
3804a8d60217bde685bee0a893a7bd55b1e63c26
[ "Apache-2.0" ]
null
null
null
from . import experimental, nn
from .filters import (
    combine,
    filter,
    is_array,
    is_array_like,
    is_inexact_array,
    is_inexact_array_like,
    partition,
)
from .grad import filter_custom_vjp, filter_grad, filter_value_and_grad
from .jit import filter_jit
from .module import Module, static_field
from .tree import tree_at, tree_equal, tree_pformat
from .update import apply_updates


__version__ = "0.3.1"
22.578947
71
0.762238
63
429
4.809524
0.507937
0.046205
0.092409
0
0
0
0
0
0
0
0
0.008475
0.174825
429
18
72
23.833333
0.847458
0
0
0
0
0
0.011655
0
0
0
0
0
0
1
0
false
0
0.4375
0
0.4375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
9956e73164bd63500cea24e33dc9bcf6d9f73e4e
4,311
py
Python
scp.py
KvantPro/SCP
1304d03007992f223d319d41037e2b32c9fbf934
[ "Unlicense" ]
1
2021-11-12T19:28:16.000Z
2021-11-12T19:28:16.000Z
scp.py
KvantPro/SCP
1304d03007992f223d319d41037e2b32c9fbf934
[ "Unlicense" ]
null
null
null
scp.py
KvantPro/SCP
1304d03007992f223d319d41037e2b32c9fbf934
[ "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'SCP.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.

from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import random, time


def animate():
    a = 0
    while a <= 100:
        ui.progressBar.setValue(a)
        time.sleep(0.01)
        a += 1


def wt(w):
    sg = QMessageBox()
    sg.resize(100, 100)
    if w == 'w':
        te = 'Вы выиграли!'
    elif w == 'p':
        te = 'Вы проиграли'
    else:
        te = 'Ничья'
    sg.setWindowTitle('Результат')
    sg.setText(str(te))
    x = sg.exec_()


def pr(x, win):
    if win == 'S':
        if x == 0:
            wt = 'n'
        elif x == 1:
            wt = 'w'
        else:
            wt = 'p'
    if win == 'C':
        if x == 0:
            wt = 'p'
        elif x == 1:
            wt = 'n'
        else:
            wt = 'w'
    if win == 'P':
        if x == 0:
            wt = 'w'
        elif x == 1:
            wt = 'p'
        else:
            wt = 'n'
    return wt


class Ui_Form(object):
    def setupUi(self, Form):
        Form.setObjectName("Form")
        Form.resize(396, 257)
        self.S = QtWidgets.QPushButton(Form)
        self.S.setGeometry(QtCore.QRect(70, 180, 75, 71))
        self.S.setObjectName("pushButton")
        self.C = QtWidgets.QPushButton(Form)
        self.C.setGeometry(QtCore.QRect(160, 180, 75, 71))
        self.C.setObjectName("pushButton_2")
        self.P = QtWidgets.QPushButton(Form)
        self.P.setGeometry(QtCore.QRect(250, 180, 75, 71))
        self.P.setObjectName("pushButton_3")
        self.ME = QtWidgets.QPushButton(Form)
        self.ME.setGeometry(QtCore.QRect(320, 10, 75, 31))
        self.ME.setObjectName("pushButton_4")
        self.label = QtWidgets.QLabel(Form)
        self.label.setGeometry(QtCore.QRect(80, 120, 211, 31))
        self.label.setStyleSheet("font-size: 25px;")
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(Form)
        self.label_2.setGeometry(QtCore.QRect(80, 70, 211, 31))
        self.label_2.setStyleSheet("font-size: 25px;")
        self.label_2.setObjectName("label_2")
        self.progressBar = QtWidgets.QProgressBar(Form)
        self.progressBar.setGeometry(QtCore.QRect(10, 10, 301, 31))
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Камень, ножницы, бумага!"))
        self.S.setText(_translate("Form", "Камень"))
        self.C.setText(_translate("Form", "Ножницы"))
        self.P.setText(_translate("Form", "Бумага"))
        self.ME.setText(_translate("Form", "О нас"))
        self.label.setText(_translate("Form", "Бот:"))
        self.label_2.setText(_translate("Form", "Вы:"))


def start(winh):
    ui.label.setText("Бот: ")
    winb = ['Камень', 'Ножницы', 'Бумага']
    x = random.randint(0, 2)
    bot = winb[x]
    win = pr(x, winh)
    if winh == 'S':
        winh = 'Камень'
    elif winh == 'C':
        winh = 'Ножницы'
    else:
        winh = 'Бумага'
    ui.label_2.setText("Вы: " + winh)
    animate()
    ui.label.setText("Бот: " + bot)
    wt(win)


def ME():
    msg = QMessageBox()
    msg.setWindowTitle('О нас')
    msg.setText("Мы, компания @Kvant`s studios\nЯвляемся разработчиками игры:\nКамень, ножницы, бумага.\nДля связи с нами пишите на почту \nили в ВК:\n\nkvantgd@gmail.com\nvk.com/kvantgd")
    x = msg.exec_()


if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    ui.S.clicked.connect( lambda: start("S") )
    ui.C.clicked.connect( lambda: start("C") )
    ui.P.clicked.connect( lambda: start("P") )
    ui.ME.clicked.connect( ME )
    sys.exit(app.exec_())
31.23913
189
0.566922
536
4,311
4.501866
0.324627
0.037298
0.063821
0.046415
0.051388
0.028181
0
0
0
0
0
0.038562
0.290188
4,311
137
190
31.467153
0.75
0.062167
0
0.172414
1
0.008621
0.124166
0.010262
0
0
0
0
0
1
0.060345
false
0
0.034483
0
0.112069
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
995a079b3191ebb48a0a239dc759e1d69f8990ae
9,682
py
Python
vendor/istio.io/api/python/istio_api/networking/v1alpha3/service_dependency_pb2.py
octarinesec/istio
913b459130045fc4846a36c46c05a48da88776bb
[ "Apache-2.0" ]
2
2020-07-20T06:35:29.000Z
2021-01-22T03:35:38.000Z
vendor/istio.io/api/python/istio_api/networking/v1alpha3/service_dependency_pb2.py
octarinesec/istio
913b459130045fc4846a36c46c05a48da88776bb
[ "Apache-2.0" ]
null
null
null
vendor/istio.io/api/python/istio_api/networking/v1alpha3/service_dependency_pb2.py
octarinesec/istio
913b459130045fc4846a36c46c05a48da88776bb
[ "Apache-2.0" ]
1
2021-01-22T03:35:42.000Z
2021-01-22T03:35:42.000Z
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: networking/v1alpha3/service_dependency.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor.FileDescriptor(
  name='networking/v1alpha3/service_dependency.proto',
  package='istio.networking.v1alpha3',
  syntax='proto3',
  serialized_pb=_b('\n,networking/v1alpha3/service_dependency.proto\x12\x19istio.networking.v1alpha3\"\x92\x03\n\x11ServiceDependency\x12M\n\x0c\x64\x65pendencies\x18\x01 \x03(\x0b\x32\x37.istio.networking.v1alpha3.ServiceDependency.Dependency\x1a)\n\x06Import\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04host\x18\x02 \x01(\t\x1a\x82\x02\n\nDependency\x12q\n\x16source_workload_labels\x18\x01 \x03(\x0b\x32Q.istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry\x12\x44\n\x07imports\x18\x02 \x03(\x0b\x32\x33.istio.networking.v1alpha3.ServiceDependency.Import\x1a;\n\x19SourceWorkloadLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01*&\n\x0b\x43onfigScope\x12\n\n\x06PUBLIC\x10\x00\x12\x0b\n\x07PRIVATE\x10\x01\x42\"Z istio.io/api/networking/v1alpha3b\x06proto3')
)

_CONFIGSCOPE = _descriptor.EnumDescriptor(
  name='ConfigScope',
  full_name='istio.networking.v1alpha3.ConfigScope',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PUBLIC', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PRIVATE', index=1, number=1,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=480,
  serialized_end=518,
)
_sym_db.RegisterEnumDescriptor(_CONFIGSCOPE)

ConfigScope = enum_type_wrapper.EnumTypeWrapper(_CONFIGSCOPE)
PUBLIC = 0
PRIVATE = 1


_SERVICEDEPENDENCY_IMPORT = _descriptor.Descriptor(
  name='Import',
  full_name='istio.networking.v1alpha3.ServiceDependency.Import',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='namespace', full_name='istio.networking.v1alpha3.ServiceDependency.Import.namespace', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='host', full_name='istio.networking.v1alpha3.ServiceDependency.Import.host', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=176,
  serialized_end=217,
)

_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY = _descriptor.Descriptor(
  name='SourceWorkloadLabelsEntry',
  full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='key', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry.key', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='value', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry.value', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=419,
  serialized_end=478,
)

_SERVICEDEPENDENCY_DEPENDENCY = _descriptor.Descriptor(
  name='Dependency',
  full_name='istio.networking.v1alpha3.ServiceDependency.Dependency',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='source_workload_labels', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.source_workload_labels', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='imports', full_name='istio.networking.v1alpha3.ServiceDependency.Dependency.imports', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=220,
  serialized_end=478,
)

_SERVICEDEPENDENCY = _descriptor.Descriptor(
  name='ServiceDependency',
  full_name='istio.networking.v1alpha3.ServiceDependency',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dependencies', full_name='istio.networking.v1alpha3.ServiceDependency.dependencies', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[_SERVICEDEPENDENCY_IMPORT, _SERVICEDEPENDENCY_DEPENDENCY, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=76,
  serialized_end=478,
)

_SERVICEDEPENDENCY_IMPORT.containing_type = _SERVICEDEPENDENCY
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY.containing_type = _SERVICEDEPENDENCY_DEPENDENCY
_SERVICEDEPENDENCY_DEPENDENCY.fields_by_name['source_workload_labels'].message_type = _SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY
_SERVICEDEPENDENCY_DEPENDENCY.fields_by_name['imports'].message_type = _SERVICEDEPENDENCY_IMPORT
_SERVICEDEPENDENCY_DEPENDENCY.containing_type = _SERVICEDEPENDENCY
_SERVICEDEPENDENCY.fields_by_name['dependencies'].message_type = _SERVICEDEPENDENCY_DEPENDENCY
DESCRIPTOR.message_types_by_name['ServiceDependency'] = _SERVICEDEPENDENCY
DESCRIPTOR.enum_types_by_name['ConfigScope'] = _CONFIGSCOPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

ServiceDependency = _reflection.GeneratedProtocolMessageType('ServiceDependency', (_message.Message,), dict(

  Import = _reflection.GeneratedProtocolMessageType('Import', (_message.Message,), dict(
    DESCRIPTOR = _SERVICEDEPENDENCY_IMPORT,
    __module__ = 'networking.v1alpha3.service_dependency_pb2'
    # @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Import)
    ))
  ,

  Dependency = _reflection.GeneratedProtocolMessageType('Dependency', (_message.Message,), dict(

    SourceWorkloadLabelsEntry = _reflection.GeneratedProtocolMessageType('SourceWorkloadLabelsEntry', (_message.Message,), dict(
      DESCRIPTOR = _SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY,
      __module__ = 'networking.v1alpha3.service_dependency_pb2'
      # @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Dependency.SourceWorkloadLabelsEntry)
      ))
    ,
    DESCRIPTOR = _SERVICEDEPENDENCY_DEPENDENCY,
    __module__ = 'networking.v1alpha3.service_dependency_pb2'
    # @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency.Dependency)
    ))
  ,
  DESCRIPTOR = _SERVICEDEPENDENCY,
  __module__ = 'networking.v1alpha3.service_dependency_pb2'
  # @@protoc_insertion_point(class_scope:istio.networking.v1alpha3.ServiceDependency)
  ))
_sym_db.RegisterMessage(ServiceDependency)
_sym_db.RegisterMessage(ServiceDependency.Import)
_sym_db.RegisterMessage(ServiceDependency.Dependency)
_sym_db.RegisterMessage(ServiceDependency.Dependency.SourceWorkloadLabelsEntry)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z istio.io/api/networking/v1alpha3'))
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY.has_options = True
_SERVICEDEPENDENCY_DEPENDENCY_SOURCEWORKLOADLABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
40.008264
837
0.777629
1,070
9,682
6.749533
0.160748
0.072279
0.063694
0.099695
0.596511
0.476599
0.44946
0.402935
0.368457
0.352534
0
0.035445
0.108345
9,682
241
838
40.174274
0.801112
0.058356
0
0.589623
1
0.004717
0.237923
0.210145
0
0
0
0
0
1
0
false
0
0.09434
0
0.09434
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
995ff73160e804154606204631367eefc3fb8d9c
1,522
py
Python
notifications/sms.py
HexNumbers/OctoPrint
ba01fdd0c625150bdbe09a2ba965e30b7f434e4a
[ "MIT" ]
null
null
null
notifications/sms.py
HexNumbers/OctoPrint
ba01fdd0c625150bdbe09a2ba965e30b7f434e4a
[ "MIT" ]
null
null
null
notifications/sms.py
HexNumbers/OctoPrint
ba01fdd0c625150bdbe09a2ba965e30b7f434e4a
[ "MIT" ]
null
null
null
import smtplib, sys, urllib2
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from PIL import Image
import io

GMAIL_USERNAME = '*******@gmail.com'
GMAIL_PASS = '*******'
RECEPIENT = '*******@tmomail.net'
SNAP_URL='http://127.0.0.1:8080/?action=snapshot'
MESSAGE = "Print complete! Here is your thingy:"
ROTATE_IMAGE = False


def send():
    email = MIMEMultipart()
    envelope = MIMEMultipart('alternative')

    msg_text = MIMEText(MESSAGE, 'plain')
    msg_html = MIMEText(MESSAGE, 'html')

    u = urllib2.urlopen(SNAP_URL)
    image = Image.open(u)
    fp = io.BytesIO()
    if ROTATE_IMAGE:
        image.rotate(180).save(fp, Image.registered_extensions()['.jpg'])
    else:
        image.save(fp, Image.registered_extensions()['.jpg'])

    img = MIMEImage(fp.getvalue(), 'jpeg; name="print.jpg"')
    img.add_header('Content-Disposition', 'attachment; filename="print.jpg"')
    img.add_header('Content-ID', '<thingy>')
    img.add_header('X-Attachment-Id', 'thingy')

    email['From'] = GMAIL_USERNAME
    email['To'] = RECEPIENT

    envelope.attach(msg_text)
    envelope.attach(msg_html)
    email.attach(envelope)
    email.attach(img)

    server = smtplib.SMTP( "smtp.gmail.com", 587 )
    server.starttls()
    server.login(GMAIL_USERNAME, GMAIL_PASS)
    server.sendmail( GMAIL_USERNAME, RECEPIENT, email.as_string())


send()
35.395349
81
0.636662
181
1,522
5.243094
0.447514
0.054795
0.041096
0.044257
0.128556
0.128556
0
0
0
0
0
0.015113
0.217477
1,522
42
82
36.238095
0.781696
0
0
0
0
0
0.181997
0
0
0
0
0
0
1
0.025641
false
0.051282
0.153846
0
0.179487
0.051282
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
9962163f869a0d942c76685500a8b2f453af027d
6,306
py
Python
dendropy/test/test_dataio_nexml_reader_chars.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
dendropy/test/test_dataio_nexml_reader_chars.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
dendropy/test/test_dataio_nexml_reader_chars.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
# !/usr/bin/env python

##############################################################################
##  DendroPy Phylogenetic Computing Library.
##
##  Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
##  All rights reserved.
##
##  See "LICENSE.rst" for terms and conditions of usage.
##
##  If you use this work or any portion thereof in published work,
##  please cite it as:
##
##     Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
##     for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################

"""
Tests for general NEXUS character matrix reading.
"""

import unittest
import dendropy
from dendropy.utility import error
from dendropy.test.support import dendropytest
from dendropy.test.support import pathmap
from dendropy.test.support import standard_file_test_chars
from dendropy.test.support import compare_and_validate
from dendropy.dataio import nexmlreader
from dendropy.utility import messaging

_LOG = messaging.get_logger(__name__)

class NexmlCharactersReaderDnaTestCase(
        standard_file_test_chars.DnaTestChecker,
        dendropytest.ExtendedTestCase):

    @classmethod
    def setUpClass(cls):
        cls.build()

    def test_basic_nexml(self):
        src_filenames = [
                "standard-test-chars-dna.as_cells.nexml",
                "standard-test-chars-dna.as_seqs.nexml",
                ]
        for src_idx, src_filename in enumerate(src_filenames):
            # print(src_idx, src_filename)
            src_path = pathmap.char_source_path(src_filename)
            self.verify_get_from(
                    matrix_type=dendropy.DnaCharacterMatrix,
                    src_filepath=src_path,
                    schema="nexml",
                    factory_kwargs={},
                    check_taxon_annotations=False,
                    check_matrix_annotations=False,
                    check_sequence_annotations=False,
                    check_column_annotations=False,
                    check_cell_annotations=False)

class NexmlCharactersReaderRnaTestCase(
        standard_file_test_chars.RnaTestChecker,
        dendropytest.ExtendedTestCase):

    @classmethod
    def setUpClass(cls):
        cls.build()

    def test_basic_nexml(self):
        src_filenames = [
                "standard-test-chars-rna.as_cells.nexml",
                "standard-test-chars-rna.as_seqs.nexml",
                ]
        for src_idx, src_filename in enumerate(src_filenames):
            # print(src_idx, src_filename)
            src_path = pathmap.char_source_path(src_filename)
            self.verify_get_from(
                    matrix_type=dendropy.RnaCharacterMatrix,
                    src_filepath=src_path,
                    schema="nexml",
                    factory_kwargs={},
                    check_taxon_annotations=False,
                    check_matrix_annotations=False,
                    check_sequence_annotations=False,
                    check_column_annotations=False,
                    check_cell_annotations=False)

class NexmlCharactersReaderProteinTestCase(
        standard_file_test_chars.ProteinTestChecker,
        dendropytest.ExtendedTestCase):

    @classmethod
    def setUpClass(cls):
        cls.build()

    def test_basic_nexml(self):
        src_filenames = [
                "standard-test-chars-protein.as_cells.nexml",
                "standard-test-chars-protein.as_seqs.nexml",
                ]
        for src_idx, src_filename in enumerate(src_filenames):
            # print(src_idx, src_filename)
            src_path = pathmap.char_source_path(src_filename)
            self.verify_get_from(
                    matrix_type=dendropy.ProteinCharacterMatrix,
                    src_filepath=src_path,
                    schema="nexml",
                    factory_kwargs={},
                    check_taxon_annotations=False,
                    check_matrix_annotations=False,
                    check_sequence_annotations=False,
                    check_column_annotations=False,
                    check_cell_annotations=False)

class NexmlCharactersContinuousTestCase(
        standard_file_test_chars.ContinuousTestChecker,
        dendropytest.ExtendedTestCase):

    @classmethod
    def setUpClass(cls):
        cls.build()

    def test_basic_nexml(self):
        src_filenames = [
                "standard-test-chars-continuous.as_cells.nexml",
                "standard-test-chars-continuous.as_seqs.nexml",
                ]
        for src_idx, src_filename in enumerate(src_filenames):
            # print(src_idx, src_filename)
            src_path = pathmap.char_source_path(src_filename)
            self.verify_get_from(
                    matrix_type=dendropy.ContinuousCharacterMatrix,
                    src_filepath=src_path,
                    schema="nexml",
                    factory_kwargs={},
                    check_taxon_annotations=False,
                    check_matrix_annotations=False,
                    check_sequence_annotations=False,
                    check_column_annotations=False,
                    check_cell_annotations=False)

class NexmlStandardCharacters01234TestCase(
        standard_file_test_chars.Standard01234TestChecker,
        dendropytest.ExtendedTestCase):

    @classmethod
    def setUpClass(cls):
        cls.build()

    def test_basic_nexml(self):
        src_filenames = [
                "standard-test-chars-generic.as_cells.nexml",
                "standard-test-chars-generic.as_seqs.nexml",
                ]
        for src_idx, src_filename in enumerate(src_filenames):
            # print(src_idx, src_filename)
            src_path = pathmap.char_source_path(src_filename)
            self.verify_get_from(
                    matrix_type=dendropy.StandardCharacterMatrix,
                    src_filepath=src_path,
                    schema="nexml",
                    factory_kwargs={},
                    check_taxon_annotations=False,
                    check_matrix_annotations=False,
                    check_sequence_annotations=False,
                    check_column_annotations=False,
                    check_cell_annotations=False)

if __name__ == "__main__":
    unittest.main()
36.877193
78
0.599746
598
6,306
6.016722
0.220736
0.111173
0.116732
0.047248
0.704836
0.65592
0.61562
0.61562
0.61562
0.61562
0
0.007304
0.305265
6,306
170
79
37.094118
0.813969
0.09594
0
0.669291
0
0
0.07955
0.073556
0
0
0
0
0
1
0.07874
false
0
0.070866
0
0.188976
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9966d637d34484011d82a970b82967f225c9276e
2,067
py
Python
lib.py
miami-acm/unit-testing
d074c680805f49848991bdfeab43537785238560
[ "MIT" ]
null
null
null
lib.py
miami-acm/unit-testing
d074c680805f49848991bdfeab43537785238560
[ "MIT" ]
null
null
null
lib.py
miami-acm/unit-testing
d074c680805f49848991bdfeab43537785238560
[ "MIT" ]
null
null
null
#!/usr/bin/env python3


def sleep_in(weekday, vacation):
    """
    The parameter weekday is True if it is a weekday, and the parameter
    vacation is True if we are on vacation. We sleep in if it is not a
    weekday or we're on vacation. Return True if we sleep in.

    sleep_in(False, False) → True
    sleep_in(True, False) → False
    sleep_in(False, True) → True
    """
    return False


def monkey_trouble(a_smile, b_smile):
    """
    We have two monkeys, a and b, and the parameters a_smile and b_smile
    indicate if each is smiling. We are in trouble if they are both smiling
    or if neither of them is smiling. Return True if we are in trouble.

    monkey_trouble(True, True) → True
    monkey_trouble(False, False) → True
    monkey_trouble(True, False) → False
    """
    return False


def sum_double(a, b):
    """
    Given two int values, return their sum. Unless the two values are the
    same, then return double their sum.

    sum_double(1, 2) → 3
    sum_double(3, 2) → 5
    sum_double(2, 2) → 8
    """
    return 0


def diff21(n):
    """
    Given an int n, return the absolute difference between n and 21, except
    return double the absolute difference if n is over 21.

    diff21(19) → 2
    diff21(10) → 11
    diff21(21) → 0
    """
    return 0


def count_evens(nums):
    """
    Return the number of even ints in the given array. Note: the % "mod"
    operator computes the remainder, e.g. 5 % 2 is 1.

    count_evens([2, 1, 2, 3, 4]) → 3
    count_evens([2, 2, 0]) → 3
    count_evens([1, 3, 5]) → 0
    """
    return 0


def xyz_there(s):
    """
    Return True if the given string contains an appearance of "xyz" where
    the xyz is not directly preceded by a period (.). So "xxyz" counts but
    "x.xyz" does not.

    xyz_there('abcxyz') → True
    xyz_there('abc.xyz') → False
    xyz_there('xyz.abc') → True
    """
    return False


def is_prime(n):
    """
    Return True if the given number is prime.

    is_prime(2) → True
    is_prime(4) → False
    is_prime(11) → True
    """
    return False
23.488636
79
0.627963
357
2,067
3.616247
0.291317
0.030984
0.03718
0.03718
0.079009
0
0
0
0
0
0
0.037259
0.272859
2,067
87
80
23.758621
0.807718
0.734398
0
0.5
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
9968ab6b834c0b7f00339801637e6217641653ab
246
py
Python
gvars.py
MattSkiff/cow_flow
6354842fbe3ceccc3648d956987b391670476292
[ "MIT" ]
null
null
null
gvars.py
MattSkiff/cow_flow
6354842fbe3ceccc3648d956987b391670476292
[ "MIT" ]
1
2022-03-12T01:03:01.000Z
2022-03-12T01:03:01.000Z
gvars.py
MattSkiff/cow_flow
6354842fbe3ceccc3648d956987b391670476292
[ "MIT" ]
null
null
null
import config as c

FEAT_MOD_DIR = './models/feat_extractors/'
VIZ_DIR = './viz'
WEIGHT_DIR = './weights'
MODEL_DIR = './models'
LOG_DIR = './logs'
C_DIR = './cstates'
DMAP_DIR = './data/precompute/size_{}_sigma_{}/'.format(c.filter_size,c.sigma)
27.333333
78
0.695122
37
246
4.27027
0.621622
0.113924
0
0
0
0
0
0
0
0
0
0
0.101626
246
9
78
27.333333
0.714932
0
0
0
0
0
0.392713
0.242915
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
996bd5432ca4d3076b30cd7035a3ff74a9f4d4d5
2,220
py
Python
elationmagic.py
lordjabez/light-maestro
ddc8a6398f818bc531f5c809ab00e69e121e25ad
[ "Apache-2.0" ]
1
2015-08-20T08:05:41.000Z
2015-08-20T08:05:41.000Z
elationmagic.py
lordjabez/light-maestro
ddc8a6398f818bc531f5c809ab00e69e121e25ad
[ "Apache-2.0" ]
null
null
null
elationmagic.py
lordjabez/light-maestro
ddc8a6398f818bc531f5c809ab00e69e121e25ad
[ "Apache-2.0" ]
null
null
null
""" @copyright: 2013 Single D Software - All Rights Reserved @summary: Elation Magic 260 MIDI interface for Light Maestro. """ # Standard library imports import logging # Additional library imports import rtmidi import rtmidi.midiconstants # Application imports import console # Named logger for this module _logger = logging.getLogger(__name__) class ElationMagic(console.Console): """The console class that communicates with the Elation Magic 260.""" def _sendmidi(self, channel, note): try: self._midi.send_message((rtmidi.midiconstants.NOTE_ON | channel, note, 127)) _logger.debug('Sent note {0} to channel {1}'.format(note, channel)) except RuntimeError: raise console.CommunicationError def getstatus(self): """ Provide status information for the connection to the console. @return: Dictionary containing status information """ status = super().getstatus() status['condition'] = 'operational' if self._midi else 'nonoperational' return status def getchannels(self): raise console.NotSupportedError def loadchannels(self, data, sceneid=None): raise console.NotSupportedError def getscenes(self): raise console.NotSupportedError def getscene(self, sceneid): raise console.NotSupportedError def loadscene(self, sceneid): try: channel, note = divmod(int(sceneid) - 1, 72) self._sendmidi(channel, note) except ValueError: _logger.warning('Non-numeric scenes are not supported.') def savescene(self, sceneid, fade=5, scene=None): raise console.NotSupportedError def deletescene(self, sceneid): raise console.NotSupportedError def __init__(self, parameter='USB'): self._midi = rtmidi.MidiOut() for p, portname in enumerate(self._midi.get_ports()): if parameter in portname: self._midi.open_port(p) _logger.info('Connected to MIDI device "{0}"'.format(self._midi.get_port_name(p))) super().__init__() return _logger.warning('No USB MIDI device found')
29.6
98
0.656757
243
2,220
5.872428
0.473251
0.058865
0.121934
0.134548
0.161177
0.060266
0
0
0
0
0
0.012048
0.252252
2,220
74
99
30
0.84759
0.178378
0
0.186047
0
0
0.087788
0
0
0
0
0
0
1
0.232558
false
0
0.093023
0
0.395349
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
996e1f99f31c354cf76b0192bd90d53f495fec14
194
py
Python
6 kyu/Caesar Cipher Encryption Variation.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
6
2020-09-03T09:32:25.000Z
2020-12-07T04:10:01.000Z
6 kyu/Caesar Cipher Encryption Variation.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
1
2021-12-13T15:30:21.000Z
2021-12-13T15:30:21.000Z
6 kyu/Caesar Cipher Encryption Variation.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
null
null
null
def caesar_encode(phrase, shift):
    # Shift each letter by (shift + word index) positions, wrapping within a-z,
    # so successive words are rotated by progressively larger offsets.
    res = []
    for i, j in enumerate(phrase.split()):
        res.append("".join(chr(ord("a") + (ord(k) - ord("a") + shift + i) % 26) for k in j))
    return " ".join(res)
38.8
82
0.582474
33
194
3.393939
0.606061
0.071429
0
0
0
0
0
0
0
0
0
0.012579
0.180412
194
5
83
38.8
0.691824
0
0
0
0
0
0.015385
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99729efd1ed3e15b30cbb89f9e80540b94f9b6f9
557
py
Python
python/setup.py
dune-mirrors/dune-python
e83ac8c8e6eb7c4f6e72a21b1efde8e674a226bd
[ "BSD-3-Clause" ]
null
null
null
python/setup.py
dune-mirrors/dune-python
e83ac8c8e6eb7c4f6e72a21b1efde8e674a226bd
[ "BSD-3-Clause" ]
null
null
null
python/setup.py
dune-mirrors/dune-python
e83ac8c8e6eb7c4f6e72a21b1efde8e674a226bd
[ "BSD-3-Clause" ]
null
null
null
from setuptools import setup, find_packages

setup(name='dune.common',
      namespace_packages=['dune'],
      version='2.4',
      description='Python package accompanying the DUNE project',
      url='http://www.dune-project.org',
      author='Dominic Kempf',
      author_email='dominic.kempf@iwr.uni-heidelberg.de',
      license='BSD',
      packages=['dune.common',
                'dune.common.parametertree',
                'dune.common.modules',
                ],
      install_requires=['pyparsing>=2.1.10',
                        ],
      )
30.944444
65
0.572711
57
557
5.526316
0.701754
0.126984
0
0
0
0
0
0
0
0
0
0.015075
0.285458
557
17
66
32.764706
0.776382
0
0
0.125
0
0
0.38061
0.10772
0
0
0
0
0
1
0
true
0
0.0625
0
0.0625
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
998087f02dbab8dae6c25cacb1cd710f8eb57923
345
py
Python
plugins/broadcast.py
DeveloperNoob/BAD_LOKI_FACE
75acea0e9c2403bfd7b732153a0bef284d12b16e
[ "MIT" ]
null
null
null
plugins/broadcast.py
DeveloperNoob/BAD_LOKI_FACE
75acea0e9c2403bfd7b732153a0bef284d12b16e
[ "MIT" ]
null
null
null
plugins/broadcast.py
DeveloperNoob/BAD_LOKI_FACE
75acea0e9c2403bfd7b732153a0bef284d12b16e
[ "MIT" ]
null
null
null
#By @Joel_Noob

from pyrogram import Client, filters

from config import Config


@Client.on_message(
    filters.private
    & filters.command("broadcast")
    & filters.user(Config.ADMINS)
    & filters.reply
)
async def broadcast_(c, m):
    await c.start_broadcast(
        broadcast_message=m.reply_to_message,
        admin_id=m.from_user.id
    )
20.294118
69
0.715942
47
345
5.06383
0.553191
0
0
0
0
0
0
0
0
0
0
0
0.185507
345
16
70
21.5625
0.846975
0.037681
0
0
0
0
0.02719
0
0
0
0
0
0
1
0
true
0
0.166667
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
998f551a05620bbe51cf037cfa0f6b3d026b742d
486
py
Python
recipes/migrations/0004_auto_20210308_0628.py
PavelYasukevich/foodgram-project
d03af25d8fd0cbf1eec03467a95620b89993c9fd
[ "MIT" ]
null
null
null
recipes/migrations/0004_auto_20210308_0628.py
PavelYasukevich/foodgram-project
d03af25d8fd0cbf1eec03467a95620b89993c9fd
[ "MIT" ]
null
null
null
recipes/migrations/0004_auto_20210308_0628.py
PavelYasukevich/foodgram-project
d03af25d8fd0cbf1eec03467a95620b89993c9fd
[ "MIT" ]
1
2021-03-27T16:34:07.000Z
2021-03-27T16:34:07.000Z
# Generated by Django 3.1.7 on 2021-03-08 06:28

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('recipes', '0003_auto_20210304_1233'),
    ]

    operations = [
        migrations.AlterField(
            model_name='recipe',
            name='fav_counter',
            field=models.PositiveSmallIntegerField(default=0, help_text='Счетчик добавлений в избранное', verbose_name='Добавлений в избранное'),
        ),
    ]
25.578947
145
0.650206
53
486
5.830189
0.811321
0.071197
0.12945
0
0
0
0
0
0
0
0
0.086957
0.242798
486
18
146
27
0.752717
0.092593
0
0
1
0
0.225513
0.052392
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
9993ad6982945646f5058571579df16c0d7ea0c7
438
py
Python
board_app/migrations/0004_alter_boardmodel_snsimage.py
OkuboAtsushi/board-project
c73beab6ad9525f1fe31d8e9b987476e4b45fd18
[ "MIT" ]
null
null
null
board_app/migrations/0004_alter_boardmodel_snsimage.py
OkuboAtsushi/board-project
c73beab6ad9525f1fe31d8e9b987476e4b45fd18
[ "MIT" ]
null
null
null
board_app/migrations/0004_alter_boardmodel_snsimage.py
OkuboAtsushi/board-project
c73beab6ad9525f1fe31d8e9b987476e4b45fd18
[ "MIT" ]
null
null
null
# Generated by Django 3.2.3 on 2021-08-01 12:25

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('board_app', '0003_alter_boardmodel_snsimage'),
    ]

    operations = [
        migrations.AlterField(
            model_name='boardmodel',
            name='snsimage',
            field=models.ImageField(blank=True, default=None, null=True, upload_to=''),
        ),
    ]
23.052632
87
0.621005
48
438
5.541667
0.791667
0
0
0
0
0
0
0
0
0
0
0.058642
0.260274
438
18
88
24.333333
0.762346
0.10274
0
0
1
0
0.14578
0.076726
0
0
0
0
0
1
0
false
0
0.083333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99a086af551d96b57462192e6864be3a7cb52597
2,049
py
Python
smartlearner/status.py
MarcCote/smartlearner
0afdcd3b38dddfee16330b8324eb3b0e224f1c2b
[ "BSD-3-Clause" ]
null
null
null
smartlearner/status.py
MarcCote/smartlearner
0afdcd3b38dddfee16330b8324eb3b0e224f1c2b
[ "BSD-3-Clause" ]
null
null
null
smartlearner/status.py
MarcCote/smartlearner
0afdcd3b38dddfee16330b8324eb3b0e224f1c2b
[ "BSD-3-Clause" ]
null
null
null
from os.path import join as pjoin

from .utils import save_dict_to_json_file, load_dict_from_json_file


class Status(object):
    def __init__(self, trainer=None, starting_epoch=0, starting_update=0):
        self.current_epoch = starting_epoch
        self.current_update = starting_update
        self.current_update_in_epoch = 1
        self.trainer = trainer
        self.training_time = 0
        self.done = False
        self.extra = {}

    def increment_update(self):
        self.current_update += 1
        self.current_update_in_epoch += 1

    def increment_epoch(self):
        self.current_epoch += 1
        self.current_update_in_epoch = 0

    def __repr__(self):
        # Note: the original passed seven values to format() but provided only
        # six {!r} placeholders, silently dropping training_time; a matching
        # placeholder line is added here so every value is printed.
        return ('Status object with state :\n' +\
                '  current_epoch = {!r}\n' +\
                '  current_update = {!r}\n' +\
                '  current_update_in_epoch = {!r}\n' +\
                '  trainer = {!r}\n' +\
                '  training_time = {!r}\n' +\
                '  done = {!r}\n' +\
                '  extra = {!r}\n').format(self.current_epoch,
                                           self.current_update,
                                           self.current_update_in_epoch,
                                           self.trainer,
                                           self.training_time,
                                           self.done,
                                           self.extra)

    def save(self, savedir="./"):
        state = {"version": 1,
                 "current_epoch": self.current_epoch,
                 "current_update": self.current_update,
                 "current_update_in_epoch": self.current_update_in_epoch,
                 "training_time": self.training_time,
                 "done": self.done,
                 "extra": self.extra,
                 }
        save_dict_to_json_file(pjoin(savedir, 'status.json'), state)

    def load(self, loaddir="./"):
        state = load_dict_from_json_file(pjoin(loaddir, 'status.json'))
        self.current_epoch = state["current_epoch"]
        self.current_update = state["current_update"]
        self.current_update_in_epoch = state["current_update_in_epoch"]
        self.training_time = state["training_time"]
        self.done = state["done"]
        self.extra = state["extra"]
37.254545
113
0.585163
240
2,049
4.666667
0.1875
0.197321
0.166964
0.160714
0.338393
0.139286
0.066071
0
0
0
0
0.006272
0.299658
2,049
54
114
37.944444
0.774216
0
0
0
0
0
0.164959
0.033675
0
0
0
0
0
1
0.136364
false
0
0.045455
0.022727
0.227273
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99a24f7bca5db37447ff93cc105a33b663fe4185
9,991
py
Python
10601-hws/HW 6/hw6/python/hwTestLeNet.py
dfreilich/machine-learning-workspace
a1b6e5bd84a4f5708461f3827d64e2bf5a32dffa
[ "MIT" ]
null
null
null
10601-hws/HW 6/hw6/python/hwTestLeNet.py
dfreilich/machine-learning-workspace
a1b6e5bd84a4f5708461f3827d64e2bf5a32dffa
[ "MIT" ]
null
null
null
10601-hws/HW 6/hw6/python/hwTestLeNet.py
dfreilich/machine-learning-workspace
a1b6e5bd84a4f5708461f3827d64e2bf5a32dffa
[ "MIT" ]
null
null
null
import numpy as np
import cnn_lenet
import pickle
import copy
import random
import matplotlib as mp
import matplotlib.pyplot as plt
import math


def get_lenet():
  """Define LeNet

  Explanation of parameters:
  type: layer type, supports convolution, pooling, relu
  channel: input channel
  num: output channel
  k: convolution kernel width (== height)
  group: split input channel into several groups, not used in this assignment
  """
  layers = {}
  layers[1] = {}
  layers[1]['type'] = 'DATA'
  layers[1]['height'] = 28
  layers[1]['width'] = 28
  layers[1]['channel'] = 1
  layers[1]['batch_size'] = 1

  layers[2] = {}
  layers[2]['type'] = 'CONV'
  layers[2]['num'] = 20
  layers[2]['k'] = 5
  layers[2]['stride'] = 1
  layers[2]['pad'] = 0
  layers[2]['group'] = 1

  layers[3] = {}
  layers[3]['type'] = 'POOLING'
  layers[3]['k'] = 2
  layers[3]['stride'] = 2
  layers[3]['pad'] = 0

  layers[4] = {}
  layers[4]['type'] = 'CONV'
  layers[4]['num'] = 50
  layers[4]['k'] = 5
  layers[4]['stride'] = 1
  layers[4]['pad'] = 0
  layers[4]['group'] = 1

  layers[5] = {}
  layers[5]['type'] = 'POOLING'
  layers[5]['k'] = 2
  layers[5]['stride'] = 2
  layers[5]['pad'] = 0

  layers[6] = {}
  layers[6]['type'] = 'IP'
  layers[6]['num'] = 500
  layers[6]['init_type'] = 'uniform'

  layers[7] = {}
  layers[7]['type'] = 'RELU'

  layers[8] = {}
  layers[8]['type'] = 'LOSS'
  layers[8]['num'] = 10

  return layers


def trainNet():
  # define lenet
  layers = get_lenet()

  # load data
  # change the following value to true to load the entire dataset
  fullset = True
  print("Loading MNIST Dataset...")
  xtrain, ytrain, xval, yval, xtest, ytest = cnn_lenet.load_mnist(fullset)
  print("MNIST Dataset Loading Complete!\n")

  xtrain = np.hstack([xtrain, xval])
  ytrain = np.hstack([ytrain, yval])
  m_train = xtrain.shape[1]

  # cnn parameters
  batch_size = 64
  mu = 0.9
  epsilon = 0.01
  gamma = 0.0001
  power = 0.75
  weight_decay = 0.0005
  w_lr = 1
  b_lr = 2

  test_interval = 100
  display_interval = 100
  snapshot = 5000
  max_iter = 10000  # Lets it run the entire way

  # initialize parameters
  print("Initializing Parameters...")
  # You can make the params your params, and not the initialized ones, in order to visualize the results
  params = cnn_lenet.init_convnet(layers)
  param_winc = copy.deepcopy(params)
  print("Initilization Complete!\n")

  for l_idx in range(1, len(layers)):
    param_winc[l_idx]['w'] = np.zeros(param_winc[l_idx]['w'].shape)
    param_winc[l_idx]['b'] = np.zeros(param_winc[l_idx]['b'].shape)

  # learning iterations
  random.seed(100000)
  indices = range(m_train)
  random.shuffle(indices)

  train_cost = np.array([])
  train_accuracy = np.array([])
  test_cost = np.array([])
  test_accuracy = np.array([])

  print("Training Started. Printing report on training data every " + str(display_interval) + " steps.")
  print("Printing report on test data every " + str(test_interval) + " steps.\n")

  for step in range(max_iter):
    # get mini-batch and setup the cnn with the mini-batch
    start_idx = step * batch_size % m_train
    end_idx = (step+1) * batch_size % m_train
    if start_idx > end_idx:
      random.shuffle(indices)
      continue
    idx = indices[start_idx: end_idx]

    [cp, param_grad] = cnn_lenet.conv_net(params, layers, xtrain[:, idx], ytrain[idx], True)
    # True there is to get backtracking, but you can just use it for forward, to visualize
    # You have to make the function return output for you, so that you can reshape it into an image matrix, to show the image

    # we have different epsilons for w and b
    w_rate = cnn_lenet.get_lr(step, epsilon*w_lr, gamma, power)
    b_rate = cnn_lenet.get_lr(step, epsilon*b_lr, gamma, power)
    params, param_winc = cnn_lenet.sgd_momentum(w_rate, b_rate, mu, weight_decay, params, param_winc, param_grad)

    # display training loss
    if (step+1) % display_interval == 0:
      print 'training_cost = %f training_accuracy = %f' % (cp['cost'], cp['percent']) + ' current_step = ' + str(step + 1)
      train_cost = np.append(train_cost, cp['cost'])
      train_accuracy = np.append(train_accuracy, cp['percent'])

    # display test accuracy
    if (step+1) % test_interval == 0:
      layers[1]['batch_size'] = xtest.shape[1]
      cptest, _ = cnn_lenet.conv_net(params, layers, xtest, ytest, False)
      layers[1]['batch_size'] = 64
      print 'test_cost = %f test_accuracy = %f' % (cptest['cost'], cptest['percent']) + ' current_step = ' + str(step + 1) + '\n'
      test_cost = np.append(test_cost, cptest['cost'])
      test_accuracy = np.append(test_accuracy, cptest['percent'])

    # save params peridocally to recover from any crashes
    if (step+1) % snapshot == 0:
      pickle_path = 'lenet.mat'
      pickle_file = open(pickle_path, 'wb')
      pickle.dump(params, pickle_file)
      pickle_file.close()

    # Saves params at 30 for Question 4
    if (step+1) == 30:
      pickle_path = 'lenetAt30Iterations.mat'
      pickle_file = open(pickle_path, 'wb')
      pickle.dump(params, pickle_file)
      pickle_file.close()

    if (step+1) == max_iter:
      np.savetxt('trainCost.txt', train_cost)
      np.savetxt('trainAccuracy.txt', train_accuracy)
      np.savetxt('testCost.txt', test_cost)
      np.savetxt('testAccuracy.txt', test_accuracy)
      # np.savetxt('costsStacked.txt', np.column_stack(train_cost, test_cost))
      # np.savetxt('accuracyStacked.txt', np.column_stack(train_accuracy, test_accuracy))
      pickle_path = 'lenetAt10000Iterations.mat'
      pickle_file = open(pickle_path, 'wb')
      pickle.dump(params, pickle_file)
      pickle_file.close()

    if (step) == max_iter:
      np.savetxt('trainCost1.txt', train_cost)
      np.savetxt('trainAccuracy1.txt', train_accuracy)
      np.savetxt('testCost1.txt', test_cost)
      np.savetxt('testAccuracy1.txt', test_accuracy)
      # np.savetxt('costsStacked1.txt', np.column_stack(train_cost, test_cost))
      # np.savetxt('accuracyStacked1.txt', np.column_stack(train_accuracy, test_accuracy))
      pickle_path = 'lenetAtMAXPLUSONEIterations.mat'
      pickle_file = open(pickle_path, 'wb')
      pickle.dump(params, pickle_file)
      pickle_file.close()


def visualizeOutputOfSecondLayer(givenParams):
  # define lenet
  layers = get_lenet()

  # load data
  # change the following value to true to load the entire dataset
  fullset = True
  print("Loading MNIST Dataset...")
  xtrain, ytrain, xval, yval, xtest, ytest = cnn_lenet.load_mnist(fullset)
  print("MNIST Dataset Loading Complete!\n")

  xtrain = np.hstack([xtrain, xval])
  ytrain = np.hstack([ytrain, yval])
  m_train = xtrain.shape[1]

  # cnn parameters
  batch_size = 1

  # initialize parameters
  print("Initializing Parameters from given params")
  # You can make the params your params, and not the initialized ones, in order to visualize the results
  params = givenParams
  param_winc = copy.deepcopy(params)
  print("Initilization Complete!\n")

  for l_idx in range(1, len(layers)):
    param_winc[l_idx]['w'] = np.zeros(param_winc[l_idx]['w'].shape)
    param_winc[l_idx]['b'] = np.zeros(param_winc[l_idx]['b'].shape)

  # learning iterations
  random.seed(100000)
  indices = range(m_train)
  random.shuffle(indices)

  max_iter = 1
  # get mini-batch and setup the cnn with the mini-batch
  for step in range(max_iter):
    # get mini-batch and setup the cnn with the mini-batch
    start_idx = step * batch_size % m_train
    end_idx = (step + 1) * batch_size % m_train
    if start_idx > end_idx:
      random.shuffle(indices)
      continue
    idx = indices[start_idx: end_idx]

    [cp, param_grad, output] = cnn_lenet.conv_net(params, layers, xtrain[:, 0:1], ytrain[0:1], False)
    # conv_out = output[2]['data'].reshape(24,24,20)
    # plotNNFilter(conv_out)
    conv_out = output[1]['data'].reshape(28,28,1)
    plotNNFilter(conv_out)
    # for j in range(20):
    #   plt.figure()
    #   print j
    #   plt.imshow(conv_out[:,:,j], cmap="gray")
    #   plt.show()
    # plotNNFilter(additionalReturn['data'].reshape(24,24,20))
    # plotNNFilter(additionalReturn['data'].reshape())
    # You have to make the function return output for you, so that you can reshape it into an image matrix, to show the image


def plotNNFilter(units):
  filters = 1
  plt.figure(1, figsize=(24,24))
  n_columns = 4
  n_rows = math.ceil(filters / n_columns) + 1
  for i in range(filters):
    plt.subplot(n_rows, n_columns, i+1)
    plt.title('Filter ' + str(i+1))
    plt.imshow(units[:,:,i], interpolation="nearest", cmap="gray")
  plt.pause(100)


def visualizeCost():
  train_Cost = np.genfromtxt('trainCost.txt')
  test_Cost = np.genfromtxt('testCost.txt')
  plt.gca().set_color_cycle(['red', 'green'])
  plt.plot(train_Cost, train_Cost)
  plt.plot(train_Cost, test_Cost)
  # plt.axis([0,1,0,10000])
  plt.legend(['Train Cost', 'Test Cost'], loc='upper left')
  plt.show()


def visualizeAccuracy():
  train_Cost = np.genfromtxt('trainAccuracy.txt')
  test_Cost = np.genfromtxt('testAccuracy.txt')
  plt.gca().set_color_cycle(['red', 'green'])
  plt.plot(train_Cost, train_Cost)
  plt.plot(train_Cost, test_Cost)
  # plt.axis([0,1,0,10000])
  plt.legend(['Train Accuracy', 'Test Accuracy'], loc='upper left')
  plt.show()


if __name__ == '__main__':
  # params = pickle.load(open("lenetAt30Iterations.mat", "rb"))
  # params2 = pickle.load(open("lenetAt10000Iterations.mat", "rb"))
  # visualizeOutputOfSecondLayer(params2)
  visualizeAccuracy()
  # print(params2)
  # visualizeOutputOfSecondLayer()
  # main()
31.319749
129
0.636273
1,366
9,991
4.513177
0.207906
0.023358
0.012976
0.016869
0.511436
0.453366
0.431144
0.411354
0.411354
0.411354
0
0.028686
0.225403
9,991
318
130
31.418239
0.767929
0.201481
0
0.326923
0
0
0.141084
0.010451
0
0
0
0
0
0
null
null
0
0.038462
null
null
0.057692
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
99a673617423652de49110e1613b793a9d706888
644
py
Python
conf/urls.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
null
null
null
conf/urls.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
null
null
null
conf/urls.py
crisariasgg/RepinSolution
27e9b04ccc887b4300d77dda8657e761f9523123
[ "MIT" ]
1
2021-12-09T21:27:35.000Z
2021-12-09T21:27:35.000Z
"""URL Configuration""" from django.contrib import admin from django.urls import path, include from django.conf import settings from django.views.generic import TemplateView from django.conf.urls.static import static urlpatterns = [ path('admin/', admin.site.urls), path('', include(('apps.users.urls', 'users'), namespace='users')), path('', include(('apps.import_excel.urls', 'import_excel'), namespace='import_excel')), ]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) if settings.DEBUG: import debug_toolbar urlpatterns += [ path('__debug__/', include(debug_toolbar.urls)), ]
25.76
96
0.706522
78
644
5.679487
0.358974
0.112867
0.063205
0
0
0
0
0
0
0
0
0
0.150621
644
24
97
26.833333
0.809872
0.026398
0
0
0
0
0.140097
0.035427
0
0
0
0
0
1
0
false
0
0.466667
0
0.466667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
99a8de2271c3c05622470fceff6ae843c59d9535
1,524
py
Python
pages/migrations/0001_initial.py
mixnix/subject_rate
224fdc7c17afd972596c628bda65a384274ed4a1
[ "MIT" ]
null
null
null
pages/migrations/0001_initial.py
mixnix/subject_rate
224fdc7c17afd972596c628bda65a384274ed4a1
[ "MIT" ]
null
null
null
pages/migrations/0001_initial.py
mixnix/subject_rate
224fdc7c17afd972596c628bda65a384274ed4a1
[ "MIT" ]
null
null
null
# Generated by Django 2.1.3 on 2018-11-22 04:31 import django.core.validators from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='CourseName', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('course_name', models.CharField(max_length=225)), ], ), migrations.CreateModel( name='Professor', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('professor_name', models.CharField(default='', max_length=255)), ], ), migrations.CreateModel( name='Review', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('how_easy', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)])), ('how_interesting', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(0)])), ('creation_date', models.DateTimeField(auto_now_add=True)), ('review_body', models.TextField()), ], ), ]
38.1
173
0.603018
148
1,524
6.081081
0.418919
0.055556
0.111111
0.076667
0.516667
0.516667
0.516667
0.516667
0.516667
0.516667
0
0.027605
0.263123
1,524
39
174
39.076923
0.77382
0.029528
0
0.46875
1
0
0.073798
0
0
0
0
0
0
1
0
false
0
0.0625
0
0.1875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
99a91fc05ae7123630e16e93bde5cb1b4970dfad
570
py
Python
src/spaceone/inventory/error/custom.py
jihyungSong/plugin-azure-power-state
d66bd5dfafa01659c877da11c0d18de6e55cb5ab
[ "Apache-2.0" ]
1
2020-12-04T01:37:15.000Z
2020-12-04T01:37:15.000Z
src/spaceone/inventory/error/custom.py
jihyungSong/plugin-azure-power-state
d66bd5dfafa01659c877da11c0d18de6e55cb5ab
[ "Apache-2.0" ]
null
null
null
src/spaceone/inventory/error/custom.py
jihyungSong/plugin-azure-power-state
d66bd5dfafa01659c877da11c0d18de6e55cb5ab
[ "Apache-2.0" ]
2
2020-12-04T01:37:18.000Z
2020-12-28T02:53:39.000Z
from spaceone.core.error import ERROR_BASE


class ERROR_REPOSITORY_BACKEND(ERROR_BASE):
    status_code = 'INTERNAL'
    message = 'Repository backend has problem. ({host})'


class ERROR_DRIVER(ERROR_BASE):
    status_code = 'INTERNAL'
    message = '{message}'


class ERROR_NOT_INITIALIZED_EXCEPTION(ERROR_BASE):
    status_code = 'INTERNAL'
    message = 'Collector is not initialized. Please call initialize() method before using it.'


class ERROR_ATHENTICATION_VERIFY(ERROR_BASE):
    message = 'Connection failed. Please check your authentication information.'
27.142857
94
0.759649
68
570
6.132353
0.544118
0.107914
0.107914
0.136691
0.244604
0.244604
0
0
0
0
0
0
0.15614
570
20
95
28.5
0.866944
0
0
0.25
0
0
0.377193
0
0
0
0
0
0
1
0
false
0
0.083333
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
99aacf62d8ba4b992323536e4d23c55bfd7baf04
706
py
Python
db_server.py
YashithaNadiranga/MysqlFlask
1576abc388666f3e7e5ff288d1d221a4012f991b
[ "MIT" ]
null
null
null
db_server.py
YashithaNadiranga/MysqlFlask
1576abc388666f3e7e5ff288d1d221a4012f991b
[ "MIT" ]
null
null
null
db_server.py
YashithaNadiranga/MysqlFlask
1576abc388666f3e7e5ff288d1d221a4012f991b
[ "MIT" ]
null
null
null
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:pass@localhost/student_details'
db = SQLAlchemy(app)


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)

    def __repr__(self):
        return '<User %r>' % self.username


db.create_all()

admin = User(username='admin', email='admin@example.com')
db.session.add(admin)
db.session.commit()

print(User.query.all())


@app.route("/")
def index():
    pass


if __name__ == "__main__":
    app.run(debug=True)
22.0625
85
0.70255
100
706
4.74
0.54
0.050633
0.063291
0.067511
0
0
0
0
0
0
0
0.008237
0.140227
706
31
86
22.774194
0.772652
0
0
0
0
0
0.150142
0.093484
0
0
0
0
0
1
0.095238
false
0.095238
0.095238
0.047619
0.428571
0.047619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
99ac99fa9ef1118415e2e909b4f4d2bb005ce536
1,397
py
Python
relay_daemon/logger.py
vt-gs/relay_daemon
9d77cd3222a3fe3e588f7c2196a4a06e8a73a471
[ "MIT" ]
null
null
null
relay_daemon/logger.py
vt-gs/relay_daemon
9d77cd3222a3fe3e588f7c2196a4a06e8a73a471
[ "MIT" ]
null
null
null
relay_daemon/logger.py
vt-gs/relay_daemon
9d77cd3222a3fe3e588f7c2196a4a06e8a73a471
[ "MIT" ]
null
null
null
#!/usr/bin/env python2
# Logger utilities

import math, sys, os, time, struct, traceback, binascii, logging
import datetime as dt


class MyFormatter(logging.Formatter):
    #Overriding formatter for datetime
    converter = dt.datetime.utcfromtimestamp

    def formatTime(self, record, datefmt=None):
        ct = self.converter(record.created)
        if datefmt:
            s = ct.strftime(datefmt)
        else:
            t = ct.strftime("%Y%m%d_%H:%M:%S")
            s = "%s,%03d" % (t, record.msecs)
        return s


def setup_logger(log_name, level=logging.DEBUG, ts=None, log_path=None):
    l = logging.getLogger(log_name)
    if ts == None:
        ts = str(get_uptime())  # note: get_uptime() is referenced but not defined in this module
    log_file = "relayd_{:s}_{:s}.log".format(log_name, ts)
    if log_path == None:
        log_path = '.'
    log_path = log_path + '/' + log_file
    #log_path = os.getcwd() + '/log/' + log_file
    print log_path
    formatter = MyFormatter(fmt='%(asctime)s UTC | %(threadName)14s | %(levelname)8s | %(message)s', datefmt='%Y%m%d %H:%M:%S.%f')
    #fileHandler = logging.FileHandler(log_path, mode='w')
    fileHandler = logging.FileHandler(log_path)
    fileHandler.setFormatter(formatter)
    #streamHandler = logging.StreamHandler()
    #streamHandler.setFormatter(formatter)
    l.setLevel(level)
    l.addHandler(fileHandler)
    l.info('Logger Initialized')
    #l.addHandler(streamHandler)
    return fileHandler
34.073171
129
0.655691
179
1,397
5.005587
0.446927
0.070313
0.033482
0.008929
0.120536
0.013393
0
0
0
0
0
0.005386
0.202577
1,397
40
130
34.925
0.798923
0.193271
0
0
0
0
0.12958
0
0
0
0
0
0
0
null
null
0
0.076923
null
null
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1