hexsha
stringlengths
40
40
size
int64
5
2.06M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
248
max_stars_repo_name
stringlengths
5
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
248
max_issues_repo_name
stringlengths
5
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringdate
2015-01-01 00:00:47
2022-03-31 23:42:18
max_issues_repo_issues_event_max_datetime
stringdate
2015-01-01 17:43:30
2022-03-31 23:59:58
max_forks_repo_path
stringlengths
3
248
max_forks_repo_name
stringlengths
5
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
5
2.06M
avg_line_length
float64
1
1.02M
max_line_length
int64
3
1.03M
alphanum_fraction
float64
0
1
888504477ef926e05cac253422a2f5fcc1a109ea
4,031
py
Python
main.py
sun624/Dogecoin_musk
6dc48f03275321d29bb1ab131ecd14626bcc5170
[ "MIT" ]
null
null
null
main.py
sun624/Dogecoin_musk
6dc48f03275321d29bb1ab131ecd14626bcc5170
[ "MIT" ]
null
null
null
main.py
sun624/Dogecoin_musk
6dc48f03275321d29bb1ab131ecd14626bcc5170
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""Scrape daily DOGE/USD closing prices from Yahoo Finance with Selenium and
plot them together with the dates of Elon Musk's Doge-related tweets."""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import time
import math
from twitter import get_coin_tweets_dates

# BeautifulSoup cannot scrape dynamically changing webpages on its own.
# Selenium drives a real (headless) browser so the page can render and
# lazy-load; BeautifulSoup then parses the rendered HTML.


def convert_date_to_unixtime(year, month, day):
    """Return the Unix timestamp (whole seconds since 1970-01-01 UTC) for
    the given calendar date."""
    dt = datetime.datetime(year, month, day)
    timestamp = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
    return round(timestamp)


def date_parser(date):
    """Parse a Yahoo Finance date string such as 'Jan 01, 2021' into a
    ``datetime.date``."""
    return datetime.datetime.strptime(date, '%b %d, %Y').date()


def is_valid(s):
    """Treat a table cell as a real price only if it holds more than one
    character (placeholder cells like '-' are skipped)."""
    return len(s) > 1


def scraping_data(y1, m1, d1, y2, m2, d2, coin):
    """Scrape daily closing prices for ``coin`` (e.g. 'DOGE') between the two
    dates (inclusive) and return ``[dates, prices]``, both ordered oldest-first.

    The Yahoo history table lazy-loads roughly 100 rows per scroll, so the
    page is scrolled once per 100 days in the requested range.
    """
    DAYS_PER_SCROLL = 100
    SECONDS_PER_DAY = 86400
    start_date = convert_date_to_unixtime(y1, m1, d1)
    end_date = convert_date_to_unixtime(y2, m2, d2)
    url = f'https://finance.yahoo.com/quote/{coin}-USD/history?period1={start_date}&period2={end_date}&interval=1d&filter=history&frequency=1d&includeAdjustedClose=true'
    # Initiate the webdriver; run Chrome without a GUI.
    chrome_options = Options()
    chrome_options.headless = True
    chrome_options.add_argument("--log-level=3")
    # NOTE(review): `executable_path` and `find_element_by_tag_name` are the
    # Selenium 3 API and were removed in Selenium 4 — confirm pinned version.
    driver = webdriver.Chrome(executable_path='./chromedriver',
                              options=chrome_options)
    driver.get(url)
    html = driver.find_element_by_tag_name('html')
    # Press ESC so the browser stops loading the page.
    html.send_keys(Keys.ESCAPE)
    days_between = (end_date - start_date) / SECONDS_PER_DAY
    scroll = math.ceil(days_between / DAYS_PER_SCROLL)
    for _ in range(scroll):
        # Re-parse the whole (grown) table each pass; only the final pass's
        # lists are returned, once every row has been loaded.
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        dates = []
        prices = []
        for tr in soup.tbody.contents:
            # A NavigableString is not callable; convert it to a plain str.
            date_source = tr.contents[0]
            date_string = str(date_source.string)
            date = date_parser(date_string)
            price = tr.contents[4].string
            if is_valid(price):
                # Prepend so the lists end up oldest-first.
                dates.insert(0, date)
                prices.insert(0, float(price.replace(',', '')))
        # Press END to scroll to the bottom of the page and load more rows.
        html.send_keys(Keys.END)
        # Bug fix: the original built `WebDriverWait(driver, timeout=0.5)`
        # without calling `.until(...)`, which waits for nothing. The plain
        # sleep below is what actually paces the lazy-loading.
        time.sleep(0.3)
    driver.close()
    return [dates, prices]


def draw(dates, prices, coin, tw_dates):
    """Plot the coin price curve and mark Elon's Doge tweets as red dots.

    Each tweet date in ``tw_dates`` must also appear in ``dates`` so the dot
    can be placed at that day's price.
    """
    fig, ax = plt.subplots()
    # Set graph size to 12 inch by 10 inch.
    fig.set_size_inches((12, 10))
    # First series: coin price vs. date.
    ax.plot(dates, prices, label='coin price')
    tw_prices = []
    for tw_date in tw_dates:
        index = dates.index(tw_date)
        tw_prices.append(prices[index])
    # Second series: tweet dates at the matching day's price.
    ax.plot(tw_dates, tw_prices, 'ro', label='Elon\'s Doge tweet')
    ax.xaxis.set_major_locator(mdates.AutoDateLocator())
    ax.xaxis.set_minor_locator(mdates.DayLocator())
    # Auto-rotate the x-axis tick labels.
    fig.autofmt_xdate()
    ax.grid(True)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title(f'{coin} coin Price', loc='center')
    plt.legend(loc='upper left')
    plt.show()


def main():
    start_time = time.time()
    dates, prices = scraping_data(2021, 1, 1, 2021, 5, 21, 'DOGE')
    tweet_dates = get_coin_tweets_dates('elonmusk')
    draw(dates, prices, 'DOGE', tweet_dates)
    duration = time.time() - start_time
    print(f'It took {duration}s to run this application.')


if __name__ == '__main__':
    main()
29.210145
170
0.690896
88858e6eec8ef3e573592e88fd8baa705aa1f430
1,264
py
Python
064_minimum_path_sum.py
gengwg/leetcode
0af5256ec98149ef5863f3bba78ed1e749650f6e
[ "Apache-2.0" ]
2
2018-04-24T19:17:40.000Z
2018-04-24T19:33:52.000Z
064_minimum_path_sum.py
gengwg/leetcode
0af5256ec98149ef5863f3bba78ed1e749650f6e
[ "Apache-2.0" ]
null
null
null
064_minimum_path_sum.py
gengwg/leetcode
0af5256ec98149ef5863f3bba78ed1e749650f6e
[ "Apache-2.0" ]
3
2020-06-17T05:48:52.000Z
2021-01-02T06:08:25.000Z
"""
64. Minimum Path Sum

Given a m x n grid filled with non-negative numbers, find a path from top left
to bottom right which minimizes the sum of all numbers along its path.

Note: You can only move either down or right at any point in time.

http://www.tangjikai.com/algorithms/leetcode-64-minimum-path-sum

Dynamic Programming

We can use an two-dimensional array to record the minimum sum at each position
of grid, finally return the last element as output.
"""


class Solution(object):
    def minPathSum(self, grid):
        """Return the minimal path sum from grid[0][0] to grid[m-1][n-1],
        moving only right or down.

        :type grid: List[List[int]]
        :rtype: int
        """
        m = len(grid)
        n = len(grid[0])
        # dp[i][j] == minimum path sum reaching cell (i, j).
        dp = [[0] * n for _ in range(m)]

        for i in range(m):
            for j in range(n):
                if i == 0 and j == 0:
                    # Top-left corner: the path starts here.
                    dp[i][j] = grid[0][0]
                elif i == 0:
                    # First row (bug fix: comment previously said "column"):
                    # can only arrive from the left neighbour.
                    dp[i][j] = dp[i][j - 1] + grid[i][j]
                elif j == 0:
                    # First column (bug fix: comment previously said "row"):
                    # can only arrive from the cell above.
                    dp[i][j] = dp[i - 1][j] + grid[i][j]
                else:
                    # Cheapest of arriving from above or from the left.
                    dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]
        return dp[-1][-1]
29.395349
75
0.530854
8886118689d4c63bf084bbb40abe034f4a2125d5
12,507
py
Python
pants-plugins/structured/subsystems/r_distribution.py
cosmicexplorer/structured
ea452a37e265dd75d4160efa59a4a939bf8c0521
[ "Apache-2.0" ]
null
null
null
pants-plugins/structured/subsystems/r_distribution.py
cosmicexplorer/structured
ea452a37e265dd75d4160efa59a4a939bf8c0521
[ "Apache-2.0" ]
null
null
null
pants-plugins/structured/subsystems/r_distribution.py
cosmicexplorer/structured
ea452a37e265dd75d4160efa59a4a939bf8c0521
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Pants subsystem wrapping a hermetic R distribution: download/unpack the R
# binaries, invoke R/Rscript in isolated processes, and install packages.

from __future__ import (absolute_import, division, generators, nested_scopes,
                        print_function, unicode_literals, with_statement)

import logging
import os
import re
import subprocess
import sys
from contextlib import contextmanager
from abc import abstractproperty

from pants.binaries.binary_util import BinaryUtil
from pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult
from pants.fs.archive import TGZ
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import environment_as, temporary_file_path
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
from pants.util.strutil import ensure_binary


logger = logging.getLogger(__name__)


class RDependency(AbstractClass):
  """Abstract base for R dependencies (CRAN packages, GitHub packages, ...)."""

  @abstractproperty
  def name(self):
    """The package name of this dependency."""


class RInvocationException(Exception):
  """Base error for any failed invocation of an R subprocess."""

  INVOCATION_ERROR_BOILERPLATE = "`{cmd}` failed: {what_happened}"

  def __init__(self, cmd, what_happened):
    # `cmd` is the argv list; it is joined for display only.
    msg = self.INVOCATION_ERROR_BOILERPLATE.format(
      cmd=' '.join(cmd),
      what_happened=what_happened,
    )
    super(RInvocationException, self).__init__(msg)


class RSpawnFailure(RInvocationException):
  """The R process could not be spawned at all (e.g. OSError from Popen)."""

  def __init__(self, cmd, err):
    super(RSpawnFailure, self).__init__(cmd=cmd, what_happened=repr(err))


class RProcessResultFailure(RInvocationException):
  """The R process ran but exited with a non-zero exit code."""

  PROCESS_RESULT_FAILURE_BOILERPLATE = "exited non-zero ({exit_code}){rest}"

  def __init__(self, cmd, exit_code, rest=''):
    what_happened = self.PROCESS_RESULT_FAILURE_BOILERPLATE.format(
      exit_code=exit_code,
      rest=rest,
    )
    super(RProcessResultFailure, self).__init__(
      cmd=cmd, what_happened=what_happened)


class RProcessInvokedForOutputFailure(RProcessResultFailure):
  """Non-zero exit from a process whose stdout/stderr were captured;
  both streams are included in the error message."""

  INVOKE_OUTPUT_ERROR_BOILERPLATE = """
stdout:
{stdout}
stderr:
{stderr}
"""

  def __init__(self, cmd, exit_code, stdout, stderr):
    rest = self.INVOKE_OUTPUT_ERROR_BOILERPLATE.format(
      stdout=stdout,
      stderr=stderr,
    )
    super(RProcessInvokedForOutputFailure, self).__init__(
      cmd=cmd, exit_code=exit_code, rest=rest)


class RDistribution(object):
  """Manages a bootstrapped R install and runs R code in sandboxed processes."""

  DEVTOOLS_CRAN_NAME = 'devtools'
  MODULES_GITHUB_ORG_NAME = 'klmr'
  MODULES_GITHUB_REPO_NAME = 'modules'

  class Factory(Subsystem):
    """Subsystem exposing the options used to construct an RDistribution."""

    options_scope = 'r-distribution'

    @classmethod
    def subsystem_dependencies(cls):
      return super(RDistribution.Factory, cls).subsystem_dependencies() + (
        BinaryUtil.Factory,
      )

    @classmethod
    def register_options(cls, register):
      super(RDistribution.Factory, cls).register_options(register)
      register('--r-version', fingerprint=True,
               help='R distribution version. Used as part of the path to '
                    'lookup the distribution with --binary-util-baseurls and '
                    '--pants-bootstrapdir.',
               default='3.4.3')
      register('--modules-git-ref', fingerprint=True,
               help='git ref of the klmr/modules repo to use for R modules.',
               default='d4199f2d216c6d20c3b092c691d3099c3325f2a3')
      register('--tools-cache-dir', advanced=True,
               metavar='<dir>', default=None,
               help='The parent directory for downloaded R tools. '
                    'If unspecified, a standard path under the workdir is '
                    'used.')
      register('--resolver-cache-dir', advanced=True,
               metavar='<dir>', default=None,
               help='The parent directory for resolved R packages. '
                    'If unspecified, a standard path under the workdir is '
                    'used.')
      register('--chroot-cache-dir', advanced=True,
               metavar='<dir>', default=None,
               help='The parent directory for the chroot cache. '
                    'If unspecified, a standard path under the workdir is '
                    'used.')

    @memoized_property
    def scratch_dir(self):
      # Default parent for all cache dirs: <workdir>/<options scope path>.
      return os.path.join(
        self.get_options().pants_workdir, *self.options_scope.split('.'))

    def create(self):
      """Build an RDistribution from this subsystem's option values,
      filling in workdir-relative defaults for unset cache dirs."""
      binary_util = BinaryUtil.Factory.create()
      options = self.get_options()
      tools_cache_dir = options.tools_cache_dir or os.path.join(
        self.scratch_dir, 'tools')
      resolver_cache_dir = options.resolver_cache_dir or os.path.join(
        self.scratch_dir, 'resolved_packages')
      chroot_cache_dir = options.chroot_cache_dir or os.path.join(
        self.scratch_dir, 'chroots')
      return RDistribution(
        binary_util,
        r_version=options.r_version,
        modules_git_ref=options.modules_git_ref,
        tools_cache_dir=tools_cache_dir,
        resolver_cache_dir=resolver_cache_dir,
        chroot_cache_dir=chroot_cache_dir,
      )

  def __init__(self, binary_util, r_version, modules_git_ref, tools_cache_dir,
               resolver_cache_dir, chroot_cache_dir):
    self._binary_util = binary_util
    self._r_version = r_version
    self.modules_git_ref = modules_git_ref
    self.tools_cache_dir = tools_cache_dir
    self.resolver_cache_dir = resolver_cache_dir
    self.chroot_cache_dir = chroot_cache_dir

  def _unpack_distribution(self, supportdir, r_version, output_filename):
    """Download (via BinaryUtil) and extract the R tarball; return the
    directory it was unpacked into."""
    logger.debug('unpacking R distribution, version: %s', r_version)
    tarball_filepath = self._binary_util.select_binary(
      supportdir=supportdir, version=r_version, name=output_filename)
    logger.debug('Tarball for %s(%s): %s',
                 supportdir, r_version, tarball_filepath)
    # Unpack next to the downloaded tarball; TGZ.extract is concurrency-safe.
    work_dir = os.path.join(os.path.dirname(tarball_filepath), 'unpacked')
    TGZ.extract(tarball_filepath, work_dir, concurrency_safe=True)
    return work_dir

  @memoized_property
  def r_installation(self):
    # Memoized: the distribution is unpacked at most once per instance.
    r_dist_path = self._unpack_distribution(
      supportdir='bin/R', r_version=self._r_version,
      output_filename='r.tar.gz')
    return r_dist_path

  @memoized_property
  def r_bin_dir(self):
    return os.path.join(self.r_installation, 'bin')

  # Appended to user input so the R session image is saved to a known file.
  R_SAVE_IMAGE_BOILERPLATE = """{initial_input}
save.image(file='{save_file_path}', safe=FALSE)
"""

  RDATA_FILE_NAME = '.Rdata'

  def r_invoke_isolated_process(self, context, cmd):
    """Run `cmd` through the v2 engine's isolated process execution.
    Raises RProcessInvokedForOutputFailure on non-zero exit; returns the
    ExecuteProcessResult otherwise."""
    logger.debug("isolated process '{}'".format(cmd))
    # Expose the unpacked R's bin dir to the subprocess via PATH.
    env_path = ['PATH', self.r_bin_dir]
    req = ExecuteProcessRequest(tuple(cmd), env_path)
    # NOTE(review): reaches into the private scheduler of the context.
    res, = context._scheduler.product_request(
      ExecuteProcessResult, [req])
    if res.exit_code != 0:
      raise RProcessInvokedForOutputFailure(
        cmd, res.exit_code, res.stdout, res.stderr)
    return res

  @contextmanager
  def r_isolated_invoke_with_input(self, context, stdin_input, suffix='.R'):
    """Write `stdin_input` to a temporary .R file and yield its path;
    the file is removed when the context exits."""
    logger.debug("isolated invoke with stdin_input:\n{}".format(stdin_input))
    with temporary_file_path(suffix=suffix) as tmp_file_path:
      with open(tmp_file_path, 'w') as tmpfile:
        tmpfile.write(stdin_input)
      yield tmp_file_path

  def r_invoke_repl_sandboxed(self, workunit, cmd, cwd):
    """Run an interactive R command with the bundled R prepended to PATH,
    wiring stdio to the caller's terminal/workunit; returns the exit code."""
    new_path = ':'.join([
      self.r_bin_dir,
      os.environ.get('PATH'),
    ])
    with environment_as(PATH=new_path):
      try:
        subproc = subprocess.Popen(
          cmd,
          stdin=sys.stdin,
          stdout=workunit.output('stdout'),
          stderr=workunit.output('stderr'),
          cwd=cwd,
        )
        return subproc.wait()
      except OSError as e:
        raise RSpawnFailure(cmd, e)
      except subprocess.CalledProcessError as e:
        raise RProcessResultFailure(cmd, e.returncode, e)

  def invoke_r_interactive(self, context, workunit, initial_input, chroot_dir,
                           clean_chroot=False):
    """Evaluate `initial_input` in a batch R run that saves its workspace to
    `chroot_dir`, then start an interactive R session restoring that image."""
    logger.debug("interactive in '{}', initial_input: '{}'".format(
      chroot_dir, initial_input))
    rdata_path = os.path.join(chroot_dir, self.RDATA_FILE_NAME)
    input_with_save = self.R_SAVE_IMAGE_BOILERPLATE.format(
      initial_input=initial_input,
      save_file_path=rdata_path,
    )
    safe_mkdir(chroot_dir, clean=clean_chroot)
    with self.r_isolated_invoke_with_input(
        context, input_with_save) as tmp_file_path:
      # First pass: run the script non-interactively so the image is saved.
      save_cmd = [
        'R', '--vanilla', '--slave', '--file={}'.format(tmp_file_path)
      ]
      self.r_invoke_isolated_process(context, save_cmd)
      # Second pass: interactive session restoring (and re-saving) the image.
      r_cmd = [
        'R', '--save', '--restore', '--interactive',
      ]
      return self.r_invoke_repl_sandboxed(workunit, r_cmd, chroot_dir)

  def invoke_rscript(self, context, stdin_input):
    """Run `stdin_input` through Rscript in an isolated process and return
    the ExecuteProcessResult."""
    with self.r_isolated_invoke_with_input(
        context, stdin_input) as tmp_file_path:
      r_cmd = [
        'Rscript', '--verbose', tmp_file_path,
      ]
      return self.r_invoke_isolated_process(context, r_cmd)

  class PackageInfoFormatError(Exception):
    """Raised when a package name or version string is malformed."""
    # NOTE(review): methods below raise the bare name `PackageInfoFormatError`,
    # which does not resolve from inside a nested class's methods — confirm
    # whether this class was module-level in the original file.

  # A package name is invalid iff it is empty/whitespace-only.
  BLANK_LINE_REGEX = re.compile('^\s*$')

  @classmethod
  def is_valid_package_name(cls, name):
    return cls.BLANK_LINE_REGEX.match(name) is None

  @classmethod
  def check_valid_package_name(cls, name):
    """Return `name`, raising PackageInfoFormatError if it is blank."""
    if not cls.is_valid_package_name(name):
      raise PackageInfoFormatError(
        "'{}' is not a valid package name (must not be blank)".format(name))
    return name

  @classmethod
  def filter_packages_lines_stdout(cls, lines):
    # Drop blank lines from `installed.packages` stdout.
    return [p for p in lines if cls.is_valid_package_name(p)]

  # Dotted numeric versions only, e.g. '1', '1.2', '1.2.3'.
  VALID_VERSION_REGEX = re.compile('^[0-9]+(\.[0-9]+)*$')

  @classmethod
  def is_valid_version(cls, version):
    # None means "no version constraint" and is accepted.
    if version is None:
      return True
    return cls.VALID_VERSION_REGEX.match(version) is not None

  @classmethod
  def check_valid_version(cls, version):
    """Return `version`, raising PackageInfoFormatError if invalid."""
    if not cls.is_valid_version(version):
      raise PackageInfoFormatError(
        "'{}' is not a valid package version "
        "(must be 'None' or match '{}')"
        .format(version, cls.VALID_VERSION_REGEX.pattern))
    return version

  @classmethod
  def gen_script_load_stmts(cls, srcs_rel):
    """Generate R `source(...)` statements for each relative source path."""
    if len(srcs_rel) == 0:
      return ''
    source_stmts = ["source('{}')".format(s.encode('ascii'))
                    for s in srcs_rel]
    return '\n'.join(source_stmts) + '\n'

  @classmethod
  def convert_to_list_of_ascii(cls, arg):
    # Accept a single string or a list; always return a list of binary strs.
    if not isinstance(arg, list):
      arg = [ensure_binary(arg)]
    return [ensure_binary(x) for x in arg]

  @classmethod
  def create_valid_r_charvec_input(cls, elements, drop_empty=False):
    """Render `elements` as an R character vector literal: 'character(0)'
    (or None if drop_empty) when empty, a bare quoted string for one element,
    and c('a', 'b', ...) otherwise."""
    elements = cls.convert_to_list_of_ascii(elements)
    if len(elements) == 0:
      if drop_empty:
        return None
      return 'character(0)'
    elif len(elements) == 1:
      return "'{}'".format(elements[0])
    quoted = ["'{}'".format(el) for el in elements]
    return "c({})".format(', '.join(quoted))

  @classmethod
  def gen_libs_input(cls, lib_paths):
    """Generate a `.libPaths(...)` statement, or '' when no paths given."""
    libs_charvec = cls.create_valid_r_charvec_input(lib_paths, drop_empty=True)
    if libs_charvec is None:
      return ''
    return ".libPaths({})".format(libs_charvec) + '\n'

  # Lists installed package names, one per line, on stdout.
  R_LIST_PACKAGES_BOILERPLATE = """{libs_input}
cat(installed.packages(lib.loc={libs_joined})[,'Package'], sep='\\n')
"""

  def get_installed_packages(self, context, lib_paths):
    """Return the package names installed under `lib_paths` (or under the
    default R libraries when `lib_paths` is empty)."""
    libs_input = self.gen_libs_input(lib_paths)
    libs_charvec = self.create_valid_r_charvec_input(lib_paths,
                                                     drop_empty=True)
    if libs_charvec is None:
      # lib.loc=NULL means "search the default library paths" in R.
      libs_charvec = "NULL"
    installed_packages_input = self.R_LIST_PACKAGES_BOILERPLATE.format(
      libs_input=libs_input,
      libs_joined=libs_charvec,
    )
    pkgs = self.invoke_rscript(
      context, installed_packages_input).stdout.split('\n')
    return self.filter_packages_lines_stdout(pkgs)

  # R_INSTALL_SOURCE_PACKAGE_BOILERPLATE = """???"""

  # def gen_source_install_input(self, source_dir, outdir):
  #   return self.R_INSTALL_SOURCE_PACKAGE_BOILERPLATE.format(
  #     expr="devtools::install_local('{}', lib='{}')".format(
  #       source_dir, outdir),
  #     outdir=outdir,
  #   )

  # def install_source_package(self, context, source_dir, pkg_cache_dir):
  #   source_input = self.gen_source_install_input(source_dir, pkg_cache_dir)
  #   self.invoke_rscript(context, source_input).stdout.split('\n')

  def install_cran_package(self, cran, context, cran_dep, outdir):
    """Install a CRAN dependency into `outdir` using the `cran` helper to
    generate the install script."""
    cran_input = cran.gen_cran_install_input(cran_dep, outdir)
    self.invoke_rscript(context, cran_input)

  def install_github_package(self, github, context, github_dep, outdir):
    """Install a GitHub dependency into `outdir` using the `github` helper
    to generate the install script."""
    github_input = github.gen_github_install_input(
      self.tools_cache_dir, github_dep, outdir)
    logger.debug("github_input: '{}'".format(github_input))
    self.invoke_rscript(context, github_input).stdout.split('\n')
33.352
93
0.691773
88864f3fa8092982651eaeda9dbe085e135b834a
5,121
py
Python
src/test.py
yliuhz/PMAW
23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4
[ "Apache-2.0" ]
8
2021-12-02T02:25:55.000Z
2022-03-18T23:41:42.000Z
src/test.py
yliuhz/PMAW
23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4
[ "Apache-2.0" ]
null
null
null
src/test.py
yliuhz/PMAW
23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4
[ "Apache-2.0" ]
null
null
null
"""Demonstration script: gradient flow / SGD updates on a tiny conv net, and
a from-scratch batch normalization compared against nn.BatchNorm2d."""
import torch
from torch import nn
import numpy as np


class convmodel(torch.nn.Module):
    """Tiny conv net used to observe gradients and optimizer updates."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, 1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, padding=1, bias=False)
        self.linear = nn.Linear(32*10*10, 1, bias=False)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # Flatten all channel/spatial dims before the linear layer.
        x = self.linear(x.view(x.size(0), -1))
        return x


def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    """Functional batch normalization.

    Returns (Y, updated_moving_mean, updated_moving_var). `X` must be 2-D
    (fully-connected) or 4-D (convolutional, NCHW).
    """
    # Use `is_grad_enabled` to determine whether the current mode is training
    # mode or prediction mode
    if not torch.is_grad_enabled():
        # If it is prediction mode, directly use the mean and variance
        # obtained by moving average
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            # When using a fully-connected layer, calculate the mean and
            # variance on the feature dimension
            mean = X.mean(dim=0)
            var = ((X - mean) ** 2).mean(dim=0)
        else:
            # When using a two-dimensional convolutional layer, calculate the
            # mean and variance on the channel dimension (axis=1). Here we
            # need to maintain the shape of `X`, so that the broadcasting
            # operation can be carried out later
            mean = X.mean(dim=(0, 2, 3), keepdim=True)
            var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
        # In training mode, the current mean and variance are used for the
        # standardization
        X_hat = (X - mean) / torch.sqrt(var + eps)
        # Update the mean and variance using moving average
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # Scale and shift
    return Y, moving_mean.data, moving_var.data


class BatchNorm(nn.Module):
    """Batch-norm layer built on the functional `batch_norm` above.

    `num_features`: the number of outputs for a fully-connected layer or the
    number of output channels for a convolutional layer. `num_dims`: 2 for a
    fully-connected layer and 4 for a convolutional layer.
    """

    def __init__(self, num_features, num_dims):
        super().__init__()
        if num_dims == 2:
            shape = (1, num_features)
        else:
            shape = (1, num_features, 1, 1)
        # The scale parameter and the shift parameter (model parameters) are
        # initialized to 1 and 0, respectively
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))
        # The variables that are not model parameters are initialized to 0
        # and 1 (plain tensors, not buffers, so they are not in state_dict)
        self.moving_mean = torch.zeros(shape)
        self.moving_var = torch.ones(shape)

    def forward(self, X):
        # If `X` is not on the main memory, copy `moving_mean` and
        # `moving_var` to the device where `X` is located
        if self.moving_mean.device != X.device:
            self.moving_mean = self.moving_mean.to(X.device)
            self.moving_var = self.moving_var.to(X.device)
        # Save the updated `moving_mean` and `moving_var`
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean, self.moving_var,
            eps=1e-5, momentum=0.9)
        return Y


if __name__ == '__main__':
    model = convmodel()
    for m in model.parameters():
        m.data.fill_(0.1)
    # criterion = nn.CrossEntropyLoss()
    criterion = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
    model.train()

    # Simulate a batch of 8 samples, each 10x10, all values initialized to 1
    # so that every run produces the same output and is easy to inspect.
    images = torch.ones(8, 3, 10, 10)
    targets = torch.ones(8, dtype=torch.float)
    output = model(images)
    print(output.shape)
    # torch.Size([8, 1])  (bug fix: comment previously claimed [8, 20])
    loss = criterion(output.view(-1,), targets)
    print(model.conv1.weight.grad)  # None

    loss.backward()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([-0.0782, -0.0842, -0.0782])
    # One backward pass computes the derivatives of all network parameters;
    # for brevity we only look at a small slice of the result.

    print(model.conv1.weight[0][0][0])
    # tensor([0.1000, 0.1000, 0.1000], grad_fn=<SelectBackward>)
    # We know the parameters were all initialized to 0.1.

    optimizer.step()
    print(model.conv1.weight[0][0][0])
    # tensor([0.1782, 0.1842, 0.1782], grad_fn=<SelectBackward>)
    # Recall the learning rate was set to 1, so the updated value is exactly
    # (original weight - gradient)!

    optimizer.zero_grad()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([0., 0., 0.])
    # After every weight update, remember to zero the gradients — otherwise
    # the next backward pass accumulates on top of the previous one. The
    # zero_grad() call can also be moved earlier, as long as the gradients
    # are zero before the derivatives are computed.

    print('>>>test for bn<<<')
    bn = nn.BatchNorm2d(2)
    aa = torch.randn(2,2,1,1)
    bb = bn(aa)
    print('aa=', aa)
    print('bb=', bb)
    cc = BatchNorm(2, 4)(aa)
    print('cc=', cc)
    # Manual normalization for comparison against the two layers above.
    shape = (1, 2, 1, 1)
    mean = aa.mean(dim=(0,2,3), keepdim=True)
    dd = (aa - mean) / torch.sqrt(((aa-mean)**2).mean(dim=(0,2,3), keepdim=True))
    print('dd=', dd)
35.075342
81
0.610037
8887cdf2cc8ae9604a5a9ce44664b255c6cabd67
64
py
Python
hanlp/datasets/ner/__init__.py
v-smwang/HanLP
98db7a649110fca4307acbd6a26f2b5bb1159efc
[ "Apache-2.0" ]
27,208
2015-03-27T10:25:45.000Z
2022-03-31T13:26:32.000Z
hanlp/datasets/ner/__init__.py
hushaoyun/HanLP
967b52404c9d0adbc0cff2699690c127ecfca36e
[ "Apache-2.0" ]
1,674
2015-03-30T06:36:44.000Z
2022-03-16T01:52:56.000Z
hanlp/datasets/ner/__init__.py
hushaoyun/HanLP
967b52404c9d0adbc0cff2699690c127ecfca36e
[ "Apache-2.0" ]
7,710
2015-03-27T08:07:57.000Z
2022-03-31T14:57:23.000Z
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2019-12-06 15:32
21.333333
24
0.59375
888a09b848fdd84015221d8d652297a6bccb8e05
563
py
Python
portfolio/2011_krakDK/krak/items.py
0--key/lib
ba7a85dda2b208adc290508ca617bdc55a5ded22
[ "Apache-2.0" ]
null
null
null
portfolio/2011_krakDK/krak/items.py
0--key/lib
ba7a85dda2b208adc290508ca617bdc55a5ded22
[ "Apache-2.0" ]
null
null
null
portfolio/2011_krakDK/krak/items.py
0--key/lib
ba7a85dda2b208adc290508ca617bdc55a5ded22
[ "Apache-2.0" ]
5
2016-03-22T07:40:46.000Z
2021-05-30T16:12:21.000Z
# Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/topics/items.html from scrapy.item import Item, Field class KrakItem(Item): # define the fields for your item here like: # name = Field() # pass# company_name = Field() company_site_url = Field() short_description = Field() address = Field() phone = Field() phone_type = Field() gen_description = Field() description_headers = Field() description_paragraphs = Field() tags = Field() category = Field()
23.458333
48
0.657194
888a79727132fd019b0db67bf3741b80a00a7a59
29,630
py
Python
src/mau/parsers/main_parser.py
Project-Mau/mau
193d16633c1573227debf4517ebcaf07add24979
[ "MIT" ]
28
2021-02-22T18:46:52.000Z
2022-02-21T15:14:05.000Z
src/mau/parsers/main_parser.py
Project-Mau/mau
193d16633c1573227debf4517ebcaf07add24979
[ "MIT" ]
5
2021-02-23T09:56:13.000Z
2022-03-13T09:47:42.000Z
src/mau/parsers/main_parser.py
Project-Mau/mau
193d16633c1573227debf4517ebcaf07add24979
[ "MIT" ]
2
2021-02-23T09:11:45.000Z
2021-03-13T11:08:21.000Z
import re import copy from mau.lexers.base_lexer import TokenTypes, Token from mau.lexers.main_lexer import MainLexer from mau.parsers.base_parser import ( BaseParser, TokenError, ConfigurationError, parser, ) from mau.parsers.text_parser import TextParser from mau.parsers.arguments_parser import ArgumentsParser from mau.parsers.preprocess_variables_parser import PreprocessVariablesParser from mau.parsers.nodes import ( HorizontalRuleNode, TextNode, BlockNode, ContentNode, ContentImageNode, CommandNode, HeaderNode, ListNode, ListItemNode, ParagraphNode, TocNode, TocEntryNode, FootnotesNode, ) class EngineError(ValueError): """ Used to signal that the engine selected for a code block is not known """ def header_anchor(text, level): """ Return a sanitised anchor for a header. """ # Everything lowercase sanitised_text = text.lower() # Get only letters, numbers, dashes, spaces, and dots sanitised_text = "".join(re.findall("[a-z0-9-\\. ]+", sanitised_text)) # Remove multiple spaces sanitised_text = "-".join(sanitised_text.split()) return sanitised_text # The MainParser is in charge of parsing # the whole input, calling other parsers # to manage single paragraphs or other # things like variables. class MainParser(BaseParser): def __init__(self, variables=None): super().__init__() self.lexer = MainLexer() # This is used as a storage for attributes. # Block attributes are defined before the block # so when we parse them we store them here and # then use them when dealing with the block itself. 
self.argsparser = ArgumentsParser() # Copy the variables and make sure the "mau" namespace exists self.variables = copy.deepcopy(variables) if variables else {} if "mau" not in self.variables: self.variables["mau"] = {} self.headers = [] self.footnote_defs = [] self.blocks = {} self.toc = None # When we define a block we establish an alias # {alias:actual_block_name} self.block_aliases = {} # Each block we define can have default values # {actual_block_name:kwargs} self.block_defaults = {} # Each block we define can have names for unnamed arguments # {actual_block_name:kwargs} self.block_names = {} # Backward compatibility with Mau 1.x # Mau 1.x used [source] to format source, while Mau 2.x # uses [myblock, engine=source], so this establishes # a default block definition so that # [source] = [source, engine=source] # In Mau 2.x this block uses the template "block-source" # so any template called "source" (e.g. "source.html") # must be renamed. # This definition can be overridden by custom block definitions self.block_aliases["source"] = "source" self.block_defaults["source"] = {"engine": "source", "language": "text"} self.block_names["source"] = ["language"] self.block_aliases["admonition"] = "admonition" self.block_names["admonition"] = ["class", "icon", "label"] self.block_aliases["quote"] = "quote" self.block_defaults["quote"] = {"attribution": None} self.block_names["quote"] = ["attribution"] # Iterate through block definitions passed as variables for alias, block_definition in ( self.variables["mau"].get("block_definitions", {}).items() ): try: blocktype = block_definition["blocktype"] self.block_aliases[alias] = blocktype except KeyError: raise ConfigurationError( f"Block definition '{alias}' is missing key 'blocktype'" ) try: self.block_defaults[blocktype] = block_definition["kwargs"] except KeyError: raise ConfigurationError( f"Block definition '{alias}' is missing key 'kwargs'" ) # This is a buffer for a block title self._title = None # This is the 
function used to create the header # anchors. It can be specified through # mau.header_anchor_function to override # the default one. self.header_anchor = self.variables["mau"].get( "header_anchor_function", header_anchor ) self.v1_backward_compatibility = self.variables["mau"].get( "v1_backward_compatibility", False ) def _pop_title(self): # This return the title and resets the # cached one, so no other block will # use it. title = self._title self._title = None return title def _push_title(self, title): # When we parse a title we can store it here # so that it is available to the next block # that will use it. self._title = title def _collect_lines(self, stop_tokens): # This collects several lines of text in a list # until it gets to a line that begins with one # of the tokens listed in stop_tokens. # It is useful for block or other elements that # are clearly surrounded by delimiters. lines = [] while self.peek_token() not in stop_tokens: lines.append(self.collect_join([Token(TokenTypes.EOL)])) self.get_token(TokenTypes.EOL) return lines def _collect_text_content(self): # Collects all adjacent text tokens # into a single string if not self.peek_token_is(TokenTypes.TEXT): return None values = [] # Get all tokens while self.peek_token_is(TokenTypes.TEXT): values.append(self.get_token().value) self.get_token(TokenTypes.EOL) return " ".join(values) def _parse_text_content(self, text): # Parse a text using the TextParser. # Replace variables p = PreprocessVariablesParser(self.variables).analyse( text, ) text = p.nodes[0].value # Parse the text p = TextParser( footnotes_start_with=len(self.footnote_defs) + 1, v1_backward_compatibility=self.v1_backward_compatibility, ).analyse(text) # Text should return a single sentence node result = p.nodes[0] # Store the footnotes self.footnote_defs.extend(p.footnote_defs) return result @parser def _parse_eol(self): # This simply parses the end of line. 
self.get_token(TokenTypes.EOL) @parser def _parse_horizontal_rule(self): # The horizontal rule --- self.get_token(TokenTypes.LITERAL, "---") self.get_token(TokenTypes.EOL) self._save(HorizontalRuleNode()) @parser def _parse_single_line_comment(self): # // A comment on a single line self.get_token(TokenTypes.TEXT, check=lambda x: x.startswith("//")) self.get_token(TokenTypes.EOL) @parser def _parse_multi_line_comment(self): # //// # A comment # on multiple lines # //// self.get_token(TokenTypes.LITERAL, "////") self._collect_lines([Token(TokenTypes.LITERAL, "////"), Token(TokenTypes.EOF)]) self.force_token(TokenTypes.LITERAL, "////") @parser def _parse_variable_definition(self): # This parses a variable definition # # Simple variables are defined as :name:value # as True booleans as just :name: # and as False booleas as :!name: # # Variable names can use a namespace with # :namespace.name:value # Get the mandatory variable name self.get_token(TokenTypes.LITERAL, ":") variable_name = self.get_token(TokenTypes.TEXT).value self.get_token(TokenTypes.LITERAL, ":") # Assume the variable is a flag variable_value = True # If the name starts with ! it's a false flag if variable_name.startswith("!"): variable_value = False variable_name = variable_name[1:] # Get the optional value value = self.collect_join([Token(TokenTypes.EOL)]) # The value is assigned only if the variable # is not a negative flag. In that case it is ignored if variable_value and len(value) > 0: variable_value = value # If the variable name contains a dot we # want to use a namespace if "." 
not in variable_name: self.variables[variable_name] = variable_value else: # Let's ignore all others dots namespace, variable_name = variable_name.split(".", maxsplit=1) # This defines the namespace if it's not already there try: self.variables[namespace][variable_name] = variable_value except KeyError: self.variables[namespace] = {variable_name: variable_value} @parser def _parse_command(self): # Parse a command in the form ::command: self.get_token(TokenTypes.LITERAL, "::") name = self.get_token(TokenTypes.TEXT).value self.get_token(TokenTypes.LITERAL, ":") args = [] kwargs = {} # Commands can have arguments with self: arguments = self.get_token(TokenTypes.TEXT).value self.argsparser.analyse(arguments) # Consume the attributes args, kwargs = self.argsparser.get_arguments_and_reset() if name == "defblock": # Block definitions must have at least 2 arguments, # the alias and the block type. if len(args) < 2: self.error( "Block definitions require at least two unnamed arguments: ALIAS and BLOCKTYPE" ) block_alias = args.pop(0) block_type = args.pop(0) self.block_aliases[block_alias] = block_type self.block_defaults[block_type] = kwargs self.block_names[block_type] = args return None self._save(CommandNode(name=name, args=args, kwargs=kwargs)) @parser def _parse_title(self): # Parse a title in the form # # . This is a title # or # .This is a title # Parse the mandatory dot self.get_token(TokenTypes.LITERAL, ".") # Parse the optional white spaces with self: self.get_token(TokenTypes.WHITESPACE) # Get the text of the title text = self.get_token(TokenTypes.TEXT).value self.get_token(TokenTypes.EOL) # Titles can contain Mau code p = TextParser( footnotes_start_with=len(self.footnote_defs) + 1, v1_backward_compatibility=self.v1_backward_compatibility, ).analyse(text) title = p.nodes[0] self._push_title(title) @parser def _parse_attributes(self): # Parse block attributes in the form # [unnamed1, unnamed2, ..., named1=value1, name2=value2, ...] 
self.get_token(TokenTypes.LITERAL, "[") attributes = self.get_token(TokenTypes.TEXT).value self.get_token(TokenTypes.LITERAL, "]") # Attributes can use variables p = PreprocessVariablesParser(self.variables).analyse( attributes, ) attributes = p.nodes[0].value # Parse the arguments self.argsparser.analyse(attributes) @parser def _parse_header(self): # Parse a header in the form # # = Header # # The number of equal signs is arbitrary # and represents the level of the header. # Headers are automatically assigned an anchor # created using the provided function self.header_anchor # # Headers in the form # =! Header # are rendered but not included in the TOC # Get all the equal signs header = self.get_token( TokenTypes.LITERAL, check=lambda x: x.startswith("=") ).value # Get the mandatory white spaces self.get_token(TokenTypes.WHITESPACE) # Check if the header has to be in the TOC in_toc = True if header.endswith("!"): header = header[:-1] in_toc = False # Get the text of the header and calculate the level text = self.get_token(TokenTypes.TEXT).value level = len(header) # Generate the anchor and append it to the TOC anchor = self.header_anchor(text, level) # Consume the attributes args, kwargs = self.argsparser.get_arguments_and_reset() # Generate the header node header_node = HeaderNode(value=text, level=level, anchor=anchor, kwargs=kwargs) if in_toc: self.headers.append(header_node) self._save(header_node) @parser def _parse_block(self): # Parse a block in the form # # [block_type] # ---- # Content # ---- # Optional secondary content # # Blocks are delimited by 4 consecutive identical characters. 
# Get the delimiter and check the length delimiter = self.get_token(TokenTypes.TEXT).value if len(delimiter) != 4 or len(set(delimiter)) != 1: raise TokenError self.get_token(TokenTypes.EOL) # Collect everything until the next delimiter content = self._collect_lines( [Token(TokenTypes.TEXT, delimiter), Token(TokenTypes.EOF)] ) self.force_token(TokenTypes.TEXT, delimiter) self.get_token(TokenTypes.EOL) # Get the optional secondary content secondary_content = self._collect_lines( [Token(TokenTypes.EOL), Token(TokenTypes.EOF)] ) # Consume the title title = self._pop_title() # The first unnamed argument is the block type blocktype = self.argsparser.pop() # If there is a block alias for blocktype replace it # otherwise use the blocktype we already have blocktype = self.block_aliases.get(blocktype, blocktype) # Assign names self.argsparser.set_names_and_defaults( self.block_names.get(blocktype, []), self.block_defaults.get(blocktype, {}) ) # Consume the attributes args, kwargs = self.argsparser.get_arguments_and_reset() # Extract classes and convert them into a list classes = [i for i in kwargs.pop("classes", "").split(",") if len(i) > 0] # Extract condition if present and process it condition = kwargs.pop("condition", "") # Run this only if there is a condition on this block if len(condition) > 0: try: # The condition should be either test:variable:value or test:variable: test, variable, value = condition.split(":") except ValueError: self.error( f'Condition {condition} is not in the form "test:variable:value" or "test:variable:' ) # If there is no value use True if len(value) == 0: value = True # Check if the variable matches the value and apply the requested test match = self.variables.get(variable) == value result = True if test == "if" else False # If the condition is not satisfied return if match is not result: return # Extract the preprocessor preprocessor = kwargs.pop("preprocessor", "none") # Extract the engine engine = kwargs.pop("engine", "default") # Create 
the node parameters according to the engine if engine in ["raw", "mau"]: # Engine "raw" doesn't process the content, # so we just pass it untouched in the form of # a TextNode per line. The same is true for "mau" # as the visitor will have to fire up an new parser # to process the content. content = [TextNode(line) for line in content] secondary_content = [TextNode(line) for line in secondary_content] elif engine == "source": # Engine "source" extracts the content (source code), # the callouts, and the highlights. # The default language is "text". content, callouts, highlights = self._parse_source_engine( content, secondary_content, kwargs ) secondary_content = [] kwargs["callouts"] = callouts kwargs["highlights"] = highlights kwargs["language"] = kwargs.get("language", "text") elif engine == "default": # This is the default engine and it parses # both content and secondary content using a new parser # but then merges headers and footnotes into the # current one. # Parse the primary and secondary content and record footnotes pc = MainParser(variables=self.variables).analyse("\n".join(content)) ps = MainParser(variables=self.variables).analyse( "\n".join(secondary_content) ) content = pc.nodes secondary_content = ps.nodes self.footnote_defs.extend(pc.footnote_defs) self.headers.extend(pc.headers) else: raise EngineError(f"Engine {engine} is not available") self._save( BlockNode( blocktype=blocktype, content=content, secondary_content=secondary_content, args=args, classes=classes, engine=engine, preprocessor=preprocessor, kwargs=kwargs, title=title, ) ) def _parse_source_engine(self, content, secondary_content, kwargs): # Parse a source block in the form # # [source, language, attributes...] # ---- # content # ---- # # Source blocks support the following attributes # # callouts=":" The separator used by callouts # highlight="@" The special character to turn on highlight # # [source, language, attributes...] 
# ---- # content:1: # ---- # # [source, language, attributes...] # ---- # content:@: # ---- # # Callout descriptions can be added to the block # as secondary content with the syntax # # [source, language, attributes...] # ---- # content:name: # ---- # <name>: <description> # # Since Mau uses Pygments, the attribute language # is one of the langauges supported by that tool. # Get the delimiter for callouts (":" by default) delimiter = kwargs.pop("callouts", ":") # A dictionary that contains callout markers in # the form {linenum:name} callout_markers = {} # Get the marker for highlighted lines ("@" by default) highlight_marker = kwargs.pop("highlight", "@") # A list of highlighted lines highlighted_lines = [] # This is a list of all lines that might contain # a callout. They will be further processed # later to be sure. lines_with_callouts = [ (linenum, line) for linenum, line in enumerate(content) if line.endswith(delimiter) ] # Each line in the previous list is processed # and stored if it contains a callout for linenum, line in lines_with_callouts: # Remove the final delimiter line = line[:-1] splits = line.split(delimiter) if len(splits) < 2: # It's a trap! There are no separators left continue # Get the callout and the line callout_name = splits[-1] line = delimiter.join(splits[:-1]) content[linenum] = line # Check if we want to just highlight the line if callout_name == highlight_marker: highlighted_lines.append(linenum) else: callout_markers[linenum] = callout_name # A dictionary that contains the text for each # marker in the form {name:text} callout_contents = {} # If there was secondary content it should be formatted # with callout names followed by colon and the # callout text. for line in secondary_content: if ":" not in line: self.error( f"Callout description should be written as 'name: text'. 
Missing ':' in '{line}'" ) name, text = line.split(":") if name not in callout_markers.values(): self.error(f"Callout {name} has not been created in the source code") text = text.strip() callout_contents[name] = text # Put markers and contents together callouts = {"markers": callout_markers, "contents": callout_contents} # Source blocks must preserve the content literally textlines = [TextNode(line) for line in content] return textlines, callouts, highlighted_lines # self._save( # SourceNode( # language, # callouts=callouts, # highlights=highlighted_lines, # delimiter=delimiter, # code=textlines, # title=title, # kwargs=kwargs, # ) # ) @parser def _parse_content(self): # Parse attached content in the form # # [attributes] # << content_type:uri # Get the mandatory "<<" and white spaces self.get_token(TokenTypes.LITERAL, check=lambda x: x.startswith("<<")) self.get_token(TokenTypes.WHITESPACE) # Get the content type and the content URI content_type_and_uri = self.get_token(TokenTypes.TEXT).value content_type, uri = content_type_and_uri.split(":", maxsplit=1) title = self._pop_title() if content_type == "image": return self._parse_content_image(uri, title) return self._parse_standard_content(content_type, uri, title) def _parse_content_image(self, uri, title): # Parse a content image in the form # # [alt_text, classes] # << image:uri # # alt_text is the alternate text to use is the image is not reachable # and classes is a comma-separated list of classes # Assign names and consume the attributes self.argsparser.set_names_and_defaults( ["alt_text", "classes"], {"alt_text": None, "classes": None} ) args, kwargs = self.argsparser.get_arguments_and_reset() alt_text = kwargs.pop("alt_text") classes = kwargs.pop("classes") if classes: classes = classes.split(",") self._save( ContentImageNode( uri=uri, alt_text=alt_text, classes=classes, title=title, kwargs=kwargs, ) ) def _parse_standard_content(self, content_type, uri, title): # This is the fallback for an unknown content 
type # Consume the attributes args, kwargs = self.argsparser.get_arguments_and_reset() self._save( ContentNode( uri=uri, title=title, args=args, kwargs=kwargs, ) ) @parser def _parse_list(self): # Parse a list. # Lists can be ordered (using numbers) # # * One item # * Another item # # or unordered (using bullets) # # # Item 1 # # Item 2 # # The number of headers increases # the depth of each item # # # Item 1 # ## Sub-Item 1.1 # # Spaces before and after the header are ignored. # So the previous list can be also written # # # Item 1 # ## Sub-Item 1.1 # # Ordered and unordered lists can be mixed. # # * One item # ## Sub Item 1 # ## Sub Item 2 # # Ignore initial white spaces with self: self.get_token(TokenTypes.WHITESPACE) # Get the header and decide if it's a numbered or unnumbered list header = self.peek_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#") numbered = True if header.value[0] == "#" else False # Parse all the following items nodes = self._parse_list_nodes() self._save(ListNode(numbered, nodes, main_node=True)) def _parse_list_nodes(self): # This parses all items of a list # Ignore initial white spaces with self: self.get_token(TokenTypes.WHITESPACE) # Parse the header and ignore the following white spaces header = self.get_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#").value self.get_token(TokenTypes.WHITESPACE) # Collect and parse the text of the item text = self._collect_text_content() content = self._parse_text_content(text) # Compute the level of the item level = len(header) nodes = [] nodes.append(ListItemNode(level, content)) while not self.peek_token() in [Token(TokenTypes.EOF), Token(TokenTypes.EOL)]: # This is the SentenceNode inside the last node added to the list # which is used to append potential nested nodes last_node_sentence = nodes[-1].content # Ignore the initial white spaces with self: self.get_token(TokenTypes.WHITESPACE) if len(self.peek_token().value) == level: # The new item is on the same level # Get the header 
header = self.get_token().value # Ignore white spaces self.get_token(TokenTypes.WHITESPACE) # Collect and parse the text of the item text = self._collect_text_content() content = self._parse_text_content(text) nodes.append(ListItemNode(len(header), content)) elif len(self.peek_token().value) > level: # The new item is on a deeper level # Treat the new line as a new list numbered = True if self.peek_token().value[0] == "#" else False subnodes = self._parse_list_nodes() last_node_sentence.content.append(ListNode(numbered, subnodes)) else: break return nodes @parser def _parse_paragraph(self): # This parses a paragraph. # Paragraphs can be written on multiple lines and # end with an empty line. # Get all the lines, join them and parse them lines = self._collect_lines([Token(TokenTypes.EOL), Token(TokenTypes.EOF)]) text = " ".join(lines) sentence = self._parse_text_content(text) # Consume the attributes args, kwargs = self.argsparser.get_arguments_and_reset() self._save(ParagraphNode(sentence, args=args, kwargs=kwargs)) def _parse_functions(self): # All the functions that this parser provides. return [ self._parse_eol, self._parse_horizontal_rule, self._parse_single_line_comment, self._parse_multi_line_comment, self._parse_variable_definition, self._parse_command, self._parse_title, self._parse_attributes, self._parse_header, self._parse_block, self._parse_content, self._parse_list, self._parse_paragraph, ] def _create_toc(self): # Create the TOC from the list of headers. 
nodes = [] latest_by_level = {} for header_node in self.headers: # This is the current node node = TocEntryNode(header_node) level = header_node.level # This collects the latest node added with a given level latest_by_level[level] = node try: # Simplest case, add it to the latest one # with a level just 1 step lower latest_by_level[level - 1].children.append(node) except KeyError: # Find all the latest ones added with a level lower than this latest = [latest_by_level.get(i, None) for i in range(1, level)] # Get the children list of each one, plus nodes for the root children = [nodes] + [i.children for i in latest if i is not None] # Get the nearest one and append to that children[-1].append(node) return TocNode(entries=nodes) def parse(self): super().parse() self.toc = self._create_toc() self.footnotes = FootnotesNode(entries=self.footnote_defs)
32.136659
104
0.583463
888b41cc12274148e790e361bed90e406da76010
3,344
py
Python
stereomag/nets.py
MandyMY/stereo-magnification
c18fa484484597dfa653f317459a503d9bf8d933
[ "Apache-2.0" ]
null
null
null
stereomag/nets.py
MandyMY/stereo-magnification
c18fa484484597dfa653f317459a503d9bf8d933
[ "Apache-2.0" ]
null
null
null
stereomag/nets.py
MandyMY/stereo-magnification
c18fa484484597dfa653f317459a503d9bf8d933
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Network definitions for multiplane image (MPI) prediction networks. """ from __future__ import division import numpy as np #import tensorflow as tf import tensorflow.compat.v1 as tf #from tensorflow.contrib import slim import tf_slim as slim def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False): """Network definition for multiplane image (MPI) inference. Args: inputs: stack of input images [batch, height, width, input_channels] num_outputs: number of output channels ngf: number of features for the first conv layer vscope: variable scope reuse_weights: whether to reuse weights (for weight sharing) Returns: pred: network output at the same spatial resolution as the inputs. 
""" with tf.variable_scope(vscope, reuse=reuse_weights): with slim.arg_scope( [slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm): cnv1_1 = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1) cnv1_2 = slim.conv2d(cnv1_1, ngf * 2, [3, 3], scope='conv1_2', stride=2) cnv2_1 = slim.conv2d(cnv1_2, ngf * 2, [3, 3], scope='conv2_1', stride=1) cnv2_2 = slim.conv2d(cnv2_1, ngf * 4, [3, 3], scope='conv2_2', stride=2) cnv3_1 = slim.conv2d(cnv2_2, ngf * 4, [3, 3], scope='conv3_1', stride=1) cnv3_2 = slim.conv2d(cnv3_1, ngf * 4, [3, 3], scope='conv3_2', stride=1) cnv3_3 = slim.conv2d(cnv3_2, ngf * 8, [3, 3], scope='conv3_3', stride=2) cnv4_1 = slim.conv2d( cnv3_3, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2) cnv4_2 = slim.conv2d( cnv4_1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2) cnv4_3 = slim.conv2d( cnv4_2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2) # Adding skips skip = tf.concat([cnv4_3, cnv3_3], axis=3) cnv6_1 = slim.conv2d_transpose( skip, ngf * 4, [4, 4], scope='conv6_1', stride=2) cnv6_2 = slim.conv2d(cnv6_1, ngf * 4, [3, 3], scope='conv6_2', stride=1) cnv6_3 = slim.conv2d(cnv6_2, ngf * 4, [3, 3], scope='conv6_3', stride=1) skip = tf.concat([cnv6_3, cnv2_2], axis=3) cnv7_1 = slim.conv2d_transpose( skip, ngf * 2, [4, 4], scope='conv7_1', stride=2) cnv7_2 = slim.conv2d(cnv7_1, ngf * 2, [3, 3], scope='conv7_2', stride=1) skip = tf.concat([cnv7_2, cnv1_2], axis=3) cnv8_1 = slim.conv2d_transpose( skip, ngf, [4, 4], scope='conv8_1', stride=2) cnv8_2 = slim.conv2d(cnv8_1, ngf, [3, 3], scope='conv8_2', stride=1) feat = cnv8_2 pred = slim.conv2d( feat, num_outputs, [1, 1], stride=1, activation_fn=tf.nn.tanh, normalizer_fn=None, scope='color_pred') return pred
39.809524
78
0.650718
888c285859f9179b927cbdc06da726b52d44b5cf
3,731
py
Python
tests/test_init.py
ashb/freedesktop-icons
10737b499bff9a22c853aa20822215c8e059a737
[ "MIT" ]
1
2021-06-02T11:11:50.000Z
2021-06-02T11:11:50.000Z
tests/test_init.py
ashb/freedesktop-icons
10737b499bff9a22c853aa20822215c8e059a737
[ "MIT" ]
null
null
null
tests/test_init.py
ashb/freedesktop-icons
10737b499bff9a22c853aa20822215c8e059a737
[ "MIT" ]
null
null
null
from pathlib import Path from unittest import mock import pytest from freedesktop_icons import Icon, Theme, lookup, lookup_fallback, theme_search_dirs @pytest.mark.parametrize( ("env", "expected"), ( ("", [Path.home() / '.icons']), ("/foo:", [Path.home() / '.icons', Path('/foo/icons')]), ), ) def test_theme_search_dirs(env, expected, monkeypatch): monkeypatch.setenv('XDG_DATA_DIRS', env) assert list(theme_search_dirs()) == expected def _stub_get_theme(get_theme, **kwargs): get_theme.side_effect = kwargs.get @mock.patch("freedesktop_icons.get_theme", autospec=True) def test_lookup(get_theme): real_theme = mock.create_autospec(Theme, name="real_theme") real_theme.parents = ['parent', 'hicolor'] _stub_get_theme(get_theme, Adwaita=real_theme) lookup("org.mozilla.firefox", "Adwaita") assert get_theme.mock_calls == [mock.call('Adwaita')] @mock.patch("freedesktop_icons.get_theme", autospec=True) def test_lookup_icon(get_theme): real_theme = mock.create_autospec(Theme, name="real_theme") real_theme.parents = [] _stub_get_theme(get_theme, Adwaita=real_theme) icon = Icon("org.mozilla.firefox") lookup(icon, "Adwaita") assert get_theme.mock_calls == [mock.call('Adwaita')] @mock.patch("freedesktop_icons.get_theme", autospec=True) def test_lookup_in_parent(get_theme): real_theme = mock.create_autospec(Theme, name="real_theme") real_theme.parents = ['parent'] real_theme.lookup.return_value = None parent_theme = mock.create_autospec(Theme, name="parent_theme") _stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme) lookup("org.mozilla.firefox", "Adwaita") assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent')] @mock.patch("freedesktop_icons.get_theme", autospec=True) def test_lookup_in_hicolor(get_theme): real_theme = mock.create_autospec(Theme, name="real_theme") real_theme.parents = ['parent'] real_theme.lookup.return_value = None parent_theme = mock.create_autospec(Theme, name="parent_theme") parent_theme.lookup.return_value = None hicolor = 
mock.create_autospec(Theme, name="hicolor") hicolor.lookup.return_value = mock.MagicMock() _stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme, hicolor=hicolor) path = lookup("org.mozilla.firefox", "Adwaita") assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent'), mock.call('hicolor')] assert path is hicolor.lookup.return_value @mock.patch("freedesktop_icons.get_theme", autospec=True) @mock.patch("freedesktop_icons.lookup_fallback", autospec=True) def test_lookup_in_fallback(lookup_fallback, get_theme): real_theme = mock.create_autospec(Theme, name="real_theme") real_theme.lookup.return_value = None hicolor = mock.create_autospec(Theme, name="hicolor") hicolor.lookup.return_value = None _stub_get_theme(get_theme, Adwaita=real_theme, hicolor=hicolor) lookup_fallback.return_value = mock.MagicMock() path = lookup("org.mozilla.firefox", "Adwaita") assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('hicolor')] assert lookup_fallback.mock_calls == [mock.call('org.mozilla.firefox', ['svg', 'png', 'xpm'])] assert path is lookup_fallback.return_value @mock.patch("freedesktop_icons.fallback_paths") def test_lookup_fallback(fallback_paths, tmpdir): file = tmpdir / 'org.mozilla.firefox.svg' file.open('w').close() fallback_paths.return_value = [tmpdir] assert lookup_fallback("not-there", ['svg']) is None assert lookup_fallback("org.mozilla.firefox", ['png']) is None assert lookup_fallback("org.mozilla.firefox", ['svg']) == file
35.198113
100
0.729027
888d0174a06f5d771e461f6d3b086646f76a87f5
569
py
Python
src/sweetrpg_library_api/application/__init__.py
paulyhedral/sweetrpg-library-api
0105e963ef4321398aa66d7cb3aa9c2df1c4f375
[ "MIT" ]
null
null
null
src/sweetrpg_library_api/application/__init__.py
paulyhedral/sweetrpg-library-api
0105e963ef4321398aa66d7cb3aa9c2df1c4f375
[ "MIT" ]
33
2021-09-18T23:52:05.000Z
2022-03-30T12:25:49.000Z
src/sweetrpg_library_api/application/__init__.py
sweetrpg/library-api
0105e963ef4321398aa66d7cb3aa9c2df1c4f375
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = "Paul Schifferer <dm@sweetrpg.com>" """ """ import os import sentry_sdk from sentry_sdk.integrations.flask import FlaskIntegration from sentry_sdk.integrations.redis import RedisIntegration from sweetrpg_library_api.application import constants sentry_sdk.init(dsn=os.environ[constants.SENTRY_DSN], traces_sample_rate=0.2, environment=os.environ.get(constants.SENTRY_ENV) or 'Unknown', integrations=[ FlaskIntegration(), RedisIntegration(), ])
28.45
78
0.680141
888eea6317cde6023d0d320a6a78866a20795e44
13,674
py
Python
isochrones_old.py
timothydmorton/fpp-old
6a2175d4bd9648b61c244c7463148632f36de631
[ "MIT" ]
null
null
null
isochrones_old.py
timothydmorton/fpp-old
6a2175d4bd9648b61c244c7463148632f36de631
[ "MIT" ]
null
null
null
isochrones_old.py
timothydmorton/fpp-old
6a2175d4bd9648b61c244c7463148632f36de631
[ "MIT" ]
null
null
null
""" Compiles stellar model isochrones into an easy-to-access format. """ from numpy import * from scipy.interpolate import LinearNDInterpolator as interpnd from consts import * import os,sys,re import scipy.optimize #try: # import pymc as pm #except: # print 'isochrones: pymc not loaded! MCMC will not work' import numpy.random as rand import atpy DATAFOLDER = os.environ['ASTROUTIL_DATADIR'] #'/Users/tdm/Dropbox/astroutil/data' def gr2B(g,r): return gr2V(g,r) + 1.04*(g-r) + 0.19 def gr2V(g,r): return r + 0.44*(g-r)-0.02 def BV2r(B,V): return (1/(1 + 0.56/0.44))*((1/0.44)*(V+0.02) - (1/1.04)*(B-V-0.19)) def BV2g(B,V): return BV2r(B,V) + (1/1.04)*(B-V) - 0.19/1.04 def fehstr(feh,minfeh=-1.0,maxfeh=0.5): if feh < minfeh: return '%.1f' % minfeh elif feh > maxfeh: return '%.1f' % maxfeh elif (feh > -0.05 and feh < 0): return '0.0' else: return '%.1f' % feh class isochrone(object): """Generic isochrone class.""" def __init__(self,age,m_ini,m_act,logL,Teff,logg,mags,fehs=None): self.minage = age.min() self.maxage = age.max() self.minmass = m_act.min() self.maxmass = m_act.max() self.bands = [] for band in mags: self.bands.append(band) L = 10**logL #R = sqrt(G*m_act*MSUN/10**logg)/RSUN if fehs is None: points = array([m_ini,age]).T self.is3d = False else: points = array([m_ini,age,fehs]).T self.is3d = True if self.is3d: self.feh = lambda m,age,feh: feh else: self.feh = lambda m,age: self.isofeh self.M = interpnd(points,m_act) self.tri = self.M.tri #self.R = interpnd(points,R) self.logL = interpnd(self.tri,logL) self.logg = interpnd(self.tri,logg) self.logTe = interpnd(self.tri,log10(Teff)) def Teff_fn(*pts): return 10**self.logTe(*pts) #self.Teff = lambda *pts: 10**self.logTe(*pts) self.Teff = Teff_fn def R_fn(*pts): return sqrt(G*self.M(*pts)*MSUN/10**self.logg(*pts))/RSUN #self.R = lambda *pts: sqrt(G*self.M(*pts)*MSUN/10**self.logg(*pts))/RSUN self.R = R_fn self.mag = {} for band in self.bands: self.mag[band] = interpnd(self.tri,mags[band]) def __call__(self,*args): if 
self.is3d: if len(args) != 3: raise ValueError('must call with M, age, and [Fe/H]') m,age,feh = args else: if len(args) != 2: raise ValueError('must call with M,age') m,age = args Ms = self.M(*args) Rs = self.R(*args) logLs = self.logL(*args) loggs = self.logg(*args) Teffs = self.Teff(*args) mags = {} for band in self.bands: mags[band] = self.mag[band](*args) return {'age':age,'M':Ms,'feh':self.feh(*args),'R':Rs,'logL':logLs,'logg':loggs,'Teff':Teffs,'mag':mags} def evtrack(self,m,minage=6.7,maxage=10,dage=0.05): ages = arange(minage,maxage,dage) Ms = self.M(m,ages) Rs = self.R(m,ages) logLs = self.logL(m,ages) loggs = self.logg(m,ages) Teffs = self.Teff(m,ages) mags = {} for band in self.bands: mags[band] = self.mag[band](m,ages) #return array([ages,Ms,Rs,logLs,loggs,Teffs, #record array? return {'age':ages,'M':Ms,'R':Rs,'logL':logLs,'Teff':Teffs,'mag':mags} def isochrone(self,age,minm=0.1,maxm=2,dm=0.02): ms = arange(minm,maxm,dm) ages = ones(ms.shape)*age Ms = self.M(ms,ages) Rs = self.R(ms,ages) logLs = self.logL(ms,ages) loggs = self.logg(ms,ages) Teffs = self.Teff(ms,ages) mags = {} for band in self.bands: mags[band] = self.mag[band](ms,ages) return {'M':Ms,'R':Rs,'logL':logLs,'Teff':Teffs,'mag':mags} class WD(isochrone): def __init__(self,composition='H'): if composition not in ['H','He']: raise ValueError('Unknown composition: %s (must be H or He)' % composition) self.composition = composition filename = '%s/stars/WDs_%s.txt' % (DATAFOLDER,composition) data = recfromtxt(filename,names=True) mags = {'bol':data.Mbol,'U':data.U,'B':data.B,'V':data.V,'R':data.R,'I':data.I, 'J':data.J,'H':data.H,'K':data.K,'u':data.u,'g':data.g,'r':data.r, 'i':data.i,'z':data.z,'y':data.y} logL = -2.5*(data.Mbol-4.77) gr = mags['g']-mags['r'] mags['kep'] = 0.25*mags['g'] + 0.75*mags['r'] w = where(gr > 0.3) mags['kep'][w] = 0.3*mags['g'][w] + 0.7*mags['r'][w] isochrone.__init__(self,log10(data.Age),data.mass,data.mass,logL, data.Teff,data.logg,mags) class padova(isochrone): def 
__init__(self,feh=0.): filename = DATAFOLDER + '/stars/padova_%s.dat' % fehstr(feh,-2,0.2) self.isofeh = feh #filename = 'data/kepisochrones.dat' age,m_ini,m_act,logL,logT,logg,mbol,kep,g,r,i,z,dd051,J,H,K = \ loadtxt(filename,usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15),unpack=True) mags = {'bol':mbol,'kep':kep,'g':g,'r':r,'i':i,'z':z,'dd051':dd051,'J':J,'H':H,'K':K} mags['B'] = gr2B(g,r) mags['V'] = gr2V(g,r) mags['R'] = r #cheating mags['I'] = i #cheating mags['Kepler'] = mags['kep'] isochrone.__init__(self,age,m_ini,m_act,logL,10**logT,logg,mags) class padova3d(isochrone): def __init__(self,minm=0.9,maxm=1.1,minage=9,maxage=10,minfeh=-0.2,maxfeh=0.2): #ages = array([]); m_inis = array([]); m_acts = array([]); logLs = array([]); logTs = array([]); loggs = array([]) #mbols = array([]); keps = array([]); gs = array([]); rs = array([]); iz = array([]); zs = array([]) #dd051s = array([]); Js = array([]) data = None fehs = array([]) fehlist = arange(-2,0.3,0.1) fehlist = fehlist[where((fehlist >= minfeh) & (fehlist <= maxfeh+0.001))] for feh in fehlist: print 'loading isochrone for [Fe/H] = %.1f...' 
% feh filename = DATAFOLDER + '/stars/padova_%.1f.dat' % feh if data is None: data = loadtxt(filename) n = len(data) else: newdata = loadtxt(filename) n = len(newdata) data = concatenate((data,newdata)) fehs = concatenate((fehs,ones(n)*feh)) inds = where((data[:,2] >= minm) & (data[:,2] <= maxm) & (data[:,0] >= minage) & (data[:,0] <= maxage))[0] data = data[inds,:] fehs = fehs[inds] age,m_ini,m_act,logL,logT,logg,mbol,kep,g,r,i,z,dd051,J,H,K = \ (data[:,0],data[:,1],data[:,2],data[:,3],data[:,4],data[:,5],data[:,6],data[:,7],data[:,8], data[:,9],data[:,10],data[:,11],data[:,12],data[:,13],data[:,14],data[:,15]) self.minm = m_ini.min() self.maxm = m_ini.max() self.minage = age.min() self.maxage = age.max() self.minfeh = fehs.min() self.maxfeh = fehs.max() mags = {'bol':mbol,'kep':kep,'g':g,'r':r,'i':i,'z':z,'dd051':dd051,'J':J,'H':H,'K':K} mags['B'] = gr2B(g,r) mags['V'] = gr2V(g,r) mags['R'] = r #cheating mags['I'] = i #cheating isochrone.__init__(self,age,m_ini,m_act,logL,10**logT,logg,mags,fehs=fehs) class baraffe(isochrone): def __init__(self,feh=0): filename = '%s/stars/baraffe0.0.txt' % DATAFOLDER data = recfromtxt(filename,names=True) self.isofeh = feh mags = {'V':data.Mv,'R':data.Mr,'I':data.Mi,'J':data.Mj,'H':data.Mh, 'K':data.Mk,'L':data.Ml,'M':data.Mm} #mags['kep'] = (mags['V'] + mags['R'])/2 - 2 mags['g'] = mags['V'] mags['r'] = mags['R'] mags['i'] = mags['I'] mags['z'] = 0.8*mags['I'] + 0.2*mags['J'] mags['kep'] = 0.1*mags['g'] + 0.9*mags['r'] isochrone.__init__(self,log10(data.age*1e9),data.m,data.m,data.logL, data.Teff,data.g,mags) class dartmouth(isochrone): def __init__(self,feh=0,bands=['U','B','V','R','I','J','H','K','g','r','i','z','Kepler']): filename = '%s/stars/dartmouth_%s.fits' % (DATAFOLDER,fehstr(feh,-1.0,0.5)) t = atpy.Table(filename) self.isofeh = feh mags = {} for band in bands: try: mags[band] = t[band] except: if band == 'kep' or band == 'Kepler': mags[band] = t['Kp'] else: raise #Fg = 10**(-0.4*mags['g']) #Fr = 10**(-0.4*mags['r']) 
#gr = mags['g']-mags['r'] #mags['kep'] = 0.25*mags['g'] + 0.75*mags['r'] #w = where(gr > 0.3) #mags['kep'][w] = 0.3*mags['g'][w] + 0.7*mags['r'][w] #mags['kep'] = (mags['g']+mags['r'])/2 #fix this! isochrone.__init__(self,log10(t['age']*1e9),t['M'],t['M'],t['logL'], 10**t['logTe'],t['logg'],mags) def write_all_dartmouth_to_fits(fehs=arange(-1,0.51,0.1)): for feh in fehs: try: print feh dartmouth_to_fits(feh) except: raise pass def dartmouth_to_fits(feh): filename_2mass = '%s/stars/dartmouth_%s_2massKp.iso' % (DATAFOLDER,fehstr(feh,-1.0,0.5)) filename_ugriz = '%s/stars/dartmouth_%s_ugriz.iso' % (DATAFOLDER,fehstr(feh,-1.0,0.5)) data_2mass = recfromtxt(filename_2mass,skiprows=8,names=True) data_ugriz = recfromtxt(filename_ugriz,skiprows=8,names=True) n = len(data_2mass) ages = zeros(n) curage = 0 i=0 for line in open(filename_2mass): m = re.match('#',line) if m: m = re.match('#AGE=\s*(\d+\.\d+)\s+',line) if m: curage=m.group(1) else: if re.search('\d',line): ages[i]=curage i+=1 t = atpy.Table() t.add_column('age',ages) t.add_column('M',data_2mass.MMo) t.add_column('logTe',data_2mass.LogTeff) t.add_column('logg',data_2mass.LogG) t.add_column('logL',data_2mass.LogLLo) t.add_column('U',data_2mass.U) t.add_column('B',data_2mass.B) t.add_column('V',data_2mass.V) t.add_column('R',data_2mass.R) t.add_column('I',data_2mass.I) t.add_column('J',data_2mass.J) t.add_column('H',data_2mass.H) t.add_column('K',data_2mass.Ks) t.add_column('Kp',data_2mass.Kp) t.add_column('u',data_ugriz.sdss_u) t.add_column('g',data_ugriz.sdss_g) t.add_column('r',data_ugriz.sdss_r) t.add_column('i',data_ugriz.sdss_i) t.add_column('z',data_ugriz.sdss_z) t.write('%s/stars/dartmouth_%s.fits' % (DATAFOLDER,fehstr(feh,-1,0.5)),overwrite=True) def isofit(iso,p0=None,**kwargs): def chisqfn(pars): if iso.is3d: m,age,feh = pars else: m,age = pars tot = 0 for kw in kwargs: val,err = kwargs[kw] fn = getattr(iso,kw) tot += (val-fn(*pars))**2/err**2 return tot if iso.is3d: if p0 is None: p0 = 
((iso.minm+iso.maxm)/2,(iso.minage + iso.maxage)/2.,(iso.minfeh + iso.maxfeh)/2.) else: if p0 is None: p0 = (1,9.5) pfit = scipy.optimize.fmin(chisqfn,p0,disp=False) print pfit return iso(*pfit) def shotgun_isofit(iso,n=100,**kwargs): simdata = {} for kw in kwargs: val,err = kwargs[kw] simdata[kw] = rand.normal(size=n)*err + val if iso.is3d: Ms,ages,fehs = (zeros(n),zeros(n),zeros(n)) else: Ms,ages = (zeros(n),zeros(n)) for i in arange(n): simkwargs = {} for kw in kwargs: val = simdata[kw][i] err = kwargs[kw][1] simkwargs[kw] = (val,err) fit = isofit(iso,**simkwargs) Ms[i] = fit['M'] ages[i] = fit['age'] if iso.is3d: fehs[i] = fit['feh'] if iso.is3d: res = iso(Ms,ages,fehs) else: res = iso(Ms,ages) return res def isofitMCMCmodel(iso,**kwargs): if iso.is3d: mass = pm.Uniform('mass',lower=iso.minm,upper=iso.maxm) age = pm.Uniform('age',lower=iso.minage,upper=iso.maxage) feh = pm.Uniform('feh',lower=iso.minfeh,upper=iso.maxfeh) ns = {'pm':pm,'mass':mass,'age':age,'feh':feh} else: mass = pm.Uniform('mass',lower=0.1,upper=5) age = pm.Uniform('age',lower=6.7,upper=10.1) ns = {'pm':pm,'mass':mass,'age':age} for kw in kwargs: val,dval = kwargs[kw] fn = getattr(iso,kw) ns['fn'] = fn ns['val'] = val ns['dval'] = dval if iso.is3d: code = "@pm.observed(dtype=float)\ndef %s(value=val,mass=mass,age=age,feh=feh): return max(-1000,-(fn(mass,age,feh) - val)**2/dval**2)" % kw else: code = "@pm.observed(dtype=float)\ndef %s(value=val,mass=mass,age=age): return max(-1000,-(fn(mass,age) - val)**2/dval**2)" % kw exec code in ns return ns def isofitMCMC(iso,niter=5e4,nburn=1e4,thin=100,verbose=True,**kwargs): model = isofitMCMCmodel(iso,**kwargs) M = pm.MCMC(model) M.sample(iter=niter,burn=nburn,thin=thin,verbose=verbose) return M
34.270677
152
0.52311
888f394f188ea661c1605445db9b8d21a4a1470e
1,263
py
Python
tests/pterradactyl/util/test_common.py
GiampaoloFalqui/pterradactyl
3d34f8088784c53e3d0871d8ca81f2381f2c2be9
[ "Apache-2.0" ]
2
2022-03-31T09:02:49.000Z
2022-03-31T18:56:52.000Z
tests/pterradactyl/util/test_common.py
GiampaoloFalqui/pterradactyl
3d34f8088784c53e3d0871d8ca81f2381f2c2be9
[ "Apache-2.0" ]
4
2021-12-20T18:31:47.000Z
2022-03-14T16:31:49.000Z
tests/pterradactyl/util/test_common.py
GiampaoloFalqui/pterradactyl
3d34f8088784c53e3d0871d8ca81f2381f2c2be9
[ "Apache-2.0" ]
null
null
null
import unittest from pterradactyl.util import as_list, memoize, merge_dict, lookup class TestCommonUtil(unittest.TestCase): def memoize_func(self, *arg, **kwargs): pass def test_as_list_string(self): elem = "3" r = as_list(elem) self.assertListEqual(r, list(elem)) def test_as_list_list(self): elem = ["list_elem"] r = as_list(elem) self.assertListEqual(r, elem) def test_merge_dict(self): dict1 = {'a': 'b'} dict2 = {'c': 'd'} merged_dict = merge_dict(dict1, dict2) self.assertDictEqual(merged_dict, {'a': 'b', 'c': 'd'}) def test_lookup(self): data = {'foo': 'bar', 'foo1': 'bar1'} value = lookup(data, 'foo') self.assertEqual(value, 'bar') def test_lookup_with_non_existing_key(self): data = {'foo': 'bar', 'foo1': 'bar1'} value = lookup(data, 'foo_1') self.assertEqual(value, None) def test_lookup_should_return_default(self): data = {'foo': 'bar', 'foo1': 'bar1'} value = lookup(data, 'foo_1', default='bar2') self.assertEqual(value, 'bar2') def test_memoize(self): m = memoize(self.memoize_func(mylist=[1, 2, 3, 4, 5])) self.assertTrue(m)
28.704545
66
0.588282
8890ba16069cecd8d4ab8ea601bde0d4759bc1b2
15,223
py
Python
code/functions/et_import.py
behinger/etcomp
f30389da49c3416c7a723d44951d197d6e89d40e
[ "MIT" ]
20
2018-08-08T07:08:46.000Z
2022-03-07T14:49:06.000Z
code/functions/et_import.py
Tsehao/etcomp
69485f751649090f3df589e40fb515e874be207b
[ "MIT" ]
32
2017-12-05T14:05:48.000Z
2020-10-20T10:29:43.000Z
code/functions/et_import.py
Tsehao/etcomp
69485f751649090f3df589e40fb515e874be207b
[ "MIT" ]
7
2018-12-09T22:53:10.000Z
2021-11-10T09:13:04.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np import pandas as pd import os import logging from functions.et_helper import findFile,gaze_to_pandas import functions.et_parse as parse import functions.et_make_df as make_df import functions.et_helper as helper import imp # for edfread reload import scipy import scipy.stats #%% PUPILLABS def pl_fix_timelag(pl): #fixes the pupillabs latency lag (which can be super large!!) t_cam = np.asarray([p['recent_frame_timestamp'] for p in pl['notifications'] if p['subject']=='trigger'])# camera time t_msg = np.asarray([p['timestamp'] for p in pl['notifications'] if p['subject']=='trigger']) # msg time #slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(t_msg,t_cam) # predict camera time based on msg time slope,intercept,low,high = scipy.stats.theilslopes(t_cam,t_msg) logger = logging.getLogger(__name__) logger.warning("fixing lag (at t=0) of :%.2fms, slope of %.7f (in a perfect world this is 0ms & 1.0)"%(intercept*1000,slope)) # fill it back in # gonna do it with a for-loop because other stuff is too voodo or not readable for me # Use this code (and change t_cam and t_msg above) if you want everything in computer time timestamps #for ix,m in enumerate(pl['gaze_positions']): # pl['gaze_positions'][ix]['timestamp'] = pl['gaze_positions'][ix]['timestamp'] * slope + intercept # for ix2,m2 in enumerate(pl['gaze_positions'][ix]['pupil_positions']): # pl['gaze_positions'][ix]['pupil_positions']['timestamp'] = pl['gaze_positions'][ix]['pupil_positions']['timestamp'] * slope + intercept #for ix,m in enumerate(pl['gaze_positions']): # pl['pupil_positions'][ix]['timestamp'] = pl['pupil_positions'][ix]['timestamp'] * slope + intercept# + 0.045 # the 45ms are the pupillabs defined delay between camera image & timestamp3 # this code is to get notifications into sample time stamp. 
But for now we for ix,m in enumerate(pl['notifications']): pl['notifications'][ix]['timestamp'] = pl['notifications'][ix]['timestamp'] * slope + intercept + 0.045 # the 45ms are the pupillabs defined delay between camera image & timestamp3 return(pl) def raw_pl_data(subject='',datapath='/net/store/nbp/projects/etcomp/',postfix='raw'): # Input: subjectname, datapath # Output: Returns pupillabs dictionary from lib.pupil.pupil_src.shared_modules import file_methods as pl_file_methods if subject == '': filename = datapath else: filename = os.path.join(datapath,subject,postfix) print(os.path.join(filename,'pupil_data')) # with dict_keys(['notifications', 'pupil_positions', 'gaze_positions']) # where each value is a list that contains a dictionary original_pldata = pl_file_methods.load_object(os.path.join(filename,'pupil_data')) #original_pldata = pl_file_methods.Incremental_Legacy_Pupil_Data_Loader(os.path.join(filename,'pupil_data')) # 'notification' # dict_keys(['record', 'subject', 'timestamp', 'label', 'duration']) # 'pupil_positions' # dict_keys(['diameter', 'confidence', 'method', 'norm_pos', 'timestamp', 'id', 'topic', 'ellipse']) # 'gaze_positions' # dict_keys(['base_data', 'timestamp', 'topic', 'confidence', 'norm_pos']) # where 'base_data' has a dict within a list # dict_keys(['diameter', 'confidence', 'method', 'norm_pos', 'timestamp', 'id', 'topic', 'ellipse']) # where 'normpos' is a list (with horizon. and vert. component) # Fix the (possible) timelag of pupillabs camera vs. 
computer time return original_pldata def import_pl(subject='', datapath='/net/store/nbp/projects/etcomp/', recalib=True, surfaceMap=True,parsemsg=True,fixTimeLag=True,px2deg=True,pupildetect=None, pupildetect_options=None): # Input: subject: (str) name # datapath: (str) location where data is stored # surfaceMap: # Output: Returns 2 dfs (plsamples and plmsgs) # get a logger logger = logging.getLogger(__name__) if pupildetect: # has to be imported first import av import ctypes ctypes.cdll.LoadLibrary('/net/store/nbp/users/behinger/projects/etcomp/local/build/build_ceres_working/lib/libceres.so.2') if surfaceMap: # has to be imported before nbp recalib try: import functions.pl_surface as pl_surface except ImportError: raise('Custom Error:Could not import pl_surface') assert(type(subject)==str) # Get samples df # (is still a dictionary here) original_pldata = raw_pl_data(subject=subject, datapath=datapath) if pupildetect is not None: # can be 2d or 3d from functions.nbp_pupildetect import nbp_pupildetect if subject == '': filename = datapath else: filename = os.path.join(datapath,subject,'raw') pupil_positions_0= nbp_pupildetect(detector_type = pupildetect, eye_id = 0,folder=filename,pupildetect_options=pupildetect_options) pupil_positions_1= nbp_pupildetect(detector_type = pupildetect, eye_id = 1,folder=filename,pupildetect_options=pupildetect_options) pupil_positions = pupil_positions_0 + pupil_positions_1 original_pldata['pupil_positions'] = pupil_positions recalib=True # recalibrate data if recalib: from functions import nbp_recalib if pupildetect is not None: original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata,calibration_mode=pupildetect) original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata) # Fix timing # Pupillabs cameras ,have their own timestamps & clock. The msgs are clocked via computertime. Sometimes computertime&cameratime show drift (~40% of cases). 
# We fix this here if fixTimeLag: original_pldata = pl_fix_timelag(original_pldata) if surfaceMap: folder= os.path.join(datapath,subject,'raw') tracker = pl_surface.map_surface(folder) gaze_on_srf = pl_surface.surface_map_data(tracker,original_pldata['gaze_positions']) logger.warning('Original Data Samples: %s on surface: %s',len(original_pldata['gaze_positions']),len(gaze_on_srf)) original_pldata['gaze_positions'] = gaze_on_srf # use pupilhelper func to make samples df (confidence, gx, gy, smpl_time, diameter) pldata = gaze_to_pandas(original_pldata['gaze_positions']) if surfaceMap: pldata.gx = pldata.gx*(1920 - 2*(75+18))+(75+18) # minus white border of marker & marker pldata.gy = pldata.gy*(1080- 2*(75+18))+(75+18) logger.debug('Mapped Surface to ScreenSize 1920 & 1080 (minus markers)') del tracker # sort according to smpl_time pldata.sort_values('smpl_time',inplace=True) # get the nice samples df plsamples = make_df.make_samples_df(pldata,px2deg=px2deg) # if parsemsg: # Get msgs df # make a list of gridnotes that contain all notifications of original_pldata if they contain 'label' gridnotes = [note for note in original_pldata['notifications'] if 'label' in note.keys()] plmsgs = pd.DataFrame(); for note in gridnotes: msg = parse.parse_message(note) if not msg.empty: plmsgs = plmsgs.append(msg, ignore_index=True) plmsgs = fix_smallgrid_parser(plmsgs) else: plmsgs = original_pldata['notifications'] plevents = pd.DataFrame() return plsamples, plmsgs,plevents #%% EYELINK def raw_el_data(subject, datapath='/net/store/nbp/projects/etcomp/'): # Input: subjectname, datapath # Output: Returns pupillabs dictionary filename = os.path.join(datapath,subject,'raw') from pyedfread import edf # parses SR research EDF data files into pandas df elsamples, elevents, elnotes = edf.pread(os.path.join(filename,findFile(filename,'.EDF')[0]), trial_marker=b'') return (elsamples,elevents,elnotes) def import_el(subject, datapath='/net/store/nbp/projects/etcomp/'): # Input: subject: (str) 
name # datapath: (str) location where data is stored # Output: Returns list of 3 el df (elsamples, elmsgs, elevents) assert(type(subject)==str) # get a logger logger = logging.getLogger(__name__) # Load edf # load and preprocess data from raw data files # elsamples: contains individual EL samples # elevents: contains fixation and saccade definitions # elnotes: contains notes (meta data) associated with each trial elsamples,elevents,elnotes = raw_el_data(subject,datapath) # TODO understand and fix this count = 0 while np.any(elsamples.time>1e10) and count < 40: from pyedfread import edf # parses SR research EDF data files into pandas df imp.reload(edf) count = count + 1 # logger.error(elsamples.time[elsamples.time>1e10]) logger.error('Attention: Found sampling time above 1*e100. Clearly wrong! Trying again (check again later)') elsamples, elevents, elnotes = raw_el_data(subject,datapath) # We also delete Samples with interpolated pupil responses. In one dataset these were ~800samples. logger.warning('Deleting %.4f%% due to interpolated pupil (online during eyelink recording)'%(100*np.mean(elsamples.errors ==8))) logger.warning('Deleting %.4f%% due to other errors in the import process'%(100*np.mean((elsamples.errors !=8) & (elsamples.errors!=0)))) elsamples = elsamples.loc[elsamples.errors == 0] # We had issues with samples with negative time logger.warning('Deleting %.4f%% samples due to time<=0'%(100*np.mean(elsamples.time<=0))) elsamples = elsamples.loc[elsamples.time > 0] # Also at the end of the recording, we had time samples that were smaller than the first sample. # Note that this assumes the samples are correctly ordered and the last samples actually # refer to artefacts. 
If you use %SYNCTIME% this might be problematic (don't know how nwilming's edfread incorporates synctime) logger.warning('Deleting %.4f%% samples due to time being less than the starting time'%(100*np.mean(elsamples.time <= elsamples.time[0]))) elsamples = elsamples.loc[elsamples.time > elsamples.time[0]] elsamples = elsamples.reset_index() # Convert to same units # change to seconds to be the same as pupil elsamples['smpl_time'] = elsamples['time'] / 1000 elnotes['msg_time'] = elnotes['trialid_time'] / 1000 elnotes = elnotes.drop('trialid_time',axis=1) elevents['start'] = elevents['start'] / 1000 elevents['end'] = elevents['end'] / 1000 # TODO solve this! if np.any(elsamples.smpl_time>1e10): logger.error(elsamples.smpl_time[elsamples.smpl_time>1e10]) logger.error('Error, even after reloading the data once, found sampling time above 1*e100. This is clearly wrong. Investigate') raise Exception('Error, even after reloading the data once, found sampling time above 1*e100. This is clearly wrong. 
Investigate') # for horizontal gaze component # Idea: Logical indexing ix_left = elsamples.gx_left != -32768 ix_right = elsamples.gx_right != -32768 # take the pupil area pa of the recorded eye # set pa to NaN instead of 0 or -32768 elsamples.loc[elsamples['pa_right'] < 1e-20,'pa_right'] = np.nan elsamples.loc[~ix_right,'pa_right'] = np.nan elsamples.loc[elsamples['pa_left'] < 1e-20,'pa_left'] = np.nan elsamples.loc[~ix_left,'pa_left'] = np.nan # add pa column that takes the value that is not NaN ix_left = ~np.isnan(elsamples.pa_left) ix_right = ~np.isnan(elsamples.pa_right) # init with nan elsamples['pa'] = np.nan elsamples.loc[ix_left, 'pa'] = elsamples.pa_left[ix_left] elsamples.loc[ix_right,'pa'] = elsamples.pa_right[ix_right] # Determine which eye was recorded ix_left = elsamples.gx_left != -32768 ix_right = elsamples.gx_right != -32768 if (np.mean(ix_left | ix_right)<0.99): raise NameError('In more than 1 % neither left or right data') # for horizontal gaze component elsamples.loc[ix_left,'gx'] = elsamples.gx_left[ix_left] elsamples.loc[ix_right,'gx'] = elsamples.gx_right[ix_right] # for horizontal gaze velocity component elsamples.loc[ix_left,'gx_vel'] = elsamples.gxvel_left[ix_left] elsamples.loc[ix_right,'gx_vel'] = elsamples.gxvel_right[ix_right] # for vertical gaze component ix_left = elsamples.gy_left != -32768 ix_right = elsamples.gy_right != -32768 elsamples.loc[ix_left,'gy'] = elsamples.gy_left[ix_left] elsamples.loc[ix_right,'gy'] = elsamples.gy_right[ix_right] # for vertical gaze velocity component elsamples.loc[ix_left,'gy_vel'] = elsamples.gyvel_left[ix_left] elsamples.loc[ix_right,'gy_vel'] = elsamples.gyvel_right[ix_right] # Make (0,0) the point bottom left elsamples['gy'] = 1080 - elsamples['gy'] # "select" relevant columns elsamples = make_df.make_samples_df(elsamples) # Parse EL msg elmsgs = elnotes.apply(parse.parse_message,axis=1) elmsgs = elmsgs.drop(elmsgs.index[elmsgs.isnull().all(1)]) elmsgs = fix_smallgrid_parser(elmsgs) return 
elsamples, elmsgs, elevents def fix_smallgrid_parser(etmsgs): # This fixes the missing separation between smallgrid before and small grid after. During experimental sending both were named identical. replaceGrid = pd.Series([k for l in [13*['SMALLGRID_BEFORE'],13*['SMALLGRID_AFTER']]*6 for k in l]) ix = etmsgs.query('grid_size==13').index if len(ix) is not 156: raise RuntimeError('we need to have 156 small grid msgs') replaceGrid.index = ix etmsgs.loc[ix,'condition'] = replaceGrid # this here fixes that all buttonpresses and stop messages etc. were send as GRID and not SMALLGG for blockid in etmsgs.block.dropna().unique(): if blockid == 0: continue tmp = etmsgs.query('block==@blockid') t_before_start = tmp.query('condition=="DILATION"& exp_event=="stop"').msg_time.values t_before_end = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values t_after_start = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values t_after_end =tmp.iloc[-1].msg_time ix = tmp.query('condition=="GRID"&msg_time>@t_before_start & msg_time<=@t_before_end').index etmsgs.loc[ix,'condition'] = 'SMALLGRID_BEFORE' ix = tmp.query('condition=="GRID"&msg_time>@t_after_start & msg_time<=@t_after_end').index etmsgs.loc[ix,'condition'] = 'SMALLGRID_AFTER' return(etmsgs)
42.522346
200
0.670564
889138e8c38a61134d0f1c1dd8b79dfd0eb55e28
768
py
Python
EXPERIMENT_5/loader.py
PRamoneda/RL_PianoFingering
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
[ "MIT" ]
4
2021-09-24T13:44:22.000Z
2022-03-23T14:03:51.000Z
EXPERIMENT_5/loader.py
PRamoneda/RL_PianoFingering
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
[ "MIT" ]
null
null
null
EXPERIMENT_5/loader.py
PRamoneda/RL_PianoFingering
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
[ "MIT" ]
2
2022-02-14T10:01:10.000Z
2022-03-31T15:40:06.000Z
import music21 KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4, 'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9, 'a#': 10, 'bb': 10, 'b': 11, 'x': None} def parse_note(note): n = KEY_TO_SEMITONE[note[:-1].lower()] octave = int(note[-1]) + 1 return octave * 12 + n - 21 translate5 = { 46: 0, 48: 1, 50: 2, 51: 3, 53: 4, 55: 5, 56: 6, 58: 7, } def load_test5(times=1): sc = music21.converter.parse('test5.musicxml') rh = [translate5[parse_note(str(n.pitch).lower())] for n in sc.parts[0].flat.getElementsByClass('Note')] pieces = [] for _ in range(times): pieces.append(rh) return pieces # print(load_test5())
21.333333
108
0.492188
889140ee18ea1e06b9b18606e947a9585cb410f1
145
py
Python
DSA/Python/src/dsa/lib/math/ds/tests/fixture.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
null
null
null
DSA/Python/src/dsa/lib/math/ds/tests/fixture.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
1
2020-10-23T04:06:56.000Z
2020-10-23T04:06:56.000Z
DSA/Python/src/dsa/lib/math/ds/tests/fixture.py
JackieMa000/problems
c521558830a0bbf67f94109af92d7be4397d0a43
[ "BSD-3-Clause" ]
null
null
null
from dsa.lib.math.tests.fixture import MathTestCase class DsTestCase(MathTestCase): pass class ParenthesesTestCase(DsTestCase): pass
14.5
51
0.77931
8892e5ee057de88b320831e0b236a98d613b4df9
763
py
Python
pacu/data/roles.py
RyanJarv/Pacu2
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
[ "MIT" ]
1
2022-03-09T14:51:54.000Z
2022-03-09T14:51:54.000Z
pacu/data/roles.py
RyanJarv/Pacu2
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
[ "MIT" ]
null
null
null
pacu/data/roles.py
RyanJarv/Pacu2
27df4bcf296fc8f467d3dc671a47bf9519ce7a24
[ "MIT" ]
null
null
null
# import typing # # import boto3 # import typer # # import pacu.data as p # # # if typing.TYPE_CHECKING: # from mypy_boto3_iam import type_defs as t # from mypy_boto3_iam.client import IAMClient # from mypy_boto3_iam.paginator import ListRolesPaginator # # # def fetch(profile_name: typing.Optional[str] = typer.Option(default=None)): # sess = boto3.session.Session(profile_name=profile_name) # iam: IAMClient = sess.client('iam') # paginator: ListRolesPaginator = iam.get_paginator('list_roles') # page_iterator: typing.Iterator[t.ListRolesResponseTypeDef] = paginator.paginate() # for page in page_iterator: # p.db["roles"].insert_all(page['Roles'], pk="RoleName") # # # if __name__ == '__main__': # typer.run(fetch)
29.346154
87
0.709043
88940ecc81bb7244f9aadc5c0b28b58ae24e3599
1,698
py
Python
v2ex_daily.py
ZHLHZHU/v2ex
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
[ "MIT" ]
null
null
null
v2ex_daily.py
ZHLHZHU/v2ex
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
[ "MIT" ]
null
null
null
v2ex_daily.py
ZHLHZHU/v2ex
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys import re import http.cookiejar import urllib.request # your v2ex cookie value for key [auth] after login # refer README.md if cannot find cookie [auth] V2EX_COOKIE = '' V2EX_DOMAIN = r'v2ex.com' V2EX_URL_START = r'https://' + V2EX_DOMAIN V2EX_MISSION = V2EX_URL_START + r'/mission/daily' V2EX_COIN_URL = r'/mission/daily/redeem?once=' def get_once_url(data): p = '/mission/daily/redeem\?once=\d+' m = re.search(p, data.decode()) if m: return m.group() else: return None def make_cookie(name, value): return http.cookiejar.Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=V2EX_DOMAIN, domain_specified=True, domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None, comment_url=None, rest=None ) if __name__ == '__main__': cj = http.cookiejar.CookieJar() cj.set_cookie(make_cookie('auth', V2EX_COOKIE)) opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj)) opener.addheaders = [ ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0'), ('Referer', V2EX_MISSION) ] opener.open(V2EX_URL_START).read() data = opener.open(V2EX_MISSION).read() once = get_once_url(data) if not once: print('"once" not found, maybe you already got coins') sys.exit(-1) v2ex_coin_url = V2EX_URL_START + once print(v2ex_coin_url) opener.open(v2ex_coin_url).read()
24.257143
92
0.636631
8894291bf420c1eeb84dea70fc3a6ddba70429ed
2,309
py
Python
read_env.py
sloria/read_env
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
[ "MIT" ]
null
null
null
read_env.py
sloria/read_env
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
[ "MIT" ]
1
2017-07-18T20:49:43.000Z
2017-07-20T15:14:10.000Z
read_env.py
sloria/read_env
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
[ "MIT" ]
1
2018-04-11T11:55:55.000Z
2018-04-11T11:55:55.000Z
# -*- coding: utf-8 -*- import re import shlex import os import inspect __version__ = '1.1.0' try: FileNotFoundError except NameError: # Python 2 FileNotFoundError = IOError ENV = '.env' def read_env(path=None, environ=None, recurse=True): """Reads a .env file into ``environ`` (which defaults to ``os.environ``). If .env is not found in the directory from which this function is called, recurse up the directory tree until a .env file is found. """ environ = environ if environ is not None else os.environ # By default, start search from the same file this function is called if path is None: frame = inspect.currentframe().f_back caller_dir = os.path.dirname(frame.f_code.co_filename) path = os.path.join(os.path.abspath(caller_dir), ENV) if recurse: current = path pardir = os.path.abspath(os.path.join(current, os.pardir)) while current != pardir: target = os.path.join(current, ENV) if os.path.exists(target): path = os.path.abspath(target) break else: current = os.path.abspath(os.path.join(current, os.pardir)) pardir = os.path.abspath(os.path.join(current, os.pardir)) if not path: raise FileNotFoundError('Could not find a .env file') with open(path, 'r') as fp: content = fp.read() parsed = parse_env(content) for key, value in parsed.items(): environ.setdefault(key, value) _ITEM_RE = re.compile(r'[A-Za-z_][A-Za-z_0-9]*') # From Honcho. See NOTICE file for license details. def parse_env(content): """Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values. """ values = {} for line in content.splitlines(): lexer = shlex.shlex(line, posix=True) tokens = list(lexer) # parses the assignment statement if len(tokens) < 3: continue name, op = tokens[:2] value = ''.join(tokens[2:]) if op != '=': continue if not _ITEM_RE.match(name): continue value = value.replace(r'\n', '\n') value = value.replace(r'\t', '\t') values[name] = value return values
30.381579
85
0.603291
889bb7e2d51608191ee475ae210800ea251a72c4
2,535
py
Python
trinity/contextgroup.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
14
2020-08-24T18:23:31.000Z
2021-11-04T14:11:04.000Z
trinity/contextgroup.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
19
2020-08-25T15:57:05.000Z
2021-07-07T00:49:45.000Z
trinity/contextgroup.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
7
2020-08-24T22:53:02.000Z
2022-03-28T18:51:48.000Z
import asyncio import sys from types import TracebackType from typing import Any, AsyncContextManager, List, Optional, Sequence, Tuple, Type from trio import MultiError from p2p.asyncio_utils import create_task class AsyncContextGroup: def __init__(self, context_managers: Sequence[AsyncContextManager[Any]]) -> None: self.cms = tuple(context_managers) self.cms_to_exit: Sequence[AsyncContextManager[Any]] = tuple() async def __aenter__(self) -> Tuple[Any, ...]: futures = [create_task(cm.__aenter__(), f'AsyncContextGroup/{repr(cm)}') for cm in self.cms] await asyncio.wait(futures) # Exclude futures not successfully entered from the list so that we don't attempt to exit # them. self.cms_to_exit = tuple( cm for cm, future in zip(self.cms, futures) if not future.cancelled() and not future.exception()) try: return tuple(future.result() for future in futures) except: # noqa: E722 await self._exit(*sys.exc_info()) raise async def _exit(self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: if not self.cms_to_exit: return # don't use gather() to ensure that we wait for all __aexit__s # to complete even if one of them raises done, _pending = await asyncio.wait( [cm.__aexit__(exc_type, exc_value, traceback) for cm in self.cms_to_exit]) # This is to ensure we re-raise any exceptions our coroutines raise when exiting. errors: List[Tuple[Type[BaseException], BaseException, TracebackType]] = [] for d in done: try: d.result() except BaseException: errors.append(sys.exc_info()) if errors: raise MultiError( tuple(exc_value.with_traceback(exc_tb) for _, exc_value, exc_tb in errors)) async def __aexit__(self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: # Since exits are running in parallel, they can't see each # other exceptions, so send exception info from `async with` # body to all. await self._exit(exc_type, exc_value, traceback)
40.887097
100
0.613018
889d4cf4c9e065bcd8eb21c034baa0e27279103e
895
py
Python
setup.py
sophilabs/trybox-django
87776a75e995a903d08b06dc47ec54a7ce796400
[ "MIT" ]
null
null
null
setup.py
sophilabs/trybox-django
87776a75e995a903d08b06dc47ec54a7ce796400
[ "MIT" ]
null
null
null
setup.py
sophilabs/trybox-django
87776a75e995a903d08b06dc47ec54a7ce796400
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages VERSION = '0.2' setup( name='trybox-django', version=VERSION, description='TryBox:Django', author='Sophilabs', author_email='contact@sophilabs.com', url='https://github.com/sophilabs/trybox-django', download_url='http://github.com/sophilabs/trybox-django/tarball/trybox-django-v{0}#egg=trybox-django'.format(VERSION), license='MIT', install_requires=['django', 'trybox'], dependency_links=['https://github.com/sophilabs/trybox/tarball/master#egg=trybox'], packages=find_packages(), classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Framework :: Django', ], )
34.423077
122
0.660335
889e2666b623f8c9aac578a42112779d0960a46c
1,152
py
Python
bflib/tables/gemsandjewelry/gemtype.py
ChrisLR/BasicDungeonRL
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
[ "MIT" ]
3
2017-10-28T11:28:38.000Z
2018-09-12T09:47:00.000Z
bflib/tables/gemsandjewelry/gemtype.py
ChrisLR/BasicDungeonRL
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
[ "MIT" ]
null
null
null
bflib/tables/gemsandjewelry/gemtype.py
ChrisLR/BasicDungeonRL
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
[ "MIT" ]
null
null
null
from bflib.items import gems class GemTypeRow(object): __slots__ = ["min_percent", "max_percent", "gem_type"] def __init__(self, min_percent, max_percent, gem_type): self.min_percent = min_percent self.max_percent = max_percent self.gem_type = gem_type class GemTypeTable(object): rows = [ GemTypeRow(1, 10, gems.Greenstone), GemTypeRow(11, 20, gems.Malachite), GemTypeRow(21, 28, gems.Aventurine), GemTypeRow(29, 38, gems.Phenalope), GemTypeRow(39, 45, gems.Amethyst), GemTypeRow(46, 54, gems.Fluorospar), GemTypeRow(55, 60, gems.Garnet), GemTypeRow(61, 65, gems.Alexandrite), GemTypeRow(66, 70, gems.Topaz), GemTypeRow(71, 75, gems.Bloodstone), GemTypeRow(76, 79, gems.Sapphire), GemTypeRow(80, 89, gems.Diamond), GemTypeRow(90, 94, gems.FireOpal), GemTypeRow(95, 97, gems.Ruby), GemTypeRow(98, 100, gems.Emerald), ] @classmethod def get(cls, roll_value): return next((row for row in cls.rows if row.min_percent <= roll_value <= row.max_percent))
32
74
0.623264
889e61707ed575e870d03501921a624e597540da
158
py
Python
RecPatrones/__init__.py
riemannruiz/MediaMovil
282cf498551671f97436bff563c8b1a5c2dbf4ad
[ "MIT" ]
null
null
null
RecPatrones/__init__.py
riemannruiz/MediaMovil
282cf498551671f97436bff563c8b1a5c2dbf4ad
[ "MIT" ]
1
2019-03-21T21:16:00.000Z
2019-03-21T21:16:00.000Z
RecPatrones/__init__.py
riemannruiz/MediaMovil
282cf498551671f97436bff563c8b1a5c2dbf4ad
[ "MIT" ]
3
2019-03-20T22:43:36.000Z
2019-03-27T19:16:38.000Z
from Simulacion import Optimizacion from Simulacion import Graficos from Simulacion import Genetico from Simulacion import Model_close from mylib import mylib
31.6
35
0.879747
889eb3167b3872b5371c8a539d4347c8d68744c1
760
py
Python
SBTK_League_Helper/src/tools/exceptions.py
juanchodepisa/sbtk
7cba7748e75a02b491e551d6c1be9bd7987c5051
[ "MIT" ]
null
null
null
SBTK_League_Helper/src/tools/exceptions.py
juanchodepisa/sbtk
7cba7748e75a02b491e551d6c1be9bd7987c5051
[ "MIT" ]
null
null
null
SBTK_League_Helper/src/tools/exceptions.py
juanchodepisa/sbtk
7cba7748e75a02b491e551d6c1be9bd7987c5051
[ "MIT" ]
null
null
null
# This need to be sorted out in a smarter way class InitializationError(Exception): def __init__(self, SomeClass, description): self.value = SomeClass self.description = description.format(SomeClass.__name__) def __str__(self): return self.description class ReservedValueError(Exception): def __init__(self, expected, received, description): self.value= received self.expected = expected self.description = description.format(expected, received) def __str__(self): return self.description class ApplicationError(Exception): pass class NonFatalError(ApplicationError): pass class FatalError(Exception): pass class UserError(NonFatalError): pass
24.516129
65
0.696053
88a1741eae3c2334f95c70dcecbe762feec732c9
1,964
py
Python
tools/python/smessage_encryption.py
radetsky/themis
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
[ "Apache-2.0" ]
1,561
2015-05-20T05:19:29.000Z
2022-03-31T17:32:55.000Z
tools/python/smessage_encryption.py
radetsky/themis
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
[ "Apache-2.0" ]
536
2015-05-20T13:57:08.000Z
2022-03-15T18:02:59.000Z
tools/python/smessage_encryption.py
radetsky/themis
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
[ "Apache-2.0" ]
141
2015-05-20T13:22:45.000Z
2022-03-29T01:29:40.000Z
# # Copyright (c) 2017 Cossack Labs Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from base64 import b64encode, b64decode from pythemis import smessage _, COMMAND, SENDER_PRIVATE_KEY, RECIPIENT_PUBLIC_KEY, MESSAGE = range(5) if len(sys.argv) != 5: print('Usage: <command: enc | dec | sign | verify > <send_private_key> <recipient_public_key> <message>') exit(1) command = sys.argv[COMMAND] private_key_path = sys.argv[SENDER_PRIVATE_KEY] public_key_path = sys.argv[RECIPIENT_PUBLIC_KEY] message = sys.argv[MESSAGE] with open(private_key_path, 'rb') as f: private_key = f.read() with open(public_key_path, 'rb') as f: public_key = f.read() message_encrypter = smessage.SMessage(private_key, public_key) if command == 'enc': encrypted = message_encrypter.wrap(message.encode('utf-8')) encoded = b64encode(encrypted) print(encoded.decode('ascii')) elif command == 'dec': decoded = b64decode(message.encode('utf-8')) decrypted = message_encrypter.unwrap(decoded) print(decrypted.decode('utf-8')) elif command == 'sign': encrypted = smessage.ssign(private_key, message.encode('utf-8')) encoded = b64encode(encrypted) print(encoded.decode('ascii')) elif command == 'verify': decoded = b64decode(message.encode('utf-8')) decrypted = smessage.sverify(public_key, decoded) print(decrypted.decode('utf-8')) else: print('Wrong command, use <enc | dev | sign | verify>') exit(1)
33.288136
109
0.726578
88a50848a3ac961cc89962bc6f936cbbfc7cd63c
819
py
Python
tests/apps/test_rpc.py
PyCN/pulsar
fee44e871954aa6ca36d00bb5a3739abfdb89b26
[ "BSD-3-Clause" ]
1,410
2015-01-02T14:55:07.000Z
2022-03-28T17:22:06.000Z
tests/apps/test_rpc.py
PyCN/pulsar
fee44e871954aa6ca36d00bb5a3739abfdb89b26
[ "BSD-3-Clause" ]
194
2015-01-22T06:18:24.000Z
2020-10-20T21:21:58.000Z
tests/apps/test_rpc.py
PyCN/pulsar
fee44e871954aa6ca36d00bb5a3739abfdb89b26
[ "BSD-3-Clause" ]
168
2015-01-31T10:29:55.000Z
2022-03-14T10:22:24.000Z
'''Tests the rpc middleware and utilities. It uses the calculator example.''' import unittest from pulsar.apps import rpc from pulsar.apps.http import HttpWsgiClient class rpcTest(unittest.TestCase): def proxy(self): from examples.calculator.manage import Site http = HttpWsgiClient(Site()) return rpc.JsonProxy('http://127.0.0.1:8060/', http=http, timeout=20) def test_proxy(self): p = self.proxy() http = p.http self.assertTrue(len(http.headers)) self.assertEqual(http.headers['user-agent'], 'Pulsar-Http-Wsgi-Client') self.assertTrue(http.wsgi_callable) self.assertEqual(p._version, '2.0') async def test_addition(self): p = self.proxy() response = await p.calc.add(4, 5) self.assertEqual(response, 9)
30.333333
79
0.660562
88a536261708445fb27628f3196b4050db295eff
22,042
py
Python
BAN_main.py
EMBEDDIA/BAN
5f9cbb383c69f03bf8aa55fc1149f3fb78e1af11
[ "MIT" ]
2
2021-07-01T16:24:45.000Z
2021-12-08T06:59:08.000Z
BAN_main.py
EMBEDDIA/BAN
5f9cbb383c69f03bf8aa55fc1149f3fb78e1af11
[ "MIT" ]
1
2021-12-08T07:01:46.000Z
2021-12-08T07:01:46.000Z
BAN_main.py
KristianMiok/BAN
e8afc73c60152186b073d7dacc04e39888ec67fc
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """EN3-BT MCD Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1cnvSgNDexJ0cqrTWGygI_smZ0y8EIWZn """ import torch import numpy as np import tqdm import copy from torch.nn import functional as F from torch.nn.modules.module import Module from sklearn.calibration import calibration_curve, CalibratedClassifierCV from torch.nn.modules.activation import MultiheadAttention from torch.nn.modules.container import ModuleList from torch.nn.init import xavier_uniform_ from torch.nn.modules.dropout import Dropout from torch.nn.modules.linear import Linear from torch.nn.modules.normalization import LayerNorm from torch.utils.data import DataLoader, Dataset import logging logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') logging.getLogger().setLevel(logging.INFO) from utils import * import math from torch.autograd import Variable import re import pandas as pd torch.manual_seed(2) def set_dropout_to_train(m): if type(m) == torch.nn.Dropout: m.train() class Embedder(Module): def __init__(self, vocab_size, d_model): super().__init__() self.d_model = d_model print(vocab_size, d_model) self.embed = torch.nn.Embedding(vocab_size + 1, d_model) def forward(self, x): x = self.embed(x) return x class PositionalEncoder(Module): def __init__(self, d_model, max_seq_len = 768, dropout = 0.5): super().__init__() self.d_model = d_model self.dropout = Dropout(dropout) # create constant 'pe' matrix with values dependant on # pos and i pe = torch.zeros(max_seq_len, d_model) for pos in range(max_seq_len): for i in range(0, d_model, 2): pe[pos, i] = \ math.sin(pos / (10000 ** ((2 * i)/d_model))) pe[pos, i + 1] = \ math.cos(pos / (10000 ** ((2 * (i + 1))/d_model))) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): # make embeddings relatively larger x = x * math.sqrt(self.d_model) #add constant to embedding seq_len = x.size(1) pe = 
Variable(self.pe[:,:seq_len], requires_grad=False) if x.is_cuda: pe.cuda() x = x + pe return self.dropout(x) def get_clones(module, N): return torch.nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def attention(q, k, v, d_k, mask=None, dropout=None): scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k) scores = F.softmax(scores, dim=-1) if dropout is not None: scores = dropout(scores) output = torch.matmul(scores, v) return output class MultiHeadAttention(Module): def __init__(self, heads, d_model, dropout = 0.5): super().__init__() self.d_model = d_model self.d_k = d_model // heads self.h = heads self.q_linear = torch.nn.Linear(d_model, d_model) self.v_linear = torch.nn.Linear(d_model, d_model) self.k_linear = torch.nn.Linear(d_model, d_model) self.dropout = Dropout(dropout) self.out = torch.nn.Linear(d_model, d_model) def forward(self, q, k, v, mask=None): bs = q.size(0) # perform linear operation and split into h heads k = self.k_linear(k).view(bs, -1, self.h, self.d_k) q = self.q_linear(q).view(bs, -1, self.h, self.d_k) v = self.v_linear(v).view(bs, -1, self.h, self.d_k) # transpose to get dimensions bs * h * sl * d_model k = k.transpose(1,2) q = q.transpose(1,2) v = v.transpose(1,2) # calculate attention using function we will define next scores = attention(q, k, v, self.d_k, mask, self.dropout) # concatenate heads and put through final linear layer concat = scores.transpose(1,2).contiguous().view(bs, -1, self.d_model) output = self.out(concat) return output class Norm(Module): def __init__(self, d_model, eps = 1e-6): super().__init__() self.size = d_model # create two learnable parameters to calibrate normalisation self.alpha = torch.nn.Parameter(torch.ones(self.size)) self.bias = torch.nn.Parameter(torch.zeros(self.size)) self.eps = eps def forward(self, x): norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) \ / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias return norm class FeedForward(Module): def __init__(self, d_model, 
d_ff=2048, dropout = 0.3): super().__init__() # We set d_ff as a default to 2048 self.linear_1 = torch.nn.Linear(d_model, d_ff) self.dropout = Dropout(dropout) self.linear_2 = torch.nn.Linear(d_ff, d_model) def forward(self, x): x = self.dropout(F.relu(self.linear_1(x))) x = self.linear_2(x) return x class EncoderLayer(Module): def __init__(self, d_model, heads, dropout = 0.3): super().__init__() self.norm_1 = Norm(d_model) self.norm_2 = Norm(d_model) self.attn = MultiHeadAttention(heads, d_model) self.ff = FeedForward(d_model) self.dropout_1 = Dropout(dropout) self.dropout_2 = Dropout(dropout) def forward(self, x, mask = None): x2 = self.norm_1(x) x = x + self.dropout_1(self.attn(x2,x2,x2,mask = None)) x2 = self.norm_2(x) x = x + self.dropout_2(self.ff(x2)) return x class Encoder(Module): def __init__(self, vocab_size = 1000, d_model = 32, N = 1, heads= 1): super().__init__() self.N = N self.embed = Embedder(vocab_size, d_model) self.pe = PositionalEncoder(d_model) self.layers = get_clones(EncoderLayer(d_model, heads), N) self.norm = Norm(d_model) self.output_layer = torch.nn.Linear(d_model**2, 1) self.output_activation = torch.nn.Sigmoid() def forward(self, src): bdim = src.shape[0] x = self.embed(src) #tlen = int(math.sqrt(src.shape[1])) #x = src.reshape(int(src.shape[0]), tlen, tlen) x = self.pe(x) for i in range(self.N): x = self.layers[i](x) x = self.norm(x) outputs = torch.autograd.Variable(torch.zeros(bdim), requires_grad = False) for j in range(bdim): outputs[j] = self.output_layer(x[j,:,:].flatten()) s = self.output_activation(outputs) return s class Dataset_single(Dataset): def __init__(self, features, targets = None, transform=None): self.features = features if not targets is None: self.targets = np.array(targets) else: self.targets = None def __len__(self): return self.features.shape[0] def __getitem__(self, index): instance = torch.tensor(self.features[index], dtype=torch.long, device='cpu') if self.targets is not None: target = 
torch.as_tensor(self.targets.reshape(-1, 1)[index], device='cpu') else: target = -1 return instance, target # This is were we train our model class BAN: def __init__ (self, num_epochs = 200, vocab_size = 100000, stopping_crit = 5, learning_rate = 0.001, tokenizer_num_words = 100000, max_padding = 256,N=1,heads = 1, batch_size = 64): #self.learning_rate = 0.001 self.d_model = max_padding self.N = N self.attention_heads = heads self.max_padding = max_padding self.image_folder = None self.learning_rate = learning_rate self.classes_ = [0,1] self.validation_index = 0 self.batch_size = batch_size self.threshold_perf_tuples = [] self.num_epochs = num_epochs self.probability_threshold = None self.vocab_size = vocab_size self.stopping_crit = stopping_crit self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') logging.info("Using {}".format(self.device)) self.tokenizer = Tokenizer(num_words=tokenizer_num_words) def pad_sequence(self, list_of_texts): # https://towardsdatascience.com/text-classification-in-keras-part-2-how-to-use-the-keras-tokenizer-word-representations-fd571674df23 # pad_seq = get_input_attentions(list_of_texts, max_size = self.max_padding) # logging.info(pad_seq.shape) self.tokenizer.fit_on_texts(list_of_texts) sequences = self.tokenizer.texts_to_sequences(list_of_texts) pad_seq = pad_sequences(sequences, maxlen=self.max_padding) return pad_seq def encode_input_text_integer(self, list_of_texts, mapping = None): ## somewhat adhoc -> can be improved -> TODO: Byte pair. 
unique_words = set() for text in list_of_texts: [unique_words.add(x) for x in text.strip().split()] unique_words = list(unique_words) if mapping is None: mapping = {} for index, word in enumerate(unique_words): mapping[word] = index+1 encoded_texts = [] for text in list_of_texts: encoded_sentence = [mapping[x] for x in text.strip().split()] + [0]*self.max_padding encoded_texts.append(np.array(encoded_sentence[0:self.max_padding])) return encoded_texts, mapping def predict_proba(self, input_text_sequences, T=10, output_mean_probabilities = True): pad_seq = input_text_sequences #pad_sequences(input_text_sequences, maxlen=self.max_padding) pad_seq = np.array(pad_seq) val_dataset = Dataset_single(pad_seq) val_dataset = DataLoader(val_dataset, batch_size = self.batch_size, shuffle = False) outputs = [] p = [] w= [] for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total = len(val_dataset)): features = features.to(self.device) collection_of_preds = [] self.model.eval() ## Immutable during predictions. self.model.apply(set_dropout_to_train) ## Unlock Dropout layers. 
for _ in range(T): prediction = self.model(features).detach().cpu().numpy() collection_of_preds.append(prediction) p = np.matrix(collection_of_preds).T w.append(p) w = np.concatenate(w, axis = 0) assert w.shape[0] == pad_seq.shape[0] assert w.shape[1] == T MC_pred = w.reshape(pad_seq.shape[0], T) pred = pd.DataFrame(MC_pred) MC_pred_positive = pred.mean(axis=1).values MC_pred_negative = 1 - MC_pred_positive MC_pred = np.vstack((MC_pred_negative, MC_pred_positive)).T self.model.train() return MC_pred.astype(np.float64) def predict(self, input_text_sequences, T=100, output_mean_probabilities = True): pad_seq = input_text_sequences #pad_sequences(input_text_sequences, maxlen=self.max_padding) pad_seq = np.array(pad_seq) val_dataset = Dataset_single(pad_seq) val_dataset = DataLoader(val_dataset, batch_size = self.batch_size, shuffle = False) outputs = [] p = [] w= [] for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total = len(val_dataset)): features = features.to(self.device) collection_of_preds = [] self.model.eval() ## Immutable during predictions. self.model.apply(set_dropout_to_train) ## Unlock Dropout layers. 
for _ in range(T): prediction = self.model(features).detach().cpu().numpy() collection_of_preds.append(prediction) p = np.matrix(collection_of_preds).T w.append(p) w = np.concatenate(w, axis = 0) assert w.shape[0] == pad_seq.shape[0] assert w.shape[1] == T if output_mean_probabilities: MC_pred = w.reshape(pad_seq.shape[0], T) pred = pd.DataFrame(MC_pred) MC_pred = pred.mean(axis=1).values else: MC_pred = w.reshape(pad_seq.shape[0], T) self.model.train() return MC_pred def ece_score(self, probab_pred, real_y, mbin = 3, threshold = 0.5): all_vals = len(real_y) bin_perf = [] current_bin = 0 predictions = probab_pred.copy() predictions[predictions >= threshold] = 1 predictions[predictions < threshold] = 0 reals_internal = [] predictions_internal = [] ## compute bins (last one is extended with the remainder) intercept_bins = [x for x in range(1,all_vals) if x % mbin == 0] remainder = all_vals % mbin if len(intercept_bins) == 0: intercept_bins = [all_vals] intercept_bins[-1] += remainder intercept_index = 0 for j in range(all_vals): if j == intercept_bins[intercept_index] and j > 0: if intercept_index < len(intercept_bins)-1: intercept_index += 1 current_bin += 1 equals = np.where(np.array(reals_internal) == np.array(predictions_internal)) acc_bin = len(equals)/len(predictions_internal) conf_bin = np.mean(np.array(predictions_internal)) bin_perf.append([current_bin, acc_bin, conf_bin,len(reals_internal)]) reals_internal = [real_y[j]] predictions_internal = [predictions[j]] else: reals_internal.append(real_y[j]) predictions_internal.append(predictions[j]) ece_score_final = 0 for bins in bin_perf: bin_size = bins[3] total = len(probab_pred) partial = (bin_size/total) * np.abs(bins[1] - bins[2]) ece_score_final += partial return ece_score_final def fit(self, input_text_sequences, targets, val_percentage = 0.2, adaptive_threshold = True, validation_metric = "precision"): """ The main fit method. 
Given an ordered set of documents, this train the architecture along with intermediary, validation set-based calibration. The validation percentage is specified with :param input_text_sequences: inputs :param targets: target vector :val_percentage: percentage used for stopping + calibration assessment """ ## generate stratified split for validation already_traversed = set() total_val = int(val_percentage * len(targets)) validation_indices = [] training_indices = [] trigger = False vnum = int(input_text_sequences.shape[0]*val_percentage) input_text_sequences = input_text_sequences[vnum:] targets = targets[vnum:] self.validation_index = vnum ## get val data val_sequences = input_text_sequences[:vnum] val_targets = targets[:vnum] train_dataset = Dataset_single(input_text_sequences, targets) train_dataset = DataLoader(train_dataset, batch_size = self.batch_size, shuffle = True) val_dataset = Dataset_single(val_sequences, val_targets) val_dataset = DataLoader(val_dataset, batch_size = 1, shuffle = False) self.validation_loader = val_dataset ## this is used for temperature-based calibration self.loss = torch.nn.BCELoss() self.model = Encoder(vocab_size = self.vocab_size, d_model = self.d_model, N = self.N, heads= self.attention_heads) self.model.train() self.optimizer = torch.optim.Adamax(self.model.parameters(), lr=self.learning_rate) self.num_params = sum(p.numel() for p in self.model.parameters()) logging.info("Number of parameters {}".format(self.num_params)) # for param_tensor in self.model.state_dict(): # logging.info(" ".join(str(x) for x in [param_tensor, "\t", self.model.state_dict()[param_tensor].size()])) current_loss = 0 loss = 1 stopping_iteration = 0 amax = 0 stopping = 0 top_state_dict = None g_amax = 0 for epoch in range(self.num_epochs): if stopping >= self.stopping_crit: logging.info("Stopping ..") break # here we put all the losses losses_per_batch = [] self.model.train() for i, (features, labels) in tqdm.tqdm(enumerate(train_dataset), total = 
len(train_dataset)): # defining the input features = features.to(self.device) labels = labels.to(self.device) self.model.to(self.device) outputs = self.model(features) ## if unable to predict predict random. loss = self.loss(outputs, labels.view(-1).cpu().float()) self.optimizer.zero_grad() loss.backward() self.optimizer.step() losses_per_batch.append(float(loss)) means_pred = self.predict(val_sequences, T = 30) assert len(means_pred) == len(val_targets) val_acc = 0 if adaptive_threshold: for threshold in np.arange(0.1,0.9,0.0005): ## Copy not to overwrite means = means_pred.copy() means[means >= threshold] = 1 means[means < threshold] = 0 if validation_metric == "accuracy": acc = metrics.accuracy_score(val_targets, means) else: acc = metrics.precision_score(val_targets, means) * metrics.accuracy_score(val_targets, means) if val_acc < acc: val_acc = acc if acc > amax: amax = acc self.probability_threshold = threshold self.threshold_perf_tuples.append([amax, threshold]) top_state_dict = self.model.state_dict() logging.info("New top Score: {}, thr: {}".format(amax, threshold)) else: threshold = 0.5 means = means_pred.copy() means[means >= threshold] = 1 means[means < threshold] = 0 acc = metrics.accuracy_score(val_targets, means) if val_acc < acc: val_acc = acc if acc > amax: amax = acc self.probability_threshold = threshold self.threshold_perf_tuples.append([amax, threshold]) top_state_dict = self.model.state_dict() logging.info("New top Acc: {}, thr: {}".format(amax, threshold)) if amax > val_acc: stopping += 1 mean_loss = np.mean(losses_per_batch) logging.info("epoch {}, mean loss per batch {}, threshold: {}, MaxScore: {}".format(epoch, mean_loss, np.round(self.probability_threshold,2), amax)) ## revert to the top-performing parameter setting. 
self.model.load_state_dict(top_state_dict) fop, mpv = calibration_curve(val_targets, means_pred, n_bins=10) plt.plot([0, 1], [0, 1], linestyle='--', color = "black") plt.plot(mpv, fop, marker='.', color = "red") plt.xlabel("Mean prediction value") plt.ylabel("Fraction of positives") plt.savefig(self.image_folder+"/training_cal_{}_{}_visualization.pdf".format(self.num_epochs, adaptive_threshold), dpi = 300) plt.clf() # Return model return self.model if __name__ == "__main__": # Read data from file import pandas as pd from sklearn.utils import shuffle from sklearn import metrics import numpy as np from sklearn.model_selection import StratifiedKFold import os from sklearn.datasets import fetch_20newsgroups newsgroups_train = fetch_20newsgroups(subset='train') ## some random data X = newsgroups_train['data'] Y = newsgroups_train['target'] print(len(X)) skf = StratifiedKFold(n_splits=5) final_scores = [] heads = 2 max_padding = 200 learning_rate = 0.01 num_epochs = 300 num_layers = 2 for train_index, test_index in skf.split(X, Y): nnet = BAN(heads = heads, max_padding = max_padding, learning_rate = learning_rate, num_epochs = num_epochs, batch_size = 8, N = num_layers, stopping_crit = 20) total_padded = nnet.pad_sequence(X) x_train = total_padded[train_index] x_test = total_padded[test_index] y_train = Y[train_index] y_test = Y[test_index] nnet.fit(x_train, y_train, adaptive_threshold = False, val_percentage = 0.1) predictions = nnet.predict(x_test) score = metrics.f1_score(predictions, y_test) final_scores.append(score) mean_per = np.mean(final_scores) std_per = np.std(final_scores) print("Final performance (F1): {mean_per} +- {std_per}")
35.957586
185
0.585881
88a59898c3adf079bc749d806b0bcd5596a4b2d8
16,974
py
Python
src/main/python/ui/jriver.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
16
2019-04-12T00:04:56.000Z
2022-03-15T14:26:56.000Z
src/main/python/ui/jriver.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
400
2018-08-27T10:04:00.000Z
2022-03-15T21:32:33.000Z
src/main/python/ui/jriver.py
bmiller/beqdesigner
36d0c780507a564536038e2c9fc3b03b75dedaf4
[ "MIT" ]
6
2018-09-19T21:02:27.000Z
2020-10-18T04:11:01.000Z
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'jriver.ui' # # Created by: PyQt5 UI code generator 5.15.2 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_jriverDspDialog(object): def setupUi(self, jriverDspDialog): jriverDspDialog.setObjectName("jriverDspDialog") jriverDspDialog.resize(1703, 858) self.dialogLayout = QtWidgets.QGridLayout(jriverDspDialog) self.dialogLayout.setObjectName("dialogLayout") self.mainLayout = QtWidgets.QVBoxLayout() self.mainLayout.setObjectName("mainLayout") self.configLayout = QtWidgets.QGridLayout() self.configLayout.setObjectName("configLayout") self.configRow1Layout = QtWidgets.QHBoxLayout() self.configRow1Layout.setObjectName("configRow1Layout") self.newConfigButton = QtWidgets.QToolButton(jriverDspDialog) self.newConfigButton.setObjectName("newConfigButton") self.configRow1Layout.addWidget(self.newConfigButton) self.findFilenameButton = QtWidgets.QToolButton(jriverDspDialog) self.findFilenameButton.setObjectName("findFilenameButton") self.configRow1Layout.addWidget(self.findFilenameButton) self.loadZoneButton = QtWidgets.QToolButton(jriverDspDialog) self.loadZoneButton.setObjectName("loadZoneButton") self.configRow1Layout.addWidget(self.loadZoneButton) self.filename = QtWidgets.QLineEdit(jriverDspDialog) self.filename.setReadOnly(True) self.filename.setObjectName("filename") self.configRow1Layout.addWidget(self.filename) self.saveButton = QtWidgets.QToolButton(jriverDspDialog) self.saveButton.setEnabled(False) self.saveButton.setObjectName("saveButton") self.configRow1Layout.addWidget(self.saveButton) self.saveAsButton = QtWidgets.QToolButton(jriverDspDialog) self.saveAsButton.setEnabled(False) self.saveAsButton.setObjectName("saveAsButton") self.configRow1Layout.addWidget(self.saveAsButton) self.uploadButton = QtWidgets.QToolButton(jriverDspDialog) 
self.uploadButton.setEnabled(False) self.uploadButton.setObjectName("uploadButton") self.configRow1Layout.addWidget(self.uploadButton) self.configLayout.addLayout(self.configRow1Layout, 0, 0, 1, 1) self.configRow2Layout = QtWidgets.QHBoxLayout() self.configRow2Layout.setObjectName("configRow2Layout") self.backButton = QtWidgets.QToolButton(jriverDspDialog) self.backButton.setEnabled(False) self.backButton.setObjectName("backButton") self.configRow2Layout.addWidget(self.backButton) self.forwardButton = QtWidgets.QToolButton(jriverDspDialog) self.forwardButton.setEnabled(False) self.forwardButton.setObjectName("forwardButton") self.configRow2Layout.addWidget(self.forwardButton) self.outputFormat = QtWidgets.QLineEdit(jriverDspDialog) self.outputFormat.setReadOnly(True) self.outputFormat.setObjectName("outputFormat") self.configRow2Layout.addWidget(self.outputFormat) self.blockSelector = QtWidgets.QComboBox(jriverDspDialog) self.blockSelector.setObjectName("blockSelector") self.configRow2Layout.addWidget(self.blockSelector) self.configRow2Layout.setStretch(2, 1) self.configRow2Layout.setStretch(3, 1) self.configLayout.addLayout(self.configRow2Layout, 1, 0, 1, 1) self.mainLayout.addLayout(self.configLayout) self.selectorLayout = QtWidgets.QGridLayout() self.selectorLayout.setObjectName("selectorLayout") self.moveButtonsLayout = QtWidgets.QVBoxLayout() self.moveButtonsLayout.setObjectName("moveButtonsLayout") self.moveTopButton = QtWidgets.QToolButton(jriverDspDialog) self.moveTopButton.setEnabled(False) self.moveTopButton.setObjectName("moveTopButton") self.moveButtonsLayout.addWidget(self.moveTopButton) self.moveUpButton = QtWidgets.QToolButton(jriverDspDialog) self.moveUpButton.setEnabled(False) self.moveUpButton.setObjectName("moveUpButton") self.moveButtonsLayout.addWidget(self.moveUpButton) spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.moveButtonsLayout.addItem(spacerItem) self.moveDownButton = 
QtWidgets.QToolButton(jriverDspDialog) self.moveDownButton.setEnabled(False) self.moveDownButton.setObjectName("moveDownButton") self.moveButtonsLayout.addWidget(self.moveDownButton) self.moveBottomButton = QtWidgets.QToolButton(jriverDspDialog) self.moveBottomButton.setEnabled(False) self.moveBottomButton.setObjectName("moveBottomButton") self.moveButtonsLayout.addWidget(self.moveBottomButton) self.selectorLayout.addLayout(self.moveButtonsLayout, 0, 0, 1, 1) self.filterButtonsLayout = QtWidgets.QVBoxLayout() self.filterButtonsLayout.setObjectName("filterButtonsLayout") self.addFilterButton = QtWidgets.QToolButton(jriverDspDialog) self.addFilterButton.setEnabled(False) self.addFilterButton.setPopupMode(QtWidgets.QToolButton.InstantPopup) self.addFilterButton.setObjectName("addFilterButton") self.filterButtonsLayout.addWidget(self.addFilterButton) self.editFilterButton = QtWidgets.QToolButton(jriverDspDialog) self.editFilterButton.setEnabled(False) self.editFilterButton.setObjectName("editFilterButton") self.filterButtonsLayout.addWidget(self.editFilterButton) self.deleteFilterButton = QtWidgets.QToolButton(jriverDspDialog) self.deleteFilterButton.setEnabled(False) self.deleteFilterButton.setObjectName("deleteFilterButton") self.filterButtonsLayout.addWidget(self.deleteFilterButton) self.clearFiltersButton = QtWidgets.QToolButton(jriverDspDialog) self.clearFiltersButton.setEnabled(False) self.clearFiltersButton.setObjectName("clearFiltersButton") self.filterButtonsLayout.addWidget(self.clearFiltersButton) self.splitFilterButton = QtWidgets.QToolButton(jriverDspDialog) self.splitFilterButton.setEnabled(False) self.splitFilterButton.setObjectName("splitFilterButton") self.filterButtonsLayout.addWidget(self.splitFilterButton) self.mergeFilterButton = QtWidgets.QToolButton(jriverDspDialog) self.mergeFilterButton.setEnabled(False) self.mergeFilterButton.setObjectName("mergeFilterButton") self.filterButtonsLayout.addWidget(self.mergeFilterButton) spacerItem1 = 
QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.filterButtonsLayout.addItem(spacerItem1) self.selectorLayout.addLayout(self.filterButtonsLayout, 0, 2, 1, 1) self.channelList = QtWidgets.QListWidget(jriverDspDialog) self.channelList.setProperty("showDropIndicator", False) self.channelList.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection) self.channelList.setObjectName("channelList") self.selectorLayout.addWidget(self.channelList, 1, 0, 1, 3) self.filterList = QtWidgets.QListWidget(jriverDspDialog) self.filterList.setProperty("showDropIndicator", False) self.filterList.setDragDropMode(QtWidgets.QAbstractItemView.NoDragDrop) self.filterList.setDefaultDropAction(QtCore.Qt.IgnoreAction) self.filterList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection) self.filterList.setObjectName("filterList") self.selectorLayout.addWidget(self.filterList, 0, 1, 1, 1) self.selectorLayout.setRowStretch(0, 1) self.mainLayout.addLayout(self.selectorLayout) self.dialogLayout.addLayout(self.mainLayout, 0, 0, 3, 1) self.pipelineControlLayout = QtWidgets.QHBoxLayout() self.pipelineControlLayout.setObjectName("pipelineControlLayout") self.showDotButton = QtWidgets.QToolButton(jriverDspDialog) self.showDotButton.setEnabled(False) self.showDotButton.setObjectName("showDotButton") self.pipelineControlLayout.addWidget(self.showDotButton) self.direction = QtWidgets.QCheckBox(jriverDspDialog) self.direction.setEnabled(False) self.direction.setObjectName("direction") self.pipelineControlLayout.addWidget(self.direction) self.dialogLayout.addLayout(self.pipelineControlLayout, 0, 1, 1, 1) self.viewSplitter = QtWidgets.QSplitter(jriverDspDialog) self.viewSplitter.setLineWidth(1) self.viewSplitter.setOrientation(QtCore.Qt.Vertical) self.viewSplitter.setObjectName("viewSplitter") self.pipelineView = SvgView(self.viewSplitter) self.pipelineView.setObjectName("pipelineView") self.chartWrapper = QtWidgets.QWidget(self.viewSplitter) 
self.chartWrapper.setObjectName("chartWrapper") self.chartLayout = QtWidgets.QHBoxLayout(self.chartWrapper) self.chartLayout.setContentsMargins(0, 0, 0, 0) self.chartLayout.setObjectName("chartLayout") self.previewChart = MplWidget(self.chartWrapper) self.previewChart.setObjectName("previewChart") self.chartLayout.addWidget(self.previewChart) self.chartControlLayout = QtWidgets.QVBoxLayout() self.chartControlLayout.setObjectName("chartControlLayout") self.limitsButton = QtWidgets.QToolButton(self.chartWrapper) self.limitsButton.setObjectName("limitsButton") self.chartControlLayout.addWidget(self.limitsButton) self.fullRangeButton = QtWidgets.QToolButton(self.chartWrapper) self.fullRangeButton.setObjectName("fullRangeButton") self.chartControlLayout.addWidget(self.fullRangeButton) self.subOnlyButton = QtWidgets.QToolButton(self.chartWrapper) self.subOnlyButton.setObjectName("subOnlyButton") self.chartControlLayout.addWidget(self.subOnlyButton) self.showPhase = QtWidgets.QToolButton(self.chartWrapper) self.showPhase.setCheckable(True) self.showPhase.setObjectName("showPhase") self.chartControlLayout.addWidget(self.showPhase) self.showImpulseButton = QtWidgets.QToolButton(self.chartWrapper) self.showImpulseButton.setObjectName("showImpulseButton") self.chartControlLayout.addWidget(self.showImpulseButton) spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) self.chartControlLayout.addItem(spacerItem2) self.chartLayout.addLayout(self.chartControlLayout) self.chartLayout.setStretch(0, 1) self.dialogLayout.addWidget(self.viewSplitter, 1, 1, 1, 1) self.buttonBox = QtWidgets.QDialogButtonBox(jriverDspDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Reset) self.buttonBox.setCenterButtons(False) self.buttonBox.setObjectName("buttonBox") self.dialogLayout.addWidget(self.buttonBox, 3, 0, 1, 2) 
self.dialogLayout.setColumnStretch(0, 1) self.dialogLayout.setColumnStretch(1, 3) self.retranslateUi(jriverDspDialog) self.buttonBox.accepted.connect(jriverDspDialog.accept) self.buttonBox.rejected.connect(jriverDspDialog.reject) self.findFilenameButton.clicked.connect(jriverDspDialog.find_dsp_file) self.showPhase.toggled['bool'].connect(jriverDspDialog.show_phase_response) self.subOnlyButton.clicked.connect(jriverDspDialog.show_sub_only) self.fullRangeButton.clicked.connect(jriverDspDialog.show_full_range) self.limitsButton.clicked.connect(jriverDspDialog.show_limits) self.blockSelector.currentTextChanged['QString'].connect(jriverDspDialog.show_filters) self.channelList.itemSelectionChanged.connect(jriverDspDialog.redraw) self.saveButton.clicked.connect(jriverDspDialog.save_dsp) self.saveAsButton.clicked.connect(jriverDspDialog.save_as_dsp) self.deleteFilterButton.clicked.connect(jriverDspDialog.delete_filter) self.clearFiltersButton.clicked.connect(jriverDspDialog.clear_filters) self.filterList.itemSelectionChanged.connect(jriverDspDialog.on_filter_select) self.filterList.itemDoubleClicked['QListWidgetItem*'].connect(jriverDspDialog.edit_filter) self.editFilterButton.clicked.connect(jriverDspDialog.edit_selected_filter) self.splitFilterButton.clicked.connect(jriverDspDialog.split_filter) self.mergeFilterButton.clicked.connect(jriverDspDialog.merge_filters) self.moveBottomButton.clicked.connect(jriverDspDialog.move_filter_to_bottom) self.moveDownButton.clicked.connect(jriverDspDialog.move_filter_down) self.moveUpButton.clicked.connect(jriverDspDialog.move_filter_up) self.moveTopButton.clicked.connect(jriverDspDialog.move_filter_to_top) self.newConfigButton.clicked.connect(jriverDspDialog.create_new_config) self.showImpulseButton.clicked.connect(jriverDspDialog.show_impulse) QtCore.QMetaObject.connectSlotsByName(jriverDspDialog) jriverDspDialog.setTabOrder(self.findFilenameButton, self.limitsButton) jriverDspDialog.setTabOrder(self.limitsButton, 
self.fullRangeButton) jriverDspDialog.setTabOrder(self.fullRangeButton, self.subOnlyButton) jriverDspDialog.setTabOrder(self.subOnlyButton, self.showPhase) jriverDspDialog.setTabOrder(self.showPhase, self.showDotButton) jriverDspDialog.setTabOrder(self.showDotButton, self.direction) jriverDspDialog.setTabOrder(self.direction, self.pipelineView) jriverDspDialog.setTabOrder(self.pipelineView, self.previewChart) jriverDspDialog.setTabOrder(self.previewChart, self.filterList) jriverDspDialog.setTabOrder(self.filterList, self.channelList) def retranslateUi(self, jriverDspDialog): _translate = QtCore.QCoreApplication.translate jriverDspDialog.setWindowTitle(_translate("jriverDspDialog", "JRiver Media Center DSP Editor")) self.newConfigButton.setText(_translate("jriverDspDialog", "...")) self.findFilenameButton.setText(_translate("jriverDspDialog", "...")) self.loadZoneButton.setText(_translate("jriverDspDialog", "...")) self.saveButton.setText(_translate("jriverDspDialog", "...")) self.saveAsButton.setText(_translate("jriverDspDialog", "...")) self.uploadButton.setText(_translate("jriverDspDialog", "...")) self.backButton.setText(_translate("jriverDspDialog", "...")) self.forwardButton.setText(_translate("jriverDspDialog", "...")) self.moveTopButton.setText(_translate("jriverDspDialog", "...")) self.moveTopButton.setShortcut(_translate("jriverDspDialog", "Shift+Up")) self.moveUpButton.setText(_translate("jriverDspDialog", "...")) self.moveUpButton.setShortcut(_translate("jriverDspDialog", "Ctrl+Up")) self.moveDownButton.setText(_translate("jriverDspDialog", "...")) self.moveDownButton.setShortcut(_translate("jriverDspDialog", "Ctrl+Down")) self.moveBottomButton.setText(_translate("jriverDspDialog", "...")) self.moveBottomButton.setShortcut(_translate("jriverDspDialog", "Shift+Down")) self.addFilterButton.setText(_translate("jriverDspDialog", "...")) self.addFilterButton.setShortcut(_translate("jriverDspDialog", "=")) 
self.editFilterButton.setText(_translate("jriverDspDialog", "...")) self.editFilterButton.setShortcut(_translate("jriverDspDialog", "E")) self.deleteFilterButton.setText(_translate("jriverDspDialog", "...")) self.deleteFilterButton.setShortcut(_translate("jriverDspDialog", "-")) self.clearFiltersButton.setText(_translate("jriverDspDialog", "...")) self.clearFiltersButton.setShortcut(_translate("jriverDspDialog", "X")) self.splitFilterButton.setText(_translate("jriverDspDialog", "...")) self.mergeFilterButton.setText(_translate("jriverDspDialog", "...")) self.showDotButton.setText(_translate("jriverDspDialog", "...")) self.direction.setText(_translate("jriverDspDialog", "Vertical?")) self.limitsButton.setText(_translate("jriverDspDialog", "...")) self.fullRangeButton.setText(_translate("jriverDspDialog", "...")) self.subOnlyButton.setText(_translate("jriverDspDialog", "...")) self.showPhase.setText(_translate("jriverDspDialog", "...")) self.showImpulseButton.setText(_translate("jriverDspDialog", "...")) from mpl import MplWidget from svg import SvgView
62.175824
115
0.747143
88a8aa3a3b09f7b8f22914184124db2a1414e747
320
py
Python
src/sensors/__init__.py
ivanbukhtiyarov/elevators
e7ff582bbc9a26d22880bec61bede747427430c2
[ "MIT" ]
2
2021-03-22T16:12:56.000Z
2021-03-22T16:19:09.000Z
src/sensors/__init__.py
ivanbukhtiyarov/elevators
e7ff582bbc9a26d22880bec61bede747427430c2
[ "MIT" ]
46
2021-04-01T10:25:25.000Z
2021-12-26T23:43:46.000Z
src/sensors/__init__.py
ivanbukhtiyarov/elevators
e7ff582bbc9a26d22880bec61bede747427430c2
[ "MIT" ]
4
2021-04-01T10:22:46.000Z
2021-12-26T21:51:10.000Z
from src.sensors.door_block_sensor import DoorBlockSensor from src.sensors.door_state_sensor import DoorStateSensor from src.sensors.light_sensor import LightSensor from src.sensors.movement_sensor import MovementSensor from src.sensors.smoke_sensor import SmokeSensor from src.sensors.weight_sensor import WeightSensor
45.714286
57
0.8875
88a9377893db4fc2f5048d2336cea72ff934579e
888
py
Python
code/animation/sine-cosine.py
geo7/scientific-visualization-book
71f6bac4db7ee2f26e88052fe7faa800303d8b00
[ "BSD-2-Clause" ]
2
2021-11-17T15:10:09.000Z
2021-12-24T13:31:10.000Z
code/animation/sine-cosine.py
WuShichao/scientific-visualization-book
389766215aa6b234ed1cf560a3768437d41d1d37
[ "BSD-2-Clause" ]
1
2021-12-12T11:37:48.000Z
2021-12-12T11:39:00.000Z
code/animation/sine-cosine.py
WuShichao/scientific-visualization-book
389766215aa6b234ed1cf560a3768437d41d1d37
[ "BSD-2-Clause" ]
2
2021-12-30T12:20:07.000Z
2022-02-24T06:36:41.000Z
# ---------------------------------------------------------------------------- # Title: Scientific Visualisation - Python & Matplotlib # Author: Nicolas P. Rougier # License: BSD # ---------------------------------------------------------------------------- import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation fig = plt.figure(figsize=(7, 2)) ax = plt.subplot() X = np.linspace(-np.pi, np.pi, 256, endpoint=True) C, S = np.cos(X), np.sin(X) (line1,) = ax.plot(X, C, marker="o", markevery=[-1], markeredgecolor="white") (line2,) = ax.plot(X, S, marker="o", markevery=[-1], markeredgecolor="white") def update(frame): line1.set_data(X[:frame], C[:frame]) line2.set_data(X[:frame], S[:frame]) plt.tight_layout() ani = animation.FuncAnimation(fig, update, interval=10) plt.savefig("../../figures/animation/sine-cosine.pdf") plt.show()
31.714286
78
0.566441
88ab2de7a369fd311ec763905e71a9bc7d4f2e49
2,773
py
Python
main.py
ghostcodekc/leagueoflegends-block-chat
0d68345964344410159d834cba81da4224196f87
[ "MIT" ]
null
null
null
main.py
ghostcodekc/leagueoflegends-block-chat
0d68345964344410159d834cba81da4224196f87
[ "MIT" ]
null
null
null
main.py
ghostcodekc/leagueoflegends-block-chat
0d68345964344410159d834cba81da4224196f87
[ "MIT" ]
null
null
null
import yaml
import socket
import subprocess, ctypes, os, sys
from subprocess import Popen, DEVNULL


def read_yaml(file_path):
    """Parse *file_path* as YAML and return the resulting object."""
    with open(file_path, "r") as f:
        return yaml.safe_load(f)


def check_admin():
    """
    Force to start application with admin rights
    """
    try:
        isAdmin = ctypes.windll.shell32.IsUserAnAdmin()
    except AttributeError:
        # Not on Windows (or shell32 unavailable) -> treat as non-admin.
        isAdmin = False
    if not isAdmin:
        # Relaunch this script elevated via UAC.  NOTE(review): the current,
        # non-elevated process keeps executing past this point -- confirm
        # whether a sys.exit() is wanted after relaunching.
        ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)


def check_for_firewall_rule(firewall_rule_name):
    """
    Check for existing rule in Windows Firewall.

    Returns True if a rule with the given name exists, False otherwise.
    """
    print("Checking to see if firewall rule exists")
    # BUG FIX: `netsh advfirewall firewall show rule` requires the `name=`
    # keyword (and quoting for names containing spaces); without it the
    # command always errors, so the rule was reported missing every time.
    x = subprocess.call(
        f'netsh advfirewall firewall show rule name="{firewall_rule_name}"',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    if x == 0:
        print(F"Rule exists.")
        return True
    else:
        print(F"Rule does not exist.")
        return False


def add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip):
    """
    Add Rule if the rule doesn't already exist.
    Delete the rule if the rule exists.

    state == 1 means "block chat" is enabled; state == 0 means disabled.
    """
    if firewall_exists and state == 1:
        # Recreate the rule so it always targets the current chat IP.
        delete_rule(firewall_rule_name)
        add_rule(firewall_rule_name, ip)
    if firewall_exists and state == 0:
        delete_rule(firewall_rule_name)
    if not firewall_exists and state == 1:
        add_rule(firewall_rule_name, ip)
    if not firewall_exists and state == 0:
        print("Firewall rule does not exist, and `block chat` is set to disabled")


def delete_rule(firewall_rule_name):
    """Delete the named rule from Windows Firewall (quoted for spaces)."""
    subprocess.call(
        f'netsh advfirewall firewall delete rule name="{firewall_rule_name}"',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    print(f"Rule '{firewall_rule_name}' deleted")


def add_rule(firewall_rule_name, ip):
    """
    Add an outbound TCP block rule for *ip* to Windows Firewall.
    """
    subprocess.call(
        f'netsh advfirewall firewall add rule name="{firewall_rule_name}" dir=out action=block remoteip={ip} protocol=TCP',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    print(f"Current League of Legends Chat IP Address: {ip}. \nRule {firewall_rule_name} added. ")


if __name__ == '__main__':
    config = read_yaml(".\config.yaml")
    state = config['config']['block_chat']
    firewall_rule_name = config['config']['firewall_rule_name']
    lol_config_file = config['config']['dir']
    region = config['config']['region']

    # Resolve the current chat-server IP from the game's own config file.
    lol_config = read_yaml(lol_config_file)
    host = lol_config['region_data'][region]['servers']['chat']['chat_host']
    ip = socket.gethostbyname(host)

    check_admin()
    firewall_exists = check_for_firewall_rule(firewall_rule_name)
    add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip)
36.012987
121
0.679769
88ac260681c50b787cb8306fb30da9bc778c277f
5,623
py
Python
src/Leorio/tokenization.py
majiajue/Listed-company-news-crawl-and-text-analysis
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
[ "MIT" ]
635
2018-02-25T08:45:06.000Z
2022-03-30T10:05:23.000Z
src/Leorio/tokenization.py
NongMaYiSheng/Listed-company-news-crawl-and-text-analysis
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
[ "MIT" ]
5
2018-10-29T16:21:28.000Z
2022-01-03T12:59:28.000Z
src/Leorio/tokenization.py
NongMaYiSheng/Listed-company-news-crawl-and-text-analysis
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
[ "MIT" ]
216
2018-02-26T09:27:15.000Z
2022-03-30T10:05:26.000Z
import __init__

from Kite.database import Database
from Kite import config
from Kite import utils

import jieba
import pkuseg

import logging

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S')


class Tokenization(object):
    """Chinese word segmentation helper for financial news articles.

    Wraps either jieba or pkuseg (chosen via `import_module`), optionally
    loading a user dictionary of finance terms / stock names and a Chinese
    stop-word list, and provides utilities to map article text to the stock
    codes it mentions.
    """

    def __init__(self, import_module="jieba", user_dict=None, chn_stop_words_dir=None):
        # import_module: "jieba" or "pkuseg" -- which segmenter to use.
        # user_dict: path to a custom dictionary file (one term per line).
        # chn_stop_words_dir: path to a Chinese stop-word file.
        #self.database = Database().conn[config.DATABASE_NAME]
        #.get_collection(config.COLLECTION_NAME_CNSTOCK)
        self.database = Database()
        self.import_module = import_module
        self.user_dict = user_dict
        if self.user_dict:
            # Refresh the user dictionary with the latest stock names first.
            self.update_user_dict(self.user_dict)
        if chn_stop_words_dir:
            self.stop_words_list = utils.get_chn_stop_words(chn_stop_words_dir)
        else:
            self.stop_words_list = list()

    def update_user_dict(self, old_user_dict_dir, new_user_dict_dir=None):
        # Add any missing (or new) stock names, new finance terms, etc. into
        # the finance dictionary; writes back to old_user_dict_dir unless a
        # separate new_user_dict_dir is given.
        word_list = []
        with open(old_user_dict_dir, "r", encoding="utf-8") as file:
            for row in file:
                word_list.append(row.split("\n")[0])
        # Pull the current name/code table of listed companies from MongoDB.
        name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
                                              config.COLLECTION_NAME_STOCK_BASIC_INFO,
                                              keys=["name", "code"])
        new_words_list = list(set(name_code_df["name"].tolist()))
        for word in new_words_list:
            if word not in word_list:
                word_list.append(word)
        new_user_dict_dir = old_user_dict_dir if not new_user_dict_dir else new_user_dict_dir
        with open(new_user_dict_dir, "w", encoding="utf-8") as file:
            for word in word_list:
                file.write(word + "\n")

    def cut_words(self, text):
        # Segment `text` into a list of Chinese words, filtering stop words,
        # whitespace tokens, non-Chinese tokens and single characters.
        # Returns the list on success, or False when segmentation produced
        # nothing (NOTE(review): mixed list/False return -- callers must
        # truth-test the result).
        outstr = list()
        sentence_seged = None
        if self.import_module == "jieba":
            if self.user_dict:
                jieba.load_userdict(self.user_dict)
            sentence_seged = list(jieba.cut(text))
        elif self.import_module == "pkuseg":
            seg = pkuseg.pkuseg(user_dict=self.user_dict)  # load the custom dictionary
            sentence_seged = seg.cut(text)  # perform segmentation
        if sentence_seged:
            for word in sentence_seged:
                if word not in self.stop_words_list \
                        and word != "\t" \
                        and word != " " \
                        and utils.is_contain_chn(word)\
                        and len(word) > 1:
                    outstr.append(word)
            return outstr
        else:
            return False

    def find_relevant_stock_codes_in_article(self, article, stock_name_code_dict):
        # Return the de-duplicated list of stock codes whose company names
        # appear among the segmented words of `article`.
        stock_codes_set = list()
        cut_words_list = self.cut_words(article)
        if cut_words_list:
            for word in cut_words_list:
                try:
                    stock_codes_set.append(stock_name_code_dict[word])
                except Exception:
                    # Word is not a known company name -- skip it.
                    pass
        return list(set(stock_codes_set))

    def update_news_database_rows(self, database_name, collection_name, incremental_column_name="RelatedStockCodes"):
        # Walk every news row in the collection and, where missing, add a
        # column listing the stock codes mentioned in the article.
        name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
                                              config.COLLECTION_NAME_STOCK_BASIC_INFO,
                                              keys=["name", "code"])
        name_code_dict = dict(name_code_df.values)
        data = self.database.get_collection(database_name, collection_name).find()
        for row in data:
            # if row["Date"] > "2019-05-20 00:00:00":
            # Newly inserted rows do not carry the incremental column yet,
            # while older rows may already have it, so check whether the
            # incremental_column_name field exists before computing it.
            if incremental_column_name not in row.keys():
                related_stock_codes_list = self.find_relevant_stock_codes_in_article(
                    row["Article"], name_code_dict)
                self.database.update_row(database_name,
                                         collection_name,
                                         {"_id": row["_id"]},
                                         {incremental_column_name: " ".join(related_stock_codes_list)}
                                         )
                logging.info("[{} -> {} -> {}] updated {} key value ... "
                             .format(database_name, collection_name, row["Date"], incremental_column_name))
            else:
                logging.info("[{} -> {} -> {}] has already existed {} key value ... "
                             .format(database_name, collection_name, row["Date"], incremental_column_name))


if __name__ == "__main__":
    tokenization = Tokenization(import_module="jieba",
                                user_dict="financedict.txt",
                                chn_stop_words_dir="chnstopwords.txt")
    # documents_list = \
    #     [
    #         "中央、地方支持政策频出,煤炭行业站上了风口 券商研报浩如烟海,投资线索眼花缭乱,\
    #         第一财经推出《一财研选》产品,挖掘研报精华,每期梳理5条投资线索,便于您短时间内获\
    #         取有价值的信息。专业团队每周日至每周四晚8点准时“上新”,助您投资顺利!",
    #         "郭文仓到重点工程项目督导检查 2月2日,公司党委书记、董事长、总经理郭文仓,公司董事,\
    #         股份公司副总经理、总工程师、郭毅民,股份公司副总经理张国富、柴高贵及相关单位负责人到\
    #         焦化厂煤场全封闭和干熄焦等重点工程项目建设工地督导检查施工进度和安全工作情况。"
    #     ]
    # for text in documents_list:
    #     cut_words_list = tokenization.cut_words(text)
    #     print(cut_words_list)
    # tokenization.update_news_database_rows(config.DATABASE_NAME, "jrj")
44.626984
112
0.560377
88ade7f8dfd3c3fdb9f4bfa3e09536d509c88764
2,659
py
Python
Server/app/docs/signup.py
Sporrow/Sporrow-Backend
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
[ "Apache-2.0" ]
null
null
null
Server/app/docs/signup.py
Sporrow/Sporrow-Backend
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
[ "Apache-2.0" ]
null
null
null
Server/app/docs/signup.py
Sporrow/Sporrow-Backend
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
[ "Apache-2.0" ]
null
null
null
# Swagger/Flasgger documentation specs for the signup API endpoints.
# All user-facing descriptions are intentionally in Korean (the product
# language) and must not be translated -- they are rendered verbatim in the
# generated API docs.
from app.docs import SAMPLE_OBJECT_IDS

# GET: duplicate check -- is this e-mail address already registered?
ID_DUPLICATION_CHECK_GET = {
    'tags': ['회원가입'],
    'description': '이메일이 이미 가입되었는지를 체크(중복체크)합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '중복을 체크할 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '200': {
            'description': '중복되지 않음',
        },
        '409': {
            'description': '중복됨'
        }
    }
}

# POST: create an account with e-mail + password; sends a verification mail
# valid for 5 minutes.
SIGNUP_POST = {
    'tags': ['회원가입'],
    'description': '회원가입합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '이메일',
            'in': 'json',
            'type': 'str',
            'required': True
        },
        {
            'name': 'pw',
            'description': '비밀번호',
            'in': 'json',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '201': {
            'description': '회원가입 성공, 인증 이메일 발송 완료. 기본 정보 초기화 액티비티로 이동하면 됩니다. 인증 이메일의 유효 시간은 5분입니다.',
        },
        '409': {
            'description': '이메일 중복됨'
        }
    }
}

# GET: re-send the verification e-mail for a pending signup.
EMAIL_RESEND_GET = {
    'tags': ['회원가입'],
    'description': '인증 메일을 재전송합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '인증 메일을 재전송할 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '200': {
            'description': '이메일 재전송 성공',
        },
        '204': {
            'description': '가입되지 않은 이메일'
        }
    }
}

# POST: upload the initial profile (nickname + interest-category IDs) after
# the e-mail has been verified.
INITIALIZE_INFO_POST = {
    'tags': ['회원가입'],
    'description': '기본 정보를 업로드합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '기본 정보 업로드 대상 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        },
        {
            'name': 'nickname',
            'description': '닉네임',
            'in': 'json',
            'type': 'str',
            'required': True
        },
        {
            'name': 'categories',
            'description': '관심사 ID 목록 ex) ["{}"], ["{}"], ["{}"]'.format(*SAMPLE_OBJECT_IDS),
            'in': 'json',
            'type': 'list',
            'required': True
        }
    ],
    'responses': {
        '201': {
            'description': '업로드 성공',
        },
        '204': {
            'description': '가입되지 않은 이메일'
        },
        '400': {
            'description': '관심사 ID 중 존재하지 않는 관심사가 존재함'
        },
        '401': {
            'description': '이메일 인증되지 않음'
        },
        '409': {
            'description': '닉네임이 중복됨'
        }
    }
}
22.158333
100
0.371944
88afd99ff229803940fa1debc0013f26c15e67cf
366
py
Python
workflow/config/add_path.py
ForestMars/Coda.to
55e99a8fb1867738e0bb2292461fa2bf3a7770f7
[ "DOC", "MIT" ]
null
null
null
workflow/config/add_path.py
ForestMars/Coda.to
55e99a8fb1867738e0bb2292461fa2bf3a7770f7
[ "DOC", "MIT" ]
null
null
null
workflow/config/add_path.py
ForestMars/Coda.to
55e99a8fb1867738e0bb2292461fa2bf3a7770f7
[ "DOC", "MIT" ]
null
null
null
import sys


class add_path():
    """Context manager that temporarily prepends *path* to ``sys.path``.

    On exit the path is removed again; a ValueError from the removal (e.g.
    user code already removed it) is deliberately ignored.
    """

    def __init__(self, path):
        self.path = path

    def __enter__(self):
        sys.path.insert(0, self.path)
        # Returning self is a backward-compatible improvement (was None),
        # enabling `with add_path(p) as ap: ...`.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Remove the previously inserted path, tolerating its absence."""
        # BUG FIX: the original had a stray `""" Remove """` string literal
        # *after* the except block -- a no-op statement that was clearly
        # meant to be this method's docstring.
        try:
            sys.path.remove(self.path)
        except ValueError:
            pass


def add_test():
    """Smoke-test helper: returns a fixed marker string."""
    return "add-test-success"
17.428571
55
0.581967
88b0a8ec143545a862b4e668eb425512ef141839
295
py
Python
incal_lib/exportres.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/exportres.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/exportres.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd
from collections import OrderedDict, Counter
import itertools
from typing import *

# A Group pairs a label with the list of integer ids belonging to it.
Group = tuple[str, list[int]]
Groups = list[Group]


def df_groups(groups: Groups) -> pd.DataFrame:
    """Return a DataFrame with one row per group.

    Each row holds the group's id list and is indexed by the group name.
    BUG FIX: the original body referenced an undefined name ``dict_groups``
    (raising NameError on every call); the mapping is now built from the
    *groups* argument as the signature intends.
    """
    mapping = dict(groups)
    return pd.DataFrame(list(mapping.values()), index=list(mapping.keys()))
22.692308
71
0.762712
88b1ab4a72c456e8f8edbf2cf4dc0a0cd36b09d4
517
py
Python
my_lambdata/my_mod.py
tatianaportsova/Lambdata_12
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
[ "MIT" ]
null
null
null
my_lambdata/my_mod.py
tatianaportsova/Lambdata_12
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
[ "MIT" ]
null
null
null
my_lambdata/my_mod.py
tatianaportsova/Lambdata_12
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
[ "MIT" ]
null
null
null
# my_lambdata/my_mod.py


def enlarge(n):
    """Return *n* scaled up by a factor of 100."""
    return 100 * n


if __name__ == "__main__":
    # Executed only when this file is run directly from the command line;
    # importing `enlarge` from another module triggers none of this.
    print("HELLO")
    y = int(input("Please choose a number"))
    print(y, enlarge(y))
21.541667
67
0.651838
88b1b8b8cd2dc825c7e83bc1234dcf21c35cee9e
2,406
py
Python
allhub/users/users.py
srinivasreddy/allhub
ff20858c9984da5c4edd5043c39eed3b6d5d693d
[ "Apache-2.0" ]
2
2019-10-07T15:46:33.000Z
2019-11-26T04:30:39.000Z
allhub/users/users.py
srinivasreddy/allhub
ff20858c9984da5c4edd5043c39eed3b6d5d693d
[ "Apache-2.0" ]
1
2020-03-09T14:44:04.000Z
2020-03-09T14:44:04.000Z
allhub/users/users.py
srinivasreddy/allhub
ff20858c9984da5c4edd5043c39eed3b6d5d693d
[ "Apache-2.0" ]
2
2019-10-08T05:22:37.000Z
2019-10-08T06:20:47.000Z
from allhub.response import Response from enum import Enum class SubjectType(Enum): ORGANIZATION = "organization" REPOSITORY = "repository" ISSUE = "issue" PULL_REQUEST = "pull_request" NONE = None class UsersMixin: def user(self, username): """ Provides publicly available information about someone with a GitHub account. :param username: :return: """ url = "/user/{username}".format(username=username) self.response = Response(self.get(url), "User") return self.response.transform() def auth_user(self): """ Get the authenticated user :return: """ url = "/user" self.response = Response(self.get(url), "User") return self.response.transform() def update_auth_user(self, **kwargs): params = [] for attribute in ( "name", "email", "blog", "company", "location", "hireable", "bio", ): if attribute in kwargs: params.append((attribute, kwargs.pop(attribute))) if params: url = "/user" self.response = Response(self.patch(url, params=params), "User") return self.response.transform() def hover_card(self, username, subject_tye=SubjectType.NONE, subject_id=None): if bool(subject_tye.value) != bool(subject_id): # Python shortcut for XOR. raise ValueError( "subject_type and subject_id both should provided or both left out" ) params = [] if subject_id and subject_tye.value: params = [("subject_type", subject_tye.value), ("subject_id", subject_id)] url = "/users/{username}/hovercard".format(username=username) self.response = Response( self.get( url, params=params, **{"Accept": "application/vnd.github.hagar-preview+json"}, ), "HoverCard", ) return self.response.transform() def users(self, since): # TODO: Looks like this API is not working currently. # As of 19-Sep-2019. url = "/users" params = [("since", since)] self.response = Response(self.patch(url, params=params), "Users") return self.response.transform()
30.846154
86
0.558188
88b1bbc1e2f422bb622061ec6f6d2f4ceb966b0b
22,360
bzl
Python
third_party/boringssl-with-bazel/BUILD.generated_tests.bzl
miyachu/grpc
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
[ "BSD-3-Clause" ]
91
2018-11-24T05:33:58.000Z
2022-03-16T05:58:05.000Z
third_party/boringssl-with-bazel/BUILD.generated_tests.bzl
miyachu/grpc
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
[ "BSD-3-Clause" ]
11
2019-06-02T23:50:17.000Z
2022-02-04T23:58:56.000Z
third_party/boringssl-with-bazel/BUILD.generated_tests.bzl
miyachu/grpc
a06ea3c3162c10ff90a1578bf82bbbff95dc799d
[ "BSD-3-Clause" ]
18
2018-11-24T10:35:29.000Z
2021-04-22T07:22:10.000Z
# This file is created by generate_build_files.py. Do not edit manually. test_support_sources = [ "src/crypto/aes/internal.h", "src/crypto/asn1/asn1_locl.h", "src/crypto/bio/internal.h", "src/crypto/bn/internal.h", "src/crypto/bn/rsaz_exp.h", "src/crypto/bytestring/internal.h", "src/crypto/cipher/internal.h", "src/crypto/conf/conf_def.h", "src/crypto/conf/internal.h", "src/crypto/curve25519/internal.h", "src/crypto/des/internal.h", "src/crypto/digest/internal.h", "src/crypto/digest/md32_common.h", "src/crypto/ec/internal.h", "src/crypto/ec/p256-x86_64-table.h", "src/crypto/ec/p256-x86_64.h", "src/crypto/evp/internal.h", "src/crypto/internal.h", "src/crypto/modes/internal.h", "src/crypto/obj/obj_dat.h", "src/crypto/pkcs8/internal.h", "src/crypto/poly1305/internal.h", "src/crypto/pool/internal.h", "src/crypto/rand/internal.h", "src/crypto/rsa/internal.h", "src/crypto/test/file_test.cc", "src/crypto/test/file_test.h", "src/crypto/test/test_util.cc", "src/crypto/test/test_util.h", "src/crypto/x509/charmap.h", "src/crypto/x509/internal.h", "src/crypto/x509/vpm_int.h", "src/crypto/x509v3/ext_dat.h", "src/crypto/x509v3/pcy_int.h", "src/ssl/internal.h", "src/ssl/test/async_bio.h", "src/ssl/test/packeted_bio.h", "src/ssl/test/test_config.h", ] def create_tests(copts, crypto, ssl): native.cc_test( name = "aes_test", size = "small", srcs = ["src/crypto/aes/aes_test.cc"] + test_support_sources, args = [ "$(location src/crypto/aes/aes_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/aes/aes_tests.txt", ], deps = [crypto], ) native.cc_test( name = "asn1_test", size = "small", srcs = ["src/crypto/asn1/asn1_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "base64_test", size = "small", srcs = ["src/crypto/base64/base64_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "bio_test", size = "small", 
srcs = ["src/crypto/bio/bio_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "bn_test", size = "small", srcs = ["src/crypto/bn/bn_test.cc"] + test_support_sources, args = [ "$(location src/crypto/bn/bn_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/bn/bn_tests.txt", ], deps = [crypto], ) native.cc_test( name = "bytestring_test", size = "small", srcs = ["src/crypto/bytestring/bytestring_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "chacha_test", size = "small", srcs = ["src/crypto/chacha/chacha_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_gcm", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-gcm", "$(location src/crypto/cipher/test/aes_128_gcm_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_gcm_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_gcm", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-gcm", "$(location src/crypto/cipher/test/aes_256_gcm_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_gcm_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_gcm_siv", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-gcm-siv", "$(location src/crypto/cipher/test/aes_128_gcm_siv_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_gcm_siv_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_gcm_siv", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ 
"aes-256-gcm-siv", "$(location src/crypto/cipher/test/aes_256_gcm_siv_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_gcm_siv_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_chacha20_poly1305", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "chacha20-poly1305", "$(location src/crypto/cipher/test/chacha20_poly1305_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/chacha20_poly1305_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_chacha20_poly1305_old", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "chacha20-poly1305-old", "$(location src/crypto/cipher/test/chacha20_poly1305_old_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/chacha20_poly1305_old_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_cbc_sha1_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-cbc-sha1-tls", "$(location src/crypto/cipher/test/aes_128_cbc_sha1_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_cbc_sha1_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_cbc_sha1_tls_implicit_iv", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-cbc-sha1-tls-implicit-iv", "$(location src/crypto/cipher/test/aes_128_cbc_sha1_tls_implicit_iv_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_cbc_sha1_tls_implicit_iv_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_cbc_sha256_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-cbc-sha256-tls", "$(location 
src/crypto/cipher/test/aes_128_cbc_sha256_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_cbc_sha256_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_cbc_sha1_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-cbc-sha1-tls", "$(location src/crypto/cipher/test/aes_256_cbc_sha1_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_cbc_sha1_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_cbc_sha1_tls_implicit_iv", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-cbc-sha1-tls-implicit-iv", "$(location src/crypto/cipher/test/aes_256_cbc_sha1_tls_implicit_iv_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_cbc_sha1_tls_implicit_iv_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_cbc_sha256_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-cbc-sha256-tls", "$(location src/crypto/cipher/test/aes_256_cbc_sha256_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_cbc_sha256_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_cbc_sha384_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-cbc-sha384-tls", "$(location src/crypto/cipher/test/aes_256_cbc_sha384_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_cbc_sha384_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_des_ede3_cbc_sha1_tls", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "des-ede3-cbc-sha1-tls", "$(location 
src/crypto/cipher/test/des_ede3_cbc_sha1_tls_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/des_ede3_cbc_sha1_tls_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_des_ede3_cbc_sha1_tls_implicit_iv", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "des-ede3-cbc-sha1-tls-implicit-iv", "$(location src/crypto/cipher/test/des_ede3_cbc_sha1_tls_implicit_iv_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/des_ede3_cbc_sha1_tls_implicit_iv_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_cbc_sha1_ssl3", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-cbc-sha1-ssl3", "$(location src/crypto/cipher/test/aes_128_cbc_sha1_ssl3_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_cbc_sha1_ssl3_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_cbc_sha1_ssl3", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-cbc-sha1-ssl3", "$(location src/crypto/cipher/test/aes_256_cbc_sha1_ssl3_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_cbc_sha1_ssl3_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_des_ede3_cbc_sha1_ssl3", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "des-ede3-cbc-sha1-ssl3", "$(location src/crypto/cipher/test/des_ede3_cbc_sha1_ssl3_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/des_ede3_cbc_sha1_ssl3_tests.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_128_ctr_hmac_sha256", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-128-ctr-hmac-sha256", "$(location 
src/crypto/cipher/test/aes_128_ctr_hmac_sha256.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_128_ctr_hmac_sha256.txt", ], deps = [crypto], ) native.cc_test( name = "aead_test_aes_256_ctr_hmac_sha256", size = "small", srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources, args = [ "aes-256-ctr-hmac-sha256", "$(location src/crypto/cipher/test/aes_256_ctr_hmac_sha256.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/aes_256_ctr_hmac_sha256.txt", ], deps = [crypto], ) native.cc_test( name = "cipher_test", size = "small", srcs = ["src/crypto/cipher/cipher_test.cc"] + test_support_sources, args = [ "$(location src/crypto/cipher/test/cipher_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/cipher/test/cipher_tests.txt", ], deps = [crypto], ) native.cc_test( name = "cmac_test", size = "small", srcs = ["src/crypto/cmac/cmac_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "constant_time_test", size = "small", srcs = ["src/crypto/constant_time_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "ed25519_test", size = "small", srcs = ["src/crypto/curve25519/ed25519_test.cc"] + test_support_sources, args = [ "$(location src/crypto/curve25519/ed25519_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/curve25519/ed25519_tests.txt", ], deps = [crypto], ) native.cc_test( name = "x25519_test", size = "small", srcs = ["src/crypto/curve25519/x25519_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "spake25519_test", size = "small", srcs = ["src/crypto/curve25519/spake25519_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = 
"dh_test", size = "small", srcs = ["src/crypto/dh/dh_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "digest_test", size = "small", srcs = ["src/crypto/digest/digest_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "dsa_test", size = "small", srcs = ["src/crypto/dsa/dsa_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "ec_test", size = "small", srcs = ["src/crypto/ec/ec_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "example_mul", size = "small", srcs = ["src/crypto/ec/example_mul.c"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "p256-x86_64_test", size = "small", srcs = ["src/crypto/ec/p256-x86_64_test.cc"] + test_support_sources, args = [ "$(location src/crypto/ec/p256-x86_64_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/ec/p256-x86_64_tests.txt", ], deps = [crypto], ) native.cc_test( name = "ecdh_test", size = "small", srcs = ["src/crypto/ecdh/ecdh_test.cc"] + test_support_sources, args = [ "$(location src/crypto/ecdh/ecdh_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/ecdh/ecdh_tests.txt", ], deps = [crypto], ) native.cc_test( name = "ecdsa_sign_test", size = "small", srcs = ["src/crypto/ecdsa/ecdsa_sign_test.cc"] + test_support_sources, args = [ "$(location src/crypto/ecdsa/ecdsa_sign_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/ecdsa/ecdsa_sign_tests.txt", ], deps = [crypto], ) native.cc_test( name = "ecdsa_test", size = "small", srcs = ["src/crypto/ecdsa/ecdsa_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( 
name = "ecdsa_verify_test", size = "small", srcs = ["src/crypto/ecdsa/ecdsa_verify_test.cc"] + test_support_sources, args = [ "$(location src/crypto/ecdsa/ecdsa_verify_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/ecdsa/ecdsa_verify_tests.txt", ], deps = [crypto], ) native.cc_test( name = "err_test", size = "small", srcs = ["src/crypto/err/err_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "evp_extra_test", size = "small", srcs = ["src/crypto/evp/evp_extra_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "evp_test", size = "small", srcs = ["src/crypto/evp/evp_test.cc"] + test_support_sources, args = [ "$(location src/crypto/evp/evp_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/evp/evp_tests.txt", ], deps = [crypto], ) native.cc_test( name = "pbkdf_test", size = "small", srcs = ["src/crypto/evp/pbkdf_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "hkdf_test", size = "small", srcs = ["src/crypto/hkdf/hkdf_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "hmac_test", size = "small", srcs = ["src/crypto/hmac/hmac_test.cc"] + test_support_sources, args = [ "$(location src/crypto/hmac/hmac_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/hmac/hmac_tests.txt", ], deps = [crypto], ) native.cc_test( name = "lhash_test", size = "small", srcs = ["src/crypto/lhash/lhash_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "gcm_test", size = "small", srcs = ["src/crypto/modes/gcm_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( 
name = "obj_test", size = "small", srcs = ["src/crypto/obj/obj_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "pkcs12_test", size = "small", srcs = ["src/crypto/pkcs8/pkcs12_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "pkcs8_test", size = "small", srcs = ["src/crypto/pkcs8/pkcs8_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "poly1305_test", size = "small", srcs = ["src/crypto/poly1305/poly1305_test.cc"] + test_support_sources, args = [ "$(location src/crypto/poly1305/poly1305_tests.txt)", ], copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], data = [ "src/crypto/poly1305/poly1305_tests.txt", ], deps = [crypto], ) native.cc_test( name = "pool_test", size = "small", srcs = ["src/crypto/pool/pool_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "refcount_test", size = "small", srcs = ["src/crypto/refcount_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "rsa_test", size = "small", srcs = ["src/crypto/rsa/rsa_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "thread_test", size = "small", srcs = ["src/crypto/thread_test.c"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "pkcs7_test", size = "small", srcs = ["src/crypto/x509/pkcs7_test.c"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "x509_test", size = "small", srcs = ["src/crypto/x509/x509_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "tab_test", size = 
"small", srcs = ["src/crypto/x509v3/tab_test.c"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "v3name_test", size = "small", srcs = ["src/crypto/x509v3/v3name_test.c"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [crypto], ) native.cc_test( name = "ssl_test", size = "small", srcs = ["src/ssl/ssl_test.cc"] + test_support_sources, copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"], deps = [ crypto, ssl, ], )
29.228758
91
0.590474
88b2a19329a06c11a7f27402a51fc753d23d3562
1,291
py
Python
DB_resource/code/ci.py
DaiShuHeng/shiyizhonghua_resource
6faa284292102ab97438f356cf9bf69d2472335b
[ "Apache-2.0" ]
null
null
null
DB_resource/code/ci.py
DaiShuHeng/shiyizhonghua_resource
6faa284292102ab97438f356cf9bf69d2472335b
[ "Apache-2.0" ]
1
2021-11-29T03:38:21.000Z
2021-11-29T03:38:21.000Z
DB_resource/code/ci.py
DaiShuHeng/shiyizhonghua_resource
6faa284292102ab97438f356cf9bf69d2472335b
[ "Apache-2.0" ]
13
2021-11-06T03:17:45.000Z
2021-12-02T15:12:54.000Z
# -*- coding: utf-8 -*- """ Author:by 王林清 on 2021/11/2 13:02 FileName:ci.py in shiyizhonghua_resource Tools:PyCharm python3.8.4 """ from util import get_time_str, get_json, get_file_path, save_json, \ save_split_json if __name__ == '__main__': dir_name = r'./../data/ci' authors = {} ci_jsons = [] paths = get_file_path(dir_name) author_path = paths.pop(0) author_dicts = get_json(author_path) for author in author_dicts: name = author['name'] authors[name] = { 'name': name, 'time': '宋', 'desc': author['description'], } for path in paths: try: ci_json = get_json(path) for ci in ci_json: time = get_time_str() ci_jsons.append( { 'title': ci['rhythmic'], 'author': authors[ci['author']], 'type': '词', 'content': ci['paragraphs'], 'create_time': time, 'update_time': time, 'valid_delete': True } ) except Exception as ex: print(f'{path}:{ex}') save_split_json('ci', ci_jsons)
26.346939
68
0.473277
88b2a9e556e312a49635b929210b47f14c9cd821
2,307
py
Python
tools/pfif-tools/app/settings.py
priyanshu-kumar02/personfinder
d5390b60709cd0ccaaade9a3b6224a60cd523ed9
[ "Apache-2.0" ]
561
2015-02-16T07:59:42.000Z
2022-03-30T17:31:21.000Z
tools/pfif-tools/app/settings.py
Anthonymcqueen21/personfinder
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
[ "Apache-2.0" ]
591
2015-01-30T05:09:30.000Z
2022-02-26T09:31:25.000Z
tools/pfif-tools/app/settings.py
Anthonymcqueen21/personfinder
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
[ "Apache-2.0" ]
258
2015-01-25T18:35:12.000Z
2021-12-25T01:44:14.000Z
# Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # If we actually did anything that used the secret key we'd need to set it to # some constant value and find a way to secretly store it. However, pfif-tools # doesn't use it for anything. We need to set it to something to make Django # happy though, and we set it to something random to be safe in case we # unknowingly do something in the future that uses it (better to have a password # reset token break because this changed or something like that than a security # hole we don't know about). SECRET_KEY = os.urandom(30) if 'Development' in os.environ.get('SERVER_SOFTWARE', ''): DEBUG = True # If DEBUG is True and ALLOWED_HOSTS is empty, Django permits localhost. 
ALLOWED_HOSTS = [] else: DEBUG = False ALLOWED_HOSTS = ['pfif-tools.appspot.com'] # Application definition INSTALLED_APPS = [ 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': ['resources'], 'APP_DIRS': False, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Internationalization LANGUAGE_CODE = 'en' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) STATIC_URL = '/static/'
26.825581
80
0.706545
88b4ec64c6302ef8adda35ec81fbd48bb0e0a469
2,379
py
Python
tests/test_types.py
RodrigoDeRosa/related
3799cde862b8c9500931706f5f1ce5576028f642
[ "MIT" ]
190
2017-05-25T11:57:15.000Z
2022-03-17T01:44:53.000Z
tests/test_types.py
RodrigoDeRosa/related
3799cde862b8c9500931706f5f1ce5576028f642
[ "MIT" ]
42
2017-06-11T14:05:11.000Z
2021-12-14T21:12:07.000Z
tests/test_types.py
RodrigoDeRosa/related
3799cde862b8c9500931706f5f1ce5576028f642
[ "MIT" ]
18
2018-01-05T08:47:30.000Z
2022-01-28T06:24:05.000Z
# coding=utf-8 from related.types import TypedSequence, TypedMapping, TypedSet, ImmutableDict from attr.exceptions import FrozenInstanceError from related.converters import str_if_not_none from collections import OrderedDict import pytest def test_immutable_dict(): immutable = ImmutableDict(dict(a=1)) with pytest.raises(FrozenInstanceError): del immutable['a'] assert immutable == dict(a=1) with pytest.raises(FrozenInstanceError): immutable['b'] = 2 assert immutable == dict(a=1) with pytest.raises(FrozenInstanceError): immutable.clear() assert immutable == dict(a=1) with pytest.raises(FrozenInstanceError): immutable.pop('a') assert immutable == dict(a=1) with pytest.raises(FrozenInstanceError): immutable.something = 0 assert immutable == dict(a=1) with pytest.raises(FrozenInstanceError): del immutable.something_else assert immutable == dict(a=1) def test_str_if_not_none(): unicode_value = "Registered Trademark ®" assert unicode_value == str_if_not_none(unicode_value) assert "1" == str_if_not_none(1) assert str_if_not_none(None) is None def test_sequence(): lst = ["a", "b", "c"] seq = TypedSequence(str, lst) assert seq == lst assert str(seq) == str(lst) assert repr(seq) == repr(lst) assert len(seq) == len(lst) del seq[1] del lst[1] assert seq == lst seq[1] = "d" assert seq != lst with pytest.raises(TypeError): seq[1] = 4.0 def test_mapping(): dct = OrderedDict(a=1, b=2, c=3) map = TypedMapping(int, dct) assert map == dct assert str(map) == str(dct) assert repr(map) == repr(dct) assert len(map) == len(dct) del map["b"] del dct["b"] assert map == dct with pytest.raises(TypeError): map["d"] = 4.0 with pytest.raises(TypeError): map.add(5) map.add(4, 'd') dct['d'] = 4 assert map == dct def test_set(): orig = {"a", "b", "c"} typed = TypedSet(str, orig) assert orig == typed assert len(orig) == len(typed) assert 'a' in str(typed) assert 'a' in repr(typed) typed.add("d") assert "d" in typed assert orig != typed typed.discard("d") assert "d" not in typed assert orig == typed 
with pytest.raises(TypeError): typed.add(5)
21.432432
78
0.628415
88b6a45922cec7be62ee13004dcced019e40a855
2,203
py
Python
ex115Library/home.py
pepev123/PythonEx
8f39751bf87a9099d7b733aa829988595dab2344
[ "MIT" ]
null
null
null
ex115Library/home.py
pepev123/PythonEx
8f39751bf87a9099d7b733aa829988595dab2344
[ "MIT" ]
null
null
null
ex115Library/home.py
pepev123/PythonEx
8f39751bf87a9099d7b733aa829988595dab2344
[ "MIT" ]
null
null
null
def inicio(): print('\033[33m=' * 60) print('MENU PRINCIPAL'.center(50)) print('=' * 60) print('\033[34m1\033[m - \033[35mCadastrar nova pessoa\033[m') print('\033[34m2\033[m - \033[35mVer pessoas cadastradas\033[m') print('\033[34m3\033[m - \033[35mSair do Sistema\033[m') print('\033[33m=\033[m' * 60) def escolha(): while True: try: escolha = int(input('Sua escolha: ')) while escolha > 3 or escolha < 1: print('\033[31mValor digitado não condiz com a tabela\033[m') escolha = int(input('Sua escolha: ')) if escolha > 3 and escolha < 1: break except: print('\033[31mValor digitado não condiz com a tabela\033[m') else: break return escolha def arquivoExiste(nome): try: arquivo = open(nome, 'rt') arquivo.close() except (FileNotFoundError): return False else: return True def criarArquivo(nome): try: arquivo = open(nome, 'wt+') arquivo.close() except: print('Houve algum erro') def opcao1(arquivo) : print('\033[33m-' * 60) print('CADASTRAR PESSOA'.center(50)) print('\033[33m-\033[m' * 60) nome = input('Digite o nome: ') idade = int(input('Digite a idade: ')) try: arquivo = open(arquivo, 'at') except: print('Arquivo não conseguiu ser aberto') else: try: arquivo.write(f'{nome};{idade}\n') except: print('Não consegui computar') else: print('Pessoa cadastrada com sucesso!') arquivo.close() def opcao2(nome): print('\033[33m-' * 60) print('LISTA DE PESSOAS'.center(50)) print('\033[33m-\033[m' * 60) try: arquivo = open(nome, 'rt') except: print('Arquivo não conseguiu ser aberto') else: print('...') print(f'Nome Idade') print('-' * 60) for linha in arquivo: dado = linha.split(';') dado[1] = dado[1].replace('\n', '') print(f'{dado[0]:<30}{dado[1]:>3} anos') arquivo.close()
28.61039
77
0.523831
88b820c740bb5a71614b2fa5da303eb9d7016011
1,613
py
Python
tornado-lab/main8.py
zhyq0826/test-lab
b5b4d1372f2201dc5299bbe8983af18e8d4e1a1a
[ "MIT" ]
null
null
null
tornado-lab/main8.py
zhyq0826/test-lab
b5b4d1372f2201dc5299bbe8983af18e8d4e1a1a
[ "MIT" ]
null
null
null
tornado-lab/main8.py
zhyq0826/test-lab
b5b4d1372f2201dc5299bbe8983af18e8d4e1a1a
[ "MIT" ]
null
null
null
import tornado.ioloop import tornado.web import tornado.gen import logging from concurrent.futures import ThreadPoolExecutor logger = logging.getLogger() logger.setLevel(logging.DEBUG) executor = ThreadPoolExecutor(max_workers=2) @tornado.gen.coroutine def callback(): """ when a function decorator with coroutine, this function return immediately """ import time def ca(): print 'i am callback' tornado.ioloop.IOLoop.instance().add_timeout(time.time()+5, ca) def sleep_func(): import time print 'sleep start' time.sleep(10) print 'sleep end' return 'sleep func' def callback2(future): print 'callback2' print future.result() class MainHandler(tornado.web.RequestHandler): def get(self): self.write('hello world') class SleepHandler(tornado.web.RequestHandler): def get(self): # add one callback to ioloop, wait for next ioloop # this function should not be asynchronous tornado.ioloop.IOLoop.instance().add_callback(callback) self.write('sleep world') class SleepFutureHandler(tornado.web.RequestHandler): #@tornado.gen.coroutine def get(self): future = executor.submit(sleep_func) tornado.ioloop.IOLoop.instance().add_future(future, callback2) self.write('sleep future') settings = { 'debug': True } application = tornado.web.Application([ (r'/', MainHandler), (r'/sleep', SleepHandler), (r'/sleep2', SleepFutureHandler), ],**settings) if __name__ == '__main__': application.listen(8888) tornado.ioloop.IOLoop.instance().start()
22.71831
78
0.691878
31eefe99531de5ae9af50c89852e0a1767f078c7
12,523
py
Python
dpia/views/threats.py
ait-csr/dpia-tool
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
[ "MIT" ]
4
2018-12-25T05:53:17.000Z
2022-02-07T10:07:06.000Z
dpia/views/threats.py
ait-csr/dpia-tool
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
[ "MIT" ]
9
2020-02-12T00:57:33.000Z
2022-03-11T23:24:13.000Z
dpia/views/threats.py
CSR-AIT/dpia-tool
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
[ "MIT" ]
null
null
null
from dpia.modules import * # @primary_assets_required # @supporting_assets_required @login_required def threat_identification(request, q_id=None): ''' Shows a list of the added supporting assets which are assigned to a primary asset. The user here selects threats from the list of generic threats or adds a new threat to a supporting asset. ''' user = request.user q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id) # query supporting assets supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct() args = {} args.update(csrf(request)) args['q'] = q args['supporting_assets'] = supporting_assets return render(request, "threats/threat_identification.html", args) # supporting-asset add @login_required def threat_sa_rel_add(request, sa_id=None): ''' Adds generic threats to a supporting asset. ''' user = request.user supporting_object = get_object_or_404(Supporting, id=sa_id) if supporting_object: pa_sa_rel = PrimarySupportingRel.objects.filter(supporting=supporting_object)[0] # [0]: to select only one object when there are duplicates primary_id = pa_sa_rel.primary_id primary = get_object_or_404(Primary, id=primary_id) q = get_object_or_404(Questionaire, q_in_membership__member=user, id=primary.questionaire_id) data = dict() ## Add Threats to a SA if request.POST and request.is_ajax(): if 'threat' in request.POST: with reversion.create_revision(): checked_threats = request.POST.getlist('threat') threat_list = [] for checked_threat in checked_threats: threat_object = get_object_or_404(Threat, id=checked_threat) # create a new relationship with the above objects, no duplicates rel, created = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=supporting_object, threat=threat_object) threat_list.append(threat_object.name) comment = ", ".join(threat_list) # Store some meta-information. save_revision_meta(user, q, 'Added generic threats "%s" to supporting asset "%s".' 
%(comment, supporting_object)) ## ajax data django_messages = [] messages.success(request, u'Generic threats were added successfully to supporting asset "%s".' %(supporting_object)) for message in messages.get_messages(request): django_messages.append({ "level": message.level, "message": message.message, "extra_tags": message.tags, }) data['messages'] = django_messages data['form_is_valid'] = True # query supporting assets supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct() args = {} args['q'] = q args['supporting_assets'] = supporting_assets data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args) else: data['form_is_valid'] = False # query generic_threats and each newly created Threat per questionnaire generic_threats = Threat.objects.all() #.exclude(~Q(threat_sa_rel__affected_supporting_asset__primary__questionaire=q), threat_sa_rel__affected_supporting_asset__primary__questionaire__isnull=False).order_by("type_of_jeopardy") # # query threats the user selects // of the instant questionaire # selected_threats = Threat_SA_REL.objects.prefetch_related().all().filter(affected_supporting_asset__primary__questionaire=q).distinct() args = {} args.update(csrf(request)) args['q'] = q args['supporting_object'] = supporting_object args['generic_threats'] = generic_threats args['primary'] = primary data['html_form'] = render_to_string('threats/threat_sa_rel_add.html', args, request=request) return JsonResponse(data) @login_required def threat_add(request, q_id=None, sa_id=None): ''' Adds new threats (defined by the user) to a supporting asset. 
''' user = request.user q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id) sa = get_object_or_404(Supporting, id=sa_id) data = dict() ## Add Threat threat_form = ThreatForm(request.POST or None) if request.POST and request.is_ajax(): if threat_form.is_valid(): with reversion.create_revision(): threat = threat_form.save(commit=False) threat.supporting_asset_type = sa.supporting_type threat.save() new_threat_sa_rel = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=sa, threat=threat) # Store some meta-information. save_revision_meta(user, q, 'Added new threat "%s" to supporting asset "%s".' %(threat.name, sa)) ## ajax data django_messages = [] messages.success(request, u'New threat "%s" was added successfully to supporting asset "%s".' %(threat.name, sa)) for message in messages.get_messages(request): django_messages.append({ "level": message.level, "message": message.message, "extra_tags": message.tags, }) data['messages'] = django_messages data['form_is_valid'] = True # query supporting assets supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct() args = {} args['q'] = q args['supporting_assets'] = supporting_assets data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args) else: data['form_is_valid'] = False args = {} args.update(csrf(request)) args['q'] = q args['sa'] = sa args['threat_form'] = threat_form data['html_form'] = render_to_string('threats/threat_add.html', args, request=request) return JsonResponse(data) @login_required def threat_rel_delete(request, q_id=None, threat_id=None): ''' Delete a relationship between threat and supporting asset. It doesn't delete the threat completely; it simply removes it from the supporting asset it is assigned to. 
''' user = request.user q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id) threat_rel = get_object_or_404(Threat_SA_REL, id=threat_id) data = dict() if request.POST and request.is_ajax(): threat_rel.delete() ## ajax data django_messages = [] messages.success(request, u'Threat "%s" was removed successfully from supporting asset "%s".' %(threat_rel.threat, threat_rel.affected_supporting_asset)) for message in messages.get_messages(request): django_messages.append({ "level": message.level, "message": message.message, "extra_tags": message.tags, }) data['form_is_valid'] = True data['messages'] = django_messages # query threats the user has selected and order by the MaxValue of the Sum selected_threats = Threat_SA_REL.objects.filter(affected_supporting_asset__questionaire=q) # query supporting assets supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct() args = {} args['q'] = q args['supporting_assets'] = supporting_assets data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args) else: args = {} args.update(csrf(request)) args['q'] = q args['threat_rel'] = threat_rel data['html_form'] = render_to_string('threats/threat_rel_remove.html', args, request=request) return JsonResponse(data) # @supporting_assets_required # @threats_required @login_required def threat_assessment(request, q_id=None): ''' Shows a formset table of all the threats (ordered by their "likelihood" value) selected by the user in the step "Threat Identification". It accepts two values, namely "level of vulnerability" and "risk source capability". If either of them is entered above the max number value (4) or not entered at all, an error is raised. The likelihood value is automatically calculated as the sum of the level of vulnerability and risk source capability. 
''' user = request.user q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id) # query threats the user has selected and order by the MaxValue of the Sum; # and filter only those that have a relationship to a primary asset. the "is_null" filtering is done in case the user goes back to # the primary list step to remove supporting assets. selected_threats = q.get_threats() ## Selected threats formset ThreatFormset = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form, extra=0) threat_formset = ThreatFormset(queryset=selected_threats) if request.POST: if selected_threats.exists(): threat_formset = ThreatFormset(request.POST, request.FILES) if threat_formset.is_valid(): with reversion.create_revision(): for form in threat_formset.forms: threat = form.save(commit=False) threat.likelihood = threat.level_of_vulnerability + threat.risk_source_capability threat.save() threat_formset.save() threat_list = selected_threats.values_list('threat__name', flat=True) comment = ", ".join(threat_list) # Store some meta-information. save_revision_meta(user, q, 'Assessed likelihood of threats "{}".'.format(comment)) messages.success(request, u'Likelihood of threats was assessed successfully.') return redirect(reverse('risk_assessment', args=[q.id])) else: messages.error(request, u'Please fill out the required fields.') else: return redirect('risk_assessment', q.id) args = {} args.update(csrf(request)) args['q'] = q args['selected_threats'] = selected_threats args['threat_formset'] = threat_formset return render(request, "threats/threat_assessment.html", args) # @supporting_assets_required # @threats_required # @threat_assessment_required # @risk_assessment_required @login_required def threat_controls(request, q_id=None): ''' Shows a formset list of all the assessed threats. The user is required to fill out only the controls field. 
''' user = request.user q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id) ## query Threats threats = q.get_threats() ThreatFormset2 = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form2, extra=0) if request.POST: if threats.exists(): threat_formset = ThreatFormset2(request.POST, queryset=threats) for form in threat_formset.forms: form.fields['control'].required = True with reversion.create_revision(): if threat_formset.is_valid(): threat_formset.save() # Store some meta-information. threat_list = threats.values_list('threat__name', flat=True) comment = ", ".join(threat_list) save_revision_meta(user, q, 'Implemented controls to threats "{}".'.format(comment)) messages.success(request, u'Controls were implemented successfully.') return redirect(reverse('risk_mitigation', args=[q.id])) else: messages.error(request, u'Please fill out the required fields.') else: return redirect('risk_mitigation', q.id) else: threat_formset = ThreatFormset2(queryset=threats) args = {} args.update(csrf(request)) args['q'] = q args['threat_formset'] = threat_formset return render(request, "threats/threat_controls.html", args)
45.046763
231
0.649844
31ef1dd8e273d8cef398fef10de951765faeb7fd
10,737
py
Python
ROS/Thymio/controller.py
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
ROS/Thymio/controller.py
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
ROS/Thymio/controller.py
gyani91/Robotics
124b9df7ae82e8c9b9ad54c74292585e81c7a3bb
[ "MIT" ]
null
null
null
#!/usr/bin/env python import rospy import sys import numpy as np from geometry_msgs.msg import Pose, Twist from nav_msgs.msg import Odometry from sensor_msgs.msg import Range from math import cos, sin, asin, tan, atan2 # msgs and srv for working with the set_model_service from gazebo_msgs.msg import ModelState from gazebo_msgs.srv import SetModelState from std_srvs.srv import Empty from visualization_msgs.msg import Marker from geometry_msgs.msg import Quaternion, Pose, Point, Vector3 from std_msgs.msg import Header, ColorRGBA import math # a handy tool to convert orientations from tf.transformations import euler_from_quaternion, quaternion_from_euler class BasicThymio: def __init__(self, thymio_name): """init""" self.thymio_name = thymio_name rospy.init_node('basic_thymio_controller', anonymous=True) # Publish to the topic '/thymioX/cmd_vel'. self.velocity_publisher = rospy.Publisher(self.thymio_name + '/cmd_vel', Twist, queue_size=10) # A subscriber to the topic '/turtle1/pose'. self.update_pose is called # when a message of type Pose is received. 
self.pose_subscriber = rospy.Subscriber(self.thymio_name + '/odom', Odometry, self.update_state) self.prox_center_sub = rospy.Subscriber(self.thymio_name + '/proximity/center', Range, self.update_prox_center) self.prox_center_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/center_left', Range, self.update_prox_center_left) self.prox_center_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/center_right', Range, self.update_prox_center_right) self.prox_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/left', Range, self.update_prox_left) self.prox_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/right', Range, self.update_prox_right) self.prox_rear_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/rear_left', Range, self.update_prox_rear_left) self.prox_rear_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/rear_right', Range, self.update_prox_rear_right) self.current_pose = Pose() self.current_twist = Twist() self.current_prox_center = Range() self.current_prox_center_left = Range() self.current_prox_center_right = Range() self.current_prox_left = Range() self.current_prox_right = Range() # publish at this rate self.tick_rate = 50 self.rate = rospy.Rate(self.tick_rate) self.vel_msg = Twist() def update_prox_center(self, data): self.current_prox_center = data def update_prox_center_left(self, data): self.current_prox_center_left = data def update_prox_center_right(self, data): self.current_prox_center_right = data def update_prox_left(self, data): self.current_prox_left = data def update_prox_right(self, data): self.current_prox_right = data def update_prox_rear_left(self, data): self.current_prox_rear_left = data def update_prox_rear_right(self, data): self.current_prox_rear_right = data def thymio_state_service_request(self, position, orientation): """Request the service (set thymio state values) exposed by the simulated thymio. A teleportation tool, by default in gazebo world frame. 
Be aware, this does not mean a reset (e.g. odometry values).""" rospy.wait_for_service('/gazebo/set_model_state') try: model_state = ModelState() model_state.model_name = self.thymio_name model_state.reference_frame = '' # the frame for the pose information model_state.pose.position.x = position[0] model_state.pose.position.y = position[1] model_state.pose.position.z = position[2] qto = quaternion_from_euler(orientation[0], orientation[0], orientation[0], axes='sxyz') model_state.pose.orientation.x = qto[0] model_state.pose.orientation.y = qto[1] model_state.pose.orientation.z = qto[2] model_state.pose.orientation.w = qto[3] # a Twist can also be set but not recomended to do it in a service gms = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState) response = gms(model_state) return response except rospy.ServiceException, e: print "Service call failed: %s"%e def update_state(self, data): """A new Odometry message has arrived. See Odometry msg definition.""" # Note: Odmetry message also provides covariance self.current_pose = data.pose.pose self.current_twist = data.twist.twist quat = ( self.current_pose.orientation.x, self.current_pose.orientation.y, self.current_pose.orientation.z, self.current_pose.orientation.w) (roll, pitch, yaw) = euler_from_quaternion (quat) # rospy.loginfo("State from Odom: (%.5f, %.5f, %.5f) " % (self.current_pose.position.x, self.current_pose.position.y, yaw)) def turn_left(self, speed): self.vel_msg.angular.z = speed # print(' turning left') self.velocity_publisher.publish(self.vel_msg) def turn_right(self, speed): self.vel_msg.angular.z = -speed self.velocity_publisher.publish(self.vel_msg) def make_figure_8(self): ang_speed = 1.0 full_circle_duration = rospy.Duration(2 * math.pi / ang_speed) start = rospy.Time() while start.secs == 0: start = rospy.get_rostime() print('start: ' + str(start.nsecs)) self.vel_msg.linear.x = 0.2 while not rospy.is_shutdown(): now = rospy.get_rostime() if (now - start) < full_circle_duration: 
self.turn_left(ang_speed) elif (now - start) < 2 * full_circle_duration: self.turn_right(ang_speed) else: break self.rate.sleep() self.vel_msg.linear.x = 0 self.vel_msg.angular.z = 0 self.velocity_publisher.publish(self.vel_msg) rospy.spin() def is_close_to_wall(self): return (0 < self.current_prox_left.range < 0.08) or \ (0 < self.current_prox_center_left.range < 0.08) or \ (0 < self.current_prox_center.range < 0.08) or \ (0 < self.current_prox_center_right.range < 0.08) or \ (0 < self.current_prox_right.range < 0.08) def drive_to_wall(self): self.vel_msg.linear.x = 0.1 self.velocity_publisher.publish(self.vel_msg) close = self.is_close_to_wall() while not close: self.velocity_publisher.publish(self.vel_msg) close = self.is_close_to_wall() self.rate.sleep() self.vel_msg.linear.x = 0 self.velocity_publisher.publish(self.vel_msg) # TODO: it seems that if this script is started when the thymio # is already close to the wall, the thymio sometimes rotated the wrong way # Also, we might want to make it proportional, and maybe incorporate # the sensor inputs of center_left and center_right as well def face_wall(self): wall_is_left = self.current_prox_left.range < self.current_prox_right.range last_prox_center = self.current_prox_center if wall_is_left: ang_vel = 0.1 else: ang_vel = -0.1 while abs(self.current_prox_center_left.range - self.current_prox_center_right.range) > 0.005: self.vel_msg.angular.z = ang_vel self.velocity_publisher.publish(self.vel_msg) self.rate.sleep() self.vel_msg.angular.z = 0.0 self.velocity_publisher.publish(self.vel_msg) # TODO: might want to have a proportional controller # Also, need to incorporate rear sensor inputs to get perfect final orientation # (now it just turns 180 degrees and ignores rear sensor inputs) def turn_around(self): print('turning around') ang_speed = 1.0 half_circle_duration = rospy.Duration(math.pi / ang_speed) start = rospy.Time() while start.secs == 0: start = rospy.get_rostime() print('start: ' + str(start.nsecs)) 
while rospy.get_rostime() - start < half_circle_duration: self.vel_msg.angular.z = ang_speed self.velocity_publisher.publish(self.vel_msg) self.rate.sleep() self.vel_msg.angular.z = 0 self.velocity_publisher.publish(self.vel_msg) # FIXME: thymio stops too late, probably because the sensors are not in the # same location as the base_link def drive_forward(self): current_dist = self.current_prox_rear_left.range dist_to_go = 2.0 - current_dist lin_vel = 0.2 fwd_duration = rospy.Duration(dist_to_go / lin_vel) start = rospy.Time() while start.secs == 0: start = rospy.get_rostime() print('start: ' + str(start.nsecs)) while rospy.get_rostime() - start < fwd_duration: self.vel_msg.linear.x = lin_vel self.velocity_publisher.publish(self.vel_msg) self.rate.sleep() self.vel_msg.linear.x = 0 self.velocity_publisher.publish(self.vel_msg) def task1(self): self.make_figure_8() def task2(self): self.drive_to_wall() self.face_wall() def task3(self): self.drive_to_wall() self.face_wall() self.turn_around() self.drive_forward() def usage(): return "Wrong number of parameters. basic_move.py [thymio_name]" if __name__ == '__main__': if len(sys.argv) == 2: thymio_name = sys.argv[1] print "Now working with robot: %s" % thymio_name else: print usage() sys.exit(1) thymio = BasicThymio(thymio_name) # Teleport the robot to a certain pose. If pose is different to the # origin of the world, you must account for a transformation between # odom and gazebo world frames. # NOTE: The goal of this step is *only* to show the available # tools. The launch file process should take care of initializing # the simulation and spawning the respective models #thymio.thymio_state_service_request([0.,0.,0.], [0.,0.,0.]) #rospy.sleep(1.) thymio.task1() # thymio.task2() # thymio.task3()
37.28125
131
0.644593
31ef8505dceb7b8308004964199bac366c51558c
54
py
Python
src/pkgcore/merge/__init__.py
thesamesam/pkgcore
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
[ "BSD-3-Clause" ]
null
null
null
src/pkgcore/merge/__init__.py
thesamesam/pkgcore
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
[ "BSD-3-Clause" ]
null
null
null
src/pkgcore/merge/__init__.py
thesamesam/pkgcore
be2d9264a3fe61a323f0075cbc4838ed6ec5ffcf
[ "BSD-3-Clause" ]
null
null
null
""" package related livefs modification subsystem """
13.5
45
0.759259
31efc2692f61977bbe23784db9dd5034a2c6c959
1,153
py
Python
Week 2/id_165/LeetCode_105_165.py
larryRishi/algorithm004-05
e60d0b1176acd32a9184b215e36d4122ba0b6263
[ "Apache-2.0" ]
1
2019-10-12T06:48:45.000Z
2019-10-12T06:48:45.000Z
Week 2/id_165/LeetCode_105_165.py
larryRishi/algorithm004-05
e60d0b1176acd32a9184b215e36d4122ba0b6263
[ "Apache-2.0" ]
1
2019-12-01T10:02:03.000Z
2019-12-01T10:02:03.000Z
Week 2/id_165/LeetCode_105_165.py
larryRishi/algorithm004-05
e60d0b1176acd32a9184b215e36d4122ba0b6263
[ "Apache-2.0" ]
null
null
null
# 根据一棵树的前序遍历与中序遍历构造二叉树。 # # 注意: # 你可以假设树中没有重复的元素。 # # 例如,给出 # # 前序遍历 preorder = [3,9,20,15,7] # 中序遍历 inorder = [9,3,15,20,7] # # 返回如下的二叉树: # # 3 # / \ # 9 20 # / \ # 15 7 # Related Topics 树 深度优先搜索 数组 # leetcode submit region begin(Prohibit modification and deletion) # Definition for a binary tree node. class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): def buildTreeNode(self, preorder, inorder): if not preorder: return None root = preorder[0] node = TreeNode(root) partition = inorder.index(root) node.left = self.buildTreeNode(preorder[1:partition + 1], inorder[0:partition]) node.right = self.buildTreeNode(preorder[partition + 1:], inorder[partition + 1:]) return node def buildTree(self, preorder, inorder): """ :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode """ return self.buildTreeNode(preorder, inorder) # leetcode submit region end(Prohibit modification and deletion)
19.542373
90
0.608846
31f3b0c501535082c9dc4e17a5fec3613081ac58
1,644
py
Python
GFG/cloud.py
Navya-tec/100daysofDSA
fa3338122099afdcf01b0f1725d7c5613b53bed8
[ "MIT" ]
13
2021-02-27T10:27:22.000Z
2021-10-05T13:56:28.000Z
GFG/cloud.py
Navya-tec/100daysofDSA
fa3338122099afdcf01b0f1725d7c5613b53bed8
[ "MIT" ]
15
2021-03-06T14:23:49.000Z
2022-01-29T07:10:21.000Z
GFG/cloud.py
Navya-tec/100daysofDSA
fa3338122099afdcf01b0f1725d7c5613b53bed8
[ "MIT" ]
24
2021-03-04T06:15:12.000Z
2021-11-14T22:15:13.000Z
# A child is playing a cloud hopping game. In this game, there are sequentially numbered clouds that can be thunderheads or cumulus clouds. The character must jump from cloud to cloud until it reaches the start again. # There is an array of clouds, e and an energy level e=100. The character starts from c[0] and uses 1 unit of energy to make a jump of size k to cloud c[(i+k)%n]. If it lands on a thundercloud, c[i]=1, its energy (e) decreases by 2 additional units. The game ends when the character lands back on cloud 0. # Given the values of n, k, and the configuration of the clouds as an array c, determine the final value of e after the game ends. # Example. c = [0,0,1,0] and k=2, then the character makes the following jumps: # The indices of the path are 0 -> 2 -> 0 . The energy level reduces by 1 for each jump to 98. The character landed on one thunderhead at an additional cost of 2 energy units. The final energy level is 96. def jumpingOnClouds(c, k): # There is an array of clouds, e and an energy level e=100. The character starts from c[0] and uses 1 unit of energy to make a jump of size k to cloud c[(i+k)%n]. If it lands on a thundercloud, c[i]=1, its energy (e) decreases by 2 additional units. The game ends when the character lands back on cloud 0. # The indices of the path are 0 -> 2 -> 0 . The energy level reduces by 1 for each jump to 98. The character landed on one thunderhead at an additional cost of 2 energy units. The final energy level is 96. e = 100 i = 0 while i != 0: if c[(i+k)%len(c)] == 1: e -= 2 e -= 1 i = (i+k)%len(c) return e
68.5
309
0.695864
31f50a275cce9e7222985c09de6a704fd2d856df
1,575
py
Python
pyACA/PitchTimeAmdf.py
ruohoruotsi/pyACA
339e9395b65a217aa5965638af941b32d5c95454
[ "MIT" ]
null
null
null
pyACA/PitchTimeAmdf.py
ruohoruotsi/pyACA
339e9395b65a217aa5965638af941b32d5c95454
[ "MIT" ]
null
null
null
pyACA/PitchTimeAmdf.py
ruohoruotsi/pyACA
339e9395b65a217aa5965638af941b32d5c95454
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ computes the lag of the amdf function Args: x: audio signal iBlockLength: block length in samples iHopLength: hop length in samples f_s: sample rate of audio data (unused) Returns: f frequency t time stamp for the frequency value """ import numpy as np import math def PitchTimeAmdf(x, iBlockLength, iHopLength, f_s): # initialize f_max = 2000 f_min = 50 iNumOfBlocks = math.ceil(x.size / iHopLength) # compute time stamps t = (np.arange(0, iNumOfBlocks) * iHopLength + (iBlockLength / 2)) / f_s # allocate memory f = np.zeros(iNumOfBlocks) eta_min = int(round(f_s / f_max)) - 1 eta_max = int(round(f_s / f_min)) - 1 for n in range(0, iNumOfBlocks): i_start = n * iHopLength i_stop = np.min([x.size - 1, i_start + iBlockLength - 1]) # calculate the acf if not x[np.arange(i_start, i_stop + 1)].sum(): continue else: x_tmp = x[np.arange(i_start, i_stop + 1)] afCorr = computeAmdf(x_tmp, eta_max) # find the coefficients specified in eta f[n] = np.argmin(afCorr[np.arange(eta_min + 1, afCorr.size)]) + 1 # convert to Hz f[n] = f_s / (f[n] + eta_min + 1) return (f, t) def computeAmdf(x, eta_max): K = x.shape[0] if K <= 0: return 0 afAmdf = np.ones(K) for eta in range(0, np.min([K, eta_max + 1])): afAmdf[eta] = np.sum(np.abs(x[np.arange(0, K - 1 - eta)] - x[np.arange(eta + 1, K)])) / K return (afAmdf)
23.161765
97
0.581587
31f50c4b34b2287216379f7b02c2f0676d6b7602
636
py
Python
models/edit.py
DOSYCORPS/correct-the-news
92509c1bf8ba88fec273b812142f1b3d73312840
[ "MIT" ]
1
2017-10-16T16:02:33.000Z
2017-10-16T16:02:33.000Z
models/edit.py
dosyago-corp/correct-the-news
92509c1bf8ba88fec273b812142f1b3d73312840
[ "MIT" ]
3
2017-10-19T07:58:42.000Z
2017-10-21T12:01:46.000Z
models/edit.py
DOSYCORPS/correct-the-news
92509c1bf8ba88fec273b812142f1b3d73312840
[ "MIT" ]
null
null
null
from google.appengine.ext import ndb ATTACHMENTS = [ 'beforeBegin', 'afterEnd', 'middle' ] TYPES = [ 'strike', 'insert' ] class Modification(ndb.Model): type = ndb.StringProperty(choices=TYPES) start_index = ndb.IntegerProperty() trigram_at_start = ndb.StringProperty() content = ndb.StringProperty() class Edit(ndb.Model): selector = ndb.StringProperty() attachment = ndb.StringProperty(choices=ATTACHMENTS) selected_index = ndb.IntegerProperty() sentence_index = ndb.IntegerProperty() trigram_at_attach_point = ndb.StringProperty() modifications = ndb.StructuredProperty(Modification, repeated=True)
23.555556
69
0.75
31f5361ec9609ecfe4d1234a6a87870a550e57fe
316
py
Python
depccg/lang.py
masashi-y/myccg
263fd0afa7a619626fc2d506016625b6068bb27b
[ "MIT" ]
75
2017-05-01T09:32:56.000Z
2022-03-07T02:57:31.000Z
depccg/lang.py
masashi-y/myccg
263fd0afa7a619626fc2d506016625b6068bb27b
[ "MIT" ]
23
2017-05-10T08:28:57.000Z
2022-02-15T05:15:25.000Z
depccg/lang.py
masashi-y/myccg
263fd0afa7a619626fc2d506016625b6068bb27b
[ "MIT" ]
15
2017-05-08T13:02:33.000Z
2022-03-07T01:40:26.000Z
import logging logger = logging.getLogger(__name__) GLOBAL_LANG_NAME = 'en' def set_global_language_to(lang: str) -> None: global GLOBAL_LANG_NAME logger.info('Setting the global language config to: %s', lang) GLOBAL_LANG_NAME = lang def get_global_language() -> str: return GLOBAL_LANG_NAME
17.555556
66
0.737342
31f5cac689f164c99d0da2f1eb8dc6d483e34f4e
6,878
py
Python
trainfile/mfeam_shapenet-spix-disc.py
aabbcco/ssn-3d-pytorch
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
[ "MIT" ]
null
null
null
trainfile/mfeam_shapenet-spix-disc.py
aabbcco/ssn-3d-pytorch
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
[ "MIT" ]
null
null
null
trainfile/mfeam_shapenet-spix-disc.py
aabbcco/ssn-3d-pytorch
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
[ "MIT" ]
null
null
null
import os import math import numpy as np import time import torch import torch.optim as optim from torch.utils.data import DataLoader from tensorboardX import SummaryWriter import sys sys.path.append(os.path.dirname("../")) from lib.utils.meter import Meter from models.model_MNFEAM import MFEAM_SSN from lib.dataset.shapenet import shapenet_spix from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse, uniform_compact_loss from lib.MEFEAM.MEFEAM import discriminative_loss @torch.no_grad() def eval(model, loader, pos_scale, device): def achievable_segmentation_accuracy(superpixel, label): """ Function to calculate Achievable Segmentation Accuracy: ASA(S,G) = sum_j max_i |s_j \cap g_i| / sum_i |g_i| Args: input: superpixel image (H, W), output: ground-truth (H, W) """ TP = 0 unique_id = np.unique(superpixel) for uid in unique_id: mask = superpixel == uid label_hist = np.histogram(label[mask]) maximum_regionsize = label_hist[0].max() TP += maximum_regionsize return TP / label.size model.eval() # change the mode of model to eval sum_asa = 0 for data in loader: inputs, labels = data # b*c*npoint inputs = inputs.to(device) # b*c*w*h labels = labels.to(device) # sematic_lable inputs = pos_scale * inputs # calculation,return affinity,hard lable,feature tensor Q, H, feat = model(inputs) asa = achievable_segmentation_accuracy( H.to("cpu").detach().numpy(), labels.to("cpu").numpy()) # return data to cpu sum_asa += asa model.train() return sum_asa / len(loader) # cal asa def update_param(data, model, optimizer, compactness, pos_scale, device, disc_loss): inputs, labels, _, spix = data inputs = inputs.to(device) labels = labels.to(device) inputs = pos_scale * inputs (Q, H, _, _), msf_feature = model(inputs) recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels) compact_loss = reconstruct_loss_with_mse(Q, inputs, H) disc = disc_loss(msf_feature, spix) #uniform_compactness = uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), 
H,device=device) loss = recons_loss + compactness * compact_loss + disc optimizer.zero_grad() # clear previous grad loss.backward() # cal the grad optimizer.step() # backprop return { "loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item(), "disc": disc.item() } def train(cfg): if torch.cuda.is_available(): device = "cuda" else: device = "cpu" model = MFEAM_SSN(10, 50).to(device) disc_loss = discriminative_loss(0.1, 0.5) optimizer = optim.Adam(model.parameters(), cfg.lr) train_dataset = shapenet_spix(cfg.root) train_loader = DataLoader(train_dataset, cfg.batchsize, shuffle=True, drop_last=True, num_workers=cfg.nworkers) # test_dataset = shapenet.shapenet(cfg.root, split="test") # test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False) meter = Meter() iterations = 0 max_val_asa = 0 writer = SummaryWriter(log_dir='log', comment='traininglog') while iterations < cfg.train_iter: for data in train_loader: iterations += 1 metric = update_param(data, model, optimizer, cfg.compactness, cfg.pos_scale, device, disc_loss) meter.add(metric) state = meter.state(f"[{iterations}/{cfg.train_iter}]") print(state) # return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()} writer.add_scalar("comprehensive/loss", metric["loss"], iterations) writer.add_scalar("loss/reconstruction_loss", metric["reconstruction"], iterations) writer.add_scalar("loss/compact_loss", metric["compact"], iterations) writer.add_scalar("loss/disc_loss", metric["disc"], iterations) if (iterations % 1000) == 0: torch.save( model.state_dict(), os.path.join(cfg.out_dir, "model_iter" + str(iterations) + ".pth")) # if (iterations % cfg.test_interval) == 0: # asa = eval(model, test_loader, cfg.pos_scale, device) # print(f"validation asa {asa}") # writer.add_scalar("comprehensive/asa", asa, iterations) # if asa > max_val_asa: # max_val_asa = asa # torch.save(model.state_dict(), os.path.join( # cfg.out_dir, 
"bset_model_sp_loss.pth")) if iterations == cfg.train_iter: break unique_id = str(int(time.time())) torch.save(model.state_dict(), os.path.join(cfg.out_dir, "model" + unique_id + ".pth")) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default='../shapenet_partseg_spix', help="/ path/to/shapenet") parser.add_argument("--out_dir", default="./log", type=str, help="/path/to/output directory") parser.add_argument("--batchsize", default=8, type=int) parser.add_argument("--nworkers", default=8, type=int, help="number of threads for CPU parallel") parser.add_argument("--lr", default=1e-6, type=float, help="learning rate") parser.add_argument("--train_iter", default=10000, type=int) parser.add_argument("--fdim", default=10, type=int, help="embedding dimension") parser.add_argument("--niter", default=5, type=int, help="number of iterations for differentiable SLIC") parser.add_argument("--nspix", default=50, type=int, help="number of superpixels") parser.add_argument("--pos_scale", default=10, type=float) parser.add_argument("--compactness", default=1e-4, type=float) parser.add_argument("--test_interval", default=100, type=int) args = parser.parse_args() os.makedirs(args.out_dir, exist_ok=True) train(args)
35.453608
112
0.577639
31f5d73f045c9db55e784a4166f4f9708822341f
5,331
py
Python
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
uktrade/directory-cms
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
[ "MIT" ]
6
2018-03-20T11:19:07.000Z
2021-10-05T07:53:11.000Z
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
uktrade/directory-cms
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
[ "MIT" ]
802
2018-02-05T14:16:13.000Z
2022-02-10T10:59:21.000Z
great_international/migrations/0023_internationaleuexitformpage_internationaleuexitformsuccesspage.py
uktrade/directory-cms
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
[ "MIT" ]
6
2019-01-22T13:19:37.000Z
2019-07-01T10:35:26.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-05-09 12:19 from __future__ import unicode_literals import core.model_fields import core.models import core.validators import core.wagtail_fields from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0040_page_draft_title'), ('great_international', '0022_auto_20190508_1300'), ] operations = [ migrations.CreateModel( name='InternationalEUExitFormPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)), ('uses_tree_based_routing', models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')), ('breadcrumbs_label', models.CharField(max_length=50)), ('heading', models.CharField(max_length=255)), ('body_text', core.model_fields.MarkdownField(validators=[core.validators.slug_hyperlinks])), ('submit_button_text', models.CharField(max_length=50)), ('disclaimer', models.TextField(max_length=500)), ('first_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('first_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('last_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('last_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('email_help_text', 
core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('email_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('organisation_type_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('organisation_type_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('company_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('company_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('country_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('country_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('city_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('city_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ('comment_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')), ('comment_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')), ], options={ 'abstract': False, }, bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'), ), migrations.CreateModel( name='InternationalEUExitFormSuccessPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)), ('uses_tree_based_routing', 
models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')), ('breadcrumbs_label', models.CharField(max_length=50)), ('heading', models.CharField(max_length=255, verbose_name='Title')), ('body_text', models.CharField(max_length=255, verbose_name='Body text')), ('next_title', models.CharField(max_length=255, verbose_name='Title')), ('next_body_text', models.CharField(max_length=255, verbose_name='Body text')), ], options={ 'abstract': False, }, bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'), ), ]
74.041667
285
0.673982
31f5dfe6cc07ddb8e01d014bf1bbf7cebbdd8060
3,800
py
Python
integrationtest/vm/basic/account/test_multi_users_cli_login.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
1
2021-03-21T12:41:11.000Z
2021-03-21T12:41:11.000Z
integrationtest/vm/basic/account/test_multi_users_cli_login.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
null
null
null
integrationtest/vm/basic/account/test_multi_users_cli_login.py
sherry546/zstack-woodpecker
54a37459f2d72ce6820974feaa6eb55772c3d2ce
[ "Apache-2.0" ]
1
2017-05-19T06:40:40.000Z
2017-05-19T06:40:40.000Z
''' New Integration Test for 2 normal users zstack-cli login @author: MengLai ''' import hashlib import zstackwoodpecker.operations.account_operations as acc_ops import zstackwoodpecker.test_util as test_util import zstackwoodpecker.test_lib as test_lib import zstackwoodpecker.test_state as test_state import zstackwoodpecker.zstack_test.zstack_test_account as test_account import zstackwoodpecker.zstack_test.zstack_test_user as test_user import zstacklib.utils.shell as shell test_stub = test_lib.lib_get_test_stub() test_obj_dict = test_state.TestStateDict() def login_cli_by_user(account_name, user_name, user_pass): cmd = '''zstack-cli << EOF LogInByUser accountName=%s userName=%s password=%s quit ''' % (account_name, user_name, user_pass) return shell.call(cmd) def test_query(): cmd = '''zstack-cli << EOF QueryVmNic quit ''' return shell.call(cmd) def logout_cli(): cmd = '''zstack-cli << EOF LogOut quit ''' return shell.call(cmd) def test(): import uuid test_util.test_dsc('Create an normal account and login with it') account_name1 = uuid.uuid1().get_hex() account_pass1 = hashlib.sha512(account_name1).hexdigest() test_account1 = test_account.ZstackTestAccount() test_account1.create(account_name1, account_pass1) test_obj_dict.add_account(test_account1) test_account_session = acc_ops.login_by_account(account_name1, account_pass1) test_util.test_dsc('Create an normal user-1 under the new account and login with it') user_name1 = uuid.uuid1().get_hex() user_pass1 = hashlib.sha512(user_name1).hexdigest() test_user1 = test_user.ZstackTestUser() test_user1.create(user_name1, user_pass1, session_uuid = test_account_session) test_obj_dict.add_user(test_user1) login_output = login_cli_by_user(account_name1, user_name1, user_name1) if login_output.find('%s/%s >>>' % (account_name1, user_name1)) < 0: test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output)) test_util.test_dsc('Create an normal user-2 under the new account and login with 
it') user_name2 = uuid.uuid1().get_hex() user_pass2 = hashlib.sha512(user_name2).hexdigest() test_user2 = test_user.ZstackTestUser() test_user2.create(user_name2, user_pass2, session_uuid = test_account_session) test_obj_dict.add_user(test_user2) login_output = login_cli_by_user(account_name1, user_name2, user_name2) if login_output.find('%s/%s >>>' % (account_name1, user_name2)) < 0: test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output)) test_util.test_dsc('Delete user-2 and check the login status') test_user2.delete() test_obj_dict.rm_user(test_user2) query_output = test_query() if query_output.find('- >>>') < 0: test_util.test_fail('zstack-cli is not display correct after delete user: %s' % (query_output)) test_util.test_dsc('login user-1, logout user-1 and check the login status') login_output = login_cli_by_user(account_name1, user_name1, user_name1) if login_output.find('%s/%s >>>' % (account_name1, user_name1)) < 0: test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output)) logout_output = logout_cli() if logout_output.find('- >>>') < 0: test_util.test_fail('zstack-cli is not display correct after logout: %s' % (login_output)) test_user1.delete() test_account1.delete() test_obj_dict.rm_user(test_user1) test_obj_dict.rm_account(test_account1) #Will be called only if exception happens in test(). def error_cleanup(): test_lib.lib_error_cleanup(test_obj_dict)
40.860215
108
0.727105
31f62416b0ccc5186e179c986b3ee82c422d3de0
5,226
py
Python
venv/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
10,024
2015-01-01T13:06:43.000Z
2022-03-31T12:45:25.000Z
venv/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
3,191
2015-01-01T18:13:11.000Z
2022-03-31T22:06:00.000Z
venv/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py
amelliaaas/tugastkc4
f442382c72379e911f3780543b95345a3b1c9407
[ "Apache-2.0" ]
3,272
2015-01-01T05:04:53.000Z
2022-03-31T17:46:35.000Z
"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module.""" import math import pytest import networkx as nx class TestStructuralHoles: """Unit tests for computing measures of structural holes. The expected values for these functions were originally computed using the proprietary software `UCINET`_ and the free software `IGraph`_ , and then computed by hand to make sure that the results are correct. .. _UCINET: https://sites.google.com/site/ucinetsoftware/home .. _IGraph: http://igraph.org/ """ def setup(self): self.D = nx.DiGraph() self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)]) self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1} # Example from http://www.analytictech.com/connections/v20(1)/holes.htm self.G = nx.Graph() self.G.add_edges_from( [ ("A", "B"), ("A", "F"), ("A", "G"), ("A", "E"), ("E", "G"), ("F", "G"), ("B", "G"), ("B", "D"), ("D", "G"), ("G", "C"), ] ) self.G_weights = { ("A", "B"): 2, ("A", "F"): 3, ("A", "G"): 5, ("A", "E"): 2, ("E", "G"): 8, ("F", "G"): 3, ("B", "G"): 4, ("B", "D"): 1, ("D", "G"): 3, ("G", "C"): 10, } def test_constraint_directed(self): constraint = nx.constraint(self.D) assert constraint[0] == pytest.approx(1.003, abs=1e-3) assert constraint[1] == pytest.approx(1.003, abs=1e-3) assert constraint[2] == pytest.approx(1.389, abs=1e-3) def test_effective_size_directed(self): effective_size = nx.effective_size(self.D) assert effective_size[0] == pytest.approx(1.167, abs=1e-3) assert effective_size[1] == pytest.approx(1.167, abs=1e-3) assert effective_size[2] == pytest.approx(1, abs=1e-3) def test_constraint_weighted_directed(self): D = self.D.copy() nx.set_edge_attributes(D, self.D_weights, "weight") constraint = nx.constraint(D, weight="weight") assert constraint[0] == pytest.approx(0.840, abs=1e-3) assert constraint[1] == pytest.approx(1.143, abs=1e-3) assert constraint[2] == pytest.approx(1.378, abs=1e-3) def test_effective_size_weighted_directed(self): D = self.D.copy() nx.set_edge_attributes(D, 
self.D_weights, "weight") effective_size = nx.effective_size(D, weight="weight") assert effective_size[0] == pytest.approx(1.567, abs=1e-3) assert effective_size[1] == pytest.approx(1.083, abs=1e-3) assert effective_size[2] == pytest.approx(1, abs=1e-3) def test_constraint_undirected(self): constraint = nx.constraint(self.G) assert constraint["G"] == pytest.approx(0.400, abs=1e-3) assert constraint["A"] == pytest.approx(0.595, abs=1e-3) assert constraint["C"] == pytest.approx(1, abs=1e-3) def test_effective_size_undirected_borgatti(self): effective_size = nx.effective_size(self.G) assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) assert effective_size["C"] == pytest.approx(1, abs=1e-2) def test_effective_size_undirected(self): G = self.G.copy() nx.set_edge_attributes(G, 1, "weight") effective_size = nx.effective_size(G, weight="weight") assert effective_size["G"] == pytest.approx(4.67, abs=1e-2) assert effective_size["A"] == pytest.approx(2.50, abs=1e-2) assert effective_size["C"] == pytest.approx(1, abs=1e-2) def test_constraint_weighted_undirected(self): G = self.G.copy() nx.set_edge_attributes(G, self.G_weights, "weight") constraint = nx.constraint(G, weight="weight") assert constraint["G"] == pytest.approx(0.299, abs=1e-3) assert constraint["A"] == pytest.approx(0.795, abs=1e-3) assert constraint["C"] == pytest.approx(1, abs=1e-3) def test_effective_size_weighted_undirected(self): G = self.G.copy() nx.set_edge_attributes(G, self.G_weights, "weight") effective_size = nx.effective_size(G, weight="weight") assert effective_size["G"] == pytest.approx(5.47, abs=1e-2) assert effective_size["A"] == pytest.approx(2.47, abs=1e-2) assert effective_size["C"] == pytest.approx(1, abs=1e-2) def test_constraint_isolated(self): G = self.G.copy() G.add_node(1) constraint = nx.constraint(G) assert math.isnan(constraint[1]) def test_effective_size_isolated(self): G = self.G.copy() G.add_node(1) 
nx.set_edge_attributes(G, self.G_weights, "weight") effective_size = nx.effective_size(G, weight="weight") assert math.isnan(effective_size[1]) def test_effective_size_borgatti_isolated(self): G = self.G.copy() G.add_node(1) effective_size = nx.effective_size(G) assert math.isnan(effective_size[1])
39
79
0.580559
31f9305a21377f64bd0e727a4e26ba7424caa0ac
39
py
Python
tests/components/logbook/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/logbook/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
tests/components/logbook/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Tests for the logbook component."""
19.5
38
0.692308
31fb74a7001125577af1d8ec0c7f1936437a0db6
19,069
py
Python
AssetAllocation.py
MomsLasanga/AssetAllocation
3729da4f73402d9162c444636002a964f26e40eb
[ "CC0-1.0" ]
null
null
null
AssetAllocation.py
MomsLasanga/AssetAllocation
3729da4f73402d9162c444636002a964f26e40eb
[ "CC0-1.0" ]
null
null
null
AssetAllocation.py
MomsLasanga/AssetAllocation
3729da4f73402d9162c444636002a964f26e40eb
[ "CC0-1.0" ]
null
null
null
""" Asset Allocation By Patrick Murrell Created 6/17/2020 This program that takes a csv positions file from fidelity.com from a Roth IRA account that contains the investments of SPAXX, FXNAX, FZILX, and FZROX. Since SPAXX is a Money Market fund then it is assumed that the money in here is not meant to be calculated in the total asset allocation of the account. Once the csv file is entered its data is scraped using the csv python library and the data used in calculations and tables that display useful statistics to the user. The user then should enter the amount they want to invest, and then click the "Calculate Investment Strategy" button to generate a table of values and display the recommended investment strategy on three buttons. These three buttons tell us whether to buy or sell or hold a dollar amount of each fund. Clicking these buttons copy their number values to the clip board to make the buying and selling of stocks easier This is a program written ideally for a single user (my investment strategy), but anyone can use the code in order to build their own version if they want. 
""" import csv # for the scraping of the csv file import re # for making sure we just copy the buttons numbers from PyQt5.QtWidgets import QFileDialog # to use the file browser in order to select a fidelity issued csv file from PyQt5 import QtCore, QtWidgets, QtGui # to build the applications GUI import sys # for starting and exiting the application # noinspection PyBroadException class UiMainWindow(object): # decides whether or not we buy/sell/hold the current allocation of a fund def buy_or_sell(self, percentage, total, current, money_to_invest, key): s: str # the string we print onto the buttons target = total * percentage # our ideal dollar amount invested in the fund actual_vs_target_ratio = target / current # the ratio of the ideal target allocation and the current allocation # if the fund is 5% outside of its target allocation and we are putting in/taking out new money then we # adjust the fund if .95 < actual_vs_target_ratio < 1.05 and int(money_to_invest) == 0: s = "Looks good for " else: # buy or sell the exact amount of the fund so we hit the target allocation amount_to_trade = str(round(abs(current - target), 2)) if actual_vs_target_ratio > 1.0: s = "Buy " else: s = "Sell " s += "$" + amount_to_trade + " " self.target_value.append(str(round(target, 2))) # so we can display the target value in the info table s += self.info_table[1][key] # add the name of the investment to the string return s # return the text to add to the button # uses pandas to read from a csv file and add the current balances of investments to list def scrape_values_from_csv(self): temp_names = [] # temporarily stores labels of funds temp_balances = [] # temporarily stores current balances of funds csv_list = [] # list that stores the contents of the csv file self.current_balances.clear() # clear the list of balances so we can replace them with the current csv values try: # import the list from Fidelity using pandas with open(self.filename, 'r') as csv_file: csv_reader = 
csv.reader(csv_file, delimiter=',') for row in csv_reader: # read the csv file contents into a list csv_list.append(row) except: # if this doesn't work we just notify the user by returning a list of -1 (a flag of sorts) self.current_balances = [-1] # we check this to report that they did not enter a correct csv file else: # reset the info table list and set its values to the headings self.info_table = [['Symbol', 'Current Value', 'Current Allocation', 'Target value', 'Target Allocation']] for i in range(2, 5): # read through the csv list temp_balances.append(csv_list[i][6]) # access the current values self.current_balances.append(float(temp_balances[i - 2].replace('$', ''))) # remove the '$' sign temp_names.append(csv_list[i][1]) # add the name of the fund to label names self.info_table.append(temp_names) # add the names and balances lists to the self.info_table.append(temp_balances) # info table list # takes values from csv list, money we want to invest, and def calculate_strategy(self, money_to_invest): # Fixed Asset Allocation Percentages based on age/year of user (mine is set to every 20XX year, because I was # born in 1999) if "2020" in self.filename: bond_percentage = .2 international_index_percentage = .3 national_index_percentage = .5 elif "2030" in self.filename: bond_percentage = .3 international_index_percentage = .27 national_index_percentage = .43 elif "2040" in self.filename: bond_percentage = .4 international_index_percentage = .23 national_index_percentage = .37 elif "2050" in self.filename: bond_percentage = .5 international_index_percentage = .19 national_index_percentage = .31 elif "2060" in self.filename: bond_percentage = .6 international_index_percentage = .15 national_index_percentage = .25 elif "2070" in self.filename: bond_percentage = .7 international_index_percentage = .11 national_index_percentage = .19 elif "2080" in self.filename: bond_percentage = .8 international_index_percentage = .08 national_index_percentage = .12 elif "2090" in 
self.filename: bond_percentage = .9 international_index_percentage = .04 national_index_percentage = .06 else: bond_percentage = 1.0 international_index_percentage = 0.0 national_index_percentage = 0.0 total_amount = money_to_invest + sum(self.current_balances) # total current amount of money to be invested self.target_value.clear() # clear the target values list # updates the buttons to display the recommended asset allocation to the user self.bonds_button.setText(self._translate("main_window", self.buy_or_sell( # set bonds button text bond_percentage, total_amount, self.current_balances[0], money_to_invest, 0))) self.international_button.setText(self._translate("main_window", self.buy_or_sell( # set international button international_index_percentage, total_amount, self.current_balances[1], money_to_invest, 1))) self.national_button.setText(self._translate("main_window", self.buy_or_sell( # set national button text national_index_percentage, total_amount, self.current_balances[2], money_to_invest, 2))) # add current allocation, ideal fund balances, and ideal allocation of account to info table list self.info_table.append([str(round(100 * self.current_balances[0] / (total_amount - money_to_invest), 2)) + "%", str(round(100 * self.current_balances[1] / (total_amount - money_to_invest), 2)) + "%", str(round(100 * self.current_balances[2] / (total_amount - money_to_invest), 2)) + "%"]) self.info_table.append(self.target_value) self.info_table.append([str(100 * bond_percentage) + "%", str(100 * international_index_percentage) + "%", str(100 * national_index_percentage) + "%"]) # this method sets up the ui as well as a couple of variables used accross the program def __init__(self, main_win): button_stylesheet = "background-color: #3F3F3F; color: #ffffff" # style sheet self.info_table = [] # table of investment information and positions we print out to the user self.current_balances = [-1] # current balances tracks the list of fund balances pulled from the csv file 
self.numbers = re.compile(r'\d+(?:\.\d+)?') # regular expression that is used to copy the button text numbers self.target_value = [] # stores the ideal balance values for each fund self.filename = '' # name path of csv file is stored here self._translate = QtCore.QCoreApplication.translate # shortened function name for ease of use # UI related code generated by PyQt file main_win.setObjectName("main_window") main_win.resize(780, 350) main_win.setAutoFillBackground(True) main_win.setStyleSheet("background-color: #4a4a4a; color: #ffffff; font: 10pt 'Consolas'") self.central_widget = QtWidgets.QWidget(main_win) self.central_widget.setAutoFillBackground(True) self.central_widget.setObjectName("central_widget") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.central_widget) self.verticalLayout_2.setObjectName("verticalLayout_2") self.main_vlayout = QtWidgets.QVBoxLayout() self.main_vlayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint) self.main_vlayout.setContentsMargins(5, 5, 5, 5) self.main_vlayout.setSpacing(5) self.main_vlayout.setObjectName("main_vlayout") self.entry_hlayout = QtWidgets.QHBoxLayout() self.entry_hlayout.setContentsMargins(5, 5, 5, 5) self.entry_hlayout.setSpacing(5) self.entry_hlayout.setObjectName("entry_hlayout") self.entry_label = QtWidgets.QLabel(self.central_widget) self.entry_label.setObjectName("entry_label") self.entry_hlayout.addWidget(self.entry_label) self.entry_lineEdit = QtWidgets.QLineEdit(self.central_widget) self.entry_lineEdit.setObjectName("entry_lineEdit") self.entry_hlayout.addWidget(self.entry_lineEdit) spacer_item = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.entry_hlayout.addItem(spacer_item) self.main_vlayout.addLayout(self.entry_hlayout) self.two_button_horizontal = QtWidgets.QHBoxLayout() self.two_button_horizontal.setContentsMargins(5, 5, 5, 5) self.two_button_horizontal.setSpacing(5) self.two_button_horizontal.setObjectName("two_button_horizontal") 
self.csv_button = QtWidgets.QPushButton(self.central_widget) self.csv_button.setObjectName("csv_button") self.csv_button.setStyleSheet(button_stylesheet) self.csv_button.clicked.connect(self.open_csv) # when csv button is clicked run the open csv method self.two_button_horizontal.addWidget(self.csv_button) self.calculate_button = QtWidgets.QPushButton(self.central_widget) self.calculate_button.setStyleSheet(button_stylesheet) self.calculate_button.setObjectName("calculate_button") self.calculate_button.clicked.connect(self.calculate) # when the calculate button is clicked run calculate() self.two_button_horizontal.addWidget(self.calculate_button) self.main_vlayout.addLayout(self.two_button_horizontal) self.error_vlayout = QtWidgets.QVBoxLayout() self.error_vlayout.setContentsMargins(5, 5, 5, 5) self.error_vlayout.setSpacing(5) self.error_vlayout.setObjectName("error_vlayout") self.error_label = QtWidgets.QLabel(self.central_widget) self.error_label.setLayoutDirection(QtCore.Qt.LeftToRight) self.error_label.setFrameShape(QtWidgets.QFrame.NoFrame) self.error_label.setFrameShadow(QtWidgets.QFrame.Plain) self.error_label.setAlignment(QtCore.Qt.AlignCenter) self.error_label.setObjectName("error_label") self.info_label = QtWidgets.QLabel(self.central_widget) self.info_label.setLayoutDirection(QtCore.Qt.LeftToRight) self.info_label.setFrameShape(QtWidgets.QFrame.NoFrame) self.info_label.setFrameShadow(QtWidgets.QFrame.Plain) self.info_label.setAlignment(QtCore.Qt.AlignCenter) self.info_label.setObjectName("error_label") self.error_vlayout.addWidget(self.info_label) self.error_vlayout.addWidget(self.error_label) self.main_vlayout.addLayout(self.error_vlayout) self.three_button_horizontal = QtWidgets.QHBoxLayout() self.three_button_horizontal.setContentsMargins(5, 5, 5, 5) self.three_button_horizontal.setSpacing(5) self.three_button_horizontal.setObjectName("three_button_horizontal") self.bonds_button = QtWidgets.QPushButton(self.central_widget) 
self.bonds_button.setObjectName("bonds_button") self.bonds_button.setStyleSheet(button_stylesheet) self.bonds_button.clicked.connect(self.copy_bond_number) self.three_button_horizontal.addWidget(self.bonds_button) self.international_button = QtWidgets.QPushButton(self.central_widget) self.international_button.setObjectName("international_button") self.international_button.setStyleSheet(button_stylesheet) self.international_button.clicked.connect(self.copy_international_number) self.three_button_horizontal.addWidget(self.international_button) self.national_button = QtWidgets.QPushButton(self.central_widget) self.national_button.setObjectName("national_button") self.national_button.setStyleSheet(button_stylesheet) self.national_button.clicked.connect(self.copy_national_number) self.three_button_horizontal.addWidget(self.national_button) self.main_vlayout.addLayout(self.three_button_horizontal) self.verticalLayout_2.addLayout(self.main_vlayout) main_win.setCentralWidget(self.central_widget) self.menubar = QtWidgets.QMenuBar(main_win) self.menubar.setGeometry(QtCore.QRect(0, 0, 884, 21)) self.menubar.setObjectName("menubar") main_win.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(main_win) self.statusbar.setObjectName("statusbar") main_win.setStatusBar(self.statusbar) self.reanimate_ui(main_win) QtCore.QMetaObject.connectSlotsByName(main_win) # Ui function that sets initial ui text def reanimate_ui(self, main_w): main_w.setWindowTitle(self._translate("main_window", "Asset Allocation")) self.entry_label.setText(self._translate("main_window", "The amount you want to invest:")) self.csv_button.setText(self._translate("main_window", "Browse For CSV")) self.calculate_button.setText(self._translate("main_window", "Calculate Investment Strategy")) self.error_label.setText(self._translate("main_window", "")) self.info_label.setText(self._translate("main_window", "")) self.bonds_button.setText(self._translate("main_window", "")) 
self.international_button.setText(self._translate("main_window", "")) self.national_button.setText(self._translate("main_window", "")) # creates a file explorer dialog to select csv. checks and reports if a valid csv was selected def open_csv(self): # open and select file from csv button filename_list = list(QFileDialog.getOpenFileName(main_window, 'Open file', "/", "csv files (*.csv)")) self.filename = str(filename_list[0]) self.scrape_values_from_csv() if self.current_balances == [-1]: # if a csv file is not detected self.csv_file_error() # report an error to the user else: self.error_label.setText(self._translate("main_window", self.filename)) # show the file name to the user # check to make sure the user entered either a number or nothing, also entered a csv, then run calculate_strategy() def calculate(self): try: amount_to_invest = float(self.entry_lineEdit.text()) # check to see if the user entered a proper number except: if self.entry_lineEdit.text() == '': # if the user enters nothing assume they are investing $0.00 amount_to_invest = 0.00 else: # since the user did not enter a number throw an error and exit the function self.error_label.setText(self._translate("main_window", "You did not enter a valid amount")) return if self.current_balances != [-1]: # if the user entered a valid csv self.calculate_strategy(amount_to_invest) # calculate our strategy and fill the rest of the info table list self.error_label.setText(self._translate("main_window", "Strategy Calculated")) # print our info table list onto the screen in the form of a table s = 'Values From CSV: \n\n|' # create the info table in a string called s for i in range(len(self.info_table[0])): s += "{:20}|".format((str(self.info_table[0][i]).ljust(15))) s += "\n" + "-" * int((len(s) * .85)) for i in range(len(self.info_table[1])): s += "\n|" for j in range(1, len(self.info_table)): s += "{:20}|".format((str(self.info_table[j][i]).ljust(15))) s += '\n' 
self.info_label.setText(self._translate("main_window", s)) # set the info label to the info table for i in range(3): # remove last three values of info table list so they do not overlap with themselves self.info_table.remove(self.info_table[len(self.info_table) - 1]) else: self.csv_file_error() # report an error to the user # methods that copy the text of how much to buy/sell/hold from button onto clipboard def copy_bond_number(self): cb.setText(''.join(self.numbers.findall(self.bonds_button.text())), mode=cb.Clipboard) def copy_national_number(self): cb.setText(''.join(self.numbers.findall(self.national_button.text())), mode=cb.Clipboard) def copy_international_number(self): cb.setText(''.join(self.numbers.findall(self.international_button.text())), mode=cb.Clipboard) # report an error if a csv file is not detected (when self.current_values == [-1]) def csv_file_error(self): self.error_label.setText(self._translate("main_window", "you did not enter a csv file")) # main function that starts and closes the app if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) main_window = QtWidgets.QMainWindow() cb = QtWidgets.QApplication.clipboard() cb.clear(mode=cb.Clipboard) ui = UiMainWindow(main_window) main_window.show() sys.exit(app.exec_())
60.536508
121
0.679637
31fb8c91aed632440a47a6131c0345c5540769ba
919
py
Python
setup.py
matsurih/pyknp
e4d0756868676a0c2058dbc0d8dfa77102fe0ba4
[ "BSD-3-Clause" ]
null
null
null
setup.py
matsurih/pyknp
e4d0756868676a0c2058dbc0d8dfa77102fe0ba4
[ "BSD-3-Clause" ]
null
null
null
setup.py
matsurih/pyknp
e4d0756868676a0c2058dbc0d8dfa77102fe0ba4
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python __author__ = 'Kurohashi-Kawahara Lab, Kyoto Univ.' __email__ = 'contact@nlp.ist.i.kyoto-u.ac.jp' __copyright__ = '' __license__ = 'See COPYING' import os from setuptools import setup, find_packages about = {} here = os.path.abspath(os.path.dirname(__file__)) exec(open(os.path.join(here, 'pyknp', '__version__.py')).read(), about) with open('README.md', encoding='utf8') as f: long_description = f.read() setup( name='pyknp', version=about['__version__'], maintainer=__author__, maintainer_email=__email__, author=__author__, author_email=__email__, description='Python module for JUMAN/KNP.', license=__license__, url='https://github.com/ku-nlp/pyknp', long_description=long_description, long_description_content_type='text/markdown', scripts=['pyknp/scripts/knp-drawtree', ], packages=find_packages(), install_requires=['six'], )
27.029412
71
0.709467
31fb8e7e0a458819ec76cfb1aa5beea0df8e62d1
559
py
Python
supriya/ugens/Dreset.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
supriya/ugens/Dreset.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
supriya/ugens/Dreset.py
deeuu/supriya
14fcb5316eccb4dafbe498932ceff56e1abb9d27
[ "MIT" ]
null
null
null
import collections from supriya import CalculationRate from supriya.ugens.DUGen import DUGen class Dreset(DUGen): """ Resets demand-rate UGens. :: >>> source = supriya.ugens.Dseries(start=0, step=2) >>> dreset = supriya.ugens.Dreset( ... reset=0, ... source=source, ... ) >>> dreset Dreset() """ ### CLASS VARIABLES ### _ordered_input_names = collections.OrderedDict([("source", None), ("reset", 0)]) _valid_calculation_rates = (CalculationRate.DEMAND,)
19.964286
84
0.581395
31fbbf24a86c0801f6f0f2045710204934802521
1,729
py
Python
src/api/store/export.py
gregory-chekler/api
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
[ "MIT" ]
null
null
null
src/api/store/export.py
gregory-chekler/api
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
[ "MIT" ]
null
null
null
src/api/store/export.py
gregory-chekler/api
11ecbea945e7eb6fa677a0c0bb32bda51ba15f28
[ "MIT" ]
null
null
null
from database.models import Team, UserProfile from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError from _main_.utils.massenergize_response import MassenergizeResponse from _main_.utils.context import Context class TeamStore: def __init__(self): self.name = "Team Store/DB" def get_team_info(self, team_id) -> (dict, MassEnergizeAPIError): team = Team.objects.filter(id=team_id) if not team: return None, InvalidResourceError() return team, None def list_teams(self, community_id) -> (list, MassEnergizeAPIError): teams = Team.objects.filter(community__id=community_id) if not teams: return [], None return teams, None def create_team(self, args) -> (dict, MassEnergizeAPIError): try: new_team = Team.create(**args) new_team.save() return new_team, None except Exception: return None, ServerError() def update_team(self, team_id, args) -> (dict, MassEnergizeAPIError): team = Team.objects.filter(id=team_id) if not team: return None, InvalidResourceError() team.update(**args) return team, None def delete_team(self, team_id) -> (dict, MassEnergizeAPIError): teams = Team.objects.filter(id=team_id) if not teams: return None, InvalidResourceError() def list_teams_for_community_admin(self, community_id) -> (list, MassEnergizeAPIError): teams = Team.objects.filter(community__id = community_id) return teams, None def list_teams_for_super_admin(self): try: teams = Team.objects.all() return teams, None except Exception as e: print(e) return None, CustomMassenergizeError(str(e))
29.810345
125
0.717178
31fbec348a03e3f7b7667b0fb7f3d122e939f326
1,125
py
Python
valid_parentheses.py
fossilet/leetcode
4cf787c74fc339dc6aee6a0b633ca15b38ac18a1
[ "MIT" ]
5
2015-12-10T14:19:02.000Z
2021-07-02T01:23:34.000Z
valid_parentheses.py
fossilet/leetcode
4cf787c74fc339dc6aee6a0b633ca15b38ac18a1
[ "MIT" ]
null
null
null
valid_parentheses.py
fossilet/leetcode
4cf787c74fc339dc6aee6a0b633ca15b38ac18a1
[ "MIT" ]
1
2015-10-01T01:43:14.000Z
2015-10-01T01:43:14.000Z
""" https://oj.leetcode.com/problems/valid-parentheses/ Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid. The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not. """ class Solution: # @return a boolean def isValid(self, s): stack = [] for x in s: if x in '({[': stack.append(x) else: try: y = stack.pop() except IndexError: return False if not ((x == '(' and y == ')') or (x == '[' and y == ']') or (x == '{' and y == '}') or (y == '(' and x == ')') or (y == '[' and x == ']') or (y == '{' and x == '}')): return False return stack == [] if __name__ == '__main__': s = Solution() assert s.isValid('()') assert s.isValid('[]') assert not s.isValid('[') assert not s.isValid('}') assert not s.isValid('([') assert s.isValid('([]{})[]') assert not s.isValid('([)]')
30.405405
118
0.441778
31fea9ffda59127cd7bda0c20fd0fcfb295048c1
142
py
Python
joga_moeda.py
lucaslk122/Programas-python
816bdaa128f2d279c255c588c1ff61cb4b834ccd
[ "MIT" ]
null
null
null
joga_moeda.py
lucaslk122/Programas-python
816bdaa128f2d279c255c588c1ff61cb4b834ccd
[ "MIT" ]
null
null
null
joga_moeda.py
lucaslk122/Programas-python
816bdaa128f2d279c255c588c1ff61cb4b834ccd
[ "MIT" ]
null
null
null
from random import random def joga_moeda(): if random() > 0.5: return "Coroa" else: return "Cara" print (joga_moeda())
20.285714
25
0.598592
31ffce463b8361c5e6ba6697268ac0aace1de85c
483
py
Python
shop/migrations/0006_auto_20171130_1638.py
zahidkizmaz/VehicleShop
5a2dc5cc4eb4c4692db9f19108818069250a950a
[ "MIT" ]
10
2017-11-29T17:41:23.000Z
2020-01-13T14:45:18.000Z
shop/migrations/0006_auto_20171130_1638.py
zahidkizmaz/VehicleShop
5a2dc5cc4eb4c4692db9f19108818069250a950a
[ "MIT" ]
1
2017-12-15T17:24:42.000Z
2017-12-17T00:05:54.000Z
shop/migrations/0006_auto_20171130_1638.py
zahidkizmaz/VehicleShop
5a2dc5cc4eb4c4692db9f19108818069250a950a
[ "MIT" ]
2
2020-03-31T00:01:27.000Z
2022-01-02T05:45:17.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-11-30 16:38 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shop', '0005_frequentlysearched_latestsearches_soldvehicles_transaction'), ] operations = [ migrations.AlterField( model_name='user', name='role', field=models.IntegerField(null=True), ), ]
23
84
0.641822
31ffd8fdd3242dbfb70cd647f01afb511ece19be
315
py
Python
settings/tests/test_global_settings.py
stanwood/traidoo-api
83e8599f2eb54352988bac27e2d4acd30734816d
[ "MIT" ]
3
2020-05-05T12:12:09.000Z
2020-05-08T08:48:16.000Z
settings/tests/test_global_settings.py
stanwood/traidoo-api
83e8599f2eb54352988bac27e2d4acd30734816d
[ "MIT" ]
160
2020-05-19T13:03:43.000Z
2022-03-12T00:35:28.000Z
settings/tests/test_global_settings.py
stanwood/traidoo-api
83e8599f2eb54352988bac27e2d4acd30734816d
[ "MIT" ]
null
null
null
import pytest from model_bakery import baker pytestmark = pytest.mark.django_db def test_get_global_settings(client_anonymous): settings = baker.make_recipe("settings.global_setting") response = client_anonymous.get("/global_settings") assert response.json() == {"productVat": settings.product_vat}
26.25
66
0.780952
ee006fceb0659547ddf28bebb8b2d57c2c9c4600
181
py
Python
dotpyle/commands/add/__init__.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
null
null
null
dotpyle/commands/add/__init__.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
2
2021-04-15T16:36:58.000Z
2022-01-04T00:03:24.000Z
dotpyle/commands/add/__init__.py
jorgebodega/dotpyle
896bcb2126904b58e70c1c63af21da07438ce7b9
[ "MIT" ]
1
2021-12-21T16:57:21.000Z
2021-12-21T16:57:21.000Z
import click from dotpyle.commands.add.dotfile import dotfile @click.group() def add(): """ This command will take KEY and ... DOTFILE """ add.add_command(dotfile)
12.928571
48
0.674033
ee0270408f2e8be9f93c82f2617c2e04aaafceac
646
py
Python
benchmark/legacy/workload.py
zhangjyr/faas
a40ea1848e6b5428177319285097168faf3253de
[ "MIT" ]
null
null
null
benchmark/legacy/workload.py
zhangjyr/faas
a40ea1848e6b5428177319285097168faf3253de
[ "MIT" ]
null
null
null
benchmark/legacy/workload.py
zhangjyr/faas
a40ea1848e6b5428177319285097168faf3253de
[ "MIT" ]
null
null
null
import sys import os from common.invoker import newRequest from common.invoker import request from common.invoker import threadize port = 8080 if len(sys.argv) > 1: port = int(sys.argv[1]) workload = 10000 req = newRequest( "GET", "http://localhost:{0}/".format(port), headers = { "X-FUNCTION": "hello" } ) base = os.path.dirname(__file__) if base == '': base = '.' entries = threadize(workload, req, num = 12, reuse = False) fileObject = open(base + '/data/response.txt', 'w') for i in entries: fileObject.write(",".join(map(lambda field: str(field), i))) fileObject.write('\n') fileObject.close()
19.575758
64
0.648607
ee02c3e71989e00196fcabde81e3802364cd921e
3,679
py
Python
em_net/util/misc.py
weihuang527/superhuman_network
a89820bda4d0006198bac3bb5922a958ac87f2ae
[ "MIT" ]
null
null
null
em_net/util/misc.py
weihuang527/superhuman_network
a89820bda4d0006198bac3bb5922a958ac87f2ae
[ "MIT" ]
null
null
null
em_net/util/misc.py
weihuang527/superhuman_network
a89820bda4d0006198bac3bb5922a958ac87f2ae
[ "MIT" ]
null
null
null
import sys import numpy as np import h5py import random import os from subprocess import check_output # 1. h5 i/o def readh5(filename, datasetname): data=np.array(h5py.File(filename,'r')[datasetname]) return data def writeh5(filename, datasetname, dtarray): # reduce redundant fid=h5py.File(filename,'w') ds = fid.create_dataset(datasetname, dtarray.shape, compression="gzip", dtype=dtarray.dtype) ds[:] = dtarray fid.close() def readh5k(filename, datasetname): fid=h5py.File(filename) data={} for kk in datasetname: data[kk]=array(fid[kk]) fid.close() return data def writeh5k(filename, datasetname, dtarray): fid=h5py.File(filename,'w') for kk in datasetname: ds = fid.create_dataset(kk, dtarray[kk].shape, compression="gzip", dtype=dtarray[kk].dtype) ds[:] = dtarray[kk] fid.close() def resizeh5(path_in, path_out, dataset, ratio=(0.5,0.5), interp=2, offset=[0,0,0]): from scipy.ndimage.interpolation import zoom # for half-res im = h5py.File( path_in, 'r')[ dataset ][:] shape = im.shape if len(shape)==3: im_out = np.zeros((shape[0]-2*offset[0], int(np.ceil(shape[1]*ratio[0])), int(np.ceil(shape[2]*ratio[1]))), dtype=im.dtype) for i in xrange(shape[0]-2*offset[0]): im_out[i,...] = zoom( im[i+offset[0],...], zoom=ratio, order=interp) if offset[1]!=0: im_out=im_out[:,offset[1]:-offset[1],offset[2]:-offset[2]] elif len(shape)==4: im_out = np.zeros((shape[0]-2*offset[0], shape[1], int(shape[2]*ratio[0]), int(shape[3]*ratio[1])), dtype=im.dtype) for i in xrange(shape[0]-2*offset[0]): for j in xrange(shape[1]): im_out[i,j,...] = zoom( im[i+offset[0],j,...], ratio, order=interp) if offset[1]!=0: im_out=im_out[:,offset[1]:-offset[1],offset[2]:-offset[2],offset[3]:-offset[3]] if path_out is None: return im_out writeh5(path_out, dataset, im_out) def writetxt(filename, dtarray): a = open(filename,'w') a.write(dtarray) a.close() # 2. 
segmentation wrapper def segToAffinity(seg): from ..lib import malis_core as malisL nhood = malisL.mknhood3d() return malisL.seg_to_affgraph(seg,nhood) def bwlabel(mat): ran = [int(mat.min()),int(mat.max())]; out = np.zeros(ran[1]-ran[0]+1); for i in range(ran[0],ran[1]+1): out[i] = np.count_nonzero(mat==i) return out def genSegMalis(gg3,iter_num): # given input seg map, widen the seg border from scipy.ndimage import morphology as skmorph #from skimage import morphology as skmorph gg3_dz = np.zeros(gg3.shape).astype(np.uint32) gg3_dz[1:,:,:] = (np.diff(gg3,axis=0)) gg3_dy = np.zeros(gg3.shape).astype(np.uint32) gg3_dy[:,1:,:] = (np.diff(gg3,axis=1)) gg3_dx = np.zeros(gg3.shape).astype(np.uint32) gg3_dx[:,:,1:] = (np.diff(gg3,axis=2)) gg3g = ((gg3_dx+gg3_dy)>0) #stel=np.array([[1, 1],[1,1]]).astype(bool) #stel=np.array([[0, 1, 0],[1,1,1], [0,1,0]]).astype(bool) stel=np.array([[1, 1, 1],[1,1,1], [1,1,1]]).astype(bool) #stel=np.array([[1,1,1,1],[1, 1, 1, 1],[1,1,1,1],[1,1,1,1]]).astype(bool) gg3gd=np.zeros(gg3g.shape) for i in range(gg3g.shape[0]): gg3gd[i,:,:]=skmorph.binary_dilation(gg3g[i,:,:],structure=stel,iterations=iter_num) out = gg3.copy() out[gg3gd==1]=0 #out[0,:,:]=0 # for malis return out # 3. evaluation """ def runBash(cmd): fn = '/tmp/tmp_'+str(random.random())[2:]+'.sh' print('tmp bash file:',fn) writetxt(fn, cmd) os.chmod(fn, 0755) out = check_output([fn]) os.remove(fn) print(out) """
33.144144
131
0.611579
ee02e152f754c131475d2144d7eda38e3e662a80
3,277
py
Python
Examples/VisualizationAlgorithms/Python/warpComb.py
satya-arjunan/vtk8
ee7ced57de6d382a2d12693c01e2fcdac350b25f
[ "BSD-3-Clause" ]
3
2015-07-28T18:07:50.000Z
2018-02-28T20:59:58.000Z
Examples/VisualizationAlgorithms/Python/warpComb.py
satya-arjunan/vtk8
ee7ced57de6d382a2d12693c01e2fcdac350b25f
[ "BSD-3-Clause" ]
14
2015-04-25T17:54:13.000Z
2017-01-13T15:30:39.000Z
Examples/VisualizationAlgorithms/Python/warpComb.py
satya-arjunan/vtk8
ee7ced57de6d382a2d12693c01e2fcdac350b25f
[ "BSD-3-Clause" ]
5
2020-10-02T10:14:35.000Z
2022-03-10T07:50:22.000Z
#!/usr/bin/env python # This example demonstrates how to extract "computational planes" from # a structured dataset. Structured data has a natural, logical # coordinate system based on i-j-k indices. Specifying imin,imax, # jmin,jmax, kmin,kmax pairs can indicate a point, line, plane, or # volume of data. # # In this example, we extract three planes and warp them using scalar # values in the direction of the local normal at each point. This # gives a sort of "velocity profile" that indicates the nature of the # flow. import vtk from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # Here we read data from a annular combustor. A combustor burns fuel # and air in a gas turbine (e.g., a jet engine) and the hot gas # eventually makes its way to the turbine section. pl3d = vtk.vtkMultiBlockPLOT3DReader() pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin") pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin") pl3d.SetScalarFunctionNumber(100) pl3d.SetVectorFunctionNumber(202) pl3d.Update() pl3d_output = pl3d.GetOutput().GetBlock(0) # Planes are specified using a imin,imax, jmin,jmax, kmin,kmax # coordinate specification. Min and max i,j,k values are clamped to 0 # and maximum value. plane = vtk.vtkStructuredGridGeometryFilter() plane.SetInputData(pl3d_output) plane.SetExtent(10, 10, 1, 100, 1, 100) plane2 = vtk.vtkStructuredGridGeometryFilter() plane2.SetInputData(pl3d_output) plane2.SetExtent(30, 30, 1, 100, 1, 100) plane3 = vtk.vtkStructuredGridGeometryFilter() plane3.SetInputData(pl3d_output) plane3.SetExtent(45, 45, 1, 100, 1, 100) # We use an append filter because that way we can do the warping, # etc. just using a single pipeline and actor. 
appendF = vtk.vtkAppendPolyData() appendF.AddInputConnection(plane.GetOutputPort()) appendF.AddInputConnection(plane2.GetOutputPort()) appendF.AddInputConnection(plane3.GetOutputPort()) warp = vtk.vtkWarpScalar() warp.SetInputConnection(appendF.GetOutputPort()) warp.UseNormalOn() warp.SetNormal(1.0, 0.0, 0.0) warp.SetScaleFactor(2.5) normals = vtk.vtkPolyDataNormals() normals.SetInputConnection(warp.GetOutputPort()) normals.SetFeatureAngle(60) planeMapper = vtk.vtkPolyDataMapper() planeMapper.SetInputConnection(normals.GetOutputPort()) planeMapper.SetScalarRange(pl3d_output.GetScalarRange()) planeActor = vtk.vtkActor() planeActor.SetMapper(planeMapper) # The outline provides context for the data and the planes. outline = vtk.vtkStructuredGridOutlineFilter() outline.SetInputData(pl3d_output) outlineMapper = vtk.vtkPolyDataMapper() outlineMapper.SetInputConnection(outline.GetOutputPort()) outlineActor = vtk.vtkActor() outlineActor.SetMapper(outlineMapper) outlineActor.GetProperty().SetColor(0, 0, 0) # Create the usual graphics stuff. ren = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) ren.AddActor(outlineActor) ren.AddActor(planeActor) ren.SetBackground(1, 1, 1) renWin.SetSize(500, 500) # Create an initial view. cam1 = ren.GetActiveCamera() cam1.SetClippingRange(3.95297, 50) cam1.SetFocalPoint(8.88908, 0.595038, 29.3342) cam1.SetPosition(-12.3332, 31.7479, 41.2387) cam1.SetViewUp(0.060772, -0.319905, 0.945498) iren.Initialize() renWin.Render() iren.Start()
35.236559
70
0.790357
ee0422fdb0bb9569ffe5bfce8332b0898922c7e6
1,482
py
Python
src/docs/architecture/demo.py
coding-fastandslow/testing-spring-boot-applications-masterclass
7564987d4a54e60bf59c4b0eb48ac3736e136904
[ "MIT" ]
1
2021-06-02T03:46:23.000Z
2021-06-02T03:46:23.000Z
src/docs/architecture/demo.py
coding-fastandslow/testing-spring-boot-applications-masterclass
7564987d4a54e60bf59c4b0eb48ac3736e136904
[ "MIT" ]
30
2021-08-04T02:14:16.000Z
2022-03-28T02:19:17.000Z
src/docs/architecture/demo.py
coding-fastandslow/testing-spring-boot-applications-masterclass
7564987d4a54e60bf59c4b0eb48ac3736e136904
[ "MIT" ]
1
2021-10-08T12:44:38.000Z
2021-10-08T12:44:38.000Z
from diagrams import Cluster, Diagram, Edge from diagrams.aws.compute import EC2 from diagrams.aws.database import RDS from diagrams.aws.integration import SQS from diagrams.aws.network import ELB from diagrams.aws.storage import S3 from diagrams.onprem.ci import Jenkins from diagrams.onprem.client import Client, User, Users from diagrams.onprem.compute import Server from diagrams.onprem.container import Docker from diagrams.onprem.database import PostgreSQL from diagrams.onprem.monitoring import Grafana, Prometheus from diagrams.onprem.network import Internet from diagrams.programming.framework import Spring, React graph_attr = { "fontsize": "20", "bgcolor": "white" #transparent } with Diagram("Application Architecture", graph_attr=graph_attr, outformat="png", filename="application_architecture"): ELB("lb") >> EC2("web") >> RDS("userdb") >> S3("store") ELB("lb") >> EC2("web") >> RDS("userdb") << EC2("stat") (ELB("lb") >> EC2("web")) - EC2("web") >> RDS("userdb") with Cluster("Application Context"): app = EC2("Spring Boot") ELB("lb") >> app metrics = Prometheus("metric") metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring") Jenkins("CI") client = Client("A") client >> User("B") >> Users("S") client >> PostgreSQL("Database") client >> Internet("Remote API") client >> Docker("Docker") client >> Server("Server") client >> SQS("Sync Books") client >> Spring("Backend") client >> React("React")
33.681818
118
0.711876
ee0479dc94dba3bc24c12f4b6da817cfb2faeead
259
py
Python
main/filesAndForms/models.py
nynguyen/sprint_zero_demo
5657ae1dac28a7085dc9cd1bc8d4d706301f5e35
[ "MIT" ]
null
null
null
main/filesAndForms/models.py
nynguyen/sprint_zero_demo
5657ae1dac28a7085dc9cd1bc8d4d706301f5e35
[ "MIT" ]
null
null
null
main/filesAndForms/models.py
nynguyen/sprint_zero_demo
5657ae1dac28a7085dc9cd1bc8d4d706301f5e35
[ "MIT" ]
null
null
null
from django.db import models class InputFile(models.Model): input = models.FileField(upload_to='input/%Y/%m/%d') next_one = models.FileField(upload_to='documents/%Y/%m/%d') name=models.CharField(max_length=100) privacy=models.BooleanField(default=False)
32.375
60
0.76834
ee05a479ec4cb10a4599fc18fc14885ce8e8c098
1,751
py
Python
examples/console/a_in.py
Picarro-kskog/mcculw
5a00dfbef2426772f0ec381f7795a2d5fd696a76
[ "MIT" ]
null
null
null
examples/console/a_in.py
Picarro-kskog/mcculw
5a00dfbef2426772f0ec381f7795a2d5fd696a76
[ "MIT" ]
null
null
null
examples/console/a_in.py
Picarro-kskog/mcculw
5a00dfbef2426772f0ec381f7795a2d5fd696a76
[ "MIT" ]
null
null
null
from __future__ import absolute_import, division, print_function from builtins import * # @UnusedWildImport from mcculw import ul from mcculw.ul import ULError from examples.console import util from examples.props.ai import AnalogInputProps use_device_detection = True def run_example(): board_num = 0 if use_device_detection: ul.ignore_instacal() if not util.config_first_detected_device(board_num): print("Could not find device.") return channel = 0 ai_props = AnalogInputProps(board_num) if ai_props.num_ai_chans < 1: util.print_unsupported_example(board_num) return ai_range = ai_props.available_ranges[0] try: # Get a value from the device if ai_props.resolution <= 16: # Use the a_in method for devices with a resolution <= 16 value = ul.a_in(board_num, channel, ai_range) # Convert the raw value to engineering units eng_units_value = ul.to_eng_units(board_num, ai_range, value) else: # Use the a_in_32 method for devices with a resolution > 16 # (optional parameter omitted) value = ul.a_in_32(board_num, channel, ai_range) # Convert the raw value to engineering units eng_units_value = ul.to_eng_units_32(board_num, ai_range, value) # Display the raw value print("Raw Value: " + str(value)) # Display the engineering value print("Engineering Value: " + '{:.3f}'.format(eng_units_value)) except ULError as e: util.print_ul_error(e) finally: if use_device_detection: ul.release_daq_device(board_num) if __name__ == '__main__': run_example()
29.677966
76
0.65791
ee05eaf652dcacea5d625e928ba76476b8f2f36d
721
py
Python
Communication_adaptor/OOCSI/main.py
tahir80/Crowd_of_Oz
a79e1e8a10b99879aeff83b00ef89b480c8d168c
[ "MIT" ]
null
null
null
Communication_adaptor/OOCSI/main.py
tahir80/Crowd_of_Oz
a79e1e8a10b99879aeff83b00ef89b480c8d168c
[ "MIT" ]
3
2021-03-19T03:45:27.000Z
2022-01-13T01:38:22.000Z
Communication_adaptor/OOCSI/main.py
tahir80/Crowd_of_Oz
a79e1e8a10b99879aeff83b00ef89b480c8d168c
[ "MIT" ]
2
2020-02-19T13:58:03.000Z
2022-01-17T19:42:02.000Z
from oocsi import OOCSI from NAO_Speak import NAO_Speak # (file name followed by class name) import unidecode ################################# IP = "IP_OF_PEPPER_ROBOT" text = "" my_nao = NAO_Speak(IP, 9559) ################################## def receiveEvent(sender, recipient, event): print('from ', sender, ' -> ', event) # this will convert unicode string to plain string msg = str(event['message']) sender = str(sender) x, y = sender.split('_') if x == 'webclient': my_nao.say_text(msg) if __name__ == "__main__": #o = OOCSI('abc', "oocsi.id.tue.nl", callback=receiveEvent) o = OOCSI('pepper_receiver', 'oocsi.id.tue.nl') o.subscribe('__test123__', receiveEvent)
25.75
69
0.599168
ee06d5c5bec6e01c97e370a892a4bf6a429c5e09
8,161
py
Python
CS305_Computer-Network/Lab6-cdn-dash/web_file_system_server.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
4
2020-11-11T11:56:57.000Z
2021-03-11T10:05:09.000Z
CS305_Computer-Network/Lab6-cdn-dash/web_file_system_server.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
null
null
null
CS305_Computer-Network/Lab6-cdn-dash/web_file_system_server.py
Eveneko/SUSTech-Courses
0420873110e91e8d13e6e85a974f1856e01d28d6
[ "MIT" ]
3
2021-01-07T04:14:11.000Z
2021-04-27T13:41:36.000Z
# encoding:utf-8 import asyncio import os import mimetypes from urllib import parse response = { # 200: [b'HTTP/1.0 200 OK\r\n', # 正常的response # b'Connection: close\r\n', # b'Content-Type:text/html; charset=utf-8\r\n', # b'\r\n'], 404: [b'HTTP/1.0 404 Not Found\r\n', # 请求文件不存在的response b'Connection: close\r\n', b'Content-Type:text/html; charset=utf-8\r\n', b'\r\n', b'<html><body>404 Not Found<body></html>\r\n', b'\r\n'], 405: [b'HTTP/1.0 405 Method Not Allowed\r\n', # 请求为GET/HEAD之外的request时的response b'Connection: close\r\n', b'Content-Type:text/html; charset=utf-8\r\n', b'\r\n', b'<html><body>405 Method Not Allowed<body></html>\r\n', b'\r\n'], 416: [b'HTTP/1.0 416 Requested Range Not Satisfiable\r\n', # Range Header error b'Connection: close\r\n', b'Content-Type:text/html; charset=utf-8\r\n', b'\r\n', b'<html><body>416 Requested Range Not Satisfiable<body></html>\r\n', b'\r\n'] } # get mime by mimetypes.guess_type def get_mime(path): mime = mimetypes.guess_type(path)[0] # 返回文件类型,由浏览器决定怎么打开,或者下载 if mime is None: # 如果浏览器不支持打开,就下载 mime = 'application/octet-stream' return mime # seperate the raw cookie info to get the location def get_cookie(raw_cookie): for content in raw_cookie: cookie = content.strip('\r\n').split(' ') for sub_cookie in cookie: if 'loc=' in sub_cookie: return sub_cookie.strip(';').replace('path=/', '') async def dispatch(reader, writer): header = {} while True: data = await reader.readline() if data == b'\r\n': break if data == b'': break message = data.decode().split(' ') # seperate the header and store in the dictionary if message[0] == 'GET' or message[0] == 'HEAD': header['METHOD'] = message[0] header['PATH'] = message[1] if message[0] == 'Range:': header['RANGE'] = message[1] if message[0] == 'Cookie:': header['COOKIE'] = message if message[0] == 'Referer:': header['REFERER'] = message[1] if message[0] == 'Host:': header['HOST'] = message[1] """test start""" print('----------header') print(header) print('----------header') """test end""" # 
Handle the header r_head = [] r = [] if 'METHOD' not in header: # if the request is not GET or HEAD writer.writelines(response[405]) await writer.drain() writer.close() return cookie = '' if 'COOKIE' in header: # get the location cookie = get_cookie(header['COOKIE']) """test start""" # print('----------cookie') # print(cookie) # print('----------cookie') """test end""" # set http status if 'RANGE' in header: r_head.append(b'HTTP/1.0 206 Partial Content\r\n') else: if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \ 'loc=' in cookie and cookie != 'loc=/': r_head.append(b'HTTP/1.0 302 Found\r\n') else: r_head.append(b'HTTP/1.0 200 OK\r\n') # make the 302 header if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \ 'loc=' in cookie and cookie != 'loc=/': cookie_loc = cookie[4:] header['HOST'] = header['HOST'].strip('\r\n') url = 'http://' + header['HOST'] + cookie_loc """test start""" print('----------url') print(url) print('----------url') """test end""" r_head.append('Location: {}\r\n\r\n'.format(url).encode('utf-8')) # set max-age for a day r_head.append('Cache-control: private; max-age={}\r\n\r\n'.format(86400).encode('utf-8')) print(r_head) writer.writelines(r_head) await writer.drain() writer.close() return # if header['PATH'] == 'favicon.ico': # Chrome会多发一个这样的包,忽略 # pass # else: path = './' + header['PATH'] try: # url解码 path = parse.unquote(path, errors='surrogatepass') except UnicodeDecodeError: path = parse.unquote(path) if os.path.isfile(path): # 判断是否为文件 file_size = int(os.path.getsize(path)) start_index = 0 end_index = file_size - 1 length = file_size if 'RANGE' in header: # divide the piece of file start_index, end_index = header['RANGE'].strip('bytes=').split('-') # - if start_index == '' and end_index == '' or end_index == '\r\n': start_index, end_index = 0, file_size-1 # x- elif end_index == '' or end_index == '\r\n': start_index, end_index = int(start_index), file_size-1 # -x elif start_index == 
'': end_index = int(end_index) start_index = file_size - end_index end_index = file_size - 1 # x-x start_index = int(start_index) end_index = int(end_index) length = end_index - start_index + 1 if start_index < 0 or end_index >= file_size or start_index > end_index: writer.writelines(response[416]) await writer.drain() writer.close() return r_head.append( 'Content-Range: bytes {}-{}/{}\r\n'.format(start_index, end_index, file_size).encode('utf-8')) # guess the type mime = get_mime(path) r_head.append('Content-Type: {}\r\n'.format(mime).encode('utf-8')) r_head.append('Content-Length: {}\r\n'.format(length).encode('utf-8')) r_head.append(b'Connection: close\r\n') r_head.append(b'\r\n') writer.writelines(r_head) if header['METHOD'] == 'GET': file = open(path, 'rb') file.seek(start_index) writer.write(file.read(length)) file.close() elif os.path.isdir(path): # 判断是否为文件夹 r_head.append(b'Connection: close\r\n') r_head.append(b'Content-Type:text/html; charset=utf-8\r\n') r_head.append('Set-Cookie: loc={};path=/\r\n'.format(header['PATH']).encode('utf-8')) r_head.append(b'\r\n') if header['METHOD'] == 'HEAD': writer.writelines(r_head) elif header['METHOD'] == 'GET': writer.writelines(r_head) file_list = os.listdir(path) # 获取文件夹内文件名 r.append(b'<html>') r.append(b'<head><title>Index of %s</title></head>' % (path.encode('utf-8'))) r.append(b'<body bgcolor="white">') r.append(b'<h1>Index of %s</h1><hr>' % (path.encode('utf-8'))) r.append(b'<ul>') if path != './': r.append(b'<li><a href=".."> ../ </a></li>') for name in file_list: if os.path.isdir(path + name + '/'): name = name + '/' r.append(b'<li><a href="%s"> %s </a></li>' % (name.encode('utf-8'), name.encode('utf-8'))) r.append(b'</ul>') r.append(b'</body>') r.append(b'</html>') writer.writelines(r) else: writer.writelines(response[404]) await writer.drain() writer.close() if __name__ == '__main__': loop = asyncio.get_event_loop() # 创建事件循环 coro = asyncio.start_server( dispatch, '127.0.0.1', 8080, loop=loop) # 开启一个新的协程 
server = loop.run_until_complete(coro) # 将协程注册到事件循环 # Serve requests until Ctrl+C is pressed print('Serving on {}'.format(server.sockets[0].getsockname())) try: loop.run_forever() except KeyboardInterrupt: pass # Close the server server.close() # 关闭服务 # 保持等待,直到数据流关闭。保持等待,直到底层连接被关闭,应该在close()后调用此方法。 loop.run_until_complete(server.wait_closed()) loop.close()
36.433036
110
0.527754
ee06e35d76242b1961bfd6577cb91d816498e4c1
157
py
Python
template/Test/components/Counter/style.py
Salenia/Orca
1b3381eb22c6c269fa3d6d9c07e997bcc92f1702
[ "MIT" ]
null
null
null
template/Test/components/Counter/style.py
Salenia/Orca
1b3381eb22c6c269fa3d6d9c07e997bcc92f1702
[ "MIT" ]
null
null
null
template/Test/components/Counter/style.py
Salenia/Orca
1b3381eb22c6c269fa3d6d9c07e997bcc92f1702
[ "MIT" ]
null
null
null
from core.object import Object style= Object({ 'btn': { 'bg': '#123321', 'fg': '#dddddd', 'font': ('Ariel', 14, 'bold') } })
17.444444
37
0.452229
ee084007853fb17a734c30e770875d1d79a60983
193
py
Python
OOP/ConstructorExample5.py
suprit08/PythonAssignments
6cab78660d8c77cf573cbea82e4dada19b0fc08c
[ "MIT" ]
null
null
null
OOP/ConstructorExample5.py
suprit08/PythonAssignments
6cab78660d8c77cf573cbea82e4dada19b0fc08c
[ "MIT" ]
null
null
null
OOP/ConstructorExample5.py
suprit08/PythonAssignments
6cab78660d8c77cf573cbea82e4dada19b0fc08c
[ "MIT" ]
null
null
null
class Test: def __init__(self): self.a=10 self.b=20 def display(self): print(self.a) print(self.b) t=Test() t.display() print(t.a,t.b)
14.846154
24
0.487047
ee0b159a9b052e35cbc0b56e022fa3be6c4dec93
151
py
Python
tests/testing.py
Shlol762/physics
a142e812bac2da8edec36cdd814b49ea765d9cdc
[ "MIT" ]
null
null
null
tests/testing.py
Shlol762/physics
a142e812bac2da8edec36cdd814b49ea765d9cdc
[ "MIT" ]
null
null
null
tests/testing.py
Shlol762/physics
a142e812bac2da8edec36cdd814b49ea765d9cdc
[ "MIT" ]
null
null
null
from physics import * s1, s2 = Speed(9, 3, unit='cm/s', extra_units=['cm/h']), Speed(9, 2, unit='cm/h', extra_units=['cm/h']) print(s2.distance.unit)
30.2
103
0.635762
ee0b88c2d64968744709b3b0f62395d7460bc8b4
378
py
Python
Part_3_advanced/m19_concurrency_II/threading_queue/homework_1_solution/book_book/actions.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_3_advanced/m19_concurrency_II/threading_queue/homework_1_solution/book_book/actions.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
Part_3_advanced/m19_concurrency_II/threading_queue/homework_1_solution/book_book/actions.py
Mikma03/InfoShareacademy_Python_Courses
3df1008c8c92831bebf1625f960f25b39d6987e6
[ "MIT" ]
null
null
null
import random import time from book_book.books_directory import books_requests_queue from book_book.rental_request import RentalRequest def rent_a_book(author: str, title: str, renter_name: str) -> None: time.sleep(random.randint(0, 1)) rental_request = RentalRequest(author=author, title=title, renter_name=renter_name) books_requests_queue.put(rental_request)
29.076923
87
0.801587
ee0c3eec70fd399ae7e39453c7ddf4147eae1d8b
19
py
Python
tutorial/2_java/emk_rules.py
awm/emk
ee26ceb47a96e1fae8729dea19c7da4f62f798da
[ "BSD-2-Clause" ]
10
2017-04-02T13:05:55.000Z
2021-10-04T22:06:50.000Z
tutorial/2_java/emk_rules.py
awm/emk
ee26ceb47a96e1fae8729dea19c7da4f62f798da
[ "BSD-2-Clause" ]
null
null
null
tutorial/2_java/emk_rules.py
awm/emk
ee26ceb47a96e1fae8729dea19c7da4f62f798da
[ "BSD-2-Clause" ]
null
null
null
emk.module("java")
9.5
18
0.684211
ee0caea10657e730ca0edcf6cea3ad5049994afa
2,111
py
Python
rally/rally-plugins/glance/glance_create_boot_delete.py
jsitnicki/browbeat
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
[ "Apache-2.0" ]
null
null
null
rally/rally-plugins/glance/glance_create_boot_delete.py
jsitnicki/browbeat
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
[ "Apache-2.0" ]
null
null
null
rally/rally-plugins/glance/glance_create_boot_delete.py
jsitnicki/browbeat
f5f9dcef2375a28fed8cc97f973eeecabd2114b7
[ "Apache-2.0" ]
1
2019-12-01T14:35:28.000Z
2019-12-01T14:35:28.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from rally.plugins.openstack.scenarios.glance.images import GlanceBasic from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils from rally.plugins.openstack.scenarios.nova import utils as nova_utils from rally.task import scenario from rally.task import types from rally.task import validation from rally import consts @types.convert(flavor={"type": "nova_flavor"}, image_location={"type": "path_or_url"}) @validation.add("required_services", services=[consts.Service.GLANCE, consts.Service.NEUTRON, consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["glance", "neutron", "nova"]}, name="BrowbeatPlugin.glance_create_boot_delete", platform="openstack") class GlanceCreateBootDelete(GlanceBasic, neutron_utils.NeutronScenario, nova_utils.NovaScenario): def run(self, container_format, image_location, disk_format, flavor, network_create_args=None, subnet_create_args=None, **kwargs): image = self.glance.create_image( container_format=container_format, image_location=image_location, disk_format=disk_format) net = self._create_network(network_create_args or {}) self._create_subnet(net, subnet_create_args or {}) kwargs['nics'] = [{'net-id': net['network']['id']}] server = self._boot_server(image.id, flavor, **kwargs) self._delete_server(server) self.glance.delete_image(image.id)
50.261905
98
0.732828
ee0cf6c256acb1e19d545ad5310115b214f0b6ae
11,168
py
Python
Evaluation/hbm_axpy_dot_based.py
jnice-81/FpgaHbmForDaCe
b80749524264b4884cbd852d2db825cf8a6007aa
[ "BSD-3-Clause" ]
null
null
null
Evaluation/hbm_axpy_dot_based.py
jnice-81/FpgaHbmForDaCe
b80749524264b4884cbd852d2db825cf8a6007aa
[ "BSD-3-Clause" ]
null
null
null
Evaluation/hbm_axpy_dot_based.py
jnice-81/FpgaHbmForDaCe
b80749524264b4884cbd852d2db825cf8a6007aa
[ "BSD-3-Clause" ]
null
null
null
from typing import List import dace from dace import subsets from dace import memlet from dace import dtypes from dace.sdfg.sdfg import InterstateEdge, SDFG from dace.sdfg.state import SDFGState from dace.transformation.interstate.sdfg_nesting import NestSDFG from dace.transformation.optimizer import Optimizer from dace.transformation.interstate import InlineSDFG, FPGATransformSDFG from dace.transformation.dataflow import StripMining from dace.sdfg import graph, nodes, propagation, utils from dace.libraries.blas.nodes import dot from hbm_transform import HbmTransform from hbm_bank_split import HbmBankSplit from hbm_transform import set_shape from hbm_transform import transform_sdfg_for_hbm from hbm_transform import all_innermost_edges from helper import * ######## Simple base versions of the pure blas applications without HBM use def simple_vadd_sdfg(N, vec_len=16, tile_size=4096): alpha = dace.symbol("alpha", dtype=dace.float32) @dace.program def axpy(x: dace.vector(dace.float32, vec_len)[N/vec_len], y: dace.vector(dace.float32, vec_len)[N/vec_len], z: dace.vector(dace.float32, vec_len)[N/vec_len]): for i in dace.map[0:N/vec_len]: with dace.tasklet: xin << x[i] yin << y[i] zout >> z[i] zout = xin + yin * alpha sdfg = axpy.to_sdfg() sdfg.apply_strict_transformations() sdfg.apply_transformations(StripMining, {"tile_size": tile_size, "divides_evenly": True}) map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "i") map.map.schedule = dtypes.ScheduleType.FPGA_Device return sdfg def simple_dot_sdfg(N, tile_size=8192): sdfg: SDFG = SDFG("dot") state = sdfg.add_state() sdfg.add_array("x", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global) sdfg.add_array("y", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global) sdfg.add_array("result", [1], dace.float32, dtypes.StorageType.FPGA_Global) lib_node = dot.Dot("dot") state.add_node(lib_node) read_x = state.add_read("x") read_y = 
state.add_read("y") write_result = state.add_write("result") state.add_edge(read_x, None, lib_node, "_x", memlet.Memlet("x")) state.add_edge(read_y, None, lib_node, "_y", memlet.Memlet("y")) state.add_edge(lib_node, "_result", write_result, None, memlet.Memlet(f"result")) lib_node.implementation = "FPGA_PartialSums" lib_node.expand(sdfg, state, partial_width=64, n=N) sdfg.arrays["x"].storage = dtypes.StorageType.Default sdfg.arrays["y"].storage = dtypes.StorageType.Default sdfg.arrays["result"].storage = dtypes.StorageType.Default strip_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream") for nsdfg in sdfg.all_sdfgs_recursive(): if nsdfg.states()[0].label == "stream": StripMining.apply_to(nsdfg, {"tile_size": tile_size, "divides_evenly": True}, _map_entry=strip_map) state = nsdfg.start_state tile_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream" and x.map.params[0] == "i") tile_map.map.schedule = dtypes.ScheduleType.FPGA_Device break return sdfg ######### On Device HBM-implementations of pure blas def hbm_axpy_sdfg(banks_per_input: int): N = dace.symbol("N") sdfg = simple_vadd_sdfg(N) map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "tile_i") banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]), "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]), "z": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input])} transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), banks, {(map, 0): banks_per_input}) return sdfg def hbm_dot_sdfg(banks_per_input: int): N = dace.symbol("N") sdfg = simple_dot_sdfg(N) state = sdfg.states()[0] for edge, state in sdfg.all_edges_recursive(): if isinstance(edge, graph.MultiConnectorEdge): if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "_result": edge.data.other_subset = subsets.Range.from_string("k") set_shape(state.parent.arrays["_result"], 
[banks_per_input]) if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "result": #one cannot update the other_subset. Leads to problems with out of bounds checking #edge.data.other_subset = subsets.Range.from_string("k") set_shape(state.parent.arrays["result"], [banks_per_input]) array_banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]), "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]), "result": ("DDR", "0", None)} div_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream" and x.map.params[0] == "tile_i") transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_banks, {(div_map.map, 0): banks_per_input}, True) return sdfg ######### Full implementations of pure blas applications def only_hbm_axpy_sdfg(banks_per_input: int): sdfg = hbm_axpy_sdfg(banks_per_input) sdfg.apply_fpga_transformations() sdfg.apply_transformations_repeated(InlineSDFG) z_access1 = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "z") sdfg.start_state.remove_nodes_from([sdfg.start_state.out_edges(z_access1)[0].dst, z_access1]) distribute_along_dim0(sdfg, ["x", "y", "z"]) return sdfg def _modify_dot_host_side(sdfg, start_state, end_state): # Add final reduction state = end_state host_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result") sum_up = state.add_reduce("lambda a, b : a + b", None, 0) sdfg.add_array("final_result", [1], dace.float32) host_final = state.add_access("final_result") state.add_edge(host_result, None, sum_up, None, memlet.Memlet("result")) state.add_edge(sum_up, None, host_final, None, memlet.Memlet("final_result[0]")) sum_up.expand(sdfg, state) sdfg.apply_transformations(InlineSDFG) # Remove copy result state = start_state access_result_start = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result") state.remove_nodes_from([state.out_edges(access_result_start)[0].dst, 
access_result_start]) sdfg.arrays["result"].transient = True def only_hbm_dot_sdfg(banks_per_input: int): sdfg = hbm_dot_sdfg(banks_per_input) sdfg.apply_fpga_transformations() sdfg.apply_transformations_repeated(InlineSDFG) distribute_along_dim0(sdfg, ["x", "y"]) _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2]) return sdfg def hbm_axpy_dot(banks_per_input: int): N = dace.symbol("N") axpy_sdfg = simple_vadd_sdfg(N, vec_len=8, tile_size=8192) dot_sdfg = simple_dot_sdfg(N, tile_size=8192) sdfg = SDFG("axpydot") sdfg.add_symbol("alpha", dace.float32) state = sdfg.add_state() sdfg.add_array("axpy_x", [N//8], dace.vector(dace.float32, 8)) sdfg.add_array("axpy_y", [N//8], dace.vector(dace.float32, 8)) sdfg.add_array("dot_y", [N//8], dace.vector(dace.float32, 8)) sdfg.add_array("middle", [N//8], dace.vector(dace.float32, 8), transient=True) sdfg.add_array("result", [banks_per_input], dace.float32) acc_axpy_x = state.add_access("axpy_x") acc_axpy_y = state.add_access("axpy_y") acc_dot_y = state.add_access("dot_y") acc_middle = state.add_access("middle") acc_result = state.add_access("result") axpynode = state.add_nested_sdfg(axpy_sdfg, sdfg, set(["x", "y", "z"]), set(["z"]), {"N": N, "alpha": "alpha"}) dotnode = state.add_nested_sdfg(dot_sdfg, sdfg, set(["x", "y", "result"]), set(["result"]), {"N": N}) acc_middle_dummy = state.add_access("middle") acc_middle_dummy_2 = state.add_access("middle") acc_result_dummy = state.add_access("result") state.add_edge(acc_axpy_x, None, axpynode, "x", memlet.Memlet("axpy_x")) state.add_edge(acc_axpy_y, None, axpynode, "y", memlet.Memlet("axpy_y")) state.add_edge(acc_middle_dummy, None, axpynode, "z", memlet.Memlet("middle")) state.add_edge(axpynode, "z", acc_middle, None, memlet.Memlet("middle")) state.add_edge(acc_middle_dummy_2, None, dotnode, "x", memlet.Memlet("middle")) state.add_edge(acc_dot_y, None, dotnode, "y", memlet.Memlet("dot_y")) state.add_edge(acc_result_dummy, None, dotnode, "result", 
memlet.Memlet("result")) state.add_edge(dotnode, "result", acc_result, None, memlet.Memlet("result")) sdfg.apply_transformations_repeated(InlineSDFG) def _nodes_from_path(path): nodes = [path[0].src] for edge in path: nodes.append(edge.dst) return nodes sdfg.add_stream("connect", dace.vector(dace.float32, 8), 128, [banks_per_input], storage=dtypes.StorageType.FPGA_Local, transient=True) old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle" and state.in_degree(x) == 1) update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]")) old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle" and state.out_degree(x) == 1) update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]")) acc_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result") path = state.memlet_path(state.in_edges(acc_result)[0]) path[0].data.subset = subsets.Range.from_string("k") modification_map_axpy = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and "axpy" in x.label and x.params[0] == "tile_i") modification_map_dot = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream" and x.params[0] == "tile_i") array_updates = {"axpy_x": ("HBM", f"0:{banks_per_input}", [banks_per_input]), "axpy_y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]), "dot_y": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input]), "result": ("DDR", "0", None)} transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_updates, {(modification_map_axpy, 0): banks_per_input, (modification_map_dot, 0): banks_per_input}) # Fpga transform cannot be applied here, because stream is not in a map, and because there # are FPGA storagetypes and schedules around. 
However since the actual application of # FPGATransform works non-destructive we just force application here fpga_xform = FPGATransformSDFG(sdfg.sdfg_id, -1, {}, -1) fpga_xform.apply(sdfg) sdfg.apply_transformations_repeated(InlineSDFG) _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2]) return sdfg
46.92437
115
0.689201
ee0db8b98c1815168cdf176d5c487ac08d4df051
1,134
py
Python
aiida_vasp/parsers/file_parsers/wavecar.py
muhrin/aiida-vasp
641fdc2ccd40bdd041e59af1fa3e1dcf9b037415
[ "MIT" ]
1
2021-06-13T09:13:01.000Z
2021-06-13T09:13:01.000Z
aiida_vasp/parsers/file_parsers/wavecar.py
muhrin/aiida-vasp
641fdc2ccd40bdd041e59af1fa3e1dcf9b037415
[ "MIT" ]
null
null
null
aiida_vasp/parsers/file_parsers/wavecar.py
muhrin/aiida-vasp
641fdc2ccd40bdd041e59af1fa3e1dcf9b037415
[ "MIT" ]
null
null
null
""" WAVECAR parser. --------------- The file parser that handles the parsing of WAVECAR files. """ from aiida_vasp.parsers.file_parsers.parser import BaseFileParser from aiida_vasp.parsers.node_composer import NodeComposer class WavecarParser(BaseFileParser): """Add WAVECAR as a single file node.""" PARSABLE_ITEMS = { 'wavecar': { 'inputs': [], 'name': 'wavecar', 'prerequisites': [] }, } def __init__(self, *args, **kwargs): super(WavecarParser, self).__init__(*args, **kwargs) self._wavecar = None self.init_with_kwargs(**kwargs) def _parse_file(self, inputs): """Create a DB Node for the WAVECAR file.""" result = inputs result = {} wfn = self._data_obj.path if wfn is None: return {'wavecar': None} result['wavecar'] = wfn return result @property def wavecar(self): if self._wavecar is None: composer = NodeComposer(file_parsers=[self]) self._wavecar = composer.compose('vasp.wavefun') return self._wavecar
25.2
65
0.591711
ee0f6bdca365641ee9474e0436ab4c38b5187dad
4,184
py
Python
waflib/package.py
fannymagnet/cwaf
60510f3596f1ee859ea73a50ee56dd636cde14b4
[ "Apache-2.0" ]
null
null
null
waflib/package.py
fannymagnet/cwaf
60510f3596f1ee859ea73a50ee56dd636cde14b4
[ "Apache-2.0" ]
null
null
null
waflib/package.py
fannymagnet/cwaf
60510f3596f1ee859ea73a50ee56dd636cde14b4
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python # encoding: utf-8 import re import os import subprocess import json class Package: def __init__(self) -> None: self.manager = "" self.name = "" self.version = "" def toString(self): print('package manager:' + self.manager) print('package name:' + self.name) print('package version:' + self.version) class PackageRepo: def __init__(self) -> None: self.packages = {} self.include_dirs = [] self.lib_dirs = [] self.stlibs = [] self.shlibs = [] def installPackages(self, packages): pass class PackageManager: def __init__(self) -> None: self.package_repos = {} self.packages = {} self.include_dirs = ['.'] self.lib_dirs = ['.'] self.stlibs = [] self.shlibs = [] self.add_package_repo("conan", ConanRepo) def add_package_repo(self, name, repo_type): self.package_repos[name] = repo_type() def add_requires(self, *args): pkgs = [] for arg in args: match_result = re.match(r'(.*)::(.*)/(.*)', arg) pkg = Package() pkg.manager = match_result.group(1) pkg.name = match_result.group(2) pkg.version = match_result.group(3) pkgs.append(pkg) self.addPackages(pkgs) # TODO: call this in the end self.installPackages() def addPackage(self, package): if package.manager in self.packages: self.packages[package.manager].append(package) else: self.packages[package.manager] = [package] def addPackages(self, packages): for package in packages: self.addPackage(package) def installPackages(self): for k, v in self.packages.items(): if k in self.package_repos: repo = self.package_repos[k] repo.installPackages(v) for include_dir in repo.include_dirs: self.include_dirs.append(include_dir) for lib_dir in repo.lib_dirs: self.lib_dirs.append(lib_dir) for stlib in repo.stlibs: self.stlibs.append(stlib) for shlib in repo.shlibs: self.shlibs.append(shlib) else: print("unsupported packaged manager: " + k) continue class ConanRepo(PackageRepo): def __init__(self) -> None: PackageRepo.__init__(self) def installPackages(self, packages): # gen conanfile.txt conanfile_content = '[requires]\n' for 
package in packages: conanfile_content += package.name + '/' + package.version + '\n' conanfile_content += '[generators]\njson' print(conanfile_content) self.installConanPackages(conanfile_content) def installConanPackages(self, conanfile_content): if not os.path.exists("tmp"): os.makedirs("tmp") os.curdir = os.getcwd() os.chdir('tmp') with open('conanfile.txt', 'w') as f: f.write(conanfile_content) cmd = "conan install . --build=missing" subprocess.run(cmd) with open('conanbuildinfo.json') as f: data = json.loads(f.read()) options = data['options'] deps = data['dependencies'] for dep in deps: print(dep['name']) pkg_include_dirs = dep['include_paths'] for pkg_include_dir in pkg_include_dirs: self.include_dirs.append(pkg_include_dir) pkg_lib_dirs = dep['lib_paths'] for pkg_lib_dir in pkg_lib_dirs: self.lib_dirs.append(pkg_lib_dir) pkg_libs = dep['libs'] for pkg_lib in pkg_libs: if options[pkg_lib]['shared'] == 'False': self.stlibs.append(pkg_lib) else: self.shlibs.append(pkg_lib) os.chdir(os.curdir) print("install conan packages finished")
28.462585
76
0.546845
ee0fd6c103aa5c0dda88b9b7d6ada7be67c461d9
16,951
py
Python
excut/embedding/ampligraph_extend/EmbeddingModelContinue.py
mhmgad/ExCut
09e943a23207381de3c3a9e6f70015882b8ec4af
[ "Apache-2.0" ]
5
2020-11-17T19:59:49.000Z
2021-09-23T23:10:39.000Z
excut/embedding/ampligraph_extend/EmbeddingModelContinue.py
mhmgad/ExCut
09e943a23207381de3c3a9e6f70015882b8ec4af
[ "Apache-2.0" ]
null
null
null
excut/embedding/ampligraph_extend/EmbeddingModelContinue.py
mhmgad/ExCut
09e943a23207381de3c3a9e6f70015882b8ec4af
[ "Apache-2.0" ]
null
null
null
from copy import deepcopy import numpy as np import tensorflow as tf from ampligraph.datasets import NumpyDatasetAdapter, AmpligraphDatasetAdapter from ampligraph.latent_features import SGDOptimizer, constants from ampligraph.latent_features.initializers import DEFAULT_XAVIER_IS_UNIFORM from ampligraph.latent_features.models import EmbeddingModel from ampligraph.latent_features.models.EmbeddingModel import ENTITY_THRESHOLD from sklearn.utils import check_random_state from tqdm import tqdm from excut.utils.logging import logger class EmbeddingModelContinue(EmbeddingModel): def __init__(self, k=constants.DEFAULT_EMBEDDING_SIZE, eta=constants.DEFAULT_ETA, epochs=constants.DEFAULT_EPOCH, batches_count=constants.DEFAULT_BATCH_COUNT, seed=constants.DEFAULT_SEED, embedding_model_params={}, optimizer=constants.DEFAULT_OPTIM, optimizer_params={'lr': constants.DEFAULT_LR}, loss=constants.DEFAULT_LOSS, loss_params={}, regularizer=constants.DEFAULT_REGULARIZER, regularizer_params={}, initializer=constants.DEFAULT_INITIALIZER, initializer_params={'uniform': DEFAULT_XAVIER_IS_UNIFORM}, large_graphs=False, verbose=constants.DEFAULT_VERBOSE): logger.warning('entities min_quality %i' % ENTITY_THRESHOLD) super(EmbeddingModelContinue, self).__init__(k, eta, epochs, batches_count, seed, embedding_model_params, optimizer, optimizer_params, loss, loss_params, regularizer, regularizer_params, initializer, initializer_params, large_graphs, verbose) self.tf_config = tf.ConfigProto(allow_soft_placement=True, device_count={"CPU": 40}, inter_op_parallelism_threads=40, intra_op_parallelism_threads=1) def copy_old_model_params(self, old_model): if not old_model.is_fitted: raise Exception('Old Model os not Fitted!') self.ent_to_idx = deepcopy(old_model.ent_to_idx) self.rel_to_idx = deepcopy(old_model.rel_to_idx) # self.is_fitted = old_model_params['is_fitted'] # is_calibrated = old_model_params['is_calibrated'] old_model_params = dict() old_model.get_embedding_model_params(old_model_params) 
copied_params = deepcopy(old_model_params) self.restore_model_params(copied_params) def fit(self, X, early_stopping=False, early_stopping_params={}, continue_training=False): """Train an EmbeddingModel (with optional early stopping). The model is trained on a training set X using the training protocol described in :cite:`trouillon2016complex`. Parameters ---------- X : ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter Numpy array of training triples OR handle of Dataset adapter which would help retrieve data. early_stopping: bool Flag to enable early stopping (default:``False``) early_stopping_params: dictionary Dictionary of hyperparameters for the early stopping heuristics. The following string keys are supported: - **'x_valid'**: ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter : Numpy array of validation triples OR handle of Dataset adapter which would help retrieve data. - **'criteria'**: string : criteria for early stopping 'hits10', 'hits3', 'hits1' or 'mrr'(default). - **'x_filter'**: ndarray, shape [n, 3] : Positive triples to use as filter if a 'filtered' early stopping criteria is desired (i.e. filtered-MRR if 'criteria':'mrr'). Note this will affect training time (no filter by default). If the filter has already been set in the adapter, pass True - **'burn_in'**: int : Number of epochs to pass before kicking in early stopping (default: 100). - **check_interval'**: int : Early stopping interval after burn-in (default:10). - **'stop_interval'**: int : Stop if criteria is performing worse over n consecutive checks (default: 3) - **'corruption_entities'**: List of entities to be used for corruptions. If 'all', it uses all entities (default: 'all') - **'corrupt_side'**: Specifies which side to corrupt. 
's', 'o', 's+o' (default) Example: ``early_stopping_params={x_valid=X['valid'], 'criteria': 'mrr'}`` """ self.train_dataset_handle = None # try-except block is mainly to handle clean up in case of exception or manual stop in jupyter notebook # TODO change 0: Update the mapping if there are new entities. if continue_training: self.update_mapping(X) try: if isinstance(X, np.ndarray): # Adapt the numpy data in the internal format - to generalize self.train_dataset_handle = NumpyDatasetAdapter() self.train_dataset_handle.set_data(X, "train") elif isinstance(X, AmpligraphDatasetAdapter): self.train_dataset_handle = X else: msg = 'Invalid type for input X. Expected ndarray/AmpligraphDataset object, got {}'.format(type(X)) logger.error(msg) raise ValueError(msg) # create internal IDs mappings # TODO Change 1: fist change to reuse the existing mappings rel_to_idx and ent_to_idx if not continue_training: self.rel_to_idx, self.ent_to_idx = self.train_dataset_handle.generate_mappings() else: self.train_dataset_handle.use_mappings(self.rel_to_idx, self.ent_to_idx) prefetch_batches = 1 if len(self.ent_to_idx) > ENTITY_THRESHOLD: self.dealing_with_large_graphs = True logger.warning('Your graph has a large number of distinct entities. 
' 'Found {} distinct entities'.format(len(self.ent_to_idx))) logger.warning('Changing the variable initialization strategy.') logger.warning('Changing the strategy to use lazy loading of variables...') if early_stopping: raise Exception('Early stopping not supported for large graphs') if not isinstance(self.optimizer, SGDOptimizer): raise Exception("This mode works well only with SGD optimizer with decay (read docs for details).\ Kindly change the optimizer and restart the experiment") if self.dealing_with_large_graphs: prefetch_batches = 0 # CPU matrix of embeddings # TODO Change 2.1: do not intialize if continue training if not continue_training: self.ent_emb_cpu = self.initializer.get_np_initializer(len(self.ent_to_idx), self.internal_k) self.train_dataset_handle.map_data() # This is useful when we re-fit the same model (e.g. retraining in model selection) if self.is_fitted: tf.reset_default_graph() self.rnd = check_random_state(self.seed) tf.random.set_random_seed(self.seed) self.sess_train = tf.Session(config=self.tf_config) # change 2.2 : Do not change batch size with new training data, just use the old (for large KGs) # if not continue_training: batch_size = int(np.ceil(self.train_dataset_handle.get_size("train") / self.batches_count)) # else: # batch_size = self.batch_size logger.info("Batch Size: %i" % batch_size) # dataset = tf.data.Dataset.from_tensor_slices(X).repeat().batch(batch_size).prefetch(2) if len(self.ent_to_idx) > ENTITY_THRESHOLD: logger.warning('Only {} embeddings would be loaded in memory per batch...'.format(batch_size * 2)) self.batch_size = batch_size # TODO change 3: load model from trained params if continue instead of re_initialize the ent_emb and rel_emb if not continue_training: self._initialize_parameters() else: self._load_model_from_trained_params() dataset = tf.data.Dataset.from_generator(self._training_data_generator, output_types=(tf.int32, tf.int32, tf.float32), output_shapes=((None, 3), (None, 1), (None, self.internal_k))) 
dataset = dataset.repeat().prefetch(prefetch_batches) dataset_iterator = tf.data.make_one_shot_iterator(dataset) # init tf graph/dataflow for training # init variables (model parameters to be learned - i.e. the embeddings) if self.loss.get_state('require_same_size_pos_neg'): batch_size = batch_size * self.eta loss = self._get_model_loss(dataset_iterator) train = self.optimizer.minimize(loss) # Entity embeddings normalization normalize_ent_emb_op = self.ent_emb.assign(tf.clip_by_norm(self.ent_emb, clip_norm=1, axes=1)) self.early_stopping_params = early_stopping_params # early stopping if early_stopping: self._initialize_early_stopping() self.sess_train.run(tf.tables_initializer()) self.sess_train.run(tf.global_variables_initializer()) try: self.sess_train.run(self.set_training_true) except AttributeError: pass normalize_rel_emb_op = self.rel_emb.assign(tf.clip_by_norm(self.rel_emb, clip_norm=1, axes=1)) if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS): self.sess_train.run(normalize_rel_emb_op) self.sess_train.run(normalize_ent_emb_op) epoch_iterator_with_progress = tqdm(range(1, self.epochs + 1), disable=(not self.verbose), unit='epoch') # print("before epochs!") # print(self.sess_train.run(self.ent_emb)) # print(self.sess_train.run(self.rel_emb)) for epoch in epoch_iterator_with_progress: losses = [] for batch in range(1, self.batches_count + 1): feed_dict = {} self.optimizer.update_feed_dict(feed_dict, batch, epoch) if self.dealing_with_large_graphs: loss_batch, unique_entities, _ = self.sess_train.run([loss, self.unique_entities, train], feed_dict=feed_dict) self.ent_emb_cpu[np.squeeze(unique_entities), :] = \ self.sess_train.run(self.ent_emb)[:unique_entities.shape[0], :] else: loss_batch, _ = self.sess_train.run([loss, train], feed_dict=feed_dict) if np.isnan(loss_batch) or np.isinf(loss_batch): msg = 'Loss is {}. 
Please change the hyperparameters.'.format(loss_batch) logger.error(msg) raise ValueError(msg) losses.append(loss_batch) if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS): self.sess_train.run(normalize_ent_emb_op) if self.verbose: msg = 'Average Loss: {:10f}'.format(sum(losses) / (batch_size * self.batches_count)) if early_stopping and self.early_stopping_best_value is not None: msg += ' — Best validation ({}): {:5f}'.format(self.early_stopping_criteria, self.early_stopping_best_value) logger.debug(msg) epoch_iterator_with_progress.set_description(msg) if early_stopping: try: self.sess_train.run(self.set_training_false) except AttributeError: pass if self._perform_early_stopping_test(epoch): self._end_training() return try: self.sess_train.run(self.set_training_true) except AttributeError: pass self._save_trained_params() self._end_training() except BaseException as e: self._end_training() raise e def _load_model_from_trained_params(self): """Load the model from trained params. While restoring make sure that the order of loaded parameters match the saved order. It's the duty of the embedding model to load the variables correctly. This method must be overridden if the model has any other parameters (apart from entity-relation embeddings). This function also set's the evaluation mode to do lazy loading of variables based on the number of distinct entities present in the graph. """ # Generate the batch size based on entity length and batch_count # TODO change 4.1: batch size based on the training data or more generally if it was computed to bigger number self.batch_size = max(self.batch_size, int(np.ceil(len(self.ent_to_idx) / self.batches_count))) # logger.warning('entities min_quality inside load model %i' % ENTITY_THRESHOLD) # logger.warning('_load_model_from_trained_params is it a big graph yet? 
%s' % self.dealing_with_large_graphs) if len(self.ent_to_idx) > ENTITY_THRESHOLD: self.dealing_with_large_graphs = True logger.warning('Your graph has a large number of distinct entities. ' 'Found {} distinct entities'.format(len(self.ent_to_idx))) logger.warning('Changing the variable loading strategy to use lazy loading of variables...') logger.warning('Evaluation would take longer than usual.') if not self.dealing_with_large_graphs: self.ent_emb = tf.Variable(self.trained_model_params[0], dtype=tf.float32) else: self.ent_emb_cpu = self.trained_model_params[0] # TODO change 4.2: doable the batch size self.ent_emb = tf.Variable(np.zeros((self.batch_size * 2, self.internal_k)), dtype=tf.float32) self.rel_emb = tf.Variable(self.trained_model_params[1], dtype=tf.float32) def update_mapping(self, X): """ update entities and relations mappings in continue case :param X: :return: """ unique_ent = set(np.unique(np.concatenate((X[:, 0], X[:, 2])))) unique_rel = set(np.unique(X[:, 1])) new_unique_ent = unique_ent - set(self.ent_to_idx.keys()) new_unique_rel = unique_rel - set(self.rel_to_idx.keys()) if len(new_unique_ent)>0 or len(new_unique_rel)>-0: logger.warning('Org entities (%i) or relations (%i)' % (len(self.ent_to_idx), len(self.rel_to_idx))) logger.warning('New entities (%i) or relations (%i)'%(len(new_unique_ent), len(new_unique_rel))) ent_id_start = max(self.ent_to_idx.values()) + 1 rel_id_start = max(self.rel_to_idx.values()) + 1 new_ent_count = len(new_unique_ent) new_rel_count = len(new_unique_rel) self.ent_to_idx.update(dict(zip(new_unique_ent, range(ent_id_start, ent_id_start+new_ent_count)))) self.rel_to_idx.update(dict(zip(new_unique_rel, range(rel_id_start, rel_id_start+new_rel_count)))) # Extend the emebdding vectors themselves with randomly initialized vectors extend_ent_emb = self.initializer.get_np_initializer(new_ent_count, self.internal_k) extend_rel_emb = self.initializer.get_np_initializer(new_rel_count, self.internal_k) 
self.trained_model_params[0] = np.concatenate([self.trained_model_params[0], extend_ent_emb]) self.trained_model_params[1] = np.concatenate([self.trained_model_params[1], extend_rel_emb])
51.21148
128
0.610583
ee1052ee4cf13eb970ced19001be494a24ecb620
1,518
py
Python
projects/Happy Times/num2txt.py
jsportland/jssmith.biz
1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd
[ "MIT" ]
null
null
null
projects/Happy Times/num2txt.py
jsportland/jssmith.biz
1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd
[ "MIT" ]
7
2020-06-05T21:15:16.000Z
2021-09-22T18:43:04.000Z
projects/Happy Times/num2txt.py
jsportland/jsportland.github.io
1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd
[ "MIT" ]
null
null
null
# num2txt.py # Jeff Smith ''' Convert a given number into its text representation. e.g. 67 becomes 'sixty-seven'. Handle numbers from 0-99. ''' # Create dictionaries of number-text key pairs ones = {0: '', 1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'} twos = {10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'} tens = {0: '', 1: '', 2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'} huns = {0: '', 1: 'one hundred', 2: 'two hundred', 3: 'three hundred', 4: 'four hundred', 5: 'five hundred', 6: 'six hundred', 7: 'seven hundred', 8: 'eight hundred', 9: 'nine hundred'} # Obtain input from console num = int(input('Enter a number 0-999: ')) def textnum(num): # Iterate through dictionaries for text matches to input # Return text representations if num == 0: return 'zero' elif num > 0 and num <= 9: return ones[num] elif num >= 10 and num <= 19: return twos[num] elif num >= 20 and num <= 99: n1 = num // 10 n2 = num % 10 return tens[n1] + '-' + ones[n2] elif num >= 100 and num < 1000: n1 = num % 1000 // 100 n2 = num % 100 // 10 n3 = num % 10 return(f"{ones[n1]} hundred, {tens[n2]}-{ones[n3]}") else: print("Number out of range") print(textnum(num))
31.625
103
0.550725
ee1117aa879343fdc2d1539ab537208c88466d45
1,811
py
Python
src/pointers/struct.py
ZeroIntensity/pointers.py
c41b0a131d9d538130cf61b19be84c6cdf251cb7
[ "MIT" ]
461
2022-03-10T03:05:30.000Z
2022-03-31T17:53:32.000Z
src/pointers/struct.py
ZeroIntensity/pointers.py
c41b0a131d9d538130cf61b19be84c6cdf251cb7
[ "MIT" ]
7
2022-03-11T03:55:01.000Z
2022-03-23T20:34:21.000Z
src/pointers/struct.py
ZeroIntensity/pointers.py
c41b0a131d9d538130cf61b19be84c6cdf251cb7
[ "MIT" ]
8
2022-03-10T19:30:37.000Z
2022-03-23T20:35:11.000Z
import ctypes from typing import get_type_hints, Any from abc import ABC from .c_pointer import TypedCPointer, attempt_decode from contextlib import suppress class Struct(ABC): """Abstract class representing a struct.""" def __init__(self, *args, **kwargs): hints = get_type_hints(self.__class__) self._hints = hints class _InternalStruct(ctypes.Structure): _fields_ = [ (name, TypedCPointer.get_mapped(typ)) for name, typ in hints.items() # fmt: off ] self._struct = _InternalStruct(*args, **kwargs) do_sync = kwargs.get("do_sync") if (kwargs.get("do_sync") is None) or (do_sync): self._sync() @property def _as_parameter_(self) -> ctypes.Structure: return self._struct @classmethod def from_existing(cls, struct: ctypes.Structure): instance = cls(do_sync=False) instance._struct = struct instance._sync() return instance def __getattribute__(self, name: str): attr = super().__getattribute__(name) with suppress(AttributeError): hints = super().__getattribute__("_hints") if (name in hints) and (type(attr)) is bytes: attr = attempt_decode(attr) return attr def __setattr__(self, name: str, value: Any): if hasattr(self, "_struct"): self._struct.__setattr__(name, value) super().__setattr__(name, value) def _sync(self): for name in self._hints: setattr(self, name, getattr(self._struct, name)) def __repr__(self) -> str: return f"<struct {self.__class__.__name__} at {hex(ctypes.addressof(self._struct))}>" # noqa
29.688525
102
0.59746
ee11a749896f2dac758fc2f13a4c31bc96f85f10
4,852
py
Python
cs15211/KthSmallestNumberinMultiplicationTable.py
JulyKikuAkita/PythonPrac
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
[ "Apache-2.0" ]
1
2021-07-05T01:53:30.000Z
2021-07-05T01:53:30.000Z
cs15211/KthSmallestNumberinMultiplicationTable.py
JulyKikuAkita/PythonPrac
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
[ "Apache-2.0" ]
null
null
null
cs15211/KthSmallestNumberinMultiplicationTable.py
JulyKikuAkita/PythonPrac
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
[ "Apache-2.0" ]
1
2018-01-08T07:14:08.000Z
2018-01-08T07:14:08.000Z
__source__ = 'https://leetcode.com/problems/kth-smallest-number-in-multiplication-table/' # Time: O() # Space: O() # # Description: Leetcode # 668. Kth Smallest Number in Multiplication Table # # Nearly every one have used the Multiplication Table. # But could you find out the k-th smallest number quickly from the multiplication table? # # Given the height m and the length n of a m * n Multiplication Table, # and a positive integer k, you need to return the k-th smallest number in this table. # # Example 1: # Input: m = 3, n = 3, k = 5 # Output: # Explanation: # The Multiplication Table: # 1 2 3 # 2 4 6 # 3 6 9 # # The 5-th smallest number is 3 (1, 2, 2, 3, 3). # Example 2: # Input: m = 2, n = 3, k = 6 # Output: # Explanation: # The Multiplication Table: # 1 2 3 # 2 4 6 # # The 6-th smallest number is 6 (1, 2, 2, 3, 4, 6). # Note: # The m and n will be in the range [1, 30000]. # The k will be in the range [1, m * n] # import unittest # 532ms 78.12% class Solution(object): def findKthNumber(self, m, n, k): """ :type m: int :type n: int :type k: int :rtype: int """ def enough(x): count = 0 for i in xrange(1, m+1): count += min(x // i, n) return count >= k lo, hi = 1, m * n while lo < hi: mi = (lo + hi) / 2 if not enough(mi): lo = mi + 1 else: hi = mi return lo class TestMethods(unittest.TestCase): def test_Local(self): self.assertEqual(1, 1) if __name__ == '__main__': unittest.main() Java = ''' # Thought: https://leetcode.com/problems/kth-smallest-number-in-multiplication-table/solution/ Approach #1: Brute Force [Memory Limit Exceeded] Complexity Analysis Time Complexity: O(m*n) to create the table, and O(m*nlog(m*n)) to sort it. Space Complexity: O(m*n) to store the table. 
# Memory Limit Exceeded # Last executed input: # 9895 # 28405 # 100787757 class Solution { public int findKthNumber(int m, int n, int k) { int[] table = new int[m*n]; for (int i = 1; i <= m; i++) { for (int j = 1; j <= n; j++) { table[(i - 1) * n + j - 1] = i * j; } } Arrays.sort(table); return table[k-1]; } } Approach #2: Next Heap [Time Limit Exceeded] Complexity Analysis Time Complexity: O(k*mlogm)=O(m^2 nlogm). Our initial heapify operation is O(m). Afterwards, each pop and push is O(mlogm), and our outer loop is O(k) = O(m*n)O(k)=O(m*n) Space Complexity: O(m). Our heap is implemented as an array with mm elements. # TLE # 9895 # 28405 # 100787757 class Solution { public int findKthNumber(int m, int n, int k) { PriorityQueue<Node> heap = new PriorityQueue<Node>(m, Comparator.<Node> comparingInt(node -> node.val)); for (int i = 1; i <= m; i++) { heap.offer(new Node(i, i)); } Node node = null; for (int i = 0; i < k; i++) { node = heap.poll(); int nxt = node.val + node.root; if (nxt <= node.root * n) { heap.offer(new Node(nxt, node.root)); } } return node.val; } } class Node { int val; int root; public Node(int v, int r) { val = v; root = r; } } Approach #3: Binary Search [Accepted] Complexity Analysis Time Complexity: O(m*log(m*n)). Our binary search divides the interval [lo, hi] into half at each step. At each step, we call enough which requires O(m)O(m) time. Space Complexity: O(1). 
We only keep integers in memory during our intermediate calculations # 19ms 34.94% class Solution { public boolean enough(int x, int m, int n, int k) { int count = 0; for (int i = 1; i <= m; i++) { count += Math.min(x / i, n); } return count >= k; } public int findKthNumber(int m, int n, int k) { int lo = 1, hi = m * n; while (lo < hi) { int mi = lo + (hi - lo) / 2; if (!enough(mi, m, n, k)) lo = mi + 1; else hi = mi; } return lo; } } # 10ms 99.60% class Solution { public int findKthNumber(int m, int n, int k) { int l = 1, r = m * n; while (l < r) { int mid = l + (r - l) / 2; int c = count(m, n ,mid); if (c < k) { l = mid + 1; } else { r = mid; } } return l; } private int count (int m, int n, int target) { int i = 1, j = n, count = 0; while (j >= 1 && i <= m) { if (target >= i * j) { count += j; i++; } else { j--; } } return count; } } '''
25.671958
103
0.523495
ee11bc1258b4bdb1e1c427f2c1da2afcdccd329f
1,141
py
Python
examples/python3/force_platform.py
pariterre/ezc3d
53999b44a17819dc86e485f99887cea5ee4b7112
[ "MIT" ]
83
2018-05-26T00:08:14.000Z
2022-03-31T17:08:41.000Z
examples/python3/force_platform.py
pariterre/ezc3d
53999b44a17819dc86e485f99887cea5ee4b7112
[ "MIT" ]
183
2018-05-23T18:09:59.000Z
2022-03-31T18:36:04.000Z
examples/python3/force_platform.py
pariterre/ezc3d
53999b44a17819dc86e485f99887cea5ee4b7112
[ "MIT" ]
37
2018-07-20T14:15:30.000Z
2021-12-09T06:40:26.000Z
import ezc3d # This example reads a file that contains 2 force platforms. It thereafter print some metadata and data for one them c3d = ezc3d.c3d("../c3dFiles/ezc3d-testFiles-master/ezc3d-testFiles-master/Qualisys.c3d", extract_forceplat_data=True) print(f"Number of force platform = {len(c3d['data']['platform'])}") print("") print("Printing information and data for force platform 0") print("") pf0 = c3d["data"]["platform"][0] # Units print(f"Force unit = {pf0['unit_force']}") print(f"Moment unit = {pf0['unit_moment']}") print(f"Center of pressure unit = {pf0['unit_position']}") print("") # Position of pf print(f"Position of origin = {pf0['origin']}") print(f"Position of corners = \n{pf0['corners']}") print("") # Calibration matrix print(f"Calibation matrix = \n{pf0['cal_matrix']}") print("") # Data at 3 different time frames = [0, 10, 1000, -1] print(f"Data (in global reference frame) at frames = {frames}") print(f"Force = \n{pf0['force'][:, frames]}") print(f"Moment = \n{pf0['moment'][:, frames]}") print(f"Center of pressure = \n{pf0['center_of_pressure'][:, frames]}") print(f"Moment at CoP = \n{pf0['Tz'][:, frames]}")
36.806452
118
0.688869
ee12205ea3c9735342c4affa7e463d604044c45b
7,062
py
Python
docs/html_docs/get_classes_in_file.py
ACea15/pyNastran
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
[ "BSD-3-Clause" ]
293
2015-03-22T20:22:01.000Z
2022-03-14T20:28:24.000Z
docs/html_docs/get_classes_in_file.py
ACea15/pyNastran
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
[ "BSD-3-Clause" ]
512
2015-03-14T18:39:27.000Z
2022-03-31T16:15:43.000Z
docs/html_docs/get_classes_in_file.py
ACea15/pyNastran
5ffc37d784b52c882ea207f832bceb6b5eb0e6d4
[ "BSD-3-Clause" ]
136
2015-03-19T03:26:06.000Z
2022-03-25T22:14:54.000Z
from __future__ import print_function, unicode_literals import os from io import open from pyNastran.utils.log import get_logger2 import shutil IGNORE_DIRS = ['src', 'dmap', 'solver', '__pycache__', 'op4_old', 'calculix', 'bars', 'case_control', 'pch', 'old', 'solver', 'test', 'dev', 'bkp', 'bdf_vectorized'] MODS_SKIP = ['spike', 'shell_backup'] SKIP_DIRECTORIES = ['.svn', '.idea', '.settings', '.git', 'test', 'bkp', '__pycache__', 'dev', 'htmlcov', 'vtk_examples', 'SnakeRiverCanyon', 'M100', 'SWB'] SKIP_FILE_SUFFIX = [ '.pyc', '.pyx', # python '.bdf', '.op2', '.f06', '.op4', '.dat', '.inp', # nastran '.err', '.log', '.rej', '.db', '.db.jou', '.ses', '.ses.01', # patran '.pptx', '.png', '.gif', # pictures '.txt', '.csv', '.out', '.coverage', '.whl', # generic '.mapbc', '.front', '.flo', 'cogsg', '.bc', '.d3m', '.inpt', '.nml', # usm3d/fun3d '.ele', '.node', '.smesh', '.off', '.mk5', '.wgs', '.stl', '.fgrid', '.su2', '.obj', # other formats '.tri', '.cntl', '.c3d', # cart3d '.surf', '.tags', '.ugrid', '.bedge', # aflr '.plt', # tecplot '.p3d', '.tex', '.bib', # latex ] MAKE_FILES = True def get_folders_files(dirname, skip_file_suffix=None, skip_directories=None): """ Return list of directories and files in a given tree path. 
By default discards: * directories ".svn", ".idea", ".settings" * files that ends with ".pyc", .pyx", ".bdf" """ if skip_directories is None: skip_directories = SKIP_DIRECTORIES if skip_file_suffix is None: skip_file_suffix = tuple(SKIP_FILE_SUFFIX) dirname = os.path.join(dirname) files = [] folders = [] for root, dirs, filenames in os.walk(dirname): folders.append(root) for filename in filenames: if filename.endswith(skip_file_suffix): continue if 'test_' in os.path.basename(filename): continue files.append(os.path.join(root, filename)) #files += [os.path.join(root, filename) for filename in filenames #if not filename.endswith(skip_file_suffix)] dirs[:] = [d for d in dirs if not d in skip_directories] #if len(dirs): #print('root = %s' % root) #print(dirs) #print('------------------') return folders, files def get_classes_functions_in_file(py_filename): with open(py_filename, 'r', encoding='utf8') as f: lines = f.readlines() function_list = [] class_list = [] for line in lines: line = line.split('#')[0].rstrip() if line.startswith('class '): # class ASDF(object): class_name = line.split('(')[0].split(' ')[1] is_object = False if '(object):' in line: is_object = True class_list.append((class_name, is_object)) elif line.startswith('def '): function_name = line.split('(')[0].split(' ')[1] if function_name.startswith('_'): continue function_list.append(function_name) #for class_name in class_list: #print(class_name) return class_list, function_list def get_pyfilenames(): folders, filenames = get_folders_files('../../pyNastran') filenames_classes = [] for py_filename in filenames: py_filename2, dot_path = get_location_filename_for_pyfilename(py_filename) class_names, function_names = get_classes_functions_in_file(py_filename) #for class_name, is_object in class_names: #print(' %s (class)' % class_name) #for function_name in function_names: #print(' %s (function)' % function_name) filenames_classes.append((py_filename, py_filename2, dot_path, class_names)) return 
filenames_classes def get_location_filename_for_pyfilename(py_filename): """../../pyNastran/utils/nastran_utils.py -> pyNastran/utils/nastran_utils.py""" path = py_filename.lstrip('../\\') no_py = os.path.splitext(path)[0] dot_path = no_py.replace('\\', '.').replace('/', '.') #print(dot_path) return path, dot_path def filenames_to_rsts(filenames_classes, make_rsts=False): for py_filename, py_filename2, dot_path, class_names in filenames_classes: if not class_names: continue base_folder = os.path.dirname(py_filename2) #print('%-20s %s %s' % (base_folder[:20], py_filename2, dot_path)) folder = os.path.join('rsts', base_folder) if 'cards' in folder: while not folder.endswith('cards'): folder = os.path.dirname(folder) if not os.path.exists(folder): os.makedirs(folder) rst_filename = os.path.join(folder, 'index.rst') mode = 'w' rst_lines = '.. toctree::\n\n' if os.path.exists(rst_filename): rst_lines = '' mode = 'a' for class_name, is_object in class_names: create_rst_file_for_class(folder, dot_path, class_name, is_object) print(' %s' % str(class_name)) #pyNastran.bdf.cards.aset rst_lines += ' %s.%s\n' % (dot_path, class_name) #print(rst_lines) with open(rst_filename, mode) as rst_file: rst_file.write(rst_lines) def create_rst_file_for_class(folder, dot_path, class_name, is_object): split_path = dot_path.split('.') split_path[-1] += '.rst' #rst_filename = os.path.join(*split_path) dot_class_path = '%s.%s.rst' % (dot_path, class_name) rst_filename = os.path.join(folder, dot_class_path) #dirname = os.path.dirname(rst_filename) #if not os.path.exists(dirname): #os.makedirs(dirname) lines = '' if is_object: lines = '%s\n' % class_name lines += '%s\n' % (len(class_name) * '-') lines += '.. autoclass:: %s.%s\n' % (dot_path, class_name) lines += ' :inherited-members:\n' lines += ' :members:\n' #lines += ' :private-members:\n' else: lines = '%s\n' % class_name lines += '%s\n' % (len(class_name) * '-') lines += '.. 
autoclass:: %s.%s\n' % (dot_path, class_name) lines += ' :show-inheritance:\n' lines += ' :inherited-members:\n' lines += ' :members:\n' #lines += ' :private-members:\n' #ASET #---- #.. autoclass:: pyNastran.bdf.cards.bdf_sets.ASET #:show-inheritance: #:inherited-members: #:members: #:private-members: #print(rst_filename) if lines: with open(rst_filename, 'w') as rst_file: rst_file.write(lines) def main(): if os.path.exists('rsts'): shutil.rmtree('rsts') filenames_classes = get_pyfilenames() filenames_to_rsts(filenames_classes, make_rsts=False) #py_filename = r'C:\NASA\m4\formats\git\pyNastran\pyNastran\bdf\cards\bdf_sets.py' #get_classes_in_file(py_filename) if __name__ == '__main__': main()
35.487437
94
0.585387
ee139bfd29a89a7a4a5d77d7a8c7900ad5b256b6
4,650
py
Python
tests/utils/postprocess/test_top.py
ToucanToco/toucan-data-sdk
1d82b7112231b65f8a310327b6d6673d137b7378
[ "BSD-3-Clause" ]
9
2017-12-21T23:09:10.000Z
2020-08-20T13:53:24.000Z
tests/utils/postprocess/test_top.py
ToucanToco/toucan-data-sdk
1d82b7112231b65f8a310327b6d6673d137b7378
[ "BSD-3-Clause" ]
144
2017-11-24T17:23:02.000Z
2022-03-28T02:34:15.000Z
tests/utils/postprocess/test_top.py
ToucanToco/toucan-data-sdk
1d82b7112231b65f8a310327b6d6673d137b7378
[ "BSD-3-Clause" ]
5
2018-03-07T13:22:01.000Z
2021-05-31T11:53:07.000Z
import pandas as pd from toucan_data_sdk.utils.postprocess import top, top_group def test_top(): """ It should return result for top """ data = pd.DataFrame( [ {'variable': 'toto', 'Category': 1, 'value': 100}, {'variable': 'toto', 'Category': 1, 'value': 200}, {'variable': 'toto', 'Category': 1, 'value': 300}, {'variable': 'lala', 'Category': 1, 'value': 100}, {'variable': 'lala', 'Category': 1, 'value': 150}, {'variable': 'lala', 'Category': 1, 'value': 250}, {'variable': 'lala', 'Category': 2, 'value': 350}, {'variable': 'lala', 'Category': 2, 'value': 450}, ] ) # ~~~ without group ~~~ expected = pd.DataFrame( [ {'variable': 'lala', 'Category': 2, 'value': 450}, {'variable': 'lala', 'Category': 2, 'value': 350}, {'variable': 'toto', 'Category': 1, 'value': 300}, ] ) kwargs = {'value': 'value', 'limit': 3, 'order': 'desc'} df = top(data, **kwargs).reset_index(drop=True) assert df.equals(expected) # ~~~ with group ~~~ expected = pd.DataFrame( [ {'variable': 'lala', 'Category': 1, 'value': 150}, {'variable': 'lala', 'Category': 1, 'value': 100}, {'variable': 'lala', 'Category': 2, 'value': 450}, {'variable': 'lala', 'Category': 2, 'value': 350}, {'variable': 'toto', 'Category': 1, 'value': 200}, {'variable': 'toto', 'Category': 1, 'value': 100}, ] ) kwargs = {'group': ['variable', 'Category'], 'value': 'value', 'limit': -2, 'order': 'desc'} df = top(data, **kwargs) assert df.equals(expected) def test_top_date_strings(): """It should manage to use top if the column can be interpretated as date""" df = pd.DataFrame( {'date': ['2017-01-01', '2017-03-02', '2018-01-02', '2016-04-02', '2017-01-03']} ) top_df = top(df, value='date', limit=2) assert top_df['date'].tolist() == ['2016-04-02', '2017-01-01'] top_df = top(df, value='date', limit=3, order='desc') assert top_df['date'].tolist() == ['2018-01-02', '2017-03-02', '2017-01-03'] top_df = top(df, value='date', limit=3, order='desc', date_format='%Y-%d-%m') assert top_df['date'].tolist() == ['2018-01-02', '2017-01-03', 
'2017-03-02'] def test_top_date_strings_temp_column(): """It should not change existing columns""" df = pd.DataFrame( {'date': ['2017-01-01', '2017-03-02'], 'date_': ['a', 'b'], 'date__': ['aa', 'bb']} ) assert top(df, value='date', limit=2, order='desc').equals(df[::-1]) def test_top_group(): """ It should return result for top_group """ data = pd.DataFrame( { 'Label': ['G1', 'G2', 'G3', 'G4', 'G5', 'G3', 'G3'], 'Categories': ['C1', 'C2', 'C1', 'C2', 'C1', 'C2', 'C3'], 'Valeurs': [6, 1, 9, 4, 8, 2, 5], 'Periode': ['mois', 'mois', 'mois', 'semaine', 'semaine', 'semaine', 'semaine'], } ) # ~~~ with filters ~~~ expected = pd.DataFrame( { 'Periode': ['mois', 'mois', 'semaine', 'semaine', 'semaine'], 'Label': ['G3', 'G1', 'G5', 'G3', 'G3'], 'Categories': ['C1', 'C1', 'C1', 'C2', 'C3'], 'Valeurs': [9, 6, 8, 2, 5], } ) kwargs = { 'group': 'Periode', 'value': 'Valeurs', 'aggregate_by': ['Label'], 'limit': 2, 'order': 'desc', } df = top_group(data, **kwargs) assert df.equals(expected) # ~~~ without groups ~~~ expected = pd.DataFrame( { 'Label': ['G3', 'G3', 'G3', 'G5'], 'Categories': ['C1', 'C2', 'C3', 'C1'], 'Valeurs': [9, 2, 5, 8], 'Periode': ['mois', 'semaine', 'semaine', 'semaine'], } ) kwargs = { 'group': None, 'value': 'Valeurs', 'aggregate_by': ['Label'], 'limit': 2, 'order': 'desc', } df = top_group(data, **kwargs) assert df.equals(expected) # ~~~ with group and function = mean ~~~ expected = pd.DataFrame( { 'Periode': ['mois', 'mois', 'semaine', 'semaine'], 'Label': ['G3', 'G1', 'G5', 'G4'], 'Categories': ['C1', 'C1', 'C1', 'C2'], 'Valeurs': [9, 6, 8, 4], } ) kwargs = { 'group': ['Periode'], 'value': 'Valeurs', 'aggregate_by': ['Label'], 'limit': 2, 'function': 'mean', 'order': 'desc', } df = top_group(data, **kwargs) assert df.equals(expected)
33.214286
96
0.476129
ee13fe06ca5c309a9828923b3634c76483f38157
2,439
py
Python
Main/models.py
semvis123/Project-Webtechnologie
09f56fba96c7352a52a00cf4c3b6e1b83022aa6e
[ "MIT" ]
null
null
null
Main/models.py
semvis123/Project-Webtechnologie
09f56fba96c7352a52a00cf4c3b6e1b83022aa6e
[ "MIT" ]
null
null
null
Main/models.py
semvis123/Project-Webtechnologie
09f56fba96c7352a52a00cf4c3b6e1b83022aa6e
[ "MIT" ]
null
null
null
from Main import db, login_manager from faker import Faker from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash fake = Faker() @login_manager.user_loader def load_user(user_id): return User.query.get(user_id) class User(db.Model, UserMixin): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String) email = db.Column(db.String) password_hash = db.Column(db.String(128)) profile_color = db.Column(db.String) def __init__(self, email, password): self.email = email self.password_hash = generate_password_hash(password) # create a random config first, user can change it later self.username = fake.user_name() self.profile_color = fake.color(luminosity='dark') def check_password(self, password): return check_password_hash(self.password_hash, password) def __repr__(self): return "user({},{},{},{})".format(self.id, self.username, self.email, self.profile_color) class Comment(db.Model): __tablename__ = 'comments' id = db.Column(db.Integer, primary_key=True) owner_id = db.Column(db.Integer, db.ForeignKey('users.id')) post_id = db.Column(db.Integer, db.ForeignKey('posts.id')) message = db.Column(db.String) def __init__(self, owner_id, post_id, message): self.owner_id = owner_id self.post_id = post_id self.message = message def __repr__(self): return "comment({},{},{},{})".format(self.id, self.owner_id, self.post_id, self.message) class Like(db.Model): __tablename__ = 'likes' id = db.Column(db.Integer, primary_key=True) owner_id = db.Column(db.Integer, db.ForeignKey('users.id')) post_id = db.Column(db.Integer, db.ForeignKey('posts.id')) def __init__(self, owner_id, post_id): self.owner_id = owner_id self.post_id = post_id def __repr__(self): return "like({},{},{})".format(self.id, self.owner_id, self.post_id) class Post(db.Model): __tablename__ = 'posts' id = db.Column(db.Integer, primary_key=True) owner_id = db.Column(db.Integer, db.ForeignKey('users.id')) text = db.Column(db.Text) 
def __init__(self, text, owner_id): self.text = text self.owner_id = owner_id def __repr__(self): return "post({}, {}, {})".format(self.id, self.owner_id, self.text)
30.111111
97
0.671587
ee14c24926c18fc83e37f709865f20c7c3816477
2,199
py
Python
MoveRestructure.py
bsmarine/dicomConversionToNiftiHCC
ea8d4c922a299a2b9e1936bdb08c22d445e48db7
[ "BSD-3-Clause" ]
1
2021-06-25T17:13:37.000Z
2021-06-25T17:13:37.000Z
MoveRestructure.py
bsmarine/dicomConversionToNiftiHCC
ea8d4c922a299a2b9e1936bdb08c22d445e48db7
[ "BSD-3-Clause" ]
null
null
null
MoveRestructure.py
bsmarine/dicomConversionToNiftiHCC
ea8d4c922a299a2b9e1936bdb08c22d445e48db7
[ "BSD-3-Clause" ]
1
2021-07-08T22:27:57.000Z
2021-07-08T22:27:57.000Z
import sys import os import SimpleITK as sitk import pydicom from slugify import slugify import shutil import argparse def gen_dcm_identifiers(in_dir): ##Get Absolute Path For Every DCM File Recursively dcms_path_list = [os.path.abspath(os.path.join(dire,dcm)) for dire,sub_dir,dcms in os.walk(in_dir) if 'dcm' in str(dcms) for dcm in dcms] ##Output List output_list = list() ## Generate List with MRN, Accession Number, Series Description, Series Number, Acq Date for dcm_file in dcms_path_list: info = pydicom.read_file(dcm_file) try: mrn = info[0x010,0x0020][:] acc = info[0x008,0x0050][:] series_desc = info[0x0008,0x103e].value series_num = info[0x0020,0x0011].value acq_date = info[0x0008,0x0020].value string = str(series_desc)+"_"+str(series_num)+"_"+str(acq_date) string_date = slugify(string) output_list.append([mrn,acc,string_date,dcm_file]) except KeyError: print ("Error getting metadata from "+str(dcm_file)) return output_list def create_folders_move(dcm_ids,out_dir): if os.path.exists(out_dir) == False: os.mkdir(out_dir) for i in dcm_ids: print (i) if os.path.exists(os.path.join(out_dir,i[0]))==False: os.mkdir(os.path.join(out_dir,i[0])) if os.path.exists(os.path.join(out_dir,i[0],i[1]))==False: os.mkdir(os.path.join(out_dir,i[0],i[1])) if os.path.exists(os.path.join(out_dir,i[0],i[1],i[2]))==False: os.mkdir(os.path.join(out_dir,i[0],i[1],i[2])) try: shutil.move(i[3],os.path.join(out_dir,i[0],i[1],i[2])) print ("######## Moving "+str(i[3])) except: print ("Error, likely file already exists in destination") parser = argparse.ArgumentParser(description='MoveRestructureScript') parser.add_argument("--dicomDir", dest="in_dir", required=True) parser.add_argument("--outDir", dest="out_dir", required=True) op = parser.parse_args() create_folders_move(gen_dcm_identifiers(op.in_dir), op.out_dir)
36.04918
141
0.622556
ee1671d5719714f90ce4ce8110a4344a83fa25b3
2,384
py
Python
mesmerize_napari/cnmf_viz_gui.py
nel-lab/mesmerize-napari
24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b
[ "Apache-2.0" ]
1
2022-01-11T16:18:17.000Z
2022-01-11T16:18:17.000Z
mesmerize_napari/cnmf_viz_gui.py
nel-lab/caiman-napari-prototype
24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b
[ "Apache-2.0" ]
12
2022-01-11T16:21:01.000Z
2022-02-17T04:43:50.000Z
mesmerize_napari/cnmf_viz_gui.py
nel-lab/mesmerize-napari
24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b
[ "Apache-2.0" ]
null
null
null
from PyQt5 import QtWidgets from .cnmf_viz_pytemplate import Ui_VizualizationWidget from .evaluate_components import EvalComponentsWidgets from mesmerize_core.utils import * from mesmerize_core import * import caiman as cm class VizWidget(QtWidgets.QDockWidget): def __init__(self, cnmf_viewer, batch_item): QtWidgets.QDockWidget.__init__(self, parent=None) self.ui = Ui_VizualizationWidget() self.ui.setupUi(self) self.cnmf_obj = batch_item.cnmf.get_output() self.batch_item = batch_item self.cnmf_viewer = cnmf_viewer self.eval_gui = EvalComponentsWidgets(cnmf_viewer=cnmf_viewer) self.ui.pushButtonInputMovie.clicked.connect(self.view_input) self.ui.pushButtonCnImage.clicked.connect(self.load_correlation_image) self.ui.pushButtonViewProjection.clicked.connect(self.view_projections) self.ui.pushButtonEvalGui.clicked.connect(self.show_eval_gui) self.ui.pushButtonUpdateBoxSize.clicked.connect(self.select_contours) def _open_movie(self, path: Union[Path, str]): file_ext = Path(path).suffix if file_ext == ".mmap": Yr, dims, T = cm.load_memmap(path) images = np.reshape(Yr.T, [T] + list(dims), order="F") self.cnmf_viewer.viewer.add_image(images, colormap="gray") else: self.cnmf_viewer.viewer.open(path, colormap="gray") def view_input(self): path = self.batch_item.caiman.get_input_movie_path() full_path = get_full_data_path(path) self._open_movie(full_path) def load_correlation_image(self): corr_img = self.batch_item.caiman.get_correlation_image() self.cnmf_viewer.viewer.add_image( corr_img, name=f'corr: {self.batch_item["name"]}', colormap="gray" ) def view_projections(self): proj_type = self.ui.comboBoxProjection.currentText() projection = self.batch_item.caiman.get_projection(proj_type=proj_type) self.cnmf_viewer.viewer.add_image( projection, name=f'{proj_type} projection: {self.batch_item["name"]}', colormap="gray", ) def show_eval_gui(self): self.eval_gui.show() def select_contours(self): box_size = self.ui.spinBoxBoxSize.value() 
self.cnmf_viewer.select_contours(box_size=box_size, update_box=True)
38.451613
79
0.693792
ee1b99ad06589e6918f63fd1de2e355976ecde19
7,459
py
Python
graphics/text/config.py
sergeirocks100/satanicinfestation
3eff98631b71207ffbf5aa9791e194ebf41e2027
[ "CC-BY-4.0" ]
5
2020-04-28T02:59:08.000Z
2022-02-26T20:32:54.000Z
graphics/text/config.py
sergeirocks100/satanicinfestation
3eff98631b71207ffbf5aa9791e194ebf41e2027
[ "CC-BY-4.0" ]
null
null
null
graphics/text/config.py
sergeirocks100/satanicinfestation
3eff98631b71207ffbf5aa9791e194ebf41e2027
[ "CC-BY-4.0" ]
null
null
null
# SPDX-License-Identifier: BSD-3-Clause # # Configuration file for textgen. This file defines the graphic lumps # that are generated, and the text to show in each one. # import re # Adjustments for character position based on character pairs. Some # pairs of characters can fit more snugly together, which looks more # visually appealing. This is highly dependent on the font graphics, # and if the font is changed this probably needs to be redone. FONT_KERNING_RULES = { # Right character fits under left character: r"T[0ACOSZacos]": -2, r"V[OC]": -2, r"Y[ASZacs]": -2, r"Y[CO0]": -1, r"P[Aa]": -3, r"P[7]": -2, r"P[Z]": -1, r"[0O][Aa]": -1, r"S[A]": -1, r"Sa": -2, r"Wa": -1, r"p[a]": -1, r"s[ao]": -1, r"ta": -2, r"v[oc]": -1, r"y[oacs]": -1, # Left character fits under right character: r"L[4Q]": -3, r"L[O0CTYtcq]": -2, r"L[oyVv]": -1, r"l[tTY]": -2, r"l[y]": -1, r"[0O][4TYy]": -2, r"[0O][1]": -1, r"Q[1TY]": -2, r"A[CGTYt]": -2, r"A[cgy]": -1, r"a[cTYt]": -2, r"a[vVy]": -1, # Fits into "hole" in left character: r"B[0CGOQ]": -2, r"B[0cgq]": -2, r"C[0CGOQ]": -3, r"C[q]": -2, r"C[cgo]": -1, r"X[0CO]": -3, r"X[Qqco]": -2, r"8[0CO]": -3, r"8[GQcgqo]": -2, r"Z[0CO]": -2, r"Z[GQocgq]": -1, r"I[0COQcoq]": -1, r"K[0CO]": -4, r"K[GQ]": -3, r"K[cgo]": -2, r"K[Eq]": -1, r"P[0COQcoq]": -1, r"R[0COQcoq]": -1, # Fits into "hole" in right character: r"[O0][2X8]": -3, r"[O0][9Kx]": -2, r"[O0][Iik]": -1, r"Q[28X]": -2, r"Q[9Iix]": -1, r"q[IXx]": -1, # Just because. r"[O0][O0]": -1, } white_graphics = { "wibp1": "P1", "wibp2": "P2", "wibp3": "P3", "wibp4": "P4", "wicolon": ":", # These files are for the title screens of Phase 1 and Phase 2 "t_phase1": "Part 1: Phobos Infestation", "t_phase2": "Part 2: Earth Infestation", # Note: level names are also included in this dictionary, with # the data added programatically from the DEHACKED lump, see # code below. 
} blue_graphics = { "m_disopt": "DISPLAY OPTIONS", "m_episod": "Choose Chapter:", "m_optttl": "OPTIONS", "m_skill": "Choose Skill Level:", } red_graphics = { # Title for the HELP/HELP1 screen: "helpttl": "Help", # Title for CREDIT "freettl": "Satanic Infestation", "m_ngame": "New Game", "m_option": "Options", "m_loadg": "Load Game", "m_saveg": "Save Game", "m_rdthis": "Read This!", "m_quitg": "Quit Game", "m_newg": "NEW GAME", "m_epi1": "Outpost Outbreak", "m_epi2": "Military Labs", "m_epi3": "Event Horizon", "m_epi4": "Double Impact", "m_jkill": "Little Girl", "m_rough": "Fighting Words", "m_hurt": "Shoot To Kill", "m_ultra": "This Machine Kills Demons", "m_nmare": "WICKED MOTHERFUCKER!", "m_lgttl": "LOAD GAME", "m_sgttl": "SAVE GAME", "m_endgam": "End Game", "m_messg": "Messages:", "m_msgoff": "off", "m_msgon": "on", "m_msens": "Mouse Sensitivity", "m_detail": "Graphic Detail:", "m_gdhigh": "high", "m_gdlow": "low", "m_scrnsz": "Screen Size", "m_svol": "Sound Volume", "m_sfxvol": "Sfx Volume", "m_musvol": "Music Volume", "m_disp": "Display", "wif": "finished", "wiostk": "kills", "wiosti": "items", "wiscrt2": "secret", "wiosts": "scrt", "wifrgs": "frgs", "witime": "Time:", "wisucks": "sucks", "wimstt": "Total:", "wipar": "Par:", "wip1": "P1", "wip2": "P2", "wip3": "P3", "wip4": "P4", "wiostf": "f.", "wimstar": "you", "winum0": "0", "winum1": "1", "winum2": "2", "winum3": "3", "winum4": "4", "winum5": "5", "winum6": "6", "winum7": "7", "winum8": "8", "winum9": "9", "wipcnt": "%", "wiminus": "-", "wienter": "ENTERING", "m_pause": "pause", # Extra graphics used in PrBoom's menus. Generate these as well # so that when we play in PrBoom the menus look consistent. 
"prboom": "PrBoom", "m_generl": "General", "m_setup": "Setup", "m_keybnd": "Key Bindings", "m_weap": "Weapons", "m_stat": "Status Bar/HUD", "m_auto": "Automap", "m_enem": "Enemies", "m_mess": "Messages", "m_chat": "Chat Strings", "m_horsen": "horizontal", "m_versen": "vertical", "m_loksen": "mouse look", "m_accel": "acceleration", # Extra graphics from SMMU/Eternity Engine: "m_about": "about", "m_chatm": "Chat Strings", "m_compat": "Compatibility", "m_demos": "demos", "m_dmflag": "deathmatch flags", "m_etcopt": "eternity options", "m_feat": "Features", "m_gset": "game settings", "m_hud": "heads up display", "m_joyset": "joysticks", "m_ldsv": "Load/Save", "m_menus": "Menu Options", "m_mouse": "mouse options", "m_player": "player setup", "m_serial": "serial connection", "m_sound": "sound options", "m_status": "status bar", "m_tcpip": "tcp/ip connection", "m_video": "video options", "m_wad": "load wad", "m_wadopt": "wad options", # This is from SMMU too, and if we follow things to the letter, # ought to be all lower-case. However, same lump name is used # by other ports (Zandronum) which expect a taller graphic to # match the other main menu graphics. Eternity Engine doesn't # use it any more, and on SMMU there's enough space for it. "m_multi": "Multiplayer", } def read_bex_lump(filename): """Read the BEX (Dehacked) lump from the given filename. Returns: Dictionary mapping from name to value. """ result = {} with open(filename) as f: for line in f: # Ignore comments: line = line.strip() if len(line) == 0 or line[0] in "#;": continue # Just split on '=' and interpret that as an # assignment. This is primitive and doesn't read # like a full BEX parser should, but it's good # enough for our purposes here. assign = line.split("=", 2) if len(assign) != 2: continue result[assign[0].strip()] = assign[1].strip() return result def update_level_name(lumpname, bexdata, bexname): """Set the level name for the given graphic from BEX file. 
Args: lumpname: Name of output graphic file. bexdata: Dictionary of data read from BEX file. bexname: Name of entry in BEX file to use. """ if bexname not in bexdata: raise Exception( "Level name %s not defined in " "DEHACKED lump!" % bexname ) # Strip "MAP01: " or "E1M2: " etc. from start, if present: levelname = re.sub("^\w*\d:\s*", "", bexdata[bexname]) white_graphics[lumpname] = levelname freedoom_bex = read_bex_lump("../../lumps/p2_deh.lmp") freedm_bex = read_bex_lump("../../lumps/fdm_deh.lmp") for e in range(4): for m in range(9): # HUSTR_E1M1 from BEX => wilv00 update_level_name( "wilv%i%i" % (e, m), freedoom_bex, "HUSTR_E%iM%i" % (e + 1, m + 1) ) for m in range(32): # HUSTR_1 => cwilv00 update_level_name("cwilv%02i" % m, freedoom_bex, "HUSTR_%i" % (m + 1)) # HUSTR_1 => dmwilv00 (from freedm.bex) update_level_name("dmwilv%02i" % m, freedm_bex, "HUSTR_%i" % (m + 1))
28.14717
78
0.553962
ee1bd9711953e27dca2cb44d9b6e0151df623ebc
1,840
py
Python
generateFuncdb.py
daeken/GrinningSoul
d35ac197dade35d8a831ffb10fffd2ec93a791ea
[ "Apache-2.0" ]
94
2020-11-22T00:35:08.000Z
2022-02-22T15:45:41.000Z
generateFuncdb.py
CrackerCat/GrinningSoul
d35ac197dade35d8a831ffb10fffd2ec93a791ea
[ "Apache-2.0" ]
1
2021-05-14T14:18:23.000Z
2021-05-15T16:03:24.000Z
generateFuncdb.py
CrackerCat/GrinningSoul
d35ac197dade35d8a831ffb10fffd2ec93a791ea
[ "Apache-2.0" ]
8
2020-11-22T10:23:26.000Z
2022-02-22T08:34:46.000Z
import fnmatch, os, subprocess from multiprocessing import Pool import tqdm sdkPath = subprocess.check_output('xcodebuild -version -sdk iphonesimulator Path', shell=True).strip() def parseSymbols(fn): args = [ 'headerparser_output/headerparse', fn, '-ObjC', '-fmodules', '-isysroot', sdkPath, '-I%s/usr/include' % sdkPath, '-I%s/usr/include/libxml2' % sdkPath, '-F%s/System/Library/Frameworks' % sdkPath, '-I/usr/local/lib/clang/9.0.1/include', '-DTARGET_OS_SIMULATOR' ] if '.framework' in fn: args.append('-framework') args.append(fn.split('.framework', 1)[0].rsplit('/', 1)[1]) symsByFile = {} try: output = subprocess.check_output(args, stderr=subprocess.STDOUT).strip().split('\n') if len(output) == 1 and output[0] == '': return fn, {} for line in output: line = line.strip() if not line: continue if line.startswith('~~~'): print line[3:] continue fn, sym, encoding = line.split(':::', 2) if fn not in symsByFile: symsByFile[fn] = {} symsByFile[fn][sym] = encoding except Exception, e: #import traceback #traceback.print_exc() pass #print #print ' '.join(map(repr, args)) #print `e.output` return fn, symsByFile allFns = [] for root, dirnames, filenames in os.walk(sdkPath): # + '/usr/include'): for filename in fnmatch.filter(filenames, '*.h'): allFns.append(os.path.join(root, filename)) pool = Pool(20) allSymsByFn = {} for fn, symbols in tqdm.tqdm(pool.imap_unordered(parseSymbols, allFns), total=len(allFns)): for dfn, syms in symbols.items(): if dfn not in allSymsByFn: allSymsByFn[dfn] = {} allSymsByFn[dfn].update(syms) with file('funcdb', 'w') as fp: for fn, syms in allSymsByFn.items(): print >>fp, fn for name, encoding in sorted(syms.items(), key=lambda x: x[0]): print >>fp, '\t' + name, '=', encoding
27.462687
102
0.658696
ee1f75cb3cbebacad1e1938152badf91c6fd14cd
1,331
py
Python
src/cli/dcos_vagrant/commands/wait.py
Fabs/dcos-e2e
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
[ "Apache-2.0" ]
null
null
null
src/cli/dcos_vagrant/commands/wait.py
Fabs/dcos-e2e
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
[ "Apache-2.0" ]
null
null
null
src/cli/dcos_vagrant/commands/wait.py
Fabs/dcos-e2e
8836dfa5b83f9d61e92b8b4bd8b058404a3bdc20
[ "Apache-2.0" ]
null
null
null
""" Tools for waiting for a cluster. """ import click import click_spinner import urllib3 from cli.common.options import ( superuser_password_option, superuser_username_option, ) from ._common import ClusterVMs from ._options import existing_cluster_id_option @click.command('wait') @existing_cluster_id_option @superuser_username_option @superuser_password_option def wait( cluster_id: str, superuser_username: str, superuser_password: str, ) -> None: """ Wait for DC/OS to start. """ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) message = ( 'A cluster may take some time to be ready.\n' 'The amount of time it takes to start a cluster depends on a variety ' 'of factors.\n' 'If you are concerned that this is hanging, try "dcos-vagrant doctor" ' 'to diagnose common issues.' ) click.echo(message) cluster_vms = ClusterVMs(cluster_id=cluster_id) with click_spinner.spinner(): if cluster_vms.is_enterprise: cluster_vms.cluster.wait_for_dcos_ee( superuser_username=superuser_username, superuser_password=superuser_password, http_checks=True, ) return cluster_vms.cluster.wait_for_dcos_oss(http_checks=True)
26.62
79
0.689707
ee203c8c6303ac213b7e84dfaa580b41ed282776
1,922
py
Python
Think Complexity/GraphCode.py
henry-zhuang/reading-lists
128b71b24cd49e346df74bee8126d3d3ef0907df
[ "BSD-2-Clause" ]
3
2017-11-06T06:28:07.000Z
2021-12-28T07:04:57.000Z
Think Complexity/GraphCode.py
henry-zhuang/reading-lists
128b71b24cd49e346df74bee8126d3d3ef0907df
[ "BSD-2-Clause" ]
null
null
null
Think Complexity/GraphCode.py
henry-zhuang/reading-lists
128b71b24cd49e346df74bee8126d3d3ef0907df
[ "BSD-2-Clause" ]
null
null
null
class Graph(dict): """A Graph is a dictionary of dictionaris. The outer dictionary maps from a vertex to an inner dictionary. The inner dictionary maps from other vertices to edges. For vertices a and b, graph([a, b], [ab]) maps to the edge that connects a->b, if it exists.""" def __init__(self, vs=[], es=[]): """Creates a new graph. vs: list of vertices; es: list of edge. """ for v in vs: self.add_vertex(v) for e in es: self.add_edge(e) def add_vertex(self, v): """Add a vertex to the graph.""" self[v] = {} def add_edge(self, e): """Add a edge to the graph by adding an entry in both directons. if there is already an edge connecting these Vertices, the new edge replaces it. """ v, w = e self[v][w] = e self[w][v] = e class Vertex(object): """A Vertex is a node in a graph.""" def __init__(self, label = ''): self.label = label def __repr__(self): """Returns a string representation of this object that can be evaluated as a Python expression.""" return 'Vertex (%s)' % repr(self.label) __str__ = __repr__ class Edge(tuple): """An Edge is a list of two Vertics.""" def __new__(cls, e1, e2): """The Edge constructor takes two Vertics.""" if len(vs) != 2: raise ValueError, 'Edges must connect exactly two vertices.' return tuple.__new__(cls, (e1, e2)) def __repr__(self): """Returns a string representation of this object that can be evaluated as a Python expression.""" return 'Edge (%s, %s)' % (repr(self[0]), repr(self[1])) __str__ = __repr__ if __name__ == '__main__': x = Vertex('x') y = Vertex('y') xy = Edge(x, y) g = Graph([x, y], [xy]) print x print xy print g
25.972973
72
0.565557
ee2050a50daad419401ba669fc24c584b7a2b6fa
65
py
Python
tests/__init__.py
GraphQL-python-archive/graphql-env
d82c02c4a82486c69a1a2fa9c262d74f335bdf26
[ "MIT" ]
null
null
null
tests/__init__.py
GraphQL-python-archive/graphql-env
d82c02c4a82486c69a1a2fa9c262d74f335bdf26
[ "MIT" ]
3
2019-07-24T21:05:52.000Z
2021-11-15T17:46:27.000Z
tests/__init__.py
GraphQL-python-archive/graphql-env
d82c02c4a82486c69a1a2fa9c262d74f335bdf26
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """Unit test package for graphql_env."""
21.666667
40
0.6
ee2135f4afd77e09b1a2e652846d3ab3f3aa9ee1
3,642
py
Python
model/gastric_cancer_ResNet_cnn.py
bd-z/Gastric_Biopsy_Cancer_Detector
fac18b6484ff10b09b50eb6d81af9984f9fe3019
[ "MIT" ]
1
2022-01-08T14:19:31.000Z
2022-01-08T14:19:31.000Z
model/gastric_cancer_ResNet_cnn.py
bd-z/Gastric_Biopsy_Cancer_Detector
fac18b6484ff10b09b50eb6d81af9984f9fe3019
[ "MIT" ]
null
null
null
model/gastric_cancer_ResNet_cnn.py
bd-z/Gastric_Biopsy_Cancer_Detector
fac18b6484ff10b09b50eb6d81af9984f9fe3019
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Sep 3 20:33:21 2021 @author: zhang """ import os import numpy as np import pandas as pd import tensorflow as tf from sklearn.model_selection import train_test_split from sklearn.utils import shuffle import tensorflow.keras as keras from tensorflow.keras.preprocessing import image from tensorflow.keras import backend as K from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.applications.resnet_v2 import ResNet50V2 from tensorflow.keras.models import Model, Sequential from tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D, BatchNormalization, MaxPool2D ,Activation, MaxPooling2D def data_table(folder): '''create a dataframe which has 'id' and 'label' columns. The id column is the path of each image and the label column contain 1 and 0 which indicate cancer cells exist or not ''' p=os.walk(folder) list_empty=[] dict_empty={} for path, dir_list,file_list in p: for file_name in file_list: file_path=os.path.join(path,file_name) list_empty.append(file_path) for file_path in list_empty: if 'non_cancer' in file_path: label=0 else: label=1 dict_empty['{}'.format(file_path)]=label df = pd.DataFrame.from_dict(dict_empty, orient='index',columns=['label']) df = df.reset_index().rename(columns={'index':'id'}) df = shuffle(df) return df #folder where the images data stored f=r'G:\BaiduNetdiskDownload\train' df_full=data_table(f) #define X and y X=df_full['id'] y=df_full['label'] # train and test split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 100) # split into test and train sets def slice_load(file_list): ''' load the images''' images=[] for filename in file_list: im = image.load_img(filename,target_size=(512, 512, 3)) b = image.img_to_array(im) images.append(b) return images X_train_image=slice_load(X_train) X_train_array=np.array(X_train_image)/255 X_test_image=slice_load(X_test) X_test_array=np.array(X_test_image)/255 X_train_array.shape type(y_train) #clear sessions 
K.clear_session() input_shape = (512, 512, 3) # transfer learning with ResNet50V2 resMod = ResNet50V2(include_top=False, weights='imagenet', input_shape=input_shape) #frozen the layers in ResNet50V2 for layer in resMod.layers: layer.trainable = False # build model model = Sequential() model.add(resMod) model.add(tf.keras.layers.GlobalAveragePooling2D()) #1st Dense: (None, 60) model.add(keras.layers.Dense(60, activation='relu')) #regularization with penalty term model.add(Dropout(0.2)) # 2nd Dense: (None, 50) model.add(keras.layers.Dense(50, activation='relu')) #regularization model.add(keras.layers.BatchNormalization()) # 2nd Dense: (None, 50) model.add(keras.layers.Dense(50, activation='relu')) model.add(keras.layers.BatchNormalization()) # Output Layer: (None, 1) model.add(keras.layers.Dense(1, activation='sigmoid')) model.summary() # Compile model.compile(loss='categorical_crossentropy', optimizer='adam',\ metrics=['accuracy']) #add early stoping callback = EarlyStopping(monitor='val_loss', patience=3) #(5)Train results=model.fit(X_train_array, y_train, batch_size=64, epochs=50, verbose=1, \ validation_split=0.2,callbacks=[callback], shuffle=True) model.evaluate(X_test_array, y_test) results.history['val_accuracy'] #save model model.save(r'C:\Users\zhang\GitHub_projects\GTBR\Gastric_Biopsy_Cancer_Detector\model\resnet_gastric.h5')
23.197452
124
0.720209
ee245853feab4e3b1a6bbf63e986448df5eef06f
2,280
py
Python
esim_torch/test_single_pixel.py
Giamm9998/face_detection_on_sim_events
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
[ "MIT" ]
null
null
null
esim_torch/test_single_pixel.py
Giamm9998/face_detection_on_sim_events
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
[ "MIT" ]
null
null
null
esim_torch/test_single_pixel.py
Giamm9998/face_detection_on_sim_events
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
[ "MIT" ]
null
null
null
import torch
import matplotlib.pyplot as plt
import numpy as np
import glob
import cv2

from esim_torch import EventSimulator_torch


def increasing_sin_wave(t):
    """Map timestamps *t* (1-D array, seconds) to a sinusoid of linearly
    increasing amplitude, returned as uint8 intensities of shape (N, 1, 1)
    -- i.e. a sequence of 1x1-pixel frames."""
    return (400 * np.sin((t-t[0])*20*np.pi)*(t-t[0])+150).astype("uint8").reshape((-1,1,1))


if __name__ == "__main__":
    # Contrast threshold (both polarities) and refractory period for the sim.
    c = 0.2
    refractory_period_ns = 5e6
    # FIX: renamed the simulator instance -- it previously shadowed the
    # imported `esim_torch` module name.
    simulator = EventSimulator_torch(contrast_threshold_neg=c,
                                     contrast_threshold_pos=c,
                                     refractory_period_ns=refractory_period_ns)

    print("Loading images")
    timestamps_s = np.genfromtxt("../esim_py/tests/data/images/timestamps.txt")
    images = increasing_sin_wave(timestamps_s)
    timestamps_ns = (timestamps_s * 1e9).astype("int64")

    # Log-intensity with a small epsilon to keep log finite at 0.
    log_images = np.log(images.astype("float32") / 255 + 1e-4)

    # generate torch tensors
    print("Loading data to GPU")
    device = "cuda:0"
    log_images = torch.from_numpy(log_images).to(device)
    timestamps_ns = torch.from_numpy(timestamps_ns).to(device)

    # generate events with GPU support
    print("Generating events")
    events = simulator.forward(log_images, timestamps_ns)

    # render events (removed an unused `image = images[0]` left over here)
    print("Plotting")
    event_timestamps = events['t']
    event_polarities = events['p']

    i0 = log_images[0].cpu().numpy().ravel()
    fig, ax = plt.subplots(ncols=2)
    timestamps_ns = timestamps_ns.cpu().numpy()
    log_images = log_images.cpu().numpy().ravel()

    # Left panel: log intensity (and raw intensity) over time, with the
    # contrast-threshold levels drawn as green horizontal lines.
    ax[0].plot(timestamps_ns, log_images)
    ax[0].plot(timestamps_ns, images.ravel())
    ax[0].set_ylim([np.log(1e-1),np.log(1 + 1e-4)])
    ax[0].set_ylabel("Log Intensity")
    ax[0].set_xlabel("Time [ns]")

    # Right panel: inter-event time, to visualise the refractory period.
    ax[1].set_ylabel("Time since last event [ns]")
    ax[1].set_xlabel("Timestamp of event [ns]")
    ax[1].set_xlim([0,3e8])

    for i in range(-10,3):
        ax[0].plot([0,timestamps_ns[-1]], [i0+i*c, i0+i*c], c='g')

    event_timestamps = event_timestamps.cpu().numpy()
    for i, (t, p) in enumerate(zip(event_timestamps, event_polarities)):
        # red = negative polarity, blue = positive
        color = "r" if p == -1 else "b"
        ax[0].plot([t, t], [-3, 0], c=color)
        if i > 0:
            ax[1].scatter([t], [t-event_timestamps[i-1]], c=color)

    ax[1].plot([0,3e8], [refractory_period_ns, refractory_period_ns])
    plt.show()
33.043478
91
0.638158
ee24a1450e91db84cc047da4850276c21c83ee5a
6,642
py
Python
load_csv.py
alexkchew/AppSciTools
7fff312115bd109a5391adff9e0f9cdec8ebbdab
[ "MIT" ]
null
null
null
load_csv.py
alexkchew/AppSciTools
7fff312115bd109a5391adff9e0f9cdec8ebbdab
[ "MIT" ]
null
null
null
load_csv.py
alexkchew/AppSciTools
7fff312115bd109a5391adff9e0f9cdec8ebbdab
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
load_csv.py
This script controls all load-csv functionality.

Created on: Fri Jul 16 15:54:43 2021
Author: Alex K. Chew (alex.chew@schrodinger.com)

Copyright Schrodinger, LLC. All rights reserved.
"""
# Loading modules
import os
import pandas as pd
import numpy as np

# Importing filtration tools
from .filtration import filter_by_variance_threshold

# Defining default columns
DEFAULT_INDEX_COLS = ["Title", "Entry Name"]


# Loading experimental data
def load_property_data(csv_data_path, keep_list = None):
    """
    Load property data from a csv spreadsheet.

    Parameters
    ----------
    csv_data_path: [str]
        path to csv file
    keep_list: [list, default = None]
        list of columns to keep. If None or empty, the entire dataframe
        is returned.

    Returns
    -------
    csv_data: [df]
        dataframe containing csv information restricted to the keep list
    """
    # Loading dataframe
    csv_data = pd.read_csv(csv_data_path)
    # FIX: avoid a mutable default argument; an empty/None keep_list means
    # "keep everything" (same behavior as the original default []).
    if not keep_list:
        return csv_data
    return csv_data[keep_list]


# Function to load descriptor data
def load_descriptor_data(csv_path,
                         clean_data = True,
                         filter_by_variance = True,
                         output_filtered_data = False,
                         na_filter = 'remove',
                         default_index_cols = DEFAULT_INDEX_COLS):
    """
    Load descriptor information from a csv file. When cleaning is enabled,
    non-numerical descriptors and NaN columns are removed automatically.

    Parameters
    ----------
    csv_path : str
        Path to csv file
    clean_data: logical, default = True
        True if you want to clean the data by removing non-numerical
        descriptors / NaN columns
    filter_by_variance: logical, optional
        True if you want to filter out low-variance columns. Default True.
    output_filtered_data: logical, optional
        True if you want to store the filtered data as "<name>_filtered.csv"
        next to the input file. Default False.
    na_filter: str, optional
        Method of dealing with non-existing numbers:
            'remove': (default) drop all columns that contain NaNs.
            'fill_with_zeros': replace NaNs (and +/-inf) with zeros.
    default_index_cols: list, optional
        Identifier columns re-inserted at the front of the output.

    Returns
    -------
    output_df : pd.DataFrame
        dataframe containing the (optionally cleaned/filtered) csv data

    Raises
    ------
    ValueError
        If `na_filter` is not one of the supported methods.
    """
    # Loading csv file
    csv_df = pd.read_csv(csv_path)

    # Printing
    print("\nLoading CSV file: %s"%(csv_path))

    # Checking if you want to clean the dataframe
    if clean_data is True:
        # Cleaning the dataframe
        if na_filter == 'remove':
            print("Removing all columns with nan's")
            csv_df_nonan = csv_df.dropna(axis=1) # Removes NaN values
        elif na_filter == 'fill_with_zeros':
            print("Filling nan's with zeros")
            csv_df_nonan = csv_df.fillna(0)
            # BUG FIX: DataFrame.replace is not in-place -- the original
            # discarded the result, leaving infinities in the data.
            csv_df_nonan = csv_df_nonan.replace([np.inf, -np.inf], 0)
        else:
            # BUG FIX: previously this only printed and then crashed with a
            # NameError on csv_df_nonan; raise an explicit error instead.
            raise ValueError("na_filter of %s is not defined!"%(na_filter))

        # Selecting only portions of the dataframe with numbers.
        csv_df_nums = csv_df_nonan.select_dtypes(['number'])

        # BUG FIX: the `try:` line was commented out, leaving an orphaned
        # `except ValueError:` clause (a SyntaxError); restored it.
        try:
            # Removing cols with low variance
            if filter_by_variance is True:
                output_df = filter_by_variance_threshold(X_df = csv_df_nums)
            else:
                print("Skipping variance filtration for %s"%(csv_path))
                output_df = csv_df_nums

            # Adding back the index cols to the beginning (reverse order so
            # that their original relative order is preserved).
            for each_col in default_index_cols[::-1]:
                if each_col in csv_df and each_col not in output_df:
                    output_df.insert (0, each_col, csv_df[each_col])
        except ValueError:
            # Happens when you have a blank dataframe
            print("No columns found that matches filtration for %s"%(csv_path))
            cols_to_include = [each_col for each_col in default_index_cols
                               if each_col in csv_df.columns]
            output_df = csv_df[cols_to_include]

        # Storing dataframe
        if output_filtered_data is True:
            # Getting path without extension
            csv_path_without_ext = os.path.splitext(csv_path)[0]
            # Getting filtered nomenclature
            csv_path_with_new_name = csv_path_without_ext + "_filtered.csv"
            # Storing
            print("Storing filtered data to: %s"%(csv_path_with_new_name))
            output_df.to_csv(csv_path_with_new_name, index = False)

        return output_df
    else:
        # No cleaning requested -- return the raw dataframe.
        return csv_df


# Function to load multiple descriptor datas
def load_multiple_descriptor_data(default_csv_paths,
                                  descriptor_list = ["2d_descriptors",
                                                     "3d_descriptors",],
                                  **args
                                  ):
    """
    Load multiple descriptor files given a descriptor list.

    Parameters
    ----------
    default_csv_paths: dict
        dictionary of csv paths
    descriptor_list : list
        list of descriptor keys to load from the dictionary

    Remaining keyword arguments are forwarded to `load_descriptor_data`.

    Returns
    -------
    descriptor_df_dict: dict
        dictionary mapping descriptor key -> loaded dataframe
    """
    # Loading all descriptor files
    descriptor_df_dict = {
        each_descriptor_key: load_descriptor_data(default_csv_paths[each_descriptor_key],
                                                  **args)
        for each_descriptor_key in descriptor_list
    }
    return descriptor_df_dict


# Function to strip title and etc to get numerical descriptors only
def strip_df_index(df, col2remove = DEFAULT_INDEX_COLS):
    """
    Strip the identifier columns from a descriptor dataframe.

    Parameters
    ----------
    df : dataframe
        pandas dataframe containing descriptor information.
    col2remove: list
        list of columns to remove from the dataframe.

    Returns
    -------
    df_clean: dataframe
        pandas dataframe without any "Title" or index information
    """
    # Dropping the columns; errors='ignore' tolerates already-missing cols.
    df_clean = df.drop(columns = col2remove, errors='ignore')
    return df_clean
33.545455
117
0.609907
ee24a2a2d1d21059fb919ebc02526d5846c7d278
303
py
Python
ivory/layers/__init__.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
ivory/layers/__init__.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
ivory/layers/__init__.py
daizutabi/scratch
4c56fad47da0938eda89f3c2b6cb2f1919bee180
[ "MIT" ]
null
null
null
"""Package init for ivory.layers: import every layer submodule and declare
the public API via ``__all__``."""
from ivory.layers import (activation, affine, convolution, core, dropout,
                          embedding, loss, normalization, recurrent)

# Public submodules re-exported by `from ivory.layers import *`.
__all__ = [
    "activation",
    "affine",
    "convolution",
    "core",
    "dropout",
    "embedding",
    "loss",
    "normalization",
    "recurrent",
]
20.2
73
0.574257