text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python3
from os import times
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import requests
import pandas as pd
import json
import datetime
import time
import math
from twitter import get_coin_tweets_dates
#BeautifulSoup cannot scrape dynamically changing webpages.
#Instead we use a third-party library called Selenium together with a webdriver.
def convert_date_to_unixtime(year, month, day):
    """Return the Unix timestamp (whole seconds since 1970-01-01) for a date.

    The computation subtracts the epoch from a naive datetime, so the result
    is timezone-independent (midnight UTC of the given date).
    """
    epoch = datetime.datetime(1970, 1, 1)
    elapsed = datetime.datetime(year, month, day) - epoch
    return round(elapsed.total_seconds())
def date_parser(date):
    """Parse a Yahoo-style date string such as 'May 21, 2021' into a date.

    Note: '%b' is locale-dependent; this assumes an English locale.
    """
    parsed = datetime.datetime.strptime(date, '%b %d, %Y')
    return parsed.date()
def is_valid(s):
    """Return True when a scraped table cell holds a usable value.

    Single-character cells (presumably '-' placeholders on non-trading rows —
    confirm against the scraped table) and empty strings are rejected.
    """
    return len(s) >= 2
def scraping_data(y1,m1,d1,y2,m2,d2,coin):
    """Scrape daily prices for `coin` (vs USD) from Yahoo Finance history.

    The page lazy-loads rows as you scroll, so a headless Chrome driver
    repeatedly presses END and re-parses the page. Returns [dates, prices]
    in chronological order (oldest first).
    """
    # Yahoo appears to load roughly this many rows per scroll — tune if needed.
    DAYS_PER_SCROLL = 100
    SECONDS_PER_DAY = 86400
    start_date = convert_date_to_unixtime(y1,m1,d1)
    end_date = convert_date_to_unixtime(y2,m2,d2)
    url = f'https://finance.yahoo.com/quote/{coin}-USD/history?period1={start_date}&period2={end_date}&interval=1d&filter=history&frequency=1d&includeAdjustedClose=true'
    # initiating the webdriver. Parameter includes the path of the webdriver.
    chrome_options = Options()
    # run chrome without GUI
    chrome_options.headless = True
    chrome_options.add_argument("--log-level=3")
    # NOTE(review): `executable_path`, `Options.headless` and
    # `find_element_by_tag_name` below are Selenium 3 APIs removed in
    # Selenium 4 — confirm the pinned selenium version.
    driver = webdriver.Chrome(executable_path='./chromedriver',options = chrome_options)
    driver.get(url)
    html = driver.find_element_by_tag_name('html')
    # Webdriver presses ESC to stop loading the page
    html.send_keys(Keys.ESCAPE)
    days_between = (end_date - start_date) / SECONDS_PER_DAY
    # One scroll pass per ~100 days of history.
    scroll = math.ceil(days_between / DAYS_PER_SCROLL)
    for i in range(scroll):
        # The whole page is re-parsed on every pass and dates/prices are
        # rebuilt from scratch, so only the final (fully loaded) pass matters.
        soup = BeautifulSoup(driver.page_source,'html.parser')
        dates = []
        prices = []
        # extract date and price information
        for tr in soup.tbody.contents:
            # Navigable string is not callable
            date_source = tr.contents[0]
            # convert navigable string into callable string
            date_string = str(date_source.string)
            date = date_parser(date_string)
            # contents[4] is presumably the Close column — confirm against
            # Yahoo's table layout (Date, Open, High, Low, Close, ...).
            price = tr.contents[4].string
            if is_valid(price):
                # insert(0, ...) reverses Yahoo's newest-first ordering so the
                # returned lists are chronological.
                dates.insert(0,date)
                prices.insert(0,float(price.replace(',','')))
        # Webdriver presses END to scroll to the bottom of the page and load more data
        html.send_keys(Keys.END)
        # NOTE(review): WebDriverWait(...) without .until(...) is a no-op; the
        # time.sleep below is what actually waits for the lazy load.
        WebDriverWait(driver,timeout=0.5)
        time.sleep(0.3)
    driver.close()
    return [dates,prices]
"""
draw coin price fluctuation with Elon's tweet
"""
def draw(dates, prices, coin, tw_dates):
    """Plot coin prices over time and mark the dates of Elon's Doge tweets.

    dates/prices: parallel chronological lists produced by scraping_data.
    coin: ticker name used in the plot title.
    tw_dates: dates of tweets; tweets falling on dates with no scraped price
    are skipped (the original code raised ValueError on `dates.index`).
    """
    fig, ax = plt.subplots()
    # Set graph size to 12 inches by 10 inches.
    fig.set_size_inches((12, 10))
    # First series: coin price against date.
    ax.plot(dates, prices, label='coin price')
    # Second series: the price on each tweet day, drawn as red dots.
    tw_plot_dates = []
    tw_prices = []
    for tw_date in tw_dates:
        # Guard: a tweet may predate or postdate the scraped range, or fall on
        # a date with no price row — skip it rather than crash.
        if tw_date in dates:
            tw_plot_dates.append(tw_date)
            tw_prices.append(prices[dates.index(tw_date)])
    ax.plot(tw_plot_dates, tw_prices, 'ro', label='Elon\'s Doge tweet')
    ax.xaxis.set_major_locator(mdates.AutoDateLocator())
    ax.xaxis.set_minor_locator(mdates.DayLocator())
    # Auto-rotate x axis ticks.
    fig.autofmt_xdate()
    ax.grid(True)
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title(f'{coin} coin Price', loc='center')
    plt.legend(loc='upper left')
    plt.show()
def main():
    """Entry point: scrape DOGE prices, overlay Elon's tweets, report runtime."""
    start_time = time.time()
    # Daily DOGE-USD prices for 2021-01-01 .. 2021-05-21.
    dates, prices = scraping_data(2021, 1, 1, 2021, 5, 21, 'DOGE')
    tweet_dates = get_coin_tweets_dates('elonmusk')
    draw(dates, prices, 'DOGE', tweet_dates)
    duration = time.time() - start_time
    print(f'It took {duration}s to run this application.')


if __name__ == '__main__':
    main()
| 4,031 | 1,380 |
"""
64. Minimum Path Sum
Given a m x n grid filled with non-negative numbers,
find a path from top left to bottom right
which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
http://www.tangjikai.com/algorithms/leetcode-64-minimum-path-sum
Dynamic Programming
We can use a two-dimensional array
to record the minimum sum at each position of grid,
finally return the last element as output.
"""
class Solution(object):
    def minPathSum(self, grid):
        """Return the minimal top-left to bottom-right path sum of `grid`.

        Only moves down or right are allowed. Uses a rolling 1-D DP row, so
        space is O(n) instead of the original O(m*n). An empty grid returns 0
        (the original raised IndexError).

        :type grid: List[List[int]]
        :rtype: int
        """
        if not grid or not grid[0]:
            return 0
        n = len(grid[0])
        # dp[j] = minimum path sum to reach column j of the row processed so far.
        dp = [0] * n
        for i, row in enumerate(grid):
            for j, val in enumerate(row):
                if i == 0 and j == 0:
                    # Start cell: path sum is the cell itself.
                    dp[j] = val
                elif i == 0:
                    # First row: can only arrive from the left.
                    dp[j] = dp[j - 1] + val
                elif j == 0:
                    # First column: can only arrive from above (dp[j] still
                    # holds the previous row's value).
                    dp[j] = dp[j] + val
                else:
                    # Best of coming from above (dp[j]) or from the left (dp[j-1]).
                    dp[j] = min(dp[j], dp[j - 1]) + val
        return dp[-1]
| 1,264 | 420 |
# coding=utf-8
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import subprocess
import sys
from contextlib import contextmanager
from abc import abstractproperty
from pants.binaries.binary_util import BinaryUtil
from pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult
from pants.fs.archive import TGZ
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import environment_as, temporary_file_path
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
from pants.util.strutil import ensure_binary
logger = logging.getLogger(__name__)
class RDependency(AbstractClass):
    """Abstract interface for anything that can act as an R package dependency."""

    @abstractproperty
    def name(self):
        """The name of the R package this dependency provides."""
class RInvocationException(Exception):
    """Base error for any failed invocation of an R command."""

    INVOCATION_ERROR_BOILERPLATE = "`{cmd}` failed: {what_happened}"

    def __init__(self, cmd, what_happened):
        # `cmd` is the argv list; join it for a readable message.
        rendered = self.INVOCATION_ERROR_BOILERPLATE.format(
            cmd=' '.join(cmd), what_happened=what_happened)
        super(RInvocationException, self).__init__(rendered)
class RSpawnFailure(RInvocationException):
    """Raised when the R subprocess could not be started at all (e.g. OSError)."""

    def __init__(self, cmd, err):
        # repr(err) keeps the underlying exception type visible in the message.
        super(RSpawnFailure, self).__init__(cmd=cmd, what_happened=repr(err))
class RProcessResultFailure(RInvocationException):
    """Raised when an R process runs but exits with a non-zero status."""

    PROCESS_RESULT_FAILURE_BOILERPLATE = "exited non-zero ({exit_code}){rest}"

    def __init__(self, cmd, exit_code, rest=''):
        # `rest` lets subclasses append extra detail (e.g. captured output).
        what_happened = self.PROCESS_RESULT_FAILURE_BOILERPLATE.format(
            exit_code=exit_code,
            rest=rest,
        )
        super(RProcessResultFailure, self).__init__(
            cmd=cmd, what_happened=what_happened)
class RProcessInvokedForOutputFailure(RProcessResultFailure):
    """Raised when an R process invoked for its output exits non-zero.

    The captured stdout and stderr are folded into the message to aid debugging.
    """

    INVOKE_OUTPUT_ERROR_BOILERPLATE = """
stdout:
{stdout}
stderr:
{stderr}
"""

    def __init__(self, cmd, exit_code, stdout, stderr):
        rest = self.INVOKE_OUTPUT_ERROR_BOILERPLATE.format(
            stdout=stdout,
            stderr=stderr,
        )
        super(RProcessInvokedForOutputFailure, self).__init__(
            cmd=cmd, exit_code=exit_code, rest=rest)
class RDistribution(object):
    """Wraps a bootstrapped R distribution and helpers to invoke R processes.

    Instances are created via the nested `Factory` subsystem, which reads the
    pants options for version and cache locations. The class also carries
    utilities for generating small R scripts (library paths, character
    vectors) and for installing packages from CRAN or GitHub.
    """

    DEVTOOLS_CRAN_NAME = 'devtools'
    MODULES_GITHUB_ORG_NAME = 'klmr'
    MODULES_GITHUB_REPO_NAME = 'modules'

    class Factory(Subsystem):
        """Subsystem that builds RDistribution instances from pants options."""

        options_scope = 'r-distribution'

        @classmethod
        def subsystem_dependencies(cls):
            # BinaryUtil is needed to fetch the R distribution tarball.
            return super(RDistribution.Factory, cls).subsystem_dependencies() + (
                BinaryUtil.Factory,
            )

        @classmethod
        def register_options(cls, register):
            super(RDistribution.Factory, cls).register_options(register)
            register('--r-version', fingerprint=True,
                     help='R distribution version. Used as part of the path to '
                     'lookup the distribution with --binary-util-baseurls and '
                     '--pants-bootstrapdir.',
                     default='3.4.3')
            register('--modules-git-ref', fingerprint=True,
                     help='git ref of the klmr/modules repo to use for R modules.',
                     default='d4199f2d216c6d20c3b092c691d3099c3325f2a3')
            register('--tools-cache-dir', advanced=True, metavar='<dir>',
                     default=None,
                     help='The parent directory for downloaded R tools. '
                     'If unspecified, a standard path under the workdir is '
                     'used.')
            register('--resolver-cache-dir', advanced=True, metavar='<dir>',
                     default=None,
                     help='The parent directory for resolved R packages. '
                     'If unspecified, a standard path under the workdir is '
                     'used.')
            register('--chroot-cache-dir', advanced=True, metavar='<dir>',
                     default=None,
                     help='The parent directory for the chroot cache. '
                     'If unspecified, a standard path under the workdir is '
                     'used.')

        @memoized_property
        def scratch_dir(self):
            # Default parent for this subsystem's caches, e.g. <workdir>/r-distribution.
            return os.path.join(
                self.get_options().pants_workdir, *self.options_scope.split('.'))

        def create(self):
            """Build an RDistribution from the registered options, defaulting
            each cache dir to a subdirectory of scratch_dir when unset."""
            binary_util = BinaryUtil.Factory.create()
            options = self.get_options()
            tools_cache_dir = options.tools_cache_dir or os.path.join(
                self.scratch_dir, 'tools')
            resolver_cache_dir = options.resolver_cache_dir or os.path.join(
                self.scratch_dir, 'resolved_packages')
            chroot_cache_dir = options.chroot_cache_dir or os.path.join(
                self.scratch_dir, 'chroots')
            return RDistribution(
                binary_util,
                r_version=options.r_version,
                modules_git_ref=options.modules_git_ref,
                tools_cache_dir=tools_cache_dir,
                resolver_cache_dir=resolver_cache_dir,
                chroot_cache_dir=chroot_cache_dir,
            )

    def __init__(self, binary_util, r_version, modules_git_ref, tools_cache_dir,
                 resolver_cache_dir, chroot_cache_dir):
        self._binary_util = binary_util
        self._r_version = r_version
        self.modules_git_ref = modules_git_ref
        self.tools_cache_dir = tools_cache_dir
        self.resolver_cache_dir = resolver_cache_dir
        self.chroot_cache_dir = chroot_cache_dir

    def _unpack_distribution(self, supportdir, r_version, output_filename):
        """Fetch the tarball for supportdir/r_version and unpack it into an
        'unpacked' directory next to it; returns that directory."""
        logger.debug('unpacking R distribution, version: %s', r_version)
        tarball_filepath = self._binary_util.select_binary(
            supportdir=supportdir, version=r_version, name=output_filename)
        logger.debug('Tarball for %s(%s): %s', supportdir, r_version, tarball_filepath)
        work_dir = os.path.join(os.path.dirname(tarball_filepath), 'unpacked')
        TGZ.extract(tarball_filepath, work_dir, concurrency_safe=True)
        return work_dir

    @memoized_property
    def r_installation(self):
        # Unpacked at most once per process thanks to @memoized_property.
        r_dist_path = self._unpack_distribution(
            supportdir='bin/R', r_version=self._r_version, output_filename='r.tar.gz')
        return r_dist_path

    @memoized_property
    def r_bin_dir(self):
        # Directory containing the R and Rscript executables.
        return os.path.join(self.r_installation, 'bin')

    # Appended to a script so the resulting workspace is written to
    # `save_file_path`, allowing a later interactive session to --restore it.
    R_SAVE_IMAGE_BOILERPLATE = """{initial_input}
save.image(file='{save_file_path}', safe=FALSE)
"""

    RDATA_FILE_NAME = '.Rdata'

    def r_invoke_isolated_process(self, context, cmd):
        """Run `cmd` via the v2 engine's isolated process execution.

        Raises RProcessInvokedForOutputFailure on non-zero exit; otherwise
        returns the ExecuteProcessResult.
        """
        logger.debug("isolated process '{}'".format(cmd))
        # NOTE(review): this passes ['PATH', <bin dir>] as the second
        # positional argument of ExecuteProcessRequest — confirm this matches
        # the env argument shape expected by this pants version.
        env_path = ['PATH', self.r_bin_dir]
        req = ExecuteProcessRequest(tuple(cmd), env_path)
        res, = context._scheduler.product_request(
            ExecuteProcessResult, [req])
        if res.exit_code != 0:
            raise RProcessInvokedForOutputFailure(
                cmd, res.exit_code, res.stdout, res.stderr)
        return res

    @contextmanager
    def r_isolated_invoke_with_input(self, context, stdin_input, suffix='.R'):
        """Write `stdin_input` to a temporary script file and yield its path."""
        logger.debug("isolated invoke with stdin_input:\n{}".format(stdin_input))
        with temporary_file_path(suffix=suffix) as tmp_file_path:
            with open(tmp_file_path, 'w') as tmpfile:
                tmpfile.write(stdin_input)
            yield tmp_file_path

    def r_invoke_repl_sandboxed(self, workunit, cmd, cwd):
        """Run `cmd` interactively with our R bin dir prepended to PATH.

        stdin is inherited from the pants process; stdout/stderr go to the
        workunit. Returns the subprocess exit code.
        """
        new_path = ':'.join([
            self.r_bin_dir,
            os.environ.get('PATH'),
        ])
        with environment_as(PATH=new_path):
            try:
                subproc = subprocess.Popen(
                    cmd,
                    stdin=sys.stdin,
                    stdout=workunit.output('stdout'),
                    stderr=workunit.output('stderr'),
                    cwd=cwd,
                )
                return subproc.wait()
            except OSError as e:
                raise RSpawnFailure(cmd, e)
            except subprocess.CalledProcessError as e:
                raise RProcessResultFailure(cmd, e.returncode, e)

    def invoke_r_interactive(self, context, workunit, initial_input, chroot_dir,
                             clean_chroot=False):
        """Run `initial_input` to seed an .Rdata workspace in `chroot_dir`,
        then start an interactive R session restored from that workspace."""
        logger.debug("interactive in '{}', initial_input: '{}'".format(
            chroot_dir, initial_input))
        rdata_path = os.path.join(chroot_dir, self.RDATA_FILE_NAME)
        input_with_save = self.R_SAVE_IMAGE_BOILERPLATE.format(
            initial_input=initial_input,
            save_file_path=rdata_path,
        )
        safe_mkdir(chroot_dir, clean=clean_chroot)
        # First run the script non-interactively so it save.image()s its state...
        with self.r_isolated_invoke_with_input(
                context, input_with_save) as tmp_file_path:
            save_cmd = [
                'R',
                '--vanilla',
                '--slave',
                '--file={}'.format(tmp_file_path)
            ]
            self.r_invoke_isolated_process(context, save_cmd)
        # ...then start an interactive session that restores that state.
        r_cmd = [
            'R',
            '--save',
            '--restore',
            '--interactive',
        ]
        return self.r_invoke_repl_sandboxed(workunit, r_cmd, chroot_dir)

    def invoke_rscript(self, context, stdin_input):
        """Run `stdin_input` as a one-shot Rscript; returns the process result."""
        with self.r_isolated_invoke_with_input(
                context, stdin_input) as tmp_file_path:
            r_cmd = [
                'Rscript',
                '--verbose',
                tmp_file_path,
            ]
            return self.r_invoke_isolated_process(context, r_cmd)

    class PackageInfoFormatError(Exception):
        """Raised when a package name or version string is malformed."""
        # NOTE(review): the methods below raise the bare name
        # PackageInfoFormatError; if this class really is nested here (the
        # flattened source is ambiguous about indentation) that lookup fails
        # at runtime and should be cls.PackageInfoFormatError — confirm
        # against the original layout.

    # NOTE(review): these regex literals are not raw strings; '\s' and '\.'
    # still work but are deprecated escape sequences in modern Python 3.
    BLANK_LINE_REGEX = re.compile('^\s*$')

    @classmethod
    def is_valid_package_name(cls, name):
        # A package name is valid when it is not blank/whitespace-only.
        return cls.BLANK_LINE_REGEX.match(name) is None

    @classmethod
    def check_valid_package_name(cls, name):
        """Return `name`, raising PackageInfoFormatError if it is blank."""
        if not cls.is_valid_package_name(name):
            raise PackageInfoFormatError(
                "'{}' is not a valid package name (must not be blank)".format(name))
        return name

    @classmethod
    def filter_packages_lines_stdout(cls, lines):
        # Drop blank lines from R's stdout package listing.
        return [p for p in lines if cls.is_valid_package_name(p)]

    VALID_VERSION_REGEX = re.compile('^[0-9]+(\.[0-9]+)*$')

    @classmethod
    def is_valid_version(cls, version):
        # None means "any version"; otherwise require dotted digits (e.g. 1.2.3).
        if version is None:
            return True
        return cls.VALID_VERSION_REGEX.match(version) is not None

    @classmethod
    def check_valid_version(cls, version):
        """Return `version`, raising PackageInfoFormatError if it is malformed."""
        if not cls.is_valid_version(version):
            raise PackageInfoFormatError(
                "'{}' is not a valid package version "
                "(must be 'None' or match '{}')"
                .format(version, cls.VALID_VERSION_REGEX.pattern))
        return version

    @classmethod
    def gen_script_load_stmts(cls, srcs_rel):
        """Generate one R source() statement per relative source path."""
        if len(srcs_rel) == 0:
            return ''
        source_stmts = ["source('{}')".format(s.encode('ascii')) for s in srcs_rel]
        return '\n'.join(source_stmts) + '\n'

    @classmethod
    def convert_to_list_of_ascii(cls, arg):
        # Accept a single string or a list; always return a list of byte
        # strings (ensure_binary is the py2/py3-safe conversion).
        if not isinstance(arg, list):
            arg = [ensure_binary(arg)]
        return [ensure_binary(x) for x in arg]

    @classmethod
    def create_valid_r_charvec_input(cls, elements, drop_empty=False):
        """Render `elements` as an R character-vector literal.

        Empty input yields 'character(0)' (or None when drop_empty=True), a
        single element a quoted scalar, and multiple elements c('a', 'b', ...).
        """
        elements = cls.convert_to_list_of_ascii(elements)
        if len(elements) == 0:
            if drop_empty:
                return None
            return 'character(0)'
        elif len(elements) == 1:
            return "'{}'".format(elements[0])
        quoted = ["'{}'".format(el) for el in elements]
        return "c({})".format(', '.join(quoted))

    @classmethod
    def gen_libs_input(cls, lib_paths):
        """Generate a .libPaths(...) statement, or '' when there are no paths."""
        libs_charvec = cls.create_valid_r_charvec_input(lib_paths, drop_empty=True)
        if libs_charvec is None:
            return ''
        return ".libPaths({})".format(libs_charvec) + '\n'

    # Prints the installed package names, one per line, on stdout.
    R_LIST_PACKAGES_BOILERPLATE = """{libs_input}
cat(installed.packages(lib.loc={libs_joined})[,'Package'], sep='\\n')
"""

    def get_installed_packages(self, context, lib_paths):
        """Return the names of packages installed under `lib_paths`
        (or under R's default library paths when `lib_paths` is empty)."""
        libs_input = self.gen_libs_input(lib_paths)
        libs_charvec = self.create_valid_r_charvec_input(lib_paths, drop_empty=True)
        if libs_charvec is None:
            # NULL makes installed.packages() consult the default library paths.
            libs_charvec="NULL"
        installed_packages_input = self.R_LIST_PACKAGES_BOILERPLATE.format(
            libs_input=libs_input,
            libs_joined=libs_charvec,
        )
        pkgs = self.invoke_rscript(context, installed_packages_input).stdout.split('\n')
        return self.filter_packages_lines_stdout(pkgs)

    # R_INSTALL_SOURCE_PACKAGE_BOILERPLATE = """???"""
    # def gen_source_install_input(self, source_dir, outdir):
    #     return self.R_INSTALL_SOURCE_PACKAGE_BOILERPLATE.format(
    #         expr="devtools::install_local('{}', lib='{}')".format(
    #             source_dir, outdir),
    #         outdir=outdir,
    #     )
    # def install_source_package(self, context, source_dir, pkg_cache_dir):
    #     source_input = self.gen_source_install_input(source_dir, pkg_cache_dir)
    #     self.invoke_rscript(context, source_input).stdout.split('\n')

    def install_cran_package(self, cran, context, cran_dep, outdir):
        """Install `cran_dep` into `outdir` using the given CRAN helper."""
        cran_input = cran.gen_cran_install_input(cran_dep, outdir)
        self.invoke_rscript(context, cran_input)

    def install_github_package(self, github, context, github_dep, outdir):
        """Install `github_dep` into `outdir` using the given GitHub helper."""
        github_input = github.gen_github_install_input(
            self.tools_cache_dir, github_dep, outdir)
        logger.debug("github_input: '{}'".format(github_input))
        self.invoke_rscript(context, github_input).stdout.split('\n')
| 12,507 | 4,284 |
import torch
from torch import nn
import numpy as np
class convmodel(torch.nn.Module):
    """Two stacked 3x3 convolutions (no bias) feeding a single-output linear head.

    Expects inputs of shape (batch, 3, 10, 10); the linear layer's input size
    (32*10*10) is fixed to that spatial resolution.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, 3, 1, padding=1, bias=False)
        self.conv2 = nn.Conv2d(16, 32, 3, 1, padding=1, bias=False)
        self.linear = nn.Linear(32 * 10 * 10, 1, bias=False)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        # Flatten everything but the batch dimension for the linear head.
        flattened = features.view(features.size(0), -1)
        return self.linear(flattened)
import torch
from torch import nn
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
    """Functional batch normalization.

    In prediction mode (gradients disabled) the tracked moving statistics are
    used; in training mode the batch statistics are used and the moving
    statistics are updated. Returns (Y, new_moving_mean, new_moving_var).
    """
    # `is_grad_enabled` distinguishes training mode from prediction mode.
    if not torch.is_grad_enabled():
        # Prediction: normalize directly with the moving-average statistics.
        X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
    else:
        assert len(X.shape) in (2, 4)
        # Fully-connected input (2-D): reduce over the batch axis only.
        # Convolutional input (4-D): reduce over batch and spatial axes,
        # keeping dims so the result broadcasts over X.
        is_conv = len(X.shape) == 4
        reduce_dims = (0, 2, 3) if is_conv else 0
        mean = X.mean(dim=reduce_dims, keepdim=is_conv)
        var = ((X - mean) ** 2).mean(dim=reduce_dims, keepdim=is_conv)
        # Training: standardize with the current batch statistics.
        X_hat = (X - mean) / torch.sqrt(var + eps)
        # Fold the batch statistics into the moving averages.
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    # Scale and shift.
    Y = gamma * X_hat + beta
    return Y, moving_mean.data, moving_var.data
class BatchNorm(nn.Module):
    """Hand-rolled batch-normalization layer backed by the functional batch_norm.

    `num_features` is the output count of a fully-connected layer or the
    channel count of a convolutional layer; `num_dims` is 2 for
    fully-connected inputs and 4 for convolutional inputs.
    """

    def __init__(self, num_features, num_dims):
        super().__init__()
        shape = (1, num_features) if num_dims == 2 else (1, num_features, 1, 1)
        # Learnable scale and shift, initialized to the identity transform.
        self.gamma = nn.Parameter(torch.ones(shape))
        self.beta = nn.Parameter(torch.zeros(shape))
        # Running statistics are plain tensors (not registered parameters).
        self.moving_mean = torch.zeros(shape)
        self.moving_var = torch.ones(shape)

    def forward(self, X):
        # Keep the running statistics on the same device as the input.
        if self.moving_mean.device != X.device:
            self.moving_mean = self.moving_mean.to(X.device)
            self.moving_var = self.moving_var.to(X.device)
        # batch_norm returns the updated running statistics; store them back.
        Y, self.moving_mean, self.moving_var = batch_norm(
            X, self.gamma, self.beta, self.moving_mean,
            self.moving_var, eps=1e-5, momentum=0.9)
        return Y
if __name__=='__main__':
    # Demonstration of one manual SGD step, then a comparison of three
    # batch-norm implementations (nn.BatchNorm2d, our BatchNorm, and a
    # manual computation).
    model = convmodel()
    # Fill every parameter with 0.1 so the forward pass is deterministic.
    for m in model.parameters():
        m.data.fill_(0.1)
    # criterion = nn.CrossEntropyLoss()
    criterion = nn.MSELoss()
    # lr=1.0 so one step changes each weight by exactly its negative gradient.
    optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
    model.train()
    # Simulate a batch of 8 samples, each 10x10, all initialized to 1 so
    # every run produces the same output and is easy to inspect.
    images = torch.ones(8, 3, 10, 10)
    targets = torch.ones(8, dtype=torch.float)
    output = model(images)
    print(output.shape)
    # torch.Size([8, 1]) — the linear head has a single output feature.
    loss = criterion(output.view(-1,), targets)
    print(model.conv1.weight.grad)
    # None — no backward pass has run yet.
    loss.backward()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([-0.0782, -0.0842, -0.0782])
    # One backward pass computes the parameter gradients; for brevity we
    # only inspect a small slice of the result.
    print(model.conv1.weight[0][0][0])
    # tensor([0.1000, 0.1000, 0.1000], grad_fn=<SelectBackward>)
    # All parameters were initialized to 0.1 above.
    optimizer.step()
    print(model.conv1.weight[0][0][0])
    # tensor([0.1782, 0.1842, 0.1782], grad_fn=<SelectBackward>)
    # With learning rate 1, the updated weight is exactly
    # (original weight - gradient)!
    optimizer.zero_grad()
    print(model.conv1.weight.grad[0][0][0])
    # tensor([0., 0., 0.])
    # Remember to zero the gradients after each update, otherwise the next
    # backward pass accumulates onto the previous result. zero_grad() can
    # also be moved earlier, as long as gradients are zero before backward().
    print('>>>test for bn<<<')
    bn = nn.BatchNorm2d(2)
    aa = torch.randn(2,2,1,1)
    bb = bn(aa)
    print('aa=', aa)
    print('bb=', bb)
    # Compare against our custom BatchNorm layer.
    cc = BatchNorm(2, 4)(aa)
    print('cc=', cc)
    shape = (1, 2, 1, 1)  # (unused)
    mean = aa.mean(dim=(0,2,3), keepdim=True)
    # Manual normalization (without eps), for comparison with bb and cc.
    dd = (aa - mean) / torch.sqrt(((aa-mean)**2).mean(dim=(0,2,3), keepdim=True))
    print('dd=', dd)
| 5,121 | 2,049 |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-06 15:32 | 64 | 41 |
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class KrakItem(Item):
    """Scrapy item holding one scraped company listing.

    Presumably scraped from Krak (a Danish business directory), given the
    name — confirm against the spider that populates it.
    """
    company_name = Field()
    company_site_url = Field()
    short_description = Field()
    address = Field()
    phone = Field()
    phone_type = Field()
    gen_description = Field()
    description_headers = Field()
    description_paragraphs = Field()
    tags = Field()
    category = Field()
| 563 | 167 |
import re
import copy
from mau.lexers.base_lexer import TokenTypes, Token
from mau.lexers.main_lexer import MainLexer
from mau.parsers.base_parser import (
BaseParser,
TokenError,
ConfigurationError,
parser,
)
from mau.parsers.text_parser import TextParser
from mau.parsers.arguments_parser import ArgumentsParser
from mau.parsers.preprocess_variables_parser import PreprocessVariablesParser
from mau.parsers.nodes import (
HorizontalRuleNode,
TextNode,
BlockNode,
ContentNode,
ContentImageNode,
CommandNode,
HeaderNode,
ListNode,
ListItemNode,
ParagraphNode,
TocNode,
TocEntryNode,
FootnotesNode,
)
class EngineError(ValueError):
    """Used to signal that the engine selected for a code block is not known."""
def header_anchor(text, level):
    """Build a sanitised URL anchor for a header.

    The text is lowercased, stripped of every character other than letters,
    digits, dashes, dots, and spaces, and the remaining words are joined with
    single dashes. `level` is part of the anchor-function signature (custom
    functions may use it) but is unused here.
    """
    lowered = text.lower()
    # Keep only letters, numbers, dashes, dots, and spaces.
    kept = "".join(re.findall("[a-z0-9-\\. ]+", lowered))
    # Collapse any whitespace runs into single dashes.
    return "-".join(kept.split())
# The MainParser is in charge of parsing
# the whole input, calling other parsers
# to manage single paragraphs or other
# things like variables.
class MainParser(BaseParser):
    def __init__(self, variables=None):
        """Set up the main parser.

        `variables` is an optional dict of user variables; it is deep-copied
        so the caller's dict is never mutated, and a "mau" namespace is
        guaranteed to exist because several settings are read from it.
        """
        super().__init__()
        self.lexer = MainLexer()
        # This is used as a storage for attributes.
        # Block attributes are defined before the block
        # so when we parse them we store them here and
        # then use them when dealing with the block itself.
        self.argsparser = ArgumentsParser()
        # Copy the variables and make sure the "mau" namespace exists
        self.variables = copy.deepcopy(variables) if variables else {}
        if "mau" not in self.variables:
            self.variables["mau"] = {}
        # Document state collected while parsing.
        self.headers = []
        self.footnote_defs = []
        self.blocks = {}
        self.toc = None
        # When we define a block we establish an alias
        # {alias:actual_block_name}
        self.block_aliases = {}
        # Each block we define can have default values
        # {actual_block_name:kwargs}
        self.block_defaults = {}
        # Each block we define can have names for unnamed arguments
        # {actual_block_name:kwargs}
        self.block_names = {}
        # Backward compatibility with Mau 1.x
        # Mau 1.x used [source] to format source, while Mau 2.x
        # uses [myblock, engine=source], so this establishes
        # a default block definition so that
        # [source] = [source, engine=source]
        # In Mau 2.x this block uses the template "block-source"
        # so any template called "source" (e.g. "source.html")
        # must be renamed.
        # This definition can be overridden by custom block definitions
        self.block_aliases["source"] = "source"
        self.block_defaults["source"] = {"engine": "source", "language": "text"}
        self.block_names["source"] = ["language"]
        # Built-in "admonition" and "quote" block types.
        self.block_aliases["admonition"] = "admonition"
        self.block_names["admonition"] = ["class", "icon", "label"]
        self.block_aliases["quote"] = "quote"
        self.block_defaults["quote"] = {"attribution": None}
        self.block_names["quote"] = ["attribution"]
        # Iterate through block definitions passed as variables
        for alias, block_definition in (
            self.variables["mau"].get("block_definitions", {}).items()
        ):
            try:
                blocktype = block_definition["blocktype"]
                self.block_aliases[alias] = blocktype
            except KeyError:
                raise ConfigurationError(
                    f"Block definition '{alias}' is missing key 'blocktype'"
                )
            try:
                self.block_defaults[blocktype] = block_definition["kwargs"]
            except KeyError:
                raise ConfigurationError(
                    f"Block definition '{alias}' is missing key 'kwargs'"
                )
        # This is a buffer for a block title
        self._title = None
        # This is the function used to create the header
        # anchors. It can be specified through
        # mau.header_anchor_function to override
        # the default one.
        self.header_anchor = self.variables["mau"].get(
            "header_anchor_function", header_anchor
        )
        self.v1_backward_compatibility = self.variables["mau"].get(
            "v1_backward_compatibility", False
        )
def _pop_title(self):
# This return the title and resets the
# cached one, so no other block will
# use it.
title = self._title
self._title = None
return title
    def _push_title(self, title):
        """Buffer `title` so the next block that needs one can consume it."""
        # When we parse a title we can store it here
        # so that it is available to the next block
        # that will use it.
        self._title = title
    def _collect_lines(self, stop_tokens):
        """Collect whole lines of text until one of `stop_tokens` comes next."""
        # This collects several lines of text in a list
        # until it gets to a line that begins with one
        # of the tokens listed in stop_tokens.
        # It is useful for block or other elements that
        # are clearly surrounded by delimiters.
        lines = []
        while self.peek_token() not in stop_tokens:
            lines.append(self.collect_join([Token(TokenTypes.EOL)]))
            self.get_token(TokenTypes.EOL)
        return lines
    def _collect_text_content(self):
        """Join all adjacent TEXT lines into one space-separated string.

        Returns None when the next token is not text.
        """
        # Collects all adjacent text tokens
        # into a single string
        if not self.peek_token_is(TokenTypes.TEXT):
            return None
        values = []
        # Get all tokens
        while self.peek_token_is(TokenTypes.TEXT):
            values.append(self.get_token().value)
            self.get_token(TokenTypes.EOL)
        return " ".join(values)
    def _parse_text_content(self, text):
        """Run `text` through variable replacement and the text parser.

        Returns the resulting sentence node; any footnotes found are appended
        to self.footnote_defs.
        """
        # Parse a text using the TextParser.
        # Replace variables
        p = PreprocessVariablesParser(self.variables).analyse(
            text,
        )
        text = p.nodes[0].value
        # Parse the text
        p = TextParser(
            footnotes_start_with=len(self.footnote_defs) + 1,
            v1_backward_compatibility=self.v1_backward_compatibility,
        ).analyse(text)
        # Text should return a single sentence node
        result = p.nodes[0]
        # Store the footnotes
        self.footnote_defs.extend(p.footnote_defs)
        return result
    @parser
    def _parse_eol(self):
        """Consume a single end-of-line token."""
        # This simply parses the end of line.
        self.get_token(TokenTypes.EOL)
    @parser
    def _parse_horizontal_rule(self):
        """Parse a horizontal rule: a line containing only `---`."""
        # The horizontal rule ---
        self.get_token(TokenTypes.LITERAL, "---")
        self.get_token(TokenTypes.EOL)
        self._save(HorizontalRuleNode())
    @parser
    def _parse_single_line_comment(self):
        """Parse (and discard) a `//` single-line comment."""
        # // A comment on a single line
        self.get_token(TokenTypes.TEXT, check=lambda x: x.startswith("//"))
        self.get_token(TokenTypes.EOL)
    @parser
    def _parse_multi_line_comment(self):
        """Parse (and discard) a `////`-delimited multi-line comment."""
        # ////
        # A comment
        # on multiple lines
        # ////
        self.get_token(TokenTypes.LITERAL, "////")
        self._collect_lines([Token(TokenTypes.LITERAL, "////"), Token(TokenTypes.EOF)])
        # The closing delimiter is mandatory; force_token errors out if missing.
        self.force_token(TokenTypes.LITERAL, "////")
    @parser
    def _parse_variable_definition(self):
        """Parse a variable definition and store it in self.variables."""
        # This parses a variable definition
        #
        # Simple variables are defined as :name:value
        # as True booleans as just :name:
        # and as False booleans as :!name:
        #
        # Variable names can use a namespace with
        # :namespace.name:value
        # Get the mandatory variable name
        self.get_token(TokenTypes.LITERAL, ":")
        variable_name = self.get_token(TokenTypes.TEXT).value
        self.get_token(TokenTypes.LITERAL, ":")
        # Assume the variable is a flag
        variable_value = True
        # If the name starts with ! it's a false flag
        if variable_name.startswith("!"):
            variable_value = False
            variable_name = variable_name[1:]
        # Get the optional value
        value = self.collect_join([Token(TokenTypes.EOL)])
        # The value is assigned only if the variable
        # is not a negative flag. In that case it is ignored
        if variable_value and len(value) > 0:
            variable_value = value
        # If the variable name contains a dot we
        # want to use a namespace
        if "." not in variable_name:
            self.variables[variable_name] = variable_value
        else:
            # Only the first dot splits; any other dots stay in the name.
            namespace, variable_name = variable_name.split(".", maxsplit=1)
            # This defines the namespace if it's not already there
            try:
                self.variables[namespace][variable_name] = variable_value
            except KeyError:
                self.variables[namespace] = {variable_name: variable_value}
    @parser
    def _parse_command(self):
        """Parse a ::command:[args] line.

        `defblock` commands update the block alias/default/name tables in
        place; every other command is saved as a CommandNode.
        """
        # Parse a command in the form ::command:
        self.get_token(TokenTypes.LITERAL, "::")
        name = self.get_token(TokenTypes.TEXT).value
        self.get_token(TokenTypes.LITERAL, ":")
        args = []
        kwargs = {}
        # Commands can have arguments; the `with self` context swallows the
        # failure when no argument text follows, leaving args/kwargs empty.
        with self:
            arguments = self.get_token(TokenTypes.TEXT).value
            self.argsparser.analyse(arguments)
            # Consume the attributes
            args, kwargs = self.argsparser.get_arguments_and_reset()
        if name == "defblock":
            # Block definitions must have at least 2 arguments,
            # the alias and the block type.
            if len(args) < 2:
                self.error(
                    "Block definitions require at least two unnamed arguments: ALIAS and BLOCKTYPE"
                )
            block_alias = args.pop(0)
            block_type = args.pop(0)
            self.block_aliases[block_alias] = block_type
            self.block_defaults[block_type] = kwargs
            self.block_names[block_type] = args
            return None
        self._save(CommandNode(name=name, args=args, kwargs=kwargs))
    @parser
    def _parse_title(self):
        """Parse a block title line (leading dot) and buffer it for the next block."""
        # Parse a title in the form
        #
        # . This is a title
        # or
        # .This is a title
        # Parse the mandatory dot
        self.get_token(TokenTypes.LITERAL, ".")
        # Parse the optional white spaces
        with self:
            self.get_token(TokenTypes.WHITESPACE)
        # Get the text of the title
        text = self.get_token(TokenTypes.TEXT).value
        self.get_token(TokenTypes.EOL)
        # Titles can contain Mau code
        p = TextParser(
            footnotes_start_with=len(self.footnote_defs) + 1,
            v1_backward_compatibility=self.v1_backward_compatibility,
        ).analyse(text)
        title = p.nodes[0]
        self._push_title(title)
    @parser
    def _parse_attributes(self):
        """Parse a [args] attribute line and stash the result in self.argsparser."""
        # Parse block attributes in the form
        # [unnamed1, unnamed2, ..., named1=value1, name2=value2, ...]
        self.get_token(TokenTypes.LITERAL, "[")
        attributes = self.get_token(TokenTypes.TEXT).value
        self.get_token(TokenTypes.LITERAL, "]")
        # Attributes can use variables
        p = PreprocessVariablesParser(self.variables).analyse(
            attributes,
        )
        attributes = p.nodes[0].value
        # Parse the arguments
        self.argsparser.analyse(attributes)
    @parser
    def _parse_header(self):
        """Parse a `= Header` line, register it in the TOC, and save the node."""
        # Parse a header in the form
        #
        # = Header
        #
        # The number of equal signs is arbitrary
        # and represents the level of the header.
        # Headers are automatically assigned an anchor
        # created using the provided function self.header_anchor
        #
        # Headers in the form
        # =! Header
        # are rendered but not included in the TOC
        # Get all the equal signs
        header = self.get_token(
            TokenTypes.LITERAL, check=lambda x: x.startswith("=")
        ).value
        # Get the mandatory white spaces
        self.get_token(TokenTypes.WHITESPACE)
        # Check if the header has to be in the TOC
        in_toc = True
        if header.endswith("!"):
            header = header[:-1]
            in_toc = False
        # Get the text of the header and calculate the level
        text = self.get_token(TokenTypes.TEXT).value
        level = len(header)
        # Generate the anchor and append it to the TOC
        anchor = self.header_anchor(text, level)
        # Consume the attributes
        args, kwargs = self.argsparser.get_arguments_and_reset()
        # Generate the header node
        header_node = HeaderNode(value=text, level=level, anchor=anchor, kwargs=kwargs)
        if in_toc:
            self.headers.append(header_node)
        self._save(header_node)
@parser
def _parse_block(self):
    """Parse a block in the form

        [block_type]
        ----
        Content
        ----
        Optional secondary content

    Blocks are delimited by 4 consecutive identical characters.
    The content is processed according to the block's `engine`
    attribute (raw/mau/source/default) and the resulting BlockNode
    is saved.
    """
    # Get the delimiter and check the length
    delimiter = self.get_token(TokenTypes.TEXT).value
    if len(delimiter) != 4 or len(set(delimiter)) != 1:
        raise TokenError
    self.get_token(TokenTypes.EOL)
    # Collect everything until the next delimiter
    content = self._collect_lines(
        [Token(TokenTypes.TEXT, delimiter), Token(TokenTypes.EOF)]
    )
    self.force_token(TokenTypes.TEXT, delimiter)
    self.get_token(TokenTypes.EOL)
    # Get the optional secondary content (everything up to a blank line)
    secondary_content = self._collect_lines(
        [Token(TokenTypes.EOL), Token(TokenTypes.EOF)]
    )
    # Consume the title
    title = self._pop_title()
    # The first unnamed argument is the block type
    blocktype = self.argsparser.pop()
    # If there is a block alias for blocktype replace it,
    # otherwise use the blocktype we already have
    blocktype = self.block_aliases.get(blocktype, blocktype)
    # Assign names and defaults registered for this block type
    self.argsparser.set_names_and_defaults(
        self.block_names.get(blocktype, []), self.block_defaults.get(blocktype, {})
    )
    # Consume the attributes
    args, kwargs = self.argsparser.get_arguments_and_reset()
    # Extract classes and convert them into a list
    classes = [i for i in kwargs.pop("classes", "").split(",") if len(i) > 0]
    # Extract condition if present and process it
    condition = kwargs.pop("condition", "")
    # Run this only if there is a condition on this block
    if len(condition) > 0:
        try:
            # The condition should be either test:variable:value or test:variable:
            test, variable, value = condition.split(":")
        except ValueError:
            # NOTE(review): assumes self.error raises; if it returns,
            # `value` below would be unbound — confirm.
            self.error(
                f'Condition {condition} is not in the form "test:variable:value" or "test:variable:'
            )
        # If there is no value use True
        if len(value) == 0:
            value = True
        # Check if the variable matches the value and apply the requested test
        match = self.variables.get(variable) == value
        result = True if test == "if" else False
        # If the condition is not satisfied, drop the block entirely
        if match is not result:
            return
    # Extract the preprocessor (stored on the node, applied by the visitor)
    preprocessor = kwargs.pop("preprocessor", "none")
    # Extract the engine
    engine = kwargs.pop("engine", "default")
    # Create the node parameters according to the engine
    if engine in ["raw", "mau"]:
        # Engine "raw" doesn't process the content,
        # so we just pass it untouched in the form of
        # a TextNode per line. The same is true for "mau"
        # as the visitor will have to fire up a new parser
        # to process the content.
        content = [TextNode(line) for line in content]
        secondary_content = [TextNode(line) for line in secondary_content]
    elif engine == "source":
        # Engine "source" extracts the content (source code),
        # the callouts, and the highlights.
        # The default language is "text".
        content, callouts, highlights = self._parse_source_engine(
            content, secondary_content, kwargs
        )
        secondary_content = []
        kwargs["callouts"] = callouts
        kwargs["highlights"] = highlights
        kwargs["language"] = kwargs.get("language", "text")
    elif engine == "default":
        # The default engine parses both content and secondary content
        # with a fresh parser, then merges headers and footnotes
        # into the current one.
        pc = MainParser(variables=self.variables).analyse("\n".join(content))
        ps = MainParser(variables=self.variables).analyse(
            "\n".join(secondary_content)
        )
        content = pc.nodes
        secondary_content = ps.nodes
        self.footnote_defs.extend(pc.footnote_defs)
        self.headers.extend(pc.headers)
    else:
        raise EngineError(f"Engine {engine} is not available")
    self._save(
        BlockNode(
            blocktype=blocktype,
            content=content,
            secondary_content=secondary_content,
            args=args,
            classes=classes,
            engine=engine,
            preprocessor=preprocessor,
            kwargs=kwargs,
            title=title,
        )
    )
def _parse_source_engine(self, content, secondary_content, kwargs):
    """Process the content of a block with engine "source".

    Syntax:

        [source, language, attributes...]
        ----
        content:name:     <- callout marker "name"
        content:@:        <- highlighted line (default marker "@")
        ----
        <name>: <description>   <- callout text in secondary content

    Supported attributes (popped from `kwargs`):
        callouts=":"   the separator wrapping callout names
        highlight="@"  the marker that turns on line highlighting

    Since Mau uses Pygments, the `language` attribute is one of the
    languages supported by that tool.

    Returns (textlines, callouts, highlighted_lines) where `textlines`
    is the content as TextNode objects (preserved literally), `callouts`
    is {"markers": {linenum: name}, "contents": {name: text}}, and
    `highlighted_lines` is a list of line numbers.

    Side effects: mutates `content` in place (markers are stripped) and
    pops the attributes above from `kwargs`. Calls self.error (which
    raises) on malformed callout descriptions.
    """
    # Get the delimiter for callouts (":" by default)
    delimiter = kwargs.pop("callouts", ":")
    # Callout markers in the form {linenum: name}
    callout_markers = {}
    # Get the marker for highlighted lines ("@" by default)
    highlight_marker = kwargs.pop("highlight", "@")
    # A list of highlighted line numbers
    highlighted_lines = []
    # Candidate lines that might contain a callout; each is verified below.
    lines_with_callouts = [
        (linenum, line)
        for linenum, line in enumerate(content)
        if line.endswith(delimiter)
    ]
    for linenum, line in lines_with_callouts:
        # Remove the final delimiter. FIX: the delimiter is configurable
        # and may be longer than one character, so strip its full length
        # instead of a single character.
        line = line[: -len(delimiter)]
        splits = line.split(delimiter)
        if len(splits) < 2:
            # No separator left: the trailing delimiter was not a callout.
            continue
        # The callout name is the last delimited field; the rest is code.
        callout_name = splits[-1]
        line = delimiter.join(splits[:-1])
        content[linenum] = line
        if callout_name == highlight_marker:
            # Marker matches the highlight character: just highlight.
            highlighted_lines.append(linenum)
        else:
            callout_markers[linenum] = callout_name
    # The text attached to each marker, in the form {name: text}
    callout_contents = {}
    # Secondary content provides "name: text" descriptions.
    for line in secondary_content:
        if ":" not in line:
            self.error(
                f"Callout description should be written as 'name: text'. Missing ':' in '{line}'"
            )
        # FIX: split only on the first colon so descriptions may
        # themselves contain ":" (previously raised ValueError).
        name, text = line.split(":", 1)
        if name not in callout_markers.values():
            self.error(f"Callout {name} has not been created in the source code")
        callout_contents[name] = text.strip()
    # Put markers and contents together
    callouts = {"markers": callout_markers, "contents": callout_contents}
    # Source blocks must preserve the content literally
    textlines = [TextNode(line) for line in content]
    return textlines, callouts, highlighted_lines
@parser
def _parse_content(self):
    """Parse attached content in the form

        [attributes]
        << content_type:uri

    and dispatch to the type-specific parser ("image" has a dedicated
    one; everything else falls back to _parse_standard_content).
    """
    # Get the mandatory "<<" and white spaces
    self.get_token(TokenTypes.LITERAL, check=lambda x: x.startswith("<<"))
    self.get_token(TokenTypes.WHITESPACE)
    # Get the content type and the content URI; only the first colon
    # splits, so URIs may contain ":" themselves.
    content_type_and_uri = self.get_token(TokenTypes.TEXT).value
    content_type, uri = content_type_and_uri.split(":", maxsplit=1)
    title = self._pop_title()
    if content_type == "image":
        return self._parse_content_image(uri, title)
    return self._parse_standard_content(content_type, uri, title)
def _parse_content_image(self, uri, title):
    """Save a ContentImageNode for content written as

        [alt_text, classes]
        << image:uri

    where alt_text is the alternate text shown when the image is not
    reachable and classes is a comma-separated list of CSS classes.
    """
    # Map the positional attributes onto names, defaulting both to None.
    self.argsparser.set_names_and_defaults(
        ["alt_text", "classes"], {"alt_text": None, "classes": None}
    )
    args, kwargs = self.argsparser.get_arguments_and_reset()
    alt_text = kwargs.pop("alt_text")
    raw_classes = kwargs.pop("classes")
    # Split only when a non-empty value was provided.
    classes = raw_classes.split(",") if raw_classes else raw_classes
    node = ContentImageNode(
        uri=uri,
        alt_text=alt_text,
        classes=classes,
        title=title,
        kwargs=kwargs,
    )
    self._save(node)
def _parse_standard_content(self, content_type, uri, title):
    """Fallback for content types without a dedicated parser: wrap the
    URI and any collected attributes into a generic ContentNode."""
    # Consume whatever attributes were collected for this content line.
    args, kwargs = self.argsparser.get_arguments_and_reset()
    node = ContentNode(
        uri=uri,
        title=title,
        args=args,
        kwargs=kwargs,
    )
    self._save(node)
@parser
def _parse_list(self):
    """Parse a list.

    Lists can be numbered (marker "#")

        # Item 1
        # Item 2

    or unnumbered (marker "*")

        * One item
        * Another item

    The number of markers sets the depth of each item

        # Item 1
        ## Sub-Item 1.1

    Spaces before and after the marker are ignored, and numbered and
    unnumbered items can be mixed at different levels

        * One item
        ## Sub Item 1
        ## Sub Item 2
    """
    # Ignore initial white spaces
    with self:
        self.get_token(TokenTypes.WHITESPACE)
    # Peek at the marker: "#" means a numbered list, "*" unnumbered
    header = self.peek_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#")
    numbered = True if header.value[0] == "#" else False
    # Parse all the following items
    nodes = self._parse_list_nodes()
    self._save(ListNode(numbered, nodes, main_node=True))
def _parse_list_nodes(self):
    """Parse all items at one level of a list, recursing into deeper
    levels, and return them as a list of ListItemNode objects.

    An item whose marker is longer than the current level starts a
    nested ListNode appended to the previous item's sentence content;
    a shorter marker ends this level.
    """
    # Ignore initial white spaces
    with self:
        self.get_token(TokenTypes.WHITESPACE)
    # Parse the marker and ignore the following white spaces
    header = self.get_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#").value
    self.get_token(TokenTypes.WHITESPACE)
    # Collect and parse the text of the item
    text = self._collect_text_content()
    content = self._parse_text_content(text)
    # The level of the item is the length of its marker
    level = len(header)
    nodes = []
    nodes.append(ListItemNode(level, content))
    # Keep consuming items until a blank line or the end of input
    while not self.peek_token() in [Token(TokenTypes.EOF), Token(TokenTypes.EOL)]:
        # This is the SentenceNode inside the last node added to the list,
        # which is used to append potential nested nodes
        last_node_sentence = nodes[-1].content
        # Ignore the initial white spaces
        with self:
            self.get_token(TokenTypes.WHITESPACE)
        if len(self.peek_token().value) == level:
            # The new item is on the same level; get its marker
            header = self.get_token().value
            # Ignore white spaces
            self.get_token(TokenTypes.WHITESPACE)
            # Collect and parse the text of the item
            text = self._collect_text_content()
            content = self._parse_text_content(text)
            nodes.append(ListItemNode(len(header), content))
        elif len(self.peek_token().value) > level:
            # The new item is on a deeper level:
            # treat the new line as a new (nested) list
            numbered = True if self.peek_token().value[0] == "#" else False
            subnodes = self._parse_list_nodes()
            last_node_sentence.content.append(ListNode(numbered, subnodes))
        else:
            # Shallower marker: this level is finished
            break
    return nodes
@parser
def _parse_paragraph(self):
    """Parse a paragraph.

    Paragraphs can span multiple lines and end with an empty line
    (or the end of the input); the lines are joined with single
    spaces before being parsed as text content.
    """
    stop_tokens = [Token(TokenTypes.EOL), Token(TokenTypes.EOF)]
    lines = self._collect_lines(stop_tokens)
    sentence = self._parse_text_content(" ".join(lines))
    # Consume any attributes collected before the paragraph.
    args, kwargs = self.argsparser.get_arguments_and_reset()
    self._save(ParagraphNode(sentence, args=args, kwargs=kwargs))
def _parse_functions(self):
    """Return the list of parse functions this parser provides.

    NOTE(review): the order appears significant — more specific
    parsers come first and _parse_paragraph acts as the catch-all —
    confirm against the base parser's dispatch loop.
    """
    return [
        self._parse_eol,
        self._parse_horizontal_rule,
        self._parse_single_line_comment,
        self._parse_multi_line_comment,
        self._parse_variable_definition,
        self._parse_command,
        self._parse_title,
        self._parse_attributes,
        self._parse_header,
        self._parse_block,
        self._parse_content,
        self._parse_list,
        self._parse_paragraph,
    ]
def _create_toc(self):
    """Build a TocNode tree from the flat self.headers list.

    Each header is attached as a child of the most recently seen
    header with a level one lower; when that level is missing
    (e.g. a jump from level 1 to level 3) the nearest shallower
    ancestor is used, falling back to the root list.
    """
    nodes = []
    # Most recently added entry for each header level.
    latest_by_level = {}
    for header_node in self.headers:
        # This is the current node
        node = TocEntryNode(header_node)
        level = header_node.level
        latest_by_level[level] = node
        try:
            # Simplest case: add it to the latest node
            # with a level just 1 step lower
            latest_by_level[level - 1].children.append(node)
        except KeyError:
            # Find all the latest nodes added with a level lower than this
            latest = [latest_by_level.get(i, None) for i in range(1, level)]
            # Get the children list of each one, plus nodes for the root
            children = [nodes] + [i.children for i in latest if i is not None]
            # Append to the nearest (deepest available) one
            children[-1].append(node)
    return TocNode(entries=nodes)
def parse(self):
    """Run the base parse, then derive the TOC and footnotes nodes
    from the headers and footnote definitions collected on the way."""
    super().parse()
    self.toc = self._create_toc()
    self.footnotes = FootnotesNode(entries=self.footnote_defs)
| 29,630 | 8,051 |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for multiplane image (MPI) prediction networks.
"""
from __future__ import division
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
#from tensorflow.contrib import slim
import tf_slim as slim
def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False):
  """Network definition for multiplane image (MPI) inference.

  An encoder/decoder CNN: three stride-2 downsampling stages, a dilated
  (rate-2) bottleneck, and three transpose-conv upsampling stages with
  skip connections, followed by a tanh-activated 1x1 projection.

  Args:
    inputs: stack of input images [batch, height, width, input_channels]
    num_outputs: number of output channels
    ngf: number of features for the first conv layer
    vscope: variable scope
    reuse_weights: whether to reuse weights (for weight sharing)

  Returns:
    pred: network output at the same spatial resolution as the inputs.
  """
  with tf.variable_scope(vscope, reuse=reuse_weights):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm):
      # Encoder.
      enc1a = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1)
      enc1b = slim.conv2d(enc1a, ngf * 2, [3, 3], scope='conv1_2', stride=2)
      enc2a = slim.conv2d(enc1b, ngf * 2, [3, 3], scope='conv2_1', stride=1)
      enc2b = slim.conv2d(enc2a, ngf * 4, [3, 3], scope='conv2_2', stride=2)
      enc3a = slim.conv2d(enc2b, ngf * 4, [3, 3], scope='conv3_1', stride=1)
      enc3b = slim.conv2d(enc3a, ngf * 4, [3, 3], scope='conv3_2', stride=1)
      enc3c = slim.conv2d(enc3b, ngf * 8, [3, 3], scope='conv3_3', stride=2)
      # Dilated bottleneck: grows the receptive field without resampling.
      mid1 = slim.conv2d(
          enc3c, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2)
      mid2 = slim.conv2d(
          mid1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2)
      mid3 = slim.conv2d(
          mid2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2)
      # Decoder with skip connections (concatenated on the channel axis).
      dec1 = slim.conv2d_transpose(
          tf.concat([mid3, enc3c], axis=3), ngf * 4, [4, 4],
          scope='conv6_1', stride=2)
      dec1a = slim.conv2d(dec1, ngf * 4, [3, 3], scope='conv6_2', stride=1)
      dec1b = slim.conv2d(dec1a, ngf * 4, [3, 3], scope='conv6_3', stride=1)
      dec2 = slim.conv2d_transpose(
          tf.concat([dec1b, enc2b], axis=3), ngf * 2, [4, 4],
          scope='conv7_1', stride=2)
      dec2a = slim.conv2d(dec2, ngf * 2, [3, 3], scope='conv7_2', stride=1)
      dec3 = slim.conv2d_transpose(
          tf.concat([dec2a, enc1b], axis=3), ngf, [4, 4],
          scope='conv8_1', stride=2)
      dec3a = slim.conv2d(dec3, ngf, [3, 3], scope='conv8_2', stride=1)
      # Final 1x1 projection, tanh activation, no normalizer.
      pred = slim.conv2d(
          dec3a,
          num_outputs, [1, 1],
          stride=1,
          activation_fn=tf.nn.tanh,
          normalizer_fn=None,
          scope='color_pred')
      return pred
| 3,344 | 1,413 |
from pathlib import Path
from unittest import mock
import pytest
from freedesktop_icons import Icon, Theme, lookup, lookup_fallback, theme_search_dirs
@pytest.mark.parametrize(
    ("env", "expected"),
    (
        ("", [Path.home() / '.icons']),
        ("/foo:", [Path.home() / '.icons', Path('/foo/icons')]),
    ),
)
def test_theme_search_dirs(env, expected, monkeypatch):
    """theme_search_dirs() always starts with ~/.icons and appends an
    icons/ subdirectory for each non-empty XDG_DATA_DIRS entry."""
    monkeypatch.setenv('XDG_DATA_DIRS', env)
    assert list(theme_search_dirs()) == expected
def _stub_get_theme(get_theme, **kwargs):
get_theme.side_effect = kwargs.get
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup(get_theme):
    """lookup() with a string icon name only loads the named theme.

    Parents are not consulted — presumably because the autospec'd
    theme's lookup() returns a truthy mock; confirm against lookup().
    """
    real_theme = mock.create_autospec(Theme, name="real_theme")
    real_theme.parents = ['parent', 'hicolor']
    _stub_get_theme(get_theme, Adwaita=real_theme)
    lookup("org.mozilla.firefox", "Adwaita")
    assert get_theme.mock_calls == [mock.call('Adwaita')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_icon(get_theme):
    """lookup() also accepts an Icon object instead of a plain string."""
    real_theme = mock.create_autospec(Theme, name="real_theme")
    real_theme.parents = []
    _stub_get_theme(get_theme, Adwaita=real_theme)
    icon = Icon("org.mozilla.firefox")
    lookup(icon, "Adwaita")
    assert get_theme.mock_calls == [mock.call('Adwaita')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_in_parent(get_theme):
    """When the named theme misses (lookup returns None), lookup()
    falls back to the theme's declared parent."""
    real_theme = mock.create_autospec(Theme, name="real_theme")
    real_theme.parents = ['parent']
    real_theme.lookup.return_value = None
    parent_theme = mock.create_autospec(Theme, name="parent_theme")
    _stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme)
    lookup("org.mozilla.firefox", "Adwaita")
    assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_in_hicolor(get_theme):
    """When the theme and all parents miss, lookup() consults the
    implicit 'hicolor' theme and returns its hit."""
    real_theme = mock.create_autospec(Theme, name="real_theme")
    real_theme.parents = ['parent']
    real_theme.lookup.return_value = None
    parent_theme = mock.create_autospec(Theme, name="parent_theme")
    parent_theme.lookup.return_value = None
    hicolor = mock.create_autospec(Theme, name="hicolor")
    hicolor.lookup.return_value = mock.MagicMock()
    _stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme, hicolor=hicolor)
    path = lookup("org.mozilla.firefox", "Adwaita")
    # Search order: named theme, its parent, then hicolor.
    assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent'), mock.call('hicolor')]
    assert path is hicolor.lookup.return_value
@mock.patch("freedesktop_icons.get_theme", autospec=True)
@mock.patch("freedesktop_icons.lookup_fallback", autospec=True)
def test_lookup_in_fallback(lookup_fallback, get_theme):
    """When every theme (including hicolor) misses, lookup() delegates
    to lookup_fallback with the default extension order svg/png/xpm."""
    real_theme = mock.create_autospec(Theme, name="real_theme")
    real_theme.lookup.return_value = None
    hicolor = mock.create_autospec(Theme, name="hicolor")
    hicolor.lookup.return_value = None
    _stub_get_theme(get_theme, Adwaita=real_theme, hicolor=hicolor)
    lookup_fallback.return_value = mock.MagicMock()
    path = lookup("org.mozilla.firefox", "Adwaita")
    assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('hicolor')]
    assert lookup_fallback.mock_calls == [mock.call('org.mozilla.firefox', ['svg', 'png', 'xpm'])]
    assert path is lookup_fallback.return_value
@mock.patch("freedesktop_icons.fallback_paths")
def test_lookup_fallback(fallback_paths, tmpdir):
    """lookup_fallback() matches both the icon basename and the
    requested extension list against files in the fallback paths."""
    file = tmpdir / 'org.mozilla.firefox.svg'
    file.open('w').close()
    fallback_paths.return_value = [tmpdir]
    assert lookup_fallback("not-there", ['svg']) is None
    assert lookup_fallback("org.mozilla.firefox", ['png']) is None
    assert lookup_fallback("org.mozilla.firefox", ['svg']) == file
| 3,731 | 1,343 |
# -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""
"""
import os
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sweetrpg_library_api.application import constants
# Initialise Sentry error reporting as a module-import side effect.
# NOTE(review): os.environ[constants.SENTRY_DSN] raises KeyError when the
# DSN variable is unset — presumably a deliberate fail-fast; confirm.
sentry_sdk.init(dsn=os.environ[constants.SENTRY_DSN],
                # Sample 20% of transactions for performance tracing.
                traces_sample_rate=0.2,
                environment=os.environ.get(constants.SENTRY_ENV) or 'Unknown',
                integrations=[
                    FlaskIntegration(), RedisIntegration(),
                ])
| 569 | 181 |
"""
Compiles stellar model isochrones into an easy-to-access format.
"""
from numpy import *
from scipy.interpolate import LinearNDInterpolator as interpnd
from consts import *
import os,sys,re
import scipy.optimize
#try:
# import pymc as pm
#except:
# print 'isochrones: pymc not loaded! MCMC will not work'
import numpy.random as rand
import atpy
# Root directory for isochrone data files; the ASTROUTIL_DATADIR
# environment variable must be set (KeyError at import time otherwise).
DATAFOLDER = os.environ['ASTROUTIL_DATADIR'] #'/Users/tdm/Dropbox/astroutil/data'
def gr2B(g, r):
    """Convert SDSS g,r magnitudes to Johnson B via the V conversion."""
    color = g - r
    return gr2V(g, r) + 1.04 * color + 0.19
def gr2V(g, r):
    """Convert SDSS g,r magnitudes to Johnson V (empirical transform)."""
    color = g - r
    return r + 0.44 * color - 0.02
def BV2r(B, V):
    """Invert the g,r -> B,V transforms to recover SDSS r from B,V."""
    # Same arithmetic as the forward transforms, solved for r.
    weight = 1 / (1 + 0.56 / 0.44)
    return weight * ((1 / 0.44) * (V + 0.02) - (1 / 1.04) * (B - V - 0.19))
def BV2g(B, V):
    """Recover SDSS g from Johnson B,V, building on BV2r."""
    color = B - V
    return BV2r(B, V) + (1 / 1.04) * color - 0.19 / 1.04
def fehstr(feh, minfeh=-1.0, maxfeh=0.5):
    """Clamp a metallicity to [minfeh, maxfeh] and format it with one
    decimal, mapping the tiny negative band (-0.05, 0) to '0.0' so that
    filenames never read '-0.0'."""
    if feh < minfeh:
        return '%.1f' % minfeh
    if feh > maxfeh:
        return '%.1f' % maxfeh
    if -0.05 < feh < 0:
        return '0.0'
    return '%.1f' % feh
class isochrone(object):
    """Generic isochrone class.

    Interpolates stellar properties (mass, radius, logL, logg, Teff and
    magnitudes) on an irregular grid of model points, either in
    (initial mass, age) or — when `fehs` is given — in
    (initial mass, age, [Fe/H]).

    NOTE(review): the 2d `feh` accessor returns self.isofeh, which is
    set by the subclasses, not by this __init__ — confirm all users
    set it.
    """
    def __init__(self,age,m_ini,m_act,logL,Teff,logg,mags,fehs=None):
        # Grid boundaries, used by the fitting helpers below.
        self.minage = age.min()
        self.maxage = age.max()
        self.minmass = m_act.min()
        self.maxmass = m_act.max()
        self.bands = []
        for band in mags:
            self.bands.append(band)
        L = 10**logL  # (currently unused)
        #R = sqrt(G*m_act*MSUN/10**logg)/RSUN
        if fehs is None:
            points = array([m_ini,age]).T
            self.is3d = False
        else:
            points = array([m_ini,age,fehs]).T
            self.is3d = True
        # feh accessor mirrors the call signature of the interpolators:
        # 3d isochrones take (m, age, feh), 2d ones (m, age).
        if self.is3d:
            self.feh = lambda m,age,feh: feh
        else:
            self.feh = lambda m,age: self.isofeh
        # Build the Delaunay triangulation once (via the first
        # interpolator) and reuse it for all other quantities.
        self.M = interpnd(points,m_act)
        self.tri = self.M.tri
        #self.R = interpnd(points,R)
        self.logL = interpnd(self.tri,logL)
        self.logg = interpnd(self.tri,logg)
        self.logTe = interpnd(self.tri,log10(Teff))
        def Teff_fn(*pts):
            return 10**self.logTe(*pts)
        #self.Teff = lambda *pts: 10**self.logTe(*pts)
        self.Teff = Teff_fn
        def R_fn(*pts):
            # Radius from surface gravity: R = sqrt(G*M/g), in solar radii.
            return sqrt(G*self.M(*pts)*MSUN/10**self.logg(*pts))/RSUN
        #self.R = lambda *pts: sqrt(G*self.M(*pts)*MSUN/10**self.logg(*pts))/RSUN
        self.R = R_fn
        # One magnitude interpolator per photometric band.
        self.mag = {}
        for band in self.bands:
            self.mag[band] = interpnd(self.tri,mags[band])
    def __call__(self,*args):
        """Evaluate all properties at (M, age) or (M, age, feh).

        Returns a dict with keys age/M/feh/R/logL/logg/Teff/mag.
        """
        if self.is3d:
            if len(args) != 3:
                raise ValueError('must call with M, age, and [Fe/H]')
            m,age,feh = args
        else:
            if len(args) != 2:
                raise ValueError('must call with M,age')
            m,age = args
        Ms = self.M(*args)
        Rs = self.R(*args)
        logLs = self.logL(*args)
        loggs = self.logg(*args)
        Teffs = self.Teff(*args)
        mags = {}
        for band in self.bands:
            mags[band] = self.mag[band](*args)
        return {'age':age,'M':Ms,'feh':self.feh(*args),'R':Rs,'logL':logLs,'logg':loggs,'Teff':Teffs,'mag':mags}
    def evtrack(self,m,minage=6.7,maxage=10,dage=0.05):
        """Evolutionary track: properties of a fixed mass over a log-age grid."""
        ages = arange(minage,maxage,dage)
        Ms = self.M(m,ages)
        Rs = self.R(m,ages)
        logLs = self.logL(m,ages)
        loggs = self.logg(m,ages)
        Teffs = self.Teff(m,ages)
        mags = {}
        for band in self.bands:
            mags[band] = self.mag[band](m,ages)
        #return array([ages,Ms,Rs,logLs,loggs,Teffs, #record array?
        return {'age':ages,'M':Ms,'R':Rs,'logL':logLs,'Teff':Teffs,'mag':mags}
    def isochrone(self,age,minm=0.1,maxm=2,dm=0.02):
        """Single-age isochrone over a mass grid.

        (Method intentionally shadows the class name within instances.)
        """
        ms = arange(minm,maxm,dm)
        ages = ones(ms.shape)*age
        Ms = self.M(ms,ages)
        Rs = self.R(ms,ages)
        logLs = self.logL(ms,ages)
        loggs = self.logg(ms,ages)
        Teffs = self.Teff(ms,ages)
        mags = {}
        for band in self.bands:
            mags[band] = self.mag[band](ms,ages)
        return {'M':Ms,'R':Rs,'logL':logLs,'Teff':Teffs,'mag':mags}
class WD(isochrone):
    """White-dwarf models loaded from WDs_H.txt / WDs_He.txt.

    `composition` selects the atmosphere file ('H' or 'He').
    """
    def __init__(self,composition='H'):
        if composition not in ['H','He']:
            raise ValueError('Unknown composition: %s (must be H or He)' %
                             composition)
        self.composition = composition
        filename = '%s/stars/WDs_%s.txt' % (DATAFOLDER,composition)
        data = recfromtxt(filename,names=True)
        mags = {'bol':data.Mbol,'U':data.U,'B':data.B,'V':data.V,'R':data.R,'I':data.I,
                'J':data.J,'H':data.H,'K':data.K,'u':data.u,'g':data.g,'r':data.r,
                'i':data.i,'z':data.z,'y':data.y}
        # Bolometric magnitude to log-luminosity (Mbol_sun = 4.77).
        logL = -2.5*(data.Mbol-4.77)
        # Approximate Kepler band as a g/r blend; redder objects
        # (g-r > 0.3) get more weight on r.
        gr = mags['g']-mags['r']
        mags['kep'] = 0.25*mags['g'] + 0.75*mags['r']
        w = where(gr > 0.3)
        mags['kep'][w] = 0.3*mags['g'][w] + 0.7*mags['r'][w]
        isochrone.__init__(self,log10(data.Age),data.mass,data.mass,logL,
                           data.Teff,data.logg,mags)
class padova(isochrone):
    """Padova isochrones for a single metallicity, read from a text grid."""
    def __init__(self,feh=0.):
        # fehstr clamps to the available grid range [-2, 0.2].
        filename = DATAFOLDER + '/stars/padova_%s.dat' % fehstr(feh,-2,0.2)
        self.isofeh = feh
        #filename = 'data/kepisochrones.dat'
        age,m_ini,m_act,logL,logT,logg,mbol,kep,g,r,i,z,dd051,J,H,K = \
            loadtxt(filename,usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15),unpack=True)
        mags = {'bol':mbol,'kep':kep,'g':g,'r':r,'i':i,'z':z,'dd051':dd051,'J':J,'H':H,'K':K}
        mags['B'] = gr2B(g,r)
        mags['V'] = gr2V(g,r)
        mags['R'] = r #cheating
        mags['I'] = i #cheating
        mags['Kepler'] = mags['kep']
        isochrone.__init__(self,age,m_ini,m_act,logL,10**logT,logg,mags)
class padova3d(isochrone):
    """Padova isochrones interpolated in 3d (mass, age, [Fe/H]).

    Loads every grid file whose metallicity falls in
    [minfeh, maxfeh] and stacks them, then trims to the requested
    mass/age window. (Python 2 module: uses `print` statements.)
    """
    def __init__(self,minm=0.9,maxm=1.1,minage=9,maxage=10,minfeh=-0.2,maxfeh=0.2):
        #ages = array([]); m_inis = array([]); m_acts = array([]); logLs = array([]); logTs = array([]); loggs = array([])
        #mbols = array([]); keps = array([]); gs = array([]); rs = array([]); iz = array([]); zs = array([])
        #dd051s = array([]); Js = array([])
        data = None
        fehs = array([])
        # Available grid: [Fe/H] from -2 to 0.2 in 0.1 steps; the +0.001
        # guards against float round-off at the upper bound.
        fehlist = arange(-2,0.3,0.1)
        fehlist = fehlist[where((fehlist >= minfeh) & (fehlist <= maxfeh+0.001))]
        for feh in fehlist:
            print 'loading isochrone for [Fe/H] = %.1f...' % feh
            filename = DATAFOLDER + '/stars/padova_%.1f.dat' % feh
            if data is None:
                data = loadtxt(filename)
                n = len(data)
            else:
                newdata = loadtxt(filename)
                n = len(newdata)
                data = concatenate((data,newdata))
            # One feh value per appended row.
            fehs = concatenate((fehs,ones(n)*feh))
        # Trim to the requested (actual mass, age) window.
        inds = where((data[:,2] >= minm) & (data[:,2] <= maxm) & (data[:,0] >= minage) & (data[:,0] <= maxage))[0]
        data = data[inds,:]
        fehs = fehs[inds]
        age,m_ini,m_act,logL,logT,logg,mbol,kep,g,r,i,z,dd051,J,H,K = \
            (data[:,0],data[:,1],data[:,2],data[:,3],data[:,4],data[:,5],data[:,6],data[:,7],data[:,8],
             data[:,9],data[:,10],data[:,11],data[:,12],data[:,13],data[:,14],data[:,15])
        self.minm = m_ini.min()
        self.maxm = m_ini.max()
        self.minage = age.min()
        self.maxage = age.max()
        self.minfeh = fehs.min()
        self.maxfeh = fehs.max()
        mags = {'bol':mbol,'kep':kep,'g':g,'r':r,'i':i,'z':z,'dd051':dd051,'J':J,'H':H,'K':K}
        mags['B'] = gr2B(g,r)
        mags['V'] = gr2V(g,r)
        mags['R'] = r #cheating
        mags['I'] = i #cheating
        isochrone.__init__(self,age,m_ini,m_act,logL,10**logT,logg,mags,fehs=fehs)
class baraffe(isochrone):
    """Baraffe low-mass models.

    NOTE(review): only the solar-metallicity file is loaded regardless
    of `feh`; the argument merely sets self.isofeh — confirm intended.
    """
    def __init__(self,feh=0):
        filename = '%s/stars/baraffe0.0.txt' % DATAFOLDER
        data = recfromtxt(filename,names=True)
        self.isofeh = feh
        mags = {'V':data.Mv,'R':data.Mr,'I':data.Mi,'J':data.Mj,'H':data.Mh,
                'K':data.Mk,'L':data.Ml,'M':data.Mm}
        # Crude SDSS/Kepler approximations from the Johnson bands.
        #mags['kep'] = (mags['V'] + mags['R'])/2 - 2
        mags['g'] = mags['V']
        mags['r'] = mags['R']
        mags['i'] = mags['I']
        mags['z'] = 0.8*mags['I'] + 0.2*mags['J']
        mags['kep'] = 0.1*mags['g'] + 0.9*mags['r']
        isochrone.__init__(self,log10(data.age*1e9),data.m,data.m,data.logL,
                           data.Teff,data.g,mags)
class dartmouth(isochrone):
    """Dartmouth isochrones loaded from the pre-converted FITS tables
    (see dartmouth_to_fits below)."""
    def __init__(self,feh=0,bands=['U','B','V','R','I','J','H','K','g','r','i','z','Kepler']):
        # NOTE(review): mutable default list; harmless as long as it is
        # only read — confirm no caller mutates it.
        filename = '%s/stars/dartmouth_%s.fits' % (DATAFOLDER,fehstr(feh,-1.0,0.5))
        t = atpy.Table(filename)
        self.isofeh = feh
        mags = {}
        for band in bands:
            try:
                mags[band] = t[band]
            except:
                # Bare except: only the Kepler aliases are rescued via
                # the 'Kp' column; anything else re-raises unchanged.
                if band == 'kep' or band == 'Kepler':
                    mags[band] = t['Kp']
                else:
                    raise
        #Fg = 10**(-0.4*mags['g'])
        #Fr = 10**(-0.4*mags['r'])
        #gr = mags['g']-mags['r']
        #mags['kep'] = 0.25*mags['g'] + 0.75*mags['r']
        #w = where(gr > 0.3)
        #mags['kep'][w] = 0.3*mags['g'][w] + 0.7*mags['r'][w]
        #mags['kep'] = (mags['g']+mags['r'])/2 #fix this!
        isochrone.__init__(self,log10(t['age']*1e9),t['M'],t['M'],t['logL'],
                           10**t['logTe'],t['logg'],mags)
def write_all_dartmouth_to_fits(fehs=arange(-1,0.51,0.1)):
    """Convert every Dartmouth grid point in `fehs` to FITS.

    (Python 2 module: uses `print` statements.)
    """
    for feh in fehs:
        try:
            print feh
            dartmouth_to_fits(feh)
        except:
            # NOTE(review): re-raises immediately, so this bare except and
            # the unreachable `pass` below have no effect — candidate cleanup.
            raise
            pass
def dartmouth_to_fits(feh):
    """Merge the Dartmouth 2MASS/Kp and ugriz isochrone text files for
    one metallicity into a single FITS table.

    The age of each row is not a column in the text files; it is read
    from the '#AGE=...' comment headers that precede each age group.
    """
    filename_2mass = '%s/stars/dartmouth_%s_2massKp.iso' % (DATAFOLDER,fehstr(feh,-1.0,0.5))
    filename_ugriz = '%s/stars/dartmouth_%s_ugriz.iso' % (DATAFOLDER,fehstr(feh,-1.0,0.5))
    data_2mass = recfromtxt(filename_2mass,skiprows=8,names=True)
    data_ugriz = recfromtxt(filename_ugriz,skiprows=8,names=True)
    n = len(data_2mass)
    ages = zeros(n)
    curage = 0
    i=0
    # Walk the raw file: comment lines update the current age, data
    # lines (anything containing a digit) get stamped with it.
    for line in open(filename_2mass):
        m = re.match('#',line)
        if m:
            m = re.match('#AGE=\s*(\d+\.\d+)\s+',line)
            if m:
                curage=m.group(1)
        else:
            if re.search('\d',line):
                ages[i]=curage
                i+=1
    # Assemble the merged table; rows of the two files are assumed to
    # line up one-to-one (NOTE(review): not verified here).
    t = atpy.Table()
    t.add_column('age',ages)
    t.add_column('M',data_2mass.MMo)
    t.add_column('logTe',data_2mass.LogTeff)
    t.add_column('logg',data_2mass.LogG)
    t.add_column('logL',data_2mass.LogLLo)
    t.add_column('U',data_2mass.U)
    t.add_column('B',data_2mass.B)
    t.add_column('V',data_2mass.V)
    t.add_column('R',data_2mass.R)
    t.add_column('I',data_2mass.I)
    t.add_column('J',data_2mass.J)
    t.add_column('H',data_2mass.H)
    t.add_column('K',data_2mass.Ks)
    t.add_column('Kp',data_2mass.Kp)
    t.add_column('u',data_ugriz.sdss_u)
    t.add_column('g',data_ugriz.sdss_g)
    t.add_column('r',data_ugriz.sdss_r)
    t.add_column('i',data_ugriz.sdss_i)
    t.add_column('z',data_ugriz.sdss_z)
    t.write('%s/stars/dartmouth_%s.fits' % (DATAFOLDER,fehstr(feh,-1,0.5)),overwrite=True)
def isofit(iso,p0=None,**kwargs):
    """Least-squares fit of isochrone parameters to observed quantities.

    kwargs map property names (attributes of `iso`, e.g. Teff, logg)
    to (value, error) pairs; the chi-square over all of them is
    minimised with a simplex search. Returns iso(*best_fit_params).
    (Python 2 module: uses a `print` statement.)
    """
    def chisqfn(pars):
        # Unpack only to document the parameter layout; the full tuple
        # is forwarded to each property function below.
        if iso.is3d:
            m,age,feh = pars
        else:
            m,age = pars
        tot = 0
        for kw in kwargs:
            val,err = kwargs[kw]
            fn = getattr(iso,kw)
            tot += (val-fn(*pars))**2/err**2
        return tot
    # Default starting point: the middle of the grid (3d) or a
    # solar-ish guess (2d).
    if iso.is3d:
        if p0 is None:
            p0 = ((iso.minm+iso.maxm)/2,(iso.minage + iso.maxage)/2.,(iso.minfeh + iso.maxfeh)/2.)
    else:
        if p0 is None:
            p0 = (1,9.5)
    pfit = scipy.optimize.fmin(chisqfn,p0,disp=False)
    print pfit
    return iso(*pfit)
def shotgun_isofit(iso,n=100,**kwargs):
    """Monte-Carlo wrapper around isofit.

    Perturbs each observed (value, error) pair with Gaussian noise,
    refits n times, and evaluates the isochrone at all fitted
    (M, age[, feh]) samples, giving a crude posterior on the
    derived properties.
    """
    # Pre-draw n noisy realisations of every observable.
    simdata = {}
    for kw in kwargs:
        val,err = kwargs[kw]
        simdata[kw] = rand.normal(size=n)*err + val
    if iso.is3d:
        Ms,ages,fehs = (zeros(n),zeros(n),zeros(n))
    else:
        Ms,ages = (zeros(n),zeros(n))
    for i in arange(n):
        # Refit with the i-th noisy realisation (original errors kept).
        simkwargs = {}
        for kw in kwargs:
            val = simdata[kw][i]
            err = kwargs[kw][1]
            simkwargs[kw] = (val,err)
        fit = isofit(iso,**simkwargs)
        Ms[i] = fit['M']
        ages[i] = fit['age']
        if iso.is3d:
            fehs[i] = fit['feh']
    # Evaluate the isochrone on the whole sample at once.
    if iso.is3d:
        res = iso(Ms,ages,fehs)
    else:
        res = iso(Ms,ages)
    return res
def isofitMCMCmodel(iso,**kwargs):
    """Build a pymc model dict for isochrone fitting.

    Uniform priors over the grid (3d) or fixed ranges (2d); one
    observed Gaussian-like potential per (value, error) kwarg, created
    via exec because the stochastic's name must match the kwarg.

    NOTE(review): depends on `pm` (pymc), whose import is commented out
    at the top of the module — this function will NameError unless pymc
    is imported elsewhere. (Python 2: `exec code in ns` syntax.)
    """
    if iso.is3d:
        mass = pm.Uniform('mass',lower=iso.minm,upper=iso.maxm)
        age = pm.Uniform('age',lower=iso.minage,upper=iso.maxage)
        feh = pm.Uniform('feh',lower=iso.minfeh,upper=iso.maxfeh)
        ns = {'pm':pm,'mass':mass,'age':age,'feh':feh}
    else:
        mass = pm.Uniform('mass',lower=0.1,upper=5)
        age = pm.Uniform('age',lower=6.7,upper=10.1)
        ns = {'pm':pm,'mass':mass,'age':age}
    for kw in kwargs:
        val,dval = kwargs[kw]
        fn = getattr(iso,kw)
        # The generated observed node closes over fn/val/dval via ns;
        # the log-likelihood is clipped at -1000 to avoid -inf.
        ns['fn'] = fn
        ns['val'] = val
        ns['dval'] = dval
        if iso.is3d:
            code = "@pm.observed(dtype=float)\ndef %s(value=val,mass=mass,age=age,feh=feh): return max(-1000,-(fn(mass,age,feh) - val)**2/dval**2)" % kw
        else:
            code = "@pm.observed(dtype=float)\ndef %s(value=val,mass=mass,age=age): return max(-1000,-(fn(mass,age) - val)**2/dval**2)" % kw
        exec code in ns
    return ns
def isofitMCMC(iso,niter=5e4,nburn=1e4,thin=100,verbose=True,**kwargs):
    """Sample the isofitMCMCmodel posterior and return the pymc MCMC
    object (requires pymc; see the NOTE on isofitMCMCmodel)."""
    model = isofitMCMCmodel(iso,**kwargs)
    M = pm.MCMC(model)
    M.sample(iter=niter,burn=nburn,thin=thin,verbose=verbose)
    return M
| 13,674 | 5,789 |
import unittest
from pterradactyl.util import as_list, memoize, merge_dict, lookup
class TestCommonUtil(unittest.TestCase):
    """Unit tests for pterradactyl.util helpers."""
    def memoize_func(self, *arg, **kwargs):
        # Dummy callable used by test_memoize; returns None.
        pass
    def test_as_list_string(self):
        """A string is wrapped into a list (list("3") == ["3"])."""
        elem = "3"
        r = as_list(elem)
        self.assertListEqual(r, list(elem))
    def test_as_list_list(self):
        """A list is returned unchanged."""
        elem = ["list_elem"]
        r = as_list(elem)
        self.assertListEqual(r, elem)
    def test_merge_dict(self):
        """Two disjoint dicts merge into their union."""
        dict1 = {'a': 'b'}
        dict2 = {'c': 'd'}
        merged_dict = merge_dict(dict1, dict2)
        self.assertDictEqual(merged_dict, {'a': 'b', 'c': 'd'})
    def test_lookup(self):
        data = {'foo': 'bar', 'foo1': 'bar1'}
        value = lookup(data, 'foo')
        self.assertEqual(value, 'bar')
    def test_lookup_with_non_existing_key(self):
        """Missing keys yield None by default."""
        data = {'foo': 'bar', 'foo1': 'bar1'}
        value = lookup(data, 'foo_1')
        self.assertEqual(value, None)
    def test_lookup_should_return_default(self):
        data = {'foo': 'bar', 'foo1': 'bar1'}
        value = lookup(data, 'foo_1', default='bar2')
        self.assertEqual(value, 'bar2')
    def test_memoize(self):
        # NOTE(review): memoize_func(...) is CALLED here (returning None),
        # so memoize receives None rather than the function — this only
        # asserts that memoize(None) is truthy. Looks like the intent was
        # memoize(self.memoize_func); confirm.
        m = memoize(self.memoize_func(mylist=[1, 2, 3, 4, 5]))
        self.assertTrue(m)
| 1,263 | 460 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import logging
from functions.et_helper import findFile,gaze_to_pandas
import functions.et_parse as parse
import functions.et_make_df as make_df
import functions.et_helper as helper
import imp # for edfread reload
import scipy
import scipy.stats
#%% PUPILLABS
def pl_fix_timelag(pl):
    """Correct the (potentially very large) Pupil Labs latency between
    message timestamps and camera-frame timestamps.

    Fits a robust (Theil-Sen) line t_cam = slope * t_msg + intercept
    over the 'trigger' notifications and rewrites every notification
    timestamp into camera time, adding the fixed 45 ms camera-to-
    timestamp delay. Mutates `pl` in place and returns it.
    """
    # Paired camera/message times from all trigger notifications.
    t_cam = np.asarray([p['recent_frame_timestamp'] for p in pl['notifications'] if p['subject']=='trigger'])# camera time
    t_msg = np.asarray([p['timestamp'] for p in pl['notifications'] if p['subject']=='trigger']) # msg time
    # Theil-Sen is robust to outlier triggers, unlike plain linregress.
    #slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(t_msg,t_cam) # predict camera time based on msg time
    slope,intercept,low,high = scipy.stats.theilslopes(t_cam,t_msg)
    logger = logging.getLogger(__name__)
    logger.warning("fixing lag (at t=0) of :%.2fms, slope of %.7f (in a perfect world this is 0ms & 1.0)"%(intercept*1000,slope))
    # Fill it back in with a plain loop for readability.
    # Use this code (and change t_cam and t_msg above) if you want
    # everything in computer-time timestamps instead:
    #for ix,m in enumerate(pl['gaze_positions']):
    #    pl['gaze_positions'][ix]['timestamp'] = pl['gaze_positions'][ix]['timestamp'] * slope + intercept
    #    for ix2,m2 in enumerate(pl['gaze_positions'][ix]['pupil_positions']):
    #        pl['gaze_positions'][ix]['pupil_positions']['timestamp'] = pl['gaze_positions'][ix]['pupil_positions']['timestamp'] * slope + intercept
    #for ix,m in enumerate(pl['gaze_positions']):
    #    pl['pupil_positions'][ix]['timestamp'] = pl['pupil_positions'][ix]['timestamp'] * slope + intercept# + 0.045
    # Only notifications are rewritten into sample (camera) time here.
    for ix,m in enumerate(pl['notifications']):
        pl['notifications'][ix]['timestamp'] = pl['notifications'][ix]['timestamp'] * slope + intercept + 0.045 # the 45ms are the pupillabs defined delay between camera image & timestamp3
    return(pl)
def raw_pl_data(subject='',datapath='/net/store/nbp/projects/etcomp/',postfix='raw'):
    """Load a raw pupillabs recording.

    Input:  subject name and datapath; an empty subject means ``datapath``
            already points at the recording folder.
    Output: pupillabs dict with keys 'notifications', 'pupil_positions' and
            'gaze_positions', each a list of dicts (gaze 'base_data' nests
            pupil dicts; 'norm_pos' is a [horizontal, vertical] pair).
    """
    from lib.pupil.pupil_src.shared_modules import file_methods as pl_file_methods
    recording_dir = datapath if subject == '' else os.path.join(datapath, subject, postfix)
    print(os.path.join(recording_dir, 'pupil_data'))
    # possible clock drift between camera and computer time is fixed later
    # (see pl_fix_timelag), not here
    original_pldata = pl_file_methods.load_object(os.path.join(recording_dir, 'pupil_data'))
    return original_pldata
def import_pl(subject='', datapath='/net/store/nbp/projects/etcomp/', recalib=True, surfaceMap=True,parsemsg=True,fixTimeLag=True,px2deg=True,pupildetect=None,
              pupildetect_options=None):
    """Import pupillabs data for one subject.

    Input:  subject:     (str) subject name; '' means datapath points at the recording
            datapath:    (str) location where data is stored
            recalib:     recalibrate gaze with nbp_recalib
            surfaceMap:  map gaze onto the marker-defined screen surface
            parsemsg:    parse notifications into a messages DataFrame
            fixTimeLag:  correct camera-vs-computer clock drift (pl_fix_timelag)
            px2deg:      convert pixels to degrees in make_samples_df
            pupildetect: None, or detector type ('2d'/'3d') to re-run pupil detection
            pupildetect_options: options forwarded to nbp_pupildetect
    Output: (plsamples, plmsgs, plevents) -- plevents is always an empty DataFrame
    """
    # get a logger
    logger = logging.getLogger(__name__)
    if pupildetect:
        # has to be imported first
        import av
        import ctypes
        ctypes.cdll.LoadLibrary('/net/store/nbp/users/behinger/projects/etcomp/local/build/build_ceres_working/lib/libceres.so.2')
    if surfaceMap:
        # has to be imported before nbp recalib
        try:
            import functions.pl_surface as pl_surface
        except ImportError as err:
            # BUG FIX: previously `raise('Custom Error:...')` raised a *string*,
            # which is itself a TypeError in Python 3; raise a real exception.
            raise ImportError('Custom Error:Could not import pl_surface') from err
    assert(type(subject)==str)
    # Get samples df (is still a dictionary here)
    original_pldata = raw_pl_data(subject=subject, datapath=datapath)
    if pupildetect is not None: # can be 2d or 3d
        from functions.nbp_pupildetect import nbp_pupildetect
        if subject == '':
            filename = datapath
        else:
            filename = os.path.join(datapath,subject,'raw')
        pupil_positions_0= nbp_pupildetect(detector_type = pupildetect, eye_id = 0,folder=filename,pupildetect_options=pupildetect_options)
        pupil_positions_1= nbp_pupildetect(detector_type = pupildetect, eye_id = 1,folder=filename,pupildetect_options=pupildetect_options)
        pupil_positions = pupil_positions_0 + pupil_positions_1
        original_pldata['pupil_positions'] = pupil_positions
        recalib=True
    # recalibrate data
    if recalib:
        from functions import nbp_recalib
        if pupildetect is not None:
            original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata,calibration_mode=pupildetect)
        # NOTE(review): this unconditional call overwrites the pupildetect-aware
        # recalibration directly above -- looks like a missing `else`; confirm.
        original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata)
    # Fix timing
    # Pupillabs cameras have their own timestamps & clock. The msgs are clocked via
    # computertime. Sometimes computertime & cameratime show drift (~40% of cases).
    if fixTimeLag:
        original_pldata = pl_fix_timelag(original_pldata)
    if surfaceMap:
        folder= os.path.join(datapath,subject,'raw')
        tracker = pl_surface.map_surface(folder)
        gaze_on_srf = pl_surface.surface_map_data(tracker,original_pldata['gaze_positions'])
        logger.warning('Original Data Samples: %s on surface: %s',len(original_pldata['gaze_positions']),len(gaze_on_srf))
        original_pldata['gaze_positions'] = gaze_on_srf
    # use pupilhelper func to make samples df (confidence, gx, gy, smpl_time, diameter)
    pldata = gaze_to_pandas(original_pldata['gaze_positions'])
    if surfaceMap:
        # rescale normalized surface coords to screen pixels, excluding the
        # markers and their white borders (75 px marker + 18 px border)
        pldata.gx = pldata.gx*(1920 - 2*(75+18))+(75+18) # minus white border of marker & marker
        pldata.gy = pldata.gy*(1080- 2*(75+18))+(75+18)
        logger.debug('Mapped Surface to ScreenSize 1920 & 1080 (minus markers)')
        del tracker
    # sort according to smpl_time
    pldata.sort_values('smpl_time',inplace=True)
    # get the nice samples df
    plsamples = make_df.make_samples_df(pldata,px2deg=px2deg) #
    if parsemsg:
        # Get msgs df: keep only notifications that carry a 'label'
        gridnotes = [note for note in original_pldata['notifications'] if 'label' in note.keys()]
        plmsgs = pd.DataFrame();
        for note in gridnotes:
            msg = parse.parse_message(note)
            if not msg.empty:
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # needs pd.concat (mind that `msg` may be a Series) to upgrade.
                plmsgs = plmsgs.append(msg, ignore_index=True)
        plmsgs = fix_smallgrid_parser(plmsgs)
    else:
        plmsgs = original_pldata['notifications']
    plevents = pd.DataFrame()
    return plsamples, plmsgs,plevents
#%% EYELINK
def raw_el_data(subject, datapath='/net/store/nbp/projects/etcomp/'):
    """Read the subject's EyeLink EDF file.

    Input:  subject name and datapath
    Output: (elsamples, elevents, elnotes) pandas DataFrames as produced by
            pyedfread (samples / fixation+saccade events / trial notes).
    """
    from pyedfread import edf # parses SR research EDF data files into pandas df
    recording_dir = os.path.join(datapath,subject,'raw')
    edf_path = os.path.join(recording_dir, findFile(recording_dir,'.EDF')[0])
    samples, events, notes = edf.pread(edf_path, trial_marker=b'')
    return (samples, events, notes)
def import_el(subject, datapath='/net/store/nbp/projects/etcomp/'):
    """Import and clean EyeLink data for one subject.

    Input:  subject:  (str) name
            datapath: (str) location where data is stored
    Output: (elsamples, elmsgs, elevents) pandas DataFrames; samples are in
            seconds, with merged left/right gaze (gx, gy), velocities and
            pupil area (pa), and (0,0) at the bottom-left of the screen.
    """
    # Input: subject: (str) name
    #        datapath: (str) location where data is stored
    # Output: Returns list of 3 el df (elsamples, elmsgs, elevents)
    assert(type(subject)==str)
    # get a logger
    logger = logging.getLogger(__name__)
    # Load edf
    # load and preprocess data from raw data files
    # elsamples:  contains individual EL samples
    # elevents:   contains fixation and saccade definitions
    # elnotes:    contains notes (meta data) associated with each trial
    elsamples,elevents,elnotes = raw_el_data(subject,datapath)
    # TODO understand and fix this
    # Workaround: occasionally pyedfread returns absurd sample times (>1e10);
    # reloading the edf module and re-reading seems to clear it (up to 40 tries).
    count = 0
    while np.any(elsamples.time>1e10) and count < 40:
        from pyedfread import edf # parses SR research EDF data files into pandas df
        imp.reload(edf)
        count = count + 1
        # logger.error(elsamples.time[elsamples.time>1e10])
        logger.error('Attention: Found sampling time above 1*e100. Clearly wrong! Trying again (check again later)')
        elsamples, elevents, elnotes = raw_el_data(subject,datapath)
    # We also delete Samples with interpolated pupil responses. In one dataset these were ~800samples.
    logger.warning('Deleting %.4f%% due to interpolated pupil (online during eyelink recording)'%(100*np.mean(elsamples.errors ==8)))
    logger.warning('Deleting %.4f%% due to other errors in the import process'%(100*np.mean((elsamples.errors !=8) & (elsamples.errors!=0))))
    elsamples = elsamples.loc[elsamples.errors == 0]
    # We had issues with samples with negative time
    logger.warning('Deleting %.4f%% samples due to time<=0'%(100*np.mean(elsamples.time<=0)))
    elsamples = elsamples.loc[elsamples.time > 0]
    # Also at the end of the recording, we had time samples that were smaller than the first sample.
    # Note that this assumes the samples are correctly ordered and the last samples actually
    # refer to artefacts. If you use %SYNCTIME% this might be problematic (don't know how nwilming's edfread incorporates synctime)
    # NOTE(review): elsamples.time[0] is a label-based lookup; if the earlier
    # filters dropped the row labelled 0 this raises KeyError -- confirm.
    logger.warning('Deleting %.4f%% samples due to time being less than the starting time'%(100*np.mean(elsamples.time <= elsamples.time[0])))
    elsamples = elsamples.loc[elsamples.time > elsamples.time[0]]
    elsamples = elsamples.reset_index()
    # Convert to same units
    # change to seconds to be the same as pupil
    elsamples['smpl_time'] = elsamples['time'] / 1000
    elnotes['msg_time'] = elnotes['trialid_time'] / 1000
    elnotes = elnotes.drop('trialid_time',axis=1)
    elevents['start'] = elevents['start'] / 1000
    elevents['end'] = elevents['end'] / 1000
    # TODO solve this!
    if np.any(elsamples.smpl_time>1e10):
        logger.error(elsamples.smpl_time[elsamples.smpl_time>1e10])
        logger.error('Error, even after reloading the data once, found sampling time above 1*e100. This is clearly wrong. Investigate')
        raise Exception('Error, even after reloading the data once, found sampling time above 1*e100. This is clearly wrong. Investigate')
    # for horizontal gaze component
    # Idea: Logical indexing; -32768 is EyeLink's missing-data sentinel
    ix_left = elsamples.gx_left   != -32768
    ix_right = elsamples.gx_right != -32768
    # take the pupil area pa of the recorded eye
    # set pa to NaN instead of 0  or -32768
    elsamples.loc[elsamples['pa_right'] < 1e-20,'pa_right'] = np.nan
    elsamples.loc[~ix_right,'pa_right']                     = np.nan
    elsamples.loc[elsamples['pa_left'] < 1e-20,'pa_left']   = np.nan
    elsamples.loc[~ix_left,'pa_left']                       = np.nan
    # add pa column that takes the value that is not NaN
    ix_left  = ~np.isnan(elsamples.pa_left)
    ix_right = ~np.isnan(elsamples.pa_right)
    # init with nan
    elsamples['pa'] = np.nan
    elsamples.loc[ix_left, 'pa'] = elsamples.pa_left[ix_left]
    elsamples.loc[ix_right,'pa'] = elsamples.pa_right[ix_right]
    # Determine which eye was recorded
    ix_left = elsamples.gx_left   != -32768
    ix_right = elsamples.gx_right != -32768
    if (np.mean(ix_left | ix_right)<0.99):
        raise NameError('In more than 1 % neither left or right data')
    # for horizontal gaze component
    elsamples.loc[ix_left,'gx']  = elsamples.gx_left[ix_left]
    elsamples.loc[ix_right,'gx'] = elsamples.gx_right[ix_right]
    # for horizontal gaze velocity component
    elsamples.loc[ix_left,'gx_vel']  = elsamples.gxvel_left[ix_left]
    elsamples.loc[ix_right,'gx_vel'] = elsamples.gxvel_right[ix_right]
    # for vertical gaze component
    ix_left = elsamples.gy_left   != -32768
    ix_right = elsamples.gy_right != -32768
    elsamples.loc[ix_left,'gy']  = elsamples.gy_left[ix_left]
    elsamples.loc[ix_right,'gy'] = elsamples.gy_right[ix_right]
    # for vertical gaze velocity component
    elsamples.loc[ix_left,'gy_vel']  = elsamples.gyvel_left[ix_left]
    elsamples.loc[ix_right,'gy_vel'] = elsamples.gyvel_right[ix_right]
    # Make (0,0) the point bottom left
    elsamples['gy'] = 1080 - elsamples['gy']
    # "select" relevant columns
    elsamples = make_df.make_samples_df(elsamples)
    # Parse EL msg
    elmsgs = elnotes.apply(parse.parse_message,axis=1)
    elmsgs = elmsgs.drop(elmsgs.index[elmsgs.isnull().all(1)])
    elmsgs = fix_smallgrid_parser(elmsgs)
    return elsamples, elmsgs, elevents
def fix_smallgrid_parser(etmsgs):
    """Disambiguate SMALLGRID messages that were all sent under one name.

    During the experiment the small grid before and after a block were both
    labelled identically; relabel them as SMALLGRID_BEFORE / SMALLGRID_AFTER
    (13 messages each, 6 blocks => 156 expected messages).  Mutates and
    returns ``etmsgs``; raises RuntimeError when the count is not 156.
    """
    # 6 blocks x (13 'before' + 13 'after') small-grid messages
    replaceGrid = pd.Series([k for l in [13*['SMALLGRID_BEFORE'],13*['SMALLGRID_AFTER']]*6 for k in l])
    ix = etmsgs.query('grid_size==13').index
    # BUG FIX: was `len(ix) is not 156` -- an identity comparison against an
    # int literal (SyntaxWarning in modern Python); use a value comparison.
    if len(ix) != 156:
        raise RuntimeError('we need to have 156 small grid msgs')
    replaceGrid.index = ix
    etmsgs.loc[ix,'condition'] = replaceGrid
    # Buttonpresses / stop messages etc. were sent as GRID instead of SMALLGG;
    # re-label them per block using the surrounding stop events as time bounds.
    for blockid in etmsgs.block.dropna().unique():
        if blockid == 0:
            continue
        tmp = etmsgs.query('block==@blockid')
        t_before_start = tmp.query('condition=="DILATION"& exp_event=="stop"').msg_time.values
        t_before_end = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values
        # NOTE(review): t_after_start duplicates the SHAKE/stop query above,
        # so the before/after windows share a boundary -- confirm intended.
        t_after_start = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values
        t_after_end =tmp.iloc[-1].msg_time
        ix = tmp.query('condition=="GRID"&msg_time>@t_before_start & msg_time<=@t_before_end').index
        etmsgs.loc[ix,'condition'] = 'SMALLGRID_BEFORE'
        ix = tmp.query('condition=="GRID"&msg_time>@t_after_start & msg_time<=@t_after_end').index
        etmsgs.loc[ix,'condition'] = 'SMALLGRID_AFTER'
    return(etmsgs)
| 15,223 | 5,227 |
import music21
# Pitch-class name -> semitone offset within an octave ('x' marks a rest/unknown).
KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4,
                   'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9,
                   'a#': 10, 'bb': 10, 'b': 11, 'x': None}
def parse_note(note):
    """Map a pitch string like 'c#4' to a key index counted from A0 (= 0)."""
    semitone = KEY_TO_SEMITONE[note[:-1].lower()]
    midi_octave = int(note[-1]) + 1  # MIDI octave numbers are offset by one
    # MIDI note number minus 21 (= MIDI A0) gives the piano-key-style index
    return midi_octave * 12 + semitone - 21
# A0-relative key index (from parse_note) -> scale-degree used by test5.
translate5 = {
    46: 0,
    48: 1,
    50: 2,
    51: 3,
    53: 4,
    55: 5,
    56: 6,
    58: 7,
}
def load_test5(times=1):
    """Parse test5.musicxml and return its note sequence repeated `times`."""
    score = music21.converter.parse('test5.musicxml')
    right_hand = [
        translate5[parse_note(str(note.pitch).lower())]
        for note in score.parts[0].flat.getElementsByClass('Note')
    ]
    # every entry references the same list, exactly as repeated append() did
    return [right_hand for _ in range(times)]
# print(load_test5()) | 768 | 377 |
from dsa.lib.math.tests.fixture import MathTestCase
class DsTestCase(MathTestCase):
    """Base test case for data-structure tests; inherits the math fixture."""
    pass
class ParenthesesTestCase(DsTestCase):
    """Test case for the parentheses exercises; currently just a marker class."""
    pass
| 145 | 52 |
# import typing
#
# import boto3
# import typer
#
# import pacu.data as p
#
#
# if typing.TYPE_CHECKING:
# from mypy_boto3_iam import type_defs as t
# from mypy_boto3_iam.client import IAMClient
# from mypy_boto3_iam.paginator import ListRolesPaginator
#
#
# def fetch(profile_name: typing.Optional[str] = typer.Option(default=None)):
# sess = boto3.session.Session(profile_name=profile_name)
# iam: IAMClient = sess.client('iam')
# paginator: ListRolesPaginator = iam.get_paginator('list_roles')
# page_iterator: typing.Iterator[t.ListRolesResponseTypeDef] = paginator.paginate()
# for page in page_iterator:
# p.db["roles"].insert_all(page['Roles'], pk="RoleName")
#
#
# if __name__ == '__main__':
# typer.run(fetch)
| 763 | 287 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import http.cookiejar
import urllib.request
# Your v2ex cookie value for the key [auth] after login.
# See README.md if you cannot find the [auth] cookie.
V2EX_COOKIE = ''
V2EX_DOMAIN = r'v2ex.com'
V2EX_URL_START = r'https://' + V2EX_DOMAIN
# Daily-mission page; visiting its redeem link claims the daily coins.
V2EX_MISSION = V2EX_URL_START + r'/mission/daily'
V2EX_COIN_URL = r'/mission/daily/redeem?once='
def get_once_url(data):
    """Extract the daily-mission redeem path from a mission-page response.

    ``data`` is the raw response body (bytes).  Returns the matched
    '/mission/daily/redeem?once=<digits>' path, or None when no redeem link
    is present (e.g. the coins were already claimed).
    """
    # BUG FIX: raw string -- '\?' and '\d' are regex escapes, and in a plain
    # string '\?' / '\d' are invalid escape sequences (SyntaxWarning).
    p = r'/mission/daily/redeem\?once=\d+'
    m = re.search(p, data.decode())
    if m:
        return m.group()
    else:
        return None
def make_cookie(name, value):
    """Build a host-wide session cookie for v2ex.com.

    Returns an http.cookiejar.Cookie suitable for CookieJar.set_cookie().
    """
    return http.cookiejar.Cookie(
        version=0,
        name=name,
        value=value,
        port=None,
        port_specified=False,
        domain=V2EX_DOMAIN,
        domain_specified=True,
        domain_initial_dot=False,
        path='/',
        path_specified=True,
        secure=False,
        expires=None,
        discard=False,
        comment=None,
        comment_url=None,
        # BUG FIX: `rest` must be a dict of extra attributes; passing None
        # breaks Cookie.has_nonstandard_attr()/get_nonstandard_attr(), which
        # do membership tests on it.
        rest={}
    )
if __name__ == '__main__':
    # Authenticate with the pre-set [auth] cookie and claim the daily coins.
    cj = http.cookiejar.CookieJar()
    cj.set_cookie(make_cookie('auth', V2EX_COOKIE))
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    # Browser-like headers; the Referer is required by the redeem endpoint.
    opener.addheaders = [
        ('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0'),
        ('Referer', V2EX_MISSION)
    ]
    # warm up the session, then fetch the mission page containing the redeem link
    opener.open(V2EX_URL_START).read()
    data = opener.open(V2EX_MISSION).read()
    once = get_once_url(data)
    if not once:
        # no "once" token usually means the coins were already claimed today
        print('"once" not found, maybe you already got coins')
        sys.exit(-1)
    v2ex_coin_url = V2EX_URL_START + once
    print(v2ex_coin_url)
    # hitting the redeem URL claims the coins
    opener.open(v2ex_coin_url).read()
| 1,698 | 661 |
# -*- coding: utf-8 -*-
import re
import shlex
import os
import inspect
__version__ = '1.1.0'
# Python 2 has no FileNotFoundError; fall back to its closest ancestor, IOError.
try:
    FileNotFoundError
except NameError:  # Python 2
    FileNotFoundError = IOError
# Name of the environment file searched for by read_env().
ENV = '.env'
def read_env(path=None, environ=None, recurse=True):
    """Reads a .env file into ``environ`` (which defaults to ``os.environ``).

    If .env is not found in the directory from which this function is called, recurse
    up the directory tree until a .env file is found.

    Existing keys in ``environ`` are preserved (``setdefault``); the file's
    values only fill in missing ones.
    """
    environ = environ if environ is not None else os.environ
    # By default, start search from the same file this function is called
    if path is None:
        frame = inspect.currentframe().f_back
        caller_dir = os.path.dirname(frame.f_code.co_filename)
        path = os.path.join(os.path.abspath(caller_dir), ENV)
    if recurse:
        # NOTE(review): `current` starts at the .env *file* path, so the first
        # iteration probes '<caller>/.env/.env' before climbing to the caller
        # directory -- harmless but looks unintended; confirm.
        current = path
        pardir = os.path.abspath(os.path.join(current, os.pardir))
        while current != pardir:
            target = os.path.join(current, ENV)
            if os.path.exists(target):
                path = os.path.abspath(target)
                break
            else:
                current = os.path.abspath(os.path.join(current, os.pardir))
                pardir = os.path.abspath(os.path.join(current, os.pardir))
    # NOTE(review): `path` is always truthy here, so a failed recursive search
    # surfaces as the open() below raising instead of this explicit error.
    if not path:
        raise FileNotFoundError('Could not find a .env file')
    with open(path, 'r') as fp:
        content = fp.read()
    parsed = parse_env(content)
    for key, value in parsed.items():
        environ.setdefault(key, value)
# A valid environment-variable name: a letter or underscore, then word chars.
_ITEM_RE = re.compile(r'[A-Za-z_][A-Za-z_0-9]*')
# From Honcho. See NOTICE file for license details.
def parse_env(content):
    """Parse the content of a .env file (a line-delimited KEY=value format) into a
    dictionary mapping keys to values.

    Lines that are not well-formed assignments, or whose key is not a valid
    variable name, are silently skipped.
    """
    values = {}
    for raw_line in content.splitlines():
        tokens = list(shlex.shlex(raw_line, posix=True))
        # an assignment needs at least: NAME, '=', VALUE
        if len(tokens) < 3:
            continue
        key, assign_op = tokens[:2]
        if assign_op != '=':
            continue
        if not _ITEM_RE.match(key):
            continue
        # re-join any remaining tokens and expand literal \n / \t escapes
        rest = ''.join(tokens[2:]).replace(r'\n', '\n').replace(r'\t', '\t')
        values[key] = rest
    return values
| 2,309 | 739 |
import asyncio
import sys
from types import TracebackType
from typing import Any, AsyncContextManager, List, Optional, Sequence, Tuple, Type
from trio import MultiError
from p2p.asyncio_utils import create_task
class AsyncContextGroup:
    """Enter and exit a group of async context managers concurrently.

    On __aenter__ every manager is entered in its own task; if any of them
    fails, the ones that *did* enter successfully are exited before the
    error propagates.  On __aexit__ all managers are exited in parallel and
    any exceptions they raise are re-raised together as a trio.MultiError.
    """
    def __init__(self, context_managers: Sequence[AsyncContextManager[Any]]) -> None:
        self.cms = tuple(context_managers)
        # managers successfully entered, i.e. the ones we owe an __aexit__
        self.cms_to_exit: Sequence[AsyncContextManager[Any]] = tuple()
    async def __aenter__(self) -> Tuple[Any, ...]:
        futures = [create_task(cm.__aenter__(), f'AsyncContextGroup/{repr(cm)}') for cm in self.cms]
        await asyncio.wait(futures)
        # Exclude futures not successfully entered from the list so that we don't attempt to exit
        # them.
        self.cms_to_exit = tuple(
            cm for cm, future in zip(self.cms, futures)
            if not future.cancelled() and not future.exception())
        try:
            # future.result() re-raises the first entry failure here
            return tuple(future.result() for future in futures)
        except:  # noqa: E722
            # roll back the managers that did enter, then re-raise
            await self._exit(*sys.exc_info())
            raise
    async def _exit(self,
                    exc_type: Optional[Type[BaseException]],
                    exc_value: Optional[BaseException],
                    traceback: Optional[TracebackType],
                    ) -> None:
        """Exit all successfully-entered managers, gathering their errors."""
        if not self.cms_to_exit:
            return
        # don't use gather() to ensure that we wait for all __aexit__s
        # to complete even if one of them raises
        # NOTE(review): passing bare coroutines to asyncio.wait() was
        # deprecated in 3.8 and removed in 3.11 -- these need wrapping in
        # tasks on modern Python; confirm the supported interpreter range.
        done, _pending = await asyncio.wait(
            [cm.__aexit__(exc_type, exc_value, traceback) for cm in self.cms_to_exit])
        # This is to ensure we re-raise any exceptions our coroutines raise when exiting.
        errors: List[Tuple[Type[BaseException], BaseException, TracebackType]] = []
        for d in done:
            try:
                d.result()
            except BaseException:
                # sys.exc_info() captures the exception just caught
                errors.append(sys.exc_info())
        if errors:
            raise MultiError(
                tuple(exc_value.with_traceback(exc_tb) for _, exc_value, exc_tb in errors))
    async def __aexit__(self,
                        exc_type: Optional[Type[BaseException]],
                        exc_value: Optional[BaseException],
                        traceback: Optional[TracebackType],
                        ) -> None:
        # Since exits are running in parallel, they can't see each
        # other exceptions, so send exception info from `async with`
        # body to all.
        await self._exit(exc_type, exc_value, traceback)
| 2,535 | 709 |
from setuptools import setup, find_packages
VERSION = '0.2'
# Packaging metadata for the trybox-django distribution.
setup(
    name='trybox-django',
    version=VERSION,
    description='TryBox:Django',
    author='Sophilabs',
    author_email='contact@sophilabs.com',
    url='https://github.com/sophilabs/trybox-django',
    # tarball pinned to this VERSION so pip resolves the matching release
    download_url='http://github.com/sophilabs/trybox-django/tarball/trybox-django-v{0}#egg=trybox-django'.format(VERSION),
    license='MIT',
    install_requires=['django', 'trybox'],
    # trybox is not on PyPI; install it straight from GitHub
    dependency_links=['https://github.com/sophilabs/trybox/tarball/master#egg=trybox'],
    packages=find_packages(),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
)
from bflib.items import gems
class GemTypeRow(object):
    """One row of the gem-type lookup table: a percentile span and its gem."""
    __slots__ = ["min_percent", "max_percent", "gem_type"]
    def __init__(self, min_percent, max_percent, gem_type):
        # inclusive percentile bounds for this gem type
        self.min_percent = min_percent
        self.max_percent = max_percent
        self.gem_type = gem_type
class GemTypeTable(object):
    """Percentile lookup table mapping a d100 roll to a gem type."""
    # Rows cover 1-100 with inclusive, non-overlapping percentile ranges.
    rows = [
        GemTypeRow(1, 10, gems.Greenstone),
        GemTypeRow(11, 20, gems.Malachite),
        GemTypeRow(21, 28, gems.Aventurine),
        GemTypeRow(29, 38, gems.Phenalope),
        GemTypeRow(39, 45, gems.Amethyst),
        GemTypeRow(46, 54, gems.Fluorospar),
        GemTypeRow(55, 60, gems.Garnet),
        GemTypeRow(61, 65, gems.Alexandrite),
        GemTypeRow(66, 70, gems.Topaz),
        GemTypeRow(71, 75, gems.Bloodstone),
        GemTypeRow(76, 79, gems.Sapphire),
        GemTypeRow(80, 89, gems.Diamond),
        GemTypeRow(90, 94, gems.FireOpal),
        GemTypeRow(95, 97, gems.Ruby),
        GemTypeRow(98, 100, gems.Emerald),
    ]
    @classmethod
    def get(cls, roll_value):
        """Return the first row whose range contains roll_value.

        Raises StopIteration when roll_value falls outside 1-100 (next()
        is called without a default).
        """
        return next((row for row in cls.rows
                     if row.min_percent <= roll_value <= row.max_percent))
| 1,152 | 467 |
from Simulacion import Optimizacion
from Simulacion import Graficos
from Simulacion import Genetico
from Simulacion import Model_close
from mylib import mylib | 158 | 43 |
# This need to be sorted out in a smarter way
class InitializationError(Exception):
    """Raised when a class fails to initialise.

    The description template is formatted with the offending class's name.
    """
    def __init__(self, SomeClass, description):
        self.value = SomeClass
        self.description = description.format(SomeClass.__name__)
    def __str__(self):
        return self.description
class ReservedValueError(Exception):
    """Raised when a received value clashes with a reserved/expected one.

    The description template is formatted with (expected, received).
    """
    def __init__(self, expected, received, description):
        self.value = received
        self.expected = expected
        self.description = description.format(expected, received)
    def __str__(self):
        return self.description
class ApplicationError(Exception):
    """Root of the application's error hierarchy."""
    pass
class NonFatalError(ApplicationError):
    """An application error the program can recover from."""
    pass
class FatalError(Exception):
    """Unrecoverable error.

    NOTE(review): unlike NonFatalError this derives from Exception rather
    than ApplicationError, so `except ApplicationError` will not catch it --
    confirm that is intentional.
    """
    pass
class UserError(NonFatalError):
    """Recoverable error caused by invalid user input or action."""
    pass
#
# Copyright (c) 2017 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from base64 import b64encode, b64decode
from pythemis import smessage
# argv positions: script name, command, key paths, message
_, COMMAND, SENDER_PRIVATE_KEY, RECIPIENT_PUBLIC_KEY, MESSAGE = range(5)
if len(sys.argv) != 5:
    print('Usage: <command: enc | dec | sign | verify > <send_private_key> <recipient_public_key> <message>')
    exit(1)
command = sys.argv[COMMAND]
private_key_path = sys.argv[SENDER_PRIVATE_KEY]
public_key_path = sys.argv[RECIPIENT_PUBLIC_KEY]
message = sys.argv[MESSAGE]
# keys are raw binary Themis key files
with open(private_key_path, 'rb') as f:
    private_key = f.read()
with open(public_key_path, 'rb') as f:
    public_key = f.read()
message_encrypter = smessage.SMessage(private_key, public_key)
if command == 'enc':
    # sign-and-encrypt, base64 for printable output
    encrypted = message_encrypter.wrap(message.encode('utf-8'))
    encoded = b64encode(encrypted)
    print(encoded.decode('ascii'))
elif command == 'dec':
    decoded = b64decode(message.encode('utf-8'))
    decrypted = message_encrypter.unwrap(decoded)
    print(decrypted.decode('utf-8'))
elif command == 'sign':
    # signature only (no encryption)
    encrypted = smessage.ssign(private_key, message.encode('utf-8'))
    encoded = b64encode(encrypted)
    print(encoded.decode('ascii'))
elif command == 'verify':
    decoded = b64decode(message.encode('utf-8'))
    decrypted = smessage.sverify(public_key, decoded)
    print(decrypted.decode('utf-8'))
else:
    # BUG FIX: the hint previously listed 'dev' instead of the 'dec' command
    print('Wrong command, use <enc | dec | sign | verify>')
    exit(1)
| 1,964 | 683 |
'''Tests the rpc middleware and utilities. It uses the calculator example.'''
import unittest
from pulsar.apps import rpc
from pulsar.apps.http import HttpWsgiClient
class rpcTest(unittest.TestCase):
    """Exercises the JSON-RPC middleware via the calculator example app."""
    def proxy(self):
        # back the proxy with a wsgi client so no real server is required
        from examples.calculator.manage import Site
        wsgi_http = HttpWsgiClient(Site())
        return rpc.JsonProxy('http://127.0.0.1:8060/', http=wsgi_http, timeout=20)
    def test_proxy(self):
        rpc_proxy = self.proxy()
        client = rpc_proxy.http
        self.assertTrue(len(client.headers))
        self.assertEqual(client.headers['user-agent'], 'Pulsar-Http-Wsgi-Client')
        self.assertTrue(client.wsgi_callable)
        self.assertEqual(rpc_proxy._version, '2.0')
    async def test_addition(self):
        rpc_proxy = self.proxy()
        result = await rpc_proxy.calc.add(4, 5)
        self.assertEqual(result, 9)
| 819 | 278 |
# -*- coding: utf-8 -*-
"""EN3-BT MCD
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cnvSgNDexJ0cqrTWGygI_smZ0y8EIWZn
"""
import torch
import numpy as np
import tqdm
import copy
from torch.nn import functional as F
from torch.nn.modules.module import Module
from sklearn.calibration import calibration_curve, CalibratedClassifierCV
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.modules.container import ModuleList
from torch.nn.init import xavier_uniform_
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
from torch.nn.modules.normalization import LayerNorm
from torch.utils.data import DataLoader, Dataset
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
from utils import *
import math
from torch.autograd import Variable
import re
import pandas as pd
torch.manual_seed(2)
def set_dropout_to_train(m):
    """Force dropout layers back into train mode (for MC-dropout inference).

    Intended for Module.apply() after model.eval(): only dropout keeps
    sampling while everything else stays in eval mode.
    """
    # isinstance also catches Dropout subclasses, unlike the original
    # `type(m) == torch.nn.Dropout` exact-type check.
    if isinstance(m, torch.nn.Dropout):
        m.train()
class Embedder(Module):
    """Token-id -> d_model embedding lookup."""
    def __init__(self, vocab_size, d_model):
        super().__init__()
        self.d_model = d_model
        print(vocab_size, d_model)
        # +1 leaves room for index 0 (used as the padding id)
        self.embed = torch.nn.Embedding(vocab_size + 1, d_model)
    def forward(self, x):
        return self.embed(x)
class PositionalEncoder(Module):
    """Adds sinusoidal positional encodings to embeddings, then applies dropout.

    NOTE(review): the loop writes pe[pos, i+1], which raises IndexError for odd
    d_model; also the cosine exponent uses 2*(i+1)/d_model where the standard
    Vaswani et al. formulation pairs sin/cos with the same 2i/d_model exponent
    -- confirm this deviation is intended.
    """
    def __init__(self, d_model, max_seq_len = 768, dropout = 0.5):
        super().__init__()
        self.d_model = d_model
        self.dropout = Dropout(dropout)
        # create constant 'pe' matrix with values dependant on
        # pos and i
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = \
                math.sin(pos / (10000 ** ((2 * i)/d_model)))
                pe[pos, i + 1] = \
                math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
        pe = pe.unsqueeze(0)
        # register as a buffer so it moves with the module but is not trained
        self.register_buffer('pe', pe)
    def forward(self, x):
        # make embeddings relatively larger
        x = x * math.sqrt(self.d_model)
        #add constant to embedding
        seq_len = x.size(1)
        pe = Variable(self.pe[:,:seq_len], requires_grad=False)
        if x.is_cuda:
            # NOTE(review): the return value of pe.cuda() is discarded, so this
            # line is a no-op; the registered buffer already follows the module's
            # device -- confirm and drop.
            pe.cuda()
        x = x + pe
        return self.dropout(x)
def get_clones(module, N):
    """Return a ModuleList holding N independent deep copies of `module`."""
    return torch.nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def attention(q, k, v, d_k, mask=None, dropout=None):
    """Scaled dot-product attention.

    q, k, v: (..., seq, d_k) tensors; d_k: per-head feature size used for
    scaling; mask: optional tensor broadcastable to the score matrix where 0
    marks positions to hide; dropout: optional dropout module applied to the
    attention weights.  Returns the attended values.
    """
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    # BUG FIX: the mask parameter was accepted but never applied; suppress
    # disallowed positions before the softmax so they get ~zero weight.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    scores = F.softmax(scores, dim=-1)
    if dropout is not None:
        scores = dropout(scores)
    output = torch.matmul(scores, v)
    return output
class MultiHeadAttention(Module):
    """Multi-head attention: project q/k/v, attend per head, re-combine."""
    def __init__(self, heads, d_model, dropout = 0.5):
        super().__init__()
        self.d_model = d_model
        self.d_k = d_model // heads  # per-head feature size
        self.h = heads
        self.q_linear = torch.nn.Linear(d_model, d_model)
        self.v_linear = torch.nn.Linear(d_model, d_model)
        self.k_linear = torch.nn.Linear(d_model, d_model)
        self.dropout = Dropout(dropout)
        self.out = torch.nn.Linear(d_model, d_model)
    def forward(self, q, k, v, mask=None):
        batch = q.size(0)
        # project, split into heads, and move the head axis ahead of sequence:
        # (batch, seq, d_model) -> (batch, h, seq, d_k)
        q = self.q_linear(q).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        k = self.k_linear(k).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        v = self.v_linear(v).view(batch, -1, self.h, self.d_k).transpose(1, 2)
        # scaled dot-product attention over every head at once
        attended = attention(q, k, v, self.d_k, mask, self.dropout)
        # merge heads back: (batch, h, seq, d_k) -> (batch, seq, d_model)
        merged = attended.transpose(1, 2).contiguous().view(batch, -1, self.d_model)
        return self.out(merged)
class Norm(Module):
    """Layer normalisation with learnable scale (alpha) and shift (bias)."""
    def __init__(self, d_model, eps = 1e-6):
        super().__init__()
        self.size = d_model
        # two learnable parameters to calibrate the normalisation
        self.alpha = torch.nn.Parameter(torch.ones(self.size))
        self.bias = torch.nn.Parameter(torch.zeros(self.size))
        self.eps = eps
    def forward(self, x):
        centered = x - x.mean(dim=-1, keepdim=True)
        spread = x.std(dim=-1, keepdim=True) + self.eps
        return self.alpha * centered / spread + self.bias
class FeedForward(Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear."""
    def __init__(self, d_model, d_ff=2048, dropout = 0.3):
        super().__init__()
        # d_ff defaults to 2048 as in the standard transformer
        self.linear_1 = torch.nn.Linear(d_model, d_ff)
        self.dropout = Dropout(dropout)
        self.linear_2 = torch.nn.Linear(d_ff, d_model)
    def forward(self, x):
        hidden = self.dropout(F.relu(self.linear_1(x)))
        return self.linear_2(hidden)
class EncoderLayer(Module):
    """Pre-norm encoder layer: self-attention then feed-forward, each with a
    residual connection and its own dropout."""
    def __init__(self, d_model, heads, dropout = 0.3):
        super().__init__()
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.attn = MultiHeadAttention(heads, d_model)
        self.ff = FeedForward(d_model)
        self.dropout_1 = Dropout(dropout)
        self.dropout_2 = Dropout(dropout)
    def forward(self, x, mask = None):
        # self-attention sub-block (the incoming mask is not forwarded)
        normed = self.norm_1(x)
        x = x + self.dropout_1(self.attn(normed, normed, normed, mask = None))
        # feed-forward sub-block
        normed = self.norm_2(x)
        return x + self.dropout_2(self.ff(normed))
class Encoder(Module):
    """Transformer encoder ending in a single sigmoid output per sample.

    NOTE(review): output_layer flattens each sample's (seq_len, d_model)
    activations into a Linear(d_model**2, 1), which only works when
    seq_len == d_model -- presumably guaranteed by the caller (BAN ties
    d_model to max_padding); confirm.
    """
    def __init__(self, vocab_size = 1000, d_model = 32, N = 1, heads= 1):
        super().__init__()
        self.N = N
        self.embed = Embedder(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model)
        # N independent encoder layers
        self.layers = get_clones(EncoderLayer(d_model, heads), N)
        self.norm = Norm(d_model)
        self.output_layer = torch.nn.Linear(d_model**2, 1)
        self.output_activation = torch.nn.Sigmoid()
    def forward(self, src):
        bdim = src.shape[0]
        x = self.embed(src)
        #tlen = int(math.sqrt(src.shape[1]))
        #x = src.reshape(int(src.shape[0]), tlen, tlen)
        x = self.pe(x)
        for i in range(self.N):
            x = self.layers[i](x)
        x = self.norm(x)
        # per-sample readout: flatten the normalised activations and squash
        outputs = torch.autograd.Variable(torch.zeros(bdim), requires_grad = False)
        for j in range(bdim):
            outputs[j] = self.output_layer(x[j,:,:].flatten())
        s = self.output_activation(outputs)
        return s
class Dataset_single(Dataset):
    """Torch Dataset over an integer feature matrix with optional targets.

    Items are (long tensor row, target tensor) or (long tensor row, -1)
    when the dataset is unlabeled.
    """
    def __init__(self, features, targets = None, transform=None):
        self.features = features
        # keep targets as an ndarray so reshape works in __getitem__;
        # None marks an unlabeled dataset
        self.targets = None if targets is None else np.array(targets)
    def __len__(self):
        return self.features.shape[0]
    def __getitem__(self, index):
        instance = torch.tensor(self.features[index],
                                dtype=torch.long,
                                device='cpu')
        if self.targets is None:
            return instance, -1
        target = torch.as_tensor(self.targets.reshape(-1, 1)[index],
                                 device='cpu')
        return instance, target
# This is were we train our model
class BAN:
    """Bayesian Attention Network binary text classifier.

    Wraps an attention ``Encoder`` with Keras-style tokenisation/padding,
    Monte-Carlo-dropout prediction (dropout kept active at inference),
    validation-based decision-threshold calibration, and an expected
    calibration error (ECE) diagnostic.
    """

    def __init__(self, num_epochs=200, vocab_size=100000, stopping_crit=5, learning_rate=0.001, tokenizer_num_words=100000, max_padding=256, N=1, heads=1, batch_size=64):
        #self.learning_rate = 0.001
        self.d_model = max_padding  # model width is tied to the padded sequence length
        self.N = N  # number of encoder layers
        self.attention_heads = heads
        self.max_padding = max_padding
        self.image_folder = None  # must be set before fit() saves the calibration plot
        self.learning_rate = learning_rate
        self.classes_ = [0, 1]  # binary classifier (sklearn-style attribute)
        self.validation_index = 0
        self.batch_size = batch_size
        self.threshold_perf_tuples = []  # history of [best score, threshold] pairs
        self.num_epochs = num_epochs
        self.probability_threshold = None  # calibrated decision threshold, set in fit()
        self.vocab_size = vocab_size
        self.stopping_crit = stopping_crit  # epochs without improvement before early stop
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        logging.info("Using {}".format(self.device))
        self.tokenizer = Tokenizer(num_words=tokenizer_num_words)

    def pad_sequence(self, list_of_texts):
        """Fit the tokenizer on *list_of_texts* and return padded id sequences."""
        # https://towardsdatascience.com/text-classification-in-keras-part-2-how-to-use-the-keras-tokenizer-word-representations-fd571674df23
        self.tokenizer.fit_on_texts(list_of_texts)
        sequences = self.tokenizer.texts_to_sequences(list_of_texts)
        pad_seq = pad_sequences(sequences, maxlen=self.max_padding)
        return pad_seq

    def encode_input_text_integer(self, list_of_texts, mapping=None):
        """Whitespace-tokenise texts into integer sequences of length max_padding.

        Returns (encoded_texts, mapping); 0 is the padding id, real words
        start at 1. ## somewhat adhoc -> can be improved -> TODO: Byte pair.
        """
        unique_words = set()
        for text in list_of_texts:
            [unique_words.add(x) for x in text.strip().split()]
        unique_words = list(unique_words)
        if mapping is None:
            mapping = {}
            for index, word in enumerate(unique_words):
                mapping[word] = index + 1
        encoded_texts = []
        for text in list_of_texts:
            # Append max_padding zeros then truncate: pads short texts, clips long ones.
            encoded_sentence = [mapping[x] for x in text.strip().split()] + [0] * self.max_padding
            encoded_texts.append(np.array(encoded_sentence[0:self.max_padding]))
        return encoded_texts, mapping

    def predict_proba(self, input_text_sequences, T=10, output_mean_probabilities=True):
        """MC-dropout class probabilities, shape (n_samples, 2).

        Runs T stochastic forward passes with dropout unlocked and averages
        them; column 0 is P(negative), column 1 is P(positive).
        """
        pad_seq = input_text_sequences  # pad_sequences(input_text_sequences, maxlen=self.max_padding)
        pad_seq = np.array(pad_seq)
        val_dataset = Dataset_single(pad_seq)
        val_dataset = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)
        w = []
        for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total=len(val_dataset)):
            features = features.to(self.device)
            collection_of_preds = []
            self.model.eval()  ## Immutable during predictions.
            self.model.apply(set_dropout_to_train)  ## Unlock Dropout layers.
            for _ in range(T):
                prediction = self.model(features).detach().cpu().numpy()
                collection_of_preds.append(prediction)
            # (batch, T) matrix of the T stochastic passes for this batch.
            p = np.matrix(collection_of_preds).T
            w.append(p)
        w = np.concatenate(w, axis=0)
        assert w.shape[0] == pad_seq.shape[0]
        assert w.shape[1] == T
        MC_pred = w.reshape(pad_seq.shape[0], T)
        pred = pd.DataFrame(MC_pred)
        MC_pred_positive = pred.mean(axis=1).values
        MC_pred_negative = 1 - MC_pred_positive
        MC_pred = np.vstack((MC_pred_negative, MC_pred_positive)).T
        self.model.train()
        return MC_pred.astype(np.float64)

    def predict(self, input_text_sequences, T=100, output_mean_probabilities=True):
        """MC-dropout positive-class probabilities.

        Returns the per-sample mean over T stochastic passes (shape
        (n_samples,)), or the raw (n_samples, T) matrix when
        ``output_mean_probabilities`` is False.
        """
        pad_seq = input_text_sequences  # pad_sequences(input_text_sequences, maxlen=self.max_padding)
        pad_seq = np.array(pad_seq)
        val_dataset = Dataset_single(pad_seq)
        val_dataset = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)
        w = []
        for i, (features, labels) in tqdm.tqdm(enumerate(val_dataset), total=len(val_dataset)):
            features = features.to(self.device)
            collection_of_preds = []
            self.model.eval()  ## Immutable during predictions.
            self.model.apply(set_dropout_to_train)  ## Unlock Dropout layers.
            for _ in range(T):
                prediction = self.model(features).detach().cpu().numpy()
                collection_of_preds.append(prediction)
            p = np.matrix(collection_of_preds).T
            w.append(p)
        w = np.concatenate(w, axis=0)
        assert w.shape[0] == pad_seq.shape[0]
        assert w.shape[1] == T
        if output_mean_probabilities:
            MC_pred = w.reshape(pad_seq.shape[0], T)
            pred = pd.DataFrame(MC_pred)
            MC_pred = pred.mean(axis=1).values
        else:
            MC_pred = w.reshape(pad_seq.shape[0], T)
        self.model.train()
        return MC_pred

    def ece_score(self, probab_pred, real_y, mbin=3, threshold=0.5):
        """Expected calibration error over consecutive bins of size *mbin*.

        Predictions are binarised at *threshold*; per bin, the gap between
        accuracy and mean predicted label is weighted by bin size.
        """
        all_vals = len(real_y)
        bin_perf = []
        current_bin = 0
        predictions = probab_pred.copy()
        predictions[predictions >= threshold] = 1
        predictions[predictions < threshold] = 0
        reals_internal = []
        predictions_internal = []
        ## compute bins (last one is extended with the remainder)
        intercept_bins = [x for x in range(1, all_vals) if x % mbin == 0]
        remainder = all_vals % mbin
        if len(intercept_bins) == 0:
            intercept_bins = [all_vals]
        intercept_bins[-1] += remainder
        intercept_index = 0
        for j in range(all_vals):
            if j == intercept_bins[intercept_index] and j > 0:
                if intercept_index < len(intercept_bins) - 1:
                    intercept_index += 1
                current_bin += 1
                equals = np.where(np.array(reals_internal) == np.array(predictions_internal))
                # BUG FIX: np.where returns a tuple of index arrays, so
                # len(equals) was always 1; count the matching indices instead.
                acc_bin = len(equals[0]) / len(predictions_internal)
                conf_bin = np.mean(np.array(predictions_internal))
                bin_perf.append([current_bin, acc_bin, conf_bin, len(reals_internal)])
                reals_internal = [real_y[j]]
                predictions_internal = [predictions[j]]
            else:
                reals_internal.append(real_y[j])
                predictions_internal.append(predictions[j])
        # BUG FIX: flush the final bin, which the loop above never closed, so
        # the trailing samples also contribute to the score.
        if predictions_internal:
            current_bin += 1
            equals = np.where(np.array(reals_internal) == np.array(predictions_internal))
            acc_bin = len(equals[0]) / len(predictions_internal)
            conf_bin = np.mean(np.array(predictions_internal))
            bin_perf.append([current_bin, acc_bin, conf_bin, len(reals_internal)])
        ece_score_final = 0
        for bins in bin_perf:
            bin_size = bins[3]
            total = len(probab_pred)
            partial = (bin_size / total) * np.abs(bins[1] - bins[2])
            ece_score_final += partial
        return ece_score_final

    def fit(self, input_text_sequences, targets, val_percentage=0.2, adaptive_threshold=True, validation_metric="precision"):
        """
        The main fit method. Given an ordered set of documents, this train the architecture
        along with intermediary, validation set-based calibration. The validation
        percentage is specified with
        :param input_text_sequences: inputs
        :param targets: target vector
        :val_percentage: percentage used for stopping + calibration assessment
        """
        ## generate stratified split for validation
        already_traversed = set()
        total_val = int(val_percentage * len(targets))
        validation_indices = []
        training_indices = []
        trigger = False
        vnum = int(input_text_sequences.shape[0] * val_percentage)
        # BUG FIX: carve the validation slice out *before* truncating the
        # training data. Previously the data was truncated first and the
        # validation set was then taken from the remaining training rows, so
        # validation overlapped training and the stopping/calibration signal
        # was contaminated.
        val_sequences = input_text_sequences[:vnum]
        val_targets = targets[:vnum]
        input_text_sequences = input_text_sequences[vnum:]
        targets = targets[vnum:]
        self.validation_index = vnum
        train_dataset = Dataset_single(input_text_sequences, targets)
        train_dataset = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)
        val_dataset = Dataset_single(val_sequences, val_targets)
        val_dataset = DataLoader(val_dataset, batch_size=1, shuffle=False)
        self.validation_loader = val_dataset  ## this is used for temperature-based calibration
        self.loss = torch.nn.BCELoss()
        self.model = Encoder(vocab_size=self.vocab_size, d_model=self.d_model, N=self.N, heads=self.attention_heads)
        self.model.train()
        self.optimizer = torch.optim.Adamax(self.model.parameters(), lr=self.learning_rate)
        self.num_params = sum(p.numel() for p in self.model.parameters())
        logging.info("Number of parameters {}".format(self.num_params))
        current_loss = 0
        loss = 1
        stopping_iteration = 0
        amax = 0  # best validation score seen so far (any epoch/threshold)
        stopping = 0  # epochs without improvement
        top_state_dict = None
        g_amax = 0
        for epoch in range(self.num_epochs):
            if stopping >= self.stopping_crit:
                logging.info("Stopping ..")
                break
            # here we put all the losses
            losses_per_batch = []
            self.model.train()
            for i, (features, labels) in tqdm.tqdm(enumerate(train_dataset), total=len(train_dataset)):
                # defining the input
                features = features.to(self.device)
                labels = labels.to(self.device)
                self.model.to(self.device)
                outputs = self.model(features)
                # NOTE(review): labels are moved to CPU here; this matches the
                # Encoder allocating its outputs on CPU — confirm if the model
                # is ever made fully CUDA-resident.
                loss = self.loss(outputs, labels.view(-1).cpu().float())
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses_per_batch.append(float(loss))
            # Validation with MC-dropout mean probabilities.
            means_pred = self.predict(val_sequences, T=30)
            assert len(means_pred) == len(val_targets)
            val_acc = 0
            if adaptive_threshold:
                # Sweep decision thresholds and keep the best-scoring one.
                for threshold in np.arange(0.1, 0.9, 0.0005):
                    ## Copy not to overwrite
                    means = means_pred.copy()
                    means[means >= threshold] = 1
                    means[means < threshold] = 0
                    if validation_metric == "accuracy":
                        acc = metrics.accuracy_score(val_targets, means)
                    else:
                        acc = metrics.precision_score(val_targets, means) * metrics.accuracy_score(val_targets, means)
                    if val_acc < acc:
                        val_acc = acc
                    if acc > amax:
                        amax = acc
                        self.probability_threshold = threshold
                        self.threshold_perf_tuples.append([amax, threshold])
                        top_state_dict = self.model.state_dict()
                        logging.info("New top Score: {}, thr: {}".format(amax, threshold))
            else:
                threshold = 0.5
                means = means_pred.copy()
                means[means >= threshold] = 1
                means[means < threshold] = 0
                acc = metrics.accuracy_score(val_targets, means)
                if val_acc < acc:
                    val_acc = acc
                if acc > amax:
                    amax = acc
                    self.probability_threshold = threshold
                    self.threshold_perf_tuples.append([amax, threshold])
                    top_state_dict = self.model.state_dict()
                    logging.info("New top Acc: {}, thr: {}".format(amax, threshold))
            # No new best this epoch -> count towards the stopping criterion.
            if amax > val_acc:
                stopping += 1
            mean_loss = np.mean(losses_per_batch)
            logging.info("epoch {}, mean loss per batch {}, threshold: {}, MaxScore: {}".format(epoch, mean_loss, np.round(self.probability_threshold, 2), amax))
        ## revert to the top-performing parameter setting.
        self.model.load_state_dict(top_state_dict)
        # Reliability diagram of the final validation predictions.
        # NOTE(review): self.image_folder defaults to None and must be set
        # before fit(), otherwise this path concatenation raises.
        fop, mpv = calibration_curve(val_targets, means_pred, n_bins=10)
        plt.plot([0, 1], [0, 1], linestyle='--', color="black")
        plt.plot(mpv, fop, marker='.', color="red")
        plt.xlabel("Mean prediction value")
        plt.ylabel("Fraction of positives")
        plt.savefig(self.image_folder + "/training_cal_{}_{}_visualization.pdf".format(self.num_epochs, adaptive_threshold), dpi=300)
        plt.clf()
        # Return model
        return self.model
if __name__ == "__main__":
    # Smoke-test the BAN classifier on the 20-newsgroups corpus with 5-fold CV.
    import pandas as pd
    from sklearn.utils import shuffle
    from sklearn import metrics
    import numpy as np
    from sklearn.model_selection import StratifiedKFold
    import os
    from sklearn.datasets import fetch_20newsgroups
    newsgroups_train = fetch_20newsgroups(subset='train')  ## some random data
    X = newsgroups_train['data']
    Y = newsgroups_train['target']
    print(len(X))
    skf = StratifiedKFold(n_splits=5)
    final_scores = []
    heads = 2
    max_padding = 200
    learning_rate = 0.01
    num_epochs = 300
    num_layers = 2
    # NOTE(review): 20-newsgroups targets are multiclass (0..19) while BAN is a
    # binary (BCELoss) classifier — a binary dataset would be a better demo.
    for train_index, test_index in skf.split(X, Y):
        nnet = BAN(heads=heads, max_padding=max_padding, learning_rate=learning_rate, num_epochs=num_epochs, batch_size=8, N=num_layers, stopping_crit=20)
        total_padded = nnet.pad_sequence(X)
        x_train = total_padded[train_index]
        x_test = total_padded[test_index]
        y_train = Y[train_index]
        y_test = Y[test_index]
        nnet.fit(x_train, y_train, adaptive_threshold=False, val_percentage=0.1)
        predictions = nnet.predict(x_test)
        # BUG FIX: predict() returns mean probabilities; binarise with the
        # calibrated threshold before scoring, and pass y_true first as
        # sklearn's metrics API requires.
        decision_threshold = nnet.probability_threshold if nnet.probability_threshold is not None else 0.5
        binary_predictions = (np.asarray(predictions) >= decision_threshold).astype(int)
        score = metrics.f1_score(y_test, binary_predictions)
        final_scores.append(score)
    mean_per = np.mean(final_scores)
    std_per = np.std(final_scores)
    # BUG FIX: the original string lacked the f-prefix and printed the
    # placeholder braces verbatim.
    print(f"Final performance (F1): {mean_per} +- {std_per}")
| 22,042 | 7,080 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'jriver.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_jriverDspDialog(object):
    """pyuic5-generated UI scaffolding for the JRiver Media Center DSP editor dialog.

    Generated from 'jriver.ui' by PyQt5 UI code generator 5.15.2; manual edits
    (including these comments) are lost when pyuic5 is re-run.
    """

    def setupUi(self, jriverDspDialog):
        """Build the widget tree, layouts, signal wiring and tab order on *jriverDspDialog*."""
        jriverDspDialog.setObjectName("jriverDspDialog")
        jriverDspDialog.resize(1703, 858)
        self.dialogLayout = QtWidgets.QGridLayout(jriverDspDialog)
        self.dialogLayout.setObjectName("dialogLayout")
        self.mainLayout = QtWidgets.QVBoxLayout()
        self.mainLayout.setObjectName("mainLayout")
        # Config row 1: new/open/load-zone plus filename display and save/upload controls.
        self.configLayout = QtWidgets.QGridLayout()
        self.configLayout.setObjectName("configLayout")
        self.configRow1Layout = QtWidgets.QHBoxLayout()
        self.configRow1Layout.setObjectName("configRow1Layout")
        self.newConfigButton = QtWidgets.QToolButton(jriverDspDialog)
        self.newConfigButton.setObjectName("newConfigButton")
        self.configRow1Layout.addWidget(self.newConfigButton)
        self.findFilenameButton = QtWidgets.QToolButton(jriverDspDialog)
        self.findFilenameButton.setObjectName("findFilenameButton")
        self.configRow1Layout.addWidget(self.findFilenameButton)
        self.loadZoneButton = QtWidgets.QToolButton(jriverDspDialog)
        self.loadZoneButton.setObjectName("loadZoneButton")
        self.configRow1Layout.addWidget(self.loadZoneButton)
        self.filename = QtWidgets.QLineEdit(jriverDspDialog)
        self.filename.setReadOnly(True)
        self.filename.setObjectName("filename")
        self.configRow1Layout.addWidget(self.filename)
        self.saveButton = QtWidgets.QToolButton(jriverDspDialog)
        self.saveButton.setEnabled(False)
        self.saveButton.setObjectName("saveButton")
        self.configRow1Layout.addWidget(self.saveButton)
        self.saveAsButton = QtWidgets.QToolButton(jriverDspDialog)
        self.saveAsButton.setEnabled(False)
        self.saveAsButton.setObjectName("saveAsButton")
        self.configRow1Layout.addWidget(self.saveAsButton)
        self.uploadButton = QtWidgets.QToolButton(jriverDspDialog)
        self.uploadButton.setEnabled(False)
        self.uploadButton.setObjectName("uploadButton")
        self.configRow1Layout.addWidget(self.uploadButton)
        self.configLayout.addLayout(self.configRow1Layout, 0, 0, 1, 1)
        # Config row 2: history navigation, output format display and block selector.
        self.configRow2Layout = QtWidgets.QHBoxLayout()
        self.configRow2Layout.setObjectName("configRow2Layout")
        self.backButton = QtWidgets.QToolButton(jriverDspDialog)
        self.backButton.setEnabled(False)
        self.backButton.setObjectName("backButton")
        self.configRow2Layout.addWidget(self.backButton)
        self.forwardButton = QtWidgets.QToolButton(jriverDspDialog)
        self.forwardButton.setEnabled(False)
        self.forwardButton.setObjectName("forwardButton")
        self.configRow2Layout.addWidget(self.forwardButton)
        self.outputFormat = QtWidgets.QLineEdit(jriverDspDialog)
        self.outputFormat.setReadOnly(True)
        self.outputFormat.setObjectName("outputFormat")
        self.configRow2Layout.addWidget(self.outputFormat)
        self.blockSelector = QtWidgets.QComboBox(jriverDspDialog)
        self.blockSelector.setObjectName("blockSelector")
        self.configRow2Layout.addWidget(self.blockSelector)
        self.configRow2Layout.setStretch(2, 1)
        self.configRow2Layout.setStretch(3, 1)
        self.configLayout.addLayout(self.configRow2Layout, 1, 0, 1, 1)
        self.mainLayout.addLayout(self.configLayout)
        # Selector area: filter re-ordering buttons (left column).
        self.selectorLayout = QtWidgets.QGridLayout()
        self.selectorLayout.setObjectName("selectorLayout")
        self.moveButtonsLayout = QtWidgets.QVBoxLayout()
        self.moveButtonsLayout.setObjectName("moveButtonsLayout")
        self.moveTopButton = QtWidgets.QToolButton(jriverDspDialog)
        self.moveTopButton.setEnabled(False)
        self.moveTopButton.setObjectName("moveTopButton")
        self.moveButtonsLayout.addWidget(self.moveTopButton)
        self.moveUpButton = QtWidgets.QToolButton(jriverDspDialog)
        self.moveUpButton.setEnabled(False)
        self.moveUpButton.setObjectName("moveUpButton")
        self.moveButtonsLayout.addWidget(self.moveUpButton)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.moveButtonsLayout.addItem(spacerItem)
        self.moveDownButton = QtWidgets.QToolButton(jriverDspDialog)
        self.moveDownButton.setEnabled(False)
        self.moveDownButton.setObjectName("moveDownButton")
        self.moveButtonsLayout.addWidget(self.moveDownButton)
        self.moveBottomButton = QtWidgets.QToolButton(jriverDspDialog)
        self.moveBottomButton.setEnabled(False)
        self.moveBottomButton.setObjectName("moveBottomButton")
        self.moveButtonsLayout.addWidget(self.moveBottomButton)
        self.selectorLayout.addLayout(self.moveButtonsLayout, 0, 0, 1, 1)
        # Filter CRUD buttons (right column).
        self.filterButtonsLayout = QtWidgets.QVBoxLayout()
        self.filterButtonsLayout.setObjectName("filterButtonsLayout")
        self.addFilterButton = QtWidgets.QToolButton(jriverDspDialog)
        self.addFilterButton.setEnabled(False)
        self.addFilterButton.setPopupMode(QtWidgets.QToolButton.InstantPopup)
        self.addFilterButton.setObjectName("addFilterButton")
        self.filterButtonsLayout.addWidget(self.addFilterButton)
        self.editFilterButton = QtWidgets.QToolButton(jriverDspDialog)
        self.editFilterButton.setEnabled(False)
        self.editFilterButton.setObjectName("editFilterButton")
        self.filterButtonsLayout.addWidget(self.editFilterButton)
        self.deleteFilterButton = QtWidgets.QToolButton(jriverDspDialog)
        self.deleteFilterButton.setEnabled(False)
        self.deleteFilterButton.setObjectName("deleteFilterButton")
        self.filterButtonsLayout.addWidget(self.deleteFilterButton)
        self.clearFiltersButton = QtWidgets.QToolButton(jriverDspDialog)
        self.clearFiltersButton.setEnabled(False)
        self.clearFiltersButton.setObjectName("clearFiltersButton")
        self.filterButtonsLayout.addWidget(self.clearFiltersButton)
        self.splitFilterButton = QtWidgets.QToolButton(jriverDspDialog)
        self.splitFilterButton.setEnabled(False)
        self.splitFilterButton.setObjectName("splitFilterButton")
        self.filterButtonsLayout.addWidget(self.splitFilterButton)
        self.mergeFilterButton = QtWidgets.QToolButton(jriverDspDialog)
        self.mergeFilterButton.setEnabled(False)
        self.mergeFilterButton.setObjectName("mergeFilterButton")
        self.filterButtonsLayout.addWidget(self.mergeFilterButton)
        spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.filterButtonsLayout.addItem(spacerItem1)
        self.selectorLayout.addLayout(self.filterButtonsLayout, 0, 2, 1, 1)
        # Channel and filter list widgets.
        self.channelList = QtWidgets.QListWidget(jriverDspDialog)
        self.channelList.setProperty("showDropIndicator", False)
        self.channelList.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
        self.channelList.setObjectName("channelList")
        self.selectorLayout.addWidget(self.channelList, 1, 0, 1, 3)
        self.filterList = QtWidgets.QListWidget(jriverDspDialog)
        self.filterList.setProperty("showDropIndicator", False)
        self.filterList.setDragDropMode(QtWidgets.QAbstractItemView.NoDragDrop)
        self.filterList.setDefaultDropAction(QtCore.Qt.IgnoreAction)
        self.filterList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
        self.filterList.setObjectName("filterList")
        self.selectorLayout.addWidget(self.filterList, 0, 1, 1, 1)
        self.selectorLayout.setRowStretch(0, 1)
        self.mainLayout.addLayout(self.selectorLayout)
        self.dialogLayout.addLayout(self.mainLayout, 0, 0, 3, 1)
        # Pipeline controls above the SVG pipeline view.
        self.pipelineControlLayout = QtWidgets.QHBoxLayout()
        self.pipelineControlLayout.setObjectName("pipelineControlLayout")
        self.showDotButton = QtWidgets.QToolButton(jriverDspDialog)
        self.showDotButton.setEnabled(False)
        self.showDotButton.setObjectName("showDotButton")
        self.pipelineControlLayout.addWidget(self.showDotButton)
        self.direction = QtWidgets.QCheckBox(jriverDspDialog)
        self.direction.setEnabled(False)
        self.direction.setObjectName("direction")
        self.pipelineControlLayout.addWidget(self.direction)
        self.dialogLayout.addLayout(self.pipelineControlLayout, 0, 1, 1, 1)
        # Right-hand splitter: SVG pipeline view over the preview chart + chart controls.
        self.viewSplitter = QtWidgets.QSplitter(jriverDspDialog)
        self.viewSplitter.setLineWidth(1)
        self.viewSplitter.setOrientation(QtCore.Qt.Vertical)
        self.viewSplitter.setObjectName("viewSplitter")
        self.pipelineView = SvgView(self.viewSplitter)
        self.pipelineView.setObjectName("pipelineView")
        self.chartWrapper = QtWidgets.QWidget(self.viewSplitter)
        self.chartWrapper.setObjectName("chartWrapper")
        self.chartLayout = QtWidgets.QHBoxLayout(self.chartWrapper)
        self.chartLayout.setContentsMargins(0, 0, 0, 0)
        self.chartLayout.setObjectName("chartLayout")
        self.previewChart = MplWidget(self.chartWrapper)
        self.previewChart.setObjectName("previewChart")
        self.chartLayout.addWidget(self.previewChart)
        self.chartControlLayout = QtWidgets.QVBoxLayout()
        self.chartControlLayout.setObjectName("chartControlLayout")
        self.limitsButton = QtWidgets.QToolButton(self.chartWrapper)
        self.limitsButton.setObjectName("limitsButton")
        self.chartControlLayout.addWidget(self.limitsButton)
        self.fullRangeButton = QtWidgets.QToolButton(self.chartWrapper)
        self.fullRangeButton.setObjectName("fullRangeButton")
        self.chartControlLayout.addWidget(self.fullRangeButton)
        self.subOnlyButton = QtWidgets.QToolButton(self.chartWrapper)
        self.subOnlyButton.setObjectName("subOnlyButton")
        self.chartControlLayout.addWidget(self.subOnlyButton)
        self.showPhase = QtWidgets.QToolButton(self.chartWrapper)
        self.showPhase.setCheckable(True)
        self.showPhase.setObjectName("showPhase")
        self.chartControlLayout.addWidget(self.showPhase)
        self.showImpulseButton = QtWidgets.QToolButton(self.chartWrapper)
        self.showImpulseButton.setObjectName("showImpulseButton")
        self.chartControlLayout.addWidget(self.showImpulseButton)
        spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.chartControlLayout.addItem(spacerItem2)
        self.chartLayout.addLayout(self.chartControlLayout)
        self.chartLayout.setStretch(0, 1)
        self.dialogLayout.addWidget(self.viewSplitter, 1, 1, 1, 1)
        # Close/Reset button box spanning the bottom of the dialog.
        self.buttonBox = QtWidgets.QDialogButtonBox(jriverDspDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Close|QtWidgets.QDialogButtonBox.Reset)
        self.buttonBox.setCenterButtons(False)
        self.buttonBox.setObjectName("buttonBox")
        self.dialogLayout.addWidget(self.buttonBox, 3, 0, 1, 2)
        self.dialogLayout.setColumnStretch(0, 1)
        self.dialogLayout.setColumnStretch(1, 3)
        self.retranslateUi(jriverDspDialog)
        # Wire UI signals to the dialog's slot methods.
        self.buttonBox.accepted.connect(jriverDspDialog.accept)
        self.buttonBox.rejected.connect(jriverDspDialog.reject)
        self.findFilenameButton.clicked.connect(jriverDspDialog.find_dsp_file)
        self.showPhase.toggled['bool'].connect(jriverDspDialog.show_phase_response)
        self.subOnlyButton.clicked.connect(jriverDspDialog.show_sub_only)
        self.fullRangeButton.clicked.connect(jriverDspDialog.show_full_range)
        self.limitsButton.clicked.connect(jriverDspDialog.show_limits)
        self.blockSelector.currentTextChanged['QString'].connect(jriverDspDialog.show_filters)
        self.channelList.itemSelectionChanged.connect(jriverDspDialog.redraw)
        self.saveButton.clicked.connect(jriverDspDialog.save_dsp)
        self.saveAsButton.clicked.connect(jriverDspDialog.save_as_dsp)
        self.deleteFilterButton.clicked.connect(jriverDspDialog.delete_filter)
        self.clearFiltersButton.clicked.connect(jriverDspDialog.clear_filters)
        self.filterList.itemSelectionChanged.connect(jriverDspDialog.on_filter_select)
        self.filterList.itemDoubleClicked['QListWidgetItem*'].connect(jriverDspDialog.edit_filter)
        self.editFilterButton.clicked.connect(jriverDspDialog.edit_selected_filter)
        self.splitFilterButton.clicked.connect(jriverDspDialog.split_filter)
        self.mergeFilterButton.clicked.connect(jriverDspDialog.merge_filters)
        self.moveBottomButton.clicked.connect(jriverDspDialog.move_filter_to_bottom)
        self.moveDownButton.clicked.connect(jriverDspDialog.move_filter_down)
        self.moveUpButton.clicked.connect(jriverDspDialog.move_filter_up)
        self.moveTopButton.clicked.connect(jriverDspDialog.move_filter_to_top)
        self.newConfigButton.clicked.connect(jriverDspDialog.create_new_config)
        self.showImpulseButton.clicked.connect(jriverDspDialog.show_impulse)
        QtCore.QMetaObject.connectSlotsByName(jriverDspDialog)
        # Keyboard tab order through the main controls.
        jriverDspDialog.setTabOrder(self.findFilenameButton, self.limitsButton)
        jriverDspDialog.setTabOrder(self.limitsButton, self.fullRangeButton)
        jriverDspDialog.setTabOrder(self.fullRangeButton, self.subOnlyButton)
        jriverDspDialog.setTabOrder(self.subOnlyButton, self.showPhase)
        jriverDspDialog.setTabOrder(self.showPhase, self.showDotButton)
        jriverDspDialog.setTabOrder(self.showDotButton, self.direction)
        jriverDspDialog.setTabOrder(self.direction, self.pipelineView)
        jriverDspDialog.setTabOrder(self.pipelineView, self.previewChart)
        jriverDspDialog.setTabOrder(self.previewChart, self.filterList)
        jriverDspDialog.setTabOrder(self.filterList, self.channelList)

    def retranslateUi(self, jriverDspDialog):
        """Apply translatable texts and shortcuts; real labels/icons are set elsewhere at runtime."""
        _translate = QtCore.QCoreApplication.translate
        jriverDspDialog.setWindowTitle(_translate("jriverDspDialog", "JRiver Media Center DSP Editor"))
        self.newConfigButton.setText(_translate("jriverDspDialog", "..."))
        self.findFilenameButton.setText(_translate("jriverDspDialog", "..."))
        self.loadZoneButton.setText(_translate("jriverDspDialog", "..."))
        self.saveButton.setText(_translate("jriverDspDialog", "..."))
        self.saveAsButton.setText(_translate("jriverDspDialog", "..."))
        self.uploadButton.setText(_translate("jriverDspDialog", "..."))
        self.backButton.setText(_translate("jriverDspDialog", "..."))
        self.forwardButton.setText(_translate("jriverDspDialog", "..."))
        self.moveTopButton.setText(_translate("jriverDspDialog", "..."))
        self.moveTopButton.setShortcut(_translate("jriverDspDialog", "Shift+Up"))
        self.moveUpButton.setText(_translate("jriverDspDialog", "..."))
        self.moveUpButton.setShortcut(_translate("jriverDspDialog", "Ctrl+Up"))
        self.moveDownButton.setText(_translate("jriverDspDialog", "..."))
        self.moveDownButton.setShortcut(_translate("jriverDspDialog", "Ctrl+Down"))
        self.moveBottomButton.setText(_translate("jriverDspDialog", "..."))
        self.moveBottomButton.setShortcut(_translate("jriverDspDialog", "Shift+Down"))
        self.addFilterButton.setText(_translate("jriverDspDialog", "..."))
        self.addFilterButton.setShortcut(_translate("jriverDspDialog", "="))
        self.editFilterButton.setText(_translate("jriverDspDialog", "..."))
        self.editFilterButton.setShortcut(_translate("jriverDspDialog", "E"))
        self.deleteFilterButton.setText(_translate("jriverDspDialog", "..."))
        self.deleteFilterButton.setShortcut(_translate("jriverDspDialog", "-"))
        self.clearFiltersButton.setText(_translate("jriverDspDialog", "..."))
        self.clearFiltersButton.setShortcut(_translate("jriverDspDialog", "X"))
        self.splitFilterButton.setText(_translate("jriverDspDialog", "..."))
        self.mergeFilterButton.setText(_translate("jriverDspDialog", "..."))
        self.showDotButton.setText(_translate("jriverDspDialog", "..."))
        self.direction.setText(_translate("jriverDspDialog", "Vertical?"))
        self.limitsButton.setText(_translate("jriverDspDialog", "..."))
        self.fullRangeButton.setText(_translate("jriverDspDialog", "..."))
        self.subOnlyButton.setText(_translate("jriverDspDialog", "..."))
        self.showPhase.setText(_translate("jriverDspDialog", "..."))
        self.showImpulseButton.setText(_translate("jriverDspDialog", "..."))
from mpl import MplWidget
from svg import SvgView
| 16,974 | 5,109 |
from src.sensors.door_block_sensor import DoorBlockSensor
from src.sensors.door_state_sensor import DoorStateSensor
from src.sensors.light_sensor import LightSensor
from src.sensors.movement_sensor import MovementSensor
from src.sensors.smoke_sensor import SmokeSensor
from src.sensors.weight_sensor import WeightSensor
| 320 | 104 |
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig = plt.figure(figsize=(7, 2))
ax = plt.subplot()

# One period of cos/sin sampled on 256 points.
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
C, S = np.cos(X), np.sin(X)

# markevery=[-1] draws a marker on the last plotted point only, so the
# animation shows a dot travelling along the tip of each growing curve.
(line1,) = ax.plot(X, C, marker="o", markevery=[-1], markeredgecolor="white")
(line2,) = ax.plot(X, S, marker="o", markevery=[-1], markeredgecolor="white")

def update(frame):
    # Reveal the first `frame` samples of each curve.
    line1.set_data(X[:frame], C[:frame])
    line2.set_data(X[:frame], S[:frame])

plt.tight_layout()
ani = animation.FuncAnimation(fig, update, interval=10)
# NOTE(review): savefig captures only the figure's current (static) state;
# exporting the animation itself would need ani.save(...) — confirm intent.
plt.savefig("../../figures/animation/sine-cosine.pdf")
plt.show()
| 888 | 298 |
import yaml
import socket
import subprocess, ctypes, os, sys
from subprocess import Popen, DEVNULL
def read_yaml(file_path):
    """Load and return the parsed contents of the YAML file at *file_path*."""
    with open(file_path, "r") as handle:
        return yaml.safe_load(handle)
def check_admin():
    """ Force to start application with admin rights """
    try:
        # ctypes.windll only exists on Windows; AttributeError means we are
        # not on Windows (or the shell32 query is unavailable).
        isAdmin = ctypes.windll.shell32.IsUserAnAdmin()
    except AttributeError:
        isAdmin = False
    if not isAdmin:
        # Relaunch this script elevated via the UAC "runas" verb.
        # NOTE(review): the current non-elevated process keeps running after
        # spawning the elevated copy — a sys.exit() here may be intended.
        ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
def check_for_firewall_rule(firewall_rule_name):
    """Return True if a Windows Firewall rule with the given name exists.

    Runs ``netsh advfirewall firewall show rule name="<rule>"``; netsh exits
    with 0 when the rule is found and non-zero otherwise.
    """
    print("Checking to see if firewall rule exists")
    # BUG FIX: netsh requires the `name=` parameter — without it the command
    # always fails, so every rule appeared to be missing. Quoting also guards
    # against rule names containing spaces.
    x = subprocess.call(
        f'netsh advfirewall firewall show rule name="{firewall_rule_name}"',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    if x == 0:
        print("Rule exists.")
        return True
    else:
        print("Rule does not exist.")
        return False
def add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip):
    """ Add Rule if the rule doesn't already exist. Delete the rule if the rule exists. """
    if firewall_exists:
        if state == 1:
            # Recreate the rule so it targets the current chat IP.
            delete_rule(firewall_rule_name)
            add_rule(firewall_rule_name, ip)
        elif state == 0:
            delete_rule(firewall_rule_name)
    else:
        if state == 1:
            add_rule(firewall_rule_name, ip)
        elif state == 0:
            print("Firewall rule does not exist, and `block chat` is set to disabled")
def delete_rule(firewall_rule_name):
    """Delete the named Windows Firewall rule (best-effort; netsh output suppressed)."""
    # ROBUSTNESS: quote the rule name so names containing spaces are handled.
    subprocess.call(
        f'netsh advfirewall firewall delete rule name="{firewall_rule_name}"',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    print(f"Rule '{firewall_rule_name}' deleted")
def add_rule(firewall_rule_name, ip):
    """ Add rule to Windows Firewall """
    # ROBUSTNESS: quote the rule name so names containing spaces are handled.
    # Blocks outbound TCP traffic to the given remote IP.
    subprocess.call(
        f'netsh advfirewall firewall add rule name="{firewall_rule_name}" dir=out action=block remoteip={ip} protocol=TCP',
        shell=True,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    print(f"Current League of Legends Chat IP Address: {ip}. \nRule {firewall_rule_name} added. ")
if __name__ == '__main__':
    # Load local settings.
    # NOTE(review): "\c" is not an escape sequence so this literal works, but a
    # raw string (r".\config.yaml") would be safer against future edits.
    config = read_yaml(".\config.yaml")
    state = config['config']['block_chat']  # 1 = block chat, 0 = unblock
    firewall_rule_name = config['config']['firewall_rule_name']
    lol_config_file = config['config']['dir']
    region = config['config']['region']
    # Resolve the chat host for the configured region from the LoL config file.
    lol_config = read_yaml(lol_config_file)
    host = lol_config['region_data'][region]['servers']['chat']['chat_host']
    ip = socket.gethostbyname(host)
    # NOTE(review): check_admin() relaunches elevated but does not exit this
    # process, so the netsh calls below may still run without admin rights.
    check_admin()
    firewall_exists = check_for_firewall_rule(firewall_rule_name)
    add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip)
import __init__
from Kite.database import Database
from Kite import config
from Kite import utils
import jieba
import pkuseg
import logging
# Configure root logging once at import time for all tokenization runs.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')
class Tokenization(object):
    """Chinese word segmentation (jieba or pkuseg) with stop-word filtering,
    plus helpers that map news articles to the stock codes they mention."""

    def __init__(self, import_module="jieba", user_dict=None, chn_stop_words_dir=None):
        #self.database = Database().conn[config.DATABASE_NAME] #.get_collection(config.COLLECTION_NAME_CNSTOCK)
        self.database = Database()
        self.import_module = import_module  # segmentation backend: "jieba" or "pkuseg"
        self.user_dict = user_dict  # path to the custom finance dictionary file
        if self.user_dict:
            # Refresh the user dictionary with current stock names before first use.
            self.update_user_dict(self.user_dict)
        if chn_stop_words_dir:
            self.stop_words_list = utils.get_chn_stop_words(chn_stop_words_dir)
        else:
            self.stop_words_list = list()

    def update_user_dict(self, old_user_dict_dir, new_user_dict_dir=None):
        """Merge stock names from the DB into the user dictionary file.

        Writes back to *old_user_dict_dir* unless *new_user_dict_dir* is given.
        """
        # Add missing (or new) stock names, financial neologisms, etc. to the
        # finance dictionary.
        word_list = []
        with open(old_user_dict_dir, "r", encoding="utf-8") as file:
            for row in file:
                # Strip the trailing newline from each dictionary entry.
                word_list.append(row.split("\n")[0])
        name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
                                              config.COLLECTION_NAME_STOCK_BASIC_INFO,
                                              keys=["name", "code"])
        new_words_list = list(set(name_code_df["name"].tolist()))
        for word in new_words_list:
            if word not in word_list:
                word_list.append(word)
        new_user_dict_dir = old_user_dict_dir if not new_user_dict_dir else new_user_dict_dir
        with open(new_user_dict_dir, "w", encoding="utf-8") as file:
            for word in word_list:
                file.write(word + "\n")

    def cut_words(self, text):
        """Segment *text*; return filtered tokens, or False if segmentation failed.

        Tokens must not be stop words, must contain Chinese characters and be
        longer than one character.
        """
        outstr = list()
        sentence_seged = None
        if self.import_module == "jieba":
            if self.user_dict:
                jieba.load_userdict(self.user_dict)
            sentence_seged = list(jieba.cut(text))
        elif self.import_module == "pkuseg":
            seg = pkuseg.pkuseg(user_dict=self.user_dict)  # load the custom dictionary
            sentence_seged = seg.cut(text)  # segment the text
        if sentence_seged:
            for word in sentence_seged:
                if word not in self.stop_words_list \
                        and word != "\t" \
                        and word != " " \
                        and utils.is_contain_chn(word)\
                        and len(word) > 1:
                    outstr.append(word)
            return outstr
        else:
            return False

    def find_relevant_stock_codes_in_article(self, article, stock_name_code_dict):
        """Return the distinct stock codes whose names appear in *article*."""
        stock_codes_set = list()
        cut_words_list = self.cut_words(article)
        if cut_words_list:
            for word in cut_words_list:
                # Tokens that are not stock names simply miss the dict lookup.
                try:
                    stock_codes_set.append(stock_name_code_dict[word])
                except Exception:
                    pass
        return list(set(stock_codes_set))

    def update_news_database_rows(self,
                                  database_name,
                                  collection_name,
                                  incremental_column_name="RelatedStockCodes"):
        """Backfill *incremental_column_name* for rows that do not have it yet."""
        name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
                                              config.COLLECTION_NAME_STOCK_BASIC_INFO,
                                              keys=["name", "code"])
        name_code_dict = dict(name_code_df.values)
        data = self.database.get_collection(database_name, collection_name).find()
        for row in data:
            # if row["Date"] > "2019-05-20 00:00:00":
            # Newly inserted rows lack the updated column while older rows may
            # already have it, so check whether the field exists before work.
            if incremental_column_name not in row.keys():
                related_stock_codes_list = self.find_relevant_stock_codes_in_article(
                    row["Article"], name_code_dict)
                self.database.update_row(database_name,
                                         collection_name,
                                         {"_id": row["_id"]},
                                         {incremental_column_name: " ".join(related_stock_codes_list)}
                                         )
                logging.info("[{} -> {} -> {}] updated {} key value ... "
                             .format(database_name, collection_name, row["Date"], incremental_column_name))
            else:
                logging.info("[{} -> {} -> {}] has already existed {} key value ... "
                             .format(database_name, collection_name, row["Date"], incremental_column_name))
if __name__ == "__main__":
    # Example usage: build a jieba-backed tokenizer with the finance user
    # dictionary and Chinese stop-word list. See cut_words() for segmenting
    # sample articles and update_news_database_rows() for backfilling the
    # related-stock-codes column of a news collection.
    tokenization = Tokenization(import_module="jieba",
                                user_dict="financedict.txt",
                                chn_stop_words_dir="chnstopwords.txt")
from app.docs import SAMPLE_OBJECT_IDS
# Flasgger/Swagger spec: GET endpoint that checks whether an e-mail address
# is already registered (duplication check).
# NOTE(review): 'type': 'str' is not a standard Swagger type name ("string"
# is) — confirm the documentation renderer accepts it.
ID_DUPLICATION_CHECK_GET = {
    'tags': ['회원가입'],
    'description': '이메일이 이미 가입되었는지를 체크(중복체크)합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '중복을 체크할 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '200': {
            'description': '중복되지 않음',
        },
        '409': {
            'description': '중복됨'
        }
    }
}
# Flasgger/Swagger spec: POST endpoint for account signup
# (e-mail + password in the JSON body).
SIGNUP_POST = {
    'tags': ['회원가입'],
    'description': '회원가입합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '이메일',
            'in': 'json',
            'type': 'str',
            'required': True
        },
        {
            'name': 'pw',
            'description': '비밀번호',
            'in': 'json',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '201': {
            'description': '회원가입 성공, 인증 이메일 발송 완료. 기본 정보 초기화 액티비티로 이동하면 됩니다. 인증 이메일의 유효 시간은 5분입니다.',
        },
        '409': {
            'description': '이메일 중복됨'
        }
    }
}
# Flasgger/Swagger spec: GET endpoint that re-sends the verification e-mail.
EMAIL_RESEND_GET = {
    'tags': ['회원가입'],
    'description': '인증 메일을 재전송합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '인증 메일을 재전송할 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        }
    ],
    'responses': {
        '200': {
            'description': '이메일 재전송 성공',
        },
        '204': {
            'description': '가입되지 않은 이메일'
        }
    }
}
# Flasgger/Swagger spec: POST endpoint that uploads a new member's basic
# profile (nickname + interest-category ids); sample ids are interpolated
# into the example text.
INITIALIZE_INFO_POST = {
    'tags': ['회원가입'],
    'description': '기본 정보를 업로드합니다.',
    'parameters': [
        {
            'name': 'email',
            'description': '기본 정보 업로드 대상 이메일',
            'in': 'path',
            'type': 'str',
            'required': True
        },
        {
            'name': 'nickname',
            'description': '닉네임',
            'in': 'json',
            'type': 'str',
            'required': True
        },
        {
            'name': 'categories',
            'description': '관심사 ID 목록 ex) ["{}"], ["{}"], ["{}"]'.format(*SAMPLE_OBJECT_IDS),
            'in': 'json',
            'type': 'list',
            'required': True
        }
    ],
    'responses': {
        '201': {
            'description': '업로드 성공',
        },
        '204': {
            'description': '가입되지 않은 이메일'
        },
        '400': {
            'description': '관심사 ID 중 존재하지 않는 관심사가 존재함'
        },
        '401': {
            'description': '이메일 인증되지 않음'
        },
        '409': {
            'description': '닉네임이 중복됨'
        }
    }
}
| 2,659 | 1,229 |
import sys
class add_path():
    """Context manager that temporarily prepends *path* to ``sys.path``.

    On entry the path is inserted at position 0 (highest import priority);
    on exit it is removed again, silently tolerating the case where
    something else has already dropped it.
    """

    def __init__(self, path):
        self.path = path

    def __enter__(self):
        sys.path.insert(0, self.path)

    def __exit__(self, exc_type, exc_value, traceback):
        # Equivalent to remove() guarded by except ValueError: only remove
        # the entry when it is still present.
        if self.path in sys.path:
            sys.path.remove(self.path)
""" Remove """
def add_test():
    """Sanity-check helper: returns a fixed marker string for import tests."""
    return "add-test-success"
| 366 | 120 |
import numpy as np
import pandas as pd
from collections import OrderedDict, Counter
import itertools
from typing import *
# A group is a (name, member-ids) pair.
Group = tuple[str, list[int]]
Groups = list[Group]


def df_groups(groups: Groups) -> pd.DataFrame:
    """Build a DataFrame from ``(name, values)`` pairs.

    Each group name becomes an index label and its list of ints becomes the
    corresponding row.

    Bug fix: the original body referenced an undefined name ``dict_groups``
    and raised ``NameError`` on every call; the mapping is now derived from
    the *groups* argument.

    :param groups: list of ``(name, values)`` tuples.
    :return: DataFrame with one row per group, indexed by group name.
    """
    dict_groups = dict(groups)
    return pd.DataFrame(dict_groups.values(), index=dict_groups.keys())
| 295 | 88 |
# my_lambdata/my_mod.py
def enlarge(n):
    """Scale the number *n* up by a factor of 100 and return the result."""
    return 100 * n
# This code would break our ability to import `enlarge` from other files
# if it ran unconditionally at import time:
# print("HELLO")
# y = int(input("Please choose a number"))
# print(y, enlarge(y))
if __name__ == "__main__":
    # only runs the code IF the script is invoked from the command line,
    # not if it is imported from another module
    print("HELLO")
    y = int(input("Please choose a number"))
    print(y, enlarge(y))
| 517 | 174 |
from allhub.response import Response
from enum import Enum
class SubjectType(Enum):
    """Allowed ``subject_type`` values for the GitHub hovercard endpoint."""

    ORGANIZATION = "organization"
    REPOSITORY = "repository"
    ISSUE = "issue"
    PULL_REQUEST = "pull_request"
    # Sentinel meaning "no subject filter"; its value is falsy on purpose
    # (hover_card relies on bool(subject_tye.value)).
    NONE = None
class UsersMixin:
    """GitHub "Users" API endpoints, mixed into the client class that
    supplies ``self.get`` / ``self.patch``."""

    def user(self, username):
        """
        Provide publicly available information about someone with a GitHub
        account.

        :param username: login of the account to look up.
        :return: transformed "User" response.
        """
        # Bug fix: the documented endpoint is GET /users/{username};
        # "/user/{username}" is not a valid GitHub API route.
        url = "/users/{username}".format(username=username)
        self.response = Response(self.get(url), "User")
        return self.response.transform()

    def auth_user(self):
        """
        Get the authenticated user.

        :return: transformed "User" response.
        """
        url = "/user"
        self.response = Response(self.get(url), "User")
        return self.response.transform()

    def update_auth_user(self, **kwargs):
        """
        Update the authenticated user's profile.

        Only the whitelisted attributes are forwarded. With no recognised
        attribute the request is skipped and ``None`` is returned.
        """
        params = []
        for attribute in (
            "name",
            "email",
            "blog",
            "company",
            "location",
            "hireable",
            "bio",
        ):
            if attribute in kwargs:
                params.append((attribute, kwargs.pop(attribute)))
        if params:
            url = "/user"
            self.response = Response(self.patch(url, params=params), "User")
            return self.response.transform()

    def hover_card(self, username, subject_tye=SubjectType.NONE, subject_id=None):
        """
        Fetch hovercard information for *username*.

        NOTE(review): the parameter name ``subject_tye`` is a typo for
        ``subject_type`` but is part of the public keyword interface, so it
        is kept for backward compatibility.

        :raises ValueError: if only one of subject type / subject id is given.
        """
        if bool(subject_tye.value) != bool(subject_id):  # Python shortcut for XOR.
            raise ValueError(
                "subject_type and subject_id both should provided or both left out"
            )
        params = []
        if subject_id and subject_tye.value:
            params = [("subject_type", subject_tye.value), ("subject_id", subject_id)]
        url = "/users/{username}/hovercard".format(username=username)
        self.response = Response(
            self.get(
                url,
                params=params,
                # The hovercard API is behind this preview media type.
                **{"Accept": "application/vnd.github.hagar-preview+json"},
            ),
            "HoverCard",
        )
        return self.response.transform()

    def users(self, since):
        """
        List all users, in order of signup, starting after the id *since*.
        """
        url = "/users"
        params = [("since", since)]
        # Bug fix: listing users is a GET endpoint; the original issued a
        # PATCH, which is presumably why the old TODO said the call
        # "is not working".
        self.response = Response(self.get(url, params=params), "Users")
        return self.response.transform()
| 2,406 | 660 |
# This file is created by generate_build_files.py. Do not edit manually.
# Internal headers and shared test utilities compiled into every test target
# registered by create_tests() below.
test_support_sources = [
    "src/crypto/aes/internal.h",
    "src/crypto/asn1/asn1_locl.h",
    "src/crypto/bio/internal.h",
    "src/crypto/bn/internal.h",
    "src/crypto/bn/rsaz_exp.h",
    "src/crypto/bytestring/internal.h",
    "src/crypto/cipher/internal.h",
    "src/crypto/conf/conf_def.h",
    "src/crypto/conf/internal.h",
    "src/crypto/curve25519/internal.h",
    "src/crypto/des/internal.h",
    "src/crypto/digest/internal.h",
    "src/crypto/digest/md32_common.h",
    "src/crypto/ec/internal.h",
    "src/crypto/ec/p256-x86_64-table.h",
    "src/crypto/ec/p256-x86_64.h",
    "src/crypto/evp/internal.h",
    "src/crypto/internal.h",
    "src/crypto/modes/internal.h",
    "src/crypto/obj/obj_dat.h",
    "src/crypto/pkcs8/internal.h",
    "src/crypto/poly1305/internal.h",
    "src/crypto/pool/internal.h",
    "src/crypto/rand/internal.h",
    "src/crypto/rsa/internal.h",
    "src/crypto/test/file_test.cc",
    "src/crypto/test/file_test.h",
    "src/crypto/test/test_util.cc",
    "src/crypto/test/test_util.h",
    "src/crypto/x509/charmap.h",
    "src/crypto/x509/internal.h",
    "src/crypto/x509/vpm_int.h",
    "src/crypto/x509v3/ext_dat.h",
    "src/crypto/x509v3/pcy_int.h",
    "src/ssl/internal.h",
    "src/ssl/test/async_bio.h",
    "src/ssl/test/packeted_bio.h",
    "src/ssl/test/test_config.h",
]
# (name, single test source, optional data file whose $(location ...) becomes
# argv) for every crypto-only test registered before the aead_test_* family.
_CRYPTO_TESTS_BEFORE_AEAD = [
    ("aes_test", "src/crypto/aes/aes_test.cc", "src/crypto/aes/aes_tests.txt"),
    ("asn1_test", "src/crypto/asn1/asn1_test.cc", None),
    ("base64_test", "src/crypto/base64/base64_test.cc", None),
    ("bio_test", "src/crypto/bio/bio_test.cc", None),
    ("bn_test", "src/crypto/bn/bn_test.cc", "src/crypto/bn/bn_tests.txt"),
    ("bytestring_test", "src/crypto/bytestring/bytestring_test.cc", None),
    ("chacha_test", "src/crypto/chacha/chacha_test.cc", None),
]

# (cipher argv name, test-vector file under src/crypto/cipher/test/) for the
# aead_test_* family; the rule name is derived from the cipher name.
_AEAD_TESTS = [
    ("aes-128-gcm", "aes_128_gcm_tests.txt"),
    ("aes-256-gcm", "aes_256_gcm_tests.txt"),
    ("aes-128-gcm-siv", "aes_128_gcm_siv_tests.txt"),
    ("aes-256-gcm-siv", "aes_256_gcm_siv_tests.txt"),
    ("chacha20-poly1305", "chacha20_poly1305_tests.txt"),
    ("chacha20-poly1305-old", "chacha20_poly1305_old_tests.txt"),
    ("aes-128-cbc-sha1-tls", "aes_128_cbc_sha1_tls_tests.txt"),
    ("aes-128-cbc-sha1-tls-implicit-iv", "aes_128_cbc_sha1_tls_implicit_iv_tests.txt"),
    ("aes-128-cbc-sha256-tls", "aes_128_cbc_sha256_tls_tests.txt"),
    ("aes-256-cbc-sha1-tls", "aes_256_cbc_sha1_tls_tests.txt"),
    ("aes-256-cbc-sha1-tls-implicit-iv", "aes_256_cbc_sha1_tls_implicit_iv_tests.txt"),
    ("aes-256-cbc-sha256-tls", "aes_256_cbc_sha256_tls_tests.txt"),
    ("aes-256-cbc-sha384-tls", "aes_256_cbc_sha384_tls_tests.txt"),
    ("des-ede3-cbc-sha1-tls", "des_ede3_cbc_sha1_tls_tests.txt"),
    ("des-ede3-cbc-sha1-tls-implicit-iv", "des_ede3_cbc_sha1_tls_implicit_iv_tests.txt"),
    ("aes-128-cbc-sha1-ssl3", "aes_128_cbc_sha1_ssl3_tests.txt"),
    ("aes-256-cbc-sha1-ssl3", "aes_256_cbc_sha1_ssl3_tests.txt"),
    ("des-ede3-cbc-sha1-ssl3", "des_ede3_cbc_sha1_ssl3_tests.txt"),
    ("aes-128-ctr-hmac-sha256", "aes_128_ctr_hmac_sha256.txt"),
    ("aes-256-ctr-hmac-sha256", "aes_256_ctr_hmac_sha256.txt"),
]

# Remaining crypto-only tests, in the original registration order.
_CRYPTO_TESTS_AFTER_AEAD = [
    ("cipher_test", "src/crypto/cipher/cipher_test.cc", "src/crypto/cipher/test/cipher_tests.txt"),
    ("cmac_test", "src/crypto/cmac/cmac_test.cc", None),
    ("constant_time_test", "src/crypto/constant_time_test.cc", None),
    ("ed25519_test", "src/crypto/curve25519/ed25519_test.cc", "src/crypto/curve25519/ed25519_tests.txt"),
    ("x25519_test", "src/crypto/curve25519/x25519_test.cc", None),
    ("spake25519_test", "src/crypto/curve25519/spake25519_test.cc", None),
    ("dh_test", "src/crypto/dh/dh_test.cc", None),
    ("digest_test", "src/crypto/digest/digest_test.cc", None),
    ("dsa_test", "src/crypto/dsa/dsa_test.cc", None),
    ("ec_test", "src/crypto/ec/ec_test.cc", None),
    ("example_mul", "src/crypto/ec/example_mul.c", None),
    ("p256-x86_64_test", "src/crypto/ec/p256-x86_64_test.cc", "src/crypto/ec/p256-x86_64_tests.txt"),
    ("ecdh_test", "src/crypto/ecdh/ecdh_test.cc", "src/crypto/ecdh/ecdh_tests.txt"),
    ("ecdsa_sign_test", "src/crypto/ecdsa/ecdsa_sign_test.cc", "src/crypto/ecdsa/ecdsa_sign_tests.txt"),
    ("ecdsa_test", "src/crypto/ecdsa/ecdsa_test.cc", None),
    ("ecdsa_verify_test", "src/crypto/ecdsa/ecdsa_verify_test.cc", "src/crypto/ecdsa/ecdsa_verify_tests.txt"),
    ("err_test", "src/crypto/err/err_test.cc", None),
    ("evp_extra_test", "src/crypto/evp/evp_extra_test.cc", None),
    ("evp_test", "src/crypto/evp/evp_test.cc", "src/crypto/evp/evp_tests.txt"),
    ("pbkdf_test", "src/crypto/evp/pbkdf_test.cc", None),
    ("hkdf_test", "src/crypto/hkdf/hkdf_test.cc", None),
    ("hmac_test", "src/crypto/hmac/hmac_test.cc", "src/crypto/hmac/hmac_tests.txt"),
    ("lhash_test", "src/crypto/lhash/lhash_test.cc", None),
    ("gcm_test", "src/crypto/modes/gcm_test.cc", None),
    ("obj_test", "src/crypto/obj/obj_test.cc", None),
    ("pkcs12_test", "src/crypto/pkcs8/pkcs12_test.cc", None),
    ("pkcs8_test", "src/crypto/pkcs8/pkcs8_test.cc", None),
    ("poly1305_test", "src/crypto/poly1305/poly1305_test.cc", "src/crypto/poly1305/poly1305_tests.txt"),
    ("pool_test", "src/crypto/pool/pool_test.cc", None),
    ("refcount_test", "src/crypto/refcount_test.cc", None),
    ("rsa_test", "src/crypto/rsa/rsa_test.cc", None),
    ("thread_test", "src/crypto/thread_test.c", None),
    ("pkcs7_test", "src/crypto/x509/pkcs7_test.c", None),
    ("x509_test", "src/crypto/x509/x509_test.cc", None),
    ("tab_test", "src/crypto/x509v3/tab_test.c", None),
    ("v3name_test", "src/crypto/x509v3/v3name_test.c", None),
]

def _boringssl_cc_test(name, src, copts, deps, data = None, extra_args = None):
    """Register one small cc_test that links the shared test-support sources.

    Args:
      name: test rule name.
      src: the single test source file.
      copts: base compiler options; -DBORINGSSL_SHARED_LIBRARY is appended.
      deps: dependency labels.
      data: optional list of runtime data files; a "$(location ...)" argument
          is appended to argv for each of them.
      extra_args: optional argv entries placed before the $(location ...) args.
    """
    args = list(extra_args or [])
    for data_file in (data or []):
        args.append("$(location %s)" % data_file)
    kwargs = {}
    # Only pass args/data when present, matching the previous expanded calls.
    if args:
        kwargs["args"] = args
    if data:
        kwargs["data"] = data
    native.cc_test(
        name = name,
        size = "small",
        srcs = [src] + test_support_sources,
        copts = copts + ["-DBORINGSSL_SHARED_LIBRARY"],
        deps = deps,
        **kwargs
    )

def create_tests(copts, crypto, ssl):
    """Register every BoringSSL test target, table-driven.

    Replaces ~650 lines of copy-pasted native.cc_test calls with three
    tables plus one helper; the set of registered rules and their attributes
    is unchanged.

    NOTE(review): this file is marked as generated by generate_build_files.py
    — mirror this refactor in the generator or regeneration will clobber it.
    """
    for name, src, data_file in _CRYPTO_TESTS_BEFORE_AEAD:
        _boringssl_cc_test(name, src, copts, [crypto],
                           data = [data_file] if data_file else None)
    for cipher, vectors in _AEAD_TESTS:
        # e.g. "aes-128-gcm" -> rule "aead_test_aes_128_gcm" with argv
        # ["aes-128-gcm", "$(location .../aes_128_gcm_tests.txt)"].
        _boringssl_cc_test(
            "aead_test_" + cipher.replace("-", "_"),
            "src/crypto/cipher/aead_test.cc",
            copts,
            [crypto],
            data = ["src/crypto/cipher/test/" + vectors],
            extra_args = [cipher],
        )
    for name, src, data_file in _CRYPTO_TESTS_AFTER_AEAD:
        _boringssl_cc_test(name, src, copts, [crypto],
                           data = [data_file] if data_file else None)
    # ssl_test is the only target that also links against `ssl`.
    _boringssl_cc_test("ssl_test", "src/ssl/ssl_test.cc", copts, [crypto, ssl])
| 22,360 | 9,405 |
# -*- coding: utf-8 -*-
"""
Author:by 王林清 on 2021/11/2 13:02
FileName:ci.py in shiyizhonghua_resource
Tools:PyCharm python3.8.4
"""
from util import get_time_str, get_json, get_file_path, save_json, \
save_split_json
if __name__ == '__main__':
    dir_name = r'./../data/ci'
    paths = get_file_path(dir_name)
    # The first file holds the author biographies; the rest hold the ci.
    author_path = paths.pop(0)
    authors = {
        entry['name']: {
            'name': entry['name'],
            'time': '宋',
            'desc': entry['description'],
        }
        for entry in get_json(author_path)
    }
    ci_jsons = []
    for path in paths:
        try:
            for ci in get_json(path):
                stamp = get_time_str()
                ci_jsons.append({
                    'title': ci['rhythmic'],
                    'author': authors[ci['author']],
                    'type': '词',
                    'content': ci['paragraphs'],
                    'create_time': stamp,
                    'update_time': stamp,
                    'valid_delete': True,
                })
        except Exception as ex:
            # Best-effort: report the file that failed and keep going.
            print(f'{path}:{ex}')
    save_split_json('ci', ci_jsons)
| 1,291 | 419 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

# Absolute path of the project root (two directories above this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# If we actually did anything that used the secret key we'd need to set it to
# some constant value and find a way to secretly store it. However, pfif-tools
# doesn't use it for anything. We need to set it to something to make Django
# happy though, and we set it to something random to be safe in case we
# unknowingly do something in the future that uses it (better to have a password
# reset token break because this changed or something like that than a security
# hole we don't know about).
SECRET_KEY = os.urandom(30)

# Presumably the App Engine dev server reports "Development" in
# SERVER_SOFTWARE — TODO confirm against the deployment environment.
if 'Development' in os.environ.get('SERVER_SOFTWARE', ''):
    DEBUG = True
    # If DEBUG is True and ALLOWED_HOSTS is empty, Django permits localhost.
    ALLOWED_HOSTS = []
else:
    DEBUG = False
    ALLOWED_HOSTS = ['pfif-tools.appspot.com']

# Application definition
INSTALLED_APPS = [
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Templates live in the top-level "resources" directory.
        'DIRS': ['resources'],
        'APP_DIRS': False,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
            ],
        },
    },
]

WSGI_APPLICATION = 'wsgi.application'

# Internationalization
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
| 2,307 | 733 |
# coding=utf-8
from related.types import TypedSequence, TypedMapping, TypedSet, ImmutableDict
from attr.exceptions import FrozenInstanceError
from related.converters import str_if_not_none
from collections import OrderedDict
import pytest
def test_immutable_dict():
    """Every mutation attempt on ImmutableDict raises and leaves it intact."""
    frozen = ImmutableDict(dict(a=1))
    expected = dict(a=1)

    with pytest.raises(FrozenInstanceError):
        del frozen['a']
    assert frozen == expected

    with pytest.raises(FrozenInstanceError):
        frozen['b'] = 2
    assert frozen == expected

    with pytest.raises(FrozenInstanceError):
        frozen.clear()
    assert frozen == expected

    with pytest.raises(FrozenInstanceError):
        frozen.pop('a')
    assert frozen == expected

    with pytest.raises(FrozenInstanceError):
        frozen.something = 0
    assert frozen == expected

    with pytest.raises(FrozenInstanceError):
        del frozen.something_else
    assert frozen == expected
def test_str_if_not_none():
    """str_if_not_none stringifies non-None values and passes None through."""
    trademark = "Registered Trademark ®"
    assert str_if_not_none(trademark) == trademark
    assert str_if_not_none(1) == "1"
    assert str_if_not_none(None) is None
def test_sequence():
    """TypedSequence mirrors a plain list but enforces the element type."""
    reference = ["a", "b", "c"]
    typed_seq = TypedSequence(str, reference)

    # Equality, str/repr and length all match the backing list.
    assert typed_seq == reference
    assert str(typed_seq) == str(reference)
    assert repr(typed_seq) == repr(reference)
    assert len(typed_seq) == len(reference)

    # Deleting from both keeps them in sync ...
    del typed_seq[1]
    del reference[1]
    assert typed_seq == reference

    # ... while assigning only into the typed sequence diverges them.
    typed_seq[1] = "d"
    assert typed_seq != reference

    # Values of the wrong type are rejected on assignment.
    with pytest.raises(TypeError):
        typed_seq[1] = 4.0
def test_mapping():
    """TypedMapping mirrors an OrderedDict but enforces the value type."""
    reference = OrderedDict(a=1, b=2, c=3)
    typed_map = TypedMapping(int, reference)  # renamed: no `map` shadowing

    assert typed_map == reference
    assert str(typed_map) == str(reference)
    assert repr(typed_map) == repr(reference)
    assert len(typed_map) == len(reference)

    del typed_map["b"]
    del reference["b"]
    assert typed_map == reference

    # Wrong value types are rejected both by item assignment and by add().
    with pytest.raises(TypeError):
        typed_map["d"] = 4.0
    with pytest.raises(TypeError):
        typed_map.add(5)

    # add(value, key) inserts a correctly-typed entry.
    typed_map.add(4, 'd')
    reference['d'] = 4
    assert typed_map == reference
def test_set():
    """TypedSet mirrors a plain set but enforces the element type."""
    reference = {"a", "b", "c"}
    typed_set = TypedSet(str, reference)

    assert typed_set == reference
    assert len(typed_set) == len(reference)
    assert 'a' in str(typed_set)
    assert 'a' in repr(typed_set)

    # add() and discard() round-trip back to the original contents.
    typed_set.add("d")
    assert "d" in typed_set
    assert typed_set != reference
    typed_set.discard("d")
    assert "d" not in typed_set
    assert typed_set == reference

    # Elements of the wrong type are rejected.
    with pytest.raises(TypeError):
        typed_set.add(5)
| 2,379 | 867 |
def inicio():
    """Print the coloured main-menu header and its three options."""
    # Yellow separators around the centred title.
    print('\033[33m=' * 60)
    print('MENU PRINCIPAL'.center(50))
    print('=' * 60)
    # Blue option numbers, magenta labels.
    opcoes = (
        '\033[34m1\033[m - \033[35mCadastrar nova pessoa\033[m',
        '\033[34m2\033[m - \033[35mVer pessoas cadastradas\033[m',
        '\033[34m3\033[m - \033[35mSair do Sistema\033[m',
    )
    for linha in opcoes:
        print(linha)
    print('\033[33m=\033[m' * 60)
def escolha():
    """Prompt until the user types a valid menu option and return it.

    Re-asks while the input is not an integer or is outside 1..3.

    Fixes: the original's bare ``except`` (which also swallowed
    KeyboardInterrupt) is narrowed to ``ValueError``, and the unreachable
    ``if escolha > 3 and escolha < 1`` branch is removed.

    :return: the chosen option as an int in ``{1, 2, 3}``.
    """
    while True:
        try:
            opcao = int(input('Sua escolha: '))
        except ValueError:
            print('\033[31mValor digitado não condiz com a tabela\033[m')
            continue
        if 1 <= opcao <= 3:
            return opcao
        print('\033[31mValor digitado não condiz com a tabela\033[m')
def arquivoExiste(nome):
    """Return True if *nome* can be opened for reading, False if missing."""
    try:
        with open(nome, 'rt'):
            pass
    except FileNotFoundError:
        return False
    return True
def criarArquivo(nome):
    """Create (or truncate) the register file *nome*, reporting failures.

    Uses mode ``wt+`` like the original; ``with`` guarantees the handle is
    closed, and the bare ``except`` is narrowed to ``OSError`` so real bugs
    are no longer silenced.
    """
    try:
        with open(nome, 'wt+'):
            pass
    except OSError:
        print('Houve algum erro')
def opcao1(arquivo):
    """Prompt for a name and age and append them to *arquivo* as 'nome;idade'.

    Fixes: the file handle is managed with ``with`` (the original leaked it
    if the write failed and shadowed the path parameter with the handle),
    and the bare ``except`` clauses are narrowed to ``OSError``.

    :param arquivo: path of the register file (opened in append mode).
    """
    print('\033[33m-' * 60)
    print('CADASTRAR PESSOA'.center(50))
    print('\033[33m-\033[m' * 60)
    nome = input('Digite o nome: ')
    idade = int(input('Digite a idade: '))
    try:
        with open(arquivo, 'at') as handle:
            try:
                handle.write(f'{nome};{idade}\n')
            except OSError:
                print('Não consegui computar')
            else:
                print('Pessoa cadastrada com sucesso!')
    except OSError:
        print('Arquivo não conseguiu ser aberto')
def opcao2(nome):
    """Print every 'nome;idade' record stored in the file *nome*.

    Fixes: ``with`` guarantees the handle is closed even if a record is
    malformed, and the bare ``except`` is narrowed to ``OSError``.

    :param nome: path of the register file.
    """
    print('\033[33m-' * 60)
    print('LISTA DE PESSOAS'.center(50))
    print('\033[33m-\033[m' * 60)
    try:
        with open(nome, 'rt') as arquivo:
            print('...')
            print(f'Nome Idade')
            print('-' * 60)
            for linha in arquivo:
                # Each record is "nome;idade" terminated by a newline.
                dado = linha.split(';')
                dado[1] = dado[1].replace('\n', '')
                print(f'{dado[0]:<30}{dado[1]:>3} anos')
    except OSError:
        print('Arquivo não conseguiu ser aberto')
import tornado.ioloop
import tornado.web
import tornado.gen
import logging
from concurrent.futures import ThreadPoolExecutor
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
executor = ThreadPoolExecutor(max_workers=2)
@tornado.gen.coroutine
def callback():
    """
    When a function decorated with coroutine is called it returns immediately.

    Schedules ``ca`` to fire on the IOLoop five seconds from now.
    """
    import time

    def ca():
        # print() as a function call so the module parses on both Py2 and
        # Py3 (the original Py2 print statement is a SyntaxError under Py3).
        print('i am callback')

    tornado.ioloop.IOLoop.instance().add_timeout(time.time() + 5, ca)
def sleep_func():
    """Block for 10 seconds (run in the executor thread), then return a marker.

    print() calls replace the Py2-only print statements so the module parses
    under both Python 2 and 3.
    """
    import time
    print('sleep start')
    time.sleep(10)
    print('sleep end')
    return 'sleep func'
def callback2(future):
    """IOLoop future-callback: announce completion and print the result.

    print() calls replace the Py2-only print statements so the module parses
    under both Python 2 and 3.
    """
    print('callback2')
    print(future.result())
class MainHandler(tornado.web.RequestHandler):
    # GET / -> plain "hello world" response.
    def get(self):
        self.write('hello world')
class SleepHandler(tornado.web.RequestHandler):
    # GET /sleep -> responds immediately; `callback` is only scheduled on
    # the IOLoop and runs after this handler has returned.
    def get(self):
        # add one callback to ioloop, wait for next ioloop
        # this function should not be asynchronous
        tornado.ioloop.IOLoop.instance().add_callback(callback)
        self.write('sleep world')
class SleepFutureHandler(tornado.web.RequestHandler):
    # GET /sleep2 -> runs sleep_func on the thread pool; callback2 fires on
    # the IOLoop once the future resolves, after the response was sent.
    #@tornado.gen.coroutine
    def get(self):
        future = executor.submit(sleep_func)
        tornado.ioloop.IOLoop.instance().add_future(future, callback2)
        self.write('sleep future')
# Tornado debug mode: autoreload and tracebacks in error responses.
settings = {
    'debug': True
}
application = tornado.web.Application([
    (r'/', MainHandler),
    (r'/sleep', SleepHandler),
    (r'/sleep2', SleepFutureHandler),
], **settings)

if __name__ == '__main__':
    # Serve on port 8888 and run the IOLoop forever.
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
from dpia.modules import *
# @primary_assets_required
# @supporting_assets_required
@login_required
def threat_identification(request, q_id=None):
    '''
    Shows a list of the added supporting assets which are assigned to a primary asset.
    The user here selects threats from the list of generic threats or adds a new threat to a supporting asset.
    '''
    user = request.user
    # 404 unless the questionnaire exists AND the current user is a member.
    q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
    # query supporting assets; distinct() because one asset can be linked to
    # several primary assets of the same questionnaire.
    supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
    args = {}
    args.update(csrf(request))
    args['q'] = q
    args['supporting_assets'] = supporting_assets
    return render(request, "threats/threat_identification.html", args)
# supporting-asset add
@login_required
def threat_sa_rel_add(request, sa_id=None):
    '''
    Adds generic threats to a supporting asset.

    Non-ajax/GET requests get the selection form rendered into JSON;
    an ajax POST carrying a 'threat' id list creates the Threat_SA_REL
    rows inside a single reversion revision and returns the refreshed
    threat list plus serialized flash messages.  Always returns JSON.
    '''
    user = request.user
    supporting_object = get_object_or_404(Supporting, id=sa_id)
    # get_object_or_404 never returns a falsy value, so this branch always
    # executes; it resolves the questionaire owning the supporting asset
    # and checks the user's membership in it.
    if supporting_object:
        pa_sa_rel = PrimarySupportingRel.objects.filter(supporting=supporting_object)[0] # [0]: to select only one object when there are duplicates
        primary_id = pa_sa_rel.primary_id
        primary = get_object_or_404(Primary, id=primary_id)
        q = get_object_or_404(Questionaire, q_in_membership__member=user, id=primary.questionaire_id)
    data = dict()
    ## Add Threats to a SA
    if request.POST and request.is_ajax():
        if 'threat' in request.POST:
            with reversion.create_revision():
                checked_threats = request.POST.getlist('threat')
                threat_list = []
                for checked_threat in checked_threats:
                    threat_object = get_object_or_404(Threat, id=checked_threat)
                    # create a new relationship with the above objects, no duplicates
                    rel, created = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=supporting_object, threat=threat_object)
                    threat_list.append(threat_object.name)
                comment = ", ".join(threat_list)
                # Store some meta-information.
                save_revision_meta(user, q, 'Added generic threats "%s" to supporting asset "%s".' %(comment, supporting_object))
            ## ajax data: serialize the flash messages for the client side
            django_messages = []
            messages.success(request, u'Generic threats were added successfully to supporting asset "%s".' %(supporting_object))
            for message in messages.get_messages(request):
                django_messages.append({
                    "level": message.level,
                    "message": message.message,
                    "extra_tags": message.tags,
                })
            data['messages'] = django_messages
            data['form_is_valid'] = True
            # query supporting assets for the refreshed list partial
            supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
            args = {}
            args['q'] = q
            args['supporting_assets'] = supporting_assets
            data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
        else:
            data['form_is_valid'] = False
    # query generic_threats and each newly created Threat per questionnaire
    generic_threats = Threat.objects.all() #.exclude(~Q(threat_sa_rel__affected_supporting_asset__primary__questionaire=q), threat_sa_rel__affected_supporting_asset__primary__questionaire__isnull=False).order_by("type_of_jeopardy")
    # # query threats the user selects // of the instant questionaire
    # selected_threats = Threat_SA_REL.objects.prefetch_related().all().filter(affected_supporting_asset__primary__questionaire=q).distinct()
    args = {}
    args.update(csrf(request))
    args['q'] = q
    args['supporting_object'] = supporting_object
    args['generic_threats'] = generic_threats
    args['primary'] = primary
    data['html_form'] = render_to_string('threats/threat_sa_rel_add.html', args, request=request)
    return JsonResponse(data)
@login_required
def threat_add(request, q_id=None, sa_id=None):
    '''
    Adds new threats (defined by the user) to a supporting asset.

    Non-ajax/GET requests get the "new threat" form rendered into JSON;
    a valid ajax POST saves the threat, links it to the supporting asset
    and returns the refreshed threat list.  Always returns JSON.
    '''
    user = request.user
    # also enforces that the requesting user is a member of the questionaire
    q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
    sa = get_object_or_404(Supporting, id=sa_id)
    data = dict()
    ## Add Threat
    threat_form = ThreatForm(request.POST or None)
    if request.POST and request.is_ajax():
        if threat_form.is_valid():
            with reversion.create_revision():
                threat = threat_form.save(commit=False)
                # the new threat inherits the type of its supporting asset
                threat.supporting_asset_type = sa.supporting_type
                threat.save()
                # get_or_create returns an (object, created) tuple; the value is
                # unused — the call only guarantees the link exists once
                new_threat_sa_rel = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=sa, threat=threat)
                # Store some meta-information.
                save_revision_meta(user, q, 'Added new threat "%s" to supporting asset "%s".' %(threat.name, sa))
            ## ajax data: serialize the flash messages for the client side
            django_messages = []
            messages.success(request, u'New threat "%s" was added successfully to supporting asset "%s".' %(threat.name, sa))
            for message in messages.get_messages(request):
                django_messages.append({
                    "level": message.level,
                    "message": message.message,
                    "extra_tags": message.tags,
                })
            data['messages'] = django_messages
            data['form_is_valid'] = True
            # query supporting assets for the refreshed list partial
            supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
            args = {}
            args['q'] = q
            args['supporting_assets'] = supporting_assets
            data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
        else:
            data['form_is_valid'] = False
    args = {}
    args.update(csrf(request))
    args['q'] = q
    args['sa'] = sa
    args['threat_form'] = threat_form
    data['html_form'] = render_to_string('threats/threat_add.html', args, request=request)
    return JsonResponse(data)
@login_required
def threat_rel_delete(request, q_id=None, threat_id=None):
    '''
    Delete a relationship between threat and supporting asset.
    It doesn't delete the threat completely; it simply removes it from the supporting asset it is assigned to.

    Non-ajax/GET requests get the confirmation form rendered into JSON;
    an ajax POST deletes the relation and returns the refreshed threat
    list plus serialized flash messages.  Always returns JSON.
    '''
    user = request.user
    # also enforces that the requesting user is a member of the questionaire
    q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
    threat_rel = get_object_or_404(Threat_SA_REL, id=threat_id)
    data = dict()
    if request.POST and request.is_ajax():
        # the in-memory instance keeps its attributes after delete(), so it
        # can still be used in the success message below
        threat_rel.delete()
        ## ajax data: serialize the flash messages for the client side
        django_messages = []
        messages.success(request, u'Threat "%s" was removed successfully from supporting asset "%s".' %(threat_rel.threat, threat_rel.affected_supporting_asset))
        for message in messages.get_messages(request):
            django_messages.append({
                "level": message.level,
                "message": message.message,
                "extra_tags": message.tags,
            })
        data['form_is_valid'] = True
        data['messages'] = django_messages
        # (removed a dead `selected_threats` query here: the queryset was
        # never evaluated and used a filter path inconsistent with the rest
        # of this module)
        # query supporting assets for the refreshed list partial
        supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
        args = {}
        args['q'] = q
        args['supporting_assets'] = supporting_assets
        data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
    else:
        args = {}
        args.update(csrf(request))
        args['q'] = q
        args['threat_rel'] = threat_rel
        data['html_form'] = render_to_string('threats/threat_rel_remove.html', args, request=request)
    return JsonResponse(data)
# @supporting_assets_required
# @threats_required
@login_required
def threat_assessment(request, q_id=None):
    '''
    Shows a formset table of all the threats (ordered by their "likelihood" value) selected by the user in the step "Threat Identification".
    It accepts two values, namely "level of vulnerability" and "risk source capability".
    If either of them is entered above the max number value (4) or not entered at all, an error is raised.
    The likelihood value is automatically calculated as the sum of the level of vulnerability and risk source capability.
    '''
    user = request.user
    # also enforces that the requesting user is a member of the questionaire
    q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
    # query threats the user has selected and order by the MaxValue of the Sum;
    # and filter only those that have a relationship to a primary asset. the "is_null" filtering is done in case the user goes back to
    # the primary list step to remove supporting assets.
    selected_threats = q.get_threats()
    ## Selected threats formset (one form per Threat_SA_REL, no extra forms)
    ThreatFormset = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form, extra=0)
    threat_formset = ThreatFormset(queryset=selected_threats)
    if request.POST:
        if selected_threats.exists():
            # NOTE(review): bound without queryset= here, so the formset falls
            # back to the model's default queryset and resolves instances from
            # the posted pks — confirm this is intended.
            threat_formset = ThreatFormset(request.POST, request.FILES)
            if threat_formset.is_valid():
                with reversion.create_revision():
                    for form in threat_formset.forms:
                        threat = form.save(commit=False)
                        # likelihood is derived, never entered by the user
                        threat.likelihood = threat.level_of_vulnerability + threat.risk_source_capability
                        threat.save()
                    threat_formset.save()
                    threat_list = selected_threats.values_list('threat__name', flat=True)
                    comment = ", ".join(threat_list)
                    # Store some meta-information.
                    save_revision_meta(user, q, 'Assessed likelihood of threats "{}".'.format(comment))
                messages.success(request, u'Likelihood of threats was assessed successfully.')
                return redirect(reverse('risk_assessment', args=[q.id]))
            else:
                messages.error(request, u'Please fill out the required fields.')
        else:
            # nothing to assess — skip straight to the next step
            return redirect('risk_assessment', q.id)
    args = {}
    args.update(csrf(request))
    args['q'] = q
    args['selected_threats'] = selected_threats
    args['threat_formset'] = threat_formset
    return render(request, "threats/threat_assessment.html", args)
# @supporting_assets_required
# @threats_required
# @threat_assessment_required
# @risk_assessment_required
@login_required
def threat_controls(request, q_id=None):
    '''
    Shows a formset list of all the assessed threats.
    The user is required to fill out only the controls field.
    '''
    user = request.user
    # also enforces that the requesting user is a member of the questionaire
    q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
    ## query Threats
    threats = q.get_threats()
    ThreatFormset2 = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form2, extra=0)
    if request.POST:
        if threats.exists():
            threat_formset = ThreatFormset2(request.POST, queryset=threats)
            # force the 'control' field to be filled in for this step
            for form in threat_formset.forms:
                form.fields['control'].required = True
            with reversion.create_revision():
                if threat_formset.is_valid():
                    threat_formset.save()
                    # Store some meta-information.
                    threat_list = threats.values_list('threat__name', flat=True)
                    comment = ", ".join(threat_list)
                    save_revision_meta(user, q, 'Implemented controls to threats "{}".'.format(comment))
                    messages.success(request, u'Controls were implemented successfully.')
                    return redirect(reverse('risk_mitigation', args=[q.id]))
                else:
                    # fall through and re-render the bound (invalid) formset
                    messages.error(request, u'Please fill out the required fields.')
        else:
            # nothing to control — skip to the next step
            return redirect('risk_mitigation', q.id)
    else:
        threat_formset = ThreatFormset2(queryset=threats)
    args = {}
    args.update(csrf(request))
    args['q'] = q
    args['threat_formset'] = threat_formset
    return render(request, "threats/threat_controls.html", args)
| 12,523 | 3,679 |
#!/usr/bin/env python
import rospy
import sys
import numpy as np
from geometry_msgs.msg import Pose, Twist
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Range
from math import cos, sin, asin, tan, atan2
# msgs and srv for working with the set_model_service
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState
from std_srvs.srv import Empty
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
from std_msgs.msg import Header, ColorRGBA
import math
# a handy tool to convert orientations
from tf.transformations import euler_from_quaternion, quaternion_from_euler
class BasicThymio:
def __init__(self, thymio_name):
"""init"""
self.thymio_name = thymio_name
rospy.init_node('basic_thymio_controller', anonymous=True)
# Publish to the topic '/thymioX/cmd_vel'.
self.velocity_publisher = rospy.Publisher(self.thymio_name + '/cmd_vel',
Twist, queue_size=10)
# A subscriber to the topic '/turtle1/pose'. self.update_pose is called
# when a message of type Pose is received.
self.pose_subscriber = rospy.Subscriber(self.thymio_name + '/odom',
Odometry, self.update_state)
self.prox_center_sub = rospy.Subscriber(self.thymio_name + '/proximity/center',
Range, self.update_prox_center)
self.prox_center_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/center_left',
Range, self.update_prox_center_left)
self.prox_center_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/center_right',
Range, self.update_prox_center_right)
self.prox_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/left',
Range, self.update_prox_left)
self.prox_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/right',
Range, self.update_prox_right)
self.prox_rear_left_sub = rospy.Subscriber(self.thymio_name + '/proximity/rear_left',
Range, self.update_prox_rear_left)
self.prox_rear_right_sub = rospy.Subscriber(self.thymio_name + '/proximity/rear_right',
Range, self.update_prox_rear_right)
self.current_pose = Pose()
self.current_twist = Twist()
self.current_prox_center = Range()
self.current_prox_center_left = Range()
self.current_prox_center_right = Range()
self.current_prox_left = Range()
self.current_prox_right = Range()
# publish at this rate
self.tick_rate = 50
self.rate = rospy.Rate(self.tick_rate)
self.vel_msg = Twist()
def update_prox_center(self, data):
self.current_prox_center = data
def update_prox_center_left(self, data):
self.current_prox_center_left = data
def update_prox_center_right(self, data):
self.current_prox_center_right = data
def update_prox_left(self, data):
self.current_prox_left = data
def update_prox_right(self, data):
self.current_prox_right = data
def update_prox_rear_left(self, data):
self.current_prox_rear_left = data
def update_prox_rear_right(self, data):
self.current_prox_rear_right = data
def thymio_state_service_request(self, position, orientation):
"""Request the service (set thymio state values) exposed by
the simulated thymio. A teleportation tool, by default in gazebo world frame.
Be aware, this does not mean a reset (e.g. odometry values)."""
rospy.wait_for_service('/gazebo/set_model_state')
try:
model_state = ModelState()
model_state.model_name = self.thymio_name
model_state.reference_frame = '' # the frame for the pose information
model_state.pose.position.x = position[0]
model_state.pose.position.y = position[1]
model_state.pose.position.z = position[2]
qto = quaternion_from_euler(orientation[0], orientation[0], orientation[0], axes='sxyz')
model_state.pose.orientation.x = qto[0]
model_state.pose.orientation.y = qto[1]
model_state.pose.orientation.z = qto[2]
model_state.pose.orientation.w = qto[3]
# a Twist can also be set but not recomended to do it in a service
gms = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
response = gms(model_state)
return response
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def update_state(self, data):
"""A new Odometry message has arrived. See Odometry msg definition."""
# Note: Odmetry message also provides covariance
self.current_pose = data.pose.pose
self.current_twist = data.twist.twist
quat = (
self.current_pose.orientation.x,
self.current_pose.orientation.y,
self.current_pose.orientation.z,
self.current_pose.orientation.w)
(roll, pitch, yaw) = euler_from_quaternion (quat)
# rospy.loginfo("State from Odom: (%.5f, %.5f, %.5f) " % (self.current_pose.position.x, self.current_pose.position.y, yaw))
def turn_left(self, speed):
self.vel_msg.angular.z = speed
# print(' turning left')
self.velocity_publisher.publish(self.vel_msg)
def turn_right(self, speed):
self.vel_msg.angular.z = -speed
self.velocity_publisher.publish(self.vel_msg)
def make_figure_8(self):
ang_speed = 1.0
full_circle_duration = rospy.Duration(2 * math.pi / ang_speed)
start = rospy.Time()
while start.secs == 0:
start = rospy.get_rostime()
print('start: ' + str(start.nsecs))
self.vel_msg.linear.x = 0.2
while not rospy.is_shutdown():
now = rospy.get_rostime()
if (now - start) < full_circle_duration:
self.turn_left(ang_speed)
elif (now - start) < 2 * full_circle_duration:
self.turn_right(ang_speed)
else:
break
self.rate.sleep()
self.vel_msg.linear.x = 0
self.vel_msg.angular.z = 0
self.velocity_publisher.publish(self.vel_msg)
rospy.spin()
def is_close_to_wall(self):
return (0 < self.current_prox_left.range < 0.08) or \
(0 < self.current_prox_center_left.range < 0.08) or \
(0 < self.current_prox_center.range < 0.08) or \
(0 < self.current_prox_center_right.range < 0.08) or \
(0 < self.current_prox_right.range < 0.08)
def drive_to_wall(self):
self.vel_msg.linear.x = 0.1
self.velocity_publisher.publish(self.vel_msg)
close = self.is_close_to_wall()
while not close:
self.velocity_publisher.publish(self.vel_msg)
close = self.is_close_to_wall()
self.rate.sleep()
self.vel_msg.linear.x = 0
self.velocity_publisher.publish(self.vel_msg)
# TODO: it seems that if this script is started when the thymio
# is already close to the wall, the thymio sometimes rotated the wrong way
# Also, we might want to make it proportional, and maybe incorporate
# the sensor inputs of center_left and center_right as well
def face_wall(self):
wall_is_left = self.current_prox_left.range < self.current_prox_right.range
last_prox_center = self.current_prox_center
if wall_is_left:
ang_vel = 0.1
else:
ang_vel = -0.1
while abs(self.current_prox_center_left.range - self.current_prox_center_right.range) > 0.005:
self.vel_msg.angular.z = ang_vel
self.velocity_publisher.publish(self.vel_msg)
self.rate.sleep()
self.vel_msg.angular.z = 0.0
self.velocity_publisher.publish(self.vel_msg)
# TODO: might want to have a proportional controller
# Also, need to incorporate rear sensor inputs to get perfect final orientation
# (now it just turns 180 degrees and ignores rear sensor inputs)
def turn_around(self):
print('turning around')
ang_speed = 1.0
half_circle_duration = rospy.Duration(math.pi / ang_speed)
start = rospy.Time()
while start.secs == 0:
start = rospy.get_rostime()
print('start: ' + str(start.nsecs))
while rospy.get_rostime() - start < half_circle_duration:
self.vel_msg.angular.z = ang_speed
self.velocity_publisher.publish(self.vel_msg)
self.rate.sleep()
self.vel_msg.angular.z = 0
self.velocity_publisher.publish(self.vel_msg)
# FIXME: thymio stops too late, probably because the sensors are not in the
# same location as the base_link
def drive_forward(self):
current_dist = self.current_prox_rear_left.range
dist_to_go = 2.0 - current_dist
lin_vel = 0.2
fwd_duration = rospy.Duration(dist_to_go / lin_vel)
start = rospy.Time()
while start.secs == 0:
start = rospy.get_rostime()
print('start: ' + str(start.nsecs))
while rospy.get_rostime() - start < fwd_duration:
self.vel_msg.linear.x = lin_vel
self.velocity_publisher.publish(self.vel_msg)
self.rate.sleep()
self.vel_msg.linear.x = 0
self.velocity_publisher.publish(self.vel_msg)
def task1(self):
self.make_figure_8()
def task2(self):
self.drive_to_wall()
self.face_wall()
def task3(self):
self.drive_to_wall()
self.face_wall()
self.turn_around()
self.drive_forward()
def usage():
    """Return the usage hint printed when the script gets wrong arguments."""
    return "Wrong number of parameters. basic_move.py [thymio_name]"
if __name__ == '__main__':
    # Exactly one CLI argument is expected: the robot's namespace/name.
    if len(sys.argv) == 2:
        thymio_name = sys.argv[1]
        print "Now working with robot: %s" % thymio_name
    else:
        print usage()
        sys.exit(1)
    thymio = BasicThymio(thymio_name)
    # Teleport the robot to a certain pose. If pose is different to the
    # origin of the world, you must account for a transformation between
    # odom and gazebo world frames.
    # NOTE: The goal of this step is *only* to show the available
    # tools. The launch file process should take care of initializing
    # the simulation and spawning the respective models
    #thymio.thymio_state_service_request([0.,0.,0.], [0.,0.,0.])
    #rospy.sleep(1.)
    thymio.task1()
    # thymio.task2()
    # thymio.task3()
| 10,737 | 3,543 |
"""
package related livefs modification subsystem
"""
| 54 | 14 |
# Construct a binary tree from its preorder and inorder traversals.
#
# Note:
# You may assume that the tree contains no duplicate elements.
#
# For example, given
#
# preorder traversal preorder = [3,9,20,15,7]
# inorder traversal  inorder = [9,3,15,20,7]
#
# return the following binary tree:
#
#     3
#    / \
#   9  20
#      /  \
#     15   7
# Related Topics: Tree, Depth-First Search, Array
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x               # payload
        self.left = self.right = None  # children start empty
class Solution(object):
    def buildTreeNode(self, preorder, inorder):
        """Recursively rebuild the subtree described by the two traversals.

        The first preorder element is the root; its position in the inorder
        list splits the remaining values into left and right subtrees.
        """
        if not preorder:
            return None
        root_value, rest = preorder[0], preorder[1:]
        node = TreeNode(root_value)
        cut = inorder.index(root_value)  # elements are unique by assumption
        node.left = self.buildTreeNode(rest[:cut], inorder[:cut])
        node.right = self.buildTreeNode(rest[cut:], inorder[cut + 1:])
        return node

    def buildTree(self, preorder, inorder):
        """
        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        return self.buildTreeNode(preorder, inorder)
| 1,153 | 496 |
# A child is playing a cloud-hopping game. In this game, there are sequentially numbered clouds that can be thunderheads or cumulus clouds. The character must jump from cloud to cloud until it reaches the start again.
# There is an array of clouds, c, and an energy level e=100. The character starts from c[0] and uses 1 unit of energy to make a jump of size k to cloud c[(i+k)%n]. If it lands on a thundercloud, c[i]=1, its energy (e) decreases by 2 additional units. The game ends when the character lands back on cloud 0.
# Given the values of n, k, and the configuration of the clouds as an array c, determine the final value of e after the game ends.
# Example: c = [0,0,1,0] and k=2. The character makes the following jumps:
# The indices of the path are 0 -> 2 -> 0. The energy level reduces by 1 for each jump, to 98. The character landed on one thunderhead at an additional cost of 2 energy units. The final energy level is 96.
def jumpingOnClouds(c, k):
    """Simulate the cloud-hopping game and return the remaining energy.

    Starting on cloud 0 with e = 100, each jump of size k (to cloud
    (i + k) % n) costs 1 unit; landing on a thunderhead (c[i] == 1) costs
    2 additional units.  The game ends when the character lands back on
    cloud 0.

    Args:
        c: list of ints, 0 = cumulus, 1 = thunderhead; c[0] is the start.
        k: jump size (positive int).

    Returns:
        The final energy level e.
    """
    e = 100
    n = len(c)
    i = 0
    # Fix: the original `while i != 0` never ran because i starts at 0, so
    # the function always returned 100.  The game must make at least one
    # jump, so jump first and test for returning to cloud 0 afterwards.
    while True:
        i = (i + k) % n
        e -= 1
        if c[i] == 1:
            e -= 2
        if i == 0:
            break
    return e
| 1,644 | 512 |
# -*- coding: utf-8 -*-
"""
computes the lag of the amdf function
Args:
x: audio signal
iBlockLength: block length in samples
iHopLength: hop length in samples
f_s: sample rate of audio data (unused)
Returns:
f frequency
t time stamp for the frequency value
"""
import numpy as np
import math
def PitchTimeAmdf(x, iBlockLength, iHopLength, f_s):
    """
    Block-wise fundamental-frequency estimate via the minimum of the AMDF.

    Args:
        x: audio signal (1-d numpy array)
        iBlockLength: block length in samples
        iHopLength: hop length in samples
        f_s: sample rate of the audio data in Hz

    Returns:
        f: per-block frequency estimate in Hz (0 for all-zero blocks)
        t: time stamp in seconds for each frequency value (block centre)
    """
    # pitch search range limits in Hz
    f_max = 2000
    f_min = 50
    iNumOfBlocks = math.ceil(x.size / iHopLength)
    # compute time stamps (centre of each block)
    t = (np.arange(0, iNumOfBlocks) * iHopLength + (iBlockLength / 2)) / f_s
    # allocate memory
    f = np.zeros(iNumOfBlocks)
    # lag search bounds in samples; the -1 compensates for computeAmdf
    # storing the difference at lag eta+1 in bin eta
    eta_min = int(round(f_s / f_max)) - 1
    eta_max = int(round(f_s / f_min)) - 1
    for n in range(0, iNumOfBlocks):
        i_start = n * iHopLength
        i_stop = np.min([x.size - 1, i_start + iBlockLength - 1])
        # skip silent (all-zero) blocks; f[n] stays 0
        if not x[np.arange(i_start, i_stop + 1)].sum():
            continue
        else:
            x_tmp = x[np.arange(i_start, i_stop + 1)]
            # compute the AMDF for this block
            afCorr = computeAmdf(x_tmp, eta_max)
        # index of the AMDF minimum above the minimum lag
        f[n] = np.argmin(afCorr[np.arange(eta_min + 1, afCorr.size)]) + 1
        # convert the winning lag to Hz
        f[n] = f_s / (f[n] + eta_min + 1)
    return (f, t)
def computeAmdf(x, eta_max):
    """Average magnitude difference function of x for lags up to eta_max.

    Bin ``lag`` holds the mean absolute difference between the signal and a
    copy shifted by ``lag + 1`` samples; untouched bins stay at 1.
    Returns the int 0 for an empty input (kept for backward compatibility).
    """
    num_samples = x.shape[0]
    if num_samples <= 0:
        return 0
    afAmdf = np.ones(num_samples)
    for lag in range(min(num_samples, eta_max + 1)):
        head = x[0:num_samples - 1 - lag]
        tail = x[lag + 1:num_samples]
        afAmdf[lag] = np.abs(head - tail).sum() / num_samples
    return afAmdf
| 1,575 | 655 |
from google.appengine.ext import ndb
# Valid attachment positions for an Edit relative to its selected element.
ATTACHMENTS = [
    'beforeBegin',
    'afterEnd',
    'middle'
]
# Valid Modification kinds: strike out existing text or insert new text.
TYPES = [
    'strike',
    'insert'
]
class Modification(ndb.Model):
    # Kind of change; one of TYPES ('strike' or 'insert').
    type = ndb.StringProperty(choices=TYPES)
    # Character offset where the modification starts.
    start_index = ndb.IntegerProperty()
    # Trigram captured at the start position — presumably used to re-locate
    # the span if the underlying text shifts; TODO confirm against callers.
    trigram_at_start = ndb.StringProperty()
    # The text being struck or inserted.
    content = ndb.StringProperty()
class Edit(ndb.Model):
    # Selector string identifying the target element — presumably a CSS-style
    # selector; TODO confirm against callers.
    selector = ndb.StringProperty()
    # Where the edit attaches relative to the selected element; one of ATTACHMENTS.
    attachment = ndb.StringProperty(choices=ATTACHMENTS)
    # Index of the selected element among the selector's matches — presumably;
    # TODO confirm.
    selected_index = ndb.IntegerProperty()
    # Index of the sentence within the selected element — presumably; TODO confirm.
    sentence_index = ndb.IntegerProperty()
    # Trigram captured at the attach point (re-anchoring aid — TODO confirm).
    trigram_at_attach_point = ndb.StringProperty()
    # The individual modifications that make up this edit.
    modifications = ndb.StructuredProperty(Modification, repeated=True)
| 636 | 234 |
import logging

logger = logging.getLogger(__name__)

# Process-wide language code; mutated only through set_global_language_to().
GLOBAL_LANG_NAME = 'en'


def set_global_language_to(lang: str) -> None:
    """Overwrite the process-wide language code with *lang*."""
    global GLOBAL_LANG_NAME
    logger.info('Setting the global language config to: %s', lang)
    GLOBAL_LANG_NAME = lang


def get_global_language() -> str:
    """Return the currently configured process-wide language code."""
    return GLOBAL_LANG_NAME
| 316 | 117 |
import os
import math
import numpy as np
import time
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import sys
sys.path.append(os.path.dirname("../"))
from lib.utils.meter import Meter
from models.model_MNFEAM import MFEAM_SSN
from lib.dataset.shapenet import shapenet_spix
from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse, uniform_compact_loss
from lib.MEFEAM.MEFEAM import discriminative_loss
@torch.no_grad()
def eval(model, loader, pos_scale, device):
    """Evaluate the model on a loader and return the mean ASA.

    NOTE(review): this shadows the builtin `eval`; renaming would change the
    public name, so it is kept.
    """
    def achievable_segmentation_accuracy(superpixel, label):
        """
        Function to calculate Achievable Segmentation Accuracy:
        ASA(S,G) = sum_j max_i |s_j \\cap g_i| / sum_i |g_i|
        Args:
            input: superpixel image (H, W),
            output: ground-truth (H, W)
        """
        TP = 0
        unique_id = np.unique(superpixel)
        for uid in unique_id:
            mask = superpixel == uid
            # histogram of ground-truth labels inside this superpixel
            label_hist = np.histogram(label[mask])
            # best achievable overlap: assign the superpixel to its
            # dominant ground-truth label
            maximum_regionsize = label_hist[0].max()
            TP += maximum_regionsize
        return TP / label.size
    model.eval()  # change the mode of model to eval
    sum_asa = 0
    for data in loader:
        inputs, labels = data  # b*c*npoint
        inputs = inputs.to(device)  # b*c*w*h
        labels = labels.to(device)  # sematic_lable
        inputs = pos_scale * inputs
        # calculation, return affinity, hard lable, feature tensor
        Q, H, feat = model(inputs)
        asa = achievable_segmentation_accuracy(
            H.to("cpu").detach().numpy(),
            labels.to("cpu").numpy())  # return data to cpu
        sum_asa += asa
    model.train()  # restore training mode before returning
    return sum_asa / len(loader)  # mean ASA over batches
def update_param(data, model, optimizer, compactness, pos_scale, device,
                 disc_loss):
    """Run a single optimization step and return the loss terms as floats.

    `data` is an (inputs, labels, _, spix) batch; `compactness` weights the
    MSE reconstruction term and `pos_scale` rescales the inputs.
    """
    inputs, labels, _, spix = data
    inputs = inputs.to(device)
    labels = labels.to(device)
    inputs = pos_scale * inputs
    (Q, H, _, _), msf_feature = model(inputs)
    # individual loss terms
    recons = reconstruct_loss_with_cross_etnropy(Q, labels)
    compact = reconstruct_loss_with_mse(Q, inputs, H)
    disc_term = disc_loss(msf_feature, spix)
    total = recons + compactness * compact + disc_term
    # standard backprop step
    optimizer.zero_grad()
    total.backward()
    optimizer.step()
    return {
        "loss": total.item(),
        "reconstruction": recons.item(),
        "compact": compact.item(),
        "disc": disc_term.item()
    }
def train(cfg):
    """Train MFEAM_SSN on the shapenet superpixel dataset.

    `cfg` is the argparse namespace from __main__.  Logs losses to ./log via
    tensorboardX, checkpoints every 1000 iterations into cfg.out_dir, and
    writes a final timestamped model when cfg.train_iter is reached.
    """
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    # NOTE(review): 10 and 50 match the cfg.fdim / cfg.nspix defaults but are
    # hard-coded here, so the CLI flags have no effect — confirm intent.
    model = MFEAM_SSN(10, 50).to(device)
    disc_loss = discriminative_loss(0.1, 0.5)
    optimizer = optim.Adam(model.parameters(), cfg.lr)
    train_dataset = shapenet_spix(cfg.root)
    train_loader = DataLoader(train_dataset,
                              cfg.batchsize,
                              shuffle=True,
                              drop_last=True,
                              num_workers=cfg.nworkers)
    # test_dataset = shapenet.shapenet(cfg.root, split="test")
    # test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)
    meter = Meter()
    iterations = 0
    max_val_asa = 0
    writer = SummaryWriter(log_dir='log', comment='traininglog')
    # loop over epochs until the iteration budget is exhausted
    while iterations < cfg.train_iter:
        for data in train_loader:
            iterations += 1
            metric = update_param(data, model, optimizer, cfg.compactness,
                                  cfg.pos_scale, device, disc_loss)
            meter.add(metric)
            state = meter.state(f"[{iterations}/{cfg.train_iter}]")
            print(state)
            # return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()}
            writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
            writer.add_scalar("loss/reconstruction_loss",
                              metric["reconstruction"], iterations)
            writer.add_scalar("loss/compact_loss", metric["compact"],
                              iterations)
            writer.add_scalar("loss/disc_loss", metric["disc"], iterations)
            # periodic checkpoint
            if (iterations % 1000) == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(cfg.out_dir,
                                 "model_iter" + str(iterations) + ".pth"))
            # if (iterations % cfg.test_interval) == 0:
            #     asa = eval(model, test_loader, cfg.pos_scale, device)
            #     print(f"validation asa {asa}")
            #     writer.add_scalar("comprehensive/asa", asa, iterations)
            #     if asa > max_val_asa:
            #         max_val_asa = asa
            #         torch.save(model.state_dict(), os.path.join(
            #             cfg.out_dir, "bset_model_sp_loss.pth"))
            if iterations == cfg.train_iter:
                break
    # final model, timestamped so successive runs don't overwrite each other
    unique_id = str(int(time.time()))
    torch.save(model.state_dict(),
               os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--root",
type=str,
default='../shapenet_partseg_spix',
help="/ path/to/shapenet")
parser.add_argument("--out_dir",
default="./log",
type=str,
help="/path/to/output directory")
parser.add_argument("--batchsize", default=8, type=int)
parser.add_argument("--nworkers",
default=8,
type=int,
help="number of threads for CPU parallel")
parser.add_argument("--lr", default=1e-6, type=float, help="learning rate")
parser.add_argument("--train_iter", default=10000, type=int)
parser.add_argument("--fdim",
default=10,
type=int,
help="embedding dimension")
parser.add_argument("--niter",
default=5,
type=int,
help="number of iterations for differentiable SLIC")
parser.add_argument("--nspix",
default=50,
type=int,
help="number of superpixels")
parser.add_argument("--pos_scale", default=10, type=float)
parser.add_argument("--compactness", default=1e-4, type=float)
parser.add_argument("--test_interval", default=100, type=int)
args = parser.parse_args()
os.makedirs(args.out_dir, exist_ok=True)
train(args)
| 6,878 | 2,141 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-09 12:19
from __future__ import unicode_literals
import core.model_fields
import core.models
import core.validators
import core.wagtail_fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.20: creates the EU-Exit contact form page
    # and its success page for the great_international app.  Generated
    # migrations should not be edited by hand.

    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('great_international', '0022_auto_20190508_1300'),
    ]

    operations = [
        migrations.CreateModel(
            name='InternationalEUExitFormPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)),
                ('uses_tree_based_routing', models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')),
                ('breadcrumbs_label', models.CharField(max_length=50)),
                ('heading', models.CharField(max_length=255)),
                ('body_text', core.model_fields.MarkdownField(validators=[core.validators.slug_hyperlinks])),
                ('submit_button_text', models.CharField(max_length=50)),
                ('disclaimer', models.TextField(max_length=500)),
                ('first_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('first_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('last_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('last_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('email_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('email_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('organisation_type_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('organisation_type_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('company_name_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('company_name_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('country_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('country_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('city_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('city_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
                ('comment_help_text', core.wagtail_fields.FormHelpTextField(blank=True, max_length=200, null=True, verbose_name='Help text')),
                ('comment_label', core.wagtail_fields.FormLabelField(max_length=200, verbose_name='label')),
            ],
            options={
                'abstract': False,
            },
            bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'),
        ),
        migrations.CreateModel(
            name='InternationalEUExitFormSuccessPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)),
                ('uses_tree_based_routing', models.BooleanField(default=False, help_text="Allow this page's URL to be determined by its slug, and the slugs of its ancestors in the page tree.", verbose_name='tree-based routing enabled')),
                ('breadcrumbs_label', models.CharField(max_length=50)),
                ('heading', models.CharField(max_length=255, verbose_name='Title')),
                ('body_text', models.CharField(max_length=255, verbose_name='Body text')),
                ('next_title', models.CharField(max_length=255, verbose_name='Title')),
                ('next_body_text', models.CharField(max_length=255, verbose_name='Body text')),
            ],
            options={
                'abstract': False,
            },
            bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'),
        ),
    ]
| 5,331 | 1,663 |
'''
New Integration Test for 2 normal users zstack-cli login
@author: MengLai
'''
import hashlib
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_account as test_account
import zstackwoodpecker.zstack_test.zstack_test_user as test_user
import zstacklib.utils.shell as shell
# Shared helper stub for this test suite.
test_stub = test_lib.lib_get_test_stub()
# Tracks every account/user created by the test so cleanup can tear them down.
test_obj_dict = test_state.TestStateDict()
def login_cli_by_user(account_name, user_name, user_pass):
    """Log into zstack-cli as `user_name` under `account_name`; return the CLI output.

    NOTE(review): callers pass the *plaintext* password here — the CLI is
    presumably expected to hash it itself; confirm against zstack-cli docs.
    """
    # Terminate the here-document with a matching EOF line; the original
    # omitted it, which makes the shell warn "here-document delimited by
    # end-of-file" on every call.
    cmd = '''zstack-cli << EOF
LogInByUser accountName=%s userName=%s password=%s
quit
EOF''' % (account_name, user_name, user_pass)
    return shell.call(cmd)
def test_query():
    """Run `QueryVmNic` in zstack-cli and return the output.

    Used by the test to probe what session (if any) the CLI prompt reports.
    """
    # EOF terminator added so the shell heredoc is well-formed (see
    # login_cli_by_user).
    cmd = '''zstack-cli << EOF
QueryVmNic
quit
EOF'''
    return shell.call(cmd)
def logout_cli():
    """Log out of the current zstack-cli session and return the CLI output."""
    # EOF terminator added so the shell heredoc is well-formed (see
    # login_cli_by_user).
    cmd = '''zstack-cli << EOF
LogOut
quit
EOF'''
    return shell.call(cmd)
def test():
    """Integration test: two normal users log into zstack-cli under one account.

    Flow: create account -> create user-1 and user-2 and log each in via the
    CLI -> delete user-2 and verify the CLI prompt drops the session ->
    re-login user-1, log out, verify the prompt resets -> clean up.
    """
    import uuid
    test_util.test_dsc('Create an normal account and login with it')
    # NOTE(review): uuid1().get_hex() and sha512(str) are Python-2-only
    # spellings (py3 needs .hex and bytes input) — this suite targets py2.
    account_name1 = uuid.uuid1().get_hex()
    account_pass1 = hashlib.sha512(account_name1).hexdigest()
    test_account1 = test_account.ZstackTestAccount()
    test_account1.create(account_name1, account_pass1)
    test_obj_dict.add_account(test_account1)
    test_account_session = acc_ops.login_by_account(account_name1, account_pass1)
    test_util.test_dsc('Create an normal user-1 under the new account and login with it')
    user_name1 = uuid.uuid1().get_hex()
    # The stored credential is sha512(user_name1); the CLI login below passes
    # the plaintext user_name1 instead, presumably because the CLI hashes it
    # itself — intentional, not a typo. TODO confirm against zstack-cli.
    user_pass1 = hashlib.sha512(user_name1).hexdigest()
    test_user1 = test_user.ZstackTestUser()
    test_user1.create(user_name1, user_pass1, session_uuid = test_account_session)
    test_obj_dict.add_user(test_user1)
    login_output = login_cli_by_user(account_name1, user_name1, user_name1)
    # A successful CLI login shows an "account/user >>>" prompt.
    if login_output.find('%s/%s >>>' % (account_name1, user_name1)) < 0:
        test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output))
    test_util.test_dsc('Create an normal user-2 under the new account and login with it')
    user_name2 = uuid.uuid1().get_hex()
    user_pass2 = hashlib.sha512(user_name2).hexdigest()
    test_user2 = test_user.ZstackTestUser()
    test_user2.create(user_name2, user_pass2, session_uuid = test_account_session)
    test_obj_dict.add_user(test_user2)
    login_output = login_cli_by_user(account_name1, user_name2, user_name2)
    if login_output.find('%s/%s >>>' % (account_name1, user_name2)) < 0:
        test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output))
    test_util.test_dsc('Delete user-2 and check the login status')
    test_user2.delete()
    test_obj_dict.rm_user(test_user2)
    query_output = test_query()
    # After the logged-in user is deleted the CLI prompt falls back to "- >>>".
    if query_output.find('- >>>') < 0:
        test_util.test_fail('zstack-cli is not display correct after delete user: %s' % (query_output))
    test_util.test_dsc('login user-1, logout user-1 and check the login status')
    login_output = login_cli_by_user(account_name1, user_name1, user_name1)
    if login_output.find('%s/%s >>>' % (account_name1, user_name1)) < 0:
        test_util.test_fail('zstack-cli is not display correct name for logined user: %s' % (login_output))
    logout_output = logout_cli()
    if logout_output.find('- >>>') < 0:
        # NOTE(review): failure message prints login_output; presumably it
        # should print logout_output — verify before relying on the message.
        test_util.test_fail('zstack-cli is not display correct after logout: %s' % (login_output))
    test_user1.delete()
    test_account1.delete()
    test_obj_dict.rm_user(test_user1)
    test_obj_dict.rm_account(test_account1)
#Will be called only if exception happens in test().
def error_cleanup():
    """Tear down any accounts/users left registered in test_obj_dict."""
    test_lib.lib_error_cleanup(test_obj_dict)
| 3,800 | 1,378 |
"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module."""
import math
import pytest
import networkx as nx
class TestStructuralHoles:
    """Unit tests for computing measures of structural holes.

    The expected values for these functions were originally computed using the
    proprietary software `UCINET`_ and the free software `IGraph`_ , and then
    computed by hand to make sure that the results are correct.

    .. _UCINET: https://sites.google.com/site/ucinetsoftware/home

    .. _IGraph: http://igraph.org/

    """

    def setup_method(self):
        # Renamed from nose-style `setup` to pytest's `setup_method`: the
        # nose-style hook is deprecated and no longer runs on modern pytest,
        # which would leave self.D/self.G undefined in every test.
        self.D = nx.DiGraph()
        self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)])
        self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1}
        # Example from http://www.analytictech.com/connections/v20(1)/holes.htm
        self.G = nx.Graph()
        self.G.add_edges_from(
            [
                ("A", "B"),
                ("A", "F"),
                ("A", "G"),
                ("A", "E"),
                ("E", "G"),
                ("F", "G"),
                ("B", "G"),
                ("B", "D"),
                ("D", "G"),
                ("G", "C"),
            ]
        )
        self.G_weights = {
            ("A", "B"): 2,
            ("A", "F"): 3,
            ("A", "G"): 5,
            ("A", "E"): 2,
            ("E", "G"): 8,
            ("F", "G"): 3,
            ("B", "G"): 4,
            ("B", "D"): 1,
            ("D", "G"): 3,
            ("G", "C"): 10,
        }

    def test_constraint_directed(self):
        constraint = nx.constraint(self.D)
        assert constraint[0] == pytest.approx(1.003, abs=1e-3)
        assert constraint[1] == pytest.approx(1.003, abs=1e-3)
        assert constraint[2] == pytest.approx(1.389, abs=1e-3)

    def test_effective_size_directed(self):
        effective_size = nx.effective_size(self.D)
        assert effective_size[0] == pytest.approx(1.167, abs=1e-3)
        assert effective_size[1] == pytest.approx(1.167, abs=1e-3)
        assert effective_size[2] == pytest.approx(1, abs=1e-3)

    def test_constraint_weighted_directed(self):
        D = self.D.copy()
        nx.set_edge_attributes(D, self.D_weights, "weight")
        constraint = nx.constraint(D, weight="weight")
        assert constraint[0] == pytest.approx(0.840, abs=1e-3)
        assert constraint[1] == pytest.approx(1.143, abs=1e-3)
        assert constraint[2] == pytest.approx(1.378, abs=1e-3)

    def test_effective_size_weighted_directed(self):
        D = self.D.copy()
        nx.set_edge_attributes(D, self.D_weights, "weight")
        effective_size = nx.effective_size(D, weight="weight")
        assert effective_size[0] == pytest.approx(1.567, abs=1e-3)
        assert effective_size[1] == pytest.approx(1.083, abs=1e-3)
        assert effective_size[2] == pytest.approx(1, abs=1e-3)

    def test_constraint_undirected(self):
        constraint = nx.constraint(self.G)
        assert constraint["G"] == pytest.approx(0.400, abs=1e-3)
        assert constraint["A"] == pytest.approx(0.595, abs=1e-3)
        assert constraint["C"] == pytest.approx(1, abs=1e-3)

    def test_effective_size_undirected_borgatti(self):
        effective_size = nx.effective_size(self.G)
        assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
        assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
        assert effective_size["C"] == pytest.approx(1, abs=1e-2)

    def test_effective_size_undirected(self):
        G = self.G.copy()
        nx.set_edge_attributes(G, 1, "weight")
        effective_size = nx.effective_size(G, weight="weight")
        assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
        assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
        assert effective_size["C"] == pytest.approx(1, abs=1e-2)

    def test_constraint_weighted_undirected(self):
        G = self.G.copy()
        nx.set_edge_attributes(G, self.G_weights, "weight")
        constraint = nx.constraint(G, weight="weight")
        assert constraint["G"] == pytest.approx(0.299, abs=1e-3)
        assert constraint["A"] == pytest.approx(0.795, abs=1e-3)
        assert constraint["C"] == pytest.approx(1, abs=1e-3)

    def test_effective_size_weighted_undirected(self):
        G = self.G.copy()
        nx.set_edge_attributes(G, self.G_weights, "weight")
        effective_size = nx.effective_size(G, weight="weight")
        assert effective_size["G"] == pytest.approx(5.47, abs=1e-2)
        assert effective_size["A"] == pytest.approx(2.47, abs=1e-2)
        assert effective_size["C"] == pytest.approx(1, abs=1e-2)

    def test_constraint_isolated(self):
        G = self.G.copy()
        G.add_node(1)
        constraint = nx.constraint(G)
        assert math.isnan(constraint[1])

    def test_effective_size_isolated(self):
        G = self.G.copy()
        G.add_node(1)
        nx.set_edge_attributes(G, self.G_weights, "weight")
        effective_size = nx.effective_size(G, weight="weight")
        assert math.isnan(effective_size[1])

    def test_effective_size_borgatti_isolated(self):
        G = self.G.copy()
        G.add_node(1)
        effective_size = nx.effective_size(G)
        assert math.isnan(effective_size[1])
| 5,226 | 1,920 |
"""Tests for the logbook component."""
| 39 | 11 |
"""
Asset Allocation
By Patrick Murrell
Created 6/17/2020
This program takes a csv positions file from fidelity.com for a Roth IRA account that contains the
investments of SPAXX, FXNAX, FZILX, and FZROX. Since SPAXX is a Money Market fund then it is assumed that the money in
here is not meant to be calculated in the total asset allocation of the account.
Once the csv file is entered its data is scraped using the csv python library and the data used in calculations and
tables that display useful statistics to the user. The user then should enter the amount they want to invest, and then
click the "Calculate Investment Strategy" button to generate a table of values and display the recommended investment
strategy on three buttons. These three buttons tell us whether to buy or sell or hold a dollar amount of each
fund. Clicking these buttons copies their number values to the clipboard to make the buying and selling of stocks easier.
This is a program written ideally for a single user (my investment strategy), but anyone can use the code in order to
build their own version if they want.
"""
import csv # for the scraping of the csv file
import re # for making sure we just copy the buttons numbers
from PyQt5.QtWidgets import QFileDialog # to use the file browser in order to select a fidelity issued csv file
from PyQt5 import QtCore, QtWidgets, QtGui # to build the applications GUI
import sys # for starting and exiting the application
# noinspection PyBroadException
# noinspection PyBroadException
class UiMainWindow(object):
    """Main window UI plus the asset-allocation logic.

    Reads a Fidelity positions CSV, shows current vs. target allocation for
    three funds (bond / international index / national index) in a text
    table, and renders buy/sell/hold recommendations on three buttons that
    copy their dollar amounts to the clipboard.

    NOTE(review): open_csv and the copy_* methods reference the module-level
    globals `main_window` and `cb` created in the __main__ block, so the
    class only works when this script is run directly.
    """

    # decides whether or not we buy/sell/hold the current allocation of a fund
    def buy_or_sell(self, percentage, total, current, money_to_invest, key):
        """Build one button label: "Buy $X ...", "Sell $X ...", or "Looks good for ...".

        percentage      -- target fraction of the account for this fund
        total           -- account total including the new money
        current         -- current dollar balance of this fund
        money_to_invest -- new money being added (0 means pure rebalance check)
        key             -- column index of the fund's symbol in info_table[1]

        Side effect: appends the fund's target dollar value to self.target_value.
        NOTE(review): divides by `current` — raises ZeroDivisionError if a fund
        balance is $0; confirm the CSV can never contain a zero balance.
        """
        s: str  # the string we print onto the buttons
        target = total * percentage  # our ideal dollar amount invested in the fund
        actual_vs_target_ratio = target / current  # the ratio of the ideal target allocation and the current allocation
        # if the fund is within 5% of its target allocation and no new money is
        # being added/removed, leave it alone
        if .95 < actual_vs_target_ratio < 1.05 and int(money_to_invest) == 0:
            s = "Looks good for "
        else:  # buy or sell the exact amount of the fund so we hit the target allocation
            amount_to_trade = str(round(abs(current - target), 2))
            if actual_vs_target_ratio > 1.0:
                s = "Buy "
            else:
                s = "Sell "
            s += "$" + amount_to_trade + " "
        self.target_value.append(str(round(target, 2)))  # so we can display the target value in the info table
        s += self.info_table[1][key]  # add the name of the investment to the string
        return s  # return the text to add to the button

    # reads the csv file and adds the current balances of the investments to a list
    def scrape_values_from_csv(self):
        """Parse self.filename with the csv module.

        On success fills self.current_balances with the three fund balances and
        seeds self.info_table with headings, fund names and balances. On any
        failure sets self.current_balances to the [-1] sentinel that the rest
        of the class checks for.
        """
        temp_names = []  # temporarily stores labels of funds
        temp_balances = []  # temporarily stores current balances of funds
        csv_list = []  # list that stores the contents of the csv file
        self.current_balances.clear()  # clear the list of balances so we can replace them with the current csv values
        try:  # import the positions list exported from Fidelity
            with open(self.filename, 'r') as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:  # read the csv file contents into a list
                    csv_list.append(row)
        except:  # if this doesn't work we just notify the user by returning a list of -1 (a flag of sorts)
            self.current_balances = [-1]  # we check this to report that they did not enter a correct csv file
        else:
            # reset the info table list and set its values to the headings
            self.info_table = [['Symbol', 'Current Value', 'Current Allocation', 'Target value', 'Target Allocation']]
            # rows 2..4 hold the three funds; column 6 is the current value,
            # column 1 the symbol — assumes Fidelity's export layout; confirm
            # if the export format changes
            for i in range(2, 5):  # read through the csv list
                temp_balances.append(csv_list[i][6])  # access the current values
                self.current_balances.append(float(temp_balances[i - 2].replace('$', '')))  # remove the '$' sign
                temp_names.append(csv_list[i][1])  # add the name of the fund to label names
            self.info_table.append(temp_names)  # add the names and balances lists to the
            self.info_table.append(temp_balances)  # info table list

    # takes the csv values and the money to invest, updates the three buttons
    # and extends the info table with current/target allocations
    def calculate_strategy(self, money_to_invest):
        """Pick target percentages from the retirement year in the filename,
        then update the three recommendation buttons and the info table.

        Fixed asset-allocation percentages are keyed on the decade string
        ("2020", "2030", ...) appearing in self.filename; anything else falls
        back to 100% bonds.
        """
        # Fixed Asset Allocation Percentages based on age/year of user (mine is set to every 20XX year, because I was
        # born in 1999)
        if "2020" in self.filename:
            bond_percentage = .2
            international_index_percentage = .3
            national_index_percentage = .5
        elif "2030" in self.filename:
            bond_percentage = .3
            international_index_percentage = .27
            national_index_percentage = .43
        elif "2040" in self.filename:
            bond_percentage = .4
            international_index_percentage = .23
            national_index_percentage = .37
        elif "2050" in self.filename:
            bond_percentage = .5
            international_index_percentage = .19
            national_index_percentage = .31
        elif "2060" in self.filename:
            bond_percentage = .6
            international_index_percentage = .15
            national_index_percentage = .25
        elif "2070" in self.filename:
            bond_percentage = .7
            international_index_percentage = .11
            national_index_percentage = .19
        elif "2080" in self.filename:
            bond_percentage = .8
            international_index_percentage = .08
            national_index_percentage = .12
        elif "2090" in self.filename:
            bond_percentage = .9
            international_index_percentage = .04
            national_index_percentage = .06
        else:
            bond_percentage = 1.0
            international_index_percentage = 0.0
            national_index_percentage = 0.0
        total_amount = money_to_invest + sum(self.current_balances)  # total current amount of money to be invested
        self.target_value.clear()  # clear the target values list
        # updates the buttons to display the recommended asset allocation to the user
        self.bonds_button.setText(self._translate("main_window", self.buy_or_sell(  # set bonds button text
            bond_percentage, total_amount, self.current_balances[0], money_to_invest, 0)))
        self.international_button.setText(self._translate("main_window", self.buy_or_sell(  # set international button
            international_index_percentage, total_amount, self.current_balances[1], money_to_invest, 1)))
        self.national_button.setText(self._translate("main_window", self.buy_or_sell(  # set national button text
            national_index_percentage, total_amount, self.current_balances[2], money_to_invest, 2)))
        # add current allocation, ideal fund balances, and ideal allocation of account to info table list
        self.info_table.append([str(round(100 * self.current_balances[0] / (total_amount - money_to_invest), 2)) + "%",
                                str(round(100 * self.current_balances[1] / (total_amount - money_to_invest), 2)) + "%",
                                str(round(100 * self.current_balances[2] / (total_amount - money_to_invest), 2)) + "%"])
        self.info_table.append(self.target_value)
        self.info_table.append([str(100 * bond_percentage) + "%", str(100 * international_index_percentage) + "%",
                                str(100 * national_index_percentage) + "%"])

    # this method sets up the ui as well as a couple of variables used across the program
    def __init__(self, main_win):
        """Build the whole Qt widget tree on `main_win` and initialise state."""
        button_stylesheet = "background-color: #3F3F3F; color: #ffffff"  # style sheet
        self.info_table = []  # table of investment information and positions we print out to the user
        self.current_balances = [-1]  # current balances tracks the list of fund balances pulled from the csv file
        self.numbers = re.compile(r'\d+(?:\.\d+)?')  # regular expression that is used to copy the button text numbers
        self.target_value = []  # stores the ideal balance values for each fund
        self.filename = ''  # name path of csv file is stored here
        self._translate = QtCore.QCoreApplication.translate  # shortened function name for ease of use
        # UI related code generated by PyQt file
        main_win.setObjectName("main_window")
        main_win.resize(780, 350)
        main_win.setAutoFillBackground(True)
        main_win.setStyleSheet("background-color: #4a4a4a; color: #ffffff; font: 10pt 'Consolas'")
        self.central_widget = QtWidgets.QWidget(main_win)
        self.central_widget.setAutoFillBackground(True)
        self.central_widget.setObjectName("central_widget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.central_widget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.main_vlayout = QtWidgets.QVBoxLayout()
        self.main_vlayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
        self.main_vlayout.setContentsMargins(5, 5, 5, 5)
        self.main_vlayout.setSpacing(5)
        self.main_vlayout.setObjectName("main_vlayout")
        # row 1: amount-to-invest entry
        self.entry_hlayout = QtWidgets.QHBoxLayout()
        self.entry_hlayout.setContentsMargins(5, 5, 5, 5)
        self.entry_hlayout.setSpacing(5)
        self.entry_hlayout.setObjectName("entry_hlayout")
        self.entry_label = QtWidgets.QLabel(self.central_widget)
        self.entry_label.setObjectName("entry_label")
        self.entry_hlayout.addWidget(self.entry_label)
        self.entry_lineEdit = QtWidgets.QLineEdit(self.central_widget)
        self.entry_lineEdit.setObjectName("entry_lineEdit")
        self.entry_hlayout.addWidget(self.entry_lineEdit)
        spacer_item = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.entry_hlayout.addItem(spacer_item)
        self.main_vlayout.addLayout(self.entry_hlayout)
        # row 2: browse-CSV and calculate buttons
        self.two_button_horizontal = QtWidgets.QHBoxLayout()
        self.two_button_horizontal.setContentsMargins(5, 5, 5, 5)
        self.two_button_horizontal.setSpacing(5)
        self.two_button_horizontal.setObjectName("two_button_horizontal")
        self.csv_button = QtWidgets.QPushButton(self.central_widget)
        self.csv_button.setObjectName("csv_button")
        self.csv_button.setStyleSheet(button_stylesheet)
        self.csv_button.clicked.connect(self.open_csv)  # when csv button is clicked run the open csv method
        self.two_button_horizontal.addWidget(self.csv_button)
        self.calculate_button = QtWidgets.QPushButton(self.central_widget)
        self.calculate_button.setStyleSheet(button_stylesheet)
        self.calculate_button.setObjectName("calculate_button")
        self.calculate_button.clicked.connect(self.calculate)  # when the calculate button is clicked run calculate()
        self.two_button_horizontal.addWidget(self.calculate_button)
        self.main_vlayout.addLayout(self.two_button_horizontal)
        # row 3: status/error label and the info table label
        self.error_vlayout = QtWidgets.QVBoxLayout()
        self.error_vlayout.setContentsMargins(5, 5, 5, 5)
        self.error_vlayout.setSpacing(5)
        self.error_vlayout.setObjectName("error_vlayout")
        self.error_label = QtWidgets.QLabel(self.central_widget)
        self.error_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.error_label.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.error_label.setFrameShadow(QtWidgets.QFrame.Plain)
        self.error_label.setAlignment(QtCore.Qt.AlignCenter)
        self.error_label.setObjectName("error_label")
        self.info_label = QtWidgets.QLabel(self.central_widget)
        self.info_label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.info_label.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.info_label.setFrameShadow(QtWidgets.QFrame.Plain)
        self.info_label.setAlignment(QtCore.Qt.AlignCenter)
        # NOTE(review): object name duplicates "error_label" — probably meant
        # "info_label"; confirm before relying on findChild lookups.
        self.info_label.setObjectName("error_label")
        self.error_vlayout.addWidget(self.info_label)
        self.error_vlayout.addWidget(self.error_label)
        self.main_vlayout.addLayout(self.error_vlayout)
        # row 4: the three buy/sell/hold recommendation buttons
        self.three_button_horizontal = QtWidgets.QHBoxLayout()
        self.three_button_horizontal.setContentsMargins(5, 5, 5, 5)
        self.three_button_horizontal.setSpacing(5)
        self.three_button_horizontal.setObjectName("three_button_horizontal")
        self.bonds_button = QtWidgets.QPushButton(self.central_widget)
        self.bonds_button.setObjectName("bonds_button")
        self.bonds_button.setStyleSheet(button_stylesheet)
        self.bonds_button.clicked.connect(self.copy_bond_number)
        self.three_button_horizontal.addWidget(self.bonds_button)
        self.international_button = QtWidgets.QPushButton(self.central_widget)
        self.international_button.setObjectName("international_button")
        self.international_button.setStyleSheet(button_stylesheet)
        self.international_button.clicked.connect(self.copy_international_number)
        self.three_button_horizontal.addWidget(self.international_button)
        self.national_button = QtWidgets.QPushButton(self.central_widget)
        self.national_button.setObjectName("national_button")
        self.national_button.setStyleSheet(button_stylesheet)
        self.national_button.clicked.connect(self.copy_national_number)
        self.three_button_horizontal.addWidget(self.national_button)
        self.main_vlayout.addLayout(self.three_button_horizontal)
        self.verticalLayout_2.addLayout(self.main_vlayout)
        main_win.setCentralWidget(self.central_widget)
        self.menubar = QtWidgets.QMenuBar(main_win)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 884, 21))
        self.menubar.setObjectName("menubar")
        main_win.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_win)
        self.statusbar.setObjectName("statusbar")
        main_win.setStatusBar(self.statusbar)
        self.reanimate_ui(main_win)
        QtCore.QMetaObject.connectSlotsByName(main_win)

    # Ui function that sets initial ui text
    def reanimate_ui(self, main_w):
        """Set the window title and the initial (mostly empty) widget texts."""
        main_w.setWindowTitle(self._translate("main_window", "Asset Allocation"))
        self.entry_label.setText(self._translate("main_window", "The amount you want to invest:"))
        self.csv_button.setText(self._translate("main_window", "Browse For CSV"))
        self.calculate_button.setText(self._translate("main_window", "Calculate Investment Strategy"))
        self.error_label.setText(self._translate("main_window", ""))
        self.info_label.setText(self._translate("main_window", ""))
        self.bonds_button.setText(self._translate("main_window", ""))
        self.international_button.setText(self._translate("main_window", ""))
        self.national_button.setText(self._translate("main_window", ""))

    # creates a file explorer dialog to select csv. checks and reports if a valid csv was selected
    def open_csv(self):
        """Show a file dialog, scrape the chosen CSV, and report the result."""
        # open and select file from csv button (uses the module-level
        # `main_window` global as the dialog parent)
        filename_list = list(QFileDialog.getOpenFileName(main_window, 'Open file', "/", "csv files (*.csv)"))
        self.filename = str(filename_list[0])
        self.scrape_values_from_csv()
        if self.current_balances == [-1]:  # if a csv file is not detected
            self.csv_file_error()  # report an error to the user
        else:
            self.error_label.setText(self._translate("main_window", self.filename))  # show the file name to the user

    # check to make sure the user entered either a number or nothing, also entered a csv, then run calculate_strategy()
    def calculate(self):
        """Validate the entry field, run the strategy, and render the info table."""
        try:
            amount_to_invest = float(self.entry_lineEdit.text())  # check to see if the user entered a proper number
        except:
            if self.entry_lineEdit.text() == '':  # if the user enters nothing assume they are investing $0.00
                amount_to_invest = 0.00
            else:  # since the user did not enter a number throw an error and exit the function
                self.error_label.setText(self._translate("main_window", "You did not enter a valid amount"))
                return
        if self.current_balances != [-1]:  # if the user entered a valid csv
            self.calculate_strategy(amount_to_invest)  # calculate our strategy and fill the rest of the info table list
            self.error_label.setText(self._translate("main_window", "Strategy Calculated"))
            # print our info table list onto the screen in the form of a table
            s = 'Values From CSV: \n\n|'  # create the info table in a string called s
            for i in range(len(self.info_table[0])):  # header row of column titles
                s += "{:20}|".format((str(self.info_table[0][i]).ljust(15)))
            s += "\n" + "-" * int((len(s) * .85))  # separator line under the header
            for i in range(len(self.info_table[1])):  # one row per fund
                s += "\n|"
                for j in range(1, len(self.info_table)):  # one cell per column
                    s += "{:20}|".format((str(self.info_table[j][i]).ljust(15)))
            s += '\n'
            self.info_label.setText(self._translate("main_window", s))  # set the info label to the info table
            for i in range(3):  # remove last three values of info table list so they do not overlap with themselves
                self.info_table.remove(self.info_table[len(self.info_table) - 1])
        else:
            self.csv_file_error()  # report an error to the user

    # methods that copy the text of how much to buy/sell/hold from button onto clipboard
    # (they extract just the numeric part via the self.numbers regex and use
    # the module-level clipboard global `cb`)
    def copy_bond_number(self):
        """Copy the dollar amount from the bonds button to the clipboard."""
        cb.setText(''.join(self.numbers.findall(self.bonds_button.text())), mode=cb.Clipboard)

    def copy_national_number(self):
        """Copy the dollar amount from the national-index button to the clipboard."""
        cb.setText(''.join(self.numbers.findall(self.national_button.text())), mode=cb.Clipboard)

    def copy_international_number(self):
        """Copy the dollar amount from the international-index button to the clipboard."""
        cb.setText(''.join(self.numbers.findall(self.international_button.text())), mode=cb.Clipboard)

    # report an error if a csv file is not detected (when self.current_balances == [-1])
    def csv_file_error(self):
        """Show the missing/invalid-CSV message in the error label."""
        self.error_label.setText(self._translate("main_window", "you did not enter a csv file"))
# main function that starts and closes the app
# main function that starts and closes the app
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    # NOTE: `main_window` and `cb` are read as module-level globals by
    # UiMainWindow (open_csv and the copy_* methods) — do not rename.
    main_window = QtWidgets.QMainWindow()
    cb = QtWidgets.QApplication.clipboard()
    cb.clear(mode=cb.Clipboard)  # start with an empty clipboard
    ui = UiMainWindow(main_window)
    main_window.show()
    sys.exit(app.exec_())  # run the Qt event loop until the window closes
| 19,069 | 5,778 |
#!/usr/bin/env python
__author__ = 'Kurohashi-Kawahara Lab, Kyoto Univ.'
__email__ = 'contact@nlp.ist.i.kyoto-u.ac.jp'
__copyright__ = ''
__license__ = 'See COPYING'
import os

from setuptools import setup, find_packages

# Load the version without importing the package (it may not be installable
# yet). The `with` block closes the file deterministically — the original
# `exec(open(...).read(), ...)` leaked the handle until GC.
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'pyknp', '__version__.py')) as version_file:
    exec(version_file.read(), about)

# The README doubles as the PyPI long description.
with open('README.md', encoding='utf8') as f:
    long_description = f.read()

setup(
    name='pyknp',
    version=about['__version__'],
    maintainer=__author__,
    maintainer_email=__email__,
    author=__author__,
    author_email=__email__,
    description='Python module for JUMAN/KNP.',
    license=__license__,
    url='https://github.com/ku-nlp/pyknp',
    long_description=long_description,
    long_description_content_type='text/markdown',
    scripts=['pyknp/scripts/knp-drawtree', ],
    packages=find_packages(),
    install_requires=['six'],
)
| 919 | 340 |
import collections
from supriya import CalculationRate
from supriya.ugens.DUGen import DUGen
class Dreset(DUGen):
    """
    Resets demand-rate UGens.

    ::

        >>> source = supriya.ugens.Dseries(start=0, step=2)
        >>> dreset = supriya.ugens.Dreset(
        ...     reset=0,
        ...     source=source,
        ...     )
        >>> dreset
        Dreset()

    """

    ### CLASS VARIABLES ###

    # Input order matters: the DUGen base class derives the constructor
    # signature and the server-side input layout from this mapping.
    # `source` has no default; `reset` defaults to 0 (no reset).
    _ordered_input_names = collections.OrderedDict([("source", None), ("reset", 0)])

    # Demand-rate only, like all D* UGens.
    _valid_calculation_rates = (CalculationRate.DEMAND,)
| 559 | 195 |
from database.models import Team, UserProfile
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError
from _main_.utils.massenergize_response import MassenergizeResponse
from _main_.utils.context import Context
class TeamStore:
    """Data-access layer for Team objects.

    Every method returns a `(result, error)` pair: exactly one of the two is
    non-None (except list methods, which may return an empty list with no
    error).
    """

    def __init__(self):
        self.name = "Team Store/DB"

    def get_team_info(self, team_id) -> (dict, MassEnergizeAPIError):
        """Return the queryset for the team with `team_id`, or an error if absent."""
        team = Team.objects.filter(id=team_id)
        if not team:
            return None, InvalidResourceError()
        return team, None

    def list_teams(self, community_id) -> (list, MassEnergizeAPIError):
        """Return all teams belonging to the given community (possibly empty)."""
        teams = Team.objects.filter(community__id=community_id)
        if not teams:
            return [], None
        return teams, None

    def create_team(self, args) -> (dict, MassEnergizeAPIError):
        """Create and persist a new team from `args`; ServerError on failure."""
        try:
            new_team = Team.create(**args)
            new_team.save()
            return new_team, None
        except Exception:
            return None, ServerError()

    def update_team(self, team_id, args) -> (dict, MassEnergizeAPIError):
        """Apply `args` to the team with `team_id`; error if it does not exist."""
        team = Team.objects.filter(id=team_id)
        if not team:
            return None, InvalidResourceError()
        team.update(**args)
        return team, None

    def delete_team(self, team_id) -> (dict, MassEnergizeAPIError):
        """Delete the team with `team_id`.

        Bug fix: the original filtered and validated but never actually
        deleted anything, and fell through returning bare None instead of the
        `(result, error)` pair callers unpack.
        """
        teams = Team.objects.filter(id=team_id)
        if not teams:
            return None, InvalidResourceError()
        teams.delete()
        return teams, None

    def list_teams_for_community_admin(self, community_id) -> (list, MassEnergizeAPIError):
        """Return all teams in the community for a community admin."""
        teams = Team.objects.filter(community__id = community_id)
        return teams, None

    def list_teams_for_super_admin(self):
        """Return every team in the database for a super admin."""
        try:
            teams = Team.objects.all()
            return teams, None
        except Exception as e:
            print(e)
            return None, CustomMassenergizeError(str(e))
"""
https://oj.leetcode.com/problems/valid-parentheses/
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
class Solution:
    # @return a boolean
    def isValid(self, s):
        """Return True iff every bracket in `s` closes in the correct order.

        Uses a stack of open brackets and a closer->opener map. The original
        tested both (opener, closer) and (closer, opener) pairings even though
        the popped value is always the opener, and used an IndexError to
        detect an empty stack; this version expresses the same behavior
        directly. Non-bracket characters are treated as mismatched closers,
        as before.
        """
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in '({[':
                stack.append(ch)
            # Closer with an empty stack, or a popped opener that does not
            # match (pairs.get returns None for non-bracket chars) -> invalid.
            elif not stack or stack.pop() != pairs.get(ch):
                return False
        # Valid only if every opener was consumed.
        return not stack
if __name__ == '__main__':
    # Smoke-test the validator with known-valid and known-invalid inputs.
    checker = Solution()
    valid_cases = ['()', '[]', '([]{})[]']
    invalid_cases = ['[', '}', '([', '([)]']
    for case in valid_cases:
        assert checker.isValid(case)
    for case in invalid_cases:
        assert not checker.isValid(case)
| 1,125 | 356 |
from random import random
def joga_moeda():
if random() > 0.5:
return "Coroa"
else:
return "Cara"
print (joga_moeda()) | 142 | 52 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-30 16:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make `shop.User.role` a nullable IntegerField."""

    dependencies = [
        ('shop', '0005_frequentlysearched_latestsearches_soldvehicles_transaction'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='role',
            field=models.IntegerField(null=True),
        ),
    ]
| 483 | 166 |
import pytest
from model_bakery import baker
pytestmark = pytest.mark.django_db
def test_get_global_settings(client_anonymous):
    """The anonymous /global_settings endpoint exposes the stored product VAT."""
    global_setting = baker.make_recipe("settings.global_setting")
    payload = client_anonymous.get("/global_settings").json()
    assert payload == {"productVat": global_setting.product_vat}
| 315 | 97 |
import click
from dotpyle.commands.add.dotfile import dotfile
@click.group()
def add():
    """Add a resource (such as a dotfile) to dotpyle management."""
    # The docstring above is the user-facing `--help` text for this group.
    # The original text ("This command will take KEY and ... DOTFILE") was a
    # garbled placeholder; subcommands document their own arguments.


add.add_command(dotfile)
import sys
import os
from common.invoker import newRequest
from common.invoker import request
from common.invoker import threadize
# Target port defaults to 8080 but can be overridden by the first CLI arg.
port = 8080
if len(sys.argv) > 1:
    port = int(sys.argv[1])

# Total number of requests to issue across the worker threads.
workload = 10000

req = newRequest(
    "GET",
    "http://localhost:{0}/".format(port),
    headers = {
        "X-FUNCTION": "hello"
    }
)

# Resolve the script's directory so the output path works from anywhere.
base = os.path.dirname(__file__)
if base == '':
    base = '.'

# Fan the workload out over 12 threads without connection reuse.
entries = threadize(workload, req, num = 12, reuse = False)

# One comma-separated line per result. `with` guarantees the file is closed
# even if a write fails (the original left the handle open on error), and
# map(str, ...) replaces the redundant lambda wrapper.
with open(base + '/data/response.txt', 'w') as file_object:
    for entry in entries:
        file_object.write(",".join(map(str, entry)))
        file_object.write('\n')
| 646 | 233 |
import sys
import numpy as np
import h5py
import random
import os
from subprocess import check_output
# 1. h5 i/o
def readh5(filename, datasetname):
    """Read dataset `datasetname` from HDF5 file `filename` into a numpy array.

    Uses a context manager so the file handle closes deterministically; the
    original relied on garbage collection to close it.
    """
    with h5py.File(filename, 'r') as fid:
        data = np.array(fid[datasetname])
    return data
def writeh5(filename, datasetname, dtarray):
    """Write `dtarray` to `filename` as gzip-compressed dataset `datasetname`.

    The file is (over)written in 'w' mode; a context manager guarantees it is
    closed even if dataset creation or the copy raises.
    """
    with h5py.File(filename, 'w') as fid:
        ds = fid.create_dataset(datasetname, dtarray.shape, compression="gzip", dtype=dtarray.dtype)
        ds[:] = dtarray
def readh5k(filename, datasetname):
    """Read several datasets from one HDF5 file into a dict keyed by name.

    `datasetname` is an iterable of dataset keys. Fixes: the original called
    the undefined name `array` (NameError on first use — should be np.array),
    opened the file without an explicit read-only mode, and did not close it
    on error; all three are corrected here.
    """
    data = {}
    with h5py.File(filename, 'r') as fid:
        for kk in datasetname:
            data[kk] = np.array(fid[kk])
    return data
def writeh5k(filename, datasetname, dtarray):
    """Write several arrays to one HDF5 file, gzip-compressed.

    `datasetname` is an iterable of keys; `dtarray` maps each key to its
    array. A context manager guarantees the file is closed on error
    (consistent with readh5/writeh5 above).
    """
    with h5py.File(filename, 'w') as fid:
        for kk in datasetname:
            ds = fid.create_dataset(kk, dtarray[kk].shape, compression="gzip", dtype=dtarray[kk].dtype)
            ds[:] = dtarray[kk]
def resizeh5(path_in, path_out, dataset, ratio=(0.5, 0.5), interp=2, offset=(0, 0, 0)):
    """Crop and downsample an HDF5 volume slice-by-slice.

    path_in/path_out -- source file and destination file (if path_out is None
        the resized array is returned instead of written).
    dataset -- dataset name in both files.
    ratio   -- per-axis in-plane zoom factors for the last two dimensions.
    interp  -- spline order passed to scipy's zoom.
    offset  -- symmetric crop per axis applied before/after resizing.

    Handles 3D (z, y, x) and 4D (z, c, y, x) volumes. Fixes: `xrange` is
    Python-2-only (NameError on py3) and is replaced by `range`; the mutable
    list default for `offset` is replaced by an equivalent tuple (it is only
    indexed, never mutated, so behavior is unchanged).
    """
    from scipy.ndimage.interpolation import zoom
    # for half-res
    im = h5py.File(path_in, 'r')[dataset][:]
    shape = im.shape
    if len(shape) == 3:
        im_out = np.zeros((shape[0] - 2 * offset[0], int(np.ceil(shape[1] * ratio[0])), int(np.ceil(shape[2] * ratio[1]))), dtype=im.dtype)
        for i in range(shape[0] - 2 * offset[0]):
            im_out[i, ...] = zoom(im[i + offset[0], ...], zoom=ratio, order=interp)
        if offset[1] != 0:
            im_out = im_out[:, offset[1]:-offset[1], offset[2]:-offset[2]]
    elif len(shape) == 4:
        im_out = np.zeros((shape[0] - 2 * offset[0], shape[1], int(shape[2] * ratio[0]), int(shape[3] * ratio[1])), dtype=im.dtype)
        for i in range(shape[0] - 2 * offset[0]):
            for j in range(shape[1]):
                im_out[i, j, ...] = zoom(im[i + offset[0], j, ...], ratio, order=interp)
        if offset[1] != 0:
            im_out = im_out[:, offset[1]:-offset[1], offset[2]:-offset[2], offset[3]:-offset[3]]
    if path_out is None:
        return im_out
    writeh5(path_out, dataset, im_out)
def writetxt(filename, dtarray):
    """Write the string `dtarray` to `filename`, overwriting any existing file.

    Uses a context manager so the handle closes even if the write raises.
    """
    with open(filename, 'w') as fout:
        fout.write(dtarray)
# 2. segmentation wrapper
def segToAffinity(seg):
    """Convert a segmentation volume into a 3D affinity graph via malis.

    NOTE(review): depends on the compiled `malis_core` extension in the
    sibling `lib` package; the import is deferred so the rest of this module
    works without it.
    """
    from ..lib import malis_core as malisL
    nhood = malisL.mknhood3d()
    return malisL.seg_to_affgraph(seg,nhood)
def bwlabel(mat):
    """Histogram of the integer values in `mat` over [mat.min(), mat.max()].

    Returns an array `out` where out[v - mat.min()] is the number of elements
    equal to v. Bug fix: the original indexed `out[i]` instead of
    `out[i - ran[0]]`, which raised IndexError whenever mat.min() > 0 and
    silently wrapped for negative minima; for the common mat.min() == 0 case
    behavior is unchanged.
    """
    ran = [int(mat.min()), int(mat.max())]
    out = np.zeros(ran[1] - ran[0] + 1)
    for i in range(ran[0], ran[1] + 1):
        out[i - ran[0]] = np.count_nonzero(mat == i)
    return out
def genSegMalis(gg3,iter_num): # given input seg map, widen the seg border
    """Zero out a widened border band between segments in a 3D label volume.

    gg3 -- integer segmentation volume (z, y, x).
    iter_num -- number of binary-dilation iterations; controls border width.

    Returns a copy of gg3 with voxels on (dilated) segment boundaries set to
    0, which malis-style training presumably treats as "ignore"/background —
    TODO confirm against the training pipeline.
    """
    from scipy.ndimage import morphology as skmorph
    #from skimage import morphology as skmorph
    # In-plane label differences: nonzero where a voxel differs from its
    # -y or -x neighbor, i.e. at segment boundaries within each slice.
    gg3_dz = np.zeros(gg3.shape).astype(np.uint32)
    gg3_dz[1:,:,:] = (np.diff(gg3,axis=0))
    gg3_dy = np.zeros(gg3.shape).astype(np.uint32)
    gg3_dy[:,1:,:] = (np.diff(gg3,axis=1))
    gg3_dx = np.zeros(gg3.shape).astype(np.uint32)
    gg3_dx[:,:,1:] = (np.diff(gg3,axis=2))
    # NOTE(review): only the x/y diffs feed the boundary mask; gg3_dz is
    # computed but unused — boundaries are treated per-slice.
    gg3g = ((gg3_dx+gg3_dy)>0)
    #stel=np.array([[1, 1],[1,1]]).astype(bool)
    #stel=np.array([[0, 1, 0],[1,1,1], [0,1,0]]).astype(bool)
    # 3x3 all-ones structuring element (8-connected dilation).
    stel=np.array([[1, 1, 1],[1,1,1], [1,1,1]]).astype(bool)
    #stel=np.array([[1,1,1,1],[1, 1, 1, 1],[1,1,1,1],[1,1,1,1]]).astype(bool)
    gg3gd=np.zeros(gg3g.shape)
    # Dilate the boundary mask slice by slice to widen the border band.
    for i in range(gg3g.shape[0]):
        gg3gd[i,:,:]=skmorph.binary_dilation(gg3g[i,:,:],structure=stel,iterations=iter_num)
    out = gg3.copy()
    out[gg3gd==1]=0
    #out[0,:,:]=0 # for malis
    return out
# 3. evaluation
"""
def runBash(cmd):
fn = '/tmp/tmp_'+str(random.random())[2:]+'.sh'
print('tmp bash file:',fn)
writetxt(fn, cmd)
os.chmod(fn, 0755)
out = check_output([fn])
os.remove(fn)
print(out)
"""
| 3,679 | 1,567 |
#!/usr/bin/env python
# Extract three "computational planes" (constant-i slices) from a structured
# PLOT3D dataset and warp them by the scalar field along the local normal,
# producing a velocity-profile-like picture of the flow. Structured data has
# a natural i-j-k coordinate system; an imin,imax, jmin,jmax, kmin,kmax pair
# selects a point, line, plane, or volume.
import vtk
from vtk.util.misc import vtkGetDataRoot

VTK_DATA_ROOT = vtkGetDataRoot()

# Read data from an annular combustor. A combustor burns fuel and air in a
# gas turbine (e.g., a jet engine); the hot gas eventually reaches the
# turbine section.
reader = vtk.vtkMultiBlockPLOT3DReader()
reader.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
reader.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
reader.SetScalarFunctionNumber(100)
reader.SetVectorFunctionNumber(202)
reader.Update()
grid = reader.GetOutput().GetBlock(0)

# One geometry filter per extracted i-plane; min/max i,j,k values are clamped
# to the dataset extent. An append filter merges them so a single pipeline
# and actor handle the warping.
appendF = vtk.vtkAppendPolyData()
for i_plane in (10, 30, 45):
    extractor = vtk.vtkStructuredGridGeometryFilter()
    extractor.SetInputData(grid)
    extractor.SetExtent(i_plane, i_plane, 1, 100, 1, 100)
    appendF.AddInputConnection(extractor.GetOutputPort())

warp = vtk.vtkWarpScalar()
warp.SetInputConnection(appendF.GetOutputPort())
warp.UseNormalOn()
warp.SetNormal(1.0, 0.0, 0.0)
warp.SetScaleFactor(2.5)

normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(warp.GetOutputPort())
normals.SetFeatureAngle(60)

planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(normals.GetOutputPort())
planeMapper.SetScalarRange(grid.GetScalarRange())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)

# The outline provides context for the data and the planes.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(grid)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)

# Standard rendering boilerplate.
renderer = vtk.vtkRenderer()
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
renderer.AddActor(outlineActor)
renderer.AddActor(planeActor)
renderer.SetBackground(1, 1, 1)
window.SetSize(500, 500)

# Initial camera view.
camera = renderer.GetActiveCamera()
camera.SetClippingRange(3.95297, 50)
camera.SetFocalPoint(8.88908, 0.595038, 29.3342)
camera.SetPosition(-12.3332, 31.7479, 41.2387)
camera.SetViewUp(0.060772, -0.319905, 0.945498)

interactor.Initialize()
window.Render()
interactor.Start()
| 3,277 | 1,277 |
from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.integration import SQS
from diagrams.aws.network import ELB
from diagrams.aws.storage import S3
from diagrams.onprem.ci import Jenkins
from diagrams.onprem.client import Client, User, Users
from diagrams.onprem.compute import Server
from diagrams.onprem.container import Docker
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Internet
from diagrams.programming.framework import Spring, React
# Graphviz attributes applied to the whole diagram.
graph_attr = {
    "fontsize": "20",
    "bgcolor": "white"  # use "transparent" for a transparent background
}

with Diagram("Application Architecture", graph_attr=graph_attr, outformat="png", filename="application_architecture"):
    # NOTE: every constructor call creates a *new* node, so each of the three
    # lines below draws its own lb/web/userdb chain.
    ELB("lb") >> EC2("web") >> RDS("userdb") >> S3("store")
    ELB("lb") >> EC2("web") >> RDS("userdb") << EC2("stat")
    (ELB("lb") >> EC2("web")) - EC2("web") >> RDS("userdb")
    with Cluster("Application Context"):
        app = EC2("Spring Boot")
    ELB("lb") >> app
    metrics = Prometheus("metric")
    # Dashed red edge: Grafana reads from Prometheus.
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")
    Jenkins("CI")
    # Fan-out from a single client node to assorted services/components.
    client = Client("A")
    client >> User("B") >> Users("S")
    client >> PostgreSQL("Database")
    client >> Internet("Remote API")
    client >> Docker("Docker")
    client >> Server("Server")
    client >> SQS("Sync Books")
    client >> Spring("Backend")
    client >> React("React")
| 1,482 | 477 |
from django.db import models
class InputFile(models.Model):
    """Uploaded file pair with a display name and a privacy flag."""

    # Primary uploaded file, stored under input/<year>/<month>/<day>/.
    input = models.FileField(upload_to='input/%Y/%m/%d')
    # Companion file, stored under documents/<year>/<month>/<day>/.
    next_one = models.FileField(upload_to='documents/%Y/%m/%d')
    # Human-readable name for the upload.
    name = models.CharField(max_length=100)
    # Privacy flag; exact semantics are defined by the views using this model.
    privacy = models.BooleanField(default=False)
| 259 | 93 |
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from examples.console import util
from examples.props.ai import AnalogInputProps
use_device_detection = True
def run_example():
    """Read one analog-input sample from the first detected MCC DAQ board
    and print both the raw value and the engineering-unit value.

    Releases the device in all cases when device detection is enabled.
    """
    board_num = 0
    if use_device_detection:
        ul.ignore_instacal()
        if not util.config_first_detected_device(board_num):
            print("Could not find device.")
            return
    channel = 0
    ai_props = AnalogInputProps(board_num)
    if ai_props.num_ai_chans < 1:
        util.print_unsupported_example(board_num)
        return
    # Use the first input range the board supports.
    ai_range = ai_props.available_ranges[0]
    try:
        # Get a value from the device
        if ai_props.resolution <= 16:
            # Use the a_in method for devices with a resolution <= 16
            value = ul.a_in(board_num, channel, ai_range)
            # Convert the raw value to engineering units
            eng_units_value = ul.to_eng_units(board_num, ai_range, value)
        else:
            # Use the a_in_32 method for devices with a resolution > 16
            # (optional parameter omitted)
            value = ul.a_in_32(board_num, channel, ai_range)
            # Convert the raw value to engineering units
            eng_units_value = ul.to_eng_units_32(board_num, ai_range, value)
        # Display the raw value
        print("Raw Value: " + str(value))
        # Display the engineering value
        print("Engineering Value: " + '{:.3f}'.format(eng_units_value))
    except ULError as e:
        util.print_ul_error(e)
    finally:
        if use_device_detection:
            ul.release_daq_device(board_num)


if __name__ == '__main__':
    run_example()
| 1,751 | 554 |
from oocsi import OOCSI
from NAO_Speak import NAO_Speak # (file name followed by class name)
import unidecode
#################################
IP = "IP_OF_PEPPER_ROBOT"  # network address of the Pepper/NAO robot
text = ""  # NOTE(review): appears unused in this module
# Speech wrapper for the robot; 9559 is presumably the NAOqi port — confirm.
my_nao = NAO_Speak(IP, 9559)
##################################
def receiveEvent(sender, recipient, event):
    """OOCSI message callback: speak the incoming message on the robot.

    Only messages whose sender name starts with 'webclient' (before the first
    underscore) are spoken; everything is logged to stdout.
    """
    print('from ', sender, ' -> ', event)
    # this will convert unicode string to plain string
    msg = str(event['message'])
    sender = str(sender)
    # BUGFIX: the original `x, y = sender.split('_')` raised ValueError for
    # any sender without exactly one underscore; take the prefix instead.
    source = sender.split('_')[0]
    if source == 'webclient':
        my_nao.say_text(msg)
if __name__ == "__main__":
#o = OOCSI('abc', "oocsi.id.tue.nl", callback=receiveEvent)
o = OOCSI('pepper_receiver', 'oocsi.id.tue.nl')
o.subscribe('__test123__', receiveEvent)
| 721 | 263 |
# encoding:utf-8
import asyncio
import os
import mimetypes
from urllib import parse
# Canned HTTP/1.0 responses, keyed by status code.
response = {
    # 200: [b'HTTP/1.0 200 OK\r\n',  # normal response
    #       b'Connection: close\r\n',
    #       b'Content-Type:text/html; charset=utf-8\r\n',
    #       b'\r\n'],
    404: [b'HTTP/1.0 404 Not Found\r\n',  # response when the requested file does not exist
          b'Connection: close\r\n',
          b'Content-Type:text/html; charset=utf-8\r\n',
          b'\r\n',
          b'<html><body>404 Not Found<body></html>\r\n',
          b'\r\n'],
    405: [b'HTTP/1.0 405 Method Not Allowed\r\n',  # response for request methods other than GET/HEAD
          b'Connection: close\r\n',
          b'Content-Type:text/html; charset=utf-8\r\n',
          b'\r\n',
          b'<html><body>405 Method Not Allowed<body></html>\r\n',
          b'\r\n'],
    416: [b'HTTP/1.0 416 Requested Range Not Satisfiable\r\n',  # Range header error
          b'Connection: close\r\n',
          b'Content-Type:text/html; charset=utf-8\r\n',
          b'\r\n',
          b'<html><body>416 Requested Range Not Satisfiable<body></html>\r\n',
          b'\r\n']
}
# get mime by mimetypes.guess_type
def get_mime(path):
    """Return the MIME type guessed for *path*, or a generic binary type.

    Unknown types fall back to application/octet-stream so the browser
    downloads the file rather than trying to render it.
    """
    guessed, _encoding = mimetypes.guess_type(path)
    return guessed if guessed is not None else 'application/octet-stream'
# Extract the location entry from the tokenized Cookie header.
def get_cookie(raw_cookie):
    """Return the 'loc=...' token from *raw_cookie* (a list of header
    tokens), stripped of trailing ';' and 'path=/'; None when absent."""
    for token in raw_cookie:
        for part in token.strip('\r\n').split(' '):
            if 'loc=' in part:
                return part.strip(';').replace('path=/', '')
async def dispatch(reader, writer):
    """Serve one HTTP/1.0 request on an asyncio stream pair.

    Supports GET/HEAD only; serves files (with byte-Range support),
    directory listings (which also set a 'loc' cookie), and a cookie-driven
    302 redirect from '/' back to the last visited directory.
    """
    header = {}
    # Read header lines until the blank separator (or connection EOF).
    while True:
        data = await reader.readline()
        if data == b'\r\n':
            break
        if data == b'':
            break
        message = data.decode().split(' ')
        # seperate the header and store in the dictionary
        if message[0] == 'GET' or message[0] == 'HEAD':
            header['METHOD'] = message[0]
            header['PATH'] = message[1]
        if message[0] == 'Range:':
            header['RANGE'] = message[1]
        if message[0] == 'Cookie:':
            header['COOKIE'] = message
        if message[0] == 'Referer:':
            header['REFERER'] = message[1]
        if message[0] == 'Host:':
            header['HOST'] = message[1]
    """test start"""
    print('----------header')
    print(header)
    print('----------header')
    """test end"""
    # Handle the header
    r_head = []
    r = []
    if 'METHOD' not in header:
        # if the request is not GET or HEAD
        writer.writelines(response[405])
        await writer.drain()
        writer.close()
        return
    cookie = ''
    if 'COOKIE' in header:
        # get the location
        cookie = get_cookie(header['COOKIE'])
        """test start"""
        # print('----------cookie')
        # print(cookie)
        # print('----------cookie')
        """test end"""
    # set http status
    if 'RANGE' in header:
        r_head.append(b'HTTP/1.0 206 Partial Content\r\n')
    else:
        # Redirect '/' requests (no Referer) back to the cookie's location.
        if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \
                'loc=' in cookie and cookie != 'loc=/':
            r_head.append(b'HTTP/1.0 302 Found\r\n')
        else:
            r_head.append(b'HTTP/1.0 200 OK\r\n')
    # make the 302 header
    if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \
            'loc=' in cookie and cookie != 'loc=/':
        cookie_loc = cookie[4:]
        header['HOST'] = header['HOST'].strip('\r\n')
        url = 'http://' + header['HOST'] + cookie_loc
        """test start"""
        print('----------url')
        print(url)
        print('----------url')
        """test end"""
        r_head.append('Location: {}\r\n\r\n'.format(url).encode('utf-8'))
        # set max-age for a day
        r_head.append('Cache-control: private; max-age={}\r\n\r\n'.format(86400).encode('utf-8'))
        print(r_head)
        writer.writelines(r_head)
        await writer.drain()
        writer.close()
        return
    # if header['PATH'] == 'favicon.ico': # Chrome sends an extra request like this; ignore it
    #     pass
    # else:
    path = './' + header['PATH']
    try:  # URL-decode the path
        path = parse.unquote(path, errors='surrogatepass')
    except UnicodeDecodeError:
        path = parse.unquote(path)
    if os.path.isfile(path):  # regular file: serve its contents
        file_size = int(os.path.getsize(path))
        start_index = 0
        end_index = file_size - 1
        length = file_size
        if 'RANGE' in header:
            # divide the piece of file
            # NOTE(review): strip('bytes=') removes a *character set* from both
            # ends, not the literal prefix; works for typical values but fragile.
            start_index, end_index = header['RANGE'].strip('bytes=').split('-')
            # - (no start, no end)
            # NOTE(review): 'and' binds tighter than 'or' here, so this branch
            # also matches any request whose end index is '\r\n'.
            if start_index == '' and end_index == '' or end_index == '\r\n':
                start_index, end_index = 0, file_size-1
            # x-
            elif end_index == '' or end_index == '\r\n':
                start_index, end_index = int(start_index), file_size-1
            # -x (suffix range: last x bytes)
            elif start_index == '':
                end_index = int(end_index)
                start_index = file_size - end_index
                end_index = file_size - 1
            # x-x
            start_index = int(start_index)
            end_index = int(end_index)
            length = end_index - start_index + 1
            if start_index < 0 or end_index >= file_size or start_index > end_index:
                writer.writelines(response[416])
                await writer.drain()
                writer.close()
                return
            r_head.append(
                'Content-Range: bytes {}-{}/{}\r\n'.format(start_index, end_index, file_size).encode('utf-8'))
        # guess the type
        mime = get_mime(path)
        r_head.append('Content-Type: {}\r\n'.format(mime).encode('utf-8'))
        r_head.append('Content-Length: {}\r\n'.format(length).encode('utf-8'))
        r_head.append(b'Connection: close\r\n')
        r_head.append(b'\r\n')
        writer.writelines(r_head)
        if header['METHOD'] == 'GET':
            file = open(path, 'rb')
            file.seek(start_index)
            writer.write(file.read(length))
            file.close()
    elif os.path.isdir(path):  # directory: send an HTML listing
        r_head.append(b'Connection: close\r\n')
        r_head.append(b'Content-Type:text/html; charset=utf-8\r\n')
        # Remember the visited directory for the cookie-based 302 redirect.
        r_head.append('Set-Cookie: loc={};path=/\r\n'.format(header['PATH']).encode('utf-8'))
        r_head.append(b'\r\n')
        if header['METHOD'] == 'HEAD':
            writer.writelines(r_head)
        elif header['METHOD'] == 'GET':
            writer.writelines(r_head)
            file_list = os.listdir(path)  # names of the entries in the directory
            r.append(b'<html>')
            r.append(b'<head><title>Index of %s</title></head>' %
                     (path.encode('utf-8')))
            r.append(b'<body bgcolor="white">')
            r.append(b'<h1>Index of %s</h1><hr>' %
                     (path.encode('utf-8')))
            r.append(b'<ul>')
            if path != './':
                r.append(b'<li><a href=".."> ../ </a></li>')
            for name in file_list:
                if os.path.isdir(path + name + '/'):
                    name = name + '/'
                r.append(b'<li><a href="%s"> %s </a></li>' %
                         (name.encode('utf-8'), name.encode('utf-8')))
            r.append(b'</ul>')
            r.append(b'</body>')
            r.append(b'</html>')
            writer.writelines(r)
    else:
        writer.writelines(response[404])
    await writer.drain()
    writer.close()
if __name__ == '__main__':
    loop = asyncio.get_event_loop()  # create the event loop
    coro = asyncio.start_server(
        dispatch, '127.0.0.1', 8080, loop=loop)  # coroutine creating the server
    server = loop.run_until_complete(coro)  # run it to obtain the server object
    # Serve requests until Ctrl+C is pressed
    print('Serving on {}'.format(server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    # Close the server
    server.close()  # stop accepting new connections
    # Wait until the underlying sockets are fully closed; must be called
    # after close().
    loop.run_until_complete(server.wait_closed())
    loop.close()
| 8,161 | 2,906 |
from core.object import Object

# Widget style definitions, wrapped in the project's Object for
# attribute-style access.
# NOTE(review): 'Ariel' is likely a typo for the 'Arial' font family; it is a
# runtime string, so confirm with the UI before changing it.
style= Object({
    'btn': {
        'bg': '#123321',
        'fg': '#dddddd',
        'font': ('Ariel', 14, 'bold')
    }
})
class Test:
    """Tiny demo class holding two integer attributes."""

    def __init__(self):
        self.a = 10
        self.b = 20

    def display(self):
        """Print both attribute values, one per line."""
        for value in (self.a, self.b):
            print(value)
# Demo: construct an instance and show its attribute values.
obj = Test()
obj.display()
print(obj.a, obj.b)
from physics import *
# NOTE(review): builds two Speed quantities and prints the unit of the
# derived `distance` attribute of s2 — semantics come from the external
# `physics` package; confirm against its API before relying on this.
s1, s2 = Speed(9, 3, unit='cm/s', extra_units=['cm/h']), Speed(9, 2, unit='cm/h', extra_units=['cm/h'])
print(s2.distance.unit)
import random
import time
from book_book.books_directory import books_requests_queue
from book_book.rental_request import RentalRequest
def rent_a_book(author: str, title: str, renter_name: str) -> None:
    """Create a rental request and push it onto the shared request queue.

    Sleeps a random 0-1 seconds first (simulated latency).
    """
    time.sleep(random.randint(0, 1))
    books_requests_queue.put(
        RentalRequest(author=author, title=title, renter_name=renter_name))
| 378 | 126 |
emk.module("java")
| 19 | 9 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.plugins.openstack.scenarios.glance.images import GlanceBasic
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import scenario
from rally.task import types
from rally.task import validation
from rally import consts
@types.convert(flavor={"type": "nova_flavor"}, image_location={"type": "path_or_url"})
@validation.add("required_services",
                services=[consts.Service.GLANCE, consts.Service.NEUTRON, consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance", "neutron", "nova"]},
                    name="BrowbeatPlugin.glance_create_boot_delete",
                    platform="openstack")
class GlanceCreateBootDelete(GlanceBasic, neutron_utils.NeutronScenario, nova_utils.NovaScenario):
    """Rally scenario: create a Glance image, boot a Nova server on a newly
    created Neutron network/subnet, then delete the server and the image."""

    def run(self, container_format, image_location, disk_format, flavor,
            network_create_args=None, subnet_create_args=None, **kwargs):
        """Execute the scenario.

        :param container_format: Glance image container format
        :param image_location: path or URL of the image source
        :param disk_format: Glance image disk format
        :param flavor: Nova flavor used to boot the server
        :param network_create_args: optional dict for network creation
        :param subnet_create_args: optional dict for subnet creation
        :param kwargs: extra arguments forwarded to the server boot call
        """
        image = self.glance.create_image(
            container_format=container_format, image_location=image_location,
            disk_format=disk_format)
        net = self._create_network(network_create_args or {})
        self._create_subnet(net, subnet_create_args or {})
        # Attach the server to the network created above.
        kwargs['nics'] = [{'net-id': net['network']['id']}]
        server = self._boot_server(image.id, flavor, **kwargs)
        self._delete_server(server)
        self.glance.delete_image(image.id)
| 2,111 | 641 |
from typing import List
import dace
from dace import subsets
from dace import memlet
from dace import dtypes
from dace.sdfg.sdfg import InterstateEdge, SDFG
from dace.sdfg.state import SDFGState
from dace.transformation.interstate.sdfg_nesting import NestSDFG
from dace.transformation.optimizer import Optimizer
from dace.transformation.interstate import InlineSDFG, FPGATransformSDFG
from dace.transformation.dataflow import StripMining
from dace.sdfg import graph, nodes, propagation, utils
from dace.libraries.blas.nodes import dot
from hbm_transform import HbmTransform
from hbm_bank_split import HbmBankSplit
from hbm_transform import set_shape
from hbm_transform import transform_sdfg_for_hbm
from hbm_transform import all_innermost_edges
from helper import *
######## Simple base versions of the pure blas applications without HBM use
def simple_vadd_sdfg(N, vec_len=16, tile_size=4096):
    """Build a vectorized axpy-style SDFG computing z[i] = x[i] + y[i] * alpha.

    The element map is strip-mined into tiles of *tile_size* and the map with
    parameter "i" is scheduled on the FPGA device.
    """
    alpha = dace.symbol("alpha", dtype=dace.float32)

    @dace.program
    def axpy(x: dace.vector(dace.float32, vec_len)[N/vec_len],
             y: dace.vector(dace.float32, vec_len)[N/vec_len],
             z: dace.vector(dace.float32, vec_len)[N/vec_len]):
        for i in dace.map[0:N/vec_len]:
            with dace.tasklet:
                xin << x[i]
                yin << y[i]
                zout >> z[i]
                zout = xin + yin * alpha

    sdfg = axpy.to_sdfg()
    sdfg.apply_strict_transformations()
    sdfg.apply_transformations(StripMining, {"tile_size": tile_size, "divides_evenly": True})
    # Schedule the map whose first parameter is "i" on the FPGA device.
    map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "i")
    map.map.schedule = dtypes.ScheduleType.FPGA_Device
    return sdfg
def simple_dot_sdfg(N, tile_size=8192):
    """Build a dot-product SDFG around the BLAS Dot library node.

    x and y are width-8 float32 vectors; the node is expanded with the
    FPGA_PartialSums implementation, the "stream" map is strip-mined into
    tiles of *tile_size*, and the tiled map is scheduled on the FPGA.
    """
    sdfg: SDFG = SDFG("dot")
    state = sdfg.add_state()
    sdfg.add_array("x", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
    sdfg.add_array("y", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
    sdfg.add_array("result", [1], dace.float32, dtypes.StorageType.FPGA_Global)
    lib_node = dot.Dot("dot")
    state.add_node(lib_node)
    read_x = state.add_read("x")
    read_y = state.add_read("y")
    write_result = state.add_write("result")
    state.add_edge(read_x, None, lib_node, "_x", memlet.Memlet("x"))
    state.add_edge(read_y, None, lib_node, "_y", memlet.Memlet("y"))
    state.add_edge(lib_node, "_result", write_result, None, memlet.Memlet(f"result"))
    lib_node.implementation = "FPGA_PartialSums"
    lib_node.expand(sdfg, state, partial_width=64, n=N)
    # Reset the I/O arrays' storage type to Default after expansion.
    sdfg.arrays["x"].storage = dtypes.StorageType.Default
    sdfg.arrays["y"].storage = dtypes.StorageType.Default
    sdfg.arrays["result"].storage = dtypes.StorageType.Default
    strip_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream")
    # Strip-mine inside the nested SDFG whose first state is labeled "stream".
    for nsdfg in sdfg.all_sdfgs_recursive():
        if nsdfg.states()[0].label == "stream":
            StripMining.apply_to(nsdfg, {"tile_size": tile_size, "divides_evenly": True}, _map_entry=strip_map)
            state = nsdfg.start_state
            tile_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream"
                                      and x.map.params[0] == "i")
            tile_map.map.schedule = dtypes.ScheduleType.FPGA_Device
            break
    return sdfg
######### On Device HBM-implementations of pure blas
def hbm_axpy_sdfg(banks_per_input: int):
    """Split the simple vadd/axpy SDFG across HBM: x, y and z each get
    *banks_per_input* consecutive HBM banks, unrolled over parameter "k"."""
    N = dace.symbol("N")
    sdfg = simple_vadd_sdfg(N)
    map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "tile_i")
    # Bank assignment: (memory type, bank range, split shape) per array.
    banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
             "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
             "z": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input])}
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), banks, {(map, 0): banks_per_input})
    return sdfg
def hbm_dot_sdfg(banks_per_input: int):
    """Split the simple dot SDFG across HBM: x and y each use
    *banks_per_input* HBM banks; the per-bank results array goes to DDR."""
    N = dace.symbol("N")
    sdfg = simple_dot_sdfg(N)
    state = sdfg.states()[0]
    # Widen the result arrays to one slot per bank (indexed by the new "k").
    # NOTE(review): this loop rebinds 'state'; the div_map search below runs
    # on whatever state the last iteration left behind — confirm intended.
    for edge, state in sdfg.all_edges_recursive():
        if isinstance(edge, graph.MultiConnectorEdge):
            if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "_result":
                edge.data.other_subset = subsets.Range.from_string("k")
                set_shape(state.parent.arrays["_result"], [banks_per_input])
            if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "result":
                # one cannot update the other_subset. Leads to problems with out of bounds checking
                # edge.data.other_subset = subsets.Range.from_string("k")
                set_shape(state.parent.arrays["result"], [banks_per_input])
    array_banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
                   "y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
                   "result": ("DDR", "0", None)}
    div_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream"
                             and x.map.params[0] == "tile_i")
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input),
                           array_banks, {(div_map.map, 0): banks_per_input}, True)
    return sdfg
######### Full implementations of pure blas applications
def only_hbm_axpy_sdfg(banks_per_input: int):
    """Full-device HBM axpy: apply FPGA transformations, inline, and remove
    one access path of the output array "z" in the start state."""
    sdfg = hbm_axpy_sdfg(banks_per_input)
    sdfg.apply_fpga_transformations()
    sdfg.apply_transformations_repeated(InlineSDFG)
    # Drop the "z" access node (and its successor) from the start state —
    # presumably the initial host->device copy of an output-only array.
    z_access1 = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "z")
    sdfg.start_state.remove_nodes_from([sdfg.start_state.out_edges(z_access1)[0].dst, z_access1])
    distribute_along_dim0(sdfg, ["x", "y", "z"])
    return sdfg
def _modify_dot_host_side(sdfg, start_state, end_state):
    """Adjust the host side of a distributed dot SDFG.

    Appends a reduction in *end_state* that sums the per-bank partials in
    "result" into a new scalar array "final_result", and removes the initial
    copy of "result" from *start_state*, making "result" transient.
    """
    # Add final reduction
    state = end_state
    host_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    sum_up = state.add_reduce("lambda a, b : a + b", None, 0)
    sdfg.add_array("final_result", [1], dace.float32)
    host_final = state.add_access("final_result")
    state.add_edge(host_result, None, sum_up, None, memlet.Memlet("result"))
    state.add_edge(sum_up, None, host_final, None, memlet.Memlet("final_result[0]"))
    sum_up.expand(sdfg, state)
    sdfg.apply_transformations(InlineSDFG)
    # Remove copy result
    state = start_state
    access_result_start = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    state.remove_nodes_from([state.out_edges(access_result_start)[0].dst, access_result_start])
    sdfg.arrays["result"].transient = True
def only_hbm_dot_sdfg(banks_per_input: int):
    """Full-device HBM dot product, including the host-side final reduction
    over the per-bank partial results."""
    sdfg = hbm_dot_sdfg(banks_per_input)
    sdfg.apply_fpga_transformations()
    sdfg.apply_transformations_repeated(InlineSDFG)
    distribute_along_dim0(sdfg, ["x", "y"])
    _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
    return sdfg
def hbm_axpy_dot(banks_per_input: int):
    """Fused axpy+dot pipeline on HBM.

    Builds middle = axpy(axpy_x, axpy_y) and result = dot(middle, dot_y) as
    two nested SDFGs, replaces the transient "middle" array with an on-chip
    FIFO stream "connect" so the kernels run as a pipeline, splits the inputs
    across HBM banks, and finishes with the host-side reduction.
    """
    N = dace.symbol("N")
    axpy_sdfg = simple_vadd_sdfg(N, vec_len=8, tile_size=8192)
    dot_sdfg = simple_dot_sdfg(N, tile_size=8192)
    sdfg = SDFG("axpydot")
    sdfg.add_symbol("alpha", dace.float32)
    state = sdfg.add_state()
    sdfg.add_array("axpy_x", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("axpy_y", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("dot_y", [N//8], dace.vector(dace.float32, 8))
    sdfg.add_array("middle", [N//8], dace.vector(dace.float32, 8), transient=True)
    sdfg.add_array("result", [banks_per_input], dace.float32)
    acc_axpy_x = state.add_access("axpy_x")
    acc_axpy_y = state.add_access("axpy_y")
    acc_dot_y = state.add_access("dot_y")
    acc_middle = state.add_access("middle")
    acc_result = state.add_access("result")
    axpynode = state.add_nested_sdfg(axpy_sdfg, sdfg, set(["x", "y", "z"]), set(["z"]), {"N": N, "alpha": "alpha"})
    dotnode = state.add_nested_sdfg(dot_sdfg, sdfg, set(["x", "y", "result"]), set(["result"]), {"N": N})
    # NOTE(review): the *_dummy access nodes feed in-connectors for data the
    # nested SDFGs also write — presumably to satisfy connector requirements.
    acc_middle_dummy = state.add_access("middle")
    acc_middle_dummy_2 = state.add_access("middle")
    acc_result_dummy = state.add_access("result")
    state.add_edge(acc_axpy_x, None, axpynode, "x", memlet.Memlet("axpy_x"))
    state.add_edge(acc_axpy_y, None, axpynode, "y", memlet.Memlet("axpy_y"))
    state.add_edge(acc_middle_dummy, None, axpynode, "z", memlet.Memlet("middle"))
    state.add_edge(axpynode, "z", acc_middle, None, memlet.Memlet("middle"))
    state.add_edge(acc_middle_dummy_2, None, dotnode, "x", memlet.Memlet("middle"))
    state.add_edge(acc_dot_y, None, dotnode, "y", memlet.Memlet("dot_y"))
    state.add_edge(acc_result_dummy, None, dotnode, "result", memlet.Memlet("result"))
    state.add_edge(dotnode, "result", acc_result, None, memlet.Memlet("result"))
    sdfg.apply_transformations_repeated(InlineSDFG)

    def _nodes_from_path(path):
        # NOTE(review): this helper appears unused in the visible code.
        nodes = [path[0].src]
        for edge in path:
            nodes.append(edge.dst)
        return nodes

    # Replace the transient "middle" array with an on-chip FIFO stream.
    sdfg.add_stream("connect", dace.vector(dace.float32, 8), 128, [banks_per_input],
                    storage=dtypes.StorageType.FPGA_Local, transient=True)
    old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle"
                                  and state.in_degree(x) == 1)
    update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
    old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle"
                                  and state.out_degree(x) == 1)
    update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
    acc_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
    path = state.memlet_path(state.in_edges(acc_result)[0])
    path[0].data.subset = subsets.Range.from_string("k")
    modification_map_axpy = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and
                                           "axpy" in x.label and x.params[0] == "tile_i")
    modification_map_dot = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and
                                          x.label == "stream" and x.params[0] == "tile_i")
    array_updates = {"axpy_x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
                     "axpy_y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
                     "dot_y": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input]),
                     "result": ("DDR", "0", None)}
    transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_updates,
                           {(modification_map_axpy, 0): banks_per_input, (modification_map_dot, 0): banks_per_input})
    # Fpga transform cannot be applied here, because stream is not in a map, and because there
    # are FPGA storagetypes and schedules around. However since the actual application of
    # FPGATransform works non-destructive we just force application here
    fpga_xform = FPGATransformSDFG(sdfg.sdfg_id, -1, {}, -1)
    fpga_xform.apply(sdfg)
    sdfg.apply_transformations_repeated(InlineSDFG)
    _modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
    return sdfg
"""
WAVECAR parser.
---------------
The file parser that handles the parsing of WAVECAR files.
"""
from aiida_vasp.parsers.file_parsers.parser import BaseFileParser
from aiida_vasp.parsers.node_composer import NodeComposer
class WavecarParser(BaseFileParser):
    """Add WAVECAR as a single file node."""

    PARSABLE_ITEMS = {
        'wavecar': {
            'inputs': [],
            'name': 'wavecar',
            'prerequisites': []
        },
    }

    def __init__(self, *args, **kwargs):
        super(WavecarParser, self).__init__(*args, **kwargs)
        self._wavecar = None
        self.init_with_kwargs(**kwargs)

    def _parse_file(self, inputs):
        """Create a DB Node for the WAVECAR file.

        *inputs* is accepted for interface compatibility but is not used.
        Returns ``{'wavecar': <path>}`` or ``{'wavecar': None}`` when the
        underlying file path is missing.
        """
        # BUGFIX: removed the dead `result = inputs` assignment that was
        # immediately overwritten by `result = {}`.
        result = {}
        wfn = self._data_obj.path
        if wfn is None:
            return {'wavecar': None}
        result['wavecar'] = wfn
        return result

    @property
    def wavecar(self):
        """Lazily compose and cache the wavefunction node."""
        if self._wavecar is None:
            composer = NodeComposer(file_parsers=[self])
            self._wavecar = composer.compose('vasp.wavefun')
        return self._wavecar
| 1,134 | 354 |
#! /usr/bin/env python
# encoding: utf-8
import re
import os
import subprocess
import json
class Package:
    """A single third-party package requirement (manager, name, version)."""

    def __init__(self) -> None:
        self.manager = ""
        self.name = ""
        self.version = ""

    def toString(self):
        """Print the package fields (note: prints rather than returning)."""
        for label, value in (('manager', self.manager),
                             ('name', self.name),
                             ('version', self.version)):
            print('package ' + label + ':' + value)
class PackageRepo:
    """Base class for a package-repository backend.

    Subclasses fill the path/library lists when installing packages.
    """

    def __init__(self) -> None:
        self.packages = {}
        self.include_dirs = []  # header search paths
        self.lib_dirs = []      # library search paths
        self.stlibs = []        # static libraries
        self.shlibs = []        # shared libraries

    def installPackages(self, packages):
        """Install *packages*; concrete repositories override this hook."""
        pass
class PackageManager:
    """Collects package requirements per manager and drives installation."""

    def __init__(self) -> None:
        self.package_repos = {}
        self.packages = {}
        self.include_dirs = ['.']
        self.lib_dirs = ['.']
        self.stlibs = []
        self.shlibs = []
        self.add_package_repo("conan", ConanRepo)

    def add_package_repo(self, name, repo_type):
        """Register (and instantiate) a repository backend under *name*."""
        self.package_repos[name] = repo_type()

    def add_requires(self, *args):
        """Parse requirement strings of the form 'manager::name/version',
        queue them, and trigger installation."""
        parsed = []
        for spec in args:
            match_result = re.match(r'(.*)::(.*)/(.*)', spec)
            pkg = Package()
            pkg.manager = match_result.group(1)
            pkg.name = match_result.group(2)
            pkg.version = match_result.group(3)
            parsed.append(pkg)
        self.addPackages(parsed)
        # TODO: call this in the end
        self.installPackages()

    def addPackage(self, package):
        """Queue *package* under its manager's bucket."""
        self.packages.setdefault(package.manager, []).append(package)

    def addPackages(self, packages):
        for package in packages:
            self.addPackage(package)

    def installPackages(self):
        """Install every queued package and merge each repository's
        include/lib path and library information into this manager."""
        for manager, queued in self.packages.items():
            if manager not in self.package_repos:
                print("unsupported packaged manager: " + manager)
                continue
            repo = self.package_repos[manager]
            repo.installPackages(queued)
            self.include_dirs.extend(repo.include_dirs)
            self.lib_dirs.extend(repo.lib_dirs)
            self.stlibs.extend(repo.stlibs)
            self.shlibs.extend(repo.shlibs)
class ConanRepo(PackageRepo):
    """Package repository backed by the conan package manager."""

    def __init__(self) -> None:
        PackageRepo.__init__(self)

    def installPackages(self, packages):
        """Generate a conanfile.txt for *packages* and install them."""
        # gen conanfile.txt
        conanfile_content = '[requires]\n'
        for package in packages:
            conanfile_content += package.name + '/' + package.version + '\n'
        conanfile_content += '[generators]\njson'
        print(conanfile_content)
        self.installConanPackages(conanfile_content)

    def installConanPackages(self, conanfile_content):
        """Run `conan install` in ./tmp and collect include dirs, lib dirs,
        and static/shared library names from conanbuildinfo.json."""
        if not os.path.exists("tmp"):
            os.makedirs("tmp")
        # BUGFIX: the original stored the working directory in ``os.curdir``,
        # clobbering the stdlib constant ('.'); keep it in a local and restore
        # it in a finally block so an install failure cannot strand the cwd.
        previous_dir = os.getcwd()
        os.chdir('tmp')
        try:
            with open('conanfile.txt', 'w') as f:
                f.write(conanfile_content)
            # BUGFIX: pass the command as an argument list; a single string
            # without shell=True fails on POSIX platforms.
            subprocess.run(["conan", "install", ".", "--build=missing"])
            with open('conanbuildinfo.json') as f:
                data = json.loads(f.read())
            options = data['options']
            deps = data['dependencies']
            for dep in deps:
                print(dep['name'])
                self.include_dirs.extend(dep['include_paths'])
                self.lib_dirs.extend(dep['lib_paths'])
                for pkg_lib in dep['libs']:
                    # NOTE(review): 'options' appears to be keyed by the same
                    # name as the library here — confirm this holds when a
                    # package provides differently-named libraries.
                    if options[pkg_lib]['shared'] == 'False':
                        self.stlibs.append(pkg_lib)
                    else:
                        self.shlibs.append(pkg_lib)
        finally:
            os.chdir(previous_dir)
        print("install conan packages finished")
| 4,184 | 1,266 |
from copy import deepcopy
import numpy as np
import tensorflow as tf
from ampligraph.datasets import NumpyDatasetAdapter, AmpligraphDatasetAdapter
from ampligraph.latent_features import SGDOptimizer, constants
from ampligraph.latent_features.initializers import DEFAULT_XAVIER_IS_UNIFORM
from ampligraph.latent_features.models import EmbeddingModel
from ampligraph.latent_features.models.EmbeddingModel import ENTITY_THRESHOLD
from sklearn.utils import check_random_state
from tqdm import tqdm
from excut.utils.logging import logger
class EmbeddingModelContinue(EmbeddingModel):
    """EmbeddingModel variant that can resume training on an already-fitted
    model: ``fit(..., continue_training=True)`` reuses and extends the
    existing entity/relation mappings and embeddings instead of
    re-initializing them."""

    def __init__(self, k=constants.DEFAULT_EMBEDDING_SIZE, eta=constants.DEFAULT_ETA, epochs=constants.DEFAULT_EPOCH,
                 batches_count=constants.DEFAULT_BATCH_COUNT, seed=constants.DEFAULT_SEED, embedding_model_params={},
                 optimizer=constants.DEFAULT_OPTIM, optimizer_params={'lr': constants.DEFAULT_LR},
                 loss=constants.DEFAULT_LOSS, loss_params={}, regularizer=constants.DEFAULT_REGULARIZER,
                 regularizer_params={}, initializer=constants.DEFAULT_INITIALIZER,
                 initializer_params={'uniform': DEFAULT_XAVIER_IS_UNIFORM}, large_graphs=False,
                 verbose=constants.DEFAULT_VERBOSE):
        # ENTITY_THRESHOLD controls when the large-graph (lazy-loading)
        # branches below are taken; log it up front for debugging.
        logger.warning('entities min_quality %i' % ENTITY_THRESHOLD)
        super(EmbeddingModelContinue, self).__init__(k, eta, epochs, batches_count, seed, embedding_model_params,
                                                     optimizer, optimizer_params, loss,
                                                     loss_params, regularizer, regularizer_params, initializer,
                                                     initializer_params, large_graphs,
                                                     verbose)
        # CPU-only session config.  NOTE(review): 40 CPUs / 40 inter-op
        # threads is hard-coded here — confirm it suits the target machine.
        self.tf_config = tf.ConfigProto(allow_soft_placement=True, device_count={"CPU": 40},
                                        inter_op_parallelism_threads=40, intra_op_parallelism_threads=1)

    def copy_old_model_params(self, old_model):
        """Copy mappings and trained parameters from a fitted *old_model*
        into this instance so training can be continued.

        Raises an Exception when *old_model* has not been fitted.
        """
        if not old_model.is_fitted:
            raise Exception('Old Model os not Fitted!')
        self.ent_to_idx = deepcopy(old_model.ent_to_idx)
        self.rel_to_idx = deepcopy(old_model.rel_to_idx)
        # self.is_fitted = old_model_params['is_fitted']
        # is_calibrated = old_model_params['is_calibrated']
        # get_embedding_model_params fills the dict in place; deep-copy so
        # this model's params are independent of the old model's arrays.
        old_model_params = dict()
        old_model.get_embedding_model_params(old_model_params)
        copied_params = deepcopy(old_model_params)
        self.restore_model_params(copied_params)

    def fit(self, X, early_stopping=False, early_stopping_params={}, continue_training=False):
        """Train an EmbeddingModel (with optional early stopping).

        The model is trained on a training set X using the training protocol
        described in :cite:`trouillon2016complex`.

        Parameters
        ----------
        X : ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter
            Numpy array of training triples OR handle of Dataset adapter which would help retrieve data.
        early_stopping: bool
            Flag to enable early stopping (default:``False``)
        early_stopping_params: dictionary
            Dictionary of hyperparameters for the early stopping heuristics.

            The following string keys are supported:

                - **'x_valid'**: ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter :
                  Numpy array of validation triples OR handle of Dataset adapter which
                  would help retrieve data.
                - **'criteria'**: string : criteria for early stopping 'hits10', 'hits3', 'hits1' or 'mrr'(default).
                - **'x_filter'**: ndarray, shape [n, 3] : Positive triples to use as filter if a 'filtered' early
                  stopping criteria is desired (i.e. filtered-MRR if 'criteria':'mrr').
                  Note this will affect training time (no filter by default).
                  If the filter has already been set in the adapter, pass True
                - **'burn_in'**: int : Number of epochs to pass before kicking in early stopping (default: 100).
                - **'check_interval'**: int : Early stopping interval after burn-in (default:10).
                - **'stop_interval'**: int : Stop if criteria is performing worse over n consecutive checks (default: 3)
                - **'corruption_entities'**: List of entities to be used for corruptions. If 'all',
                  it uses all entities (default: 'all')
                - **'corrupt_side'**: Specifies which side to corrupt. 's', 'o', 's+o' (default)

                Example: ``early_stopping_params={x_valid=X['valid'], 'criteria': 'mrr'}``
        continue_training: bool
            When True, reuse existing mappings/embeddings (extending them for
            unseen entities/relations) instead of re-initializing.
        """
        self.train_dataset_handle = None
        # try-except block is mainly to handle clean up in case of exception or manual stop in jupyter notebook
        # TODO change 0: Update the mapping if there are new entities.
        if continue_training:
            self.update_mapping(X)
        try:
            if isinstance(X, np.ndarray):
                # Adapt the numpy data in the internal format - to generalize
                self.train_dataset_handle = NumpyDatasetAdapter()
                self.train_dataset_handle.set_data(X, "train")
            elif isinstance(X, AmpligraphDatasetAdapter):
                self.train_dataset_handle = X
            else:
                msg = 'Invalid type for input X. Expected ndarray/AmpligraphDataset object, got {}'.format(type(X))
                logger.error(msg)
                raise ValueError(msg)

            # create internal IDs mappings
            # TODO Change 1: first change to reuse the existing mappings rel_to_idx and ent_to_idx
            if not continue_training:
                self.rel_to_idx, self.ent_to_idx = self.train_dataset_handle.generate_mappings()
            else:
                self.train_dataset_handle.use_mappings(self.rel_to_idx, self.ent_to_idx)

            prefetch_batches = 1
            if len(self.ent_to_idx) > ENTITY_THRESHOLD:
                self.dealing_with_large_graphs = True

                logger.warning('Your graph has a large number of distinct entities. '
                               'Found {} distinct entities'.format(len(self.ent_to_idx)))

                logger.warning('Changing the variable initialization strategy.')
                logger.warning('Changing the strategy to use lazy loading of variables...')

                if early_stopping:
                    raise Exception('Early stopping not supported for large graphs')

                if not isinstance(self.optimizer, SGDOptimizer):
                    raise Exception("This mode works well only with SGD optimizer with decay (read docs for details).\
 Kindly change the optimizer and restart the experiment")

            if self.dealing_with_large_graphs:
                prefetch_batches = 0
                # CPU matrix of embeddings
                # TODO Change 2.1: do not initialize if continue training
                if not continue_training:
                    self.ent_emb_cpu = self.initializer.get_np_initializer(len(self.ent_to_idx), self.internal_k)

            self.train_dataset_handle.map_data()

            # This is useful when we re-fit the same model (e.g. retraining in model selection)
            if self.is_fitted:
                tf.reset_default_graph()
                self.rnd = check_random_state(self.seed)
                tf.random.set_random_seed(self.seed)

            self.sess_train = tf.Session(config=self.tf_config)

            # change 2.2 : Do not change batch size with new training data, just use the old (for large KGs)
            # if not continue_training:
            batch_size = int(np.ceil(self.train_dataset_handle.get_size("train") / self.batches_count))
            # else:
            #     batch_size = self.batch_size
            logger.info("Batch Size: %i" % batch_size)
            # dataset = tf.data.Dataset.from_tensor_slices(X).repeat().batch(batch_size).prefetch(2)

            if len(self.ent_to_idx) > ENTITY_THRESHOLD:
                logger.warning('Only {} embeddings would be loaded in memory per batch...'.format(batch_size * 2))

            self.batch_size = batch_size

            # TODO change 3: load model from trained params if continue instead of re_initialize the ent_emb and rel_emb
            if not continue_training:
                self._initialize_parameters()
            else:
                self._load_model_from_trained_params()

            dataset = tf.data.Dataset.from_generator(self._training_data_generator,
                                                     output_types=(tf.int32, tf.int32, tf.float32),
                                                     output_shapes=((None, 3), (None, 1), (None, self.internal_k)))
            dataset = dataset.repeat().prefetch(prefetch_batches)
            dataset_iterator = tf.data.make_one_shot_iterator(dataset)

            # init tf graph/dataflow for training
            # init variables (model parameters to be learned - i.e. the embeddings)
            if self.loss.get_state('require_same_size_pos_neg'):
                batch_size = batch_size * self.eta

            loss = self._get_model_loss(dataset_iterator)

            train = self.optimizer.minimize(loss)

            # Entity embeddings normalization
            normalize_ent_emb_op = self.ent_emb.assign(tf.clip_by_norm(self.ent_emb, clip_norm=1, axes=1))

            self.early_stopping_params = early_stopping_params

            # early stopping
            if early_stopping:
                self._initialize_early_stopping()

            self.sess_train.run(tf.tables_initializer())
            self.sess_train.run(tf.global_variables_initializer())
            # set_training_true only exists on some subclasses; ignore otherwise
            try:
                self.sess_train.run(self.set_training_true)
            except AttributeError:
                pass

            normalize_rel_emb_op = self.rel_emb.assign(tf.clip_by_norm(self.rel_emb, clip_norm=1, axes=1))

            # NOTE(review): the 'normalize_ent_emb' flag also triggers
            # relation-embedding normalization here — confirm that is intended.
            if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS):
                self.sess_train.run(normalize_rel_emb_op)
                self.sess_train.run(normalize_ent_emb_op)

            epoch_iterator_with_progress = tqdm(range(1, self.epochs + 1), disable=(not self.verbose), unit='epoch')

            # print("before epochs!")
            # print(self.sess_train.run(self.ent_emb))
            # print(self.sess_train.run(self.rel_emb))

            for epoch in epoch_iterator_with_progress:
                losses = []
                for batch in range(1, self.batches_count + 1):
                    feed_dict = {}
                    self.optimizer.update_feed_dict(feed_dict, batch, epoch)

                    if self.dealing_with_large_graphs:
                        loss_batch, unique_entities, _ = self.sess_train.run([loss, self.unique_entities, train],
                                                                             feed_dict=feed_dict)
                        # write back only the rows of the GPU buffer that
                        # correspond to this batch's unique entities
                        self.ent_emb_cpu[np.squeeze(unique_entities), :] = \
                            self.sess_train.run(self.ent_emb)[:unique_entities.shape[0], :]
                    else:
                        loss_batch, _ = self.sess_train.run([loss, train], feed_dict=feed_dict)

                    if np.isnan(loss_batch) or np.isinf(loss_batch):
                        msg = 'Loss is {}. Please change the hyperparameters.'.format(loss_batch)
                        logger.error(msg)
                        raise ValueError(msg)
                    losses.append(loss_batch)

                if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS):
                    self.sess_train.run(normalize_ent_emb_op)

                if self.verbose:
                    msg = 'Average Loss: {:10f}'.format(sum(losses) / (batch_size * self.batches_count))
                    if early_stopping and self.early_stopping_best_value is not None:
                        msg += ' — Best validation ({}): {:5f}'.format(self.early_stopping_criteria,
                                                                      self.early_stopping_best_value)

                    logger.debug(msg)
                    epoch_iterator_with_progress.set_description(msg)

                if early_stopping:
                    # switch to eval mode for the early-stopping test, then back
                    try:
                        self.sess_train.run(self.set_training_false)
                    except AttributeError:
                        pass

                    if self._perform_early_stopping_test(epoch):
                        self._end_training()
                        return

                    try:
                        self.sess_train.run(self.set_training_true)
                    except AttributeError:
                        pass

            self._save_trained_params()
            self._end_training()
        except BaseException as e:
            self._end_training()
            raise e

    def _load_model_from_trained_params(self):
        """Load the model from trained params.

        While restoring make sure that the order of loaded parameters match the saved order.
        It's the duty of the embedding model to load the variables correctly.
        This method must be overridden if the model has any other parameters (apart from entity-relation embeddings).
        This function also set's the evaluation mode to do lazy loading of variables based on the number of
        distinct entities present in the graph.
        """
        # Generate the batch size based on entity length and batch_count
        # TODO change 4.1: batch size based on the training data or more generally if it was computed to bigger number
        self.batch_size = max(self.batch_size, int(np.ceil(len(self.ent_to_idx) / self.batches_count)))
        # logger.warning('entities min_quality inside load model %i' % ENTITY_THRESHOLD)
        # logger.warning('_load_model_from_trained_params is it a big graph yet? %s' % self.dealing_with_large_graphs)
        if len(self.ent_to_idx) > ENTITY_THRESHOLD:
            self.dealing_with_large_graphs = True

            logger.warning('Your graph has a large number of distinct entities. '
                           'Found {} distinct entities'.format(len(self.ent_to_idx)))

            logger.warning('Changing the variable loading strategy to use lazy loading of variables...')
            logger.warning('Evaluation would take longer than usual.')

        if not self.dealing_with_large_graphs:
            self.ent_emb = tf.Variable(self.trained_model_params[0], dtype=tf.float32)
        else:
            self.ent_emb_cpu = self.trained_model_params[0]
            # change 4.2: the GPU buffer holds 2 x batch_size embedding rows
            self.ent_emb = tf.Variable(np.zeros((self.batch_size * 2, self.internal_k)), dtype=tf.float32)

        self.rel_emb = tf.Variable(self.trained_model_params[1], dtype=tf.float32)

    def update_mapping(self, X):
        """
        update entities and relations mappings in continue case

        Entities/relations in *X* that are absent from the existing mappings
        get fresh consecutive ids, and the stored embedding matrices are
        extended with randomly initialized rows for them.

        :param X: ndarray (shape [n, 3]) of training triples
        :return: None (mappings and trained_model_params updated in place)
        """
        unique_ent = set(np.unique(np.concatenate((X[:, 0], X[:, 2]))))
        unique_rel = set(np.unique(X[:, 1]))
        new_unique_ent = unique_ent - set(self.ent_to_idx.keys())
        new_unique_rel = unique_rel - set(self.rel_to_idx.keys())
        # NOTE(review): '>-0' evaluates the same as '> 0'; likely a typo for '> 0'.
        if len(new_unique_ent)>0 or len(new_unique_rel)>-0:
            logger.warning('Org entities (%i) or relations (%i)' % (len(self.ent_to_idx), len(self.rel_to_idx)))
            logger.warning('New entities (%i) or relations (%i)'%(len(new_unique_ent), len(new_unique_rel)))
            # assign fresh ids after the current maxima
            ent_id_start = max(self.ent_to_idx.values()) + 1
            rel_id_start = max(self.rel_to_idx.values()) + 1
            new_ent_count = len(new_unique_ent)
            new_rel_count = len(new_unique_rel)
            self.ent_to_idx.update(dict(zip(new_unique_ent, range(ent_id_start, ent_id_start+new_ent_count))))
            self.rel_to_idx.update(dict(zip(new_unique_rel, range(rel_id_start, rel_id_start+new_rel_count))))
            # Extend the emebdding vectors themselves with randomly initialized vectors
            extend_ent_emb = self.initializer.get_np_initializer(new_ent_count, self.internal_k)
            extend_rel_emb = self.initializer.get_np_initializer(new_rel_count, self.internal_k)
            self.trained_model_params[0] = np.concatenate([self.trained_model_params[0], extend_ent_emb])
            self.trained_model_params[1] = np.concatenate([self.trained_model_params[1], extend_rel_emb])
| 16,951 | 4,784 |
# num2txt.py
# Jeff Smith
'''
Convert a given number into its text representation.
e.g. 67 becomes 'sixty-seven'. Handle numbers from 0-999.
'''
# Create dictionaries of number-text key pairs
# ones: last digit; twos: the teens (10-19); tens: the tens digit;
# huns: the hundreds digit spelled out with 'hundred'.
ones = {0: '', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
        5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
twos = {10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen',
        15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}
tens = {0: '', 1: '', 2: 'twenty', 3: 'thirty', 4: 'forty',
        5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}
huns = {0: '', 1: 'one hundred', 2: 'two hundred', 3: 'three hundred', 4: 'four hundred',
        5: 'five hundred', 6: 'six hundred', 7: 'seven hundred', 8: 'eight hundred', 9: 'nine hundred'}

# Obtain input from console (note: runs at import time)
num = int(input('Enter a number 0-999: '))
def textnum(num):
    """Return the English words for *num* (0-999).

    For out-of-range input, print a message and return None (matching the
    original behavior).  Fixes over the original: no trailing hyphen on
    exact tens (20 -> 'twenty'), and the hundreds branch now handles exact
    hundreds (300) and embedded teens (315) correctly.
    """
    # Local word tables keep the function self-contained and testable.
    _ones = {1: 'one', 2: 'two', 3: 'three', 4: 'four', 5: 'five',
             6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
    _teens = {10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen',
              14: 'fourteen', 15: 'fifteen', 16: 'sixteen', 17: 'seventeen',
              18: 'eighteen', 19: 'nineteen'}
    _tens = {2: 'twenty', 3: 'thirty', 4: 'forty', 5: 'fifty',
             6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}

    def _two_digit(n):
        # Words for 1-99; '' for 0 (used when a hundreds value is exact).
        if n == 0:
            return ''
        if n <= 9:
            return _ones[n]
        if n <= 19:
            return _teens[n]
        tens_word = _tens[n // 10]
        # No trailing hyphen on exact tens: 20 -> 'twenty', not 'twenty-'.
        return (tens_word + '-' + _ones[n % 10]) if n % 10 else tens_word

    if num == 0:
        return 'zero'
    if 1 <= num <= 99:
        return _two_digit(num)
    if 100 <= num <= 999:
        text = _ones[num // 100] + ' hundred'
        remainder = num % 100
        # 300 -> 'three hundred'; 367 -> 'three hundred, sixty-seven'
        return (text + ', ' + _two_digit(remainder)) if remainder else text
    print("Number out of range")
# Echo the text form of the number entered above.
print(textnum(num))
| 1,518 | 643 |
import ctypes
from typing import get_type_hints, Any
from abc import ABC
from .c_pointer import TypedCPointer, attempt_decode
from contextlib import suppress
class Struct(ABC):
    """Abstract class representing a struct.

    Subclasses declare their fields as class-level type annotations; each
    annotation is mapped to a ctypes field type via TypedCPointer.get_mapped.
    """

    def __init__(self, *args, **kwargs):
        """Build the underlying ctypes.Structure from the subclass hints.

        ``do_sync`` (keyword, default None) controls whether python
        attributes are synchronized from the struct after construction.
        """
        # Pop the control flag BEFORE forwarding kwargs: ctypes.Structure
        # raises TypeError for keyword arguments that are not field names,
        # so leaving "do_sync" in kwargs broke from_existing() below.
        do_sync = kwargs.pop("do_sync", None)
        hints = get_type_hints(self.__class__)
        self._hints = hints

        class _InternalStruct(ctypes.Structure):
            _fields_ = [
                (name, TypedCPointer.get_mapped(typ))
                for name, typ in hints.items()  # fmt: off
            ]

        self._struct = _InternalStruct(*args, **kwargs)
        # Sync by default (do_sync omitted/None) or when explicitly truthy.
        if (do_sync is None) or do_sync:
            self._sync()

    @property
    def _as_parameter_(self) -> ctypes.Structure:
        # Lets instances be passed directly to ctypes foreign functions.
        return self._struct

    @classmethod
    def from_existing(cls, struct: ctypes.Structure):
        """Wrap an already-constructed ctypes structure."""
        instance = cls(do_sync=False)
        instance._struct = struct
        instance._sync()
        return instance

    def __getattribute__(self, name: str):
        attr = super().__getattribute__(name)
        with suppress(AttributeError):
            # _hints is absent during early __init__; suppress covers that.
            hints = super().__getattribute__("_hints")
            if (name in hints) and (type(attr)) is bytes:
                # Decode C char buffers to str on access.
                attr = attempt_decode(attr)
        return attr

    def __setattr__(self, name: str, value: Any):
        # Mirror assignments into the underlying struct so C code sees them.
        if hasattr(self, "_struct"):
            self._struct.__setattr__(name, value)
        super().__setattr__(name, value)

    def _sync(self):
        """Copy every declared field from the C struct onto the instance."""
        for name in self._hints:
            setattr(self, name, getattr(self._struct, name))

    def __repr__(self) -> str:
        return f"<struct {self.__class__.__name__} at {hex(ctypes.addressof(self._struct))}>"  # noqa
| 1,811 | 576 |
__source__ = 'https://leetcode.com/problems/kth-smallest-number-in-multiplication-table/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 668. Kth Smallest Number in Multiplication Table
#
# Nearly every one have used the Multiplication Table.
# But could you find out the k-th smallest number quickly from the multiplication table?
#
# Given the height m and the length n of a m * n Multiplication Table,
# and a positive integer k, you need to return the k-th smallest number in this table.
#
# Example 1:
# Input: m = 3, n = 3, k = 5
# Output:
# Explanation:
# The Multiplication Table:
# 1 2 3
# 2 4 6
# 3 6 9
#
# The 5-th smallest number is 3 (1, 2, 2, 3, 3).
# Example 2:
# Input: m = 2, n = 3, k = 6
# Output:
# Explanation:
# The Multiplication Table:
# 1 2 3
# 2 4 6
#
# The 6-th smallest number is 6 (1, 2, 2, 3, 4, 6).
# Note:
# The m and n will be in the range [1, 30000].
# The k will be in the range [1, m * n]
#
import unittest
# 532ms 78.12%
class Solution(object):
    def findKthNumber(self, m, n, k):
        """
        :type m: int
        :type n: int
        :type k: int
        :rtype: int

        Binary-search the answer value: enough(x) counts in O(m) how many
        table entries are <= x; the smallest x whose count reaches k is the
        k-th smallest value.  Fixed for Python 3: the original used the
        Python-2-only ``xrange`` and true division for the midpoint.
        """
        def enough(x):
            count = 0
            # Row i holds i, 2i, ..., ni, so it contributes min(x // i, n)
            # values that are <= x.
            for i in range(1, m + 1):
                count += min(x // i, n)
            return count >= k

        lo, hi = 1, m * n
        while lo < hi:
            # Floor division: '/' would yield a float midpoint on Python 3.
            mi = (lo + hi) // 2
            if not enough(mi):
                lo = mi + 1
            else:
                hi = mi
        return lo
class TestMethods(unittest.TestCase):
    """Placeholder unit tests for this module."""

    def test_Local(self):
        # Trivial sanity check so the suite always has one passing test.
        self.assertTrue(1 == 1)
# Run the unit tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# Reference Java solutions and complexity notes, kept as a module-level
# string for documentation only; never executed.
Java = '''
# Thought: https://leetcode.com/problems/kth-smallest-number-in-multiplication-table/solution/

Approach #1: Brute Force [Memory Limit Exceeded]
Complexity Analysis
Time Complexity: O(m*n) to create the table, and O(m*nlog(m*n)) to sort it.
Space Complexity: O(m*n) to store the table.

# Memory Limit Exceeded
# Last executed input:
# 9895
# 28405
# 100787757
class Solution {
    public int findKthNumber(int m, int n, int k) {
        int[] table = new int[m*n];
        for (int i = 1; i <= m; i++) {
            for (int j = 1; j <= n; j++) {
                table[(i - 1) * n + j - 1] = i * j;
            }
        }
        Arrays.sort(table);
        return table[k-1];
    }
}

Approach #2: Next Heap [Time Limit Exceeded]
Complexity Analysis
Time Complexity: O(k*mlogm)=O(m^2 nlogm).
Our initial heapify operation is O(m).
Afterwards, each pop and push is O(mlogm), and our outer loop is O(k) = O(m*n)O(k)=O(m*n)
Space Complexity: O(m). Our heap is implemented as an array with mm elements.

# TLE
# 9895
# 28405
# 100787757
class Solution {
    public int findKthNumber(int m, int n, int k) {
        PriorityQueue<Node> heap = new PriorityQueue<Node>(m,
            Comparator.<Node> comparingInt(node -> node.val));

        for (int i = 1; i <= m; i++) {
            heap.offer(new Node(i, i));
        }

        Node node = null;
        for (int i = 0; i < k; i++) {
            node = heap.poll();
            int nxt = node.val + node.root;
            if (nxt <= node.root * n) {
                heap.offer(new Node(nxt, node.root));
            }
        }
        return node.val;
    }
}

class Node {
    int val;
    int root;
    public Node(int v, int r) {
        val = v;
        root = r;
    }
}

Approach #3: Binary Search [Accepted]
Complexity Analysis
Time Complexity: O(m*log(m*n)). Our binary search divides the interval [lo, hi] into half at each step.
At each step, we call enough which requires O(m)O(m) time.
Space Complexity: O(1). We only keep integers in memory during our intermediate calculations

# 19ms 34.94%
class Solution {
    public boolean enough(int x, int m, int n, int k) {
        int count = 0;
        for (int i = 1; i <= m; i++) {
            count += Math.min(x / i, n);
        }
        return count >= k;
    }

    public int findKthNumber(int m, int n, int k) {
        int lo = 1, hi = m * n;
        while (lo < hi) {
            int mi = lo + (hi - lo) / 2;
            if (!enough(mi, m, n, k)) lo = mi + 1;
            else hi = mi;
        }
        return lo;
    }
}

# 10ms 99.60%
class Solution {
    public int findKthNumber(int m, int n, int k) {
        int l = 1, r = m * n;
        while (l < r) {
            int mid = l + (r - l) / 2;
            int c = count(m, n ,mid);
            if (c < k) {
                l = mid + 1;
            } else {
                r = mid;
            }
        }
        return l;
    }

    private int count (int m, int n, int target) {
        int i = 1, j = n, count = 0;
        while (j >= 1 && i <= m) {
            if (target >= i * j) {
                count += j;
                i++;
            } else {
                j--;
            }
        }
        return count;
    }
}
'''
import ezc3d

# This example reads a file that contains 2 force platforms. It thereafter print some metadata and data for one them
# NOTE: the c3d path below is relative to the examples folder; adjust as needed.
c3d = ezc3d.c3d("../c3dFiles/ezc3d-testFiles-master/ezc3d-testFiles-master/Qualisys.c3d", extract_forceplat_data=True)
print(f"Number of force platform = {len(c3d['data']['platform'])}")
print("")
print("Printing information and data for force platform 0")
print("")
pf0 = c3d["data"]["platform"][0]

# Units
print(f"Force unit = {pf0['unit_force']}")
print(f"Moment unit = {pf0['unit_moment']}")
print(f"Center of pressure unit = {pf0['unit_position']}")
print("")

# Position of pf
print(f"Position of origin = {pf0['origin']}")
print(f"Position of corners = \n{pf0['corners']}")
print("")

# Calibration matrix
print(f"Calibation matrix = \n{pf0['cal_matrix']}")
print("")

# Data at 4 different frames; -1 indexes the last frame of the recording
frames = [0, 10, 1000, -1]
print(f"Data (in global reference frame) at frames = {frames}")
print(f"Force = \n{pf0['force'][:, frames]}")
print(f"Moment = \n{pf0['moment'][:, frames]}")
print(f"Center of pressure = \n{pf0['center_of_pressure'][:, frames]}")
print(f"Moment at CoP = \n{pf0['Tz'][:, frames]}")
| 1,141 | 433 |
from __future__ import print_function, unicode_literals
import os
from io import open
from pyNastran.utils.log import get_logger2
import shutil
# Directory names ignored when scanning packages for modules.
IGNORE_DIRS = ['src', 'dmap', 'solver', '__pycache__',
               'op4_old', 'calculix', 'bars', 'case_control',
               'pch', 'old', 'solver', 'test', 'dev', 'bkp', 'bdf_vectorized']

# Module basenames to skip entirely.
MODS_SKIP = ['spike', 'shell_backup']

# Directories pruned during the os.walk in get_folders_files().
SKIP_DIRECTORIES = ['.svn', '.idea', '.settings', '.git', 'test', 'bkp', '__pycache__', 'dev',
                    'htmlcov', 'vtk_examples', 'SnakeRiverCanyon', 'M100', 'SWB']

# File suffixes skipped when collecting python sources, grouped by origin
# (the inline comments name the producing tool/format).
SKIP_FILE_SUFFIX = [
    '.pyc', '.pyx',  # python
    '.bdf', '.op2', '.f06', '.op4', '.dat', '.inp',  # nastran
    '.err', '.log', '.rej', '.db', '.db.jou', '.ses', '.ses.01',  # patran
    '.pptx',
    '.png', '.gif',  # pictures
    '.txt', '.csv', '.out', '.coverage', '.whl',  # generic
    '.mapbc', '.front', '.flo', 'cogsg', '.bc', '.d3m', '.inpt', '.nml',  # usm3d/fun3d
    '.ele', '.node', '.smesh', '.off',
    '.mk5', '.wgs', '.stl', '.fgrid', '.su2', '.obj',  # other formats
    '.tri', '.cntl', '.c3d',  # cart3d
    '.surf', '.tags', '.ugrid', '.bedge',  # aflr
    '.plt',  # tecplot
    '.p3d',
    '.tex', '.bib',  # latex
]

# When True, rst files are written to disk.
MAKE_FILES = True
def get_folders_files(dirname, skip_file_suffix=None, skip_directories=None):
    """
    Return (folders, files) for the tree rooted at *dirname*.

    By default discards:
     * directories listed in SKIP_DIRECTORIES (".svn", ".idea", ...)
     * files ending with a suffix in SKIP_FILE_SUFFIX (".pyc", ".bdf", ...)
     * files whose basename contains "test_"
    """
    if skip_directories is None:
        skip_directories = SKIP_DIRECTORIES
    if skip_file_suffix is None:
        skip_file_suffix = tuple(SKIP_FILE_SUFFIX)

    dirname = os.path.join(dirname)
    files = []
    folders = []
    for root, dirs, filenames in os.walk(dirname):
        folders.append(root)
        for filename in filenames:
            if filename.endswith(skip_file_suffix):
                continue
            # skip unit-test modules; they are not part of the documented API
            if 'test_' in os.path.basename(filename):
                continue
            files.append(os.path.join(root, filename))
        # prune skipped directories in place so os.walk does not descend
        dirs[:] = [d for d in dirs if d not in skip_directories]
    return folders, files
def get_classes_functions_in_file(py_filename):
    """Parse *py_filename* line-by-line and return
    ``(class_list, function_list)`` where ``class_list`` holds
    ``(class_name, is_object)`` tuples (``is_object`` is True for an
    explicit ``(object)`` base) and ``function_list`` holds public
    top-level function names (no leading underscore).
    """
    with open(py_filename, 'r', encoding='utf8') as f:
        lines = f.readlines()

    function_list = []
    class_list = []
    for line in lines:
        # strip trailing comments so "class A(object):  # note" parses cleanly
        line = line.split('#')[0].rstrip()
        if line.startswith('class '):
            # Handle both "class ASDF(object):" and "class ASDF:".  The
            # original kept the trailing ':' for paren-less classes; the
            # extra split(':') drops it.
            class_name = line.split('(')[0].split(':')[0].split(' ')[1]
            is_object = '(object):' in line
            class_list.append((class_name, is_object))
        elif line.startswith('def '):
            function_name = line.split('(')[0].split(' ')[1]
            if function_name.startswith('_'):
                continue
            function_list.append(function_name)
    return class_list, function_list
def get_pyfilenames():
    """Collect ``(py_filename, rel_path, dot_path, class_names)`` for every
    python source found under the pyNastran tree."""
    _folders, filenames = get_folders_files('../../pyNastran')
    filenames_classes = []
    for py_filename in filenames:
        rel_path, dot_path = get_location_filename_for_pyfilename(py_filename)
        class_names, _function_names = get_classes_functions_in_file(py_filename)
        filenames_classes.append((py_filename, rel_path, dot_path, class_names))
    return filenames_classes
def get_location_filename_for_pyfilename(py_filename):
    """../../pyNastran/utils/nastran_utils.py ->
    ('pyNastran/utils/nastran_utils.py', 'pyNastran.utils.nastran_utils')"""
    # Strip leading "../" / "..\" prefixes explicitly.  The original used
    # lstrip('../\\'), which strips a *character set* and would also eat the
    # leading '.' of a real path component (e.g. '../.config/x.py').
    path = py_filename
    while path.startswith(('../', '..\\')):
        path = path[3:]
    path = path.lstrip('/\\')
    no_py = os.path.splitext(path)[0]
    dot_path = no_py.replace('\\', '.').replace('/', '.')
    return path, dot_path
def filenames_to_rsts(filenames_classes, make_rsts=False):
    """Write a per-folder ``index.rst`` toctree plus one rst file per class
    (via create_rst_file_for_class) under the ``rsts`` output tree."""
    for py_filename, rel_path, dot_path, class_names in filenames_classes:
        if not class_names:
            continue
        base_folder = os.path.dirname(rel_path)
        folder = os.path.join('rsts', base_folder)
        # collapse everything below a 'cards' directory into the cards folder
        if 'cards' in folder:
            while not folder.endswith('cards'):
                folder = os.path.dirname(folder)
        if not os.path.exists(folder):
            os.makedirs(folder)

        rst_filename = os.path.join(folder, 'index.rst')
        # append to an existing index, otherwise start a fresh toctree
        if os.path.exists(rst_filename):
            mode, rst_lines = 'a', ''
        else:
            mode, rst_lines = 'w', '.. toctree::\n\n'

        for class_name, is_object in class_names:
            create_rst_file_for_class(folder, dot_path, class_name, is_object)
            print(' %s' % str(class_name))
            rst_lines += ' %s.%s\n' % (dot_path, class_name)

        with open(rst_filename, mode) as rst_file:
            rst_file.write(rst_lines)
def create_rst_file_for_class(folder, dot_path, class_name, is_object):
    """Write ``<folder>/<dot_path>.<class_name>.rst`` containing a Sphinx
    ``autoclass`` directive for the class.

    Classes with a non-``object`` base additionally get ``:show-inheritance:``
    so their base-class chain is rendered.  The two near-identical branches of
    the original are merged; the unused ``split_path`` computation and the
    always-true ``if lines:`` guard are removed.
    """
    dot_class_path = '%s.%s.rst' % (dot_path, class_name)
    rst_filename = os.path.join(folder, dot_class_path)

    # title + underline + directive, e.g.:
    #   ASDF
    #   ----
    #   .. autoclass:: pyNastran.bdf.cards.ASDF
    lines = '%s\n' % class_name
    lines += '%s\n' % (len(class_name) * '-')
    lines += '.. autoclass:: %s.%s\n' % (dot_path, class_name)
    if not is_object:
        lines += ' :show-inheritance:\n'
    lines += ' :inherited-members:\n'
    lines += ' :members:\n'
    #lines += ' :private-members:\n'

    with open(rst_filename, 'w') as rst_file:
        rst_file.write(lines)
def main():
    """Regenerate the whole ``rsts`` tree from scratch."""
    # start clean: drop any previous output
    if os.path.exists('rsts'):
        shutil.rmtree('rsts')
    filenames_to_rsts(get_pyfilenames(), make_rsts=False)
# Script entry point.
if __name__ == '__main__':
    main()
| 7,062 | 2,504 |
import pandas as pd
from toucan_data_sdk.utils.postprocess import top, top_group
def test_top():
    """ It should return result for top """
    data = pd.DataFrame(
        [
            {'variable': 'toto', 'Category': 1, 'value': 100},
            {'variable': 'toto', 'Category': 1, 'value': 200},
            {'variable': 'toto', 'Category': 1, 'value': 300},
            {'variable': 'lala', 'Category': 1, 'value': 100},
            {'variable': 'lala', 'Category': 1, 'value': 150},
            {'variable': 'lala', 'Category': 1, 'value': 250},
            {'variable': 'lala', 'Category': 2, 'value': 350},
            {'variable': 'lala', 'Category': 2, 'value': 450},
        ]
    )

    # ~~~ without group ~~~
    # top 3 rows by 'value' over the whole frame, descending
    expected = pd.DataFrame(
        [
            {'variable': 'lala', 'Category': 2, 'value': 450},
            {'variable': 'lala', 'Category': 2, 'value': 350},
            {'variable': 'toto', 'Category': 1, 'value': 300},
        ]
    )
    kwargs = {'value': 'value', 'limit': 3, 'order': 'desc'}
    df = top(data, **kwargs).reset_index(drop=True)
    assert df.equals(expected)

    # ~~~ with group ~~~
    # NOTE(review): per the expected frame, a negative limit keeps the bottom
    # |limit| rows of each (variable, Category) group — confirm against the
    # documented top() semantics.
    expected = pd.DataFrame(
        [
            {'variable': 'lala', 'Category': 1, 'value': 150},
            {'variable': 'lala', 'Category': 1, 'value': 100},
            {'variable': 'lala', 'Category': 2, 'value': 450},
            {'variable': 'lala', 'Category': 2, 'value': 350},
            {'variable': 'toto', 'Category': 1, 'value': 200},
            {'variable': 'toto', 'Category': 1, 'value': 100},
        ]
    )
    kwargs = {'group': ['variable', 'Category'], 'value': 'value', 'limit': -2, 'order': 'desc'}
    df = top(data, **kwargs)
    assert df.equals(expected)
def test_top_date_strings():
    """It should manage to use top if the column can be interpretated as date"""
    df = pd.DataFrame(
        {'date': ['2017-01-01', '2017-03-02', '2018-01-02', '2016-04-02', '2017-01-03']}
    )
    # ascending by default: earliest dates first
    top_df = top(df, value='date', limit=2)
    assert top_df['date'].tolist() == ['2016-04-02', '2017-01-01']
    top_df = top(df, value='date', limit=3, order='desc')
    assert top_df['date'].tolist() == ['2018-01-02', '2017-03-02', '2017-01-03']
    # an explicit date_format swaps day/month, changing the ordering
    top_df = top(df, value='date', limit=3, order='desc', date_format='%Y-%d-%m')
    assert top_df['date'].tolist() == ['2018-01-02', '2017-01-03', '2017-03-02']
def test_top_date_strings_temp_column():
    """It should not change existing columns"""
    df = pd.DataFrame(
        {'date': ['2017-01-01', '2017-03-02'], 'date_': ['a', 'b'], 'date__': ['aa', 'bb']}
    )
    # descending order reverses the two rows; the date_/date__ columns that
    # could collide with top()'s internal temp column must be untouched
    result = top(df, value='date', limit=2, order='desc')
    assert result.equals(df[::-1])
def test_top_group():
    """ It should return result for top_group """
    data = pd.DataFrame(
        {
            'Label': ['G1', 'G2', 'G3', 'G4', 'G5', 'G3', 'G3'],
            'Categories': ['C1', 'C2', 'C1', 'C2', 'C1', 'C2', 'C3'],
            'Valeurs': [6, 1, 9, 4, 8, 2, 5],
            'Periode': ['mois', 'mois', 'mois', 'semaine', 'semaine', 'semaine', 'semaine'],
        }
    )

    # ~~~ with filters ~~~
    # top 2 labels per 'Periode' by summed 'Valeurs'; all rows of a kept
    # label are retained (G3 appears twice in 'semaine')
    expected = pd.DataFrame(
        {
            'Periode': ['mois', 'mois', 'semaine', 'semaine', 'semaine'],
            'Label': ['G3', 'G1', 'G5', 'G3', 'G3'],
            'Categories': ['C1', 'C1', 'C1', 'C2', 'C3'],
            'Valeurs': [9, 6, 8, 2, 5],
        }
    )
    kwargs = {
        'group': 'Periode',
        'value': 'Valeurs',
        'aggregate_by': ['Label'],
        'limit': 2,
        'order': 'desc',
    }
    df = top_group(data, **kwargs)
    assert df.equals(expected)

    # ~~~ without groups ~~~
    # group=None: the top 2 labels over the whole frame
    expected = pd.DataFrame(
        {
            'Label': ['G3', 'G3', 'G3', 'G5'],
            'Categories': ['C1', 'C2', 'C3', 'C1'],
            'Valeurs': [9, 2, 5, 8],
            'Periode': ['mois', 'semaine', 'semaine', 'semaine'],
        }
    )
    kwargs = {
        'group': None,
        'value': 'Valeurs',
        'aggregate_by': ['Label'],
        'limit': 2,
        'order': 'desc',
    }
    df = top_group(data, **kwargs)
    assert df.equals(expected)

    # ~~~ with group and function = mean ~~~
    # ranking by mean instead of the default aggregation
    expected = pd.DataFrame(
        {
            'Periode': ['mois', 'mois', 'semaine', 'semaine'],
            'Label': ['G3', 'G1', 'G5', 'G4'],
            'Categories': ['C1', 'C1', 'C1', 'C2'],
            'Valeurs': [9, 6, 8, 4],
        }
    )
    kwargs = {
        'group': ['Periode'],
        'value': 'Valeurs',
        'aggregate_by': ['Label'],
        'limit': 2,
        'function': 'mean',
        'order': 'desc',
    }
    df = top_group(data, **kwargs)
    assert df.equals(expected)
| 4,650 | 1,818 |
from Main import db, login_manager
from faker import Faker
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
fake = Faker()
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: resolve the id stored in the session cookie
    # to a User row (None when not found).
    return User.query.get(user_id)
class User(db.Model, UserMixin):
    """Application user; UserMixin supplies the Flask-Login interface."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String)
    email = db.Column(db.String)
    # werkzeug hash of the password; the plain password is never stored
    password_hash = db.Column(db.String(128))
    profile_color = db.Column(db.String)

    def __init__(self, email, password):
        self.email = email
        self.password_hash = generate_password_hash(password)
        # create a random config first, user can change it later
        self.username = fake.user_name()
        self.profile_color = fake.color(luminosity='dark')

    def check_password(self, password):
        # Compare a candidate password against the stored hash.
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return "user({},{},{},{})".format(self.id, self.username, self.email, self.profile_color)
class Comment(db.Model):
    """A user's comment on a post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    message = db.Column(db.String)

    def __init__(self, owner_id, post_id, message):
        self.owner_id = owner_id
        self.post_id = post_id
        self.message = message

    def __repr__(self):
        return "comment({},{},{},{})".format(self.id, self.owner_id, self.post_id, self.message)
class Like(db.Model):
    """A user's like on a post (one row per user/post pair)."""
    __tablename__ = 'likes'
    id = db.Column(db.Integer, primary_key=True)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))

    def __init__(self, owner_id, post_id):
        self.owner_id = owner_id
        self.post_id = post_id

    def __repr__(self):
        return "like({},{},{})".format(self.id, self.owner_id, self.post_id)
class Post(db.Model):
    """A text post authored by a user."""
    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    text = db.Column(db.Text)

    def __init__(self, text, owner_id):
        self.text = text
        self.owner_id = owner_id

    def __repr__(self):
        return "post({}, {}, {})".format(self.id, self.owner_id, self.text)
| 2,439 | 836 |
import sys
import os
import SimpleITK as sitk
import pydicom
from slugify import slugify
import shutil
import argparse
def gen_dcm_identifiers(in_dir):
    """Collect identifying metadata for every DICOM file under *in_dir*.

    Returns a list of ``[mrn, accession, slugified_series_string, path]``
    entries, one per readable .dcm file. Files whose required tags are
    missing are reported and skipped.
    """
    # Get the absolute path of every .dcm file, recursively.
    # BUG FIX: the original tested `'dcm' in str(dcms)` against the whole
    # file list of a directory and then took *every* file in it (including
    # non-DICOM files); filter each file name individually instead.
    dcms_path_list = [
        os.path.abspath(os.path.join(dire, dcm))
        for dire, sub_dir, dcms in os.walk(in_dir)
        for dcm in dcms
        if dcm.lower().endswith('.dcm')
    ]
    output_list = list()
    # Generate a list with MRN, Accession Number, Series Description,
    # Series Number and Acquisition Date.
    for dcm_file in dcms_path_list:
        info = pydicom.read_file(dcm_file)
        try:
            # BUG FIX: indexing a pydicom Dataset returns a DataElement,
            # which is not sliceable -- `info[tag][:]` raised TypeError
            # (uncaught by the original's `except KeyError`). Read .value
            # and coerce to str, since the results are used as path parts.
            mrn = str(info[0x0010, 0x0020].value)
            acc = str(info[0x0008, 0x0050].value)
            series_desc = info[0x0008, 0x103e].value
            series_num = info[0x0020, 0x0011].value
            acq_date = info[0x0008, 0x0020].value
            string = str(series_desc) + "_" + str(series_num) + "_" + str(acq_date)
            string_date = slugify(string)
            output_list.append([mrn, acc, string_date, dcm_file])
        except (KeyError, TypeError):
            print("Error getting metadata from " + str(dcm_file))
    return output_list
def create_folders_move(dcm_ids, out_dir):
    """Move each DICOM file into ``out_dir/<mrn>/<accession>/<series>/``.

    Parameters
    ----------
    dcm_ids: list of [mrn, accession, series_string, source_path] entries,
        as produced by gen_dcm_identifiers.
    out_dir: destination root directory; created (with parents) if missing.
    """
    # os.makedirs(..., exist_ok=True) replaces the original chain of
    # exists()/mkdir() calls and is race-free.
    os.makedirs(out_dir, exist_ok=True)
    for entry in dcm_ids:
        print(entry)
        mrn, acc, series, src_path = entry[0], entry[1], entry[2], entry[3]
        dest_dir = os.path.join(out_dir, mrn, acc, series)
        os.makedirs(dest_dir, exist_ok=True)
        try:
            shutil.move(src_path, dest_dir)
            print("######## Moving " + str(src_path))
        except (shutil.Error, OSError):
            # BUG FIX: narrowed the original bare `except:`, which also
            # swallowed KeyboardInterrupt/SystemExit. shutil.move raises
            # shutil.Error when the file already exists at the destination.
            print("Error, likely file already exists in destination")
# Command-line interface.
# BUG FIX: guard the entry point with __main__ so importing this module
# (e.g. to reuse gen_dcm_identifiers elsewhere) does not immediately
# parse sys.argv and start moving files.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='MoveRestructureScript')
    parser.add_argument("--dicomDir", dest="in_dir", required=True)
    parser.add_argument("--outDir", dest="out_dir", required=True)
    op = parser.parse_args()
    create_folders_move(gen_dcm_identifiers(op.in_dir), op.out_dir)
| 2,199 | 822 |
from PyQt5 import QtWidgets
from .cnmf_viz_pytemplate import Ui_VizualizationWidget
from .evaluate_components import EvalComponentsWidgets
from mesmerize_core.utils import *
from mesmerize_core import *
import caiman as cm
class VizWidget(QtWidgets.QDockWidget):
    """Dock widget bundling the CNMF visualization controls."""

    def __init__(self, cnmf_viewer, batch_item):
        QtWidgets.QDockWidget.__init__(self, parent=None)
        self.ui = Ui_VizualizationWidget()
        self.ui.setupUi(self)

        self.cnmf_obj = batch_item.cnmf.get_output()
        self.batch_item = batch_item
        self.cnmf_viewer = cnmf_viewer
        self.eval_gui = EvalComponentsWidgets(cnmf_viewer=cnmf_viewer)

        # Wire each button to its handler.
        self.ui.pushButtonInputMovie.clicked.connect(self.view_input)
        self.ui.pushButtonCnImage.clicked.connect(self.load_correlation_image)
        self.ui.pushButtonViewProjection.clicked.connect(self.view_projections)
        self.ui.pushButtonEvalGui.clicked.connect(self.show_eval_gui)
        self.ui.pushButtonUpdateBoxSize.clicked.connect(self.select_contours)

    def _open_movie(self, path: Union[Path, str]):
        """Open a movie in the viewer; .mmap files need manual reshaping."""
        if Path(path).suffix != ".mmap":
            self.cnmf_viewer.viewer.open(path, colormap="gray")
            return
        Yr, dims, T = cm.load_memmap(path)
        movie = np.reshape(Yr.T, [T] + list(dims), order="F")
        self.cnmf_viewer.viewer.add_image(movie, colormap="gray")

    def view_input(self):
        """Show the input movie this batch item was run on."""
        input_path = self.batch_item.caiman.get_input_movie_path()
        self._open_movie(get_full_data_path(input_path))

    def load_correlation_image(self):
        """Add the batch item's correlation image to the viewer."""
        corr_img = self.batch_item.caiman.get_correlation_image()
        self.cnmf_viewer.viewer.add_image(
            corr_img, name=f'corr: {self.batch_item["name"]}', colormap="gray"
        )

    def view_projections(self):
        """Add the projection selected in the combo box to the viewer."""
        proj_type = self.ui.comboBoxProjection.currentText()
        projection = self.batch_item.caiman.get_projection(proj_type=proj_type)
        self.cnmf_viewer.viewer.add_image(
            projection,
            name=f'{proj_type} projection: {self.batch_item["name"]}',
            colormap="gray",
        )

    def show_eval_gui(self):
        """Pop up the component-evaluation window."""
        self.eval_gui.show()

    def select_contours(self):
        """Re-select contours using the box size from the spin box."""
        self.cnmf_viewer.select_contours(
            box_size=self.ui.spinBoxBoxSize.value(), update_box=True
        )
| 2,384 | 836 |
# SPDX-License-Identifier: BSD-3-Clause
#
# Configuration file for textgen. This file defines the graphic lumps
# that are generated, and the text to show in each one.
#
import re
# Adjustments for character position based on character pairs. Some
# pairs of characters can fit more snugly together, which looks more
# visually appealing. This is highly dependent on the font graphics,
# and if the font is changed this probably needs to be redone.
# Each key is a regular expression matching a two-character pair
# (left character, then a class of right characters); the value is the
# negative horizontal offset applied to the right character of the pair
# (presumably in pixels of the font graphics -- confirm against textgen).
FONT_KERNING_RULES = {
    # Right character fits under left character:
    r"T[0ACOSZacos]": -2,
    r"V[OC]": -2,
    r"Y[ASZacs]": -2,
    r"Y[CO0]": -1,
    r"P[Aa]": -3,
    r"P[7]": -2,
    r"P[Z]": -1,
    r"[0O][Aa]": -1,
    r"S[A]": -1,
    r"Sa": -2,
    r"Wa": -1,
    r"p[a]": -1,
    r"s[ao]": -1,
    r"ta": -2,
    r"v[oc]": -1,
    r"y[oacs]": -1,
    # Left character fits under right character:
    r"L[4Q]": -3,
    r"L[O0CTYtcq]": -2,
    r"L[oyVv]": -1,
    r"l[tTY]": -2,
    r"l[y]": -1,
    r"[0O][4TYy]": -2,
    r"[0O][1]": -1,
    r"Q[1TY]": -2,
    r"A[CGTYt]": -2,
    r"A[cgy]": -1,
    r"a[cTYt]": -2,
    r"a[vVy]": -1,
    # Fits into "hole" in left character:
    r"B[0CGOQ]": -2,
    r"B[0cgq]": -2,
    r"C[0CGOQ]": -3,
    r"C[q]": -2,
    r"C[cgo]": -1,
    r"X[0CO]": -3,
    r"X[Qqco]": -2,
    r"8[0CO]": -3,
    r"8[GQcgqo]": -2,
    r"Z[0CO]": -2,
    r"Z[GQocgq]": -1,
    r"I[0COQcoq]": -1,
    r"K[0CO]": -4,
    r"K[GQ]": -3,
    r"K[cgo]": -2,
    r"K[Eq]": -1,
    r"P[0COQcoq]": -1,
    r"R[0COQcoq]": -1,
    # Fits into "hole" in right character:
    r"[O0][2X8]": -3,
    r"[O0][9Kx]": -2,
    r"[O0][Iik]": -1,
    r"Q[28X]": -2,
    r"Q[9Iix]": -1,
    r"q[IXx]": -1,
    # Just because.
    r"[O0][O0]": -1,
}
# Lump name -> text to render. The three dictionaries below group lumps
# by the color/font they are generated with.
white_graphics = {
    "wibp1": "P1",
    "wibp2": "P2",
    "wibp3": "P3",
    "wibp4": "P4",
    "wicolon": ":",
    # These files are for the title screens of Phase 1 and Phase 2
    "t_phase1": "Part 1: Phobos Infestation",
    "t_phase2": "Part 2: Earth Infestation",
    # Note: level names are also included in this dictionary, with
    # the data added programmatically from the DEHACKED lump, see
    # code below.
}
# Menu headings rendered in the blue font.
blue_graphics = {
    "m_disopt": "DISPLAY OPTIONS",
    "m_episod": "Choose Chapter:",
    "m_optttl": "OPTIONS",
    "m_skill": "Choose Skill Level:",
}
# Menu/intermission graphics rendered in the red font.
red_graphics = {
    # Title for the HELP/HELP1 screen:
    "helpttl": "Help",
    # Title for CREDIT
    "freettl": "Satanic Infestation",
    "m_ngame": "New Game",
    "m_option": "Options",
    "m_loadg": "Load Game",
    "m_saveg": "Save Game",
    "m_rdthis": "Read This!",
    "m_quitg": "Quit Game",
    "m_newg": "NEW GAME",
    "m_epi1": "Outpost Outbreak",
    "m_epi2": "Military Labs",
    "m_epi3": "Event Horizon",
    "m_epi4": "Double Impact",
    "m_jkill": "Little Girl",
    "m_rough": "Fighting Words",
    "m_hurt": "Shoot To Kill",
    "m_ultra": "This Machine Kills Demons",
    "m_nmare": "WICKED MOTHERFUCKER!",
    "m_lgttl": "LOAD GAME",
    "m_sgttl": "SAVE GAME",
    "m_endgam": "End Game",
    "m_messg": "Messages:",
    "m_msgoff": "off",
    "m_msgon": "on",
    "m_msens": "Mouse Sensitivity",
    "m_detail": "Graphic Detail:",
    "m_gdhigh": "high",
    "m_gdlow": "low",
    "m_scrnsz": "Screen Size",
    "m_svol": "Sound Volume",
    "m_sfxvol": "Sfx Volume",
    "m_musvol": "Music Volume",
    "m_disp": "Display",
    "wif": "finished",
    "wiostk": "kills",
    "wiosti": "items",
    "wiscrt2": "secret",
    "wiosts": "scrt",
    "wifrgs": "frgs",
    "witime": "Time:",
    "wisucks": "sucks",
    "wimstt": "Total:",
    "wipar": "Par:",
    "wip1": "P1",
    "wip2": "P2",
    "wip3": "P3",
    "wip4": "P4",
    "wiostf": "f.",
    "wimstar": "you",
    "winum0": "0",
    "winum1": "1",
    "winum2": "2",
    "winum3": "3",
    "winum4": "4",
    "winum5": "5",
    "winum6": "6",
    "winum7": "7",
    "winum8": "8",
    "winum9": "9",
    "wipcnt": "%",
    "wiminus": "-",
    "wienter": "ENTERING",
    "m_pause": "pause",
    # Extra graphics used in PrBoom's menus. Generate these as well
    # so that when we play in PrBoom the menus look consistent.
    "prboom": "PrBoom",
    "m_generl": "General",
    "m_setup": "Setup",
    "m_keybnd": "Key Bindings",
    "m_weap": "Weapons",
    "m_stat": "Status Bar/HUD",
    "m_auto": "Automap",
    "m_enem": "Enemies",
    "m_mess": "Messages",
    "m_chat": "Chat Strings",
    "m_horsen": "horizontal",
    "m_versen": "vertical",
    "m_loksen": "mouse look",
    "m_accel": "acceleration",
    # Extra graphics from SMMU/Eternity Engine:
    "m_about": "about",
    "m_chatm": "Chat Strings",
    "m_compat": "Compatibility",
    "m_demos": "demos",
    "m_dmflag": "deathmatch flags",
    "m_etcopt": "eternity options",
    "m_feat": "Features",
    "m_gset": "game settings",
    "m_hud": "heads up display",
    "m_joyset": "joysticks",
    "m_ldsv": "Load/Save",
    "m_menus": "Menu Options",
    "m_mouse": "mouse options",
    "m_player": "player setup",
    "m_serial": "serial connection",
    "m_sound": "sound options",
    "m_status": "status bar",
    "m_tcpip": "tcp/ip connection",
    "m_video": "video options",
    "m_wad": "load wad",
    "m_wadopt": "wad options",
    # This is from SMMU too, and if we follow things to the letter,
    # ought to be all lower-case. However, same lump name is used
    # by other ports (Zandronum) which expect a taller graphic to
    # match the other main menu graphics. Eternity Engine doesn't
    # use it any more, and on SMMU there's enough space for it.
    "m_multi": "Multiplayer",
}
def read_bex_lump(filename):
    """Read the BEX (Dehacked) lump from the given filename.

    Returns:
        Dictionary mapping from name to value.
    """
    result = {}
    with open(filename) as f:
        for line in f:
            # Ignore blank lines and comments:
            line = line.strip()
            if len(line) == 0 or line[0] in "#;":
                continue
            # Split on the first '=' and interpret that as an
            # assignment. This is primitive and doesn't read like a
            # full BEX parser should, but it's good enough here.
            # BUG FIX: the original used split("=", 2), which yields up
            # to *three* parts, so any line whose value itself contained
            # an '=' failed the len check and was silently dropped.
            assign = line.split("=", 1)
            if len(assign) != 2:
                continue
            result[assign[0].strip()] = assign[1].strip()
    return result
def update_level_name(lumpname, bexdata, bexname):
    """Set the level name for the given graphic from BEX file.

    Args:
        lumpname: Name of output graphic file.
        bexdata: Dictionary of data read from BEX file.
        bexname: Name of entry in BEX file to use.

    Raises:
        Exception: if *bexname* has no entry in *bexdata*.
    """
    if bexname not in bexdata:
        raise Exception(
            "Level name %s not defined in " "DEHACKED lump!" % bexname
        )
    # Strip "MAP01: " or "E1M2: " etc. from start, if present.
    # BUG FIX: the pattern is now a raw string; the original "^\w*\d:\s*"
    # relied on unknown escapes surviving as literal text, which is a
    # DeprecationWarning (and eventually a SyntaxError) in modern Python.
    levelname = re.sub(r"^\w*\d:\s*", "", bexdata[bexname])
    white_graphics[lumpname] = levelname
# Pull level names out of the DEHACKED lumps and register them as
# white-font graphics.
freedoom_bex = read_bex_lump("../../lumps/p2_deh.lmp")
freedm_bex = read_bex_lump("../../lumps/fdm_deh.lmp")

# Doom 1-style maps: HUSTR_E1M1 from BEX => wilv00
for episode in range(4):
    for mission in range(9):
        lump = "wilv%i%i" % (episode, mission)
        bexname = "HUSTR_E%iM%i" % (episode + 1, mission + 1)
        update_level_name(lump, freedoom_bex, bexname)

# Doom 2-style maps:
for mapnum in range(32):
    hustr = "HUSTR_%i" % (mapnum + 1)
    # HUSTR_1 => cwilv00
    update_level_name("cwilv%02i" % mapnum, freedoom_bex, hustr)
    # HUSTR_1 => dmwilv00 (from freedm.bex)
    update_level_name("dmwilv%02i" % mapnum, freedm_bex, hustr)
| 7,459 | 3,227 |
import fnmatch, os, subprocess
from multiprocessing import Pool
import tqdm
sdkPath = subprocess.check_output('xcodebuild -version -sdk iphonesimulator Path', shell=True).strip()
def parseSymbols(fn):
    """Run the headerparse helper on one header file.

    Returns (fn, symsByFile) where symsByFile maps a source file name to a
    {symbol: encoding} dict. On any parser failure the mapping is empty --
    headerparse fails on many SDK headers and that is expected.
    """
    args = [
        'headerparser_output/headerparse', fn,
        '-ObjC',
        '-fmodules',
        '-isysroot', sdkPath,
        '-I%s/usr/include' % sdkPath,
        '-I%s/usr/include/libxml2' % sdkPath,
        '-F%s/System/Library/Frameworks' % sdkPath,
        '-I/usr/local/lib/clang/9.0.1/include',
        '-DTARGET_OS_SIMULATOR'
    ]
    if '.framework' in fn:
        # Headers inside a framework need the framework name passed too.
        args.append('-framework')
        args.append(fn.split('.framework', 1)[0].rsplit('/', 1)[1])
    symsByFile = {}
    try:
        # universal_newlines makes check_output return text on Python 3
        # as well (it returned bytes otherwise, breaking the str split).
        output = subprocess.check_output(
            args, stderr=subprocess.STDOUT, universal_newlines=True
        ).strip().split('\n')
        if len(output) == 1 and output[0] == '':
            return fn, {}
        for line in output:
            line = line.strip()
            if not line:
                continue
            if line.startswith('~~~'):
                # Diagnostic passthrough from the parser.
                print(line[3:])
                continue
            # BUG FIX: the original unpacked into `fn`, clobbering the
            # function parameter, so the first element of the return
            # value was whatever file happened to be parsed last.
            sym_fn, sym, encoding = line.split(':::', 2)
            symsByFile.setdefault(sym_fn, {})[sym] = encoding
    except Exception:
        # BUG FIX: `except Exception, e` is Python 2-only syntax; the
        # bound exception was unused anyway.
        pass
    return fn, symsByFile
def main():
    """Walk the SDK for headers, parse them in parallel, dump 'funcdb'."""
    allFns = []
    for root, dirnames, filenames in os.walk(sdkPath):
        for filename in fnmatch.filter(filenames, '*.h'):
            allFns.append(os.path.join(root, filename))

    # BUG FIX: the pool was created at module import time with no
    # __main__ guard (required by multiprocessing's spawn start method)
    # and was never closed/joined.
    pool = Pool(20)
    allSymsByFn = {}
    try:
        for fn, symbols in tqdm.tqdm(pool.imap_unordered(parseSymbols, allFns),
                                     total=len(allFns)):
            for dfn, syms in symbols.items():
                allSymsByFn.setdefault(dfn, {}).update(syms)
    finally:
        pool.close()
        pool.join()

    # BUG FIX: `file(...)` and `print >>fp` are Python 2-only; use open()
    # in a context manager and explicit writes. The output format
    # ("<file>\n\t<name> = <encoding>\n") is unchanged.
    with open('funcdb', 'w') as fp:
        for fn, syms in allSymsByFn.items():
            fp.write(fn + '\n')
            for name, encoding in sorted(syms.items()):
                fp.write('\t%s = %s\n' % (name, encoding))


if __name__ == '__main__':
    main()
| 1,840 | 784 |
"""
Tools for waiting for a cluster.
"""
import click
import click_spinner
import urllib3
from cli.common.options import (
superuser_password_option,
superuser_username_option,
)
from ._common import ClusterVMs
from ._options import existing_cluster_id_option
@click.command('wait')
@existing_cluster_id_option
@superuser_username_option
@superuser_password_option
def wait(
    cluster_id: str,
    superuser_username: str,
    superuser_password: str,
) -> None:
    """
    Wait for DC/OS to start.
    """
    # These clusters use self-signed certificates; silence the warnings.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    message = (
        'A cluster may take some time to be ready.\n'
        'The amount of time it takes to start a cluster depends on a variety '
        'of factors.\n'
        'If you are concerned that this is hanging, try "dcos-vagrant doctor" '
        'to diagnose common issues.'
    )
    click.echo(message)
    cluster_vms = ClusterVMs(cluster_id=cluster_id)
    with click_spinner.spinner():
        # OSS clusters need no credentials; enterprise waits with them.
        if not cluster_vms.is_enterprise:
            cluster_vms.cluster.wait_for_dcos_oss(http_checks=True)
            return
        cluster_vms.cluster.wait_for_dcos_ee(
            superuser_username=superuser_username,
            superuser_password=superuser_password,
            http_checks=True,
        )
| 1,331 | 409 |
class Graph(dict):
    """A Graph is a dictionary of dictionaries. The outer dictionary maps
    from a vertex to an inner dictionary.
    The inner dictionary maps from other vertices to edges.
    For vertices a and b, graph[a][b] maps to the edge that
    connects a->b, if it exists."""

    def __init__(self, vs=None, es=None):
        """Creates a new graph.

        vs: list of vertices;
        es: list of edges.

        BUG FIX: the original used mutable default arguments ([]),
        which are shared across calls; use None sentinels instead.
        """
        for v in (vs or []):
            self.add_vertex(v)
        for e in (es or []):
            self.add_edge(e)

    def add_vertex(self, v):
        """Add a vertex to the graph."""
        self[v] = {}

    def add_edge(self, e):
        """Add an edge to the graph by adding an entry in both directions.

        If there is already an edge connecting these vertices,
        the new edge replaces it.
        """
        v, w = e
        self[v][w] = e
        self[w][v] = e
class Vertex(object):
    """A node in a graph, identified by an arbitrary label."""

    def __init__(self, label=''):
        self.label = label

    def __repr__(self):
        """Return a string that can be evaluated as a Python expression
        to recreate this vertex."""
        return 'Vertex (%s)' % repr(self.label)

    # str() and repr() render identically.
    __str__ = __repr__
class Edge(tuple):
    """An Edge is a tuple of two Vertices."""

    def __new__(cls, e1, e2):
        """Create an Edge connecting vertices e1 and e2.

        BUG FIX: the original checked `len(vs)` where `vs` was never
        defined (a guaranteed NameError) and used the Python 2-only
        `raise ValueError, '...'` syntax. The two-argument signature
        already guarantees exactly two endpoints, so no check is needed.
        """
        return tuple.__new__(cls, (e1, e2))

    def __repr__(self):
        """Returns a string representation of this object that can be
        evaluated as a Python expression."""
        return 'Edge (%s, %s)' % (repr(self[0]), repr(self[1]))

    __str__ = __repr__
if __name__ == '__main__':
    # Smoke test: build a two-vertex graph with a single edge and show it.
    # BUG FIX: the Python 2 `print x` statements are converted to
    # function calls, which behave identically on Python 2 (single
    # parenthesized argument) and are required on Python 3.
    x = Vertex('x')
    y = Vertex('y')
    xy = Edge(x, y)
    g = Graph([x, y], [xy])
    print(x)
    print(xy)
    print(g)
| 1,922 | 609 |
# -*- coding: utf-8 -*-
"""Unit test package for graphql_env."""
| 65 | 27 |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 20:33:21 2021
@author: zhang
"""
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D, BatchNormalization, MaxPool2D ,Activation, MaxPooling2D
def data_table(folder):
    '''Create a dataframe with 'id' and 'label' columns.

    The 'id' column is the path of each image file under *folder*, and
    the 'label' column contains 1/0 indicating whether cancer cells
    exist or not: paths containing 'non_cancer' are labelled 0,
    everything else 1. Rows are returned in shuffled order.
    '''
    # Collect every file path under the folder in a single pass.
    paths = [
        os.path.join(path, file_name)
        for path, dir_list, file_list in os.walk(folder)
        for file_name in file_list
    ]
    # Label directly from the path, replacing the original's detour
    # through a second loop and an intermediate dict.
    labels = [0 if 'non_cancer' in p else 1 for p in paths]
    df = pd.DataFrame({'id': paths, 'label': labels})
    # sample(frac=1) shuffles with pandas alone, removing the need for
    # sklearn.utils.shuffle here.
    return df.sample(frac=1)
#folder where the images data stored
# NOTE(review): hardcoded local Windows path -- this script only runs on
# the author's machine as-is.
f=r'G:\BaiduNetdiskDownload\train'
df_full=data_table(f)
#define X and y
X=df_full['id']  # image file paths
y=df_full['label']  # 0/1 cancer labels
# train and test split (fixed seed for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 100) # split into test and train sets
def slice_load(file_list):
    '''Load every image in *file_list* as a (512, 512, 3) array.'''
    return [
        image.img_to_array(image.load_img(name, target_size=(512, 512, 3)))
        for name in file_list
    ]
# Load the images and normalize pixel values to [0, 1].
X_train_image = slice_load(X_train)
X_train_array = np.array(X_train_image)/255
X_test_image = slice_load(X_test)
X_test_array = np.array(X_test_image)/255
# clear sessions
K.clear_session()
input_shape = (512, 512, 3)
# transfer learning with ResNet50V2
resMod = ResNet50V2(include_top=False, weights='imagenet',
                    input_shape=input_shape)
# freeze the pretrained ResNet50V2 layers
for layer in resMod.layers:
    layer.trainable = False
# build model
model = Sequential()
model.add(resMod)
model.add(tf.keras.layers.GlobalAveragePooling2D())
# 1st Dense: (None, 60)
model.add(keras.layers.Dense(60, activation='relu'))
# regularization with dropout
model.add(Dropout(0.2))
# 2nd Dense: (None, 50)
model.add(keras.layers.Dense(50, activation='relu'))
# regularization
model.add(keras.layers.BatchNormalization())
# 3rd Dense: (None, 50)
model.add(keras.layers.Dense(50, activation='relu'))
model.add(keras.layers.BatchNormalization())
# Output Layer: (None, 1)
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
# Compile.
# BUG FIX: with a single sigmoid output unit and 0/1 integer labels the
# loss must be binary_crossentropy; categorical_crossentropy over one
# "class" is degenerate and the model cannot learn from it.
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
# add early stopping
callback = EarlyStopping(monitor='val_loss', patience=3)
# Train (dead REPL-style expressions X_train_array.shape / type(y_train)
# from the original were removed -- they had no effect in a script).
results = model.fit(X_train_array, y_train, batch_size=64, epochs=50, verbose=1,
                    validation_split=0.2, callbacks=[callback], shuffle=True)
model.evaluate(X_test_array, y_test)
# save model
model.save(r'C:\Users\zhang\GitHub_projects\GTBR\Gastric_Biopsy_Cancer_Detector\model\resnet_gastric.h5')
| 3,642 | 1,352 |
import torch
import matplotlib.pyplot as plt
import numpy as np
import glob
import cv2
from esim_torch import EventSimulator_torch
def increasing_sin_wave(t):
    """Return a growing-amplitude sine intensity signal as a uint8 stack.

    t: 1-D array of timestamps (seconds); output has shape (len(t), 1, 1).
    """
    elapsed = t - t[0]
    wave = 400 * np.sin(elapsed * 20 * np.pi) * elapsed + 150
    return wave.astype("uint8").reshape((-1, 1, 1))
if __name__ == "__main__":
    # Symmetric contrast threshold (log-intensity step per event).
    c = 0.2
    # Minimum time between two events at the same pixel, in nanoseconds.
    refractory_period_ns = 5e6
    esim_torch = EventSimulator_torch(contrast_threshold_neg=c,
                                      contrast_threshold_pos=c,
                                      refractory_period_ns=refractory_period_ns)
    print("Loading images")
    timestamps_s = np.genfromtxt("../esim_py/tests/data/images/timestamps.txt")
    # Synthetic 1x1-pixel "video" driven by the loaded timestamps.
    images = increasing_sin_wave(timestamps_s)
    timestamps_ns = (timestamps_s * 1e9).astype("int64")
    # Small epsilon avoids log(0) for black pixels.
    log_images = np.log(images.astype("float32") / 255 + 1e-4)
    # generate torch tensors
    # NOTE(review): device is hardcoded to the first CUDA GPU; this demo
    # will not run on a CPU-only machine.
    print("Loading data to GPU")
    device = "cuda:0"
    log_images = torch.from_numpy(log_images).to(device)
    timestamps_ns = torch.from_numpy(timestamps_ns).to(device)
    # generate events with GPU support
    print("Generating events")
    events = esim_torch.forward(log_images, timestamps_ns)
    # render events
    image = images[0]
    print("Plotting")
    event_timestamps = events['t']
    event_polarities = events['p']
    # Initial log intensity of the (single) pixel, used to draw the
    # contrast-threshold crossing levels below.
    i0 = log_images[0].cpu().numpy().ravel()
    fig, ax = plt.subplots(ncols=2)
    # Move everything back to the host for matplotlib.
    timestamps_ns = timestamps_ns.cpu().numpy()
    log_images = log_images.cpu().numpy().ravel()
    ax[0].plot(timestamps_ns, log_images)
    ax[0].plot(timestamps_ns, images.ravel())
    ax[0].set_ylim([np.log(1e-1),np.log(1 + 1e-4)])
    ax[0].set_ylabel("Log Intensity")
    ax[0].set_xlabel("Time [ns]")
    ax[1].set_ylabel("Time since last event [ns]")
    ax[1].set_xlabel("Timestamp of event [ns]")
    ax[1].set_xlim([0,3e8])
    # Draw horizontal lines at each threshold level i0 + i*c.
    for i in range(-10,3):
        ax[0].plot([0,timestamps_ns[-1]], [i0+i*c, i0+i*c], c='g')
    event_timestamps = event_timestamps.cpu().numpy()
    # Mark each event: red for negative polarity, blue for positive, and
    # scatter the inter-event interval on the right-hand axes.
    for i, (t, p) in enumerate(zip(event_timestamps, event_polarities)):
        color = "r" if p == -1 else "b"
        ax[0].plot([t, t], [-3, 0], c=color)
        if i > 0:
            ax[1].scatter([t], [t-event_timestamps[i-1]], c=color)
    # Reference line showing the refractory period floor.
    ax[1].plot([0,3e8], [refractory_period_ns, refractory_period_ns])
    plt.show()
| 2,280 | 906 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
load_csv.py
This script controlls all load csv information.
Created on: Fri Jul 16 15:54:43 2021
Author: Alex K. Chew (alex.chew@schrodinger.com)
Copyright Schrodinger, LLC. All rights reserved.
"""
# Loading modules
import os
import pandas as pd
import numpy as np
# Importing filtration tools
from .filtration import filter_by_variance_threshold
# Defining default columns
# Identifier columns (not numeric descriptors); they are re-attached to
# the front of filtered descriptor dataframes after numeric filtering.
DEFAULT_INDEX_COLS = ["Title", "Entry Name"]
# Loading experimental data
def load_property_data(csv_data_path,
                       keep_list = None):
    """
    This function loads property data from spreadsheet

    Parameters
    ----------
    csv_data_path: [str]
        path to csv file
    keep_list: [list, default = None]
        list of columns to keep. If None (or empty), the entire
        dataframe is outputted.

    Returns
    -------
    csv_data: [df]
        dataframe containing csv information with the keep list
    """
    # BUG FIX (idiom): the original used a mutable default argument
    # ([]), which is shared across calls; None is the safe sentinel.
    # Behavior for existing callers is unchanged.
    csv_data = pd.read_csv(csv_data_path)
    if not keep_list:
        return csv_data
    return csv_data[keep_list]
# Function to load descriptor data
def load_descriptor_data(csv_path,
clean_data = True,
filter_by_variance = True,
output_filtered_data = False,
na_filter = 'remove',
default_index_cols = DEFAULT_INDEX_COLS):
"""
This function loads the descriptor information. Note that all:
- non-numerical descriptors are removed automatically.
- missing NaN columns are removed automatically
Parameters
----------
csv_path : str
Path to csv file
clean_data: logical, default = True
True if you want to clean the data by removing non-numerical descriptors / NaN columns
output_filtered_data: logical, optional
True if you want to output the filtered data as a separate csv file.
The default value is False.
filter_by_variance: logical, optional
True if you want to filter by variance. By default, this is True.
na_filter: str, optional
Method of dealing with non-existing numbers. The different methods
are summarized below:
'remove': (default)
Remove all columns that have non-existing numbers.
'fill_with_zeros':
Fill all nans with zeros. It will also look for infinities and replace them
with zeros.
Returns
-------
output_df : str
dataframe containing csv file
"""
# Loading csv file
csv_df = pd.read_csv(csv_path)
# Printing
print("\nLoading CSV file: %s"%(csv_path))
# Checking if you want to clean the dataframe
if clean_data is True:
# Cleaning the dataframe
if na_filter == 'remove':
print("Removing all columns with nan's")
csv_df_nonan = csv_df.dropna(axis=1) # Removes NaN values
elif na_filter == 'fill_with_zeros':
print("Filling nan's with zeros")
csv_df_nonan = csv_df.fillna(0)
csv_df_nonan.replace([np.inf, -np.inf], 0)
else:
print("Error! na_filter of %s is not defined!"%(na_filter))
# Selecting only portions of the dataframe with numbers.
csv_df_nums = csv_df_nonan.select_dtypes(['number']) #
try:
# Removing cols with low variance
if filter_by_variance is True:
output_df = filter_by_variance_threshold(X_df = csv_df_nums)
else:
print("Skipping variance filtration for %s"%(csv_path))
output_df = csv_df_nums
# Adding back the index cols to the beginning
for each_col in default_index_cols[::-1]: # Reverse order
if each_col in csv_df and each_col not in output_df:
output_df.insert (0, each_col, csv_df[each_col])
except ValueError: # Happens when you have a blank dataframe
print("No columns found that matches filtration for %s"%(csv_path))
cols_to_include = [each_col for each_col in default_index_cols if each_col in csv_df.columns]
output_df = csv_df[cols_to_include]
# Storing dataframe
if output_filtered_data is True:
# Getting path without
csv_path_without_ext = os.path.splitext(csv_path)[0]
# Getting filtered nomenclature
csv_path_with_new_name = csv_path_without_ext + "_filtered.csv"
# Storing
print("Storing filtered data to: %s"%(csv_path_with_new_name))
output_df.to_csv(csv_path_with_new_name, index = False)
return output_df
else:
return csv_df
# Function to load multiple descriptor datas
def load_multiple_descriptor_data(default_csv_paths,
descriptor_list = ["2d_descriptors",
"3d_descriptors",],
**args
):
"""
This function loads multiple descriptor data given a descriptor list.
Parameters
----------
default_csv_paths: dict
dictionary of csv paths
descriptor_list : list
list of descriptors to load from dictionary
Remainder of arguments go into the load descriptor function
Returns
-------
descriptor_df_dict: dict
dictionary containing descritpors
"""
# Loading all descriptor files
descriptor_df_dict = { each_descriptor_key: load_descriptor_data(default_csv_paths[each_descriptor_key], **args)
for each_descriptor_key in descriptor_list }
return descriptor_df_dict
# Function to strip title and etc to get numerical descriptors only
def strip_df_index(df,
col2remove = DEFAULT_INDEX_COLS):
"""
This function strips the dataframe from the index information.
Parameters
----------
df : dataframe
pandas dataframe containing descriptor information.
col2remove: list
list of columns to remove from the dataframe.
Returns
-------
df_clean: dataframe]
pandas dataframe without any "Title" or index information
"""
# Dropping the columns
df_clean = df.drop(columns = col2remove,
errors='ignore')
return df_clean | 6,642 | 1,837 |
from ivory.layers import (activation, affine, convolution, core, dropout,
embedding, loss, normalization, recurrent)
# Public layer submodules re-exported as the package API.
__all__ = [
    "activation",
    "affine",
    "convolution",
    "core",
    "dropout",
    "embedding",
    "loss",
    "normalization",
    "recurrent",
]
| 303 | 96 |