max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
main.py
|
adadesions/PacInverter
| 0
|
6627451
|
import sys
import getopt
import time
from components.PacpowerAPI import PacpowerAPI

if __name__ == '__main__':
    # Parse command-line options into a flat dict of connection settings.
    opts, args = getopt.getopt(sys.argv[1:], 'u:p:s:v:l:',
                               ['user=', 'pass=', 'storage=', 'volt=', 'location='])
    parsed_argv = {
        'user': '', 'pass': '', 'storage_id': '', 'max_volt': '', 'location': ''
    }
    for key, val in opts:
        print(key, val)
        if key in ('-u', '--user'):
            parsed_argv['user'] = val
        if key in ('-p', '--pass'):
            parsed_argv['pass'] = val
        if key in ('-s', '--storage'):
            parsed_argv['storage_id'] = val
        if key in ('-v', '--volt'):
            parsed_argv['max_volt'] = int(val)
        if key in ('-l', '--location'):
            parsed_argv['location'] = val
    print(parsed_argv)

    # BUG FIX: the parsed arguments were previously ignored and hard-coded
    # credentials/IDs were always used.  Use the CLI values, falling back to
    # the old defaults so existing invocations keep working.
    # NOTE(review): the hard-coded fallback credentials should be removed
    # once all callers pass -u/-p explicitly.
    pAPI = PacpowerAPI(parsed_argv['user'] or 'pacpower',
                       parsed_argv['pass'] or 'pacpower1234',
                       parsed_argv['storage_id'] or 'DXE3A0305B',
                       parsed_argv['max_volt'] or 54)
    pAPI.initAPI()
    newData = pAPI.get_data_pack()
    print("NewData")
    print(newData)
    # Same fallback for the cloud location label.
    pAPI.send_to_cloud(parsed_argv['location'] or "pac0")
|
import sys
import getopt
import time
from components.PacpowerAPI import PacpowerAPI
if __name__ == '__main__':
opts, args = getopt.getopt(sys.argv[1:], 'u:p:s:v:l:',
['user=', 'pass=', 'storage=', 'volt=', 'location='])
parsed_argv = {
'user': '', 'pass': '', 'storage_id': '', 'max_volt': '', 'location': ''
}
for key, val in opts:
print(key, val)
if key in ('-u', '--user'):
parsed_argv['user'] = val
if key in ('-p', '--pass'):
parsed_argv['pass'] = val
if key in ('-s', '--storage'):
parsed_argv['storage_id'] = val
if key in ('-v', '--volt'):
parsed_argv['max_volt'] = int(val)
if key in ('-l', '--location'):
parsed_argv['location'] = val
print(parsed_argv)
pAPI = PacpowerAPI('pacpower', 'pacpower1234', 'DXE3A0305B', 54)
pAPI.initAPI()
newData = pAPI.get_data_pack()
print("NewData")
print(newData)
pAPI.send_to_cloud("pac0")
|
none
| 1
| 2.238535
| 2
|
|
modules/cls_ffn.py
|
TUIlmenauAMS/nca_mss
| 2
|
6627452
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'MacSeNet'
import torch
import torch.nn as nn
from torch.autograd import Variable
class DFFN(nn.Module):
    def __init__(self, N, l_dim):
        """
        Two-layer feed-forward network (encoder/decoder pair).

        Args:
            N     : (int) Original dimensionality of the input.
            l_dim : (int) Dimensionality of the latent variables.
        """
        super(DFFN, self).__init__()
        print('Constructing 2-FFN')
        self._N = N
        self._ldim = l_dim
        self.activation_function = torch.nn.ReLU()
        # Encoder
        self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
        # Decoder
        self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """Manual weight initialization (Xavier/Glorot normal); biases keep
        the nn.Linear defaults."""
        # FIX: nn.init.xavier_normal is a deprecated alias; use the
        # in-place xavier_normal_.
        nn.init.xavier_normal_(self.ih_matrix.weight)
        nn.init.xavier_normal_(self.ho_matrix.weight)
        print('Initialization of the FFN done...')
        return None

    def forward(self, input_x):
        """Forward pass.

        Args:
            input_x : numpy array of shape (..., N); dtype is expected to
                      match the layer weights (float32) -- TODO confirm callers.
        Returns:
            (y_out, x) : reconstructed output and the wrapped input tensor.
        """
        # FIX: torch.has_cudnn only reports build-time cuDNN support; check
        # that a CUDA device is actually available before moving data to it.
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        # Encoder
        hl_rep = self.activation_function(self.ih_matrix(x))
        # Decoder
        y_out = self.activation_function(self.ho_matrix(hl_rep))
        return y_out, x
class DNN(nn.Module):
    def __init__(self, N, l_dim):
        """
        Deep (four-layer) feed-forward network for MSS.

        Args:
            N     : (int) Original dimensionality of the input.
            l_dim : (int) Dimensionality of the latent variables.
        """
        super(DNN, self).__init__()
        print('Constructing a Deep Neural Network')
        self._N = N
        self._ldim = l_dim
        self.activation_function = torch.nn.ReLU()
        # Layers: input -> hidden -> hidden -> output
        self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
        self.hh_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
        self.hh_b_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
        self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """Manual weight initialization (Xavier/Glorot normal); biases keep
        the nn.Linear defaults."""
        # FIX: nn.init.xavier_normal is a deprecated alias; use the
        # in-place xavier_normal_.
        nn.init.xavier_normal_(self.ih_matrix.weight)
        nn.init.xavier_normal_(self.hh_matrix.weight)
        nn.init.xavier_normal_(self.hh_b_matrix.weight)
        nn.init.xavier_normal_(self.ho_matrix.weight)
        print('Initialization of the DNN done...')
        return None

    def forward(self, input_x):
        """Forward pass through the three hidden transforms plus output layer.

        Args:
            input_x : numpy array of shape (..., N), float32 expected.
        Returns:
            (y_out, x) : network output and the wrapped input tensor.
        """
        # FIX: torch.has_cudnn only reports build-time cuDNN support; check
        # that a CUDA device is actually available before moving data to it.
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        hl_rep = self.activation_function(self.ih_matrix(x))
        hl_rep = self.activation_function(self.hh_matrix(hl_rep))
        hl_rep = self.activation_function(self.hh_b_matrix(hl_rep))
        y_out = self.activation_function(self.ho_matrix(hl_rep))
        return y_out, x
class FFN(nn.Module):
    def __init__(self, N):
        """
        Single-layer feed-forward network, used for pre-training.

        Args:
            N : (int) Original dimensionality of the input.
        """
        super(FFN, self).__init__()
        print('Constructing FFN')
        self._N = N
        self.activation_function = torch.nn.ReLU()
        # Single Layer
        self.io_matrix = nn.Linear(self._N, self._N, bias=True)
        # Initialize the weights
        self.initialize_ffn()

    def initialize_ffn(self):
        """Manual weight initialization (Xavier/Glorot normal)."""
        # FIX: nn.init.xavier_normal is a deprecated alias; use the
        # in-place xavier_normal_.
        nn.init.xavier_normal_(self.io_matrix.weight)
        print('Initialization of the FFN done...')
        return None

    def forward(self, input_x):
        """Apply the single linear layer + ReLU; returns (output, input)."""
        # FIX: torch.has_cudnn only reports build-time cuDNN support; check
        # that a CUDA device is actually available before moving data to it.
        if torch.cuda.is_available():
            x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
        else:
            x = Variable(torch.from_numpy(input_x), requires_grad=True)
        return self.activation_function(self.io_matrix(x)), x
# EOF
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'MacSeNet'
import torch
import torch.nn as nn
from torch.autograd import Variable
class DFFN(nn.Module):
def __init__(self, N, l_dim):
"""
Constructing blocks for a two-layer FFN.
Args :
N : (int) Original dimensionallity of the input.
l_dim : (int) Dimensionallity of the latent variables.
"""
super(DFFN, self).__init__()
print('Constructing 2-FFN')
self._N = N
self._ldim = l_dim
self.activation_function = torch.nn.ReLU()
# Encoder
self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
# Decoder
self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
# Initialize the weights
self.initialize_ffn()
def initialize_ffn(self):
"""
Manual weight/bias initialization.
"""
# Matrices
nn.init.xavier_normal(self.ih_matrix.weight)
nn.init.xavier_normal(self.ho_matrix.weight)
print('Initialization of the FFN done...')
return None
def forward(self, input_x):
if torch.has_cudnn:
x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
else:
x = Variable(torch.from_numpy(input_x), requires_grad=True)
# Encoder
hl_rep = self.activation_function(self.ih_matrix(x))
# Decoder
y_out = self.activation_function(self.ho_matrix(hl_rep))
return y_out, x
class DNN(nn.Module):
def __init__(self, N, l_dim):
"""
Constructing blocks for a deep neural network
for MSS.
Args :
N : (int) Original dimensionallity of the input.
l_dim : (int) Dimensionallity of the latent variables.
"""
super(DNN, self).__init__()
print('Constructing a Deep Neural Network')
self._N = N
self._ldim = l_dim
self.activation_function = torch.nn.ReLU()
# Layers
self.ih_matrix = nn.Linear(self._N, self._ldim, bias=True)
self.hh_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
self.hh_b_matrix = nn.Linear(self._ldim, self._ldim, bias=True)
self.ho_matrix = nn.Linear(self._ldim, self._N, bias=True)
# Initialize the weights
self.initialize_ffn()
def initialize_ffn(self):
"""
Manual weight/bias initialization.
"""
# Matrices
nn.init.xavier_normal(self.ih_matrix.weight)
nn.init.xavier_normal(self.hh_matrix.weight)
nn.init.xavier_normal(self.hh_b_matrix.weight)
nn.init.xavier_normal(self.ho_matrix.weight)
print('Initialization of the DNN done...')
return None
def forward(self, input_x):
if torch.has_cudnn:
x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
else:
x = Variable(torch.from_numpy(input_x), requires_grad=True)
hl_rep = self.activation_function(self.ih_matrix(x))
hl_rep = self.activation_function(self.hh_matrix(hl_rep))
hl_rep = self.activation_function(self.hh_b_matrix(hl_rep))
y_out = self.activation_function(self.ho_matrix(hl_rep))
return y_out, x
class FFN(nn.Module):
def __init__(self, N):
"""
Constructing blocks for a single layer FFN,
for pre-training.
Args :
N : (int) Original dimensionallity of the input.
"""
super(FFN, self).__init__()
print('Constructing FFN')
self._N = N
self.activation_function = torch.nn.ReLU()
# Single Layer
self.io_matrix = nn.Linear(self._N, self._N, bias=True)
# Initialize the weights
self.initialize_ffn()
def initialize_ffn(self):
"""
Manual weight/bias initialization.
"""
# Matrix
nn.init.xavier_normal(self.io_matrix.weight)
print('Initialization of the FFN done...')
return None
def forward(self, input_x):
if torch.has_cudnn:
x = Variable(torch.from_numpy(input_x).cuda(), requires_grad=True)
else:
x = Variable(torch.from_numpy(input_x), requires_grad=True)
return self.activation_function(self.io_matrix(x)), x
# EOF
|
en
| 0.700579
|
# -*- coding: utf-8 -*- Constructing blocks for a two-layer FFN. Args : N : (int) Original dimensionallity of the input. l_dim : (int) Dimensionallity of the latent variables. # Encoder # Decoder # Initialize the weights Manual weight/bias initialization. # Matrices # Encoder # Decoder Constructing blocks for a deep neural network for MSS. Args : N : (int) Original dimensionallity of the input. l_dim : (int) Dimensionallity of the latent variables. # Layers # Initialize the weights Manual weight/bias initialization. # Matrices Constructing blocks for a single layer FFN, for pre-training. Args : N : (int) Original dimensionallity of the input. # Single Layer # Initialize the weights Manual weight/bias initialization. # Matrix # EOF
| 2.977417
| 3
|
genmotion/algorithm/action2motion/utils/matrix_transformer.py
|
yizhouzhao/GenMotion
| 32
|
6627453
|
import numpy as np
class MatrixTransformer(object):
    """Static helpers that right-multiply a matrix by elementary 3x3
    rotation and axis-permutation transforms."""

    @staticmethod
    def rotate_along_x(matrix, theta):
        """Rotate `matrix` about the x axis by `theta` radians."""
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        rotation = np.array([[1, 0, 0],
                             [0, cos_t, sin_t],
                             [0, -sin_t, cos_t]])
        return np.dot(matrix, rotation)

    @staticmethod
    def rotate_along_y(matrix, theta):
        """Rotate `matrix` about the y axis by `theta` radians."""
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        rotation = np.array([[cos_t, 0, -sin_t],
                             [0, 1, 0],
                             [sin_t, 0, cos_t]])
        return np.dot(matrix, rotation)

    @staticmethod
    def rotate_along_z(matrix, theta):
        """Rotate `matrix` about the z axis by `theta` radians."""
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        rotation = np.array([[cos_t, sin_t, 0],
                             [-sin_t, cos_t, 0],
                             [0, 0, 1]])
        return np.dot(matrix, rotation)

    @staticmethod
    def swap_yz(matrix):
        """Exchange the y and z columns of `matrix`."""
        permutation = np.array([[1, 0, 0],
                                [0, 0, 1],
                                [0, 1, 0]])
        return np.dot(matrix, permutation)

    @staticmethod
    def swap_xz(matrix):
        """Exchange the x and z columns of `matrix`."""
        permutation = np.array([[0, 0, 1],
                                [0, 1, 0],
                                [1, 0, 0]])
        return np.dot(matrix, permutation)

    @staticmethod
    def swap_xy(matrix):
        """Exchange the x and y columns of `matrix`."""
        permutation = np.array([[0, 1, 0],
                                [1, 0, 0],
                                [0, 0, 1]])
        return np.dot(matrix, permutation)
def project_3d_to_2d(cam_f, cam_c, verts):
    '''
    Project 3d points to the original 2d image coordinate space using a
    simple pinhole-camera model: u = fx*x/z + cx, v = fy*y/z + cy.

    Input:
        cam_f: (2,) focal lengths (fx, fy).
        cam_c: (2,) principal point (cx, cy).
        verts: 3d vertices; any shape reshapable to (-1, 3).
    Output:
        (N, 2) array of 2d coordinates.
    '''
    fx = cam_f[0]
    fy = cam_f[1]
    tx = cam_c[0]
    ty = cam_c[1]
    verts = verts.reshape(-1, 3)
    verts2d = np.zeros((verts.shape[0], 2))
    # FIX: removed the leftover debug print of verts2d.shape.
    # Perspective division followed by the affine intrinsics.
    verts2d[:, 0] = fx * verts[:, 0] / verts[:, 2] + tx
    verts2d[:, 1] = fy * verts[:, 1] / verts[:, 2] + ty
    return verts2d
|
import numpy as np
class MatrixTransformer(object):
@staticmethod
def rotate_along_x(matrix, theta):
Rx = np.array([[1, 0, 0],
[0, np.cos(theta), np.sin(theta)],
[0, -np.sin(theta), np.cos(theta)]])
return np.dot(matrix, Rx)
@staticmethod
def rotate_along_y(matrix, theta):
Ry = np.array([[np.cos(theta), 0, -np.sin(theta)],
[0, 1, 0],
[np.sin(theta), 0, np.cos(theta)]])
return np.dot(matrix, Ry)
@staticmethod
def rotate_along_z(matrix, theta):
Rz = np.array([[np.cos(theta), np.sin(theta), 0],
[-np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
return np.dot(matrix, Rz)
@staticmethod
def swap_yz(matrix):
S_yz = np.array([[1, 0, 0],
[0, 0, 1],
[0, 1, 0]])
return np.dot(matrix, S_yz)
@staticmethod
def swap_xz(matrix):
S_xz = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
return np.dot(matrix, S_xz)
@staticmethod
def swap_xy(matrix):
S_xy = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
return np.dot(matrix, S_xy)
def project_3d_to_2d(cam_f, cam_c, verts):
    '''
    Project 3d points to the original 2d image coordinate space using a
    simple pinhole-camera model: u = fx*x/z + cx, v = fy*y/z + cy.

    Input:
        cam_f: (2,) focal lengths (fx, fy).
        cam_c: (2,) principal point (cx, cy).
        verts: 3d vertices; any shape reshapable to (-1, 3).
    Output:
        (N, 2) array of 2d coordinates.
    '''
    fx = cam_f[0]
    fy = cam_f[1]
    tx = cam_c[0]
    ty = cam_c[1]
    verts = verts.reshape(-1, 3)
    verts2d = np.zeros((verts.shape[0], 2))
    # FIX: removed the leftover debug print of verts2d.shape.
    # Perspective division followed by the affine intrinsics.
    verts2d[:, 0] = fx * verts[:, 0] / verts[:, 2] + tx
    verts2d[:, 1] = fy * verts[:, 1] / verts[:, 2] + ty
    return verts2d
|
en
| 0.685145
|
project 3d points to original 2d coordinate space. Input: cam: (1, 3) camera parameters (f, cx, cy) output by model. verts: 3d verts output by model. proc_param: preprocessing parameters. this is for converting points from crop (model input) to original image. Output:
| 2.876765
| 3
|
ansible_collections/ctera/ctera/plugins/module_utils/ctera_runner_base.py
|
ctera/ctera-ansible-collection
| 0
|
6627454
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is licensed under the Apache License 2.0.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright 2020, CTERA Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABC, abstractmethod, abstractproperty
from ansible_collections.ctera.ctera.plugins.module_utils import ctera_common
try:
from cterasdk import CTERAException, tojsonstr
except ImportError: # pragma: no cover
pass # caught by ctera_common
class CteraRunnerBase(ABC):
    """Common scaffolding for CTERA Ansible module runners.

    Builds the Ansible module object, extracts its parameters, and provides
    a run() loop that converts CTERAException failures into an Ansible
    failure result before logging out and exiting.
    """

    def __init__(self, ansible_module_class, ansible_module_args, login=True, supports_check_mode=False, required_if=None, required_by=None):
        """Instantiate the Ansible module and capture its parameters.

        ansible_module_class : class used to construct the module object
        ansible_module_args  : argument spec passed through to the module
        login                : whether to authenticate before executing
                               (stored for subclasses; not used here)
        supports_check_mode / required_if / required_by :
                               forwarded Ansible module options
        """
        self.ansible_module = ansible_module_class(
            ansible_module_args,
            supports_check_mode=supports_check_mode,
            required_if=required_if or [],
            required_by=required_by or {}
        )
        self.parameters = ctera_common.get_parameters(self.ansible_module.params)
        # Consumed by subclasses that decide whether to log in first.
        self._login = login

    def run(self):
        """Execute the module; on CTERAException, record a failed result,
        then always log out and exit through the module object."""
        try:
            self._execute()
        except CTERAException as error:
            self.ansible_module.ctera_return_value().failed().msg(self._generic_failure_message + (' Exception: %s' % tojsonstr(error, False)))
        self.ansible_module.ctera_logout()
        self.ansible_module.ctera_exit()

    @property
    def _generic_failure_message(self): # pragma: no cover
        # NOTE(review): declared as a plain property (the imported
        # abstractproperty is unused); subclasses are expected to override.
        raise NotImplementedError("Implementing classes must implement _generic_failure_message")

    @abstractmethod
    def _execute(self): # pragma: no cover
        # Subclasses implement the module-specific work here.
        raise NotImplementedError("Implementing classes must implement _execute")
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is licensed under the Apache License 2.0.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright 2020, CTERA Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABC, abstractmethod, abstractproperty
from ansible_collections.ctera.ctera.plugins.module_utils import ctera_common
try:
from cterasdk import CTERAException, tojsonstr
except ImportError: # pragma: no cover
pass # caught by ctera_common
class CteraRunnerBase(ABC):
def __init__(self, ansible_module_class, ansible_module_args, login=True, supports_check_mode=False, required_if=None, required_by=None):
self.ansible_module = ansible_module_class(
ansible_module_args,
supports_check_mode=supports_check_mode,
required_if=required_if or [],
required_by=required_by or {}
)
self.parameters = ctera_common.get_parameters(self.ansible_module.params)
self._login = login
def run(self):
try:
self._execute()
except CTERAException as error:
self.ansible_module.ctera_return_value().failed().msg(self._generic_failure_message + (' Exception: %s' % tojsonstr(error, False)))
self.ansible_module.ctera_logout()
self.ansible_module.ctera_exit()
@property
def _generic_failure_message(self): # pragma: no cover
raise NotImplementedError("Implementing classes must implement _generic_failure_message")
@abstractmethod
def _execute(self): # pragma: no cover
raise NotImplementedError("Implementing classes must implement _execute")
|
en
| 0.891646
|
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is licensed under the Apache License 2.0. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright 2020, CTERA Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pragma: no cover # caught by ctera_common # pragma: no cover # pragma: no cover
| 1.755672
| 2
|
arsenic/change_readnames_clr.py
|
jason-weirather/Au-public
| 4
|
6627455
|
#!/usr/bin/python
from __future__ import print_function
import sys
import os

# Adds subscripts to reads generated by PBSIM: lines starting with '@' or
# '+' get a '_clr_<batchnum>' suffix; all other lines pass through.
if len(sys.argv) >= 3:
    fastq_filename = sys.argv[1]
    batchnum = sys.argv[2]
else:
    print("usage: python change_readnames.py fastq_filename batchnum > fastq_out")
    sys.exit(1)

# FIX: stream the file line by line instead of slurping it with readlines(),
# and use a context manager so the handle is closed even on error.
with open(fastq_filename, 'r') as reads:
    for raw_line in reads:
        line = raw_line.rstrip()
        if line.startswith('@') or line.startswith('+'):
            # Tag the header/separator line with the batch suffix.
            print(line, '_clr_', batchnum, sep='')
        else:
            # FIX: dropped the stray trailing comma after print(...) -- a
            # Python-2 leftover that just built a useless (None,) tuple.
            print(line)
|
#!/usr/bin/python
from __future__ import print_function
import sys
import os
#Adds subscripts to reads generated by PBSIM
if len(sys.argv) >= 3:
fastq_filename = sys.argv[1]
batchnum = sys.argv[2]
else:
print("usage: python change_readnames.py fastq_filename batchnum > fastq_out")
sys.exit(1)
reads=open(fastq_filename,'r')
lines = reads.readlines()
for i in range(0,len(lines)):
line = lines[i]
if line.startswith('@'):
print (line.rstrip(),'_clr_',batchnum,sep='')
elif line.startswith('+'):
print (line.rstrip(),'_clr_',batchnum,sep='')
else:
print (line.rstrip()),
reads.close()
|
en
| 0.792734
|
#!/usr/bin/python #Adds subscripts to reads generated by PBSIM
| 2.59813
| 3
|
Python/Warmup_2/array123.py
|
RCoon/CodingBat
| 1
|
6627456
|
# Given an array of ints, return True if .. 1, 2, 3, .. appears in the array
# somewhere.
# array123([1, 1, 2, 3, 1]) --> True
# array123([1, 1, 2, 4, 1]) --> False
# array123([1, 1, 2, 1, 2, 3]) --> True
def array123(nums):
    """Return True if the consecutive run 1, 2, 3 appears somewhere in nums.

    BUG FIX: the previous set-based check only tested that 1, 2 and 3 each
    occur somewhere (in any order), so e.g. [3, 2, 1] wrongly returned True.
    """
    return any(nums[i:i + 3] == [1, 2, 3] for i in range(len(nums) - 2))


print(array123([1, 1, 2, 3, 1]))
print(array123([1, 1, 2, 4, 1]))
print(array123([1, 1, 2, 1, 2, 3]))
|
# Given an array of ints, return True if .. 1, 2, 3, .. appears in the array
# somewhere.
# array123([1, 1, 2, 3, 1]) --> True
# array123([1, 1, 2, 4, 1]) --> False
# array123([1, 1, 2, 1, 2, 3]) --> True
def array123(nums):
    """Return True if the consecutive run 1, 2, 3 appears somewhere in nums.

    BUG FIX: the previous set-based check only tested that 1, 2 and 3 each
    occur somewhere (in any order), so e.g. [3, 2, 1] wrongly returned True.
    """
    return any(nums[i:i + 3] == [1, 2, 3] for i in range(len(nums) - 2))


print(array123([1, 1, 2, 3, 1]))
print(array123([1, 1, 2, 4, 1]))
print(array123([1, 1, 2, 1, 2, 3]))
|
en
| 0.384942
|
# Given an array of ints, return True if .. 1, 2, 3, .. appears in the array # somewhere. # array123([1, 1, 2, 3, 1]) --> True # array123([1, 1, 2, 4, 1]) --> False # array123([1, 1, 2, 1, 2, 3]) --> True
| 3.877677
| 4
|
setup_app/installers/httpd.py
|
threema-gmbh/community-edition-setup
| 0
|
6627457
|
<reponame>threema-gmbh/community-edition-setup
import os
import glob
import shutil
from setup_app import paths
from setup_app.utils import base
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils.setup_utils import SetupUtils
from setup_app.installers.base import BaseInstaller
class HttpdInstaller(BaseInstaller, SetupUtils):
    """Installer/configurator for the Apache HTTP server (httpd/apache2).

    Renders the Gluu Apache templates, prunes Apache modules down to a
    required set for the detected packaging flavour (snap, deb, rpm, suse),
    and generates a self-signed certificate for HTTPS.
    """

    def __init__(self):
        # Service identity and installer-framework registration.
        self.service_name = base.httpd_name
        self.pbar_text = "Configuring " + base.httpd_name
        self.app_type = AppType.SERVICE
        self.install_type = InstallOption.OPTONAL
        self.install_var = 'installHttpd'
        self.register_progess()
        self.needdb = False # we don't need backend connection in this class
        self.apache_version = base.determineApacheVersion()
        # Certificate/key targets and template input/output folders.
        self.httpdKeyFn = os.path.join(Config.certFolder, 'httpd.key')
        self.httpdCertFn = os.path.join(Config.certFolder, 'httpd.crt')
        self.templates_folder = os.path.join(Config.templateFolder, 'apache')
        self.output_folder = os.path.join(Config.outputFolder, 'apache')
        self.apache2_conf = os.path.join(self.output_folder, 'httpd.conf')
        self.apache2_ssl_conf = os.path.join(self.output_folder, 'https_gluu.conf')
        self.apache2_24_conf = os.path.join(self.output_folder, 'httpd_2.4.conf')
        # NOTE(review): same output path as apache2_ssl_conf -- presumably
        # intentional (one rendered vhost serves both layouts); confirm.
        self.apache2_ssl_24_conf = os.path.join(self.output_folder, 'https_gluu.conf')
        # Distribution-specific destination of the rendered vhost file.
        if base.os_type == 'suse':
            self.https_gluu_fn = '/etc/apache2/vhosts.d/_https_gluu.conf'
        elif base.clone_type == 'rpm':
            self.https_gluu_fn = '/etc/httpd/conf.d/https_gluu.conf'
        else:
            self.https_gluu_fn = '/etc/apache2/sites-available/https_gluu.conf'

    def configure(self):
        """Stop the service, write configs, trim modules, generate the
        certificate, then enable and restart the service."""
        self.logIt(self.pbar_text, pbar=self.service_name)
        self.stop()
        self.write_httpd_config()
        self.writeFile('/var/www/html/index.html', 'OK')
        # Locate the config file that declares the /icons/ Alias so it can
        # be commented out below (snap default may be overridden by os type).
        if base.snap:
            icons_conf_fn = '/etc/apache2/mods-available/alias.conf'
        if base.os_type == 'suse':
            icons_conf_fn = '/etc/apache2/default-server.conf'
        elif base.clone_type == 'deb':
            icons_conf_fn = '/etc/apache2/mods-available/alias.conf'
        elif base.clone_type == 'rpm':
            icons_conf_fn = '/etc/httpd/conf.d/autoindex.conf'
        with open(icons_conf_fn[:]) as f:
            icons_conf = f.readlines()
        # Comment out any 'Alias ... /icons/ ...' directive.
        for i, l in enumerate(icons_conf[:]):
            if l.strip().startswith('Alias') and ('/icons/' in l.strip().split()):
                icons_conf[i] = l.replace('Alias', '#Alias')
        self.writeFile(icons_conf_fn, ''.join(icons_conf))
        # Install the custom error pages into the web root.
        error_templates = glob.glob(os.path.join(self.templates_folder,'error_pages/*.html'))
        for tmp_fn in error_templates:
            self.copyFile(tmp_fn, '/var/www/html')
        # we only need these modules
        mods_enabled = ['env', 'log_config', 'proxy', 'proxy_http', 'access_compat', 'alias', 'authn_core', 'authz_core', 'authz_host', 'headers', 'mime', 'mpm_event', 'proxy_ajp', 'security2', 'reqtimeout', 'setenvif', 'socache_shmcb', 'ssl', 'unique_id', 'rewrite']
        cmd_a2enmod = shutil.which('a2enmod')
        cmd_a2dismod = shutil.which('a2dismod')
        if base.snap:
            # Snap: manage the mods-enabled symlinks directly under the
            # snap common directory.
            mods_enabled_dir = os.path.join(base.snap_common, 'etc/apache2/mods-enabled')
            mods_available_dir = os.path.join(base.snap_common, 'etc/apache2/mods-available')
            for em in os.listdir(mods_enabled_dir):
                em_n, em_e = os.path.splitext(em)
                if not em_n in mods_enabled:
                    os.unlink(os.path.join(mods_enabled_dir, em))
            for m in mods_enabled:
                load_fn = os.path.join(mods_available_dir, m + '.load')
                conf_fn = os.path.join(mods_available_dir, m + '.conf')
                if os.path.exists(load_fn):
                    target_fn = os.path.join(mods_enabled_dir, m + '.load')
                    if not os.path.exists(target_fn):
                        os.symlink(load_fn, target_fn)
                if os.path.exists(conf_fn):
                    target_fn = os.path.join(mods_enabled_dir, m + '.conf')
                    if not os.path.exists(target_fn):
                        os.symlink(conf_fn, target_fn)
        elif base.clone_type == 'deb':
            # Debian/Ubuntu: use the a2enmod/a2dismod helpers.
            for mod_load_fn in glob.glob('/etc/apache2/mods-enabled/*'):
                mod_load_base_name = os.path.basename(mod_load_fn)
                f_name, f_ext = os.path.splitext(mod_load_base_name)
                if not f_name in mods_enabled:
                    self.run([cmd_a2dismod, mod_load_fn])
            for amod in mods_enabled:
                if os.path.exists('/etc/apache2/mods-available/{}.load'.format(amod)):
                    self.run([cmd_a2enmod, amod])
        elif base.os_type == 'suse':
            # SUSE: reconcile the currently enabled module list with the
            # wanted set, enable the SSL flag, and comment out the global
            # DirectoryIndex directive.
            result = self.run([cmd_a2enmod, '-l'])
            current_modules = result.strip().split()
            for amod in current_modules:
                if not amod in mods_enabled:
                    self.run([cmd_a2dismod, amod])
            for amod in mods_enabled:
                if not amod in current_modules:
                    self.run([cmd_a2enmod, amod])
            cmd_a2enflag = shutil.which('a2enflag')
            self.run([cmd_a2enflag, 'SSL'])
            httpd_conf_fn = '/etc/apache2/httpd.conf'
            httpd_conf_txt = self.readFile(httpd_conf_fn)
            httpd_conf = httpd_conf_txt.splitlines()
            for i, l in enumerate(httpd_conf[:]):
                if l.strip().startswith('DirectoryIndex'):
                    httpd_conf[i] = l.replace('DirectoryIndex', '#DirectoryIndex')
            self.writeFile(httpd_conf_fn, '\n'.join(httpd_conf))
        else:
            # Other flavours: comment out LoadModule lines for unwanted
            # modules in the conf.modules.d-style fragment files.
            modules_config_dir = '/etc/apache2/sysconfig.d' if base.os_type == 'suse' else '/etc/httpd/conf.modules.d'
            for mod_load_fn in glob.glob(os.path.join(modules_config_dir,'*')):
                if not os.path.isfile(mod_load_fn):
                    continue
                with open(mod_load_fn) as f:
                    mod_load_content = f.readlines()
                modified = False
                for i, l in enumerate(mod_load_content[:]):
                    ls = l.strip()
                    if ls and not ls.startswith('#'):
                        lsl = ls.split('/')
                        if not lsl[0].startswith('LoadModule'):
                            continue
                        # Strip 4 leading / 3 trailing chars, i.e. the
                        # 'mod_' prefix and '.so' suffix of the filename.
                        module = lsl[-1][4:-3]
                        if not module in mods_enabled:
                            mod_load_content[i] = l.replace('LoadModule', '#LoadModule')
                            modified = True
                if modified:
                    self.writeFile(mod_load_fn, ''.join(mod_load_content))
        if not Config.get('httpdKeyPass'):
            Config.httpdKeyPass = self.getPW()
        # generate httpd self signed certificate
        self.gen_cert('httpd', Config.httpdKeyPass, 'jetty')
        self.enable()
        self.start()

    def write_httpd_config(self):
        """Render the Apache templates and copy them to the distribution's
        config locations."""
        self.update_rendering_dict()
        for tmp in (self.apache2_conf, self.apache2_ssl_conf, self.apache2_24_conf, self.apache2_ssl_24_conf):
            self.renderTemplateInOut(tmp, self.templates_folder, self.output_folder)
        # CentOS 7.* + systemd + apache 2.4
        if self.service_name == 'httpd' and self.apache_version == "2.4":
            self.copyFile(self.apache2_24_conf, '/etc/httpd/conf/httpd.conf')
            self.copyFile(self.apache2_ssl_24_conf, '/etc/httpd/conf.d/https_gluu.conf')
        if base.os_type == 'suse':
            self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
        elif base.clone_type == 'rpm' and base.os_initdaemon == 'init':
            self.copyFile(self.apache2_conf, '/etc/httpd/conf/httpd.conf')
            self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
        elif base.clone_type == 'deb':
            # Debian layout: place the vhost in sites-available and enable
            # it via a symlink in sites-enabled.
            self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
            self.run([paths.cmd_ln, '-s', self.https_gluu_fn,
                '/etc/apache2/sites-enabled/https_gluu.conf'])

    def installed(self):
        """Treat the presence of the rendered vhost file as 'installed'."""
        return os.path.exists(self.https_gluu_fn)
|
import os
import glob
import shutil
from setup_app import paths
from setup_app.utils import base
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.utils.setup_utils import SetupUtils
from setup_app.installers.base import BaseInstaller
class HttpdInstaller(BaseInstaller, SetupUtils):
def __init__(self):
self.service_name = base.httpd_name
self.pbar_text = "Configuring " + base.httpd_name
self.app_type = AppType.SERVICE
self.install_type = InstallOption.OPTONAL
self.install_var = 'installHttpd'
self.register_progess()
self.needdb = False # we don't need backend connection in this class
self.apache_version = base.determineApacheVersion()
self.httpdKeyFn = os.path.join(Config.certFolder, 'httpd.key')
self.httpdCertFn = os.path.join(Config.certFolder, 'httpd.crt')
self.templates_folder = os.path.join(Config.templateFolder, 'apache')
self.output_folder = os.path.join(Config.outputFolder, 'apache')
self.apache2_conf = os.path.join(self.output_folder, 'httpd.conf')
self.apache2_ssl_conf = os.path.join(self.output_folder, 'https_gluu.conf')
self.apache2_24_conf = os.path.join(self.output_folder, 'httpd_2.4.conf')
self.apache2_ssl_24_conf = os.path.join(self.output_folder, 'https_gluu.conf')
if base.os_type == 'suse':
self.https_gluu_fn = '/etc/apache2/vhosts.d/_https_gluu.conf'
elif base.clone_type == 'rpm':
self.https_gluu_fn = '/etc/httpd/conf.d/https_gluu.conf'
else:
self.https_gluu_fn = '/etc/apache2/sites-available/https_gluu.conf'
def configure(self):
self.logIt(self.pbar_text, pbar=self.service_name)
self.stop()
self.write_httpd_config()
self.writeFile('/var/www/html/index.html', 'OK')
if base.snap:
icons_conf_fn = '/etc/apache2/mods-available/alias.conf'
if base.os_type == 'suse':
icons_conf_fn = '/etc/apache2/default-server.conf'
elif base.clone_type == 'deb':
icons_conf_fn = '/etc/apache2/mods-available/alias.conf'
elif base.clone_type == 'rpm':
icons_conf_fn = '/etc/httpd/conf.d/autoindex.conf'
with open(icons_conf_fn[:]) as f:
icons_conf = f.readlines()
for i, l in enumerate(icons_conf[:]):
if l.strip().startswith('Alias') and ('/icons/' in l.strip().split()):
icons_conf[i] = l.replace('Alias', '#Alias')
self.writeFile(icons_conf_fn, ''.join(icons_conf))
error_templates = glob.glob(os.path.join(self.templates_folder,'error_pages/*.html'))
for tmp_fn in error_templates:
self.copyFile(tmp_fn, '/var/www/html')
# we only need these modules
mods_enabled = ['env', 'log_config', 'proxy', 'proxy_http', 'access_compat', 'alias', 'authn_core', 'authz_core', 'authz_host', 'headers', 'mime', 'mpm_event', 'proxy_ajp', 'security2', 'reqtimeout', 'setenvif', 'socache_shmcb', 'ssl', 'unique_id', 'rewrite']
cmd_a2enmod = shutil.which('a2enmod')
cmd_a2dismod = shutil.which('a2dismod')
if base.snap:
mods_enabled_dir = os.path.join(base.snap_common, 'etc/apache2/mods-enabled')
mods_available_dir = os.path.join(base.snap_common, 'etc/apache2/mods-available')
for em in os.listdir(mods_enabled_dir):
em_n, em_e = os.path.splitext(em)
if not em_n in mods_enabled:
os.unlink(os.path.join(mods_enabled_dir, em))
for m in mods_enabled:
load_fn = os.path.join(mods_available_dir, m + '.load')
conf_fn = os.path.join(mods_available_dir, m + '.conf')
if os.path.exists(load_fn):
target_fn = os.path.join(mods_enabled_dir, m + '.load')
if not os.path.exists(target_fn):
os.symlink(load_fn, target_fn)
if os.path.exists(conf_fn):
target_fn = os.path.join(mods_enabled_dir, m + '.conf')
if not os.path.exists(target_fn):
os.symlink(conf_fn, target_fn)
elif base.clone_type == 'deb':
for mod_load_fn in glob.glob('/etc/apache2/mods-enabled/*'):
mod_load_base_name = os.path.basename(mod_load_fn)
f_name, f_ext = os.path.splitext(mod_load_base_name)
if not f_name in mods_enabled:
self.run([cmd_a2dismod, mod_load_fn])
for amod in mods_enabled:
if os.path.exists('/etc/apache2/mods-available/{}.load'.format(amod)):
self.run([cmd_a2enmod, amod])
elif base.os_type == 'suse':
result = self.run([cmd_a2enmod, '-l'])
current_modules = result.strip().split()
for amod in current_modules:
if not amod in mods_enabled:
self.run([cmd_a2dismod, amod])
for amod in mods_enabled:
if not amod in current_modules:
self.run([cmd_a2enmod, amod])
cmd_a2enflag = shutil.which('a2enflag')
self.run([cmd_a2enflag, 'SSL'])
httpd_conf_fn = '/etc/apache2/httpd.conf'
httpd_conf_txt = self.readFile(httpd_conf_fn)
httpd_conf = httpd_conf_txt.splitlines()
for i, l in enumerate(httpd_conf[:]):
if l.strip().startswith('DirectoryIndex'):
httpd_conf[i] = l.replace('DirectoryIndex', '#DirectoryIndex')
self.writeFile(httpd_conf_fn, '\n'.join(httpd_conf))
else:
modules_config_dir = '/etc/apache2/sysconfig.d' if base.os_type == 'suse' else '/etc/httpd/conf.modules.d'
for mod_load_fn in glob.glob(os.path.join(modules_config_dir,'*')):
if not os.path.isfile(mod_load_fn):
continue
with open(mod_load_fn) as f:
mod_load_content = f.readlines()
modified = False
for i, l in enumerate(mod_load_content[:]):
ls = l.strip()
if ls and not ls.startswith('#'):
lsl = ls.split('/')
if not lsl[0].startswith('LoadModule'):
continue
module = lsl[-1][4:-3]
if not module in mods_enabled:
mod_load_content[i] = l.replace('LoadModule', '#LoadModule')
modified = True
if modified:
self.writeFile(mod_load_fn, ''.join(mod_load_content))
if not Config.get('httpdKeyPass'):
Config.httpdKeyPass = self.getPW()
# generate httpd self signed certificate
self.gen_cert('httpd', Config.httpdKeyPass, 'jetty')
self.enable()
self.start()
def write_httpd_config(self):
    """Render the Apache templates and install them for the detected platform.

    Renders all four template variants (plain/SSL for both Apache 2.2-style
    and 2.4-style layouts) into the output folder, then copies the one that
    matches the host OS into the live Apache configuration directory.
    On Debian-family systems the SSL vhost is additionally symlinked into
    ``sites-enabled`` (Debian's convention for activating a site).

    NOTE(review): ``base``, ``paths`` and the ``self.*`` template/path
    attributes are defined elsewhere in this project — presumably a setup
    framework module; confirm against the enclosing class.
    """
    self.update_rendering_dict()
    # Render every template variant; platform selection happens below.
    for tmp in (self.apache2_conf, self.apache2_ssl_conf, self.apache2_24_conf, self.apache2_ssl_24_conf):
        self.renderTemplateInOut(tmp, self.templates_folder, self.output_folder)
    # CentOS 7.* + systemd + apache 2.4
    if self.service_name == 'httpd' and self.apache_version == "2.4":
        self.copyFile(self.apache2_24_conf, '/etc/httpd/conf/httpd.conf')
        self.copyFile(self.apache2_ssl_24_conf, '/etc/httpd/conf.d/https_gluu.conf')
    if base.os_type == 'suse':
        self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
    elif base.clone_type == 'rpm' and base.os_initdaemon == 'init':
        # Older RPM systems (SysV init) use the 2.2-style config layout.
        self.copyFile(self.apache2_conf, '/etc/httpd/conf/httpd.conf')
        self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
    elif base.clone_type == 'deb':
        self.copyFile(self.apache2_ssl_conf, self.https_gluu_fn)
        # Activate the site the Debian way: symlink into sites-enabled.
        self.run([paths.cmd_ln, '-s', self.https_gluu_fn,
                  '/etc/apache2/sites-enabled/https_gluu.conf'])
def installed(self):
    """Report whether the Gluu https vhost config file is already in place."""
    target = self.https_gluu_fn
    return os.path.exists(target)
|
en
| 0.807666
|
# we don't need backend connection in this class # we only need these modules # generate httpd self signed certificate # CentOS 7.* + systemd + apache 2.4
| 2.081866
| 2
|
xarray/core/concat.py
|
jminsk-cc/xarray
| 0
|
6627458
|
<reponame>jminsk-cc/xarray
import warnings
from collections import OrderedDict
import pandas as pd
from . import dtypes, utils
from .alignment import align
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
def concat(
    objs,
    dim=None,
    data_vars="all",
    coords="different",
    compat="equals",
    positions=None,
    indexers=None,
    mode=None,
    concat_over=None,
    fill_value=dtypes.NA,
    join="outer",
):
    """Concatenate xarray objects along a new or existing dimension.

    Parameters
    ----------
    objs : sequence of Dataset and DataArray objects
        xarray objects to concatenate together. Each object is expected to
        consist of variables and coordinates with matching shapes except for
        along the concatenated dimension.
    dim : str or DataArray or pandas.Index
        Name of the dimension to concatenate along. This can either be a new
        dimension name, in which case it is added along axis=0, or an existing
        dimension name, in which case the location of the dimension is
        unchanged. If dimension is provided as a DataArray or Index, its name
        is used as the dimension to concatenate along and the values are added
        as a coordinate.
    data_vars : {'minimal', 'different', 'all' or list of str}, optional
        These data variables will be concatenated together:

        * 'minimal': Only data variables in which the dimension already
          appears are included.
        * 'different': Data variables which are not equal (ignoring
          attributes) across all datasets are also concatenated (as well as
          all for which dimension already appears). Beware: this option may
          load the data payload of data variables into memory if they are not
          already loaded.
        * 'all': All data variables will be concatenated.
        * list of str: The listed data variables will be concatenated, in
          addition to the 'minimal' data variables.

        If objects are DataArrays, data_vars must be 'all'.
    coords : {'minimal', 'different', 'all' or list of str}, optional
        These coordinate variables will be concatenated together:

        * 'minimal': Only coordinates in which the dimension already appears
          are included.
        * 'different': Coordinates which are not equal (ignoring attributes)
          across all datasets are also concatenated (as well as all for which
          dimension already appears). Beware: this option may load the data
          payload of coordinate variables into memory if they are not already
          loaded.
        * 'all': All coordinate variables will be concatenated, except
          those corresponding to other dimensions.
        * list of str: The listed coordinate variables will be concatenated,
          in addition to the 'minimal' coordinates.
    compat : {'equals', 'identical'}, optional
        String indicating how to compare non-concatenated variables and
        dataset global attributes for potential conflicts. 'equals' means
        that all variable values and dimensions must be the same;
        'identical' means that variable attributes and global attributes
        must also be equal.
    positions : None or list of integer arrays, optional
        List of integer arrays which specifies the integer positions to which
        to assign each dataset along the concatenated dimension. If not
        supplied, objects are concatenated in the provided order.
    fill_value : scalar, optional
        Value to use for newly missing values
    join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
        String indicating how to combine differing indexes
        (excluding dim) in objects

        - 'outer': use the union of object indexes
        - 'inner': use the intersection of object indexes
        - 'left': use indexes from the first object with each dimension
        - 'right': use indexes from the last object with each dimension
        - 'exact': instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - 'override': if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    indexers, mode, concat_over : deprecated

    Returns
    -------
    concatenated : type of objs

    See also
    --------
    merge
    auto_combine
    """
    # TODO: add ignore_index arguments copied from pandas.concat
    # TODO: support concatenating scalar coordinates even if the concatenated
    # dimension already exists
    from .dataset import Dataset
    from .dataarray import DataArray

    # peek_at returns the first element plus an iterator over ALL elements
    # (including the first), so objs can be a one-shot generator.
    try:
        first_obj, objs = utils.peek_at(objs)
    except StopIteration:
        raise ValueError("must supply at least one object to concatenate")

    if dim is None:
        warnings.warn(
            "the `dim` argument to `concat` will be required "
            "in a future version of xarray; for now, setting it to "
            "the old default of 'concat_dim'",
            FutureWarning,
            stacklevel=2,
        )
        # NOTE(review): the warning text says 'concat_dim' but the value
        # assigned is 'concat_dims' (plural) — looks inconsistent; confirm
        # which spelling downstream code expects before changing either.
        dim = "concat_dims"

    # Deprecated-parameter shims: `indexers` is aliased, `mode` and
    # `concat_over` are hard errors.
    if indexers is not None:  # pragma: no cover
        warnings.warn(
            "indexers has been renamed to positions; the alias "
            "will be removed in a future version of xarray",
            FutureWarning,
            stacklevel=2,
        )
        positions = indexers

    if mode is not None:
        raise ValueError(
            "`mode` is no longer a valid argument to "
            "xarray.concat; it has been split into the "
            "`data_vars` and `coords` arguments"
        )
    if concat_over is not None:
        raise ValueError(
            "`concat_over` is no longer a valid argument to "
            "xarray.concat; it has been split into the "
            "`data_vars` and `coords` arguments"
        )

    # Dispatch on the type of the first object; the sibling helpers below
    # handle the DataArray and Dataset cases respectively.
    if isinstance(first_obj, DataArray):
        f = _dataarray_concat
    elif isinstance(first_obj, Dataset):
        f = _dataset_concat
    else:
        raise TypeError(
            "can only concatenate xarray Dataset and DataArray "
            "objects, got %s" % type(first_obj)
        )
    return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)
def _calc_concat_dim_coord(dim):
    """
    Infer the dimension name and 1d coordinate variable (if appropriate)
    for concatenating along the new dimension.

    Returns a ``(dim_name, coord)`` pair; ``coord`` is ``None`` when only a
    plain dimension name was given.
    """
    from .dataarray import DataArray

    if isinstance(dim, str):
        # A bare name: nothing to attach as a coordinate.
        return dim, None

    if not isinstance(dim, (DataArray, Variable)):
        # Generic array-like (e.g. a pandas.Index or list): wrap its values
        # in an IndexVariable, borrowing its name when it has one.
        new_name = getattr(dim, "name", None)
        if new_name is None:
            new_name = "concat_dim"
        return new_name, IndexVariable(new_name, dim)

    # A DataArray is used as the coordinate directly; a Variable is first
    # converted to an index variable. Either way the (single) dimension of
    # the coordinate names the concat dimension.
    coord = dim if isinstance(dim, DataArray) else as_variable(dim).to_index_variable()
    (dim_name,) = coord.dims
    return dim_name, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
    """
    Determine which dataset variables need to be concatenated in the result,
    and which can simply be taken from the first dataset.

    Returns
    -------
    concat_over : set of str
        Names of variables that must be concatenated along ``dim``.
    equals : dict of str -> bool
        For variables compared under the 'different' option, whether they
        were found equal across all datasets (memoized so the caller can
        skip re-comparing).
    """
    # Return values
    concat_over = set()
    equals = {}

    # Any variable that already has `dim` among its dimensions must be
    # concatenated, in every input dataset.
    if dim in datasets[0]:
        concat_over.add(dim)
    for ds in datasets:
        concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)

    def process_subset_opt(opt, subset):
        # `subset` is either "data_vars" or "coords"; `opt` is the
        # corresponding user option (a mode string or an explicit list).
        if isinstance(opt, str):
            if opt == "different":
                # all nonindexes that are not the same in each dataset
                for k in getattr(datasets[0], subset):
                    if k not in concat_over:
                        # Compare the variable of all datasets vs. the one
                        # of the first dataset. Perform the minimum amount of
                        # loads in order to avoid multiple loads from disk
                        # while keeping the RAM footprint low.
                        v_lhs = datasets[0].variables[k].load()
                        # We'll need to know later on if variables are equal.
                        computed = []
                        for ds_rhs in datasets[1:]:
                            v_rhs = ds_rhs.variables[k].compute()
                            computed.append(v_rhs)
                            if not v_lhs.equals(v_rhs):
                                concat_over.add(k)
                                equals[k] = False
                                # computed variables are not to be re-computed
                                # again in the future
                                for ds, v in zip(datasets[1:], computed):
                                    ds.variables[k].data = v.data
                                break
                        else:
                            # Loop completed without a mismatch: equal everywhere.
                            equals[k] = True

            elif opt == "all":
                # Everything in the subset except dimension coordinates.
                concat_over.update(
                    set(getattr(datasets[0], subset)) - set(datasets[0].dims)
                )
            elif opt == "minimal":
                pass
            else:
                raise ValueError("unexpected value for %s: %s" % (subset, opt))
        else:
            # Explicit list of names: validate against the first dataset.
            invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]
            if invalid_vars:
                if subset == "coords":
                    raise ValueError(
                        "some variables in coords are not coordinates on "
                        "the first dataset: %s" % (invalid_vars,)
                    )
                else:
                    raise ValueError(
                        "some variables in data_vars are not data variables "
                        "on the first dataset: %s" % (invalid_vars,)
                    )
            concat_over.update(opt)

    process_subset_opt(data_vars, "data_vars")
    process_subset_opt(coords, "coords")
    return concat_over, equals
def _dataset_concat(
    datasets,
    dim,
    data_vars,
    coords,
    compat,
    positions,
    fill_value=dtypes.NA,
    join="outer",
):
    """
    Concatenate a sequence of datasets along a new or existing dimension

    See :func:`concat` for the meaning of the parameters. The heavy lifting:
    aligns the inputs, decides which variables to concatenate vs. copy from
    the first dataset, validates consistency, and stacks the rest.
    """
    from .dataset import Dataset

    if compat not in ["equals", "identical"]:
        raise ValueError(
            "compat=%r invalid: must be 'equals' " "or 'identical'" % compat
        )

    dim, coord = _calc_concat_dim_coord(dim)
    # Make sure we're working on a copy (we'll be loading variables)
    datasets = [ds.copy() for ds in datasets]
    datasets = align(
        *datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value
    )

    concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)

    def insert_result_variable(k, v):
        # Closure over result_vars / result_coord_names defined just below;
        # tracks coordinate-ness based on the FIRST dataset.
        assert isinstance(v, Variable)
        if k in datasets[0].coords:
            result_coord_names.add(k)
        result_vars[k] = v

    # create the new dataset and add constant variables
    result_vars = OrderedDict()
    result_coord_names = set(datasets[0].coords)
    result_attrs = datasets[0].attrs
    result_encoding = datasets[0].encoding
    for k, v in datasets[0].variables.items():
        if k not in concat_over:
            insert_result_variable(k, v)

    # check that global attributes and non-concatenated variables are fixed
    # across all datasets
    for ds in datasets[1:]:
        if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs):
            raise ValueError("dataset global attributes not equal")
        for k, v in ds.variables.items():
            if k not in result_vars and k not in concat_over:
                raise ValueError("encountered unexpected variable %r" % k)
            elif (k in result_coord_names) != (k in ds.coords):
                raise ValueError(
                    "%r is a coordinate in some datasets but not " "others" % k
                )
            elif k in result_vars and k != dim:
                # Don't use Variable.identical as it internally invokes
                # Variable.equals, and we may already know the answer
                if compat == "identical" and not utils.dict_equiv(
                    v.attrs, result_vars[k].attrs
                ):
                    raise ValueError("variable %s not identical across datasets" % k)

                # Proceed with equals()
                try:
                    # May be populated when using the "different" method
                    is_equal = equals[k]
                except KeyError:
                    result_vars[k].load()
                    is_equal = v.equals(result_vars[k])
                if not is_equal:
                    raise ValueError("variable %s not equal across datasets" % k)

    # we've already verified everything is consistent; now, calculate
    # shared dimension sizes so we can expand the necessary variables
    dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
    non_concat_dims = {}
    for ds in datasets:
        non_concat_dims.update(ds.dims)
    non_concat_dims.pop(dim, None)

    def ensure_common_dims(vars):
        # ensure each variable with the given name shares the same
        # dimensions and the same shape for all of them except along the
        # concat dimension
        common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
        if dim not in common_dims:
            common_dims = (dim,) + common_dims
        for var, dim_len in zip(vars, dim_lengths):
            if var.dims != common_dims:
                common_shape = tuple(
                    non_concat_dims.get(d, dim_len) for d in common_dims
                )
                var = var.set_dims(common_dims, common_shape)
            yield var

    # stack up each variable to fill-out the dataset (in order)
    for k in datasets[0].variables:
        if k in concat_over:
            vars = ensure_common_dims([ds.variables[k] for ds in datasets])
            combined = concat_vars(vars, dim, positions)
            insert_result_variable(k, combined)

    result = Dataset(result_vars, attrs=result_attrs)
    result = result.set_coords(result_coord_names)
    result.encoding = result_encoding

    if coord is not None:
        # add concat dimension last to ensure that its in the final Dataset
        result[coord.name] = coord

    return result
def _dataarray_concat(
    arrays,
    dim,
    data_vars,
    coords,
    compat,
    positions,
    fill_value=dtypes.NA,
    join="outer",
):
    """Concatenate DataArrays by round-tripping through temporary datasets.

    Each array is converted to a single-variable dataset, the datasets are
    concatenated with ``_dataset_concat``, and the result is converted back,
    keeping the first array's name.
    """
    arrays = list(arrays)

    if data_vars != "all":
        raise ValueError(
            "data_vars is not a valid argument when "
            "concatenating DataArray objects"
        )

    datasets = []
    name = None
    for position, arr in enumerate(arrays):
        if position == 0:
            # The first array's name wins; later arrays must match it
            # (or be renamed, unless compat is strict).
            name = arr.name
        elif arr.name != name:
            if compat == "identical":
                raise ValueError("array names not identical")
            arr = arr.rename(name)
        datasets.append(arr._to_temp_dataset())

    combined = _dataset_concat(
        datasets,
        dim,
        data_vars,
        coords,
        compat,
        positions,
        fill_value=fill_value,
        join=join,
    )
    return arrays[0]._from_temp_dataset(combined, name)
|
import warnings
from collections import OrderedDict
import pandas as pd
from . import dtypes, utils
from .alignment import align
from .variable import IndexVariable, Variable, as_variable
from .variable import concat as concat_vars
def concat(
objs,
dim=None,
data_vars="all",
coords="different",
compat="equals",
positions=None,
indexers=None,
mode=None,
concat_over=None,
fill_value=dtypes.NA,
join="outer",
):
"""Concatenate xarray objects along a new or existing dimension.
Parameters
----------
objs : sequence of Dataset and DataArray objects
xarray objects to concatenate together. Each object is expected to
consist of variables and coordinates with matching shapes except for
along the concatenated dimension.
dim : str or DataArray or pandas.Index
Name of the dimension to concatenate along. This can either be a new
dimension name, in which case it is added along axis=0, or an existing
dimension name, in which case the location of the dimension is
unchanged. If dimension is provided as a DataArray or Index, its name
is used as the dimension to concatenate along and the values are added
as a coordinate.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
If objects are DataArrays, data_vars must be 'all'.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
in addition to the 'minimal' coordinates.
compat : {'equals', 'identical'}, optional
String indicating how to compare non-concatenated variables and
dataset global attributes for potential conflicts. 'equals' means
that all variable values and dimensions must be the same;
'identical' means that variable attributes and global attributes
must also be equal.
positions : None or list of integer arrays, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
fill_value : scalar, optional
Value to use for newly missing values
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
String indicating how to combine differing indexes
(excluding dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
indexers, mode, concat_over : deprecated
Returns
-------
concatenated : type of objs
See also
--------
merge
auto_combine
"""
# TODO: add ignore_index arguments copied from pandas.concat
# TODO: support concatenating scalar coordinates even if the concatenated
# dimension already exists
from .dataset import Dataset
from .dataarray import DataArray
try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
if dim is None:
warnings.warn(
"the `dim` argument to `concat` will be required "
"in a future version of xarray; for now, setting it to "
"the old default of 'concat_dim'",
FutureWarning,
stacklevel=2,
)
dim = "concat_dims"
if indexers is not None: # pragma: no cover
warnings.warn(
"indexers has been renamed to positions; the alias "
"will be removed in a future version of xarray",
FutureWarning,
stacklevel=2,
)
positions = indexers
if mode is not None:
raise ValueError(
"`mode` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if concat_over is not None:
raise ValueError(
"`concat_over` is no longer a valid argument to "
"xarray.concat; it has been split into the "
"`data_vars` and `coords` arguments"
)
if isinstance(first_obj, DataArray):
f = _dataarray_concat
elif isinstance(first_obj, Dataset):
f = _dataset_concat
else:
raise TypeError(
"can only concatenate xarray Dataset and DataArray "
"objects, got %s" % type(first_obj)
)
return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)
def _calc_concat_dim_coord(dim):
"""
Infer the dimension name and 1d coordinate variable (if appropriate)
for concatenating along the new dimension.
"""
from .dataarray import DataArray
if isinstance(dim, str):
coord = None
elif not isinstance(dim, (DataArray, Variable)):
dim_name = getattr(dim, "name", None)
if dim_name is None:
dim_name = "concat_dim"
coord = IndexVariable(dim_name, dim)
dim = dim_name
elif not isinstance(dim, DataArray):
coord = as_variable(dim).to_index_variable()
dim, = coord.dims
else:
coord = dim
dim, = coord.dims
return dim, coord
def _calc_concat_over(datasets, dim, data_vars, coords):
"""
Determine which dataset variables need to be concatenated in the result,
and which can simply be taken from the first dataset.
"""
# Return values
concat_over = set()
equals = {}
if dim in datasets[0]:
concat_over.add(dim)
for ds in datasets:
concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)
def process_subset_opt(opt, subset):
if isinstance(opt, str):
if opt == "different":
# all nonindexes that are not the same in each dataset
for k in getattr(datasets[0], subset):
if k not in concat_over:
# Compare the variable of all datasets vs. the one
# of the first dataset. Perform the minimum amount of
# loads in order to avoid multiple loads from disk
# while keeping the RAM footprint low.
v_lhs = datasets[0].variables[k].load()
# We'll need to know later on if variables are equal.
computed = []
for ds_rhs in datasets[1:]:
v_rhs = ds_rhs.variables[k].compute()
computed.append(v_rhs)
if not v_lhs.equals(v_rhs):
concat_over.add(k)
equals[k] = False
# computed variables are not to be re-computed
# again in the future
for ds, v in zip(datasets[1:], computed):
ds.variables[k].data = v.data
break
else:
equals[k] = True
elif opt == "all":
concat_over.update(
set(getattr(datasets[0], subset)) - set(datasets[0].dims)
)
elif opt == "minimal":
pass
else:
raise ValueError("unexpected value for %s: %s" % (subset, opt))
else:
invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]
if invalid_vars:
if subset == "coords":
raise ValueError(
"some variables in coords are not coordinates on "
"the first dataset: %s" % (invalid_vars,)
)
else:
raise ValueError(
"some variables in data_vars are not data variables "
"on the first dataset: %s" % (invalid_vars,)
)
concat_over.update(opt)
process_subset_opt(data_vars, "data_vars")
process_subset_opt(coords, "coords")
return concat_over, equals
def _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
"""
Concatenate a sequence of datasets along a new or existing dimension
"""
from .dataset import Dataset
if compat not in ["equals", "identical"]:
raise ValueError(
"compat=%r invalid: must be 'equals' " "or 'identical'" % compat
)
dim, coord = _calc_concat_dim_coord(dim)
# Make sure we're working on a copy (we'll be loading variables)
datasets = [ds.copy() for ds in datasets]
datasets = align(
*datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value
)
concat_over, equals = _calc_concat_over(datasets, dim, data_vars, coords)
def insert_result_variable(k, v):
assert isinstance(v, Variable)
if k in datasets[0].coords:
result_coord_names.add(k)
result_vars[k] = v
# create the new dataset and add constant variables
result_vars = OrderedDict()
result_coord_names = set(datasets[0].coords)
result_attrs = datasets[0].attrs
result_encoding = datasets[0].encoding
for k, v in datasets[0].variables.items():
if k not in concat_over:
insert_result_variable(k, v)
# check that global attributes and non-concatenated variables are fixed
# across all datasets
for ds in datasets[1:]:
if compat == "identical" and not utils.dict_equiv(ds.attrs, result_attrs):
raise ValueError("dataset global attributes not equal")
for k, v in ds.variables.items():
if k not in result_vars and k not in concat_over:
raise ValueError("encountered unexpected variable %r" % k)
elif (k in result_coord_names) != (k in ds.coords):
raise ValueError(
"%r is a coordinate in some datasets but not " "others" % k
)
elif k in result_vars and k != dim:
# Don't use Variable.identical as it internally invokes
# Variable.equals, and we may already know the answer
if compat == "identical" and not utils.dict_equiv(
v.attrs, result_vars[k].attrs
):
raise ValueError("variable %s not identical across datasets" % k)
# Proceed with equals()
try:
# May be populated when using the "different" method
is_equal = equals[k]
except KeyError:
result_vars[k].load()
is_equal = v.equals(result_vars[k])
if not is_equal:
raise ValueError("variable %s not equal across datasets" % k)
# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
non_concat_dims = {}
for ds in datasets:
non_concat_dims.update(ds.dims)
non_concat_dims.pop(dim, None)
def ensure_common_dims(vars):
# ensure each variable with the given name shares the same
# dimensions and the same shape for all of them except along the
# concat dimension
common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))
if dim not in common_dims:
common_dims = (dim,) + common_dims
for var, dim_len in zip(vars, dim_lengths):
if var.dims != common_dims:
common_shape = tuple(
non_concat_dims.get(d, dim_len) for d in common_dims
)
var = var.set_dims(common_dims, common_shape)
yield var
# stack up each variable to fill-out the dataset (in order)
for k in datasets[0].variables:
if k in concat_over:
vars = ensure_common_dims([ds.variables[k] for ds in datasets])
combined = concat_vars(vars, dim, positions)
insert_result_variable(k, combined)
result = Dataset(result_vars, attrs=result_attrs)
result = result.set_coords(result_coord_names)
result.encoding = result_encoding
if coord is not None:
# add concat dimension last to ensure that its in the final Dataset
result[coord.name] = coord
return result
def _dataarray_concat(
arrays,
dim,
data_vars,
coords,
compat,
positions,
fill_value=dtypes.NA,
join="outer",
):
arrays = list(arrays)
if data_vars != "all":
raise ValueError(
"data_vars is not a valid argument when " "concatenating DataArray objects"
)
datasets = []
for n, arr in enumerate(arrays):
if n == 0:
name = arr.name
elif name != arr.name:
if compat == "identical":
raise ValueError("array names not identical")
else:
arr = arr.rename(name)
datasets.append(arr._to_temp_dataset())
ds = _dataset_concat(
datasets,
dim,
data_vars,
coords,
compat,
positions,
fill_value=fill_value,
join=join,
)
return arrays[0]._from_temp_dataset(ds, name)
|
en
| 0.790142
|
Concatenate xarray objects along a new or existing dimension. Parameters ---------- objs : sequence of Dataset and DataArray objects xarray objects to concatenate together. Each object is expected to consist of variables and coordinates with matching shapes except for along the concatenated dimension. dim : str or DataArray or pandas.Index Name of the dimension to concatenate along. This can either be a new dimension name, in which case it is added along axis=0, or an existing dimension name, in which case the location of the dimension is unchanged. If dimension is provided as a DataArray or Index, its name is used as the dimension to concatenate along and the values are added as a coordinate. data_vars : {'minimal', 'different', 'all' or list of str}, optional These data variables will be concatenated together: * 'minimal': Only data variables in which the dimension already appears are included. * 'different': Data variables which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of data variables into memory if they are not already loaded. * 'all': All data variables will be concatenated. * list of str: The listed data variables will be concatenated, in addition to the 'minimal' data variables. If objects are DataArrays, data_vars must be 'all'. coords : {'minimal', 'different', 'all' or list of str}, optional These coordinate variables will be concatenated together: * 'minimal': Only coordinates in which the dimension already appears are included. * 'different': Coordinates which are not equal (ignoring attributes) across all datasets are also concatenated (as well as all for which dimension already appears). Beware: this option may load the data payload of coordinate variables into memory if they are not already loaded. * 'all': All coordinate variables will be concatenated, except those corresponding to other dimensions. 
* list of str: The listed coordinate variables will be concatenated, in addition to the 'minimal' coordinates. compat : {'equals', 'identical'}, optional String indicating how to compare non-concatenated variables and dataset global attributes for potential conflicts. 'equals' means that all variable values and dimensions must be the same; 'identical' means that variable attributes and global attributes must also be equal. positions : None or list of integer arrays, optional List of integer arrays which specifies the integer positions to which to assign each dataset along the concatenated dimension. If not supplied, objects are concatenated in the provided order. fill_value : scalar, optional Value to use for newly missing values join : {'outer', 'inner', 'left', 'right', 'exact'}, optional String indicating how to combine differing indexes (excluding dim) in objects - 'outer': use the union of object indexes - 'inner': use the intersection of object indexes - 'left': use indexes from the first object with each dimension - 'right': use indexes from the last object with each dimension - 'exact': instead of aligning, raise `ValueError` when indexes to be aligned are not equal - 'override': if indexes are of same size, rewrite indexes to be those of the first object with that dimension. Indexes for the same dimension must have the same size in all objects. indexers, mode, concat_over : deprecated Returns ------- concatenated : type of objs See also -------- merge auto_combine # TODO: add ignore_index arguments copied from pandas.concat # TODO: support concatenating scalar coordinates even if the concatenated # dimension already exists # pragma: no cover Infer the dimension name and 1d coordinate variable (if appropriate) for concatenating along the new dimension. Determine which dataset variables need to be concatenated in the result, and which can simply be taken from the first dataset. 
# Return values # all nonindexes that are not the same in each dataset # Compare the variable of all datasets vs. the one # of the first dataset. Perform the minimum amount of # loads in order to avoid multiple loads from disk # while keeping the RAM footprint low. # We'll need to know later on if variables are equal. # computed variables are not to be re-computed # again in the future Concatenate a sequence of datasets along a new or existing dimension # Make sure we're working on a copy (we'll be loading variables) # create the new dataset and add constant variables # check that global attributes and non-concatenated variables are fixed # across all datasets # Don't use Variable.identical as it internally invokes # Variable.equals, and we may already know the answer # Proceed with equals() # May be populated when using the "different" method # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables # ensure each variable with the given name shares the same # dimensions and the same shape for all of them except along the # concat dimension # stack up each variable to fill-out the dataset (in order) # add concat dimension last to ensure that its in the final Dataset
| 3.05288
| 3
|
dev/Tools/build/waf-1.7.13/waflib/extras/build_logs.py
|
jeikabu/lumberyard
| 10
|
6627459
|
<reponame>jeikabu/lumberyard
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2013 (ita)
"""
A system for recording all outputs to a log file. Just add the following to your wscript file::
def init(ctx):
ctx.load('build_logs')
"""
import atexit, sys, time, os, shutil, threading
from waflib import Logs, Context
# adding the logs under the build/ directory will clash with the clean/ command
try:
up = os.path.dirname(Context.g_module.__file__)
except AttributeError:
up = '.'
LOGFILE = os.path.join(up, 'logs', '%s.log' % time.strftime('%Y_%m_%d_%H_%M'))
wlock = threading.Lock()
class log_to_file(object):
    """File-like proxy that tees everything written to a terminal stream
    into a log file as well, with waf color escape codes stripped from the
    logged copy. Writes are serialized through the module-level ``wlock``.
    """

    def __init__(self, stream, fileobj, filename):
        self.stream = stream
        self.encoding = stream.encoding
        self.fileobj = fileobj
        self.filename = filename
        # Flipped to False at exit so late writes stop touching the
        # (by then closed) log file.
        self.is_valid = True

    def replace_colors(self, data):
        """Return *data* with every string entry of waf's color table removed."""
        for code in Logs.colors_lst.values():
            if isinstance(code, str):
                data = data.replace(code, '')
        return data

    def write(self, data):
        # One lock-guarded critical section: terminal first, then log copy.
        with wlock:
            self.stream.write(data)
            self.stream.flush()
            if self.is_valid:
                self.fileobj.write(self.replace_colors(data))

    def fileno(self):
        return self.stream.fileno()

    def flush(self):
        self.stream.flush()
        if self.is_valid:
            self.fileobj.flush()

    def isatty(self):
        return self.stream.isatty()
def init(ctx):
	"""Redirect sys.stdout/sys.stderr through log_to_file so all build output
	is also recorded in LOGFILE.

	:param ctx: waf context (only present for the hook signature)
	"""
	global LOGFILE
	filename = os.path.abspath(LOGFILE)
	try:
		os.makedirs(os.path.dirname(os.path.abspath(filename)))
	except OSError:
		# the logs/ directory may already exist
		pass
	if hasattr(os, 'O_NOINHERIT'):
		# Windows: keep child processes from inheriting the log file handle
		fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT)
		fileobj = os.fdopen(fd, 'w')
	else:
		fileobj = open(LOGFILE, 'w')
	old_stderr = sys.stderr
	# sys.stdout has already been replaced, so __stdout__ will be faster
	#sys.stdout = log_to_file(sys.stdout, fileobj, filename)
	#sys.stderr = log_to_file(sys.stderr, fileobj, filename)
	sys.stdout = log_to_file(sys.__stdout__, fileobj, filename)
	sys.stderr = log_to_file(sys.__stderr__, fileobj, filename)
	# now mess with the logging module...
	for x in Logs.log.handlers:
		try:
			stream = x.stream
		except AttributeError:
			pass
		else:
			# rebind only the handlers that pointed at the old stderr
			if id(stream) == id(old_stderr):
				x.stream = sys.stderr
def exit_cleanup():
	"""atexit hook: stop mirroring, close the log file and copy it to latest.log."""
	try:
		fileobj = sys.stdout.fileobj
	except AttributeError:
		# init() never replaced stdout; nothing to clean up
		pass
	else:
		# disable mirroring first so late writes do not hit a closed file
		sys.stdout.is_valid = False
		sys.stderr.is_valid = False
		fileobj.close()
		filename = sys.stdout.filename
		Logs.info('Output logged to %r' % filename)
		# then copy the log file to "latest.log" if possible
		up = os.path.dirname(os.path.abspath(filename))
		try:
			shutil.copy(filename, os.path.join(up, 'latest.log'))
		except OSError:
			# this may fail on windows due to processes spawned
			#
			pass
atexit.register(exit_cleanup)
|
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2013 (ita)
"""
A system for recording all outputs to a log file. Just add the following to your wscript file::
def init(ctx):
ctx.load('build_logs')
"""
import atexit, sys, time, os, shutil, threading
from waflib import Logs, Context
# adding the logs under the build/ directory will clash with the clean/ command
# Directory of the top-level wscript; '.' when no top-level module is set.
try:
	up = os.path.dirname(Context.g_module.__file__)
except AttributeError:
	up = '.'
# Timestamped log file, stored outside build/ to survive the clean command.
LOGFILE = os.path.join(up, 'logs', '%s.log' % time.strftime('%Y_%m_%d_%H_%M'))
# Lock serializing concurrent writes to the shared log file.
wlock = threading.Lock()
class log_to_file(object):
	"""File-like proxy that mirrors writes to *stream* into *fileobj* (the log
	file), stripping color escape sequences from the logged copy.
	"""
	def __init__(self, stream, fileobj, filename):
		self.stream = stream
		self.encoding = self.stream.encoding
		self.fileobj = fileobj
		self.filename = filename
		self.is_valid = True
	def replace_colors(self, data):
		# strip the ANSI color codes known to waf before logging
		for x in Logs.colors_lst.values():
			if isinstance(x, str):
				data = data.replace(x, '')
	 	return data
	def write(self, data):
		# serialize concurrent writes to the shared log file
		try:
			wlock.acquire()
			self.stream.write(data)
			self.stream.flush()
			if self.is_valid:
				self.fileobj.write(self.replace_colors(data))
		finally:
			wlock.release()
	def fileno(self):
		return self.stream.fileno()
	def flush(self):
		self.stream.flush()
		if self.is_valid:
			self.fileobj.flush()
	def isatty(self):
		return self.stream.isatty()
def init(ctx):
	"""Replace sys.stdout/sys.stderr with log_to_file proxies so the whole
	build output is recorded in LOGFILE.

	:param ctx: waf context (unused beyond the hook signature)
	"""
	global LOGFILE
	filename = os.path.abspath(LOGFILE)
	try:
		os.makedirs(os.path.dirname(os.path.abspath(filename)))
	except OSError:
		# logs/ may already exist
		pass
	if hasattr(os, 'O_NOINHERIT'):
		# Windows: prevent child processes from inheriting the log handle
		fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT)
		fileobj = os.fdopen(fd, 'w')
	else:
		fileobj = open(LOGFILE, 'w')
	old_stderr = sys.stderr
	# sys.stdout has already been replaced, so __stdout__ will be faster
	#sys.stdout = log_to_file(sys.stdout, fileobj, filename)
	#sys.stderr = log_to_file(sys.stderr, fileobj, filename)
	sys.stdout = log_to_file(sys.__stdout__, fileobj, filename)
	sys.stderr = log_to_file(sys.__stderr__, fileobj, filename)
	# now mess with the logging module...
	for x in Logs.log.handlers:
		try:
			stream = x.stream
		except AttributeError:
			pass
		else:
			# rebind handlers that pointed at the old stderr
			if id(stream) == id(old_stderr):
				x.stream = sys.stderr
def exit_cleanup():
	"""atexit hook: stop mirroring, close the log and copy it to latest.log."""
	try:
		fileobj = sys.stdout.fileobj
	except AttributeError:
		# stdout was never replaced by init(); nothing to do
		pass
	else:
		# disable mirroring before closing the underlying file
		sys.stdout.is_valid = False
		sys.stderr.is_valid = False
		fileobj.close()
		filename = sys.stdout.filename
		Logs.info('Output logged to %r' % filename)
		# then copy the log file to "latest.log" if possible
		up = os.path.dirname(os.path.abspath(filename))
		try:
			shutil.copy(filename, os.path.join(up, 'latest.log'))
		except OSError:
			# this may fail on windows due to processes spawned
			#
			pass
atexit.register(exit_cleanup)
|
en
| 0.674476
|
#!/usr/bin/env python # encoding: utf-8 # <NAME>, 2013 (ita) A system for recording all outputs to a log file. Just add the following to your wscript file:: def init(ctx): ctx.load('build_logs') # adding the logs under the build/ directory will clash with the clean/ command # sys.stdout has already been replaced, so __stdout__ will be faster #sys.stdout = log_to_file(sys.stdout, fileobj, filename) #sys.stderr = log_to_file(sys.stderr, fileobj, filename) # now mess with the logging module... # then copy the log file to "latest.log" if possible # this may fail on windows due to processes spawned #
| 2.305225
| 2
|
tripled/stack/resource.py
|
yeasy/tripled
| 1
|
6627460
|
__author__ = 'baohua'
class Resource(object):
    """A generic named resource.

    Positional arguments are stored as attribute names in ``self.attributes``
    (used by ``__str__``); keyword arguments are stored as ``self.options``.
    """
    def __init__(self, name=None, *args, **kwargs):
        self.name = name
        self.attributes = args
        self.options = kwargs
    def __str__(self):
        """Return the values of the listed attributes joined by tabs.

        Bug fix: the original did ``eval('e.%s' % r)`` where ``e`` is
        undefined, so any call raised NameError (and eval is unsafe anyway).
        Use getattr on self instead, and stringify values so join() does not
        fail on non-str attributes.
        """
        return '\t'.join(str(getattr(self, r)) for r in self.attributes)
|
__author__ = 'baohua'
class Resource(object):
    """A generic named resource.

    Positional arguments are stored as attribute names in ``self.attributes``
    (used by ``__str__``); keyword arguments are stored as ``self.options``.
    """
    def __init__(self, name=None, *args, **kwargs):
        self.name = name
        self.attributes = args
        self.options = kwargs
    def __str__(self):
        """Return the values of the listed attributes joined by tabs.

        Bug fix: the original did ``eval('e.%s' % r)`` where ``e`` is
        undefined, so any call raised NameError (and eval is unsafe anyway).
        Use getattr on self instead, and stringify values so join() does not
        fail on non-str attributes.
        """
        return '\t'.join(str(getattr(self, r)) for r in self.attributes)
|
en
| 0.364314
|
Resource : Get string to show the resource attributes :param: :returns: a dict e.g., {'resource_name':[string1, string2, ...]}
| 3.356594
| 3
|
chapter5/alarmclock.py
|
chavo1/playground-python
| 0
|
6627461
|
import time
current_time = time.localtime()
hour = current_time.tm_hour
minute = current_time.tm_min
it_is_time_to_get_up = (hour>7) or (hour==7 and minute>29)
if it_is_time_to_get_up:
print('IT IS TIME TO GET UP')
|
import time
current_time = time.localtime()
hour = current_time.tm_hour
minute = current_time.tm_min
it_is_time_to_get_up = (hour>7) or (hour==7 and minute>29)
if it_is_time_to_get_up:
print('IT IS TIME TO GET UP')
|
none
| 1
| 3.564881
| 4
|
|
aes.py
|
ahmedalsawi/wecli
| 0
|
6627462
|
# -*- coding: utf-8 -*-
# Python 3.4
# author: http://blog.dokenzy.com/
# date: 2015. 4. 8
# References
# http://www.imcore.net/encrypt-decrypt-aes256-c-objective-ios-iphone-ipad-php-java-android-perl-javascript/
# http://stackoverflow.com/questions/12562021/aes-decryption-padding-with-pkcs5-python
# http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256
# http://www.di-mgt.com.au/cryptopad.html
# https://github.com/dlitz/pycrypto
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
BS = 16  # AES block size in bytes


def pad(s):
    """Return bytes *s* extended with PKCS#7 padding to a multiple of BS.

    When len(s) is already a multiple of BS, a full block of padding is
    appended, as PKCS#7 requires; the pad byte value equals the pad length.
    """
    n = BS - len(s) % BS
    return s + bytes([n]) * n


def unpad(s):
    """Strip PKCS#7 padding from bytes *s* (inverse of pad).

    The last byte encodes how many padding bytes to remove.
    """
    return s[:-ord(s[len(s) - 1:])]
def iv():
    """
    The initialization vector to use for encryption or decryption.
    It is ignored for MODE_ECB and MODE_CTR.

    Bug fix: return bytes instead of str -- AES.new() on Python 3 rejects a
    str IV, so the original ``chr(0) * 16`` could never actually be used.
    """
    return b'\x00' * 16
class AESCipher(object):
    """AES-CBC helper built on pycrypto (https://github.com/dlitz/pycrypto).

    Encrypts str plaintext into base64-encoded str ciphertext and back,
    using the key and IV given at construction time.
    """
    def __init__(self, key, iv):
        self.key = key
        self.iv = iv
        #self.key = hashlib.sha256(key.encode()).digest()
    def encrypt(self, message):
        """
        It is assumed that you use Python 3.0+
        , so plaintext's type must be str type(== unicode).
        """
        padded = pad(message.encode())
        aes = AES.new(self.key, AES.MODE_CBC, self.iv)
        ciphertext = aes.encrypt(padded)
        return base64.b64encode(ciphertext).decode('utf-8')
    def decrypt(self, enc):
        # Inverse of encrypt(): base64 str in, plaintext str out.
        raw = base64.b64decode(enc)
        aes = AES.new(self.key, AES.MODE_CBC, self.iv)
        plaintext = aes.decrypt(raw)
        return unpad(plaintext).decode('utf-8')
if __name__ == "__main__":
    # Smoke test: encrypt a known message and round-trip a known ciphertext.
    # NOTE(review): the key literal appears redacted to "\<KEY>" in this copy;
    # AES.new requires a 16/24/32-byte key, so restore a real key before use.
    key = (
        b"\<KEY>")
    iv = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    message = '123456'
    _enc = 'peRMxuWD5nRijMGjlN7yiQ=='
    enc = AESCipher(key, iv).encrypt(message)
    dec = AESCipher(key, iv).decrypt(_enc)
    print(enc)
    print(_enc == enc)
    print(message == dec)
|
# -*- coding: utf-8 -*-
# Python 3.4
# author: http://blog.dokenzy.com/
# date: 2015. 4. 8
# References
# http://www.imcore.net/encrypt-decrypt-aes256-c-objective-ios-iphone-ipad-php-java-android-perl-javascript/
# http://stackoverflow.com/questions/12562021/aes-decryption-padding-with-pkcs5-python
# http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256
# http://www.di-mgt.com.au/cryptopad.html
# https://github.com/dlitz/pycrypto
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
BS = 16  # AES block size in bytes
# PKCS#7 pad: append n bytes each of value n, where n = BS - len(s) % BS
def pad(s): return s + (BS - len(s) % BS) * chr(BS - len(s) % BS).encode()
# Inverse of pad: the last byte tells how many padding bytes to strip
def unpad(s): return s[:-ord(s[len(s)-1:])]
def iv():
    """
    The initialization vector to use for encryption or decryption.
    It is ignored for MODE_ECB and MODE_CTR.

    NOTE(review): this returns a str, but AES.new() on Python 3 expects a
    bytes IV -- confirm before using this helper (the __main__ block below
    uses its own bytes literal instead).
    """
    return chr(0) * 16
class AESCipher(object):
    """AES-CBC helper built on pycrypto (https://github.com/dlitz/pycrypto).

    Encrypts str plaintext into base64-encoded str ciphertext and back,
    using the key and IV given at construction time.
    """
    def __init__(self, key, iv):
        self.key = key
        self.iv = iv
        #self.key = hashlib.sha256(key.encode()).digest()
    def encrypt(self, message):
        """
        It is assumed that you use Python 3.0+
        , so plaintext's type must be str type(== unicode).
        """
        message = message.encode()
        # PKCS#7 pad, encrypt, then base64-encode to a str for transport
        raw = pad(message)
        cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
        enc = cipher.encrypt(raw)
        return base64.b64encode(enc).decode('utf-8')
    def decrypt(self, enc):
        # Inverse of encrypt(): base64 str in, plaintext str out
        enc = base64.b64decode(enc)
        cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
        dec = cipher.decrypt(enc)
        return unpad(dec).decode('utf-8')
if __name__ == "__main__":
    # Smoke test: encrypt a known message and round-trip a known ciphertext.
    # NOTE(review): the key literal appears redacted to "\<KEY>" in this copy;
    # AES.new requires a 16/24/32-byte key, so restore a real key before use.
    key = (
        b"\<KEY>")
    iv = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
    message = '123456'
    _enc = 'peRMxuWD5nRijMGjlN7yiQ=='
    enc = AESCipher(key, iv).encrypt(message)
    dec = AESCipher(key, iv).decrypt(_enc)
    print(enc)
    print(_enc == enc)
    print(message == dec)
|
en
| 0.549518
|
# -*- coding: utf-8 -*- # Python 3.4 # author: http://blog.dokenzy.com/ # date: 2015. 4. 8 # References # http://www.imcore.net/encrypt-decrypt-aes256-c-objective-ios-iphone-ipad-php-java-android-perl-javascript/ # http://stackoverflow.com/questions/12562021/aes-decryption-padding-with-pkcs5-python # http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256 # http://www.di-mgt.com.au/cryptopad.html # https://github.com/dlitz/pycrypto The initialization vector to use for encryption or decryption. It is ignored for MODE_ECB and MODE_CTR. https://github.com/dlitz/pycrypto #self.key = hashlib.sha256(key.encode()).digest() It is assumed that you use Python 3.0+ , so plaintext's type must be str type(== unicode).
| 3.42996
| 3
|
galaxy/main/tests/test_content_block_model.py
|
thadguidry/galaxy
| 0
|
6627463
|
# -*- coding: utf-8 -*-
# (c) 2012-2018, Ansible
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.core.exceptions import ValidationError
from django.db.models.manager import Manager
from django.db.utils import DataError, IntegrityError
from django.test import TestCase
import mock
import pytest
from galaxy.main.models import ContentBlock
from galaxy.common.testing import NOW, LATER
class ContentBlockModelTest(TestCase):
    """Unit tests for the ContentBlock model: defaults, field validation,
    database-level constraints and custom methods.

    Fix: test_name_must_be_slug previously contained an exact duplicate of
    its with-raises/assert block; the redundant copy is removed (same test
    outcome, half the queries).
    """
    # letters, numbers, underscores or hyphens
    VALID_NAME = "NAME"
    VALID_CONTENT = "CONTENT"
    NAME_MAX_LENGTH = 50
    CONTENT_MAX_LENGTH = 512000
    def setUp(self):
        ContentBlock.objects.all().delete()
    def test_manager_class(self):
        assert isinstance(ContentBlock.objects, Manager)
    @pytest.mark.model_fields_validation
    @mock.patch('django.utils.timezone.now', side_effect=[NOW, NOW, LATER])
    def test_create_minimal(self, fake_now):
        # no mandatory fields
        content_block = ContentBlock.objects.create()
        assert isinstance(content_block, ContentBlock)
        # check defaults
        assert content_block.name == ""
        assert content_block.content == ""
        assert content_block.created == NOW
        assert content_block.modified == NOW
        content_block.save()
        assert content_block.modified != NOW
        assert content_block.modified == LATER
        assert fake_now.call_count == 3
    @pytest.mark.model_fields_validation
    def test_name_is_required(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock().full_clean()
        assert excinfo.value.message_dict == {
            'name': ['This field cannot be blank.']
        }
    @pytest.mark.database_integrity
    def test_name_must_be_unique_in_db(self):
        with pytest.raises(IntegrityError) as excinfo:
            ContentBlock.objects.create(name=self.VALID_NAME)
            ContentBlock.objects.create(name=self.VALID_NAME)
        assert str(excinfo.value) == (
            'duplicate key value violates unique constraint '
            '"main_contentblock_name_key"\n'
            'DETAIL: Key (name)=({duplicated_name}) already exists.\n'
        ).format(
            duplicated_name=self.VALID_NAME
        )
    @pytest.mark.database_integrity
    def test_name_length_is_limited_in_db(self):
        # does not raise
        ContentBlock.objects.create(
            name='*' * self.NAME_MAX_LENGTH
        )
        with pytest.raises(DataError) as excinfo:
            ContentBlock.objects.create(
                name='*' * (self.NAME_MAX_LENGTH + 1)
            )
        assert str(excinfo.value) == (
            'value too long for type character varying({max_allowed})\n'
        ).format(
            max_allowed=self.NAME_MAX_LENGTH
        )
    @pytest.mark.model_fields_validation
    def test_name_length_is_limited(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock(name='a' * (self.NAME_MAX_LENGTH + 1)).full_clean()
        assert excinfo.value.message_dict == {
            "name": [
                "Ensure this value has at most {valid} characters "
                "(it has {given}).".format(
                    valid=self.NAME_MAX_LENGTH,
                    given=self.NAME_MAX_LENGTH + 1
                )
            ]
        }
    @pytest.mark.model_fields_validation
    def test_name_must_be_slug(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        ContentBlock(name='name').full_clean()
        ContentBlock(name='na_me').full_clean()
        ContentBlock(name='Name').full_clean()
        ContentBlock(name='NamE').full_clean()
        ContentBlock(name='na-me').full_clean()
        ContentBlock(name='na-me-2').full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock(name='with spaces').full_clean()
        assert excinfo.value.message_dict == {
            "name": [
                "Enter a valid 'slug' consisting of letters, "
                "numbers, underscores or hyphens."
            ]
        }
    @pytest.mark.model_fields_validation
    def test_content_is_unlimited(self):
        # does not raise
        ContentBlock(
            name=self.VALID_NAME,
            content='*' * 10000
        ).full_clean()
    # testing custom methods
    @pytest.mark.model_methods
    def test_convert_to_string(self):
        # __str__ will return name
        content_block = ContentBlock(name=self.VALID_NAME)
        assert str(content_block) == self.VALID_NAME
    @pytest.mark.model_methods
    def test_repr(self):
        # __str__ returns name, but it did not affected __repr__
        content_block = ContentBlock(name=self.VALID_NAME)
        assert repr(content_block) == (
            '<ContentBlock: {name}>'
            .format(name=self.VALID_NAME))
    @pytest.mark.model_methods
    def test_get_absolute_url(self):
        # this method creates url with content_block id
        content_block = ContentBlock.objects.create(name=self.VALID_NAME)
        assert content_block.get_absolute_url() == (
            "/api/v1/content_blocks/{content_block_name}/"
            .format(content_block_name=content_block.name))
|
# -*- coding: utf-8 -*-
# (c) 2012-2018, Ansible
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.core.exceptions import ValidationError
from django.db.models.manager import Manager
from django.db.utils import DataError, IntegrityError
from django.test import TestCase
import mock
import pytest
from galaxy.main.models import ContentBlock
from galaxy.common.testing import NOW, LATER
class ContentBlockModelTest(TestCase):
    """Unit tests for the ContentBlock model: defaults, field validation,
    database-level constraints and custom methods."""
    # letters, numbers, underscores or hyphens
    VALID_NAME = "NAME"
    VALID_CONTENT = "CONTENT"
    NAME_MAX_LENGTH = 50
    CONTENT_MAX_LENGTH = 512000
    def setUp(self):
        ContentBlock.objects.all().delete()
    def test_manager_class(self):
        assert isinstance(ContentBlock.objects, Manager)
    @pytest.mark.model_fields_validation
    @mock.patch('django.utils.timezone.now', side_effect=[NOW, NOW, LATER])
    def test_create_minimal(self, fake_now):
        # no mandatory fields
        content_block = ContentBlock.objects.create()
        assert isinstance(content_block, ContentBlock)
        # check defaults
        assert content_block.name == ""
        assert content_block.content == ""
        assert content_block.created == NOW
        assert content_block.modified == NOW
        content_block.save()
        assert content_block.modified != NOW
        assert content_block.modified == LATER
        assert fake_now.call_count == 3
    @pytest.mark.model_fields_validation
    def test_name_is_required(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock().full_clean()
        assert excinfo.value.message_dict == {
            'name': ['This field cannot be blank.']
        }
    @pytest.mark.database_integrity
    def test_name_must_be_unique_in_db(self):
        with pytest.raises(IntegrityError) as excinfo:
            ContentBlock.objects.create(name=self.VALID_NAME)
            ContentBlock.objects.create(name=self.VALID_NAME)
        assert str(excinfo.value) == (
            'duplicate key value violates unique constraint '
            '"main_contentblock_name_key"\n'
            'DETAIL: Key (name)=({duplicated_name}) already exists.\n'
        ).format(
            duplicated_name=self.VALID_NAME
        )
    @pytest.mark.database_integrity
    def test_name_length_is_limited_in_db(self):
        # does not raise
        ContentBlock.objects.create(
            name='*' * self.NAME_MAX_LENGTH
        )
        with pytest.raises(DataError) as excinfo:
            ContentBlock.objects.create(
                name='*' * (self.NAME_MAX_LENGTH + 1)
            )
        assert str(excinfo.value) == (
            'value too long for type character varying({max_allowed})\n'
        ).format(
            max_allowed=self.NAME_MAX_LENGTH
        )
    @pytest.mark.model_fields_validation
    def test_name_length_is_limited(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock(name='a' * (self.NAME_MAX_LENGTH + 1)).full_clean()
        assert excinfo.value.message_dict == {
            "name": [
                "Ensure this value has at most {valid} characters "
                "(it has {given}).".format(
                    valid=self.NAME_MAX_LENGTH,
                    given=self.NAME_MAX_LENGTH + 1
                )
            ]
        }
    @pytest.mark.model_fields_validation
    def test_name_must_be_slug(self):
        # does not raise
        ContentBlock(name=self.VALID_NAME).full_clean()
        ContentBlock(name='name').full_clean()
        ContentBlock(name='na_me').full_clean()
        ContentBlock(name='Name').full_clean()
        ContentBlock(name='NamE').full_clean()
        ContentBlock(name='na-me').full_clean()
        ContentBlock(name='na-me-2').full_clean()
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock(name='with spaces').full_clean()
        assert excinfo.value.message_dict == {
            "name": [
                "Enter a valid 'slug' consisting of letters, "
                "numbers, underscores or hyphens."
            ]
        }
        # NOTE(review): the block below is an exact duplicate of the check
        # above -- likely redundant and safe to remove.
        with pytest.raises(ValidationError) as excinfo:
            ContentBlock(name='with spaces').full_clean()
        assert excinfo.value.message_dict == {
            "name": [
                "Enter a valid 'slug' consisting of letters, "
                "numbers, underscores or hyphens."
            ]
        }
    @pytest.mark.model_fields_validation
    def test_content_is_unlimited(self):
        # does not raise
        ContentBlock(
            name=self.VALID_NAME,
            content='*' * 10000
        ).full_clean()
    # testing custom methods
    @pytest.mark.model_methods
    def test_convert_to_string(self):
        # __str__ will return name
        content_block = ContentBlock(name=self.VALID_NAME)
        assert str(content_block) == self.VALID_NAME
    @pytest.mark.model_methods
    def test_repr(self):
        # __str__ returns name, but it did not affected __repr__
        content_block = ContentBlock(name=self.VALID_NAME)
        assert repr(content_block) == (
            '<ContentBlock: {name}>'
            .format(name=self.VALID_NAME))
    @pytest.mark.model_methods
    def test_get_absolute_url(self):
        # this method creates url with content_block id
        content_block = ContentBlock.objects.create(name=self.VALID_NAME)
        assert content_block.get_absolute_url() == (
            "/api/v1/content_blocks/{content_block_name}/"
            .format(content_block_name=content_block.name))
|
en
| 0.841654
|
# -*- coding: utf-8 -*- # (c) 2012-2018, Ansible # # This file is part of Ansible Galaxy # # Ansible Galaxy is free software: you can redistribute it and/or modify # it under the terms of the Apache License as published by # the Apache Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Ansible Galaxy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Apache License for more details. # # You should have received a copy of the Apache License # along with Galaxy. If not, see <http://www.apache.org/licenses/>. # letters, numbers, underscores or hyphens # no mandatory fields # check defaults # does not raise # does not raise # does not raise # does not raise # does not raise # testing custom methods # __str__ will return name # __str__ returns name, but it did not affected __repr__ # this method creates url with content_block id
| 1.982759
| 2
|
scripts/analytics/addon_snapshot.py
|
saradbowman/osf.io
| 0
|
6627464
|
<filename>scripts/analytics/addon_snapshot.py
from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
    """ Gather the number of users who have at least one node in each of the stages for an addon
    :param user_settings_list: list of user_settings for a particular addon
    :param has_external_account: whether the addon uses external accounts; determines how node settings are loaded
    :param short_name: short name of addon to get correct node_settings
    :return: dict with number of users that have at least one project at each stage
    """
    from addons.forward.models import NodeSettings as ForwardNodeSettings
    num_enabled = 0  # of users w/ 1+ addon account connected
    num_authorized = 0  # of users w/ 1+ addon account connected to 1+ node
    num_linked = 0  # of users w/ 1+ addon account connected to 1+ node and configured
    # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
    if short_name == 'osfstorage' or short_name == 'wiki':
        # every active, confirmed, non-merged user counts for the always-on addons
        num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
            is_registered=True,
            password__isnull=False,
            merged_by__isnull=True,
            date_disabled__isnull=True,
            date_confirmed__isnull=False
        ).count()
    elif short_name == 'forward':
        # forward has node settings only; a non-null URL means fully configured
        num_enabled = num_authorized = ForwardNodeSettings.objects.count()
        num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
    else:
        for user_settings in paginated(user_settings_list):
            node_settings_list = []
            if has_external_account:
                if user_settings.has_auth:
                    num_enabled += 1
                    node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
            else:
                num_enabled += 1
                node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
            if any([ns.has_auth for ns in node_settings_list if ns]):
                num_authorized += 1
            if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
                num_linked += 1
    return {
        'enabled': num_enabled,
        'authorized': num_authorized,
        'linked': num_linked
    }
class AddonSnapshot(SnapshotAnalytics):
    """Snapshot analytics of addon usage across users and nodes."""
    @property
    def collection_name(self):
        # analytics collection the generated events are written to
        return 'addon_snapshot'
    def get_events(self, date=None):
        """Build one event per available addon with user- and node-level counts."""
        super(AddonSnapshot, self).get_events(date)
        counts = []
        addons_available = {k: v for k, v in [(addon.short_name, addon) for addon in ADDONS_AVAILABLE]}
        for short_name, addon in addons_available.items():
            has_external_account = hasattr(addon.models.get('nodesettings'), 'external_account')
            connected_count = 0
            deleted_count = 0
            disconnected_count = 0
            node_settings_model = addon.models.get('nodesettings')
            if node_settings_model:
                # count node settings attached to real (non-legacy-collection) nodes
                for node_settings in paginated(node_settings_model):
                    if node_settings.owner and not node_settings.owner.all_tags.filter(name='old_node_collection', system=True).exists():
                        connected_count += 1
            deleted_count = addon.models['nodesettings'].objects.filter(deleted=True).count() if addon.models.get('nodesettings') else 0
            if has_external_account:
                # disconnected = node settings with no external account attached
                disconnected_count = addon.models['nodesettings'].objects.filter(external_account__isnull=True, is_deleted=False).count() if addon.models.get('nodesettings') else 0
            else:
                if addon.models.get('nodesettings'):
                    # configured but incomplete settings count as disconnected
                    for nsm in addon.models['nodesettings'].objects.filter(deleted=False):
                        if nsm.configured and not nsm.complete:
                            disconnected_count += 1
            total = connected_count + deleted_count + disconnected_count
            usage_counts = get_enabled_authorized_linked(addon.models.get('usersettings'), has_external_account, addon.short_name)
            counts.append({
                'provider': {
                    'name': short_name
                },
                'users': usage_counts,
                'nodes': {
                    'total': total,
                    'connected': connected_count,
                    'deleted': deleted_count,
                    'disconnected': disconnected_count
                }
            })
            logger.info(
                '{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
                    addon.short_name,
                    usage_counts['linked'],
                    total
                )
            )
        return counts
def get_class():
    # Hook used by the analytics runner to discover this snapshot class.
    return AddonSnapshot
if __name__ == '__main__':
    # Allow running this snapshot directly as a script.
    addon_snapshot = AddonSnapshot()
    events = addon_snapshot.get_events()
    addon_snapshot.send_events(events)
|
<filename>scripts/analytics/addon_snapshot.py
from __future__ import absolute_import
import logging
# App must be initialized before models or ADDONS_AVAILABLE are available
from website.app import init_app
init_app()
from osf.models import OSFUser, AbstractNode
from framework.database import paginated
from scripts.analytics.base import SnapshotAnalytics
from website.settings import ADDONS_AVAILABLE
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Modified from scripts/analytics/benchmarks.py
def get_enabled_authorized_linked(user_settings_list, has_external_account, short_name):
    """ Gather the number of users who have at least one node in each of the stages for an addon
    :param user_settings_list: list of user_settings for a particular addon
    :param has_external_account: whether the addon uses external accounts; determines how node settings are loaded
    :param short_name: short name of addon to get correct node_settings
    :return: dict with number of users that have at least one project at each stage
    """
    from addons.forward.models import NodeSettings as ForwardNodeSettings
    num_enabled = 0  # of users w/ 1+ addon account connected
    num_authorized = 0  # of users w/ 1+ addon account connected to 1+ node
    num_linked = 0  # of users w/ 1+ addon account connected to 1+ node and configured
    # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
    if short_name == 'osfstorage' or short_name == 'wiki':
        # every active, confirmed, non-merged user counts for the always-on addons
        num_enabled = num_authorized = num_linked = OSFUser.objects.filter(
            is_registered=True,
            password__isnull=False,
            merged_by__isnull=True,
            date_disabled__isnull=True,
            date_confirmed__isnull=False
        ).count()
    elif short_name == 'forward':
        # forward has node settings only; a non-null URL means fully configured
        num_enabled = num_authorized = ForwardNodeSettings.objects.count()
        num_linked = ForwardNodeSettings.objects.filter(url__isnull=False).count()
    else:
        for user_settings in paginated(user_settings_list):
            node_settings_list = []
            if has_external_account:
                if user_settings.has_auth:
                    num_enabled += 1
                    node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.oauth_grants.keys()]
            else:
                num_enabled += 1
                node_settings_list = [AbstractNode.load(guid).get_addon(short_name) for guid in user_settings.nodes_authorized]
            if any([ns.has_auth for ns in node_settings_list if ns]):
                num_authorized += 1
            if any([(ns.complete and ns.configured) for ns in node_settings_list if ns]):
                num_linked += 1
    return {
        'enabled': num_enabled,
        'authorized': num_authorized,
        'linked': num_linked
    }
class AddonSnapshot(SnapshotAnalytics):
    """Snapshot analytics of addon usage across users and nodes."""
    @property
    def collection_name(self):
        # analytics collection the generated events are written to
        return 'addon_snapshot'
    def get_events(self, date=None):
        """Build one event per available addon with user- and node-level counts."""
        super(AddonSnapshot, self).get_events(date)
        counts = []
        addons_available = {k: v for k, v in [(addon.short_name, addon) for addon in ADDONS_AVAILABLE]}
        for short_name, addon in addons_available.items():
            has_external_account = hasattr(addon.models.get('nodesettings'), 'external_account')
            connected_count = 0
            deleted_count = 0
            disconnected_count = 0
            node_settings_model = addon.models.get('nodesettings')
            if node_settings_model:
                # count node settings attached to real (non-legacy-collection) nodes
                for node_settings in paginated(node_settings_model):
                    if node_settings.owner and not node_settings.owner.all_tags.filter(name='old_node_collection', system=True).exists():
                        connected_count += 1
            deleted_count = addon.models['nodesettings'].objects.filter(deleted=True).count() if addon.models.get('nodesettings') else 0
            if has_external_account:
                # disconnected = node settings with no external account attached
                disconnected_count = addon.models['nodesettings'].objects.filter(external_account__isnull=True, is_deleted=False).count() if addon.models.get('nodesettings') else 0
            else:
                if addon.models.get('nodesettings'):
                    # configured but incomplete settings count as disconnected
                    for nsm in addon.models['nodesettings'].objects.filter(deleted=False):
                        if nsm.configured and not nsm.complete:
                            disconnected_count += 1
            total = connected_count + deleted_count + disconnected_count
            usage_counts = get_enabled_authorized_linked(addon.models.get('usersettings'), has_external_account, addon.short_name)
            counts.append({
                'provider': {
                    'name': short_name
                },
                'users': usage_counts,
                'nodes': {
                    'total': total,
                    'connected': connected_count,
                    'deleted': deleted_count,
                    'disconnected': disconnected_count
                }
            })
            logger.info(
                '{} counted. Users with a linked node: {}, Total connected nodes: {}.'.format(
                    addon.short_name,
                    usage_counts['linked'],
                    total
                )
            )
        return counts
def get_class():
    # Hook used by the analytics runner to discover this snapshot class.
    return AddonSnapshot
if __name__ == '__main__':
    # Allow running this snapshot directly as a script.
    addon_snapshot = AddonSnapshot()
    events = addon_snapshot.get_events()
    addon_snapshot.send_events(events)
|
en
| 0.863746
|
# App must be initialized before models or ADDONS_AVAILABLE are available # Modified from scripts/analytics/benchmarks.py Gather the number of users who have at least one node in each of the stages for an addon :param user_settings_list: list of user_settings for a particualr addon :param has_external_account: where addon is derrived from, determines method to load node settings :param short_name: short name of addon to get correct node_settings :return: dict with number of users that have at least one project at each stage # of users w/ 1+ addon account connected # of users w/ 1+ addon account connected to 1+ node # of users w/ 1+ addon account connected to 1+ node and configured # osfstorage and wiki don't have user_settings, so always assume they're enabled, authorized, linked
| 2.096857
| 2
|
npbench/benchmarks/channel_flow/channel_flow_numba_n.py
|
frahlg/npbench
| 27
|
6627465
|
<reponame>frahlg/npbench
# Barba, <NAME>., and Forsyth, <NAME>. (2018).
# CFD Python: the 12 steps to Navier-Stokes equations.
# Journal of Open Source Education, 1(9), 21,
# https://doi.org/10.21105/jose.00021
# TODO: License
# (c) 2017 <NAME>, <NAME>.
# All content is under Creative Commons Attribution CC-BY 4.0,
# and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018).
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=False, fastmath=True)
def build_up_b(rho, dt, dx, dy, u, v):
    """Assemble the source term ``b`` of the pressure-Poisson equation from
    the current velocity field, using central differences on the interior
    and periodic wrap-around columns in x (Barba & Forsyth, CFD Python,
    step 12 — see header).

    Returns a new array shaped like ``u``; ``u`` and ``v`` are not modified.
    """
    b = np.zeros_like(u)
    # Interior points: central differences in both x and y.
    b[1:-1,
      1:-1] = (rho * (1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx) +
                                (v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy)) -
                      ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 - 2 *
                      ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) *
                       (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx)) -
                      ((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))
    # Periodic BC Pressure @ x = 2 (last column wraps to column 0)
    b[1:-1, -1] = (rho * (1 / dt * ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx) +
                                    (v[2:, -1] - v[0:-2, -1]) / (2 * dy)) -
                          ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx))**2 - 2 *
                          ((u[2:, -1] - u[0:-2, -1]) / (2 * dy) *
                           (v[1:-1, 0] - v[1:-1, -2]) / (2 * dx)) -
                          ((v[2:, -1] - v[0:-2, -1]) / (2 * dy))**2))
    # Periodic BC Pressure @ x = 0 (first column wraps to the last column)
    b[1:-1, 0] = (rho * (1 / dt * ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx) +
                                   (v[2:, 0] - v[0:-2, 0]) / (2 * dy)) -
                         ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx))**2 - 2 *
                         ((u[2:, 0] - u[0:-2, 0]) / (2 * dy) *
                          (v[1:-1, 1] - v[1:-1, -1]) /
                          (2 * dx)) - ((v[2:, 0] - v[0:-2, 0]) / (2 * dy))**2))
    return b
@nb.jit(nopython=True, parallel=False, fastmath=True)
def pressure_poisson_periodic(nit, p, dx, dy, b):
    """Run exactly ``nit`` Jacobi-style sweeps of the pressure-Poisson
    equation with source ``b``: periodic columns in x, zero-normal-gradient
    walls in y. Mutates ``p`` in place; returns None.
    """
    pn = np.empty_like(p)
    # The loop index is only a sweep counter; every sweep updates p from the
    # snapshot pn of the previous iterate.
    for q in range(nit):
        pn = p.copy()
        p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                          (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                         (2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
                         (2 * (dx**2 + dy**2)) * b[1:-1, 1:-1])
        # Periodic BC Pressure @ x = 2
        p[1:-1, -1] = (((pn[1:-1, 0] + pn[1:-1, -2]) * dy**2 +
                        (pn[2:, -1] + pn[0:-2, -1]) * dx**2) /
                       (2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
                       (2 * (dx**2 + dy**2)) * b[1:-1, -1])
        # Periodic BC Pressure @ x = 0
        p[1:-1,
          0] = (((pn[1:-1, 1] + pn[1:-1, -1]) * dy**2 +
                 (pn[2:, 0] + pn[0:-2, 0]) * dx**2) / (2 * (dx**2 + dy**2)) -
                dx**2 * dy**2 / (2 * (dx**2 + dy**2)) * b[1:-1, 0])
        # Wall boundary conditions, pressure
        p[-1, :] = p[-2, :]  # dp/dy = 0 at y = 2
        p[0, :] = p[1, :]  # dp/dy = 0 at y = 0
@nb.jit(nopython=True, parallel=False, fastmath=True)
def channel_flow(nit, u, v, dt, dx, dy, p, rho, nu, F):
    """Advance the 2D channel flow (periodic in x, no-slip walls in y) with
    constant body force ``F`` in x, until the relative change of the total
    u-velocity falls to 0.001 or below.

    ``nit`` is the number of pressure-Poisson sweeps per time step.
    Mutates ``u``, ``v`` and ``p`` in place; returns the step count.
    """
    udiff = 1
    stepcount = 0
    while udiff > .001:
        # Snapshot the previous velocities: the explicit update and the
        # convergence test below both need them.
        un = u.copy()
        vn = v.copy()
        b = build_up_b(rho, dt, dx, dy, u, v)
        pressure_poisson_periodic(nit, p, dx, dy, b)
        # Interior momentum update, explicit one-sided differences for
        # advection plus central second differences for diffusion.
        u[1:-1,
          1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
                   (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
                   vn[1:-1, 1:-1] * dt / dy *
                   (un[1:-1, 1:-1] - un[0:-2, 1:-1]) - dt / (2 * rho * dx) *
                   (p[1:-1, 2:] - p[1:-1, 0:-2]) + nu *
                   (dt / dx**2 *
                    (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
                    dt / dy**2 *
                    (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])) +
                   F * dt)
        v[1:-1,
          1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
                   (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
                   vn[1:-1, 1:-1] * dt / dy *
                   (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - dt / (2 * rho * dy) *
                   (p[2:, 1:-1] - p[0:-2, 1:-1]) + nu *
                   (dt / dx**2 *
                    (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
                    dt / dy**2 *
                    (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))
        # Periodic BC u @ x = 2
        u[1:-1, -1] = (
            un[1:-1, -1] - un[1:-1, -1] * dt / dx *
            (un[1:-1, -1] - un[1:-1, -2]) - vn[1:-1, -1] * dt / dy *
            (un[1:-1, -1] - un[0:-2, -1]) - dt / (2 * rho * dx) *
            (p[1:-1, 0] - p[1:-1, -2]) + nu *
            (dt / dx**2 *
             (un[1:-1, 0] - 2 * un[1:-1, -1] + un[1:-1, -2]) + dt / dy**2 *
             (un[2:, -1] - 2 * un[1:-1, -1] + un[0:-2, -1])) + F * dt)
        # Periodic BC u @ x = 0
        u[1:-1,
          0] = (un[1:-1, 0] - un[1:-1, 0] * dt / dx *
                (un[1:-1, 0] - un[1:-1, -1]) - vn[1:-1, 0] * dt / dy *
                (un[1:-1, 0] - un[0:-2, 0]) - dt / (2 * rho * dx) *
                (p[1:-1, 1] - p[1:-1, -1]) + nu *
                (dt / dx**2 *
                 (un[1:-1, 1] - 2 * un[1:-1, 0] + un[1:-1, -1]) + dt / dy**2 *
                 (un[2:, 0] - 2 * un[1:-1, 0] + un[0:-2, 0])) + F * dt)
        # Periodic BC v @ x = 2
        v[1:-1, -1] = (
            vn[1:-1, -1] - un[1:-1, -1] * dt / dx *
            (vn[1:-1, -1] - vn[1:-1, -2]) - vn[1:-1, -1] * dt / dy *
            (vn[1:-1, -1] - vn[0:-2, -1]) - dt / (2 * rho * dy) *
            (p[2:, -1] - p[0:-2, -1]) + nu *
            (dt / dx**2 *
             (vn[1:-1, 0] - 2 * vn[1:-1, -1] + vn[1:-1, -2]) + dt / dy**2 *
             (vn[2:, -1] - 2 * vn[1:-1, -1] + vn[0:-2, -1])))
        # Periodic BC v @ x = 0
        v[1:-1,
          0] = (vn[1:-1, 0] - un[1:-1, 0] * dt / dx *
                (vn[1:-1, 0] - vn[1:-1, -1]) - vn[1:-1, 0] * dt / dy *
                (vn[1:-1, 0] - vn[0:-2, 0]) - dt / (2 * rho * dy) *
                (p[2:, 0] - p[0:-2, 0]) + nu *
                (dt / dx**2 *
                 (vn[1:-1, 1] - 2 * vn[1:-1, 0] + vn[1:-1, -1]) + dt / dy**2 *
                 (vn[2:, 0] - 2 * vn[1:-1, 0] + vn[0:-2, 0])))
        # Wall BC: u,v = 0 @ y = 0,2
        u[0, :] = 0
        u[-1, :] = 0
        v[0, :] = 0
        v[-1, :] = 0
        # Relative change of the summed u field drives convergence.
        # NOTE(review): this is signed and divides by np.sum(u) — assumes the
        # sum stays positive; confirm for the intended initial conditions.
        udiff = (np.sum(u) - np.sum(un)) / np.sum(u)
        stepcount += 1
    return stepcount
|
# Barba, <NAME>., and Forsyth, <NAME>. (2018).
# CFD Python: the 12 steps to Navier-Stokes equations.
# Journal of Open Source Education, 1(9), 21,
# https://doi.org/10.21105/jose.00021
# TODO: License
# (c) 2017 <NAME>, <NAME>.
# All content is under Creative Commons Attribution CC-BY 4.0,
# and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018).
import numpy as np
import numba as nb
@nb.jit(nopython=True, parallel=False, fastmath=True)
def build_up_b(rho, dt, dx, dy, u, v):
    """Assemble the source term ``b`` of the pressure-Poisson equation from
    the current velocity field, using central differences on the interior
    and periodic wrap-around columns in x (Barba & Forsyth, CFD Python,
    step 12 — see header).

    Returns a new array shaped like ``u``; ``u`` and ``v`` are not modified.
    """
    b = np.zeros_like(u)
    # Interior points: central differences in both x and y.
    b[1:-1,
      1:-1] = (rho * (1 / dt * ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx) +
                                (v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy)) -
                      ((u[1:-1, 2:] - u[1:-1, 0:-2]) / (2 * dx))**2 - 2 *
                      ((u[2:, 1:-1] - u[0:-2, 1:-1]) / (2 * dy) *
                       (v[1:-1, 2:] - v[1:-1, 0:-2]) / (2 * dx)) -
                      ((v[2:, 1:-1] - v[0:-2, 1:-1]) / (2 * dy))**2))
    # Periodic BC Pressure @ x = 2 (last column wraps to column 0)
    b[1:-1, -1] = (rho * (1 / dt * ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx) +
                                    (v[2:, -1] - v[0:-2, -1]) / (2 * dy)) -
                          ((u[1:-1, 0] - u[1:-1, -2]) / (2 * dx))**2 - 2 *
                          ((u[2:, -1] - u[0:-2, -1]) / (2 * dy) *
                           (v[1:-1, 0] - v[1:-1, -2]) / (2 * dx)) -
                          ((v[2:, -1] - v[0:-2, -1]) / (2 * dy))**2))
    # Periodic BC Pressure @ x = 0 (first column wraps to the last column)
    b[1:-1, 0] = (rho * (1 / dt * ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx) +
                                   (v[2:, 0] - v[0:-2, 0]) / (2 * dy)) -
                         ((u[1:-1, 1] - u[1:-1, -1]) / (2 * dx))**2 - 2 *
                         ((u[2:, 0] - u[0:-2, 0]) / (2 * dy) *
                          (v[1:-1, 1] - v[1:-1, -1]) /
                          (2 * dx)) - ((v[2:, 0] - v[0:-2, 0]) / (2 * dy))**2))
    return b
@nb.jit(nopython=True, parallel=False, fastmath=True)
def pressure_poisson_periodic(nit, p, dx, dy, b):
    """Run exactly ``nit`` Jacobi-style sweeps of the pressure-Poisson
    equation with source ``b``: periodic columns in x, zero-normal-gradient
    walls in y. Mutates ``p`` in place; returns None.
    """
    pn = np.empty_like(p)
    # The loop index is only a sweep counter; every sweep updates p from the
    # snapshot pn of the previous iterate.
    for q in range(nit):
        pn = p.copy()
        p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                          (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                         (2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
                         (2 * (dx**2 + dy**2)) * b[1:-1, 1:-1])
        # Periodic BC Pressure @ x = 2
        p[1:-1, -1] = (((pn[1:-1, 0] + pn[1:-1, -2]) * dy**2 +
                        (pn[2:, -1] + pn[0:-2, -1]) * dx**2) /
                       (2 * (dx**2 + dy**2)) - dx**2 * dy**2 /
                       (2 * (dx**2 + dy**2)) * b[1:-1, -1])
        # Periodic BC Pressure @ x = 0
        p[1:-1,
          0] = (((pn[1:-1, 1] + pn[1:-1, -1]) * dy**2 +
                 (pn[2:, 0] + pn[0:-2, 0]) * dx**2) / (2 * (dx**2 + dy**2)) -
                dx**2 * dy**2 / (2 * (dx**2 + dy**2)) * b[1:-1, 0])
        # Wall boundary conditions, pressure
        p[-1, :] = p[-2, :]  # dp/dy = 0 at y = 2
        p[0, :] = p[1, :]  # dp/dy = 0 at y = 0
@nb.jit(nopython=True, parallel=False, fastmath=True)
def channel_flow(nit, u, v, dt, dx, dy, p, rho, nu, F):
    """Advance the 2D channel flow (periodic in x, no-slip walls in y) with
    constant body force ``F`` in x, until the relative change of the total
    u-velocity falls to 0.001 or below.

    ``nit`` is the number of pressure-Poisson sweeps per time step.
    Mutates ``u``, ``v`` and ``p`` in place; returns the step count.
    """
    udiff = 1
    stepcount = 0
    while udiff > .001:
        # Snapshot the previous velocities: the explicit update and the
        # convergence test below both need them.
        un = u.copy()
        vn = v.copy()
        b = build_up_b(rho, dt, dx, dy, u, v)
        pressure_poisson_periodic(nit, p, dx, dy, b)
        # Interior momentum update, explicit one-sided differences for
        # advection plus central second differences for diffusion.
        u[1:-1,
          1:-1] = (un[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
                   (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
                   vn[1:-1, 1:-1] * dt / dy *
                   (un[1:-1, 1:-1] - un[0:-2, 1:-1]) - dt / (2 * rho * dx) *
                   (p[1:-1, 2:] - p[1:-1, 0:-2]) + nu *
                   (dt / dx**2 *
                    (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
                    dt / dy**2 *
                    (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])) +
                   F * dt)
        v[1:-1,
          1:-1] = (vn[1:-1, 1:-1] - un[1:-1, 1:-1] * dt / dx *
                   (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
                   vn[1:-1, 1:-1] * dt / dy *
                   (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) - dt / (2 * rho * dy) *
                   (p[2:, 1:-1] - p[0:-2, 1:-1]) + nu *
                   (dt / dx**2 *
                    (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
                    dt / dy**2 *
                    (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))
        # Periodic BC u @ x = 2
        u[1:-1, -1] = (
            un[1:-1, -1] - un[1:-1, -1] * dt / dx *
            (un[1:-1, -1] - un[1:-1, -2]) - vn[1:-1, -1] * dt / dy *
            (un[1:-1, -1] - un[0:-2, -1]) - dt / (2 * rho * dx) *
            (p[1:-1, 0] - p[1:-1, -2]) + nu *
            (dt / dx**2 *
             (un[1:-1, 0] - 2 * un[1:-1, -1] + un[1:-1, -2]) + dt / dy**2 *
             (un[2:, -1] - 2 * un[1:-1, -1] + un[0:-2, -1])) + F * dt)
        # Periodic BC u @ x = 0
        u[1:-1,
          0] = (un[1:-1, 0] - un[1:-1, 0] * dt / dx *
                (un[1:-1, 0] - un[1:-1, -1]) - vn[1:-1, 0] * dt / dy *
                (un[1:-1, 0] - un[0:-2, 0]) - dt / (2 * rho * dx) *
                (p[1:-1, 1] - p[1:-1, -1]) + nu *
                (dt / dx**2 *
                 (un[1:-1, 1] - 2 * un[1:-1, 0] + un[1:-1, -1]) + dt / dy**2 *
                 (un[2:, 0] - 2 * un[1:-1, 0] + un[0:-2, 0])) + F * dt)
        # Periodic BC v @ x = 2
        v[1:-1, -1] = (
            vn[1:-1, -1] - un[1:-1, -1] * dt / dx *
            (vn[1:-1, -1] - vn[1:-1, -2]) - vn[1:-1, -1] * dt / dy *
            (vn[1:-1, -1] - vn[0:-2, -1]) - dt / (2 * rho * dy) *
            (p[2:, -1] - p[0:-2, -1]) + nu *
            (dt / dx**2 *
             (vn[1:-1, 0] - 2 * vn[1:-1, -1] + vn[1:-1, -2]) + dt / dy**2 *
             (vn[2:, -1] - 2 * vn[1:-1, -1] + vn[0:-2, -1])))
        # Periodic BC v @ x = 0
        v[1:-1,
          0] = (vn[1:-1, 0] - un[1:-1, 0] * dt / dx *
                (vn[1:-1, 0] - vn[1:-1, -1]) - vn[1:-1, 0] * dt / dy *
                (vn[1:-1, 0] - vn[0:-2, 0]) - dt / (2 * rho * dy) *
                (p[2:, 0] - p[0:-2, 0]) + nu *
                (dt / dx**2 *
                 (vn[1:-1, 1] - 2 * vn[1:-1, 0] + vn[1:-1, -1]) + dt / dy**2 *
                 (vn[2:, 0] - 2 * vn[1:-1, 0] + vn[0:-2, 0])))
        # Wall BC: u,v = 0 @ y = 0,2
        u[0, :] = 0
        u[-1, :] = 0
        v[0, :] = 0
        v[-1, :] = 0
        # Relative change of the summed u field drives convergence.
        # NOTE(review): this is signed and divides by np.sum(u) — assumes the
        # sum stays positive; confirm for the intended initial conditions.
        udiff = (np.sum(u) - np.sum(un)) / np.sum(u)
        stepcount += 1
    return stepcount
|
en
| 0.813897
|
# Barba, <NAME>., and Forsyth, <NAME>. (2018). # CFD Python: the 12 steps to Navier-Stokes equations. # Journal of Open Source Education, 1(9), 21, # https://doi.org/10.21105/jose.00021 # TODO: License # (c) 2017 <NAME>, <NAME>. # All content is under Creative Commons Attribution CC-BY 4.0, # and all code is under BSD-3 clause (previously under MIT, and changed on March 8, 2018). # Periodic BC Pressure @ x = 2 # Periodic BC Pressure @ x = 0 # Periodic BC Pressure @ x = 2 # Periodic BC Pressure @ x = 0 # Wall boundary conditions, pressure # dp/dy = 0 at y = 2 # dp/dy = 0 at y = 0 # Periodic BC u @ x = 2 # Periodic BC u @ x = 0 # Periodic BC v @ x = 2 # Periodic BC v @ x = 0 # Wall BC: u,v = 0 @ y = 0,2
| 2.743735
| 3
|
tests/STDF/test_EPS.py
|
awinia-github/Semi-ATE-STDF
| 0
|
6627466
|
<reponame>awinia-github/Semi-ATE-STDF
import os
import tempfile
from tests.STDF.STDFRecordTest import STDFRecordTest
from STDF import EPS
# End Program Section Record
# Function:
# Marks the end of the current program section (or sequencer) in the job plan.
def test_EPS():
    """Round-trip the EPS record in both little- and big-endian form."""
    for endianness in ('<', '>'):
        eps(endianness)
def eps(end):
    """Serialize and de-serialize an EPS (End Program Section) record.

    :param end: struct endianness marker, ``'<'`` (little) or ``'>'`` (big)
    """
    # ATDF page 58: EPS carries no data fields, so the ATDF form is just the tag.
    expected_atdf = "EPS:"
    # record length in bytes (EPS has an empty payload)
    rec_len = 0
    # STDF v4 page 63
    record = EPS(endian=end)
    # Test serialization
    # 1. Save EPS STDF record into a file
    # 2. Read byte by byte and compare with expected value
    tf = tempfile.NamedTemporaryFile(delete=False)
    tf.close()  # close the temp handle before re-opening by name (portable on Windows)
    # ERROR : STDF.records.STDFError: EPS._pack_item(REC_LEN) : Unsupported Reference '' vs 'U*2'
    w_data = record.__repr__()
    # BUG FIX: the original called `f.close` without parentheses, which never
    # closed the write handle; context managers guarantee the files are closed.
    with open(tf.name, "wb") as f:
        f.write(w_data)
    with open(tf.name, "rb") as f:
        stdfRecTest = STDFRecordTest(f, end)
        # rec_len, rec_type, rec_sub
        stdfRecTest.assert_file_record_header(rec_len, 20, 20)
    # Test de-serialization
    # 1. Open STDF record from a file
    # 2. Read record fields and compare with the expected value
    inst = EPS('V4', end, w_data)
    # rec_len, rec_type, rec_sub
    stdfRecTest.assert_instance_record_header(inst, rec_len, 20, 20)
    # Test ATDF output
    assert inst.to_atdf() == expected_atdf
    # ToDo: Test JSON output
    os.remove(tf.name)
|
import os
import tempfile
from tests.STDF.STDFRecordTest import STDFRecordTest
from STDF import EPS
# End Program Section Record
# Function:
# Marks the end of the current program section (or sequencer) in the job plan.
def test_EPS():
    """Round-trip the EPS record in both little- and big-endian form."""
    for endianness in ('<', '>'):
        eps(endianness)
def eps(end):
    """Serialize and de-serialize an EPS (End Program Section) record.

    :param end: struct endianness marker, ``'<'`` (little) or ``'>'`` (big)
    """
    # ATDF page 58: EPS carries no data fields, so the ATDF form is just the tag.
    expected_atdf = "EPS:"
    # record length in bytes (EPS has an empty payload)
    rec_len = 0
    # STDF v4 page 63
    record = EPS(endian=end)
    # Test serialization
    # 1. Save EPS STDF record into a file
    # 2. Read byte by byte and compare with expected value
    tf = tempfile.NamedTemporaryFile(delete=False)
    tf.close()  # close the temp handle before re-opening by name (portable on Windows)
    # ERROR : STDF.records.STDFError: EPS._pack_item(REC_LEN) : Unsupported Reference '' vs 'U*2'
    w_data = record.__repr__()
    # BUG FIX: the original called `f.close` without parentheses, which never
    # closed the write handle; context managers guarantee the files are closed.
    with open(tf.name, "wb") as f:
        f.write(w_data)
    with open(tf.name, "rb") as f:
        stdfRecTest = STDFRecordTest(f, end)
        # rec_len, rec_type, rec_sub
        stdfRecTest.assert_file_record_header(rec_len, 20, 20)
    # Test de-serialization
    # 1. Open STDF record from a file
    # 2. Read record fields and compare with the expected value
    inst = EPS('V4', end, w_data)
    # rec_len, rec_type, rec_sub
    stdfRecTest.assert_instance_record_header(inst, rec_len, 20, 20)
    # Test ATDF output
    assert inst.to_atdf() == expected_atdf
    # ToDo: Test JSON output
    os.remove(tf.name)
|
en
| 0.639914
|
# End Program Section Record # Function: # Marks the end of the current program section (or sequencer) in the job plan. # ATDF page 58 # record length in bytes # STDF v4 page 63 # Test serialization # 1. Save EPS STDF record into a file # 2. Read byte by byte and compare with expected value # ERROR : STDF.records.STDFError: EPS._pack_item(REC_LEN) : Unsupported Reference '' vs 'U*2' # rec_len, rec_type, rec_sub # Test de-serialization # 1. Open STDF record from a file # 2. Read record fields and compare with the expected value # rec_len, rec_type, rec_sub # Test ATDF output # ToDo: Test JSON output
| 2.560037
| 3
|
python/ee/tests/collection_test.py
|
Danielbatista0590/earthengine-api
| 1
|
6627467
|
<filename>python/ee/tests/collection_test.py
#!/usr/bin/env python
"""Test for the ee.collection module."""
import datetime
import unittest
import ee
from ee import apitestcase
class CollectionTestCase(apitestcase.ApiTestCase):
  """Unit tests for ee.Collection: sort/limit, filtering, map and iterate."""
  def testSortAndLimit(self):
    """Verifies the behavior of the sort() and limit() methods."""
    collection = ee.Collection(ee.Function(), {})
    limited = collection.limit(10)
    self.assertEqual(ee.ApiFunction.lookup('Collection.limit'), limited.func)
    self.assertEqual({'collection': collection, 'limit': 10}, limited.args)
    # sort() lowers to the same Collection.limit API function, carrying
    # key/ascending arguments instead of a limit.
    sorted_collection = collection.sort('bar', True)
    self.assertEqual(
        ee.ApiFunction.lookup('Collection.limit'), sorted_collection.func)
    self.assertEqual({
        'collection': collection,
        'key': ee.String('bar'),
        'ascending': True
    }, sorted_collection.args)
    reverse_sorted_collection = collection.sort('bar', False)
    self.assertEqual(
        ee.ApiFunction.lookup('Collection.limit'),
        reverse_sorted_collection.func)
    self.assertEqual({
        'collection': collection,
        'key': ee.String('bar'),
        'ascending': False
    }, reverse_sorted_collection.args)
  def testFilter(self):
    """Verifies the behavior of filter() method."""
    collection = ee.Collection(ee.Function(), {})
    # We don't allow empty filters.
    self.assertRaises(Exception, collection.filter)
    filtered = collection.filter(ee.Filter.eq('foo', 1))
    self.assertEqual(ee.ApiFunction.lookup('Collection.filter'), filtered.func)
    self.assertEqual({
        'collection': collection,
        'filter': ee.Filter.eq('foo', 1)
    }, filtered.args)
    self.assertIsInstance(filtered, ee.Collection)
  def testFilterShortcuts(self):
    """Verifies the behavior of the various filtering shortcut methods."""
    collection = ee.Collection(ee.Function(), {})
    geom = {'type': 'Polygon', 'coordinates': [[[1, 2], [3, 4]]]}
    d1 = datetime.datetime.strptime('1/1/2000', '%m/%d/%Y')
    d2 = datetime.datetime.strptime('1/1/2001', '%m/%d/%Y')
    # Each shortcut must be exactly equivalent to filter() with the
    # corresponding ee.Filter constructor.
    self.assertEqual(
        collection.filter(ee.Filter.geometry(geom)),
        collection.filterBounds(geom))
    self.assertEqual(
        collection.filter(ee.Filter.date(d1)), collection.filterDate(d1))
    self.assertEqual(
        collection.filter(ee.Filter.date(d1, d2)), collection.filterDate(
            d1, d2))
    self.assertEqual(
        collection.filter(ee.Filter.eq('foo', 13)),
        collection.filterMetadata('foo', 'equals', 13))
  def testMapping(self):
    """Verifies the behavior of the map() method."""
    collection = ee.ImageCollection('foo')
    algorithm = lambda img: img.select('bar')
    mapped = collection.map(algorithm)
    self.assertIsInstance(mapped, ee.ImageCollection)
    self.assertEqual(ee.ApiFunction.lookup('Collection.map'), mapped.func)
    self.assertEqual(collection, mapped.args['collection'])
    # Need to do a serialized comparison for the function body because
    # variables returned from CustomFunction.variable() do not implement
    # __eq__.
    sig = {
        'returns': 'Image',
        'args': [{'name': '_MAPPING_VAR_0_0', 'type': 'Image'}]
    }
    expected_function = ee.CustomFunction(sig, algorithm)
    self.assertEqual(expected_function.serialize(),
                     mapped.args['baseAlgorithm'].serialize())
  def testNestedMapping(self):
    """Verifies that nested map() calls produce distinct variables."""
    collection = ee.FeatureCollection('foo')
    result = collection.map(lambda x: collection.map(lambda y: [x, y]))
    # Verify the signatures.
    self.assertEqual('_MAPPING_VAR_1_0',
                     result.args['baseAlgorithm']._signature['args'][0]['name'])
    inner_result = result.args['baseAlgorithm']._body
    self.assertEqual(
        '_MAPPING_VAR_0_0',
        inner_result.args['baseAlgorithm']._signature['args'][0]['name'])
    # Verify the references.
    self.assertEqual('_MAPPING_VAR_1_0',
                     inner_result.args['baseAlgorithm']._body[0].varName)
    self.assertEqual('_MAPPING_VAR_0_0',
                     inner_result.args['baseAlgorithm']._body[1].varName)
  def testIteration(self):
    """Verifies the behavior of the iterate() method."""
    collection = ee.ImageCollection('foo')
    first = ee.Image(0)
    algorithm = lambda img, prev: img.addBands(ee.Image(prev))
    result = collection.iterate(algorithm, first)
    self.assertEqual(ee.ApiFunction.lookup('Collection.iterate'), result.func)
    self.assertEqual(collection, result.args['collection'])
    self.assertEqual(first, result.args['first'])
    # Need to do a serialized comparison for the function body because
    # variables returned from CustomFunction.variable() do not implement
    # __eq__.
    sig = {
        'returns': 'Object',
        'args': [
            {'name': '_MAPPING_VAR_0_0', 'type': 'Image'},
            {'name': '_MAPPING_VAR_0_1', 'type': 'Object'}
        ]
    }
    expected_function = ee.CustomFunction(sig, algorithm)
    self.assertEqual(expected_function.serialize(),
                     result.args['function'].serialize())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
<filename>python/ee/tests/collection_test.py
#!/usr/bin/env python
"""Test for the ee.collection module."""
import datetime
import unittest
import ee
from ee import apitestcase
class CollectionTestCase(apitestcase.ApiTestCase):
  """Unit tests for ee.Collection: sort/limit, filtering, map and iterate."""
  def testSortAndLimit(self):
    """Verifies the behavior of the sort() and limit() methods."""
    collection = ee.Collection(ee.Function(), {})
    limited = collection.limit(10)
    self.assertEqual(ee.ApiFunction.lookup('Collection.limit'), limited.func)
    self.assertEqual({'collection': collection, 'limit': 10}, limited.args)
    # sort() lowers to the same Collection.limit API function, carrying
    # key/ascending arguments instead of a limit.
    sorted_collection = collection.sort('bar', True)
    self.assertEqual(
        ee.ApiFunction.lookup('Collection.limit'), sorted_collection.func)
    self.assertEqual({
        'collection': collection,
        'key': ee.String('bar'),
        'ascending': True
    }, sorted_collection.args)
    reverse_sorted_collection = collection.sort('bar', False)
    self.assertEqual(
        ee.ApiFunction.lookup('Collection.limit'),
        reverse_sorted_collection.func)
    self.assertEqual({
        'collection': collection,
        'key': ee.String('bar'),
        'ascending': False
    }, reverse_sorted_collection.args)
  def testFilter(self):
    """Verifies the behavior of filter() method."""
    collection = ee.Collection(ee.Function(), {})
    # We don't allow empty filters.
    self.assertRaises(Exception, collection.filter)
    filtered = collection.filter(ee.Filter.eq('foo', 1))
    self.assertEqual(ee.ApiFunction.lookup('Collection.filter'), filtered.func)
    self.assertEqual({
        'collection': collection,
        'filter': ee.Filter.eq('foo', 1)
    }, filtered.args)
    self.assertIsInstance(filtered, ee.Collection)
  def testFilterShortcuts(self):
    """Verifies the behavior of the various filtering shortcut methods."""
    collection = ee.Collection(ee.Function(), {})
    geom = {'type': 'Polygon', 'coordinates': [[[1, 2], [3, 4]]]}
    d1 = datetime.datetime.strptime('1/1/2000', '%m/%d/%Y')
    d2 = datetime.datetime.strptime('1/1/2001', '%m/%d/%Y')
    # Each shortcut must be exactly equivalent to filter() with the
    # corresponding ee.Filter constructor.
    self.assertEqual(
        collection.filter(ee.Filter.geometry(geom)),
        collection.filterBounds(geom))
    self.assertEqual(
        collection.filter(ee.Filter.date(d1)), collection.filterDate(d1))
    self.assertEqual(
        collection.filter(ee.Filter.date(d1, d2)), collection.filterDate(
            d1, d2))
    self.assertEqual(
        collection.filter(ee.Filter.eq('foo', 13)),
        collection.filterMetadata('foo', 'equals', 13))
  def testMapping(self):
    """Verifies the behavior of the map() method."""
    collection = ee.ImageCollection('foo')
    algorithm = lambda img: img.select('bar')
    mapped = collection.map(algorithm)
    self.assertIsInstance(mapped, ee.ImageCollection)
    self.assertEqual(ee.ApiFunction.lookup('Collection.map'), mapped.func)
    self.assertEqual(collection, mapped.args['collection'])
    # Need to do a serialized comparison for the function body because
    # variables returned from CustomFunction.variable() do not implement
    # __eq__.
    sig = {
        'returns': 'Image',
        'args': [{'name': '_MAPPING_VAR_0_0', 'type': 'Image'}]
    }
    expected_function = ee.CustomFunction(sig, algorithm)
    self.assertEqual(expected_function.serialize(),
                     mapped.args['baseAlgorithm'].serialize())
  def testNestedMapping(self):
    """Verifies that nested map() calls produce distinct variables."""
    collection = ee.FeatureCollection('foo')
    result = collection.map(lambda x: collection.map(lambda y: [x, y]))
    # Verify the signatures.
    self.assertEqual('_MAPPING_VAR_1_0',
                     result.args['baseAlgorithm']._signature['args'][0]['name'])
    inner_result = result.args['baseAlgorithm']._body
    self.assertEqual(
        '_MAPPING_VAR_0_0',
        inner_result.args['baseAlgorithm']._signature['args'][0]['name'])
    # Verify the references.
    self.assertEqual('_MAPPING_VAR_1_0',
                     inner_result.args['baseAlgorithm']._body[0].varName)
    self.assertEqual('_MAPPING_VAR_0_0',
                     inner_result.args['baseAlgorithm']._body[1].varName)
  def testIteration(self):
    """Verifies the behavior of the iterate() method."""
    collection = ee.ImageCollection('foo')
    first = ee.Image(0)
    algorithm = lambda img, prev: img.addBands(ee.Image(prev))
    result = collection.iterate(algorithm, first)
    self.assertEqual(ee.ApiFunction.lookup('Collection.iterate'), result.func)
    self.assertEqual(collection, result.args['collection'])
    self.assertEqual(first, result.args['first'])
    # Need to do a serialized comparison for the function body because
    # variables returned from CustomFunction.variable() do not implement
    # __eq__.
    sig = {
        'returns': 'Object',
        'args': [
            {'name': '_MAPPING_VAR_0_0', 'type': 'Image'},
            {'name': '_MAPPING_VAR_0_1', 'type': 'Object'}
        ]
    }
    expected_function = ee.CustomFunction(sig, algorithm)
    self.assertEqual(expected_function.serialize(),
                     result.args['function'].serialize())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
en
| 0.719278
|
#!/usr/bin/env python Test for the ee.collection module. Verifies the behavior of the sort() and limit() methods. Verifies the behavior of filter() method. # We don't allow empty filters. Verifies the behavior of the various filtering shortcut methods. Verifies the behavior of the map() method. # Need to do a serialized comparison for the function body because # variables returned from CustomFunction.variable() do not implement # __eq__. Verifies that nested map() calls produce distinct variables. # Verify the signatures. # Verify the references. Verifies the behavior of the iterate() method. # Need to do a serialized comparison for the function body because # variables returned from CustomFunction.variable() do not implement # __eq__.
| 2.780401
| 3
|
phasedArrayScript.py
|
alexsludds/Phased-Array-Plotter
| 0
|
6627468
|
import numpy as np
import matplotlib.pyplot as plt

# --- Array / signal parameters ------------------------------------------
numberOfAntennas = 2
phaseDifferenceBetweenAntennasInDegrees = 30
frequencyOfOperationInHertz = 5.*10**(8) #500Mhz
speedOfPropogationInMedium = 3.*10**(8) #speed of light in meters per second
distanceBetweenFirstAndLastAntenna = 2 #in meters
# BUG FIX: wavelength is propagation speed over frequency (lambda = c/f);
# the original computed f/c, the reciprocal.
wavelength = speedOfPropogationInMedium/frequencyOfOperationInHertz
distanceBetweenAdjacentAntennas = float(distanceBetweenFirstAndLastAntenna)/numberOfAntennas
numberOfPointsToConsiderForPlotting = 360
# BUG FIX: the per-element phase step must be in radians inside the
# complex exponential; the original used the raw degree value.
phaseDifferenceInRadians = np.deg2rad(phaseDifferenceBetweenAntennasInDegrees)
# BUG FIX: the accumulator must be complex — summing np.exp(1j*...) into a
# float array silently discards the imaginary part.
sumOfComplexExponentials = np.zeros(numberOfPointsToConsiderForPlotting, dtype=complex)
for theta in range(numberOfPointsToConsiderForPlotting):
    for i in range(numberOfAntennas):
        # Uniform linear array factor term for element i:
        #   exp(-j * k * d * i * sin(theta) + j * i * delta)
        # BUG FIX: use the spacing d between adjacent elements (the variable
        # computed above but previously unused), not the total aperture length.
        sumOfComplexExponentials[theta] += np.exp(
            -1j*2*np.pi/wavelength*distanceBetweenAdjacentAntennas*np.sin(2*np.pi*theta/360.)*i
            + 1j*phaseDifferenceInRadians*i)
# Radiated power pattern is the squared magnitude of the summed field.
r = np.abs(sumOfComplexExponentials)**2
theta = np.arange(0,2*np.pi,2*np.pi/numberOfPointsToConsiderForPlotting)
ax = plt.subplot(111, projection='polar')
ax.plot(theta, r)
ax.set_rmax(max(r))
#ax.set_rticks([0.5, 1, 1.5, 2]) # less radial ticks
ax.set_rlabel_position(-22.5)  # get radial labels away from plotted line
ax.grid(True)
ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt

# --- Array / signal parameters ------------------------------------------
numberOfAntennas = 2
phaseDifferenceBetweenAntennasInDegrees = 30
frequencyOfOperationInHertz = 5.*10**(8) #500Mhz
speedOfPropogationInMedium = 3.*10**(8) #speed of light in meters per second
distanceBetweenFirstAndLastAntenna = 2 #in meters
# BUG FIX: wavelength is propagation speed over frequency (lambda = c/f);
# the original computed f/c, the reciprocal.
wavelength = speedOfPropogationInMedium/frequencyOfOperationInHertz
distanceBetweenAdjacentAntennas = float(distanceBetweenFirstAndLastAntenna)/numberOfAntennas
numberOfPointsToConsiderForPlotting = 360
# BUG FIX: the per-element phase step must be in radians inside the
# complex exponential; the original used the raw degree value.
phaseDifferenceInRadians = np.deg2rad(phaseDifferenceBetweenAntennasInDegrees)
# BUG FIX: the accumulator must be complex — summing np.exp(1j*...) into a
# float array silently discards the imaginary part.
sumOfComplexExponentials = np.zeros(numberOfPointsToConsiderForPlotting, dtype=complex)
for theta in range(numberOfPointsToConsiderForPlotting):
    for i in range(numberOfAntennas):
        # Uniform linear array factor term for element i:
        #   exp(-j * k * d * i * sin(theta) + j * i * delta)
        # BUG FIX: use the spacing d between adjacent elements (the variable
        # computed above but previously unused), not the total aperture length.
        sumOfComplexExponentials[theta] += np.exp(
            -1j*2*np.pi/wavelength*distanceBetweenAdjacentAntennas*np.sin(2*np.pi*theta/360.)*i
            + 1j*phaseDifferenceInRadians*i)
# Radiated power pattern is the squared magnitude of the summed field.
r = np.abs(sumOfComplexExponentials)**2
theta = np.arange(0,2*np.pi,2*np.pi/numberOfPointsToConsiderForPlotting)
ax = plt.subplot(111, projection='polar')
ax.plot(theta, r)
ax.set_rmax(max(r))
#ax.set_rticks([0.5, 1, 1.5, 2]) # less radial ticks
ax.set_rlabel_position(-22.5)  # get radial labels away from plotted line
ax.grid(True)
ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
|
en
| 0.753811
|
#500Mhz #speed of light in meters per second #in meters #ax.set_rticks([0.5, 1, 1.5, 2]) # less radial ticks # get radial labels away from plotted line
| 2.820312
| 3
|
products/admin.py
|
kevin-ci/janeric2
| 1
|
6627469
|
<reponame>kevin-ci/janeric2<gh_stars>1-10
from django.contrib import admin
from .models import Product, Category, Product_Family, ProductSize
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Product."""
    list_display = (
        'active',
        'SKU',
        'name',
        'product_family',
        'category',
        'image',
        'product_size',
        'pack',
        'price',
        'description',
        'created',
        'modified',
    )
    ordering = ('product_family',)
    # Sidebar filters: active flag, category, and the category's division.
    list_filter = ('active', 'category', 'category__division')
class ProductSizeAdmin(admin.ModelAdmin):
    """Admin change-list configuration for ProductSize."""
    list_display = (
        'name',
        'friendly_name'
    )
    ordering = ('name',)
class Product_FamilyAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Product_Family."""
    list_display = (
        'name',
        'brand_name'
    )
class CategoryAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Category."""
    list_display = (
        'name',
        'division'
    )
# Attach each model to its admin configuration class.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Product_Family, Product_FamilyAdmin)
admin.site.register(ProductSize, ProductSizeAdmin)
|
from django.contrib import admin
from .models import Product, Category, Product_Family, ProductSize
# Register your models here.
class ProductAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Product."""
    list_display = (
        'active',
        'SKU',
        'name',
        'product_family',
        'category',
        'image',
        'product_size',
        'pack',
        'price',
        'description',
        'created',
        'modified',
    )
    ordering = ('product_family',)
    # Sidebar filters: active flag, category, and the category's division.
    list_filter = ('active', 'category', 'category__division')
class ProductSizeAdmin(admin.ModelAdmin):
    """Admin change-list configuration for ProductSize."""
    list_display = (
        'name',
        'friendly_name'
    )
    ordering = ('name',)
class Product_FamilyAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Product_Family."""
    list_display = (
        'name',
        'brand_name'
    )
class CategoryAdmin(admin.ModelAdmin):
    """Admin change-list configuration for Category."""
    list_display = (
        'name',
        'division'
    )
# Attach each model to its admin configuration class.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Product_Family, Product_FamilyAdmin)
admin.site.register(ProductSize, ProductSizeAdmin)
|
en
| 0.968259
|
# Register your models here.
| 1.901247
| 2
|
pubnub/models/consumer/push.py
|
17media/pubnub-python
| 0
|
6627470
|
class PNPushAddChannelResult(object):
    """Empty result envelope for the push add-channel operation."""
    pass
class PNPushRemoveChannelResult(object):
    """Empty result envelope for the push remove-channel operation."""
    pass
class PNPushRemoveAllChannelsResult(object):
    """Empty result envelope for the push remove-all-channels operation."""
    pass
class PNPushListProvisionsResult(object):
    """Result of the push list-provisions operation."""
    def __init__(self, channels):
        # Channels returned by the call, stored exactly as provided.
        self.channels = channels
|
class PNPushAddChannelResult(object):
    """Empty result envelope for the push add-channel operation."""
    pass
class PNPushRemoveChannelResult(object):
    """Empty result envelope for the push remove-channel operation."""
    pass
class PNPushRemoveAllChannelsResult(object):
    """Empty result envelope for the push remove-all-channels operation."""
    pass
class PNPushListProvisionsResult(object):
    """Result of the push list-provisions operation."""
    def __init__(self, channels):
        # Channels returned by the call, stored exactly as provided.
        self.channels = channels
|
none
| 1
| 1.869534
| 2
|
|
habitat/core/dataset.py
|
Lucaweihs/habitat-lab
| 1
|
6627471
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Implements dataset functionality to be used ``habitat.EmbodiedTask``.
``habitat.core.dataset`` abstracts over a collection of
``habitat.core.Episode``. Each episode consists of a single instantiation
of a ``habitat.Agent`` inside ``habitat.Env``.
"""
import copy
import json
import os
import random
from itertools import groupby
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
List,
Optional,
TypeVar,
)
import attr
import numpy as np
from habitat.config import Config
from habitat.core.utils import not_none_validator
# Sentinel scene name meaning "load every scene" in CONTENT_SCENES filters.
ALL_SCENES_MASK = "*"
@attr.s(auto_attribs=True, kw_only=True)
class Episode:
    r"""Base class for episode specification that includes initial position and
    rotation of agent, scene id, episode.
    :property episode_id: id of episode in the dataset, usually episode number.
    :property scene_id: id of scene in dataset.
    :property start_position: list of length 3 for cartesian coordinates
    :py:`(x, y, z)`.
    :property start_rotation: list of length 4 for (x, y, z, w) elements
    of unit quaternion (versor) representing 3D agent orientation
    (https://en.wikipedia.org/wiki/Versor). The rotation specifying the
    agent's orientation is relative to the world coordinate axes.
    This information is provided by a :ref:`Dataset` instance.
    """
    episode_id: str = attr.ib(default=None, validator=not_none_validator)
    scene_id: str = attr.ib(default=None, validator=not_none_validator)
    start_position: List[float] = attr.ib(
        default=None, validator=not_none_validator
    )
    start_rotation: List[float] = attr.ib(
        default=None, validator=not_none_validator
    )
    info: Optional[Dict[str, str]] = None
    # Transient field, not part of the episode spec: excluded from pickling
    # (see __getstate__) and reset to None on unpickling (see __setstate__).
    _shortest_path_cache: Any = attr.ib(init=False, default=None)
    def __getstate__(self):
        # Serialize every attribute except the transient shortest-path cache.
        return {
            k: v
            for k, v in self.__dict__.items()
            if k not in {"_shortest_path_cache"}
        }
    def __setstate__(self, state):
        # Restore the pickled fields and re-create an empty cache slot.
        self.__dict__.update(state)
        self.__dict__["_shortest_path_cache"] = None
# Generic episode type parameter used by Dataset: any subclass of Episode.
T = TypeVar("T", bound=Episode)
class Dataset(Generic[T]):
r"""Base class for dataset specification."""
episodes: List[T]
@staticmethod
def scene_from_scene_path(scene_path: str) -> str:
r"""Helper method to get the scene name from an episode.
:param scene_path: The path to the scene, assumes this is formatted
``/path/to/<scene_name>.<ext>``
:return: <scene_name> from the path
"""
return os.path.splitext(os.path.basename(scene_path))[0]
    @classmethod
    def get_scenes_to_load(cls, config: Config) -> List[str]:
        r"""Returns a list of scene names that would be loaded with this dataset.
        Useful for determing what scenes to split up among different workers.
        :param config: The config for the dataset
        :return: A list of scene names that would be loaded with the dataset
        """
        # NOTE(review): this instantiates the full dataset just to read its
        # scene ids, which can be expensive for large datasets — confirm that
        # is acceptable for the callers.
        assert cls.check_config_paths_exist(config)
        dataset = cls(config)
        return list(map(cls.scene_from_scene_path, dataset.scene_ids))
@classmethod
def build_content_scenes_filter(cls, config) -> Callable[[T], bool]:
r"""Returns a filter function that takes an episode and returns True if that
episode is valid under the CONTENT_SCENES feild of the provided config
"""
scenes_to_load = set(config.CONTENT_SCENES)
def _filter(ep: T) -> bool:
return (
ALL_SCENES_MASK in scenes_to_load
or cls.scene_from_scene_path(ep.scene_id) in scenes_to_load
)
return _filter
@property
def num_episodes(self) -> int:
r"""number of episodes in the dataset"""
return len(self.episodes)
@property
def scene_ids(self) -> List[str]:
r"""unique scene ids present in the dataset."""
return sorted({episode.scene_id for episode in self.episodes})
def get_scene_episodes(self, scene_id: str) -> List[T]:
r"""..
:param scene_id: id of scene in scene dataset.
:return: list of episodes for the :p:`scene_id`.
"""
return list(
filter(lambda x: x.scene_id == scene_id, iter(self.episodes))
)
def get_episodes(self, indexes: List[int]) -> List[T]:
r"""..
:param indexes: episode indices in dataset.
:return: list of episodes corresponding to indexes.
"""
return [self.episodes[episode_id] for episode_id in indexes]
def get_episode_iterator(self, *args: Any, **kwargs: Any) -> Iterator:
r"""Gets episode iterator with options. Options are specified in
:ref:`EpisodeIterator` documentation.
:param args: positional args for iterator constructor
:param kwargs: keyword args for iterator constructor
:return: episode iterator with specified behavior
To further customize iterator behavior for your :ref:`Dataset`
subclass, create a customized iterator class like
:ref:`EpisodeIterator` and override this method.
"""
return EpisodeIterator(self.episodes, *args, **kwargs)
def to_json(self) -> str:
class DatasetJSONEncoder(json.JSONEncoder):
def default(self, object):
if isinstance(object, np.ndarray):
return object.tolist()
return (
object.__getstate__()
if hasattr(object, "__getstate__")
else object.__dict__
)
result = DatasetJSONEncoder().encode(self)
return result
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
r"""Creates dataset from :p:`json_str`.
:param json_str: JSON string containing episodes information.
:param scenes_dir: directory containing graphical assets relevant
for episodes present in :p:`json_str`.
Directory containing relevant graphical assets of scenes is passed
through :p:`scenes_dir`.
"""
raise NotImplementedError
def filter_episodes(self, filter_fn: Callable[[T], bool]) -> "Dataset":
r"""Returns a new dataset with only the filtered episodes from the
original dataset.
:param filter_fn: function used to filter the episodes.
:return: the new dataset.
"""
new_episodes = []
for episode in self.episodes:
if filter_fn(episode):
new_episodes.append(episode)
new_dataset = copy.copy(self)
new_dataset.episodes = new_episodes
return new_dataset
def get_splits(
self,
num_splits: int,
episodes_per_split: Optional[int] = None,
remove_unused_episodes: bool = False,
collate_scene_ids: bool = True,
sort_by_episode_id: bool = False,
allow_uneven_splits: bool = False,
) -> List["Dataset"]:
r"""Returns a list of new datasets, each with a subset of the original
episodes.
:param num_splits: the number of splits to create.
:param episodes_per_split: if provided, each split will have up to this
many episodes. If it is not provided, each dataset will have
:py:`len(original_dataset.episodes) // num_splits` episodes. If
max_episodes_per_split is provided and is larger than this value,
it will be capped to this value.
:param remove_unused_episodes: once the splits are created, the extra
episodes will be destroyed from the original dataset. This saves
memory for large datasets.
:param collate_scene_ids: if true, episodes with the same scene id are
next to each other. This saves on overhead of switching between
scenes, but means multiple sequential episodes will be related to
each other because they will be in the same scene.
:param sort_by_episode_id: if true, sequences are sorted by their
episode ID in the returned splits.
:param allow_uneven_splits: if true, the last splits can be shorter
than the others. This is especially useful for splitting over
validation/test datasets in order to make sure that all episodes
are copied but none are duplicated.
:return: a list of new datasets, each with their own subset of
episodes.
All splits will have the same number of episodes, but no episodes will
be duplicated.
"""
if self.num_episodes < num_splits:
raise ValueError(
"Not enough episodes to create those many splits."
)
if episodes_per_split is not None:
if allow_uneven_splits:
raise ValueError(
"You probably don't want to specify allow_uneven_splits"
" and episodes_per_split."
)
if num_splits * episodes_per_split > self.num_episodes:
raise ValueError(
"Not enough episodes to create those many splits."
)
new_datasets = []
if episodes_per_split is not None:
stride = episodes_per_split
else:
stride = self.num_episodes // num_splits
split_lengths = [stride] * num_splits
if allow_uneven_splits:
episodes_left = self.num_episodes - stride * num_splits
split_lengths[:episodes_left] = [stride + 1] * episodes_left
assert sum(split_lengths) == self.num_episodes
num_episodes = sum(split_lengths)
rand_items = np.random.choice(
self.num_episodes, num_episodes, replace=False
)
if collate_scene_ids:
scene_ids = {}
for rand_ind in rand_items:
scene = self.episodes[rand_ind].scene_id
if scene not in scene_ids:
scene_ids[scene] = []
scene_ids[scene].append(rand_ind)
rand_items = []
list(map(rand_items.extend, scene_ids.values()))
ep_ind = 0
new_episodes = []
for nn in range(num_splits):
new_dataset = copy.copy(self) # Creates a shallow copy
new_dataset.episodes = []
new_datasets.append(new_dataset)
for _ii in range(split_lengths[nn]):
new_dataset.episodes.append(self.episodes[rand_items[ep_ind]])
ep_ind += 1
if sort_by_episode_id:
new_dataset.episodes.sort(key=lambda ep: ep.episode_id)
new_episodes.extend(new_dataset.episodes)
if remove_unused_episodes:
self.episodes = new_episodes
return new_datasets
class EpisodeIterator(Iterator):
    r"""Episode Iterator class that gives options for how a list of episodes
    should be iterated.
    Some of those options are desirable for the internal simulator to get
    higher performance. More context: simulator suffers overhead when switching
    between scenes, therefore episodes of the same scene should be loaded
    consecutively. However, if too many consecutive episodes from same scene
    are fed into RL model, the model will risk to overfit that scene.
    Therefore it's better to load same scene consecutively and switch once a
    number threshold is reached.
    Currently supports the following features:
    Cycling:
        when all episodes are iterated, cycle back to start instead of throwing
        StopIteration.
    Cycling with shuffle:
        when cycling back, shuffle episodes groups grouped by scene.
    Group by scene:
        episodes of same scene will be grouped and loaded consecutively.
    Set max scene repeat:
        set a number threshold on how many episodes from the same scene can be
        loaded consecutively.
    Sample episodes:
        sample the specified number of episodes.
    """
    def __init__(
        self,
        episodes: List[T],
        cycle: bool = True,
        shuffle: bool = False,
        group_by_scene: bool = True,
        max_scene_repeat_episodes: int = -1,
        max_scene_repeat_steps: int = -1,
        num_episode_sample: int = -1,
        step_repetition_range: float = 0.2,
        seed: Optional[int] = None,
    ):
        r"""..
        :param episodes: list of episodes.
        :param cycle: if :py:`True`, cycle back to first episodes when
            StopIteration.
        :param shuffle: if :py:`True`, shuffle scene groups when cycle. No
            effect if cycle is set to :py:`False`. Will shuffle grouped scenes
            if :p:`group_by_scene` is :py:`True`.
        :param group_by_scene: if :py:`True`, group episodes from same scene.
        :param max_scene_repeat_episodes: threshold of how many episodes from the same
            scene can be loaded consecutively. :py:`-1` for no limit
        :param max_scene_repeat_steps: threshold of how many steps from the same
            scene can be taken consecutively. :py:`-1` for no limit
        :param num_episode_sample: number of episodes to be sampled. :py:`-1`
            for no sampling.
        :param step_repetition_range: The maximum number of steps within each scene is
            uniformly drawn from
            [1 - step_repeat_range, 1 + step_repeat_range] * max_scene_repeat_steps
            on each scene switch. This stops all workers from swapping scenes at
            the same time
        """
        # NOTE(review): a seed of 0 is falsy and is therefore silently
        # ignored here; use ``seed is not None`` if zero seeds must be honored.
        if seed:
            random.seed(seed)
            np.random.seed(seed)
        # sample episodes
        if num_episode_sample >= 0:
            episodes = np.random.choice(
                episodes, num_episode_sample, replace=False
            )
        self.episodes = episodes
        self.cycle = cycle
        self.group_by_scene = group_by_scene
        self.shuffle = shuffle
        if shuffle:
            random.shuffle(self.episodes)
        if group_by_scene:
            self.episodes = self._group_scenes(self.episodes)
        self.max_scene_repetition_episodes = max_scene_repeat_episodes
        self.max_scene_repetition_steps = max_scene_repeat_steps
        self._rep_count = -1  # 0 corresponds to first episode already returned
        self._step_count = 0
        self._prev_scene_id: Optional[str] = None
        self._iterator = iter(self.episodes)
        self.step_repetition_range = step_repetition_range
        self._set_shuffle_intervals()
    def __iter__(self):
        return self
    def __next__(self):
        r"""The main logic for handling how episodes will be iterated.
        :return: next episode.
        """
        # Possibly rotate to another scene before drawing the next episode.
        self._forced_scene_switch_if()
        next_episode = next(self._iterator, None)
        if next_episode is None:
            # Exhausted: either stop or cycle back (optionally reshuffled).
            if not self.cycle:
                raise StopIteration
            self._iterator = iter(self.episodes)
            if self.shuffle:
                self._shuffle()
            next_episode = next(self._iterator)
        if (
            self._prev_scene_id != next_episode.scene_id
            and self._prev_scene_id is not None
        ):
            # Entered a new scene: reset the per-scene repetition counters.
            self._rep_count = 0
            self._step_count = 0
        self._prev_scene_id = next_episode.scene_id
        return next_episode
    def _forced_scene_switch(self) -> None:
        r"""Internal method to switch the scene. Moves remaining episodes
        from current scene to the end and switch to next scene episodes.
        """
        grouped_episodes = [
            list(g)
            for k, g in groupby(self._iterator, key=lambda x: x.scene_id)
        ]
        if len(grouped_episodes) > 1:
            # Ensure we swap by moving the current group to the end
            grouped_episodes = grouped_episodes[1:] + grouped_episodes[0:1]
        self._iterator = iter(sum(grouped_episodes, []))
    def _shuffle(self) -> None:
        r"""Internal method that shuffles the remaining episodes.
        If self.group_by_scene is true, then shuffle groups of scenes.
        """
        assert self.shuffle
        episodes = list(self._iterator)
        random.shuffle(episodes)
        if self.group_by_scene:
            episodes = self._group_scenes(episodes)
        self._iterator = iter(episodes)
    def _group_scenes(self, episodes):
        r"""Internal method that groups episodes by scene
        Groups will be ordered by the order the first episode of a given
        scene is in the list of episodes
        So if the episodes list shuffled before calling this method,
        the scenes will be in a random order
        """
        assert self.group_by_scene
        # Stable sort keyed by first-occurrence order of each scene id.
        scene_sort_keys = {}
        for e in episodes:
            if e.scene_id not in scene_sort_keys:
                scene_sort_keys[e.scene_id] = len(scene_sort_keys)
        return sorted(episodes, key=lambda e: scene_sort_keys[e.scene_id])
    def step_taken(self):
        # Called by the environment after each simulator step so that the
        # step-based scene-switch threshold can be enforced.
        self._step_count += 1
    @staticmethod
    def _randomize_value(value, value_range):
        # Uniform integer in [value * (1 - range), value * (1 + range)].
        return random.randint(
            int(value * (1 - value_range)), int(value * (1 + value_range))
        )
    def _set_shuffle_intervals(self):
        # Translate the -1 "no limit" sentinels into None thresholds.
        if self.max_scene_repetition_episodes > 0:
            self._max_rep_episode = self.max_scene_repetition_episodes
        else:
            self._max_rep_episode = None
        if self.max_scene_repetition_steps > 0:
            # Jitter the step threshold so parallel workers do not all swap
            # scenes at the same moment.
            self._max_rep_step = self._randomize_value(
                self.max_scene_repetition_steps, self.step_repetition_range
            )
        else:
            self._max_rep_step = None
    def _forced_scene_switch_if(self):
        do_switch = False
        self._rep_count += 1
        # Shuffle if a scene has been selected more than _max_rep_episode times in a row
        if (
            self._max_rep_episode is not None
            and self._rep_count >= self._max_rep_episode
        ):
            do_switch = True
        # Shuffle if a scene has been used for more than _max_rep_step steps in a row
        if (
            self._max_rep_step is not None
            and self._step_count >= self._max_rep_step
        ):
            do_switch = True
        if do_switch:
            self._forced_scene_switch()
            self._set_shuffle_intervals()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""Implements dataset functionality to be used ``habitat.EmbodiedTask``.
``habitat.core.dataset`` abstracts over a collection of
``habitat.core.Episode``. Each episode consists of a single instantiation
of a ``habitat.Agent`` inside ``habitat.Env``.
"""
import copy
import json
import os
import random
from itertools import groupby
from typing import (
Any,
Callable,
Dict,
Generic,
Iterator,
List,
Optional,
TypeVar,
)
import attr
import numpy as np
from habitat.config import Config
from habitat.core.utils import not_none_validator
ALL_SCENES_MASK = "*"
@attr.s(auto_attribs=True, kw_only=True)
class Episode:
    r"""Base class for episode specification that includes initial position and
    rotation of agent, scene id, episode.
    This information is provided by a :ref:`Dataset` instance.
    :property episode_id: id of episode in the dataset, usually episode number.
    :property scene_id: id of scene in dataset.
    :property start_position: list of length 3 for cartesian coordinates
        :py:`(x, y, z)`.
    :property start_rotation: list of length 4 for (x, y, z, w) elements
        of unit quaternion (versor) representing 3D agent orientation
        (https://en.wikipedia.org/wiki/Versor). The rotation specifying the
        agent's orientation is relative to the world coordinate axes.
    """
    episode_id: str = attr.ib(default=None, validator=not_none_validator)
    scene_id: str = attr.ib(default=None, validator=not_none_validator)
    start_position: List[float] = attr.ib(
        default=None, validator=not_none_validator
    )
    start_rotation: List[float] = attr.ib(
        default=None, validator=not_none_validator
    )
    # Optional free-form metadata about the episode.
    info: Optional[Dict[str, str]] = None
    # Cached shortest-path data; deliberately excluded from serialization
    # (see __getstate__ below) and never set through __init__.
    _shortest_path_cache: Any = attr.ib(init=False, default=None)
    def __getstate__(self):
        # Drop the cache when pickling/encoding: it is derived data and may
        # be large or unpicklable.
        return {
            k: v
            for k, v in self.__dict__.items()
            if k not in {"_shortest_path_cache"}
        }
    def __setstate__(self, state):
        # Restore the serialized fields and re-create an empty cache slot.
        self.__dict__.update(state)
        self.__dict__["_shortest_path_cache"] = None
T = TypeVar("T", bound=Episode)
class Dataset(Generic[T]):
    r"""Base class for dataset specification.

    Holds the flat list of episodes (``self.episodes``) and provides helpers
    for querying, filtering, splitting and serializing them.
    """
    # All episodes contained in this dataset.
    episodes: List[T]
    @staticmethod
    def scene_from_scene_path(scene_path: str) -> str:
        r"""Helper method to get the scene name from an episode.
        :param scene_path: The path to the scene, assumes this is formatted
            ``/path/to/<scene_name>.<ext>``
        :return: <scene_name> from the path
        """
        # Strip the directory and the extension: "/a/b/scene.glb" -> "scene".
        return os.path.splitext(os.path.basename(scene_path))[0]
    @classmethod
    def get_scenes_to_load(cls, config: Config) -> List[str]:
        r"""Returns a list of scene names that would be loaded with this dataset.
        Useful for determining what scenes to split up among different workers.
        :param config: The config for the dataset
        :return: A list of scene names that would be loaded with the dataset
        """
        # ``check_config_paths_exist`` must be provided by the subclass.
        assert cls.check_config_paths_exist(config)
        dataset = cls(config)
        return list(map(cls.scene_from_scene_path, dataset.scene_ids))
    @classmethod
    def build_content_scenes_filter(cls, config) -> Callable[[T], bool]:
        r"""Returns a filter function that takes an episode and returns True if
        that episode is valid under the CONTENT_SCENES field of the provided
        config.
        """
        scenes_to_load = set(config.CONTENT_SCENES)
        def _filter(ep: T) -> bool:
            # ALL_SCENES_MASK ("*") accepts every scene.
            return (
                ALL_SCENES_MASK in scenes_to_load
                or cls.scene_from_scene_path(ep.scene_id) in scenes_to_load
            )
        return _filter
    @property
    def num_episodes(self) -> int:
        r"""number of episodes in the dataset"""
        return len(self.episodes)
    @property
    def scene_ids(self) -> List[str]:
        r"""unique scene ids present in the dataset (sorted)."""
        return sorted({episode.scene_id for episode in self.episodes})
    def get_scene_episodes(self, scene_id: str) -> List[T]:
        r"""Return all episodes that take place in the given scene.
        :param scene_id: id of scene in scene dataset.
        :return: list of episodes for the :p:`scene_id`.
        """
        return list(
            filter(lambda x: x.scene_id == scene_id, iter(self.episodes))
        )
    def get_episodes(self, indexes: List[int]) -> List[T]:
        r"""Return the episodes at the given positions.
        :param indexes: episode indices in dataset (positions in
            ``self.episodes``, not episode ids).
        :return: list of episodes corresponding to indexes.
        """
        return [self.episodes[index] for index in indexes]
    def get_episode_iterator(self, *args: Any, **kwargs: Any) -> Iterator:
        r"""Gets episode iterator with options. Options are specified in
        :ref:`EpisodeIterator` documentation.
        :param args: positional args for iterator constructor
        :param kwargs: keyword args for iterator constructor
        :return: episode iterator with specified behavior
        To further customize iterator behavior for your :ref:`Dataset`
        subclass, create a customized iterator class like
        :ref:`EpisodeIterator` and override this method.
        """
        return EpisodeIterator(self.episodes, *args, **kwargs)
    def to_json(self) -> str:
        r"""Serialize this dataset (episodes included) to a JSON string."""
        # Encoder that understands numpy arrays and objects exposing
        # ``__getstate__`` (e.g. Episode, which drops its path cache).
        class DatasetJSONEncoder(json.JSONEncoder):
            def default(self, obj):  # ``obj`` avoids shadowing builtin ``object``
                if isinstance(obj, np.ndarray):
                    return obj.tolist()
                return (
                    obj.__getstate__()
                    if hasattr(obj, "__getstate__")
                    else obj.__dict__
                )
        result = DatasetJSONEncoder().encode(self)
        return result
    def from_json(
        self, json_str: str, scenes_dir: Optional[str] = None
    ) -> None:
        r"""Creates dataset from :p:`json_str`. Must be overridden.
        :param json_str: JSON string containing episodes information.
        :param scenes_dir: directory containing graphical assets relevant
            for episodes present in :p:`json_str`.
        Directory containing relevant graphical assets of scenes is passed
        through :p:`scenes_dir`.
        """
        raise NotImplementedError
    def filter_episodes(self, filter_fn: Callable[[T], bool]) -> "Dataset":
        r"""Returns a new dataset with only the filtered episodes from the
        original dataset.
        :param filter_fn: function used to filter the episodes.
        :return: the new dataset.
        """
        new_episodes = []
        for episode in self.episodes:
            if filter_fn(episode):
                new_episodes.append(episode)
        # Shallow copy keeps any subclass attributes; only the episode list
        # is replaced.
        new_dataset = copy.copy(self)
        new_dataset.episodes = new_episodes
        return new_dataset
    def get_splits(
        self,
        num_splits: int,
        episodes_per_split: Optional[int] = None,
        remove_unused_episodes: bool = False,
        collate_scene_ids: bool = True,
        sort_by_episode_id: bool = False,
        allow_uneven_splits: bool = False,
    ) -> List["Dataset"]:
        r"""Returns a list of new datasets, each with a subset of the original
        episodes.
        :param num_splits: the number of splits to create.
        :param episodes_per_split: if provided, each split will have up to this
            many episodes. If it is not provided, each dataset will have
            :py:`len(original_dataset.episodes) // num_splits` episodes. If
            max_episodes_per_split is provided and is larger than this value,
            it will be capped to this value.
        :param remove_unused_episodes: once the splits are created, the extra
            episodes will be destroyed from the original dataset. This saves
            memory for large datasets.
        :param collate_scene_ids: if true, episodes with the same scene id are
            next to each other. This saves on overhead of switching between
            scenes, but means multiple sequential episodes will be related to
            each other because they will be in the same scene.
        :param sort_by_episode_id: if true, sequences are sorted by their
            episode ID in the returned splits.
        :param allow_uneven_splits: if true, the last splits can be shorter
            than the others. This is especially useful for splitting over
            validation/test datasets in order to make sure that all episodes
            are copied but none are duplicated.
        :return: a list of new datasets, each with their own subset of
            episodes.
        All splits will have the same number of episodes, but no episodes will
        be duplicated.
        """
        if self.num_episodes < num_splits:
            raise ValueError(
                "Not enough episodes to create those many splits."
            )
        if episodes_per_split is not None:
            if allow_uneven_splits:
                raise ValueError(
                    "You probably don't want to specify allow_uneven_splits"
                    " and episodes_per_split."
                )
            if num_splits * episodes_per_split > self.num_episodes:
                raise ValueError(
                    "Not enough episodes to create those many splits."
                )
        new_datasets = []
        # Base number of episodes per split (evened out below if allowed).
        if episodes_per_split is not None:
            stride = episodes_per_split
        else:
            stride = self.num_episodes // num_splits
        split_lengths = [stride] * num_splits
        if allow_uneven_splits:
            # Hand the remainder out one extra episode at a time so that
            # every episode is used exactly once.
            episodes_left = self.num_episodes - stride * num_splits
            split_lengths[:episodes_left] = [stride + 1] * episodes_left
            assert sum(split_lengths) == self.num_episodes
        num_episodes = sum(split_lengths)
        # Random sample of episode indices, without replacement.
        rand_items = np.random.choice(
            self.num_episodes, num_episodes, replace=False
        )
        if collate_scene_ids:
            # Re-order the sampled indices so that indices belonging to the
            # same scene are contiguous (dict preserves insertion order).
            scene_ids: Dict[str, List[int]] = {}
            for rand_ind in rand_items:
                scene = self.episodes[rand_ind].scene_id
                scene_ids.setdefault(scene, []).append(rand_ind)
            rand_items = []
            for scene_inds in scene_ids.values():
                rand_items.extend(scene_inds)
        ep_ind = 0
        new_episodes = []
        for nn in range(num_splits):
            new_dataset = copy.copy(self)  # Creates a shallow copy
            new_dataset.episodes = []
            new_datasets.append(new_dataset)
            for _ii in range(split_lengths[nn]):
                new_dataset.episodes.append(self.episodes[rand_items[ep_ind]])
                ep_ind += 1
            if sort_by_episode_id:
                new_dataset.episodes.sort(key=lambda ep: ep.episode_id)
            new_episodes.extend(new_dataset.episodes)
        if remove_unused_episodes:
            self.episodes = new_episodes
        return new_datasets
class EpisodeIterator(Iterator):
    r"""Episode Iterator class that gives options for how a list of episodes
    should be iterated.
    Some of those options are desirable for the internal simulator to get
    higher performance. More context: simulator suffers overhead when switching
    between scenes, therefore episodes of the same scene should be loaded
    consecutively. However, if too many consecutive episodes from same scene
    are fed into RL model, the model will risk to overfit that scene.
    Therefore it's better to load same scene consecutively and switch once a
    number threshold is reached.
    Currently supports the following features:
    Cycling:
        when all episodes are iterated, cycle back to start instead of throwing
        StopIteration.
    Cycling with shuffle:
        when cycling back, shuffle episodes groups grouped by scene.
    Group by scene:
        episodes of same scene will be grouped and loaded consecutively.
    Set max scene repeat:
        set a number threshold on how many episodes from the same scene can be
        loaded consecutively.
    Sample episodes:
        sample the specified number of episodes.
    """
    def __init__(
        self,
        episodes: List[T],
        cycle: bool = True,
        shuffle: bool = False,
        group_by_scene: bool = True,
        max_scene_repeat_episodes: int = -1,
        max_scene_repeat_steps: int = -1,
        num_episode_sample: int = -1,
        step_repetition_range: float = 0.2,
        seed: Optional[int] = None,
    ):
        r"""..
        :param episodes: list of episodes.
        :param cycle: if :py:`True`, cycle back to first episodes when
            StopIteration.
        :param shuffle: if :py:`True`, shuffle scene groups when cycle. No
            effect if cycle is set to :py:`False`. Will shuffle grouped scenes
            if :p:`group_by_scene` is :py:`True`.
        :param group_by_scene: if :py:`True`, group episodes from same scene.
        :param max_scene_repeat_episodes: threshold of how many episodes from the same
            scene can be loaded consecutively. :py:`-1` for no limit
        :param max_scene_repeat_steps: threshold of how many steps from the same
            scene can be taken consecutively. :py:`-1` for no limit
        :param num_episode_sample: number of episodes to be sampled. :py:`-1`
            for no sampling.
        :param step_repetition_range: The maximum number of steps within each scene is
            uniformly drawn from
            [1 - step_repeat_range, 1 + step_repeat_range] * max_scene_repeat_steps
            on each scene switch. This stops all workers from swapping scenes at
            the same time
        :param seed: if provided, seeds both ``random`` and ``np.random``.
        """
        # Fix: ``if seed:`` silently ignored a seed of 0; compare to None
        # so every explicitly provided seed (including 0) is honored.
        if seed is not None:
            random.seed(seed)
            np.random.seed(seed)
        # Optionally sub-sample the episode list (without replacement).
        if num_episode_sample >= 0:
            episodes = np.random.choice(
                episodes, num_episode_sample, replace=False
            )
        self.episodes = episodes
        self.cycle = cycle
        self.group_by_scene = group_by_scene
        self.shuffle = shuffle
        if shuffle:
            random.shuffle(self.episodes)
        if group_by_scene:
            self.episodes = self._group_scenes(self.episodes)
        self.max_scene_repetition_episodes = max_scene_repeat_episodes
        self.max_scene_repetition_steps = max_scene_repeat_steps
        self._rep_count = -1  # 0 corresponds to first episode already returned
        self._step_count = 0
        self._prev_scene_id: Optional[str] = None
        self._iterator = iter(self.episodes)
        self.step_repetition_range = step_repetition_range
        self._set_shuffle_intervals()
    def __iter__(self):
        return self
    def __next__(self):
        r"""The main logic for handling how episodes will be iterated.
        :return: next episode.
        """
        # Possibly rotate to another scene before drawing the next episode.
        self._forced_scene_switch_if()
        next_episode = next(self._iterator, None)
        if next_episode is None:
            # Exhausted: either stop or cycle back (optionally reshuffled).
            if not self.cycle:
                raise StopIteration
            self._iterator = iter(self.episodes)
            if self.shuffle:
                self._shuffle()
            next_episode = next(self._iterator)
        if (
            self._prev_scene_id != next_episode.scene_id
            and self._prev_scene_id is not None
        ):
            # Entered a new scene: reset the per-scene repetition counters.
            self._rep_count = 0
            self._step_count = 0
        self._prev_scene_id = next_episode.scene_id
        return next_episode
    def _forced_scene_switch(self) -> None:
        r"""Internal method to switch the scene. Moves remaining episodes
        from current scene to the end and switch to next scene episodes.
        """
        grouped_episodes = [
            list(g)
            for k, g in groupby(self._iterator, key=lambda x: x.scene_id)
        ]
        if len(grouped_episodes) > 1:
            # Ensure we swap by moving the current group to the end.
            grouped_episodes = grouped_episodes[1:] + grouped_episodes[0:1]
        self._iterator = iter(sum(grouped_episodes, []))
    def _shuffle(self) -> None:
        r"""Internal method that shuffles the remaining episodes.
        If self.group_by_scene is true, then shuffle groups of scenes.
        """
        assert self.shuffle
        episodes = list(self._iterator)
        random.shuffle(episodes)
        if self.group_by_scene:
            episodes = self._group_scenes(episodes)
        self._iterator = iter(episodes)
    def _group_scenes(self, episodes):
        r"""Internal method that groups episodes by scene.
        Groups will be ordered by the order the first episode of a given
        scene is in the list of episodes, so if the episodes list was
        shuffled before calling this method, the scenes will be in a
        random order.
        """
        assert self.group_by_scene
        # Stable sort keyed by first-occurrence order of each scene id.
        scene_sort_keys = {}
        for e in episodes:
            if e.scene_id not in scene_sort_keys:
                scene_sort_keys[e.scene_id] = len(scene_sort_keys)
        return sorted(episodes, key=lambda e: scene_sort_keys[e.scene_id])
    def step_taken(self):
        # Called by the environment after each simulator step so that the
        # step-based scene-switch threshold can be enforced.
        self._step_count += 1
    @staticmethod
    def _randomize_value(value, value_range):
        # Uniform integer in [value * (1 - range), value * (1 + range)].
        return random.randint(
            int(value * (1 - value_range)), int(value * (1 + value_range))
        )
    def _set_shuffle_intervals(self):
        # Translate the -1 "no limit" sentinels into None thresholds.
        if self.max_scene_repetition_episodes > 0:
            self._max_rep_episode = self.max_scene_repetition_episodes
        else:
            self._max_rep_episode = None
        if self.max_scene_repetition_steps > 0:
            # Jitter the step threshold so parallel workers do not all swap
            # scenes at the same moment.
            self._max_rep_step = self._randomize_value(
                self.max_scene_repetition_steps, self.step_repetition_range
            )
        else:
            self._max_rep_step = None
    def _forced_scene_switch_if(self):
        do_switch = False
        self._rep_count += 1
        # Switch if a scene has been selected more than _max_rep_episode times in a row.
        if (
            self._max_rep_episode is not None
            and self._rep_count >= self._max_rep_episode
        ):
            do_switch = True
        # Switch if a scene has been used for more than _max_rep_step steps in a row.
        if (
            self._max_rep_step is not None
            and self._step_count >= self._max_rep_step
        ):
            do_switch = True
        if do_switch:
            self._forced_scene_switch()
            self._set_shuffle_intervals()
|
en
| 0.840428
|
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Implements dataset functionality to be used ``habitat.EmbodiedTask``. ``habitat.core.dataset`` abstracts over a collection of ``habitat.core.Episode``. Each episode consists of a single instantiation of a ``habitat.Agent`` inside ``habitat.Env``. Base class for episode specification that includes initial position and rotation of agent, scene id, episode. :property episode_id: id of episode in the dataset, usually episode number. :property scene_id: id of scene in dataset. :property start_position: list of length 3 for cartesian coordinates :py:`(x, y, z)`. :property start_rotation: list of length 4 for (x, y, z, w) elements of unit quaternion (versor) representing 3D agent orientation (https://en.wikipedia.org/wiki/Versor). The rotation specifying the agent's orientation is relative to the world coordinate axes. This information is provided by a :ref:`Dataset` instance. Base class for dataset specification. Helper method to get the scene name from an episode. :param scene_path: The path to the scene, assumes this is formatted ``/path/to/<scene_name>.<ext>`` :return: <scene_name> from the path Returns a list of scene names that would be loaded with this dataset. Useful for determing what scenes to split up among different workers. :param config: The config for the dataset :return: A list of scene names that would be loaded with the dataset Returns a filter function that takes an episode and returns True if that episode is valid under the CONTENT_SCENES feild of the provided config number of episodes in the dataset unique scene ids present in the dataset. .. :param scene_id: id of scene in scene dataset. :return: list of episodes for the :p:`scene_id`. .. :param indexes: episode indices in dataset. :return: list of episodes corresponding to indexes. 
Gets episode iterator with options. Options are specified in :ref:`EpisodeIterator` documentation. :param args: positional args for iterator constructor :param kwargs: keyword args for iterator constructor :return: episode iterator with specified behavior To further customize iterator behavior for your :ref:`Dataset` subclass, create a customized iterator class like :ref:`EpisodeIterator` and override this method. Creates dataset from :p:`json_str`. :param json_str: JSON string containing episodes information. :param scenes_dir: directory containing graphical assets relevant for episodes present in :p:`json_str`. Directory containing relevant graphical assets of scenes is passed through :p:`scenes_dir`. Returns a new dataset with only the filtered episodes from the original dataset. :param filter_fn: function used to filter the episodes. :return: the new dataset. Returns a list of new datasets, each with a subset of the original episodes. :param num_splits: the number of splits to create. :param episodes_per_split: if provided, each split will have up to this many episodes. If it is not provided, each dataset will have :py:`len(original_dataset.episodes) // num_splits` episodes. If max_episodes_per_split is provided and is larger than this value, it will be capped to this value. :param remove_unused_episodes: once the splits are created, the extra episodes will be destroyed from the original dataset. This saves memory for large datasets. :param collate_scene_ids: if true, episodes with the same scene id are next to each other. This saves on overhead of switching between scenes, but means multiple sequential episodes will be related to each other because they will be in the same scene. :param sort_by_episode_id: if true, sequences are sorted by their episode ID in the returned splits. :param allow_uneven_splits: if true, the last splits can be shorter than the others. 
This is especially useful for splitting over validation/test datasets in order to make sure that all episodes are copied but none are duplicated. :return: a list of new datasets, each with their own subset of episodes. All splits will have the same number of episodes, but no episodes will be duplicated. # Creates a shallow copy Episode Iterator class that gives options for how a list of episodes should be iterated. Some of those options are desirable for the internal simulator to get higher performance. More context: simulator suffers overhead when switching between scenes, therefore episodes of the same scene should be loaded consecutively. However, if too many consecutive episodes from same scene are feed into RL model, the model will risk to overfit that scene. Therefore it's better to load same scene consecutively and switch once a number threshold is reached. Currently supports the following features: Cycling: when all episodes are iterated, cycle back to start instead of throwing StopIteration. Cycling with shuffle: when cycling back, shuffle episodes groups grouped by scene. Group by scene: episodes of same scene will be grouped and loaded consecutively. Set max scene repeat: set a number threshold on how many episodes from the same scene can be loaded consecutively. Sample episodes: sample the specified number of episodes. .. :param episodes: list of episodes. :param cycle: if :py:`True`, cycle back to first episodes when StopIteration. :param shuffle: if :py:`True`, shuffle scene groups when cycle. No effect if cycle is set to :py:`False`. Will shuffle grouped scenes if :p:`group_by_scene` is :py:`True`. :param group_by_scene: if :py:`True`, group episodes from same scene. :param max_scene_repeat_episodes: threshold of how many episodes from the same scene can be loaded consecutively. :py:`-1` for no limit :param max_scene_repeat_steps: threshold of how many steps from the same scene can be taken consecutively. 
:py:`-1` for no limit :param num_episode_sample: number of episodes to be sampled. :py:`-1` for no sampling. :param step_repetition_range: The maximum number of steps within each scene is uniformly drawn from [1 - step_repeat_range, 1 + step_repeat_range] * max_scene_repeat_steps on each scene switch. This stops all workers from swapping scenes at the same time # sample episodes # 0 corresponds to first episode already returned The main logic for handling how episodes will be iterated. :return: next episode. Internal method to switch the scene. Moves remaining episodes from current scene to the end and switch to next scene episodes. # Ensure we swap by moving the current group to the end Internal method that shuffles the remaining episodes. If self.group_by_scene is true, then shuffle groups of scenes. Internal method that groups episodes by scene Groups will be ordered by the order the first episode of a given scene is in the list of episodes So if the episodes list shuffled before calling this method, the scenes will be in a random order # Shuffle if a scene has been selected more than _max_rep_episode times in a row # Shuffle if a scene has been used for more than _max_rep_step steps in a row
| 2.677088
| 3
|
function_scheduling_distributed_framework/publishers/rocketmq_publisher.py
|
lokibin1010/distributed_framework
| 1
|
6627472
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2020/7/9 0008 12:12
import time
from function_scheduling_distributed_framework import frame_config
from function_scheduling_distributed_framework.publishers.base_publisher import AbstractPublisher
class RocketmqPublisher(AbstractPublisher, ):
    """Publisher that pushes task messages onto a RocketMQ topic.

    Producers are cached per group id at class level so a single process
    never creates two producers for the same group (the rocketmq client
    errors if it does).
    """

    # Maps group id -> started rocketmq Producer, shared by all instances.
    group_id__rocketmq_producer = {}

    def custom_init(self):
        # Import lazily: the `rocketmq` package only installs on Linux/macOS,
        # so a module-level import would break users on other platforms who
        # never use this publisher.
        try:
            from rocketmq.client import Producer
        except Exception as e:
            # print(traceback.format_exc())
            raise ImportError(f'rocketmq包 只支持linux和mac {str(e)}')
        group_id = f'g-{self._queue_name}'
        # Creating several producers of the same group in one process raises,
        # so reuse the cached producer when one exists for this group.
        if group_id not in self.__class__.group_id__rocketmq_producer:
            producer = Producer(group_id)
            producer.set_namesrv_addr(frame_config.ROCKETMQ_NAMESRV_ADDR)
            producer.start()
            self.__class__.group_id__rocketmq_producer[group_id] = producer
        else:
            producer = self.__class__.group_id__rocketmq_producer[group_id]
        self._producer = producer

    def concrete_realization_of_publish(self, msg):
        """Send one already-serialized message string synchronously."""
        try:
            from rocketmq.client import Message
        except Exception as e:
            # print(traceback.format_exc())
            raise ImportError(f'rocketmq包 只支持linux和mac {str(e)}')
        rocket_msg = Message(self._queue_name)
        rocket_msg.set_keys(msg)  # keys make the message searchable in the console
        # rocket_msg.set_tags('XXX')
        rocket_msg.set_body(msg)
        # print(msg)
        self._producer.send_sync(rocket_msg)

    def clear(self):
        # The Python rocketmq client exposes no API to reset offsets or delete
        # a topic, so purging the queue is unsupported here (Java client only).
        self.logger.error('清除队列,python版的rocket包太弱了,没有方法设置偏移量或者删除主题。java才能做到')

    def get_message_count(self):
        """Message counting is unsupported by the Python client; always 0.

        Logs a reminder at most once every 300 seconds.
        """
        if time.time() - getattr(self, '_last_warning_count', 0) > 300:
            setattr(self, '_last_warning_count', time.time())
            self.logger.warning('获取消息数量,python版的rocket包太弱了,没找到方法。java才能做到。')
        return 0

    def close(self):
        # NOTE(review): this shuts down a producer that may be shared via the
        # class-level cache, affecting other instances of the same group --
        # confirm intended.
        self._producer.shutdown()
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2020/7/9 0008 12:12
import time
from function_scheduling_distributed_framework import frame_config
from function_scheduling_distributed_framework.publishers.base_publisher import AbstractPublisher
class RocketmqPublisher(AbstractPublisher, ):
    """Publisher that pushes task messages onto a RocketMQ topic.

    Producers are cached per group id at class level so a single process
    never creates two producers for the same group (the rocketmq client
    errors if it does).
    """

    # Maps group id -> started rocketmq Producer, shared by all instances.
    group_id__rocketmq_producer = {}

    def custom_init(self):
        # Import lazily: the `rocketmq` package only installs on Linux/macOS,
        # so a module-level import would break users on other platforms who
        # never use this publisher.
        try:
            from rocketmq.client import Producer
        except Exception as e:
            # print(traceback.format_exc())
            raise ImportError(f'rocketmq包 只支持linux和mac {str(e)}')
        group_id = f'g-{self._queue_name}'
        # Creating several producers of the same group in one process raises,
        # so reuse the cached producer when one exists for this group.
        if group_id not in self.__class__.group_id__rocketmq_producer:
            producer = Producer(group_id)
            producer.set_namesrv_addr(frame_config.ROCKETMQ_NAMESRV_ADDR)
            producer.start()
            self.__class__.group_id__rocketmq_producer[group_id] = producer
        else:
            producer = self.__class__.group_id__rocketmq_producer[group_id]
        self._producer = producer

    def concrete_realization_of_publish(self, msg):
        """Send one already-serialized message string synchronously."""
        try:
            from rocketmq.client import Message
        except Exception as e:
            # print(traceback.format_exc())
            raise ImportError(f'rocketmq包 只支持linux和mac {str(e)}')
        rocket_msg = Message(self._queue_name)
        rocket_msg.set_keys(msg)  # keys make the message searchable in the console
        # rocket_msg.set_tags('XXX')
        rocket_msg.set_body(msg)
        # print(msg)
        self._producer.send_sync(rocket_msg)

    def clear(self):
        # The Python rocketmq client exposes no API to reset offsets or delete
        # a topic, so purging the queue is unsupported here (Java client only).
        self.logger.error('清除队列,python版的rocket包太弱了,没有方法设置偏移量或者删除主题。java才能做到')

    def get_message_count(self):
        """Message counting is unsupported by the Python client; always 0.

        Logs a reminder at most once every 300 seconds.
        """
        if time.time() - getattr(self, '_last_warning_count', 0) > 300:
            setattr(self, '_last_warning_count', time.time())
            self.logger.warning('获取消息数量,python版的rocket包太弱了,没找到方法。java才能做到。')
        return 0

    def close(self):
        # NOTE(review): this shuts down a producer that may be shared via the
        # class-level cache, affecting other instances of the same group --
        # confirm intended.
        self._producer.shutdown()
|
en
| 0.229933
|
# -*- coding: utf-8 -*- # @Author : ydf # @Time : 2020/7/9 0008 12:12 # print(traceback.format_exc()) # 同一个进程中创建多个同组消费者会报错。 # print(traceback.format_exc()) # 利于检索 # rocket_msg.set_tags('XXX') # print(msg)
| 2.191713
| 2
|
models/models.py
|
HP-bkeys/odoo-s3-storage
| 0
|
6627473
|
<filename>models/models.py
# -*- coding: utf-8 -*-
"""
s3-storage.models
~~~~~~~~~~~~~~~~~
Use s3 as file storage mechanism
:copyright: (c) 2017 by brolycjw.
:license: MIT License, see LICENSE for more details.
"""
import base64
import hashlib

from odoo import models

import s3_helper
class S3Attachment(models.Model):
    """Extends ir.attachment to implement the S3 storage engine."""

    _inherit = "ir.attachment"

    def _connect_to_S3_bucket(self, s3, bucket_name):
        """Return the S3 bucket *bucket_name*, creating it when missing."""
        s3_bucket = s3.Bucket(bucket_name)
        exists = s3_helper.bucket_exists(s3, bucket_name)
        if not exists:
            s3_bucket = s3.create_bucket(Bucket=bucket_name)
        return s3_bucket

    def _file_read(self, fname, bin_size=False):
        """Read attachment *fname* and return its content base64-encoded.

        When storage is configured as ``s3://...`` the object is fetched from
        the bucket; files written before odoo-S3 was installed may still live
        on the local filesystem, so fall back to the standard reader when the
        key is absent. Returns False when the file is found nowhere.
        """
        storage = self._storage()
        if storage[:5] == 's3://':
            access_key_id, secret_key, bucket_name, do_space_url = s3_helper.parse_bucket_url(
                storage)
            s3 = s3_helper.get_resource(
                access_key_id, secret_key, do_space_url)
            s3_bucket = self._connect_to_S3_bucket(s3, bucket_name)
            file_exists = s3_helper.object_exists(s3, s3_bucket.name, fname)
            if not file_exists:
                # Some old files (prior to the installation of odoo-S3) may
                # still be stored in the file system even though
                # ir_attachment.location is configured to use S3
                try:
                    read = super(S3Attachment, self)._file_read(
                        fname, bin_size=False)
                except Exception:
                    # Could not find the file in the file system either.
                    return False
            else:
                s3_key = s3.Object(s3_bucket.name, fname)
                # base64.b64encode works on both Python 2 and 3; the former
                # ``.encode('base64')`` string codec exists only on Python 2
                # and raises on Python 3.
                read = base64.b64encode(s3_key.get()['Body'].read())
        else:
            read = super(S3Attachment, self)._file_read(fname, bin_size=False)
        return read

    def _file_write(self, value, checksum):
        """Store base64-encoded *value*; return the sha1-of-content file name.

        Writes to S3 when storage is configured as ``s3://...``; otherwise
        delegates to the standard filesystem writer.
        """
        storage = self._storage()
        if storage[:5] == 's3://':
            access_key_id, secret_key, bucket_name, do_space_url = s3_helper.parse_bucket_url(
                storage)
            s3 = s3_helper.get_resource(
                access_key_id, secret_key, do_space_url)
            s3_bucket = self._connect_to_S3_bucket(s3, bucket_name)
            # base64 module instead of the Python-2-only 'base64' str codec.
            bin_value = base64.b64decode(value)
            fname = hashlib.sha1(bin_value).hexdigest()
            s3.Object(s3_bucket.name, fname).put(Body=bin_value)
        else:
            fname = super(S3Attachment, self)._file_write(value, checksum)
        return fname
|
<filename>models/models.py
# -*- coding: utf-8 -*-
"""
s3-storage.models
~~~~~~~~~~~~~~~~~
Use s3 as file storage mechanism
:copyright: (c) 2017 by brolycjw.
:license: MIT License, see LICENSE for more details.
"""
import hashlib
from odoo import models
import s3_helper
class S3Attachment(models.Model):
    """Extends ir.attachment to implement the S3 storage engine
    """
    _inherit = "ir.attachment"

    def _connect_to_S3_bucket(self, s3, bucket_name):
        # Return the named bucket, creating it when it does not yet exist.
        s3_bucket = s3.Bucket(bucket_name)
        exists = s3_helper.bucket_exists(s3, bucket_name)
        if not exists:
            s3_bucket = s3.create_bucket(Bucket=bucket_name)
        return s3_bucket

    def _file_read(self, fname, bin_size=False):
        """Read attachment *fname*; returns its content base64-encoded,
        or False when it exists neither on S3 nor on the filesystem."""
        storage = self._storage()
        if storage[:5] == 's3://':
            access_key_id, secret_key, bucket_name, do_space_url = s3_helper.parse_bucket_url(
                storage)
            s3 = s3_helper.get_resource(
                access_key_id, secret_key, do_space_url)
            s3_bucket = self._connect_to_S3_bucket(s3, bucket_name)
            file_exists = s3_helper.object_exists(s3, s3_bucket.name, fname)
            if not file_exists:
                # Some old files (prior to the installation of odoo-S3) may
                # still be stored in the file system even though
                # ir_attachment.location is configured to use S3
                try:
                    read = super(S3Attachment, self)._file_read(
                        fname, bin_size=False)
                except Exception:
                    # Could not find the file in the file system either.
                    return False
            else:
                s3_key = s3.Object(s3_bucket.name, fname)
                # NOTE(review): .encode('base64') is the Python-2-only string
                # codec; it fails on Python 3 -- confirm the target runtime.
                read = s3_key.get()['Body'].read().encode('base64')
        else:
            read = super(S3Attachment, self)._file_read(fname, bin_size=False)
        return read

    def _file_write(self, value, checksum):
        """Store base64 *value*; file name is the sha1 of the raw content."""
        storage = self._storage()
        if storage[:5] == 's3://':
            access_key_id, secret_key, bucket_name, do_space_url = s3_helper.parse_bucket_url(
                storage)
            s3 = s3_helper.get_resource(
                access_key_id, secret_key, do_space_url)
            s3_bucket = self._connect_to_S3_bucket(s3, bucket_name)
            # NOTE(review): .decode('base64') is Python-2-only as well.
            bin_value = value.decode('base64')
            fname = hashlib.sha1(bin_value).hexdigest()
            s3.Object(s3_bucket.name, fname).put(Body=bin_value)
        else:
            fname = super(S3Attachment, self)._file_write(value, checksum)
        return fname
|
en
| 0.855522
|
# -*- coding: utf-8 -*- s3-storage.models ~~~~~~~~~~~~~~~~~ Use s3 as file storage mechanism :copyright: (c) 2017 by brolycjw. :license: MIT License, see LICENSE for more details. Extends ir.attachment to implement the S3 storage engine # Some old files (prior to the installation of odoo-S3) may # still be stored in the file system even though # ir_attachment.location is configured to use S3 # Could not find the file in the file system either.
| 2.239696
| 2
|
datasets/flow_datasets.py
|
Wassouli/Projet-prat
| 0
|
6627474
|
<reponame>Wassouli/Projet-prat
import imageio
import numpy as np
import random
from path import Path
from abc import abstractmethod, ABCMeta
from torch.utils.data import Dataset
from utils.flow_utils import load_flow
from PIL import Image
import os, sys
import glob
import matplotlib.pyplot as plt
from PIL import Image
import os, sys
import imageio
from datetime import timedelta, date
import cv2
from google.colab.patches import cv2_imshow
class ImgSeqDataset(Dataset, metaclass=ABCMeta):
    """Abstract base dataset yielding short sequences of consecutive frames.

    Subclasses implement :meth:`collect_samples` and return a list of dicts,
    each with an ``'imgs'`` entry listing ``n_frames`` image paths relative
    to ``root``.
    """

    def __init__(self, root, n_frames, input_transform=None, co_transform=None,
                 target_transform=None, ap_transform=None):
        self.root = Path(root)
        #print("gggggg",self.root)
        self.n_frames = n_frames
        self.input_transform = input_transform    # per-image transform (e.g. to tensor)
        self.co_transform = co_transform          # joint transform over the whole sequence
        self.ap_transform = ap_transform          # appearance / photometric transform
        self.target_transform = target_transform  # kept for subclasses; unused here
        self.samples = self.collect_samples()

    @abstractmethod
    def collect_samples(self):
        """Return the list of sample dicts; implemented by subclasses."""
        pass

    def _load_sample(self, s):
        # Load every frame of the sample as a float32 array.
        images = s['imgs']
        images = [imageio.imread(self.root / p).astype(np.float32) for p in images]
        return images

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        images= self._load_sample(self.samples[idx])
        if self.co_transform is not None:
            # In unsupervised learning, there is no need to change target with image
            images = self.co_transform(images)
        if self.input_transform is not None:
            images = [self.input_transform(i) for i in images]
        data = {'img{}'.format(i + 1): p for i, p in enumerate(images)}
        if self.ap_transform is not None:
            # Photometrically augmented clones are returned alongside the
            # clean frames under the ``img{i}_ph`` keys.
            imgs_ph = self.ap_transform(
                [data['img{}'.format(i + 1)].clone() for i in range(self.n_frames)])
            for i in range(self.n_frames):
                data['img{}_ph'.format(i + 1)] = imgs_ph[i]
        """
        if self.target_transform is not None:
            for key in self.target_transform.keys():
                target[key] = self.target_transform[key](target[key])
        """
        return data
class SintelRaw(ImgSeqDataset):
    """Originally the Sintel raw-movie dataset; this version has been
    repurposed to build frame sequences from daily ``NATL_AN_<date>.png``
    images stored on a hard-coded Google Drive path.

    NOTE(review): ``root`` is effectively ignored; the image directory is
    hard-coded to ``/content/drive/MyDrive/data1/`` -- confirm intended.
    """

    def __init__(self, root, n_frames=2, transform=None, co_transform=None):
        super(SintelRaw, self).__init__(root, n_frames, input_transform=transform,
                                        co_transform=co_transform)

    def collect_samples(self):
        #txtfiles = []
        #for file in glob.glob("*.txt"):
        #    txtfiles.append(file)
        path = '/content/drive/MyDrive/data1/'
        dirs = os.listdir( path )
        scene_list = self.root
        #print("jjjjjjjjjjjjjjjjjjjjjjj",scene_list)
        samples = []
        img_list =[]
        images_names=[]
        # Collect every PNG file name present in the data directory.
        dirListing = os.listdir("/content/drive/MyDrive/data1/")
        for item in dirListing:
            if ".png" in item:
                images_names.append(item)

        def daterange(start_date, end_date):
            # Yield each date in [start_date, end_date).
            for n in range(int((end_date - start_date).days)):
                yield start_date + timedelta(n)

        start_date = date(2006, 12, 27)
        end_date = date(2010, 4, 6)
        item=[]
        # Keep images in chronological order by walking the date range and
        # keeping only files that actually exist.
        for single_date in daterange(start_date, end_date):
            t="NATL_AN_"+single_date.strftime("%Y-%m-%d")+".png"
            if ( t in images_names):
                img_list.append("/content/drive/MyDrive/data1/"+t)
        # Sliding window of n_frames consecutive images per sample.
        for st in range(0, len(img_list) - self.n_frames + 1):
            seq = img_list[st:st + self.n_frames]
            #print(seq)
            sample = {'imgs': [i for i in seq]}
            samples.append(sample)
        print(samples)
        return samples
class Sintel(ImgSeqDataset):
    """MPI Sintel optical-flow dataset (clean or final pass).

    Samples are built per scene from consecutive frames; when ``with_flow``
    is set, each sample also records the ``.flo`` ground-truth path that is
    evaluated (flow_12 for 2 frames, flow_23 for 3 frames).
    """

    def __init__(self, root, n_frames=2, type='clean', split='training',
                 subsplit='trainval', with_flow=True, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None, ):
        self.dataset_type = type
        self.with_flow = with_flow
        #print("hna")
        self.split = split
        self.subsplit = subsplit
        self.training_scene = ['alley_1', 'ambush_4', 'ambush_6', 'ambush_7', 'bamboo_2',
                               'bandage_2', 'cave_2', 'market_2', 'market_5', 'shaman_2',
                               'sleeping_2', 'temple_3']  # Unofficial train-val split
        root = Path(root) / split
        super(Sintel, self).__init__(root, n_frames, input_transform=transform,
                                     target_transform=target_transform,
                                     co_transform=co_transform, ap_transform=ap_transform)

    def collect_samples(self):
        """Walk the flow directory and pair every .flo map with its frames."""
        img_dir = self.root / Path(self.dataset_type)
        flow_dir = self.root / 'flow'
        assert img_dir.isdir() and flow_dir.isdir()
        samples = []
        for flow_map in sorted((self.root / flow_dir).glob('*/*.flo')):
            info = flow_map.splitall()
            scene, filename = info[-2:]
            fid = int(filename[-8:-4])  # frame id encoded in 'frame_XXXX.flo'
            # Apply the unofficial train/val split when requested.
            if self.split == 'training' and self.subsplit != 'trainval':
                if self.subsplit == 'train' and scene not in self.training_scene:
                    continue
                if self.subsplit == 'val' and scene in self.training_scene:
                    continue
            s = {'imgs': [img_dir / scene / 'frame_{:04d}.png'.format(fid + i) for i in
                          range(self.n_frames)]}
            try:
                assert all([p.isfile() for p in s['imgs']])
                if self.with_flow:
                    if self.n_frames == 3:
                        # for img1 img2 img3, only flow_23 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid + 1)
                    elif self.n_frames == 2:
                        # for img1 img2, flow_12 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid)
                    else:
                        raise NotImplementedError(
                            'n_frames {} with flow or mask'.format(self.n_frames))
                if self.with_flow:
                    assert s['flow'].isfile()
            except AssertionError:
                print('Incomplete sample for: {}'.format(s['imgs'][0]))
                continue
            samples.append(s)
        return samples
class KITTIRawFile(ImgSeqDataset):
    """KITTI raw sequences listed in a split file.

    Each line of ``sp_file`` holds whitespace-separated image paths; the
    first ``n_frames`` entries of a line form one training sample.
    """

    def __init__(self, root, sp_file, n_frames=2, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None):
        self.sp_file = sp_file
        super(KITTIRawFile, self).__init__(root, n_frames,
                                           input_transform=transform,
                                           target_transform=target_transform,
                                           co_transform=co_transform,
                                           ap_transform=ap_transform)

    def collect_samples(self):
        """Parse the split file into a list of ``{'imgs': [...]}`` dicts."""
        collected = []
        with open(self.sp_file, 'r') as split_file:
            for raw_line in split_file:
                fields = raw_line.split()
                collected.append(
                    {'imgs': [fields[i] for i in range(self.n_frames)]})
        return collected
class KITTIFlowMV(ImgSeqDataset):
    """
    This dataset is used for unsupervised training only
    """

    def __init__(self, root, n_frames=2,
                 transform=None, co_transform=None, ap_transform=None, ):
        super(KITTIFlowMV, self).__init__(root, n_frames,
                                          input_transform=transform,
                                          co_transform=co_transform,
                                          ap_transform=ap_transform)

    def collect_samples(self):
        """Collect multi-view sequences around each annotated KITTI pair."""
        flow_occ_dir = 'flow_' + 'occ'
        assert (self.root / flow_occ_dir).isdir()
        img_l_dir, img_r_dir = 'image_2', 'image_3'  # left / right cameras
        assert (self.root / img_l_dir).isdir() and (self.root / img_r_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]  # strip '_10.png' to get the sequence id
            for img_dir in [img_l_dir, img_r_dir]:
                img_list = (self.root / img_dir).files('*{}*.png'.format(root_filename))
                img_list.sort()
                for st in range(0, len(img_list) - self.n_frames + 1):
                    seq = img_list[st:st + self.n_frames]
                    sample = {}
                    sample['imgs'] = []
                    for i, file in enumerate(seq):
                        frame_id = int(file[-6:-4])
                        # Frames 9-12 surround the annotated pair; drop windows
                        # touching them so ground-truth frames are never used
                        # for unsupervised training.
                        if 12 >= frame_id >= 9:
                            break
                        sample['imgs'].append(self.root.relpathto(file))
                    if len(sample['imgs']) == self.n_frames:
                        samples.append(sample)
        return samples
class KITTIFlow(ImgSeqDataset):
    """
    This dataset is used for validation only, so all files about target are stored as
    file filepath and there is no transform about target.
    """

    def __init__(self, root, n_frames=2, transform=None):
        super(KITTIFlow, self).__init__(root, n_frames, input_transform=transform)

    def __getitem__(self, idx):
        """Return transformed frames plus ground-truth flow file paths."""
        s = self.samples[idx]
        # img 1 2 for 2 frames, img 0 1 2 for 3 frames.
        st = 1 if self.n_frames == 2 else 0
        ed = st + self.n_frames
        imgs = [s['img{}'.format(i)] for i in range(st, ed)]
        inputs = [imageio.imread(self.root / p).astype(np.float32) for p in imgs]
        raw_size = inputs[0].shape[:2]
        data = {
            'flow_occ': self.root / s['flow_occ'],
            'flow_noc': self.root / s['flow_noc'],
        }
        data.update({  # for test set
            'im_shape': raw_size,
            'img1_path': self.root / s['img1'],
        })
        if self.input_transform is not None:
            inputs = [self.input_transform(i) for i in inputs]
        data.update({'img{}'.format(i + 1): inputs[i] for i in range(self.n_frames)})
        return data

    def collect_samples(self):
        '''Will search in training folder for folders 'flow_noc' or 'flow_occ'
        and 'colored_0' (KITTI 2012) or 'image_2' (KITTI 2015) '''
        flow_occ_dir = 'flow_' + 'occ'
        flow_noc_dir = 'flow_' + 'noc'
        assert (self.root / flow_occ_dir).isdir()
        img_dir = 'image_2'
        assert (self.root / img_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]
            flow_occ_map = flow_occ_dir + '/' + flow_map
            flow_noc_map = flow_noc_dir + '/' + flow_map
            s = {'flow_occ': flow_occ_map, 'flow_noc': flow_noc_map}
            # Frames _10 and _11 form the annotated pair.
            img1 = img_dir + '/' + root_filename + '_10.png'
            img2 = img_dir + '/' + root_filename + '_11.png'
            assert (self.root / img1).isfile() and (self.root / img2).isfile()
            s.update({'img1': img1, 'img2': img2})
            if self.n_frames == 3:
                # Include the preceding frame _09 for 3-frame models.
                img0 = img_dir + '/' + root_filename + '_09.png'
                assert (self.root / img0).isfile()
                s.update({'img0': img0})
            samples.append(s)
        return samples
|
import imageio
import numpy as np
import random
from path import Path
from abc import abstractmethod, ABCMeta
from torch.utils.data import Dataset
from utils.flow_utils import load_flow
from PIL import Image
import os, sys
import glob
import matplotlib.pyplot as plt
from PIL import Image
import os, sys
import imageio
from datetime import timedelta, date
import cv2
from google.colab.patches import cv2_imshow
class ImgSeqDataset(Dataset, metaclass=ABCMeta):
    """Abstract base dataset yielding short sequences of consecutive frames.

    Subclasses implement :meth:`collect_samples` and return a list of dicts,
    each with an ``'imgs'`` entry listing ``n_frames`` image paths relative
    to ``root``.
    """

    def __init__(self, root, n_frames, input_transform=None, co_transform=None,
                 target_transform=None, ap_transform=None):
        self.root = Path(root)
        #print("gggggg",self.root)
        self.n_frames = n_frames
        self.input_transform = input_transform    # per-image transform (e.g. to tensor)
        self.co_transform = co_transform          # joint transform over the whole sequence
        self.ap_transform = ap_transform          # appearance / photometric transform
        self.target_transform = target_transform  # kept for subclasses; unused here
        self.samples = self.collect_samples()

    @abstractmethod
    def collect_samples(self):
        """Return the list of sample dicts; implemented by subclasses."""
        pass

    def _load_sample(self, s):
        # Load every frame of the sample as a float32 array.
        images = s['imgs']
        images = [imageio.imread(self.root / p).astype(np.float32) for p in images]
        return images

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        images= self._load_sample(self.samples[idx])
        if self.co_transform is not None:
            # In unsupervised learning, there is no need to change target with image
            images = self.co_transform(images)
        if self.input_transform is not None:
            images = [self.input_transform(i) for i in images]
        data = {'img{}'.format(i + 1): p for i, p in enumerate(images)}
        if self.ap_transform is not None:
            # Photometrically augmented clones are returned alongside the
            # clean frames under the ``img{i}_ph`` keys.
            imgs_ph = self.ap_transform(
                [data['img{}'.format(i + 1)].clone() for i in range(self.n_frames)])
            for i in range(self.n_frames):
                data['img{}_ph'.format(i + 1)] = imgs_ph[i]
        """
        if self.target_transform is not None:
            for key in self.target_transform.keys():
                target[key] = self.target_transform[key](target[key])
        """
        return data
class SintelRaw(ImgSeqDataset):
    """Originally the Sintel raw-movie dataset; this version has been
    repurposed to build frame sequences from daily ``NATL_AN_<date>.png``
    images stored on a hard-coded Google Drive path.

    NOTE(review): ``root`` is effectively ignored; the image directory is
    hard-coded to ``/content/drive/MyDrive/data1/`` -- confirm intended.
    """

    def __init__(self, root, n_frames=2, transform=None, co_transform=None):
        super(SintelRaw, self).__init__(root, n_frames, input_transform=transform,
                                        co_transform=co_transform)

    def collect_samples(self):
        #txtfiles = []
        #for file in glob.glob("*.txt"):
        #    txtfiles.append(file)
        path = '/content/drive/MyDrive/data1/'
        dirs = os.listdir( path )
        scene_list = self.root
        #print("jjjjjjjjjjjjjjjjjjjjjjj",scene_list)
        samples = []
        img_list =[]
        images_names=[]
        # Collect every PNG file name present in the data directory.
        dirListing = os.listdir("/content/drive/MyDrive/data1/")
        for item in dirListing:
            if ".png" in item:
                images_names.append(item)

        def daterange(start_date, end_date):
            # Yield each date in [start_date, end_date).
            for n in range(int((end_date - start_date).days)):
                yield start_date + timedelta(n)

        start_date = date(2006, 12, 27)
        end_date = date(2010, 4, 6)
        item=[]
        # Keep images in chronological order by walking the date range and
        # keeping only files that actually exist.
        for single_date in daterange(start_date, end_date):
            t="NATL_AN_"+single_date.strftime("%Y-%m-%d")+".png"
            if ( t in images_names):
                img_list.append("/content/drive/MyDrive/data1/"+t)
        # Sliding window of n_frames consecutive images per sample.
        for st in range(0, len(img_list) - self.n_frames + 1):
            seq = img_list[st:st + self.n_frames]
            #print(seq)
            sample = {'imgs': [i for i in seq]}
            samples.append(sample)
        print(samples)
        return samples
class Sintel(ImgSeqDataset):
    """MPI Sintel optical-flow dataset (clean or final pass).

    Samples are built per scene from consecutive frames; when ``with_flow``
    is set, each sample also records the ``.flo`` ground-truth path that is
    evaluated (flow_12 for 2 frames, flow_23 for 3 frames).
    """

    def __init__(self, root, n_frames=2, type='clean', split='training',
                 subsplit='trainval', with_flow=True, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None, ):
        self.dataset_type = type
        self.with_flow = with_flow
        #print("hna")
        self.split = split
        self.subsplit = subsplit
        self.training_scene = ['alley_1', 'ambush_4', 'ambush_6', 'ambush_7', 'bamboo_2',
                               'bandage_2', 'cave_2', 'market_2', 'market_5', 'shaman_2',
                               'sleeping_2', 'temple_3']  # Unofficial train-val split
        root = Path(root) / split
        super(Sintel, self).__init__(root, n_frames, input_transform=transform,
                                     target_transform=target_transform,
                                     co_transform=co_transform, ap_transform=ap_transform)

    def collect_samples(self):
        """Walk the flow directory and pair every .flo map with its frames."""
        img_dir = self.root / Path(self.dataset_type)
        flow_dir = self.root / 'flow'
        assert img_dir.isdir() and flow_dir.isdir()
        samples = []
        for flow_map in sorted((self.root / flow_dir).glob('*/*.flo')):
            info = flow_map.splitall()
            scene, filename = info[-2:]
            fid = int(filename[-8:-4])  # frame id encoded in 'frame_XXXX.flo'
            # Apply the unofficial train/val split when requested.
            if self.split == 'training' and self.subsplit != 'trainval':
                if self.subsplit == 'train' and scene not in self.training_scene:
                    continue
                if self.subsplit == 'val' and scene in self.training_scene:
                    continue
            s = {'imgs': [img_dir / scene / 'frame_{:04d}.png'.format(fid + i) for i in
                          range(self.n_frames)]}
            try:
                assert all([p.isfile() for p in s['imgs']])
                if self.with_flow:
                    if self.n_frames == 3:
                        # for img1 img2 img3, only flow_23 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid + 1)
                    elif self.n_frames == 2:
                        # for img1 img2, flow_12 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid)
                    else:
                        raise NotImplementedError(
                            'n_frames {} with flow or mask'.format(self.n_frames))
                if self.with_flow:
                    assert s['flow'].isfile()
            except AssertionError:
                print('Incomplete sample for: {}'.format(s['imgs'][0]))
                continue
            samples.append(s)
        return samples
class KITTIRawFile(ImgSeqDataset):
    """KITTI raw sequences listed in a split file: each line of ``sp_file``
    holds whitespace-separated image paths; the first ``n_frames`` entries
    of a line form one training sample."""

    def __init__(self, root, sp_file, n_frames=2, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None):
        self.sp_file = sp_file  # path of the split file listing image paths
        super(KITTIRawFile, self).__init__(root, n_frames,
                                           input_transform=transform,
                                           target_transform=target_transform,
                                           co_transform=co_transform,
                                           ap_transform=ap_transform)

    def collect_samples(self):
        """Parse the split file into a list of ``{'imgs': [...]}`` dicts."""
        samples = []
        with open(self.sp_file, 'r') as f:
            for line in f.readlines():
                sp = line.split()
                s = {'imgs': [sp[i] for i in range(self.n_frames)]}
                samples.append(s)
        return samples
class KITTIFlowMV(ImgSeqDataset):
    """
    This dataset is used for unsupervised training only
    """

    def __init__(self, root, n_frames=2,
                 transform=None, co_transform=None, ap_transform=None, ):
        super(KITTIFlowMV, self).__init__(root, n_frames,
                                          input_transform=transform,
                                          co_transform=co_transform,
                                          ap_transform=ap_transform)

    def collect_samples(self):
        """Collect multi-view sequences around each annotated KITTI pair."""
        flow_occ_dir = 'flow_' + 'occ'
        assert (self.root / flow_occ_dir).isdir()
        img_l_dir, img_r_dir = 'image_2', 'image_3'  # left / right cameras
        assert (self.root / img_l_dir).isdir() and (self.root / img_r_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]  # strip '_10.png' to get the sequence id
            for img_dir in [img_l_dir, img_r_dir]:
                img_list = (self.root / img_dir).files('*{}*.png'.format(root_filename))
                img_list.sort()
                for st in range(0, len(img_list) - self.n_frames + 1):
                    seq = img_list[st:st + self.n_frames]
                    sample = {}
                    sample['imgs'] = []
                    for i, file in enumerate(seq):
                        frame_id = int(file[-6:-4])
                        # Frames 9-12 surround the annotated pair; drop windows
                        # touching them so ground-truth frames are never used
                        # for unsupervised training.
                        if 12 >= frame_id >= 9:
                            break
                        sample['imgs'].append(self.root.relpathto(file))
                    if len(sample['imgs']) == self.n_frames:
                        samples.append(sample)
        return samples
class KITTIFlow(ImgSeqDataset):
    """
    This dataset is used for validation only, so all files about target are stored as
    file filepath and there is no transform about target.
    """

    def __init__(self, root, n_frames=2, transform=None):
        super(KITTIFlow, self).__init__(root, n_frames, input_transform=transform)

    def __getitem__(self, idx):
        """Return transformed frames plus ground-truth flow file paths."""
        s = self.samples[idx]
        # img 1 2 for 2 frames, img 0 1 2 for 3 frames.
        st = 1 if self.n_frames == 2 else 0
        ed = st + self.n_frames
        imgs = [s['img{}'.format(i)] for i in range(st, ed)]
        inputs = [imageio.imread(self.root / p).astype(np.float32) for p in imgs]
        raw_size = inputs[0].shape[:2]
        data = {
            'flow_occ': self.root / s['flow_occ'],
            'flow_noc': self.root / s['flow_noc'],
        }
        data.update({  # for test set
            'im_shape': raw_size,
            'img1_path': self.root / s['img1'],
        })
        if self.input_transform is not None:
            inputs = [self.input_transform(i) for i in inputs]
        data.update({'img{}'.format(i + 1): inputs[i] for i in range(self.n_frames)})
        return data

    def collect_samples(self):
        '''Will search in training folder for folders 'flow_noc' or 'flow_occ'
        and 'colored_0' (KITTI 2012) or 'image_2' (KITTI 2015) '''
        flow_occ_dir = 'flow_' + 'occ'
        flow_noc_dir = 'flow_' + 'noc'
        assert (self.root / flow_occ_dir).isdir()
        img_dir = 'image_2'
        assert (self.root / img_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]
            flow_occ_map = flow_occ_dir + '/' + flow_map
            flow_noc_map = flow_noc_dir + '/' + flow_map
            s = {'flow_occ': flow_occ_map, 'flow_noc': flow_noc_map}
            # Frames _10 and _11 form the annotated pair.
            img1 = img_dir + '/' + root_filename + '_10.png'
            img2 = img_dir + '/' + root_filename + '_11.png'
            assert (self.root / img1).isfile() and (self.root / img2).isfile()
            s.update({'img1': img1, 'img2': img2})
            if self.n_frames == 3:
                # Include the preceding frame _09 for 3-frame models.
                img0 = img_dir + '/' + root_filename + '_09.png'
                assert (self.root / img0).isfile()
                s.update({'img0': img0})
            samples.append(s)
        return samples
|
en
| 0.786625
|
#print("gggggg",self.root) # In unsupervised learning, there is no need to change target with image if self.target_transform is not None: for key in self.target_transform.keys(): target[key] = self.target_transform[key](target[key]) #txtfiles = [] #for file in glob.glob("*.txt"): # txtfiles.append(file) #print("jjjjjjjjjjjjjjjjjjjjjjj",scene_list) #print(seq) #print("hna") # Unofficial train-val split # for img1 img2 img3, only flow_23 will be evaluated # for img1 img2, flow_12 will be evaluated This dataset is used for unsupervised training only This dataset is used for validation only, so all files about target are stored as file filepath and there is no transform about target. # img 1 2 for 2 frames, img 0 1 2 for 3 frames. # for test set Will search in training folder for folders 'flow_noc' or 'flow_occ' and 'colored_0' (KITTI 2012) or 'image_2' (KITTI 2015)
| 2.47024
| 2
|
config.py
|
wizzicollo/Profile-Pitching-App
| 0
|
6627475
|
import os
class Config:
    """Base configuration shared by all environments.

    NOTE(review): the database URI embeds credentials in source; move them
    to environment variables like the mail settings.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
    UPLOADED_PHOTOS_DEST = 'app/static/photo'
    # Email settings; credentials come from the environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    SUBJECT_PREFIX = 'Done in 60 seconds'
    SENDER_EMAIL = '<EMAIL>'
    # SimpleMDE markdown editor settings.
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True
class prodConfig(Config):
    """Production configuration: database URI taken from the environment."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class DevConfig(Config):
    """Development configuration: local database, debug mode on."""
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
    DEBUG = True
class TestConfig(Config):
    """Test configuration.

    NOTE(review): points at the same database as development -- tests will
    touch dev data; confirm a separate test database is intended.
    """
    SQLALCHEMY_DATABASE_URI= 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
# Map of environment name -> config class, used by the app factory.
config_options = {
    'development': DevConfig,
    'production': prodConfig,
    'test':TestConfig
}
|
import os
class Config:
    """Base configuration shared by all environments.

    NOTE(review): the database URI embeds credentials in source; move them
    to environment variables like the mail settings.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
    UPLOADED_PHOTOS_DEST = 'app/static/photo'
    # Email settings; credentials come from the environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    SUBJECT_PREFIX = 'Done in 60 seconds'
    SENDER_EMAIL = '<EMAIL>'
    # SimpleMDE markdown editor settings.
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True
class prodConfig(Config):
    """Production configuration: database URI taken from the environment."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class DevConfig(Config):
    """Development configuration: local database, debug mode on."""
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
    DEBUG = True
class TestConfig(Config):
    """Test configuration.

    NOTE(review): points at the same database as development -- tests will
    touch dev data; confirm a separate test database is intended.
    """
    SQLALCHEMY_DATABASE_URI= 'postgresql+psycopg2://collins:qwertyui@localhost/pitchhs'
# Map of environment name -> config class, used by the app factory.
config_options = {
    'development': DevConfig,
    'production': prodConfig,
    'test':TestConfig
}
|
none
| 1
| 2.048962
| 2
|
|
client/log.py
|
s-pace/pyre-check
| 5
|
6627476
|
<filename>client/log.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List, Optional, Sequence
# Custom log levels: PERFORMANCE sits between DEBUG and INFO; PROMPT and
# SUCCESS sit at/above CRITICAL so they are always shown.
PERFORMANCE: int = 15
PROMPT: int = 50
SUCCESS: int = 60

LOG: logging.Logger = logging.getLogger(__name__)

# Captured stdout, flushed to the real stdout by cleanup().
stdout: io.StringIO = io.StringIO(newline="")

# The active TimedStreamHandler in interactive mode, None otherwise.
__handler: Optional["TimedStreamHandler"] = None
class Color:
    """ANSI foreground color escape codes."""

    YELLOW: str = "\033[33m"
    RED: str = "\033[31m"
    GREEN: str = "\033[32m"
class Format:
    """ANSI formatting and cursor-control escape codes."""

    BOLD: str = "\033[1m"

    CLEAR_LINE: str = "\x1b[0G\x1b[K"
    CLEAR: str = "\033[0m"
    TRUNCATE_OVERFLOW: str = "\033[?7l"
    WRAP_OVERFLOW: str = "\033[?7h"
    NEWLINE: str = "\n"

    CURSOR_UP_LINE: str = "\x1b[1A"

    HIDE_CURSOR: str = "\x1b[?25l"
    SHOW_CURSOR: str = "\x1b[?25h"
class Character:
    """Special characters used in terminal output."""

    LAMBDA: str = "ƛ"
class SectionFormatter(logging.Formatter):
    """Format records as ``<time> <level> <message>``, collapsing
    ``DEBUG [section]`` prefixes down to just the section name."""

    def __init__(self) -> None:
        super().__init__("%(asctime)s %(levelname)s %(message)s")

    def format(self, record: logging.LogRecord) -> str:
        rendered = super().format(record)
        return re.sub(r"DEBUG \[(.*)\]", r"\1", rendered)
class TimedStreamHandler(logging.StreamHandler):
    """Interactive stderr handler that rewrites a status line in place.

    The most recent INFO record stays on screen and is periodically
    re-emitted with an age suffix by a background thread; records at the
    line-breaking levels end the status line and remain visible.
    """

    # Re-emit the current record with an age suffix after this many seconds.
    THRESHOLD: float = 0.5
    # Levels whose messages end the in-place status line and stay on screen.
    LINE_BREAKING_LEVELS: Sequence[str] = ["ERROR", "WARNING", "SUCCESS"]

    _terminate: bool = False
    _last_update: float = 0.0

    def __init__(self) -> None:
        super(TimedStreamHandler, self).__init__()
        self.setFormatter(logging.Formatter("%(message)s"))
        self.terminator: str = ""  # no newline: the status line gets overwritten
        self.setLevel(logging.INFO)
        self._record: Optional[logging.LogRecord] = None       # record being refreshed
        self._last_record: Optional[logging.LogRecord] = None  # last record seen
        self._active_lines: int = 0  # terminal lines the status currently occupies

        # Preamble preparing terminal.
        sys.stderr.write(
            Format.NEWLINE
            + Format.CLEAR_LINE
            + Format.CURSOR_UP_LINE
            + Format.HIDE_CURSOR
        )

        # Daemon thread keeps refreshing the current record with its age.
        thread = threading.Thread(target=self._thread)
        thread.daemon = True
        thread.start()

    def clear_lines(self) -> str:
        """Return the escape sequence erasing the current status lines."""
        if self._active_lines == 0:
            return ""
        return Format.CLEAR_LINE + "".join(
            [
                Format.CURSOR_UP_LINE + Format.CLEAR_LINE
                for n in range(self._active_lines - 1)
            ]
        )

    def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
        """Render *record* over the previous status line.

        ``age`` is passed by the refresher thread; aged records get a
        ``[x.xs]`` suffix, yellow past 10s and red past 30s.
        """
        self._last_record = record
        suffix = ""
        color = ""
        active_lines = record.msg.count("\n") + 1
        if record.levelname in self.LINE_BREAKING_LEVELS:
            record.msg += "\n"
        if record.levelname == "ERROR":
            color = Color.RED
            self._record = None
            active_lines = 0
        elif record.levelname == "WARNING":
            color = Color.YELLOW
            self._record = None
            active_lines = 0
        elif record.levelname == "PROMPT":
            color = Color.YELLOW
            self._record = None
            active_lines = 0
        elif record.levelname == "SUCCESS":
            self._record = None
            active_lines = 0
        elif age:
            if age > 10:
                color = Color.YELLOW
            if age > 30:
                color = Color.RED
            suffix = " {}[{:.1f}s]{}".format(
                color if color else "", age, Format.CLEAR if color else ""
            )
        else:
            # Plain INFO: remember it so the refresher thread can re-emit it.
            self._record = record
            self._last_update = time.time()

        timed_record = copy.copy(record)
        timed_record.msg = (
            "{clear_line}{color} {cursor}{clear} " "{truncate}{message}{suffix}"
        ).format(
            clear_line=self.clear_lines(),
            color=color,
            cursor=Character.LAMBDA,
            clear=Format.CLEAR,
            truncate=Format.TRUNCATE_OVERFLOW,
            message=record.msg,
            suffix=suffix,
        )
        self._active_lines = active_lines
        super(TimedStreamHandler, self).emit(timed_record)

    def _thread(self) -> None:
        """Background loop re-emitting the current record with its age."""
        while not self._terminate:
            record = self._record
            if record:
                age = time.time() - self._last_update
                if age > self.THRESHOLD:
                    self.emit(record, age)
            time.sleep(0.1)

    def terminate(self) -> None:
        """Stop the refresher, clear the status line and restore the cursor."""
        self._terminate = True

        if self._active_lines > 0:
            sys.stderr.write(self.clear_lines())
            self._active_lines = 0

        # Reset terminal.
        sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
        sys.stderr.flush()
def initialize(
    noninteractive: bool,
    log_directory: str = "/tmp/.pyre",
    disable_file_logging: bool = False,
) -> None:
    """Set up root logging.

    In noninteractive mode a plain stream handler is used; otherwise the
    interactive TimedStreamHandler is installed and remembered (module-level
    __handler) so that `cleanup` can restore the terminal later. Interactive
    runs also mirror output to <log_directory>/pyre.stderr unless file
    logging is disabled.
    """
    global __handler
    if noninteractive:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(SectionFormatter())
        stream_handler.setLevel(logging.DEBUG)
        __handler = None
    else:
        stream_handler = TimedStreamHandler()
        __handler = stream_handler
    handlers: List[logging.Handler] = [stream_handler]
    if not noninteractive and not disable_file_logging:
        if not os.path.exists(log_directory):
            os.makedirs(log_directory)
        file_handler = logging.FileHandler(os.path.join(log_directory, "pyre.stderr"))
        file_handler.setFormatter(SectionFormatter())
        file_handler.setLevel(logging.DEBUG)
        handlers.append(file_handler)
    # Register display names for the module's custom log levels.
    logging.addLevelName(PERFORMANCE, "PERFORMANCE")
    logging.addLevelName(PROMPT, "PROMPT")
    logging.addLevelName(SUCCESS, "SUCCESS")
    logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup() -> None:
    """Tear down the interactive handler and flush any captured stdout."""
    active_handler = __handler
    if active_handler is not None:
        active_handler.terminate()
    captured = stdout.getvalue()
    if captured:
        sys.stdout.write(captured + "\n")
class Buffer:
    """Collects lines for one log section and flushes them to LOG exactly
    once — either explicitly via `flush` or by a daemon thread after
    THRESHOLD seconds.
    """
    # Seconds the background thread waits before auto-flushing.
    THRESHOLD: float = 0.1
    _flushed: bool = False
    def __init__(self, section: str, data: List[str]) -> None:
        self._section: str = section
        self._data: List[str] = data
        self._lock: threading.RLock = threading.RLock()
        thread = threading.Thread(target=self._thread)
        thread.daemon = True
        thread.start()
    def append(self, line: str) -> None:
        """Add one line to the pending buffer."""
        self._data.append(line)
    def flush(self) -> None:
        """Emit the buffered lines through LOG once (thread-safe, idempotent)."""
        with self._lock:
            if self._flushed is True:
                return
            self._flushed = True
            message = "\n".join(self._data)
            # Map the section name onto a logger severity.
            if self._section == "ERROR":
                LOG.error(message)
            elif self._section == "INFO":
                LOG.info(message)
            elif self._section == "DUMP":
                LOG.warning(message)
            elif self._section == "WARNING":
                LOG.warning(message)
            elif self._section == "PROGRESS":
                LOG.info(message)
            elif self._section == "PARSER":
                LOG.error(message)
            else:
                LOG.debug("[%s] %s", self._section, message)
    def _thread(self) -> None:
        """Auto-flush after THRESHOLD seconds if nobody flushed explicitly."""
        time.sleep(self.THRESHOLD)
        with self._lock:
            if not self._flushed:
                self.flush()
def get_yes_no_input(prompt: str) -> bool:
    """Ask a yes/no question; an empty answer counts as yes."""
    answer = get_input(prompt, suffix=" [Y/n] ").lower()
    return answer in ("", "y", "ye", "yes")
def get_optional_input(prompt: str, default: str) -> str:
    """Ask for a value, falling back to *default* on an empty answer."""
    answer = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
    if not answer:
        return default
    return answer
def get_input(prompt: str, suffix: str = "") -> str:
    """Show *prompt* at PROMPT level and return the user's stripped reply."""
    LOG.log(PROMPT, "{}{}".format(prompt, suffix))
    return input().strip()
|
<filename>client/log.py
# Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import logging
import os
import re
import sys
import threading
import time
from typing import List, Optional, Sequence
# Custom log levels: PERFORMANCE sits between DEBUG (10) and INFO (20);
# PROMPT and SUCCESS are at/above CRITICAL (50) so they are never filtered.
PERFORMANCE: int = 15
PROMPT: int = 50
SUCCESS: int = 60
LOG: logging.Logger = logging.getLogger(__name__)
# Captured stdout that `cleanup` flushes to the real stdout on exit.
stdout: io.StringIO = io.StringIO(newline="")
# The active interactive handler, if any; set by `initialize`.
__handler: Optional["TimedStreamHandler"] = None
class Color:
    """ANSI SGR color escape codes."""
    YELLOW: str = "\033[33m"
    RED: str = "\033[31m"
    GREEN: str = "\033[32m"
class Format:
    """ANSI escape sequences for text styling and cursor control."""
    BOLD: str = "\033[1m"
    CLEAR_LINE: str = "\x1b[0G\x1b[K"  # move to column 0, erase to end of line
    CLEAR: str = "\033[0m"  # reset all attributes
    TRUNCATE_OVERFLOW: str = "\033[?7l"  # disable auto line wrap
    WRAP_OVERFLOW: str = "\033[?7h"  # re-enable auto line wrap
    NEWLINE: str = "\n"
    CURSOR_UP_LINE: str = "\x1b[1A"
    HIDE_CURSOR: str = "\x1b[?25l"
    SHOW_CURSOR: str = "\x1b[?25h"
class Character:
    """Special characters used in interactive output."""
    LAMBDA: str = "ƛ"  # prefix glyph drawn before each status message
class SectionFormatter(logging.Formatter):
    """Formatter that rewrites `DEBUG [SECTION]` prefixes as plain `SECTION`."""

    def __init__(self) -> None:
        super().__init__("%(asctime)s %(levelname)s %(message)s")

    def format(self, record: logging.LogRecord) -> str:
        rendered = super().format(record)
        # Collapse "DEBUG [section]" into just the section name.
        return re.sub(r"DEBUG \[(.*)\]", r"\1", rendered)
class TimedStreamHandler(logging.StreamHandler):
    """Interactive stderr handler keeping one "sticky" status line.

    Transient (INFO-level) records overwrite each other in place; a daemon
    thread re-emits the current sticky record with an age suffix once it
    grows older than THRESHOLD seconds.
    """
    # Re-emit the sticky record with its age once it is older than this.
    THRESHOLD: float = 0.5
    # Levels whose output is permanent (printed with a trailing newline).
    LINE_BREAKING_LEVELS: Sequence[str] = ["ERROR", "WARNING", "SUCCESS"]
    _terminate: bool = False
    _last_update: float = 0.0
    def __init__(self) -> None:
        super(TimedStreamHandler, self).__init__()
        self.setFormatter(logging.Formatter("%(message)s"))
        self.terminator: str = ""
        self.setLevel(logging.INFO)
        # Sticky record currently being refreshed (None once superseded).
        self._record: Optional[logging.LogRecord] = None
        self._last_record: Optional[logging.LogRecord] = None
        # Number of terminal lines the last write occupies.
        self._active_lines: int = 0
        # Preamble preparing terminal: scroll one line and hide the cursor.
        sys.stderr.write(
            Format.NEWLINE
            + Format.CLEAR_LINE
            + Format.CURSOR_UP_LINE
            + Format.HIDE_CURSOR
        )
        thread = threading.Thread(target=self._thread)
        thread.daemon = True
        thread.start()
    def clear_lines(self) -> str:
        """Return the escape sequence erasing the lines we last drew."""
        if self._active_lines == 0:
            return ""
        return Format.CLEAR_LINE + "".join(
            [
                Format.CURSOR_UP_LINE + Format.CLEAR_LINE
                for n in range(self._active_lines - 1)
            ]
        )
    def emit(self, record: logging.LogRecord, age: Optional[float] = None) -> None:
        """Write *record*, overwriting the previous sticky output.

        *age* is only passed by the refresher thread; when given, a colored
        "[N.Ns]" suffix is appended to the message.
        """
        self._last_record = record
        suffix = ""
        color = ""
        active_lines = record.msg.count("\n") + 1
        if record.levelname in self.LINE_BREAKING_LEVELS:
            # Permanent output: terminate the line so it is not overwritten.
            record.msg += "\n"
        if record.levelname == "ERROR":
            color = Color.RED
            self._record = None
            active_lines = 0
        elif record.levelname == "WARNING":
            color = Color.YELLOW
            self._record = None
            active_lines = 0
        elif record.levelname == "PROMPT":
            color = Color.YELLOW
            self._record = None
            active_lines = 0
        elif record.levelname == "SUCCESS":
            self._record = None
            active_lines = 0
        elif age:
            # Refresh pass: escalate the suffix color as the record ages.
            if age > 10:
                color = Color.YELLOW
            if age > 30:
                color = Color.RED
            suffix = " {}[{:.1f}s]{}".format(
                color if color else "", age, Format.CLEAR if color else ""
            )
        else:
            # Fresh transient record: make it the sticky one.
            self._record = record
            self._last_update = time.time()
        # Copy so the added escape codes never leak into other handlers.
        timed_record = copy.copy(record)
        timed_record.msg = (
            "{clear_line}{color} {cursor}{clear} " "{truncate}{message}{suffix}"
        ).format(
            clear_line=self.clear_lines(),
            color=color,
            cursor=Character.LAMBDA,
            clear=Format.CLEAR,
            truncate=Format.TRUNCATE_OVERFLOW,
            message=record.msg,
            suffix=suffix,
        )
        self._active_lines = active_lines
        super(TimedStreamHandler, self).emit(timed_record)
    def _thread(self) -> None:
        """Background loop re-emitting the sticky record once it is stale."""
        while not self._terminate:
            record = self._record
            if record:
                age = time.time() - self._last_update
                if age > self.THRESHOLD:
                    self.emit(record, age)
            time.sleep(0.1)
    def terminate(self) -> None:
        """Stop refreshing, erase our output, and restore the terminal."""
        self._terminate = True
        if self._active_lines > 0:
            sys.stderr.write(self.clear_lines())
            self._active_lines = 0
        # Reset terminal: re-enable wrapping and show the cursor again.
        sys.stderr.write(Format.WRAP_OVERFLOW + Format.SHOW_CURSOR)
        sys.stderr.flush()
def initialize(
    noninteractive: bool,
    log_directory: str = "/tmp/.pyre",
    disable_file_logging: bool = False,
) -> None:
    """Configure root logging for plain or interactive output.

    Interactive runs remember the handler (module-level __handler) so that
    `cleanup` can restore the terminal, and mirror output to
    <log_directory>/pyre.stderr unless file logging is disabled.
    """
    global __handler
    if noninteractive:
        # Plain mode: everything goes through a standard stream handler.
        console = logging.StreamHandler()
        console.setFormatter(SectionFormatter())
        console.setLevel(logging.DEBUG)
        __handler = None
    else:
        # Interactive mode: keep a reference for later teardown.
        console = TimedStreamHandler()
        __handler = console
    handlers: List[logging.Handler] = [console]
    if not (noninteractive or disable_file_logging):
        if not os.path.exists(log_directory):
            os.makedirs(log_directory)
        log_path = os.path.join(log_directory, "pyre.stderr")
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(SectionFormatter())
        file_handler.setLevel(logging.DEBUG)
        handlers.append(file_handler)
    # Register display names for the module's custom log levels.
    for level, name in (
        (PERFORMANCE, "PERFORMANCE"),
        (PROMPT, "PROMPT"),
        (SUCCESS, "SUCCESS"),
    ):
        logging.addLevelName(level, name)
    logging.basicConfig(level=logging.DEBUG, handlers=handlers)
def cleanup() -> None:
    """Restore the terminal and flush any captured stdout on exit."""
    handler = __handler
    if handler:
        handler.terminate()
    # Emit anything that was buffered into the fake stdout.
    output = stdout.getvalue()
    if output:
        sys.stdout.write(output + "\n")
class Buffer:
    """Collects lines for one log section and flushes them to LOG exactly
    once — either explicitly via `flush` or by a daemon thread after
    THRESHOLD seconds.
    """
    # Seconds the background thread waits before auto-flushing.
    THRESHOLD: float = 0.1
    _flushed: bool = False
    def __init__(self, section: str, data: List[str]) -> None:
        self._section: str = section
        self._data: List[str] = data
        self._lock: threading.RLock = threading.RLock()
        flusher = threading.Thread(target=self._thread)
        flusher.daemon = True
        flusher.start()
    def append(self, line: str) -> None:
        """Add one line to the pending buffer."""
        self._data.append(line)
    def flush(self) -> None:
        """Emit the buffered lines through LOG once (thread-safe, idempotent)."""
        with self._lock:
            if self._flushed:
                return
            self._flushed = True
            message = "\n".join(self._data)
            # Map server section names onto module-logger severities.
            emitters = {
                "ERROR": LOG.error,
                "INFO": LOG.info,
                "DUMP": LOG.warning,
                "WARNING": LOG.warning,
                "PROGRESS": LOG.info,
                "PARSER": LOG.error,
            }
            emit = emitters.get(self._section)
            if emit is None:
                LOG.debug("[%s] %s", self._section, message)
            else:
                emit(message)
    def _thread(self) -> None:
        """Auto-flush shortly after creation if nobody flushed explicitly."""
        time.sleep(self.THRESHOLD)
        with self._lock:
            if not self._flushed:
                self.flush()
def get_yes_no_input(prompt: str) -> bool:
    """Ask a yes/no question; an empty answer counts as yes.

    Any case-insensitive prefix of "yes" is accepted as affirmative.
    """
    choice = get_input(prompt, suffix=" [Y/n] ")
    # Set membership instead of a list scan; same accepted answers.
    return choice.lower() in {"", "y", "ye", "yes"}
def get_optional_input(prompt: str, default: str) -> str:
    """Prompt for a value, returning *default* when the reply is empty."""
    result = get_input(prompt, suffix=" (Default: `{}`): ".format(default))
    if result == "":
        return default
    return result
def get_input(prompt: str, suffix: str = "") -> str:
    """Log *prompt* at PROMPT level and return the user's stripped reply."""
    LOG.log(PROMPT, prompt + suffix)
    return input().strip()
|
en
| 0.837508
|
# Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Preamble preparing terminal. # Reset terminal.
| 2.408977
| 2
|
main/perl-io-socket-ssl/template.py
|
matu3ba/cports
| 46
|
6627477
|
# cports build template for the IO::Socket::SSL CPAN distribution.
pkgname = "perl-io-socket-ssl"
pkgver = "2.072"
pkgrel = 0
build_style = "perl_module"
hostmakedepends = ["gmake", "perl"]
makedepends = ["perl", "perl-net-ssleay", "perl-uri"]
# Runtime dependencies mirror the build-time ones.
depends = list(makedepends)
pkgdesc = "SSL sockets with IO::Socket interface"
maintainer = "q66 <<EMAIL>>"
license = "Artistic-1.0-Perl OR GPL-1.0-or-later"
url = "https://metacpan.org/release/IO-Socket-SSL"
source = f"$(CPAN_SITE)/IO/IO-Socket-SSL-{pkgver}.tar.gz"
sha256 = "b5bee81db3905a9069340a450a48e1e1b32dec4ede0064f5703bafb9a707b89d"
# tests disabled: missing checkdepends
options = ["!check"]
|
# cports build template for the IO::Socket::SSL CPAN distribution.
pkgname = "perl-io-socket-ssl"
pkgver = "2.072"
pkgrel = 0
build_style = "perl_module"
hostmakedepends = ["gmake", "perl"]
makedepends = ["perl", "perl-net-ssleay", "perl-uri"]
# Runtime dependencies mirror the build-time ones.
depends = list(makedepends)
pkgdesc = "SSL sockets with IO::Socket interface"
maintainer = "q66 <<EMAIL>>"
license = "Artistic-1.0-Perl OR GPL-1.0-or-later"
url = "https://metacpan.org/release/IO-Socket-SSL"
source = f"$(CPAN_SITE)/IO/IO-Socket-SSL-{pkgver}.tar.gz"
sha256 = "b5bee81db3905a9069340a450a48e1e1b32dec4ede0064f5703bafb9a707b89d"
# tests disabled: missing checkdepends
options = ["!check"]
|
en
| 0.6098
|
# missing checkdepends
| 0.990335
| 1
|
dfp/agent_multimodal_advantage.py
|
minosworld/dfp
| 4
|
6627478
|
<reponame>minosworld/dfp<gh_stars>1-10
import numpy as np
import tensorflow as tf
from dfp import tf_ops as my_ops
from dfp.agent import Agent
class AgentMultimodalAdvantage(Agent):
    """DFP agent that embeds several sensory modalities, optionally infers
    unobserved modalities from the observed ones, and predicts future
    targets with a value/advantage (dueling-style) head — one advantage
    vector per discrete action.
    """
    def make_net(self, input_sensory, input_actions, input_objectives, reuse=False):
        """
        Hooks up network for inferring non-observed modalities for given time step and for
        predicting future targets (measurement + subset of modalities)
        Args:
            input_sensory - tf placeholder for all modalities (includes both observed and ground truth for to be inferred modalities)
            input_actions - tf placeholder for one hot vector representation of action taken by agent
            input_objectives - tf placeholder for objective coefficients
                (objective is a linear weighted function of predicted measurements, these are the weights specified by the user)
        Returns:
            infer_sensory_embeddings - dictionary with computed embeddings for infer_modalities
            pred_all - All future target predictions (for all actions)
            pred_relevant - Relevant future target prediction (for current action)
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()
        if self.random_objective_coeffs:
            assert isinstance(self.obj_fc_params, np.ndarray), 'Need fc_obj_params with randomized objectives'
        # Value branch outputs one target vector; advantage branch outputs
        # one target vector per discrete action.
        self.fc_val_params = np.copy(self.joint_fc_params)
        self.fc_val_params['out_dims'][-1] = self.target_dim
        self.fc_adv_params = np.copy(self.joint_fc_params)
        self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        sensory_embeddings = {}
        # build nets to embed input_modalities into concatenated representation
        for modality in self.input_modalities:
            if modality == 'color':
                img_conv = my_ops.conv_encoder(input_sensory['color'], self.img_conv_params, 'img_conv', msra_coeff=0.9)
                img_fc = my_ops.fc_net(my_ops.flatten(img_conv), self.img_fc_params, 'img_fc', msra_coeff=0.9)
                sensory_embeddings['color'] = img_fc
            elif modality == 'depth':
                depth_conv = my_ops.conv_encoder(input_sensory['depth'], self.depth_conv_params, 'depth_conv', msra_coeff=0.9)
                depth_fc = my_ops.fc_net(my_ops.flatten(depth_conv), self.depth_fc_params, 'depth_fc', msra_coeff=0.9)
                sensory_embeddings['depth'] = depth_fc
            elif modality == 'measurements':
                meas_fc = my_ops.fc_net(input_sensory['measurements'], self.meas_fc_params, 'meas_fc', msra_coeff=0.9)
                sensory_embeddings['measurements'] = meas_fc
            elif modality == 'force':
                force_fc = my_ops.fc_net(input_sensory['force'], self.force_fc_params, 'force_fc', msra_coeff=0.9)
                sensory_embeddings['force'] = force_fc
            elif modality == 'audiopath':
                audiopath_fc = my_ops.fc_net(input_sensory['audiopath'], self.audiopath_fc_params, 'audiopath_fc', msra_coeff=0.9)
                sensory_embeddings['audiopath'] = audiopath_fc
            elif modality == 'audio':
                audio_fc = my_ops.fc_net(input_sensory['audio'], self.audio_fc_params, 'audio_fc', msra_coeff=0.9)
                sensory_embeddings['audio'] = audio_fc
            elif modality == 'actions':
                actions_fc = my_ops.fc_net(input_sensory['actions'], self.actions_fc_params, 'actions_fc', msra_coeff=0.9)
                sensory_embeddings['actions'] = actions_fc
            elif modality == 'goalRoomType':
                goal_roomtype_fc = my_ops.fc_net(input_sensory['goalRoomType'], self.goalroomtype_fc_params, 'goalroomtype_fc', msra_coeff=0.9)
                sensory_embeddings['goalRoomType'] = goal_roomtype_fc
            elif modality == 'roomType':
                roomtype_fc = my_ops.fc_net(input_sensory['roomType'], self.roomtype_fc_params, 'roomtype_fc', msra_coeff=0.9)
                sensory_embeddings['roomType'] = roomtype_fc
            else:
                raise Exception('Unsupported input modality %s' % modality)
        # is there a better way to get values from a dictionary ordered by key?
        # NOTE: sorted() keeps the concatenation order deterministic.
        input_concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
        # infer modalities from input_concat_fc
        infer_sensory_embeddings = {}
        for modality in self.infer_modalities:
            if modality == 'measurements':
                # handle this one below (it conditions on the other inferred modalities)
                pass
            elif modality == 'roomType':
                self.infer_roomtype_fc_params['out_dims'][-1] = input_sensory['roomType'].get_shape().as_list()[1]
                roomtype_fc = my_ops.fc_net_with_soft_max(input_concat_fc, self.infer_roomtype_fc_params, 'infer_roomType_fc', msra_coeff=0.9)
                sensory_embeddings[modality] = roomtype_fc
                infer_sensory_embeddings[modality] = roomtype_fc
            else:
                raise Exception('Unsupported infer modality %s' % modality)
        if 'measurements' in self.infer_modalities:
            input_inferred_concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
            self.infer_meas_fc_params['out_dims'][-1] = input_sensory['measurements'].get_shape().as_list()[1]
            meas_fc = my_ops.fc_net(input_inferred_concat_fc, self.infer_meas_fc_params, 'infer_meas_fc', msra_coeff=0.9)
            sensory_embeddings['measurements'] = meas_fc
            infer_sensory_embeddings['measurements'] = meas_fc
        # add objectives to embedding
        if isinstance(self.obj_fc_params, np.ndarray):
            obj_fc = my_ops.fc_net(input_objectives, self.obj_fc_params, 'obj_fc', msra_coeff=0.9)
            sensory_embeddings['objectives'] = obj_fc
        # final input + inferred concatenated
        concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
        # predicted expectation over all actions
        pred_val_fc = my_ops.fc_net(concat_fc, self.fc_val_params, 'pred_val_fc', last_linear=True, msra_coeff=0.9)
        # predicted action-conditional differences
        pred_adv_fc = my_ops.fc_net(concat_fc, self.fc_adv_params, 'pred_adv_fc', last_linear=True, msra_coeff=0.9)
        adv_reshape = tf.reshape(pred_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
        # Center the advantages over actions, then add the value estimate.
        pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
        pred_all = pred_all_nomean + tf.reshape(pred_val_fc, [-1, 1, self.target_dim])
        pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))
        return infer_sensory_embeddings, pred_all, pred_relevant
    def make_losses(self, infer_sensory, input_infer_sensory_preprocessed,
                    pred_relevant, targets_preprocessed, objective_indices, objective_coeffs):
        """
        Setup the losses
        Args:
            infer_sensory - dictionary of inferred modalities
            input_infer_sensory_preprocessed - dictionary of actual ground truth modalities
            pred_relevant - future target predictions that are relevant to the current action
            targets_preprocessed - ground truth of future targets
            objective_indices - unused in the active code (see commented-out lines)
            objective_coeffs - unused in the active code (see commented-out lines)
        Returns:
            full_loss
            errs_to_print
            short_summary
            detailed_summary
        """
        # make a loss function and compute some summary numbers
        # Future prediction target losses
        per_target_loss = my_ops.mse_ignore_nans(pred_relevant, targets_preprocessed, reduction_indices=0)
        target_loss = tf.reduce_sum(per_target_loss)
        # Inferred modality losses
        per_infer_sensory_loss = []
        for modality in self.infer_modalities:
            # TODO: crossentropy loss for roomType (one per history element)
            sensory_loss = my_ops.mse_ignore_nans(infer_sensory[modality],
                                                  input_infer_sensory_preprocessed[modality],
                                                  reduction_indices=0)
            per_infer_sensory_loss.append(tf.reduce_sum(sensory_loss))
        infer_sensory_loss = tf.reduce_sum(per_infer_sensory_loss)
        # Combined loss
        loss = target_loss + infer_sensory_loss
        # compute objective value, just for logging purposes
        #print(objective_coeffs[None,:].shape, targets_preprocessed[:,objective_indices].get_shape())
        # TODO add multiplication by the objective_coeffs (somehow not trivial)
        obj = tf.reduce_sum(self.postprocess_predictions(targets_preprocessed), 1)
        #obj = tf.sum(self.postprocess_predictions(targets_preprocessed[:,objective_indices]) * objective_coeffs[None,:], axis=1)
        # Average the objective over targets that are not NaN.
        obj_nonan = tf.where(tf.is_nan(obj), tf.zeros_like(obj), obj)
        num_valid_targets = tf.reduce_sum(1-tf.cast(tf.is_nan(obj), tf.float32))
        mean_obj = tf.reduce_sum(obj_nonan) / num_valid_targets
        # summaries
        obj_sum = tf.summary.scalar("objective", mean_obj)
        #TODO per-target loss summaries are currently disabled
        per_target_loss_sums = []
        #per_target_loss_sums = [tf.summary.scalar(name, loss) for name,loss in zip(self.target_names,per_target_loss)]
        loss_infer_sum = tf.summary.scalar("infer_loss", infer_sensory_loss)
        loss_sum = tf.summary.scalar("full_loss", loss)
        #self.per_target_loss = tf.get_variable('avg_targets', [self.target_dim], initializer=tf.constant_initializer(value=0.))
        full_loss = loss
        errs_to_print = [loss]
        short_summary = [loss_sum, loss_infer_sum]
        detailed_summary = per_target_loss_sums + [obj_sum]
        return full_loss, errs_to_print, short_summary, detailed_summary
    def act_net(self, states, objective_coeffs):
        """
        Select action given a state and objective_coeffs
        Args:
            states - dict mapping modality name to a batch of observations
            objective_coeffs - objective weights; 1-D (shared) or 2-D (per sample)
        Returns:
            action - index of the best discrete action for each batch element
        """
        if objective_coeffs.ndim == 1:
            # Broadcast a single objective vector across the whole batch.
            curr_objective_coeffs = np.tile(objective_coeffs[None,:],(states['measurements'].shape[0],1))
        else:
            curr_objective_coeffs = objective_coeffs
        feed_dict = {self.input_sensory[m]: states[m] for m in self.input_modalities}
        feed_dict.update({self.input_objective_coeffs: curr_objective_coeffs})
        predictions = self.sess.run(self.pred_all, feed_dict=feed_dict)
        # Weight predicted targets by the objective and pick the best action.
        self.curr_predictions = predictions[:,:,self.objective_indices]*curr_objective_coeffs[:,None,:]
        self.curr_objectives = np.sum(self.curr_predictions, axis=2)
        #print(predictions[:,:,self.objective_params[0]])
        #print(curr_objective)
        #print(objectives)
        #print(np.argmax(objectives, axis=1))
        curr_action = np.argmax(self.curr_objectives, axis=1)
        #self.previous_actions = curr_action
        return curr_action
|
import numpy as np
import tensorflow as tf
from dfp import tf_ops as my_ops
from dfp.agent import Agent
class AgentMultimodalAdvantage(Agent):
    """DFP agent that embeds several sensory modalities, optionally infers
    unobserved modalities from the observed ones, and predicts future
    targets with a value/advantage (dueling-style) head — one advantage
    vector per discrete action.
    """
    def make_net(self, input_sensory, input_actions, input_objectives, reuse=False):
        """
        Hooks up network for inferring non-observed modalities for given time step and for
        predicting future targets (measurement + subset of modalities)
        Args:
            input_sensory - tf placeholder for all modalities (includes both observed and ground truth for to be inferred modalities)
            input_actions - tf placeholder for one hot vector representation of action taken by agent
            input_objectives - tf placeholder for objective coefficients
                (objective is a linear weighted function of predicted measurements, these are the weights specified by the user)
        Returns:
            infer_sensory_embeddings - dictionary with computed embeddings for infer_modalities
            pred_all - All future target predictions (for all actions)
            pred_relevant - Relevant future target prediction (for current action)
        """
        if reuse:
            tf.get_variable_scope().reuse_variables()
        if self.random_objective_coeffs:
            assert isinstance(self.obj_fc_params, np.ndarray), 'Need fc_obj_params with randomized objectives'
        # Value branch outputs one target vector; advantage branch outputs
        # one target vector per discrete action.
        self.fc_val_params = np.copy(self.joint_fc_params)
        self.fc_val_params['out_dims'][-1] = self.target_dim
        self.fc_adv_params = np.copy(self.joint_fc_params)
        self.fc_adv_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        sensory_embeddings = {}
        # build nets to embed input_modalities into concatenated representation
        for modality in self.input_modalities:
            if modality == 'color':
                img_conv = my_ops.conv_encoder(input_sensory['color'], self.img_conv_params, 'img_conv', msra_coeff=0.9)
                img_fc = my_ops.fc_net(my_ops.flatten(img_conv), self.img_fc_params, 'img_fc', msra_coeff=0.9)
                sensory_embeddings['color'] = img_fc
            elif modality == 'depth':
                depth_conv = my_ops.conv_encoder(input_sensory['depth'], self.depth_conv_params, 'depth_conv', msra_coeff=0.9)
                depth_fc = my_ops.fc_net(my_ops.flatten(depth_conv), self.depth_fc_params, 'depth_fc', msra_coeff=0.9)
                sensory_embeddings['depth'] = depth_fc
            elif modality == 'measurements':
                meas_fc = my_ops.fc_net(input_sensory['measurements'], self.meas_fc_params, 'meas_fc', msra_coeff=0.9)
                sensory_embeddings['measurements'] = meas_fc
            elif modality == 'force':
                force_fc = my_ops.fc_net(input_sensory['force'], self.force_fc_params, 'force_fc', msra_coeff=0.9)
                sensory_embeddings['force'] = force_fc
            elif modality == 'audiopath':
                audiopath_fc = my_ops.fc_net(input_sensory['audiopath'], self.audiopath_fc_params, 'audiopath_fc', msra_coeff=0.9)
                sensory_embeddings['audiopath'] = audiopath_fc
            elif modality == 'audio':
                audio_fc = my_ops.fc_net(input_sensory['audio'], self.audio_fc_params, 'audio_fc', msra_coeff=0.9)
                sensory_embeddings['audio'] = audio_fc
            elif modality == 'actions':
                actions_fc = my_ops.fc_net(input_sensory['actions'], self.actions_fc_params, 'actions_fc', msra_coeff=0.9)
                sensory_embeddings['actions'] = actions_fc
            elif modality == 'goalRoomType':
                goal_roomtype_fc = my_ops.fc_net(input_sensory['goalRoomType'], self.goalroomtype_fc_params, 'goalroomtype_fc', msra_coeff=0.9)
                sensory_embeddings['goalRoomType'] = goal_roomtype_fc
            elif modality == 'roomType':
                roomtype_fc = my_ops.fc_net(input_sensory['roomType'], self.roomtype_fc_params, 'roomtype_fc', msra_coeff=0.9)
                sensory_embeddings['roomType'] = roomtype_fc
            else:
                raise Exception('Unsupported input modality %s' % modality)
        # is there a better way to get values from a dictionary ordered by key?
        # NOTE: sorted() keeps the concatenation order deterministic.
        input_concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
        # infer modalities from input_concat_fc
        infer_sensory_embeddings = {}
        for modality in self.infer_modalities:
            if modality == 'measurements':
                # handle this one below (it conditions on the other inferred modalities)
                pass
            elif modality == 'roomType':
                self.infer_roomtype_fc_params['out_dims'][-1] = input_sensory['roomType'].get_shape().as_list()[1]
                roomtype_fc = my_ops.fc_net_with_soft_max(input_concat_fc, self.infer_roomtype_fc_params, 'infer_roomType_fc', msra_coeff=0.9)
                sensory_embeddings[modality] = roomtype_fc
                infer_sensory_embeddings[modality] = roomtype_fc
            else:
                raise Exception('Unsupported infer modality %s' % modality)
        if 'measurements' in self.infer_modalities:
            input_inferred_concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
            self.infer_meas_fc_params['out_dims'][-1] = input_sensory['measurements'].get_shape().as_list()[1]
            meas_fc = my_ops.fc_net(input_inferred_concat_fc, self.infer_meas_fc_params, 'infer_meas_fc', msra_coeff=0.9)
            sensory_embeddings['measurements'] = meas_fc
            infer_sensory_embeddings['measurements'] = meas_fc
        # add objectives to embedding
        if isinstance(self.obj_fc_params, np.ndarray):
            obj_fc = my_ops.fc_net(input_objectives, self.obj_fc_params, 'obj_fc', msra_coeff=0.9)
            sensory_embeddings['objectives'] = obj_fc
        # final input + inferred concatenated
        concat_fc = tf.concat([sensory_embeddings[modality] for modality in sorted(sensory_embeddings)], 1)
        # predicted expectation over all actions
        pred_val_fc = my_ops.fc_net(concat_fc, self.fc_val_params, 'pred_val_fc', last_linear=True, msra_coeff=0.9)
        # predicted action-conditional differences
        pred_adv_fc = my_ops.fc_net(concat_fc, self.fc_adv_params, 'pred_adv_fc', last_linear=True, msra_coeff=0.9)
        adv_reshape = tf.reshape(pred_adv_fc, [-1, len(self.net_discrete_actions), self.target_dim])
        # Center the advantages over actions, then add the value estimate.
        pred_all_nomean = adv_reshape - tf.reduce_mean(adv_reshape, reduction_indices=1, keep_dims=True)
        pred_all = pred_all_nomean + tf.reshape(pred_val_fc, [-1, 1, self.target_dim])
        pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))
        return infer_sensory_embeddings, pred_all, pred_relevant
    def make_losses(self, infer_sensory, input_infer_sensory_preprocessed,
                    pred_relevant, targets_preprocessed, objective_indices, objective_coeffs):
        """
        Setup the losses
        Args:
            infer_sensory - dictionary of inferred modalities
            input_infer_sensory_preprocessed - dictionary of actual ground truth modalities
            pred_relevant - future target predictions that are relevant to the current action
            targets_preprocessed - ground truth of future targets
            objective_indices - unused in the active code (see commented-out lines)
            objective_coeffs - unused in the active code (see commented-out lines)
        Returns:
            full_loss
            errs_to_print
            short_summary
            detailed_summary
        """
        # make a loss function and compute some summary numbers
        # Future prediction target losses
        per_target_loss = my_ops.mse_ignore_nans(pred_relevant, targets_preprocessed, reduction_indices=0)
        target_loss = tf.reduce_sum(per_target_loss)
        # Inferred modality losses
        per_infer_sensory_loss = []
        for modality in self.infer_modalities:
            # TODO: crossentropy loss for roomType (one per history element)
            sensory_loss = my_ops.mse_ignore_nans(infer_sensory[modality],
                                                  input_infer_sensory_preprocessed[modality],
                                                  reduction_indices=0)
            per_infer_sensory_loss.append(tf.reduce_sum(sensory_loss))
        infer_sensory_loss = tf.reduce_sum(per_infer_sensory_loss)
        # Combined loss
        loss = target_loss + infer_sensory_loss
        # compute objective value, just for logging purposes
        #print(objective_coeffs[None,:].shape, targets_preprocessed[:,objective_indices].get_shape())
        # TODO add multiplication by the objective_coeffs (somehow not trivial)
        obj = tf.reduce_sum(self.postprocess_predictions(targets_preprocessed), 1)
        #obj = tf.sum(self.postprocess_predictions(targets_preprocessed[:,objective_indices]) * objective_coeffs[None,:], axis=1)
        # Average the objective over targets that are not NaN.
        obj_nonan = tf.where(tf.is_nan(obj), tf.zeros_like(obj), obj)
        num_valid_targets = tf.reduce_sum(1-tf.cast(tf.is_nan(obj), tf.float32))
        mean_obj = tf.reduce_sum(obj_nonan) / num_valid_targets
        # summaries
        obj_sum = tf.summary.scalar("objective", mean_obj)
        #TODO per-target loss summaries are currently disabled
        per_target_loss_sums = []
        #per_target_loss_sums = [tf.summary.scalar(name, loss) for name,loss in zip(self.target_names,per_target_loss)]
        loss_infer_sum = tf.summary.scalar("infer_loss", infer_sensory_loss)
        loss_sum = tf.summary.scalar("full_loss", loss)
        #self.per_target_loss = tf.get_variable('avg_targets', [self.target_dim], initializer=tf.constant_initializer(value=0.))
        full_loss = loss
        errs_to_print = [loss]
        short_summary = [loss_sum, loss_infer_sum]
        detailed_summary = per_target_loss_sums + [obj_sum]
        return full_loss, errs_to_print, short_summary, detailed_summary
    def act_net(self, states, objective_coeffs):
        """
        Select action given a state and objective_coeffs
        Args:
            states - dict mapping modality name to a batch of observations
            objective_coeffs - objective weights; 1-D (shared) or 2-D (per sample)
        Returns:
            action - index of the best discrete action for each batch element
        """
        if objective_coeffs.ndim == 1:
            # Broadcast a single objective vector across the whole batch.
            curr_objective_coeffs = np.tile(objective_coeffs[None,:],(states['measurements'].shape[0],1))
        else:
            curr_objective_coeffs = objective_coeffs
        feed_dict = {self.input_sensory[m]: states[m] for m in self.input_modalities}
        feed_dict.update({self.input_objective_coeffs: curr_objective_coeffs})
        predictions = self.sess.run(self.pred_all, feed_dict=feed_dict)
        # Weight predicted targets by the objective and pick the best action.
        self.curr_predictions = predictions[:,:,self.objective_indices]*curr_objective_coeffs[:,None,:]
        self.curr_objectives = np.sum(self.curr_predictions, axis=2)
        #print(predictions[:,:,self.objective_params[0]])
        #print(curr_objective)
        #print(objectives)
        #print(np.argmax(objectives, axis=1))
        curr_action = np.argmax(self.curr_objectives, axis=1)
        #self.previous_actions = curr_action
        return curr_action
|
en
| 0.70897
|
Hooks up network for inferring non-observed modalities for given time step and for predicting future targets (measurement + subset of modalities) Args: input_sensory - tf placeholder for all modalities (includes both observed and ground truth for to be inferred modalities) input_actions - tf placeholder for one hot vector representation of action taken by agent input_objectives - tf placeholder for objective coefficients (objective is a linear weighted function of predicted measurements, these are the weights specified by the user) Returns: infer_sensory_embeddings - dictionary with computed embeddings for infer_modalities pred_all - All future target predictions (for all actions) pred_relevant - Relevant future target prediction (for current action) # build nets to embed input_modalities into concatenated representation # is there a better way to get values from a dictionary ordered by key? # infer modalities from input_concat_fc # handle this one below # add objectives to embedding # final input + inferred concatenated # predicted expectation over all actions # predicted action-conditional differences Setup the losses Args: infer_sensory - dictionary of inferred modalities input_infer_sensory_preprocessed - dictionary of actual ground truth modalities pred_relevant - future target predictions that are relevant to the current action targets_preprocessed - ground truth of future targets objective_indices objective_coeffs Returns: full_loss errs_to_print short_summary detailed_summary # make a loss function and compute some summary numbers # Future prediction target losses # Inferred modality losses # TODO: crossentropy loss for roomType (one per history element) # Combined loss # compute objective value, just for logging purposes #print(objective_coeffs[None,:].shape, targets_preprocessed[:,objective_indices].get_shape()) # TODO add multiplication by the objective_coeffs (somehow not trivial) #obj = 
tf.sum(self.postprocess_predictions(targets_preprocessed[:,objective_indices]) * objective_coeffs[None,:], axis=1) # summaries #TODO #per_target_loss_sums = [tf.summary.scalar(name, loss) for name,loss in zip(self.target_names,per_target_loss)] #self.per_target_loss = tf.get_variable('avg_targets', [self.target_dim], initializer=tf.constant_initializer(value=0.)) Select action given a state and objective_coeffs Args: states objective_coeffs Returns: action #print(predictions[:,:,self.objective_params[0]]) #print(curr_objective) #print(objectives) #print(np.argmax(objectives, axis=1)) #self.previous_actions = curr_action
| 2.412423
| 2
|
python-interface/src/MiniBotFramework/Sound/note_library.py
|
cornell-cup/cs-minibot-platform
| 10
|
6627479
|
<gh_stars>1-10
import pygame.mixer as pm
from pygame_play_tone import Note
from time import sleep
# Default volume (0.0 - 1.0) used when playNote/blurNote get no explicit volume.
DEFAULT_VOLUME=0.2
# Notes that can be called on, where C4 is middle C.
# Frequencies in Hz; _SHARP names the semitone above the natural note.
C0 = 16.35
C0_SHARP = 17.32
D0 = 18.35
D0_SHARP = 19.45
E0 = 20.6
F0 = 21.83
F0_SHARP = 23.12
G0 = 24.5
G0_SHARP = 25.96
A0 = 27.5
A0_SHARP = 29.14
B0 = 30.87
C1 = 32.7
C1_SHARP = 34.65
D1 = 36.71
D1_SHARP = 38.89
E1 = 41.2
F1 = 43.65
F1_SHARP = 46.25
G1 = 49
G1_SHARP = 51.91
A1 = 55
A1_SHARP = 58.27
B1 = 61.74
C2 = 65.41
C2_SHARP = 69.3
D2 = 73.42
D2_SHARP = 77.78
E2 = 82.41
F2 = 87.31
F2_SHARP = 92.5
G2 = 98
G2_SHARP = 103.83
A2 = 110
A2_SHARP = 116.54
B2 = 123.47
C3 = 130.81
C3_SHARP = 138.59
D3 = 146.83
D3_SHARP = 155.56
E3 = 164.81
F3 = 174.61
F3_SHARP = 185
G3 = 196
G3_SHARP = 207.65
A3 = 220
A3_SHARP = 233.08
B3 = 246.94
C4 = 261.63
C4_SHARP = 277.18
D4 = 293.66
D4_SHARP = 311.13
E4 = 329.63
F4 = 349.23
F4_SHARP = 369.99
G4 = 392
G4_SHARP = 415.3
A4 = 440
A4_SHARP = 466.16
B4 = 493.88
C5 = 523.25
C5_SHARP = 554.37
D5 = 587.33
D5_SHARP = 622.25
E5 = 659.25
F5 = 698.46
F5_SHARP = 739.99
G5 = 783.99
G5_SHARP = 830.61
A5 = 880
A5_SHARP = 932.33
B5 = 987.77
C6 = 1046.5
C6_SHARP = 1108.73
D6 = 1174.66
D6_SHARP = 1244.51
E6 = 1318.51
F6 = 1396.91
F6_SHARP = 1479.98
G6 = 1567.98
G6_SHARP = 1661.22
A6 = 1760
A6_SHARP = 1864.66
B6 = 1975.53
def prepPlaying():
    '''Set up the pygame mixer so notes can be played.

    Call once before playNote/blurNote. Requests a 44.1 kHz, signed
    16-bit, mono stream with a 1024-sample buffer.
    '''
    # pre_init must run before pm.init() for the settings to take effect.
    pm.pre_init(44100, -16, 1, 1024)
    pm.init()
def playNote(note,time,volume=DEFAULT_VOLUME):
    ''' Plays a sound of a given frequency [note] in Hertz for duration
    [time] in seconds at a particular volume, where [volume] is a
    number between 0.0 and 1.0.'''
    sound = Note(note,volume)
    sound.play(-1)  # -1 loops the tone until it is explicitly stopped
    try:
        sleep(time)
    finally:
        # Always silence the note, even if sleep() is interrupted
        # (e.g. KeyboardInterrupt); otherwise the looped tone plays forever.
        sound.stop()
def blurNote(note,time,volume=DEFAULT_VOLUME,last_note=False):
    ''' Same as playNote, but leaves the tone sounding after [time]
    seconds so successive calls blend ("blur") into a chord. To end a
    blurred sequence, finish with a playNote call, or pass
    last_note=True on the final call to stop that final note.

    NOTE(review): only this call's own Note is ever stopped; earlier
    blurred notes are never explicitly silenced -- confirm that pygame
    channel reuse takes care of them.'''
    sound = Note(note,volume)
    sound.play(-1)
    sleep(time)
    if(last_note):
        sound.stop()
|
import pygame.mixer as pm
from pygame_play_tone import Note
from time import sleep
#Default volume for Notes
DEFAULT_VOLUME=0.2
# Notes that can be called on, where C4 is middle C
C0 = 16.35
C0_SHARP = 17.32
D0 = 18.35
D0_SHARP = 19.45
E0 = 20.6
F0 = 21.83
F0_SHARP = 23.12
G0 = 24.5
G0_SHARP = 25.96
A0 = 27.5
A0_SHARP = 29.14
B0 = 30.87
C1 = 32.7
C1_SHARP = 34.65
D1 = 36.71
D1_SHARP = 38.89
E1 = 41.2
F1 = 43.65
F1_SHARP = 46.25
G1 = 49
G1_SHARP = 51.91
A1 = 55
A1_SHARP = 58.27
B1 = 61.74
C2 = 65.41
C2_SHARP = 69.3
D2 = 73.42
D2_SHARP = 77.78
E2 = 82.41
F2 = 87.31
F2_SHARP = 92.5
G2 = 98
G2_SHARP = 103.83
A2 = 110
A2_SHARP = 116.54
B2 = 123.47
C3 = 130.81
C3_SHARP = 138.59
D3 = 146.83
D3_SHARP = 155.56
E3 = 164.81
F3 = 174.61
F3_SHARP = 185
G3 = 196
G3_SHARP = 207.65
A3 = 220
A3_SHARP = 233.08
B3 = 246.94
C4 = 261.63
C4_SHARP = 277.18
D4 = 293.66
D4_SHARP = 311.13
E4 = 329.63
F4 = 349.23
F4_SHARP = 369.99
G4 = 392
G4_SHARP = 415.3
A4 = 440
A4_SHARP = 466.16
B4 = 493.88
C5 = 523.25
C5_SHARP = 554.37
D5 = 587.33
D5_SHARP = 622.25
E5 = 659.25
F5 = 698.46
F5_SHARP = 739.99
G5 = 783.99
G5_SHARP = 830.61
A5 = 880
A5_SHARP = 932.33
B5 = 987.77
C6 = 1046.5
C6_SHARP = 1108.73
D6 = 1174.66
D6_SHARP = 1244.51
E6 = 1318.51
F6 = 1396.91
F6_SHARP = 1479.98
G6 = 1567.98
G6_SHARP = 1661.22
A6 = 1760
A6_SHARP = 1864.66
B6 = 1975.53
def prepPlaying():
''' Initializes environment to play pygame noises '''
pm.pre_init(44100, -16, 1, 1024)
# pygame.init()
# pm.init() #Only works for non-Windows? #TODO Research this further to confirm
pm.init()
def playNote(note,time,volume=DEFAULT_VOLUME):
''' Plays a sound of a given frequency [note] in Hertz for duration
[time] in seconds at a particular volume, where [volume] is a
number between 0.0 and 1.0'''
sound = Note(note,volume)
sound.play(-1)
sleep(time)
sound.stop()
def blurNote(note,time,volume=DEFAULT_VOLUME,last_note=False):
''' Same as playNote, but will continue to play with other notes
that are not specified to stop. In order to stop blurring a
selection of notes together, have the last note be a playNote or
specify the last parameter [last_note] as True'''
sound = Note(note,volume)
sound.play(-1)
sleep(time)
if(last_note):
sound.stop()
|
en
| 0.878152
|
#Default volume for Notes # Notes that can be called on, where C4 is middle C Initializes environment to play pygame noises # pygame.init() # pm.init() #Only works for non-Windows? #TODO Research this further to confirm Plays a sound of a given frequency [note] in Hertz for duration [time] in seconds at a particular volume, where [volume] is a number between 0.0 and 1.0 Same as playNote, but will continue to play with other notes that are not specified to stop. In order to stop blurring a selection of notes together, have the last note be a playNote or specify the last parameter [last_note] as True
| 2.320251
| 2
|
venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py
|
itsAbdulKhadar/Machine-Learning-with-Streamlit
| 5
|
6627480
|
<filename>venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import StringIO
import sys
import unittest
import pytest
import tornado
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
def _init_asyncio_patch():
    """Make the default asyncio policy compatible with tornado on Windows.

    Tornado < 6.1 cannot run on the ProactorEventLoop that became the
    Windows default in Python 3.8, so fall back to the older
    SelectorEventLoopPolicy when that known-bad combination is detected.
    Run as early as possible so any later policy choice overrides it.

    ref: https://github.com/tornadoweb/tornado/issues/2608
    FIXME: drop once the tornado floor is >= 6.1 for py38+.
    """
    wants_patch = (
        sys.platform.startswith("win")
        and sys.version_info >= (3, 8)
        and tornado.version_info < (6, 1)
    )
    if not wants_patch:
        return
    import asyncio
    try:
        from asyncio import (
            WindowsProactorEventLoopPolicy,
            WindowsSelectorEventLoopPolicy,
        )
    except ImportError:
        # Build without the Windows policies: nothing to patch.
        return
    if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
        asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
class InProcessKernelTestCase(unittest.TestCase):
    """Exercises the in-process kernel via a blocking client."""

    def setUp(self):
        # Patch the event-loop policy first: the kernel starts tornado.
        _init_asyncio_patch()
        self.km = InProcessKernelManager()
        self.km.start_kernel()
        self.kc = self.km.client()
        self.kc.start_channels()
        self.kc.wait_for_ready()
    @skipif_not_matplotlib
    def test_pylab(self):
        """Does %pylab work in the in-process kernel?"""
        kc = self.kc
        kc.execute('%pylab')
        out, err = assemble_output(kc.iopub_channel)
        self.assertIn('matplotlib', out)
    def test_raw_input(self):
        """ Does the in-process kernel handle raw_input correctly?
        """
        # Feed canned stdin, then restore the real stream no matter what.
        io = StringIO('foobar\n')
        sys_stdin = sys.stdin
        sys.stdin = io
        try:
            self.kc.execute('x = input()')
        finally:
            sys.stdin = sys_stdin
        assert self.km.kernel.shell.user_ns.get('x') == 'foobar'
    @pytest.mark.skipif(
        '__pypy__' in sys.builtin_module_names,
        reason="fails on pypy"
    )
    def test_stdout(self):
        """ Does the in-process kernel correctly capture IO?
        """
        kernel = InProcessKernel()
        # Without a frontend attached, output lands on real stdout.
        with capture_output() as io:
            kernel.shell.run_cell('print("foo")')
        assert io.stdout == 'foo\n'
        # With a frontend attached, output is routed to the iopub channel.
        kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
        kernel.frontends.append(kc)
        kc.execute('print("bar")')
        out, err = assemble_output(kc.iopub_channel)
        assert out == 'bar\n'
    def test_getpass_stream(self):
        "Tests that kernel getpass accept the stream parameter"
        kernel = InProcessKernel()
        kernel._allow_stdin = True
        # Stub the input request so no real prompt is issued.
        kernel._input_request = lambda *args, **kwargs : None
        kernel.getpass(stream='non empty')
|
<filename>venv/Lib/site-packages/ipykernel/inprocess/tests/test_kernel.py
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import StringIO
import sys
import unittest
import pytest
import tornado
from ipykernel.inprocess.blocking import BlockingInProcessKernelClient
from ipykernel.inprocess.manager import InProcessKernelManager
from ipykernel.inprocess.ipkernel import InProcessKernel
from ipykernel.tests.utils import assemble_output
from IPython.testing.decorators import skipif_not_matplotlib
from IPython.utils.io import capture_output
def _init_asyncio_patch():
"""set default asyncio policy to be compatible with tornado
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows
Pick the older SelectorEventLoopPolicy on Windows
if the known-incompatible default policy is in use.
do this as early as possible to make it a low priority and overrideable
ref: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if sys.platform.startswith("win") and sys.version_info >= (3, 8) and tornado.version_info < (6, 1):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
# WindowsProactorEventLoopPolicy is not compatible with tornado 6
# fallback to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
class InProcessKernelTestCase(unittest.TestCase):
def setUp(self):
_init_asyncio_patch()
self.km = InProcessKernelManager()
self.km.start_kernel()
self.kc = self.km.client()
self.kc.start_channels()
self.kc.wait_for_ready()
@skipif_not_matplotlib
def test_pylab(self):
"""Does %pylab work in the in-process kernel?"""
kc = self.kc
kc.execute('%pylab')
out, err = assemble_output(kc.iopub_channel)
self.assertIn('matplotlib', out)
def test_raw_input(self):
""" Does the in-process kernel handle raw_input correctly?
"""
io = StringIO('foobar\n')
sys_stdin = sys.stdin
sys.stdin = io
try:
self.kc.execute('x = input()')
finally:
sys.stdin = sys_stdin
assert self.km.kernel.shell.user_ns.get('x') == 'foobar'
@pytest.mark.skipif(
'__pypy__' in sys.builtin_module_names,
reason="fails on pypy"
)
def test_stdout(self):
""" Does the in-process kernel correctly capture IO?
"""
kernel = InProcessKernel()
with capture_output() as io:
kernel.shell.run_cell('print("foo")')
assert io.stdout == 'foo\n'
kc = BlockingInProcessKernelClient(kernel=kernel, session=kernel.session)
kernel.frontends.append(kc)
kc.execute('print("bar")')
out, err = assemble_output(kc.iopub_channel)
assert out == 'bar\n'
def test_getpass_stream(self):
"Tests that kernel getpass accept the stream parameter"
kernel = InProcessKernel()
kernel._allow_stdin = True
kernel._input_request = lambda *args, **kwargs : None
kernel.getpass(stream='non empty')
|
en
| 0.780438
|
# Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. set default asyncio policy to be compatible with tornado Tornado 6 (at least) is not compatible with the default asyncio implementation on Windows Pick the older SelectorEventLoopPolicy on Windows if the known-incompatible default policy is in use. do this as early as possible to make it a low priority and overrideable ref: https://github.com/tornadoweb/tornado/issues/2608 FIXME: if/when tornado supports the defaults in asyncio, remove and bump tornado requirement for py38 # not affected # WindowsProactorEventLoopPolicy is not compatible with tornado 6 # fallback to the pre-3.8 default of Selector Does %pylab work in the in-process kernel? Does the in-process kernel handle raw_input correctly? Does the in-process kernel correctly capture IO?
| 1.792866
| 2
|
src/sql_table.py
|
rocfelix/datascience_unittest
| 0
|
6627481
|
<filename>src/sql_table.py<gh_stars>0
from typing import Any, Union, Dict
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
class DummySqlDB():
    """MS SQL Server connection class.

    Wraps a SQLAlchemy engine plus a session factory; query_sql reads
    result sets into pandas, run_statement performs parameterised inserts.
    """
    def __init__(self):
        # Engine is created eagerly; a fresh session is made per statement.
        self.engine = self._create_engine()
        self.session_maker = sessionmaker(bind=self.engine)
    def _create_engine(self) -> Any:
        """Create SQLAlchemy engine (placeholder connection string)."""
        database_url = 'dummy connect string'
        return sqlalchemy.create_engine(
            database_url, fast_executemany=True, pool_pre_ping=True
        )
    def query_sql(
        self, sql: Union[str, Dict], **kwargs
    ) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
        """Run one query (str) or several named queries (dict of
        name -> read_sql kwargs), returning a DataFrame or a dict of them."""
        if isinstance(sql, str):
            return pd.read_sql(sql, con=self.engine, **kwargs)
        if isinstance(sql, dict):
            data = dict()
            for name, query in sql.items():
                data[name] = pd.read_sql(**query, con=self.engine)
            return data
        raise ValueError("sql argument must be str or dict")
    def transform(self):
        """Fetch MYTABLE and tag every row with a constant 'test' column."""
        df = self.query_sql("select * from MYTABLE")
        df['test'] = 'test'
        return df
    def run_statement(self, statement, dicts):
        """Insert one row: `statement` is the table name, `dicts` maps
        column name -> value (values are bound as :name parameters).

        SECURITY NOTE(review): the table name and column names are
        interpolated into the SQL text via f-string -- safe only if
        `statement` and the keys of `dicts` never come from untrusted
        input; confirm with callers.
        """
        string = f"INSERT INTO {statement} ({', '.join(dicts.keys())}) VALUES (:{', :'.join(dicts.keys())})"
        session = self.session_maker()
        try:
            session.execute(string, dicts)
            session.commit()
        finally:
            # NOTE(review): no explicit rollback on failure before close();
            # confirm the connection pool discards the open transaction.
            session.close()
|
<filename>src/sql_table.py<gh_stars>0
from typing import Any, Union, Dict
import pandas as pd
import sqlalchemy
from sqlalchemy.orm import sessionmaker
class DummySqlDB():
"""MS SQL Server connection class"""
def __init__(self):
self.engine = self._create_engine()
self.session_maker = sessionmaker(bind=self.engine)
def _create_engine(self) -> Any:
"""Create SQLAlchemy engine"""
database_url = 'dummy connect string'
return sqlalchemy.create_engine(
database_url, fast_executemany=True, pool_pre_ping=True
)
def query_sql(
self, sql: Union[str, Dict], **kwargs
) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
if isinstance(sql, str):
return pd.read_sql(sql, con=self.engine, **kwargs)
if isinstance(sql, dict):
data = dict()
for name, query in sql.items():
data[name] = pd.read_sql(**query, con=self.engine)
return data
raise ValueError("sql argument must be str or dict")
def transform(self):
df = self.query_sql("select * from MYTABLE")
df['test'] = 'test'
return df
def run_statement(self, statement, dicts):
string = f"INSERT INTO {statement} ({', '.join(dicts.keys())}) VALUES (:{', :'.join(dicts.keys())})"
session = self.session_maker()
try:
session.execute(string, dicts)
session.commit()
finally:
session.close()
|
en
| 0.62411
|
MS SQL Server connection class Create SQLAlchemy engine
| 2.84347
| 3
|
23_EntryNodeInListLoop/EntryNodeInListLoop.py
|
DevRoss/CodingInterviewChinese2
| 1
|
6627482
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-15
class ListNode:
    # Minimal singly linked list node for the cycle-entry exercise.
    def __init__(self, x):
        self.val = x  # payload; self.next is the successor (None at tail)
        self.next = None
def count_node(p: ListNode):
    """Return the length of the cycle that node *p* lies on.

    Precondition: *p* is inside a cycle. A slow pointer (step 1) and a
    fast pointer (step 2) both leave *p*; they coincide again after the
    slow pointer has completed exactly one lap, so the number of steps
    taken equals the cycle length.
    """
    slow = p
    fast = p
    length = 0
    while True:
        slow = slow.next
        fast = fast.next.next
        length += 1
        if slow == fast:
            return length
def solve(head: ListNode):
    """Return the entry node of the cycle in the list, or None if acyclic.

    Two-phase Floyd algorithm: (1) detect a meeting point with slow/fast
    pointers; (2) measure the cycle length k, then advance two pointers
    k apart from the head in lockstep -- they coincide at the entry node.
    """
    if not head:
        return None
    p_fast = head
    p_slow = head
    # Phase 1: determine whether the list contains a cycle at all.
    while p_fast and p_slow:
        p_slow = p_slow.next
        p_fast = p_fast.next
        if p_fast is None or p_fast.next is None:
            return None  # fast pointer fell off the end: no cycle
        else:
            p_fast = p_fast.next
        if p_slow == p_fast:
            break
    # A cycle exists; p_slow now sits somewhere inside it.
    num_node = count_node(p_slow)
    p_slow = head
    p_fast = head
    # Advance the leading pointer num_node steps ahead of the head.
    for i in range(num_node):
        p_fast = p_fast.next
    # Step both together; they meet exactly at the cycle's entry node.
    while p_slow != p_fast:
        p_slow = p_slow.next
        p_fast = p_fast.next
    return p_slow
if __name__ == '__main__':
    # Build the list 1 -> 2 -> ... -> 11. The original loops never
    # advanced p (p stayed at head), so the "11-node" fixture was really
    # a 2-node list and p_entry was head; the asserts passed only by
    # coincidence. Advancing p restores the intended test.
    head = ListNode(1)
    p = head
    for i in range(2, 8):
        p.next = ListNode(i)
        p = p.next
    p_entry = p  # node 7: the intended entry of the cycle
    for i in range(8, 12):
        p.next = ListNode(i)
        p = p.next
    assert solve(head) is None          # still acyclic
    p.next = p_entry                    # close the cycle: 11 -> 7
    assert solve(head) is p_entry
    assert solve(ListNode(1)) is None   # single node, no cycle
    assert solve(None) is None          # empty list
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 19-1-15
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def count_node(p: ListNode):
p_slow = p
p_fast = p
c = 0
while True:
p_slow = p_slow.next
p_fast = p_fast.next.next
c += 1
if p_slow == p_fast:
break
return c
def solve(head: ListNode):
if not head:
return None
p_fast = head
p_slow = head
# 判断有没有环
while p_fast and p_slow:
p_slow = p_slow.next
p_fast = p_fast.next
if p_fast is None or p_fast.next is None:
return None
else:
p_fast = p_fast.next
if p_slow == p_fast:
break
# 有环
num_node = count_node(p_slow)
p_slow = head
p_fast = head
# 前面的指针向前走num_node步
for i in range(num_node):
p_fast = p_fast.next
while p_slow != p_fast:
p_slow = p_slow.next
p_fast = p_fast.next
return p_slow
if __name__ == '__main__':
head = ListNode(1)
p = head
for i in range(2, 8):
p.next = ListNode(i)
p_entry = p
for i in range(8, 12):
p.next = ListNode(i)
assert solve(head) is None
p.next = p_entry
assert solve(head) is p_entry
assert solve(ListNode(1)) is None
assert solve(None) is None
|
zh
| 0.393188
|
#!/usr/bin/python3 # -*- coding: utf-8 -*- # Created by Ross on 19-1-15 # 判断有没有环 # 有环 # 前面的指针向前走num_node步
| 3.738836
| 4
|
app.py
|
doraqmon/DSCI-532_gr202_dashboard
| 2
|
6627483
|
<reponame>doraqmon/DSCI-532_gr202_dashboard<filename>app.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import altair as alt
import pandas as pd
import geopandas as gpd
import json
import dash_core_components as dcc
from helpers import *
alt.data_transformers.disable_max_rows()
# alt.data_transformers.enable('json')
#alt.data_transformers.enable('data_server')
geo_json_file_loc= 'data/Boston_Neighborhoods.geojson'
gdf = get_gpd_df()
# Import boston crimes
df = pd.read_csv("data/crime.csv", encoding = 'latin-1')
# filter for needed columns
df = df[["DISTRICT", "YEAR", "MONTH", "DAY_OF_WEEK", "HOUR", "OFFENSE_CODE_GROUP"]]
# map district to neighbourhoods
df['DISTRICT'] = df['DISTRICT'].replace(
{'A1': 'Downtown',
'A7': 'East Boston',
'A15': 'Charleston',
'B2': 'Roxbury',
'B3': 'Mattapan',
'C6': 'South Boston',
'C11': 'Dorchester',
'D4': 'South End',
'D14': 'Brighton',
'E5': 'West Roxbury',
'E13': 'Jamaica Plain',
'E18': 'Hyde Park'})
# filter out incomplete data from 1st and last month
df = df.query('~((YEAR == 2015 & MONTH ==6) | (YEAR == 2018 & MONTH == 9))')
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
# for dictionary comprehension
crime_list = list(df['OFFENSE_CODE_GROUP'].unique())
crime_list.sort()
neighbourhood_list = list(df['DISTRICT'].unique())
neighbourhood_list = [x for x in neighbourhood_list if str(x) != 'nan']
neighbourhood_list.sort()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title = 'Boston Crime App'
# colour dictionary
colors = {"white": "#ffffff",
"light_grey": "#d2d7df",
"ubc_blue": "#082145"
}
app.layout = html.Div(style={'backgroundColor': colors['white']}, children = [
# HEADER
html.Div(className = 'row', style = {'backgroundColor': colors["ubc_blue"], "padding" : 10}, children = [
html.H2('Boston Crime Dashboard', style={'color' : colors["white"]}),
html.P("This Dash app will allow users to explore crime in Boston acrosss time and space. The data set consists of over 300,000 Boston crime records between 2015 and 2018. Simply drag the sliders to select your desired year range. Select one or multiple values from the drop down menus to select which neighbourhoods or crimes you would like to explore. These options will filter all the graphs in the dashboard.",
style={'color' : colors["white"]})
]),
# BODY
html.Div(className = "row", children = [
#SIDE BAR
html.Div(className = "two columns", style = {'backgroundColor': colors['light_grey'], 'padding': 20}, children= [
html.P("Filter by Year"),
dcc.RangeSlider(
id = 'year-slider',
min=2015,
max=2018,
step=1,
marks={
2015: '2015',
2016: '2016',
2017: '2017',
2018: '2018'
},
value=[2015,2018],
),
html.Br(),
html.Br(),
html.P("Filter by Neighbourhood"),
dcc.Dropdown(
id = 'neighbourhood-dropdown',
options=[{'label': neighbourhood.title(), 'value': neighbourhood} for neighbourhood in neighbourhood_list],
value=None, style=dict(width='100%'),
multi=True
),
html.Br(),
html.P("Filter by Crime"),
dcc.Dropdown(
id = 'crime-dropdown',
options=[{'label': crime.title(), 'value': crime} for crime in crime_list],
value=None, style=dict(width='100%'),
multi=True
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
]),
# MAIN PLOTS
html.Div(className = "row", children = [
html.Div(className = "five columns", children=[
html.Iframe(
sandbox='allow-scripts',
id='choro-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
html.Iframe(
sandbox='allow-scripts',
id='trend-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
]),
html.Div(className = "five columns", children = [
html.Iframe(
sandbox='allow-scripts',
id='heatmap-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
html.Iframe(
sandbox='allow-scripts',
id='bar-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
])
]),
]),
# FOOTER
html.Div(className = 'row', style = {'backgroundColor': colors["light_grey"], "padding" : 4}, children = [
html.P("This dashboard was made collaboratively by the DSCI 532 Group 202 in 2019.",
style={'color' : colors["ubc_blue"]}),
dcc.Link('Data Source ', href='https://www.kaggle.com/ankkur13/boston-crime-data'),
html.Br(),
dcc.Link('Github Repo', href='https://github.com/UBC-MDS/DSCI-532_gr202_dashboard')
]),
])
@app.callback(
    dash.dependencies.Output('choro-plot', 'srcDoc'),
    [dash.dependencies.Input('year-slider', 'value'),
    dash.dependencies.Input('neighbourhood-dropdown', 'value'),
    dash.dependencies.Input('crime-dropdown', 'value')])
def update_choro_plot(year_value, neighbourhood_value, crime_value):
    """Re-render the choropleth map whenever a filter control changes;
    returns the Altair chart as HTML for the iframe's srcDoc."""
    return make_choro_plot(df, gdf, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
    dash.dependencies.Output('trend-plot', 'srcDoc'),
    [dash.dependencies.Input('year-slider', 'value'),
    dash.dependencies.Input('neighbourhood-dropdown', 'value'),
    dash.dependencies.Input('crime-dropdown', 'value')])
def update_trend_plot(year_value, neighbourhood_value, crime_value):
    """Re-render the crime-trend chart whenever a filter control changes;
    returns the Altair chart as HTML for the iframe's srcDoc."""
    return make_trend_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
    dash.dependencies.Output('heatmap-plot', 'srcDoc'),
    [dash.dependencies.Input('year-slider', 'value'),
    dash.dependencies.Input('neighbourhood-dropdown', 'value'),
    dash.dependencies.Input('crime-dropdown', 'value')])
def update_heatmap_plot(year_value, neighbourhood_value, crime_value):
    """Re-render the day/hour heatmap whenever a filter control changes;
    returns the Altair chart as HTML for the iframe's srcDoc."""
    return make_heatmap_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
    dash.dependencies.Output('bar-plot', 'srcDoc'),
    [dash.dependencies.Input('year-slider', 'value'),
    dash.dependencies.Input('neighbourhood-dropdown', 'value'),
    dash.dependencies.Input('crime-dropdown', 'value')])
def update_bar_plot(year_value, neighbourhood_value, crime_value):
    """Re-render the crime-count bar chart whenever a filter control
    changes; returns the Altair chart as HTML for the iframe's srcDoc."""
    return make_bar_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
if __name__ == '__main__':
    # Debug mode enables hot reload; disable for production deployment.
    app.run_server(debug=True)
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import altair as alt
import pandas as pd
import geopandas as gpd
import json
import dash_core_components as dcc
from helpers import *
alt.data_transformers.disable_max_rows()
# alt.data_transformers.enable('json')
#alt.data_transformers.enable('data_server')
geo_json_file_loc= 'data/Boston_Neighborhoods.geojson'
gdf = get_gpd_df()
# Import boston crimes
df = pd.read_csv("data/crime.csv", encoding = 'latin-1')
# filter for needed columns
df = df[["DISTRICT", "YEAR", "MONTH", "DAY_OF_WEEK", "HOUR", "OFFENSE_CODE_GROUP"]]
# map district to neighbourhoods
df['DISTRICT'] = df['DISTRICT'].replace(
{'A1': 'Downtown',
'A7': 'East Boston',
'A15': 'Charleston',
'B2': 'Roxbury',
'B3': 'Mattapan',
'C6': 'South Boston',
'C11': 'Dorchester',
'D4': 'South End',
'D14': 'Brighton',
'E5': 'West Roxbury',
'E13': 'Jamaica Plain',
'E18': 'Hyde Park'})
# filter out incomplete data from 1st and last month
df = df.query('~((YEAR == 2015 & MONTH ==6) | (YEAR == 2018 & MONTH == 9))')
# register the custom theme under a chosen name
alt.themes.register('mds_special', mds_special)
# enable the newly registered theme
alt.themes.enable('mds_special')
# for dictionary comprehension
crime_list = list(df['OFFENSE_CODE_GROUP'].unique())
crime_list.sort()
neighbourhood_list = list(df['DISTRICT'].unique())
neighbourhood_list = [x for x in neighbourhood_list if str(x) != 'nan']
neighbourhood_list.sort()
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.title = 'Boston Crime App'
# colour dictionary
colors = {"white": "#ffffff",
"light_grey": "#d2d7df",
"ubc_blue": "#082145"
}
app.layout = html.Div(style={'backgroundColor': colors['white']}, children = [
# HEADER
html.Div(className = 'row', style = {'backgroundColor': colors["ubc_blue"], "padding" : 10}, children = [
html.H2('Boston Crime Dashboard', style={'color' : colors["white"]}),
html.P("This Dash app will allow users to explore crime in Boston acrosss time and space. The data set consists of over 300,000 Boston crime records between 2015 and 2018. Simply drag the sliders to select your desired year range. Select one or multiple values from the drop down menus to select which neighbourhoods or crimes you would like to explore. These options will filter all the graphs in the dashboard.",
style={'color' : colors["white"]})
]),
# BODY
html.Div(className = "row", children = [
#SIDE BAR
html.Div(className = "two columns", style = {'backgroundColor': colors['light_grey'], 'padding': 20}, children= [
html.P("Filter by Year"),
dcc.RangeSlider(
id = 'year-slider',
min=2015,
max=2018,
step=1,
marks={
2015: '2015',
2016: '2016',
2017: '2017',
2018: '2018'
},
value=[2015,2018],
),
html.Br(),
html.Br(),
html.P("Filter by Neighbourhood"),
dcc.Dropdown(
id = 'neighbourhood-dropdown',
options=[{'label': neighbourhood.title(), 'value': neighbourhood} for neighbourhood in neighbourhood_list],
value=None, style=dict(width='100%'),
multi=True
),
html.Br(),
html.P("Filter by Crime"),
dcc.Dropdown(
id = 'crime-dropdown',
options=[{'label': crime.title(), 'value': crime} for crime in crime_list],
value=None, style=dict(width='100%'),
multi=True
),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
html.Br(),
]),
# MAIN PLOTS
html.Div(className = "row", children = [
html.Div(className = "five columns", children=[
html.Iframe(
sandbox='allow-scripts',
id='choro-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
html.Iframe(
sandbox='allow-scripts',
id='trend-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
]),
html.Div(className = "five columns", children = [
html.Iframe(
sandbox='allow-scripts',
id='heatmap-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
html.Iframe(
sandbox='allow-scripts',
id='bar-plot',
height='400',
width='500',
style={'border-width': '0px'},
),
])
]),
]),
# FOOTER
html.Div(className = 'row', style = {'backgroundColor': colors["light_grey"], "padding" : 4}, children = [
html.P("This dashboard was made collaboratively by the DSCI 532 Group 202 in 2019.",
style={'color' : colors["ubc_blue"]}),
dcc.Link('Data Source ', href='https://www.kaggle.com/ankkur13/boston-crime-data'),
html.Br(),
dcc.Link('Github Repo', href='https://github.com/UBC-MDS/DSCI-532_gr202_dashboard')
]),
])
@app.callback(
dash.dependencies.Output('choro-plot', 'srcDoc'),
[dash.dependencies.Input('year-slider', 'value'),
dash.dependencies.Input('neighbourhood-dropdown', 'value'),
dash.dependencies.Input('crime-dropdown', 'value')])
def update_choro_plot(year_value, neighbourhood_value, crime_value):
return make_choro_plot(df, gdf, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
dash.dependencies.Output('trend-plot', 'srcDoc'),
[dash.dependencies.Input('year-slider', 'value'),
dash.dependencies.Input('neighbourhood-dropdown', 'value'),
dash.dependencies.Input('crime-dropdown', 'value')])
def update_trend_plot(year_value, neighbourhood_value, crime_value):
return make_trend_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
dash.dependencies.Output('heatmap-plot', 'srcDoc'),
[dash.dependencies.Input('year-slider', 'value'),
dash.dependencies.Input('neighbourhood-dropdown', 'value'),
dash.dependencies.Input('crime-dropdown', 'value')])
def update_heatmap_plot(year_value, neighbourhood_value, crime_value):
return make_heatmap_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
@app.callback(
dash.dependencies.Output('bar-plot', 'srcDoc'),
[dash.dependencies.Input('year-slider', 'value'),
dash.dependencies.Input('neighbourhood-dropdown', 'value'),
dash.dependencies.Input('crime-dropdown', 'value')])
def update_bar_plot(year_value, neighbourhood_value, crime_value):
return make_bar_plot(df, year = year_value, neighbourhood = neighbourhood_value, crime = crime_value).to_html()
if __name__ == '__main__':
app.run_server(debug=True)
|
en
| 0.612516
|
# alt.data_transformers.enable('json') #alt.data_transformers.enable('data_server') # Import boston crimes # filter for needed columns # map district to neighbourhoods # filter out incomplete data from 1st and last month # register the custom theme under a chosen name # enable the newly registered theme # for dictionary comprehension # colour dictionary # HEADER # BODY #SIDE BAR # MAIN PLOTS # FOOTER
| 2.579844
| 3
|
pymc/examples/gp/more_examples/Geostats/getdata.py
|
matthew-brett/pymc
| 5
|
6627484
|
from numpy import *
# Download the Walker Lake example datafile from ai-geostats.org
import urllib
urllib.urlretrieve('http://www.ai-geostats.org/fileadmin/Documents/Data/walker_01.dat',filename='walker_01.dat')
# Whether to thin dataset; definitely thin it if you're running this example on your laptop!
thin = False
# Skip the 8-line header and the trailing line; parse tab-separated rows.
# NOTE(review): Python 2 idioms here (urllib.urlretrieve, file()) -- a
# Python 3 port would need urllib.request.urlretrieve and open().
l = file('walker_01.dat').read().splitlines()[8:-1]
a = array([fromstring(line,sep='\t') for line in l])
if thin:
    a=a[::5]
# Columns per the datafile: ident, x, y, v, u, t -- presumably x/y are
# coordinates and v/u/t observed values; verify against the data docs.
ident,x,y,v,u,t=a.T
mesh = vstack((x,y)).T
|
from numpy import *
# Download datafile
import urllib
urllib.urlretrieve('http://www.ai-geostats.org/fileadmin/Documents/Data/walker_01.dat',filename='walker_01.dat')
# Whhether to thin dataset; definitely thin it if you're running this example on your laptop!
thin = False
l = file('walker_01.dat').read().splitlines()[8:-1]
a = array([fromstring(line,sep='\t') for line in l])
if thin:
a=a[::5]
ident,x,y,v,u,t=a.T
mesh = vstack((x,y)).T
|
en
| 0.756763
|
# Download datafile # Whether to thin dataset; definitely thin it if you're running this example on your laptop!
| 2.819721
| 3
|
python/8kyu/classic_hello_world.py
|
Sigmanificient/codewars
| 3
|
6627485
|
"""Kata url: https://www.codewars.com/kata/57036f007fd72e3b77000023."""
class Solution:
    """Codewars classic 'Hello World' kata."""

    @staticmethod
    def main():
        """Write the canonical greeting to stdout."""
        greeting = "Hello World!"
        print(greeting)
|
"""Kata url: https://www.codewars.com/kata/57036f007fd72e3b77000023."""
class Solution:
@staticmethod
def main():
print("Hello World!")
|
en
| 0.431958
|
Kata url: https://www.codewars.com/kata/57036f007fd72e3b77000023.
| 2.575906
| 3
|
tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py
|
MathMachado/tensorflow
| 848
|
6627486
|
<filename>tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
    reader_dataset_ops_test_base.TFRecordDatasetTestBase,
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint/restore (serialization) tests for TFRecordDataset pipelines."""

  def _build_iterator_graph(self,
                            num_epochs,
                            batch_size=1,
                            compression_type=None,
                            buffer_size=None):
    """Build a repeated, batched TFRecordDataset over fresh test files.

    Args:
      num_epochs: how many times to repeat the dataset.
      batch_size: batch size applied after the repeat.
      compression_type: None, "ZLIB" or "GZIP"; when set, the raw test
        files are first re-compressed into temp files of that format.
      buffer_size: read-ahead buffer size forwarded to TFRecordDataset.

    Returns:
      The dataset `TFRecordDataset(...).repeat(num_epochs).batch(batch_size)`.
    """
    filenames = self._createFiles()
    if compression_type == "ZLIB":
      zlib_files = []
      for i, fn in enumerate(filenames):
        with open(fn, "rb") as f:
          # Compress each raw record file into a sibling ".z" temp file.
          cdata = zlib.compress(f.read())
          zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
          with open(zfn, "wb") as f:
            f.write(cdata)
          zlib_files.append(zfn)
      filenames = zlib_files
    elif compression_type == "GZIP":
      gzip_files = []
      # NOTE(review): this branch iterates self.test_filenames while the
      # ZLIB branch iterates the `filenames` just created above — presumably
      # both name the same files; confirm against the base class.
      for i, fn in enumerate(self.test_filenames):
        with open(fn, "rb") as f:
          gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
          with gzip.GzipFile(gzfn, "wb") as gzf:
            gzf.write(f.read())
          gzip_files.append(gzfn)
      filenames = gzip_files
    return core_readers.TFRecordDataset(
        filenames, compression_type,
        buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)

  def testTFRecordWithoutBufferCore(self):
    """Serialization round-trip with buffering disabled (buffer_size=0)."""
    num_epochs = 5
    batch_size = num_epochs
    num_outputs = num_epochs * self._num_files * self._num_records // batch_size
    # pylint: disable=g-long-lambda
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, batch_size,
                                           buffer_size=0),
        num_outputs)
    # Second run: default batch_size=1, so there are batch_size times as
    # many output elements.
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, buffer_size=0),
        num_outputs * batch_size)
    # pylint: enable=g-long-lambda

  def testTFRecordWithBufferCore(self):
    """Serialization round-trip with the default read buffer."""
    num_epochs = 5
    num_outputs = num_epochs * self._num_files * self._num_records
    self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
                        num_outputs)

  def testTFRecordWithCompressionCore(self):
    """Serialization round-trip over ZLIB- and GZIP-compressed inputs."""
    num_epochs = 5
    num_outputs = num_epochs * self._num_files * self._num_records
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
        num_outputs)
    self.run_core_tests(
        lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
        num_outputs)

if __name__ == "__main__":
  test.main()
|
<filename>tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0),
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
num_outputs)
if __name__ == "__main__":
test.main()
|
en
| 0.777158
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests for the TFRecordDataset serialization. # pylint: disable=g-long-lambda # pylint: enable=g-long-lambda
| 2.060523
| 2
|
src/agents/td3/td3_utils.py
|
LeRyc/Robust-Robotic-Manipulation
| 1
|
6627487
|
<filename>src/agents/td3/td3_utils.py
import numpy as np
import torch
import torch.nn as nn
from src.agents.agent_commons import create_nn_layer
class Actor(nn.Module):
    """Deterministic policy network: maps a state to a scaled action vector."""

    def __init__(self, state_dim, action_dim, max_action, actor_layer):
        """Build the layer stack from the `actor_layer` layer definitions.

        The first layer's input width is forced to `state_dim` and the
        second-to-last layer's output width to `action_dim` (the `actor_layer`
        spec is mutated in place, as in the original). Outputs are scaled by
        `max_action` in `forward`.
        """
        super(Actor, self).__init__()
        actor_layer[0]["n_neurons"][0] = state_dim
        actor_layer[-2]["n_neurons"][1] = action_dim
        self.layer_param = actor_layer
        self.module_list = nn.ModuleList(
            create_nn_layer(spec) for spec in self.layer_param)
        self.max_action = max_action

    def forward(self, x):
        """Run the layer stack and rescale the result to the action range."""
        out = x
        for module in self.module_list:
            out = module(out)
        return out.clone() * self.max_action
class Critic(nn.Module):
    """Twin Q-networks (TD3): two independent critics over (state, action).

    Both heads are built from the same layer specification but hold
    independent parameters; `forward` returns both Q estimates and `Q1`
    returns only the first (used for the actor loss in TD3).
    """

    def __init__(self, state_dim, action_dim, critic_layer):
        super(Critic, self).__init__()
        # The first layer consumes the concatenated state-action vector.
        critic_layer[0]["n_neurons"][0] = state_dim + action_dim
        self.layer_param = critic_layer
        # Q1 architecture
        self.module_list_q1 = nn.ModuleList()
        for layer_def in self.layer_param:
            layer = create_nn_layer(layer_def)
            self.module_list_q1.append(layer)
        # Q2 architecture
        self.module_list_q2 = nn.ModuleList()
        for layer_def in self.layer_param:
            layer = create_nn_layer(layer_def)
            self.module_list_q2.append(layer)
        # NOTE(review): apparently unused legacy attribute; kept so any
        # external reader of `critic.layer` keeps working.
        self.layer = [state_dim + action_dim, 256, 256, 1]

    def forward(self, state, action):
        """Return (Q1, Q2) estimates for a batch of state-action pairs."""
        x1 = torch.cat([state, action], 1)
        x2 = torch.cat([state, action], 1)
        for layer1, layer2 in zip(self.module_list_q1, self.module_list_q2):
            x1 = layer1(x1)
            # BUG FIX: the second head must use its own layers. Previously
            # `layer1` was applied to x2 as well, so Q2 reused Q1's
            # parameters, the twin-critic trick was a no-op, and
            # module_list_q2 never received gradients.
            x2 = layer2(x2)
        return x1, x2

    def Q1(self, state, action):
        """Return only the first critic's Q estimate."""
        x = torch.cat([state, action], 1)
        for layer in self.module_list_q1:
            x = layer(x)
        return x
class ReplayBuffer(object):
    """Fixed-capacity circular experience buffer with uniform sampling."""

    def __init__(self, state_dim, action_dim, max_size=1e6):
        # BUG FIX: the arrays were previously allocated with the raw
        # `max_size` argument, which is a float by default (1e6); float
        # shapes raise TypeError in modern NumPy. Use the int-coerced
        # capacity everywhere.
        self.max_size = int(max_size)
        self.ptr = 0    # next write index (wraps around when full)
        self.size = 0   # number of valid entries currently stored
        self.state = np.zeros((self.max_size, state_dim))
        self.action = np.zeros((self.max_size, action_dim))
        self.next_state = np.zeros((self.max_size, state_dim))
        self.reward = np.zeros((self.max_size, 1))
        self.not_done = np.zeros((self.max_size, 1))
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def add(self, state, action, next_state, reward, done):
        """Store one transition, overwriting the oldest entry when full."""
        self.state[self.ptr] = state
        self.action[self.ptr] = action
        self.next_state[self.ptr] = next_state
        self.reward[self.ptr] = reward
        # Stored inverted so the TD target can multiply by `not_done`.
        self.not_done[self.ptr] = 1. - done
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        """Sample `batch_size` transitions uniformly, with replacement.

        Returns:
            Tuple of tensors (state, action, next_state, reward, not_done),
            each moved to `self.device`.
        """
        ind = np.random.randint(0, self.size, size=batch_size)
        s = torch.FloatTensor(self.state[ind]).to(self.device)
        a = torch.FloatTensor(self.action[ind]).to(self.device)
        s_ = torch.FloatTensor(self.next_state[ind]).to(self.device)
        r = torch.FloatTensor(self.reward[ind]).to(self.device)
        done = torch.FloatTensor(self.not_done[ind]).to(self.device)
        return (s, a, s_, r, done)
|
<filename>src/agents/td3/td3_utils.py
import numpy as np
import torch
import torch.nn as nn
from src.agents.agent_commons import create_nn_layer
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action, actor_layer):
super(Actor, self).__init__()
actor_layer[0]["n_neurons"][0] = state_dim
actor_layer[-2]["n_neurons"][1] = action_dim
self.layer_param = actor_layer
self.module_list = nn.ModuleList()
for layer_def in self.layer_param:
layer = create_nn_layer(layer_def)
self.module_list.append(layer)
self.max_action = max_action
def forward(self, x):
for layer in self.module_list:
x = layer(x)
x = x.clone() * self.max_action
return x
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, critic_layer):
super(Critic, self).__init__()
critic_layer[0]["n_neurons"][0] = state_dim + action_dim
self.layer_param = critic_layer
# Q1 architecture
self.module_list_q1 = nn.ModuleList()
for layer_def in self.layer_param:
layer = create_nn_layer(layer_def)
self.module_list_q1.append(layer)
# Q2 architecture
self.module_list_q2 = nn.ModuleList()
for layer_def in self.layer_param:
layer = create_nn_layer(layer_def)
self.module_list_q2.append(layer)
self.layer = [state_dim + action_dim, 256, 256, 1]
def forward(self, state, action):
x1 = torch.cat([state, action], 1)
x2 = torch.cat([state, action], 1)
for layer1, layer2 in zip(self.module_list_q1, self.module_list_q2):
x1 = layer1(x1)
x2 = layer1(x2)
return x1, x2
def Q1(self, state, action):
x = torch.cat([state, action], 1)
for layer in self.module_list_q1:
x = layer(x)
return x
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=1e6):
self.max_size = int(max_size)
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0, self.size, size=batch_size)
s = torch.FloatTensor(self.state[ind]).to(self.device)
a = torch.FloatTensor(self.action[ind]).to(self.device)
s_ = torch.FloatTensor(self.next_state[ind]).to(self.device)
r = torch.FloatTensor(self.reward[ind]).to(self.device)
done = torch.FloatTensor(self.not_done[ind]).to(self.device)
return (s, a, s_, r, done)
|
en
| 0.860792
|
# Q1 architecture # Q2 architecture
| 2.615864
| 3
|
tests/models.py
|
Django-Stack-Backend/Django-backend-React-frontend
| 1
|
6627488
|
from django.db import models
class Person(models.Model):
    # Intentionally empty model used as a minimal test fixture; it relies
    # on Django's implicit auto-created `id` primary key and has no fields.
    ...
|
from django.db import models
class Person(models.Model):
...
|
none
| 1
| 1.628768
| 2
|
|
code/UI/OpenAPI/python-flask-server/swagger_server/__main__.py
|
dkoslicki/NCATS
| 2
|
6627489
|
#!/usr/bin/env python3
import connexion
from .encoder import JSONEncoder
if __name__ == '__main__':
    # Build the Connexion app from the bundled OpenAPI (Swagger) spec
    # directory next to this module.
    app = connexion.App(__name__, specification_dir='./swagger/')
    # Use the project's custom encoder so generated model objects
    # serialize to JSON correctly.
    app.app.json_encoder = JSONEncoder
    app.add_api('swagger.yaml', arguments={'title': 'Proof-of-concept OpenAPI front end for RTX.'})
    # threaded=True lets the underlying Flask server handle concurrent requests.
    app.run(port=5000, threaded=True)
|
#!/usr/bin/env python3
import connexion
from .encoder import JSONEncoder
if __name__ == '__main__':
app = connexion.App(__name__, specification_dir='./swagger/')
app.app.json_encoder = JSONEncoder
app.add_api('swagger.yaml', arguments={'title': 'Proof-of-concept OpenAPI front end for RTX.'})
app.run(port=5000, threaded=True)
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 1.775474
| 2
|
tests/test_subscribers_manager.py
|
matan1008/srsran-controller
| 0
|
6627490
|
from contextlib import contextmanager
import pytest
from srsran_controller.configuration import config
from srsran_controller.subscribers_manager import SubscribersManager, Subscriber
@contextmanager
def change_users_db(new_users_db):
    """Temporarily point the global config at a different users-DB path."""
    old_users = config.users_db
    config.users_db = new_users_db
    try:
        yield
    finally:
        # Always restore the original path, even if the body raises.
        config.users_db = old_users
@pytest.fixture(scope='function', autouse=True)
def users_db_file(tmp_path):
    """Autouse fixture: each test runs against a fresh, empty users DB file."""
    new_users_db = (tmp_path / 'new_db.csv').absolute()
    with change_users_db(new_users_db):
        yield new_users_db
def test_adding_subscribers(users_db_file):
    """A created subscriber is returned populated and persisted to the CSV.

    Note the persisted sqn field: 1233 is written as the 12-digit hex
    string '0000000004d1' (0x4d1 == 1233).
    """
    s = SubscribersManager()
    sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
                              '<KEY>', '9001', 1233, 9, 'dynamic')
    assert sub.index == 0
    assert sub.name == 'Name'
    assert sub.imsi == '001010123456785'
    assert sub.key == '<KEY>'
    assert sub.op_type == 'opc'
    assert sub.op == '63bfa50ee6523365ff14c1f45f88737d'
    assert sub.amf == '9001'
    assert sub.sqn == 1233
    assert sub.qci == 9
    assert sub.ip == 'dynamic'
    # 'mil' auth is apparently the manager's default; not passed above.
    assert sub.auth == 'mil'
    assert users_db_file.read_text() == ('Name,mil,001010123456785,00112233445566778899aabbccddeeff,opc,'
                                         '63bfa50ee6523365ff14c1f45f88737d,9001,0000000004d1,9,dynamic\n')
def test_deleting_subscribers(users_db_file):
    """Deleting removes only the targeted row; deleting all empties the file."""
    s = SubscribersManager()
    sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
                              '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    sub2 = s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
                               '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    s.delete_subscriber(sub)
    assert users_db_file.read_text() == ('Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,'
                                         '63bfa50ee6523365ff14c1f45f88737d,9001,0000000004d1,9,dynamic\n')
    # Indices shift down after a deletion, so sub2 must be re-aimed manually.
    sub2.index -= 1
    s.delete_subscriber(sub2)
    assert not users_db_file.read_text()
def test_iterating_subscribers():
    """iter_subscribers yields subscribers in creation order."""
    s = SubscribersManager()
    sub = s.create_subscriber('Name', '001010123456785', '<KEY>', 'opc',
                              '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    sub2 = s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
                               '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    assert [sub, sub2] == list(s.iter_subscribers())
def test_iterating_subscribers_with_comment(users_db_file):
    """Lines starting with '#' in the users DB are skipped on iteration.

    NOTE(review): the file stores sqn '1233' but the parsed subscriber has
    sqn 4659 — presumably the CSV sqn column is read as hex (0x1233 == 4659);
    confirm against SubscribersManager's parser.
    """
    users_db_file.write_text(
        '# Name,mil,001010123456785,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,1233,9,'
        'dynamic\n'
        'Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,1233,9,'
        'dynamic\n'
    )
    s = SubscribersManager()
    assert list(s.iter_subscribers()) == [Subscriber(
        0, 'Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc', '63bfa50ee6523365ff14c1f45f88737d',
        '9001', 4659, 9, 'dynamic'
    )]
def test_editing_subscribers(users_db_file):
    """edit_subscriber rewrites the edited row in place, leaving others intact."""
    s = SubscribersManager()
    sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
                              '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
                        '63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
    sub.name = 'New Name!'
    s.edit_subscriber(sub)
    assert users_db_file.read_text() == (
        'New Name!,mil,001010123456785,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,'
        '0000000004d1,9,dynamic\n'
        'Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,'
        '0000000004d1,9,dynamic\n'
    )
def test_iterating_subscribers_missing_file():
    """A missing users DB file yields an empty subscriber list, not an error."""
    with change_users_db('users_db_that_doesnt_exist'):
        s = SubscribersManager()
        assert not list(s.iter_subscribers())
|
from contextlib import contextmanager
import pytest
from srsran_controller.configuration import config
from srsran_controller.subscribers_manager import SubscribersManager, Subscriber
@contextmanager
def change_users_db(new_users_db):
old_users = config.users_db
config.users_db = new_users_db
try:
yield
finally:
config.users_db = old_users
@pytest.fixture(scope='function', autouse=True)
def users_db_file(tmp_path):
new_users_db = (tmp_path / 'new_db.csv').absolute()
with change_users_db(new_users_db):
yield new_users_db
def test_adding_subscribers(users_db_file):
s = SubscribersManager()
sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
'<KEY>', '9001', 1233, 9, 'dynamic')
assert sub.index == 0
assert sub.name == 'Name'
assert sub.imsi == '001010123456785'
assert sub.key == '<KEY>'
assert sub.op_type == 'opc'
assert sub.op == '63bfa50ee6523365ff14c1f45f88737d'
assert sub.amf == '9001'
assert sub.sqn == 1233
assert sub.qci == 9
assert sub.ip == 'dynamic'
assert sub.auth == 'mil'
assert users_db_file.read_text() == ('Name,mil,001010123456785,00112233445566778899aabbccddeeff,opc,'
'63bfa50ee6523365ff14c1f45f88737d,9001,0000000004d1,9,dynamic\n')
def test_deleting_subscribers(users_db_file):
s = SubscribersManager()
sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
sub2 = s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
s.delete_subscriber(sub)
assert users_db_file.read_text() == ('Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,'
'63bfa50ee6523365ff14c1f45f88737d,9001,0000000004d1,9,dynamic\n')
sub2.index -= 1
s.delete_subscriber(sub2)
assert not users_db_file.read_text()
def test_iterating_subscribers():
s = SubscribersManager()
sub = s.create_subscriber('Name', '001010123456785', '<KEY>', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
sub2 = s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
assert [sub, sub2] == list(s.iter_subscribers())
def test_iterating_subscribers_with_comment(users_db_file):
users_db_file.write_text(
'# Name,mil,001010123456785,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,1233,9,'
'dynamic\n'
'Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,1233,9,'
'dynamic\n'
)
s = SubscribersManager()
assert list(s.iter_subscribers()) == [Subscriber(
0, 'Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc', '63bfa50ee6523365ff14c1f45f88737d',
'9001', 4659, 9, 'dynamic'
)]
def test_editing_subscribers(users_db_file):
s = SubscribersManager()
sub = s.create_subscriber('Name', '001010123456785', '00112233445566778899aabbccddeeff', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
s.create_subscriber('Name2', '001010123456783', '00112233445566778899aabbccddeeff', 'opc',
'63bfa50ee6523365ff14c1f45f88737d', '9001', 1233, 9, 'dynamic')
sub.name = 'New Name!'
s.edit_subscriber(sub)
assert users_db_file.read_text() == (
'New Name!,mil,001010123456785,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,'
'0000000004d1,9,dynamic\n'
'Name2,mil,001010123456783,00112233445566778899aabbccddeeff,opc,63bfa50ee6523365ff14c1f45f88737d,9001,'
'0000000004d1,9,dynamic\n'
)
def test_iterating_subscribers_missing_file():
with change_users_db('users_db_that_doesnt_exist'):
s = SubscribersManager()
assert not list(s.iter_subscribers())
|
none
| 1
| 2.113489
| 2
|
|
core/domain/search_services_test.py
|
jlau323/oppia
| 2
|
6627491
|
<filename>core/domain/search_services_test.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.search_services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import collection_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import python_utils
gae_search_services = models.Registry.import_search_services()
class SearchServicesUnitTests(test_utils.GenericTestBase):
    """Test the search services module."""
    EXP_ID = 'An_exploration_id'
    COLLECTION_ID = 'A_collection_id'
    def setUp(self):
        """Register the standard test users and promote the admin."""
        super(SearchServicesUnitTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.voice_artist_id = self.get_user_id_from_email(
            self.VOICE_ARTIST_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        self.set_admins([self.ADMIN_USERNAME])
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
    def test_get_search_rank(self):
        """Rank starts at the base value and moves with ratings.

        Observed deltas in this test: a 5-star rating adds 10, a 2-star
        rating then nets -2 (ends at base + 8); publishing alone does not
        change the rank.
        """
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        base_search_rank = 20
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        rating_services.assign_rating_to_exploration(
            self.owner_id, self.EXP_ID, 5)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 10)
        rating_services.assign_rating_to_exploration(
            self.user_id_admin, self.EXP_ID, 2)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 8)
    def test_search_ranks_cannot_be_negative(self):
        """Many 1-star ratings can only drive the rank down to zero, not below."""
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        base_search_rank = 20
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        # A user can (down-)rate an exploration at most once.
        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_1', self.EXP_ID, 1)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank - 5)
        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_%s' % i, self.EXP_ID, 1)
        # The rank will be at least 0.
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(search_services.get_search_rank_from_exp_summary(
            exp_summary), 0)
    def test_search_explorations(self):
        """search_explorations forwards all arguments to the backend search."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']
        def mock_search(
                query_string, index, cursor=None, limit=20, sort='',
                ids_only=False, retries=3):
            # Assert inside the mock that every argument arrived unchanged.
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)
            return doc_ids, expected_result_cursor
        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_explorations(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )
        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)
    def test_search_collections(self):
        """search_collections forwards all arguments to the backend search."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']
        def mock_search(
                query_string, index, cursor=None, limit=20, sort='',
                ids_only=False, retries=3):
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(
                index, collection_services.SEARCH_INDEX_COLLECTIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)
            return doc_ids, expected_result_cursor
        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_collections(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )
        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)
    def test_demo_collections_are_added_to_search_index(self):
        """Loading a demo collection makes it findable via search."""
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, [])
        collection_services.load_demo('0')
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, ['0'])
    def test_demo_explorations_are_added_to_search_index(self):
        """Loading a demo exploration makes it findable via search."""
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, [])
        exp_services.load_demo('0')
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, ['0'])
    def test_clear_exploration_search_index(self):
        """Clearing the exploration index removes previously indexed docs."""
        exp_services.load_demo('0')
        result = search_services.search_explorations('Welcome', 2)[0]
        self.assertEqual(result, ['0'])
        search_services.clear_exploration_search_index()
        result = search_services.search_explorations('Welcome', 2)[0]
        self.assertEqual(result, [])
    def test_clear_collection_search_index(self):
        """Clearing the collection index removes previously indexed docs."""
        collection_services.load_demo('0')
        result = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(result, ['0'])
        search_services.clear_collection_search_index()
        result = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(result, [])
    def test_delete_explorations_from_search_index(self):
        """Deletion delegates exactly once to the backend delete call."""
        def _mock_delete_docs(ids, index):
            """Mocks delete_documents_from_index()."""
            self.assertEqual(ids, [self.EXP_ID])
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
        delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        with delete_docs_swap:
            search_services.delete_explorations_from_search_index([self.EXP_ID])
        self.assertEqual(delete_docs_counter.times_called, 1)
    def test_delete_collections_from_search_index(self):
        """Collection deletion delegates exactly once to the backend delete."""
        def _mock_delete_docs(ids, index):
            """Mocks delete_documents_from_index()."""
            self.assertEqual(ids, [self.COLLECTION_ID])
            self.assertEqual(index, search_services.SEARCH_INDEX_COLLECTIONS)
        delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        with delete_docs_swap:
            search_services.delete_collections_from_search_index(
                [self.COLLECTION_ID])
        self.assertEqual(delete_docs_counter.times_called, 1)
|
<filename>core/domain/search_services_test.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.search_services."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.domain import collection_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import python_utils
gae_search_services = models.Registry.import_search_services()
class SearchServicesUnitTests(test_utils.GenericTestBase):
    """Test the search services module."""
    # Fixed ids used as fixtures throughout the tests below.
    EXP_ID = 'An_exploration_id'
    COLLECTION_ID = 'A_collection_id'
    def setUp(self):
        """Signs up the standard test users and caches their ids."""
        super(SearchServicesUnitTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.voice_artist_id = self.get_user_id_from_email(
            self.VOICE_ARTIST_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        self.set_admins([self.ADMIN_USERNAME])
        self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
    def test_get_search_rank(self):
        """Ratings, but not publication, should change the search rank."""
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        base_search_rank = 20
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        # Publishing alone leaves the rank unchanged.
        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        # A 5-star rating raises the rank by 10.
        rating_services.assign_rating_to_exploration(
            self.owner_id, self.EXP_ID, 5)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 10)
        # A 2-star rating from a second user pulls the rank back down.
        rating_services.assign_rating_to_exploration(
            self.user_id_admin, self.EXP_ID, 2)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank + 8)
    def test_search_ranks_cannot_be_negative(self):
        """Many 1-star ratings should floor the rank at zero, not below."""
        self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        base_search_rank = 20
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank)
        # A user can (down-)rate an exploration at most once.
        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_1', self.EXP_ID, 1)
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(
            search_services.get_search_rank_from_exp_summary(exp_summary),
            base_search_rank - 5)
        # Distinct raters, each giving one star, keep pushing the rank down.
        for i in python_utils.RANGE(50):
            rating_services.assign_rating_to_exploration(
                'user_id_%s' % i, self.EXP_ID, 1)
        # The rank will be at least 0.
        exp_summary = exp_fetchers.get_exploration_summary_by_id(self.EXP_ID)
        self.assertEqual(search_services.get_search_rank_from_exp_summary(
            exp_summary), 0)
    def test_search_explorations(self):
        """search_explorations should forward its arguments to the backend."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']
        def mock_search(
                query_string, index, cursor=None, limit=20, sort='',
                ids_only=False, retries=3):
            """Asserts the backend receives exactly the caller's arguments."""
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)
            return doc_ids, expected_result_cursor
        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_explorations(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )
        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)
    def test_search_collections(self):
        """search_collections should forward its arguments to the backend."""
        expected_query_string = 'a query string'
        expected_cursor = 'cursor'
        expected_sort = 'title'
        expected_limit = 30
        expected_result_cursor = 'rcursor'
        doc_ids = ['id1', 'id2']
        def mock_search(
                query_string, index, cursor=None, limit=20, sort='',
                ids_only=False, retries=3):
            """Asserts the backend receives exactly the caller's arguments."""
            self.assertEqual(query_string, expected_query_string)
            self.assertEqual(
                index, collection_services.SEARCH_INDEX_COLLECTIONS)
            self.assertEqual(cursor, expected_cursor)
            self.assertEqual(limit, expected_limit)
            self.assertEqual(sort, expected_sort)
            self.assertEqual(ids_only, True)
            self.assertEqual(retries, 3)
            return doc_ids, expected_result_cursor
        with self.swap(gae_search_services, 'search', mock_search):
            result, cursor = search_services.search_collections(
                expected_query_string,
                expected_limit,
                sort=expected_sort,
                cursor=expected_cursor,
            )
        self.assertEqual(cursor, expected_result_cursor)
        self.assertEqual(result, doc_ids)
    def test_demo_collections_are_added_to_search_index(self):
        """Loading a demo collection should make it searchable."""
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, [])
        collection_services.load_demo('0')
        results = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(results, ['0'])
    def test_demo_explorations_are_added_to_search_index(self):
        """Loading a demo exploration should make it searchable."""
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, [])
        exp_services.load_demo('0')
        results, _ = search_services.search_explorations('Welcome', 2)
        self.assertEqual(results, ['0'])
    def test_clear_exploration_search_index(self):
        """Clearing the exploration index should drop indexed documents."""
        exp_services.load_demo('0')
        result = search_services.search_explorations('Welcome', 2)[0]
        self.assertEqual(result, ['0'])
        search_services.clear_exploration_search_index()
        result = search_services.search_explorations('Welcome', 2)[0]
        self.assertEqual(result, [])
    def test_clear_collection_search_index(self):
        """Clearing the collection index should drop indexed documents."""
        collection_services.load_demo('0')
        result = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(result, ['0'])
        search_services.clear_collection_search_index()
        result = search_services.search_collections('Welcome', 2)[0]
        self.assertEqual(result, [])
    def test_delete_explorations_from_search_index(self):
        """Deleting should call the backend exactly once with the right ids."""
        def _mock_delete_docs(ids, index):
            """Mocks delete_documents_from_index()."""
            self.assertEqual(ids, [self.EXP_ID])
            self.assertEqual(index, search_services.SEARCH_INDEX_EXPLORATIONS)
        delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        with delete_docs_swap:
            search_services.delete_explorations_from_search_index([self.EXP_ID])
        self.assertEqual(delete_docs_counter.times_called, 1)
    def test_delete_collections_from_search_index(self):
        """Deleting should call the backend exactly once with the right ids."""
        def _mock_delete_docs(ids, index):
            """Mocks delete_documents_from_index()."""
            self.assertEqual(ids, [self.COLLECTION_ID])
            self.assertEqual(index, search_services.SEARCH_INDEX_COLLECTIONS)
        delete_docs_counter = test_utils.CallCounter(_mock_delete_docs)
        delete_docs_swap = self.swap(
            gae_search_services, 'delete_documents_from_index',
            delete_docs_counter)
        with delete_docs_swap:
            search_services.delete_collections_from_search_index(
                [self.COLLECTION_ID])
        self.assertEqual(delete_docs_counter.times_called, 1)
|
en
| 0.779759
|
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Unit tests for core.domain.search_services. # pylint: disable=import-only-modules # pylint: disable=import-only-modules Test the search services module. # A user can (down-)rate an exploration at most once. # The rank will be at least 0. Mocks delete_documents_from_index(). Mocks delete_documents_from_index().
| 1.944172
| 2
|
lectures/binary-search/guess-number.py
|
syedakainat3/youtube
| 2,605
|
6627492
|
# Guess Number Higher or Lower, https://leetcode.com/explore/learn/card/binary-search/125/template-i/951/
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
    def guessNumber(self, n):
        """Find the number picked from [1, n] by binary search on guess().

        Args:
            n: upper bound of the search range (inclusive).

        Returns:
            int: the hidden number, for which guess() returns 0.

        Raises:
            RuntimeError: if guess() never confirms a number — the problem
                guarantees this cannot happen for valid input.
        """
        left = 1
        right = n
        while left <= right:
            mid = left + (right - left) // 2  # overflow-safe midpoint
            verdict = guess(mid)  # query once; reuse the result below
            if verdict == 0:
                return mid
            if verdict == 1:
                # Hidden number is higher: discard [left, mid].
                left = mid + 1
            else:
                # Hidden number is lower: discard [mid, right].
                right = mid - 1
        # Previously `assert False`, which is stripped under `python -O` and
        # would make the method silently return None; raise a real error.
        raise RuntimeError('guess() never confirmed a number in [1, n]')
|
# Guess Number Higher or Lower, https://leetcode.com/explore/learn/card/binary-search/125/template-i/951/
# The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# def guess(num):
class Solution(object):
def guessNumber(self, n):
left = 1
right = n
while left <= right:
mid = left + (right - left) // 2
x = guess(mid) # let's remember this value not to repeat the query in ifs below
if x == 0:
return mid
if x == 1:
left = mid + 1
else:
right = mid - 1
assert False # we shouldn't get here because it's guaranteed the target value is in the array
|
en
| 0.850522
|
# Guess Number Higher or Lower, https://leetcode.com/explore/learn/card/binary-search/125/template-i/951/ # The guess API is already defined for you. # @param num, your guess # @return -1 if my number is lower, 1 if my number is higher, otherwise return 0 # def guess(num): # let's remember this value not to repeat the query in ifs below # we shouldn't get here because it's guaranteed the target value is in the array
| 3.746372
| 4
|
src/app.py
|
ljw9609/NMT-FCONV
| 0
|
6627493
|
from flask import request, jsonify, make_response
from src import app
from fairseq.models.fconv import FConvModel
import os
# Directory holding the pretrained convolutional NMT checkpoints.
PATH = os.path.join(os.path.dirname(__file__), '../FCONV/model')
# Chinese -> English model: Moses tokenization + subword-nmt BPE.
# NOTE: both models are loaded eagerly at import time, which is slow and
# memory-heavy — presumably intentional so requests never pay the cost.
zh2en = FConvModel.from_pretrained(
    PATH,
    checkpoint_file='model_zh2en.pt',
    data_name_or_path=PATH,
    tokenizer='moses',
    bpe='subword_nmt',
    bpe_codes=PATH + '/zh.code'
)
# English -> Chinese model: same preprocessing with the English BPE codes.
en2zh = FConvModel.from_pretrained(
    PATH,
    checkpoint_file='model_en2zh.pt',
    data_name_or_path=PATH,
    tokenizer='moses',
    bpe='subword_nmt',
    bpe_codes=PATH + '/en.code'
)
def translate_passage(src, translator):
    """Translate a multi-paragraph passage sentence by sentence.

    Paragraphs are separated by newlines and sentences by periods. Each
    sentence is lowercased before translation and re-capitalized after;
    empty/whitespace-only fragments are skipped. Translated sentences are
    concatenated (without separators) per paragraph, and paragraphs are
    re-joined with newlines.

    Args:
        src: the source text to translate.
        translator: any object exposing a translate(text) -> str method.

    Returns:
        str: the translated passage.
    """
    def _render_sentence(fragment):
        # Lowercase before translation, then restore a sentence-initial capital.
        return translator.translate(fragment.lower()).capitalize()

    def _render_paragraph(paragraph):
        rendered = [
            _render_sentence(fragment)
            for fragment in paragraph.split('.')
            if fragment and fragment not in ('', ' ', '\t')
        ]
        return ''.join(rendered)

    return '\n'.join(
        _render_paragraph(paragraph)
        for paragraph in src.split('\n')
        if paragraph and paragraph not in ('', ' ', '\t')
    )
@app.route('/translate', methods=['POST'])
def translate():
    """POST /translate: translate `s_text` from language `s_lang`.

    Expects a JSON body {"s_lang": "en"|"zh", "s_text": ...} and returns
    {"s_text": ..., "t_text": ...} with HTTP 200. Any other s_lang (or a
    missing one) yields an error message in t_text.
    """
    # request.json is None for non-JSON bodies, and .get() may return None
    # for missing keys; guard both so we never call .lower() on None.
    params = request.json or {}
    s_lang = (params.get('s_lang') or '').lower()
    s_text = (params.get('s_text') or '').lower()
    if s_lang == 'en':
        t_text = translate_passage(s_text, en2zh)
    elif s_lang == 'zh':
        t_text = translate_passage(s_text, zh2en)
    else:
        t_text = 'Wrong source language!'
    json_obj = {'s_text': s_text,
                't_text': t_text}
    response = make_response(jsonify(json_obj), 200)
    return response
|
from flask import request, jsonify, make_response
from src import app
from fairseq.models.fconv import FConvModel
import os
PATH = os.path.join(os.path.dirname(__file__), '../FCONV/model')
zh2en = FConvModel.from_pretrained(
PATH,
checkpoint_file='model_zh2en.pt',
data_name_or_path=PATH,
tokenizer='moses',
bpe='subword_nmt',
bpe_codes=PATH + '/zh.code'
)
en2zh = FConvModel.from_pretrained(
PATH,
checkpoint_file='model_en2zh.pt',
data_name_or_path=PATH,
tokenizer='moses',
bpe='subword_nmt',
bpe_codes=PATH + '/en.code'
)
def translate_passage(src, translator):
def translate_paragraph(_para_):
sents = _para_.split('.')
tgt_sents = []
for sent in sents:
if len(sent) == 0 or sent in ['', ' ', '\t']:
continue
tgt_sents.append(translate_sentence(sent))
return ''.join(tgt_sents)
def translate_sentence(_sent_):
return translator.translate(_sent_.lower()).capitalize()
paras = src.split('\n')
outputs = []
for para in paras:
if len(para) == 0 or para in ['', ' ', '\t']:
continue
outputs.append(translate_paragraph(para))
return '\n'.join(outputs)
@app.route('/translate', methods=['POST'])
def translate():
params = request.json
s_lang = params.get('s_lang').lower()
s_text = params.get('s_text').lower()
if s_lang == 'en':
# t_text = en2zh.translate(s_text)
t_text = translate_passage(s_text, en2zh)
elif s_lang == 'zh':
# t_text = zh2en.translate(s_text)
t_text = translate_passage(s_text, zh2en)
else:
t_text = 'Wrong source language!'
json_obj = {'s_text': s_text,
't_text': t_text}
response = make_response(jsonify(json_obj), 200)
return response
|
ar
| 0.087438
|
# t_text = en2zh.translate(s_text) # t_text = zh2en.translate(s_text)
| 2.427755
| 2
|
tests/test_conf.py
|
WorkInProgress-Development/theplease
| 0
|
6627494
|
<gh_stars>0
import pytest
import six
import os
from mock import Mock
from theplease import const
@pytest.fixture
def load_source(mocker):
    """Patches conf.load_source so no real settings file is imported."""
    return mocker.patch('theplease.conf.load_source')
def test_settings_defaults(load_source, settings):
    """With an empty settings module, every default value should apply."""
    # A bare object() has no settings attributes, so nothing overrides defaults.
    load_source.return_value = object()
    settings.init()
    for key, val in const.DEFAULT_SETTINGS.items():
        assert getattr(settings, key) == val
class TestSettingsFromFile(object):
    """Settings loaded from the user's settings file."""
    def test_from_file(self, load_source, settings):
        """Values present in the settings file should override the defaults."""
        load_source.return_value = Mock(rules=['test'],
                                        wait_command=10,
                                        require_confirmation=True,
                                        no_colors=True,
                                        priority={'vim': 100},
                                        exclude_rules=['git'])
        settings.init()
        assert settings.rules == ['test']
        assert settings.wait_command == 10
        assert settings.require_confirmation is True
        assert settings.no_colors is True
        assert settings.priority == {'vim': 100}
        assert settings.exclude_rules == ['git']
    def test_from_file_with_DEFAULT(self, load_source, settings):
        """Rules listed alongside the defaults should be kept in full."""
        load_source.return_value = Mock(rules=const.DEFAULT_RULES + ['test'],
                                        wait_command=10,
                                        exclude_rules=[],
                                        require_confirmation=True,
                                        no_colors=True)
        settings.init()
        assert settings.rules == const.DEFAULT_RULES + ['test']
@pytest.mark.usefixtures('load_source')
class TestSettingsFromEnv(object):
    """Settings read from THEFUCK_* environment variables."""
    def test_from_env(self, os_environ, settings):
        """Each env var should be parsed into its typed settings value."""
        # Note: the malformed 'lisp=wrong' priority entry is expected to be
        # dropped — the priority assertion below contains no 'lisp' key.
        os_environ.update({'THEFUCK_RULES': 'bash:lisp',
                           'THEFUCK_EXCLUDE_RULES': 'git:vim',
                           'THEFUCK_WAIT_COMMAND': '55',
                           'THEFUCK_REQUIRE_CONFIRMATION': 'true',
                           'THEFUCK_NO_COLORS': 'false',
                           'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15',
                           'THEFUCK_WAIT_SLOW_COMMAND': '999',
                           'THEFUCK_SLOW_COMMANDS': 'lein:react-native:./gradlew',
                           'THEFUCK_NUM_CLOSE_MATCHES': '359',
                           'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': '/media/:/mnt/'})
        settings.init()
        assert settings.rules == ['bash', 'lisp']
        assert settings.exclude_rules == ['git', 'vim']
        assert settings.wait_command == 55
        assert settings.require_confirmation is True
        assert settings.no_colors is False
        assert settings.priority == {'bash': 10, 'vim': 15}
        assert settings.wait_slow_command == 999
        assert settings.slow_commands == ['lein', 'react-native', './gradlew']
        assert settings.num_close_matches == 359
        assert settings.excluded_search_path_prefixes == ['/media/', '/mnt/']
    def test_from_env_with_DEFAULT(self, os_environ, settings):
        """'DEFAULT_RULES' in the env var should expand to the default set."""
        os_environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'})
        settings.init()
        assert settings.rules == const.DEFAULT_RULES + ['bash', 'lisp']
def test_settings_from_args(settings):
    """Command-line flags should override the corresponding settings."""
    settings.init(Mock(yes=True, debug=True, repeat=True))
    # --yes implies that no interactive confirmation is required.
    assert not settings.require_confirmation
    assert settings.debug
    assert settings.repeat
class TestInitializeSettingsFile(object):
    """Creation of the default settings file on first run."""
    def test_ignore_if_exists(self, settings):
        """An existing settings file must never be opened for writing."""
        settings_path_mock = Mock(is_file=Mock(return_value=True), open=Mock())
        settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
        settings._init_settings_file()
        assert settings_path_mock.is_file.call_count == 1
        assert not settings_path_mock.open.called
    def test_create_if_doesnt_exists(self, settings):
        """A missing file is created with the header and commented defaults."""
        settings_file = six.StringIO()
        # Fake Path whose open() acts as a context manager over the buffer.
        settings_path_mock = Mock(
            is_file=Mock(return_value=False),
            open=Mock(return_value=Mock(
                __exit__=lambda *args: None, __enter__=lambda *args: settings_file)))
        settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
        settings._init_settings_file()
        settings_file_contents = settings_file.getvalue()
        assert settings_path_mock.is_file.call_count == 1
        assert settings_path_mock.open.call_count == 1
        assert const.SETTINGS_HEADER in settings_file_contents
        # Every default is written as a commented-out '# key = value' line.
        for setting in const.DEFAULT_SETTINGS.items():
            assert '# {} = {}\n'.format(*setting) in settings_file_contents
        settings_file.close()
@pytest.mark.parametrize('legacy_dir_exists, xdg_config_home, result', [
    (False, '~/.config', '~/.config/theplease'),
    (False, '/user/test/config/', '/user/test/config/theplease'),
    (True, '~/.config', '~/.theplease'),
    (True, '/user/test/config/', '~/.theplease')])
def test_get_user_dir_path(mocker, os_environ, settings, legacy_dir_exists,
                           xdg_config_home, result):
    """The legacy ~/.theplease dir wins; otherwise XDG_CONFIG_HOME is used."""
    mocker.patch('theplease.conf.Path.is_dir',
                 return_value=legacy_dir_exists)
    # NOTE(review): no current parameter passes None, so the pop() branch
    # below is unexercised by this parametrization.
    if xdg_config_home is not None:
        os_environ['XDG_CONFIG_HOME'] = xdg_config_home
    else:
        os_environ.pop('XDG_CONFIG_HOME', None)
    path = settings._get_user_dir_path().as_posix()
    assert path == os.path.expanduser(result)
|
import pytest
import six
import os
from mock import Mock
from theplease import const
@pytest.fixture
def load_source(mocker):
return mocker.patch('theplease.conf.load_source')
def test_settings_defaults(load_source, settings):
load_source.return_value = object()
settings.init()
for key, val in const.DEFAULT_SETTINGS.items():
assert getattr(settings, key) == val
class TestSettingsFromFile(object):
def test_from_file(self, load_source, settings):
load_source.return_value = Mock(rules=['test'],
wait_command=10,
require_confirmation=True,
no_colors=True,
priority={'vim': 100},
exclude_rules=['git'])
settings.init()
assert settings.rules == ['test']
assert settings.wait_command == 10
assert settings.require_confirmation is True
assert settings.no_colors is True
assert settings.priority == {'vim': 100}
assert settings.exclude_rules == ['git']
def test_from_file_with_DEFAULT(self, load_source, settings):
load_source.return_value = Mock(rules=const.DEFAULT_RULES + ['test'],
wait_command=10,
exclude_rules=[],
require_confirmation=True,
no_colors=True)
settings.init()
assert settings.rules == const.DEFAULT_RULES + ['test']
@pytest.mark.usefixtures('load_source')
class TestSettingsFromEnv(object):
def test_from_env(self, os_environ, settings):
os_environ.update({'THEFUCK_RULES': 'bash:lisp',
'THEFUCK_EXCLUDE_RULES': 'git:vim',
'THEFUCK_WAIT_COMMAND': '55',
'THEFUCK_REQUIRE_CONFIRMATION': 'true',
'THEFUCK_NO_COLORS': 'false',
'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15',
'THEFUCK_WAIT_SLOW_COMMAND': '999',
'THEFUCK_SLOW_COMMANDS': 'lein:react-native:./gradlew',
'THEFUCK_NUM_CLOSE_MATCHES': '359',
'THEFUCK_EXCLUDED_SEARCH_PATH_PREFIXES': '/media/:/mnt/'})
settings.init()
assert settings.rules == ['bash', 'lisp']
assert settings.exclude_rules == ['git', 'vim']
assert settings.wait_command == 55
assert settings.require_confirmation is True
assert settings.no_colors is False
assert settings.priority == {'bash': 10, 'vim': 15}
assert settings.wait_slow_command == 999
assert settings.slow_commands == ['lein', 'react-native', './gradlew']
assert settings.num_close_matches == 359
assert settings.excluded_search_path_prefixes == ['/media/', '/mnt/']
def test_from_env_with_DEFAULT(self, os_environ, settings):
os_environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'})
settings.init()
assert settings.rules == const.DEFAULT_RULES + ['bash', 'lisp']
def test_settings_from_args(settings):
settings.init(Mock(yes=True, debug=True, repeat=True))
assert not settings.require_confirmation
assert settings.debug
assert settings.repeat
class TestInitializeSettingsFile(object):
def test_ignore_if_exists(self, settings):
settings_path_mock = Mock(is_file=Mock(return_value=True), open=Mock())
settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
settings._init_settings_file()
assert settings_path_mock.is_file.call_count == 1
assert not settings_path_mock.open.called
def test_create_if_doesnt_exists(self, settings):
settings_file = six.StringIO()
settings_path_mock = Mock(
is_file=Mock(return_value=False),
open=Mock(return_value=Mock(
__exit__=lambda *args: None, __enter__=lambda *args: settings_file)))
settings.user_dir = Mock(joinpath=Mock(return_value=settings_path_mock))
settings._init_settings_file()
settings_file_contents = settings_file.getvalue()
assert settings_path_mock.is_file.call_count == 1
assert settings_path_mock.open.call_count == 1
assert const.SETTINGS_HEADER in settings_file_contents
for setting in const.DEFAULT_SETTINGS.items():
assert '# {} = {}\n'.format(*setting) in settings_file_contents
settings_file.close()
@pytest.mark.parametrize('legacy_dir_exists, xdg_config_home, result', [
(False, '~/.config', '~/.config/theplease'),
(False, '/user/test/config/', '/user/test/config/theplease'),
(True, '~/.config', '~/.theplease'),
(True, '/user/test/config/', '~/.theplease')])
def test_get_user_dir_path(mocker, os_environ, settings, legacy_dir_exists,
xdg_config_home, result):
mocker.patch('theplease.conf.Path.is_dir',
return_value=legacy_dir_exists)
if xdg_config_home is not None:
os_environ['XDG_CONFIG_HOME'] = xdg_config_home
else:
os_environ.pop('XDG_CONFIG_HOME', None)
path = settings._get_user_dir_path().as_posix()
assert path == os.path.expanduser(result)
|
none
| 1
| 2.067362
| 2
|
|
users/migrations/0003_auto_20180630_0145.py
|
tat3/djitter
| 4
|
6627495
|
<reponame>tat3/djitter<filename>users/migrations/0003_auto_20180630_0145.py
# Generated by Django 2.0.5 on 2018-06-30 01:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give User.nickname a default of 'unknown'."""
    dependencies = [
        ('users', '0002_user_nickname'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='nickname',
            field=models.CharField(default='unknown', max_length=20),
        ),
    ]
|
# Generated by Django 2.0.5 on 2018-06-30 01:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_nickname'),
]
operations = [
migrations.AlterField(
model_name='user',
name='nickname',
field=models.CharField(default='unknown', max_length=20),
),
]
|
en
| 0.791483
|
# Generated by Django 2.0.5 on 2018-06-30 01:45
| 1.631697
| 2
|
lstm/ilab/metrics_np.py
|
ucrscholar/HandWashNet
| 1
|
6627496
|
import numpy as np
def iou_np(y_true, y_pred, smooth=1.):
    """Soft intersection-over-union of two mask arrays.

    Args:
        y_true: ground-truth mask array (values assumed in [0, 1] — confirm).
        y_pred: predicted mask array, same shape as y_true.
        smooth: additive smoothing constant avoiding 0/0 for empty masks.

    Returns:
        float: (sum(inter) + smooth) / (sum(union) - sum(inter) + smooth).
    """
    intersection = y_true * y_pred
    union = y_true + y_pred
    # Add the smoothing term once, after the reduction. The previous code
    # summed (x + smooth) elementwise, scaling `smooth` by the element count,
    # so fully disjoint masks scored close to 1 on large arrays.
    return (np.sum(intersection) + smooth) / (
        np.sum(union - intersection) + smooth)
def iou_thresholded_np(y_true, y_pred, threshold=0.5, smooth=1.):
    """IoU after binarizing the prediction at `threshold`.

    Args:
        y_true: ground-truth mask array.
        y_pred: predicted scores; entries strictly above `threshold` count as 1.
        threshold: binarization cutoff for y_pred.
        smooth: additive smoothing constant avoiding 0/0 for empty masks.

    Returns:
        float: thresholded intersection-over-union.
    """
    y_pred_pos = (y_pred > threshold) * 1.0
    intersection = y_true * y_pred_pos
    union = y_true + y_pred_pos
    # Smoothing is added once after the reduction; summing (x + smooth)
    # elementwise would scale `smooth` by the element count and inflate
    # the score of disjoint masks on large arrays.
    return (np.sum(intersection) + smooth) / (
        np.sum(union - intersection) + smooth)
def iou_thresholded_np_imgwise(y_true, y_pred, threshold=0.5, smooth=1.):
    """Per-image thresholded IoU over a batch of square masks.

    Assumes inputs are shaped (batch, side, side) — TODO confirm with callers;
    the reshape flattens each image to side**2 pixels.

    Args:
        y_true: ground-truth masks, shape (batch, side, side).
        y_pred: predicted scores, same shape; entries above `threshold` count as 1.
        threshold: binarization cutoff for y_pred.
        smooth: additive smoothing constant avoiding 0/0 for empty masks.

    Returns:
        numpy array of shape (batch,) with one IoU value per image.
    """
    y_true = y_true.reshape((y_true.shape[0], y_true.shape[1] ** 2))
    y_pred = y_pred.reshape((y_pred.shape[0], y_pred.shape[1] ** 2))
    y_pred_pos = (y_pred > threshold) * 1.0
    intersection = y_true * y_pred_pos
    union = y_true + y_pred_pos
    # Smoothing is added once per image after the reduction; summing
    # (x + smooth) elementwise would scale `smooth` by the pixel count.
    return (np.sum(intersection, axis=1) + smooth) / (
        np.sum(union - intersection, axis=1) + smooth)
|
import numpy as np
def iou_np(y_true, y_pred, smooth=1.):
intersection = y_true * y_pred
union = y_true + y_pred
return np.sum(intersection + smooth) / np.sum(union - intersection + smooth)
def iou_thresholded_np(y_true, y_pred, threshold=0.5, smooth=1.):
y_pred_pos = (y_pred > threshold) * 1.0
intersection = y_true * y_pred_pos
union = y_true + y_pred_pos
return np.sum(intersection + smooth) / np.sum(union - intersection + smooth)
def iou_thresholded_np_imgwise(y_true, y_pred, threshold=0.5, smooth=1.):
y_true = y_true.reshape((y_true.shape[0], y_true.shape[1] ** 2))
y_pred = y_pred.reshape((y_pred.shape[0], y_pred.shape[1] ** 2))
y_pred_pos = (y_pred > threshold) * 1.0
intersection = y_true * y_pred_pos
union = y_true + y_pred_pos
return np.sum(intersection + smooth, axis=1) / np.sum(union - intersection + smooth, axis=1)
|
none
| 1
| 2.477428
| 2
|
|
python/p1a/main.py
|
AoEiuV020/codeforces
| 0
|
6627497
|
def count_flagstones(width, height, side):
    """Return how many side x side flagstones cover a width x height area.

    Each dimension needs ceil(dim / side) stones; partially covered stones
    count as whole ones. Ceiling division is done as (x + side - 1) // side.
    """
    return ((width + side - 1) // side) * ((height + side - 1) // side)


def main():
    """Read 'w h a' from stdin and print the number of flagstones."""
    nums = input().split(' ')
    w = int(nums[0])
    h = int(nums[1])
    a = int(nums[2])
    print(count_flagstones(w, h, a))


# Guarded entry point: the original ran (and blocked on input()) at import
# time; the computation is now a pure, testable function.
if __name__ == '__main__':
    main()
|
line = input()
nums = line.split(' ')
w = int(nums[0])
h = int(nums[1])
a = int(nums[2])
wc = (w + a - 1) // a
hc = (h + a - 1) // a
ret = wc * hc
print(ret)
|
none
| 1
| 3.139935
| 3
|
|
bokeh/embed/tests/test_elements.py
|
kevin1kevin1k/bokeh
| 12
|
6627498
|
<reponame>kevin1kevin1k/bokeh
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.embed.util import RenderItem
# Module under test
import bokeh.embed.elements as bee
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_div_for_render_item(object):
    """Unit tests for ``bee.div_for_render_item``."""
    def test_render(self):
        # The emitted div should carry the bk-root class and the element id.
        item = RenderItem(docid="doc123", elementid="foo123")
        expected = '<div class="bk-root" id="foo123"></div>'
        assert bee.div_for_render_item(item).strip() == expected
class Test_html_page_for_render_items(object):
    # TODO: no coverage yet for bee.html_page_for_render_items.
    pass
class Test_script_for_render_items(object):
    # TODO: no coverage yet for bee.script_for_render_items.
    pass
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.embed.util import RenderItem
# Module under test
import bokeh.embed.elements as bee
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Test_div_for_render_item(object):
def test_render(self):
render_item = RenderItem(docid="doc123", elementid="foo123")
assert bee.div_for_render_item(render_item).strip() == """<div class="bk-root" id="foo123"></div>"""
class Test_html_page_for_render_items(object):
pass
class Test_script_for_render_items(object):
pass
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
en
| 0.129891
|
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports # Module under test #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- <div class="bk-root" id="foo123"></div> #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
| 1.441273
| 1
|
crawling_scraping/beautiful_soup/scrape_by_bs4.py
|
litteletips/crawling_scraping-scrapy_tool
| 0
|
6627499
|
<gh_stars>0
# Scraping with Beautiful Soup 4.
# Extracts book URLs and titles from the HTML.
# The internal parser can be switched to suit the purpose
# (html.parser, lxml, lxml-xml, html5lib).
# Usage:
# python scrape_by_bs4.py
from urllib.parse import urljoin
from bs4 import BeautifulSoup
# Read the HTML file and build a BeautifulSoup object from it.
with open('dp.html') as f:
    soup = BeautifulSoup(f, 'html.parser')
# select() returns the list of a-elements matching the CSS selector;
# process each matched element in turn.
for a in soup.select('#listBook > li > a[itemprop="url"]'):
    # Resolve the book's absolute URL from the a-element's href attribute.
    url = urljoin('https://gihyo.jp/dp', a.get('href'))
    # The book title lives in a p-element with itemprop="name".
    p = a.select('p[itemprop="name"]')[0]
    title = p.text # use .text, not .string, since the element may contain <wbr> etc.
    # Print the book's URL and title.
    print(url, title)
|
# Beautiful Soup4を使ったスクレイピング
# HTMLから書籍のURLとタイトルを抽出できる
# 内部のパーサーを目的に応じて変えられる。(html.parser,lxml,lxml-xml,html5lib)
# 実行方法
# python scrape_by_bs4.py
from urllib.parse import urljoin
from bs4 import BeautifulSoup
# HTMLファイルを読み込んでBeautifulSoupオブジェクトを得る。
with open('dp.html') as f:
soup = BeautifulSoup(f, 'html.parser')
# select()メソッドで、セレクターに該当するa要素のリストを取得して、個々のa要素に対して処理を行う。
for a in soup.select('#listBook > li > a[itemprop="url"]'):
# a要素のhref属性から書籍のURLを取得する。
url = urljoin('https://gihyo.jp/dp', a.get('href'))
# 書籍のタイトルは itemprop="name" という属性を持つp要素から取得する。
p = a.select('p[itemprop="name"]')[0]
title = p.text # wbr要素などが含まれるのでstringではなくtextを使う。
# 書籍のURLとタイトルを出力する。
print(url, title)
|
ja
| 0.999716
|
# Beautiful Soup4を使ったスクレイピング # HTMLから書籍のURLとタイトルを抽出できる # 内部のパーサーを目的に応じて変えられる。(html.parser,lxml,lxml-xml,html5lib) # 実行方法 # python scrape_by_bs4.py # HTMLファイルを読み込んでBeautifulSoupオブジェクトを得る。 # select()メソッドで、セレクターに該当するa要素のリストを取得して、個々のa要素に対して処理を行う。 # a要素のhref属性から書籍のURLを取得する。 # 書籍のタイトルは itemprop="name" という属性を持つp要素から取得する。 # wbr要素などが含まれるのでstringではなくtextを使う。 # 書籍のURLとタイトルを出力する。
| 3.616592
| 4
|
GUI/NumericalV.py
|
RECIEM/Ballistica
| 2
|
6627500
|
# Red Ciudadana de Estaciones Meteorologicas
#
# Copyright @ 2021
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
import matplotlib
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from matplotlib.figure import Figure
from PhysicsEngine import NumericalVPhysicsHandler
from tkinter import filedialog
class NumericalVGUI(tk.Frame):
    """Tkinter front-end for a numerical ballistics model with drag linear in v.

    The left panel holds the input widgets (launch angle, drag coefficient,
    initial velocity, initial/final coordinates and an optional barrier);
    the right panel shows the current matplotlib figure plus a summary of
    the computed trajectory (range, max height, flight time, ...).
    """

    def __init__(self, master=None):
        # Physics model; the real parameters are pushed in by compute().
        self.physicshandler = NumericalVPhysicsHandler(0, 0, 0)
        tk.Frame.__init__(self, master)
        self.grid()
        # Top-level panel structure.
        self.panels = tk.Frame(self)
        self.panels.pack(fill=tk.BOTH, expand=1)
        # Left (controls) and right (plot) panels.
        self.leftpanel = tk.Frame(self.panels, relief=tk.GROOVE)
        self.leftpanel.pack(side=tk.LEFT)
        self.rightpanel = tk.Frame(self.panels)
        self.rightpanel.pack(side=tk.RIGHT)
        # Parameter grid for the upper-left panel.
        self.ulpanel = tk.LabelFrame(self.leftpanel, text='Parameters')
        self.ulpanel.pack(side=tk.TOP)
        # Control for the launch angle.
        self.anglelable = tk.Label(self.ulpanel, text='Initial angle (degrees)')
        self.anglelable.grid(row=0, column=0)
        self.angleinput = tk.Scale(self.ulpanel, from_=0, to=90, resolution=1, length=170, orient=tk.HORIZONTAL)
        self.angleinput.grid(row=0, column=1)
        # Control for the drag coefficient.
        self.draglable = tk.Label(self.ulpanel, text='Drag coefficient (s^-1)')
        self.draglable.grid(row=1, column=0)
        self.draginput = tk.Scale(self.ulpanel, from_=0.01, to=2, resolution=0.01, length=170, orient=tk.HORIZONTAL)
        self.draginput.grid(row=1, column=1)
        # Control for the initial velocity.
        self.velocitylabel = tk.Label(self.ulpanel, text='Initial velocity (m/s)')
        self.velocitylabel.grid(row=2, column=0)
        self.velocityinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.velocityinput.grid(row=2, column=1)
        self.velocityinput.insert(0, '125')
        # Initial position (lat/lon/height) entries, with defaults.
        self.latIlabel = tk.Label(self.ulpanel, text='I. Lat (m)')
        self.latIlabel.grid(row=3, column=0)
        self.lonIlabel = tk.Label(self.ulpanel, text='I. Lon (m)')
        self.lonIlabel.grid(row=3, column=1)
        self.heightIlabel = tk.Label(self.ulpanel, text='I. Height (m)')
        self.heightIlabel.grid(row=3, column=2)
        self.latIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latIinput.grid(row=4, column=0)
        self.lonIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonIinput.grid(row=4, column=1)
        self.heightIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightIinput.grid(row=4, column=2)
        self.latIinput.insert(0, '0')
        self.lonIinput.insert(0, '0')
        self.heightIinput.insert(0, '0')
        self.pblanklabel = tk.Label(self.ulpanel, text='')
        self.pblanklabel.grid(row=5, column=0, columnspan=2)
        # Final position (lat/lon/height) entries, with defaults.
        self.latFlabel = tk.Label(self.ulpanel, text='F. Lat (m)')
        self.latFlabel.grid(row=6, column=0)
        self.lonFlabel = tk.Label(self.ulpanel, text='F. Lon (m)')
        self.lonFlabel.grid(row=6, column=1)
        self.heightFlabel = tk.Label(self.ulpanel, text='F. Height (m)')
        self.heightFlabel.grid(row=6, column=2)
        self.latFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latFinput.grid(row=7, column=0)
        self.lonFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonFinput.grid(row=7, column=1)
        self.heightFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightFinput.grid(row=7, column=2)
        self.latFinput.insert(0, '100')
        self.lonFinput.insert(0, '100')
        self.heightFinput.insert(0, '0')
        # Toggle for drawing the barrier at the final position.
        self.barrierset = tk.BooleanVar()
        self.barriercheck = tk.Checkbutton(self.ulpanel, justify=tk.RIGHT, variable=self.barrierset, onvalue=True,
                                           offvalue=False, text='Show barrier')
        self.barriercheck.grid(row=8, column=0)
        # Button grid for the lower-left panel.
        self.blpanel = tk.Frame(self.leftpanel)
        self.blpanel.pack(side=tk.BOTTOM)
        self.blanklabel = tk.Label(self.blpanel, text="")
        self.blanklabel.grid(row=0, column=0, columnspan=2)
        # NOTE(review): every plot button is assigned to self.computebutton,
        # so only the last one stays reachable through that attribute.  The
        # names are kept as-is to avoid changing the instance attribute set.
        self.computebutton = tk.Button(self.blpanel, text="Compute", width=20, command=self.compute, default=tk.NORMAL)
        self.computebutton.grid(row=1, column=0, columnspan=3)
        self.computebutton = tk.Button(self.blpanel, text="x(t) vs. t", width=10, command=self.txGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=0)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. t", width=10, command=self.tyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. t", width=10, command=self.tvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=2)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. x(t)", width=10, command=self.xyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=0)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. x(t)", width=10, command=self.xvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. z(t)", width=10, command=self.yvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=2)
        # Red label used for user-facing error/status messages.
        self.userlabel = tk.Label(self.blpanel, text="", fg="red")
        self.userlabel.grid(row=4, column=0, columnspan=3)
        self.csvbutton = tk.Button(self.blpanel, text="Save to CSV", command=self.saveCSV, default=tk.NORMAL)
        self.csvbutton.grid(row=5, column=0)
        self.pngbutton = tk.Button(self.blpanel, text="Save to PNG", command=self.savePNG, default=tk.NORMAL)
        self.pngbutton.grid(row=5, column=1)
        self.quitbutton = tk.Button(self.blpanel, text="Quit", command=self.bye, default=tk.NORMAL)
        self.quitbutton.grid(row=5, column=2)
        # Neutral initial model state and an empty 100 m x 100 m plot.
        self.physicshandler.v0 = 0
        self.physicshandler.theta = 0
        self.physicshandler.b = 1
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Height (m)')
        axs.set_xlim(0, 100)
        axs.set_ylim(0, 100)
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def _read_float(self, entry, errmsg):
        """Parse a float from *entry*; on bad input show *errmsg* and return 0.0."""
        try:
            return float(entry.get())
        except ValueError:
            self.userlabel['text'] = errmsg
            return 0.0

    def _clear_right_panel(self):
        """Destroy every widget currently gridded in the right panel."""
        for slave in self.rightpanel.grid_slaves():
            slave.destroy()

    def _render(self, fig):
        """Embed *fig* in the right panel, refresh the statistics box and
        remember the figure so savePNG() can export it."""
        canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
        canvas.draw()
        canvas.get_tk_widget().grid(row=0, column=0)
        self.addStatistics()
        self.mostrecentfig = fig

    def geography(self):
        """Return (horizontal distance, height difference) between the initial
        and final positions entered in the UI.

        Unparseable fields report an error in the status label and fall back
        to 0.0 (as before).  The error messages for the height fields used to
        say "latitude" (copy-paste bug) — fixed to say "height".
        """
        latI = self._read_float(self.latIinput, "Initial latitude format incorrect")
        latF = self._read_float(self.latFinput, "Final latitude format incorrect")
        lonI = self._read_float(self.lonIinput, "Initial longitude format incorrect")
        lonF = self._read_float(self.lonFinput, "Final longitude format incorrect")
        heightI = self._read_float(self.heightIinput, "Initial height format incorrect")
        heightF = self._read_float(self.heightFinput, "Final height format incorrect")
        distance = np.sqrt(np.power((latF - latI), 2) + np.power((lonF - lonI), 2))
        height = heightF - heightI
        return (distance, height)

    def compute(self):
        """Read the UI parameters, run the physics model and plot z vs. x."""
        self.userlabel['text'] = ""
        try:
            vel0 = float(self.velocityinput.get())
        except ValueError:  # was a bare except; only float() can fail here
            self.userlabel['text'] = "Velocity format incorrect"
            return
        theta = np.deg2rad(float(self.angleinput.get()))
        b = float(self.draginput.get())
        self.physicshandler.v0 = vel0
        self.physicshandler.theta = theta
        self.physicshandler.b = b
        distance, height = self.geography()
        self.physicshandler.distance = distance
        if self.barrierset.get():
            self.physicshandler.height = height
            self.physicshandler.barrier = True
        else:
            # Sentinel: no barrier in play.
            self.physicshandler.height = -1
            self.physicshandler.barrier = False
        self.physicshandler.compute()
        self.xyGraph()

    def txGraph(self):
        """Plot horizontal distance x(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['x'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Distance (m)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def tyGraph(self):
        """Plot height z(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['z'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Height (m)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def tvGraph(self):
        """Plot speed v(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def xyGraph(self):
        """Plot the trajectory z vs. x, optionally with the barrier marked."""
        self._clear_right_panel()
        distance, height = self.geography()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        axs.plot(self.physicshandler.data['x'], self.physicshandler.data['z'], '-', linewidth=2, color='b')
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Height (m)')
        # Square axis limits sized to fit the trajectory (and barrier, if shown).
        if self.barrierset.get():
            maxax = np.max([self.physicshandler.totalR() + 10, self.physicshandler.maxH() + 10, distance + 20])
            minay = np.min([0, self.physicshandler.height - 10])
        else:
            maxax = np.max([self.physicshandler.totalR() + 10, self.physicshandler.maxH() + 10])
            minay = 0
        axs.set_xlim(np.min([0, self.physicshandler.totalR()]), maxax)
        axs.set_ylim(minay, maxax)
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        if self.barrierset.get():
            axs.axvline(x=distance, color='red', linestyle='--')
            axs.plot([distance], [height], marker='P', color='green')
        self._render(fig)

    def xvGraph(self):
        """Plot speed v against horizontal distance x."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['x'] <= self.physicshandler.totalR()]
        axs.plot(selected['x'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def yvGraph(self):
        """Plot speed v against height z (x axis inverted, as before)."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['z'] >= self.physicshandler.height]
        axs.plot(selected['z'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Height (m)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        axs.invert_xaxis()
        self._render(fig)

    def addStatistics(self):
        """(Re)build the 'Results' box under the plot with trajectory stats."""
        stats = tk.LabelFrame(self.rightpanel, text='Results')
        stats.grid(row=1, column=0)
        lines = [
            f'Range: {self.physicshandler.totalR():.1f} m',
            f'Max height: {self.physicshandler.maxH():.1f} m',
            f'Time to max height: {self.physicshandler.maxT():.1f} s',
            f'Time of flight: {self.physicshandler.totalT():.1f} s',
            f'Velocity of impact: {self.physicshandler.finalV():.1f} m/s',
            f'Angle of impact: {self.physicshandler.finalTheta():.1f} degrees',
        ]
        for row, text in enumerate(lines):
            tk.Label(stats, text=text).grid(row=row, column=0)

    def saveCSV(self):
        """Export the computed trajectory to a user-chosen CSV file."""
        if self.physicshandler.data is None:
            self.userlabel['text'] = "No computed data exists"
        else:
            fname = filedialog.asksaveasfilename(initialdir=".", title="Select file",
                                                 filetypes=(("CSV files", "*.csv"), ("all files", "*.*")))
            if not fname:  # dialog cancelled — previously crashed on ''+'.csv'
                return
            self.physicshandler.save_csv(fname + ".csv")
            self.userlabel['text'] = "File saved"

    def savePNG(self):
        """Export the most recently drawn figure to a user-chosen PNG file."""
        if self.physicshandler.data is None:
            self.userlabel['text'] = "No computed data exists"
        else:
            fname = filedialog.asksaveasfilename(initialdir=".", title="Select file",
                                                 filetypes=(("PNG files", "*.png"), ("all files", "*.*")))
            if not fname:  # dialog cancelled
                return
            self.mostrecentfig.savefig(fname + ".png")
            self.userlabel['text'] = "File saved"

    def bye(self):
        """Stop the mainloop and tear down the widget tree."""
        self.quit()
        self.destroy()
if __name__ == "__main__":
    # Launch the ballistics GUI when run as a script.
    NumericalVGUI().mainloop()
|
# Red Ciudadana de Estaciones Meteorologicas
#
# Copyright @ 2021
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import numpy as np
import matplotlib
import tkinter as tk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from matplotlib.figure import Figure
from PhysicsEngine import NumericalVPhysicsHandler
from tkinter import filedialog
class NumericalVGUI(tk.Frame):
    """Tkinter front-end for a numerical ballistics model with drag linear in v.

    The left panel holds the input widgets (launch angle, drag coefficient,
    initial velocity, initial/final coordinates and an optional barrier);
    the right panel shows the current matplotlib figure plus a summary of
    the computed trajectory (range, max height, flight time, ...).
    """

    def __init__(self, master=None):
        # Physics model; the real parameters are pushed in by compute().
        self.physicshandler = NumericalVPhysicsHandler(0, 0, 0)
        tk.Frame.__init__(self, master)
        self.grid()
        # Top-level panel structure.
        self.panels = tk.Frame(self)
        self.panels.pack(fill=tk.BOTH, expand=1)
        # Left (controls) and right (plot) panels.
        self.leftpanel = tk.Frame(self.panels, relief=tk.GROOVE)
        self.leftpanel.pack(side=tk.LEFT)
        self.rightpanel = tk.Frame(self.panels)
        self.rightpanel.pack(side=tk.RIGHT)
        # Parameter grid for the upper-left panel.
        self.ulpanel = tk.LabelFrame(self.leftpanel, text='Parameters')
        self.ulpanel.pack(side=tk.TOP)
        # Control for the launch angle.
        self.anglelable = tk.Label(self.ulpanel, text='Initial angle (degrees)')
        self.anglelable.grid(row=0, column=0)
        self.angleinput = tk.Scale(self.ulpanel, from_=0, to=90, resolution=1, length=170, orient=tk.HORIZONTAL)
        self.angleinput.grid(row=0, column=1)
        # Control for the drag coefficient.
        self.draglable = tk.Label(self.ulpanel, text='Drag coefficient (s^-1)')
        self.draglable.grid(row=1, column=0)
        self.draginput = tk.Scale(self.ulpanel, from_=0.01, to=2, resolution=0.01, length=170, orient=tk.HORIZONTAL)
        self.draginput.grid(row=1, column=1)
        # Control for the initial velocity.
        self.velocitylabel = tk.Label(self.ulpanel, text='Initial velocity (m/s)')
        self.velocitylabel.grid(row=2, column=0)
        self.velocityinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.velocityinput.grid(row=2, column=1)
        self.velocityinput.insert(0, '125')
        # Initial position (lat/lon/height) entries, with defaults.
        self.latIlabel = tk.Label(self.ulpanel, text='I. Lat (m)')
        self.latIlabel.grid(row=3, column=0)
        self.lonIlabel = tk.Label(self.ulpanel, text='I. Lon (m)')
        self.lonIlabel.grid(row=3, column=1)
        self.heightIlabel = tk.Label(self.ulpanel, text='I. Height (m)')
        self.heightIlabel.grid(row=3, column=2)
        self.latIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latIinput.grid(row=4, column=0)
        self.lonIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonIinput.grid(row=4, column=1)
        self.heightIinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightIinput.grid(row=4, column=2)
        self.latIinput.insert(0, '0')
        self.lonIinput.insert(0, '0')
        self.heightIinput.insert(0, '0')
        self.pblanklabel = tk.Label(self.ulpanel, text='')
        self.pblanklabel.grid(row=5, column=0, columnspan=2)
        # Final position (lat/lon/height) entries, with defaults.
        self.latFlabel = tk.Label(self.ulpanel, text='F. Lat (m)')
        self.latFlabel.grid(row=6, column=0)
        self.lonFlabel = tk.Label(self.ulpanel, text='F. Lon (m)')
        self.lonFlabel.grid(row=6, column=1)
        self.heightFlabel = tk.Label(self.ulpanel, text='F. Height (m)')
        self.heightFlabel.grid(row=6, column=2)
        self.latFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.latFinput.grid(row=7, column=0)
        self.lonFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.lonFinput.grid(row=7, column=1)
        self.heightFinput = tk.Entry(self.ulpanel, justify=tk.RIGHT, width=10)
        self.heightFinput.grid(row=7, column=2)
        self.latFinput.insert(0, '100')
        self.lonFinput.insert(0, '100')
        self.heightFinput.insert(0, '0')
        # Toggle for drawing the barrier at the final position.
        self.barrierset = tk.BooleanVar()
        self.barriercheck = tk.Checkbutton(self.ulpanel, justify=tk.RIGHT, variable=self.barrierset, onvalue=True,
                                           offvalue=False, text='Show barrier')
        self.barriercheck.grid(row=8, column=0)
        # Button grid for the lower-left panel.
        self.blpanel = tk.Frame(self.leftpanel)
        self.blpanel.pack(side=tk.BOTTOM)
        self.blanklabel = tk.Label(self.blpanel, text="")
        self.blanklabel.grid(row=0, column=0, columnspan=2)
        # NOTE(review): every plot button is assigned to self.computebutton,
        # so only the last one stays reachable through that attribute.  The
        # names are kept as-is to avoid changing the instance attribute set.
        self.computebutton = tk.Button(self.blpanel, text="Compute", width=20, command=self.compute, default=tk.NORMAL)
        self.computebutton.grid(row=1, column=0, columnspan=3)
        self.computebutton = tk.Button(self.blpanel, text="x(t) vs. t", width=10, command=self.txGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=0)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. t", width=10, command=self.tyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. t", width=10, command=self.tvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=2, column=2)
        self.computebutton = tk.Button(self.blpanel, text="z(t) vs. x(t)", width=10, command=self.xyGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=0)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. x(t)", width=10, command=self.xvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=1)
        self.computebutton = tk.Button(self.blpanel, text="v(t) vs. z(t)", width=10, command=self.yvGraph, default=tk.NORMAL)
        self.computebutton.grid(row=3, column=2)
        # Red label used for user-facing error/status messages.
        self.userlabel = tk.Label(self.blpanel, text="", fg="red")
        self.userlabel.grid(row=4, column=0, columnspan=3)
        self.csvbutton = tk.Button(self.blpanel, text="Save to CSV", command=self.saveCSV, default=tk.NORMAL)
        self.csvbutton.grid(row=5, column=0)
        self.pngbutton = tk.Button(self.blpanel, text="Save to PNG", command=self.savePNG, default=tk.NORMAL)
        self.pngbutton.grid(row=5, column=1)
        self.quitbutton = tk.Button(self.blpanel, text="Quit", command=self.bye, default=tk.NORMAL)
        self.quitbutton.grid(row=5, column=2)
        # Neutral initial model state and an empty 100 m x 100 m plot.
        self.physicshandler.v0 = 0
        self.physicshandler.theta = 0
        self.physicshandler.b = 1
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Height (m)')
        axs.set_xlim(0, 100)
        axs.set_ylim(0, 100)
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def _read_float(self, entry, errmsg):
        """Parse a float from *entry*; on bad input show *errmsg* and return 0.0."""
        try:
            return float(entry.get())
        except ValueError:
            self.userlabel['text'] = errmsg
            return 0.0

    def _clear_right_panel(self):
        """Destroy every widget currently gridded in the right panel."""
        for slave in self.rightpanel.grid_slaves():
            slave.destroy()

    def _render(self, fig):
        """Embed *fig* in the right panel, refresh the statistics box and
        remember the figure so savePNG() can export it."""
        canvas = FigureCanvasTkAgg(fig, master=self.rightpanel)
        canvas.draw()
        canvas.get_tk_widget().grid(row=0, column=0)
        self.addStatistics()
        self.mostrecentfig = fig

    def geography(self):
        """Return (horizontal distance, height difference) between the initial
        and final positions entered in the UI.

        Unparseable fields report an error in the status label and fall back
        to 0.0 (as before).  The error messages for the height fields used to
        say "latitude" (copy-paste bug) — fixed to say "height".
        """
        latI = self._read_float(self.latIinput, "Initial latitude format incorrect")
        latF = self._read_float(self.latFinput, "Final latitude format incorrect")
        lonI = self._read_float(self.lonIinput, "Initial longitude format incorrect")
        lonF = self._read_float(self.lonFinput, "Final longitude format incorrect")
        heightI = self._read_float(self.heightIinput, "Initial height format incorrect")
        heightF = self._read_float(self.heightFinput, "Final height format incorrect")
        distance = np.sqrt(np.power((latF - latI), 2) + np.power((lonF - lonI), 2))
        height = heightF - heightI
        return (distance, height)

    def compute(self):
        """Read the UI parameters, run the physics model and plot z vs. x."""
        self.userlabel['text'] = ""
        try:
            vel0 = float(self.velocityinput.get())
        except ValueError:  # was a bare except; only float() can fail here
            self.userlabel['text'] = "Velocity format incorrect"
            return
        theta = np.deg2rad(float(self.angleinput.get()))
        b = float(self.draginput.get())
        self.physicshandler.v0 = vel0
        self.physicshandler.theta = theta
        self.physicshandler.b = b
        distance, height = self.geography()
        self.physicshandler.distance = distance
        if self.barrierset.get():
            self.physicshandler.height = height
            self.physicshandler.barrier = True
        else:
            # Sentinel: no barrier in play.
            self.physicshandler.height = -1
            self.physicshandler.barrier = False
        self.physicshandler.compute()
        self.xyGraph()

    def txGraph(self):
        """Plot horizontal distance x(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['x'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Distance (m)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def tyGraph(self):
        """Plot height z(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['z'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Height (m)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def tvGraph(self):
        """Plot speed v(t) against time."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['t'] <= self.physicshandler.totalT()]
        axs.plot(selected['t'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Time (s)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def xyGraph(self):
        """Plot the trajectory z vs. x, optionally with the barrier marked."""
        self._clear_right_panel()
        distance, height = self.geography()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        axs.plot(self.physicshandler.data['x'], self.physicshandler.data['z'], '-', linewidth=2, color='b')
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Height (m)')
        # Square axis limits sized to fit the trajectory (and barrier, if shown).
        if self.barrierset.get():
            maxax = np.max([self.physicshandler.totalR() + 10, self.physicshandler.maxH() + 10, distance + 20])
            minay = np.min([0, self.physicshandler.height - 10])
        else:
            maxax = np.max([self.physicshandler.totalR() + 10, self.physicshandler.maxH() + 10])
            minay = 0
        axs.set_xlim(np.min([0, self.physicshandler.totalR()]), maxax)
        axs.set_ylim(minay, maxax)
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        if self.barrierset.get():
            axs.axvline(x=distance, color='red', linestyle='--')
            axs.plot([distance], [height], marker='P', color='green')
        self._render(fig)

    def xvGraph(self):
        """Plot speed v against horizontal distance x."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['x'] <= self.physicshandler.totalR()]
        axs.plot(selected['x'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Distance (m)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        self._render(fig)

    def yvGraph(self):
        """Plot speed v against height z (x axis inverted, as before)."""
        self._clear_right_panel()
        fig, axs = plt.subplots(1, 1, figsize=(7, 6), dpi=80)
        selected = self.physicshandler.data[self.physicshandler.data['z'] >= self.physicshandler.height]
        axs.plot(selected['z'], selected['v'], '-', linewidth=2, color='b')
        axs.set_xlabel('Height (m)')
        axs.set_ylabel('Velocity (m/s)')
        axs.set_title('Ballistics with constant drag (b) proportional to v')
        axs.invert_xaxis()
        self._render(fig)

    def addStatistics(self):
        """(Re)build the 'Results' box under the plot with trajectory stats."""
        stats = tk.LabelFrame(self.rightpanel, text='Results')
        stats.grid(row=1, column=0)
        lines = [
            f'Range: {self.physicshandler.totalR():.1f} m',
            f'Max height: {self.physicshandler.maxH():.1f} m',
            f'Time to max height: {self.physicshandler.maxT():.1f} s',
            f'Time of flight: {self.physicshandler.totalT():.1f} s',
            f'Velocity of impact: {self.physicshandler.finalV():.1f} m/s',
            f'Angle of impact: {self.physicshandler.finalTheta():.1f} degrees',
        ]
        for row, text in enumerate(lines):
            tk.Label(stats, text=text).grid(row=row, column=0)

    def saveCSV(self):
        """Export the computed trajectory to a user-chosen CSV file."""
        if self.physicshandler.data is None:
            self.userlabel['text'] = "No computed data exists"
        else:
            fname = filedialog.asksaveasfilename(initialdir=".", title="Select file",
                                                 filetypes=(("CSV files", "*.csv"), ("all files", "*.*")))
            if not fname:  # dialog cancelled — previously crashed on ''+'.csv'
                return
            self.physicshandler.save_csv(fname + ".csv")
            self.userlabel['text'] = "File saved"

    def savePNG(self):
        """Export the most recently drawn figure to a user-chosen PNG file."""
        if self.physicshandler.data is None:
            self.userlabel['text'] = "No computed data exists"
        else:
            fname = filedialog.asksaveasfilename(initialdir=".", title="Select file",
                                                 filetypes=(("PNG files", "*.png"), ("all files", "*.*")))
            if not fname:  # dialog cancelled
                return
            self.mostrecentfig.savefig(fname + ".png")
            self.userlabel['text'] = "File saved"

    def bye(self):
        """Stop the mainloop and tear down the widget tree."""
        self.quit()
        self.destroy()
if __name__ == "__main__":
    # Launch the ballistics GUI when run as a script.
    NumericalVGUI().mainloop()
|
en
| 0.525514
|
# Red Ciudadana de Estaciones Meteorologicas # # Copyright @ 2021 # # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # Top level panel structure # Left and right panels # Controls grid for upper left pannel # Control for angle # Control for drag # Control for velocity # Controls grid for upper left pannel # Buttons for various functions # Buttons for various functions
| 3.012111
| 3
|
packages/nodejs-lts.py
|
zpcc/mpkg-pkgs
| 1
|
6627501
|
# repo: zpcc/mpkg-pkgs
import time
from mpkg.common import Soft
from mpkg.utils import Search
class Package(Soft):
    """mpkg package definition for the Node.js LTS release channel."""

    ID = 'nodejs-lts'

    def _prepare(self):
        """Scrape nodejs.org for the current LTS version, release date,
        per-architecture installer URLs and SHA-256 checksums."""
        info = self.data
        installers = {'32bit': 'https://nodejs.org/dist/v{ver}/node-v{ver}-x86.msi',
                      '64bit': 'https://nodejs.org/dist/v{ver}/node-v{ver}-x64.msi'}
        download_page = 'https://nodejs.org/en/download/'
        info.ver = Search(download_page, 'LTS Version: <strong>([\\d.]+)</strong>')
        info.changelog = 'https://github.com/nodejs/node/tree/master/doc/changelogs'
        # The dist index lists the upload date next to each installer file.
        raw_date = Search(
            f'https://nodejs.org/dist/v{info.ver}/', f'node-v{info.ver}-x64.msi</a>\\s+([\\w-]+)')
        info.date = time.strftime('%Y-%m-%d', time.strptime(raw_date, '%d-%b-%Y'))
        info.arch = Search(links=installers, ver=info.ver)
        info.sha256 = Search(
            info.arch, sumurl=f'https://nodejs.org/dist/v{info.ver}/SHASUMS256.txt')
|
import time
from mpkg.common import Soft
from mpkg.utils import Search
class Package(Soft):
    """mpkg package definition for the Node.js LTS release channel."""

    ID = 'nodejs-lts'

    def _prepare(self):
        """Scrape nodejs.org for the current LTS version, release date,
        per-architecture installer URLs and SHA-256 checksums."""
        info = self.data
        installers = {'32bit': 'https://nodejs.org/dist/v{ver}/node-v{ver}-x86.msi',
                      '64bit': 'https://nodejs.org/dist/v{ver}/node-v{ver}-x64.msi'}
        download_page = 'https://nodejs.org/en/download/'
        info.ver = Search(download_page, 'LTS Version: <strong>([\\d.]+)</strong>')
        info.changelog = 'https://github.com/nodejs/node/tree/master/doc/changelogs'
        # The dist index lists the upload date next to each installer file.
        raw_date = Search(
            f'https://nodejs.org/dist/v{info.ver}/', f'node-v{info.ver}-x64.msi</a>\\s+([\\w-]+)')
        info.date = time.strftime('%Y-%m-%d', time.strptime(raw_date, '%d-%b-%Y'))
        info.arch = Search(links=installers, ver=info.ver)
        info.sha256 = Search(
            info.arch, sumurl=f'https://nodejs.org/dist/v{info.ver}/SHASUMS256.txt')
|
none
| 1
| 2.389354
| 2
|
|
lnt/lnt/report/draft_salary_register/draft_salary_register.py
|
vhrspvl/lnt
| 0
|
6627502
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
def execute(filters=None):
	"""Build the Draft Salary Register report.

	Returns (columns, data): *columns* is the report column spec and *data*
	has one row per draft salary slip — fixed fields, then one column per
	earning component, gross pay, one column per deduction component,
	total deduction and net pay.
	"""
	if not filters:
		filters = {}
	salary_slips = get_salary_slips(filters)
	columns, earning_types, ded_types = get_columns(salary_slips)
	ss_earning_map = get_ss_earning_map(salary_slips)
	ss_ded_map = get_ss_ded_map(salary_slips)
	data = []
	for ss in salary_slips:
		# 'leave_withut_pay' mirrors the (misspelled) field name on the doctype.
		row = [ss.name, ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
			ss.company, ss.start_date, ss.end_date, ss.leave_withut_pay, ss.payment_days]
		# Widen the optional columns (width placeholder "-1") as soon as any
		# slip actually carries a value for them.  replace() is idempotent.
		if ss.branch is not None:
			columns[3] = columns[3].replace('-1', '120')
		if ss.department is not None:
			columns[4] = columns[4].replace('-1', '120')
		if ss.designation is not None:
			columns[5] = columns[5].replace('-1', '120')
		if ss.leave_withut_pay is not None:
			columns[9] = columns[9].replace('-1', '130')
		for e in earning_types:
			row.append(ss_earning_map.get(ss.name, {}).get(e))
		row += [ss.gross_pay]
		for d in ded_types:
			row.append(ss_ded_map.get(ss.name, {}).get(d))
		row += [ss.total_deduction, ss.net_pay]
		data.append(row)
	return columns, data
def get_columns(salary_slips):
	"""Build the report column definitions.

	Fixed columns come first; the width placeholder "-1" marks optional
	columns that execute() widens only when data is present.  One currency
	column is then appended per distinct non-zero salary component found on
	the given slips (earnings first, then deductions).

	Returns (columns, earning_component_names, deduction_component_names).
	(The previous version carried an old copy of the column list in the
	docstring as dead code; removed.)
	"""
	columns = [
		_("Salary Slip ID") + ":Link/Salary Slip:150",_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:-1",
		_("Department") + ":Link/Department:-1", _("Designation") + ":Link/Designation:-1",
		_("Company") + ":Link/Company:120", _("Start Date") + "::80", _("End Date") + "::80", _("Leave Without Pay") + ":Float:-1",
		_("Payment Days") + ":Float:120"
	]
	salary_components = {_("Earning"): [], _("Deduction"): []}
	# Collect every component (with its type) that appears with a non-zero
	# amount on any of the selected slips.
	for component in frappe.db.sql("""select distinct sd.salary_component, sc.type
		from `tabSalary Detail` sd, `tabSalary Component` sc
		where sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s)""" %
		(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1):
		salary_components[_(component.type)].append(component.salary_component)
	columns = columns + [(e + ":Currency:120") for e in salary_components[_("Earning")]] + \
		[_("Gross Pay") + ":Currency:120"] + [(d + ":Currency:120") for d in salary_components[_("Deduction")]] + \
		[_("Total Deduction") + ":Currency:120", _("Net Pay") + ":Currency:120"]
	return columns, salary_components[_("Earning")], salary_components[_("Deduction")]
def get_salary_slips(filters):
	"""Fetch draft (docstatus = 0) salary slips matching *filters*.

	Copies the two ends of the required "date_range" filter into
	from_date/to_date (note: mutates the caller's dict), builds the WHERE
	fragment via get_conditions(), and queries ordered by employee.
	Aborts via frappe.throw() when nothing matches.
	"""
	filters.update({"from_date": filters.get("date_range")[0], "to_date":filters.get("date_range")[1]})
	conditions, filters = get_conditions(filters)
	# docstatus = 0 restricts the report to draft (unsubmitted) slips.
	salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 0 %s
		order by employee""" % conditions, filters, as_dict=1)
	if not salary_slips:
		frappe.throw(_("No salary slip found between {0} and {1}").format(
			filters.get("from_date"), filters.get("to_date")))
	return salary_slips
def get_conditions(filters):
	"""Build the SQL WHERE-clause fragment for the supplied filters.

	Each recognised filter key appends one parameterised " and ..." clause;
	returns (conditions_string, filters) so the caller can pass the dict as
	query parameters.
	"""
	clauses = []
	if filters.get("date_range"):
		clauses.append(" and start_date >= %(from_date)s")
	if filters.get("date_range"):
		clauses.append(" and end_date <= %(to_date)s")
	if filters.get("company"):
		clauses.append(" and company = %(company)s")
	if filters.get("employee"):
		clauses.append(" and employee = %(employee)s")
	return "".join(clauses), filters
def get_ss_earning_map(salary_slips):
	"""Map salary-slip name -> {salary_component: amount} for the given slips.

	NOTE(review): the query selects every `tabSalary Detail` row for the
	slips without filtering by component type, same as get_ss_ded_map —
	presumably filtering happens via the component lists from get_columns();
	confirm against the doctype schema.
	"""
	ss_earnings = frappe.db.sql("""select parent, salary_component, amount
		from `tabSalary Detail` where parent in (%s)""" %
		(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
	ss_earning_map = {}
	for d in ss_earnings:
		# setdefault creates the per-slip dict once; the amount is assigned
		# directly (the old code also seeded a throwaway [] that was
		# immediately overwritten).
		ss_earning_map.setdefault(d.parent, frappe._dict())[d.salary_component] = flt(d.amount)
	return ss_earning_map
def get_ss_ded_map(salary_slips):
    """Map each salary slip name to ``{salary_component: amount}``.

    Same query as the earning map — all Salary Detail rows for the slips;
    the earning/deduction split happens later by component type.
    """
    ss_deductions = frappe.db.sql("""select parent, salary_component, amount
        from `tabSalary Detail` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
    ss_ded_map = {}
    for d in ss_deductions:
        # Fix: drop the redundant setdefault(..., []) that was immediately
        # overwritten; assign the amount directly.
        ss_ded_map.setdefault(d.parent, frappe._dict())[d.salary_component] = flt(d.amount)
    return ss_ded_map
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
def execute(filters=None):
    """Report entry point: return ``(columns, data)`` for the salary register.

    One row per salary slip, ordered by employee, with one Currency column
    per earning/deduction component appearing in the fetched slips.
    """
    if not filters: filters = {}
    salary_slips = get_salary_slips(filters)
    columns, earning_types, ded_types = get_columns(salary_slips)
    ss_earning_map = get_ss_earning_map(salary_slips)
    ss_ded_map = get_ss_ded_map(salary_slips)
    data = []
    for ss in salary_slips:
        # NOTE: "leave_withut_pay" (sic) matches the misspelled DocType
        # fieldname — do not "correct" the spelling here.
        row = [ss.name, ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
            ss.company, ss.start_date, ss.end_date, ss.leave_withut_pay, ss.payment_days]
        # Columns 3/4/5/9 start with width "-1" (hidden); widen them to a
        # visible width as soon as any slip carries a value for that field.
        if not ss.branch == None:columns[3] = columns[3].replace('-1','120')
        if not ss.department == None: columns[4] = columns[4].replace('-1','120')
        if not ss.designation == None: columns[5] = columns[5].replace('-1','120')
        if not ss.leave_withut_pay == None: columns[9] = columns[9].replace('-1','130')
        for e in earning_types:
            row.append(ss_earning_map.get(ss.name, {}).get(e))
        row += [ss.gross_pay]
        for d in ded_types:
            row.append(ss_ded_map.get(ss.name, {}).get(d))
        row += [ss.total_deduction, ss.net_pay]
        data.append(row)
    return columns, data
def get_columns(salary_slips):
    """Return ``(columns, earning_names, deduction_names)`` for the report.

    Width ``-1`` marks Branch/Department/Designation/Leave-Without-Pay as
    hidden until ``execute`` widens them.  Legacy always-visible definition,
    kept for reference:

    columns = [
        _("Salary Slip ID") + ":Link/Salary Slip:150",_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
        _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
        _("Company") + ":Link/Company:120", _("Start Date") + "::80", _("End Date") + "::80", _("Leave Without Pay") + ":Float:130",
        _("Payment Days") + ":Float:120"
    ]
    """
    columns = [
        _("Salary Slip ID") + ":Link/Salary Slip:150",_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:-1",
        _("Department") + ":Link/Department:-1", _("Designation") + ":Link/Designation:-1",
        _("Company") + ":Link/Company:120", _("Start Date") + "::80", _("End Date") + "::80", _("Leave Without Pay") + ":Float:-1",
        _("Payment Days") + ":Float:120"
    ]
    salary_components = {_("Earning"): [], _("Deduction"): []}
    # Collect every component with a non-zero amount on the selected slips,
    # bucketed by component type (Earning / Deduction).
    for component in frappe.db.sql("""select distinct sd.salary_component, sc.type
        from `tabSalary Detail` sd, `tabSalary Component` sc
        where sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1):
        salary_components[_(component.type)].append(component.salary_component)
    # Column order: fixed columns, earnings, Gross Pay, deductions, totals —
    # execute() builds each data row in the same order.
    columns = columns + [(e + ":Currency:120") for e in salary_components[_("Earning")]] + \
        [_("Gross Pay") + ":Currency:120"] + [(d + ":Currency:120") for d in salary_components[_("Deduction")]] + \
        [_("Total Deduction") + ":Currency:120", _("Net Pay") + ":Currency:120"]
    return columns, salary_components[_("Earning")], salary_components[_("Deduction")]
def get_salary_slips(filters):
    """Fetch Salary Slip records matching the report filters.

    Copies ``filters["date_range"]`` (a (from, to) pair) into explicit
    from_date/to_date keys, then queries draft slips ordered by employee.
    Raises via ``frappe.throw`` when nothing matches.
    """
    date_range = filters.get("date_range")
    filters.update({"from_date": date_range[0], "to_date": date_range[1]})
    conditions, filters = get_conditions(filters)
    query = """select * from `tabSalary Slip` where docstatus = 0 %s
        order by employee""" % conditions
    salary_slips = frappe.db.sql(query, filters, as_dict=1)
    if not salary_slips:
        message = _("No salary slip found between {0} and {1}").format(
            filters.get("from_date"), filters.get("to_date"))
        frappe.throw(message)
    return salary_slips
def get_conditions(filters):
    """Build the SQL condition fragment for the salary-slip query.

    Returns ``(conditions, filters)``: ``conditions`` concatenates one
    ``" and ..."`` clause (with pyformat placeholders) per filter key that
    is present and truthy; ``filters`` is passed through unchanged.
    """
    fragments = (
        ("date_range", " and start_date >= %(from_date)s"),
        ("date_range", " and end_date <= %(to_date)s"),
        ("company", " and company = %(company)s"),
        ("employee", " and employee = %(employee)s"),
    )
    conditions = "".join(
        clause for key, clause in fragments if filters.get(key))
    return conditions, filters
def get_ss_earning_map(salary_slips):
    """Map each salary slip name to ``{salary_component: amount}``.

    Fetches all Salary Detail rows of the given slips in one query; the
    earning/deduction split happens later by component type.
    """
    ss_earnings = frappe.db.sql("""select parent, salary_component, amount
        from `tabSalary Detail` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
    ss_earning_map = {}
    for d in ss_earnings:
        # Fix: the old setdefault(..., []) seeded a list that was immediately
        # overwritten by the amount; write the amount directly instead.
        ss_earning_map.setdefault(d.parent, frappe._dict())[d.salary_component] = flt(d.amount)
    return ss_earning_map
def get_ss_ded_map(salary_slips):
    """Map each salary slip name to ``{salary_component: amount}``.

    Identical query to the earning map; filtering to deduction components
    is done by the caller via the component-type lists from get_columns.
    """
    ss_deductions = frappe.db.sql("""select parent, salary_component, amount
        from `tabSalary Detail` where parent in (%s)""" %
        (', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
    ss_ded_map = {}
    for d in ss_deductions:
        # Fix: drop the redundant setdefault(..., []) that was immediately
        # overwritten; assign the amount directly.
        ss_ded_map.setdefault(d.parent, frappe._dict())[d.salary_component] = flt(d.amount)
    return ss_ded_map
|
en
| 0.597102
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt columns = [ _("Salary Slip ID") + ":Link/Salary Slip:150",_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120", _("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120", _("Company") + ":Link/Company:120", _("Start Date") + "::80", _("End Date") + "::80", _("Leave Without Pay") + ":Float:130", _("Payment Days") + ":Float:120" ] select distinct sd.salary_component, sc.type from `tabSalary Detail` sd, `tabSalary Component` sc where sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s) select * from `tabSalary Slip` where docstatus = 0 %s order by employee select parent, salary_component, amount from `tabSalary Detail` where parent in (%s) select parent, salary_component, amount from `tabSalary Detail` where parent in (%s)
| 2.282491
| 2
|
Code/Bogoliubov_Kitaev.py
|
PatrickHuembeli/Adversarial-Domain-Adaptation-for-Identifying-Phase-Transitions
| 19
|
6627503
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File to generate Input Data for Kitaev model.
Credits:
Hamiltonian from https://topocondmat.org/w1_topointro/1D.html
Bogoliubov according to
http://iopscience.iop.org/article/10.1088/0953-8984/25/47/475304
Author: <NAME>
"""
import sys
import numpy as np
from scipy import linalg as la
import h5py
# For simplicity we set t = delta
def hamiltonian(n, t, mu, delta):
    """Return the 2n x 2n BdG Hamiltonian of an open n-site Kitaev chain.

    Block structure [[H0, D^T], [D, -H0]] with H0 = mu*I + t*(nearest-
    neighbour hopping) and D the antisymmetric pairing matrix with
    amplitude delta.  Returned as a numpy matrix (np.bmat).
    """
    off = np.ones(n - 1)
    h0 = mu * np.eye(n) + t * (np.diag(off, k=1) + np.diag(off, k=-1))
    pairing = delta * (np.diag(off, k=-1) - np.diag(off, k=1))
    return np.bmat([[h0, np.transpose(pairing)], [pairing, -h0]])
def gs(n, t, mu, delta):
    """Diagonalize the BdG Hamiltonian and return |eigenvectors|.

    Columns are ordered by ascending eigenvalue (scipy.linalg.eigh order);
    absolute values are taken so the overall sign ambiguity of each
    eigenvector does not affect the dataset.
    """
    eigenvalues, eigenvectors = la.eigh(hamiltonian(n, t, mu, delta))
    return abs(eigenvectors)
nr_of_states = 1001  # number of chemical-potential samples in the sweep
t = 1.0              # hopping amplitude; delta is tied to t below
mu = -2.5*t          # initial mu (shadowed by the sweep variable below)
delta = t
n = 64  # number of sites
start = -4.0         # mu sweep range, in units of t
end = 4.0
# -----------------------------------------------------------------------
mu_list = np.linspace(start, end, nr_of_states)
# Label the sweep: [0, 1] inside the window [-2, 2) (topological phase of
# the Kitaev chain, |mu| < 2t) and [1, 0] (trivial) outside it.
start_index = np.where(mu_list >= -2.)[0][0]
end_index = np.where(mu_list >= 2.)[0][0]
labels = [[1, 0]]*(start_index) + [[0, 1]]*(end_index-start_index) + \
    [[1, 0]]*(nr_of_states-end_index)
if len(labels) != len(mu_list):
    sys.exit('Length of labels not equal length of states')
# One (2n x 2n) matrix of |eigenvector| amplitudes per mu sample.
states = [gs(n, t, mu, delta) for mu in mu_list]
# NOTE(review): the filename says 20001 states but nr_of_states is 1001 —
# confirm which configuration this run is meant to produce.
filename = 'Kitaev_20001_bigger.h5'
f = h5py.File(filename, 'w')
X_dset = f.create_dataset('my_data', (len(labels), 2*n, 2*n), dtype='f')
X_dset[:] = states
y_dset = f.create_dataset('my_labels', (len(labels), 2), dtype='i')
y_dset[:] = labels
f.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File to generate Input Data for Kitaev model.
Credits:
Hamiltonian from https://topocondmat.org/w1_topointro/1D.html
Bogoliubov according to
http://iopscience.iop.org/article/10.1088/0953-8984/25/47/475304
Author: <NAME>
"""
import sys
import numpy as np
from scipy import linalg as la
import h5py
# For simplicity we set t = delta
def hamiltonian(n, t, mu, delta):
    """Build the 2n x 2n Bogoliubov-de Gennes matrix for a Kitaev chain.

    Layout: [[H0, D^T], [D, -H0]], where H0 carries the chemical potential
    mu on the diagonal plus hopping t on the first off-diagonals, and D is
    the antisymmetric pairing block of amplitude delta.
    """
    ones_off = np.ones(n - 1)
    hop = np.diag(ones_off, k=1) + np.diag(ones_off, k=-1)
    h0 = np.diag(mu * np.ones(n)) + t * hop
    d_block = np.diag(delta * ones_off, k=-1) + np.diag(-delta * ones_off, k=1)
    return np.bmat([[h0, d_block.T], [d_block, -h0]])
def gs(n, t, mu, delta):
    """Return the absolute eigenvector amplitudes of the BdG Hamiltonian.

    Uses scipy's eigh (columns ordered by ascending eigenvalue); abs()
    removes the per-eigenvector sign ambiguity before the matrices are
    stored as training data.
    """
    spectrum = la.eigh(hamiltonian(n, t, mu, delta))
    vectors = spectrum[1]
    return abs(vectors)
nr_of_states = 1001  # how many mu values to sample across the sweep
t = 1.0              # hopping amplitude (and pairing, since delta = t)
mu = -2.5*t          # initial value; rebound by the list comprehension below
delta = t
n = 64  # number of sites
start = -4.0         # lower edge of the mu sweep (units of t)
end = 4.0            # upper edge of the mu sweep
# -----------------------------------------------------------------------
mu_list = np.linspace(start, end, nr_of_states)
# One-hot labels: [0, 1] for -2 <= mu < 2 (topological window |mu| < 2t),
# [1, 0] (trivial) elsewhere.
start_index = np.where(mu_list >= -2.)[0][0]
end_index = np.where(mu_list >= 2.)[0][0]
labels = [[1, 0]]*(start_index) + [[0, 1]]*(end_index-start_index) + \
    [[1, 0]]*(nr_of_states-end_index)
if len(labels) != len(mu_list):
    sys.exit('Length of labels not equal length of states')
# Build the dataset: one (2n x 2n) |eigenvector| matrix per mu.
states = [gs(n, t, mu, delta) for mu in mu_list]
# NOTE(review): filename claims 20001 samples but nr_of_states is 1001 —
# verify before relying on the file name.
filename = 'Kitaev_20001_bigger.h5'
f = h5py.File(filename, 'w')
X_dset = f.create_dataset('my_data', (len(labels), 2*n, 2*n), dtype='f')
X_dset[:] = states
y_dset = f.create_dataset('my_labels', (len(labels), 2), dtype='i')
y_dset[:] = labels
f.close()
|
en
| 0.590507
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- File to generate Input Data for Kitaev model.
Credits:
Hamiltonian from https://topocondmat.org/w1_topointro/1D.html
Bogoliubov according to
http://iopscience.iop.org/article/10.1088/0953-8984/25/47/475304
Author: <NAME> # For simplicity we set t = delta # diagonalize the Hamiltonian and finds the ground state # number of sites # -----------------------------------------------------------------------
| 2.701599
| 3
|
tensorflow_federated/python/aggregators/mean.py
|
alessiomora/federated
| 1,918
|
6627504
|
<reponame>alessiomora/federated<filename>tensorflow_federated/python/aggregators/mean.py
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for mean."""
import collections
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
class MeanFactory(factory.WeightedAggregationFactory):
  """Aggregation factory for weighted mean.
  The created `tff.templates.AggregationProcess` computes the weighted mean of
  values placed at `CLIENTS`, and outputs the mean placed at `SERVER`.
  The input arguments of the `next` attribute of the process returned by
  `create` are `<state, value, weight>`, where `weight` is a scalar broadcasted
  to the structure of `value`, and the weighted mean refers to the expression
  `sum(value * weight) / sum(weight)`.
  The implementation is parameterized by two inner aggregation factories
  responsible for the summations above, with the following high-level steps.
  - Multiplication of `value` and `weight` at `CLIENTS`.
  - Delegation to inner `value_sum_factory` and `weight_sum_factory` to
    realize the sum of weighted values and weights.
  - Division of summed weighted values and summed weights at `SERVER`.
  Note that the division at `SERVER` can protect against division by 0, as
  specified by `no_nan_division` constructor argument.
  The `state` is the composed `state` of the aggregation processes created by
  the two inner aggregation factories. The same holds for `measurements`.
  """
  def __init__(
      self,
      value_sum_factory: Optional[factory.UnweightedAggregationFactory] = None,
      weight_sum_factory: Optional[factory.UnweightedAggregationFactory] = None,
      no_nan_division: bool = False):
    """Initializes `MeanFactory`.
    Args:
      value_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of weighted values. If not specified, `tff.aggregators.SumFactory` is
        used.
      weight_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of weights. If not specified, `tff.aggregators.SumFactory` is used.
      no_nan_division: A bool. If True, the computed mean is 0 if sum of weights
        is equal to 0.
    Raises:
      TypeError: If provided `value_sum_factory` or `weight_sum_factory` is not
        an instance of `tff.aggregators.UnweightedAggregationFactory`.
    """
    # Default both inner aggregations to plain summation.
    if value_sum_factory is None:
      value_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(value_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._value_sum_factory = value_sum_factory
    if weight_sum_factory is None:
      weight_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(weight_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._weight_sum_factory = weight_sum_factory
    py_typecheck.check_type(no_nan_division, bool)
    self._no_nan_division = no_nan_division
  def create(
      self, value_type: factory.ValueType,
      weight_type: factory.ValueType) -> aggregation_process.AggregationProcess:
    # Values must be (a structure of) floats; weights just need to be a
    # valid aggregator value type.
    _check_value_type(value_type)
    py_typecheck.check_type(weight_type, factory.ValueType.__args__)
    value_sum_process = self._value_sum_factory.create(value_type)
    weight_sum_process = self._weight_sum_factory.create(weight_type)
    @computations.federated_computation()
    def init_fn():
      # State is the pair of inner-process states, zipped to SERVER placement.
      state = collections.OrderedDict(
          value_sum_process=value_sum_process.initialize(),
          weight_sum_process=weight_sum_process.initialize())
      return intrinsics.federated_zip(state)
    @computations.federated_computation(
        init_fn.type_signature.result,
        computation_types.FederatedType(value_type, placements.CLIENTS),
        computation_types.FederatedType(weight_type, placements.CLIENTS))
    def next_fn(state, value, weight):
      # Client computation: scale each client's value by its scalar weight.
      weighted_value = intrinsics.federated_map(_mul, (value, weight))
      # Inner aggregations: sum weighted values and weights independently.
      value_output = value_sum_process.next(state['value_sum_process'],
                                            weighted_value)
      weight_output = weight_sum_process.next(state['weight_sum_process'],
                                              weight)
      # Server computation: divide the two sums (safely, if configured).
      weighted_mean_value = intrinsics.federated_map(
          _div_no_nan if self._no_nan_division else _div,
          (value_output.result, weight_output.result))
      # Output preparation: recompose state and surface inner measurements.
      state = collections.OrderedDict(
          value_sum_process=value_output.state,
          weight_sum_process=weight_output.state)
      measurements = collections.OrderedDict(
          mean_value=value_output.measurements,
          mean_weight=weight_output.measurements)
      return measured_process.MeasuredProcessOutput(
          intrinsics.federated_zip(state), weighted_mean_value,
          intrinsics.federated_zip(measurements))
    return aggregation_process.AggregationProcess(init_fn, next_fn)
class UnweightedMeanFactory(factory.UnweightedAggregationFactory):
  """Aggregation factory for unweighted mean.
  The created `tff.templates.AggregationProcess` computes the unweighted mean
  of values placed at `CLIENTS`, and outputs the mean placed at `SERVER`.
  The input arguments of the `next` attribute of the process returned by
  `create` are `<state, value>`, and the unweighted mean refers to the
  expression `sum(value) / count(value)` where `count(value)` is the
  cardinality of the `CLIENTS` placement.
  The implementation is parameterized by an inner aggregation factory
  responsible for the summation of values.
  """
  def __init__(
      self,
      value_sum_factory: Optional[factory.UnweightedAggregationFactory] = None):
    """Initializes `UnweightedMeanFactory`.
    Args:
      value_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of values. If not specified, `tff.aggregators.SumFactory` is used.
    Raises:
      TypeError: If provided `value_sum_factory` is not an instance of
        `tff.aggregators.UnweightedAggregationFactory`.
    """
    # Default the inner aggregation to plain summation.
    if value_sum_factory is None:
      value_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(value_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._value_sum_factory = value_sum_factory
  def create(
      self,
      value_type: factory.ValueType) -> aggregation_process.AggregationProcess:
    # Values must be (a structure of) floats for the mean to be well-defined.
    _check_value_type(value_type)
    value_sum_process = self._value_sum_factory.create(value_type)
    @computations.federated_computation()
    def init_fn():
      # State is just the inner summation process state.
      return value_sum_process.initialize()
    @computations.federated_computation(init_fn.type_signature.result,
                                        computation_types.FederatedType(
                                            value_type, placements.CLIENTS))
    def next_fn(state, value):
      value_sum_output = value_sum_process.next(state, value)
      # Count participating clients by summing a constant 1 per client.
      count = intrinsics.federated_sum(
          intrinsics.federated_value(1, placements.CLIENTS))
      mean_value = intrinsics.federated_map(_div,
                                            (value_sum_output.result, count))
      state = value_sum_output.state
      measurements = intrinsics.federated_zip(
          collections.OrderedDict(mean_value=value_sum_output.measurements))
      return measured_process.MeasuredProcessOutput(state, mean_value,
                                                    measurements)
    return aggregation_process.AggregationProcess(init_fn, next_fn)
def _check_value_type(value_type):
  """Raises TypeError unless `value_type` is a (structure of) float type(s)."""
  py_typecheck.check_type(value_type, factory.ValueType.__args__)
  if not type_analysis.is_structure_of_floats(value_type):
    raise TypeError(f'All values in provided value_type must be of floating '
                    f'dtype. Provided value_type: {value_type}')
@computations.tf_computation()
def _mul(value, weight):
  """Multiplies every leaf tensor of `value` by the scalar `weight`."""
  return tf.nest.map_structure(lambda x: x * tf.cast(weight, x.dtype), value)
@computations.tf_computation()
def _div(weighted_value_sum, weight_sum):
  """Plain elementwise division; yields inf/nan when `weight_sum` is 0."""
  return tf.nest.map_structure(
      lambda x: tf.math.divide(x, tf.cast(weight_sum, x.dtype)),
      weighted_value_sum)
@computations.tf_computation()
def _div_no_nan(weighted_value_sum, weight_sum):
  """Safe elementwise division; returns 0 where `weight_sum` is 0."""
  return tf.nest.map_structure(
      lambda x: tf.math.divide_no_nan(x, tf.cast(weight_sum, x.dtype)),
      weighted_value_sum)
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory for mean."""
import collections
from typing import Optional
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import sum_factory
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_analysis
from tensorflow_federated.python.core.templates import aggregation_process
from tensorflow_federated.python.core.templates import measured_process
class MeanFactory(factory.WeightedAggregationFactory):
  """Aggregation factory for weighted mean.
  The created `tff.templates.AggregationProcess` computes the weighted mean of
  values placed at `CLIENTS`, and outputs the mean placed at `SERVER`.
  The input arguments of the `next` attribute of the process returned by
  `create` are `<state, value, weight>`, where `weight` is a scalar broadcasted
  to the structure of `value`, and the weighted mean refers to the expression
  `sum(value * weight) / sum(weight)`.
  The implementation is parameterized by two inner aggregation factories
  responsible for the summations above, with the following high-level steps.
  - Multiplication of `value` and `weight` at `CLIENTS`.
  - Delegation to inner `value_sum_factory` and `weight_sum_factory` to
    realize the sum of weighted values and weights.
  - Division of summed weighted values and summed weights at `SERVER`.
  Note that the division at `SERVER` can protect against division by 0, as
  specified by `no_nan_division` constructor argument.
  The `state` is the composed `state` of the aggregation processes created by
  the two inner aggregation factories. The same holds for `measurements`.
  """
  def __init__(
      self,
      value_sum_factory: Optional[factory.UnweightedAggregationFactory] = None,
      weight_sum_factory: Optional[factory.UnweightedAggregationFactory] = None,
      no_nan_division: bool = False):
    """Initializes `MeanFactory`.
    Args:
      value_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of weighted values. If not specified, `tff.aggregators.SumFactory` is
        used.
      weight_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of weights. If not specified, `tff.aggregators.SumFactory` is used.
      no_nan_division: A bool. If True, the computed mean is 0 if sum of weights
        is equal to 0.
    Raises:
      TypeError: If provided `value_sum_factory` or `weight_sum_factory` is not
        an instance of `tff.aggregators.UnweightedAggregationFactory`.
    """
    # Both inner aggregations default to plain summation.
    if value_sum_factory is None:
      value_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(value_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._value_sum_factory = value_sum_factory
    if weight_sum_factory is None:
      weight_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(weight_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._weight_sum_factory = weight_sum_factory
    py_typecheck.check_type(no_nan_division, bool)
    self._no_nan_division = no_nan_division
  def create(
      self, value_type: factory.ValueType,
      weight_type: factory.ValueType) -> aggregation_process.AggregationProcess:
    # Values must be (a structure of) floats; weights may be any ValueType.
    _check_value_type(value_type)
    py_typecheck.check_type(weight_type, factory.ValueType.__args__)
    value_sum_process = self._value_sum_factory.create(value_type)
    weight_sum_process = self._weight_sum_factory.create(weight_type)
    @computations.federated_computation()
    def init_fn():
      # Compose the two inner-process states into a single SERVER state.
      state = collections.OrderedDict(
          value_sum_process=value_sum_process.initialize(),
          weight_sum_process=weight_sum_process.initialize())
      return intrinsics.federated_zip(state)
    @computations.federated_computation(
        init_fn.type_signature.result,
        computation_types.FederatedType(value_type, placements.CLIENTS),
        computation_types.FederatedType(weight_type, placements.CLIENTS))
    def next_fn(state, value, weight):
      # Client computation: weight each client's value.
      weighted_value = intrinsics.federated_map(_mul, (value, weight))
      # Inner aggregations of weighted values and of weights.
      value_output = value_sum_process.next(state['value_sum_process'],
                                            weighted_value)
      weight_output = weight_sum_process.next(state['weight_sum_process'],
                                              weight)
      # Server computation: divide the sums (safe variant if configured).
      weighted_mean_value = intrinsics.federated_map(
          _div_no_nan if self._no_nan_division else _div,
          (value_output.result, weight_output.result))
      # Output preparation: updated composite state plus inner measurements.
      state = collections.OrderedDict(
          value_sum_process=value_output.state,
          weight_sum_process=weight_output.state)
      measurements = collections.OrderedDict(
          mean_value=value_output.measurements,
          mean_weight=weight_output.measurements)
      return measured_process.MeasuredProcessOutput(
          intrinsics.federated_zip(state), weighted_mean_value,
          intrinsics.federated_zip(measurements))
    return aggregation_process.AggregationProcess(init_fn, next_fn)
class UnweightedMeanFactory(factory.UnweightedAggregationFactory):
  """Aggregation factory for unweighted mean.
  The created `tff.templates.AggregationProcess` computes the unweighted mean
  of values placed at `CLIENTS`, and outputs the mean placed at `SERVER`.
  The input arguments of the `next` attribute of the process returned by
  `create` are `<state, value>`, and the unweighted mean refers to the
  expression `sum(value) / count(value)` where `count(value)` is the
  cardinality of the `CLIENTS` placement.
  The implementation is parameterized by an inner aggregation factory
  responsible for the summation of values.
  """
  def __init__(
      self,
      value_sum_factory: Optional[factory.UnweightedAggregationFactory] = None):
    """Initializes `UnweightedMeanFactory`.
    Args:
      value_sum_factory: An optional
        `tff.aggregators.UnweightedAggregationFactory` responsible for summation
        of values. If not specified, `tff.aggregators.SumFactory` is used.
    Raises:
      TypeError: If provided `value_sum_factory` is not an instance of
        `tff.aggregators.UnweightedAggregationFactory`.
    """
    # Plain summation unless an inner factory is supplied.
    if value_sum_factory is None:
      value_sum_factory = sum_factory.SumFactory()
    py_typecheck.check_type(value_sum_factory,
                            factory.UnweightedAggregationFactory)
    self._value_sum_factory = value_sum_factory
  def create(
      self,
      value_type: factory.ValueType) -> aggregation_process.AggregationProcess:
    # The mean is only defined for (structures of) floating-point values.
    _check_value_type(value_type)
    value_sum_process = self._value_sum_factory.create(value_type)
    @computations.federated_computation()
    def init_fn():
      # State is exactly the inner summation process state.
      return value_sum_process.initialize()
    @computations.federated_computation(init_fn.type_signature.result,
                                        computation_types.FederatedType(
                                            value_type, placements.CLIENTS))
    def next_fn(state, value):
      value_sum_output = value_sum_process.next(state, value)
      # Client count = federated sum of a per-client constant 1.
      count = intrinsics.federated_sum(
          intrinsics.federated_value(1, placements.CLIENTS))
      mean_value = intrinsics.federated_map(_div,
                                            (value_sum_output.result, count))
      state = value_sum_output.state
      measurements = intrinsics.federated_zip(
          collections.OrderedDict(mean_value=value_sum_output.measurements))
      return measured_process.MeasuredProcessOutput(state, mean_value,
                                                    measurements)
    return aggregation_process.AggregationProcess(init_fn, next_fn)
def _check_value_type(value_type):
  """Raises TypeError unless `value_type` is a (structure of) float type(s)."""
  py_typecheck.check_type(value_type, factory.ValueType.__args__)
  if not type_analysis.is_structure_of_floats(value_type):
    raise TypeError(f'All values in provided value_type must be of floating '
                    f'dtype. Provided value_type: {value_type}')
@computations.tf_computation()
def _mul(value, weight):
  """Scales every leaf tensor of `value` by the scalar `weight`."""
  return tf.nest.map_structure(lambda x: x * tf.cast(weight, x.dtype), value)
@computations.tf_computation()
def _div(weighted_value_sum, weight_sum):
  """Elementwise division; produces inf/nan when `weight_sum` is 0."""
  return tf.nest.map_structure(
      lambda x: tf.math.divide(x, tf.cast(weight_sum, x.dtype)),
      weighted_value_sum)
@computations.tf_computation()
def _div_no_nan(weighted_value_sum, weight_sum):
  """Elementwise safe division; returns 0 where `weight_sum` is 0."""
  return tf.nest.map_structure(
      lambda x: tf.math.divide_no_nan(x, tf.cast(weight_sum, x.dtype)),
      weighted_value_sum)
|
en
| 0.80636
|
# Copyright 2020, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Factory for mean. Aggregation factory for weighted mean. The created `tff.templates.AggregationProcess` computes the weighted mean of values placed at `CLIENTS`, and outputs the mean placed at `SERVER`. The input arguments of the `next` attribute of the process returned by `create` are `<state, value, weight>`, where `weight` is a scalar broadcasted to the structure of `value`, and the weighted mean refers to the expression `sum(value * weight) / sum(weight)`. The implementation is parameterized by two inner aggregation factories responsible for the summations above, with the following high-level steps. - Multiplication of `value` and `weight` at `CLIENTS`. - Delegation to inner `value_sum_factory` and `weight_sum_factory` to realize the sum of weighted values and weights. - Division of summed weighted values and summed weights at `SERVER`. Note that the the division at `SERVER` can protect against division by 0, as specified by `no_nan_division` constructor argument. The `state` is the composed `state` of the aggregation processes created by the two inner aggregation factories. The same holds for `measurements`. Initializes `MeanFactory`. Args: value_sum_factory: An optional `tff.aggregators.UnweightedAggregationFactory` responsible for summation of weighted values. If not specified, `tff.aggregators.SumFactory` is used. 
weight_sum_factory: An optional `tff.aggregators.UnweightedAggregationFactory` responsible for summation of weights. If not specified, `tff.aggregators.SumFactory` is used. no_nan_division: A bool. If True, the computed mean is 0 if sum of weights is equal to 0. Raises: TypeError: If provided `value_sum_factory` or `weight_sum_factory` is not an instance of `tff.aggregators.UnweightedAggregationFactory`. # Client computation. # Inner aggregations. # Server computation. # Output preparation. Aggregation factory for unweighted mean. The created `tff.templates.AggregationProcess` computes the unweighted mean of values placed at `CLIENTS`, and outputs the mean placed at `SERVER`. The input arguments of the `next` attribute of the process returned by `create` are `<state, value>`, and the unweighted mean refers to the expression `sum(value * weight) / count(value)` where `count(value)` is the cardinality of the `CLIENTS` placement. The implementation is parameterized by an inner aggregation factory responsible for the summation of values. Initializes `UnweightedMeanFactory`. Args: value_sum_factory: An optional `tff.aggregators.UnweightedAggregationFactory` responsible for summation of values. If not specified, `tff.aggregators.SumFactory` is used. Raises: TypeError: If provided `value_sum_factory` is not an instance of `tff.aggregators.UnweightedAggregationFactory`.
| 2.196421
| 2
|
myCode.py
|
shyshin/Sushi-Game-miniclip-bot
| 0
|
6627505
|
<reponame>shyshin/Sushi-Game-miniclip-bot
import win32con,win32api
import time as time
from PIL import ImageGrab as Image
from PIL import ImageOps
import numpy as np
import os
# Ingredient stock tracked locally by the bot; make_food decrements these
# counters as it crafts sushi (presumably mirroring the game's starting
# pantry — TODO confirm against the game).
foodavail={
    'shrimp':5,
    'rice':10,
    'nori':10,
    'fish':10,
    'salmon':5,
    'unagi':5
}
# Per-recipe constants keyed by sushi type ('gun'kan, 'cal'ifornia,
# 'oni'giri). NOTE(review): values look like screen-region pixel-sum
# signatures used for order recognition — confirm against the consuming
# code, which is outside this view.
sushitype={
    'gun':1770,
    'cal':2100,
    'oni':1843
}
class go():
    # Per-seat constants for the six customer positions.
    # NOTE(review): presumably pixel-sum signatures of screengrab regions
    # used to detect seat state — the code that reads them is outside this
    # view; confirm before relying on the interpretation.
    seat1=5514
    seat2=4792
    seat3=9335
    seat4=9254
    seat5=4948
    seat6=7038
    def __init__(self):
        """Screen-coordinate table for the game window (Python 2 bot).

        All tuples are (x, y) offsets relative to the window origin
        (x_pad, y_pad); mousePos adds the padding before moving the cursor.
        """
        self.x_pad=189
        self.y_pad=197
        #---------------------------------- ingredient buttons
        self.fish=(86,430)
        self.rice=(97,373)
        self.nori=(25,430)
        self.salmon=(30,491)
        self.unagi=(80,499)
        self.shrimp=(38,389)
        #---------------------------------- customer seat positions p1..p6
        self.p1=(85,254)
        self.p2=(180,260)
        self.p3=(273,252)
        self.p4=(378,253)
        self.p5=(493,256)
        self.p6=(569,256)
        #---------------------------------- phone-menu and misc UI targets
        self.free=(503,347)
        self.top=(512,315)
        self.orice=(536,338)
        self.ofish=(561,320)
        self.onori=(474,321)
        self.osalmon=(496,372)
        self.ounagi=(581,278)
        self.oshrimp=(489,289)
        self.slate=(189,422)
        self.phone=(596,397)
        self.exit=(595,386)
    def left_down(self):
        """Press (and hold) the left mouse button at the current cursor."""
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
        time.sleep(.1)
        print 'left down'
    def left_up(self):
        """Release the left mouse button (pairs with left_down)."""
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
        time.sleep(.1)
        print 'left release'
    def leftClick(self):
        # Press and release the left mouse button at the current cursor
        # position; the short sleep lets the game register the press.
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
        time.sleep(.1)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
        print "Click."
    def mousePos(self,cord):
        # Move the cursor to the game-relative coordinate *cord*;
        # x_pad/y_pad translate game coordinates to absolute screen pixels.
        c1=cord[0]
        c2=cord[1]
        win32api.SetCursorPos((self.x_pad+c1,self.y_pad+c2))
    def get_cords(self):
        # Return the current cursor position translated back into
        # game-relative coordinates (calibration helper).
        x,y= win32api.GetCursorPos()
        x=x-self.x_pad
        y=y-self.y_pad
        return x,y
    def screengrab(self):
        # Capture the 638x480 game-window region as a PIL image.
        box=(self.x_pad,self.y_pad,self.x_pad+638,self.y_pad+480)
        im= Image.grab(box)
        return im
def start_game(self):
self.mousePos((324,257))
self.leftClick()
time.sleep(.1)
self.mousePos((311,424))
self.leftClick()
time.sleep(.1)
self.mousePos((564,492))
self.leftClick()
time.sleep(.1)
self.mousePos((339,426))
self.leftClick()
time.sleep(.1)
def order(self):
self.mousePos(self.phone)
self.leftClick()
time.sleep(0.1)
self.mousePos(self.orice)
self.leftClick()
time.sleep(.1)
self.leftClick()
self.mousePos(self.free)
self.leftClick()
a=time.time()
return a
def clear_tables(self):
self.mousePos(self.p1)
self.leftClick()
self.mousePos(self.p2)
self.leftClick()
self.mousePos(self.p3)
self.leftClick()
self.mousePos(self.p4)
self.leftClick()
self.mousePos(self.p5)
self.leftClick()
self.mousePos(self.p6)
self.leftClick()
time.sleep(1.5)
def foldslate(self):
self.mousePos(self.slate)
self.leftClick()
time.sleep(.1)
def make_food(self,food):
if food=="oni":
print 'Onigiri'
foodavail['rice']-= 2
foodavail['nori']-= 1
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.rice)
self.leftClick()
time.sleep(.1)
self.foldslate()
elif food=='cal':
print 'california roll'
foodavail['rice']-= 1
foodavail['nori']-= 1
foodavail['fish']-= 1
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.1)
self.foldslate()
elif food=='gun':
print 'gunkan roll'
foodavail['rice']-= 1
foodavail['nori']-= 1
foodavail['fish']-= 2
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.1)
self.foldslate()
def buy_food(self,food):
self.mousePos(self.phone)
time.sleep(.1)
self.leftClick()
if food == "rice":
self.mousePos(self.orice)
time.sleep(0.1)
self.leftClick()
s= self.screengrab()
if s.getpixel(self.orice) !=(127,127,127):
print "rice is avail"
self.mousePos(self.orice)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
self.leftClick()
foodavail['rice']+= 10
time.sleep(6.55)
else:
print 'rice is not avail'
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
else:
self.mousePos(self.top)
self.leftClick()
time.sleep(.1)
s=self.screengrab()
if food == "nori":
print "nori is avail"
if s.getpixel(self.onori) == (218,246,255):
self.mousePos(self.onori)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['nori']+= 10
time.sleep(6.55)
else:
print 'nori is not avail'
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "fish":
if s.getpixel(self.ofish)==(218,246,255):
print "fish is avail"
self.mousePos(self.ofish)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['fish']+= 10
time.sleep(6.55)
else:
print "fish is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "salmon":
if s.getpixel(self.osalmon)==(218,246,255):
print "salmon is avail"
self.mousePos(self.osalmon)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['salmon']+= 5
time.sleep(6.55)
else:
print "salmon is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "unagi":
if s.getpixel(self.ounagi)==(189,98,16):
print "unagi is avail"
self.mousePos(self.ounagi)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['unagi']+= 5
time.sleep(6.55)
else:
print "unagi is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "shrimp":
if s.getpixel(self.shrimp)==(255,255,255):
print "shrimp is avail"
self.mousePos(self.oshrimp)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['rice']+= 10
time.sleep(6.55)
else:
print "shrimp is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
def check_food(self):
for i,j in foodavail.items():
if i == 'fish' or i== 'rice' or i== 'nori':
if j <=3:
print i + 'is low and needs to be replenished'
self.buy_food(i)
def grab(self):
box=(self.x_pad+1,self.y_pad+1,self.x_pad+638,self.y_pad+480)
im= ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
im.save(os.getcwd() + '\\seat_one_'+str(int(time.time())))
print a
return a
def seat_one(self):
box=(self.x_pad+24,self.y_pad+112,self.x_pad+24+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_one_'+str(int(time.time()))+'.png','PNG')
print "seat one "+ str(a)
return a
def seat_two(self):
box=(self.x_pad+125,self.y_pad+112,self.x_pad+125+63,self.y_pad+122)
im= ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_two_'+str(int(time.time()))+'.png','PNG')
print "seat two "+ str(a)
return a
def seat_third(self):
box= (self.x_pad+226,self.y_pad+112,self.x_pad+226+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_third_'+str(int(time.time()))+'.png','PNG')
print "seat third "+ str(a)
return a
def seat_four(self):
box= (self.x_pad+327,self.y_pad+112,self.x_pad+327+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_four_'+str(int(time.time()))+'.png','PNG')
print "seat four "+ str(a)
return a
def seat_five(self):
box= (self.x_pad+428,self.y_pad+112,self.x_pad+428+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_five_'+str(int(time.time()))+'.png','PNG')
print "seat five "+ str(a)
return a
def seat_six(self):
box= (self.x_pad+529,self.y_pad+112,self.x_pad+529+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_six_'+str(int(time.time()))+'.png','PNG')
print "seat six "+ str(a)
return a
def get_seats(self):
self.seat_one()
self.seat_two()
self.seat_third()
self.seat_four()
self.seat_five()
self.seat_six()
def check_bubs(self):
self.check_food()
s1= self.seat_one()
if s1 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s1:
c=1
k= i
if c == 1:
print 'table 1 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s1
else:
print "table 1 is occupied"
self.clear_tables()
self.check_food()
s2= self.seat_two()
if s2 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s2:
c=1
k= i
if c == 1:
print 'table 2 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s2
else:
print "table 2 is occupied"
self.check_food()
s3=self.seat_third()
if s3 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s3:
c=1
k= i
if c == 1:
print 'table 3 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s3
else:
print "table 3 is occupied"
self.check_food()
s4=self.seat_four()
if s4 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s4:
c=1
k= i
if c == 1:
print 'table 4 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s4
else:
print "table 4 is occupied"
self.check_food()
s5=self.seat_five()
if s5 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s5:
c=1
k= i
if c == 1:
print 'table 5 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s5
else:
print "table 5 is occupied"
self.check_food()
s6=self.seat_six()
if s6 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s6:
c=1
k= i
if c == 1:
print 'table 6 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s6
else:
print "table 6 is occupied"
self.clear_tables()
def small_check(self):
self.check_food()
s1= self.seat_one()
if s1 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s1:
c=1
k= i
if c == 1:
print 'table 1 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s1
else:
print "table 1 is occupied"
def main():
    """Launch the game and keep serving customers for one ~3-minute round."""
    bot = go()
    bot.start_game()
    started = time.time()
    # a round lasts roughly 188.65 seconds; keep looping until it ends
    while time.time() - started <= 188.65:
        bot.check_bubs()
if __name__ == "__main__":
main()
|
import win32con,win32api
import time as time
from PIL import ImageGrab as Image
from PIL import ImageOps
import numpy as np
import os
foodavail={
'shrimp':5,
'rice':10,
'nori':10,
'fish':10,
'salmon':5,
'unagi':5
}
sushitype={
'gun':1770,
'cal':2100,
'oni':1843
}
class go():
seat1=5514
seat2=4792
seat3=9335
seat4=9254
seat5=4948
seat6=7038
def __init__(self):
self.x_pad=189
self.y_pad=197
#----------------------------------
self.fish=(86,430)
self.rice=(97,373)
self.nori=(25,430)
self.salmon=(30,491)
self.unagi=(80,499)
self.shrimp=(38,389)
#----------------------------------
self.p1=(85,254)
self.p2=(180,260)
self.p3=(273,252)
self.p4=(378,253)
self.p5=(493,256)
self.p6=(569,256)
#----------------------------------
self.free=(503,347)
self.top=(512,315)
self.orice=(536,338)
self.ofish=(561,320)
self.onori=(474,321)
self.osalmon=(496,372)
self.ounagi=(581,278)
self.oshrimp=(489,289)
self.slate=(189,422)
self.phone=(596,397)
self.exit=(595,386)
def left_down(self):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(.1)
print 'left down'
def left_up(self):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
time.sleep(.1)
print 'left release'
def leftClick(self):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
time.sleep(.1)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
print "Click."
def mousePos(self,cord):
c1=cord[0]
c2=cord[1]
win32api.SetCursorPos((self.x_pad+c1,self.y_pad+c2))
def get_cords(self):
x,y= win32api.GetCursorPos()
x=x-self.x_pad
y=y-self.y_pad
return x,y
def screengrab(self):
box=(self.x_pad,self.y_pad,self.x_pad+638,self.y_pad+480)
im= Image.grab(box)
return im
def start_game(self):
self.mousePos((324,257))
self.leftClick()
time.sleep(.1)
self.mousePos((311,424))
self.leftClick()
time.sleep(.1)
self.mousePos((564,492))
self.leftClick()
time.sleep(.1)
self.mousePos((339,426))
self.leftClick()
time.sleep(.1)
def order(self):
self.mousePos(self.phone)
self.leftClick()
time.sleep(0.1)
self.mousePos(self.orice)
self.leftClick()
time.sleep(.1)
self.leftClick()
self.mousePos(self.free)
self.leftClick()
a=time.time()
return a
def clear_tables(self):
self.mousePos(self.p1)
self.leftClick()
self.mousePos(self.p2)
self.leftClick()
self.mousePos(self.p3)
self.leftClick()
self.mousePos(self.p4)
self.leftClick()
self.mousePos(self.p5)
self.leftClick()
self.mousePos(self.p6)
self.leftClick()
time.sleep(1.5)
def foldslate(self):
self.mousePos(self.slate)
self.leftClick()
time.sleep(.1)
def make_food(self,food):
if food=="oni":
print 'Onigiri'
foodavail['rice']-= 2
foodavail['nori']-= 1
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.rice)
self.leftClick()
time.sleep(.1)
self.foldslate()
elif food=='cal':
print 'california roll'
foodavail['rice']-= 1
foodavail['nori']-= 1
foodavail['fish']-= 1
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.1)
self.foldslate()
elif food=='gun':
print 'gunkan roll'
foodavail['rice']-= 1
foodavail['nori']-= 1
foodavail['fish']-= 2
self.mousePos(self.rice)
self.leftClick()
time.sleep(.05)
self.mousePos(self.nori)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.05)
self.mousePos(self.fish)
self.leftClick()
time.sleep(.1)
self.foldslate()
def buy_food(self,food):
self.mousePos(self.phone)
time.sleep(.1)
self.leftClick()
if food == "rice":
self.mousePos(self.orice)
time.sleep(0.1)
self.leftClick()
s= self.screengrab()
if s.getpixel(self.orice) !=(127,127,127):
print "rice is avail"
self.mousePos(self.orice)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
self.leftClick()
foodavail['rice']+= 10
time.sleep(6.55)
else:
print 'rice is not avail'
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
else:
self.mousePos(self.top)
self.leftClick()
time.sleep(.1)
s=self.screengrab()
if food == "nori":
print "nori is avail"
if s.getpixel(self.onori) == (218,246,255):
self.mousePos(self.onori)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['nori']+= 10
time.sleep(6.55)
else:
print 'nori is not avail'
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "fish":
if s.getpixel(self.ofish)==(218,246,255):
print "fish is avail"
self.mousePos(self.ofish)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['fish']+= 10
time.sleep(6.55)
else:
print "fish is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "salmon":
if s.getpixel(self.osalmon)==(218,246,255):
print "salmon is avail"
self.mousePos(self.osalmon)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['salmon']+= 5
time.sleep(6.55)
else:
print "salmon is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "unagi":
if s.getpixel(self.ounagi)==(189,98,16):
print "unagi is avail"
self.mousePos(self.ounagi)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['unagi']+= 5
time.sleep(6.55)
else:
print "unagi is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
elif food == "shrimp":
if s.getpixel(self.shrimp)==(255,255,255):
print "shrimp is avail"
self.mousePos(self.oshrimp)
self.leftClick()
time.sleep(.1)
self.mousePos(self.free)
self.leftClick()
foodavail['rice']+= 10
time.sleep(6.55)
else:
print "shrimp is not avail"
self.mousePos(self.exit)
self.leftClick()
time.sleep(1)
self.buy_food(food)
def check_food(self):
for i,j in foodavail.items():
if i == 'fish' or i== 'rice' or i== 'nori':
if j <=3:
print i + 'is low and needs to be replenished'
self.buy_food(i)
def grab(self):
box=(self.x_pad+1,self.y_pad+1,self.x_pad+638,self.y_pad+480)
im= ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
im.save(os.getcwd() + '\\seat_one_'+str(int(time.time())))
print a
return a
def seat_one(self):
box=(self.x_pad+24,self.y_pad+112,self.x_pad+24+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_one_'+str(int(time.time()))+'.png','PNG')
print "seat one "+ str(a)
return a
def seat_two(self):
box=(self.x_pad+125,self.y_pad+112,self.x_pad+125+63,self.y_pad+122)
im= ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_two_'+str(int(time.time()))+'.png','PNG')
print "seat two "+ str(a)
return a
def seat_third(self):
box= (self.x_pad+226,self.y_pad+112,self.x_pad+226+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
# im.save(os.getcwd() + '\\seat_third_'+str(int(time.time()))+'.png','PNG')
print "seat third "+ str(a)
return a
def seat_four(self):
box= (self.x_pad+327,self.y_pad+112,self.x_pad+327+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_four_'+str(int(time.time()))+'.png','PNG')
print "seat four "+ str(a)
return a
def seat_five(self):
box= (self.x_pad+428,self.y_pad+112,self.x_pad+428+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_five_'+str(int(time.time()))+'.png','PNG')
print "seat five "+ str(a)
return a
def seat_six(self):
box= (self.x_pad+529,self.y_pad+112,self.x_pad+529+63,self.y_pad+122)
im = ImageOps.grayscale(Image.grab(box))
a= np.array(im.getcolors())
a= a.sum()
#im.save(os.getcwd() + '\\seat_six_'+str(int(time.time()))+'.png','PNG')
print "seat six "+ str(a)
return a
def get_seats(self):
self.seat_one()
self.seat_two()
self.seat_third()
self.seat_four()
self.seat_five()
self.seat_six()
def check_bubs(self):
self.check_food()
s1= self.seat_one()
if s1 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s1:
c=1
k= i
if c == 1:
print 'table 1 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s1
else:
print "table 1 is occupied"
self.clear_tables()
self.check_food()
s2= self.seat_two()
if s2 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s2:
c=1
k= i
if c == 1:
print 'table 2 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s2
else:
print "table 2 is occupied"
self.check_food()
s3=self.seat_third()
if s3 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s3:
c=1
k= i
if c == 1:
print 'table 3 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s3
else:
print "table 3 is occupied"
self.check_food()
s4=self.seat_four()
if s4 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s4:
c=1
k= i
if c == 1:
print 'table 4 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s4
else:
print "table 4 is occupied"
self.check_food()
s5=self.seat_five()
if s5 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s5:
c=1
k= i
if c == 1:
print 'table 5 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s5
else:
print "table 5 is occupied"
self.check_food()
s6=self.seat_six()
if s6 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s6:
c=1
k= i
if c == 1:
print 'table 6 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s6
else:
print "table 6 is occupied"
self.clear_tables()
def small_check(self):
self.check_food()
s1= self.seat_one()
if s1 != self.seat1:
c=0
for i,j in sushitype.items():
if j == s1:
c=1
k= i
if c == 1:
print 'table 1 is occupied and needs %s' %k
self.make_food(k)
else:
print 'sushi not found\n sushitype= %i' %s1
else:
print "table 1 is occupied"
def main():
a= go()
a.start_game()
t= time.time()
b= time.time()
while b-t<=188.65:
a.check_bubs()
b=time.time()
if __name__ == "__main__":
main()
|
fa
| 0.089985
|
#---------------------------------- #---------------------------------- #---------------------------------- # im.save(os.getcwd() + '\\seat_one_'+str(int(time.time()))+'.png','PNG') # im.save(os.getcwd() + '\\seat_two_'+str(int(time.time()))+'.png','PNG') # im.save(os.getcwd() + '\\seat_third_'+str(int(time.time()))+'.png','PNG') #im.save(os.getcwd() + '\\seat_four_'+str(int(time.time()))+'.png','PNG') #im.save(os.getcwd() + '\\seat_five_'+str(int(time.time()))+'.png','PNG') #im.save(os.getcwd() + '\\seat_six_'+str(int(time.time()))+'.png','PNG')
| 2.343906
| 2
|
salt/modules/win_network.py
|
mitsuhiko/salt
| 4
|
6627506
|
'''
Module for gathering and managing network information
'''
import sys
from string import ascii_letters, digits
from salt.utils.interfaces import *
from salt.utils.socket_util import *
__outputter__ = {
'dig': 'txt',
'ping': 'txt',
'netstat': 'txt',
}
def __virtual__():
'''
Only works on Windows systems
'''
if __grains__['os'] == 'Windows':
setattr(sys.modules['salt.utils.interfaces'], 'interfaces', interfaces)
return 'network'
return False
def _sanitize_host(host):
'''
Sanitize host string.
'''
return ''.join([
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
])
def ping(host):
    '''
    Performs a ping to a host
    CLI Example::
        salt '*' network.ping archlinux.org
    '''
    # Windows ping uses -n for the packet count (vs. -c on Unix)
    return __salt__['cmd.run']('ping -n 4 {0}'.format(_sanitize_host(host)))
def netstat():
    '''
    Return information on open ports and states
    CLI Example::
        salt '*' network.netstat
    '''
    ret = []
    cmd = 'netstat -na'
    lines = __salt__['cmd.run'](cmd).split('\n')
    for line in lines:
        # columns per data row: proto, local address, foreign address,
        # and (TCP only) the connection state
        comps = line.split()
        if line.startswith(' TCP'):
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': comps[3]})
        if line.startswith(' UDP'):
            # UDP is connectionless so netstat prints no state column
            ret.append({
                'local-address': comps[1],
                'proto': comps[0],
                'remote-address': comps[2],
                'state': None})
    return ret
def traceroute(host):
    '''
    Performs a traceroute to a 3rd party host
    CLI Example::
        salt '*' network.traceroute archlinux.org
    '''
    ret = []
    cmd = 'tracert %s' % _sanitize_host(host)
    lines = __salt__['cmd.run'](cmd).split('\n')
    for line in lines:
        # skip rows that carry no hop data: lines without spaces, the
        # "Tracing route to ..." banner and the "over a maximum of ..." note
        if not ' ' in line:
            continue
        if line.startswith('Trac'):
            continue
        if line.startswith('over'):
            continue
        comps = line.split()
        complength = len(comps)
        # This method still needs to better catch rows of other lengths
        # For example if some of the ms returns are '*'
        if complength == 9:
            # hop with a resolved hostname:
            # <count> <ms1> ms <ms2> ms <ms3> ms <hostname> [<ip>]
            result = {
                'count': comps[0],
                'hostname': comps[7],
                'ip': comps[8],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
            ret.append(result)
        elif complength == 8:
            # hop with no reverse DNS; column 7 is the bare IP
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': comps[7],
                'ms1': comps[1],
                'ms2': comps[3],
                'ms3': comps[5]}
            ret.append(result)
        else:
            # unrecognised row shape (e.g. '*' timeouts): keep only the
            # hop counter so the hop list stays complete
            result = {
                'count': comps[0],
                'hostname': None,
                'ip': None,
                'ms1': None,
                'ms2': None,
                'ms3': None}
            ret.append(result)
    return ret
def nslookup(host):
    '''
    Query DNS for information about a domain or ip address
    CLI Example::
        salt '*' network.nslookup archlinux.org
    '''
    results = []
    output = __salt__['cmd.run']('nslookup {0}'.format(_sanitize_host(host)))
    for line in output.split('\n'):
        # the "Non-authoritative answer:" banner carries no data
        if line.startswith('Non-authoritative'):
            continue
        if ":" not in line:
            continue
        fields = line.split(":")
        results.append({fields[0].strip(): fields[1].strip()})
    return results
def dig(host):
    '''
    Performs a DNS lookup with dig
    Note: dig must be installed on the Windows minion
    CLI Example::
        salt '*' network.dig archlinux.org
    '''
    return __salt__['cmd.run']('dig {0}'.format(_sanitize_host(host)))
def _cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
netmask = ''
for n in range(4):
if n:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '%d' % (256-(2**(8-cidr_bits)))
cidr_bits = 0
return netmask
def _interfaces_ipconfig(out):
'''
Returns a dictionary of interfaces with various information about each
(up/down state, ip address, netmask, and hwaddr)
'''
import re
ifaces = dict()
iface = None
for line in out.splitlines():
if not line:
continue
# TODO what does Windows call Infiniband and 10/40gige adapters
if line.startswith('Ethernet'):
iface = ifaces[re.search('adapter (\S.+):$').group(1)]
iface['up'] = True
addr = None
continue
if iface:
k, v = line.split(',', 1)
key = k.strip(' .')
val = v.strip()
if addr and key in ('Subnet Mask'):
addr['netmask'] = val
elif key in ('IP Address', 'IPv4 Address'):
if 'inet' not in iface:
iface['inet'] = list()
addr = {'address': val.rstrip('(Preferred)'),
'netmask': None,
'broadcast': None} # TODO find the broadcast
iface['inet'].append(addr)
elif 'IPv6 Address' in key:
if 'inet6' not in iface:
iface['inet'] = list()
# XXX What is the prefixlen!?
addr = {'address': val.rstrip('(Preferred)'),
'prefixlen': None}
iface['inet6'].append(addr)
elif key in ('Physical Address'):
iface['hwaddr'] = val
elif key in ('Media State'):
# XXX seen used for tunnel adaptors
# might be useful
iface['up'] = (v != 'Media disconnected')
def interfaces():
    '''
    Return a dictionary describing each network interface on the minion,
    parsed from the output of ``ipconfig /all``.
    '''
    cmd = __salt__['cmd.run']('ipconfig /all')
    # the parser in this module is _interfaces_ipconfig; the previous
    # call target ``_ifconfig`` does not exist anywhere in this file
    ifaces = _interfaces_ipconfig(cmd)
    return ifaces
|
'''
Module for gathering and managing network information
'''
import sys
from string import ascii_letters, digits
from salt.utils.interfaces import *
from salt.utils.socket_util import *
__outputter__ = {
'dig': 'txt',
'ping': 'txt',
'netstat': 'txt',
}
def __virtual__():
'''
Only works on Windows systems
'''
if __grains__['os'] == 'Windows':
setattr(sys.modules['salt.utils.interfaces'], 'interfaces', interfaces)
return 'network'
return False
def _sanitize_host(host):
'''
Sanitize host string.
'''
return ''.join([
c for c in host[0:255] if c in (ascii_letters + digits + '.-')
])
def ping(host):
'''
Performs a ping to a host
CLI Example::
salt '*' network.ping archlinux.org
'''
cmd = 'ping -n 4 %s' % _sanitize_host(host)
return __salt__['cmd.run'](cmd)
def netstat():
'''
Return information on open ports and states
CLI Example::
salt '*' network.netstat
'''
ret = []
cmd = 'netstat -na'
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
comps = line.split()
if line.startswith(' TCP'):
ret.append({
'local-address': comps[1],
'proto': comps[0],
'remote-address': comps[2],
'state': comps[3]})
if line.startswith(' UDP'):
ret.append({
'local-address': comps[1],
'proto': comps[0],
'remote-address': comps[2],
'state': None})
return ret
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
CLI Example::
salt '*' network.traceroute archlinux.org
'''
ret = []
cmd = 'tracert %s' % _sanitize_host(host)
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
if not ' ' in line:
continue
if line.startswith('Trac'):
continue
if line.startswith('over'):
continue
comps = line.split()
complength = len(comps)
# This method still needs to better catch rows of other lengths
# For example if some of the ms returns are '*'
if complength == 9:
result = {
'count': comps[0],
'hostname': comps[7],
'ip': comps[8],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
elif complength == 8:
result = {
'count': comps[0],
'hostname': None,
'ip': comps[7],
'ms1': comps[1],
'ms2': comps[3],
'ms3': comps[5]}
ret.append(result)
else:
result = {
'count': comps[0],
'hostname': None,
'ip': None,
'ms1': None,
'ms2': None,
'ms3': None}
ret.append(result)
return ret
def nslookup(host):
'''
Query DNS for information about a domain or ip address
CLI Example::
salt '*' network.nslookup archlinux.org
'''
ret = []
cmd = 'nslookup %s' % _sanitize_host(host)
lines = __salt__['cmd.run'](cmd).split('\n')
for line in lines:
if line.startswith('Non-authoritative'):
continue
if ":" in line:
comps = line.split(":")
ret.append({comps[0].strip(): comps[1].strip()})
return ret
def dig(host):
'''
Performs a DNS lookup with dig
Note: dig must be installed on the Windows minion
CLI Example::
salt '*' network.dig archlinux.org
'''
cmd = 'dig %s' % _sanitize_host(host)
return __salt__['cmd.run'](cmd)
def _cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
netmask = ''
for n in range(4):
if n:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '%d' % (256-(2**(8-cidr_bits)))
cidr_bits = 0
return netmask
def _interfaces_ipconfig(out):
'''
Returns a dictionary of interfaces with various information about each
(up/down state, ip address, netmask, and hwaddr)
'''
import re
ifaces = dict()
iface = None
for line in out.splitlines():
if not line:
continue
# TODO what does Windows call Infiniband and 10/40gige adapters
if line.startswith('Ethernet'):
iface = ifaces[re.search('adapter (\S.+):$').group(1)]
iface['up'] = True
addr = None
continue
if iface:
k, v = line.split(',', 1)
key = k.strip(' .')
val = v.strip()
if addr and key in ('Subnet Mask'):
addr['netmask'] = val
elif key in ('IP Address', 'IPv4 Address'):
if 'inet' not in iface:
iface['inet'] = list()
addr = {'address': val.rstrip('(Preferred)'),
'netmask': None,
'broadcast': None} # TODO find the broadcast
iface['inet'].append(addr)
elif 'IPv6 Address' in key:
if 'inet6' not in iface:
iface['inet'] = list()
# XXX What is the prefixlen!?
addr = {'address': val.rstrip('(Preferred)'),
'prefixlen': None}
iface['inet6'].append(addr)
elif key in ('Physical Address'):
iface['hwaddr'] = val
elif key in ('Media State'):
# XXX seen used for tunnel adaptors
# might be useful
iface['up'] = (v != 'Media disconnected')
def interfaces():
cmd = __salt__['cmd.run']('ipconfig /all')
ifaces = _ifconfig(cmd)
return ifaces
|
en
| 0.748015
|
Module for gathering and managing network information Only works on Windows systems Sanitize host string. Performs a ping to a host CLI Example:: salt '*' network.ping archlinux.org Return information on open ports and states CLI Example:: salt '*' network.netstat Performs a traceroute to a 3rd party host CLI Example:: salt '*' network.traceroute archlinux.org # This method still needs to better catch rows of other lengths # For example if some of the ms returns are '*' Query DNS for information about a domain or ip address CLI Example:: salt '*' network.nslookup archlinux.org Performs a DNS lookup with dig Note: dig must be installed on the Windows minion CLI Example:: salt '*' network.dig archlinux.org Returns an IPv4 netmask Returns a dictionary of interfaces with various information about each (up/down state, ip address, netmask, and hwaddr) # TODO what does Windows call Infiniband and 10/40gige adapters # TODO find the broadcast # XXX What is the prefixlen!? # XXX seen used for tunnel adaptors # might be useful
| 3.03038
| 3
|
test_reporter.py
|
iroth/pytunes-reporter
| 0
|
6627507
|
import pytest
import responses
from faker import Factory
from requests.exceptions import HTTPError
# library to test
import reporter
sales_url = 'https://reportingitc-reporter.apple.com/reportservice/sales/v1'
financial_url = 'https://reportingitc-reporter.apple.com/reportservice/finance/v1'
@pytest.fixture(scope='session')
def faker():
    # Session-wide Faker instance (Japanese locale) used to build test data.
    return Factory.create('ja_JP')
def test_reporter_have_token_create(faker):
    # Building a Reporter from an existing access token must not hit the
    # network; the token and account are stored verbatim on the instance.
    access_token = faker.uuid4()
    account = faker.pyint()
    new_reporter = reporter.Reporter(
        access_token=access_token,
        account=account,
    )
    assert type(new_reporter) is reporter.Reporter
    assert new_reporter.access_token == access_token
    assert new_reporter.account == account
@responses.activate
def test_reporter_have_password_create(faker):
    """A Reporter built from user_id/password fetches a fresh access token.

    The mocked endpoint first answers the token-regeneration confirmation
    prompt, then returns the ViewToken XML carrying the new token.
    (The original contained a ``<PASSWORD>`` redaction artifact in place
    of ``faker.password()``, which was not valid Python.)
    """
    request_id = faker.uuid4()
    user_id = faker.email()
    password = faker.password()
    access_token = faker.uuid4()
    responses.add(
        responses.POST,
        sales_url,
        body=(
            b'If you generate a new access token, your existing token will be '
            b'deleted. You will need to save your new access token within your'
            b' properties file. Do you still want to continue? (y/n): '),
        status=200,
        headers={
            'SERVICE_REQUEST_ID': request_id,
        }
    )
    response_xml = f'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<ViewToken>\n    <AccessToken>{access_token}</AccessToken>\n    <ExpirationDate>2018-09-24</ExpirationDate>\n    <Message>Your new access token has been generated.</Message>\n</ViewToken>\n'
    responses.add(
        responses.POST,
        sales_url,
        status=200,
        body=response_xml,
    )
    new_reporter = reporter.Reporter(
        user_id=user_id,
        password=password
    )
    assert type(new_reporter) is reporter.Reporter
    assert new_reporter.access_token
@responses.activate
def test_vendor_numbers(faker):
    """The ``vendors`` property parses the Vendors XML into a plain list.

    Fixes: restores ``faker.uuid4()`` where a ``<PASSWORD>`` redaction
    artifact made the test syntactically invalid, and adds the
    ``@responses.activate`` decorator that every other HTTP-mocking test
    in this module uses (without it the registered mock is never active).
    """
    access_token = faker.uuid4()
    vendor_numbers = [str(faker.random_int(800000, 899999))
                      for _ in range(faker.random_int())]
    response_xml = '''
    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <Vendors>
        {vendors}
    </Vendors>
    '''.format(vendors=''.join(['<Vendor>{num}</Vendor>'.format(num=num)
                                for num in vendor_numbers]))
    responses.add(
        responses.POST,
        sales_url,
        body=response_xml,
        status=200
    )
    new_reporter = reporter.Reporter(
        access_token=access_token
    )
    assert new_reporter.vendors == vendor_numbers
def test_finanical_vendors_and_regions(faker):
access_token = faker.uuid4()
response_xml = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<VendorsAndRegions>
<Vendor>
<Number>80012345</Number>
<Region>
<Code>US</Code>
<Reports>
<Report>Financial</Report>
</Reports>
</Region>
<Region>
<Code>JP</Code>
<Reports>
<Report>Financial</Report>
</Reports>
</Region>
</Vendor>
<Vendor>
<Number>80067891</Number>
<Region>
<Code>US</Code>
<Reports>
<Report>Financial</Report>
</Reports>
</Region>
</Vendor>
</VendorsAndRegions>
'''
responses.add(
responses.POST,
financial_url,
body=response_xml,
status=200
)
expected_result = {
'80012345': {
'id': '80012345',
'regions': [
{
'code': 'US',
'reports': [
'Financial',
],
},
{
'code': 'JP',
'reports': [
'Financial',
],
},
],
},
'80067891': {
'id': '80067891',
'regions': [
{
'code': 'US',
'reports': [
'Financial',
],
},
],
},
}
new_reporter = reporter.Reporter(access_token=access_token)
assert new_reporter.vendors_and_regions == expected_result
@responses.activate
def test_error_handling():
error_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Error>
<Code>101</Code>
<Message>Invalid command.</Message>
</Error>"""
responses.add(
responses.POST,
sales_url,
body=error_xml,
status=400
)
new_reporter = reporter.Reporter(user_id='<EMAIL>', password='<PASSWORD>')
with pytest.raises(HTTPError):
new_reporter.access_token
|
import pytest
import responses
from faker import Factory
from requests.exceptions import HTTPError
# library to test
import reporter
sales_url = 'https://reportingitc-reporter.apple.com/reportservice/sales/v1'
financial_url = 'https://reportingitc-reporter.apple.com/reportservice/finance/v1'
@pytest.fixture(scope='session')
def faker():
    # Session-scoped Faker instance (Japanese locale) shared by all tests
    # to generate random tokens, emails, and account numbers.
    return Factory.create('ja_JP')
def test_reporter_have_token_create(faker):
    """A Reporter built from an existing access token keeps both fields."""
    token = faker.uuid4()
    account_number = faker.pyint()
    built = reporter.Reporter(
        access_token=token,
        account=account_number,
    )
    assert type(built) is reporter.Reporter
    assert built.access_token == token
    assert built.account == account_number
@responses.activate
def test_reporter_have_password_create(faker):
    """A Reporter built from user id + password generates an access token.

    Two mocked POSTs to the sales endpoint: the first answers the
    token-regeneration confirmation prompt, the second returns the
    ViewToken XML carrying the new token.
    """
    request_id = faker.uuid4()
    user_id = faker.email()
    # FIX: this call had been redacted to "<PASSWORD>()", which is not valid
    # Python; restore the Faker password provider.
    password = faker.password()
    access_token = faker.uuid4()
    responses.add(
        responses.POST,
        sales_url,
        body=(
            b'If you generate a new access token, your existing token will be '
            b'deleted. You will need to save your new access token within your'
            b' properties file. Do you still want to continue? (y/n): '),
        status=200,
        headers={
            'SERVICE_REQUEST_ID': request_id,
        }
    )
    response_xml = f'<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n<ViewToken>\n  <AccessToken>{access_token}</AccessToken>\n  <ExpirationDate>2018-09-24</ExpirationDate>\n  <Message>Your new access token has been generated.</Message>\n</ViewToken>\n'
    responses.add(
        responses.POST,
        sales_url,
        status=200,
        body=response_xml,
    )
    new_reporter = reporter.Reporter(
        user_id=user_id,
        password=password
    )
    assert type(new_reporter) is reporter.Reporter
    assert new_reporter.access_token
# FIX: decorator was missing, so the registered mock responses were never
# activated and the request would have hit the real network; siblings in
# this file all use @responses.activate.
@responses.activate
def test_vendor_numbers(faker):
    """Reporter.vendors parses the vendor-number list from the sales endpoint."""
    # FIX: this call had been redacted to "<PASSWORD>.uuid4()"; restore faker.
    access_token = faker.uuid4()
    vendor_numbers = [str(faker.random_int(800000, 899999))
                      for _ in range(faker.random_int())]
    response_xml = '''
    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <Vendors>
    {vendors}
    </Vendors>
    '''.format(vendors=''.join(['<Vendor>{num}</Vendor>'.format(num=num)
                                for num in vendor_numbers]))
    responses.add(
        responses.POST,
        sales_url,
        body=response_xml,
        status=200
    )
    new_reporter = reporter.Reporter(
        access_token=access_token
    )
    assert new_reporter.vendors == vendor_numbers
# FIX: decorator was missing, so the registered mock response was never
# activated and the request would have hit the real network.
@responses.activate
def test_finanical_vendors_and_regions(faker):
    """vendors_and_regions maps vendor id -> regions/reports from finance XML."""
    access_token = faker.uuid4()
    response_xml = '''
    <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
    <VendorsAndRegions>
        <Vendor>
            <Number>80012345</Number>
            <Region>
                <Code>US</Code>
                <Reports>
                    <Report>Financial</Report>
                </Reports>
            </Region>
            <Region>
                <Code>JP</Code>
                <Reports>
                    <Report>Financial</Report>
                </Reports>
            </Region>
        </Vendor>
        <Vendor>
            <Number>80067891</Number>
            <Region>
                <Code>US</Code>
                <Reports>
                    <Report>Financial</Report>
                </Reports>
            </Region>
        </Vendor>
    </VendorsAndRegions>
    '''
    responses.add(
        responses.POST,
        financial_url,
        body=response_xml,
        status=200
    )
    expected_result = {
        '80012345': {
            'id': '80012345',
            'regions': [
                {
                    'code': 'US',
                    'reports': [
                        'Financial',
                    ],
                },
                {
                    'code': 'JP',
                    'reports': [
                        'Financial',
                    ],
                },
            ],
        },
        '80067891': {
            'id': '80067891',
            'regions': [
                {
                    'code': 'US',
                    'reports': [
                        'Financial',
                    ],
                },
            ],
        },
    }
    new_reporter = reporter.Reporter(access_token=access_token)
    assert new_reporter.vendors_and_regions == expected_result
@responses.activate
def test_error_handling():
    """An HTTP 400 error body from the sales endpoint raises HTTPError."""
    error_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<Error>
    <Code>101</Code>
    <Message>Invalid command.</Message>
</Error>"""
    responses.add(responses.POST, sales_url, status=400, body=error_xml)
    failing_reporter = reporter.Reporter(user_id='<EMAIL>', password='<PASSWORD>')
    with pytest.raises(HTTPError):
        failing_reporter.access_token
|
en
| 0.19746
|
# library to test <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <Vendors> {vendors} </Vendors> <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <VendorsAndRegions> <Vendor> <Number>80012345</Number> <Region> <Code>US</Code> <Reports> <Report>Financial</Report> </Reports> </Region> <Region> <Code>JP</Code> <Reports> <Report>Financial</Report> </Reports> </Region> </Vendor> <Vendor> <Number>80067891</Number> <Region> <Code>US</Code> <Reports> <Report>Financial</Report> </Reports> </Region> </Vendor> </VendorsAndRegions> <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <Error> <Code>101</Code> <Message>Invalid command.</Message> </Error>
| 2.288535
| 2
|
shorten.py
|
jasonshaw/learningpython
| 0
|
6627508
|
# 1.长链接转换为短链接核心就是进制转换
# 2.十进制数转为62进制( 0~9 + A~Z + a~z )共62个字符
# 3.假如允许转换的10进制数范围为 10 000 000~ 99 999 999 (唯一,相当于数据库主键)每一个数字对应一个长链接,再转为62进制数
# 4.浏览器解析时,现将短链接(62进制数)转换成 10进制数 --- > 再找到对应的长链接,最后解析
# 数字转62进制
def convert(num):
    """Convert a non-negative decimal integer to its base-62 string form.

    Digits are drawn from 0-9, A-Z, a-z (62 symbols), matching the scheme
    described in the comments above (e.g. 12000000 -> 'oLkO'), so the token
    can be decoded back to the original number unambiguously.

    FIX: the original alphabet contained 72 characters and repeated 'G'/'g'
    in place of 'J'/'j', contradicting the documented base-62 conversion and
    making decoding ambiguous.  Also handles num == 0, which previously
    returned the empty string.
    """
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    base = len(alphabet)  # 62
    if num == 0:
        return alphabet[0]
    digits = []
    while num > 0:
        # index into the alphabet with the least-significant digit, then
        # prepend so the most-significant digit ends up first
        digits.insert(0, alphabet[num % base])
        num //= base
    return ''.join(digits)
# 例:12 000 000 转为mod进制数为 oLkO
print(convert(12000000))
|
# 1.长链接转换为短链接核心就是进制转换
# 2.十进制数转为62进制( 0~9 + A~Z + a~z )共62个字符
# 3.假如允许转换的10进制数范围为 10 000 000~ 99 999 999 (唯一,相当于数据库主键)每一个数字对应一个长链接,再转为62进制数
# 4.浏览器解析时,现将短链接(62进制数)转换成 10进制数 --- > 再找到对应的长链接,最后解析
# 数字转62进制
def convert(num):
global all_chars
all_chars = '0123456789ABCDEFGHIGKLMNOPQRSTUVWXYZabcdefghigklmnopqrstuvwxyz~!@#$%&_-'
mod = len(all_chars)
digits = []
while num > 0:
# 拿到对应的下标取得mod进制数,并插入列表0号位
digits.insert(0, all_chars[num % mod])
num//= mod
return ''.join(digits)
# 例:12 000 000 转为mod进制数为 oLkO
print(convert(12000000))
|
zh
| 0.924494
|
# 1.长链接转换为短链接核心就是进制转换 # 2.十进制数转为62进制( 0~9 + A~Z + a~z )共62个字符 # 3.假如允许转换的10进制数范围为 10 000 000~ 99 999 999 (唯一,相当于数据库主键)每一个数字对应一个长链接,再转为62进制数 # 4.浏览器解析时,现将短链接(62进制数)转换成 10进制数 --- > 再找到对应的长链接,最后解析 # 数字转62进制 #$%&_-' # 拿到对应的下标取得mod进制数,并插入列表0号位 # 例:12 000 000 转为mod进制数为 oLkO
| 3.098263
| 3
|
monai/apps/utils.py
|
Scitator/MONAI
| 1
|
6627509
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import os
from urllib.request import urlretrieve
from urllib.error import URLError
import hashlib
import tarfile
import zipfile
from monai.utils import progress_bar, optional_import
gdown, has_gdown = optional_import("gdown", "3.6")
def check_md5(filepath: str, md5_value: Optional[str] = None) -> bool:
    """Verify a file against an expected MD5 hex digest.

    Args:
        filepath: path of the file whose contents are hashed.
        md5_value: expected MD5 hex digest; when ``None`` the check is
            skipped (a notice is printed) and the file is considered valid.

    Returns:
        bool: ``False`` on a digest mismatch, ``True`` otherwise.
    """
    if md5_value is None:
        print(f"expected MD5 is None, skip MD5 check for file {filepath}.")
        return True
    hasher = hashlib.md5()
    with open(filepath, "rb") as stream:
        # hash in 1 MiB chunks so large files do not load into memory at once
        while True:
            block = stream.read(1024 * 1024)
            if not block:
                break
            hasher.update(block)
    return hasher.hexdigest() == md5_value
def download_url(url: str, filepath: str, md5_value: Optional[str] = None) -> None:
    """Download a file from a URL, with a progress bar and MD5 validation.

    Args:
        url: source URL link to download file.
        filepath: target filepath to save the downloaded file.
        md5_value: expected MD5 value to validate the downloaded file.
            if None, skip MD5 validation.

    Raises:
        RuntimeError: MD5 check of an existing file at ``filepath`` failed.
        RuntimeError: a Google Drive download produced no file.
        URLError, IOError: propagated unchanged from ``urlretrieve``.
        RuntimeError: MD5 check of the downloaded file failed.
    """
    if os.path.exists(filepath):
        # already downloaded: validate it instead of re-fetching
        if not check_md5(filepath, md5_value):
            raise RuntimeError(f"MD5 check of existing file {filepath} failed, please delete it and try again.")
        print(f"file {filepath} exists, skip downloading.")
        return
    # FIX: os.makedirs('') raises FileNotFoundError when filepath has no
    # directory component; only create the parent when one is present.
    parent_dir = os.path.dirname(filepath)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    if url.startswith("https://drive.google.com"):
        # Google Drive links need gdown; urlretrieve cannot follow them
        gdown.download(url, filepath, quiet=False)
        if not os.path.exists(filepath):
            raise RuntimeError("download failed due to network issue or permission denied.")
    else:
        def _process_hook(blocknum, blocksize, totalsize):
            # urlretrieve reporthook -> textual progress bar update
            progress_bar(blocknum * blocksize, totalsize, f"Downloading {filepath.split('/')[-1]}:")
        # FIX: the previous ``except (URLError, IOError) as e: raise e`` was a
        # no-op re-raise; let those exceptions propagate naturally.
        urlretrieve(url, filepath, reporthook=_process_hook)
        print(f"\ndownloaded file: {filepath}.")
    if not check_md5(filepath, md5_value):
        raise RuntimeError(
            f"MD5 check of downloaded file failed, URL={url}, filepath={filepath}, expected MD5={md5_value}."
        )
def extractall(filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
    """Extract a compressed file to the output directory.

    Expected file types are: `zip`, `tar.gz` and `tar`.

    Args:
        filepath: the file path of compressed file.
        output_dir: target directory to save extracted files.
        md5_value: expected MD5 value to validate the compressed file.
            if None, skip MD5 validation.

    Raises:
        RuntimeError: MD5 check of the compressed file failed.
        TypeError: unsupported compressed file type.
    """
    # heuristic skip: assumes the archive expands to a directory named after
    # the file's stem -- TODO confirm this matches actual archive layouts
    target_file = os.path.join(output_dir, os.path.basename(filepath).split(".")[0])
    if os.path.exists(target_file):
        print(f"extracted file {target_file} exists, skip extracting.")
        return
    if not check_md5(filepath, md5_value):
        raise RuntimeError(f"MD5 check of compressed file {filepath} failed.")
    # FIX: use context managers so the archive handle is closed even when
    # extraction raises (previously it leaked on error).
    if filepath.endswith("zip"):
        with zipfile.ZipFile(filepath) as zip_file:
            zip_file.extractall(output_dir)
    elif filepath.endswith(("tar", "tar.gz")):
        with tarfile.open(filepath) as tar_file:
            tar_file.extractall(output_dir)
    else:
        raise TypeError("unsupported compressed file type.")
def download_and_extract(url: str, filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
    """
    Download file from URL and extract it to the output directory.
    Args:
        url: source URL link to download file.
        filepath: the file path of compressed file.
        output_dir: target directory to save extracted files.
        md5_value: expected MD5 value to validate the downloaded file.
            if None, skip MD5 validation.
    """
    # Download first (skipped when the file already exists and passes MD5),
    # then unpack; both steps validate against the same md5_value.
    download_url(url=url, filepath=filepath, md5_value=md5_value)
    extractall(filepath=filepath, output_dir=output_dir, md5_value=md5_value)
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import os
from urllib.request import urlretrieve
from urllib.error import URLError
import hashlib
import tarfile
import zipfile
from monai.utils import progress_bar, optional_import
gdown, has_gdown = optional_import("gdown", "3.6")
def check_md5(filepath: str, md5_value: Optional[str] = None) -> bool:
"""
check MD5 signature of specified file.
Args:
filepath: path of source file to verify MD5.
md5_value: expected MD5 value of the file.
"""
if md5_value is not None:
md5 = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(1024 * 1024), b""):
md5.update(chunk)
if md5_value != md5.hexdigest():
return False
else:
print(f"expected MD5 is None, skip MD5 check for file {filepath}.")
return True
def download_url(url: str, filepath: str, md5_value: Optional[str] = None) -> None:
"""
Download file from specified URL link, support process bar and MD5 check.
Args:
url: source URL link to download file.
filepath: target filepath to save the downloaded file.
md5_value: expected MD5 value to validate the downloaded file.
if None, skip MD5 validation.
Raises:
RuntimeError: MD5 check of existing file {filepath} failed, please delete it and try again.
URLError: See urllib.request.urlopen
IOError: See urllib.request.urlopen
RuntimeError: MD5 check of downloaded file failed, URL={url}, filepath={filepath}, expected MD5={md5_value}.
"""
if os.path.exists(filepath):
if not check_md5(filepath, md5_value):
raise RuntimeError(f"MD5 check of existing file {filepath} failed, please delete it and try again.")
print(f"file {filepath} exists, skip downloading.")
return
os.makedirs(os.path.dirname(filepath), exist_ok=True)
if url.startswith("https://drive.google.com"):
gdown.download(url, filepath, quiet=False)
if not os.path.exists(filepath):
raise RuntimeError("download failed due to network issue or permission denied.")
else:
def _process_hook(blocknum, blocksize, totalsize):
progress_bar(blocknum * blocksize, totalsize, f"Downloading {filepath.split('/')[-1]}:")
try:
urlretrieve(url, filepath, reporthook=_process_hook)
print(f"\ndownloaded file: {filepath}.")
except (URLError, IOError) as e:
raise e
if not check_md5(filepath, md5_value):
raise RuntimeError(
f"MD5 check of downloaded file failed, \
URL={url}, filepath={filepath}, expected MD5={md5_value}."
)
def extractall(filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
"""
Extract file to the output directory.
Expected file types are: `zip`, `tar.gz` and `tar`.
Args:
filepath: the file path of compressed file.
output_dir: target directory to save extracted files.
md5_value: expected MD5 value to validate the compressed file.
if None, skip MD5 validation.
Raises:
RuntimeError: MD5 check of compressed file {filepath} failed.
TypeError: unsupported compressed file type.
"""
target_file = os.path.join(output_dir, os.path.basename(filepath).split(".")[0])
if os.path.exists(target_file):
print(f"extracted file {target_file} exists, skip extracting.")
return
if not check_md5(filepath, md5_value):
raise RuntimeError(f"MD5 check of compressed file {filepath} failed.")
if filepath.endswith("zip"):
zip_file = zipfile.ZipFile(filepath)
zip_file.extractall(output_dir)
zip_file.close()
elif filepath.endswith("tar") or filepath.endswith("tar.gz"):
tar_file = tarfile.open(filepath)
tar_file.extractall(output_dir)
tar_file.close()
else:
raise TypeError("unsupported compressed file type.")
def download_and_extract(url: str, filepath: str, output_dir: str, md5_value: Optional[str] = None) -> None:
"""
Download file from URL and extract it to the output directory.
Args:
url: source URL link to download file.
filepath: the file path of compressed file.
output_dir: target directory to save extracted files.
defaut is None to save in current directory.
md5_value: expected MD5 value to validate the downloaded file.
if None, skip MD5 validation.
"""
download_url(url=url, filepath=filepath, md5_value=md5_value)
extractall(filepath=filepath, output_dir=output_dir, md5_value=md5_value)
|
en
| 0.73788
|
# Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. check MD5 signature of specified file. Args: filepath: path of source file to verify MD5. md5_value: expected MD5 value of the file. Download file from specified URL link, support process bar and MD5 check. Args: url: source URL link to download file. filepath: target filepath to save the downloaded file. md5_value: expected MD5 value to validate the downloaded file. if None, skip MD5 validation. Raises: RuntimeError: MD5 check of existing file {filepath} failed, please delete it and try again. URLError: See urllib.request.urlopen IOError: See urllib.request.urlopen RuntimeError: MD5 check of downloaded file failed, URL={url}, filepath={filepath}, expected MD5={md5_value}. Extract file to the output directory. Expected file types are: `zip`, `tar.gz` and `tar`. Args: filepath: the file path of compressed file. output_dir: target directory to save extracted files. md5_value: expected MD5 value to validate the compressed file. if None, skip MD5 validation. Raises: RuntimeError: MD5 check of compressed file {filepath} failed. TypeError: unsupported compressed file type. Download file from URL and extract it to the output directory. Args: url: source URL link to download file. filepath: the file path of compressed file. output_dir: target directory to save extracted files. defaut is None to save in current directory. md5_value: expected MD5 value to validate the downloaded file. if None, skip MD5 validation.
| 2.208018
| 2
|
problem_sets/ps_08-2020Fall/problem_set_08_solution.py
|
arwhyte/SI506-practice
| 12
|
6627510
|
# START PROBLEM SET 08
print('Problem set 08 \n')
# SETUP
import csv
state2state_id = {
'Alabama' : 'AL',
'Alaska' : 'AK',
'Arizona' : 'AZ',
'Arkansas' : 'AR',
'California' : 'CA',
'Colorado' : 'CO',
'Connecticut' : 'CT',
'Delaware' : 'DE',
'Florida' : 'FL',
'Georgia' : 'GA',
'Hawaii' : 'HI',
'Idaho' : 'ID',
'Illinois' : 'IL',
'Indiana' : 'IN',
'Iowa' : 'IA',
'Kansas' : 'KS',
'Kentucky' : 'KY',
'Louisiana' : 'LA',
'Maine' : 'ME',
'Maryland' : 'MD',
'Massachusetts' : 'MA',
'Michigan' : 'MI',
'Minnesota' : 'MN',
'Mississippi' : 'MS',
'Missouri' : 'MO',
'Montana' : 'MT',
'Nebraska' : 'NE',
'Nevada' : 'NV',
'New Hampshire' : 'NH',
'New Jersey' : 'NJ',
'New Mexico' : 'NM',
'New York' : 'NY',
'North Carolina' : 'NC',
'North Dakota' : 'ND',
'Ohio' : 'OH',
'Oklahoma' : 'OK',
'Oregon' : 'OR',
'Pennsylvania' : 'PA',
'Rhode Island' : 'RI',
'South Carolina' : 'SC',
'South Dakota' : 'SD',
'Tennessee' : 'TN',
'Texas' : 'TX',
'Utah' : 'UT',
'Vermont' : 'VT',
'Virginia' : 'VA',
'Washington' : 'WA',
'West Virginia' : 'WV',
'Wisconsin' : 'WI',
'Wyoming' : 'WY'
}
# END SETUP
### PROBLEM 1.1
def read_txt(filepath):
    """Read a tab-delimited text file, skipping the header row.

    Parameters:
        filepath (str): a filepath that includes a filename with its extension

    Returns:
        list: one list of column values per data row
    """
    with open(filepath, mode='r') as file_obj:
        # drop the header line, then split each remaining row on tabs
        rows = file_obj.readlines()[1:]
    return [row.strip().split('\t') for row in rows]
### PROBLEM 1.2
def read_csv(filepath):
    """Read a CSV file into a list of dictionaries (one per data row).

    Parameters:
        filepath (str): a filepath that includes a filename with its extension

    Returns:
        list: dictionaries keyed by the CSV header fields
    """
    with open(filepath, mode='r') as file_obj:
        reader = csv.DictReader(file_obj)
        return list(reader)
### PROBLEM 2
def convert_to_dict(data):
    """Map US state names to their total COVID-19 case counts.

    Parameters:
        data (list): rows of [state, total_cases, ...] values

    Returns:
        dict: state name -> total case count (later rows win on duplicates)
    """
    return {row[0]: row[1] for row in data}
### PROBLEM 3
def get_total_num_policies(data):
    """Count state-level policy starts per US state id.

    Only rows whose policy_level is 'state' and whose start_stop is 'start'
    are counted; other rows (county-level, policy stops) are ignored.

    Parameters:
        data (list): dictionaries with at least the keys 'policy_level',
            'start_stop', and 'state_id'

    Returns:
        dict: state id -> number of state-level policy starts
    """
    state_id_2_policy_counts = {}
    for row in data:
        if row['policy_level'] == 'state' and row['start_stop'] == 'start':
            # dict.get replaces the manual membership check/branch
            state_id = row['state_id']
            state_id_2_policy_counts[state_id] = state_id_2_policy_counts.get(state_id, 0) + 1
    return state_id_2_policy_counts
### PROBLEM 4
def summarize_data(state2state_id, state_id_2_policy_counts, state_2_case_counts):
    """Combine the three lookup tables into one summary record per state.

    Keys of each record are state, state_id, total_policies, and total_cases;
    missing policy or case entries default to 0.

    Parameters:
        state2state_id (dict): state name -> two-letter state id
        state_id_2_policy_counts (dict): state id -> state-level policy count
        state_2_case_counts (dict): state name -> total COVID-19 case count

    Returns:
        list: one four-key dictionary per state in state2state_id
    """
    summary = []
    for state_name, abbrev in state2state_id.items():
        summary.append({
            'state': state_name,
            'state_id': abbrev,
            'total_policies': state_id_2_policy_counts.get(abbrev, 0),
            'total_cases': state_2_case_counts.get(state_name, 0),
        })
    return summary
### PROBLEM 5
def write_csv(data, filepath):
    """Write a list of dictionaries to a CSV file.

    The header is taken from the keys of the first dictionary, so every
    dictionary in ``data`` must share the same keys.

    Parameters:
        data (list): non-empty list of dictionaries with identical keys
        filepath (str): destination path for the CSV file

    Returns:
        None
    """
    fieldnames = list(data[0].keys())
    # FIX: newline='' is required by the csv module when writing files;
    # without it every row is followed by a blank line on Windows.
    with open(filepath, mode='w', newline='') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(data)
def main():
    """Program entry point: read case and policy data, summarize, write CSV.

    Reads 'us_covid19_cases.txt' (tab-delimited) and 'us_policy_updates.csv'
    from the working directory and writes 'state_covid19_related_info.csv'.

    Parameters:
        None
    Returns:
        None
    """
    ### Problem 1 (20 points)
    cases = read_txt('us_covid19_cases.txt')
    policies = read_csv('us_policy_updates.csv')
    ### Problem 2 (10 points)
    # state name -> total case count, built from the tab-delimited rows
    state2cases = convert_to_dict(cases)
    print(f"\n2.0 state2cases = {state2cases}")
    ### Problem 3 (30 points)
    # state id -> number of state-level policy starts
    state_id2policies = get_total_num_policies(policies)
    print(f"\n3.0 state_id2policies = {state_id2policies}")
    ### Problem 4 (30 points)
    # merge the three lookups into one record per state
    summarized_info = summarize_data(state2state_id,state_id2policies,state2cases)
    print(f"\n4.0 summarized_info = {summarized_info}")
    ### Problem 5 (10 points)
    write_csv(summarized_info,'state_covid19_related_info.csv')
#Do not delete the lines below.
if __name__ == '__main__':
main()
|
# START PROBLEM SET 08
print('Problem set 08 \n')
# SETUP
import csv
state2state_id = {
'Alabama' : 'AL',
'Alaska' : 'AK',
'Arizona' : 'AZ',
'Arkansas' : 'AR',
'California' : 'CA',
'Colorado' : 'CO',
'Connecticut' : 'CT',
'Delaware' : 'DE',
'Florida' : 'FL',
'Georgia' : 'GA',
'Hawaii' : 'HI',
'Idaho' : 'ID',
'Illinois' : 'IL',
'Indiana' : 'IN',
'Iowa' : 'IA',
'Kansas' : 'KS',
'Kentucky' : 'KY',
'Louisiana' : 'LA',
'Maine' : 'ME',
'Maryland' : 'MD',
'Massachusetts' : 'MA',
'Michigan' : 'MI',
'Minnesota' : 'MN',
'Mississippi' : 'MS',
'Missouri' : 'MO',
'Montana' : 'MT',
'Nebraska' : 'NE',
'Nevada' : 'NV',
'New Hampshire' : 'NH',
'New Jersey' : 'NJ',
'New Mexico' : 'NM',
'New York' : 'NY',
'North Carolina' : 'NC',
'North Dakota' : 'ND',
'Ohio' : 'OH',
'Oklahoma' : 'OK',
'Oregon' : 'OR',
'Pennsylvania' : 'PA',
'Rhode Island' : 'RI',
'South Carolina' : 'SC',
'South Dakota' : 'SD',
'Tennessee' : 'TN',
'Texas' : 'TX',
'Utah' : 'UT',
'Vermont' : 'VT',
'Virginia' : 'VA',
'Washington' : 'WA',
'West Virginia' : 'WV',
'Wisconsin' : 'WI',
'Wyoming' : 'WY'
}
# END SETUP
### PROBLEM 1.1
def read_txt(filepath):
"""Returns a list of lists where each item (list) is formed from the data split by tab.
Parameters:
filepath (str): a filepath that includes a filename with its extension
Returns:
list: a list of lists that contain states and total number of cases
"""
with open(filepath, mode='r') as file_obj:
data = []
lines = file_obj.readlines()
for line in lines[1:]:
data.append(line.strip().split('\t')) # .strip() remove newline, whitespace
return data
### PROBLEM 1.2
def read_csv(filepath):
"""Returns a list of dictionaries where each dictionary is formed from the data.
Parameters:
filepath (str): a filepath that includes a filename with its extension
Returns:
list: a list of dictionaries where each dictionary is formed from the data
"""
with open(filepath, mode='r') as data:
data = list(csv.DictReader(data))
return data
### PROBLEM 2
def convert_to_dict(data):
"""Returns a dictionary of US state and the total number of COVID-19 cases as key-value pairs.
Parameters:
data (list): a list of lists that contain states and total number of cases
Returns:
dict: a dictionary that contains state and the total number of cases as key-value pairs
"""
state_2_case_counts = {}
for row in data:
state_2_case_counts[row[0]] = row[1]
return state_2_case_counts
### PROBLEM 3
def get_total_num_policies(data):
"""Returns a dictionary of US state id and the total number of state-level policies as key-value pairs.
Parameters:
data (list): a list of dictionaries where each dictionary is formed from the data.
Returns:
state_id_2_policy_counts (dict): a dictionary of US state id and the total number of state-level policies as key-value pairs
"""
state_id_2_policy_counts = {}
for row in data:
if row['policy_level'] == 'state' and row['start_stop'] == 'start':
if row['state_id'] not in state_id_2_policy_counts:
state_id_2_policy_counts[row['state_id']] = 1
else:
state_id_2_policy_counts[row['state_id']] += 1
return state_id_2_policy_counts
### PROBLEM 4
def summarize_data(state2state_id, state_id_2_policy_counts, state_2_case_counts):
"""Returns a list of dictionaries that contain four key-value pairs: US state, US state id, the total number of state-level policies, and the total number of COVID-19 cases.
Keys are state, state_id, total_policies, and total_cases.
Parameters:
three dictionaries:
state2state_id is a dictionary of state names and their ids as key-value pairs
state_id_2_policy_counts is a dictionary of US state id and the total number of state-level policies as key-value pairs
state_2_case_counts is a dictionary of US state and the total number of COVID-19 cases as key-value pairs
Returns:
data (list): a list of dictionaries with four key-value pairs.
"""
data = []
for state, state_id in state2state_id.items():
info = {}
info['state'] = state
info['state_id'] = state_id
info['total_policies'] = 0
info['total_cases'] = 0
if state_id in state_id_2_policy_counts:
info['total_policies'] = state_id_2_policy_counts[state_id]
if state in state_2_case_counts:
info['total_cases'] = state_2_case_counts[state]
data.append(info)
return data
### PROBLEM 5
def write_csv(data, filepath):
"""Writes a csv file from an input data given a filepath.
Parameters:
data (list): a list of dictionaries with state, state_id, the total number of state-level policies, and the total number of COVID-19 cases.
filepath (str): a filepath to store data to a csv file.
Returns:
None
"""
fieldnames = list(data[0].keys())
with open(filepath, mode='w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for d in data:
writer.writerow(d)
def main():
"""Program entry point. Handles program workflow (or words to that effect).
Parameters:
None
Returns:
None
"""
### Problem 1 (20 points)
cases = read_txt('us_covid19_cases.txt')
policies = read_csv('us_policy_updates.csv')
### Problem 2 (10 points)
state2cases = convert_to_dict(cases)
print(f"\n2.0 state2cases = {state2cases}")
### Problem 3 (30 points)
state_id2policies = get_total_num_policies(policies)
print(f"\n3.0 state_id2policies = {state_id2policies}")
### Problem 4 (30 points)
summarized_info = summarize_data(state2state_id,state_id2policies,state2cases)
print(f"\n4.0 summarized_info = {summarized_info}")
### Problem 5 (10 points)
write_csv(summarized_info,'state_covid19_related_info.csv')
#Do not delete the lines below.
if __name__ == '__main__':
main()
|
en
| 0.785453
|
# START PROBLEM SET 08 # SETUP # END SETUP ### PROBLEM 1.1 Returns a list of lists where each item (list) is formed from the data split by tab. Parameters: filepath (str): a filepath that includes a filename with its extension Returns: list: a list of lists that contain states and total number of cases # .strip() remove newline, whitespace ### PROBLEM 1.2 Returns a list of dictionaries where each dictionary is formed from the data. Parameters: filepath (str): a filepath that includes a filename with its extension Returns: list: a list of dictionaries where each dictionary is formed from the data ### PROBLEM 2 Returns a dictionary of US state and the total number of COVID-19 cases as key-value pairs. Parameters: data (list): a list of lists that contain states and total number of cases Returns: dict: a dictionary that contains state and the total number of cases as key-value pairs ### PROBLEM 3 Returns a dictionary of US state id and the total number of state-level policies as key-value pairs. Parameters: data (list): a list of dictionaries where each dictionary is formed from the data. Returns: state_id_2_policy_counts (dict): a dictionary of US state id and the total number of state-level policies as key-value pairs ### PROBLEM 4 Returns a list of dictionaries that contain four key-value pairs: US state, US state id, the total number of state-level policies, and the total number of COVID-19 cases. Keys are state, state_id, total_policies, and total_cases. Parameters: three dictionaries: state2state_id is a dictionary of state names and their ids as key-value pairs state_id_2_policy_counts is a dictionary of US state id and the total number of state-level policies as key-value pairs state_2_case_counts is a dictionary of US state and the total number of COVID-19 cases as key-value pairs Returns: data (list): a list of dictionaries with four key-value pairs. ### PROBLEM 5 Writes a csv file from an input data given a filepath. 
Parameters: data (list): a list of dictionaries with state, state_id, the total number of state-level policies, and the total number of COVID-19 cases. filepath (str): a filepath to store data to a csv file. Returns: None Program entry point. Handles program workflow (or words to that effect). Parameters: None Returns: None ### Problem 1 (20 points) ### Problem 2 (10 points) ### Problem 3 (30 points) ### Problem 4 (30 points) ### Problem 5 (10 points) #Do not delete the lines below.
| 3.200236
| 3
|
Classes and Objects/class1.py
|
artuguen28/Python_bible_intermediates
| 0
|
6627511
|
# Creating classes
class Car:
    """Simple car record that also counts how many cars were created."""

    # Class-wide counter shared by every instance.
    amount_cars = 0

    def __init__(self, manufacturer, model, hp):
        """Store the car's data and bump the shared counter."""
        self.manufacturer = manufacturer
        self.model = model
        self.hp = hp
        Car.amount_cars += 1

    def print_info(self):
        """Print a one-line summary of this car."""
        print(f"Manufacturer: {self.manufacturer}| Model: {self.model}| HP: {self.hp}")

    def print_car_amount(self):
        """Print how many Car objects have been created so far."""
        print(f"Amount: {Car.amount_cars}")

    # A __del__ hook could decrement the counter on garbage collection:
    #   def __del__(self):
    #       print("Object gets deleted!")
    #       Car.amount_cars -= 1
# Instantiate four sample cars; each construction bumps Car.amount_cars.
myCar1 = Car("Tesla", "Model X", 525)
myCar2 = Car("BMW", "X3", 200)
myCar3 = Car("VW", "Golf", 100)
myCar4 = Car("Porsche", "911", 520)
# Hidden Attributes
class MyClass:
    """Demonstrates name mangling for double-underscore attributes."""

    def __init__(self):
        greeting = "Hello"
        # Two leading underscores trigger name mangling: outside the class
        # the attribute is only reachable as _MyClass__hidden.
        self.__hidden = greeting
        print(self.__hidden)  # Works from inside the class
# Inheritance
class Person:
    """A person described by a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def get_older(self, years):
        """Increase this person's age by *years*."""
        self.age = self.age + years
# The Programmer class inherits the attributes and functions of the Person class
class Programmer(Person):
    """A Person who additionally has a favorite programming language."""

    def __init__(self, name, age, language):
        # Delegate name/age handling to the Person base class.
        super().__init__(name, age)
        self.language = language

    def print_language(self):
        """Print this programmer's favorite language."""
        print(f"Favorite Programming Language: {self.language}")
# Exercise the subclass: build a Programmer and print their language.
p1 = Programmer("Mike", 30, "Python")
p1.print_language()
# Operator Overloading
class Vector():
    """A 2-D vector that overloads + for component-wise addition."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        # Human-readable form used by print().
        return f"X: {self.x} |Y: {self.y}"

    def __add__(self, other):
        # Build the sum component by component and return a new Vector.
        new_x = self.x + other.x
        new_y = self.y + other.y
        return Vector(new_x, new_y)
# Add two vectors with the overloaded + operator and print all three.
v1 = Vector(3, 5)
v2 = Vector(6, 2)
v3 = v1 + v2
print(v1)
print(v2)
print(v3)
|
# Creating classes
class Car:
amount_cars = 0
# __init__ is the constructor
# self is a mandatory parameter
def __init__(self, manufacturer, model, hp):
self.manufacturer = manufacturer
self.model = model
self.hp = hp
Car.amount_cars += 1
def print_info(self):
print(f"Manufacturer: {self.manufacturer}| Model: {self.model}| HP: {self.hp}")
def print_car_amount(self):
print(f"Amount: {Car.amount_cars}")
# def__del__(self):
# print("Object gets deleted!")
# Car.amount_cars -= 1
myCar1 = Car("Tesla", "Model X", 525)
myCar2 = Car("BMW", "X3", 200)
myCar3 = Car("VW", "Golf", 100)
myCar4 = Car("Porsche", "911", 520)
# Hidden Attributes
class MyClass:
def __init__(self):
self.__hidden = "Hello"
print(self.__hidden) # Works
# Inheritance
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def get_older(self, years):
self.age += years
# The Programmer class inherits the attributes and functions of the Person class
class Programmer(Person):
def __init__(self, name, age, language):
super(Programmer, self).__init__(name, age)
self.language = language
def print_language(self):
print(f"Favorite Programming Language: {self.language}")
p1 = Programmer("Mike", 30, "Python")
p1.print_language()
# Operator Overloading
class Vector():
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self):
return f"X: {self.x} |Y: {self.y}"
def __add__(self, other):
return Vector(self.x + other.x, self.y + other.y)
v1 = Vector(3, 5)
v2 = Vector(6, 2)
v3 = v1 + v2
print(v1)
print(v2)
print(v3)
|
en
| 0.583581
|
# Creating classes # __init__ is the constructor # self is a mandatory parameter # def__del__(self): # print("Object gets deleted!") # Car.amount_cars -= 1 # Hidden Atributes # Works # Inheritance # The Programmer class inherits the and functions of Person class # Operator Overloading
| 4.403006
| 4
|
site/populate.py
|
annaelde/wall-app
| 1
|
6627512
|
<reponame>annaelde/wall-app<gh_stars>1-10
"""
Populates the database for testing and development.
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
application = get_wsgi_application()
from users.models import User
from posts.models import Post
if __name__ == '__main__':
    # Filler text used as the body of every seeded post.
    post = 'Nulla metus metus, ullamcorper vel, tincidunt sed, euismod in, nibh. Quisque volutpat condimentum velit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam nec ante. Sed lacinia, urna non tincidunt mattis, tortor neque adipiscing diam, a cursus ipsum ante quis turpis. Nulla facilisi. Ut fringilla. Suspendisse potenti. Nunc feugiat mi a tellus consequat imperdiet. Vestibulum sapien. Proin quam.'
    # Reuse the guest account if it already exists. The original bare
    # `except:` swallowed every error (even typos / DB failures); only the
    # "record not found" case should trigger creation.
    try:
        user = User.objects.get(username='guest')
    except User.DoesNotExist:
        # NOTE(review): Model.objects.create stores the password unhashed;
        # for a login-capable account prefer User.objects.create_user.
        user = User.objects.create(
            username='guest', password='password', email='')
    # Create five posts
    for _ in range(5):
        Post.objects.create(message=post, author=user)
|
"""
Populates the database for testing and development.
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
application = get_wsgi_application()
from users.models import User
from posts.models import Post
if __name__ == '__main__':
post = 'Nulla metus metus, ullamcorper vel, tincidunt sed, euismod in, nibh. Quisque volutpat condimentum velit. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam nec ante. Sed lacinia, urna non tincidunt mattis, tortor neque adipiscing diam, a cursus ipsum ante quis turpis. Nulla facilisi. Ut fringilla. Suspendisse potenti. Nunc feugiat mi a tellus consequat imperdiet. Vestibulum sapien. Proin quam.'
try:
user = User.objects.get(username='guest')
except:
user = User.objects.create(
username='guest', password='password', email='')
# Create five posts
for _ in range(5):
Post.objects.create(message=post, author=user)
|
en
| 0.886914
|
Populates the database for testing and development. # Create five posts
| 2.343755
| 2
|
tests/gptj/gptj_test_3d_training_with_fusion.py
|
sooftware/oslo
| 0
|
6627513
|
# Copyright 2021 TUNiB Inc.
import os
import random
import numpy
import torch
import torch.distributed as dist
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer
from oslo.models.gptj.modeling_gptj import (
GPTJForCausalLM,
GPTJForSequenceClassification,
)
class Test3DTraining:
    """Side-by-side comparison of single-GPU (1D) and tensor+pipeline
    parallel (3D, with kernel fusion) GPT-J training, printing both
    losses at every step so they can be checked for agreement.
    """

    def __init__(self, num_gpus, batch_size=1, total_step=3000):
        # Fix every RNG seed so the 1D and 3D runs stay comparable.
        random.seed(42)
        numpy.random.seed(42)
        torch.manual_seed(42)
        self.num_gpus = num_gpus
        self.batch_size = batch_size
        self.total_step = total_step
        self.save_path = "save"
        self.tokenizer = GPT2Tokenizer.from_pretrained("anton-l/gpt-j-tiny-random")
        # The GPT-2 tokenizer has no pad token; reuse EOS for padding.
        self.tokenizer.pad_token = self.tokenizer.eos_token

    @staticmethod
    def get_grad(model):
        """For debugging: (layer name, gradient) for each MLP fc_in weight."""
        param_per_layer = [
            (
                x[0].split(".mlp.fc_in.weight")[0].split("transformer.")[1],
                x[1].grad,
            )
            for x in model.named_parameters()
            if "mlp.fc_in.weight" in x[0]
        ]
        return param_per_layer

    @staticmethod
    def get_param(model):
        """For debugging: (layer name, rounded mean weight) per MLP fc_in."""
        param_per_layer = [
            (
                x[0].split(".mlp.fc_in.weight")[0].split("transformer.")[1],
                round(x[1].mean(-1)[0].item(), 5),
            )
            for x in model.named_parameters()
            if "mlp.fc_in.weight" in x[0]
        ]
        return param_per_layer

    @staticmethod
    def get_tied_param(model):
        """For debugging: inspect tied embedding (wte) and lm_head weights."""
        param_per_layer = [
            (x[0], round(x[1].mean(-1)[0].item(), 5))
            for x in model.named_parameters()
            if "wte" in x[0]
        ] + [
            (x[0], round(x[1].weight.data.mean(-1)[0].item(), 5))
            for x in model.named_children()
            if "lm_head" in x[0]
        ]
        return param_per_layer

    def test_gptj_lm_head_model(self):
        """Train causal-LM heads in 1D and 3D side by side on SQuAD contexts."""
        datasets = load_dataset("squad").data["train"]["context"]
        # Keep only short contexts to bound the sequence length.
        datasets = [str(sample) for sample in datasets if len(str(sample)) < 500]
        data_loader = DataLoader(datasets, batch_size=self.batch_size, num_workers=8)
        # 3D model: half the GPUs for pipeline, half for tensor parallelism,
        # with fused kernels enabled.
        model_3d = (
            GPTJForCausalLM.from_pretrained_with_parallel(
                "anton-l/gpt-j-tiny-random",
                pipeline_parallel_size=self.num_gpus // 2,
                tensor_parallel_size=self.num_gpus // 2,
            )
            .train()
            .fuse()
        )
        model_1d = GPTJForCausalLM.from_pretrained("anton-l/gpt-j-tiny-random").cuda()
        optimizer_1d = Adam(model_1d.parameters(), lr=1e-4, weight_decay=1e-5)
        optimizer_3d = Adam(model_3d.gpu_parameters(), lr=1e-4, weight_decay=1e-5)
        for i, data in enumerate(data_loader):
            optimizer_1d.zero_grad()
            optimizer_3d.zero_grad()
            tokens = self.tokenizer(
                data,
                padding=True,
                truncation=True,
                max_length=1024,
                return_tensors="pt",
            ).to("cuda")
            loss_3d = []
            # The 3D model yields one output per pipeline micro-batch;
            # backprop each and average the detached losses for logging.
            for output in model_3d(
                input_ids=tokens.input_ids,
                attention_mask=tokens.attention_mask,
                labels=tokens.input_ids,
                use_cache=False,
            ):
                micro_loss = output.loss
                micro_loss.backward()
                loss_3d.append(micro_loss.detach().item())
            loss_3d = sum(loss_3d) / len(loss_3d)
            loss_1d = model_1d(
                input_ids=tokens.input_ids,
                attention_mask=tokens.attention_mask,
                labels=tokens.input_ids,
                use_cache=False,
            ).loss
            loss_1d.backward()
            # Only rank 0 logs, to avoid duplicated output across processes.
            if dist.get_rank() == 0:
                print(
                    f"GPTJForCausalLM - "
                    f"step: {i}, "
                    f"loss_1d:{loss_1d}, "
                    f"loss_3d:{loss_3d}"
                )
            optimizer_1d.step()
            optimizer_3d.step()
            if i >= self.total_step:
                break
            if i % 300 == 0:
                # Periodic checkpoint of both models (unmerged shards).
                os.makedirs(self.save_path, exist_ok=True)
                model_3d.save_pretrained_with_parallel(
                    save_directory=self.save_path + "/3d",
                    save_with_merging=False,
                )
                model_1d.save_pretrained_with_parallel(
                    save_directory=self.save_path + "/1d",
                    save_with_merging=False,
                )

    def test_gptj_for_sequence_classification(self):
        """Same 1D-vs-3D comparison for 3-label classification on MultiNLI."""
        datasets = load_dataset("multi_nli").data["train"]
        # NOTE(review): columns 2/5/9 are assumed to be premise, hypothesis
        # and label in the multi_nli arrow schema — confirm against the
        # dataset version in use.
        premise, hypothesis, label = datasets[2], datasets[5], datasets[9]
        # Join premise and hypothesis with EOS as the separator token.
        datasets = [
            {"texts": str(p) + self.tokenizer.eos_token + str(h), "label": l.as_py()}
            for p, h, l in zip(premise, hypothesis, label)
        ]
        model_3d = (
            GPTJForSequenceClassification.from_pretrained_with_parallel(
                "anton-l/gpt-j-tiny-random",
                pipeline_parallel_size=self.num_gpus // 2,
                tensor_parallel_size=self.num_gpus // 2,
                num_labels=3,
            )
            .train()
            .fuse()
        )
        model_1d = (
            GPTJForSequenceClassification.from_pretrained(
                "anton-l/gpt-j-tiny-random", num_labels=3
            )
            .cuda()
            .train()
        )
        # Classification pads with EOS, so both configs must know the pad id.
        model_3d.config.pad_token_id = self.tokenizer.eos_token_id
        model_1d.config.pad_token_id = self.tokenizer.eos_token_id
        data_loader = DataLoader(datasets, batch_size=self.batch_size, num_workers=8)
        optimizer_1d = Adam(model_1d.parameters(), lr=1e-3, weight_decay=1e-5)
        optimizer_3d = Adam(model_3d.gpu_parameters(), lr=1e-3, weight_decay=1e-5)
        for i, data in enumerate(data_loader):
            optimizer_1d.zero_grad()
            optimizer_3d.zero_grad()
            tokens = self.tokenizer(
                data["texts"],
                padding=True,
                truncation=True,
                max_length=1024,
                return_tensors="pt",
            ).to("cuda")
            loss_3d = []
            # One output per pipeline micro-batch, as in the LM test above.
            for output in model_3d(
                input_ids=tokens.input_ids,
                attention_mask=tokens.attention_mask,
                labels=data["label"].cuda(),
            ):
                micro_loss = output.loss
                micro_loss.backward()
                loss_3d.append(micro_loss.detach().item())
            loss_3d = sum(loss_3d) / len(loss_3d)
            loss_1d = model_1d(
                input_ids=tokens.input_ids,
                attention_mask=tokens.attention_mask,
                labels=data["label"].cuda(),
            ).loss
            loss_1d.backward()
            if dist.get_rank() == 0:
                print(
                    f"GPTJForSequenceClassification - "
                    f"step: {i}, "
                    f"loss_1d:{loss_1d}, "
                    f"loss_3d:{loss_3d}"
                )
            optimizer_1d.step()
            optimizer_3d.step()
            if i >= self.total_step:
                break
            if i % 300 == 0:
                os.makedirs(self.save_path, exist_ok=True)
                model_3d.save_pretrained_with_parallel(
                    save_directory=self.save_path + "/3d",
                    save_with_merging=False,
                )
                model_1d.save_pretrained_with_parallel(
                    save_directory=self.save_path + "/1d",
                    save_with_merging=False,
                )
# Manual entry point; expects a 4-GPU environment.
if __name__ == "__main__":
    test = Test3DTraining(num_gpus=4, batch_size=4)
    test.test_gptj_lm_head_model()
|
# Copyright 2021 TUNiB Inc.
import os
import random
import numpy
import torch
import torch.distributed as dist
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from transformers import GPT2Tokenizer
from oslo.models.gptj.modeling_gptj import (
GPTJForCausalLM,
GPTJForSequenceClassification,
)
class Test3DTraining:
def __init__(self, num_gpus, batch_size=1, total_step=3000):
random.seed(42)
numpy.random.seed(42)
torch.manual_seed(42)
self.num_gpus = num_gpus
self.batch_size = batch_size
self.total_step = total_step
self.save_path = "save"
self.tokenizer = GPT2Tokenizer.from_pretrained("anton-l/gpt-j-tiny-random")
self.tokenizer.pad_token = self.tokenizer.eos_token
@staticmethod
def get_grad(model):
"""For debugging"""
param_per_layer = [
(
x[0].split(".mlp.fc_in.weight")[0].split("transformer.")[1],
x[1].grad,
)
for x in model.named_parameters()
if "mlp.fc_in.weight" in x[0]
]
return param_per_layer
@staticmethod
def get_param(model):
"""For debugging"""
param_per_layer = [
(
x[0].split(".mlp.fc_in.weight")[0].split("transformer.")[1],
round(x[1].mean(-1)[0].item(), 5),
)
for x in model.named_parameters()
if "mlp.fc_in.weight" in x[0]
]
return param_per_layer
@staticmethod
def get_tied_param(model):
"""For debugging"""
param_per_layer = [
(x[0], round(x[1].mean(-1)[0].item(), 5))
for x in model.named_parameters()
if "wte" in x[0]
] + [
(x[0], round(x[1].weight.data.mean(-1)[0].item(), 5))
for x in model.named_children()
if "lm_head" in x[0]
]
return param_per_layer
def test_gptj_lm_head_model(self):
datasets = load_dataset("squad").data["train"]["context"]
datasets = [str(sample) for sample in datasets if len(str(sample)) < 500]
data_loader = DataLoader(datasets, batch_size=self.batch_size, num_workers=8)
model_3d = (
GPTJForCausalLM.from_pretrained_with_parallel(
"anton-l/gpt-j-tiny-random",
pipeline_parallel_size=self.num_gpus // 2,
tensor_parallel_size=self.num_gpus // 2,
)
.train()
.fuse()
)
model_1d = GPTJForCausalLM.from_pretrained("anton-l/gpt-j-tiny-random").cuda()
optimizer_1d = Adam(model_1d.parameters(), lr=1e-4, weight_decay=1e-5)
optimizer_3d = Adam(model_3d.gpu_parameters(), lr=1e-4, weight_decay=1e-5)
for i, data in enumerate(data_loader):
optimizer_1d.zero_grad()
optimizer_3d.zero_grad()
tokens = self.tokenizer(
data,
padding=True,
truncation=True,
max_length=1024,
return_tensors="pt",
).to("cuda")
loss_3d = []
for output in model_3d(
input_ids=tokens.input_ids,
attention_mask=tokens.attention_mask,
labels=tokens.input_ids,
use_cache=False,
):
micro_loss = output.loss
micro_loss.backward()
loss_3d.append(micro_loss.detach().item())
loss_3d = sum(loss_3d) / len(loss_3d)
loss_1d = model_1d(
input_ids=tokens.input_ids,
attention_mask=tokens.attention_mask,
labels=tokens.input_ids,
use_cache=False,
).loss
loss_1d.backward()
if dist.get_rank() == 0:
print(
f"GPTJForCausalLM - "
f"step: {i}, "
f"loss_1d:{loss_1d}, "
f"loss_3d:{loss_3d}"
)
optimizer_1d.step()
optimizer_3d.step()
if i >= self.total_step:
break
if i % 300 == 0:
os.makedirs(self.save_path, exist_ok=True)
model_3d.save_pretrained_with_parallel(
save_directory=self.save_path + "/3d",
save_with_merging=False,
)
model_1d.save_pretrained_with_parallel(
save_directory=self.save_path + "/1d",
save_with_merging=False,
)
def test_gptj_for_sequence_classification(self):
datasets = load_dataset("multi_nli").data["train"]
premise, hypothesis, label = datasets[2], datasets[5], datasets[9]
datasets = [
{"texts": str(p) + self.tokenizer.eos_token + str(h), "label": l.as_py()}
for p, h, l in zip(premise, hypothesis, label)
]
model_3d = (
GPTJForSequenceClassification.from_pretrained_with_parallel(
"anton-l/gpt-j-tiny-random",
pipeline_parallel_size=self.num_gpus // 2,
tensor_parallel_size=self.num_gpus // 2,
num_labels=3,
)
.train()
.fuse()
)
model_1d = (
GPTJForSequenceClassification.from_pretrained(
"anton-l/gpt-j-tiny-random", num_labels=3
)
.cuda()
.train()
)
model_3d.config.pad_token_id = self.tokenizer.eos_token_id
model_1d.config.pad_token_id = self.tokenizer.eos_token_id
data_loader = DataLoader(datasets, batch_size=self.batch_size, num_workers=8)
optimizer_1d = Adam(model_1d.parameters(), lr=1e-3, weight_decay=1e-5)
optimizer_3d = Adam(model_3d.gpu_parameters(), lr=1e-3, weight_decay=1e-5)
for i, data in enumerate(data_loader):
optimizer_1d.zero_grad()
optimizer_3d.zero_grad()
tokens = self.tokenizer(
data["texts"],
padding=True,
truncation=True,
max_length=1024,
return_tensors="pt",
).to("cuda")
loss_3d = []
for output in model_3d(
input_ids=tokens.input_ids,
attention_mask=tokens.attention_mask,
labels=data["label"].cuda(),
):
micro_loss = output.loss
micro_loss.backward()
loss_3d.append(micro_loss.detach().item())
loss_3d = sum(loss_3d) / len(loss_3d)
loss_1d = model_1d(
input_ids=tokens.input_ids,
attention_mask=tokens.attention_mask,
labels=data["label"].cuda(),
).loss
loss_1d.backward()
if dist.get_rank() == 0:
print(
f"GPTJForSequenceClassification - "
f"step: {i}, "
f"loss_1d:{loss_1d}, "
f"loss_3d:{loss_3d}"
)
optimizer_1d.step()
optimizer_3d.step()
if i >= self.total_step:
break
if i % 300 == 0:
os.makedirs(self.save_path, exist_ok=True)
model_3d.save_pretrained_with_parallel(
save_directory=self.save_path + "/3d",
save_with_merging=False,
)
model_1d.save_pretrained_with_parallel(
save_directory=self.save_path + "/1d",
save_with_merging=False,
)
if __name__ == "__main__":
test = Test3DTraining(num_gpus=4, batch_size=4)
test.test_gptj_lm_head_model()
|
en
| 0.652894
|
# Copyright 2021 TUNiB Inc. For debugging For debugging For debugging
| 1.991479
| 2
|
qafan/checkheaders.py
|
quatrope/qafan
| 0
|
6627514
|
<filename>qafan/checkheaders.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2022, QuatroPe
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Tool to check if the headers of all python files are correct."""
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import pathlib
from typing import List, OrderedDict
import attr
import typer
from . import VERSION
# =============================================================================
# FUNCTIONS
# =============================================================================
def lines_rstrip(text):
    """Return *text* with trailing whitespace removed from every line."""
    stripped = [line.rstrip() for line in text.splitlines()]
    return "\n".join(stripped)


def check_file_header(fpath, header_tpl):
    """Return True if the first lines of *fpath* match *header_tpl*.

    *header_tpl* is assumed to already be right-stripped (see
    ``lines_rstrip``); the same normalization is applied to the lines
    read from the file before comparing.
    """
    fpath = pathlib.Path(fpath)
    expected_lines = len(header_tpl.splitlines())
    with open(fpath) as fp:
        fheader = "".join(fp.readlines()[:expected_lines])
    return lines_rstrip(fheader) == header_tpl
# =============================================================================
# CLI
# =============================================================================
@attr.s(frozen=True)
class CLI:
    """Check if python files contain the appropriate header."""

    # Extra text shown with the CLI help output.
    footnotes = "\n".join(
        [
            "This software is under the BSD 3-Clause License.",
            "Copyright (c) 2021, <NAME>.",
            "For bug reporting or other instructions please check:"
            " https://github.com/quatrope/qafan",
        ]
    )

    # The typer application; built by the default factory below so that
    # every public method of this class is registered as a sub-command.
    run = attr.ib(init=False)

    @run.default
    def _set_run_default(self):
        # Register each public bound method of this instance as a typer
        # command; underscore-prefixed names are skipped.
        app = typer.Typer()
        for k in dir(self):
            if k.startswith("_"):
                continue
            v = getattr(self, k)
            if inspect.ismethod(v):
                decorator = app.command()
                decorator(v)
        return app

    def version(self):
        """Print checktestdir.py version."""
        typer.echo(f"{__file__ } v.{VERSION}")

    def check(
        self,
        sources: List[pathlib.Path] = typer.Argument(
            ..., help="Path to the test structure."
        ),
        header_template: pathlib.Path = typer.Option(
            ..., help="Path to the header template."
        ),
        verbose: bool = typer.Option(
            default=False, help="Show all the result"
        ),
    ):
        """Check if python files contain the appropriate header."""
        # Maps each checked file path -> bool (did its header match?).
        results = OrderedDict()
        try:
            # Normalize the template once; check_file_header normalizes
            # each file's header the same way before comparing.
            header_tpl = lines_rstrip(header_template.read_text())
            for src in sources:
                if src.is_dir():
                    # Recurse into directories, checking every .py file.
                    for fpath in src.glob("**/*.py"):
                        results[fpath] = check_file_header(fpath, header_tpl)
                elif src.suffix in (".py",):
                    results[src] = check_file_header(src, header_tpl)
                else:
                    raise ValueError(f"Invalid file type {src.suffix}")
        except OSError as err:
            typer.echo(typer.style(str(err), fg=typer.colors.RED))
            raise typer.Exit(code=1)
        all_headers_ok = True
        for fpath, header_ok in results.items():
            if header_ok:
                fg = typer.colors.GREEN
                status = "HEADER MATCH"
            else:
                all_headers_ok = False
                fg = typer.colors.RED
                status = typer.style(
                    "HEADER DOES NOT MATCH", fg=typer.colors.YELLOW
                )
            # Matches are printed only in verbose mode; mismatches always.
            if verbose or not header_ok:
                msg = f"{fpath} -> {status}"
                typer.echo(typer.style(msg, fg=fg))
        if all_headers_ok:
            final_fg = typer.colors.GREEN
            final_status = "All files has the correct header"
            exit_code = 0
        else:
            final_fg = typer.colors.RED
            final_status = "Not all headers match!"
            exit_code = 1
        typer.echo("-------------------------------------")
        typer.echo(typer.style(final_status, fg=final_fg))
        # The exit code communicates overall success (0) or failure (1).
        raise typer.Exit(code=exit_code)
def main():
    """Run the checkheaders.py cli interface."""
    CLI().run()
# Script entry point.
if __name__ == "__main__":
    main()
|
<filename>qafan/checkheaders.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2022, QuatroPe
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Tool to check if the headers of all python files are correct."""
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import pathlib
from typing import List, OrderedDict
import attr
import typer
from . import VERSION
# =============================================================================
# FUNCTIONS
# =============================================================================
def lines_rstrip(text):
return "\n".join(line.rstrip() for line in text.splitlines())
def check_file_header(fpath, header_tpl):
if not isinstance(fpath, pathlib.Path):
fpath = pathlib.Path(fpath)
lines = len(header_tpl.splitlines())
with open(fpath) as fp:
fheader = "".join(fp.readlines()[:lines])
fheader = lines_rstrip(fheader)
header_ok = fheader == header_tpl
return header_ok
# =============================================================================
# CLI
# =============================================================================
@attr.s(frozen=True)
class CLI:
"""Check if python files contain the appropriate header."""
footnotes = "\n".join(
[
"This software is under the BSD 3-Clause License.",
"Copyright (c) 2021, <NAME>.",
"For bug reporting or other instructions please check:"
" https://github.com/quatrope/qafan",
]
)
run = attr.ib(init=False)
@run.default
def _set_run_default(self):
app = typer.Typer()
for k in dir(self):
if k.startswith("_"):
continue
v = getattr(self, k)
if inspect.ismethod(v):
decorator = app.command()
decorator(v)
return app
def version(self):
"""Print checktestdir.py version."""
typer.echo(f"{__file__ } v.{VERSION}")
def check(
self,
sources: List[pathlib.Path] = typer.Argument(
..., help="Path to the test structure."
),
header_template: pathlib.Path = typer.Option(
..., help="Path to the header template."
),
verbose: bool = typer.Option(
default=False, help="Show all the result"
),
):
"""Check if python files contain the appropriate header."""
results = OrderedDict()
try:
header_tpl = lines_rstrip(header_template.read_text())
for src in sources:
if src.is_dir():
for fpath in src.glob("**/*.py"):
results[fpath] = check_file_header(fpath, header_tpl)
elif src.suffix in (".py",):
results[src] = check_file_header(src, header_tpl)
else:
raise ValueError(f"Invalid file type {src.suffix}")
except OSError as err:
typer.echo(typer.style(str(err), fg=typer.colors.RED))
raise typer.Exit(code=1)
all_headers_ok = True
for fpath, header_ok in results.items():
if header_ok:
fg = typer.colors.GREEN
status = "HEADER MATCH"
else:
all_headers_ok = False
fg = typer.colors.RED
status = typer.style(
"HEADER DOES NOT MATCH", fg=typer.colors.YELLOW
)
if verbose or not header_ok:
msg = f"{fpath} -> {status}"
typer.echo(typer.style(msg, fg=fg))
if all_headers_ok:
final_fg = typer.colors.GREEN
final_status = "All files has the correct header"
exit_code = 0
else:
final_fg = typer.colors.RED
final_status = "Not all headers match!"
exit_code = 1
typer.echo("-------------------------------------")
typer.echo(typer.style(final_status, fg=final_fg))
raise typer.Exit(code=exit_code)
def main():
"""Run the checkheaders.py cli interface."""
cli = CLI()
cli.run()
if __name__ == "__main__":
main()
|
en
| 0.391613
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised)) # Copyright (c) 2022, QuatroPe # All rights reserved. # ============================================================================= # DOCS # ============================================================================= Tool to check if the headers of all python files are correct. # ============================================================================= # IMPORTS # ============================================================================= # ============================================================================= # FUNCTIONS # ============================================================================= # ============================================================================= # CLI # ============================================================================= Check if python files contain the appropriate header. Print checktestdir.py version. Check if python files contain the appropriate header. Run the checkheaders.py cli interface.
| 1.581017
| 2
|
pysc2/agents/myAgent/myAgent_11/net/bicnet_for_level_2/bicnet.py
|
Hotpotfish/pysc2
| 0
|
6627515
|
import tensorflow as tf
from pysc2.agents.myAgent.myAgent_11.config import config
import tensorflow.contrib.slim as slim
class bicnet(object):
    """BiCNet actor-critic graph (TF1): per-agent encoders feed a
    bidirectional GRU shared across agents, with DDPG-style eval/target
    networks, soft target updates, and separate actor/critic optimizers.
    """

    def __init__(self, mu, sigma, learning_rate, action_dim, statedim, agents_number, enemy_number, name):  # initialization
        # Network hyper-parameters.
        self.mu = mu
        self.sigma = sigma
        self.learning_rate = learning_rate
        # Action dimension, action-parameter dimension, state dimension.
        self.statedim = statedim
        self.action_dim = action_dim
        self.state_dim = statedim
        self.agents_number = agents_number
        self.enemy_number = enemy_number
        self.name = name
        self._setup_placeholders_graph()
        with tf.variable_scope('Actor'):
            # Eval (online) actor is trainable; the target actor is frozen
            # and driven only by the soft-replacement op below.
            self.a = self._build_graph_a(self.agents_local_observation, 'eval', train=True)
            a_ = self._build_graph_a(self.agents_local_observation_next, 'target', train=False)
        with tf.variable_scope('Critic'):
            self.q = self._build_graph_c(self.state_input, self.a, 'eval', train=True)
            q_ = self._build_graph_c(self.state_input_next, a_, 'target', train=False)
        # networks parameters
        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
        # target net replacement: Polyak soft update at rate config.TAU
        self.soft_replace = [tf.assign(t, (1 - config.TAU) * t + config.TAU * e)
                             for t, e in zip(self.at_params + self.ct_params, self.ae_params + self.ce_params)]
        # One-step TD target from the target critic.
        q_target = self.reward + config.GAMMA * q_
        self.td_error = tf.losses.mean_squared_error(labels=q_target, predictions=self.q)
        # Critic trains at twice the actor's learning rate.
        self.ctrain = tf.train.AdamOptimizer(self.learning_rate * 2).minimize(self.td_error, var_list=self.ce_params)
        self.a_loss = - tf.reduce_mean(self.q)  # maximize the q
        self.atrain = tf.train.AdamOptimizer(self.learning_rate).minimize(self.a_loss, var_list=self.ae_params)

    def _setup_placeholders_graph(self):
        # s
        self.state_input = tf.placeholder("float", shape=self.statedim, name='state_input')  # global state
        self.agents_local_observation = tf.placeholder("float",
                                                       shape=[None, self.agents_number, config.COOP_AGENTS_OBDIM],
                                                       name='agents_local_observation')
        # s_
        self.state_input_next = tf.placeholder("float", shape=self.statedim, name='state_input_next')  # global state
        self.agents_local_observation_next = tf.placeholder("float",
                                                            shape=[None, self.agents_number, config.COOP_AGENTS_OBDIM],
                                                            name='agents_local_observation_next')
        self.reward = tf.placeholder("float", shape=[None], name='reward')

    def _build_graph_a(self, agents_local_observation, scope_name, train):
        # Actor network over the agents' local observations.
        with tf.variable_scope(scope_name):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                trainable=train,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(0.05)):
                encoder_outputs = self._observation_encoder_a(agents_local_observation, self.agents_number, '_observation_encoder')
                bicnet_outputs = self._bicnet_build_a(encoder_outputs, self.agents_number, '_bicnet_build')
                return bicnet_outputs

    def _observation_encoder_a(self, agents_local_observation, agents_number, scope_name):
        with tf.variable_scope(scope_name):
            encoder = []
            # Per-agent fully connected encoder; a distinct scope per agent
            # index means weights are NOT shared across agents.
            for i in range(agents_number):
                fc1 = slim.fully_connected(agents_local_observation[:, i, :], 100, scope='full_connected1' + "_" + str(i))
                encoder.append(fc1)
            encoder = tf.transpose(encoder, [1, 0, 2])
            encoder = tf.unstack(encoder, agents_number, 1)  # (self.agents_number,batch_size,obs_add_dim)
            return encoder

    def _bicnet_build_a(self, encoder_outputs, agents_number, scope_name):
        with tf.variable_scope(scope_name):
            outputs = []
            # Bidirectional GRU over the agent axis lets each agent's action
            # depend on the other agents' encoded observations.
            lstm_fw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_fw_cell")
            lstm_bw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_bw_cell")
            bicnet_outputs, _, _ = tf.nn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, encoder_outputs,
                                                                  dtype=tf.float32)
            for i in range(agents_number):
                # fc1 = slim.fully_connected(bicnet_outputs[i], 30, scope='full_connected1' + "_" + str(i))
                # Softmax head: per-agent probability distribution over actions.
                fc1 = slim.fully_connected(bicnet_outputs[i], self.action_dim, activation_fn=tf.nn.softmax,
                                           scope='full_connected1' + "_" + str(i))
                outputs.append(fc1)
            outputs = tf.unstack(outputs, self.agents_number)  # (agents_number, batch_size, action_dim)
            outputs = tf.transpose(outputs, [1, 0, 2])
            return outputs  # (batch_size,agents_number,action_dim)

    #################################### critic_net ####################################
    def _build_graph_c(self, state_input, action_input, scope_name, train):
        # Critic network over the global state and the actor's actions.
        with tf.variable_scope(scope_name):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                trainable=train,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(0.05)):
                encoder_outputs = self._observation_encoder_c(state_input, action_input, self.agents_number,
                                                              '_observation_encoder')
                bicnet_outputs = self._bicnet_build_c(encoder_outputs, self.agents_number, '_bicnet_build')
                return bicnet_outputs

    def _observation_encoder_c(self, state_input, action_input, agents_number, scope_name):
        with tf.variable_scope(scope_name):
            encoder = []
            for i in range(agents_number):
                fc1_s = slim.fully_connected(state_input[:, i], 10, scope='full_connected_s1' + "_" + str(i))
                # fc2_s = slim.fully_connected(fc1_s, 30, scope='full_connected_s2' + "_" + str(i))
                fc1_a = slim.fully_connected(action_input[:, i], 10, scope='full_connected_a1' + "_" + str(i))
                # Fuse state and action features by element-wise addition.
                data = fc1_s + fc1_a
                fc1 = slim.fully_connected(data, 10, scope='full_connected1' + "_" + str(i))
                encoder.append(fc1)
            encoder = tf.transpose(encoder, [1, 0, 2])
            encoder = tf.unstack(encoder, agents_number, 1)  # (self.agents_number,batch_size,obs_add_dim)
            return encoder

    def _bicnet_build_c(self, encoder_outputs, agents_number, scope_name):
        with tf.variable_scope(scope_name):
            outputs = []
            lstm_fw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_fw_cell")
            lstm_bw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_bw_cell")
            bicnet_outputs, _, _ = tf.nn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, encoder_outputs,
                                                                  dtype=tf.float32)
            for i in range(agents_number):
                fc1 = slim.fully_connected(bicnet_outputs[i], 1, scope='full_connected1' + "_" + str(i))
                outputs.append(fc1)
            outputs = tf.unstack(outputs, self.agents_number)  # (agents_number, batch_size,1)
            outputs = tf.transpose(outputs, [1, 0, 2])  # (batch_size,agents_number,1)
            outputs = slim.flatten(outputs)
            # Collapse per-agent values into a single scalar Q estimate.
            fc2 = slim.fully_connected(outputs, 1, scope='full_connected2')
            # fc3 = slim.fully_connected(fc2, 1, scope='full_connected3')
            return fc2
|
import tensorflow as tf
from pysc2.agents.myAgent.myAgent_11.config import config
import tensorflow.contrib.slim as slim
class bicnet(object):
    """BiCNet-style actor-critic graph for cooperative multi-agent RL (TF1).

    Builds eval/target actor networks and eval/target critic networks, the
    soft target-update ops, and the Adam training ops for both networks.
    """
    def __init__(self, mu, sigma, learning_rate, action_dim, statedim, agents_number, enemy_number, name):  # initializer
        # Network hyper-parameters (mu/sigma presumably for exploration noise —
        # TODO confirm: they are stored but not used in this class).
        self.mu = mu
        self.sigma = sigma
        self.learning_rate = learning_rate
        # Action / state dimensionalities (state_dim is kept as an alias of statedim).
        self.statedim = statedim
        self.action_dim = action_dim
        self.state_dim = statedim
        self.agents_number = agents_number
        self.enemy_number = enemy_number
        self.name = name
        self._setup_placeholders_graph()
        # Actor: eval net (trainable) acts on current observations; target net
        # (frozen) acts on next observations.
        with tf.variable_scope('Actor'):
            self.a = self._build_graph_a(self.agents_local_observation, 'eval', train=True)
            a_ = self._build_graph_a(self.agents_local_observation_next, 'target', train=False)
        # Critic: Q(s, a) for eval, Q(s', a') for target.
        with tf.variable_scope('Critic'):
            self.q = self._build_graph_c(self.state_input, self.a, 'eval', train=True)
            q_ = self._build_graph_c(self.state_input_next, a_, 'target', train=False)
        # networks parameters
        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
        # target net replacement: target <- (1 - TAU) * target + TAU * eval
        self.soft_replace = [tf.assign(t, (1 - config.TAU) * t + config.TAU * e)
                             for t, e in zip(self.at_params + self.ct_params, self.ae_params + self.ce_params)]
        # TD target. NOTE(review): self.reward has shape [None] while q_ has a
        # trailing dim of 1; broadcasting may yield [None, None] — confirm intended.
        q_target = self.reward + config.GAMMA * q_
        self.td_error = tf.losses.mean_squared_error(labels=q_target, predictions=self.q)
        # Critic learns at twice the actor's learning rate.
        self.ctrain = tf.train.AdamOptimizer(self.learning_rate * 2).minimize(self.td_error, var_list=self.ce_params)
        self.a_loss = - tf.reduce_mean(self.q)  # maximize the q
        self.atrain = tf.train.AdamOptimizer(self.learning_rate).minimize(self.a_loss, var_list=self.ae_params)
    def _setup_placeholders_graph(self):
        """Create feed placeholders for s, s', and the reward signal."""
        # s
        self.state_input = tf.placeholder("float", shape=self.statedim, name='state_input')  # global state
        self.agents_local_observation = tf.placeholder("float",
                                                       shape=[None, self.agents_number, config.COOP_AGENTS_OBDIM],
                                                       name='agents_local_observation')
        # s_
        self.state_input_next = tf.placeholder("float", shape=self.statedim, name='state_input_next')  # global state
        self.agents_local_observation_next = tf.placeholder("float",
                                                            shape=[None, self.agents_number, config.COOP_AGENTS_OBDIM],
                                                            name='agents_local_observation_next')
        self.reward = tf.placeholder("float", shape=[None], name='reward')
    def _build_graph_a(self, agents_local_observation, scope_name, train):
        """Actor: map each agent's local observation to a softmax action distribution."""
        # Shared observation of the environment plus each agent's local view.
        with tf.variable_scope(scope_name):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                trainable=train,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(0.05)):
                encoder_outputs = self._observation_encoder_a(agents_local_observation, self.agents_number, '_observation_encoder')
                bicnet_outputs = self._bicnet_build_a(encoder_outputs, self.agents_number, '_bicnet_build')
                return bicnet_outputs
    def _observation_encoder_a(self, agents_local_observation, agents_number, scope_name):
        """Per-agent FC encoding of local observations; returns a per-agent tensor list."""
        with tf.variable_scope(scope_name):
            encoder = []
            for i in range(agents_number):
                fc1 = slim.fully_connected(agents_local_observation[:, i, :], 100, scope='full_connected1' + "_" + str(i))
                encoder.append(fc1)
            # (agents, batch, feat) -> (batch, agents, feat) -> list of per-agent tensors.
            encoder = tf.transpose(encoder, [1, 0, 2])
            encoder = tf.unstack(encoder, agents_number, 1)  # (self.agents_number,batch_size,obs_add_dim)
            return encoder
    def _bicnet_build_a(self, encoder_outputs, agents_number, scope_name):
        """Actor communication layer: bidirectional GRU over agents, softmax per agent."""
        with tf.variable_scope(scope_name):
            outputs = []
            # NOTE: despite the "lstm_" names these are GRU cells (one per direction).
            lstm_fw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_fw_cell")
            lstm_bw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_bw_cell")
            bicnet_outputs, _, _ = tf.nn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, encoder_outputs,
                                                                  dtype=tf.float32)
            for i in range(agents_number):
                # fc1 = slim.fully_connected(bicnet_outputs[i], 30, scope='full_connected1' + "_" + str(i))
                fc1 = slim.fully_connected(bicnet_outputs[i], self.action_dim, activation_fn=tf.nn.softmax,
                                           scope='full_connected1' + "_" + str(i))
                outputs.append(fc1)
            outputs = tf.unstack(outputs, self.agents_number)  # (agents_number, batch_size, action_dim)
            outputs = tf.transpose(outputs, [1, 0, 2])
            return outputs  # (batch_size,agents_number,action_dim)
    #################################### critic_net ####################################
    def _build_graph_c(self, state_input, action_input, scope_name, train):
        """Critic: map (global state, joint action) to a scalar Q-value."""
        # Shared observation of the environment plus each agent's local view.
        with tf.variable_scope(scope_name):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                trainable=train,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=slim.batch_norm,
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(0.05)):
                encoder_outputs = self._observation_encoder_c(state_input, action_input, self.agents_number,
                                                              '_observation_encoder')
                bicnet_outputs = self._bicnet_build_c(encoder_outputs, self.agents_number, '_bicnet_build')
                return bicnet_outputs
    def _observation_encoder_c(self, state_input, action_input, agents_number, scope_name):
        """Encode per-agent (state, action) pairs: project both to 10 dims, add, FC."""
        with tf.variable_scope(scope_name):
            encoder = []
            for i in range(agents_number):
                fc1_s = slim.fully_connected(state_input[:, i], 10, scope='full_connected_s1' + "_" + str(i))
                # fc2_s = slim.fully_connected(fc1_s, 30, scope='full_connected_s2' + "_" + str(i))
                fc1_a = slim.fully_connected(action_input[:, i], 10, scope='full_connected_a1' + "_" + str(i))
                data = fc1_s + fc1_a
                fc1 = slim.fully_connected(data, 10, scope='full_connected1' + "_" + str(i))
                encoder.append(fc1)
            # (agents, batch, feat) -> (batch, agents, feat) -> list of per-agent tensors.
            encoder = tf.transpose(encoder, [1, 0, 2])
            encoder = tf.unstack(encoder, agents_number, 1)  # (self.agents_number,batch_size,obs_add_dim)
            return encoder
    def _bicnet_build_c(self, encoder_outputs, agents_number, scope_name):
        """Critic communication layer: bidirectional GRU over agents -> joint scalar Q."""
        with tf.variable_scope(scope_name):
            outputs = []
            # NOTE: despite the "lstm_" names these are GRU cells (one per direction).
            lstm_fw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_fw_cell")
            lstm_bw_cell = tf.nn.rnn_cell.GRUCell(self.action_dim, name="lstm_bw_cell")
            bicnet_outputs, _, _ = tf.nn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, encoder_outputs,
                                                                  dtype=tf.float32)
            # Per-agent scalar head.
            for i in range(agents_number):
                fc1 = slim.fully_connected(bicnet_outputs[i], 1, scope='full_connected1' + "_" + str(i))
                outputs.append(fc1)
            outputs = tf.unstack(outputs, self.agents_number)  # (agents_number, batch_size,1)
            outputs = tf.transpose(outputs, [1, 0, 2])  # (batch_size,agents_number,1)
            outputs = slim.flatten(outputs)
            # Combine per-agent scores into a single joint Q-value.
            fc2 = slim.fully_connected(outputs, 1, scope='full_connected2')
            # fc3 = slim.fully_connected(fc2, 1, scope='full_connected3')
            return fc2
|
en
| 0.244769
|
# 初始化 # 神经网络参数 # 动作维度数,动作参数维度数,状态维度数 # networks parameters # target net replacement # maximize the q # s # 全局状态 # s_ # 全局状态 # 环境和智能体本地的共同观察 # (self.agents_number,batch_size,obs_add_dim) # fc1 = slim.fully_connected(bicnet_outputs[i], 30, scope='full_connected1' + "_" + str(i)) # (agents_number, batch_size, action_dim) # (batch_size,agents_number,action_dim) #################################### critic_net #################################### # 环境和智能体本地的共同观察 # fc2_s = slim.fully_connected(fc1_s, 30, scope='full_connected_s2' + "_" + str(i)) # (self.agents_number,batch_size,obs_add_dim) # (agents_number, batch_size,1) # (batch_size,agents_number,1) # fc3 = slim.fully_connected(fc2, 1, scope='full_connected3')
| 2.417525
| 2
|
gitgud/levels/rampup/__init__.py
|
Ishan1742/git-gud
| 0
|
6627516
|
<gh_stars>0
import pkg_resources
from gitgud.levels.util import BasicChallenge
from gitgud.levels.util import Level
# Register the 'rampup' level. Each BasicChallenge is named after its challenge
# and loads its assets from the matching package-relative directory.
level = Level(
    'rampup',
    [
        BasicChallenge('detaching', pkg_resources.resource_filename(__name__, '_detaching/')),
        BasicChallenge('relrefs1', pkg_resources.resource_filename(__name__, '_relrefs1/')),
        BasicChallenge('relrefs2', pkg_resources.resource_filename(__name__, '_relrefs2/'))
    ]
)
|
import pkg_resources
from gitgud.levels.util import BasicChallenge
from gitgud.levels.util import Level
# Register the 'rampup' level. Each BasicChallenge is named after its challenge
# and loads its assets from the matching package-relative directory.
level = Level(
    'rampup',
    [
        BasicChallenge('detaching', pkg_resources.resource_filename(__name__, '_detaching/')),
        BasicChallenge('relrefs1', pkg_resources.resource_filename(__name__, '_relrefs1/')),
        BasicChallenge('relrefs2', pkg_resources.resource_filename(__name__, '_relrefs2/'))
    ]
)
|
none
| 1
| 1.780689
| 2
|
|
OpenCV Egitim Dosyalari/ch6/maskeleme.py
|
HisarCS/PiWarsTurkey-Library-Folders
| 3
|
6627517
|
<reponame>HisarCS/PiWarsTurkey-Library-Folders<filename>OpenCV Egitim Dosyalari/ch6/maskeleme.py
import cv2
import os
import numpy as np
# Masking demo: load the logo and show the original ("Logo Asil" = original logo).
logo = cv2.imread(os.path.abspath('PiWarsTurkeyLogo.png'))
cv2.imshow("Logo Asil", logo)
# Single-channel mask the same height/width as the logo; draw a filled white
# rectangle (color 255, thickness -1) over the region to keep.
dikdortgen = np.zeros((logo.shape[0], logo.shape[1]), dtype = "uint8")
cv2.rectangle(dikdortgen, (0, 0), (220, 387), 255, -1)
cv2.imshow("dikdortgen", dikdortgen)
# Bitwise AND with the mask keeps only the rectangle region of the logo.
bitselAnd = cv2.bitwise_and(logo, logo, mask = dikdortgen)
cv2.imshow("Maskeli", bitselAnd)
cv2.waitKey(0)  # block until a key is pressed so the windows stay visible
|
Egitim Dosyalari/ch6/maskeleme.py
import cv2
import os
import numpy as np
# Masking demo: load the logo and show the original ("Logo Asil" = original logo).
logo = cv2.imread(os.path.abspath('PiWarsTurkeyLogo.png'))
cv2.imshow("Logo Asil", logo)
# Single-channel mask the same height/width as the logo; draw a filled white
# rectangle (color 255, thickness -1) over the region to keep.
dikdortgen = np.zeros((logo.shape[0], logo.shape[1]), dtype = "uint8")
cv2.rectangle(dikdortgen, (0, 0), (220, 387), 255, -1)
cv2.imshow("dikdortgen", dikdortgen)
# Bitwise AND with the mask keeps only the rectangle region of the logo.
bitselAnd = cv2.bitwise_and(logo, logo, mask = dikdortgen)
cv2.imshow("Maskeli", bitselAnd)
cv2.waitKey(0)  # block until a key is pressed so the windows stay visible
|
none
| 1
| 3.0276
| 3
|
|
src/models/caps_activate_fn.py
|
LeanderLXZ/oracle-recognition
| 1
|
6627518
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class ActivationFunc(object):
    """Capsule-network activation functions."""

    @staticmethod
    def squash(x, batch_size, epsilon):
        """Squash capsule vectors along the `vec_dim` axis.

        Args:
            x: Tensor of shape (batch_size, num_caps, vec_dim, 1).
            batch_size: Expected batch size, used only for shape assertions.
            epsilon: Small constant added before the sqrt to avoid dividing by zero.

        Returns:
            Tensor of the same shape as `x`, squashed along `vec_dim`.
        """
        shape = x.get_shape().as_list()
        num_caps, vec_dim = shape[1], shape[2]
        # Squared L2 norm per capsule; keep dims so it broadcasts against x.
        sq_norm = tf.reduce_sum(tf.square(x), -2, keep_dims=True)
        assert sq_norm.get_shape() == (batch_size, num_caps, 1, 1)
        # Scaling factor; note the 0.5 (instead of the paper's 1) in the denominator.
        scale = tf.div(sq_norm, 0.5 + sq_norm)
        assert scale.get_shape() == (batch_size, num_caps, 1, 1)
        unit = tf.div(x, tf.sqrt(sq_norm + epsilon))
        assert unit.get_shape() == (batch_size, num_caps, vec_dim, 1)
        squashed = tf.multiply(scale, unit)
        assert squashed.get_shape() == (batch_size, num_caps, vec_dim, 1)
        return squashed
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class ActivationFunc(object):
    """Static collection of capsule activation functions."""

    @staticmethod
    def squash(x, batch_size, epsilon):
        """Apply the squashing nonlinearity along the capsule vector dimension.

        Shrinks short vectors toward zero and long vectors toward unit length
        while preserving direction. Input and output have shape
        (batch_size, num_caps, vec_dim, 1).
        """
        _, num_caps, vec_dim, _ = x.get_shape().as_list()
        squared_norm = tf.reduce_sum(tf.square(x), -2, keep_dims=True)
        expected_scalar_shape = (batch_size, num_caps, 1, 1)
        assert squared_norm.get_shape() == expected_scalar_shape
        # Denominator uses 0.5 rather than the canonical 1 + ||v||^2.
        factor = tf.div(squared_norm, 0.5 + squared_norm)
        assert factor.get_shape() == expected_scalar_shape
        direction = tf.div(x, tf.sqrt(squared_norm + epsilon))
        assert direction.get_shape() == (batch_size, num_caps, vec_dim, 1)
        result = tf.multiply(factor, direction)
        assert result.get_shape() == (batch_size, num_caps, vec_dim, 1)
        return result
|
en
| 0.752298
|
Squashing function Args: x: A tensor with shape: (batch_size, num_caps, vec_dim, 1). batch_size: Batch size epsilon: Add epsilon(a very small number) to zeros Returns: A tensor with the same shape as input tensor but squashed in 'vec_dim' dimension. # scalar_factor = tf.div(vec_squared_norm, 1 + vec_squared_norm)
| 2.774964
| 3
|
ai_api/__init__.py
|
sunshine-app/CodeAIBaidu
| 3
|
6627519
|
# -*- coding: utf-8 -*-
# @Time : 2019/4/25 9:27
# @Author : shine
# @File : __init__.py.py
|
# -*- coding: utf-8 -*-
# @Time : 2019/4/25 9:27
# @Author : shine
# @File : __init__.py.py
|
fr
| 0.276762
|
# -*- coding: utf-8 -*- # @Time : 2019/4/25 9:27 # @Author : shine # @File : __init__.py.py
| 0.944474
| 1
|
decoders/misc/writer.py
|
necrose99/Dshell
| 1
|
6627520
|
'''
Created on Jan 13, 2012
@author: tparker
'''
import dshell
import dpkt
from output import PCAPWriter
class DshellDecoder(dshell.Decoder):

    '''
    session writer - chain to a decoder to end the chain if the decoder does not output session or packets on its own
    if chained to a packet-based decoder, writes all packets to pcap file, can be used to convert or concatenate files
    if chained to a connection-based decoder, writes selected streams to session file
    '''

    def __init__(self, **kwargs):
        '''
        Constructor: registers the decoder as a raw pcap/session writer.
        '''
        self.file = None
        dshell.Decoder.__init__(self,
                                name='writer',
                                description='pcap/session writer',
                                author='twp',
                                raw=True,
                                optiondict=dict(filename=dict(default='%(clientip)s:%(clientport)s-%(serverip)s:%(serverport)s-%(direction)s.txt'),
                                                )
                                )

    def rawHandler(self, pktlen, pkt, ts):
        # Packet mode: count the frame and write it straight to the pcap output.
        self.decodedbytes += pktlen
        self.count += 1
        self.dump(pktlen, pkt, ts)  # pktlen may be wrong if we stripped vlan

    def IPHandler(self, addr, ip, ts, pkttype=None, **kw):
        self.decodedbytes += len(ip.data)
        self.count += 1
        # if we are passed in IP data vs layer-2 frames, we need to encapsulate
        # them
        # BUG FIX: the original passed the builtin ``type`` as pkttype; forward
        # the actual ``pkttype`` argument instead.
        self.dump(dpkt.ethernet.Ethernet(data=str(ip), pkttype=pkttype), ts=ts)

    def connectionHandler(self, conn):
        # Session mode: write the reassembled connection to the session file.
        self.write(conn)
dObj = DshellDecoder()
|
'''
Created on Jan 13, 2012
@author: tparker
'''
import dshell
import dpkt
from output import PCAPWriter
class DshellDecoder(dshell.Decoder):

    '''
    session writer - chain to a decoder to end the chain if the decoder does not output session or packets on its own
    if chained to a packet-based decoder, writes all packets to pcap file, can be used to convert or concatenate files
    if chained to a connection-based decoder, writes selected streams to session file
    '''

    def __init__(self, **kwargs):
        '''
        Constructor: registers the decoder as a raw pcap/session writer.
        '''
        self.file = None
        dshell.Decoder.__init__(self,
                                name='writer',
                                description='pcap/session writer',
                                author='twp',
                                raw=True,
                                optiondict=dict(filename=dict(default='%(clientip)s:%(clientport)s-%(serverip)s:%(serverport)s-%(direction)s.txt'),
                                                )
                                )

    def rawHandler(self, pktlen, pkt, ts):
        # Packet mode: count the frame and write it straight to the pcap output.
        self.decodedbytes += pktlen
        self.count += 1
        self.dump(pktlen, pkt, ts)  # pktlen may be wrong if we stripped vlan

    def IPHandler(self, addr, ip, ts, pkttype=None, **kw):
        self.decodedbytes += len(ip.data)
        self.count += 1
        # if we are passed in IP data vs layer-2 frames, we need to encapsulate
        # them
        # BUG FIX: the original passed the builtin ``type`` as pkttype; forward
        # the actual ``pkttype`` argument instead.
        self.dump(dpkt.ethernet.Ethernet(data=str(ip), pkttype=pkttype), ts=ts)

    def connectionHandler(self, conn):
        # Session mode: write the reassembled connection to the session file.
        self.write(conn)
dObj = DshellDecoder()
|
en
| 0.814954
|
Created on Jan 13, 2012 @author: tparker session writer - chain to a decoder to end the chain if the decoder does not output session or packets on its own if chained to a packet-based decoder, writes all packets to pcap file, can be used to convert or concatenate files if chained to a connection-based decoder, writes selected streams to session file Constructor # pktlen may be wrong if we stripped vlan # if we are passed in IP data vs layer-2 frames, we need to encapsulate # them
| 2.525476
| 3
|
stage/standard/test_sql_server_cdc_origin.py
|
streamsets/datacollector-tests
| 14
|
6627521
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
import pytest
import sqlalchemy
from streamsets.sdk.utils import Version
from streamsets.testframework.markers import database, sdc_min_version
from streamsets.testframework.utils import get_random_string
@pytest.fixture(scope='module')
def sdc_builder_hook():
    """Module-scoped hook that pins the data collector JVM heap to 2 GB."""
    def hook(data_collector):
        # Fixed heap (Xms == Xmx) avoids resize pauses during heavy CDC tests.
        data_collector.SDC_JAVA_OPTS = '-Xmx2048m -Xms2048m'
    return hook
logger = logging.getLogger(__name__)
DEFAULT_SCHEMA_NAME = 'dbo'
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types not supported
# Geometry and geography not supported
# Each entry: (SQL type, SQL literal to insert, expected SDC field type, expected field value).
DATA_TYPES_SQLSERVER = [
    ('DATE', "'2019-01-01'", 'DATE', 1546300800000),
    ('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
    ('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
    ('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
    ('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
    ('TIME', "'14:25:10'", 'TIME', 51910000),
    ('BIT', "1", 'BOOLEAN', True),
    ('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
    ('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
    ('REAL', '5.20', 'FLOAT', '5.2'),
    ('FLOAT', '5.20', 'DOUBLE', '5.2'),
    ('TINYINT', '255', 'SHORT', 255),
    ('SMALLINT', '-32768', 'SHORT', -32768),
    ('INT', '-2147483648', 'INTEGER', '-2147483648'),
    ('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
    ('MONEY', '255.60', 'DECIMAL', '255.6000'),
    ('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
    ('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
    ('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
    ('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('TEXT', "'Hello'", 'STRING', 'Hello'),
    ('NTEXT', "'Hello'", 'STRING', 'Hello'),
    ('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
    # ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='),  # Not supported
    # ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='),  # Not supported
    ('XML', "'<a></a>'", 'STRING', '<a/>')
]
@sdc_min_version('3.0.0.0')
@database('sqlserver')
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', DATA_TYPES_SQLSERVER, ids=[i[0] for i in DATA_TYPES_SQLSERVER])
def test_data_types(sdc_builder, sdc_executor, database, sql_type, insert_fragment, expected_type, expected_value, keep_data):
    """Verify each SQL Server type maps to the expected SDC field type and value.

    Creates a CDC-enabled table with one column of the type under test, inserts
    one real value and one NULL, then checks both emitted records.
    """
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')
    table_name = get_random_string(string.ascii_lowercase, 20)
    connection = database.engine.connect()
    try:
        # Create table
        connection.execute(f"""
            CREATE TABLE {table_name}(
                id int primary key,
                data_column {sql_type} NULL
            )
        """)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)
        # And insert a row with actual value
        connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
        # And a null
        connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
        builder = sdc_builder.get_pipeline_builder()
        origin = builder.add_stage('SQL Server CDC Client')
        origin.fetch_size = 1
        origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
        wiretap = builder.add_wiretap()
        # As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
        if sql_type == 'DATETIMEOFFSET':
            if Version(sdc_executor.version) >= Version('3.14.0'):
                expected_type = 'ZONED_DATETIME'
                expected_value = '2004-05-23T14:25:10.3456-08:00'
            else:
                expected_type = 'STRING'
                expected_value = '2004-05-23 14:25:10.3456 -08:00'
                # This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
                origin.on_unknown_type = 'CONVERT_TO_STRING'
        origin >> wiretap.destination
        pipeline = builder.build().configure_for_environment(database)
        # FIX: the pipeline was previously registered twice; add it exactly once.
        sdc_executor.add_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(2)
        sdc_executor.stop_pipeline(pipeline)
        records = wiretap.output_records
        assert len(records) == 2
        record = records[0]
        null_record = records[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the python
        # wrappers.
        # TLKT-177: Add ability for field to return raw value
        assert record.field['data_column'].type == expected_type
        assert null_record.field['data_column'].type == expected_type
        assert record.field['data_column']._data['value'] == expected_value
        assert null_record.field['data_column'] == None
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database ...', table_name, database.type)
            connection.execute(f"DROP TABLE {table_name}")
        if connection is not None:
            connection.close()
# Rules: https://stackoverflow.com/questions/5808332/sql-server-maximum-character-length-of-object-names
# Each entry: (test id, table name, offset/primary-key column name).
OBJECT_NAMES_SQLSERVER = [
    ('keywords', 'table', 'column'),
    ('lowercase', get_random_string(string.ascii_lowercase, 20), get_random_string(string.ascii_lowercase, 20)),
    ('uppercase', get_random_string(string.ascii_uppercase, 20), get_random_string(string.ascii_uppercase, 20)),
    ('mixedcase', get_random_string(string.ascii_letters, 20), get_random_string(string.ascii_letters, 20)),
    # Capture instance name is limited to 100, - 7 (dbo_ _CT)
    ('max_table_name', get_random_string(string.ascii_letters, 93), get_random_string(string.ascii_letters, 20)),
    ('max_column_name', get_random_string(string.ascii_letters, 20), get_random_string(string.ascii_letters, 128)),
    ('numbers', get_random_string(string.ascii_letters, 5) + "0123456789", get_random_string(string.ascii_letters, 5) + "0123456789"),
    ('special', get_random_string(string.ascii_letters, 5) + "!@#$%^&*()_+=-?<>", get_random_string(string.ascii_letters, 5) + "!@#$%^&*()_+=-?<>"),
]
@database('sqlserver')
@pytest.mark.parametrize('test_name,table_name,offset_name', OBJECT_NAMES_SQLSERVER, ids=[i[0] for i in OBJECT_NAMES_SQLSERVER])
def test_object_names(sdc_builder, sdc_executor, database, test_name, table_name, offset_name, keep_data):
    """Verify the origin handles edge-case table and column identifiers."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')
    connection = database.engine.connect()
    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    wiretap = builder.add_wiretap()
    origin >> wiretap.destination
    pipeline = builder.build().configure_for_environment(database)
    metadata = sqlalchemy.MetaData()
    # quote=True forces SQLAlchemy to quote the identifiers, so keywords and
    # special characters survive the round trip.
    table = sqlalchemy.Table(
        table_name,
        metadata,
        sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)
        logger.info('Adding data into %s database ...', database.type)
        connection.execute(table.insert(), [{offset_name: 1}])
        sdc_executor.add_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(1)
        sdc_executor.stop_pipeline(pipeline)
        records = wiretap.output_records
        assert len(records) == 1
        # SDC Will escape field names with certain characters, but not always...
        if "$" in offset_name:
            assert records[0].field[f'"{offset_name}"'] == 1
        else:
            assert records[0].field[offset_name] == 1
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)
        if connection is not None:
            connection.close()
@database('sqlserver')
@pytest.mark.parametrize('number_of_threads', [1, 10])
def test_multiple_batches(sdc_builder, sdc_executor, database, number_of_threads, keep_data):
    """Read 50 batches of 1000 rows and verify every row arrives exactly once,
    both single-threaded and with 10 reader threads."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')
    connection = database.engine.connect()
    max_batch_size = 1000
    batches = 50
    table_name = get_random_string(string.ascii_lowercase, 20)
    metadata = sqlalchemy.MetaData()
    table = sqlalchemy.Table(
        table_name,
        metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )
    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    origin.max_batch_size_in_records = max_batch_size
    origin.number_of_threads = number_of_threads
    origin.maximum_pool_size = number_of_threads
    wiretap = builder.add_wiretap()
    origin >> wiretap.destination
    # Stop the pipeline automatically once the origin reports no more data.
    finisher = builder.add_stage("Pipeline Finisher Executor")
    finisher.stage_record_preconditions = ["${record:eventType() == 'no-more-data'}"]
    origin >= finisher
    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)
    try:
        logger.info('Creating table %s', table_name)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)
        logger.info('Inserting data into %s', table_name)
        connection.execute(table.insert(), [{'id' : n} for n in range(1, max_batch_size * batches + 1)])
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        records = wiretap.output_records
        assert len(records) == max_batch_size * batches
        # Multithreaded reads can arrive out of order; sort by id before checking.
        records.sort(key=_sort_records)
        expected_number = 1
        for record in records:
            assert record.field['id'] == expected_number
            expected_number = expected_number + 1
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)
        if connection is not None:
            connection.close()
@database('sqlserver')
def test_dataflow_events(sdc_builder, sdc_executor, database, keep_data):
    """Verify table-finished / schema-finished / no-more-data event emission
    across fresh reads, incremental reads, and a pipeline restart."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')
    connection = database.engine.connect()
    table_prefix = get_random_string(string.ascii_lowercase, 20)
    table_a = '{}_a'.format(table_prefix)
    table_b = '{}_b'.format(table_prefix)
    # CDC change-table names as they appear in event records (schema_table_CT).
    table_a_ct = '{}_{}_CT'.format(DEFAULT_SCHEMA_NAME, table_a)
    table_b_ct = '{}_{}_CT'.format(DEFAULT_SCHEMA_NAME, table_b)
    builder = sdc_builder.get_pipeline_builder()
    source = builder.add_stage('SQL Server CDC Client')
    source.fetch_size = 1
    source.table_configs = [
        {'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_a}"},
        {'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_b}"}
    ]
    trash = builder.add_stage('Trash')
    source >> trash
    # Wiretap is attached to the event lane (>=), not the data lane.
    wiretap = builder.add_wiretap()
    source >= wiretap.destination
    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)
    # We need three tables for this test
    metadata = sqlalchemy.MetaData()
    a = sqlalchemy.Table(
        table_a,
        metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )
    b = sqlalchemy.Table(
        table_b,
        metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False, quote=True),
        quote=True
    )
    try:
        logger.info('Creating tables %s and %s in %s database ...', table_a, table_b, database.type)
        a.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_a)
        b.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_b)
        logger.info('Inserting rows into %s and %s', table_a, table_b)
        connection.execute(a.insert(), {'id': 1})
        connection.execute(b.insert(), {'id': 1})
        # Start the pipeline
        status = sdc_executor.start_pipeline(pipeline)
        # Read two records, generate 4 events, 6 records
        status.wait_for_pipeline_output_records_count(6)
        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 4
        # First two events should be table-finished (for any order of the tables though)
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[1].header.values['sdc.event.type'] == 'table-finished'
        table_set = set()
        table_set.add(records[0].field['table'])
        table_set.add(records[1].field['table'])
        assert table_a_ct in table_set
        assert table_b_ct in table_set
        # Then we should have schema done with all the tables
        assert records[2].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[2].field['tables']
        assert table_b_ct in records[2].field['tables']
        # Final event should be no more data
        assert records[3].header.values['sdc.event.type'] == 'no-more-data'
        wiretap.reset()
        # Second iteration - insert one new row
        logger.info('Inserting rows into %s', table_a)
        connection.execute(a.insert(), {'id': 2})
        # 1 record, 3 events more
        status.wait_for_pipeline_output_records_count(10)
        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 3
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[0].field['table'] == table_a_ct
        assert records[1].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[1].field['tables']
        assert table_b_ct in records[1].field['tables']
        assert records[2].header.values['sdc.event.type'] == 'no-more-data'
        # Now let's stop the pipeline and start it again
        # SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
        sdc_executor.stop_pipeline(pipeline)
        # Portable truncate
        wiretap.reset()
        # Start the pipeline and wait for it to read three records (3 events)
        # NOTE(review): we wait for 3 output records but assert 4 events below —
        # confirm whether the wait count should be 4.
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)
        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 4
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[1].header.values['sdc.event.type'] == 'table-finished'
        table_set = set()
        table_set.add(records[0].field['table'])
        table_set.add(records[1].field['table'])
        assert table_a_ct in table_set
        assert table_b_ct in table_set
        assert records[2].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[2].field['tables']
        assert table_b_ct in records[2].field['tables']
        assert records[3].header.values['sdc.event.type'] == 'no-more-data'
        sdc_executor.stop_pipeline(pipeline)
    finally:
        if not keep_data:
            logger.info('Dropping tables %s and %s in %s database...', table_a, table_b, database.type)
            a.drop(database.engine)
            b.drop(database.engine)
        if connection is not None:
            connection.close()
@database('sqlserver')
def test_data_format(sdc_builder, sdc_executor, database, keep_data):
    # The origin reads typed records directly from CDC tables; data formats do not apply.
    pytest.skip("SQL Server CDC Origin doesn't deal with data formats")
@database('sqlserver')
def test_resume_offset(sdc_builder, sdc_executor, database, keep_data):
    """Stop and restart the pipeline across several insert rounds and verify
    each round resumes exactly where the previous one left off."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')
    iterations = 3
    records_per_iteration = 10
    connection = database.engine.connect()
    table_name = get_random_string(string.ascii_lowercase, 20)
    table = sqlalchemy.Table(table_name, sqlalchemy.MetaData(),
                             sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False),
                             schema=DEFAULT_SCHEMA_NAME)
    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    wiretap = builder.add_wiretap()
    origin >> wiretap.destination
    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)
    try:
        logger.info('Creating table %s', table_name)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)
        for iteration in range(0, iterations):
            logger.info(f"Iteration: {iteration}")
            wiretap.reset()
            logger.info('Inserting data into %s', table_name)
            connection.execute(table.insert(), [{'id': n} for n in range(iteration * records_per_iteration + 1, iteration * records_per_iteration + 1 + records_per_iteration)])
            sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(records_per_iteration)
            sdc_executor.stop_pipeline(pipeline)
            records = wiretap.output_records
            # We should get the right number of records
            assert len(records) == records_per_iteration
            # Ids must continue the sequence from the previous iteration.
            expected_number = iteration * records_per_iteration + 1
            for record in records:
                assert record.field['id'].value == expected_number
                expected_number = expected_number + 1
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)
        if connection is not None:
            connection.close()
def _sort_records(entry):
return entry.field['id'].value
def _enable_cdc(connection, schema_name, table_name, capture_instance=None):
    """Enable CDC on ``schema_name.table_name``.

    The capture instance defaults to ``<schema>_<table>``, matching the
    capture-instance names the tests configure on the origin.
    """
    instance = capture_instance if capture_instance is not None else f"{schema_name}_{table_name}"

    logger.info('Enabling CDC on %s.%s into table %s...', schema_name, table_name, instance)
    statement = (
        "EXEC sys.sp_cdc_enable_table "
        f"@source_schema=N'{schema_name}', "
        f"@source_name=N'{table_name}',"
        "@role_name = NULL, "
        f"@capture_instance=N'{instance}'"
    )
    connection.execute(statement)
def _disable_cdc(connection, schema_name, table_name, capture_instance=None):
    """Disable CDC on ``schema_name.table_name`` by dropping its capture instance.

    :param connection: open SQLAlchemy connection used to run the procedure.
    :param schema_name: schema that owns the source table.
    :param table_name: name of the source table.
    :param capture_instance: capture instance to drop; defaults to the
        ``<schema>_<table>`` name that ``_enable_cdc`` creates.
    """
    if capture_instance is None:
        capture_instance = f"{schema_name}_{table_name}"

    logger.info('Disabling CDC on %s.%s from table %s...', schema_name, table_name, capture_instance)
    connection.execute(
        f'EXEC sys.sp_cdc_disable_table '
        f'@source_schema=N\'{schema_name}\', '
        f'@source_name=N\'{table_name}\','
        # Quote as an NVARCHAR literal, consistent with _enable_cdc; the
        # previous unquoted form breaks for capture-instance names that are
        # not plain identifiers.
        f'@capture_instance=N\'{capture_instance}\'')
|
# Copyright 2020 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
import pytest
import sqlalchemy
from streamsets.sdk.utils import Version
from streamsets.testframework.markers import database, sdc_min_version
from streamsets.testframework.utils import get_random_string
@pytest.fixture(scope='module')
def sdc_builder_hook():
    """Module-scoped hook that fixes the data collector JVM heap at 2 GiB."""
    def hook(data_collector):
        data_collector.SDC_JAVA_OPTS = '-Xmx2048m -Xms2048m'
    return hook
logger = logging.getLogger(__name__)

# Default SQL Server schema the tests create objects in.
DEFAULT_SCHEMA_NAME = 'dbo'

# Type-mapping matrix for test_data_types:
# (SQL Server type, INSERT literal, expected SDC field type, expected field value).
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types not supported
# Geometry and geography not supported
DATA_TYPES_SQLSERVER = [
    ('DATE', "'2019-01-01'", 'DATE', 1546300800000),
    ('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
    ('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
    # Resolved at runtime: ZONED_DATETIME on SDC >= 3.14, STRING before that.
    ('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
    ('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
    ('TIME', "'14:25:10'", 'TIME', 51910000),
    ('BIT', "1", 'BOOLEAN', True),
    ('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
    ('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
    ('REAL', '5.20', 'FLOAT', '5.2'),
    ('FLOAT', '5.20', 'DOUBLE', '5.2'),
    ('TINYINT', '255', 'SHORT', 255),
    ('SMALLINT', '-32768', 'SHORT', -32768),
    ('INT', '-2147483648', 'INTEGER', '-2147483648'),
    ('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
    ('MONEY', '255.60', 'DECIMAL', '255.6000'),
    ('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
    ('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
    ('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
    ('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
    ('TEXT', "'Hello'", 'STRING', 'Hello'),
    ('NTEXT', "'Hello'", 'STRING', 'Hello'),
    ('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
    # ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='), # Not supported
    # ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='), # Not supported
    ('XML', "'<a></a>'", 'STRING', '<a/>')
]
@sdc_min_version('3.0.0.0')
@database('sqlserver')
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', DATA_TYPES_SQLSERVER, ids=[i[0] for i in DATA_TYPES_SQLSERVER])
def test_data_types(sdc_builder, sdc_executor, database, sql_type, insert_fragment, expected_type, expected_value, keep_data):
    """Insert one concrete value and one NULL for each SQL Server type and
    verify the origin maps them to the expected SDC field type and value."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')

    table_name = get_random_string(string.ascii_lowercase, 20)
    connection = database.engine.connect()

    try:
        # Create table
        connection.execute(f"""
            CREATE TABLE {table_name}(
                id int primary key,
                data_column {sql_type} NULL
            )
        """)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)

        # And insert a row with actual value
        connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")

        # And a null
        connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")

        builder = sdc_builder.get_pipeline_builder()

        origin = builder.add_stage('SQL Server CDC Client')
        origin.fetch_size = 1
        origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]

        wiretap = builder.add_wiretap()

        # As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
        if sql_type == 'DATETIMEOFFSET':
            if Version(sdc_executor.version) >= Version('3.14.0'):
                expected_type = 'ZONED_DATETIME'
                expected_value = '2004-05-23T14:25:10.3456-08:00'
            else:
                expected_type = 'STRING'
                expected_value = '2004-05-23 14:25:10.3456 -08:00'
                # This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
                origin.on_unknown_type = 'CONVERT_TO_STRING'

        origin >> wiretap.destination

        pipeline = builder.build().configure_for_environment(database)
        # Register the pipeline exactly once (the original code called
        # add_pipeline twice in a row, which is at best redundant).
        sdc_executor.add_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(2)
        sdc_executor.stop_pipeline(pipeline)

        records = wiretap.output_records
        assert len(records) == 2
        record = records[0]
        null_record = records[1]

        # Since we are controlling types, we want to check explicit values inside the record rather than the python
        # wrappers.
        # TLKT-177: Add ability for field to return raw value
        assert record.field['data_column'].type == expected_type
        assert null_record.field['data_column'].type == expected_type

        assert record.field['data_column']._data['value'] == expected_value
        # Intentionally '==' (not 'is'): the SDK field wrapper defines equality
        # against its contained value.
        assert null_record.field['data_column'] == None
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database ...', table_name, database.type)
            connection.execute(f"DROP TABLE {table_name}")
        if connection is not None:
            connection.close()
# Object-name matrix for test_object_names:
# (test id, table name, offset/primary-key column name).
# Rules: https://stackoverflow.com/questions/5808332/sql-server-maximum-character-length-of-object-names
# Rules:
OBJECT_NAMES_SQLSERVER = [
    ('keywords', 'table', 'column'),
    ('lowercase', get_random_string(string.ascii_lowercase, 20), get_random_string(string.ascii_lowercase, 20)),
    ('uppercase', get_random_string(string.ascii_uppercase, 20), get_random_string(string.ascii_uppercase, 20)),
    ('mixedcase', get_random_string(string.ascii_letters, 20), get_random_string(string.ascii_letters, 20)),
    # Capture instance name is limited to 100, - 7 (dbo_ _CT)
    ('max_table_name', get_random_string(string.ascii_letters, 93), get_random_string(string.ascii_letters, 20)),
    ('max_column_name', get_random_string(string.ascii_letters, 20), get_random_string(string.ascii_letters, 128)),
    ('numbers', get_random_string(string.ascii_letters, 5) + "0123456789", get_random_string(string.ascii_letters, 5) + "0123456789"),
    ('special', get_random_string(string.ascii_letters, 5) + "!@#$%^&*()_+=-?<>", get_random_string(string.ascii_letters, 5) + "!@#$%^&*()_+=-?<>"),
]
@database('sqlserver')
@pytest.mark.parametrize('test_name,table_name,offset_name', OBJECT_NAMES_SQLSERVER, ids=[i[0] for i in OBJECT_NAMES_SQLSERVER])
def test_object_names(sdc_builder, sdc_executor, database, test_name, table_name, offset_name, keep_data):
    """Verify that tables and columns with unusual names survive the CDC round trip."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')

    connection = database.engine.connect()

    pipeline_builder = sdc_builder.get_pipeline_builder()
    origin = pipeline_builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    wiretap = pipeline_builder.add_wiretap()
    origin >> wiretap.destination
    pipeline = pipeline_builder.build().configure_for_environment(database)

    table = sqlalchemy.Table(
        table_name,
        sqlalchemy.MetaData(),
        sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )
    try:
        logger.info('Creating table %s in %s database ...', table_name, database.type)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)

        logger.info('Adding data into %s database ...', database.type)
        connection.execute(table.insert(), [{offset_name: 1}])

        sdc_executor.add_pipeline(pipeline)
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(1)
        sdc_executor.stop_pipeline(pipeline)

        output = wiretap.output_records
        assert len(output) == 1

        # SDC Will escape field names with certain characters, but not always...
        field_key = f'"{offset_name}"' if "$" in offset_name else offset_name
        assert output[0].field[field_key] == 1
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)

        if connection is not None:
            connection.close()
@database('sqlserver')
@pytest.mark.parametrize('number_of_threads', [1, 10])
def test_multiple_batches(sdc_builder, sdc_executor, database, number_of_threads, keep_data):
    """Read many batches (single- and multi-threaded) and verify no rows are
    lost or duplicated."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')

    connection = database.engine.connect()
    max_batch_size = 1000
    batches = 50
    total_rows = max_batch_size * batches

    table_name = get_random_string(string.ascii_lowercase, 20)
    table = sqlalchemy.Table(
        table_name,
        sqlalchemy.MetaData(),
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )

    builder = sdc_builder.get_pipeline_builder()

    origin = builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    origin.max_batch_size_in_records = max_batch_size
    origin.number_of_threads = number_of_threads
    origin.maximum_pool_size = number_of_threads

    wiretap = builder.add_wiretap()
    origin >> wiretap.destination

    # Stop the pipeline automatically once everything has been read.
    finisher = builder.add_stage("Pipeline Finisher Executor")
    finisher.stage_record_preconditions = ["${record:eventType() == 'no-more-data'}"]
    origin >= finisher

    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)

    try:
        logger.info('Creating table %s', table_name)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)

        logger.info('Inserting data into %s', table_name)
        connection.execute(table.insert(), [{'id': n} for n in range(1, total_rows + 1)])

        sdc_executor.start_pipeline(pipeline).wait_for_finished()

        records = wiretap.output_records
        assert len(records) == total_rows

        # Threads may deliver rows out of order; sort before comparing.
        records.sort(key=_sort_records)
        for expected, record in enumerate(records, start=1):
            assert record.field['id'] == expected
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)

        if connection is not None:
            connection.close()
@database('sqlserver')
def test_dataflow_events(sdc_builder, sdc_executor, database, keep_data):
    """Verify table-finished / schema-finished / no-more-data events across
    an initial read, an incremental insert, and a pipeline restart."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')

    connection = database.engine.connect()
    table_prefix = get_random_string(string.ascii_lowercase, 20)
    table_a = '{}_a'.format(table_prefix)
    table_b = '{}_b'.format(table_prefix)
    # Events report the CDC change-table names (…_CT), not the source tables.
    table_a_ct = '{}_{}_CT'.format(DEFAULT_SCHEMA_NAME, table_a)
    table_b_ct = '{}_{}_CT'.format(DEFAULT_SCHEMA_NAME, table_b)

    builder = sdc_builder.get_pipeline_builder()
    source = builder.add_stage('SQL Server CDC Client')
    source.fetch_size = 1
    source.table_configs = [
        {'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_a}"},
        {'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_b}"}
    ]
    trash = builder.add_stage('Trash')
    source >> trash

    # Wiretap is attached to the event lane (>=), so it only sees events.
    wiretap = builder.add_wiretap()
    source >= wiretap.destination

    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)

    # We need two tables for this test (one with and one without a primary key).
    metadata = sqlalchemy.MetaData()
    a = sqlalchemy.Table(
        table_a,
        metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
    )
    b = sqlalchemy.Table(
        table_b,
        metadata,
        sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False, quote=True),
        quote=True
    )

    try:
        logger.info('Creating tables %s and %s in %s database ...', table_a, table_b, database.type)
        a.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_a)
        b.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_b)

        logger.info('Inserting rows into %s and %s', table_a, table_b)
        connection.execute(a.insert(), {'id': 1})
        connection.execute(b.insert(), {'id': 1})

        # Start the pipeline
        status = sdc_executor.start_pipeline(pipeline)

        # Read two records, generate 4 events, 6 records
        status.wait_for_pipeline_output_records_count(6)

        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 4

        # First two events should be table-finished (for any order of the tables though)
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[1].header.values['sdc.event.type'] == 'table-finished'
        table_set = set()
        table_set.add(records[0].field['table'])
        table_set.add(records[1].field['table'])
        assert table_a_ct in table_set
        assert table_b_ct in table_set

        # Then we should have schema done with all the tables
        assert records[2].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[2].field['tables']
        assert table_b_ct in records[2].field['tables']

        # Final event should be no more data
        assert records[3].header.values['sdc.event.type'] == 'no-more-data'

        wiretap.reset()

        # Second iteration - insert one new row
        logger.info('Inserting rows into %s', table_a)
        connection.execute(a.insert(), {'id': 2})

        # 1 record, 3 events more (cumulative output count reaches 10)
        status.wait_for_pipeline_output_records_count(10)

        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 3
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[0].field['table'] == table_a_ct
        assert records[1].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[1].field['tables']
        assert table_b_ct in records[1].field['tables']
        assert records[2].header.values['sdc.event.type'] == 'no-more-data'

        # Now let's stop the pipeline and start it again
        # SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
        sdc_executor.stop_pipeline(pipeline)

        # Portable truncate
        wiretap.reset()

        # Restart the pipeline; on restart all four events are emitted again
        # (two table-finished, schema-finished, no-more-data).
        # NOTE(review): the wait below only asks for 3 output records while the
        # assertion expects 4 — presumably the fourth has arrived by the time
        # the wiretap is read; confirm this is not a timing hazard.
        sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)

        # Force lexicographically reverse order (table-finished, schema-finished, no-more-data)
        records = sorted(wiretap.output_records, key=lambda row: row.header.values['sdc.event.type'], reverse=True)
        assert len(records) == 4
        assert records[0].header.values['sdc.event.type'] == 'table-finished'
        assert records[1].header.values['sdc.event.type'] == 'table-finished'
        table_set = set()
        table_set.add(records[0].field['table'])
        table_set.add(records[1].field['table'])
        assert table_a_ct in table_set
        assert table_b_ct in table_set
        assert records[2].header.values['sdc.event.type'] == 'schema-finished'
        assert table_a_ct in records[2].field['tables']
        assert table_b_ct in records[2].field['tables']
        assert records[3].header.values['sdc.event.type'] == 'no-more-data'

        sdc_executor.stop_pipeline(pipeline)
    finally:
        if not keep_data:
            logger.info('Dropping tables %s and %s in %s database...', table_a, table_b, database.type)
            a.drop(database.engine)
            b.drop(database.engine)

        if connection is not None:
            connection.close()
@database('sqlserver')
def test_data_format(sdc_builder, sdc_executor, database, keep_data):
    # The SQL Server CDC origin reads typed change-table rows directly, so
    # there is no data-format configuration to exercise for this origin.
    pytest.skip("SQL Server CDC Origin doesn't deal with data formats")
@database('sqlserver')
def test_resume_offset(sdc_builder, sdc_executor, database, keep_data):
    """Insert rows in several rounds, restarting the pipeline between rounds,
    and verify each run resumes from the stored offset (no gaps, no repeats)."""
    if not database.is_cdc_enabled:
        pytest.skip('Test only runs against SQL Server with CDC enabled.')

    total_iterations = 3
    batch_rows = 10
    connection = database.engine.connect()

    table_name = get_random_string(string.ascii_lowercase, 20)
    table = sqlalchemy.Table(table_name, sqlalchemy.MetaData(),
                             sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False),
                             schema=DEFAULT_SCHEMA_NAME)

    builder = sdc_builder.get_pipeline_builder()
    origin = builder.add_stage('SQL Server CDC Client')
    origin.fetch_size = 1
    origin.table_configs = [{'capture_instance': f"{DEFAULT_SCHEMA_NAME}_{table_name}"}]
    wiretap = builder.add_wiretap()
    origin >> wiretap.destination
    pipeline = builder.build().configure_for_environment(database)
    sdc_executor.add_pipeline(pipeline)

    try:
        logger.info('Creating table %s', table_name)
        table.create(connection.engine)
        _enable_cdc(connection, DEFAULT_SCHEMA_NAME, table_name)

        for iteration in range(0, total_iterations):
            logger.info(f"Iteration: {iteration}")
            wiretap.reset()

            logger.info('Inserting data into %s', table_name)
            first_id = iteration * batch_rows + 1
            connection.execute(table.insert(), [{'id': n} for n in range(first_id, first_id + batch_rows)])

            sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(batch_rows)
            sdc_executor.stop_pipeline(pipeline)

            records = wiretap.output_records

            # Exactly one round's worth of rows, in insertion order.
            assert len(records) == batch_rows
            for offset, record in enumerate(records):
                assert record.field['id'].value == first_id + offset
    finally:
        if not keep_data:
            logger.info('Dropping table %s in %s database...', table_name, database.type)
            table.drop(database.engine)

        if connection is not None:
            connection.close()
def _sort_records(entry):
return entry.field['id'].value
def _enable_cdc(connection, schema_name, table_name, capture_instance=None):
    """Enable CDC on ``schema_name.table_name``.

    The capture instance defaults to ``<schema>_<table>``, matching the
    capture-instance names the tests configure on the origin.
    """
    instance = capture_instance if capture_instance is not None else f"{schema_name}_{table_name}"

    logger.info('Enabling CDC on %s.%s into table %s...', schema_name, table_name, instance)
    statement = (
        "EXEC sys.sp_cdc_enable_table "
        f"@source_schema=N'{schema_name}', "
        f"@source_name=N'{table_name}',"
        "@role_name = NULL, "
        f"@capture_instance=N'{instance}'"
    )
    connection.execute(statement)
def _disable_cdc(connection, schema_name, table_name, capture_instance=None):
    """Disable CDC on ``schema_name.table_name`` by dropping its capture instance.

    :param connection: open SQLAlchemy connection used to run the procedure.
    :param schema_name: schema that owns the source table.
    :param table_name: name of the source table.
    :param capture_instance: capture instance to drop; defaults to the
        ``<schema>_<table>`` name that ``_enable_cdc`` creates.
    """
    if capture_instance is None:
        capture_instance = f"{schema_name}_{table_name}"

    logger.info('Disabling CDC on %s.%s from table %s...', schema_name, table_name, capture_instance)
    connection.execute(
        f'EXEC sys.sp_cdc_disable_table '
        f'@source_schema=N\'{schema_name}\', '
        f'@source_name=N\'{table_name}\','
        # Quote as an NVARCHAR literal, consistent with _enable_cdc; the
        # previous unquoted form breaks for capture-instance names that are
        # not plain identifiers.
        f'@capture_instance=N\'{capture_instance}\'')
|
en
| 0.742677
|
# Copyright 2020 StreamSets Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017 # hiearchyid types not supported # Geometry and geography not supported # ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='), # Not supported # ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='), # Not supported # Create table CREATE TABLE {table_name}( id int primary key, data_column {sql_type} NULL ) # And insert a row with actual value # And a null # As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME # This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail. # Since we are controlling types, we want to check explicit values inside the record rather the the python # wrappers. 
# TLKT-177: Add ability for field to return raw value # Rules: https://stackoverflow.com/questions/5808332/sql-server-maximum-character-length-of-object-names # Rules: # Capture instance name is limited to 100, - 7 (dbo_ _CT) #$%^&*()_+=-?<>", get_random_string(string.ascii_letters, 5) + "!@#$%^&*()_+=-?<>"), # SDC Will escape field names with certain characters, but not always... # We need three tables for this test # Start the pipeline # Read two records, generate 4 events, 6 records # Force lexicographically reverse order (table-finished, schema-finished, no-more-data) # First two events should be table-finished (for any order of the tables though) # Then we should have schema done with all the tables # Final event should be no more data # Second iteration - insert one new row # 1 record, 3 events more # Force lexicographically reverse order (table-finished, schema-finished, no-more-data) # Now let's stop the pipeline and start it again # SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event # Portable truncate # Start the pipeline and wait for it to read three records (3 events) # Force lexicographically reverse order (table-finished, schema-finished, no-more-data) # We should get the right number of records
| 1.685212
| 2
|
hydrus/app.py
|
king-11/hydrus
| 0
|
6627522
|
"""Main route for the application"""
import logging
from sqlalchemy import create_engine
from gevent.pywsgi import WSGIServer
from sqlalchemy.orm import sessionmaker
from hydrus.app_factory import app_factory
from hydrus.conf import (
HYDRUS_SERVER_URL, API_NAME, DB_URL, APIDOC_OBJ, PORT, DEBUG)
from hydrus.data import doc_parse
from hydrus.data.db_models import Base, create_database_tables
from hydrus.data.exceptions import UserExists
from hydrus.data.user import add_user
from hydra_python_core import doc_maker
from hydrus.utils import (
set_session, set_doc, set_hydrus_server_url,
set_token, set_api_name, set_authentication)
from hydrus.socketio_factory import create_socket
# NOTE(review): getLogger(__file__) names the logger after the file path;
# getLogger(__name__) is the usual convention — confirm this is intentional.
logger = logging.getLogger(__file__)

# TODO: loading the engine and creating the tables should be handled better
engine = create_engine(DB_URL)
session = sessionmaker(bind=engine)()

#
# Load ApiDoc with doc_maker
#
apidoc = doc_maker.create_doc(APIDOC_OBJ, HYDRUS_SERVER_URL, API_NAME)
classes = doc_parse.get_classes(apidoc)
try:
    # Recreate the schema from scratch on every start.
    Base.metadata.drop_all(engine)
    create_database_tables(classes)
    Base.metadata.create_all(engine)
# NOTE(review): this broad except silently swallows any schema-creation
# error; consider at least logging the exception before continuing.
except Exception:
    pass

AUTH = True
TOKEN = True

if AUTH:
    try:
        add_user(id_=1, paraphrase="test", session=session)
    except UserExists:
        # The default user already exists; nothing to do.
        pass

# Create a Hydrus app
app = app_factory(API_NAME)
socketio = create_socket(app, session)

#
# Nested context managers
#
# Use authentication for all requests
# Set the API Documentation
# Set HYDRUS_SERVER_URL
# Set the Database session
with set_authentication(app, AUTH), set_token(app, TOKEN), \
        set_api_name(app, API_NAME), set_doc(app, apidoc), \
        set_hydrus_server_url(app, HYDRUS_SERVER_URL), set_session(app, session):
    if __name__ == "__main__":
        # this is run only if development server is run
        # Set the name of the API
        socketio.run(app=app, debug=True, port=PORT)
    else:
        # Start the Hydrus app under gevent's WSGI server (production path).
        http_server = WSGIServer(('', PORT), app)
        logger.info(f'Running server at port {PORT}')
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            # Allow a clean Ctrl-C shutdown.
            pass
|
"""Main route for the application"""
import logging
from sqlalchemy import create_engine
from gevent.pywsgi import WSGIServer
from sqlalchemy.orm import sessionmaker
from hydrus.app_factory import app_factory
from hydrus.conf import (
HYDRUS_SERVER_URL, API_NAME, DB_URL, APIDOC_OBJ, PORT, DEBUG)
from hydrus.data import doc_parse
from hydrus.data.db_models import Base, create_database_tables
from hydrus.data.exceptions import UserExists
from hydrus.data.user import add_user
from hydra_python_core import doc_maker
from hydrus.utils import (
set_session, set_doc, set_hydrus_server_url,
set_token, set_api_name, set_authentication)
from hydrus.socketio_factory import create_socket
# NOTE(review): getLogger(__file__) names the logger after the file path;
# getLogger(__name__) is the usual convention — confirm this is intentional.
logger = logging.getLogger(__file__)

# TODO: loading the engine and creating the tables should be handled better
engine = create_engine(DB_URL)
session = sessionmaker(bind=engine)()

#
# Load ApiDoc with doc_maker
#
apidoc = doc_maker.create_doc(APIDOC_OBJ, HYDRUS_SERVER_URL, API_NAME)
classes = doc_parse.get_classes(apidoc)
try:
    # Recreate the schema from scratch on every start.
    Base.metadata.drop_all(engine)
    create_database_tables(classes)
    Base.metadata.create_all(engine)
# NOTE(review): this broad except silently swallows any schema-creation
# error; consider at least logging the exception before continuing.
except Exception:
    pass

AUTH = True
TOKEN = True

if AUTH:
    try:
        add_user(id_=1, paraphrase="test", session=session)
    except UserExists:
        # The default user already exists; nothing to do.
        pass

# Create a Hydrus app
app = app_factory(API_NAME)
socketio = create_socket(app, session)

#
# Nested context managers
#
# Use authentication for all requests
# Set the API Documentation
# Set HYDRUS_SERVER_URL
# Set the Database session
with set_authentication(app, AUTH), set_token(app, TOKEN), \
        set_api_name(app, API_NAME), set_doc(app, apidoc), \
        set_hydrus_server_url(app, HYDRUS_SERVER_URL), set_session(app, session):
    if __name__ == "__main__":
        # this is run only if development server is run
        # Set the name of the API
        socketio.run(app=app, debug=True, port=PORT)
    else:
        # Start the Hydrus app under gevent's WSGI server (production path).
        http_server = WSGIServer(('', PORT), app)
        logger.info(f'Running server at port {PORT}')
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            # Allow a clean Ctrl-C shutdown.
            pass
|
en
| 0.72093
|
Main route for the application # TODO: loading the engine and creating the tables should be handled better # # Load ApiDoc with doc_maker # # Create a Hydrus app # # Nested context managers # # Use authentication for all requests # Set the API Documentation # Set HYDRUS_SERVER_URL # Set the Database session # this is run only if development server is run # Set the name of the API # Start the Hydrus app
| 2.169068
| 2
|
examples/virus/influenza_analysis.py
|
sys-bio/SBstoat
| 0
|
6627523
|
#!/usr/bin/env python
# coding: utf-8
# # Fitting Parameters for Influenza Data
# ## Overview
#
# This is a study of the Influenza data. The analysis provides plots of fits and parameter estimates. Two models are considered.
#
# ### Influenza Data (Influenza.csv)
#
# - 6 patients
# - Viral levels in log10(TCID50 / ml of nasal wash)
# - Measurements taken a successive days since the exposure of the volunteers to an attenuated strain of H1N1"
# - Columns are patients
#
# ### State variables
#
# - $T$: number of target cells
# - $E$: number of exposed cells (virus replicating inside, not yet spreading virus)
# - $I$: number of infected cells (active virus production)
# - $V$: viral titre, in units of TCID50/ml of biofluid wash (for Influenza)
#
# ### Baseline Model: $T \rightarrow E \rightarrow I \rightarrow \emptyset$
# $\frac{dT}{dt} = - \beta T V$
#
# $\frac{dE}{dt} = \beta T V - \kappa E$
#
# $\frac{dI}{dt} = \kappa E - \delta I$
#
# $\frac{dV}{dt} = p y(I) - c y(V)$
#
# ### Simplified Model: $T \rightarrow I \rightarrow \emptyset$
# $\frac{dT}{dt} = - \beta T V$
#
# $\frac{dI}{dt} = \beta T V - \delta I$
#
# $\frac{dV}{dt} = p y(I) - c y(V)$
#
print("# In[1]:")

# Python packages used
import os
import numpy as np
import pandas as pd
import SBstoat
from SBstoat.modelStudy import ModelStudy
import matplotlib
# NOTE(review): presumably selects the Tk backend so plots work when run as a
# script rather than a notebook — confirm the backend choice is intentional.
matplotlib.use('TkAgg')

print("# In[2]:")

# Programming Constants Used in Analysis. Constants are in all capital letters.
USE_SERIALIZED = False  # Use saved values of fitting from a previous bootstrap (if present)
DO_SERIALIZE = False  # Update the saved values of fitted data
DIR = "."  # Directory where the data are
FILE_NAME = "Influenza.csv"  # Name of the file containing the observed data
NUM_BOOTSTRAP_ITERATION = 1000  # Number of bootstrap iterations, if bootstrapping is done
VIRUS = "log10V"  # Name of the state variable that corresponds to the observed data
# ## Study for Baseline Model
print("# In[3]:")

# Antimony source for the baseline T -> E -> I model; parameter values and
# initial conditions are documented inline in the model text itself.
ANTIMONY_MODEL = '''
// Equations
E1: T -> E ; beta*T*V ; // Target cells to exposed
E2: E -> I ; kappa*E ; // Exposed cells to infected
E3: -> V ; p*I ; // Virus production by infected cells
E4: V -> ; c*V ; // Virus clearance
E5: I -> ; delta*I // Death of infected cells
// Parameters - from the Influenza article,
beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day
kappa = 4.0; // rate of transition from exposed(E) to infected(I) cells, in units of 1/day
delta = 5.2; // rate of death of infected cells(I), in units of 1/day
p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day
c = 5.2; // rate of virus clearance, in units of 1/day
// Initial conditions
T = 4E+8 // estimate of the total number of susceptible epithelial cells
// in upper respiratory tract)
E = 0
I = 0
V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection
// Computed values
log10V := log10(V)
'''
# ### 1. Data Setup
# Data are from B<NAME> et al., Kinetics of Influenza A Virus Infection in Humans. Journal of Virology. 2006 Aug 1;80(15):7590–9. Specifically, Influenza A (WT HK/123/77 (H1N1)) data from Table 1.
# These data are:
#
# ``
# 2,5.5,4,5.5,3,0,0
# 1,6,3,1.5,3.5,1.3,0
# 2.5,5,5,3,5.5,3.5,0
# 3.5,5.5,6.5,5.5,3.5,4,0
# 2.5,3,6.5,6.5,2,0.8,0
# 4,5,5.5,7.5,5.5,1.3,0
# ``
#
# The rows are patients; the columns are times. We need to create separate data for each patient.
print("# In[4]:")
# Transform the input data into separate data sources.
path = os.path.join(DIR, FILE_NAME)
patients = ["P1", "P2", "P3", "P4", "P5", "P6"]
dataSourceDct = SBstoat.modelStudy.mkDataSourceDct(path, VIRUS,
dataSourceNames=patients, isTimeColumns=True)
print("# In[5]:")
# dataSourceDct is a python dictionary. The key is 'Pn', where n is the patient number.
# The value is a time series for that patient.
dataSourceDct
# ### 2. Transform the simulation results to units of observed values
# The observed values are in units of log10. So, simulation results must
# be converted to these units. This is done by using an assignment rule in the simulation model.
# For this model, the assignmnt rule is ``log10V := log10(V)``.
# ### 3. Specify permissible values for parameters
# For each parameter, provide a tuple of its: lower bound, upper bound, and starting value.
print("# In[6]:")
# Parameter value ranges: lower, upper, initial value
parameterDct = dict(
beta=(0, 10e-5, 3.2e-5),
kappa=(0, 10, 4.0),
delta=(0, 10, 5.2),
p=(0, 1, 4.6e-2),
c=(0, 10, 5.2)
)
# ### 4. Run the model and produce plots.
print("# In[7]:")
# Run a study
def runStudy(model, dirStudyPath, filterSL=None):
    """Bootstrap-fit *model* against every patient data source and plot.

    :param model: Antimony model string; must define the ``log10V``
        assignment rule so simulation output matches the observed units
    :param dirStudyPath: directory where bootstrap results are (de)serialized
    :param filterSL: significance level used to filter bootstrap fits
        (passed through to ``ModelStudy.bootstrap``)

    Reads the module-level ``dataSourceDct``, ``parameterDct`` and the
    serialization constants at call time.
    """
    study = ModelStudy(model, # Antimony model to evaluate
        dataSourceDct, # Data sources to use for fitting
        parameterDct=parameterDct, # Parameters and their value ranges
        dirStudyPath=dirStudyPath, # Where to store the results of bootstrapping
        selectedColumns=["log10V"], # Output column is computed in the assignment rule
        doSerialize=DO_SERIALIZE, # Save the results of bootstrapping
        useSerialized=USE_SERIALIZED) # Use previously calculated bootstrap results if they are present
    study.bootstrap(numIteration=NUM_BOOTSTRAP_ITERATION, filterSL=filterSL) # Do bootstrapping
    print("\n\n")
    study.plotFitAll() # Plot fitted and observed values with band plots for confidence
    print("\n\n")
    study.plotParameterEstimates() # Plot the parameter estimates for each data source
print("# In[8]:")
# Fit the baseline model; bootstrap fits below the 1% significance level are filtered out.
dirStudyPath = os.path.join(DIR, "ModelStudyFitters_01")
runStudy(ANTIMONY_MODEL, dirStudyPath, filterSL=0.01)
# ## Simplified Model: $T \rightarrow I \rightarrow \emptyset$
# The model is updated to remove the state variable E and the parameter $\kappa$.
print("# In[9]:")
# Parameter value ranges: lower, upper, initial value
# (rebinds the module-level parameterDct that runStudy() reads at call time)
parameterDct = dict(
    beta=(0, 10e-5, 3.2e-5),
    delta=(0, 10, 5.2),
    p=(0, 1, 4.6e-2),
    c=(0, 10, 5.2)
)
print("# In[10]:")
# Antimony model without the exposed-cell compartment (T -> I directly).
SIMPLIFIED_MODEL = '''
// Equations
E1: T -> I ; beta*T*V ; // Target cells to exposed
E3: -> V ; p*I ; // Virus production by infected cells
E4: V -> ; c*V ; // Virus clearance
E5: I -> ; delta*I // Death of infected cells
// Parameters - from the Influenza article,
beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day
delta = 5.2; // rate of death of infected cells(I), in units of 1/day
p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day
c = 5.2; // rate of virus clearance, in units of 1/day
// Initial conditions
T = 4E+8 // estimate of the total number of susceptible epithelial cells
// in upper respiratory tract)
I = 0
V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection
// Computed values
log10V := log10(V)
'''
# ### Step 4: Run the model and produce plots
print("# In[11]:")
dirStudyPath = os.path.join(DIR, "SimpleModelFitters_01")
runStudy(SIMPLIFIED_MODEL, dirStudyPath, filterSL=0.01)
|
#!/usr/bin/env python
# coding: utf-8
# # Fitting Parameters for Influenza Data
# ## Overview
#
# This is a study of the Influenza data. The analysis provides plots of fits and parameter estimates. Two models are considered.
#
# ### Influenza Data (Influenza.csv)
#
# - 6 patients
# - Viral levels in log10(TCID50 / ml of nasal wash)
# - Measurements taken a successive days since the exposure of the volunteers to an attenuated strain of H1N1"
# - Columns are patients
#
# ### State variables
#
# - $T$: number of target cells
# - $E$: number of exposed cells (virus replicating inside, not yet spreading virus)
# - $I$: number of infected cells (active virus production)
# - $V$: viral titre, in units of TCID50/ml of biofluid wash (for Influenza)
#
# ### Baseline Model: $T \rightarrow E \rightarrow I \rightarrow \emptyset$
# $\frac{dT}{dt} = - \beta T V$
#
# $\frac{dE}{dt} = \beta T V - \kappa E$
#
# $\frac{dI}{dt} = \kappa E - \delta I$
#
# $\frac{dV}{dt} = p y(I) - c y(V)$
#
# ### Simplified Model: $T \rightarrow I \rightarrow \emptyset$
# $\frac{dT}{dt} = - \beta T V$
#
# $\frac{dI}{dt} = \beta T V - \delta I$
#
# $\frac{dV}{dt} = p y(I) - c y(V)$
#
# --- Notebook export: print("# In[n]:") calls echo the original cell boundaries.
print("# In[1]:")
# Python packages used
import os
import numpy as np
import pandas as pd
import SBstoat
from SBstoat.modelStudy import ModelStudy
import matplotlib
matplotlib.use('TkAgg')  # select the backend before any pyplot import
print("# In[2]:")
# Programming Constants Used in Analysis. Constants are in all capital letters.
USE_SERIALIZED = False # Use saved values of fitting from a previous bootstrap (if present)
DO_SERIALIZE = False # Update the saved values of fitted data
DIR = "." # Directory where the data are
FILE_NAME = "Influenza.csv" # Name of the file containing the observed data
NUM_BOOTSTRAP_ITERATION = 1000 # Number of bootstrap iterations, if bootstrapping is done
VIRUS = "log10V" # Name of the state variable that corresponds to the observed data
# ## Study for Baseline Model
print("# In[3]:")
# Antimony model string for the baseline T -> E -> I dynamics (consumed by SBstoat).
ANTIMONY_MODEL = '''
// Equations
E1: T -> E ; beta*T*V ; // Target cells to exposed
E2: E -> I ; kappa*E ; // Exposed cells to infected
E3: -> V ; p*I ; // Virus production by infected cells
E4: V -> ; c*V ; // Virus clearance
E5: I -> ; delta*I // Death of infected cells
// Parameters - from the Influenza article,
beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day
kappa = 4.0; // rate of transition from exposed(E) to infected(I) cells, in units of 1/day
delta = 5.2; // rate of death of infected cells(I), in units of 1/day
p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day
c = 5.2; // rate of virus clearance, in units of 1/day
// Initial conditions
T = 4E+8 // estimate of the total number of susceptible epithelial cells
// in upper respiratory tract)
E = 0
I = 0
V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection
// Computed values
log10V := log10(V)
'''
# ### 1. Data Setup
# Data are from B<NAME> et al., Kinetics of Influenza A Virus Infection in Humans. Journal of Virology. 2006 Aug 1;80(15):7590–9. Specifically, Influenza A (WT HK/123/77 (H1N1)) data from Table 1.
# These data are:
#
# ``
# 2,5.5,4,5.5,3,0,0
# 1,6,3,1.5,3.5,1.3,0
# 2.5,5,5,3,5.5,3.5,0
# 3.5,5.5,6.5,5.5,3.5,4,0
# 2.5,3,6.5,6.5,2,0.8,0
# 4,5,5.5,7.5,5.5,1.3,0
# ``
#
# The rows are patients; the columns are times. We need to create separate data for each patient.
print("# In[4]:")
# Transform the input data into separate data sources.
path = os.path.join(DIR, FILE_NAME)
patients = ["P1", "P2", "P3", "P4", "P5", "P6"]
dataSourceDct = SBstoat.modelStudy.mkDataSourceDct(path, VIRUS,
        dataSourceNames=patients, isTimeColumns=True)
print("# In[5]:")
# dataSourceDct is a python dictionary. The key is 'Pn', where n is the patient number.
# The value is a time series for that patient.
dataSourceDct  # bare expression: a notebook display statement, no effect as a script
# ### 2. Transform the simulation results to units of observed values
# The observed values are in units of log10. So, simulation results must
# be converted to these units. This is done by using an assignment rule in the simulation model.
# For this model, the assignment rule is ``log10V := log10(V)``.
# ### 3. Specify permissible values for parameters
# For each parameter, provide a tuple of its: lower bound, upper bound, and starting value.
print("# In[6]:")
# Parameter value ranges: lower, upper, initial value
parameterDct = dict(
    beta=(0, 10e-5, 3.2e-5),
    kappa=(0, 10, 4.0),
    delta=(0, 10, 5.2),
    p=(0, 1, 4.6e-2),
    c=(0, 10, 5.2)
)
# ### 4. Run the model and produce plots.
print("# In[7]:")
# Run a study
def runStudy(model, dirStudyPath, filterSL=None):
    """Fit *model* to every patient data source, bootstrap, and plot.

    :param model: Antimony model string (must define the ``log10V``
        assignment rule so output matches the observed units)
    :param dirStudyPath: directory in which bootstrap results are stored
    :param filterSL: significance level used to filter bootstrap fits
    """
    study = ModelStudy(
        model,                              # Antimony model to evaluate
        dataSourceDct,                      # one observed time series per patient
        parameterDct=parameterDct,          # parameter bounds and starting values
        dirStudyPath=dirStudyPath,          # bootstrap serialization directory
        selectedColumns=["log10V"],         # column computed by the assignment rule
        doSerialize=DO_SERIALIZE,           # save bootstrap results
        useSerialized=USE_SERIALIZED,       # reuse prior bootstrap results if present
    )
    study.bootstrap(numIteration=NUM_BOOTSTRAP_ITERATION, filterSL=filterSL)
    # Fit plots (with confidence bands), then per-patient parameter estimates.
    for makePlot in (study.plotFitAll, study.plotParameterEstimates):
        print("\n\n")
        makePlot()
print("# In[8]:")
# Fit the baseline model; bootstrap fits below the 1% significance level are filtered out.
dirStudyPath = os.path.join(DIR, "ModelStudyFitters_01")
runStudy(ANTIMONY_MODEL, dirStudyPath, filterSL=0.01)
# ## Simplified Model: $T \rightarrow I \rightarrow \emptyset$
# The model is updated to remove the state variable E and the parameter $\kappa$.
print("# In[9]:")
# Parameter value ranges: lower, upper, initial value
# (rebinds the module-level parameterDct that runStudy() reads at call time)
parameterDct = dict(
    beta=(0, 10e-5, 3.2e-5),
    delta=(0, 10, 5.2),
    p=(0, 1, 4.6e-2),
    c=(0, 10, 5.2)
)
print("# In[10]:")
# Antimony model without the exposed-cell compartment (T -> I directly).
SIMPLIFIED_MODEL = '''
// Equations
E1: T -> I ; beta*T*V ; // Target cells to exposed
E3: -> V ; p*I ; // Virus production by infected cells
E4: V -> ; c*V ; // Virus clearance
E5: I -> ; delta*I // Death of infected cells
// Parameters - from the Influenza article,
beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day
delta = 5.2; // rate of death of infected cells(I), in units of 1/day
p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day
c = 5.2; // rate of virus clearance, in units of 1/day
// Initial conditions
T = 4E+8 // estimate of the total number of susceptible epithelial cells
// in upper respiratory tract)
I = 0
V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection
// Computed values
log10V := log10(V)
'''
# ### Step 4: Run the model and produce plots
print("# In[11]:")
dirStudyPath = os.path.join(DIR, "SimpleModelFitters_01")
runStudy(SIMPLIFIED_MODEL, dirStudyPath, filterSL=0.01)
|
en
| 0.697249
|
#!/usr/bin/env python # coding: utf-8 # # Fitting Parameters for Influenza Data # ## Overview # # This is a study of the Influenza data. The analysis provides plots of fits and parameter estimates. Two models are considered. # # ### Influenza Data (Influenza.csv) # # - 6 patients # - Viral levels in log10(TCID50 / ml of nasal wash) # - Measurements taken a successive days since the exposure of the volunteers to an attenuated strain of H1N1" # - Columns are patients # # ### State variables # # - $T$: number of target cells # - $E$: number of exposed cells (virus replicating inside, not yet spreading virus) # - $I$: number of infected cells (active virus production) # - $V$: viral titre, in units of TCID50/ml of biofluid wash (for Influenza) # # ### Baseline Model: $T \rightarrow E \rightarrow I \rightarrow \emptyset$ # $\frac{dT}{dt} = - \beta T V$ # # $\frac{dE}{dt} = \beta T V - \kappa E$ # # $\frac{dI}{dt} = \kappa E - \delta I$ # # $\frac{dV}{dt} = p y(I) - c y(V)$ # # ### Simplified Model: $T \rightarrow I \rightarrow \emptyset$ # $\frac{dT}{dt} = - \beta T V$ # # $\frac{dI}{dt} = \beta T V - \delta I$ # # $\frac{dV}{dt} = p y(I) - c y(V)$ # # Python packages used # Programming Constants Used in Analysis. Constants are in all capital letters. 
# Use saved values of fitting from a previous bootstrap (if present) # Update the saved values of fitted data # Directory where the data are # Name of the file containing the observed data # Number of bootstrap iterations, if bootstrapping is done # Name of the state variable that corresponds to the observed data # ## Study for Baseline Model // Equations E1: T -> E ; beta*T*V ; // Target cells to exposed E2: E -> I ; kappa*E ; // Exposed cells to infected E3: -> V ; p*I ; // Virus production by infected cells E4: V -> ; c*V ; // Virus clearance E5: I -> ; delta*I // Death of infected cells // Parameters - from the Influenza article, beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day kappa = 4.0; // rate of transition from exposed(E) to infected(I) cells, in units of 1/day delta = 5.2; // rate of death of infected cells(I), in units of 1/day p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day c = 5.2; // rate of virus clearance, in units of 1/day // Initial conditions T = 4E+8 // estimate of the total number of susceptible epithelial cells // in upper respiratory tract) E = 0 I = 0 V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection // Computed values log10V := log10(V) # ### 1. Data Setup # Data are from B<NAME> et al., Kinetics of Influenza A Virus Infection in Humans. Journal of Virology. 2006 Aug 1;80(15):7590–9. Specifically, Influenza A (WT HK/123/77 (H1N1)) data from Table 1. # These data are: # # `` # 2,5.5,4,5.5,3,0,0 # 1,6,3,1.5,3.5,1.3,0 # 2.5,5,5,3,5.5,3.5,0 # 3.5,5.5,6.5,5.5,3.5,4,0 # 2.5,3,6.5,6.5,2,0.8,0 # 4,5,5.5,7.5,5.5,1.3,0 # `` # # The rows are patients; the columns are times. We need to create separate data for each patient. # Transform the input data into separate data sources. # dataSourceDct is a python dictionary. The key is 'Pn', where n is the patient number. 
# The value is a time series for that patient. # ### 2. Transform the simulation results to units of observed values # The observed values are in units of log10. So, simulation results must # be converted to these units. This is done by using an assignment rule in the simulation model. # For this model, the assignmnt rule is ``log10V := log10(V)``. # ### 3. Specify permissible values for parameters # For each parameter, provide a tuple of its: lower bound, upper bound, and starting value. # Parameter value ranges: lower, upper, initial value # ### 4. Run the model and produce plots. # Run a study # Antimony model to evaluate # Data sources to use for fitting # Parameters and their value ranges # Where to store the results of bootstrapping # Output column is computed in the assignment rule # Save the results of bootstrapping # Use previously calculated bootstrap results if they are present # Do bootstrapping # Plot fitted and observed values with band plots for confidence # Plot the parameter estimates for each data source # ## Simplified Model: $T \rightarrow I \rightarrow \emptyset$ # The model is updated to remove the state variable E and the parameter $\kappa$. 
# Parameter value ranges: lower, upper, initial value // Equations E1: T -> I ; beta*T*V ; // Target cells to exposed E3: -> V ; p*I ; // Virus production by infected cells E4: V -> ; c*V ; // Virus clearance E5: I -> ; delta*I // Death of infected cells // Parameters - from the Influenza article, beta = 3.2e-5; // rate of transition of target(T) to exposed(E) cells, in units of 1/[V] * 1/day delta = 5.2; // rate of death of infected cells(I), in units of 1/day p = 4.6e-2; // rate virus(V) producion by infected cells(I), in units of [V]/day c = 5.2; // rate of virus clearance, in units of 1/day // Initial conditions T = 4E+8 // estimate of the total number of susceptible epithelial cells // in upper respiratory tract) I = 0 V = 0.75 // the dose of virus in TCID50 in Influenza experiment; could be V=0 and I = 20 instead for a natural infection // Computed values log10V := log10(V) # ### Step 4: Run the model and produce plots
| 3.108148
| 3
|
powerline/bindings/qtile/widget.py
|
MrFishFinger/powerline
| 11,435
|
6627524
|
<gh_stars>1000+
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from libqtile.bar import CALCULATED
from libqtile.widget import TextBox
from powerline import Powerline
class QTilePowerline(Powerline):
    """Powerline subclass that stores a back-reference on the widget.

    ``Powerline.setup(obj)`` ends up calling ``do_setup(obj)`` here, so the
    widget gains a ``powerline`` attribute it can render from.
    """
    def do_setup(self, obj):
        # Give the widget a handle to this renderer (used by update()).
        obj.powerline = self
class PowerlineTextBox(TextBox):
    """A qtile TextBox widget whose content is rendered by Powerline.

    The widget re-renders every ``update_interval`` seconds via a qtile
    timer and draws the result using pango markup.
    """

    # TODO Replace timeout argument with update_interval argument in next major
    # release.
    def __init__(self, timeout=2, text=b' ', width=CALCULATED, side='right', update_interval=None, **config):
        super(PowerlineTextBox, self).__init__(text, width, **config)
        self.side = side
        # update_interval supersedes the deprecated timeout argument.
        self.update_interval = update_interval or timeout
        self.did_run_timer_setup = False
        powerline = QTilePowerline(ext='wm', renderer_module='pango_markup')
        powerline.setup(self)  # sets self.powerline via QTilePowerline.do_setup()

    def update(self):
        """Re-render the powerline segments and redraw the bar.

        Always returns True so that, when used as a timer callback, the
        timer keeps firing.
        """
        if not self.configured:
            return True
        self.text = self.powerline.render(side=self.side).encode('utf-8')
        self.bar.draw()
        return True

    def cmd_update(self, text):
        """Set the widget text and redraw.

        Bug fix: this used to call ``self.update(text)``, but the
        ``update()`` override above takes no text argument, so the command
        always raised TypeError.  Assign the text directly instead.
        """
        self.text = text
        self.bar.draw()

    def cmd_get(self):
        """Return the current widget text."""
        return self.text

    def timer_setup(self):
        # Guard: install the periodic update timer exactly once, even if
        # both timer_setup() and _configure() run.
        if not self.did_run_timer_setup:
            self.did_run_timer_setup = True
            self.timeout_add(self.update_interval, self.update)

    def _configure(self, qtile, bar):
        super(PowerlineTextBox, self)._configure(qtile, bar)
        if self.layout.markup:
            # QTile-0.9.1: no need to recreate layout or run timer_setup
            return
        # Older qtile: rebuild the text layout with pango markup enabled.
        self.layout = self.drawer.textlayout(
            self.text,
            self.foreground,
            self.font,
            self.fontsize,
            self.fontshadow,
            markup=True,
        )
        self.timer_setup()


# TODO: Remove this at next major release
Powerline = PowerlineTextBox
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
from libqtile.bar import CALCULATED
from libqtile.widget import TextBox
from powerline import Powerline
class QTilePowerline(Powerline):
    """Powerline subclass that stores a back-reference on the widget.

    ``Powerline.setup(obj)`` ends up calling ``do_setup(obj)`` here, so the
    widget gains a ``powerline`` attribute it can render from.
    """
    def do_setup(self, obj):
        # Give the widget a handle to this renderer (used by update()).
        obj.powerline = self
class PowerlineTextBox(TextBox):
    """A qtile TextBox widget whose content is rendered by Powerline."""
    # TODO Replace timeout argument with update_interval argument in next major
    # release.
    def __init__(self, timeout=2, text=b' ', width=CALCULATED, side='right', update_interval=None, **config):
        super(PowerlineTextBox, self).__init__(text, width, **config)
        self.side = side
        # update_interval supersedes the deprecated timeout argument.
        self.update_interval = update_interval or timeout
        self.did_run_timer_setup = False
        powerline = QTilePowerline(ext='wm', renderer_module='pango_markup')
        powerline.setup(self)  # sets self.powerline via QTilePowerline.do_setup()
    def update(self):
        # Render the powerline side and redraw; returning True keeps a
        # qtile timer callback alive.
        if not self.configured:
            return True
        self.text = self.powerline.render(side=self.side).encode('utf-8')
        self.bar.draw()
        return True
    def cmd_update(self, text):
        # NOTE(review): update() above takes no positional argument, so this
        # call raises TypeError — confirm and fix (e.g. assign self.text and
        # redraw instead of delegating).
        self.update(text)
    def cmd_get(self):
        # Return the current widget text.
        return self.text
    def timer_setup(self):
        # Install the periodic update timer exactly once.
        if not self.did_run_timer_setup:
            self.did_run_timer_setup = True
            self.timeout_add(self.update_interval, self.update)
    def _configure(self, qtile, bar):
        super(PowerlineTextBox, self)._configure(qtile, bar)
        if self.layout.markup:
            # QTile-0.9.1: no need to recreate layout or run timer_setup
            return
        # Older qtile: rebuild the text layout with pango markup enabled.
        self.layout = self.drawer.textlayout(
            self.text,
            self.foreground,
            self.font,
            self.fontsize,
            self.fontshadow,
            markup=True,
        )
        self.timer_setup()
# TODO: Remove this at next major release
Powerline = PowerlineTextBox
|
en
| 0.646527
|
# vim:fileencoding=utf-8:noet # TODO Replace timeout argument with update_interval argument in next major # release. # QTile-0.9.1: no need to recreate layout or run timer_setup # TODO: Remove this at next major release
| 2.237269
| 2
|
website_monitor/website_monitor.py
|
John2662/website_monitor
| 0
|
6627525
|
# -*- coding: utf-8 -*-
"""Main module."""
import copy
import datetime
import getopt
import json
import logging as log
import os
import re as reg_ex
import sys
import threading
import time
import requests
from website_monitor import db_utils
from .wm_exceptions import (
ConfigFileEmpty, ConfigFileInvalid, RequirementsNotFulfilled,
URLPropertyNotFound
)
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = os.path.join(WORK_DIR, 'logs')
LOG_FILE_PATH = os.path.join(LOG_DIR, 'logfile.log')
log.basicConfig(filename=LOG_FILE_PATH, format='%(message)s', level=log.INFO)
DEFAULT_CHECK_PERIOD = 3600
class WebMonitorConfigObject(object):
    """Represents a configuration object.

    The configuration is read from a JSON file that maps website aliases to
    property dicts (each must define at least a ``url`` key) and may carry a
    top-level ``check_period`` entry.
    """

    def __init__(self, check_period=0, defer_to_store=False, config_abs_path=None):
        """
        Initialize a WebMonitorConfigObject instance.

        :param check_period: Value representing the interval period between
            website status checks (seconds).
        :param defer_to_store: If True, a positive ``check_period`` stored in
            the config file takes precedence over the passed value.
        :param config_abs_path: String representing absolute path to the
            configuration file; defaults to ``config.json`` next to this
            module.
        """
        config_path = config_abs_path or os.path.join(WORK_DIR, 'config.json')
        with open(config_path) as f:
            configs = json.load(f)
        self.set_check_period_and_web_data(configs, check_period, defer_to_store)

    def extract_websites(self, configs):
        # Check if website properties have at least defined the url; if they
        # are properly formed, keep them in self.websites.
        self.websites = copy.copy(configs)
        for key, val in configs.items():
            if 'url' not in val:
                self.websites.pop(key, None)

    @staticmethod
    def is_positive_int(i):
        """Return True when *i* can be converted to a positive integer."""
        try:
            i = int(i)
        except (TypeError, ValueError):  # was a bare except: narrowed
            return False
        return i > 0

    @staticmethod
    def extract_check_period_from_input(test_check_period):
        """Coerce *test_check_period* to int, falling back to 0 when invalid."""
        try:
            test_check_period = int(test_check_period)
        except (TypeError, ValueError):  # was a bare except: narrowed
            test_check_period = 0
        return test_check_period

    def set_check_period_and_web_data(self, configs, passed_check_period, defer_to_store):
        """Resolve the effective check period and load website definitions.

        Precedence: stored value (when *defer_to_store* and positive),
        then the passed value (when positive), then DEFAULT_CHECK_PERIOD.
        """
        passed_check_period = self.__class__.extract_check_period_from_input(passed_check_period)
        stored_check_period = self.__class__.extract_check_period_from_input(configs.pop('check_period', 0))
        self.extract_websites(configs)
        if defer_to_store and stored_check_period > 0:
            self.check_period = stored_check_period
        elif passed_check_period > 0:
            self.check_period = passed_check_period
        else:
            self.check_period = DEFAULT_CHECK_PERIOD

    @property
    def check_period(self):
        # Interval between check rounds, in seconds.
        return self.__check_period

    @check_period.setter
    def check_period(self, val):
        try:
            val = int(val)
        except ValueError:
            print('Please make sure that check period value is specified '
                  'as integer.')
            return  # setter return value is discarded; 'return False' was meaningless
        if val < 0:
            print('Checking period cannot be negative. Please set correct '
                  'value and try again.')
            return
        self.__check_period = val
class Monitor(object):
    """Periodically checks each configured website and records the results.

    On construction it loads the configuration, registers the websites in
    the database, and starts a self-rescheduling check loop.
    """
    config_obj = None  # legacy class attribute; not read by this class

    def __init__(self, check_interval):
        """
        Initialize a Monitor instance.
        :param check_interval: seconds between check rounds (invalid or
            falsy values fall back to the stored/default period)
        """
        self.config_store = WebMonitorConfigObject(check_interval, False)
        self.load_website_query_table()
        self.next_call = time.time()
        self.start_watch()

    def hot_load_config(self):
        # Re-read config.json before each round so edits take effect without
        # a restart; a positive check_period stored in the file wins.
        defer_to_json_configs = True
        self.config_store = WebMonitorConfigObject(self.config_store.check_period, defer_to_json_configs)
        self.load_website_query_table()

    def load_website_query_table(self):
        # Register every configured website (and its optional content
        # requirement) in the database.
        for webname, web_data in self.config_store.websites.items():
            url = web_data['url']
            content_requirements = web_data.get('content', None)
            print(f'{webname}, {url}, {content_requirements}')
            db_utils.insert_webcheck_config(webname, url, content_requirements)

    def start_watch(self):
        """
        Method responsible for triggering periodic checks in time intervals.
        If time interval is not specified it is set by default to 3600s(1h).
        :return: None
        """
        self.hot_load_config()
        self._start_checks()
        self.next_call += self.config_store.check_period
        # accounts for drift
        # more at https://stackoverflow.com/a/18180189/2808371
        threading.Timer(self.next_call - time.time(), self.start_watch).start()

    def _start_checks(self):
        """
        Method responsible for coordinating checks of each website.
        :return: None
        """
        # used for formatting first and last message of round of checks
        time_format = '%d/%m/%Y %H:%M:%S'
        asterix = '*' * 10
        s = ('\n{asterix}Starting new round of checks - {current_time}'
             '{asterix}')
        log.info(s.format(asterix=asterix,
                          current_time=datetime.datetime.now().strftime(
                              time_format)))
        # One worker thread per website; the round ends when every join returns.
        threads = []
        for webname, web_data in self.config_store.websites.items():
            url = web_data['url']
            content_requirements = web_data.get('content', None)
            t = threading.Thread(target=self._perform_checks, args=(
                url, content_requirements, webname))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        s = '\n{asterix}Finished all checks - {current_time}{asterix}'
        log.info(s.format(asterix=asterix,
                          current_time=datetime.datetime.now().strftime(
                              time_format)))

    def _perform_checks(self, url, content_requirements, webname):
        """
        Method responsible for checking requirements on each website.
        :param url: URL of the page for which we want to check requirements
        :param content_requirements: Actual content requirements
        :param webname: Alias name for website
        :return: None
        """
        response = self.make_request(url, webname)
        # Bug fix: `if not response:` treated HTTP 4xx/5xx answers as missing
        # (requests.Response is falsy for error status codes), so failed
        # checks were never recorded.  Only a failed request returns None.
        if response is None:
            return
        response_time = response.elapsed / datetime.timedelta(seconds=1)
        requirements_fulfilled = 1
        try:
            self.check_requirements(response, content_requirements)
        except RequirementsNotFulfilled as e:
            s = ('Content requirements: {e} ("{content_requirements}" '
                 'not in response content)')
            log.info(s.format(**locals()))
            requirements_fulfilled = 0
        else:
            s = ('Content requirements: Website meets content requirements.'
                 '("{content_requirements}" in response content)')
            log.info(s.format(**locals()))
        db_utils.insert_webcheck_record(webname, url, datetime.datetime.now(),
                                        response.status_code, response_time,
                                        requirements_fulfilled)

    @staticmethod
    def make_request(url, webname=None):
        """
        Static method used to perform actual request to the server.
        :param url: URL of the page that we want to make request to
        :param webname: Alias name for website
        :return: If successful returns requests.Response object, otherwise None
        """
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException as e:
            error_msg = str(e)
            s = 'Connection problem\nError message: {}\n'
            log.info(s.format(error_msg))
            db_utils.insert_webcheck_record(
                webname, url, request_time=datetime.datetime.now(),
                error=error_msg)
        else:
            s = ('\nURL: {url}\nStatus: {response.status_code}\n'
                 'Response time: {response.elapsed.seconds}s'
                 '{response.elapsed.microseconds}\u00B5s')
            log.info(s.format(**locals()))
            return response
        return None

    @staticmethod
    def check_requirements(response, content_requirements):
        """
        Static method used to perform requirement checks for specific
        requests.Response.
        :param response: requests.Response object.
        :param content_requirements: Regular expression to check against the
            response content, or None/empty for "no requirement".
        :return: True when requirements are met (or there are none);
            otherwise raises RequirementsNotFulfilled.
        """
        # Bug fix: the emptiness check must run before re.search(), which
        # raises TypeError when the pattern is None.
        if not content_requirements:
            # no requirements means the check trivially passes
            return True
        response_content = response.content.decode('utf-8', 'ignore')
        if reg_ex.search(content_requirements, response_content,
                         reg_ex.IGNORECASE):
            return True
        s = 'Website content does not match specified requirements.'
        raise RequirementsNotFulfilled(s.format(**locals()))
def parse_cl_args(argv):
    """
    Helper function used to check if user provided checking period value
    in command line arguments.

    :param argv: command line arguments (excluding the program name)
    :return: checking period value as given on the command line (a string),
        or None when no interval option was supplied.  Exits with status 2
        on malformed options and status 0 after printing help.
    """
    help_text = """
    Usage:
        website_monitor.py -i <checking_interval_in_s>
        website_monitor.py --interval=<checking_interval_in_s>
    """
    try:
        opts, args = getopt.getopt(argv, "hi:", ["help", "interval="])
    except getopt.GetoptError:
        print(help_text)
        sys.exit(2)
    for opt, val in opts:
        # Bug fix: '--help' is registered with getopt but the original
        # comparison (opt == '-h') never matched the long form.
        if opt in ("-h", "--help"):
            print(help_text)
            sys.exit(0)
        elif opt in ("-i", "--interval"):
            return val
    return None  # explicit: no interval option was provided
def main():
    # Parse the optional -i/--interval argument, make sure the database
    # tables exist, then start the (self-rescheduling) monitor loop.
    interval = parse_cl_args(sys.argv[1:])
    db_utils.create_tables()
    Monitor(interval)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""Main module."""
import copy
import datetime
import getopt
import json
import logging as log
import os
import re as reg_ex
import sys
import threading
import time
import requests
from website_monitor import db_utils
from .wm_exceptions import (
ConfigFileEmpty, ConfigFileInvalid, RequirementsNotFulfilled,
URLPropertyNotFound
)
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = os.path.join(WORK_DIR, 'logs')
LOG_FILE_PATH = os.path.join(LOG_DIR, 'logfile.log')
log.basicConfig(filename=LOG_FILE_PATH, format='%(message)s', level=log.INFO)
DEFAULT_CHECK_PERIOD = 3600
class WebMonitorConfigObject(object):
    """Represents a configuration object.

    The configuration is read from a JSON file that maps website aliases to
    property dicts (each must define at least a ``url`` key) and may carry a
    top-level ``check_period`` entry.
    """
    def __init__(self, check_period=0, defer_to_store=False, config_abs_path=None):
        """
        Initialize a WebMonitorConfigObject instance.
        :param config_abs_path: String representing absolute path
        to configuration file
        :param check_period: Value representing the interval period between
        website status checks.
        :param defer_to_store: if True, a positive check_period stored in the
        JSON file takes precedence over the passed value.
        """
        config_path = config_abs_path or os.path.join(WORK_DIR, 'config.json')
        with open(config_path) as f:
            configs = json.load(f)
        self.set_check_period_and_web_data(configs, check_period, defer_to_store)
    # check if website properties have at least defined the url
    # if they are properly formed, add them in the set of
    # self.websites
    def extract_websites(self, configs):
        self.websites = copy.copy(configs)
        for key, val in configs.items():
            if 'url' not in val:
                self.websites.pop(key,None)
    @staticmethod
    def is_positive_int(i):
        # True when i converts to a positive integer.
        # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
        # prefer except (TypeError, ValueError).
        try:
            i = int(i)
        except:
            return False
        return i > 0
    @staticmethod
    def extract_check_period_from_input(test_check_period):
        # Coerce to int, falling back to 0 when conversion fails.
        # NOTE(review): same bare-except concern as above.
        try:
            test_check_period = int(test_check_period)
        except:
            test_check_period = 0
        return test_check_period
    def set_check_period_and_web_data(self, configs, passed_check_period, defer_to_store):
        # Precedence: stored value (when defer_to_store and positive), then
        # the passed value (when positive), then DEFAULT_CHECK_PERIOD.
        passed_check_period = self.__class__.extract_check_period_from_input(passed_check_period)
        stored_check_period = self.__class__.extract_check_period_from_input(configs.pop('check_period', 0))
        self.extract_websites(configs)
        if defer_to_store and stored_check_period > 0:
            self.check_period = stored_check_period
        elif passed_check_period > 0:
            self.check_period = passed_check_period
        else:
            self.check_period = DEFAULT_CHECK_PERIOD
    @property
    def check_period(self):
        # Interval between check rounds, in seconds.
        return self.__check_period
    @check_period.setter
    def check_period(self, val):
        try:
            val = int(val)
        except ValueError:
            print('Please make sure that check period value is specified '
                  'as integer.')
            # NOTE(review): the return value of a property setter is discarded.
            return False
        if val < 0:
            print('Checking period cannot be negative. Please set correct '
                  'value and try again.')
            return False
        self.__check_period = val
class Monitor(object):
    """Periodically checks a set of websites and records results in the DB.

    On construction the monitor builds its configuration store, persists the
    website query table and immediately starts the periodic checking loop
    (``start_watch`` reschedules itself with ``threading.Timer``).
    """

    # Kept for backward compatibility: the original declared this class
    # attribute although nothing in the class assigns it.
    config_obj = None

    def __init__(self, check_interval):
        """
        Initialize a Monitor instance and start watching.

        :param check_interval: interval in seconds between rounds of checks;
            a falsy/invalid value falls back to the stored or default period
            inside WebMonitorConfigObject.
        """
        self.config_store = WebMonitorConfigObject(check_interval, False)
        self.load_website_query_table()
        self.next_call = time.time()
        self.start_watch()

    def hot_load_config(self):
        """Reload configuration, letting values from the config store win."""
        defer_to_json_configs = True
        self.config_store = WebMonitorConfigObject(
            self.config_store.check_period, defer_to_json_configs)
        self.load_website_query_table()

    def load_website_query_table(self):
        """Persist each configured website (name, url, requirements) to the DB."""
        for webname, web_data in self.config_store.websites.items():
            url = web_data['url']
            content_requirements = web_data.get('content', None)
            print(f'{webname}, {url}, {content_requirements}')
            db_utils.insert_webcheck_config(webname, url, content_requirements)

    def start_watch(self):
        """
        Method responsible for triggering periodic checks in time intervals.
        If time interval is not specified it is set by default to 3600s(1h).
        :return: None
        """
        self.hot_load_config()
        self._start_checks()
        self.next_call += self.config_store.check_period
        # accounts for drift
        # more at https://stackoverflow.com/a/18180189/2808371
        threading.Timer(self.next_call - time.time(), self.start_watch).start()

    def _start_checks(self):
        """
        Method responsible for coordinating checks of each website.

        Spawns one thread per website and joins them all before logging the
        end-of-round banner.
        :return: None
        """
        # used for formatting first and last message of round of checks
        time_format = '%d/%m/%Y %H:%M:%S'
        asterix = '*' * 10
        s = ('\n{asterix}Starting new round of checks - {current_time}'
             '{asterix}')
        log.info(s.format(asterix=asterix,
                          current_time=datetime.datetime.now().strftime(
                              time_format)))
        threads = []
        for webname, web_data in self.config_store.websites.items():
            url = web_data['url']
            content_requirements = web_data.get('content', None)
            t = threading.Thread(target=self._perform_checks, args=(
                url, content_requirements, webname))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        s = '\n{asterix}Finished all checks - {current_time}{asterix}'
        log.info(s.format(asterix=asterix,
                          current_time=datetime.datetime.now().strftime(
                              time_format)))

    def _perform_checks(self, url, content_requirements, webname):
        """
        Method responsible for checking requirements on a single website.

        :param url: URL of the page for which we want to check requirements
        :param content_requirements: Actual content requirements (may be None)
        :param webname: Alias name for the website, used as the DB key
        :return: None
        """
        response = self.make_request(url, webname)
        if not response:
            # Request failed; make_request already recorded the error.
            return
        response_time = response.elapsed / datetime.timedelta(seconds=1)
        requirements_fulfilled = 1
        try:
            self.check_requirements(response, content_requirements)
        except RequirementsNotFulfilled as e:
            s = ('Content requirements: {e} ("{content_requirements}" '
                 'not in response content)')
            log.info(s.format(**locals()))
            requirements_fulfilled = 0
        else:
            s = ('Content requirements: Website meets content requirements.'
                 '("{content_requirements}" in response content)')
            log.info(s.format(**locals()))
        db_utils.insert_webcheck_record(webname, url, datetime.datetime.now(),
                                        response.status_code, response_time,
                                        requirements_fulfilled)

    @staticmethod
    def make_request(url, webname=None):
        """
        Static method used to perform actual request to the server.

        :param url: URL of the page that we want to make request to
        :param webname: Alias name for website
        :return: If successful returns requests.Response object, otherwise None
        """
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException as e:
            error_msg = str(e)
            s = 'Connection problem\nError message: {}\n'
            log.info(s.format(error_msg))
            db_utils.insert_webcheck_record(
                webname, url, request_time=datetime.datetime.now(),
                error=error_msg)
        else:
            s = ('\nURL: {url}\nStatus: {response.status_code}\n'
                 'Response time: {response.elapsed.seconds}s'
                 '{response.elapsed.microseconds}\u00B5s')
            log.info(s.format(**locals()))
            return response
        return None

    @staticmethod
    def check_requirements(response, content_requirements):
        """
        Static method used to perform requirement checks for specific
        requests.Response.

        :param response: requests.Response object.
        :param content_requirements: Content requirements to check against
            in response object; None/empty means "no requirements".
        :return: If requirements are met returns True, otherwise raises
            website_monitor.exceptions.RequirementsNotFulfilled
        """
        if not content_requirements:
            # BUG FIX: the original passed content_requirements to
            # reg_ex.search() *before* this falsy check, so a None
            # requirement (produced by web_data.get('content', None))
            # raised TypeError instead of counting as fulfilled.
            return True
        response_content = response.content.decode('utf-8', 'ignore')
        if reg_ex.search(content_requirements, response_content,
                         reg_ex.IGNORECASE):
            return True
        raise RequirementsNotFulfilled(
            'Website content does not match specified requirements.')
def parse_cl_args(argv):
    """
    Helper function used to check if user provided checking period value
    in command line arguments.

    :param argv: command line arguments (e.g. ``sys.argv[1:]``)
    :return: checking period value as a string, or None when the interval
        option was not supplied
    """
    help_text = """
    Usage:
    website_monitor.py -i <checking_interval_in_s>
    website_monitor.py --interval=<checking_interval_in_s>
    """
    try:
        opts, args = getopt.getopt(argv, "hi:", ["help", "interval="])
    except getopt.GetoptError:
        print(help_text)
        sys.exit(2)
    for opt, val in opts:
        if opt in ('-h', '--help'):
            # BUG FIX: '--help' was registered in the long options but the
            # original only matched '-h', so '--help' was silently ignored.
            print(help_text)
            sys.exit(0)
        elif opt in ("-i", "--interval"):
            return val
    return None  # explicit: no interval option present
def main():
    """Entry point: read the CLI interval, prepare DB tables, start the monitor."""
    check_interval = parse_cl_args(sys.argv[1:])
    db_utils.create_tables()
    Monitor(check_interval)
if __name__ == '__main__':
    # Script entry point: delegate to main() so the module stays importable.
    main()
|
en
| 0.708779
|
# -*- coding: utf-8 -*- Main module. Represents a configuration object. Initialize a WebMonitorConfigObject instance. :param config_abs_path: String representing absolute path to configuration file :param check_period: Value representing the interval period between website status checks. # check if website properties have at least defined the url # if they are properly formed, add them in the set of # self.websites Represents Monitor object. Initialize a Monitor instance. :param config_obj: website_monitor.WebMonitorConfigObject class instance Method responsible for triggering periodic checks in time intervals. If time interval is not specified it is set by default to 3600s(1h). :return: None # accounts for drift # more at https://stackoverflow.com/a/18180189/2808371 Method responsible for coordinating checks of each website. :return: None # used for formatting first and last message of round of checks Method responsible for checking requirements on each website. :param url: URL of the page for which we want to check requirements :param content_requirements: Actual content requirements :return: None Static method used to perform actual request to the server. :param url: URL of the page that we want to make request to :param webname: Alias name for website :return: If successful returns requests.Response object, otherwise None Static method used to perform requirement checks for specific requests.Response. :param response: requests.Response object. :param content_requirements: Content requirements to check against in response object. :return: If requirements are met returns True, otherwise raises website_monitor.exceptions.RequirementsNotFulfilled # if there are no requirements or the requirements are fulfilled Helper function used to check if user provided checking period value in command line arguments. 
:param argv: command line arguments :return: checking period value Usage: website_monitor.py -i <checking_interval_in_s> website_monitor.py --interval=<checking_interval_in_s>
| 2.346437
| 2
|
ampy/cli.py
|
Cediddi/ampy
| 1
|
6627526
|
# Adafruit MicroPython Tool - Command Line Interface
# Author: <NAME>
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import platform
import posixpath
import re
import click
import ampy.files as files
import ampy.pyboard as pyboard
_board = None
def windows_full_port_name(portname):
    """Return the Windows-safe device path for a serial port name.

    Windows refers to COM1..COM9 directly, but ports numbered 10 and above
    need the special '\\\\.\\COMn' path form. See this post for more info
    and where this code came from:
    http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/

    :param portname: port name such as 'COM3' or 'COM12'
    :return: name usable to open the port on Windows
    """
    # BUG FIX: raw string for the regex; the original '\d' in a plain
    # string is an invalid escape sequence (warning on modern Pythons).
    m = re.match(r'^COM(\d+)$', portname)
    if m and int(m.group(1)) < 10:
        return portname
    else:
        return '\\\\.\\{0}'.format(portname)
@click.group()
@click.option('--port', '-p', envvar='AMPY_PORT', required=True, type=click.STRING,
              help='Name of serial port for connected board. Can optionally specify with AMPY_PORT environment variable.',
              metavar='PORT')
@click.option('--baud', '-b', envvar='AMPY_BAUD', default=115200, type=click.INT,
              help='Baud rate for the serial connection (default 115200). Can optionally specify with AMPY_BAUD environment variable.',
              metavar='BAUD')
@click.version_option()
def cli(port, baud):
    """ampy - Adafruit MicroPython Tool

    Ampy is a tool to control MicroPython boards over a serial connection. Using
    ampy you can manipulate files on the board's internal filesystem and even run
    scripts.
    """
    global _board
    # On Windows fix the COM port path name for ports above 9 (see comment in
    # windows_full_port_name function).
    if platform.system() == 'Windows':
        port = windows_full_port_name(port)
    # FIX: corrected user-visible typo 'environemnt' in the --port help text.
    _board = pyboard.Pyboard(port, baudrate=baud)
@cli.command()
@click.argument('remote_file')
@click.argument('local_file', type=click.File('wb'), required=False)
def get(remote_file, local_file):
    """
    Retrieve a file from the board.
    Get will download a file from the board and print its contents or save it
    locally. You must pass at least one argument which is the path to the file
    to download from the board. If you don't specify a second argument then
    the file contents will be printed to standard output. However if you pass
    a file name as the second argument then the contents of the downloaded file
    will be saved to that file (overwriting anything inside it!).
    For example to retrieve the boot.py and print it out run:
    ampy --port /board/serial/port get boot.py
    Or to get main.py and save it as main.py locally run:
    ampy --port /board/serial/port get main.py main.py
    """
    # Download the remote file's bytes from the board.
    contents = files.Files(_board).get(remote_file)
    # Save to the given local file, or dump to stdout when none was given.
    if local_file is not None:
        local_file.write(contents)
    else:
        print(contents.decode('utf-8'))
@cli.command()
@click.argument('remote_file')
def sha1sum(remote_file):
    """
    Retrieve sha1sum of a file from the board.
    Sha1sum will calculate sha1 hash a file from the board and print its hex digest.
    You only need to pass the path to the file.
    For example to retrieve the sha1 hash of boot.py and print it out run:
    ampy --port /board/serial/port sha1sum boot.py
    """
    # Ask the board for the file's SHA1 digest and display it.
    print(files.Files(_board).sha1sum(remote_file))
@cli.command()
@click.argument('directory')
def mkdir(directory):
    """
    Create a directory on the board.
    Mkdir will create the specified directory on the board. One argument is
    required, the full path of the directory to create.
    Note that you cannot recursively create a hierarchy of directories with one
    mkdir command, instead you must create each parent directory with separate
    mkdir command calls.
    For example to make a directory under the root called 'code':
    ampy --port /board/serial/port mkdir /code
    """
    # Create the directory on the board's filesystem.
    files.Files(_board).mkdir(directory)
@cli.command()
@click.argument('directory', default='/')
def ls(directory):
    """List contents of a directory on the board.
    Can pass an optional argument which is the path to the directory. The
    default is to list the contents of the root, /, path.
    For example to list the contents of the root run:
    ampy --port /board/serial/port ls
    Or to list the contents of the /foo/bar directory on the board run:
    ampy --port /board/serial/port ls /foo/bar
    """
    # Print one entry per line, exactly as returned by the board.
    for entry in files.Files(_board).ls(directory):
        print(entry)
@cli.command()
@click.argument('local', type=click.Path(exists=True))
@click.argument('remote', required=False)
def put(local, remote):
    """Put a file or folder and its contents on the board.
    Put will upload a local file or folder to the board. If the file already
    exists on the board it will be overwritten with no warning! You must pass
    at least one argument which is the path to the local file/folder to
    upload. If the item to upload is a folder then it will be copied to the
    board recursively with its entire child structure. You can pass a second
    optional argument which is the path and name of the file/folder to put to
    on the connected board.
    For example to upload a main.py from the current directory to the board's
    root run:
    ampy --port /board/serial/port put main.py
    Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
    in the board's root run:
    ampy --port /board/serial/port put ./foo/board_boot.py boot.py
    To upload a local folder adafruit_library and all of its child files/folders
    as an item under the board's root run:
    ampy --port /board/serial/port put adafruit_library
    Or to put a local folder adafruit_library on the board under the path
    /lib/adafruit_library on the board run:
    ampy --port /board/serial/port put adafruit_library /lib/adafruit_library
    """
    # Hoisted from the inner copy loop where the original re-imported it
    # on every file.
    import hashlib

    # Use the local filename if no remote filename is provided.
    if remote is None:
        remote = os.path.basename(os.path.abspath(local))
    # Check if path is a folder and do recursive copy of everything inside it.
    # Otherwise it's a file and should simply be copied over.
    if os.path.isdir(local):
        # Directory copy, create the directory and walk all children to copy
        # over the files.
        board_files = files.Files(_board)
        for parent, child_dirs, child_files in os.walk(local):
            # Create board filesystem absolute path to parent directory.
            remote_parent = posixpath.normpath(
                posixpath.join(remote, os.path.relpath(parent, local)))
            try:
                # Create remote parent directory.
                board_files.mkdir(remote_parent)
            except files.DirectoryExistsError:
                # Ignore errors for directories that already exist.
                pass
            # Loop through all the files and put them on the board too.
            for filename in child_files:
                with open(os.path.join(parent, filename), 'rb') as infile:
                    remote_filename = posixpath.join(remote_parent, filename)
                    local_file = infile.read()
                    local_hash = hashlib.sha1(local_file).hexdigest()
                    # Skip the upload when the board already holds an
                    # identical copy (same SHA1 digest).
                    try:
                        remote_hash1 = board_files.sha1sum(remote_filename)
                    except RuntimeError:
                        # Treat any board-side failure (e.g. missing file)
                        # as "no remote copy" so the upload proceeds.
                        remote_hash1 = ""
                    if local_hash != remote_hash1:
                        board_files.put(remote_filename, local_file)
    else:
        # File copy, open the file and copy its contents to the board.
        # Put the file on the board.
        with open(local, 'rb') as infile:
            board_files = files.Files(_board)
            board_files.put(remote, infile.read())
@cli.command()
@click.argument('remote_file')
def rm(remote_file):
    """Remove a file from the board.
    Remove the specified file from the board's filesystem. Must specify one
    argument which is the path to the file to delete. Note that this can't
    delete directories which have files inside them, but can delete empty
    directories.
    For example to delete main.py from the root of a board run:
    ampy --port /board/serial/port rm main.py
    """
    # Ask the board to delete the given path.
    files.Files(_board).rm(remote_file)
@cli.command()
@click.argument('remote_folder')
def rmdir(remote_folder):
    """Forcefully remove a folder and all its children from the board.
    Remove the specified folder from the board's filesystem. Must specify one
    argument which is the path to the folder to delete. This will delete the
    directory and ALL of its children recursively, use with caution!
    For example to delete everything under /adafruit_library from the root of a
    board run:
    ampy --port /board/serial/port rmdir adafruit_library
    """
    # Recursively delete the folder and everything beneath it.
    files.Files(_board).rmdir(remote_folder)
@cli.command()
@click.argument('local_file')
@click.option('--no-output', '-n', is_flag=True,
              help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.")
def run(local_file, no_output):
    """Run a script and print its output.
    Run will send the specified file to the board and execute it immediately.
    Any output from the board will be printed to the console (note that this is
    not a 'shell' and you can't send input to the program).
    Note that if your code has a main or infinite loop you should add the --no-output
    option. This will run the script and immediately exit without waiting for
    the script to finish and print output.
    For example to run a test.py script and print any output after it finishes:
    ampy --port /board/serial/port run test.py
    Or to run test.py and not wait for it to finish:
    ampy --port /board/serial/port run --no-output test.py
    """
    # Execute the script; wait_output is the inverse of the --no-output flag.
    wait_output = not no_output
    result = files.Files(_board).run(local_file, wait_output)
    if result is not None:
        print(result.decode('utf-8'), end='')
@cli.command()
def reset():
    """Perform soft reset/reboot of the board.
    Will connect to the board and perform a soft reset. No arguments are
    necessary:
    ampy --port /board/serial/port reset
    """
    # Enter then exit the raw REPL, in the process the board will be soft reset
    # (part of enter raw REPL).
    _board.enter_raw_repl()
    _board.exit_raw_repl()
if __name__ == '__main__':
    try:
        cli()
    finally:
        # Try to ensure the board serial connection is always gracefully closed.
        if _board is not None:
            try:
                _board.close()
            except Exception:
                # Narrowed from a bare 'except:' so KeyboardInterrupt and
                # SystemExit raised during close are no longer swallowed.
                # Closing is best effort and shouldn't cause a new error or
                # problem if the connection can't be closed.
                pass
|
# Adafruit MicroPython Tool - Command Line Interface
# Author: <NAME>
# Copyright (c) 2016 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import os
import platform
import posixpath
import re
import click
import ampy.files as files
import ampy.pyboard as pyboard
_board = None
def windows_full_port_name(portname):
# Helper function to generate proper Windows COM port paths. Apparently
# Windows requires COM ports above 9 to have a special path, where ports below
# 9 are just referred to by COM1, COM2, etc. (wacky!) See this post for
# more info and where this code came from:
# http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/
m = re.match('^COM(\d+)$', portname)
if m and int(m.group(1)) < 10:
return portname
else:
return '\\\\.\\{0}'.format(portname)
@click.group()
@click.option('--port', '-p', envvar='AMPY_PORT', required=True, type=click.STRING,
help='Name of serial port for connected board. Can optionally specify with AMPY_PORT environemnt variable.',
metavar='PORT')
@click.option('--baud', '-b', envvar='AMPY_BAUD', default=115200, type=click.INT,
help='Baud rate for the serial connection (default 115200). Can optionally specify with AMPY_BAUD environment variable.',
metavar='BAUD')
@click.version_option()
def cli(port, baud):
"""ampy - Adafruit MicroPython Tool
Ampy is a tool to control MicroPython boards over a serial connection. Using
ampy you can manipulate files on the board's internal filesystem and even run
scripts.
"""
global _board
# On Windows fix the COM port path name for ports above 9 (see comment in
# windows_full_port_name function).
if platform.system() == 'Windows':
port = windows_full_port_name(port)
_board = pyboard.Pyboard(port, baudrate=baud)
@cli.command()
@click.argument('remote_file')
@click.argument('local_file', type=click.File('wb'), required=False)
def get(remote_file, local_file):
"""
Retrieve a file from the board.
Get will download a file from the board and print its contents or save it
locally. You must pass at least one argument which is the path to the file
to download from the board. If you don't specify a second argument then
the file contents will be printed to standard output. However if you pass
a file name as the second argument then the contents of the downloaded file
will be saved to that file (overwriting anything inside it!).
For example to retrieve the boot.py and print it out run:
ampy --port /board/serial/port get boot.py
Or to get main.py and save it as main.py locally run:
ampy --port /board/serial/port get main.py main.py
"""
# Get the file contents.
board_files = files.Files(_board)
contents = board_files.get(remote_file)
# Print the file out if no local file was provided, otherwise save it.
if local_file is None:
print(contents.decode('utf-8'))
else:
local_file.write(contents)
@cli.command()
@click.argument('remote_file')
def sha1sum(remote_file):
"""
Retrieve sha1sum of a file from the board.
Sha1sum will calculate sha1 hash a file from the board and print its hex digest.
You only need to pass the path to the file.
For example to retrieve the sha1 hash of boot.py and print it out run:
ampy --port /board/serial/port sha1sum boot.py
"""
# Get the hash of the file.
board_files = files.Files(_board)
sha1_hash = board_files.sha1sum(remote_file)
# Print the hash output.
print(sha1_hash)
@cli.command()
@click.argument('directory')
def mkdir(directory):
"""
Create a directory on the board.
Mkdir will create the specified directory on the board. One argument is
required, the full path of the directory to create.
Note that you cannot recursively create a hierarchy of directories with one
mkdir command, instead you must create each parent directory with separate
mkdir command calls.
For example to make a directory under the root called 'code':
ampy --port /board/serial/port mkdir /code
"""
# Run the mkdir command.
board_files = files.Files(_board)
board_files.mkdir(directory)
@cli.command()
@click.argument('directory', default='/')
def ls(directory):
"""List contents of a directory on the board.
Can pass an optional argument which is the path to the directory. The
default is to list the contents of the root, /, path.
For example to list the contents of the root run:
ampy --port /board/serial/port ls
Or to list the contents of the /foo/bar directory on the board run:
ampy --port /board/serial/port ls /foo/bar
"""
# List each file/directory on a separate line.
board_files = files.Files(_board)
for f in board_files.ls(directory):
print(f)
@cli.command()
@click.argument('local', type=click.Path(exists=True))
@click.argument('remote', required=False)
def put(local, remote):
"""Put a file or folder and its contents on the board.
Put will upload a local file or folder to the board. If the file already
exists on the board it will be overwritten with no warning! You must pass
at least one argument which is the path to the local file/folder to
upload. If the item to upload is a folder then it will be copied to the
board recursively with its entire child structure. You can pass a second
optional argument which is the path and name of the file/folder to put to
on the connected board.
For example to upload a main.py from the current directory to the board's
root run:
ampy --port /board/serial/port put main.py
Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py
in the board's root run:
ampy --port /board/serial/port put ./foo/board_boot.py boot.py
To upload a local folder adafruit_library and all of its child files/folders
as an item under the board's root run:
ampy --port /board/serial/port put adafruit_library
Or to put a local folder adafruit_library on the board under the path
/lib/adafruit_library on the board run:
ampy --port /board/serial/port put adafruit_library /lib/adafruit_library
"""
# Use the local filename if no remote filename is provided.
if remote is None:
remote = os.path.basename(os.path.abspath(local))
# Check if path is a folder and do recursive copy of everything inside it.
# Otherwise it's a file and should simply be copied over.
if os.path.isdir(local):
# Directory copy, create the directory and walk all children to copy
# over the files.
board_files = files.Files(_board)
for parent, child_dirs, child_files in os.walk(local):
# Create board filesystem absolute path to parent directory.
remote_parent = posixpath.normpath(posixpath.join(remote, os.path.relpath(parent, local)))
try:
# Create remote parent directory.
board_files.mkdir(remote_parent)
except files.DirectoryExistsError:
# Ignore errors for directories that already exist.
pass
# Loop through all the files and put them on the board too.
for filename in child_files:
with open(os.path.join(parent, filename), 'rb') as infile:
remote_filename = posixpath.join(remote_parent, filename)
import hashlib
local_file = infile.read()
local_hash = hashlib.sha1(local_file).hexdigest()
try:
remote_hash1 = board_files.sha1sum(remote_filename)
except RuntimeError:
remote_hash1 = ""
if local_hash != remote_hash1:
board_files.put(remote_filename, local_file)
else:
# File copy, open the file and copy its contents to the board.
# Put the file on the board.
with open(local, 'rb') as infile:
board_files = files.Files(_board)
board_files.put(remote, infile.read())
@cli.command()
@click.argument('remote_file')
def rm(remote_file):
"""Remove a file from the board.
Remove the specified file from the board's filesystem. Must specify one
argument which is the path to the file to delete. Note that this can't
delete directories which have files inside them, but can delete empty
directories.
For example to delete main.py from the root of a board run:
ampy --port /board/serial/port rm main.py
"""
# Delete the provided file/directory on the board.
board_files = files.Files(_board)
board_files.rm(remote_file)
@cli.command()
@click.argument('remote_folder')
def rmdir(remote_folder):
"""Forcefully remove a folder and all its children from the board.
Remove the specified folder from the board's filesystem. Must specify one
argument which is the path to the folder to delete. This will delete the
directory and ALL of its children recursively, use with caution!
For example to delete everything under /adafruit_library from the root of a
board run:
ampy --port /board/serial/port rmdir adafruit_library
"""
# Delete the provided file/directory on the board.
board_files = files.Files(_board)
board_files.rmdir(remote_folder)
@cli.command()
@click.argument('local_file')
@click.option('--no-output', '-n', is_flag=True,
help="Run the code without waiting for it to finish and print output. Use this when running code with main loops that never return.")
def run(local_file, no_output):
"""Run a script and print its output.
Run will send the specified file to the board and execute it immediately.
Any output from the board will be printed to the console (note that this is
not a 'shell' and you can't send input to the program).
Note that if your code has a main or infinite loop you should add the --no-output
option. This will run the script and immediately exit without waiting for
the script to finish and print output.
For example to run a test.py script and print any output after it finishes:
ampy --port /board/serial/port run test.py
Or to run test.py and not wait for it to finish:
ampy --port /board/serial/port run --no-output test.py
"""
# Run the provided file and print its output.
board_files = files.Files(_board)
output = board_files.run(local_file, not no_output)
if output is not None:
print(output.decode('utf-8'), end='')
@cli.command()
def reset():
"""Perform soft reset/reboot of the board.
Will connect to the board and perform a soft reset. No arguments are
necessary:
ampy --port /board/serial/port reset
"""
# Enter then exit the raw REPL, in the process the board will be soft reset
# (part of enter raw REPL).
_board.enter_raw_repl()
_board.exit_raw_repl()
if __name__ == '__main__':
try:
cli()
finally:
# Try to ensure the board serial connection is always gracefully closed.
if _board is not None:
try:
_board.close()
except:
# Swallow errors when attempting to close as it's just a best effort
# and shouldn't cause a new error or problem if the connection can't
# be closed.
pass
|
en
| 0.849205
|
# Adafruit MicroPython Tool - Command Line Interface # Author: <NAME> # Copyright (c) 2016 Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # Helper function to generate proper Windows COM port paths. Apparently # Windows requires COM ports above 9 to have a special path, where ports below # 9 are just referred to by COM1, COM2, etc. (wacky!) See this post for # more info and where this code came from: # http://eli.thegreenplace.net/2009/07/31/listing-all-serial-ports-on-windows-with-python/ ampy - Adafruit MicroPython Tool Ampy is a tool to control MicroPython boards over a serial connection. Using ampy you can manipulate files on the board's internal filesystem and even run scripts. # On Windows fix the COM port path name for ports above 9 (see comment in # windows_full_port_name function). Retrieve a file from the board. Get will download a file from the board and print its contents or save it locally. 
You must pass at least one argument which is the path to the file to download from the board. If you don't specify a second argument then the file contents will be printed to standard output. However if you pass a file name as the second argument then the contents of the downloaded file will be saved to that file (overwriting anything inside it!). For example to retrieve the boot.py and print it out run: ampy --port /board/serial/port get boot.py Or to get main.py and save it as main.py locally run: ampy --port /board/serial/port get main.py main.py # Get the file contents. # Print the file out if no local file was provided, otherwise save it. Retrieve sha1sum of a file from the board. Sha1sum will calculate sha1 hash a file from the board and print its hex digest. You only need to pass the path to the file. For example to retrieve the sha1 hash of boot.py and print it out run: ampy --port /board/serial/port sha1sum boot.py # Get the hash of the file. # Print the hash output. Create a directory on the board. Mkdir will create the specified directory on the board. One argument is required, the full path of the directory to create. Note that you cannot recursively create a hierarchy of directories with one mkdir command, instead you must create each parent directory with separate mkdir command calls. For example to make a directory under the root called 'code': ampy --port /board/serial/port mkdir /code # Run the mkdir command. List contents of a directory on the board. Can pass an optional argument which is the path to the directory. The default is to list the contents of the root, /, path. For example to list the contents of the root run: ampy --port /board/serial/port ls Or to list the contents of the /foo/bar directory on the board run: ampy --port /board/serial/port ls /foo/bar # List each file/directory on a separate line. Put a file or folder and its contents on the board. Put will upload a local file or folder to the board. 
If the file already exists on the board it will be overwritten with no warning! You must pass at least one argument which is the path to the local file/folder to upload. If the item to upload is a folder then it will be copied to the board recursively with its entire child structure. You can pass a second optional argument which is the path and name of the file/folder to put to on the connected board. For example to upload a main.py from the current directory to the board's root run: ampy --port /board/serial/port put main.py Or to upload a board_boot.py from a ./foo subdirectory and save it as boot.py in the board's root run: ampy --port /board/serial/port put ./foo/board_boot.py boot.py To upload a local folder adafruit_library and all of its child files/folders as an item under the board's root run: ampy --port /board/serial/port put adafruit_library Or to put a local folder adafruit_library on the board under the path /lib/adafruit_library on the board run: ampy --port /board/serial/port put adafruit_library /lib/adafruit_library # Use the local filename if no remote filename is provided. # Check if path is a folder and do recursive copy of everything inside it. # Otherwise it's a file and should simply be copied over. # Directory copy, create the directory and walk all children to copy # over the files. # Create board filesystem absolute path to parent directory. # Create remote parent directory. # Ignore errors for directories that already exist. # Loop through all the files and put them on the board too. # File copy, open the file and copy its contents to the board. # Put the file on the board. Remove a file from the board. Remove the specified file from the board's filesystem. Must specify one argument which is the path to the file to delete. Note that this can't delete directories which have files inside them, but can delete empty directories. 
For example to delete main.py from the root of a board run: ampy --port /board/serial/port rm main.py # Delete the provided file/directory on the board. Forcefully remove a folder and all its children from the board. Remove the specified folder from the board's filesystem. Must specify one argument which is the path to the folder to delete. This will delete the directory and ALL of its children recursively, use with caution! For example to delete everything under /adafruit_library from the root of a board run: ampy --port /board/serial/port rmdir adafruit_library # Delete the provided file/directory on the board. Run a script and print its output. Run will send the specified file to the board and execute it immediately. Any output from the board will be printed to the console (note that this is not a 'shell' and you can't send input to the program). Note that if your code has a main or infinite loop you should add the --no-output option. This will run the script and immediately exit without waiting for the script to finish and print output. For example to run a test.py script and print any output after it finishes: ampy --port /board/serial/port run test.py Or to run test.py and not wait for it to finish: ampy --port /board/serial/port run --no-output test.py # Run the provided file and print its output. Perform soft reset/reboot of the board. Will connect to the board and perform a soft reset. No arguments are necessary: ampy --port /board/serial/port reset # Enter then exit the raw REPL, in the process the board will be soft reset # (part of enter raw REPL). # Try to ensure the board serial connection is always gracefully closed. # Swallow errors when attempting to close as it's just a best effort # and shouldn't cause a new error or problem if the connection can't # be closed.
| 2.336526
| 2
|
kale/pipeline/video_domain_adapter.py
|
SheffieldAI/pykale
| 0
|
6627527
|
# =============================================================================
# Author: <NAME>, <EMAIL>
# <NAME>, <EMAIL> or <EMAIL>
# =============================================================================
"""Domain adaptation systems (pipelines) for video data, e.g., for action recognition.
Most are inherited from kale.pipeline.domain_adapter.
"""
import torch
import kale.predict.losses as losses
from kale.loaddata.video_access import get_image_modality
from kale.pipeline.domain_adapter import (
BaseMMDLike,
CDANTrainer,
DANNTrainer,
get_aggregated_metrics_from_dict,
get_metrics_from_parameter_dict,
GradReverse,
Method,
set_requires_grad,
WDGRLTrainer,
)
def create_mmd_based_video(method: Method, dataset, image_modality, feature_extractor, task_classifier, **train_params):
    """Build an MMD-based domain-adaptation trainer (DAN or JAN) for video data.

    Args:
        method: an MMD method (``Method.DAN`` or ``Method.JAN``).
        dataset: multi-domain video dataset wrapper.
        image_modality: "rgb", "flow" or "joint".
        feature_extractor: feature-extraction network(s).
        task_classifier: classification head for the task labels.
        **train_params: extra keyword arguments forwarded to the trainer.

    Raises:
        ValueError: if ``method`` is not an MMD method.
    """
    if not method.is_mmd_method():
        raise ValueError(f"Unsupported MMD method: {method}")
    # Keyword arguments shared by both trainer flavours.
    shared = dict(
        dataset=dataset,
        image_modality=image_modality,
        feature_extractor=feature_extractor,
        task_classifier=task_classifier,
        method=method,
    )
    if method is Method.DAN:
        return DANTrainerVideo(**shared, **train_params)
    if method is Method.JAN:
        # JAN uses one (mul, num) kernel configuration per representation:
        # features first, softmax outputs second.
        return JANTrainerVideo(**shared, kernel_mul=[2.0, 2.0], kernel_num=[5, 1], **train_params)
def create_dann_like_video(
    method: Method, dataset, image_modality, feature_extractor, task_classifier, critic, **train_params
):
    """Build a DANN-family domain-adaptation trainer for video data.

    Supported methods: DANN (and Source-only), CDAN, CDAN+E, and WDGRL.

    Raises:
        ValueError: if ``method`` is not one of the supported methods.
    """
    # Uncomment for later work.
    # Set up a new create_fewshot_trainer for video data based on original one in `domain_adapter.py`
    # if dataset.is_semi_supervised():
    #     return create_fewshot_trainer_video(
    #         method, dataset, feature_extractor, task_classifier, critic, **train_params
    #     )
    # Keyword arguments common to every DANN-like trainer.
    shared = dict(
        dataset=dataset,
        image_modality=image_modality,
        feature_extractor=feature_extractor,
        task_classifier=task_classifier,
        critic=critic,
        method=method,
    )
    if method.is_dann_method():
        # Method.Source trains on source data only: a zero alpha disables
        # the gradient-reversal signal.
        alpha = 0.0 if method is Method.Source else 1.0
        return DANNTrainerVideo(alpha=alpha, **shared, **train_params)
    if method.is_cdan_method():
        return CDANTrainerVideo(use_entropy=method is Method.CDAN_E, **shared, **train_params)
    if method is Method.WDGRL:
        return WDGRLTrainerVideo(**shared, **train_params)
    raise ValueError(f"Unsupported method: {method}")
class BaseMMDLikeVideo(BaseMMDLike):
    """Common API for MMD-based domain adaptation on video data (DAN, JAN).

    Extends ``BaseMMDLike`` to video input that is either single-modality
    ("rgb" or "flow") or two-stream ("joint").  ``self.feat`` (set by the
    parent) is expected to be a dict with "rgb" and "flow" feature
    extractors; for single-modality use, the unused entry may be ``None``.
    """
    def __init__(
        self, dataset, image_modality, feature_extractor, task_classifier, kernel_mul=2.0, kernel_num=5, **base_params,
    ):
        # image_modality selects the forward path: "rgb", "flow" or "joint".
        super().__init__(dataset, feature_extractor, task_classifier, kernel_mul, kernel_num, **base_params)
        self.image_modality = image_modality
        # Per-modality feature extractors pulled out of the dict for convenience.
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        # Returns (features, class_logits); for "joint" the features are a
        # [rgb, flow] pair so MMD can be computed per stream.
        # NOTE(review): if self.feat is None this falls through and returns
        # None implicitly — callers appear to assume feat is always set.
        if self.feat is not None:
            if self.image_modality in ["rgb", "flow"]:
                # Single modality: exactly one of the two extractors is used.
                if self.rgb_feat is not None:
                    x = self.rgb_feat(x)
                else:
                    x = self.flow_feat(x)
                x = x.view(x.size(0), -1)  # flatten to (batch, features)
                class_output = self.classifier(x)
                return x, class_output
            elif self.image_modality == "joint":
                # Two-stream input: x is a dict {"rgb": ..., "flow": ...}.
                x_rgb = self.rgb_feat(x["rgb"])
                x_flow = self.flow_feat(x["flow"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                x_flow = x_flow.view(x_flow.size(0), -1)
                # The classifier sees the concatenated streams, but each
                # stream's features are returned separately for per-stream MMD.
                x = torch.cat((x_rgb, x_flow), dim=1)
                class_output = self.classifier(x)
                return [x_rgb, x_flow], class_output
    def compute_loss(self, batch, split_name="valid"):
        """Return (task_loss, mmd_loss, log_metrics) for one batch."""
        # _s refers to source, _tu refers to unlabeled target
        if self.image_modality == "joint" and len(batch) == 4:
            (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            [phi_s_rgb, phi_s_flow], y_hat = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
            [phi_t_rgb, phi_t_flow], y_t_hat = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
            # MMD is computed per stream and summed.
            mmd_rgb = self._compute_mmd(phi_s_rgb, phi_t_rgb, y_hat, y_t_hat)
            mmd_flow = self._compute_mmd(phi_s_flow, phi_t_flow, y_hat, y_t_hat)
            mmd = mmd_rgb + mmd_flow
        elif self.image_modality in ["rgb", "flow"] and len(batch) == 2:
            (x_s, y_s), (x_tu, y_tu) = batch
            phi_s, y_hat = self.forward(x_s)
            phi_t, y_t_hat = self.forward(x_tu)
            mmd = self._compute_mmd(phi_s, phi_t, y_hat, y_t_hat)
        else:
            raise NotImplementedError("Batch len is {}. Check the Dataloader.".format(len(batch)))
        # Uncomment when checking whether rgb & flow labels are equal.
        # print('rgb_s:{}, flow_s:{}, rgb_f:{}, flow_f:{}'.format(y_s, y_s_flow, y_tu, y_tu_flow))
        # print('equal: {}/{}'.format(torch.all(torch.eq(y_s, y_s_flow)), torch.all(torch.eq(y_tu, y_tu_flow))))
        # ok is abbreviation for (all) correct
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        # Target labels feed accuracy metrics only; they never enter the loss.
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": mmd,
        }
        return task_loss, mmd, log_metrics
class DANTrainerVideo(BaseMMDLikeVideo):
    """Deep Adaptation Network (DAN) trainer for video data.

    Aligns source/target distributions with a multi-kernel MMD computed on
    the extracted features only.
    """

    def __init__(self, dataset, image_modality, feature_extractor, task_classifier, **base_params):
        super().__init__(dataset, image_modality, feature_extractor, task_classifier, **base_params)

    def _compute_mmd(self, phi_s, phi_t, y_hat, y_t_hat):
        # DAN matches feature distributions only; the classifier outputs
        # (y_hat, y_t_hat) are intentionally unused here.
        n = int(phi_s.size(0))
        gram = losses.gaussian_kernel(phi_s, phi_t, kernel_mul=self._kernel_mul, kernel_num=self._kernel_num)
        return losses.compute_mmd_loss(gram, n)
class JANTrainerVideo(BaseMMDLikeVideo):
    """Joint Adaptation Network (JAN) trainer for video data.

    Matches the joint distribution of features and softmax predictions by
    multiplying one Gaussian kernel matrix per representation.
    """

    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        kernel_mul=(2.0, 2.0),
        kernel_num=(5, 1),
        **base_params,
    ):
        super().__init__(
            dataset,
            image_modality,
            feature_extractor,
            task_classifier,
            kernel_mul=kernel_mul,
            kernel_num=kernel_num,
            **base_params,
        )

    def _compute_mmd(self, phi_s, phi_t, y_hat, y_t_hat):
        softmax = torch.nn.Softmax(dim=-1)
        n = int(phi_s.size(0))
        # One (source, target, mul, num, sigma) tuple per representation:
        # raw features first, softmax class scores second (fixed sigma 1.68).
        layer_pairs = zip(
            [phi_s, softmax(y_hat)], [phi_t, softmax(y_t_hat)], self._kernel_mul, self._kernel_num, [None, 1.68]
        )
        joint = None
        for src, tgt, mul, num, sigma in layer_pairs:
            gram = losses.gaussian_kernel(src, tgt, kernel_mul=mul, kernel_num=num, fix_sigma=sigma)
            joint = gram if joint is None else joint * gram
        return losses.compute_mmd_loss(joint, n)
class DANNTrainerVideo(DANNTrainer):
    """DANN (Domain-Adversarial Neural Network) trainer for video data.

    Supports "rgb", "flow" and "joint" (two-stream) inputs.  For joint input
    each stream has its own feature extractor and its own adversarial
    (domain-classifier) output; the task classifier sees the concatenated
    stream features.
    """
    def __init__(
        self, dataset, image_modality, feature_extractor, task_classifier, critic, method, **base_params,
    ):
        super(DANNTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, method, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which modalities are active for this run.
        self.rgb, self.flow = get_image_modality(self.image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        # Returns ([x_rgb, x_flow], class_logits, [domain_rgb, domain_flow]);
        # entries for inactive modalities are None.  x is a dict
        # {"rgb": ..., "flow": ...} whose inactive entries may be None.
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both two ifs are used
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                # Gradient reversal implements the adversarial min-max in a
                # single backward pass; alpha scales the reversed gradient.
                reverse_feature_rgb = GradReverse.apply(x_rgb, self.alpha)
                adversarial_output_rgb = self.domain_classifier(reverse_feature_rgb)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                reverse_feature_flow = GradReverse.apply(x_flow, self.alpha)
                adversarial_output_flow = self.domain_classifier(reverse_feature_flow)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Return (task_loss, adversarial_loss, log_metrics) for one batch."""
        # _s refers to source, _tu refers to unlabeled target
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        # Inactive modality slots stay None; forward() only touches active ones.
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        # Domain labels: 0 = source, 1 = target.
        # NOTE(review): torch.zeros/ones are created on the default (CPU)
        # device — presumably the loss helper or Lightning handles placement;
        # confirm on GPU runs.
        if self.rgb:
            loss_dmn_src_rgb, dok_src_rgb = losses.cross_entropy_logits(d_hat_rgb, torch.zeros(batch_size))
            loss_dmn_tgt_rgb, dok_tgt_rgb = losses.cross_entropy_logits(d_t_hat_rgb, torch.ones(batch_size))
        if self.flow:
            loss_dmn_src_flow, dok_src_flow = losses.cross_entropy_logits(d_hat_flow, torch.zeros(batch_size))
            loss_dmn_tgt_flow, dok_tgt_flow = losses.cross_entropy_logits(d_t_hat_flow, torch.ones(batch_size))
        if self.rgb and self.flow:  # For joint input
            # Sum the per-stream domain losses; concatenate per-sample
            # correctness indicators for the accuracy metrics.
            loss_dmn_src = loss_dmn_src_rgb + loss_dmn_src_flow
            loss_dmn_tgt = loss_dmn_tgt_rgb + loss_dmn_tgt_flow
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
            # ok is abbreviation for (all) correct, dok refers to domain correct
            loss_dmn_src, dok_src = losses.cross_entropy_logits(d_hat, torch.zeros(batch_size))
            loss_dmn_tgt, dok_tgt = losses.cross_entropy_logits(d_t_hat, torch.ones(batch_size))
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        # Target labels feed accuracy metrics only; they never enter the loss.
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = loss_dmn_src + loss_dmn_tgt  # adv_loss = src + tgt
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
        }
        return task_loss, adv_loss, log_metrics
    def training_step(self, batch, batch_nb):
        """One optimization step.

        During the first ``_init_epochs`` warm-up epochs only the task loss
        is optimized; afterwards the adversarial loss is added, weighted by
        ``lamb_da``.
        """
        self._update_batch_epoch_factors(batch_nb)
        task_loss, adv_loss, log_metrics = self.compute_loss(batch, split_name="train")
        if self.current_epoch < self._init_epochs:
            loss = task_loss
        else:
            loss = task_loss + self.lamb_da * adv_loss
        log_metrics = get_aggregated_metrics_from_dict(log_metrics)
        log_metrics.update(get_metrics_from_parameter_dict(self.get_parameters_watch_list(), loss.device))
        log_metrics["train_total_loss"] = loss
        log_metrics["train_adv_loss"] = adv_loss
        log_metrics["train_task_loss"] = task_loss
        for key in log_metrics:
            self.log(key, log_metrics[key])
        return {"loss": loss}
class CDANTrainerVideo(CDANTrainer):
    """CDAN (Conditional Adversarial Domain Adaptation) trainer for video data.

    The domain discriminator sees the outer product of gradient-reversed
    features and softmax predictions, optionally projected through a fixed
    random layer, and optionally entropy-weighted (CDAN+E).  Supports "rgb",
    "flow" and "joint" inputs.
    """
    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        critic,
        use_entropy=False,
        use_random=False,
        random_dim=1024,
        **base_params,
    ):
        super(CDANTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, use_entropy, use_random, random_dim, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which modalities are active for this run.
        self.rgb, self.flow = get_image_modality(image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        # Returns ([x_rgb, x_flow], class_logits, [domain_rgb, domain_flow]);
        # entries for inactive modalities are None.
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both two ifs are used
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                reverse_feature_rgb = GradReverse.apply(x_rgb, self.alpha)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                reverse_feature_flow = GradReverse.apply(x_flow, self.alpha)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            # Predictions are also gradient-reversed before conditioning the
            # discriminator on them.
            softmax_output = torch.nn.Softmax(dim=1)(class_output)
            reverse_out = GradReverse.apply(softmax_output, self.alpha)
            if self.rgb:
                # Conditional feature: batched outer product of predictions and
                # features, flattened to (batch, classes * feat_dim).
                feature_rgb = torch.bmm(reverse_out.unsqueeze(2), reverse_feature_rgb.unsqueeze(1))
                feature_rgb = feature_rgb.view(-1, reverse_out.size(1) * reverse_feature_rgb.size(1))
                if self.random_layer:
                    # Random projection keeps the discriminator input small.
                    random_out_rgb = self.random_layer.forward(feature_rgb)
                    adversarial_output_rgb = self.domain_classifier(random_out_rgb.view(-1, random_out_rgb.size(1)))
                else:
                    adversarial_output_rgb = self.domain_classifier(feature_rgb)
            if self.flow:
                feature_flow = torch.bmm(reverse_out.unsqueeze(2), reverse_feature_flow.unsqueeze(1))
                feature_flow = feature_flow.view(-1, reverse_out.size(1) * reverse_feature_flow.size(1))
                if self.random_layer:
                    random_out_flow = self.random_layer.forward(feature_flow)
                    adversarial_output_flow = self.domain_classifier(random_out_flow.view(-1, random_out_flow.size(1)))
                else:
                    adversarial_output_flow = self.domain_classifier(feature_flow)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Return (task_loss, adversarial_loss, log_metrics) for one batch."""
        # _s refers to source, _tu refers to unlabeled target
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        if self.entropy:
            # CDAN+E: weight each sample by prediction certainty so confident
            # samples dominate the domain alignment.
            e_s = self._compute_entropy_weights(y_hat)
            e_t = self._compute_entropy_weights(y_t_hat)
            source_weight = e_s / torch.sum(e_s)
            target_weight = e_t / torch.sum(e_t)
        else:
            source_weight = None
            target_weight = None
        # Domain labels: 0 = source, 1 = target.
        if self.rgb:
            loss_dmn_src_rgb, dok_src_rgb = losses.cross_entropy_logits(
                d_hat_rgb, torch.zeros(batch_size), source_weight
            )
            loss_dmn_tgt_rgb, dok_tgt_rgb = losses.cross_entropy_logits(
                d_t_hat_rgb, torch.ones(batch_size), target_weight
            )
        if self.flow:
            loss_dmn_src_flow, dok_src_flow = losses.cross_entropy_logits(
                d_hat_flow, torch.zeros(batch_size), source_weight
            )
            loss_dmn_tgt_flow, dok_tgt_flow = losses.cross_entropy_logits(
                d_t_hat_flow, torch.ones(batch_size), target_weight
            )
        # ok is abbreviation for (all) correct, dok refers to domain correct
        if self.rgb and self.flow:  # For joint input
            loss_dmn_src = loss_dmn_src_rgb + loss_dmn_src_flow
            loss_dmn_tgt = loss_dmn_tgt_rgb + loss_dmn_tgt_flow
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
            # NOTE(review): unlike the per-stream calls above, the entropy
            # weights are NOT applied in this single-modality recomputation —
            # possibly unintended; confirm against the per-image CDANTrainer.
            loss_dmn_src, dok_src = losses.cross_entropy_logits(d_hat, torch.zeros(batch_size))
            loss_dmn_tgt, dok_tgt = losses.cross_entropy_logits(d_t_hat, torch.ones(batch_size))
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        # Target labels feed accuracy metrics only; they never enter the loss.
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = loss_dmn_src + loss_dmn_tgt  # adv_loss = src + tgt
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
        }
        return task_loss, adv_loss, log_metrics
class WDGRLTrainerVideo(WDGRLTrainer):
    """WDGRL (Wasserstein Distance Guided Representation Learning) trainer for video data.

    The critic (domain classifier) is trained for ``k_critic`` inner steps per
    batch with a gradient penalty (weight ``gamma``); the feature extractor is
    then trained to minimize the estimated Wasserstein distance.  Supports
    "rgb", "flow" and "joint" inputs.
    """
    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        critic,
        k_critic=5,
        gamma=10,
        beta_ratio=0,
        **base_params,
    ):
        super(WDGRLTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, k_critic, gamma, beta_ratio, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which modalities are active for this run.
        self.rgb, self.flow = get_image_modality(self.image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        # Returns ([x_rgb, x_flow], class_logits, [critic_rgb, critic_flow]);
        # entries for inactive modalities are None.  Unlike DANN/CDAN there is
        # no gradient reversal: the critic is trained in critic_update_steps.
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both two ifs are used
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                adversarial_output_rgb = self.domain_classifier(x_rgb)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                adversarial_output_flow = self.domain_classifier(x_flow)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Return (task_loss, wasserstein_distance, log_metrics) for one batch."""
        # _s refers to source, _tu refers to unlabeled target
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        # ok is abbreviation for (all) correct, dok refers to domain correct
        if self.rgb:
            _, dok_src_rgb = losses.cross_entropy_logits(d_hat_rgb, torch.zeros(batch_size))
            _, dok_tgt_rgb = losses.cross_entropy_logits(d_t_hat_rgb, torch.ones(batch_size))
        if self.flow:
            _, dok_src_flow = losses.cross_entropy_logits(d_hat_flow, torch.zeros(batch_size))
            _, dok_tgt_flow = losses.cross_entropy_logits(d_t_hat_flow, torch.ones(batch_size))
        if self.rgb and self.flow:  # For joint input
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
            # Per-stream Wasserstein estimates are averaged for joint input.
            wasserstein_distance_rgb = d_hat_rgb.mean() - (1 + self._beta_ratio) * d_t_hat_rgb.mean()
            wasserstein_distance_flow = d_hat_flow.mean() - (1 + self._beta_ratio) * d_t_hat_flow.mean()
            wasserstein_distance = (wasserstein_distance_rgb + wasserstein_distance_flow) / 2
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
                dok_src = dok_src_rgb
                dok_tgt = dok_tgt_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
                dok_src = dok_src_flow
                dok_tgt = dok_tgt_flow
            wasserstein_distance = d_hat.mean() - (1 + self._beta_ratio) * d_t_hat.mean()
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        # Target labels feed accuracy metrics only; they never enter the loss.
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = wasserstein_distance
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
            f"{split_name}_wasserstein_dist": wasserstein_distance,
        }
        return task_loss, adv_loss, log_metrics
    def configure_optimizers(self):
        """Build the task/feature optimizer and stash the critic optimizer.

        The critic optimizer is kept on ``self`` (not returned to Lightning)
        because the critic is updated manually in ``critic_update_steps``.
        """
        if self.image_modality in ["rgb", "flow"]:
            if self.rgb_feat is not None:
                nets = [self.rgb_feat, self.classifier]
            else:
                nets = [self.flow_feat, self.classifier]
        elif self.image_modality == "joint":
            nets = [self.rgb_feat, self.flow_feat, self.classifier]
        parameters = set()
        for net in nets:
            # parameters = parameters | set(net.parameters())
            parameters |= set(net.parameters())
        if self._adapt_lr:
            task_feat_optimizer, task_feat_sched = self._configure_optimizer(parameters)
            self.critic_opt, self.critic_sched = self._configure_optimizer(self.domain_classifier.parameters())
            # _configure_optimizer returns lists; keep the single entries.
            self.critic_opt = self.critic_opt[0]
            self.critic_sched = self.critic_sched[0]
            return task_feat_optimizer, task_feat_sched
        else:
            task_feat_optimizer = self._configure_optimizer(parameters)
            self.critic_opt = self._configure_optimizer(self.domain_classifier.parameters())
            self.critic_sched = None
            self.critic_opt = self.critic_opt[0]
            return task_feat_optimizer
    def critic_update_steps(self, batch):
        """Run ``k_critic`` critic (domain classifier) updates on one batch.

        Feature extractors are frozen and features pre-computed under
        ``no_grad`` so only the critic's parameters receive gradients.
        Skipped entirely during the warm-up epochs.
        """
        if self.current_epoch < self._init_epochs:
            return
        set_requires_grad(self.domain_classifier, requires_grad=True)
        if self.image_modality in ["rgb", "flow"]:
            if self.rgb_feat is not None:
                set_requires_grad(self.rgb_feat, requires_grad=False)
                (x_s, y_s), (x_tu, _) = batch
                with torch.no_grad():
                    h_s = self.rgb_feat(x_s).data.view(x_s.shape[0], -1)
                    h_t = self.rgb_feat(x_tu).data.view(x_tu.shape[0], -1)
            else:
                set_requires_grad(self.flow_feat, requires_grad=False)
                (x_s, y_s), (x_tu, _) = batch
                with torch.no_grad():
                    h_s = self.flow_feat(x_s).data.view(x_s.shape[0], -1)
                    h_t = self.flow_feat(x_tu).data.view(x_tu.shape[0], -1)
            for _ in range(self._k_critic):
                # gp refers to gradient penalty in Wasserstein distance.
                gp = losses.gradient_penalty(self.domain_classifier, h_s, h_t)
                critic_s = self.domain_classifier(h_s)
                critic_t = self.domain_classifier(h_t)
                wasserstein_distance = critic_s.mean() - (1 + self._beta_ratio) * critic_t.mean()
                # Critic maximizes the Wasserstein estimate (hence the minus).
                critic_cost = -wasserstein_distance + self._gamma * gp
                self.critic_opt.zero_grad()
                critic_cost.backward()
                self.critic_opt.step()
                if self.critic_sched:
                    self.critic_sched.step()
            # Unfreeze the feature extractor, refreeze the critic for the
            # subsequent feature/task update.
            if self.rgb_feat is not None:
                set_requires_grad(self.rgb_feat, requires_grad=True)
            else:
                set_requires_grad(self.flow_feat, requires_grad=True)
            set_requires_grad(self.domain_classifier, requires_grad=False)
        elif self.image_modality == "joint":
            set_requires_grad(self.rgb_feat, requires_grad=False)
            set_requires_grad(self.flow_feat, requires_grad=False)
            (x_s_rgb, y_s), (x_s_flow, _), (x_tu_rgb, _), (x_tu_flow, _) = batch
            with torch.no_grad():
                h_s_rgb = self.rgb_feat(x_s_rgb).data.view(x_s_rgb.shape[0], -1)
                h_t_rgb = self.rgb_feat(x_tu_rgb).data.view(x_tu_rgb.shape[0], -1)
                h_s_flow = self.flow_feat(x_s_flow).data.view(x_s_flow.shape[0], -1)
                h_t_flow = self.flow_feat(x_tu_flow).data.view(x_tu_flow.shape[0], -1)
                # NOTE(review): h_s/h_t are computed here but the loop below
                # uses the per-stream tensors; the concatenated versions
                # appear unused in this method.
                h_s = torch.cat((h_s_rgb, h_s_flow), dim=1)
                h_t = torch.cat((h_t_rgb, h_t_flow), dim=1)
            # Need to improve to process rgb and flow separately in the future.
            for _ in range(self._k_critic):
                # gp_x refers to gradient penalty for the input with the modality x.
                gp_rgb = losses.gradient_penalty(self.domain_classifier, h_s_rgb, h_t_rgb)
                gp_flow = losses.gradient_penalty(self.domain_classifier, h_s_flow, h_t_flow)
                critic_s_rgb = self.domain_classifier(h_s_rgb)
                critic_s_flow = self.domain_classifier(h_s_flow)
                critic_t_rgb = self.domain_classifier(h_t_rgb)
                critic_t_flow = self.domain_classifier(h_t_flow)
                wasserstein_distance_rgb = critic_s_rgb.mean() - (1 + self._beta_ratio) * critic_t_rgb.mean()
                wasserstein_distance_flow = critic_s_flow.mean() - (1 + self._beta_ratio) * critic_t_flow.mean()
                # Average the two per-stream critic objectives.
                critic_cost = (
                    -wasserstein_distance_rgb
                    + -wasserstein_distance_flow
                    + self._gamma * gp_rgb
                    + self._gamma * gp_flow
                ) * 0.5
                self.critic_opt.zero_grad()
                critic_cost.backward()
                self.critic_opt.step()
                if self.critic_sched:
                    self.critic_sched.step()
            set_requires_grad(self.rgb_feat, requires_grad=True)
            set_requires_grad(self.flow_feat, requires_grad=True)
            set_requires_grad(self.domain_classifier, requires_grad=False)
|
# =============================================================================
# Author: <NAME>, <EMAIL>
# <NAME>, <EMAIL> or <EMAIL>
# =============================================================================
"""Domain adaptation systems (pipelines) for video data, e.g., for action recognition.
Most are inherited from kale.pipeline.domain_adapter.
"""
import torch
import kale.predict.losses as losses
from kale.loaddata.video_access import get_image_modality
from kale.pipeline.domain_adapter import (
BaseMMDLike,
CDANTrainer,
DANNTrainer,
get_aggregated_metrics_from_dict,
get_metrics_from_parameter_dict,
GradReverse,
Method,
set_requires_grad,
WDGRLTrainer,
)
def create_mmd_based_video(method: Method, dataset, image_modality, feature_extractor, task_classifier, **train_params):
    """Build an MMD-based domain-adaptation trainer (DAN or JAN) for video data.

    Args:
        method: an MMD method (``Method.DAN`` or ``Method.JAN``).
        dataset: multi-domain video dataset wrapper.
        image_modality: "rgb", "flow" or "joint".
        feature_extractor: feature-extraction network(s).
        task_classifier: classification head for the task labels.
        **train_params: extra keyword arguments forwarded to the trainer.

    Raises:
        ValueError: if ``method`` is not an MMD method.
    """
    if not method.is_mmd_method():
        raise ValueError(f"Unsupported MMD method: {method}")
    # Keyword arguments shared by both trainer flavours.
    shared = dict(
        dataset=dataset,
        image_modality=image_modality,
        feature_extractor=feature_extractor,
        task_classifier=task_classifier,
        method=method,
    )
    if method is Method.DAN:
        return DANTrainerVideo(**shared, **train_params)
    if method is Method.JAN:
        # JAN uses one (mul, num) kernel configuration per representation:
        # features first, softmax outputs second.
        return JANTrainerVideo(**shared, kernel_mul=[2.0, 2.0], kernel_num=[5, 1], **train_params)
def create_dann_like_video(
    method: Method, dataset, image_modality, feature_extractor, task_classifier, critic, **train_params
):
    """Build a DANN-family domain-adaptation trainer for video data.

    Supported methods: DANN (and Source-only), CDAN, CDAN+E, and WDGRL.

    Raises:
        ValueError: if ``method`` is not one of the supported methods.
    """
    # Uncomment for later work.
    # Set up a new create_fewshot_trainer for video data based on original one in `domain_adapter.py`
    # if dataset.is_semi_supervised():
    #     return create_fewshot_trainer_video(
    #         method, dataset, feature_extractor, task_classifier, critic, **train_params
    #     )
    # Keyword arguments common to every DANN-like trainer.
    shared = dict(
        dataset=dataset,
        image_modality=image_modality,
        feature_extractor=feature_extractor,
        task_classifier=task_classifier,
        critic=critic,
        method=method,
    )
    if method.is_dann_method():
        # Method.Source trains on source data only: a zero alpha disables
        # the gradient-reversal signal.
        alpha = 0.0 if method is Method.Source else 1.0
        return DANNTrainerVideo(alpha=alpha, **shared, **train_params)
    if method.is_cdan_method():
        return CDANTrainerVideo(use_entropy=method is Method.CDAN_E, **shared, **train_params)
    if method is Method.WDGRL:
        return WDGRLTrainerVideo(**shared, **train_params)
    raise ValueError(f"Unsupported method: {method}")
class BaseMMDLikeVideo(BaseMMDLike):
    """Common API for MMD-based domain adaptation on video data (DAN, JAN).

    Extends ``BaseMMDLike`` to video input that is either single-modality
    ("rgb" or "flow") or two-stream ("joint").  ``self.feat`` (set by the
    parent) is expected to be a dict with "rgb" and "flow" feature
    extractors; for single-modality use, the unused entry may be ``None``.
    """
    def __init__(
        self, dataset, image_modality, feature_extractor, task_classifier, kernel_mul=2.0, kernel_num=5, **base_params,
    ):
        # image_modality selects the forward path: "rgb", "flow" or "joint".
        super().__init__(dataset, feature_extractor, task_classifier, kernel_mul, kernel_num, **base_params)
        self.image_modality = image_modality
        # Per-modality feature extractors pulled out of the dict for convenience.
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        # Returns (features, class_logits); for "joint" the features are a
        # [rgb, flow] pair so MMD can be computed per stream.
        # NOTE(review): if self.feat is None this falls through and returns
        # None implicitly — callers appear to assume feat is always set.
        if self.feat is not None:
            if self.image_modality in ["rgb", "flow"]:
                # Single modality: exactly one of the two extractors is used.
                if self.rgb_feat is not None:
                    x = self.rgb_feat(x)
                else:
                    x = self.flow_feat(x)
                x = x.view(x.size(0), -1)  # flatten to (batch, features)
                class_output = self.classifier(x)
                return x, class_output
            elif self.image_modality == "joint":
                # Two-stream input: x is a dict {"rgb": ..., "flow": ...}.
                x_rgb = self.rgb_feat(x["rgb"])
                x_flow = self.flow_feat(x["flow"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                x_flow = x_flow.view(x_flow.size(0), -1)
                # The classifier sees the concatenated streams, but each
                # stream's features are returned separately for per-stream MMD.
                x = torch.cat((x_rgb, x_flow), dim=1)
                class_output = self.classifier(x)
                return [x_rgb, x_flow], class_output
    def compute_loss(self, batch, split_name="valid"):
        """Return (task_loss, mmd_loss, log_metrics) for one batch."""
        # _s refers to source, _tu refers to unlabeled target
        if self.image_modality == "joint" and len(batch) == 4:
            (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            [phi_s_rgb, phi_s_flow], y_hat = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
            [phi_t_rgb, phi_t_flow], y_t_hat = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
            # MMD is computed per stream and summed.
            mmd_rgb = self._compute_mmd(phi_s_rgb, phi_t_rgb, y_hat, y_t_hat)
            mmd_flow = self._compute_mmd(phi_s_flow, phi_t_flow, y_hat, y_t_hat)
            mmd = mmd_rgb + mmd_flow
        elif self.image_modality in ["rgb", "flow"] and len(batch) == 2:
            (x_s, y_s), (x_tu, y_tu) = batch
            phi_s, y_hat = self.forward(x_s)
            phi_t, y_t_hat = self.forward(x_tu)
            mmd = self._compute_mmd(phi_s, phi_t, y_hat, y_t_hat)
        else:
            raise NotImplementedError("Batch len is {}. Check the Dataloader.".format(len(batch)))
        # Uncomment when checking whether rgb & flow labels are equal.
        # print('rgb_s:{}, flow_s:{}, rgb_f:{}, flow_f:{}'.format(y_s, y_s_flow, y_tu, y_tu_flow))
        # print('equal: {}/{}'.format(torch.all(torch.eq(y_s, y_s_flow)), torch.all(torch.eq(y_tu, y_tu_flow))))
        # ok is abbreviation for (all) correct
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        # Target labels feed accuracy metrics only; they never enter the loss.
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": mmd,
        }
        return task_loss, mmd, log_metrics
class DANTrainerVideo(BaseMMDLikeVideo):
    """Deep Adaptation Network (DAN) trainer for video data.

    Aligns source and target distributions with a multi-kernel MMD computed
    on the feature embeddings only (classifier outputs are ignored).
    """

    def __init__(self, dataset, image_modality, feature_extractor, task_classifier, **base_params):
        super().__init__(dataset, image_modality, feature_extractor, task_classifier, **base_params)

    def _compute_mmd(self, phi_s, phi_t, y_hat, y_t_hat):
        # DAN only uses the features; y_hat / y_t_hat are part of the shared
        # interface but do not enter the kernel.
        n_samples = int(phi_s.size()[0])
        kernel_matrix = losses.gaussian_kernel(
            phi_s, phi_t, kernel_mul=self._kernel_mul, kernel_num=self._kernel_num,
        )
        return losses.compute_mmd_loss(kernel_matrix, n_samples)
class JANTrainerVideo(BaseMMDLikeVideo):
    """Joint Adaptation Network (JAN) trainer for video data.

    Aligns the joint distribution of features and classifier predictions by
    multiplying one Gaussian kernel per representation layer (features and
    softmax outputs) before computing the MMD loss.
    """

    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        kernel_mul=(2.0, 2.0),
        kernel_num=(5, 1),
        **base_params,
    ):
        super().__init__(
            dataset,
            image_modality,
            feature_extractor,
            task_classifier,
            kernel_mul=kernel_mul,
            kernel_num=kernel_num,
            **base_params,
        )

    def _compute_mmd(self, phi_s, phi_t, y_hat, y_t_hat):
        softmax = torch.nn.Softmax(dim=-1)
        n_samples = int(phi_s.size()[0])
        joint_kernels = None
        # Layer 1: raw features (median-heuristic sigma); layer 2: softmax
        # predictions with a fixed sigma of 1.68.
        for source, target, k_mul, k_num, sigma in zip(
            [phi_s, softmax(y_hat)], [phi_t, softmax(y_t_hat)], self._kernel_mul, self._kernel_num, [None, 1.68]
        ):
            layer_kernels = losses.gaussian_kernel(source, target, kernel_mul=k_mul, kernel_num=k_num, fix_sigma=sigma)
            joint_kernels = layer_kernels if joint_kernels is None else joint_kernels * layer_kernels
        return losses.compute_mmd_loss(joint_kernels, n_samples)
class DANNTrainerVideo(DANNTrainer):
    """This is an implementation of DANN for video data.

    Supports RGB-only, flow-only, or joint RGB+flow input. The feature
    extractor ``self.feat`` is a dict with "rgb" and "flow" entries; one shared
    domain classifier is applied to the gradient-reversed features of each
    active modality.
    """
    def __init__(
        self, dataset, image_modality, feature_extractor, task_classifier, critic, method, **base_params,
    ):
        super(DANNTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, method, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which of the two streams are active for this modality.
        self.rgb, self.flow = get_image_modality(self.image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        """Extract features, classify the task, and classify the domain adversarially.

        Args:
            x: dict with "rgb" and/or "flow" tensors; the entry for an inactive
                modality may be None.

        Returns:
            ``([x_rgb, x_flow], class_output, [adv_rgb, adv_flow])`` where the
            list entries for inactive modalities are None.
            NOTE(review): implicitly returns None when ``self.feat`` is None.
        """
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both of the two ifs below are used.
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                # Gradient reversal makes the feature extractor adversarial to
                # the domain classifier.
                reverse_feature_rgb = GradReverse.apply(x_rgb, self.alpha)
                adversarial_output_rgb = self.domain_classifier(reverse_feature_rgb)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                reverse_feature_flow = GradReverse.apply(x_flow, self.alpha)
                adversarial_output_flow = self.domain_classifier(reverse_feature_flow)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Compute task loss, adversarial domain loss and logging metrics.

        ``_s`` refers to labeled source samples, ``_tu`` to unlabeled target
        samples. Domain labels are 0 for source and 1 for target.
        """
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        # forward() tolerates None for the inactive modality key.
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        if self.rgb:
            # Per-modality domain losses: source labeled 0, target labeled 1.
            loss_dmn_src_rgb, dok_src_rgb = losses.cross_entropy_logits(d_hat_rgb, torch.zeros(batch_size))
            loss_dmn_tgt_rgb, dok_tgt_rgb = losses.cross_entropy_logits(d_t_hat_rgb, torch.ones(batch_size))
        if self.flow:
            loss_dmn_src_flow, dok_src_flow = losses.cross_entropy_logits(d_hat_flow, torch.zeros(batch_size))
            loss_dmn_tgt_flow, dok_tgt_flow = losses.cross_entropy_logits(d_t_hat_flow, torch.ones(batch_size))
        if self.rgb and self.flow:  # For joint input
            # Sum modality losses; concatenate per-sample correctness flags.
            loss_dmn_src = loss_dmn_src_rgb + loss_dmn_src_flow
            loss_dmn_tgt = loss_dmn_tgt_rgb + loss_dmn_tgt_flow
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
            # ok is abbreviation for (all) correct; dok refers to domain correct.
            loss_dmn_src, dok_src = losses.cross_entropy_logits(d_hat, torch.zeros(batch_size))
            loss_dmn_tgt, dok_tgt = losses.cross_entropy_logits(d_t_hat, torch.ones(batch_size))
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = loss_dmn_src + loss_dmn_tgt  # adv_loss = src + tgt
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
        }
        return task_loss, adv_loss, log_metrics
    def training_step(self, batch, batch_nb):
        """One optimization step.

        During the first ``_init_epochs`` warm-up epochs only the task loss is
        optimized; afterwards the lambda-weighted adversarial loss is added.
        """
        self._update_batch_epoch_factors(batch_nb)
        task_loss, adv_loss, log_metrics = self.compute_loss(batch, split_name="train")
        if self.current_epoch < self._init_epochs:
            loss = task_loss
        else:
            loss = task_loss + self.lamb_da * adv_loss
        log_metrics = get_aggregated_metrics_from_dict(log_metrics)
        log_metrics.update(get_metrics_from_parameter_dict(self.get_parameters_watch_list(), loss.device))
        log_metrics["train_total_loss"] = loss
        log_metrics["train_adv_loss"] = adv_loss
        log_metrics["train_task_loss"] = task_loss
        for key in log_metrics:
            self.log(key, log_metrics[key])
        return {"loss": loss}
class CDANTrainerVideo(CDANTrainer):
    """This is an implementation of CDAN for video data.

    Conditions the domain classifier on the task predictions via the outer
    product of gradient-reversed softmax outputs and gradient-reversed
    features, optionally projected through a fixed random layer
    (``use_random``) and/or weighted by prediction entropy (``use_entropy``).
    """
    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        critic,
        use_entropy=False,
        use_random=False,
        random_dim=1024,
        **base_params,
    ):
        super(CDANTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, use_entropy, use_random, random_dim, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which of the two streams are active for this modality.
        self.rgb, self.flow = get_image_modality(image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        """Extract features, classify, and run the conditioned domain classifier.

        Returns:
            ``([x_rgb, x_flow], class_output, [adv_rgb, adv_flow])`` with None
            entries for inactive modalities.
            NOTE(review): implicitly returns None when ``self.feat`` is None.
        """
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both of the two ifs below are used.
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                reverse_feature_rgb = GradReverse.apply(x_rgb, self.alpha)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                reverse_feature_flow = GradReverse.apply(x_flow, self.alpha)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            softmax_output = torch.nn.Softmax(dim=1)(class_output)
            reverse_out = GradReverse.apply(softmax_output, self.alpha)
            if self.rgb:
                # Conditioning: flattened outer product of reversed predictions
                # and reversed features (one matrix per sample via bmm).
                feature_rgb = torch.bmm(reverse_out.unsqueeze(2), reverse_feature_rgb.unsqueeze(1))
                feature_rgb = feature_rgb.view(-1, reverse_out.size(1) * reverse_feature_rgb.size(1))
                if self.random_layer:
                    # Fixed random projection keeps the critic input small.
                    random_out_rgb = self.random_layer.forward(feature_rgb)
                    adversarial_output_rgb = self.domain_classifier(random_out_rgb.view(-1, random_out_rgb.size(1)))
                else:
                    adversarial_output_rgb = self.domain_classifier(feature_rgb)
            if self.flow:
                feature_flow = torch.bmm(reverse_out.unsqueeze(2), reverse_feature_flow.unsqueeze(1))
                feature_flow = feature_flow.view(-1, reverse_out.size(1) * reverse_feature_flow.size(1))
                if self.random_layer:
                    random_out_flow = self.random_layer.forward(feature_flow)
                    adversarial_output_flow = self.domain_classifier(random_out_flow.view(-1, random_out_flow.size(1)))
                else:
                    adversarial_output_flow = self.domain_classifier(feature_flow)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Compute task loss, (optionally entropy-weighted) domain loss and metrics.

        ``_s`` refers to labeled source samples, ``_tu`` to unlabeled target
        samples. Domain labels are 0 for source and 1 for target.
        """
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        if self.entropy:
            # Low-entropy (confident) samples get higher weight in the domain loss.
            e_s = self._compute_entropy_weights(y_hat)
            e_t = self._compute_entropy_weights(y_t_hat)
            source_weight = e_s / torch.sum(e_s)
            target_weight = e_t / torch.sum(e_t)
        else:
            source_weight = None
            target_weight = None
        if self.rgb:
            loss_dmn_src_rgb, dok_src_rgb = losses.cross_entropy_logits(
                d_hat_rgb, torch.zeros(batch_size), source_weight
            )
            loss_dmn_tgt_rgb, dok_tgt_rgb = losses.cross_entropy_logits(
                d_t_hat_rgb, torch.ones(batch_size), target_weight
            )
        if self.flow:
            loss_dmn_src_flow, dok_src_flow = losses.cross_entropy_logits(
                d_hat_flow, torch.zeros(batch_size), source_weight
            )
            loss_dmn_tgt_flow, dok_tgt_flow = losses.cross_entropy_logits(
                d_t_hat_flow, torch.ones(batch_size), target_weight
            )
        # ok is abbreviation for (all) correct; dok refers to domain correct.
        if self.rgb and self.flow:  # For joint input
            loss_dmn_src = loss_dmn_src_rgb + loss_dmn_src_flow
            loss_dmn_tgt = loss_dmn_tgt_rgb + loss_dmn_tgt_flow
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
            # NOTE(review): this branch recomputes the domain losses WITHOUT
            # source_weight/target_weight, so entropy weighting is effectively
            # ignored for single-modality input — confirm whether intended.
            loss_dmn_src, dok_src = losses.cross_entropy_logits(d_hat, torch.zeros(batch_size))
            loss_dmn_tgt, dok_tgt = losses.cross_entropy_logits(d_t_hat, torch.ones(batch_size))
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = loss_dmn_src + loss_dmn_tgt  # adv_loss = src + tgt
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
        }
        return task_loss, adv_loss, log_metrics
class WDGRLTrainerVideo(WDGRLTrainer):
    """This is an implementation of WDGRL for video data.

    The domain critic approximates the Wasserstein distance between source and
    target feature distributions; it is trained with its own optimizer for
    ``k_critic`` steps per batch (see ``critic_update_steps``) using a
    gradient penalty, while the feature extractor minimizes the distance.
    """
    def __init__(
        self,
        dataset,
        image_modality,
        feature_extractor,
        task_classifier,
        critic,
        k_critic=5,
        gamma=10,
        beta_ratio=0,
        **base_params,
    ):
        super(WDGRLTrainerVideo, self).__init__(
            dataset, feature_extractor, task_classifier, critic, k_critic, gamma, beta_ratio, **base_params
        )
        self.image_modality = image_modality
        # Boolean flags: which of the two streams are active for this modality.
        self.rgb, self.flow = get_image_modality(self.image_modality)
        self.rgb_feat = self.feat["rgb"]
        self.flow_feat = self.feat["flow"]
    def forward(self, x):
        """Extract features, classify the task, and score domains with the critic.

        Unlike DANN/CDAN there is no gradient reversal here: the critic is
        applied directly to the features and trained separately.

        Returns:
            ``([x_rgb, x_flow], class_output, [critic_rgb, critic_flow])`` with
            None entries for inactive modalities.
            NOTE(review): implicitly returns None when ``self.feat`` is None.
        """
        if self.feat is not None:
            x_rgb = x_flow = adversarial_output_rgb = adversarial_output_flow = None
            # For joint input, both of the two ifs below are used.
            if self.rgb:
                x_rgb = self.rgb_feat(x["rgb"])
                x_rgb = x_rgb.view(x_rgb.size(0), -1)
                adversarial_output_rgb = self.domain_classifier(x_rgb)
            if self.flow:
                x_flow = self.flow_feat(x["flow"])
                x_flow = x_flow.view(x_flow.size(0), -1)
                adversarial_output_flow = self.domain_classifier(x_flow)
            if self.rgb:
                if self.flow:  # For joint input
                    x = torch.cat((x_rgb, x_flow), dim=1)
                else:  # For rgb input
                    x = x_rgb
            else:  # For flow input
                x = x_flow
            class_output = self.classifier(x)
            return [x_rgb, x_flow], class_output, [adversarial_output_rgb, adversarial_output_flow]
    def compute_loss(self, batch, split_name="valid"):
        """Compute task loss, Wasserstein-distance adversarial loss and metrics.

        ``_s`` refers to labeled source samples, ``_tu`` to unlabeled target
        samples. For joint input the two modality distances are averaged.
        """
        x_s_rgb = x_tu_rgb = x_s_flow = x_tu_flow = None
        if self.rgb:
            if self.flow:  # For joint input
                (x_s_rgb, y_s), (x_s_flow, y_s_flow), (x_tu_rgb, y_tu), (x_tu_flow, y_tu_flow) = batch
            else:  # For rgb input
                (x_s_rgb, y_s), (x_tu_rgb, y_tu) = batch
        else:  # For flow input
            (x_s_flow, y_s), (x_tu_flow, y_tu) = batch
        _, y_hat, [d_hat_rgb, d_hat_flow] = self.forward({"rgb": x_s_rgb, "flow": x_s_flow})
        _, y_t_hat, [d_t_hat_rgb, d_t_hat_flow] = self.forward({"rgb": x_tu_rgb, "flow": x_tu_flow})
        batch_size = len(y_s)
        # ok is abbreviation for (all) correct; dok refers to domain correct.
        # Domain accuracies only (the losses from these calls are discarded —
        # the adversarial signal is the Wasserstein distance below).
        if self.rgb:
            _, dok_src_rgb = losses.cross_entropy_logits(d_hat_rgb, torch.zeros(batch_size))
            _, dok_tgt_rgb = losses.cross_entropy_logits(d_t_hat_rgb, torch.ones(batch_size))
        if self.flow:
            _, dok_src_flow = losses.cross_entropy_logits(d_hat_flow, torch.zeros(batch_size))
            _, dok_tgt_flow = losses.cross_entropy_logits(d_t_hat_flow, torch.ones(batch_size))
        if self.rgb and self.flow:  # For joint input
            dok = torch.cat((dok_src_rgb, dok_src_flow, dok_tgt_rgb, dok_tgt_flow))
            dok_src = torch.cat((dok_src_rgb, dok_src_flow))
            dok_tgt = torch.cat((dok_tgt_rgb, dok_tgt_flow))
            wasserstein_distance_rgb = d_hat_rgb.mean() - (1 + self._beta_ratio) * d_t_hat_rgb.mean()
            wasserstein_distance_flow = d_hat_flow.mean() - (1 + self._beta_ratio) * d_t_hat_flow.mean()
            # Average the two modality distances.
            wasserstein_distance = (wasserstein_distance_rgb + wasserstein_distance_flow) / 2
        else:
            if self.rgb:  # For rgb input
                d_hat = d_hat_rgb
                d_t_hat = d_t_hat_rgb
                dok_src = dok_src_rgb
                dok_tgt = dok_tgt_rgb
            else:  # For flow input
                d_hat = d_hat_flow
                d_t_hat = d_t_hat_flow
                dok_src = dok_src_flow
                dok_tgt = dok_tgt_flow
            wasserstein_distance = d_hat.mean() - (1 + self._beta_ratio) * d_t_hat.mean()
            dok = torch.cat((dok_src, dok_tgt))
        loss_cls, ok_src = losses.cross_entropy_logits(y_hat, y_s)
        _, ok_tgt = losses.cross_entropy_logits(y_t_hat, y_tu)
        adv_loss = wasserstein_distance
        task_loss = loss_cls
        log_metrics = {
            f"{split_name}_source_acc": ok_src,
            f"{split_name}_target_acc": ok_tgt,
            f"{split_name}_domain_acc": dok,
            f"{split_name}_source_domain_acc": dok_src,
            f"{split_name}_target_domain_acc": dok_tgt,
            f"{split_name}_wasserstein_dist": wasserstein_distance,
        }
        return task_loss, adv_loss, log_metrics
    def configure_optimizers(self):
        """Build the task/feature optimizer and a separate critic optimizer.

        The critic optimizer (``self.critic_opt``) is stored on the instance
        and stepped manually in ``critic_update_steps``; only the task/feature
        optimizer is returned to the training framework.
        """
        if self.image_modality in ["rgb", "flow"]:
            if self.rgb_feat is not None:
                nets = [self.rgb_feat, self.classifier]
            else:
                nets = [self.flow_feat, self.classifier]
        elif self.image_modality == "joint":
            nets = [self.rgb_feat, self.flow_feat, self.classifier]
        # Union of all trainable parameters of the selected networks.
        parameters = set()
        for net in nets:
            parameters |= set(net.parameters())
        if self._adapt_lr:
            task_feat_optimizer, task_feat_sched = self._configure_optimizer(parameters)
            self.critic_opt, self.critic_sched = self._configure_optimizer(self.domain_classifier.parameters())
            # _configure_optimizer returns lists; keep only the first entry.
            self.critic_opt = self.critic_opt[0]
            self.critic_sched = self.critic_sched[0]
            return task_feat_optimizer, task_feat_sched
        else:
            task_feat_optimizer = self._configure_optimizer(parameters)
            self.critic_opt = self._configure_optimizer(self.domain_classifier.parameters())
            self.critic_sched = None
            self.critic_opt = self.critic_opt[0]
            return task_feat_optimizer
    def critic_update_steps(self, batch):
        """Train the domain critic for ``_k_critic`` steps on one batch.

        Feature extractors are frozen (and features detached) while the critic
        maximizes the Wasserstein distance, regularized by a gradient penalty.
        Skipped entirely during the first ``_init_epochs`` warm-up epochs.
        """
        if self.current_epoch < self._init_epochs:
            return
        set_requires_grad(self.domain_classifier, requires_grad=True)
        if self.image_modality in ["rgb", "flow"]:
            if self.rgb_feat is not None:
                set_requires_grad(self.rgb_feat, requires_grad=False)
                (x_s, y_s), (x_tu, _) = batch
                with torch.no_grad():
                    h_s = self.rgb_feat(x_s).data.view(x_s.shape[0], -1)
                    h_t = self.rgb_feat(x_tu).data.view(x_tu.shape[0], -1)
            else:
                set_requires_grad(self.flow_feat, requires_grad=False)
                (x_s, y_s), (x_tu, _) = batch
                with torch.no_grad():
                    h_s = self.flow_feat(x_s).data.view(x_s.shape[0], -1)
                    h_t = self.flow_feat(x_tu).data.view(x_tu.shape[0], -1)
            for _ in range(self._k_critic):
                # gp refers to gradient penalty in Wasserstein distance.
                gp = losses.gradient_penalty(self.domain_classifier, h_s, h_t)
                critic_s = self.domain_classifier(h_s)
                critic_t = self.domain_classifier(h_t)
                wasserstein_distance = critic_s.mean() - (1 + self._beta_ratio) * critic_t.mean()
                # The critic maximizes the distance, hence the negated term.
                critic_cost = -wasserstein_distance + self._gamma * gp
                self.critic_opt.zero_grad()
                critic_cost.backward()
                self.critic_opt.step()
                if self.critic_sched:
                    self.critic_sched.step()
            # Unfreeze the feature extractor; freeze the critic for the main step.
            if self.rgb_feat is not None:
                set_requires_grad(self.rgb_feat, requires_grad=True)
            else:
                set_requires_grad(self.flow_feat, requires_grad=True)
            set_requires_grad(self.domain_classifier, requires_grad=False)
        elif self.image_modality == "joint":
            set_requires_grad(self.rgb_feat, requires_grad=False)
            set_requires_grad(self.flow_feat, requires_grad=False)
            (x_s_rgb, y_s), (x_s_flow, _), (x_tu_rgb, _), (x_tu_flow, _) = batch
            with torch.no_grad():
                h_s_rgb = self.rgb_feat(x_s_rgb).data.view(x_s_rgb.shape[0], -1)
                h_t_rgb = self.rgb_feat(x_tu_rgb).data.view(x_tu_rgb.shape[0], -1)
                h_s_flow = self.flow_feat(x_s_flow).data.view(x_s_flow.shape[0], -1)
                h_t_flow = self.flow_feat(x_tu_flow).data.view(x_tu_flow.shape[0], -1)
                # NOTE(review): h_s / h_t are built but unused below — the
                # critic is applied per modality; confirm whether intended.
                h_s = torch.cat((h_s_rgb, h_s_flow), dim=1)
                h_t = torch.cat((h_t_rgb, h_t_flow), dim=1)
            # Need to improve to process rgb and flow separately in the future.
            for _ in range(self._k_critic):
                # gp_x refers to gradient penalty for the input with the modality x.
                gp_rgb = losses.gradient_penalty(self.domain_classifier, h_s_rgb, h_t_rgb)
                gp_flow = losses.gradient_penalty(self.domain_classifier, h_s_flow, h_t_flow)
                critic_s_rgb = self.domain_classifier(h_s_rgb)
                critic_s_flow = self.domain_classifier(h_s_flow)
                critic_t_rgb = self.domain_classifier(h_t_rgb)
                critic_t_flow = self.domain_classifier(h_t_flow)
                wasserstein_distance_rgb = critic_s_rgb.mean() - (1 + self._beta_ratio) * critic_t_rgb.mean()
                wasserstein_distance_flow = critic_s_flow.mean() - (1 + self._beta_ratio) * critic_t_flow.mean()
                # Average the two per-modality critic objectives.
                critic_cost = (
                    -wasserstein_distance_rgb
                    + -wasserstein_distance_flow
                    + self._gamma * gp_rgb
                    + self._gamma * gp_flow
                ) * 0.5
                self.critic_opt.zero_grad()
                critic_cost.backward()
                self.critic_opt.step()
                if self.critic_sched:
                    self.critic_sched.step()
            set_requires_grad(self.rgb_feat, requires_grad=True)
            set_requires_grad(self.flow_feat, requires_grad=True)
            set_requires_grad(self.domain_classifier, requires_grad=False)
|
en
| 0.740545
|
# ============================================================================= # Author: <NAME>, <EMAIL> # <NAME>, <EMAIL> or <EMAIL> # ============================================================================= Domain adaptation systems (pipelines) for video data, e.g., for action recognition. Most are inherited from kale.pipeline.domain_adapter. MMD-based deep learning methods for domain adaptation on video data: DAN and JAN DANN-based deep learning methods for domain adaptation on video data: DANN, CDAN, CDAN+E # Uncomment for later work. # Set up a new create_fewshot_trainer for video data based on original one in `domain_adapter.py` # if dataset.is_semi_supervised(): # return create_fewshot_trainer_video( # method, dataset, feature_extractor, task_classifier, critic, **train_params # ) Common API for MME-based domain adaptation on video data: DAN, JAN # _s refers to source, _tu refers to unlabeled target # Uncomment when checking whether rgb & flow labels are equal. # print('rgb_s:{}, flow_s:{}, rgb_f:{}, flow_f:{}'.format(y_s, y_s_flow, y_tu, y_tu_flow)) # print('equal: {}/{}'.format(torch.all(torch.eq(y_s, y_s_flow)), torch.all(torch.eq(y_tu, y_tu_flow)))) # ok is abbreviation for (all) correct This is an implementation of DAN for video data. This is an implementation of JAN for video data. This is an implementation of DANN for video data. # For joint input, both two ifs are used # For joint input # For rgb input # For flow input # _s refers to source, _tu refers to unlabeled target # For joint input # For rgb input # For flow input # For joint input # For rgb input # For flow input # ok is abbreviation for (all) correct, dok refers to domain correct # adv_loss = src + tgt This is an implementation of CDAN for video data. 
# For joint input, both two ifs are used # For joint input # For rgb input # For flow input # _s refers to source, _tu refers to unlabeled target # For joint input # For rgb input # For flow input # ok is abbreviation for (all) correct, dok refers to domain correct # For joint input # For rgb input # For flow input # adv_loss = src + tgt This is an implementation of WDGRL for video data. # For joint input, both two ifs are used # For joint input # For rgb input # For flow input # _s refers to source, _tu refers to unlabeled target # For joint input # For rgb input # For flow input # ok is abbreviation for (all) correct, dok refers to domain correct # For joint input # For rgb input # For flow input # gp refers to gradient penelty in Wasserstein distance. # Need to improve to process rgb and flow separately in the future. # gp_x refers to gradient penelty for the input with the modality x.
| 2.422904
| 2
|
train_lstm.py
|
yuangan/A2L
| 0
|
6627528
|
<reponame>yuangan/A2L
#!/usr/bin/env python3
#coding:utf-8
import os
import os.path as osp
import re
import sys
import yaml
import shutil
import numpy as np
import torch
import click
import warnings
warnings.simplefilter('ignore')
from functools import reduce
from munch import Munch
from meldataset import build_dataloader
from optimizers import build_optimizer
from models import build_model_lstm
from trainer import Trainer
from torch.utils.tensorboard import SummaryWriter
from Utils.ASR.models import ASRCNN
from Utils.JDC.model import JDCNet
import logging
from logging import StreamHandler
# Module-level logger: DEBUG and above, echoed to stderr via a stream handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
torch.backends.cudnn.benchmark = True  # let cuDNN autotune conv algorithms for fixed input sizes
@click.command()
@click.option('-p', '--config_path', default='Configs/config.yml', type=str)
def main(config_path):
    """Train the LSTM model described by the YAML config at *config_path*.

    Sets up file/TensorBoard logging under ``log_dir``, builds the train/val
    dataloaders, loads the pretrained ASR and F0 (JDC) sub-models, builds the
    model and optimizer, then runs ``epochs`` train/eval cycles and writes a
    checkpoint every ``save_freq`` epochs. Returns 0 on completion.
    """
    # Fix: close the config file deterministically instead of leaking the
    # handle returned by a bare open().
    with open(config_path) as f:
        config = yaml.safe_load(f)
    log_dir = config['log_dir']
    if not osp.exists(log_dir): os.makedirs(log_dir, exist_ok=True)
    # Keep a copy of the config alongside the run artifacts for reproducibility.
    shutil.copy(config_path, osp.join(log_dir, osp.basename(config_path)))
    writer = SummaryWriter(log_dir + "/tensorboard")
    # Mirror all log records into <log_dir>/train.log.
    file_handler = logging.FileHandler(osp.join(log_dir, 'train.log'))
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s: %(message)s'))
    logger.addHandler(file_handler)
    batch_size = config.get('batch_size', 10)
    device = config.get('device', 'cpu')
    epochs = config.get('epochs', 1000)
    save_freq = config.get('save_freq', 20)
    train_path = config.get('train_data', None)
    val_path = config.get('val_data', None)
    fp16_run = config.get('fp16_run', False)
    # load data
    train_list, val_list = get_data_path_list(train_path, val_path)
    train_dataloader = build_dataloader(train_list,
                                        batch_size=batch_size,
                                        num_workers=4,
                                        device=device)
    val_dataloader = build_dataloader(val_list,
                                      batch_size=batch_size,
                                      validation=True,
                                      num_workers=2,
                                      device=device)
    # load pretrained ASR model (frozen in eval mode)
    ASR_config = config.get('ASR_config', False)
    ASR_path = config.get('ASR_path', False)
    with open(ASR_config) as f:
        ASR_config = yaml.safe_load(f)
    ASR_model_config = ASR_config['model_params']
    ASR_model = ASRCNN(**ASR_model_config)
    params = torch.load(ASR_path, map_location='cpu')['model']
    ASR_model.load_state_dict(params)
    _ = ASR_model.eval()
    # load pretrained F0 (pitch) model
    F0_path = config.get('F0_path', False)
    F0_model = JDCNet(num_class=1, seq_len=32)
    params = torch.load(F0_path, map_location='cpu')['net']
    F0_model.load_state_dict(params)
    # build model
    model, model_ema, model_mot = build_model_lstm(Munch(config['model_params']), F0_model, ASR_model)
    scheduler_params = {
        "lr": float(config['optimizer_params'].get('lr', 2e-4)),
        "pct_start": float(config['optimizer_params'].get('pct_start', 0.0)),
        "epochs": epochs,
        "steps_per_epoch": len(train_dataloader),
        "T_0": int(config['optimizer_params'].get('T_0', 500)),
        "T_multi": int(config['optimizer_params'].get('T_mult', 1)),
        "milestones": config['optimizer_params'].get('milestones', [100, 500, 1000]),
        "gamma": float(config['optimizer_params'].get('gamma', 0.1)),
    }
    # Move every sub-module of the three model dicts onto the target device.
    _ = [model[key].to(device) for key in model]
    _ = [model_ema[key].to(device) for key in model_ema]
    _ = [model_mot[key].to(device) for key in model_mot]
    # One scheduler-config copy per optimized sub-module.
    scheduler_params_dict = {key: scheduler_params.copy() for key in model_mot}
    optimizer = build_optimizer({key: model_mot[key].parameters() for key in model_mot},
                                scheduler_params_dict=scheduler_params_dict)
    trainer = Trainer(args=Munch(config['loss_params']), model=model,
                      model_ema=model_ema,
                      model_mot = model_mot,
                      optimizer=optimizer,
                      device=device,
                      train_dataloader=train_dataloader,
                      val_dataloader=val_dataloader,
                      logger=logger,
                      fp16_run=fp16_run)
    # Optionally resume from a pretrained checkpoint.
    if config.get('pretrained_model', '') != '':
        trainer.load_checkpoint(config['pretrained_model'],
                                load_only_params=config.get('load_only_params', True))
    for _ in range(1, epochs+1):
        epoch = trainer.epochs
        train_results = trainer._train_lmk_epoch()
        eval_results = trainer._eval_lmk_epoch()
        results = train_results.copy()
        results.update(eval_results)
        logger.info('--- epoch %d %s---' % (epoch, str(trainer._get_lr())))
        writer.add_scalar('lr', trainer._get_lr(), epoch)
        for key, value in results.items():
            if isinstance(value, float):
                logger.info('%-15s: %.10f' % (key, value))
                writer.add_scalar(key, value, epoch)
            else:
                # Non-float results are iterated as figures — presumably
                # spectrogram plots from evaluation; TODO confirm.
                for v in value:
                    writer.add_figure('eval_spec', v, epoch)
        if (epoch % save_freq) == 0:
            trainer.save_checkpoint(osp.join(log_dir, 'epoch_%05d.pth' % epoch))
    return 0
def get_data_path_list(train_path=None, val_path=None):
    """Load the training and validation sample lists.

    Falls back to ``Data/train_list.txt`` / ``Data/val_list.txt`` when a path
    is None. Returns ``(train_list, val_list)`` as lists of raw lines
    (trailing newlines preserved).
    """
    resolved = [
        train_path if train_path is not None else "Data/train_list.txt",
        val_path if val_path is not None else "Data/val_list.txt",
    ]
    contents = []
    for path in resolved:
        with open(path, 'r') as handle:
            contents.append(handle.readlines())
    return contents[0], contents[1]
if __name__=="__main__":
main()
|
#!/usr/bin/env python3
#coding:utf-8
import os
import os.path as osp
import re
import sys
import yaml
import shutil
import numpy as np
import torch
import click
import warnings
warnings.simplefilter('ignore')
from functools import reduce
from munch import Munch
from meldataset import build_dataloader
from optimizers import build_optimizer
from models import build_model_lstm
from trainer import Trainer
from torch.utils.tensorboard import SummaryWriter
from Utils.ASR.models import ASRCNN
from Utils.JDC.model import JDCNet
import logging
from logging import StreamHandler
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
torch.backends.cudnn.benchmark = True #
@click.command()
@click.option('-p', '--config_path', default='Configs/config.yml', type=str)
def main(config_path):
config = yaml.safe_load(open(config_path))
log_dir = config['log_dir']
if not osp.exists(log_dir): os.makedirs(log_dir, exist_ok=True)
shutil.copy(config_path, osp.join(log_dir, osp.basename(config_path)))
writer = SummaryWriter(log_dir + "/tensorboard")
# write logs
file_handler = logging.FileHandler(osp.join(log_dir, 'train.log'))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s: %(message)s'))
logger.addHandler(file_handler)
batch_size = config.get('batch_size', 10)
device = config.get('device', 'cpu')
epochs = config.get('epochs', 1000)
save_freq = config.get('save_freq', 20)
train_path = config.get('train_data', None)
val_path = config.get('val_data', None)
stage = config.get('stage', 'star')
fp16_run = config.get('fp16_run', False)
# load data
train_list, val_list = get_data_path_list(train_path, val_path)
train_dataloader = build_dataloader(train_list,
batch_size=batch_size,
num_workers=4,
device=device)
val_dataloader = build_dataloader(val_list,
batch_size=batch_size,
validation=True,
num_workers=2,
device=device)
# load pretrained ASR model
ASR_config = config.get('ASR_config', False)
ASR_path = config.get('ASR_path', False)
with open(ASR_config) as f:
ASR_config = yaml.safe_load(f)
ASR_model_config = ASR_config['model_params']
ASR_model = ASRCNN(**ASR_model_config)
params = torch.load(ASR_path, map_location='cpu')['model']
ASR_model.load_state_dict(params)
_ = ASR_model.eval()
# load pretrained F0 model
F0_path = config.get('F0_path', False)
F0_model = JDCNet(num_class=1, seq_len=32)
params = torch.load(F0_path, map_location='cpu')['net']
F0_model.load_state_dict(params)
# build model
model, model_ema, model_mot = build_model_lstm(Munch(config['model_params']), F0_model, ASR_model)
scheduler_params = {
"lr": float(config['optimizer_params'].get('lr', 2e-4)),
"pct_start": float(config['optimizer_params'].get('pct_start', 0.0)),
"epochs": epochs,
"steps_per_epoch": len(train_dataloader),
"T_0": int(config['optimizer_params'].get('T_0', 500)),
"T_multi": int(config['optimizer_params'].get('T_mult', 1)),
"milestones": config['optimizer_params'].get('milestones', [100, 500, 1000]),
"gamma": float(config['optimizer_params'].get('gamma', 0.1)),
}
_ = [model[key].to(device) for key in model]
_ = [model_ema[key].to(device) for key in model_ema]
_ = [model_mot[key].to(device) for key in model_mot]
scheduler_params_dict = {key: scheduler_params.copy() for key in model_mot}
# scheduler_params_dict['mapping_network']['max_lr'] = 2e-6
optimizer = build_optimizer({key: model_mot[key].parameters() for key in model_mot},
scheduler_params_dict=scheduler_params_dict)
trainer = Trainer(args=Munch(config['loss_params']), model=model,
model_ema=model_ema,
model_mot = model_mot,
optimizer=optimizer,
device=device,
train_dataloader=train_dataloader,
val_dataloader=val_dataloader,
logger=logger,
fp16_run=fp16_run)
if config.get('pretrained_model', '') != '':
trainer.load_checkpoint(config['pretrained_model'],
load_only_params=config.get('load_only_params', True))
for _ in range(1, epochs+1):
epoch = trainer.epochs
train_results = trainer._train_lmk_epoch()
eval_results = trainer._eval_lmk_epoch()
results = train_results.copy()
results.update(eval_results)
logger.info('--- epoch %d %s---' % (epoch, str(trainer._get_lr())))
writer.add_scalar('lr', trainer._get_lr(), epoch)
for key, value in results.items():
if isinstance(value, float):
logger.info('%-15s: %.10f' % (key, value))
writer.add_scalar(key, value, epoch)
else:
for v in value:
writer.add_figure('eval_spec', v, epoch)
if (epoch % save_freq) == 0:
trainer.save_checkpoint(osp.join(log_dir, 'epoch_%05d.pth' % epoch))
return 0
def get_data_path_list(train_path=None, val_path=None):
if train_path is None:
train_path = "Data/train_list.txt"
if val_path is None:
val_path = "Data/val_list.txt"
with open(train_path, 'r') as f:
train_list = f.readlines()
with open(val_path, 'r') as f:
val_list = f.readlines()
return train_list, val_list
if __name__=="__main__":
main()
|
en
| 0.452456
|
#!/usr/bin/env python3 #coding:utf-8 # # write logs # load data # load pretrained ASR model # load pretrained F0 model # build model # scheduler_params_dict['mapping_network']['max_lr'] = 2e-6
| 1.910425
| 2
|
Texture Maps/lbp3d.py
|
Gas-Helio/Trabalhando-com-imagens-3d
| 1
|
6627529
|
# -*- coding: utf-8 -*-
"""LBP3D.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Y-EefNZliOPCzhaLFRu_43Gr0h2xalW3
"""
import numpy as np
def LBP_3D(img, v):
    """Apply the window operator *v* to every 3x3x3 neighbourhood of *img*.

    The volume is zero-padded by one voxel on each side, so *v* always
    receives a full 3x3x3 array centred on the current voxel. The scalar it
    returns is stored in an output array of the same shape as *img*.
    """
    depth, height, width = img.shape
    padded = np.zeros((depth + 2, height + 2, width + 2))
    padded[1:-1, 1:-1, 1:-1] = img
    out = np.zeros(img.shape)
    for x in range(depth):
        for y in range(height):
            for z in range(width):
                out[x, y, z] = v(padded[x:x + 3, y:y + 3, z:z + 3])
    return out
def lbp_v1(img):
    """6-neighbour 3D LBP code for the centre voxel of a 3x3x3 window.

    Compares six predefined neighbour voxels against the centre ``img[1,1,1]``
    and packs the results (1 where neighbour > centre) into a 6-bit integer,
    most significant bit first in list order.
    """
    locais = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 2], [1, 2, 1], [2, 1, 0]]
    binary = ''
    for l in locais:
        # Bug fix: the accumulator was misspelled ('binario'), raising a
        # NameError on first use; it must append to 'binary'.
        binary += '1' if ((img[l[0], l[1], l[2]]) > img[1, 1, 1]) else '0'
    return int(binary, 2)
def lbp_v2(img):
    """Average of three 8-neighbour LBP codes over the centre slices of a 3x3x3 window.

    For each of the three centre slices (one per axis), the eight in-plane
    neighbours are compared against the window centre ``img[1,1,1]`` (1 where
    neighbour > centre), reordered from raster order into a circular order,
    and read as an 8-bit integer; the three codes are averaged.
    """
    bi1, bi2, bi3 = '', '', ''
    n1 = img[1, :, :]
    n2 = img[:, 1, :]
    n3 = img[:, :, 1]
    for i in range(3):
        for j in range(3):
            if ((i == 1) and (j == 1)):
                continue  # skip the centre voxel itself
            bi1 += '1' if (n1[i, j] > img[1, 1, 1]) else '0'
            bi2 += '1' if (n2[i, j] > img[1, 1, 1]) else '0'
            bi3 += '1' if (n3[i, j] > img[1, 1, 1]) else '0'
    # Reorder raster-scan bits into circular neighbour order (3,0,1,2,4,7,6,5).
    # Bug fix: the original slice [:5:-1] stopped after index 6 and silently
    # dropped bit 5, producing 7-bit codes (max 127) instead of the full
    # 8-bit LBP codes (max 255); [:4:-1] keeps all eight neighbour bits.
    bi1 = bi1[3] + bi1[:3] + bi1[4] + bi1[:4:-1]
    bi2 = bi2[3] + bi2[:3] + bi2[4] + bi2[:4:-1]
    bi3 = bi3[3] + bi3[:3] + bi3[4] + bi3[:4:-1]
    return (int(bi1, 2) + int(bi2, 2) + int(bi3, 2)) / 3
def lbp_v3(img):
    """7-bit LBP code over the 2x2x2 corner cube of a 3x3x3 window.

    Visits voxels with each index in {1, 2}, excluding the centre (1,1,1),
    and packs greater-than-centre comparisons into an integer, most
    significant bit first in visit order.
    """
    centre = img[1, 1, 1]
    bits = [
        '1' if img[i, j, k] > centre else '0'
        for i in (1, 2)
        for j in (1, 2)
        for k in (1, 2)
        if (i, j, k) != (1, 1, 1)
    ]
    return int(''.join(bits), 2)
"""Exemple
```
# LBP_3D(image, lbp_v1)
```
"""
|
# -*- coding: utf-8 -*-
"""LBP3D.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Y-EefNZliOPCzhaLFRu_43Gr0h2xalW3
"""
import numpy as np
def LBP_3D(img, v):
resul = np.zeros(img.shape)
imgZeros = np.zeros((img.shape[0]+2, img.shape[1]+2, img.shape[2]+2))
imgZeros[1:-1,1:-1,1:-1] = img
for x in range(img.shape[0]):
for y in range(img.shape[1]):
for z in range(img.shape[2]):
resul[x,y,z] = v(imgZeros[x:x+3, y:y+3, z:z+3])
return resul
def lbp_v1(img):
    """6-neighbour LBP code of the centre voxel of a 3x3x3 window.

    Compares the six face-adjacent neighbours against the centre value
    img[1,1,1] (strictly greater -> bit 1) and packs the six comparison
    bits, in the listed neighbour order, into an integer.
    """
    locais = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 2], [1, 2, 1], [2, 1, 0]]
    binary = ''
    for l in locais:
        # BUG FIX: the original appended to the undefined name 'binario'
        # (a Portuguese/English typo) while returning 'binary', raising
        # NameError on every call.
        binary += '1' if ((img[l[0], l[1], l[2]]) > img[1, 1, 1]) else '0'
    return int(binary, 2)
def lbp_v2(img):
bi1, bi2, bi3 = '', '', ''
n1 = img[1,:,:]
n2 = img[:,1,:]
n3 = img[:,:,1]
for i in range(3):
for j in range(3):
if ((i == 1) and (j == 1)):
continue
bi1 += '1' if (n1[i,j] > img[1,1,1]) else '0'
bi2 += '1' if (n2[i,j] > img[1,1,1]) else '0'
bi3 += '1' if (n3[i,j] > img[1,1,1]) else '0'
bi1 = bi1[3] + bi1[:3] + bi1[4] + bi1[:5:-1]
bi2 = bi2[3] + bi2[:3] + bi2[4] + bi2[:5:-1]
bi3 = bi3[3] + bi3[:3] + bi3[4] + bi3[:5:-1]
return (int(bi1, 2) + int(bi2, 2) + int(bi3, 2)) / 3
def lbp_v3(img):
bina = ''
for i in range(1,3,1):
for j in range(1,3,1):
for k in range(1,3,1):
if (i == j) and (j == k) and (k == 1):
continue
bina += '1' if (img[i,j,k] > img[1,1,1]) else '0'
return int(bina, 2)
"""Exemple
```
# LBP_3D(image, lbp_v1)
```
"""
|
en
| 0.83228
|
# -*- coding: utf-8 -*- LBP3D.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1Y-EefNZliOPCzhaLFRu_43Gr0h2xalW3 Exemple ``` # LBP_3D(image, lbp_v1) ```
| 2.703115
| 3
|
streamingApm.py
|
prelert/engine-python
| 36
|
6627530
|
#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
This script creates a new job and uploads to it APM data records
generated from existing data in a CSV file. New records will created
indefinitely or until the 'duration' argument expires. Each record has
a new timestamp so this script can be used to repeatedly replay the
historical data. After each upload of data the script requests any new
bucket results and prints them.
The script is invoked with 1 positional argument -the CSV file containing
APM to use a the source of the generated data- and optional arguments
to specify the location of the Engine API. Run the script with '--help'
to see the options.
The file used in the online example can be downloaded from
http://s3.amazonaws.com/prelert_demo/network.csv
If no 'duration' is set the script will run indefinitely cse Ctrl-C to
stop the script - the interrupt is caught and the job closed gracefully
'''
import argparse
import csv
import json
import logging
import sys
import time
from datetime import datetime, timedelta, tzinfo
from prelert.engineApiClient import EngineApiClient
# Default connection prarams
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v2'
ZERO_OFFSET = timedelta(0)
class UtcOffset(tzinfo):
    '''
    Timezone object at 0 (UTC) offset
    '''

    def utcoffset(self, dt):
        # Always zero hours from UTC.
        return ZERO_OFFSET

    def dst(self, dt):
        # UTC carries no daylight-saving adjustment.
        return ZERO_OFFSET

    def tzname(self, dt):
        # Fixed display name for this zone.
        return "UTC"
def parseArguments():
    """Parse the command line: optional --host/--port/--duration flags plus
    the positional path to the APM data file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--host",
        default=HOST,
        help="The Prelert Engine API host, defaults to " + HOST,
    )
    arg_parser.add_argument(
        "--port",
        default=PORT,
        help="The Prelert Engine API port, defaults to " + str(PORT),
    )
    # Help text intentionally uses the same backslash continuations as the
    # rest of the file; duration <= 0 means "run until now".
    arg_parser.add_argument("--duration", help="The number of hours to generate \
    data for. If not set script will produce records from the historical \
    start date until the time now", type=int, default=0)
    arg_parser.add_argument("file", help="Path to APM data")
    return arg_parser.parse_args()
def generateRecords(csv_filename, start_date, interval, end_date):
    '''
    Generator function reads csv data file and returns records
    with an updated timestamp on demand.
    Records are read from a file and stored in a local array, once
    all the records have been read the function does not loop
    round to the beginning again instead it flips and outputs
    the records in reverse order and so on.
    The csv file must contain a field with the name 'time'

    NOTE(review): Python 2 code — open(..., 'rb') with csv.reader and
    reader.next() both fail under Python 3, and 'raise StopIteration'
    inside a generator is disallowed since PEP 479.
    '''
    csv_data = []
    # Binary-mode open + reader.next() are Python 2 csv idioms.
    csv_file = open(csv_filename, 'rb')
    reader = csv.reader(csv_file)
    header = reader.next()
    # Locate the 'time' column so its value can be rewritten per record.
    time_field_idx = -1
    for i in range(len(header)):
        if header[i] == 'time':
            time_field_idx = i
            break
    if time_field_idx == -1:
        logging.error("Cannot find 'time' field in csv header")
        return
    reverse = False
    while start_date < end_date:
        try:
            # Each outer pass re-emits the header before its data rows.
            yield header
            if len(csv_data) == 0:
                # First pass: stream rows from the file while caching them
                # in csv_data for later replay.
                for row in reader:
                    row[time_field_idx] = start_date.isoformat()
                    start_date += interval
                    csv_data.append(row)
                    yield row
                    if start_date > end_date:
                        break
                csv_file.close()
            else:
                # Later passes replay the cache, alternating direction so
                # consecutive replays mirror each other.
                if reverse:
                    for row in reversed(csv_data):
                        row[time_field_idx] = start_date.isoformat()
                        start_date += interval
                        yield row
                        if start_date > end_date:
                            break
                else:
                    for row in csv_data:
                        row[time_field_idx] = start_date.isoformat()
                        start_date += interval
                        yield row
                        if start_date > end_date:
                            break
                reverse = not reverse
        except KeyboardInterrupt:
            # Python 2 style generator termination (PEP 479 forbids this).
            raise StopIteration
def main():
    '''
    Create an Engine API job, upload generated APM records in batches of
    100 and print any new bucket results after each upload.

    NOTE(review): Python 2 only — print statements throughout, and the
    leading-zero literal 05 in the start date is a SyntaxError on Python 3.
    '''
    args = parseArguments()
    # Historical start of the replayed data (2014-05-18, UTC).
    start_date = datetime(2014, 05, 18, 0, 0, 0, 0, UtcOffset())
    # interval between the generated timestamps for the records
    interval = timedelta(seconds=300)
    # Without an explicit duration, generate records up to "now".
    if args.duration <= 0:
        end_date = datetime.now(UtcOffset())
    else:
        duration = timedelta(hours=args.duration)
        end_date = start_date + duration
    # Job definition: four per-host metric detectors over hourly buckets,
    # reading comma-delimited records with an ISO-8601 'time' field.
    job_config = '{\
"analysisConfig" : {\
"bucketSpan":3600,\
"detectors" :[\
{"fieldName":"In Discards","byFieldName":"host"},\
{"fieldName":"In Octets","byFieldName":"host"},\
{"fieldName":"Out Discards","byFieldName":"host"},\
{"fieldName":"Out Octets","byFieldName":"host"} \
]\
},\
"dataDescription" : {\
"fieldDelimiter":",",\
"timeField":"time",\
"timeFormat":"yyyy-MM-dd\'T\'HH:mm:ssXXX"\
}\
}'
    engine_client = EngineApiClient(args.host, BASE_URL, args.port)
    (http_status_code, response) = engine_client.createJob(job_config)
    if http_status_code != 201:
        print (http_status_code, json.dumps(response))
        return
    job_id = response['id']
    print 'Job created with Id = ' + job_id
    # get the csv header (the first record generated)
    record_generator = generateRecords(args.file, start_date, interval, end_date)
    header = ','.join(next(record_generator))
    header += '\n'
    count = 0
    try:
        # for the results
        next_bucket_id = 1
        print
        print "Date,Anomaly Score,Max Normalized Probablility"
        data = header
        for record in record_generator:
            # format as csv and append new line
            csv = ','.join(record) + '\n'
            data += csv
            count += 1
            # Upload in batches of 100 records.
            if count == 100:
                (http_status_code, response) = engine_client.upload(job_id, data)
                if http_status_code != 202:
                    print (http_status_code, json.dumps(response))
                    break
                # get the latest results...
                (http_status_code, response) = engine_client.getBucketsByDate(job_id=job_id,
                        start_date=str(next_bucket_id), end_date=None)
                if http_status_code != 200:
                    print (http_status_code, json.dumps(response))
                    break
                # and print them
                # NOTE(review): the format string has 4 placeholders but
                # only 3 arguments — {3} raises IndexError when buckets
                # are actually printed.
                for bucket in response:
                    print "{0},{1},{2},{3}".format(bucket['timestamp'],
                            bucket['anomalyScore'], bucket['maxNormalizedProbability'])
                if len(response) > 0:
                    next_bucket_id = int(response[-1]['id']) + 1
                # must send the header every time
                data = header
                count = 0
                # sleep a little while (optional this can be removed)
                #time.sleep(0.1)
    except KeyboardInterrupt:
        print "Keyboard interrupt closing job..."
        (http_status_code, response) = engine_client.close(job_id)
        if http_status_code != 202:
            print (http_status_code, json.dumps(response))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
############################################################################
# #
# Copyright 2014 Prelert Ltd #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
'''
This script creates a new job and uploads to it APM data records
generated from existing data in a CSV file. New records will created
indefinitely or until the 'duration' argument expires. Each record has
a new timestamp so this script can be used to repeatedly replay the
historical data. After each upload of data the script requests any new
bucket results and prints them.
The script is invoked with 1 positional argument -the CSV file containing
APM to use a the source of the generated data- and optional arguments
to specify the location of the Engine API. Run the script with '--help'
to see the options.
The file used in the online example can be downloaded from
http://s3.amazonaws.com/prelert_demo/network.csv
If no 'duration' is set the script will run indefinitely cse Ctrl-C to
stop the script - the interrupt is caught and the job closed gracefully
'''
import argparse
import csv
import json
import logging
import sys
import time
from datetime import datetime, timedelta, tzinfo
from prelert.engineApiClient import EngineApiClient
# Default connection prarams
HOST = 'localhost'
PORT = 8080
BASE_URL = 'engine/v2'
ZERO_OFFSET = timedelta(0)
class UtcOffset(tzinfo):
'''
Timezone object at 0 (UTC) offset
'''
def utcoffset(self, dt):
return ZERO_OFFSET
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO_OFFSET
def parseArguments():
parser = argparse.ArgumentParser()
parser.add_argument("--host", help="The Prelert Engine API host, defaults to "
+ HOST, default=HOST)
parser.add_argument("--port", help="The Prelert Engine API port, defaults to "
+ str(PORT), default=PORT)
parser.add_argument("--duration", help="The number of hours to generate \
data for. If not set script will produce records from the historical \
start date until the time now", type=int, default=0)
parser.add_argument("file", help="Path to APM data")
return parser.parse_args()
def generateRecords(csv_filename, start_date, interval, end_date):
'''
Generator function reads csv data file and returns records
with an updated timestamp on demand.
Records are read from a file and stored in a local array, once
all the records have been read the function does not loop
round to the beginning again instead it flips and outputs
the records in reverse order and so on.
The csv file must contain a field with the name 'time'
'''
csv_data = []
csv_file = open(csv_filename, 'rb')
reader = csv.reader(csv_file)
header = reader.next()
time_field_idx = -1
for i in range(len(header)):
if header[i] == 'time':
time_field_idx = i
break
if time_field_idx == -1:
logging.error("Cannot find 'time' field in csv header")
return
reverse = False
while start_date < end_date:
try:
yield header
if len(csv_data) == 0:
# populate csv_data record
for row in reader:
row[time_field_idx] = start_date.isoformat()
start_date += interval
csv_data.append(row)
yield row
if start_date > end_date:
break
csv_file.close()
else:
if reverse:
for row in reversed(csv_data):
row[time_field_idx] = start_date.isoformat()
start_date += interval
yield row
if start_date > end_date:
break
else:
for row in csv_data:
row[time_field_idx] = start_date.isoformat()
start_date += interval
yield row
if start_date > end_date:
break
reverse = not reverse
except KeyboardInterrupt:
raise StopIteration
def main():
args = parseArguments()
start_date = datetime(2014, 05, 18, 0, 0, 0, 0, UtcOffset())
# interval between the generated timestamps for the records
interval = timedelta(seconds=300)
if args.duration <= 0:
end_date = datetime.now(UtcOffset())
else:
duration = timedelta(hours=args.duration)
end_date = start_date + duration
job_config = '{\
"analysisConfig" : {\
"bucketSpan":3600,\
"detectors" :[\
{"fieldName":"In Discards","byFieldName":"host"},\
{"fieldName":"In Octets","byFieldName":"host"},\
{"fieldName":"Out Discards","byFieldName":"host"},\
{"fieldName":"Out Octets","byFieldName":"host"} \
]\
},\
"dataDescription" : {\
"fieldDelimiter":",",\
"timeField":"time",\
"timeFormat":"yyyy-MM-dd\'T\'HH:mm:ssXXX"\
}\
}'
engine_client = EngineApiClient(args.host, BASE_URL, args.port)
(http_status_code, response) = engine_client.createJob(job_config)
if http_status_code != 201:
print (http_status_code, json.dumps(response))
return
job_id = response['id']
print 'Job created with Id = ' + job_id
# get the csv header (the first record generated)
record_generator = generateRecords(args.file, start_date, interval, end_date)
header = ','.join(next(record_generator))
header += '\n'
count = 0
try:
# for the results
next_bucket_id = 1
print
print "Date,Anomaly Score,Max Normalized Probablility"
data = header
for record in record_generator:
# format as csv and append new line
csv = ','.join(record) + '\n'
data += csv
# print data
count += 1
if count == 100:
(http_status_code, response) = engine_client.upload(job_id, data)
if http_status_code != 202:
print (http_status_code, json.dumps(response))
break
# get the latest results...
(http_status_code, response) = engine_client.getBucketsByDate(job_id=job_id,
start_date=str(next_bucket_id), end_date=None)
if http_status_code != 200:
print (http_status_code, json.dumps(response))
break
# and print them
for bucket in response:
print "{0},{1},{2},{3}".format(bucket['timestamp'],
bucket['anomalyScore'], bucket['maxNormalizedProbability'])
if len(response) > 0:
next_bucket_id = int(response[-1]['id']) + 1
# must send the header every time
data = header
count = 0
# sleep a little while (optional this can be removed)
#time.sleep(0.1)
except KeyboardInterrupt:
print "Keyboard interrupt closing job..."
(http_status_code, response) = engine_client.close(job_id)
if http_status_code != 202:
print (http_status_code, json.dumps(response))
if __name__ == "__main__":
main()
|
en
| 0.780786
|
#!/usr/bin/env python ############################################################################ # # # Copyright 2014 Prelert Ltd # # # # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # # ############################################################################ This script creates a new job and uploads to it APM data records generated from existing data in a CSV file. New records will created indefinitely or until the 'duration' argument expires. Each record has a new timestamp so this script can be used to repeatedly replay the historical data. After each upload of data the script requests any new bucket results and prints them. The script is invoked with 1 positional argument -the CSV file containing APM to use a the source of the generated data- and optional arguments to specify the location of the Engine API. Run the script with '--help' to see the options. The file used in the online example can be downloaded from http://s3.amazonaws.com/prelert_demo/network.csv If no 'duration' is set the script will run indefinitely cse Ctrl-C to stop the script - the interrupt is caught and the job closed gracefully # Default connection prarams Timezone object at 0 (UTC) offset Generator function reads csv data file and returns records with an updated timestamp on demand. 
Records are read from a file and stored in a local array, once all the records have been read the function does not loop round to the beginning again instead it flips and outputs the records in reverse order and so on. The csv file must contain a field with the name 'time' # populate csv_data record # interval between the generated timestamps for the records # get the csv header (the first record generated) # for the results # format as csv and append new line # print data # get the latest results... # and print them # must send the header every time # sleep a little while (optional this can be removed) #time.sleep(0.1)
| 1.92707
| 2
|
cfnlp/faiss/__init__.py
|
zhangyuo/cf-nlp-py
| 0
|
6627531
|
<gh_stars>0
#!/usr/bin/env python
# coding:utf-8
"""
# @Time : 2020-07-27 14:28
# @Author : Zhangyu
# @Email : <EMAIL>
# @File : __init__.py.py
# @Software : PyCharm
# @Desc :
"""
|
#!/usr/bin/env python
# coding:utf-8
"""
# @Time : 2020-07-27 14:28
# @Author : Zhangyu
# @Email : <EMAIL>
# @File : __init__.py.py
# @Software : PyCharm
# @Desc :
"""
|
fr
| 0.303593
|
#!/usr/bin/env python # coding:utf-8 # @Time : 2020-07-27 14:28 # @Author : Zhangyu # @Email : <EMAIL> # @File : __init__.py.py # @Software : PyCharm # @Desc :
| 1.041039
| 1
|
openpecha/catalog/storage.py
|
ta4tsering/openpecha-toolkit
| 1
|
6627532
|
import base64
import logging
from abc import ABC, abstractclassmethod, abstractmethod

import requests
from github import Github

from .config import GITHUB_BUCKET_CONFIG
from .utils import create_pecha_id
class Bucket(ABC):
    """A class representing a Bucket on Cloud Storage.

    Concrete buckets supply a backend client factory plus generators that
    stream pecha base-layer contents.
    """

    def __init__(self, name, config):
        # name: bucket/organisation identifier; config: backend settings dict.
        self.name = name
        self.config = config
        super().__init__()

    # FIX: the original used @abstractclassmethod, which is deprecated and
    # wrong here — subclasses implement these as plain instance methods, so
    # @abstractmethod is the correct decorator.
    @abstractmethod
    def _get_client(self, name, token):
        """Return a client object for the storage backend."""

    @abstractmethod
    def get_pecha_base(self, id):
        """Yield (content, filename) for each base volume of pecha *id*."""

    @abstractmethod
    def get_all_pechas_base(self):
        """Yield (pecha_id, base-content generator) pairs for the catalog."""
class GithubBucket(Bucket):
    """Bucket backed by a GitHub organisation; pecha repos live under the org."""

    def __init__(self, name, config=GITHUB_BUCKET_CONFIG):
        self.client = self._get_client(name, config["token"])
        super().__init__(name, config)

    def _get_client(self, name, token):
        """Return github org object"""
        return Github(token).get_organization(name)

    @staticmethod
    def get_blob_content(blob_url):
        """Fetch the base64-encoded blob content at *blob_url*."""
        return requests.get(blob_url).json()["content"]

    @staticmethod
    def decode_bas64_blob(base64_blob):
        """Decode a base64-encoded blob into a UTF-8 string."""
        return base64.b64decode(base64_blob).decode("utf-8")

    def _get_content(self, gh_file_obj):
        # Two-step fetch: raw base64 via the git blob URL, then decode.
        raw_blob = self.get_blob_content(gh_file_obj.git_url)
        return self.decode_bas64_blob(raw_blob)

    def get_pecha_base(self, id):
        """Yield (content, filename) for every base volume of pecha *id*."""
        try:
            repo = self.client.get_repo(id)
        except Exception:
            # NOTE(review): returning from a generator just ends iteration;
            # the ([], "") value is attached to StopIteration and discarded
            # by callers that iterate this generator.
            return [], ""
        for base_file in repo.get_contents(f"{id}.opf/base"):
            content = self._get_content(base_file)
            logging.info(f"Downloaded {base_file.name}")
            yield content, base_file.name

    def get_all_pechas_base(self):
        """Yield (pecha_id, base-volume generator) over the configured catalog range."""
        catalog = self.config["catalog"]
        for num in range(catalog["start_id"], catalog["end_id"] + 1):
            pecha_id = create_pecha_id(num)
            logging.info(f"Downloading {pecha_id}")
            yield pecha_id, self.get_pecha_base(pecha_id)
|
import base64
import logging
from abc import ABC, abstractclassmethod
import requests
from github import Github
from .config import GITHUB_BUCKET_CONFIG
from .utils import create_pecha_id
class Bucket(ABC):
"""A class representing a Bucket on Cloud Storage."""
def __init__(self, name, config):
self.name = name
self.config = config
super().__init__()
@abstractclassmethod
def _get_client(self, name, token):
pass
@abstractclassmethod
def get_pecha_base(self, id):
pass
@abstractclassmethod
def get_all_pechas_base(self):
pass
class GithubBucket(Bucket):
def __init__(self, name, config=GITHUB_BUCKET_CONFIG):
self.client = self._get_client(name, config["token"])
super().__init__(name, config)
def _get_client(self, name, token):
"""Return github org object"""
g = Github(token)
org = g.get_organization(name)
return org
@staticmethod
def get_blob_content(blob_url):
"""
Get the blob content from the url provided in parameter
"""
base64_content = requests.get(blob_url).json()["content"]
return base64_content
@staticmethod
def decode_bas64_blob(base64_blob):
"""
Decode the base64 encoded blob into UTF-8
"""
decoded_list_data = base64.b64decode(base64_blob).decode("utf-8")
return decoded_list_data
def _get_content(self, gh_file_obj):
blob_content = self.get_blob_content(gh_file_obj.git_url)
decoded_blob = self.decode_bas64_blob(blob_content)
return decoded_blob
def get_pecha_base(self, id):
try:
repo = self.client.get_repo(id)
except Exception:
return [], ""
for vol_base_file_obj in repo.get_contents(f"{id}.opf/base"):
vol_base_content = self._get_content(vol_base_file_obj)
logging.info(f"Downloaded {vol_base_file_obj.name}")
yield vol_base_content, vol_base_file_obj.name
def get_all_pechas_base(self):
for pecha_num in range(
self.config["catalog"]["start_id"], self.config["catalog"]["end_id"] + 1
):
pecha_id = create_pecha_id(pecha_num)
logging.info(f"Downloading {pecha_id}")
yield pecha_id, self.get_pecha_base(pecha_id)
|
en
| 0.51916
|
A class representing a Bucket on Cloud Storage. Return github org object Get the blob content from the url provided in parameter Decode the base64 encoded blob into UTF-8
| 2.718334
| 3
|
encode.py
|
mueslimak3r/mystery-meme
| 0
|
6627533
|
<filename>encode.py
import sys, getopt
import fixedint
from PIL import Image
from pathlib import Path
from generatepattern import generate_pattern
'''
encode_bits
encodes two bits from the source text into the LSBs of the source image's green and blue channels
'''
def encode_bits(g, b, image_pos, input_data, input_data_iter, input_data_bit_iter):
    """Embed two payload bits into the LSBs of the green and blue channels.

    Bit ``input_data_bit_iter`` of the current payload byte lands in the
    green channel's least significant bit and bit ``input_data_bit_iter + 1``
    in the blue channel's, at pixel ``image_pos``.
    """
    payload_byte = input_data[input_data_iter]
    # Extract the two payload bits destined for this pixel.
    green_bit = (payload_byte >> input_data_bit_iter) & 0x1
    blue_bit = (payload_byte >> (input_data_bit_iter + 1)) & 0x1
    # Clear each channel's LSB, then drop the payload bit into it —
    # equivalent to the original conditional decrement/OR, in one step.
    g.putpixel(image_pos, (g.getpixel(image_pos) & ~0x1) | green_bit)
    b.putpixel(image_pos, (b.getpixel(image_pos) & ~0x1) | blue_bit)
'''
encoder
opens image and converts to 4 x 8bit RGBA
reads input data, gets length of input data and serializes the int value this length
the data to be encoded in now the length value joined to the input data, as a bitarray
uses generator function, supplied with the seed, to generate x, y pairs
The last call of this function opens the pygame window that displays the pattern visually
for each x,y pair 2 bits will be encoded via encode_bits and saved into the g, b lists
the 4 channels are merged into a new image object and saved to the filesystem
'''
def encoder(input_image, output_image, input_data_file, seed):
    """Hide input_data_file's bytes in input_image's green/blue LSBs; save as PNG.

    The payload is prefixed with its own length serialized as a fixed
    32-bit unsigned int. generate_pattern(seed, ...) supplies the pixel
    coordinates and encode_bits stores two payload bits per visited pixel,
    so each payload byte occupies four pixels.
    """
    img = Image.open(input_image) # open source image as PIL/Pillow object
    hdatareader = open(input_data_file, 'rb') # open input data file to be read as a bytearray
    input_data = hdatareader.read()
    r, g, b, a = img.convert('RGBA').split() # split the color/alpha channels into individual lists
    input_data_len = len(input_data) * 4 # payload size in pixels (4 pixels per byte at 2 bits/pixel)
    print("original size of input data (in pixels used): ", input_data_len)
    input_data_iter = 0
    input_data_bit_iter = 0
    data_len_serialized = fixedint.UInt32(input_data_len).to_bytes() # turn the length of the input data into a bytearray to it can be prepended to the input data during encoding
    input_data = data_len_serialized + input_data # prepend the length to the input data
    input_data_len = len(input_data) * 4 # update value to include total length of data that will be encoded
    print("image mode before conversion: ", img.mode)
    if input_data_len > img.width * img.height: # check if everything fits
        print("input data too large")
        return
    print("input data will affect ", input_data_len, " of ", img.width * img.height, "pixels in the image")
    for x, y in generate_pattern(seed, img.width, img.height, input_data_len): # generate image x,y coordinates where bits encoded in the green and blue pixel data
        encode_bits(g, b, (x, y), input_data, input_data_iter, input_data_bit_iter) # pass g and b color channel list, pixel coordinates, and the indexes of the byte and bits to encode
        input_data_bit_iter += 2
        if input_data_bit_iter > 6: # move forward to the next byte
            #print(input_data[input_data_iter], end=' ')
            input_data_iter += 1
            input_data_bit_iter = 0
    newimage = Image.merge('RGBA', (r, g, b, a)) # make new image to export by combining the channels
    newimage.save(output_image, 'PNG')
    img.close()
    newimage.close()
    hdatareader.close()
'''
main
gets arguments using getopt
'''
def main(argv):
    """Parse command-line options and invoke the encoder.

    Requires -d/--hdata, -i/--ifile, -o/--ofile and a positive -s/--seed;
    prints the usage line and exits with status 2 when anything is missing.
    """
    usage = 'encode.py -d <data to encode> -i <input image file> -o <output image file> -s <seed(integer)>'
    data_path = ''
    source_image = ''
    dest_image = ''
    seed = 0
    try:
        opts, args = getopt.getopt(argv, "hd:i:o:s:", ["hdata=", "ifile=", "ofile=", "seed="])
    except getopt.GetoptError:
        print(usage)
        sys.exit(2)
    for flag, value in opts:
        if flag == '-h':
            print(usage)
            sys.exit()
        elif flag in ("-d", "--hdata"):
            data_path = value
        elif flag in ("-i", "--ifile"):
            source_image = value
        elif flag in ("-o", "--ofile"):
            dest_image = value
        elif flag in ("-s", "--seed"):
            seed = int(value)
    # All four options are mandatory; the seed must be a positive integer.
    if source_image == '' or dest_image == '' or data_path == '' or seed <= 0:
        print(usage)
        sys.exit(2)
    print('Seed is - ', seed)
    print('File with data to encode is - ', data_path)
    print('Input image file is - ', source_image)
    print('Output image file is - ', dest_image)
    encoder(source_image, dest_image, data_path, seed)
if __name__ == "__main__":
main(sys.argv[1:])
|
<filename>encode.py
import sys, getopt
import fixedint
from PIL import Image
from pathlib import Path
from generatepattern import generate_pattern
'''
encode_bits
encodes two bits from the source text into the LSBs of the source image's green and blue channels
'''
def encode_bits(g, b, image_pos, input_data, input_data_iter, input_data_bit_iter):
green_channel_pixel = g.getpixel(image_pos) #
blue_channel_pixel = b.getpixel(image_pos) # get g and b byte to modify
selected_byte = input_data[input_data_iter] # get byte from input data to modify
green_pixel_bitmask = (selected_byte & (0x1 << input_data_bit_iter)) >> input_data_bit_iter #
blue_pixel_bitmask = (selected_byte & (0x1 << (input_data_bit_iter + 1))) >> (input_data_bit_iter + 1) # bitshift the two bits from the input data to mask the least significant bits in the g and b pixel bytes
if green_channel_pixel & 0x1 and green_pixel_bitmask == 0: # flip the bit on or off if needed
green_channel_pixel -= 1
else:
green_channel_pixel |= green_pixel_bitmask
if blue_channel_pixel & 0x1 and blue_pixel_bitmask == 0:
blue_channel_pixel -= 1
else:
blue_channel_pixel |= blue_pixel_bitmask
g.putpixel(image_pos, green_channel_pixel) # save the alteration into each color channel
b.putpixel(image_pos, blue_channel_pixel)
'''
encoder
opens image and converts to 4 x 8bit RGBA
reads input data, gets length of input data and serializes the int value this length
the data to be encoded in now the length value joined to the input data, as a bitarray
uses generator function, supplied with the seed, to generate x, y pairs
The last call of this function opens the pygame window that displays the pattern visually
for each x,y pair 2 bits will be encoded via encode_bits and saved into the g, b lists
the 4 channels are merged into a new image object and saved to the filesystem
'''
def encoder(input_image, output_image, input_data_file, seed):
img = Image.open(input_image) # open source image as PIL/Pillow object
hdatareader = open(input_data_file, 'rb') # open input data file to be read as a bytearray
input_data = hdatareader.read()
r, g, b, a = img.convert('RGBA').split() # split the color/alpha channels into individual lists
input_data_len = len(input_data) * 4 # get length of input data
print("original size of input data (in pixels used): ", input_data_len)
input_data_iter = 0
input_data_bit_iter = 0
data_len_serialized = fixedint.UInt32(input_data_len).to_bytes() # turn the length of the input data into a bytearray to it can be prepended to the input data during encoding
input_data = data_len_serialized + input_data # prepend the length to the input data
input_data_len = len(input_data) * 4 # update value to include total length of data that will be encoded
print("image mode before conversion: ", img.mode)
if input_data_len > img.width * img.height: # check if everything fits
print("input data too large")
return
print("input data will affect ", input_data_len, " of ", img.width * img.height, "pixels in the image")
for x, y in generate_pattern(seed, img.width, img.height, input_data_len): # generate image x,y coordinates where bits encoded in the green and blue pixel data
encode_bits(g, b, (x, y), input_data, input_data_iter, input_data_bit_iter) # pass g and b color channel list, pixel coordinates, and the indexes of the byte and bits to encode
input_data_bit_iter += 2
if input_data_bit_iter > 6: # move forward to the next byte
#print(input_data[input_data_iter], end=' ')
input_data_iter += 1
input_data_bit_iter = 0
newimage = Image.merge('RGBA', (r, g, b, a)) # make new image to export by combining the channels
newimage.save(output_image, 'PNG')
img.close()
newimage.close()
hdatareader.close()
'''
main
gets arguments using getopt
'''
def main(argv):
input_image = ''
input_data_file = ''
output_image = ''
seed = 0
try:
opts, args = getopt.getopt(argv,"hd:i:o:s:",["hdata=", "ifile=","ofile=", "seed="])
except getopt.GetoptError:
print('encode.py -d <data to encode> -i <input image file> -o <output image file> -s <seed(integer)>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('encode.py -d <data to encode> -i <input image file> -o <output image file> -s <seed(integer)>')
sys.exit()
elif opt in ("-d", "--hdata"):
input_data_file = arg
elif opt in ("-i", "--ifile"):
input_image = arg
elif opt in ("-o", "--ofile"):
output_image = arg
elif opt in ("-s", "--seed"):
seed = int(arg)
if input_image == '' or output_image == '' or input_data_file == '' or seed <= 0:
print('encode.py -d <data to encode> -i <input image file> -o <output image file> -s <seed(integer)>')
sys.exit(2)
print ('Seed is - ', seed)
print ('File with data to encode is - ', input_data_file)
print ('Input image file is - ', input_image)
print ('Output image file is - ', output_image)
encoder(input_image, output_image, input_data_file, seed)
if __name__ == "__main__":
main(sys.argv[1:])
|
en
| 0.766578
|
encode_bits encodes two bits from the source text into the LSBs of the source image's green and blue channels # # get g and b byte to modify # get byte from input data to modify # # bitshift the two bits from the input data to mask the least significant bits in the g and b pixel bytes # flip the bit on or off if needed # save the alteration into each color channel encoder opens image and converts to 4 x 8bit RGBA reads input data, gets length of input data and serializes the int value this length the data to be encoded in now the length value joined to the input data, as a bitarray uses generator function, supplied with the seed, to generate x, y pairs The last call of this function opens the pygame window that displays the pattern visually for each x,y pair 2 bits will be encoded via encode_bits and saved into the g, b lists the 4 channels are merged into a new image object and saved to the filesystem # open source image as PIL/Pillow object # open input data file to be read as a bytearray # split the color/alpha channels into individual lists # get length of input data # turn the length of the input data into a bytearray to it can be prepended to the input data during encoding # prepend the length to the input data # update value to include total length of data that will be encoded # check if everything fits # generate image x,y coordinates where bits encoded in the green and blue pixel data # pass g and b color channel list, pixel coordinates, and the indexes of the byte and bits to encode # move forward to the next byte #print(input_data[input_data_iter], end=' ') # make new image to export by combining the channels main gets arguments using getopt
| 3.567049
| 4
|
tests/test__mets_maker.py
|
StateArchivesOfNorthCarolina/tomes_metadata
| 0
|
6627534
|
#!/usr/bin/env python3
# import modules.
import sys; sys.path.append("..")
import logging
import plac
import tempfile
import unittest
from datetime import datetime
from tomes_packager.lib.mets_maker import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_MetsMaker(unittest.TestCase):
    """Checks that METSMaker renders the sample template into valid METS."""

    def setUp(self):
        # Template rendered by every test case.
        self.sample_file = "sample_files/sample_mets_template.xml"

    def test__mets_validity(self):
        """Is a rendered METS valid?"""
        # Reserve a unique filename in the current directory, then release it
        # so METSMaker can create the output file itself.
        handle, out_path = tempfile.mkstemp(dir=".", suffix=".xml")
        os.close(handle)
        os.remove(out_path)
        # Render the sample template; TIMESTAMP is a callable the template
        # invokes to obtain an ISO-8601 timestamp string.
        self.mm = METSMaker(
            self.sample_file,
            out_path,
            TIMESTAMP=lambda: datetime.now().isoformat() + "Z",
        )
        self.mm.make()
        # The rendered document must pass METS schema validation.
        self.assertTrue(self.mm.validate())
        os.remove(out_path)
# CLI.
def main(template: "METS template file", output_file: "output METS XML file"):
    "Renders METS template with callable \"TIMESTAMP()\" variable to a METS XML file.\
\nexample: `python3 test__mets_maker.py sample_files/sample_mets_template.xml out.xml`"
    # Render the template — supplying an ISO-8601 timestamp callable — and
    # immediately self-validate the resulting METS document.
    maker = METSMaker(
        template,
        output_file,
        TIMESTAMP=lambda: datetime.now().isoformat() + "Z",
    )
    maker.make()
    maker.validate()


if __name__ == "__main__":
    plac.call(main)
|
#!/usr/bin/env python3
# import modules.
import sys; sys.path.append("..")
import logging
import plac
import tempfile
import unittest
from datetime import datetime
from tomes_packager.lib.mets_maker import *
# enable logging.
logging.basicConfig(level=logging.DEBUG)
class Test_MetsMaker(unittest.TestCase):
    """Tests that METSMaker renders a sample template into valid METS XML."""

    def setUp(self):
        # set attributes.
        # Path to the METS template rendered by each test.
        self.sample_file = "sample_files/sample_mets_template.xml"

    def test__mets_validity(self):
        """ Is a rendered METS valid? """
        # make temporary file, save the filename, then delete the file.
        # (mkstemp creates the file; it is removed so METSMaker writes it fresh.)
        mets_handle, mets_path = tempfile.mkstemp(dir=".", suffix=".xml")
        os.close(mets_handle)
        os.remove(mets_path)
        # write a temporary METS file.
        # TIMESTAMP is a callable the template invokes for an ISO-8601 timestamp.
        self.mm = METSMaker(self.sample_file, mets_path,
                TIMESTAMP = lambda: datetime.now().isoformat() + "Z")
        self.mm.make()
        # see if METS is valid (schema validation of the rendered document).
        self.assertTrue(self.mm.validate())
        os.remove(mets_path)
# CLI.
def main(template: "METS template file", output_file: "output METS XML file"):
    "Renders METS template with callable \"TIMESTAMP()\" variable to a METS XML file.\
\nexample: `python3 test__mets_maker.py sample_files/sample_mets_template.xml out.xml`"
    # create and self-validate METS file.
    # (Parameter annotations double as plac's CLI help strings.)
    mm = METSMaker(template, output_file,
        TIMESTAMP = lambda: datetime.now().isoformat() + "Z")
    mm.make()
    mm.validate()


if __name__ == "__main__":
    # plac builds the command-line interface from main's signature.
    plac.call(main)
|
en
| 0.665045
|
#!/usr/bin/env python3 # import modules. # enable logging. # set attributes. Is a rendered METS valid? # make temporary file, save the filename, then delete the file. # write a temporary METS file. # see if METS is valid. # CLI. # create and self-validate METS file.
| 2.460676
| 2
|
MHD/FEniCS/CGns/NS1.py
|
wathen/PhD
| 3
|
6627535
|
#!/usr/bin/python
# import petsc4py
# import sys
# petsc4py.init(sys.argv)
# from petsc4py import PETSc
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
def SaveEpertaMatrix(A,name):
    # Save Epetra matrix A to "<name>.mat" as a SciPy CSR matrix, by first
    # dumping it to a Matlab-style "<name>.txt" file and reading that back.
    # NOTE(review): name looks like a typo for "SaveEpetraMatrix" — kept
    # unchanged for compatibility with existing callers.
    from PyTrilinos import EpetraExt
    from numpy import array,loadtxt
    import scipy.sparse as sps
    import scipy.io
    test ="".join([name,".txt"])
    EpetraExt.RowMatrixToMatlabFile(test,A)
    data = loadtxt(test)
    # Matlab indices are 1-based; shift to 0-based for SciPy.
    col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
    Asparse = sps.csr_matrix((values, (row, col)))
    testmat ="".join([name,".mat"])
    scipy.io.savemat( testmat, {name: Asparse},oned_as='row')
def NullSpace(A,name):
    # Drop the last row and column of Epetra matrix A and return the result
    # as a new Epetra CrsMatrix. Round-trips through a "<name>.txt"
    # Matlab-format dump on disk.
    # NOTE(review): trimming one row/column presumably pins a pressure DOF to
    # remove the system's null space — confirm against the solver setup.
    from PyTrilinos import EpetraExt, Epetra
    from numpy import array,loadtxt
    import scipy.sparse as sps
    import scipy.io
    import matplotlib.pylab as plt
    test ="".join([name,".txt"])
    EpetraExt.RowMatrixToMatlabFile(test,A)
    data = loadtxt(test)
    # Matlab indices are 1-based; convert to 0-based.
    col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
    Asparse = sps.csr_matrix((values, (row, col)))
    (Nb,Mb) = Asparse.shape
    # Trim the final row and column.
    Aublas1 = Asparse[0:Nb-1,0:Mb-1]
    # plt.spy(Aublas1)
    # if (Nb < 1000):
    # plt.show()
    comm = Epetra.PyComm()
    Ap = scipy_csr_matrix2CrsMatrix(Aublas1, comm)
    return Ap
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m = 2
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'No'
Saving = 'no'
case = 1
parameters['linear_algebra_backend'] = ''
for xx in xrange(1,m):
print xx
nn = 2**(xx+4)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
# nn = 32
# mesh = UnitSquareMesh(16,16)
# mesh = UnitSquareMesh(nn, nn)
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
# tic()
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
# u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
# u0 = Expression(("pow(x[1],2)-1","pow(x[0],2)-1"))
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
Su0 = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
p0 = Expression("x[1]+x[0]-1")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bc1 = DirichletBC(W.sub(1), p0, boundary)
bcs = [bc]
# v, u = TestFunction(V), TrialFunction(V)
# q, p = TestFunction(Q), TrialFunction(Q)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
# f = Expression(("-pi*pi*sin(pi*x[1])+2*x[1]","-pi*pi*sin(pi*x[0])"))
if case == 1:
f = -Expression(("120*x[0]*x[1]*(1-mu)+ 400*x[0]*pow(x[1],6)+(5*pow(x[0],4)-5*pow(x[1],4))*60*x[0]*x[1]*x[1]","60*(pow(x[0],2)-pow(x[1],2))*(1-mu)+400*pow(x[0],4)*pow(x[1],3)-(5*pow(x[0],4)-5*pow(x[1],4))*20*x[1]*x[1]*x[1]"), mu = 1e0)
elif case == 2:
f = -Expression(("-1","-1"))
elif case == 3:
f = -Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
u_k = Function(V)
mu = Constant(1e0)
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
u_k = Function(V)
a11 = -mu*inner(grad(v), grad(u))*dx - inner(dolfin.dot(u_k,grad(u)),v)*dx
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v, f)*dx
a = a11+a12+a21
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 100 # max no of iterations allowed
# i = p*q*dx
# AA = assemble(a11)
while eps > tol and iter < maxiter:
iter += 1
x = Function(W)
uu = Function(W)
tic()
AA, bb = assemble_system(a, L1, bcs)
print toc()
tic()
A_epetra = as_backend_type(AA).mat()
A_epetra =NullSpace(A_epetra,"A_epetra")
# As = AA.sparray()[0:-1,0:-1]
# print toc()
# tic()
# A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
print toc()
pause
# PP, btmp = assemble_system(i+a11, L1, bcs)
DoF = V.dim() + Q.dim()
x_epetra = Epetra.Vector(0*bb.array())
A_epetra = as_backend_type(AA).mat()
# P_epetra = down_cast(PP).mat()
b_epetra = as_backend_type(bb).vec()
# x_epetra = down_cast(uu.vector()).vec()
A_epetra =NullSpace(A_epetra,"A_epetra")
# P_epetra =NullSpace(P_epetra,"P_epetra")
print toc()
bbb =bb.array()
Nb = bbb.shape
b =bbb[0:Nb[0]-1]
b_epetra = Epetra.Vector(b)
xxx = x.vector().array()
x =xxx[0:Nb[0]-1]
x_epetra = Epetra.Vector(x)
pause()
# mlList = {"max levels" : 200,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# }
# prec = ML.MultiLevelPreconditioner(P_epetra, False)
# prec.SetParameterList(mlList)
# prec.ComputePreconditioner()
# solver = AztecOO.AztecOO(A_epetra, x_epetra, b_epetra)
# solver.SetPrecOperator(prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_gmres);
# solver.SetAztecOption(AztecOO.AZ_output, 100);
# err = solver.Iterate(20000, 1e-10)
tic()
problem = Epetra.LinearProblem(A_epetra,x_epetra,b_epetra)
print '\n\n\n\n\n\n'
factory = Amesos.Factory()
solver = factory.Create("Amesos_Umfpack", problem)
# solver = factory.Create("MUMPS", problem)
amesosList = {"PrintTiming" : True, "PrintStatus" : True }
solver.SetParameters(amesosList)
solver.SymbolicFactorization()
solver.NumericFactorization()
solver.Solve()
soln = problem.GetLHS()
print "||x_computed||_2 =", soln.Norm2()
# solver.PrintTiming()
print '\n\n\n\n\n\n'
uu = x_epetra[0:Vdim[xx-1][0]]
# time = time+toc()
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + uu.array
diff = u1.vector().array() - u_k.vector().array()
eps = np.linalg.norm(diff, ord=np.Inf)
print '\n\n\niter=%d: norm=%g' % (iter, eps)
# u_k.assign(uu) # update for next iteration
u_k.assign(u1)
#
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
pe = Expression("x[1]+x[0]-1")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
# pp = x_epetra[Vdim[xx-1][0]:]
# pa = Function(Q)
# pa1 = Function(Q)
# pa2 = Function(Q)
# pa1.vector()[:] = pp.array
# pa2.vector()[:] = 0*pp.array+1
# pa2.vector().array()
# pa.vector()[:] = pp.array + assemble(pa1*dx)/assemble(pa2*dx)
# uu = x_epetra[0:Vdim[xx-1][0]]
# ua = Function(V)
# ua.vector()[:] = uu.array
u = interpolate(ue,V)
p = interpolate(pe,Q)
Nv = u.vector().array().shape
x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x.array
pp = x_epetra[Nv[0]:]
pp = pp.array
n = pp.shape
pp = np.insert(pp,n,0)
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=4,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=4,mesh=mesh)
print errL2u[xx-1]
print errL2p[xx-1]
del solver
# scipy.io.savemat('Vdim.mat', {'VDoF':Vdim})
# scipy.io.savemat('DoF.mat', {'DoF':DoF})
plt.loglog(NN,errL2u)
plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errL2p)
plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
# plt.show()
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
print "\n\n"
import prettytable
table = prettytable.PrettyTable(["DoF","V-L2","P-L2"])
for x in xrange(1,m):
table.add_row([Wdim[x-1][0],errL2u[x-1][0],errL2p[x-1][0]])
print table
# plt.loglog(N,erru)
# plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(N,errp)
# plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
plot(ua)
# plot(interpolate(ue,V))
plot(pp)
# plot(interpolate(pe,Q))
interactive()
# plt.show()
|
#!/usr/bin/python
# import petsc4py
# import sys
# petsc4py.init(sys.argv)
# from petsc4py import PETSc
# from MatrixOperations import *
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
def SaveEpertaMatrix(A,name):
    # Save Epetra matrix A to "<name>.mat" as a SciPy CSR matrix, by first
    # dumping it to a Matlab-style "<name>.txt" file and reading that back.
    # NOTE(review): name looks like a typo for "SaveEpetraMatrix" — kept
    # unchanged for compatibility with existing callers.
    from PyTrilinos import EpetraExt
    from numpy import array,loadtxt
    import scipy.sparse as sps
    import scipy.io
    test ="".join([name,".txt"])
    EpetraExt.RowMatrixToMatlabFile(test,A)
    data = loadtxt(test)
    # Matlab indices are 1-based; shift to 0-based for SciPy.
    col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
    Asparse = sps.csr_matrix((values, (row, col)))
    testmat ="".join([name,".mat"])
    scipy.io.savemat( testmat, {name: Asparse},oned_as='row')
def NullSpace(A,name):
    # Drop the last row and column of Epetra matrix A and return the result
    # as a new Epetra CrsMatrix. Round-trips through a "<name>.txt"
    # Matlab-format dump on disk.
    # NOTE(review): trimming one row/column presumably pins a pressure DOF to
    # remove the system's null space — confirm against the solver setup.
    from PyTrilinos import EpetraExt, Epetra
    from numpy import array,loadtxt
    import scipy.sparse as sps
    import scipy.io
    import matplotlib.pylab as plt
    test ="".join([name,".txt"])
    EpetraExt.RowMatrixToMatlabFile(test,A)
    data = loadtxt(test)
    # Matlab indices are 1-based; convert to 0-based.
    col,row,values = data[:,0]-1,data[:,1]-1,data[:,2]
    Asparse = sps.csr_matrix((values, (row, col)))
    (Nb,Mb) = Asparse.shape
    # Trim the final row and column.
    Aublas1 = Asparse[0:Nb-1,0:Mb-1]
    # plt.spy(Aublas1)
    # if (Nb < 1000):
    # plt.show()
    comm = Epetra.PyComm()
    Ap = scipy_csr_matrix2CrsMatrix(Aublas1, comm)
    return Ap
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m = 2
errL2u = np.zeros((m-1,1))
errL2p = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'No'
Saving = 'no'
case = 1
parameters['linear_algebra_backend'] = ''
for xx in xrange(1,m):
print xx
nn = 2**(xx+4)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
# nn = 32
# mesh = UnitSquareMesh(16,16)
# mesh = UnitSquareMesh(nn, nn)
mesh = RectangleMesh(-1, -1, 1, 1, nn, nn,'right')
# tic()
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
# u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])"))
# u0 = Expression(("pow(x[1],2)-1","pow(x[0],2)-1"))
if case == 1:
u0 = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
p0 = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)")
elif case == 2:
Su0 = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
p0 = Expression("x[1]+x[0]-1")
elif case == 3:
u0 = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
p0 = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
bc = DirichletBC(W.sub(0),u0, boundary)
bc1 = DirichletBC(W.sub(1), p0, boundary)
bcs = [bc]
# v, u = TestFunction(V), TrialFunction(V)
# q, p = TestFunction(Q), TrialFunction(Q)
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
# f = Expression(("-pi*pi*sin(pi*x[1])+2*x[1]","-pi*pi*sin(pi*x[0])"))
if case == 1:
f = -Expression(("120*x[0]*x[1]*(1-mu)+ 400*x[0]*pow(x[1],6)+(5*pow(x[0],4)-5*pow(x[1],4))*60*x[0]*x[1]*x[1]","60*(pow(x[0],2)-pow(x[1],2))*(1-mu)+400*pow(x[0],4)*pow(x[1],3)-(5*pow(x[0],4)-5*pow(x[1],4))*20*x[1]*x[1]*x[1]"), mu = 1e0)
elif case == 2:
f = -Expression(("-1","-1"))
elif case == 3:
f = -Expression(("8*pi*pi*cos(2*pi*x[1])*sin(2*pi*x[0]) + 2*pi*cos(2*pi*x[0])*sin(2*pi*x[1])","2*pi*cos(2*pi*x[0])*sin(2*pi*x[1]) - 8*pi*pi*cos(2*pi*x[0])*sin(2*pi*x[1])"))
u_k = Function(V)
mu = Constant(1e0)
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
u_k = Function(V)
a11 = -mu*inner(grad(v), grad(u))*dx - inner(dolfin.dot(u_k,grad(u)),v)*dx
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v, f)*dx
a = a11+a12+a21
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 100 # max no of iterations allowed
# i = p*q*dx
# AA = assemble(a11)
while eps > tol and iter < maxiter:
iter += 1
x = Function(W)
uu = Function(W)
tic()
AA, bb = assemble_system(a, L1, bcs)
print toc()
tic()
A_epetra = as_backend_type(AA).mat()
A_epetra =NullSpace(A_epetra,"A_epetra")
# As = AA.sparray()[0:-1,0:-1]
# print toc()
# tic()
# A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data))
print toc()
pause
# PP, btmp = assemble_system(i+a11, L1, bcs)
DoF = V.dim() + Q.dim()
x_epetra = Epetra.Vector(0*bb.array())
A_epetra = as_backend_type(AA).mat()
# P_epetra = down_cast(PP).mat()
b_epetra = as_backend_type(bb).vec()
# x_epetra = down_cast(uu.vector()).vec()
A_epetra =NullSpace(A_epetra,"A_epetra")
# P_epetra =NullSpace(P_epetra,"P_epetra")
print toc()
bbb =bb.array()
Nb = bbb.shape
b =bbb[0:Nb[0]-1]
b_epetra = Epetra.Vector(b)
xxx = x.vector().array()
x =xxx[0:Nb[0]-1]
x_epetra = Epetra.Vector(x)
pause()
# mlList = {"max levels" : 200,
# "output" : 10,
# "smoother: type" : "symmetric Gauss-Seidel",
# "aggregation: type" : "Uncoupled"
# }
# prec = ML.MultiLevelPreconditioner(P_epetra, False)
# prec.SetParameterList(mlList)
# prec.ComputePreconditioner()
# solver = AztecOO.AztecOO(A_epetra, x_epetra, b_epetra)
# solver.SetPrecOperator(prec)
# solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_gmres);
# solver.SetAztecOption(AztecOO.AZ_output, 100);
# err = solver.Iterate(20000, 1e-10)
tic()
problem = Epetra.LinearProblem(A_epetra,x_epetra,b_epetra)
print '\n\n\n\n\n\n'
factory = Amesos.Factory()
solver = factory.Create("Amesos_Umfpack", problem)
# solver = factory.Create("MUMPS", problem)
amesosList = {"PrintTiming" : True, "PrintStatus" : True }
solver.SetParameters(amesosList)
solver.SymbolicFactorization()
solver.NumericFactorization()
solver.Solve()
soln = problem.GetLHS()
print "||x_computed||_2 =", soln.Norm2()
# solver.PrintTiming()
print '\n\n\n\n\n\n'
uu = x_epetra[0:Vdim[xx-1][0]]
# time = time+toc()
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + uu.array
diff = u1.vector().array() - u_k.vector().array()
eps = np.linalg.norm(diff, ord=np.Inf)
print '\n\n\niter=%d: norm=%g' % (iter, eps)
# u_k.assign(uu) # update for next iteration
u_k.assign(u1)
#
if case == 1:
ue = Expression(("20*x[0]*pow(x[1],3)","5*pow(x[0],4)-5*pow(x[1],4)"))
pe = Expression("60*pow(x[0],2)*x[1]-20*pow(x[1],3)+5")
elif case == 2:
ue = Expression(("pow(x[1],2)-x[1]","pow(x[0],2)-x[0]"))
pe = Expression("x[1]+x[0]-1")
elif case == 3:
ue = Expression(("cos(2*pi*x[1])*sin(2*pi*x[0]) ","-cos(2*pi*x[0])*sin(2*pi*x[1]) "))
pe = Expression("sin(2*pi*x[0])*sin(2*pi*x[1]) ")
# pp = x_epetra[Vdim[xx-1][0]:]
# pa = Function(Q)
# pa1 = Function(Q)
# pa2 = Function(Q)
# pa1.vector()[:] = pp.array
# pa2.vector()[:] = 0*pp.array+1
# pa2.vector().array()
# pa.vector()[:] = pp.array + assemble(pa1*dx)/assemble(pa2*dx)
# uu = x_epetra[0:Vdim[xx-1][0]]
# ua = Function(V)
# ua.vector()[:] = uu.array
u = interpolate(ue,V)
p = interpolate(pe,Q)
Nv = u.vector().array().shape
x = x_epetra[0:Nv[0]]
ua = Function(V)
ua.vector()[:] = x.array
pp = x_epetra[Nv[0]:]
pp = pp.array
n = pp.shape
pp = np.insert(pp,n,0)
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,Q)
pe = Function(Q)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
errL2u[xx-1] = errornorm(ue,ua,norm_type="L2", degree_rise=4,mesh=mesh)
errL2p[xx-1] = errornorm(pe,pp,norm_type="L2", degree_rise=4,mesh=mesh)
print errL2u[xx-1]
print errL2p[xx-1]
del solver
# scipy.io.savemat('Vdim.mat', {'VDoF':Vdim})
# scipy.io.savemat('DoF.mat', {'DoF':DoF})
plt.loglog(NN,errL2u)
plt.title('Error plot for CG2 elements - Velocity L2 convergence = %f' % np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errL2p)
plt.title('Error plot for CG1 elements - Pressure L2 convergence = %f' % np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
# plt.show()
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
print "\n\n"
import prettytable
table = prettytable.PrettyTable(["DoF","V-L2","P-L2"])
for x in xrange(1,m):
table.add_row([Wdim[x-1][0],errL2u[x-1][0],errL2p[x-1][0]])
print table
# plt.loglog(N,erru)
# plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(N,errp)
# plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
plot(ua)
# plot(interpolate(ue,V))
plot(pp)
# plot(interpolate(pe,Q))
interactive()
# plt.show()
|
en
| 0.291922
|
#!/usr/bin/python # import petsc4py # import sys # petsc4py.init(sys.argv) # from petsc4py import PETSc # from MatrixOperations import * # plt.spy(Aublas1) # if (Nb < 1000): # plt.show() #MO.SwapBackend('epetra') #os.system("echo $PATH") # Create mesh and define function space # nn = 32 # mesh = UnitSquareMesh(16,16) # mesh = UnitSquareMesh(nn, nn) # tic() # print 'time to create function spaces', toc(),'\n\n' # u0 = Expression(("sin(pi*x[1])","sin(pi*x[0])")) # u0 = Expression(("pow(x[1],2)-1","pow(x[0],2)-1")) # v, u = TestFunction(V), TrialFunction(V) # q, p = TestFunction(Q), TrialFunction(Q) # f = Expression(("-pi*pi*sin(pi*x[1])+2*x[1]","-pi*pi*sin(pi*x[0])")) # error measure ||u-u_k|| # tolerance # iteration counter # max no of iterations allowed # i = p*q*dx # AA = assemble(a11) # As = AA.sparray()[0:-1,0:-1] # print toc() # tic() # A = PETSc.Mat().createAIJ(size=As.shape,csr=(As.indptr, As.indices, As.data)) # PP, btmp = assemble_system(i+a11, L1, bcs) # P_epetra = down_cast(PP).mat() # x_epetra = down_cast(uu.vector()).vec() # P_epetra =NullSpace(P_epetra,"P_epetra") # mlList = {"max levels" : 200, # "output" : 10, # "smoother: type" : "symmetric Gauss-Seidel", # "aggregation: type" : "Uncoupled" # } # prec = ML.MultiLevelPreconditioner(P_epetra, False) # prec.SetParameterList(mlList) # prec.ComputePreconditioner() # solver = AztecOO.AztecOO(A_epetra, x_epetra, b_epetra) # solver.SetPrecOperator(prec) # solver.SetAztecOption(AztecOO.AZ_solver, AztecOO.AZ_gmres); # solver.SetAztecOption(AztecOO.AZ_output, 100); # err = solver.Iterate(20000, 1e-10) # solver = factory.Create("MUMPS", problem) # solver.PrintTiming() # time = time+toc() # u_k.assign(uu) # update for next iteration # # pp = x_epetra[Vdim[xx-1][0]:] # pa = Function(Q) # pa1 = Function(Q) # pa2 = Function(Q) # pa1.vector()[:] = pp.array # pa2.vector()[:] = 0*pp.array+1 # pa2.vector().array() # pa.vector()[:] = pp.array + assemble(pa1*dx)/assemble(pa2*dx) # uu = x_epetra[0:Vdim[xx-1][0]] # ua = 
Function(V) # ua.vector()[:] = uu.array # scipy.io.savemat('Vdim.mat', {'VDoF':Vdim}) # scipy.io.savemat('DoF.mat', {'DoF':DoF}) # plt.show() # plt.loglog(N,erru) # plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1])))) # plt.xlabel('N') # plt.ylabel('L2 error') # plt.figure() # plt.loglog(N,errp) # plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1])))) # plt.xlabel('N') # plt.ylabel('L2 error') # plot(interpolate(ue,V)) # plot(interpolate(pe,Q)) # plt.show()
| 2.296959
| 2
|
src/db_writer.py
|
DesignBuilderSoftware/db-temperature-distribution
| 0
|
6627536
|
"""Define functions to writer parsed tables to various formats."""
import csv
from pathlib import Path
from typing import List
from db_table import DbTable
class DbWriter:
    """Writes table data to filesystem."""

    @classmethod
    def write_table(cls, table: DbTable, directory: Path) -> Path:
        """
        Write table to given directory, table name is used as file name.

        Arguments
        ---------
        table : DbTable
            Parsed DbTable.
        directory : Path
            A path to directory to place output files.

        Returns
        -------
        Path
            Path of the written CSV file.

        """
        filename = table.name + ".csv"
        path = Path(directory, filename)
        # newline="" is required so csv.writer controls line endings itself.
        with open(path, mode="w", newline="", encoding="utf-8") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerows(table.as_2d_table())
        return path

    @classmethod
    def write_tables(cls, tables: List[DbTable], directory: Path) -> List[Path]:
        """
        Write tables as spreadsheet files to a given directory.

        Arguments
        ---------
        tables : List of DbTable
            Parsed DbTables.
        directory : Path
            A path to directory to place output files.

        Returns
        -------
        list of Path
            Paths of all output files.

        """
        output_paths = []
        for table in tables:
            path = cls.write_table(table, directory)
            output_paths.append(path)
        return output_paths
|
"""Define functions to writer parsed tables to various formats."""
import csv
from pathlib import Path
from typing import List
from db_table import DbTable
class DbWriter:
    """Writes table data to filesystem."""

    @classmethod
    def write_table(cls, table: DbTable, directory: Path) -> Path:
        """
        Write table to given directory, table name is used as file name.

        Arguments
        ---------
        table : DbTable
            Parsed DbTable.
        directory : Path
            A path to directory to place output files.

        Returns
        -------
        Path
            Path of the written CSV file.

        """
        filename = table.name + ".csv"
        path = Path(directory, filename)
        # newline="" is required so csv.writer controls line endings itself.
        with open(path, mode="w", newline="", encoding="utf-8") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerows(table.as_2d_table())
        return path

    @classmethod
    def write_tables(cls, tables: List[DbTable], directory: Path) -> List[Path]:
        """
        Write tables as spreadsheet files to a given directory.

        Arguments
        ---------
        tables : List of DbTable
            Parsed DbTables.
        directory : Path
            A path to directory to place output files.

        Returns
        -------
        list of Path
            Paths of all output files.

        """
        output_paths = []
        for table in tables:
            path = cls.write_table(table, directory)
            output_paths.append(path)
        return output_paths
|
en
| 0.633275
|
Define functions to writer parsed tables to various formats. Writes table data to filesystem. Write table to given directory, table name is used as file name. Arguments --------- table : DbTable Parsed DbTable. directory : Path A path to directory to place output files. Returns ------- list of Path Paths of all output files. Write tables as spreadsheet files to a given directory. Arguments --------- tables : List of DbTable Parsed DbTables. directory : Path A path to directory to place output files. Returns ------- list of Path Paths of all output files.
| 3.830534
| 4
|
onlinepayments/sdk/log/log_message.py
|
wl-online-payments-direct/sdk-python3
| 0
|
6627537
|
from abc import ABC, abstractmethod
from onlinepayments.sdk.log.logging_util import LoggingUtil
class LogMessage(ABC):
    """
    A utility class to build log messages.

    Collects a request id, obfuscated headers and an obfuscated body;
    concrete subclasses implement get_message() to format the final text.
    """
    # Id correlating request/response log entries.
    __request_id = None
    # Running comma-separated, obfuscated header string: name="value", ...
    __headers = None
    # Obfuscated message body, set via set_body().
    __body = None
    # MIME type of the body.
    __content_type = None
    # (name, quoted-obfuscated-value) tuples, in insertion order.
    __header_list = None

    def __init__(self, request_id):
        # A request id is mandatory; reject falsy values early.
        if not request_id:
            raise ValueError("request_id is required")
        self.__request_id = request_id
        self.__headers = ""
        self.__header_list = []

    @property
    def request_id(self):
        return self.__request_id

    @property
    def headers(self):
        # Rendered as a single string, e.g.: Name="value", Other="value"
        return str(self.__headers)

    @property
    def body(self):
        return self.__body

    @property
    def content_type(self):
        return self.__content_type

    def add_header(self, name, value):
        # Append one header to both the running string and the header list,
        # obfuscating sensitive values via LoggingUtil.
        if self.__headers:
            self.__headers += ", "
        self.__headers += name + "=\""
        # Treat None and the literal string 'none' as an absent value.
        # NOTE(review): value.lower() assumes value is already a string here;
        # a non-str value would raise — confirm callers.
        if value is not None and value.lower() != 'none':
            value = str(value)
            obfuscated_value = LoggingUtil.obfuscate_header(name, value)
            self.__headers += obfuscated_value
            self.__header_list.append((name, "\"" + obfuscated_value + "\""))
        else:
            self.__header_list.append((name, "\"\""))
        self.__headers += "\""

    def set_body(self, body, content_type, charset=None):
        # Store the body after obfuscation; charset is forwarded when given.
        self.__content_type = content_type
        if charset is not None:
            self.__body = LoggingUtil.obfuscate_body(body, charset)
        else:
            self.__body = LoggingUtil.obfuscate_body(body)

    @staticmethod
    def empty_if_none(value):
        # Map None to "" so callers can concatenate safely.
        if value is not None:
            return value
        return ""

    @abstractmethod
    def get_message(self):
        """Return the formatted log message (implemented by subclasses)."""

    def get_header_list(self):
        return self.__header_list
|
from abc import ABC, abstractmethod
from onlinepayments.sdk.log.logging_util import LoggingUtil
class LogMessage(ABC):
    """
    A utility class to build log messages.

    Collects a request id, obfuscated headers and an obfuscated body;
    concrete subclasses implement get_message() to format the final text.
    """
    # Id correlating request/response log entries.
    __request_id = None
    # Running comma-separated, obfuscated header string: name="value", ...
    __headers = None
    # Obfuscated message body, set via set_body().
    __body = None
    # MIME type of the body.
    __content_type = None
    # (name, quoted-obfuscated-value) tuples, in insertion order.
    __header_list = None

    def __init__(self, request_id):
        # A request id is mandatory; reject falsy values early.
        if not request_id:
            raise ValueError("request_id is required")
        self.__request_id = request_id
        self.__headers = ""
        self.__header_list = []

    @property
    def request_id(self):
        return self.__request_id

    @property
    def headers(self):
        # Rendered as a single string, e.g.: Name="value", Other="value"
        return str(self.__headers)

    @property
    def body(self):
        return self.__body

    @property
    def content_type(self):
        return self.__content_type

    def add_header(self, name, value):
        # Append one header to both the running string and the header list,
        # obfuscating sensitive values via LoggingUtil.
        if self.__headers:
            self.__headers += ", "
        self.__headers += name + "=\""
        # Treat None and the literal string 'none' as an absent value.
        # NOTE(review): value.lower() assumes value is already a string here;
        # a non-str value would raise — confirm callers.
        if value is not None and value.lower() != 'none':
            value = str(value)
            obfuscated_value = LoggingUtil.obfuscate_header(name, value)
            self.__headers += obfuscated_value
            self.__header_list.append((name, "\"" + obfuscated_value + "\""))
        else:
            self.__header_list.append((name, "\"\""))
        self.__headers += "\""

    def set_body(self, body, content_type, charset=None):
        # Store the body after obfuscation; charset is forwarded when given.
        self.__content_type = content_type
        if charset is not None:
            self.__body = LoggingUtil.obfuscate_body(body, charset)
        else:
            self.__body = LoggingUtil.obfuscate_body(body)

    @staticmethod
    def empty_if_none(value):
        # Map None to "" so callers can concatenate safely.
        if value is not None:
            return value
        return ""

    @abstractmethod
    def get_message(self):
        """Return the formatted log message (implemented by subclasses)."""

    def get_header_list(self):
        return self.__header_list
|
en
| 0.75611
|
A utility class to build log messages.
| 2.775149
| 3
|
test/main.py
|
populationgenomics/analysis-runner
| 0
|
6627538
|
#!/usr/bin/env python3
"""A very simple batch that tests basic functionality."""
import hail as hl

# Initializing the Hail runtime is the whole test: if this returns, the
# environment is functional.
hl.init()
|
#!/usr/bin/env python3
"""A very simple batch that tests basic functionality."""
import hail as hl

# Initializing the Hail runtime is the whole test: if this returns, the
# environment is functional.
hl.init()
|
en
| 0.806165
|
#!/usr/bin/env python3 A very simple batch that tests basic functionality.
| 1.247593
| 1
|
desafio037.py
|
mario-nobre/python-guanabara
| 0
|
6627539
|
<filename>desafio037.py<gh_stars>0
# Read an integer and convert it to the base chosen by the user
# (binary, octal or hexadecimal). All user-facing text is Portuguese.
num=int(input('digite um número inteiro: '))
print('''colha uma das bases para conversão:
[1] converter para BINÁRIO
[2] converter para OCTAL
[3] converter para HEXADECIMAL''')
op = int(input('sua opção: '))
if op == 1:
    # bin() prefixes "0b"; the [2:] slice strips it.
    print('{} convertido para BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif op == 2:
    # oct() prefixes "0o".
    print('{} convertido para OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif op == 3:
    # hex() prefixes "0x".
    print('{} convertido para HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
    print('opção invalida. tente novamente.')
|
<filename>desafio037.py<gh_stars>0
# Read an integer and convert it to the base chosen by the user
# (binary, octal or hexadecimal). All user-facing text is Portuguese.
num=int(input('digite um número inteiro: '))
print('''colha uma das bases para conversão:
[1] converter para BINÁRIO
[2] converter para OCTAL
[3] converter para HEXADECIMAL''')
op = int(input('sua opção: '))
if op == 1:
    # bin() prefixes "0b"; the [2:] slice strips it.
    print('{} convertido para BINÁRIO é igual a {}'.format(num, bin(num)[2:]))
elif op == 2:
    # oct() prefixes "0o".
    print('{} convertido para OCTAL é igual a {}'.format(num, oct(num)[2:]))
elif op == 3:
    # hex() prefixes "0x".
    print('{} convertido para HEXADECIMAL é igual a {}'.format(num, hex(num)[2:]))
else:
    print('opção invalida. tente novamente.')
|
pt
| 0.984661
|
colha uma das bases para conversão: [1] converter para BINÁRIO [2] converter para OCTAL [3] converter para HEXADECIMAL
| 4.103929
| 4
|
src/morphodict/lexicon/migrations/0004_add_more_defn_types.py
|
sarahrmoeller/morphodict
| 8
|
6627540
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("lexicon", "0003_populate_fst_lemma"),
]
operations = [
migrations.AddField(
model_name="definition",
name="raw_core_definition",
field=models.CharField(
help_text="\n The definition to optionally use for auto-translation.\n\n It should include only the core sense of the wordform without any\n notes or cross-references.\n ",
max_length=200,
null=True,
),
),
migrations.AddField(
model_name="definition",
name="raw_semantic_definition",
field=models.CharField(
help_text="\n The definition to optionally use when building a semantic vector.\n\n This is not visible to the user. It may include etymological terms,\n and may omit stopwords.\n\n Even though it is only used at import time, it is stored in the\n database to enable the possibility of regenerating definition\n vectors without the original importjson file.\n ",
max_length=200,
null=True,
),
),
migrations.AlterField(
model_name="definition",
name="text",
field=models.CharField(
help_text="\n The definition text. This is displayed to the user, and terms within\n it are indexed for full-text search.\n ",
max_length=200,
),
),
]
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("lexicon", "0003_populate_fst_lemma"),
]
operations = [
migrations.AddField(
model_name="definition",
name="raw_core_definition",
field=models.CharField(
help_text="\n The definition to optionally use for auto-translation.\n\n It should include only the core sense of the wordform without any\n notes or cross-references.\n ",
max_length=200,
null=True,
),
),
migrations.AddField(
model_name="definition",
name="raw_semantic_definition",
field=models.CharField(
help_text="\n The definition to optionally use when building a semantic vector.\n\n This is not visible to the user. It may include etymological terms,\n and may omit stopwords.\n\n Even though it is only used at import time, it is stored in the\n database to enable the possibility of regenerating definition\n vectors without the original importjson file.\n ",
max_length=200,
null=True,
),
),
migrations.AlterField(
model_name="definition",
name="text",
field=models.CharField(
help_text="\n The definition text. This is displayed to the user, and terms within\n it are indexed for full-text search.\n ",
max_length=200,
),
),
]
|
none
| 1
| 2.122939
| 2
|
|
PythonScript/PythonIntermedio/cap_4/decoradorPython.py
|
FranklinA/CoursesAndSelfStudy
| 0
|
6627541
|
#
def primerD(funcion):
def funcionDecorada(*args,**kkwars):#para recibir parametros
print("Primer decorador")
return funcionDecorada
@primerD # debde coincidir con una funcion existente en este caso primerD para usarla en la funcion funcion()
def funcion():
print('Mi primer decorador')
funcion()
|
#
def primerD(funcion):
def funcionDecorada(*args,**kkwars):#para recibir parametros
print("Primer decorador")
return funcionDecorada
@primerD # debde coincidir con una funcion existente en este caso primerD para usarla en la funcion funcion()
def funcion():
print('Mi primer decorador')
funcion()
|
es
| 0.876283
|
# #para recibir parametros # debde coincidir con una funcion existente en este caso primerD para usarla en la funcion funcion()
| 3.046314
| 3
|
app/scrape/scrape.py
|
luiscape/hdxscraper-opennepal
| 0
|
6627542
|
<reponame>luiscape/hdxscraper-opennepal
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urlparse
import requests
from bs4 import BeautifulSoup
def ScrapeURLs(page, filters=True, verbose=False):
'''Scrapes the OpenNepal website for dataset URLs.'''
if verbose:
print 'Scraping the OpenNepal page: %s' % page
#
# Assemble URL.
#
u = 'http://data.opennepal.net/datasets?page=%s' % page
if filters is True:
u += 'field_dataset_sector_tid[0]=107&field_dataset_sector_tid[1]=7&field_dataset_sector_tid[2]=112&field_dataset_sector_tid[3]=146&field_dataset_sector_tid[4]=144&field_dataset_sector_tid[5]=147&field_dataset_sector_tid[6]=148&field_dataset_sector_tid[7]=100&field_dataset_sector_tid[8]=183&field_dataset_sector_tid[9]=217'
try:
#
# Download data from OpenNepal's website.
#
r = requests.get(u)
#
# Find data with BeautifulSoup.
#
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.findAll('table')
keys = ['url']
out = []
i = 0
for row in table[0].findAll('tr'):
if i == 0:
i += 1
continue
else:
#
# Finds href.
#
url = [ 'http://data.opennepal.net' + row.findAll('a', href=True)[0]['href'] ]
out.append(dict(zip(keys, url)))
i += 1
return out
except Exception as e:
print 'Failed to scrape data from OpenNepal website'
print e
return False
def ScrapeContent(url, verbose=False):
'''Scraping content from each dataset.'''
if verbose:
print 'Scraping the OpenNepal dataset: %s' % url
#
# Download data from OpenNepal's website.
#
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
#
# Title.
#
title = soup.select('#page-title')[0].text
#
# Tags.
#
tags = []
region = soup.select('.views-field-field-dataset-keywords')
for tag in region[0].find_all('a'):
tags.append(tag.text)
#
# License and date.
#
license_selector = soup.select('.views-field-field-dataset-license')[0].select('.field-content')
license = license_selector[0].text
date = soup.select('.date-display-single')[0].text
#
# Description.
#
description = soup.select('.views-field-body')[0].find_all('p')[0].text
#
# Resource name, link, and type.
#
resource = {
'url': None,
'name': None,
'type': None
}
resource['url'] = soup.select('.views-field-nothing')[0].findAll('a', href=True)[0]['href']
resource['name'] = resource['url'].split('/')[-1].split('&')[0].split('?')[0]
resource['type'] = resource['name'].split('.')[1].upper()
out = {
'title': title,
'license': license,
'tags': tags[0].replace('/', '-'),
'date': date,
'description': description,
'resource_url': resource['url'],
'resource_name': resource['name'],
'resource_type': resource['type']
}
return out
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urlparse
import requests
from bs4 import BeautifulSoup
def ScrapeURLs(page, filters=True, verbose=False):
'''Scrapes the OpenNepal website for dataset URLs.'''
if verbose:
print 'Scraping the OpenNepal page: %s' % page
#
# Assemble URL.
#
u = 'http://data.opennepal.net/datasets?page=%s' % page
if filters is True:
u += 'field_dataset_sector_tid[0]=107&field_dataset_sector_tid[1]=7&field_dataset_sector_tid[2]=112&field_dataset_sector_tid[3]=146&field_dataset_sector_tid[4]=144&field_dataset_sector_tid[5]=147&field_dataset_sector_tid[6]=148&field_dataset_sector_tid[7]=100&field_dataset_sector_tid[8]=183&field_dataset_sector_tid[9]=217'
try:
#
# Download data from OpenNepal's website.
#
r = requests.get(u)
#
# Find data with BeautifulSoup.
#
soup = BeautifulSoup(r.content, 'html.parser')
table = soup.findAll('table')
keys = ['url']
out = []
i = 0
for row in table[0].findAll('tr'):
if i == 0:
i += 1
continue
else:
#
# Finds href.
#
url = [ 'http://data.opennepal.net' + row.findAll('a', href=True)[0]['href'] ]
out.append(dict(zip(keys, url)))
i += 1
return out
except Exception as e:
print 'Failed to scrape data from OpenNepal website'
print e
return False
def ScrapeContent(url, verbose=False):
'''Scraping content from each dataset.'''
if verbose:
print 'Scraping the OpenNepal dataset: %s' % url
#
# Download data from OpenNepal's website.
#
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
#
# Title.
#
title = soup.select('#page-title')[0].text
#
# Tags.
#
tags = []
region = soup.select('.views-field-field-dataset-keywords')
for tag in region[0].find_all('a'):
tags.append(tag.text)
#
# License and date.
#
license_selector = soup.select('.views-field-field-dataset-license')[0].select('.field-content')
license = license_selector[0].text
date = soup.select('.date-display-single')[0].text
#
# Description.
#
description = soup.select('.views-field-body')[0].find_all('p')[0].text
#
# Resource name, link, and type.
#
resource = {
'url': None,
'name': None,
'type': None
}
resource['url'] = soup.select('.views-field-nothing')[0].findAll('a', href=True)[0]['href']
resource['name'] = resource['url'].split('/')[-1].split('&')[0].split('?')[0]
resource['type'] = resource['name'].split('.')[1].upper()
out = {
'title': title,
'license': license,
'tags': tags[0].replace('/', '-'),
'date': date,
'description': description,
'resource_url': resource['url'],
'resource_name': resource['name'],
'resource_type': resource['type']
}
return out
|
en
| 0.686056
|
#!/usr/bin/python # -*- coding: utf-8 -*- Scrapes the OpenNepal website for dataset URLs. # # Assemble URL. # # # Download data from OpenNepal's website. # # # Find data with BeautifulSoup. # # # Finds href. # Scraping content from each dataset. # # Download data from OpenNepal's website. # # # Title. # # # Tags. # # # License and date. # # # Description. # # # Resource name, link, and type. #
| 3.090371
| 3
|
Frameworks/Examples/SVM_IRIS.py
|
CrispyKernel/GetTuned
| 0
|
6627543
|
<reponame>CrispyKernel/GetTuned<filename>Frameworks/Examples/SVM_IRIS.py
"""
@Description: We will evaluate the performance of "Gaussian Process with EI acquisition function" HPO method
implemented in a simple context with a fixed total budget of (100x200x4 = 80000 epochs),
a max budget per config of 800 epochs and a number of four cross validation per config tested.
For a better understanding, the budget will allow our model to evaluate 100 different
configurations because it is a non-bandit optimization method.
Now, considering breast cancer wisconsin data set classification problem (569 instances,
30 attributes, 2 classes), will we initialize a Sklearn SVM with default parameter (rbf kernel)
and try to find the best values for C (Penalty of the error term) and gamma (kernel coefficient)
"""
## MODULES IMPORTATION SECTION ##---------------------------------------------//
# Importation of helpful generic modules
import sys
import os
from numpy import linspace
# Importation of available model based on existing sklearn models
from GetTuned import MLP, SVM
# Importation of the HPtuner modules needed for the HPO process
from GetTuned import HPtuner
# Importation of domain object needed to define each hyperparameter's search space
from GetTuned import DiscreteDomain, ContinuousDomain
# Importation of helpful available functions from DataManager module
# to generate data to practice hyperparameter tuning with GetTuned
from GetTuned import DataGenerator, load_digits_dataset, load_breast_cancer_dataset, \
load_forest_covertypes_dataset, load_forest_covertypes_dataset, load_iris_dataset, plot_data
## RESULT SAVING SETTINGS ##--------------------------------------------------//
save_enabled = True
display_results = True
saving_path = os.path.join(os.path.dirname(os.getcwd()), 'Results')
experiment_title = '' # (**Optional** -> Will be used as a directory within Results/)
name_of_dataset = '' # (**Optional** -> Will only be written in summary.txt)
## BUDGET SETTINGS ##---------------------------------------------------------//
# Total number of epochs that we're allowed to execute during the HPO process
total_epochs_budget = 80000
# Total number of epochs that we're allowed to execute
# while testing a single configuration of hyperparameters
max_budget_per_config = 800
# Number of cross validation that we want to execute to compute the
# score of a single configuration of hyperparameters
nb_cross_validation = 4
# Percentage of training data to take for validation
# when computing the score of a single configuration of hyperparameters
valid_size = 0.20
## TUNING METHOD CHOICE ##----------------------------------------------------//
"""
List of the current available methods :
['grid_search', 'random_search', 'gaussian_process', 'tpe', 'annealing', 'hyperband', 'BOHB']
"""
method = 'gaussian_process'
## **OPTIONAL PARAMETER FOR GAUSSIAN PROCESS METHOD** ##----------------------//
variant = 'GP' # One among these : ['GP', 'GP_MCMC']
acquisition_function = 'EI' # One among these : ['EI', 'MPI']
nb_inital_evaluation_points = 10 # Number of points to evaluate before the beginning of the optimization
## LETS GET TUNED! ## --------------------------------------------------------//
# We generate data for our tests and global variables for all tests
x_train, t_train, x_test, t_test = load_breast_cancer_dataset()
# We initialize our model (See Model.py for more informations on hyperparameter options)
model = SVM(kernel='rbf')
# We define our search space dictionary
search_space = {'C': ContinuousDomain(-8, 0, log_scaled=True), # From 10^-8 to 10^0
'gamma': ContinuousDomain(-8, 0, log_scaled=True)}
# We initialize our HPtuner and set the hyperparameters search space
hp_tuner = HPtuner(model=model, method=method, total_budget=total_epochs_budget,
max_budget_per_config=max_budget_per_config, test_default_hyperparam=False)
hp_tuner.set_search_space(search_space)
# We execute the hyperparameter optimization (the tuning!)
tuning_analyst = hp_tuner.tune(X=x_train, t=t_train, nb_cross_validation=nb_cross_validation, valid_size=valid_size,
nbr_initial_evals=nb_inital_evaluation_points, method_type=variant, acquisition_function=acquisition_function)
final_accuracy_score = model.score(X=x_test, t=t_test)
if save_enabled:
tuning_analyst.save_all_results(path=saving_path, experiment_title=experiment_title, dataset_name=name_of_dataset,
training_size=len(x_train), test_accuracy=final_accuracy_score)
print("\n\n*******************************")
print("\nTuning completed!!", "\n\n")
if display_results:
print("Best hyper-parameters found : %s \n" % str(tuning_analyst.best_hyperparameters))
print("Test accuracy : %g \n\n" % final_accuracy_score)
print("See tuning_summary.txt for more details \n")
tuning_analyst.plot_accuracy_history(best_accuracy=False)
tuning_analyst.plot_accuracy_history(best_accuracy=True)
print("*******************************\n\n")
|
"""
@Description: We will evaluate the performance of "Gaussian Process with EI acquisition function" HPO method
implemented in a simple context with a fixed total budget of (100x200x4 = 80000 epochs),
a max budget per config of 800 epochs and a number of four cross validation per config tested.
For a better understanding, the budget will allow our model to evaluate 100 different
configurations because it is a non-bandit optimization method.
Now, considering breast cancer wisconsin data set classification problem (569 instances,
30 attributes, 2 classes), will we initialize a Sklearn SVM with default parameter (rbf kernel)
and try to find the best values for C (Penalty of the error term) and gamma (kernel coefficient)
"""
## MODULES IMPORTATION SECTION ##---------------------------------------------//
# Importation of helpful generic modules
import sys
import os
from numpy import linspace
# Importation of available model based on existing sklearn models
from GetTuned import MLP, SVM
# Importation of the HPtuner modules needed for the HPO process
from GetTuned import HPtuner
# Importation of domain object needed to define each hyperparameter's search space
from GetTuned import DiscreteDomain, ContinuousDomain
# Importation of helpful available functions from DataManager module
# to generate data to practice hyperparameter tuning with GetTuned
from GetTuned import DataGenerator, load_digits_dataset, load_breast_cancer_dataset, \
load_forest_covertypes_dataset, load_forest_covertypes_dataset, load_iris_dataset, plot_data
## RESULT SAVING SETTINGS ##--------------------------------------------------//
save_enabled = True
display_results = True
saving_path = os.path.join(os.path.dirname(os.getcwd()), 'Results')
experiment_title = '' # (**Optional** -> Will be used as a directory within Results/)
name_of_dataset = '' # (**Optional** -> Will only be written in summary.txt)
## BUDGET SETTINGS ##---------------------------------------------------------//
# Total number of epochs that we're allowed to execute during the HPO process
total_epochs_budget = 80000
# Total number of epochs that we're allowed to execute
# while testing a single configuration of hyperparameters
max_budget_per_config = 800
# Number of cross validation that we want to execute to compute the
# score of a single configuration of hyperparameters
nb_cross_validation = 4
# Percentage of training data to take for validation
# when computing the score of a single configuration of hyperparameters
valid_size = 0.20
## TUNING METHOD CHOICE ##----------------------------------------------------//
"""
List of the current available methods :
['grid_search', 'random_search', 'gaussian_process', 'tpe', 'annealing', 'hyperband', 'BOHB']
"""
method = 'gaussian_process'
## **OPTIONAL PARAMETER FOR GAUSSIAN PROCESS METHOD** ##----------------------//
variant = 'GP' # One among these : ['GP', 'GP_MCMC']
acquisition_function = 'EI' # One among these : ['EI', 'MPI']
nb_inital_evaluation_points = 10 # Number of points to evaluate before the beginning of the optimization
## LETS GET TUNED! ## --------------------------------------------------------//
# We generate data for our tests and global variables for all tests
x_train, t_train, x_test, t_test = load_breast_cancer_dataset()
# We initialize our model (See Model.py for more informations on hyperparameter options)
model = SVM(kernel='rbf')
# We define our search space dictionary
search_space = {'C': ContinuousDomain(-8, 0, log_scaled=True), # From 10^-8 to 10^0
'gamma': ContinuousDomain(-8, 0, log_scaled=True)}
# We initialize our HPtuner and set the hyperparameters search space
hp_tuner = HPtuner(model=model, method=method, total_budget=total_epochs_budget,
max_budget_per_config=max_budget_per_config, test_default_hyperparam=False)
hp_tuner.set_search_space(search_space)
# We execute the hyperparameter optimization (the tuning!)
tuning_analyst = hp_tuner.tune(X=x_train, t=t_train, nb_cross_validation=nb_cross_validation, valid_size=valid_size,
nbr_initial_evals=nb_inital_evaluation_points, method_type=variant, acquisition_function=acquisition_function)
final_accuracy_score = model.score(X=x_test, t=t_test)
if save_enabled:
tuning_analyst.save_all_results(path=saving_path, experiment_title=experiment_title, dataset_name=name_of_dataset,
training_size=len(x_train), test_accuracy=final_accuracy_score)
print("\n\n*******************************")
print("\nTuning completed!!", "\n\n")
if display_results:
print("Best hyper-parameters found : %s \n" % str(tuning_analyst.best_hyperparameters))
print("Test accuracy : %g \n\n" % final_accuracy_score)
print("See tuning_summary.txt for more details \n")
tuning_analyst.plot_accuracy_history(best_accuracy=False)
tuning_analyst.plot_accuracy_history(best_accuracy=True)
print("*******************************\n\n")
|
en
| 0.660361
|
@Description: We will evaluate the performance of "Gaussian Process with EI acquisition function" HPO method implemented in a simple context with a fixed total budget of (100x200x4 = 80000 epochs), a max budget per config of 800 epochs and a number of four cross validation per config tested. For a better understanding, the budget will allow our model to evaluate 100 different configurations because it is a non-bandit optimization method. Now, considering breast cancer wisconsin data set classification problem (569 instances, 30 attributes, 2 classes), will we initialize a Sklearn SVM with default parameter (rbf kernel) and try to find the best values for C (Penalty of the error term) and gamma (kernel coefficient) ## MODULES IMPORTATION SECTION ##---------------------------------------------// # Importation of helpful generic modules # Importation of available model based on existing sklearn models # Importation of the HPtuner modules needed for the HPO process # Importation of domain object needed to define each hyperparameter's search space # Importation of helpful available functions from DataManager module # to generate data to practice hyperparameter tuning with GetTuned ## RESULT SAVING SETTINGS ##--------------------------------------------------// # (**Optional** -> Will be used as a directory within Results/) # (**Optional** -> Will only be written in summary.txt) ## BUDGET SETTINGS ##---------------------------------------------------------// # Total number of epochs that we're allowed to execute during the HPO process # Total number of epochs that we're allowed to execute # while testing a single configuration of hyperparameters # Number of cross validation that we want to execute to compute the # score of a single configuration of hyperparameters # Percentage of training data to take for validation # when computing the score of a single configuration of hyperparameters ## TUNING METHOD CHOICE ##----------------------------------------------------// List 
of the current available methods : ['grid_search', 'random_search', 'gaussian_process', 'tpe', 'annealing', 'hyperband', 'BOHB'] ## **OPTIONAL PARAMETER FOR GAUSSIAN PROCESS METHOD** ##----------------------// # One among these : ['GP', 'GP_MCMC'] # One among these : ['EI', 'MPI'] # Number of points to evaluate before the beginning of the optimization ## LETS GET TUNED! ## --------------------------------------------------------// # We generate data for our tests and global variables for all tests # We initialize our model (See Model.py for more informations on hyperparameter options) # We define our search space dictionary # From 10^-8 to 10^0 # We initialize our HPtuner and set the hyperparameters search space # We execute the hyperparameter optimization (the tuning!)
| 2.387496
| 2
|
cyder/core/system/forms.py
|
jwasinger/cyder
| 0
|
6627544
|
<reponame>jwasinger/cyder<gh_stars>0
from django import forms
from cyder.base.eav.forms import get_eav_form
from cyder.base.mixins import UsabilityFormMixin
from cyder.core.system.models import System, SystemAV
class SystemForm(forms.ModelForm):
class Meta:
model = System
class ExtendedSystemForm(forms.ModelForm, UsabilityFormMixin):
interface_type = forms.ChoiceField(
widget=forms.RadioSelect, choices=(
('static_interface', 'Static Interface'),
('dynamic_interface', 'Dynamic Interface')))
class Meta:
model = System
SystemAVForm = get_eav_form(SystemAV, System)
|
from django import forms
from cyder.base.eav.forms import get_eav_form
from cyder.base.mixins import UsabilityFormMixin
from cyder.core.system.models import System, SystemAV
class SystemForm(forms.ModelForm):
class Meta:
model = System
class ExtendedSystemForm(forms.ModelForm, UsabilityFormMixin):
interface_type = forms.ChoiceField(
widget=forms.RadioSelect, choices=(
('static_interface', 'Static Interface'),
('dynamic_interface', 'Dynamic Interface')))
class Meta:
model = System
SystemAVForm = get_eav_form(SystemAV, System)
|
none
| 1
| 2.042267
| 2
|
|
tests/test_process_.py
|
QSD-Group/QSDsan
| 23
|
6627545
|
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
<NAME> <<EMAIL>>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
__all__ = ('test_process',)
def test_process():
import pytest
import os
from sympy import symbols, Eq
from sympy.parsing.sympy_parser import parse_expr
from math import isclose
from qsdsan import set_thermo, Components, Process, Processes, CompiledProcesses, _pk
import qsdsan.processes as pc
cmps = Components.load_default()
S_A = cmps.S_Ac.copy('S_A')
S_ALK = cmps.S_CO3.copy('S_ALK') # measured as g C
S_F = cmps.S_F.copy('S_F')
S_I = cmps.S_U_E.copy('S_I')
S_N2 = cmps.S_N2.copy('S_N2')
S_NH4 = cmps.S_NH4.copy('S_NH4')
S_NO3 = cmps.S_NO3.copy('S_NO3')
S_O2 = cmps.S_O2.copy('S_O2')
S_PO4 = cmps.S_PO4.copy('S_PO4')
X_AUT = cmps.X_AOO.copy('X_AUT')
X_H = cmps.X_OHO.copy('X_H')
X_I = cmps.X_U_OHO_E.copy('X_I')
X_MeOH = cmps.X_FeOH.copy('X_MeOH')
X_MeP = cmps.X_FePO4.copy('X_MeP')
X_PAO = cmps.X_PAO.copy('X_PAO')
X_PHA = cmps.X_PAO_PHA.copy('X_PHA')
X_PP = cmps.X_PAO_PP_Lo.copy('X_PP')
X_S = cmps.X_B_Subst.copy('X_S')
S_I.i_N = 0.01
S_F.i_N = 0.03
X_I.i_N = 0.02
X_S.i_N = 0.04
X_H.i_N = X_PAO.i_N = X_AUT.i_N = 0.07
S_I.i_P = 0.00
S_F.i_P = 0.01
X_I.i_P = 0.01
X_S.i_P = 0.01
X_H.i_P = X_PAO.i_P = X_AUT.i_P = 0.02
cmps_asm2d = Components([S_O2, S_F, S_A, S_NH4, S_NO3, S_PO4, S_I, S_ALK, S_N2,
X_I, X_S, X_H, X_PAO, X_PP, X_PHA, X_AUT, X_MeOH, X_MeP])
cmps_asm2d.compile()
set_thermo(cmps_asm2d)
p1 = Process('aero_hydrolysis',
'X_S -> [1-f_SI]S_F + [f_SI]S_I + [?]S_NH4 + [?]S_PO4 + [?]S_ALK',
ref_component='X_S',
rate_equation='K_h * S_O2/(K_O2+S_O2) * X_S/(K_X*X_H+X_S) * X_H',
parameters=('f_SI', 'K_h', 'K_O2', 'K_X'))
f_SI = symbols('f_SI')
assert abs(sum(p1._stoichiometry * p1._components.i_N).subs({'f_SI':1})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_N).subs({'f_SI':0})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_P).subs({'f_SI':1})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_charge).subs({'f_SI':1})) < 1e-8
p1.set_parameters(f_SI = 0.0)
assert p1.parameters['f_SI'] == 0.0
assert Eq(p1._stoichiometry[p1._components._index['S_I']], parse_expr('1*f_SI'))
p12 = Process('anox_storage_PP',
'S_PO4 + [Y_PHA]X_PHA + [?]S_NO3 -> X_PP + [?]S_N2 + [?]S_NH4 + [?]S_ALK',
ref_component='X_PP',
rate_equation='q_PP * S_O2/(K_O2+S_O2) * S_PO4/(K_PS+S_PO4) * S_ALK/(K_ALK+S_ALK) * (X_PHA/X_PAO)/(K_PHA+X_PHA/X_PAO) * (K_MAX-X_PP/X_PAO)/(K_PP+K_MAX-X_PP/X_PAO) * X_PAO * eta_NO3 * K_O2/S_O2 * S_NO3/(K_NO3+S_NO3)',
parameters=('Y_PHA', 'q_PP', 'K_O2', 'K_PS', 'K_ALK', 'K_PHA', 'eta_NO3', 'K_PP', 'K_NO3'),
conserved_for=('COD', 'N', 'P', 'NOD', 'charge'))
p14 = Process('PAO_anox_growth',
'[1/Y_H]X_PHA + [?]S_NO3 + [?]S_PO4 -> X_PAO + [?]S_N2 + [?]S_NH4 + [?]S_ALK',
ref_component='X_PAO',
rate_equation='mu_PAO * S_O2/(K_O2 + S_O2) * S_NH4/(K_NH4 + S_NH4) * S_PO4/(K_P + S_PO4) * S_CO3/(K_ALK + S_ALK) * (X_PHA/X_PAO)/(K_PHA + X_PHA/X_PAO) * X_PAO * eta_NO3 * K_O2/S_O2 * S_NO3/(K_NO3 + S_NO3)',
parameters=('Y_H', 'mu_PAO', 'K_O2', 'K_NH4', 'K_P', 'K_ALK', 'K_PHA', 'eta_NO3', 'K_NO3'),
conserved_for=('COD', 'N', 'P', 'NOD', 'charge'))
PAO_anox_processes = Processes([p12, p14])
assert PAO_anox_processes.PAO_anox_growth.ref_component == X_PAO
with pytest.raises(AttributeError):
print(PAO_anox_processes.production_rates)
params = ('f_SI', 'Y_H', 'f_XI', 'Y_PO4', 'Y_PHA', 'Y_A',
'K_h', 'eta_NO3', 'eta_fe', 'K_O2', 'K_NO3', 'K_X',
'mu_H', 'q_fe', 'eta_NO3_deni', 'b_H', 'K_F', 'K_fe', 'K_A',
'K_NH4', 'K_P', 'K_ALK', 'q_PHA', 'q_PP', 'mu_PAO', 'b_PAO',
'b_PP', 'b_PHA', 'K_PS', 'K_PP', 'K_MAX', 'K_IPP', 'K_PHA',
'mu_AUT', 'b_AUT', 'K_O2_AUT', 'K_NH4_AUT', 'K_ALK_2',
'k_PRE', 'k_RED')
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ASM2d_original.tsv')
asm2d = Processes.load_from_file(path,
conserved_for=('COD', 'N', 'P', 'charge'),
parameters=params,
compile=False)
asm2d.extend(PAO_anox_processes)
asm2d.compile()
assert isinstance(asm2d, CompiledProcesses)
assert p12 in asm2d
assert set(asm2d.parameters.keys()) == set(params)
# Try re-pickling if the tests are run locally and
# the environment supports Pickle Protocol 5
pickle = True if _pk else False
try: pc.load_asm1_cmps()
except:
pc._asm1._create_asm1_cmps(pickle=pickle)
try: pc.load_asm2d_cmps()
except:
pc._asm2d._create_asm2d_cmps(pickle=pickle)
pc.load_asm2d_cmps()
if __name__ == '__main__':
test_process()
|
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
<NAME> <<EMAIL>>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
__all__ = ('test_process',)
def test_process():
import pytest
import os
from sympy import symbols, Eq
from sympy.parsing.sympy_parser import parse_expr
from math import isclose
from qsdsan import set_thermo, Components, Process, Processes, CompiledProcesses, _pk
import qsdsan.processes as pc
cmps = Components.load_default()
S_A = cmps.S_Ac.copy('S_A')
S_ALK = cmps.S_CO3.copy('S_ALK') # measured as g C
S_F = cmps.S_F.copy('S_F')
S_I = cmps.S_U_E.copy('S_I')
S_N2 = cmps.S_N2.copy('S_N2')
S_NH4 = cmps.S_NH4.copy('S_NH4')
S_NO3 = cmps.S_NO3.copy('S_NO3')
S_O2 = cmps.S_O2.copy('S_O2')
S_PO4 = cmps.S_PO4.copy('S_PO4')
X_AUT = cmps.X_AOO.copy('X_AUT')
X_H = cmps.X_OHO.copy('X_H')
X_I = cmps.X_U_OHO_E.copy('X_I')
X_MeOH = cmps.X_FeOH.copy('X_MeOH')
X_MeP = cmps.X_FePO4.copy('X_MeP')
X_PAO = cmps.X_PAO.copy('X_PAO')
X_PHA = cmps.X_PAO_PHA.copy('X_PHA')
X_PP = cmps.X_PAO_PP_Lo.copy('X_PP')
X_S = cmps.X_B_Subst.copy('X_S')
S_I.i_N = 0.01
S_F.i_N = 0.03
X_I.i_N = 0.02
X_S.i_N = 0.04
X_H.i_N = X_PAO.i_N = X_AUT.i_N = 0.07
S_I.i_P = 0.00
S_F.i_P = 0.01
X_I.i_P = 0.01
X_S.i_P = 0.01
X_H.i_P = X_PAO.i_P = X_AUT.i_P = 0.02
cmps_asm2d = Components([S_O2, S_F, S_A, S_NH4, S_NO3, S_PO4, S_I, S_ALK, S_N2,
X_I, X_S, X_H, X_PAO, X_PP, X_PHA, X_AUT, X_MeOH, X_MeP])
cmps_asm2d.compile()
set_thermo(cmps_asm2d)
p1 = Process('aero_hydrolysis',
'X_S -> [1-f_SI]S_F + [f_SI]S_I + [?]S_NH4 + [?]S_PO4 + [?]S_ALK',
ref_component='X_S',
rate_equation='K_h * S_O2/(K_O2+S_O2) * X_S/(K_X*X_H+X_S) * X_H',
parameters=('f_SI', 'K_h', 'K_O2', 'K_X'))
f_SI = symbols('f_SI')
assert abs(sum(p1._stoichiometry * p1._components.i_N).subs({'f_SI':1})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_N).subs({'f_SI':0})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_P).subs({'f_SI':1})) < 1e-8
assert abs(sum(p1._stoichiometry * p1._components.i_charge).subs({'f_SI':1})) < 1e-8
p1.set_parameters(f_SI = 0.0)
assert p1.parameters['f_SI'] == 0.0
assert Eq(p1._stoichiometry[p1._components._index['S_I']], parse_expr('1*f_SI'))
p12 = Process('anox_storage_PP',
'S_PO4 + [Y_PHA]X_PHA + [?]S_NO3 -> X_PP + [?]S_N2 + [?]S_NH4 + [?]S_ALK',
ref_component='X_PP',
rate_equation='q_PP * S_O2/(K_O2+S_O2) * S_PO4/(K_PS+S_PO4) * S_ALK/(K_ALK+S_ALK) * (X_PHA/X_PAO)/(K_PHA+X_PHA/X_PAO) * (K_MAX-X_PP/X_PAO)/(K_PP+K_MAX-X_PP/X_PAO) * X_PAO * eta_NO3 * K_O2/S_O2 * S_NO3/(K_NO3+S_NO3)',
parameters=('Y_PHA', 'q_PP', 'K_O2', 'K_PS', 'K_ALK', 'K_PHA', 'eta_NO3', 'K_PP', 'K_NO3'),
conserved_for=('COD', 'N', 'P', 'NOD', 'charge'))
p14 = Process('PAO_anox_growth',
'[1/Y_H]X_PHA + [?]S_NO3 + [?]S_PO4 -> X_PAO + [?]S_N2 + [?]S_NH4 + [?]S_ALK',
ref_component='X_PAO',
rate_equation='mu_PAO * S_O2/(K_O2 + S_O2) * S_NH4/(K_NH4 + S_NH4) * S_PO4/(K_P + S_PO4) * S_CO3/(K_ALK + S_ALK) * (X_PHA/X_PAO)/(K_PHA + X_PHA/X_PAO) * X_PAO * eta_NO3 * K_O2/S_O2 * S_NO3/(K_NO3 + S_NO3)',
parameters=('Y_H', 'mu_PAO', 'K_O2', 'K_NH4', 'K_P', 'K_ALK', 'K_PHA', 'eta_NO3', 'K_NO3'),
conserved_for=('COD', 'N', 'P', 'NOD', 'charge'))
PAO_anox_processes = Processes([p12, p14])
assert PAO_anox_processes.PAO_anox_growth.ref_component == X_PAO
with pytest.raises(AttributeError):
print(PAO_anox_processes.production_rates)
params = ('f_SI', 'Y_H', 'f_XI', 'Y_PO4', 'Y_PHA', 'Y_A',
'K_h', 'eta_NO3', 'eta_fe', 'K_O2', 'K_NO3', 'K_X',
'mu_H', 'q_fe', 'eta_NO3_deni', 'b_H', 'K_F', 'K_fe', 'K_A',
'K_NH4', 'K_P', 'K_ALK', 'q_PHA', 'q_PP', 'mu_PAO', 'b_PAO',
'b_PP', 'b_PHA', 'K_PS', 'K_PP', 'K_MAX', 'K_IPP', 'K_PHA',
'mu_AUT', 'b_AUT', 'K_O2_AUT', 'K_NH4_AUT', 'K_ALK_2',
'k_PRE', 'k_RED')
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ASM2d_original.tsv')
asm2d = Processes.load_from_file(path,
conserved_for=('COD', 'N', 'P', 'charge'),
parameters=params,
compile=False)
asm2d.extend(PAO_anox_processes)
asm2d.compile()
assert isinstance(asm2d, CompiledProcesses)
assert p12 in asm2d
assert set(asm2d.parameters.keys()) == set(params)
# Try re-pickling if the tests are run locally and
# the environment supports Pickle Protocol 5
pickle = True if _pk else False
try: pc.load_asm1_cmps()
except:
pc._asm1._create_asm1_cmps(pickle=pickle)
try: pc.load_asm2d_cmps()
except:
pc._asm2d._create_asm2d_cmps(pickle=pickle)
pc.load_asm2d_cmps()
if __name__ == '__main__':
test_process()
|
en
| 0.883183
|
# -*- coding: utf-8 -*- QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems This module is developed by: <NAME> <<EMAIL>> This module is under the University of Illinois/NCSA Open Source License. Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt for license details. # measured as g C # Try re-pickling if the tests are run locally and # the environment supports Pickle Protocol 5
| 1.886925
| 2
|
src/stat/var.py
|
easai/stat
| 0
|
6627546
|
<gh_stars>0
# Demonstration script: computes the variance and covariance of a small
# 2-D dataset in several equivalent ways (numpy, statistics, manual
# formulas, pandas) and prints each result for comparison.
import numpy as np
from statistics import mean, median,variance,stdev

# Sample data: four (x, y) points.
x=[]
x.append([-5,-10])
x.append([0,3])
x.append([2,11])
x.append([3,14])
# First coordinate of every point.
xrow=[y[0] for y in x]
# Column-wise (population) variance over both coordinates.
var = np.var(x, axis=0)
print(f"{var=}")
# Population variance of the x-coordinates only.
var = np.var(xrow)
print(f"{var=}")
# NOTE: statistics.variance is the *sample* variance (n-1 denominator),
# so this value differs from np.var above.
var=variance(xrow)
print(f"{var=}")
var = np.array(xrow).var()
print(f"{var=}")
# Duplicate of the previous computation (kept as-is; output repeats).
var = np.array(xrow).var()
print(f"{var=}")
xrow=np.array(xrow)
# Variance from its definition: mean of squared deviations.
var = mean(abs(xrow - xrow.mean())**2)
# Covariance matrix; bias=0 keeps the default n-1 (sample) normalization.
cov=np.cov(x,bias=0)
print(f"{cov=}")
# Manual covariance via E[XY] - E[X]E[Y].
totalX=0
totalY=0
totalXY=0
n=len(x)
for item in x:
    totalX+=item[0]
    totalY+=item[1]
    totalXY+=item[0]*item[1]
meanX = totalX/n
meanY=totalY/n
meanXY=totalXY/n
print(f"{meanX=}")
print(f"{meanY=}")
print(f"{meanXY=}")
print(f"cov = {meanXY-meanX*meanY}")
# NOTE(review): axis=1 averages within each point (row means), not per
# coordinate -- presumably intentional for illustration; confirm.
print(np.mean(x, axis=1))
# Manual population variance/covariance via summed squared deviations.
diffX=0
diffY=0
diffXY=0
for item in x:
    diffX+=(item[0]-meanX)*(item[0]-meanX)
    diffY+=(item[1]-meanY)*(item[1]-meanY)
    diffXY+=(item[0]-meanX)*(item[1]-meanY)
varX=diffX/n
varY=diffY/n
cov=diffXY/n
print(f"{varX=}")
print(f"{varY=}")
print(f"{cov=}")
# Simple linear regression y = a + b*x from the moment formulas.
a=meanY-(meanXY-meanX*meanY)/(varX)*meanX
b=(meanXY-meanX*meanY)/(varX)
print(f"{a=}")
print(f"{b=}")
# Cross-check the same statistics with pandas.
import pandas as pd
df = pd.DataFrame(x)
print(df)
print(df.describe())
print(df.cov())
|
# Demonstration script: variance and covariance of four (x, y) points,
# computed with numpy, statistics, hand-written formulas, and pandas.
import numpy as np
from statistics import mean, median,variance,stdev

x=[]
x.append([-5,-10])
x.append([0,3])
x.append([2,11])
x.append([3,14])
# x-coordinates only.
xrow=[y[0] for y in x]
# Per-column population variance.
var = np.var(x, axis=0)
print(f"{var=}")
var = np.var(xrow)
print(f"{var=}")
# statistics.variance uses the n-1 (sample) denominator, unlike np.var.
var=variance(xrow)
print(f"{var=}")
var = np.array(xrow).var()
print(f"{var=}")
# Intentional duplicate of the line above (output repeats).
var = np.array(xrow).var()
print(f"{var=}")
xrow=np.array(xrow)
# Variance as the mean squared deviation from the mean.
var = mean(abs(xrow - xrow.mean())**2)
# Sample covariance matrix (bias=0 is the default n-1 normalization).
cov=np.cov(x,bias=0)
print(f"{cov=}")
# Covariance by hand: E[XY] - E[X]E[Y].
totalX=0
totalY=0
totalXY=0
n=len(x)
for item in x:
    totalX+=item[0]
    totalY+=item[1]
    totalXY+=item[0]*item[1]
meanX = totalX/n
meanY=totalY/n
meanXY=totalXY/n
print(f"{meanX=}")
print(f"{meanY=}")
print(f"{meanXY=}")
print(f"cov = {meanXY-meanX*meanY}")
# NOTE(review): axis=1 gives per-point (row) means, not per-coordinate.
print(np.mean(x, axis=1))
# Population variance/covariance via summed squared deviations.
diffX=0
diffY=0
diffXY=0
for item in x:
    diffX+=(item[0]-meanX)*(item[0]-meanX)
    diffY+=(item[1]-meanY)*(item[1]-meanY)
    diffXY+=(item[0]-meanX)*(item[1]-meanY)
varX=diffX/n
varY=diffY/n
cov=diffXY/n
print(f"{varX=}")
print(f"{varY=}")
print(f"{cov=}")
# Least-squares line y = a + b*x from the computed moments.
a=meanY-(meanXY-meanX*meanY)/(varX)*meanX
b=(meanXY-meanX*meanY)/(varX)
print(f"{a=}")
print(f"{b=}")
# pandas cross-check of the same dataset.
import pandas as pd
df = pd.DataFrame(x)
print(df)
print(df.describe())
print(df.cov())
|
none
| 1
| 3.175624
| 3
|
|
utils/dirs.py
|
BeyondCloud/TF_Template
| 0
|
6627547
|
import os
def create_dirs_if_not_exist(dirs):
    """Create every directory in *dirs* that does not already exist.

    :param dirs: iterable of directory paths to create (parents included)
    :return: 0 on success. On failure the error is printed and the process
        exits with status -1 (NOTE: it does not *return* -1, it exits).
    """
    try:
        for dir_ in dirs:
            # exist_ok=True closes the check-then-create race of the old
            # `if not os.path.exists(...): os.makedirs(...)` pattern:
            # another process creating the directory in between no longer
            # raises, and an already-existing directory is a no-op.
            os.makedirs(dir_, exist_ok=True)
        return 0
    except OSError as err:
        # Narrowed from a bare `except Exception`: only filesystem
        # errors (permissions, bad path, ...) are expected here.
        print("Creating directories error: {0}".format(err))
        exit(-1)
|
import os
def create_dirs_if_not_exist(dirs):
    """Create each directory in *dirs* that is not already present.

    :param dirs: list of directory paths
    :return exit_code: 0 on success; on any error the message is printed
        and the process exits with status -1
    """
    try:
        for path in dirs:
            if os.path.exists(path):
                continue  # already there, nothing to do
            os.makedirs(path)
        return 0
    except Exception as err:
        print("Creating directories error: {0}".format(err))
        exit(-1)
|
en
| 0.788793
|
dirs - a list of directories to create if these directories are not found :param dirs: :return exit_code: 0:success -1:failed
| 4.151218
| 4
|
main.py
|
kanishka100/library
| 0
|
6627548
|
<reponame>kanishka100/library<gh_stars>0
# Flask + SQLAlchemy mini "virtual library": list, add, delete and
# re-rate books stored in a local SQLite database.
from flask import Flask, render_template, request, url_for
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect

app = Flask(__name__)
all_books = []

# Database configuration: a local SQLite file next to the app.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///new-books-collection.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)


class Book(db.Model):
    """A book record: unique title, author, and a float rating."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), unique=True, nullable=False)
    author = db.Column(db.String(250), nullable=False)
    rating = db.Column(db.Float, nullable=False)

    def __repr__(self):
        # Optional, but makes each Book easy to identify when printed.
        my_books = {'title': self.title,
                    'id': self.id,
                    'rating': float(self.rating)}
        return f'{my_books}'


# Create the table (no-op if it already exists).
db.create_all()
# new_book = Book(id=1, title='<NAME>', author="J.K.Rowlings", rating=9.2)
# db.session.add(new_book)
# db.session.commit()


@app.route('/')
def home():
    """Home page: render all books (the query result is a list)."""
    return render_template("index.html", books=Book.query.all())


@app.route("/<int:id>", methods=["GET", "POST"])
def abc(id):
    """Delete the book with the given id, then redirect home."""
    book_to_delete = Book.query.get(id)
    # BUGFIX: Book.query.get() returns None for an unknown id and
    # db.session.delete(None) raises. Skip the delete and redirect.
    if book_to_delete is not None:
        db.session.delete(book_to_delete)
        db.session.commit()
    return redirect(url_for("home"))


@app.route("/add", methods=["GET", "POST"])
def add():
    """POST: add a new book from the submitted form. GET: render the form."""
    if request.method == "POST":
        # Add the new book into the database.
        new_book = Book(title=request.form['title'],
                        author=request.form['author'],
                        rating=request.form['rating'])
        db.session.add(new_book)
        db.session.commit()
        print((Book.query.all()))
        return render_template("index.html", books=Book.query.all())
    else:
        return render_template("add.html")


@app.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Edit the rating of the book with the given id."""
    # Single primary-key lookup reused by both branches (the original
    # queried the same row twice: filter_by().first() and then get()).
    book = Book.query.get(id)
    if request.method == "POST":
        book.rating = request.form['new_rating']
        db.session.commit()
        return redirect(url_for('home'))
    return render_template("rating_edit.html", book=book)


if __name__ == "__main__":
    app.run(debug=True)
|
# Flask + SQLAlchemy mini "virtual library": list, add, delete and
# re-rate books stored in a local SQLite database.
from flask import Flask, render_template, request, url_for
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect

app = Flask(__name__)
all_books = []

# creating database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///new-books-collection.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# creating table model
class Book(db.Model):
    # Primary key; also the URL parameter for the delete/edit routes.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(250), unique=True, nullable=False)
    author = db.Column(db.String(250), nullable=False)
    rating = db.Column(db.Float, nullable=False)

    # this is optional,helps in identifing each book object
    def __repr__(self):
        my_books = {'title': self.title,
                    'id': self.id,
                    'rating': float(self.rating)}
        return f'{my_books}'

# create table (no-op if it already exists)
db.create_all()
# new_book = Book(id=1, title='<NAME>', author="J.K.Rowlings", rating=9.2)
# db.session.add(new_book)
# db.session.commit()

"""Home page"""
@app.route('/')
def home():
    """The data type of the result returned on querying the database is a list."""
    return render_template("index.html", books=Book.query.all())

""" deleting a certain book and then redirecting to home again"""
# NOTE(review): Book.query.get(id) returns None for an unknown id, and
# db.session.delete(None) would then raise -- confirm ids always exist
# or add a None guard.
@app.route("/<int:id>", methods=["GET", "POST"])
def abc(id):
    book_to_delete = Book.query.get(id)
    db.session.delete(book_to_delete)
    db.session.commit()
    return redirect(url_for("home"))

"""to add new books"""
@app.route("/add", methods=["GET", "POST"])
def add():
    # POST inserts the submitted book; GET renders the empty form.
    if request.method == "POST":
        title = request.form['title']
        author = request.form['author']
        rating = request.form['rating']
        # all_books.append(new_book)
        # adding new book into database
        new_book = Book(title=title, author=author, rating=rating)
        db.session.add(new_book)
        db.session.commit()
        print((Book.query.all()))
        return render_template("index.html", books=Book.query.all())
    else:
        return render_template("add.html")

"""to edit the rating of books"""
@app.route('/edit/<int:id>', methods=['GET', 'POST'])
def edit(id):
    # NOTE(review): the same row is looked up twice (filter_by().first()
    # here and get() below); one lookup would suffice.
    book = Book.query.filter_by(id=id).first()
    if request.method == "POST":
        book_to_update = Book.query.get(id)
        book_to_update.rating = request.form['new_rating']
        db.session.commit()
        return redirect(url_for('home'))
    return render_template("rating_edit.html", book=book)

if __name__ == "__main__":
    app.run(debug=True)
|
en
| 0.713856
|
# creating database # creating table model # this is optional,helps in identifing each book object # create table # new_book = Book(id=1, title='<NAME>', author="J.K.Rowlings", rating=9.2) # db.session.add(new_book) # db.session.commit() Home page The data type of the result returned on querying the database is a list. deleting a certain book and then redirecting to home again to add new books # all_books.append(new_book) # adding new book into database to edit the rating of books
| 3.110517
| 3
|
backend/surveybackend/surveybackend/core/views.py
|
huarngpa/huarngpa_norc_challenge
| 0
|
6627549
|
<reponame>huarngpa/huarngpa_norc_challenge<gh_stars>0
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.contrib import messages
from django.shortcuts import render, redirect
from social_django.models import UserSocialAuth
# Create your views here.
def signup(request):
    """Register a new user, log them in, and redirect to the home page.

    GET renders an empty registration form. POST validates the form,
    saves the user, authenticates with the just-submitted credentials
    and logs the new user in; an invalid POST re-renders the bound form.
    """
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            # Log the new user in with the credentials just submitted.
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, user)
            return redirect('home')
    else:
        form = UserCreationForm()
    return render(request, 'registration/signup.html', {'form': form})
@login_required
def home(request):
    """Render the authenticated landing page."""
    return render(request, 'core/home.html')
@login_required
def settings(request):
    """Show the social-auth settings page for the current user.

    Looks up the user's Facebook social login (None when not linked) and
    whether it can be disconnected without locking the user out.
    """
    user = request.user
    try:
        facebook_login = user.social_auth.get(provider='facebook')
    except UserSocialAuth.DoesNotExist:
        # Narrowed from a bare `except Exception as e` (e was unused):
        # only "no facebook login linked" should map to None; any other
        # error should surface instead of being silently swallowed.
        facebook_login = None
    # Disconnecting is safe only if another login method remains.
    can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())
    return render(request, 'core/settings.html', {
        'facebook_login': facebook_login,
        'can_disconnect': can_disconnect,
    })
@login_required
def password(request):
    """Let the current user change (or set) their password.

    Users with a usable password get PasswordChangeForm; users without
    one (e.g. social-login-only accounts) get AdminPasswordChangeForm.
    """
    if request.user.has_usable_password():
        PasswordForm = PasswordChangeForm
    else:
        PasswordForm = AdminPasswordChangeForm
    if request.method == 'POST':
        form = PasswordForm(request.user, request.POST)
        if form.is_valid():
            form.save()
            # Keep the current session valid after the password change.
            update_session_auth_hash(request, form.user)
            messages.success(request, 'Your password was successfully updated!')
            return redirect('password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordForm(request.user)
    return render(request, 'core/password.html', {'form': form})
|
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.contrib import messages
from django.shortcuts import render, redirect
from social_django.models import UserSocialAuth
# Create your views here.
def signup(request):
    """Handle user registration; on success log the user in and go home."""
    if request.method != 'POST':
        # Plain GET: show an empty registration form.
        blank = UserCreationForm()
        return render(request, 'registration/signup.html', {'form': blank})
    submitted = UserCreationForm(request.POST)
    if submitted.is_valid():
        submitted.save()
        credentials = submitted.cleaned_data
        user = authenticate(
            username=credentials.get('username'),
            password=credentials.get('password1'),
        )
        login(request, user)
        return redirect('home')
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'registration/signup.html', {'form': submitted})
@login_required
def home(request):
    """Render the authenticated user's home page."""
    return render(request, 'core/home.html')
@login_required
def settings(request):
    """Render the social-auth settings page for the logged-in user."""
    user = request.user
    try:
        facebook_login = user.social_auth.get(provider='facebook')
    except UserSocialAuth.DoesNotExist:
        # Narrowed from a bare `except Exception as e` (e was unused):
        # treat only "no facebook login linked" as None.
        facebook_login = None
    # A social login may be disconnected only if the user keeps another
    # way to sign in (another social auth or a usable password).
    can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())
    return render(request, 'core/settings.html', {
        'facebook_login': facebook_login,
        'can_disconnect': can_disconnect,
    })
@login_required
def password(request):
    """Change (or set) the current user's password.

    Picks PasswordChangeForm when the user already has a usable
    password, otherwise AdminPasswordChangeForm.
    """
    if request.user.has_usable_password():
        PasswordForm = PasswordChangeForm
    else:
        PasswordForm = AdminPasswordChangeForm
    if request.method == 'POST':
        form = PasswordForm(request.user, request.POST)
        if form.is_valid():
            form.save()
            # Re-hash the session so the user stays logged in.
            update_session_auth_hash(request, form.user)
            messages.success(request, 'Your password was successfully updated!')
            return redirect('password')
        else:
            messages.error(request, 'Please correct the error below.')
    else:
        form = PasswordForm(request.user)
    return render(request, 'core/password.html', {'form': form})
|
en
| 0.968116
|
# Create your views here.
| 2.17174
| 2
|
robocrys/tests/adapter.py
|
kgmat/robocrystallographer
| 0
|
6627550
|
<filename>robocrys/tests/adapter.py
from robocrys.adapter import BaseAdapter
from robocrys.tests import RobocrysTest
class TestDescriptionAdapter(RobocrysTest):
    """Class to test the base adapter functionality."""

    def setUp(self):
        # Build adapters for the two condensed structures used by all tests.
        tin_dioxide = self.get_condensed_structure("SnO2")
        self.tin_dioxide_ba = BaseAdapter(tin_dioxide)
        mapi = self.get_condensed_structure("mapi")
        self.mapi_ba = BaseAdapter(mapi)

    def test_attributes(self):
        """The adapter exposes the condensed-structure attributes directly."""
        self.assertEqual(self.mapi_ba.mineral['type'],
                         "Orthorhombic Perovskite")
        self.assertEqual(self.mapi_ba.mineral['distance'], -1)
        self.assertEqual(self.mapi_ba.formula, "CH3NH3PbI3")
        self.assertEqual(self.mapi_ba.spg_symbol, "Pnma")
        self.assertEqual(self.mapi_ba.crystal_system, "orthorhombic")
        self.assertEqual(self.mapi_ba.dimensionality, 3)
        self.assertTrue(self.mapi_ba.sites)
        self.assertTrue(self.mapi_ba.distances)
        self.assertTrue(self.mapi_ba.angles)
        self.assertTrue(self.mapi_ba.components)
        self.assertTrue(self.mapi_ba.component_makeup)
        self.assertEqual(self.mapi_ba.elements[0], 'H+')

    def test_get_distance_details(self):
        # BUGFIX (here and in test_get_angle_details): the original used
        # assertTrue(len(distances), N), which ALWAYS passes because the
        # second argument of assertTrue is the failure message, not an
        # expected value. The intended length checks are restored with
        # assertEqual, taking N from the original call's second argument.
        # test get distance using int works
        distances = self.tin_dioxide_ba.get_distance_details(0, 2)
        self.assertEqual(len(distances), 3)
        self.assertAlmostEqual(distances[0], 2.0922101061490546)
        # test get distance using list works
        distances = self.tin_dioxide_ba.get_distance_details(0, [2])
        self.assertEqual(len(distances), 3)
        self.assertAlmostEqual(distances[0], 2.0922101061490546)
        # test getting multiple distances
        distances = self.mapi_ba.get_distance_details(44, [0, 8])
        self.assertEqual(len(distances), 4)
        self.assertAlmostEqual(distances[0], 1.0386222568611572)

    def test_get_angle_details(self):
        # test get angles using int works
        distances = self.tin_dioxide_ba.get_angle_details(0, 0, 'corner')
        self.assertEqual(len(distances), 8)
        self.assertAlmostEqual(distances[0], 129.18849530149342)
        # test get angles using list works
        distances = self.tin_dioxide_ba.get_angle_details(0, [0], 'corner')
        self.assertEqual(len(distances), 8)
        self.assertAlmostEqual(distances[0], 129.18849530149342)
|
<filename>robocrys/tests/adapter.py
from robocrys.adapter import BaseAdapter
from robocrys.tests import RobocrysTest
class TestDescriptionAdapter(RobocrysTest):
    """Class to test the base adapter functionality."""

    def setUp(self):
        # Adapters for the two condensed structures used by every test.
        tin_dioxide = self.get_condensed_structure("SnO2")
        self.tin_dioxide_ba = BaseAdapter(tin_dioxide)
        mapi = self.get_condensed_structure("mapi")
        self.mapi_ba = BaseAdapter(mapi)

    def test_attributes(self):
        # Condensed-structure attributes should pass through the adapter.
        self.assertEqual(self.mapi_ba.mineral['type'],
                         "Orthorhombic Perovskite")
        self.assertEqual(self.mapi_ba.mineral['distance'], -1)
        self.assertEqual(self.mapi_ba.formula, "CH3NH3PbI3")
        self.assertEqual(self.mapi_ba.spg_symbol, "Pnma")
        self.assertEqual(self.mapi_ba.crystal_system, "orthorhombic")
        self.assertEqual(self.mapi_ba.dimensionality, 3)
        self.assertTrue(self.mapi_ba.sites)
        self.assertTrue(self.mapi_ba.distances)
        self.assertTrue(self.mapi_ba.angles)
        self.assertTrue(self.mapi_ba.components)
        self.assertTrue(self.mapi_ba.component_makeup)
        self.assertEqual(self.mapi_ba.elements[0], 'H+')

    def test_get_distance_details(self):
        # NOTE(review): assertTrue(len(distances), N) always passes --
        # the second argument of assertTrue is the failure *message*.
        # These were presumably meant to be assertEqual(len(...), N);
        # confirm the intended counts before fixing.
        # test get distance using int works
        distances = self.tin_dioxide_ba.get_distance_details(0, 2)
        self.assertTrue(len(distances), 3)
        self.assertAlmostEqual(distances[0], 2.0922101061490546)
        # test get distance using list works
        distances = self.tin_dioxide_ba.get_distance_details(0, [2])
        self.assertTrue(len(distances), 3)
        self.assertAlmostEqual(distances[0], 2.0922101061490546)
        # test getting multiple distances
        distances = self.mapi_ba.get_distance_details(44, [0, 8])
        self.assertTrue(len(distances), 4)
        self.assertAlmostEqual(distances[0], 1.0386222568611572)

    def test_get_angle_details(self):
        # NOTE(review): same assertTrue(len, N) no-op pattern as above.
        # test get angles using int works
        distances = self.tin_dioxide_ba.get_angle_details(0, 0, 'corner')
        self.assertTrue(len(distances), 8)
        self.assertAlmostEqual(distances[0], 129.18849530149342)
        # test get angles using list works
        distances = self.tin_dioxide_ba.get_angle_details(0, [0], 'corner')
        self.assertTrue(len(distances), 8)
        self.assertAlmostEqual(distances[0], 129.18849530149342)
|
en
| 0.711557
|
Class to test the base adapter functionality. # test get distance using int works # test get distance using list works # test getting multiple distances # test get angles using int works # test get angles using list works
| 2.838845
| 3
|