hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace5e3e22b901f265dd52274651cc06958273bd9 | 1,704 | py | Python | scripts/freeze_dependencies.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | scripts/freeze_dependencies.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | scripts/freeze_dependencies.py | ejfitzgerald/agents-aea | 6411fcba8af2cdf55a3005939ae8129df92e8c3e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
This CLI tool freezes the dependencies
"""
import re
import subprocess # nosec
def parse_args():
    """Build and run the CLI argument parser.

    Supports a single optional ``-o/--output`` flag naming a writable
    file; when omitted, ``output`` is None and callers print to stdout.
    """
    import argparse  # pylint: disable=import-outside-toplevel

    argument_parser = argparse.ArgumentParser("freeze_dependencies")
    argument_parser.add_argument(
        "-o", "--output", type=argparse.FileType("w"), default=None
    )
    return argument_parser.parse_args()
if __name__ == "__main__":
    arguments = parse_args()
    # Capture `pip freeze` output instead of letting it go to the console.
    pip_freeze_call = subprocess.Popen(  # nosec
        ["pip", "freeze"], stdout=subprocess.PIPE
    )
    (stdout, stderr) = pip_freeze_call.communicate()
    requirements = stdout.decode("utf-8")
    # remove 'aea' itself
    regex = re.compile("^aea(==.*| .*)?$", re.MULTILINE)
    requirements = re.sub(regex, "", requirements)
    # Default to stdout when no -o/--output file was supplied.
    if arguments.output is None:
        print(requirements)
    else:
        arguments.output.write(requirements)
| 32.150943 | 84 | 0.621479 |
ace5e476a40b8a3946d56e597c12117e2be3c38b | 65,303 | py | Python | Providers/Scripts/2.6x-2.7x/Scripts/nxService.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | null | null | null | Providers/Scripts/2.6x-2.7x/Scripts/nxService.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | null | null | null | Providers/Scripts/2.6x-2.7x/Scripts/nxService.py | amitsara/PowerShell-DSC-for-Linux | 22694d09f1fe61228210aae9bdd53b6f3da4c2d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ===================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# See license.txt for license information.
# ===================================
from __future__ import with_statement
from contextlib import contextmanager
import subprocess
import os
import sys
import glob
import codecs
import imp
import time
import copy
import re
import fnmatch
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
helperlib = imp.load_source('helperlib', '../helperlib.py')
LG = nxDSCLog.DSCLog
# [ClassVersion("1.0.0"),FriendlyName("nxService"), SupportsInventory()]
# class MSFT_nxServiceResource : OMI_BaseResource
# {
# [key, InventoryFilter] string Name;
# [write,required,ValueMap{"init", "upstart", "systemd"},Values{"init","upstart","systemd"}, InventoryFilter] string Controller;
# [write, InventoryFilter] boolean Enabled;
# [write,ValueMap{"Running", "Stopped"},Values{"Running", "Stopped"}, InventoryFilter] string State;
# [read] string Path;
# [read] string Description;
# [read] string Runlevels;
# };
global show_mof
show_mof = False
def init_vars(Name, Controller, Enabled, State):
    """Normalize the four resource properties to internal form.

    Returns (Name, Controller, Enabled, State) where Name is '' for
    None or the '*' wildcard (else ascii-encoded), Controller is
    lower-cased (defaulting via GetController() when unset or '*'),
    Enabled is a strict bool (None -> False), and State is lower-cased
    ('' when None).
    """
    if Name is not None:
        Name = Name.encode('ascii', 'ignore')
    else:
        Name = ''
    if Name == '*':
        Name = ''
    if Controller is not None and Controller != '*' and Controller != '':
        Controller = Controller.encode('ascii', 'ignore').lower()
    else:
        # No explicit controller requested: fall back to the detected one.
        # GetController() is defined elsewhere in this module.
        Controller = GetController()
    if Enabled is None:
        Enabled = False
    # Coerce any truthy/falsy input to a strict boolean.
    Enabled = (Enabled == True)
    if State is not None:
        State = State.encode('ascii', 'ignore').lower()
    else:
        State = ''
    return Name, Controller.lower(), Enabled, State.lower()
def Set_Marshall(Name, Controller, Enabled, State):
    """DSC Set entry point: apply the requested service configuration.

    Returns [0] on success, [-1] on failure or when running under
    omsconfig (Set is disabled for that agent).
    """
    if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
        return [-1]
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    if Controller == '':
        return [-1]
    # Set() performs the actual work; defined elsewhere in this module.
    retval = Set(Name, Controller, Enabled, State)
    return retval
def Test_Marshall(Name, Controller, Enabled, State):
    """DSC Test entry point: [0] when current state matches the request.

    Returns [-1] on mismatch, failure, or when running under omsconfig
    (same guard as Set_Marshall).
    """
    if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
        return [-1]
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    if Controller == '':
        return [-1]
    # Test() is defined elsewhere in this module.
    retval = Test(Name, Controller, Enabled, State)
    return retval
def Get_Marshall(Name, Controller, Enabled, State):
    """DSC Get entry point: return (exit_code, dict of MI-typed values)."""
    # At this point locals() holds exactly the four parameters; capture
    # their names, then add the read-only output properties.
    arg_names = list(locals().keys())
    arg_names.append('Path')
    arg_names.append('Runlevels')
    arg_names.append('Description')
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    if Controller == '':
        return [-1], {}
    retval = 0
    # Get() is defined elsewhere in this module.
    (retval, Name, Controller, Enabled, State, Path, Description, Runlevels) = Get(
        Name, Controller, Enabled, State)
    # Wrap each value in its MI protocol type for the agent.
    Name = protocol.MI_String(Name)
    Controller = protocol.MI_String(Controller)
    Enabled = protocol.MI_Boolean(Enabled)
    State = protocol.MI_String(State)
    Path = protocol.MI_String(Path)
    Description = protocol.MI_String(Description)
    Runlevels = protocol.MI_String(Runlevels)
    retd = {}
    # Build the return dict from the current local bindings by name;
    # this relies on the variable names above matching arg_names exactly.
    ld = locals()
    for k in arg_names:
        retd[k] = ld[k]
    return retval, retd
def Inventory_Marshall(Name, Controller, Enabled, State):
    """DSC Inventory entry point: enumerate services matching the filters.

    Returns (exit_code, {"__Inventory": MI_InstanceA}); each instance
    carries Name/Controller/Enabled/State/Path/Description/Runlevels.
    """
    # Remember whether the caller supplied an Enabled filter before
    # init_vars coerces None to False.
    FilterEnabled = (Enabled != None)
    (Name, Controller, Enabled, State) = init_vars(
        Name, Controller, Enabled, State)
    if Controller == '':
        return -1, {"__Inventory": {}}
    # ServiceContext and GetAll are defined elsewhere in this module;
    # GetAll populates sc.services_list.
    sc = ServiceContext(Name, Controller, Enabled, State)
    sc.FilterEnabled = FilterEnabled
    if not GetAll(sc):
        return -1, {"__Inventory": {}}
    for srv in sc.services_list:
        # Wrap each field in its MI protocol type.
        srv['Name'] = protocol.MI_String(srv['Name'])
        srv['Controller'] = protocol.MI_String(srv['Controller'])
        srv['Enabled'] = protocol.MI_Boolean(srv['Enabled'])
        srv['State'] = protocol.MI_String(srv['State'])
        srv['Path'] = protocol.MI_String(srv['Path'])
        srv['Description'] = protocol.MI_String(srv['Description'])
        srv['Runlevels'] = protocol.MI_String(srv['Runlevels'])
    Inventory = protocol.MI_InstanceA(sc.services_list)
    retd = {}
    retd["__Inventory"] = Inventory
    return 0, retd
#
# Begin user defined DSC functions
#
def SetShowMof(a):
    """Toggle MOF echoing; when truthy, ShowMof logs generated MOF text."""
    global show_mof
    show_mof = a
def ShowMof(op, Name, Controller, Enabled, State):
    """When show_mof is enabled, render a MOF snippet for operation *op*.

    Appends the text to ./test_mofs.log and mirrors it to the DSC log.
    """
    if not show_mof:
        return
    mof = ''
    mof += op + ' nxService MyService'
    mof += '{\n'
    mof += ' Name = "' + Name + '"\n'
    mof += ' Controller = "' + Controller + '"\n'
    mof += ' Enabled = ' + str(Enabled) + '\n'
    mof += ' State = "' + State + '"\n'
    mof += '}\n'
    f = open('./test_mofs.log', 'a')
    Print(mof, file=f)
    LG().Log('INFO', mof)
    f.close()
def Print(s, file=sys.stdout):
    """Write *s* plus a trailing newline to *file* (stdout by default)."""
    text = s + '\n'
    file.write(text)
@contextmanager
def opened_w_error(filename, mode="r"):
    """
    This context ensures the file is closed.

    Yields (file, error): exactly one of the pair is None, so callers
    can branch on the error without a try/except at the call site.
    Files are opened via codecs.open with UTF-8 encoding.
    """
    # NOTE: `except IOError, err` is Python-2-only syntax.
    try:
        f = codecs.open(filename, encoding='utf-8', mode=mode)
    except IOError, err:
        yield None, err
    else:
        try:
            yield f, None
        finally:
            f.close()
def RunGetOutput(cmd, no_output, chk_err=True):
    """
    Wrapper for subprocess.check_output.
    Execute 'cmd'. Returns return code and STDOUT,
    trapping expected exceptions.
    Reports exceptions to Error if chk_err parameter is True

    cmd is run through the shell; stderr is merged into stdout.  When
    no_output is True the child's output is not captured and None is
    returned in place of the text.
    """
    def check_output(no_output, *popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        if no_output:
            out_file = None
        else:
            out_file = subprocess.PIPE
        process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output
    # Exception classes used by this module.
    class CalledProcessError(Exception):
        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output
        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" \
                % (self.cmd, self.returncode)
    # Install the backports so python 2.6's subprocess behaves like 2.7.
    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
    try:
        output = subprocess.check_output(
            no_output, cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError, e:
        if chk_err:
            Print('CalledProcessError. Error Code is ' +
                  str(e.returncode), file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError. Error Code is '
                + str(e.returncode))
            Print(
                'CalledProcessError. Command string was '
                + e.cmd, file=sys.stderr)
            LG().Log(
                'ERROR', 'CalledProcessError. Command string was ' + e.cmd)
            Print('CalledProcessError. Command result was ' +
                  (e.output[:-1]).decode('utf-8').encode('ascii', 'ignore'),
                  file=sys.stderr)
            LG().Log('ERROR', 'CalledProcessError. Command result was ' +
                     (e.output[:-1]).decode('utf-8').encode('ascii', 'ignore'))
        if no_output:
            return e.returncode, None
        else:
            return e.returncode, \
                e.output.decode('utf-8').encode('ascii', 'ignore')
    if no_output:
        return 0, None
    else:
        return 0, output.decode('utf-8').encode('ascii', 'ignore')
def RunGetOutputNoStderr(cmd, no_output, chk_err=True):
    """
    Wrapper for subprocess.check_output without stderr.

    Execute 'cmd' through the shell with stderr discarded to /dev/null.
    Returns (return_code, stdout_text); stdout_text is None when
    no_output is True.  Trapped CalledProcessError is reported to
    stderr and the DSC log when chk_err is True.
    """
    def check_output(no_output, *popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        if no_output:
            out_file = None
        else:
            out_file = subprocess.PIPE
        process = subprocess.Popen(stdout=out_file, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output
    # Exception classes used by this module.
    class CalledProcessError(Exception):
        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output
        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" \
                % (self.cmd, self.returncode)
    # Install the backports so python 2.6's subprocess behaves like 2.7.
    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
    devnull = open('/dev/null', 'w')
    # BUGFIX: the original closed devnull only on the error path, leaking
    # a file descriptor on every successful call; the finally below
    # closes it on all paths.  ('as e' syntax is valid on python 2.6+.)
    try:
        try:
            output = subprocess.check_output(
                no_output, cmd, stderr=devnull, shell=True)
        except subprocess.CalledProcessError as e:
            if chk_err:
                Print('CalledProcessError. Error Code is ' +
                      str(e.returncode), file=sys.stderr)
                LG().Log(
                    'ERROR', 'CalledProcessError. Error Code is '
                    + str(e.returncode))
                Print(
                    'CalledProcessError. Command string was '
                    + e.cmd, file=sys.stderr)
                LG().Log(
                    'ERROR', 'CalledProcessError. Command string was ' + e.cmd)
                Print('CalledProcessError. Command result was ' +
                      (e.output[:-1]).decode('utf-8').encode('ascii', 'ignore'),
                      file=sys.stderr)
                LG().Log('ERROR', 'CalledProcessError. Command result was ' +
                         (e.output[:-1]).decode('utf-8').encode('ascii', 'ignore'))
            if no_output:
                return e.returncode, None
            else:
                return e.returncode, \
                    e.output.decode('utf-8').encode('ascii', 'ignore')
    finally:
        devnull.close()
    if no_output:
        return 0, None
    else:
        return 0, output.decode('utf-8').encode('ascii', 'ignore')
# Well-known controller binary locations.  systemctl_path is re-resolved
# by SystemdExists() when systemctl is found elsewhere on PATH.
systemctl_path = "/usr/bin/systemctl"
upstart_start_path = "/sbin/start"
upstart_stop_path = "/sbin/stop"
upstart_status_path = "/sbin/status"
# Some distros keep service tools only in /sbin; ensure it is on PATH.
if os.path.exists('/sbin'):
    os.environ['PATH']=os.environ['PATH']+':/sbin'
# Locate the 'service' wrapper at import time ('' when absent).
code, out = RunGetOutput('which service', False, False)
initd_service = out.strip('\n')
# Red Hat-style and Debian-style enable/disable tools; only one family
# is normally present on a given system.
initd_chkconfig = "/sbin/chkconfig"
initd_invokerc = "/usr/sbin/invoke-rc.d"
initd_updaterc = "/usr/sbin/update-rc.d"
lsb_install_initd = "/usr/lib/lsb/install_initd"
lsb_remove_initd = "/usr/lib/lsb/remove_initd"
runlevel_path = "/sbin/runlevel"
def ReadFile(path):
    """
    Safely attempt to read a file,
    ensuring file is always closed at exit.
    Return the data and the exception object.
    The data is None if an error occurred.
    The error is None if the data was read.
    Log results to stderr.
    """
    d = None
    error = None
    # opened_w_error yields (file, error); exactly one of them is None.
    # NOTE: tuple-unpacking `with ... as (F, error)` is Python-2-only.
    with opened_w_error(path, 'rb') as (F, error):
        if error:
            # NOTE(review): IOError.message is Python-2-only; revisit on a
            # Python 3 port.
            Print("Exception opening file " + path + " Error Code: "
                  + str(error.errno) +
                  " Error: " + error.message + error.strerror, file=sys.stderr)
            LG().Log('ERROR', "Exception opening file " + path
                     + " Error Code: " + str(error.errno)
                     + " Error: " + error.message + error.strerror)
        else:
            d = F.read()
    return d, error
def WriteFile(path, contents):
    """
    Safely attempt to write data to a file,
    replacing the existing file or creating it and
    ensuring file is always closed at exit.
    Return the exception object.
    The error is None if the data was written.
    Log results to stderr.
    """
    error = None
    # opened_w_error yields (file, error); exactly one of them is None.
    with opened_w_error(path, 'wb+') as (F, error):
        if error:
            # NOTE(review): IOError.message is Python-2-only; revisit on a
            # Python 3 port.
            Print("Exception opening file " + path +
                  " Error Code: " + str(error.errno) +
                  " Error: " + error.message + error.strerror,
                  file=sys.stderr)
            LG().Log('ERROR', "Exception opening file "
                     + path + " Error Code: " +
                     str(error.errno) + " Error: " + error.message
                     + error.strerror)
        else:
            F.write(contents)
    return error
def Process(params, no_output=False):
    """Join *params* into one shell command line and run it.

    Returns (stdout, stderr, exit_code).  RunGetOutput merges stderr
    into stdout, so the same captured text is returned for both slots.
    """
    command = ' '.join(params)
    code, out = RunGetOutput(command, no_output, False)
    return (out, out, code)
def StartService(sc):
    """Start sc.Name via the controller named in sc.Controller.

    Returns [0] on success, [-1] on failure.  After the start command,
    IsServiceRunning (defined elsewhere in this module) verifies the
    service actually came up.
    """
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "start", sc.Name])
        if retval is not 0:
            Print("Error: " + systemctl_path + " start " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " start " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        if retval is not 0:
            Print("Error: " + upstart_start_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_start_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        check_state_program = initd_service
        # Prefer invoke-rc.d on Debian-style systems.
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "start"])
        if retval is not 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Double-check that the service really started.
    if not IsServiceRunning(sc):
        Print("Error: " + sc.Name + " start failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " start failed: " + process_stderr)
        return [-1]
    return [0]
def StopService(sc):
    """Stop sc.Name via the controller named in sc.Controller.

    Returns [0] on success, [-1] on failure.  After the stop command,
    IsServiceRunning (defined elsewhere in this module) confirms the
    service actually went down.
    """
    if sc.Controller == "systemd":
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "stop", sc.Name])
        if retval is not 0:
            Print("Error: " + systemctl_path + " failed: " +
                  process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + systemctl_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "upstart":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval is not 0:
            Print("Error: " + upstart_stop_path +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + upstart_stop_path
                + " failed: " + process_stderr)
            return [-1]
    elif sc.Controller == "init":
        check_state_program = initd_service
        # Prefer invoke-rc.d on Debian-style systems.
        if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
            check_state_program = initd_invokerc
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "stop"])
        if retval is not 0:
            Print("Error: " + check_state_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: " + check_state_program
                + " failed: " + process_stderr)
            return [-1]
    # Double-check that the service really stopped.
    if IsServiceRunning(sc):
        Print("Error: " + sc.Name + " stop failed: " +
              process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + sc.Name +
                 " stop failed: " + process_stderr)
        return [-1]
    return [0]
def GetRunLevel():
    """Return the current runlevel as an int, or -1 on any failure.

    Parses `/sbin/runlevel` output of the form "N 5".
    """
    out, err, code = Process([runlevel_path])
    if code != 0:
        Print("Error: " + runlevel_path + " failed: " +
              err, file=sys.stderr)
        LG().Log(
            'ERROR', "Error: " + runlevel_path + " failed: " + err)
        return -1
    fields = out.split(" ")
    if len(fields) != 2:
        Print("Error: unexpected number of tokens from " +
              runlevel_path + ". stdout: " + out, file=sys.stderr)
        LG().Log('ERROR', "Error: unexpected number of tokens from " +
                 runlevel_path + ". stdout: " + out)
        return -1
    return int(fields[1])
def DetermineInitState(stdout):
    """Interpret init-script status output; True when it shows running."""
    running_markers = ("is running", "start/running", "..running",
                       "(running)")
    for marker in running_markers:
        if marker in stdout:
            return True
    return stdout.strip() == "running"
def DetermineInitEnabled(stdout, runlevel):
    """Parse one line of `chkconfig --list` output.

    *stdout* looks like "name 0:off 1:off 2:on ..."; return True when
    the entry for *runlevel* is "on", False otherwise (including any
    parse problem, which is also logged).
    """
    levels = stdout.split()[1:]
    if runlevel > len(levels) - 1:
        Print("runlevel " + str(runlevel) +
              " not found in chkconfig", file=sys.stderr)
        LG().Log(
            'ERROR', "runlevel " + str(runlevel) + " not found in chkconfig")
        return False
    pair = levels[runlevel].split(":")
    if len(pair) != 2:
        Print(
            "Unable to determine format for chkconfig run level",
            file=sys.stderr)
        LG().Log(
            'ERROR', "Unable to determine format for chkconfig run level")
        return False
    return pair[1] == "on"
def GetSystemdState(sc):
    """Return "running" when `systemctl status` succeeds and reports
    "(running)", otherwise "stopped"."""
    out, err, code = Process([systemctl_path, "status", sc.Name])
    if code == 0 and '(running)' in out:
        return "running"
    return "stopped"
def TestSystemdState(sc):
    """True when no State was requested or it matches the actual state."""
    return (not sc.State) or sc.State == GetSystemdState(sc)
def GetSystemdEnabled(sc):
    """True when `systemctl is-enabled` exits 0 for the unit."""
    out, err, code = Process([systemctl_path, "is-enabled", sc.Name])
    return code == 0
def TestSystemdEnabled(sc):
    """True when the requested Enabled flag matches systemd's answer."""
    return sc.Enabled is GetSystemdEnabled(sc)
def TestSystemd(sc):
    """[0] when systemd is present and both state and enabled match,
    otherwise [-1].  Checks short-circuit left to right."""
    matches = (SystemdExists() and TestSystemdState(sc)
               and TestSystemdEnabled(sc))
    return [0] if matches else [-1]
def GetUpstartState(sc):
    """Return "running"/"stopped" per `status <name>`, or "" on failure."""
    out, err, code = Process([upstart_status_path, sc.Name])
    if code != 0:
        Print("Error: " + upstart_status_path +
              " failed: " + err, file=sys.stderr)
        LG().Log('ERROR', "Error: " + upstart_status_path +
                 " failed: " + err)
        return ""
    return "running" if (sc.Name + " start") in out else "stopped"
def TestUpstartState(sc):
    """True when no State was requested or it matches the upstart state."""
    return (not sc.State) or sc.State == GetUpstartState(sc)
def GetUpstartEnabled(sc):
    """Determine enablement from /etc/init/<name>.conf.

    Returns True/False, the string "Complex" when the conf's start/stop
    stanzas are too involved to interpret safely, or "Error" when the
    conf file cannot be read.  Falls back to rc2.d symlink and
    chkconfig checks for init scripts converted to upstart jobs.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        start_on_exists = False
        start_on_is_enabled = False
        stop_on_exists = False
        stop_on_is_enabled = False
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        if error is not None:
            Print(
                "Error reading:/etc/init/" + sc.Name + ".conf",
                file=sys.stderr)
            LG().Log('ERROR', "Error reading:/etc/init/" +
                     sc.Name + ".conf")
            return "Error"
        for full_line in file_lines.splitlines():
            # everything after a '#' character is a comment, so strip it off
            line = full_line.split("#")[0]
            if "start on" in line:
                start_on_exists = True
                # Parenthesized or compound conditions are beyond this
                # parser; report them as "Complex".
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "start on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        start_on_is_enabled = True
                    else:
                        start_on_is_enabled = False
                    # "[!235]" negates the runlevel set.
                    if "!" in specified_runlevel_digits:
                        start_on_is_enabled = not start_on_is_enabled
                else:
                    return "Complex"
            if "stop on" in line:
                stop_on_exists = True
                if ("(" in line) or ("and" in line) or ("or" in line):
                    return "Complex"
                elif "stop on runlevel [" in line:
                    runlevel = GetRunLevel()
                    specified_runlevel_digits = line.split("[")[1][:-1]
                    if str(runlevel) in specified_runlevel_digits:
                        stop_on_is_enabled = True
                    else:
                        stop_on_is_enabled = False
                    if "!" in specified_runlevel_digits:
                        stop_on_is_enabled = not stop_on_is_enabled
                else:
                    return "Complex"
        if not start_on_exists and not stop_on_exists:  # not upstart
            if os.path.islink('/etc/init.d/' + sc.Name) and \
                    os.readlink('/etc/init.d/' + sc.Name) \
                    == '/lib/init/upstart-job':
                # this is a 'converted' init script, check the default rc2.d
                # for smylink to conf file. if so its enabled.
                file_list = os.listdir('/etc/rc2.d')
                for f in file_list:
                    f = '/etc/rc2.d/' + f
                    if os.path.islink(f) and os.readlink(f) == \
                            "../init.d/" + sc.Name:
                        return True
                return False
            (process_stdout, process_stderr, retval) = Process(
                ['chkconfig', sc.Name, ''])  # try init style
            if retval is 0:
                if 'off' not in process_stdout:
                    return True
            return False
        if start_on_exists and start_on_is_enabled:
            if stop_on_exists and stop_on_is_enabled:
                # Both start and stop would fire at this runlevel.
                Print("Error: Having trouble determining whether service " +
                      sc.Name + " is enabled or disabled.", file=sys.stderr)
                LG().Log('ERROR',
                         "Error: Having trouble determining whether service " +
                         sc.Name + " is enabled or disabled.")
                return "Complex"
            else:
                return True
        else:
            return False
        # NOTE(review): unreachable — every branch above returns.
        Print("Error: Unable to find line containing 'start on' in " +
              sc.Name + ".conf", file=sys.stderr)
        LG().Log('ERROR',
                 "Error: Unable to find line containing 'start on' in " +
                 sc.Name + ".conf")
        return False
    else:
        Print("Error: conf file does not exist for service named " +
              sc.Name, file=sys.stderr)
        LG().Log('ERROR',
                 "Error: conf file does not exist for service named " +
                 sc.Name)
        return False
def TestUpstartEnabled(sc):
    """Return GetUpstartEnabled(sc), except "Complex" becomes False.

    Note the result may also be the string "Error" (truthy) when the
    conf file could not be read.
    """
    currently_enabled = GetUpstartEnabled(sc)
    if currently_enabled == "Complex":
        Print("Error: Cannot modify 'Enabled' state for service " + sc.Name +
              ", conf file too complex. Please use the File provider to " +
              "write your own conf file for this service.", file=sys.stderr)
        LG().Log('ERROR', "Error: Cannot modify 'Enabled' state for service "
                 + sc.Name +
                 ", conf file too complex. Please use the File provider to " +
                 " writeyour own conf file for this service.")
        return False
    return currently_enabled
def TestUpstart(sc):
    """[0] when upstart is present and both state and enabled match,
    otherwise [-1].  Checks short-circuit left to right."""
    matches = (UpstartExists() and TestUpstartState(sc)
               and sc.Enabled is TestUpstartEnabled(sc))
    return [0] if matches else [-1]
def GetInitState(sc):
    """Return "running"/"stopped" for an init-controlled service.

    Uses the `service` wrapper when available, otherwise invokes the
    /etc/init.d script directly, and falls back to IsServiceRunning
    (defined elsewhere in this module) when the status query fails.
    """
    check_state_program = initd_service
    # debian style init. These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        # NOTE(review): this assigns the same value as above; probably
        # intended to be initd_invokerc — confirm before changing.
        check_state_program = initd_service
    if os.path.isfile(initd_service):
        check_state_program = initd_service
    else:  # invoke the service directly
        check_state_program = '/etc/init.d/'
    if check_state_program == '/etc/init.d/':
        # Run /etc/init.d/<name> status without capturing output.
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program + sc.Name, "status"], True)
        if retval is not 0:
            Print("Error: " + check_state_program +
                  sc.Name + " status failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     sc.Name + " status failed: ")
        if IsServiceRunning(sc):
            return "running"
        else:
            return "stopped"
    else:
        (process_stdout, process_stderr, retval) = Process(
            [check_state_program, sc.Name, "status"])
        if retval is not 0:
            if IsServiceRunning(sc):
                return "running"
            else:
                return "stopped"
        if DetermineInitState(process_stdout):
            return "running"
        else:
            return "stopped"
def TestInitState(sc):
    """True when no State was requested or it matches the init state."""
    return (not sc.State) or sc.State == GetInitState(sc)
def GetInitEnabled(sc):
    """True when the service is enabled at the current runlevel.

    Debian-style systems are checked for an S??<name> symlink in
    /etc/rc<runlevel>.d/; otherwise `chkconfig --list` output is parsed.
    """
    runlevel = GetRunLevel()
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        # A service is enabled if a symbolic link
        # exists in /etc/rc${RUNLEVEL}.d/ with the name:
        # S??${sc.Name}
        matched_files = glob.glob(
            "/etc/rc" + str(runlevel) + ".d/S??" + sc.Name)
        for f in matched_files:
            if os.path.islink(f):
                return True
        return False
    else:
        check_enabled_program = initd_chkconfig
        (process_stdout, process_stderr, retval) = Process(
            [check_enabled_program, "--list", sc.Name])
        if retval is not 0:
            Print("Error: " + check_enabled_program +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_enabled_program +
                     " failed: " + process_stderr)
            return False
        if DetermineInitEnabled(process_stdout, runlevel):
            return True
        else:
            return False
def TestInitEnabled(sc):
    """True when the requested Enabled flag matches the init enablement."""
    return sc.Enabled is GetInitEnabled(sc)
def TestInit(sc):
    """[0] when init tooling is present and both state and enabled match,
    otherwise [-1].  Checks short-circuit left to right."""
    matches = (InitExists() and TestInitState(sc) and TestInitEnabled(sc))
    return [0] if matches else [-1]
def SystemdExists():
    """Detect systemctl via `which`; cache its path in the module global."""
    global systemctl_path
    code, out = RunGetOutput('which systemctl', False, False)
    if code != 0:
        return False
    systemctl_path = out.strip()
    return True
def UpstartExists():
    """True when an upstart bridge daemon plus the start/stop/status
    binaries are all present on disk."""
    bridge_present = (os.path.isfile('/sbin/upstart-local-bridge') or
                      os.path.isfile('/sbin/upstart-udev-bridge'))
    if not bridge_present:
        return False
    return (os.path.isfile(upstart_start_path) and
            os.path.isfile(upstart_stop_path) and
            os.path.isfile(upstart_status_path))
def InitExists():
    """True when Red Hat-style (service + chkconfig) or Debian-style
    (invoke-rc.d + update-rc.d) init tooling is installed."""
    if os.path.isfile(initd_service) and os.path.isfile(initd_chkconfig):
        return True
    return os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc)
def ServiceExistsInSystemd(sc):
    """A unit exists when `systemctl status` succeeds, or when its
    output still reports "Loaded: loaded" despite a nonzero exit."""
    out, err, code = Process([systemctl_path, "status", sc.Name])
    if code == 0:
        return True
    return "Loaded: loaded" in out
def ServiceExistsInUpstart(sc):
    """An upstart job exists exactly when `status <name>` exits 0."""
    out, err, code = Process([upstart_status_path, sc.Name])
    return code == 0
def ServiceExistsInInit(sc):
    """True unless the init status query reports the service as unknown."""
    check_state_program = initd_service
    # Prefer invoke-rc.d on Debian-style systems.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        check_state_program = initd_invokerc
    (process_stdout, process_stderr, retval) = Process(
        [check_state_program, sc.Name, "status"])
    # Process() merges stderr into stdout, so process_stderr carries the
    # same text; look for the distro-specific "unknown service" phrases.
    if "unrecognized service" in process_stderr \
            or "no such service" in process_stderr \
            or "not found" in process_stderr:
        Print(process_stderr, file=sys.stderr)
        LG().Log('INFO', process_stderr)
        return False
    else:
        return True
def CreateSystemdService(sc):
    """Always fails with [-1]: systemd units must be created with the
    File provider, not this service provider."""
    Print("Error: systemd services cannot be created from the service " +
          "provider. Please use the file provider to create a systemd " +
          "conf file, then modify the service using this service provider.",
          file=sys.stderr)
    LG().Log('ERROR',
             "Error: systemd services cannot be created from the service provider.  \
             Please use the file provider to create a systemd conf file,  \
             then modify the service using this service provider.")
    return [-1]
def ModifySystemdService(sc):
    """Bring a systemd unit to the requested Enabled/State.

    Enables/disables via systemctl, verifies the unit exists, then
    starts or stops it as needed.  Returns [0] on success, [-1] on any
    failure (or delegates to StartService/StopService).
    """
    if sc.Enabled is True:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "enable", sc.Name + '.service'])
        if retval is not 0:
            Print("Error: " + systemctl_path + " enable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " enable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.Enabled is False:
        (process_stdout, process_stderr, retval) = Process(
            [systemctl_path, "disable", sc.Name + '.service'])
        if retval is not 0:
            Print("Error: " + systemctl_path + " disable " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + systemctl_path +
                     " disable " + sc.Name + " failed: " + process_stderr)
            return [-1]
    (process_stdout, process_stderr, retval) = Process(
        [systemctl_path, "status", sc.Name + '.service'])
    # retval may be non zero even if service exists for 'status'.
    if 'No such file or directory' in process_stdout:
        Print("Error: " + systemctl_path + " status " + sc.Name +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + systemctl_path +
                 " status " + sc.Name + " failed: " + process_stderr)
        return [-1]
    # Reconcile the actual run state with the requested one.
    if 'Active: active' in process_stdout:
        Print("Running", file=sys.stderr)
        LG().Log('INFO', "Running")
        if sc.State and sc.State != "running":
            return StopService(sc)
    else:
        Print("Stopped", file=sys.stderr)
        LG().Log('INFO', "Stopped")
        if sc.State and sc.State != "stopped":
            return StartService(sc)
    return [0]
def CreateUpstartService(sc):
    """Always fails with [-1]: upstart jobs must be created with the
    File provider, not this service provider."""
    Print("Error: Upstart services cannot be created from the service " +
          "provider. Please use the file provider to create an upstart " +
          "conf file, then modify the service using this service provider.",
          file=sys.stderr)
    LG().Log('ERROR',
             "Error: Upstart services cannot be created from the service " +
             "provider. Please use the file provider to create an upstart " +
             "conf file, then modify the service using this service provider.")
    return [-1]
def ModifyUpstartConfFile(sc):
    """Enable/disable an upstart job by rewriting its conf stanzas.

    For /etc/init/<name>.conf, replaces the "start on"/"stop on" lines
    with fixed runlevel stanzas.  For init scripts converted to upstart
    (symlink to /lib/init/upstart-job) it manages rc?.d symlinks, and
    otherwise falls back to update-rc.d.  Returns True/False.
    """
    if os.path.isfile("/etc/init/" + sc.Name + ".conf"):
        file_lines, error = ReadFile("/etc/init/" + sc.Name + ".conf")
        # NOTE(review): if ReadFile failed, file_lines is None and len()
        # raises TypeError; the error check should come first.
        if len(file_lines) is 0 or error is not None:
            Print("Error: Conf file unable to be read for service " +
                  sc.Name, file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Conf file unable to be read for service " +
                sc.Name)
            return False
        outfile = ""
        start_on_exists = False
        stop_on_exists = False
        for full_line in file_lines.splitlines():
            line = full_line.split("#")[0]
            # NOTE(review): precedence bug — 'and' binds tighter than 'or',
            # so this reads as ("start on" in line) or ("stop on" in line
            # and not start_on_exists); a later "start on" line re-enters
            # this branch and duplicates the emitted stanzas. Confirm and
            # parenthesize.
            if "start on" in line or "stop on" in line and not start_on_exists:
                # If we got to this point, we can assume that we're allowed to
                # modify the conf file. No need to check for a "Complex" conf
                # file.
                start_on_exists = True
                if sc.Enabled is True:
                    outfile += "start on runlevel [2345]\n"
                    outfile += "stop on runlevel [!2345]\n"
                elif sc.Enabled is False:
                    outfile += "stop on runlevel [0123456]\n"
            elif "start on" in line or "stop on" in line and start_on_exists:
                continue  # its xtra now
            else:
                outfile += full_line + "\n"
        if start_on_exists or stop_on_exists:
            if WriteFile("/etc/init/" + sc.Name + ".conf", outfile) \
                    is not None:
                Print(
                    "Error: Unable to write conf file for service " + sc.Name,
                    file=sys.stderr)
                LG().Log(
                    'ERROR', "Error: Unable to write conf file for service " +
                    sc.Name)
                return False
            return True
    else:  # not an upstart service
        if os.path.islink('/etc/init.d/' + sc.Name) \
                and os.readlink('/etc/init.d/' + sc.Name) \
                == '/lib/init/upstart-job':
            # this is a 'converted' init script, check the default rc[2345].d
            # for smylink to conf file. if so its enabled.
            for rc in range(2, 6):
                file_list = os.listdir('/etc/rc' + str(rc) + '.d')
                found = False
                for f in file_list:
                    f = '/etc/rc' + str(rc) + '.d/' + f
                    if os.path.islink(f) and os.readlink(f) \
                            == "../init.d/" + sc.Name:
                        found = True
                        break
                if sc.Enabled is True:
                    if not found:
                        # create the symlink
                        os.symlink(
                            "../init.d/" + sc.Name, "/etc/rc2.d/S22" + sc.Name)
                        return True
                else:
                    if found:
                        # NOTE(review): 'f' is the last value from the scan
                        # loop above — fragile; confirm intent.
                        os.unlink(f)
                        return True
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                ['update-rc.d', sc.Name, ' defaults'])
            if retval is not 0:
                Print("Error: " + process_stdout + " enable " +
                      sc.Name + " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + process_stdout +
                         " enable " + sc.Name + " failed: " + process_stderr)
                return False
        else:
            (process_stdout, process_stderr, retval) = Process(
                ['update-rc.d -f ', sc.Name, ' remove'])
            if retval is not 0:
                Print("Error: " + process_stdout + " disable " +
                      sc.Name + " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + process_stdout +
                         " disable " + sc.Name + " failed: " + process_stderr)
                return False
    return True
def ModifyUpstartService(sc):
    """Reconcile an existing upstart job with the requested context.

    Rewrites the job's conf file when its enabled state differs from
    ``sc.Enabled``, then starts or stops the job so that its running state
    matches ``sc.State``.

    :param sc: ServiceContext holding the desired Name, Enabled and State.
    :return: [0] on success, [-1] on any failure.
    """
    # Only rewrite the conf file when the on-disk state disagrees.
    if sc.Enabled is not TestUpstartEnabled(sc):
        if not ModifyUpstartConfFile(sc):
            Print("Error: Failed to modify upstart conf file", file=sys.stderr)
            LG().Log('ERROR', "Error: Failed to modify upstart conf file")
            return [-1]
    if sc.State == "running":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_start_path, sc.Name])
        # '!=' instead of the original 'is not': identity comparison with an
        # int is implementation-dependent (SyntaxWarning on Python 3.8+).
        if retval != 0:
            # 'start' on an already-running job is not a failure.
            if "Job is already running" not in process_stderr:
                Print("Error: " + upstart_start_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_start_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        # Verify the job actually came up.
        if not IsServiceRunning(sc):
            Print("Error: " + upstart_start_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_start_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    elif sc.State == "stopped":
        (process_stdout, process_stderr, retval) = Process(
            [upstart_stop_path, sc.Name])
        if retval != 0:
            # 'stop' on a job that is not running reports 'Unknown instance'.
            if "Unknown instance" not in process_stderr:
                Print("Error: " + upstart_stop_path + " " + sc.Name +
                      " failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + upstart_stop_path +
                         " " + sc.Name + " failed: " + process_stderr)
                return [-1]
        # Verify the job actually went down.
        if IsServiceRunning(sc):
            Print("Error: " + upstart_stop_path + " " + sc.Name +
                  " failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + upstart_stop_path +
                     " " + sc.Name + " failed: " + process_stderr)
            return [-1]
    return [0]
def CreateInitService(sc):
    """Register a new SysV init service via the LSB install_initd helper,
    then apply the requested enabled/running state.

    :param sc: ServiceContext with the desired Name/Enabled/State.
    :return: [0] on success, [-1] on failure.
    """
    (process_stdout, process_stderr, retval) = Process(
        [lsb_install_initd, sc.Name])
    # '!=' instead of 'is not': identity comparison with ints is unreliable.
    if retval != 0:
        Print("Error: " + lsb_install_initd + " " + sc.Name +
              " failed: " + process_stderr, file=sys.stderr)
        LG().Log('ERROR', "Error: " + lsb_install_initd +
                 " " + sc.Name + " failed: " + process_stderr)
        return [-1]
    # Delegate the enable/start handling to the common modify path.
    return ModifyInitService(sc)
def ModifyInitService(sc):
    """Apply the requested enabled/running state to a SysV init service.

    Handles debian-style systems (invoke-rc.d/update-rc.d available) and
    redhat-style systems (chkconfig), then starts or stops the service so
    that its running state matches ``sc.State``.

    All 'retval is not 0' identity comparisons from the original were
    replaced with 'retval != 0' - 'is' on integers is only accidentally
    correct for small interned values (SyntaxWarning on Python 3.8+).

    :param sc: ServiceContext with the desired Name/Enabled/State.
    :return: [0] on success, [-1] on failure.
    """
    check_state_program = initd_service
    check_enabled_program = initd_chkconfig
    # debian style init. These are missing in redhat.
    if os.path.isfile(initd_invokerc) and os.path.isfile(initd_updaterc):
        if os.path.isfile(initd_service):
            check_state_program = initd_service
        else:  # invoke the service directly
            check_state_program = '/etc/init.d/'
        check_enabled_program = initd_updaterc
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "enable"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " enable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " enable failed: " + process_stderr)
                # try 'defaults'
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
            if 'already exist' in process_stdout:  # we need to remove them first
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
                # it should work now
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
        elif sc.Enabled is False:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, "-f", sc.Name, "disable"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " -f " +
                      sc.Name + " disable failed: " + process_stderr,
                      file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " -f " + sc.Name + " disable failed: " + process_stderr)
                # try remove
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    else:
        # redhat-style chkconfig path
        if sc.Enabled is True:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "on"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " on failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " on failed: " + process_stderr)
                # try 'defaults'
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
            if 'already exist' in process_stdout:  # we need to remove them first
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
                # it should work now
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "defaults"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " defaults failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " defaults failed: " + process_stderr)
                    return [-1]
        elif sc.Enabled is False:
            (process_stdout, process_stderr, retval) = Process(
                [check_enabled_program, sc.Name, "off"])
            if retval != 0:
                Print("Error: " + check_enabled_program + " " + sc.Name +
                      " off failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_enabled_program +
                         " " + sc.Name + " off failed: " + process_stderr)
                # try remove
                (process_stdout, process_stderr, retval) = Process(
                    [check_enabled_program, "-f", sc.Name, "remove"])
                if retval != 0:
                    Print("Error: " + check_enabled_program + " -f " +
                          sc.Name + " remove failed: " + process_stderr,
                          file=sys.stderr)
                    LG().Log('ERROR', "Error: " + check_enabled_program +
                             " -f " + sc.Name + " remove failed: " + process_stderr)
                    return [-1]
    if sc.State == "running":
        # don't try to read stdout or stderr as 'service start' command
        # re-directs them, causing a hang in subprocess.communicate()
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "start"], True)
            if retval != 0:
                Print("Error: " + check_state_program +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " start failed: ")
                return [-1]
        else:
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "start"], True)
            if retval != 0:
                Print("Error: " + check_state_program + " " +
                      sc.Name + " start failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " start failed: ")
                return [-1]
        if not IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " +
                  sc.Name + " start failed: ", file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " start failed: ")
            return [-1]
    elif sc.State == "stopped":
        if check_state_program == '/etc/init.d/':
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program + sc.Name, "stop"], True)
            if retval != 0:
                Print("Error: " + check_state_program +
                      sc.Name + " stop failed: ", file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         sc.Name + " stop failed: ")
                return [-1]
        else:
            (process_stdout, process_stderr, retval) = Process(
                [check_state_program, sc.Name, "stop"])
            if retval != 0:
                Print("Error: " + check_state_program + " " + sc.Name +
                      " stop failed: " + process_stderr, file=sys.stderr)
                LG().Log('ERROR', "Error: " + check_state_program +
                         " " + sc.Name + " stop failed: " + process_stderr)
                return [-1]
        if IsServiceRunning(sc):
            Print("Error: " + check_state_program + " " + sc.Name +
                  " stop failed: " + process_stderr, file=sys.stderr)
            LG().Log('ERROR', "Error: " + check_state_program +
                     " " + sc.Name + " stop failed: " + process_stderr)
            return [-1]
    return [0]
def IsServiceRunning(sc):
    """Return True when a process matching the service name shows up in ps.

    Sleeps one second first to let a just-started/stopped service settle
    before sampling the process table.

    :param sc: ServiceContext; only sc.Name is used.
    :return: bool
    """
    time.sleep(1)
    # Match ' name' or '/name', optionally followed by an extension, ending
    # at a space or end of line. The trailing segment is a raw string so
    # '\.' is a real regex escape (it was an invalid string escape before).
    cmd = 'ps -ef | grep -v grep | grep -E ".*( ' + \
        sc.Name + '|/' + sc.Name + r')(\..*?|.?)( |$)"'
    code, out = RunGetOutput(cmd, False, False)
    # '!=' instead of 'is not': identity comparison with ints is unreliable.
    if code != 0:
        return False
    return True
def Set(Name, Controller, Enabled, State):
    """Create or modify a service under the requested controller.

    Looks up the controller's probe/exists/modify/create handlers and, when
    the controller is present on this host, either modifies an existing
    service or creates a new one.

    :return: handler result ([0] success / [-1] failure), or [-1] when the
        controller is unknown or absent from the system.
    """
    ShowMof('SET', Name, Controller, Enabled, State)
    ctx = ServiceContext(Name, Controller, Enabled, State)
    # controller -> (controller present?, service exists?, modify, create)
    handlers = {
        "systemd": (SystemdExists, ServiceExistsInSystemd,
                    ModifySystemdService, CreateSystemdService),
        "upstart": (UpstartExists, ServiceExistsInUpstart,
                    ModifyUpstartService, CreateUpstartService),
        "init": (InitExists, ServiceExistsInInit,
                 ModifyInitService, CreateInitService),
    }
    entry = handlers.get(ctx.Controller)
    if entry is not None:
        controller_present, service_present, modify, create = entry
        if controller_present() is True:
            if service_present(ctx):
                return modify(ctx)
            return create(ctx)
    return [-1]
def Test(Name, Controller, Enabled, State):
    """Dispatch a state test to the handler for the given controller.

    :return: the controller-specific test result, or [-1] for an unknown
        controller.
    """
    ShowMof('TEST', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    if sc.Controller == "systemd":
        return TestSystemd(sc)
    elif sc.Controller == "upstart":
        return TestUpstart(sc)
    elif sc.Controller == "init":
        return TestInit(sc)
    # Unknown controller: report it and fail. (The original also had a
    # second, unreachable 'return [-1]' after this branch; removed.)
    Print("Invalid service controller (" + sc.Controller +
          ") specified for service: " + sc.Name, file=sys.stderr)
    LG().Log('ERROR', "Invalid service controller (" +
             sc.Controller + ") specified for service: " + sc.Name)
    return [-1]
def Get(Name, Controller, Enabled, State):
    """Query a single service and return its current properties.

    :return: [exit_code, Name, Controller, Enabled, State, Path,
        Description, Runlevels]; exit_code is -1 when the controller is
        missing or the service cannot be found under it.
    """
    ShowMof('GET', Name, Controller, Enabled, State)
    sc = ServiceContext(Name, Controller, Enabled, State)
    Path = ""
    exit_code = 0
    if not sc.Controller:
        Print("Error: Controller not specified.", file=sys.stderr)
        LG().Log('ERROR', "Error: Controller not specified.")
        exit_code = -1
    elif sc.Controller == "systemd":
        if not ServiceExistsInSystemd(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in systemd.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in systemd.")
            exit_code = -1
        else:
            # The parameters are rebound with the live values and returned.
            Enabled = GetSystemdEnabled(sc)
            State = GetSystemdState(sc)
            # NOTE(review): hard-coded unit directory; distros may also use
            # /lib/systemd/system - confirm against the target platforms.
            Path = "/usr/lib/systemd/system/" + sc.Name + ".service"
    elif sc.Controller == "upstart":
        if not ServiceExistsInUpstart(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in upstart.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in upstart.")
            exit_code = -1
        else:
            temp = GetUpstartEnabled(sc)
            if temp is False:
                Enabled = False
            else:
                # When GetUpstartEnabled returns "Complex", we assume that it
                # is enabled (and we won't modify it).
                Enabled = True
            State = GetUpstartState(sc)
            Path = "/etc/init/" + sc.Name + ".conf"
    elif sc.Controller == "init":
        if not ServiceExistsInInit(sc):
            Print("Error: Unable to find service named " +
                  sc.Name + " in init.", file=sys.stderr)
            LG().Log(
                'ERROR', "Error: Unable to find service named " +
                sc.Name + " in init.")
            exit_code = -1
        else:
            Enabled = GetInitEnabled(sc)
            State = GetInitState(sc)
            Path = "/etc/init.d/" + sc.Name
    # Fill sc.Description / sc.Runlevels from the controller's full listing.
    GetOne(sc)
    return [exit_code, Name, Controller, Enabled, State, Path, sc.Description, sc.Runlevels]
def GetOne(sc):
    """Populate sc.Description/sc.Runlevels from the first matched service."""
    GetAll(sc)
    if sc.services_list:
        first = sc.services_list[0]
        sc.Description = first['Description']
        sc.Runlevels = first['Runlevels']
def GetAll(sc):
    """Enumerate services for sc.Controller into sc.services_list.

    Returns the controller-specific result, or None for an unknown
    controller (matching the original fall-through behavior).
    """
    dispatch = {
        'init': InitdGetAll,
        'systemd': SystemdGetAll,
        'upstart': UpstartGetAll,
    }
    handler = dispatch.get(sc.Controller)
    if handler is not None:
        return handler(sc)
def GetRunlevels(sc, Name):
    """Return the runlevel info dict for *Name*, building the cache lazily.

    On first call, lists the /etc/rc*.d symlinks once and caches a map of
    service name -> {'Path': ..., 'Runlevels': 'N:on/off ...'} on
    sc.runlevels_d; subsequent calls reuse the cache.

    :return: the cached dict for *Name*, or None if unknown.
    """
    # 'is None' replaces the original '== None' (identity, not equality).
    if sc.runlevels_d is None:
        sc.runlevels_d = {}
        cmd = "file /etc/rc*.d/* | grep link | awk '{print $5,$1}' | sort"
        code, out = RunGetOutput(cmd, False, False)
        for line in out.splitlines():
            line = line.replace("'", '')
            srv = line.split(' ')[0]
            rl = line.split(' ')[1]
            n = os.path.basename(srv)
            if n not in sc.runlevels_d.keys():
                sc.runlevels_d[n] = {}
            if 'Path' not in sc.runlevels_d[n].keys():
                sc.runlevels_d[n]['Path'] = srv.replace('..', '/etc')
            if 'Runlevels' not in sc.runlevels_d[n].keys():
                sc.runlevels_d[n]['Runlevels'] = ''
            s = 'off'
            # rl looks like '/etc/rcN.d/Xyyname:': index 7 is the runlevel
            # digit N, index 11 is the S/K start-or-kill marker.
            if rl[11].lower() == 's':
                s = 'on'
            sc.runlevels_d[n]['Runlevels'] += rl[7] + ':' + s + ' '
    if Name in sc.runlevels_d.keys():
        return sc.runlevels_d[Name]
    return None
def SystemdGetAll(sc):
    """Enumerate systemd services matching sc.Name into sc.services_list.

    Shells out to systemctl, parses the property output, applies the
    State/Enabled filters from *sc*, and appends one dict per matching
    service. Returns False on any command failure, True otherwise.
    """
    d = {}
    if os.system('which systemctl') != 0:
        Print("Error: 'Controller' = " + sc.Controller +
              " is incorrectly specified.", file=sys.stderr)
        LG().Log('ERROR', "Error: 'Controller' = " +
                 sc.Controller + " is incorrectly specified.")
        return False
    Name = sc.Name
    # Non-glob names get a '.service' suffix (normalized to exactly one).
    if '*' not in Name and '?' not in Name and len(Name) > 0:
        Name = Name.replace('.service', '')
        Name += '.service'
    # Do the commands work?
    # There may be no error detected in our multi-pipe command below.
    # To keep from returning garbage, we must test the commands.
    # RunGetOutput(chk_err = True) will log the error message here if it
    # occurs.
    cmd = 'systemctl -a list-unit-files ' + Name
    code, txt = RunGetOutputNoStderr(cmd, False, True)
    if code != 0:
        return False
    sname = ''
    # Get the last service name from the output.
    m = re.search(r'.*?\n(.*?)[.]service.*?\n', txt, re.M)
    if m is not None:
        sname = m.group(1)
    cmd = 'systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show ' + sname
    code, txt = RunGetOutputNoStderr(cmd, False, True)
    if code != 0:
        return False
    # Now we know it will work.
    cmd = 'systemctl -a list-unit-files ' + Name + '| grep \.service | grep -v "@" | awk \'{print $1}\' | xargs systemctl -a --no-pager --no-legend -p "Names,WantedBy,Description,SubState,FragmentPath,UnitFileState" show'
    code, txt = RunGetOutputNoStderr(cmd, False, False)
    # Records are separated by blank lines; turn them into '@@'-separated
    # records with '|'-separated 'Prop=value' fields.
    txt=txt.replace('\n\n','@@')
    txt=txt.replace('\n','|')
    services=txt.split('@@')
    # Strips the leading 'Prop=' part of each field.
    subs=re.compile(r'(.*?=)')
    for srv in services:
        if len(srv) == 0:
            continue
        # Fixed field order matches the -p property list above:
        # 0=Names 1=WantedBy 2=Description 3=SubState 4=FragmentPath
        # 5=UnitFileState
        s=srv.split('|')
        d['Name'] = subs.sub('',s[0].replace('.service',''))
        d['Controller'] = sc.Controller
        d['Description'] =subs.sub('',s[2])
        d['State'] = subs.sub('',s[3])
        # Optional State filter.
        if len(sc.State) and sc.State != d['State'].lower():
            continue
        d['Path'] = subs.sub('',s[4])
        d['Enabled'] = 'enabled' in subs.sub('',s[5])
        # Optional Enabled filter.
        if sc.FilterEnabled and sc.Enabled != d['Enabled']:
            continue
        # Prefer the rc*.d runlevel info when available; otherwise fall
        # back to the WantedBy property.
        rld=GetRunlevels(sc,d['Name'])
        if rld != None and 'Runlevels' in rld.keys():
            d['Runlevels'] = rld['Runlevels']
        else:
            d['Runlevels'] = subs.sub('',s[1])
        sc.services_list.append(copy.deepcopy(d))
    return True
def UpstartGetAll(sc):
    """Enumerate upstart (and legacy init.d) services into sc.services_list.

    Merges 'initctl list' output with 'service --status-all', de-duplicates
    by name, applies the Name/State/Enabled filters from *sc* and appends
    one dict per matching service. Returns False on any command failure.
    """
    d={}
    # Tracks names already emitted so duplicates across the two listings
    # are skipped.
    names={}
    if os.system('which initctl') != 0:
        Print("Error: 'Controller' = " + sc.Controller + " is incorrectly specified.", file=sys.stderr)
        LG().Log('ERROR', "Error: 'Controller' = " + sc.Controller + " is incorrectly specified.")
        return False
    # Do the commands work?
    # There may be no error detected in our multi-pipe command below.
    # To keep from returning garbage, we must test the commands.
    # RunGetOutput(chk_err = True) will log the error message here if it occurs.
    cmd = 'initctl list'
    code, txt = RunGetOutputNoStderr(cmd, False, True)
    if code != 0:
        return False
    cmd = initd_service + ' --status-all'
    code, txt = RunGetOutputNoStderr(cmd, False, True)
    if code != 0:
        return False
    # Now we know it will work.
    cmd = "initctl list | sed 's/[(].*[)] //g' | tr ', ' ' ' | awk '{print $1,$2}'"
    code, txt = RunGetOutputNoStderr(cmd, False, False)
    services = txt.splitlines()
    # 'service --status-all' writes to stderr; route everything through a
    # temp file so both streams are captured.
    cmd = initd_service + " --status-all &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile"
    code, txt = RunGetOutputNoStderr(cmd, False, False)
    txt = txt.replace('[','')
    txt = txt.replace(']','')
    services.extend(txt.splitlines())
    for srv in services:
        if len(srv) == 0:
            continue
        s=srv.split()
        # status-all lines start with the one-char status flag; put the
        # name first so both formats index the same way.
        if len(s[0]) == 1: #swap them.
            s.reverse()
        d['Name'] = s[0]
        # Optional Name glob filter.
        if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
            continue
        if d['Name'] in names.keys():
            continue
        names[d['Name']] = None
        d['Controller'] = sc.Controller
        d['Description'] = ''
        d['State'] = 'stopped'
        # initctl reports 'running'; status-all marks running with '+'.
        if 'running' in s[1] or '+' in s[1]:
            d['State'] = 'running'
        if len(sc.State) and sc.State != d['State'].lower():
            continue
        d['Path'] = ''
        if os.path.exists('/etc/init.d/' + s[0]):
            d['Path'] = '/etc/init.d/' + s[0]
        elif os.path.exists('/etc/init/' + s[0] + '.conf'):
            d['Path'] = '/etc/init/' + s[0] + '.conf'
        # 'initctl list' won't show disabled services
        d['Enabled'] = True
        if sc.FilterEnabled and sc.Enabled != d['Enabled']:
            continue
        # Multi-char second field means the entry came from initctl; ask
        # initctl for the start/stop conditions, else use rc*.d info.
        if len(s[1]) > 1:
            cmd = 'initctl show-config ' + d['Name'] + ' | grep -E "start |stop " | tr "\n" " " | tr -s " " '
            code, out = RunGetOutputNoStderr(cmd, False, False)
            d['Runlevels'] = out[1:]
        else:
            rld=GetRunlevels(sc,d['Name'])
            if rld != None and 'Runlevels' in rld.keys():
                d['Runlevels'] = rld['Runlevels']
        sc.services_list.append(copy.deepcopy(d))
    return True
def InitdGetAll(sc):
    """Enumerate SysV init services into sc.services_list.

    Uses chkconfig when present (redhat-style), otherwise
    'service --status-all' (debian-style). Applies the Name/State/Enabled
    filters from *sc*. Returns False on command failure, True otherwise.
    """
    d={}
    # Under omsconfig the status queries must go through sudo wrapper
    # scripts; otherwise call the service binary directly.
    if helperlib.CONFIG_SYSCONFDIR_DSC == "omsconfig":
        initd_service_status = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStat.sh'
        status_postfix = ''
        initd_service_status_all = 'sudo /opt/microsoft/omsconfig/Scripts/OMSServiceStatAll.sh'
    else:
        initd_service_status = initd_service
        status_postfix = ' status'
        initd_service_status_all = initd_service + ' --status-all '
    if os.path.exists(initd_chkconfig):
        # SLES 11-SP4 chkconfig can return error code on success,
        # so don't check chkconfig error code if this is the case.
        if os.path.exists('/etc/SuSE-release'):
            # NOTE(review): file handle is not closed explicitly; relies on
            # refcounting.
            txt = open('/etc/SuSE-release','r').read()
            s=r'.*?VERSION.*?=(.*?)\n.*?PATCHLEVEL.*?=(.*?)\n'
            m = re.search(s, txt, re.M)
            if m != None:
                if not (int(m.group(1)) == 11 and int(m.group(2)) == 4 ) :
                    # Does the command work?
                    # There may be no error detected in our multi-pipe command below.
                    # To keep from returning garbage, we must test the command.
                    # RunGetOutput(chk_err = True) will log the error message here if it occurs.
                    cmd = initd_chkconfig + ' --list '
                    code, txt = RunGetOutputNoStderr(cmd, False, True)
                    if code != 0:
                        return False
        # Now we know it will work.
        cmd = initd_chkconfig + ' --list | grep on | grep -v based'
        code, txt = RunGetOutputNoStderr(cmd, False, False)
        services=txt.splitlines()
        for srv in services:
            if len(srv) == 0:
                continue
            # chkconfig line: '<name> 0:off 1:off 2:on ...'
            s=srv.split()
            d['Name'] = s[0]
            # Optional Name glob filter.
            if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
                continue
            d['Controller'] = sc.Controller
            d['Description'] = ''
            d['State'] = 'stopped'
            cmd = initd_service_status + ' ' + s[0] + status_postfix
            code, txt = RunGetOutputNoStderr(cmd, False, False)
            if 'running' in txt:
                d['State'] = 'running'
            if len(sc.State) and sc.State != d['State'].lower():
                continue
            d['Path'] = ''
            if os.path.exists('/etc/init.d/' + s[0]):
                d['Path'] = '/etc/init.d/' + s[0]
            d['Enabled'] = ':on' in srv
            if sc.FilterEnabled and sc.Enabled != d['Enabled']:
                continue
            # NOTE(review): 'reduce' is a builtin only on Python 2; on
            # Python 3 it needs functools - confirm the target runtime.
            d['Runlevels'] = reduce(lambda x, y: x + ' ' + y, s[1:])
            sc.services_list.append(copy.deepcopy(d))
    else:
        # Does the command work?
        # There may be no error detected in our multi-statement command below.
        # To keep from returning garbage, we must test the command.
        # RunGetOutput(chk_err = True) will log the error message here if it occurs.
        cmd = initd_service_status_all
        code, txt = RunGetOutputNoStderr(cmd, False, True)
        if code != 0:
            return False
        # Now we know it will work.
        cmd = initd_service_status_all + ' &> /tmp/tmpfile ; cat /tmp/tmpfile ; rm /tmp/tmpfile'
        code, txt = RunGetOutputNoStderr(cmd, False, False)
        txt = txt.replace('[','')
        txt = txt.replace(']','')
        services = txt.splitlines()
        for srv in services:
            if len(srv) == 0:
                continue
            # status-all line: '<flag> <name>' where flag is +, - or ?.
            s=srv.split()
            d['Name'] = s[1]
            if len(sc.Name) and not fnmatch.fnmatch(d['Name'],sc.Name):
                continue
            d['Controller'] = sc.Controller
            d['Description'] = ''
            d['State'] = 'stopped'
            if '+' in s[0]:
                d['State'] = 'running'
            if len(sc.State) and sc.State != d['State'].lower():
                continue
            d['Path'] = ''
            if os.path.exists('/etc/init.d/' + s[1]):
                d['Path'] = '/etc/init.d/' + s[1]
            elif os.path.exists('/etc/init/' + s[1] + '.conf'):
                d['Path'] = '/etc/init/' + s[1] + '.conf'
            rld=GetRunlevels(sc,d['Name'])
            if rld != None and 'Runlevels' in rld.keys():
                d['Runlevels'] = rld['Runlevels']
            # NOTE(review): when rld is None, d['Runlevels'] keeps the value
            # from the previous loop iteration (or raises KeyError on the
            # first one) - looks like a latent bug; confirm before changing.
            d['Enabled'] = 'on' in d['Runlevels']
            if sc.FilterEnabled and sc.Enabled != d['Enabled']:
                continue
            sc.services_list.append(copy.deepcopy(d))
    return True
def GetController():
    """Detect the service controller on this host.

    Probes in the original priority order (upstart, systemd, init) and
    returns the first controller found; returns '' when none is present.
    """
    probes = (
        ('upstart', UpstartExists),
        ('systemd', SystemdExists),
        ('init', InitExists),
    )
    for name, present in probes:
        if present():
            return name
    Print('ERROR: Unable to determine Controller.')
    LG().Log('ERROR', 'Unable to determine Controller.')
    return ''
class ServiceContext:
    """Mutable bag of state for one service request plus query results."""

    def __init__(self, Name, Controller, Enabled, State):
        # Filled by GetAll(): one dict per matched service.
        self.services_list = []
        # Lazy cache of /etc/rc*.d runlevel info, built by GetRunlevels().
        self.runlevels_d = None
        self.Name = Name                # service name (may be a glob for queries)
        self.Controller = Controller    # 'systemd', 'upstart' or 'init'
        self.Enabled = Enabled          # desired/reported enabled flag
        self.State = State              # desired/reported state ('running'/'stopped')
        self.Path = ''                  # unit/conf/script path, filled by Get()
        self.Description = ''           # filled by GetOne()
        self.Runlevels = ''             # filled by GetOne()
        self.FilterEnabled = False      # when True, GetAll filters on Enabled
| 38.436139 | 222 | 0.540113 |
ace5e4c71f2a96f241e0ca909ef2857959f837f7 | 573 | py | Python | mysite/ct/admin.py | VladimirFilonov/socraticqs2 | cf33e380d26f307029d44c00e0f7068abb3a31d2 | [
"Apache-2.0"
] | null | null | null | mysite/ct/admin.py | VladimirFilonov/socraticqs2 | cf33e380d26f307029d44c00e0f7068abb3a31d2 | [
"Apache-2.0"
] | null | null | null | mysite/ct/admin.py | VladimirFilonov/socraticqs2 | cf33e380d26f307029d44c00e0f7068abb3a31d2 | [
"Apache-2.0"
] | 1 | 2019-06-10T12:00:36.000Z | 2019-06-10T12:00:36.000Z | from django.contrib import admin
import ct.models
@admin.register(ct.models.Role)
class AdminRole(admin.ModelAdmin):
    # Columns shown on the Role changelist page in the Django admin.
    list_display = ('role', 'course', 'user')
# Register the remaining ct models with the default ModelAdmin.
admin.site.register(ct.models.Concept)
admin.site.register(ct.models.ConceptGraph)
admin.site.register(ct.models.Lesson)
admin.site.register(ct.models.ConceptLink)
admin.site.register(ct.models.UnitLesson)
admin.site.register(ct.models.Unit)
admin.site.register(ct.models.Response)
admin.site.register(ct.models.StudentError)
admin.site.register(ct.models.Course)
admin.site.register(ct.models.CourseUnit)
| 28.65 | 45 | 0.799302 |
ace5e50bc8df6064d888a5c5b054513eba57e04f | 9,896 | py | Python | src/exabgp/reactor/network/tcp.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 1,560 | 2015-01-01T08:53:05.000Z | 2022-03-29T20:22:43.000Z | src/exabgp/reactor/network/tcp.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 818 | 2015-01-01T17:38:40.000Z | 2022-03-30T07:29:24.000Z | src/exabgp/reactor/network/tcp.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 439 | 2015-01-06T21:20:41.000Z | 2022-03-19T23:24:25.000Z | # encoding: utf-8
"""
tcp.py
Created by Thomas Mangin on 2013-07-13.
Copyright (c) 2013-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
import re
import base64
import socket
import select
import platform
from struct import pack, calcsize
from exabgp.util.errstr import errstr
from exabgp.protocol.family import AFI
from exabgp.protocol.ip import IP
from exabgp.reactor.network.error import errno
from exabgp.reactor.network.error import error
from exabgp.reactor.network.error import NotConnected
from exabgp.reactor.network.error import BindingError
from exabgp.reactor.network.error import MD5Error
from exabgp.reactor.network.error import NagleError
from exabgp.reactor.network.error import TTLError
from exabgp.reactor.network.error import AsyncError
from exabgp.logger import log
def create(afi):
    """Create a TCP socket for the given address family.

    SO_REUSEADDR and SO_REUSEPORT are enabled on a best-effort basis
    (not every platform defines/permits them).

    :param afi: address family (AFI.ipv4 or AFI.ipv6)
    :raises NotConnected: when the socket cannot be created or the AFI
        is not supported.
    """
    if afi == AFI.ipv4:
        family = socket.AF_INET
    elif afi == AFI.ipv6:
        family = socket.AF_INET6
    else:
        # Previously an unsupported AFI fell through and raised an
        # UnboundLocalError on 'io'; fail with the intended exception.
        raise NotConnected('Could not create socket')
    try:
        io = socket.socket(family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        try:
            io.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except (socket.error, AttributeError):
            pass
        try:
            # SO_REUSEPORT is not defined on every platform
            io.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)  # pylint: disable=E1101
        except (socket.error, AttributeError):
            pass
    except socket.error:
        raise NotConnected('Could not create socket')
    return io
def bind(io, ip, afi):
    """Bind *io* to the local address *ip*; the kernel picks the port.

    :raises BindingError: when the bind fails.
    """
    if afi == AFI.ipv4:
        endpoint = (ip, 0)
    elif afi == AFI.ipv6:
        endpoint = (ip, 0, 0, 0)
    else:
        # unknown family: nothing to do (matches the original fall-through)
        return
    try:
        io.bind(endpoint)
    except socket.error as exc:
        raise BindingError('Could not bind to local ip %s - %s' % (ip, str(exc)))
def connect(io, ip, port, afi, md5):
    """Initiate a TCP connection to ip:port on the (non-blocking) socket.

    EINPROGRESS is expected for a non-blocking connect and is treated as
    success-in-progress. Any other failure raises NotConnected, with a
    hint about MD5 passwords when one is configured.
    """
    if afi == AFI.ipv4:
        endpoint = (ip, port)
    elif afi == AFI.ipv6:
        endpoint = (ip, port, 0, 0)
    else:
        # unknown family: nothing to do (matches the original fall-through)
        return
    try:
        io.connect(endpoint)
    except socket.error as exc:
        if exc.errno == errno.EINPROGRESS:
            return
        if md5:
            raise NotConnected(
                'Could not connect to peer %s:%d, check your MD5 password (%s)' % (ip, port, errstr(exc))
            )
        raise NotConnected('Could not connect to peer %s:%d (%s)' % (ip, port, errstr(exc)))
# http://lxr.free-electrons.com/source/include/uapi/linux/tcp.h#L197
#
# #define TCP_MD5SIG_MAXKEYLEN 80
#
# struct tcp_md5sig {
# struct __kernel_sockaddr_storage tcpm_addr; /* address associated */ 128
# __u16 __tcpm_pad1; /* zero */ 2
# __u16 tcpm_keylen; /* key length */ 2
# __u32 __tcpm_pad2; /* zero */ 4
# __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ 80
# }
#
# #define _K_SS_MAXSIZE 128
#
# #define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *))
# /* Implementation specific desired alignment */
#
# typedef unsigned short __kernel_sa_family_t;
#
# struct __kernel_sockaddr_storage {
# __kernel_sa_family_t ss_family; /* address family */
# /* Following field(s) are implementation specific */
# char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
# /* space to achieve desired size, */
# /* _SS_MAXSIZE value minus size of ss_family */
# } __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
def MD5(io, ip, port, md5, md5_base64):
    """Enable TCP MD5 signatures (RFC 2385) on the socket for this peer.

    On FreeBSD the key itself must be configured via ipsec.conf, so only
    the 'kernel' sentinel is accepted. On Linux the key is installed with
    the TCP_MD5SIG socket option, packing a struct tcp_md5sig by hand
    (layout documented in the comment block above this function).

    :param md5: the password, or '' / None for no MD5
    :param md5_base64: True = value is base64, False = plain text,
        None = auto-detect
    :raises MD5Error: when the platform cannot honor the request.
    """
    platform_os = platform.system()
    if platform_os == 'FreeBSD':
        if md5:
            if md5 != 'kernel':
                raise MD5Error(
                    'FreeBSD requires that you set your MD5 key via ipsec.conf.\n'
                    'Something like:\n'
                    'flush;\n'
                    'add <local ip> <peer ip> tcp 0x1000 -A tcp-md5 "password";'
                )
            try:
                # FreeBSD's TCP_MD5SIG merely turns the feature on; the key
                # comes from the kernel SADB.
                TCP_MD5SIG = 0x10
                io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, 1)
            except socket.error:
                raise MD5Error(
                    'FreeBSD requires that you rebuild your kernel to enable TCP MD5 Signatures:\n'
                    'options IPSEC\n'
                    'options TCP_SIGNATURE\n'
                    'device crypto\n'
                )
    elif platform_os == 'Linux':
        try:
            md5_bytes = None
            if md5:
                if md5_base64 is True:
                    try:
                        md5_bytes = base64.b64decode(md5)
                    except TypeError:
                        raise MD5Error("Failed to decode base 64 encoded PSK")
                elif md5_base64 is None and not re.match('.*[^a-f0-9].*', md5):  # auto
                    # Hex-looking strings may be unpadded base64; try the
                    # possible paddings until one decodes.
                    options = [md5 + '==', md5 + '=', md5]
                    for md5 in options:
                        try:
                            md5_bytes = base64.b64decode(md5)
                            break
                        except TypeError:
                            pass
            # __kernel_sockaddr_storage
            n_af = IP.toaf(ip)
            n_addr = IP.pton(ip)
            n_port = socket.htons(port)
            # pack 'x' is padding, so we want the struct
            # Do not use '!' for the pack, the network (big) endian switch in
            # struct.pack is fighting against inet_pton and htons (note the n)
            if IP.toafi(ip) == AFI.ipv4:
                # SS_MAXSIZE is 128 but addr_family, port and ipaddr (8 bytes total) are written independently of the padding
                SS_MAXSIZE_PADDING = 128 - calcsize('HH4s')  # 8
                sockaddr = pack('HH4s%dx' % SS_MAXSIZE_PADDING, socket.AF_INET, n_port, n_addr)
            else:
                SS_MAXSIZE_PADDING = 128 - calcsize('HI16sI')  # 28
                SIN6_FLOWINFO = 0
                SIN6_SCOPE_ID = 0
                sockaddr = pack('HHI16sI%dx' % SS_MAXSIZE_PADDING, n_af, n_port, SIN6_FLOWINFO, n_addr, SIN6_SCOPE_ID)
            TCP_MD5SIG_MAXKEYLEN = 80
            TCP_MD5SIG = 14
            if md5_bytes:
                # key = __tcpm_pad1(2x) + keylen(H) + __tcpm_pad2(4x) + key
                key = pack('2xH4x%ds' % TCP_MD5SIG_MAXKEYLEN, len(md5_bytes), md5_bytes)
                io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, sockaddr + key)
            elif md5:
                # plain-text password: install its ASCII bytes as the key
                md5_bytes = bytes(md5, 'ascii')
                key = pack('2xH4x%ds' % TCP_MD5SIG_MAXKEYLEN, len(md5_bytes), md5_bytes)
                io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, sockaddr + key)
            # else:
            #     key = pack('2xH4x%ds' % TCP_MD5SIG_MAXKEYLEN, 0, b'')
            #     io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, sockaddr + key)
        except socket.error as exc:
            # ENOENT is tolerated; anything else means the kernel lacks
            # TCP_MD5SIG support.
            if exc.errno != errno.ENOENT:
                raise MD5Error('This linux machine does not support TCP_MD5SIG, you can not use MD5 (%s)' % errstr(exc))
    elif md5:
        raise MD5Error('ExaBGP has no MD5 support for %s' % platform_os)
def nagle(io, ip):
    """Disable Nagle's algorithm (no grouping of small packets) on *io*.

    :raises NagleError: when TCP_NODELAY cannot be set.
    """
    try:
        # disable Nagle's algorithm (no grouping of packets)
        io.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    except (socket.error, AttributeError):
        raise NagleError("Could not disable nagle's algorithm for %s" % ip)
def TTL(io, ip, ttl):
    """Set the IPv4 TTL on *io* for ttl-security.

    None (ttl-security unset) and zero (maximum TTL) both mean: leave the
    kernel default alone.

    :raises TTLError: when the OS rejects IP_TTL.
    """
    if not ttl:
        return
    try:
        io.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
    except socket.error as exc:
        raise TTLError('This OS does not support IP_TTL (ttl-security) for %s (%s)' % (ip, errstr(exc)))
def TTLv6(io, ip, ttl):
    """Set the IPv6 unicast hop limit on *io* for ttl-security.

    A falsy *ttl* (None or 0) leaves the kernel default alone.

    :raises TTLError: when the OS rejects IPV6_UNICAST_HOPS.
    """
    if not ttl:
        return
    try:
        io.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_UNICAST_HOPS, ttl)
    except socket.error as exc:
        raise TTLError('This OS does not support unicast_hops (ttl-security) for %s (%s)' % (ip, errstr(exc)))
def MIN_TTL(io, ip, ttl):
    """Set IP_MINTTL (GTSM, RFC 5082) and IP_TTL on *io* for ttl-security.

    :raises TTLError: when neither option can be set.
    """
    # None (ttl-security unset) or zero (maximum TTL) is the same thing
    if ttl:
        try:
            io.setsockopt(socket.IPPROTO_IP, socket.IP_MINTTL, ttl)
        except socket.error as exc:
            raise TTLError('This OS does not support IP_MINTTL (ttl-security) for %s (%s)' % (ip, errstr(exc)))
        except AttributeError:
            # socket.IP_MINTTL missing on this platform; fall through to
            # plain IP_TTL below.
            pass
        # NOTE(review): per the 'pass' above, this second setsockopt runs
        # unconditionally after the first (not only in the AttributeError
        # case) - confirm this matches the intended fallback behavior.
        try:
            io.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
        except socket.error as exc:
            raise TTLError(
                'This OS does not support IP_MINTTL or IP_TTL (ttl-security) for %s (%s)' % (ip, errstr(exc))
            )
def asynchronous(io, ip):
    """Put the socket into non-blocking mode.

    :raises AsyncError: when the socket refuses non-blocking mode.
    """
    try:
        io.setblocking(0)
    except socket.error as exc:
        raise AsyncError('could not set socket non-blocking for %s (%s)' % (ip, errstr(exc)))
def ready(io):
    """Generator polling a non-blocking connect for completion.

    Yields (done, message) tuples: (True, ...) once the connection is
    established, (False, ...) while still waiting or on failure. The
    generator returns (stops) after any terminal outcome; a plain
    'waiting' yield keeps it resumable by the caller's event loop.
    """
    poller = select.poll()
    poller.register(io, select.POLLOUT | select.POLLNVAL | select.POLLERR)
    found = False
    while True:
        try:
            # zero timeout: non-blocking sample of the socket state
            for _, event in poller.poll(0):
                if event & select.POLLOUT or event & select.POLLIN:
                    # writable: the connect has resolved one way or another
                    found = True
                elif event & select.POLLHUP:
                    yield False, 'could not connect, retrying'
                    return
                elif event & select.POLLERR or event & select.POLLNVAL:
                    yield False, 'connect attempt failed, issue with reading on the network, retrying'
                    return
            if found:
                # SO_ERROR reports the final status of the async connect
                err = io.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
                if not err:
                    yield True, 'connection established'
                    return
                elif err in error.block:
                    yield False, 'connect attempt failed, retrying, reason %s' % errno.errorcode[err]
                    return
            yield False, 'waiting for socket to become ready'
        except select.error as err:
            yield False, 'error, retrying %s' % str(err)
            return
ace5e535bc689f4f7297e0f3de398111451dfeae | 4,326 | py | Python | deeplytough/engine/predictor.py | truatpasteurdotfr/DeeplyTough | fd4737b464c5724312a97654548bcf9cb3b2e258 | [
"FTL",
"Xnet",
"Net-SNMP"
] | 105 | 2019-04-03T20:39:32.000Z | 2022-03-25T01:24:46.000Z | deeplytough/engine/predictor.py | truatpasteurdotfr/DeeplyTough | fd4737b464c5724312a97654548bcf9cb3b2e258 | [
"FTL",
"Xnet",
"Net-SNMP"
] | 11 | 2020-01-10T17:16:57.000Z | 2022-02-21T12:55:39.000Z | deeplytough/engine/predictor.py | truatpasteurdotfr/DeeplyTough | fd4737b464c5724312a97654548bcf9cb3b2e258 | [
"FTL",
"Xnet",
"Net-SNMP"
] | 32 | 2019-04-07T12:18:58.000Z | 2022-02-06T21:51:18.000Z | import logging
import os
import numpy as np
import torch
import torch.nn.functional as nnf
from tqdm.autonotebook import tqdm
from engine.datasets import PointOfInterestVoxelizedDataset
from engine.models import create_model
logger = logging.getLogger(__name__)
def load_model(model_dir, device):
"""
Loads the model from file
"""
if isinstance(device, str):
device = torch.device(device)
fname = os.path.join(model_dir, 'model.pth.tar') if 'pth.tar' not in model_dir else model_dir
checkpoint = torch.load(fname, map_location=str(device))
model = create_model(checkpoint['args'], PointOfInterestVoxelizedDataset, device)
model.load_state_dict(checkpoint['state_dict'])
return model, checkpoint['args']
def load_and_precompute_point_feats(model, args, pdb_list, point_list, device, nworkers, batch_size):
"""
Compute descriptors for every (pdb, point) pair given
"""
model.eval()
if isinstance(device, str):
device = torch.device(device)
dataset = PointOfInterestVoxelizedDataset(pdb_list, point_list, box_size=args.patch_size)
loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=nworkers)
with torch.no_grad():
feats = [None] * len(point_list)
for batch in tqdm(loader):
inputs = batch['inputs'].squeeze(1).to(device)
outputs = model(inputs)
if args.l2_normed_descriptors:
outputs = nnf.normalize(outputs)
descriptors = outputs.cpu().float()
for b in range(descriptors.shape[0]):
feats[batch['pdb_idx'][b]] = descriptors[b].view(-1, descriptors[b].shape[0])
return feats
def match_precomputed_point_pairs(descriptors_A, descriptors_B):
    """Distance between corresponding descriptor pairs.

    Iterates the two lists in lockstep; whenever either descriptor is
    missing (None) the pair's distance is NaN.
    """
    with torch.no_grad():
        distances = []
        for a, b in tqdm(zip(descriptors_A, descriptors_B)):
            if a is not None and b is not None:
                distances.append(nnf.pairwise_distance(a, b).numpy())
            else:
                distances.append(np.nan)
    return np.squeeze(np.array(distances))
def match_precomputed_points_bipartite(descriptors_A, descriptors_B):
    """All-against-all descriptor matching.

    Computes the Cartesian product of distances between the two descriptor
    lists (or within `descriptors_A` when `descriptors_B` is None).
    Missing (None) descriptors propagate as NaN rows/columns; if a whole
    side is missing, a full-NaN matrix is returned.
    """
    with torch.no_grad():
        def stack_with_nans(descriptors):
            # Stack the descriptor list into one (N, D) float64 matrix;
            # rows for missing entries stay NaN. None when nothing present.
            present = [d for d in descriptors if d is not None]
            if not present:
                return None
            matrix = torch.full((len(descriptors), present[0].shape[1]), np.nan, dtype=torch.float64)
            for row, d in enumerate(descriptors):
                if d is not None:
                    matrix[row, :] = d
            return matrix

        mat_A = stack_with_nans(descriptors_A)
        if descriptors_B is None:
            # Complete matching of A against itself.
            descriptors_B = descriptors_A
            mat_B = mat_A
        else:
            mat_B = stack_with_nans(descriptors_B)

        if mat_A is None or mat_B is None:
            return np.full((len(descriptors_A), len(descriptors_B)), np.nan)
        return bag_distances(mat_A, mat_B).numpy()
def bag_euclidean_distances2(x, y=None):
    """Pairwise squared Euclidean distances.

    x: (N, d) matrix; y: optional (M, d) matrix (defaults to x).
    Returns an (N, M) matrix with dist[i, j] = ||x[i] - y[j]||^2, clamped
    at zero to absorb negative rounding noise from the expansion
    ||a-b||^2 = ||a||^2 + ||b||^2 - 2<a, b>.
    (https://discuss.pytorch.org/t/efficient-distance-matrix-computation/9065/2)
    """
    sq_x = (x ** 2).sum(1).view(-1, 1)
    if y is None:
        y = x
        sq_y = sq_x.view(1, -1)
    else:
        sq_y = (y ** 2).sum(1).view(1, -1)
    cross = torch.mm(x, torch.transpose(y, 0, 1))
    return torch.clamp(sq_x + sq_y - 2.0 * cross, min=0)
def bag_distances(x, y):
    """Euclidean distances between descriptor bags x (N, d) and y (M, d)."""
    if x.shape[0] != 1:
        # eps keeps the derivative of sqrt finite at 0 .. the gradient still
        # vanishes for identical vectors because of the clamping inside the
        # squared-distance helper.
        return torch.sqrt(bag_euclidean_distances2(x, y) + 1e-8)
    # Single-row fast path.
    return nnf.pairwise_distance(x, y)
| 33.276923 | 108 | 0.634304 |
ace5e573ae0d1cf4db17accbe63de404418fbe88 | 4,302 | py | Python | courses/api/views.py | sakukode/minicourse_with_django | b153a64fcd530061d669f46891e479fb151e40bd | [
"MIT"
] | null | null | null | courses/api/views.py | sakukode/minicourse_with_django | b153a64fcd530061d669f46891e479fb151e40bd | [
"MIT"
] | 6 | 2020-06-05T23:08:31.000Z | 2022-02-10T09:47:51.000Z | courses/api/views.py | sakukode/minicourse_with_django | b153a64fcd530061d669f46891e479fb151e40bd | [
"MIT"
] | 1 | 2019-10-23T05:56:51.000Z | 2019-10-23T05:56:51.000Z | from rest_framework import viewsets, filters, status
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from ..models import Course
from ..serializers import CourseSerializer
from courses.api.permissions import CoursePermission
from courses.api.filters import CourseFilter
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for courses plus join/me/my_class actions.

    Supports sparse fieldsets via a ``?fields=(a,b,c)`` query parameter
    and ordering restricted to `ordering_fields` via ``?ordering=``.
    """
    permission_classes = [CoursePermission]
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
    filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
    filterset_class = CourseFilter
    ordering_fields = ['id', 'title', 'subtitle', 'price']
    ordering = ['id']

    def get_displayed_fields(self, pk=None):
        """Parse the ``?fields=(...)`` query param into a tuple of names.

        Without the param, list views fall back to `ordering_fields` and
        detail views (pk given) show every field (None).
        """
        fields_string = self.request.query_params.get('fields')
        if fields_string is None:
            if pk is None:
                fields = self.ordering_fields
            else:
                fields = None
        else:
            # Strip the surrounding parentheses, e.g. "(id,title)".
            fields_string = fields_string[1:-1]
            fields_list = fields_string.split(',')
            fields = tuple(fields_list)
        return fields

    def get_field_order(self):
        """Return the requested ordering field, restricted to the whitelist."""
        order_field = self.request.query_params.get('ordering')
        if order_field:
            # Drop a leading '-' (descending marker) before validating.
            field = order_field.replace("-", "")
            order_field = order_field if (field in self.ordering_fields) else self.ordering[0]
        else:
            order_field = self.ordering[0]
        return order_field

    def list(self, request, **kwargs):
        """Paginated course list honouring ?fields= and the filter backends."""
        fields = self.get_displayed_fields()
        queryset = super().get_queryset()
        order_field = self.get_field_order()
        # queryset = queryset.order_by(order_field)
        queryset = self.filter_queryset(queryset)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)

    def retrieve(self, request, pk=None):
        """Single course detail, honouring ?fields=."""
        fields = self.get_displayed_fields(pk=pk)
        data = self.get_object()
        serializer = self.serializer_class(data, fields=fields)
        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        """Create a course with the requesting user as its author."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save(author=request.user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @action(detail=True, methods=['post'], permission_classes=[IsAuthenticated])
    def join(self, request, pk=None, *args, **kwargs):
        """Enroll the requesting user in this course (action to join class)."""
        course = self.get_object()
        user = request.user
        serializer = self.serializer_class(course)
        # Course.objects.join returns falsy when the user is already enrolled
        # (see the failure message below) — presumably; verify in the manager.
        res = Course.objects.join(course.id, user)
        if res:
            return Response({'status': True,
                             'message': 'Success Join Course',
                             'data': serializer.data}, status=status.HTTP_200_OK)
        else:
            return Response({'status': False,
                             'message': 'You have joined Course, Please check your dashboard',
                             'data': serializer.data},
                            status=status.HTTP_200_OK)

    @action(detail=False)
    def me(self, request, **kwargs):
        """Paginated list of courses authored by the requesting user."""
        fields = self.get_displayed_fields()
        queryset = super().get_queryset().filter(author=request.user)
        order_field = self.get_field_order()
        queryset = queryset.order_by(order_field)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)

    @action(detail=False)
    def my_class(self, request, **kwargs):
        """Paginated list of courses the requesting user is a member of."""
        fields = self.get_displayed_fields()
        user = request.user
        queryset = super().get_queryset().filter(members__id=user.id)
        order_field = self.get_field_order()
        queryset = queryset.order_by(order_field)
        page = self.paginate_queryset(queryset)
        serializer = self.serializer_class(page, many=True, fields=fields)
        return self.get_paginated_response(serializer.data)
| 37.736842 | 94 | 0.658298 |
ace5e5de73d508d1c43248c7682155d643d07e1e | 170 | py | Python | isomorphic_strings.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null | isomorphic_strings.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null | isomorphic_strings.py | spencercjh/sync-leetcode-today-problem-python3-example | 4957e5eadb697334741df0fc297bec2edaa9e2ab | [
"Apache-2.0"
] | null | null | null |
class IsomorphicStrings:
"""
https://leetcode-cn.com/problems/isomorphic-strings/
"""
def isIsomorphic(self, s: str, t: str) -> bool:
| 17 | 56 | 0.564706 |
ace5e61caf032e0811914681470e0776547ac22b | 354 | py | Python | src/grokcore/view/tests/base/view/missingcontext.py | zopefoundation/grokcore.view | c574c0d041130ac607c95feb610a2b75bfc30abf | [
"ZPL-2.1"
] | null | null | null | src/grokcore/view/tests/base/view/missingcontext.py | zopefoundation/grokcore.view | c574c0d041130ac607c95feb610a2b75bfc30abf | [
"ZPL-2.1"
] | 8 | 2016-02-02T13:42:20.000Z | 2022-02-16T07:06:52.000Z | src/grokcore/view/tests/base/view/missingcontext.py | zopefoundation/grokcore.view | c574c0d041130ac607c95feb610a2b75bfc30abf | [
"ZPL-2.1"
] | 5 | 2015-04-03T05:01:45.000Z | 2018-06-13T08:41:30.000Z | """
Views without a context cannot be grokked:
>>> grok.testing.grok(__name__)
Traceback (most recent call last):
...
martian.error.GrokError: No module-level context for\
<class 'grokcore.view.tests.base.view.missingcontext.Club'>, please use the\
'context' directive.
"""
import grokcore.view as grok
class Club(grok.View):
pass
| 19.666667 | 78 | 0.706215 |
ace5e69d349e1223d3a39d596303cb1acc68084f | 578 | py | Python | reactive/plugins.py | hpcc-charms/layer-hpccsystems-plugins | b2d5ee1010d51de4c908e420a86336e163f1b0c1 | [
"Apache-2.0"
] | null | null | null | reactive/plugins.py | hpcc-charms/layer-hpccsystems-plugins | b2d5ee1010d51de4c908e420a86336e163f1b0c1 | [
"Apache-2.0"
] | null | null | null | reactive/plugins.py | hpcc-charms/layer-hpccsystems-plugins | b2d5ee1010d51de4c908e420a86336e163f1b0c1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import platform
import yaml
import re
#import Configparser
#from subprocess import check_call,check_output,CalledProcessError
from charmhelpers.core import hookenv
from charmhelpers.core.hookenv import (
log,
CRITICAL,
ERROR,
WARNING,
INFO,
DEBUG
)
from charms.reactive.helpers import is_state
from charms.reactive.bus import (
set_state,
get_state,
remove_state
)
from charms.reactive import when
from charms.reactive import when_not
from charms.layer.hpccsystems_plugin import HPCCSystemsPluginConfig
| 17 | 67 | 0.778547 |
ace5e75484141c7cc43c0ce21748484d63289558 | 249 | py | Python | Gathered CTF writeups/2018-04-30-rhme3/CPA/new_the_imposters/show_prep.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/2018-04-30-rhme3/CPA/new_the_imposters/show_prep.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/2018-04-30-rhme3/CPA/new_the_imposters/show_prep.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | from library import *
if len(sys.argv) > 2:
n = int(sys.argv[2])
else:
n = None
i, o, t = load_npz(sys.argv[1])
normalize(t)
smooth(t, 25)
align_fft(t, 35000)
print_corr(t)
i, o, t = filter_corr(i, o, t, 0.4)
print len(t)
show_traces(t)
| 13.833333 | 35 | 0.62249 |
ace5e758f353247e8f55e25751249146ea2b34f1 | 31,021 | py | Python | train_cmc_joint.py | Alice1820/CMC | 4f4354b3a33ec9c0784baefd7d1d9798e191ead5 | [
"BSD-2-Clause"
] | null | null | null | train_cmc_joint.py | Alice1820/CMC | 4f4354b3a33ec9c0784baefd7d1d9798e191ead5 | [
"BSD-2-Clause"
] | null | null | null | train_cmc_joint.py | Alice1820/CMC | 4f4354b3a33ec9c0784baefd7d1d9798e191ead5 | [
"BSD-2-Clause"
] | null | null | null | """
Train CMC with AlexNet
"""
from __future__ import print_function
import os
import sys
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import argparse
import socket
import numpy as np
import tensorboard_logger as tb_logger
from torchvision import transforms, datasets
from datasets.dataset import RGB2Lab, RGB2YCbCr
from util import adjust_learning_rate, AverageMeter, accuracy
from models.alexnet import MyAlexNetCMC
from models.resnet import MyResNetsCMC, Normalize
from models.i3d import MyI3DCMC, I3D
from models.tsm import MyTSMCMC, TSN, ConsensusModule
from NCE.NCEAverage import NCEAverage
from NCE.NCECriterion import NCECriterion
from NCE.NCECriterion import NCESoftmaxLoss
from datasets.dataset import ImageFolderInstance
from datasets.ntu import NTU, get_dataloaders
try:
from apex import amp, optimizers
except ImportError:
pass
"""
TODO: python 3.6 ModuleNotFoundError
"""
def parse_option():
    """Parse command-line arguments and derive run names/folders.

    Side effects: creates the checkpoint and tensorboard folders for this
    run. Raises ValueError if any required path is None or if data_folder
    does not exist, and Exception if --task is not given.
    """
    parser = argparse.ArgumentParser('argument for training')

    parser.add_argument('--print_freq', type=int, default=10, help='print frequency')
    parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency')
    parser.add_argument('--save_freq', type=int, default=2, help='save frequency')
    parser.add_argument('--batch_size', type=int, default=128, help='batch_size')
    parser.add_argument('--batch_size_glb', type=int, default=128, help='batch_size')
    parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')
    parser.add_argument('--epochs', type=int, default=240, help='number of training epochs')

    # optimization
    parser.add_argument('--learning_rate', type=float, default=1e-3, help='learning rate')
    parser.add_argument('--lr_decay_epochs', type=str, default='120,160,200', help='where to decay lr, can be a list')
    parser.add_argument('--lr_decay_rate', type=float, default=0.2, help='decay rate for learning rate')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam')
    parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
    parser.add_argument('--momentum', type=float, default=0.9, help='momentum')

    # resume path
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('--test', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')

    # model definition
    parser.add_argument('--model', type=str, default='tsm', choices=['alexnet',
                                                                     'resnet50v1', 'resnet101v1', 'resnet18v1',
                                                                     'resnet50v2', 'resnet101v2', 'resnet18v2',
                                                                     'resnet50v3', 'resnet101v3', 'resnet18v3',
                                                                     'tsm', 'i3d'])
    parser.add_argument('--base_model', type=str, default='resnet18')
    parser.add_argument('--softmax', action='store_true', help='using softmax contrastive loss rather than NCE')
    parser.add_argument('--nce_k', type=int, default=511)
    parser.add_argument('--nce_t', type=float, default=0.07)
    parser.add_argument('--nce_m', type=float, default=0.5)
    parser.add_argument('--feat_dim', type=int, default=128, help='dim of feat for inner product')
    parser.add_argument('--lambda_u', type=float, default=1.0, help='coefficient of unsupervised loss')

    # video
    parser.add_argument('--num_segments', type=int, default=8, help='')
    parser.add_argument('--num_class', type=int, default=120, help='')

    # dataset
    parser.add_argument('--dataset', type=str, default='imagenet', choices=['imagenet100', 'imagenet'])

    # specify folder
    parser.add_argument('--data_folder', type=str, default='/data0/xifan/NTU_RGBD_60/', help='path to data')
    parser.add_argument('--model_path', type=str, default='checkpoints', help='path to save model')
    parser.add_argument('--tb_path', type=str, default='logs', help='path to tensorboard')

    # add new views
    parser.add_argument('--view', type=str, default='RGBD', choices=['Lab', 'YCbCr', 'RGBD'])

    # mixed precision setting
    parser.add_argument('--amp', action='store_true', help='using mixed precision')
    parser.add_argument('--opt_level', type=str, default='O2', choices=['O1', 'O2'])

    # data crop threshold
    parser.add_argument('--crop_low', type=float, default=0.2, help='low area in crop')

    # CMC phase
    parser.add_argument('--task', type=str, default=None)

    opt = parser.parse_args()

    if (opt.data_folder is None) or (opt.model_path is None) or (opt.tb_path is None):
        raise ValueError('one or more of the folders is None: data_folder | model_path | tb_path')

    if opt.dataset == 'imagenet':
        if 'alexnet' not in opt.model:
            opt.crop_low = 0.08

    # '120,160,200' -> [120, 160, 200]
    iterations = opt.lr_decay_epochs.split(',')
    opt.lr_decay_epochs = list([])
    for it in iterations:
        opt.lr_decay_epochs.append(int(it))

    # number of gradient-accumulation steps per optimizer update
    opt.accum = int(opt.batch_size_glb / opt.batch_size)

    opt.method = 'softmax' if opt.softmax else 'nce'
    if opt.task is None:
        raise Exception('Task name is None.')
    # BUGFIX: the original template had five '{}' placeholders for six
    # arguments, so batch_size_glb was silently dropped and lambda_u was
    # rendered in the "bsz" slot of the run name.
    opt.model_name = '{}_{}_{}_{}_lam_{}_bsz_{}_view_RGBD'.format(opt.task, opt.method, opt.nce_k, opt.model,
                                                                  opt.lambda_u, opt.batch_size_glb)

    if opt.amp:
        opt.model_name = '{}_amp_{}'.format(opt.model_name, opt.opt_level)

    opt.model_folder = os.path.join(opt.model_path, opt.model_name)
    if not os.path.isdir(opt.model_folder):
        os.makedirs(opt.model_folder)

    opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
    if not os.path.isdir(opt.tb_folder):
        os.makedirs(opt.tb_folder)

    if not os.path.isdir(opt.data_folder):
        raise ValueError('data path not exist: {}'.format(opt.data_folder))

    return opt
def get_train_loader(split='train', args=None):
    """Build an NTU RGB+D DataLoader for the requested split.

    Returns the loader together with the number of samples in the split.
    """
    dataset = NTU(root_dir=args.data_folder, stage=split,
                  vid_len=(args.num_segments, args.num_segments))
    # No distributed sampler is used, so shuffling stays enabled.
    sampler = None
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=args.batch_size,
                                         shuffle=(sampler is None),
                                         num_workers=args.num_workers,
                                         pin_memory=True,
                                         sampler=sampler)
    # num of samples
    n_data = len(dataset)
    print('number of samples: {}'.format(n_data))
    return loader, n_data
def set_model(args, n_data):
    """Build the two-stream CMC model: one backbone per view (RGB / depth),
    linear projection heads, the NCE memory bank and all loss functions.

    n_data: dataset size backing the NCE memory bank.
    Returns (model_x, model_y, encoder_x, encoder_y, contrast,
             criterion_y, criterion_x, criterion).
    """
    # set the model
    if args.model == 'tsm':
        model_x = TSN(num_class=args.num_class)
        model_y = TSN(num_class=args.num_class)
        # TSM features are 512-d here; projected to feat_dim for contrast.
        encoder_x = nn.Linear(512, args.feat_dim) # [2048, 128]
        encoder_y = nn.Linear(512, args.feat_dim)
        # classifier_x = nn.Linear(512, 120)
        # classifier_y = nn.Linear(512, 120)
        # classifier = nn.Linear(1024, 120)
    elif args.model == 'i3d':
        model_x = I3D()
        model_y = I3D()
        encoder_x = nn.Linear(2048, args.feat_dim) # [2048, 128]
        encoder_y = nn.Linear(2048, args.feat_dim)
        # classifier_x = nn.Linear(2048, 120)
        # classifier_y = nn.Linear(2048, 120)
        # classifier = nn.Linear(4096, 120)
    else:
        raise Exception("model not implemented.")
    # NCE memory bank / averaging module shared by both views.
    contrast = NCEAverage(args.feat_dim, n_data, args.nce_k, args.nce_t, args.nce_m, args.softmax)
    criterion_x = NCESoftmaxLoss() if args.softmax else NCECriterion(n_data)
    criterion_y = NCESoftmaxLoss() if args.softmax else NCECriterion(n_data)
    # Supervised cross-entropy for the labeled branch.
    criterion = nn.CrossEntropyLoss()
    # ===================classifier=====================
    # classifier_x = classifier_x.cuda()
    # classifier_x = nn.DataParallel(classifier_x)
    # classifier_x.train()
    # classifier_y = classifier_y.cuda()
    # classifier_y = nn.DataParallel(classifier_y)
    # classifier_y.train()
    # classifier = classifier.cuda()
    # classifier = nn.DataParallel(classifier)
    # classifier.train()
    if torch.cuda.is_available():
        model_x = model_x.cuda()
        model_y = model_y.cuda()
        encoder_x = encoder_x.cuda()
        encoder_y = encoder_y.cuda()
        contrast = contrast.cuda()
        criterion_y = criterion_y.cuda()
        criterion_x = criterion_x.cuda()
        criterion = criterion.cuda()
        cudnn.benchmark = True
        model_x = nn.DataParallel(model_x)
        model_y = nn.DataParallel(model_y)
        encoder_x = nn.DataParallel(encoder_x)
        encoder_y = nn.DataParallel(encoder_y)
        # contrast = nn.DataParallel(contrast)
    # return model_x, model_y, encoder_x, encoder_y, contrast, criterion_y, criterion_x, classifier_x, classifier_y, classifier, criterion
    return model_x, model_y, encoder_x, encoder_y, contrast, criterion_y, criterion_x, criterion
def set_optimizer(args, model, encoder):
    """Create one Adam optimizer over a backbone and its projection head."""
    params = list(model.parameters()) + list(encoder.parameters())
    return torch.optim.Adam(params,
                            lr=args.learning_rate,
                            betas=[args.beta1, args.beta2])
def train(epoch, labeled_loader, unlabeled_loader, model_x, model_y, encoder_x, encoder_y, contrast, criterion_x, criterion_y, criterion, optimizer_x, optimizer_y, args):
    """
    One epoch of joint training: supervised cross-entropy on the labeled
    batch plus the CMC/NCE contrastive loss on the unlabeled batch.

    Returns (total loss avg, l-NCE avg, ab-NCE avg,
             top1_x, top5_x, cls-loss_x, top1_y, top5_y, cls-loss_y).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    losses_x = AverageMeter()
    losses_y = AverageMeter()
    l_loss_meter = AverageMeter()
    ab_loss_meter = AverageMeter()
    l_prob_meter = AverageMeter()
    ab_prob_meter = AverageMeter()
    cls_l_loss_meter = AverageMeter()
    cls_ab_loss_meter = AverageMeter()
    top1_x = AverageMeter()
    top1_y = AverageMeter()
    top5_x = AverageMeter()
    top5_y = AverageMeter()

    end = time.time()

    optimizer_x.zero_grad()
    optimizer_y.zero_grad()

    labeled_iter = iter(labeled_loader)
    unlabeled_iter = iter(unlabeled_loader)

    for idx in range(len(labeled_loader)):
        data_time.update(time.time() - end)

        # The two loaders have different lengths; restart an iterator when
        # it is exhausted. (Builtin next() + narrow except replaces the
        # old bare `except:` around `.next()`, which newer DataLoader
        # iterators no longer provide.)
        try:
            inputs, index = next(labeled_iter)
        except StopIteration:
            labeled_iter = iter(labeled_loader)
            inputs, index = next(labeled_iter)
        try:
            inputs_u, index_u = next(unlabeled_iter)
        except StopIteration:
            unlabeled_iter = iter(unlabeled_loader)
            inputs_u, index_u = next(unlabeled_iter)

        l, ab = inputs['rgb'], inputs['dep']
        label = inputs['label']
        l_u, ab_u = inputs_u['rgb'], inputs_u['dep']
        bsz = l.size(0)
        l = l.float()
        ab = ab.float()
        # BUGFIX: these previously read `l.float()` / `ab.float()`, which
        # silently replaced the unlabeled batch with the labeled one, so
        # the contrastive branch never saw unlabeled data.
        l_u = l_u.float()
        ab_u = ab_u.float()
        if torch.cuda.is_available():
            index = index.cuda()
            # BUGFIX: index_u must also be moved to GPU since it now
            # addresses the NCE memory bank below.
            index_u = index_u.cuda()
            l = l.cuda()
            ab = ab.cuda()
            l_u = l_u.cuda()
            ab_u = ab_u.cuda()
            label = label.cuda()

        # ===================forward feature=====================
        model_x.train()
        model_y.train()
        encoder_x.train()
        encoder_y.train()
        contrast.train()
        # ===================supervised=====================
        _, logit_l = model_x(l)
        _, logit_ab = model_y(ab)  # [bs, 8, 2048]
        # ===================unsupervised=====================
        feat_l, _ = model_x(l_u)
        feat_ab, _ = model_y(ab_u)  # [bs, 8, 2048]
        if args.model == 'tsm':
            # ===================consensus feature=====================
            consensus = ConsensusModule('avg')
            l2norm = Normalize(2)
            # ===================forward encoder=====================
            enc_l = l2norm(encoder_x(feat_l))
            enc_ab = l2norm(encoder_y(feat_ab))
            # Average the per-segment embeddings into one clip embedding.
            enc_l = enc_l.view((-1, args.num_segments) + enc_l.size()[1:])
            enc_ab = enc_ab.view((-1, args.num_segments) + enc_ab.size()[1:])
            enc_l = consensus(enc_l).squeeze()
            enc_ab = consensus(enc_ab).squeeze()
        elif args.model == 'i3d':
            enc_l = encoder_x(feat_l)
            enc_ab = encoder_y(feat_ab)

        # BUGFIX: the embeddings come from the unlabeled batch, so the NCE
        # memory bank must be addressed with index_u (previously `index`,
        # which indexes the much smaller labeled subset; index_u was
        # fetched but never used).
        out_l, out_ab = contrast(enc_l, enc_ab, index_u)

        l_loss = criterion_x(out_l)
        ab_loss = criterion_y(out_ab)
        l_prob = out_l[:, 0].mean()
        ab_prob = out_ab[:, 0].mean()

        cls_l_loss = criterion(logit_l, label)
        cls_ab_loss = criterion(logit_ab, label)

        # Total loss: supervised + lambda_u * unsupervised (contrastive).
        us_loss = l_loss + ab_loss
        ss_loss = cls_l_loss + cls_ab_loss
        loss = ss_loss + args.lambda_u * us_loss

        loss.backward()
        # Gradient accumulation: step only every args.accum iterations.
        if idx % args.accum == 0:
            optimizer_x.step()
            optimizer_y.step()
            optimizer_x.zero_grad()
            optimizer_y.zero_grad()

        # ===================meters=====================
        losses.update(loss.item(), bsz)
        l_loss_meter.update(l_loss.item(), bsz)
        l_prob_meter.update(l_prob.item(), bsz)
        ab_loss_meter.update(ab_loss.item(), bsz)
        ab_prob_meter.update(ab_prob.item(), bsz)
        cls_l_loss_meter.update(cls_l_loss.item(), bsz)
        cls_ab_loss_meter.update(cls_ab_loss.item(), bsz)

        # ===================accuracy=====================
        acc1_x, acc5_x = accuracy(logit_l, label, topk=(1, 5))
        acc1_y, acc5_y = accuracy(logit_ab, label, topk=(1, 5))
        top1_x.update(acc1_x[0], bsz)
        top1_y.update(acc1_y[0], bsz)
        top5_x.update(acc5_x[0], bsz)
        top5_y.update(acc5_y[0], bsz)
        losses_x.update(cls_l_loss.item(), bsz)
        losses_y.update(cls_ab_loss.item(), bsz)

        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if idx % (args.print_freq * args.accum) == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                  'l_p {lprobs.val:.3f} ({lprobs.avg:.3f})\t'
                  'ab_p {abprobs.val:.3f} ({abprobs.avg:.3f})'.format(
                   epoch, idx + 1, len(unlabeled_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, lprobs=l_prob_meter,
                   abprobs=ab_prob_meter))
            sys.stdout.flush()
            print('ViewX: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(unlabeled_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses_x, top1=top1_x, top5=top5_x))
            sys.stdout.flush()
            print('ViewY: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, idx, len(unlabeled_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses_y, top1=top1_y, top5=top5_y))
            sys.stdout.flush()
            print('')

    return losses.avg, l_loss_meter.avg, ab_loss_meter.avg, top1_x.avg, top5_x.avg, losses_x.avg, top1_y.avg, top5_y.avg, losses_y.avg
def validate(val_loader, model_x, model_y, criterion, opt):
    """
    Evaluate both view backbones on the validation set.

    Returns (top1_x, top5_x, loss_x, top1_y, top5_y, loss_y) averages.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    losses_x = AverageMeter()
    losses_y = AverageMeter()
    top1 = AverageMeter()
    top1_x = AverageMeter()
    top1_y = AverageMeter()
    top5 = AverageMeter()
    top5_x = AverageMeter()
    top5_y = AverageMeter()

    # switch to evaluate mode
    model_x.eval()
    model_y.eval()

    with torch.no_grad():
        end = time.time()
        for idx, (inputs, index) in enumerate(val_loader):
            input_x = inputs['rgb']
            input_y = inputs['dep']
            input_x = input_x.float()
            input_y = input_y.float()
            target = inputs['label']
            if torch.cuda.is_available():
                input_x = input_x.cuda()
                input_y = input_y.cuda()
                target = target.cuda()
            # ===================forward=====================
            _, logit_l = model_x(input_x)  # [bs, 8, 512]
            _, logit_ab = model_y(input_y)  # [bs, 8, 512]
            # feat = torch.cat((feat_l.detach(), feat_ab.detach()), dim=1)
            # ===================consensus feature=====================
            # if opt.model == 'tsm':
            #     consensus = ConsensusModule('avg')
            #     logit_l = logit_l.view((-1, args.num_segments) + logit_l.size()[1:])
            #     logit_ab = logit_ab.view((-1, args.num_segments) + logit_ab.size()[1:])
            #     logit_l = consensus(logit_l).squeeze()
            #     logit_ab = consensus(logit_ab).squeeze()
            # print (output.size()) # [bs, 120]
            loss_x = criterion(logit_l, target)
            loss_y = criterion(logit_ab, target)

            # Per-view classification accuracy and loss bookkeeping.
            acc1_x, acc5_x = accuracy(logit_l, target, topk=(1, 5))
            acc1_y, acc5_y = accuracy(logit_ab, target, topk=(1, 5))
            losses_x.update(loss_x.item(), input_x.size(0))
            losses_y.update(loss_y.item(), input_y.size(0))
            top1_x.update(acc1_x[0], input_x.size(0))
            top1_y.update(acc1_y[0], input_y.size(0))
            top5_x.update(acc5_x[0], input_x.size(0))
            top5_y.update(acc5_y[0], input_y.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if idx % opt.print_freq == 0:
                print('Test: ViewX: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       idx, len(val_loader), batch_time=batch_time, loss=losses_x,
                       top1=top1_x, top5=top5_x))
                print('Test ViewY: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       idx, len(val_loader), batch_time=batch_time, loss=losses_y,
                       top1=top1_y, top5=top5_y))
                print('')

        print(' *[ViewX] Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1_x, top5=top5_x))
        print(' *[ViewY] Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1_y, top5=top5_y))

    return top1_x.avg, top5_x.avg, losses_x.avg, top1_y.avg, top5_y.avg, losses_y.avg
def _checkpoint_state(args, epoch, best_acc, model_x, model_y, encoder_x, encoder_y,
                      contrast, optimizer_x, optimizer_y):
    """Assemble the checkpoint dictionary shared by all save paths.

    `best_acc` is stored under the 'best_acc1' key; callers pass the per-view
    best accuracy (or the global best for periodic snapshots).
    """
    return {
        'opt': args,
        'epoch': epoch,
        'model_x': model_x.state_dict(),
        'model_y': model_y.state_dict(),
        'best_acc1': best_acc,
        'encoder_x': encoder_x.state_dict(),
        'encoder_y': encoder_y.state_dict(),
        'contrast': contrast.state_dict(),
        'optimizer_x': optimizer_x.state_dict(),
        'optimizer_y': optimizer_y.state_dict(),
    }


def main():
    """Train and evaluate the two-view (x = rgb, y = depth) models.

    Workflow:
      1. Parse options; build unlabeled/labeled/dev/test loaders.
      2. Build the two view models, encoders, contrast module and criteria.
      3. If ``args.test`` is set, load that checkpoint, evaluate on the test
         split and exit.
      4. Optionally resume from ``args.resume``; then train for
         ``args.epochs`` epochs, log metrics to tensorboard, and save the
         best per-view checkpoints plus periodic snapshots.
    """
    global best_acc1
    global best_acc1_x
    global best_acc1_y
    best_acc1 = 0
    best_acc1_x = 0
    best_acc1_y = 0

    # parse the args
    args = parse_option()

    # data loaders: unlabeled pretext data, small labeled subset, dev and test splits
    unlabeled_loader, n_data = get_dataloaders(args=args, stage='train')
    labeled_loader, _ = get_dataloaders(args=args, stage='train25')  # 5% labeled data
    eval_loader, _ = get_dataloaders(args=args, stage='dev')
    test_loader, _ = get_dataloaders(args=args, stage='test')

    # models, encoders, contrastive module and losses for the two views
    model_x, model_y, encoder_x, encoder_y, contrast, criterion_y, criterion_x, criterion = set_model(args, n_data)

    if args.test:
        # test-only mode: load a pre-trained checkpoint, evaluate, and exit
        print('==> loading pre-trained model for testing')
        ckpt = torch.load(args.test)
        model_x.load_state_dict(ckpt['model_x'])  # rgb
        model_y.load_state_dict(ckpt['model_y'])  # depth
        print("==> loaded checkpoint for testing'{}' (epoch {})".format(args.test, ckpt['epoch']))
        print('==> done')
        top1_x, top5_x, losses_x, top1_y, top5_y, losses_y = validate(test_loader, model_x, model_y, criterion, args)
        exit()

    # one optimizer per view
    optimizer_x = set_optimizer(args, model_x, encoder_x)
    optimizer_y = set_optimizer(args, model_y, encoder_y)

    # optionally resume from a checkpoint
    args.start_epoch = 1
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cpu')
            args.start_epoch = checkpoint['epoch'] + 1
            model_x.load_state_dict(checkpoint['model_x'])
            encoder_x.load_state_dict(checkpoint['encoder_x'])
            model_y.load_state_dict(checkpoint['model_y'])
            encoder_y.load_state_dict(checkpoint['encoder_y'])
            optimizer_x.load_state_dict(checkpoint['optimizer_x'])
            optimizer_y.load_state_dict(checkpoint['optimizer_y'])
            contrast.load_state_dict(checkpoint['contrast'])
            contrast.K = args.nce_k
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            del checkpoint
            torch.cuda.empty_cache()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # tensorboard
    logger = tb_logger.Logger(logdir=args.tb_folder, flush_secs=2)

    # routine
    for epoch in range(args.start_epoch, args.epochs + 1):
        print("==> training...")
        time1 = time.time()
        loss, l_loss, ab_loss, top1_x, top5_x, losses_x, top1_y, top5_y, losses_y = \
            train(epoch, labeled_loader, unlabeled_loader, model_x, model_y, encoder_x, encoder_y, contrast,
                  criterion_x, criterion_y, criterion, optimizer_x, optimizer_y, args)
        time2 = time.time()
        print('epoch {} train, total time {:.2f}'.format(epoch, time2 - time1))

        # tensorboard logging of training metrics
        logger.log_value('loss', loss, epoch)
        logger.log_value('l_loss', l_loss, epoch)
        logger.log_value('ab_loss', ab_loss, epoch)
        logger.log_value('x/train_acc', top1_x, epoch)
        logger.log_value('x/train_acc5', top5_x, epoch)
        logger.log_value('x/train_loss', losses_x, epoch)
        logger.log_value('y/train_acc', top1_y, epoch)
        logger.log_value('y/train_acc5', top5_y, epoch)
        logger.log_value('y/train_loss', losses_y, epoch)

        print("==> evaluating...")
        top1_x, top5_x, losses_x, top1_y, top5_y, losses_y = \
            validate(eval_loader, model_x, model_y, criterion, args)
        logger.log_value('x/eval_acc', top1_x, epoch)
        logger.log_value('x/eval_acc5', top5_x, epoch)
        logger.log_value('x/eval_loss', losses_x, epoch)
        logger.log_value('y/eval_acc', top1_y, epoch)
        logger.log_value('y/eval_acc5', top5_y, epoch)
        logger.log_value('y/eval_loss', losses_y, epoch)

        # save the best rgb-view model
        if top1_x > best_acc1_x:
            best_acc1_x = top1_x
            state = _checkpoint_state(args, epoch, best_acc1_x, model_x, model_y,
                                      encoder_x, encoder_y, contrast, optimizer_x, optimizer_y)
            save_name = '{}_best_rgb.pth'.format(args.model)
            save_name = os.path.join(args.model_folder, save_name)
            print('saving best rgb model!')
            torch.save(state, save_name)

        # save the best depth-view model
        if top1_y > best_acc1_y:
            best_acc1_y = top1_y
            state = _checkpoint_state(args, epoch, best_acc1_y, model_x, model_y,
                                      encoder_x, encoder_y, contrast, optimizer_x, optimizer_y)
            save_name = '{}_best_dep.pth'.format(args.model)
            save_name = os.path.join(args.model_folder, save_name)
            print('saving best dep model!')
            torch.save(state, save_name)

        # periodic snapshot
        if epoch % args.save_freq == 0:
            print('==> Saving...')
            state = _checkpoint_state(args, epoch, best_acc1, model_x, model_y,
                                      encoder_x, encoder_y, contrast, optimizer_x, optimizer_y)
            save_name = 'rgbd_ckpt_epoch_{epoch}.pth'.format(epoch=epoch)
            save_name = os.path.join(args.model_folder, save_name)
            print('saving regular model!')
            torch.save(state, save_name)

        # release cached GPU memory between epochs
        torch.cuda.empty_cache()
# script entry point
if __name__ == '__main__':
    main()
| 42.787586 | 170 | 0.588343 |
ace5e7acf7df77bbe20715f93f8c5a5f724361af | 6,474 | py | Python | make/photon/prepare/utils/chart.py | ckd/harbor | 1ceb7a2fb9512c77deac97def51d875d60a7bf55 | [
"Apache-2.0"
] | 1 | 2020-09-22T07:40:39.000Z | 2020-09-22T07:40:39.000Z | make/photon/prepare/utils/chart.py | ckd/harbor | 1ceb7a2fb9512c77deac97def51d875d60a7bf55 | [
"Apache-2.0"
] | 2 | 2022-03-02T05:03:32.000Z | 2022-03-17T22:25:26.000Z | make/photon/prepare/utils/chart.py | ckd/harbor | 1ceb7a2fb9512c77deac97def51d875d60a7bf55 | [
"Apache-2.0"
] | 1 | 2019-07-15T11:51:51.000Z | 2019-07-15T11:51:51.000Z | import os, shutil
from g import templates_dir, config_dir
from .jinja import render_jinja
# Template and generated-config locations for the chartserver (chart museum) component.
chartm_temp_dir = os.path.join(templates_dir, "chartserver")
# jinja template used to render the chartserver environment file
chartm_env_temp = os.path.join(chartm_temp_dir, "env.jinja")
chartm_config_dir = os.path.join(config_dir, "chartserver")
# rendered environment file output path
chartm_env = os.path.join(config_dir, "chartserver", "env")
def prepare_chartmuseum(config_dict):
    """Render the chartserver (chart museum) environment file from the harbor config.

    Maps the harbor-level redis and storage-provider settings onto the
    environment variables understood by chartmuseum and writes them via the
    jinja template to the chartserver config directory.

    :param config_dict: parsed harbor configuration dictionary
    """
    # Fix: previously this local was assigned but the render call re-read the
    # dict directly; use the local consistently.
    core_secret = config_dict['core_secret']
    redis_host = config_dict['redis_host']
    redis_port = config_dict['redis_port']
    redis_password = config_dict['redis_password']
    redis_db_index_chart = config_dict['redis_db_index_chart']
    storage_provider_name = config_dict['storage_provider_name']
    storage_provider_config_map = config_dict['storage_provider_config']

    if not os.path.isdir(chartm_config_dir):
        print ("Create config folder: %s" % chartm_config_dir)
        os.makedirs(chartm_config_dir)

    # process redis info
    cache_store = "redis"
    cache_redis_password = redis_password
    cache_redis_addr = "{}:{}".format(redis_host, redis_port)
    cache_redis_db_index = redis_db_index_chart

    # process storage info
    # default using local file system
    storage_driver = "local"
    # storage provider configurations
    # please be aware that, we do not check the validations of the values for the specified keys
    # convert the configs to config map
    storage_provider_config_options = []
    if storage_provider_name == 's3':
        # aws s3 storage
        storage_driver = "amazon"
        storage_provider_config_options.append("STORAGE_AMAZON_BUCKET=%s" % (storage_provider_config_map.get("bucket") or '') )
        storage_provider_config_options.append("STORAGE_AMAZON_PREFIX=%s" % (storage_provider_config_map.get("rootdirectory") or '') )
        storage_provider_config_options.append("STORAGE_AMAZON_REGION=%s" % (storage_provider_config_map.get("region") or '') )
        storage_provider_config_options.append("STORAGE_AMAZON_ENDPOINT=%s" % (storage_provider_config_map.get("regionendpoint") or '') )
        storage_provider_config_options.append("AWS_ACCESS_KEY_ID=%s" % (storage_provider_config_map.get("accesskey") or '') )
        storage_provider_config_options.append("AWS_SECRET_ACCESS_KEY=%s" % (storage_provider_config_map.get("secretkey") or '') )
    elif storage_provider_name == 'gcs':
        # google cloud storage
        storage_driver = "google"
        storage_provider_config_options.append("STORAGE_GOOGLE_BUCKET=%s" % ( storage_provider_config_map.get("bucket") or '') )
        storage_provider_config_options.append("STORAGE_GOOGLE_PREFIX=%s" % ( storage_provider_config_map.get("rootdirectory") or '') )
        if storage_provider_config_map.get("keyfile"):
            # the key file is mounted at a fixed path inside the container
            storage_provider_config_options.append('GOOGLE_APPLICATION_CREDENTIALS=%s' % '/etc/chartserver/gcs.key')
    elif storage_provider_name == 'azure':
        # azure storage
        storage_driver = "microsoft"
        storage_provider_config_options.append("STORAGE_MICROSOFT_CONTAINER=%s" % ( storage_provider_config_map.get("container") or '') )
        storage_provider_config_options.append("AZURE_STORAGE_ACCOUNT=%s" % ( storage_provider_config_map.get("accountname") or '') )
        storage_provider_config_options.append("AZURE_STORAGE_ACCESS_KEY=%s" % ( storage_provider_config_map.get("accountkey") or '') )
        storage_provider_config_options.append("STORAGE_MICROSOFT_PREFIX=/azure/harbor/charts")
    elif storage_provider_name == 'swift':
        # open stack swift
        storage_driver = "openstack"
        storage_provider_config_options.append("STORAGE_OPENSTACK_CONTAINER=%s" % ( storage_provider_config_map.get("container") or '') )
        storage_provider_config_options.append("STORAGE_OPENSTACK_PREFIX=%s" % ( storage_provider_config_map.get("rootdirectory") or '') )
        storage_provider_config_options.append("STORAGE_OPENSTACK_REGION=%s" % ( storage_provider_config_map.get("region") or '') )
        storage_provider_config_options.append("OS_AUTH_URL=%s" % ( storage_provider_config_map.get("authurl") or '') )
        storage_provider_config_options.append("OS_USERNAME=%s" % ( storage_provider_config_map.get("username") or '') )
        storage_provider_config_options.append("OS_PASSWORD=%s" % ( storage_provider_config_map.get("password") or '') )
        storage_provider_config_options.append("OS_PROJECT_ID=%s" % ( storage_provider_config_map.get("tenantid") or '') )
        storage_provider_config_options.append("OS_PROJECT_NAME=%s" % ( storage_provider_config_map.get("tenant") or '') )
        storage_provider_config_options.append("OS_DOMAIN_ID=%s" % ( storage_provider_config_map.get("domainid") or '') )
        storage_provider_config_options.append("OS_DOMAIN_NAME=%s" % ( storage_provider_config_map.get("domain") or '') )
    elif storage_provider_name == 'oss':
        # aliyun OSS
        storage_driver = "alibaba"
        bucket = storage_provider_config_map.get("bucket") or ''
        endpoint = storage_provider_config_map.get("endpoint") or ''
        # the alibaba driver expects the endpoint WITHOUT the bucket prefix
        if endpoint.startswith(bucket + "."):
            endpoint = endpoint.replace(bucket + ".", "")
        storage_provider_config_options.append("STORAGE_ALIBABA_BUCKET=%s" % bucket )
        storage_provider_config_options.append("STORAGE_ALIBABA_ENDPOINT=%s" % endpoint )
        storage_provider_config_options.append("STORAGE_ALIBABA_PREFIX=%s" % ( storage_provider_config_map.get("rootdirectory") or '') )
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_ID=%s" % ( storage_provider_config_map.get("accesskeyid") or '') )
        storage_provider_config_options.append("ALIBABA_CLOUD_ACCESS_KEY_SECRET=%s" % ( storage_provider_config_map.get("accesskeysecret") or '') )
    else:
        # use local file system
        storage_provider_config_options.append("STORAGE_LOCAL_ROOTDIR=/chart_storage")

    # generate storage provider configuration
    all_storage_provider_configs = ('\n').join(storage_provider_config_options)

    render_jinja(
        chartm_env_temp,
        chartm_env,
        cache_store=cache_store,
        cache_redis_addr=cache_redis_addr,
        cache_redis_password=cache_redis_password,
        cache_redis_db_index=cache_redis_db_index,
        core_secret=core_secret,
        storage_driver=storage_driver,
        all_storage_driver_configs=all_storage_provider_configs,
        public_url=config_dict['public_url'],
        chart_absolute_url=config_dict['chart_absolute_url'])
ace5e82f661f1f3e747dc21446f65cd99dd20e82 | 29,275 | py | Python | rcsb/utils/config/ConfigUtil.py | rcsb/py-rcsb_utils_config | 5a1bbdfe546220d94520720a7030fed4008091bf | [
"Apache-2.0"
] | null | null | null | rcsb/utils/config/ConfigUtil.py | rcsb/py-rcsb_utils_config | 5a1bbdfe546220d94520720a7030fed4008091bf | [
"Apache-2.0"
] | 1 | 2022-03-04T00:13:53.000Z | 2022-03-04T00:13:53.000Z | rcsb/utils/config/ConfigUtil.py | rcsb/py-rcsb_utils_config | 5a1bbdfe546220d94520720a7030fed4008091bf | [
"Apache-2.0"
] | 3 | 2020-12-03T17:37:14.000Z | 2020-12-03T17:37:34.000Z | ##
# File: ConfigUtil.py
# Author: J. Westbrook
# Date: 14-Mar-2018
# Version: 0.001
#
# Updates:
# 31-Mar-2018 jdw standardize argument names
# 16-Jun-2018. jdw add more convenient support for multiple config sections
# 18-Jun-2018 jdw push the mocking down to a new getPath() method.
# 20-Aug-2018 jdw add getHelper() to return an instance of a module/class
# 13-Sep-2018 jdw add YAML support and read/write methods.
# 16-Sep-2018 jdw add support importing a CommentedMap
# 3-Oct-2018 jdw add support to import environment for ini/configparser format files.
# 10-Oct-2018 jdw added methods getConfigPath() adn getMockTopPath()
# 23-Oct-2018 jdw refine export method to manually extract content from configparser structure
# 24-Oct-2018 jdw if config format is not specified perceive the format from the config filename extension
# change default section name management.
# 4-Jan-2019 jdw add optional arguments to getPath(...,prefixName=None, prefixSectionName=None)
# add methods getDefaultSectionName(), replaceSectionName(), and getSectionNameReplacement()
# 10-Mar-2019 jdw add method getEnvValue() to dereference config option as an environmental variable
# 3-Feb-2020 jdw add __processAppendedSections() to handle nested configuration sections
##
"""
Manage simple configuration options.
"""
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import base64
import copy
import logging
import os
import sys
import tempfile
import ruamel.yaml
from nacl.encoding import HexEncoder
from nacl.secret import SecretBox
from rcsb.utils.io.FileUtil import FileUtil
try:
from configparser import ConfigParser as cp
except ImportError:
from ConfigParser import SafeConfigParser as cp
logger = logging.getLogger(__name__)
class ConfigUtil(object):
def __init__(
    self,
    configPath=None,
    defaultSectionName="DEFAULT",
    fallbackEnvPath=None,
    mockTopPath=None,
    configFormat=None,
    cachePath=None,
    useCache=False,
    appendConfigOption="CONFIG_APPEND_LOCATOR_PATHS",
    **kwargs
):
    """Manage simple configuration options stored in INI (Python configparser-style) or YAML configuration files.

    Args:
        configPath (str, optional): Configuration file path
        defaultSectionName (str, optional): Name of configuration section holding default option values (e.g. DEFAULT)
        fallbackEnvPath (str, optional): Environmental variable holding configuration file path
        mockTopPath (str, optional): Mockpath is prepended to path configuration options if it specified (default=None)
        configFormat (str, optional): Configuration file format (e.g. ini or yaml default=ini)
        cachePath (str,optional): top cache path for remote configuration assets
        useCache (bool, optional): use cached configuration assets
        appendConfigOption (str, optional): option name containing a list of appendable configuration files (yaml only)
        **kwargs: importEnvironment(bool) imports environment as default values for ini/configparser format files
    """
    # When no explicit path is given, fall back to the file named by the environmental variable (default "setup.cfg")
    myFallbackPath = os.getenv(fallbackEnvPath, "setup.cfg") if fallbackEnvPath else None
    self.__myConfigPath = configPath if configPath is not None else myFallbackPath
    #
    self.__defaultSectionName = defaultSectionName
    #
    # Mockpath is prepended to path configuration options if it specified
    self.__mockTopPath = mockTopPath
    #
    # Top cache path for remote configuration assets - if no cache information is provided then use system temp area.
    cachePath = cachePath if cachePath else tempfile.mkdtemp(prefix="config-util", suffix="-cache")
    appendConfigOption = appendConfigOption if appendConfigOption else None
    #
    # This is the internal container for configuration data from all sources
    self.__cD = {}
    #
    # Maps requested section names to replacement section names (see replaceSectionName())
    self.__sectionNameD = {"DEFAULT": defaultSectionName}
    #
    # Perceive the configuration format from the file extension when not given explicitly
    self.__configFormat = configFormat if configFormat else self.__getConfigFormat(self.__myConfigPath, defaultConfig="ini")
    #
    logger.debug("Using config path %s format %s", self.__myConfigPath, self.__configFormat)
    if self.__myConfigPath:
        self.__configFormat, self.__cD = self.__updateConfig(self.__myConfigPath, self.__configFormat, **kwargs)
        # Fetch and merge any appended configuration assets referenced by appendConfigOption
        self.__processAppendedSections(appendConfigOption, cachePath, useCache)
    if not self.__cD:
        logger.warning("No configuration information imported - configuration path is %s (%s)", self.__myConfigPath, configFormat)
def __processAppendedSections(self, appendConfigOption, cachePath, useCache=True):
    """Fetch and append configuration assets assigned to input configuration option.

    Args:
        appendConfigOption (str): reserved configuration option to hold a list of configuration asset locators
        cachePath (str): path to store cached copies configuration assets
        useCache (bool, optional): use existing cached configuration assets. Defaults to True.

    Returns:
        bool: True for success of False otherwise
    """
    try:
        ret = True
        # locators of additional configuration files listed in the default section
        appendLocL = self.getList(appendConfigOption, sectionName=self.__defaultSectionName)
        logger.debug("appendLocL is %r", appendLocL)
        if appendLocL:
            cP = os.path.join(cachePath, "config")
            fU = FileUtil(workPath=cP)
            logger.debug("Fetching append sections from %r", appendLocL)
            for appendLoc in appendLocL:
                fn = fU.getFileName(appendLoc)
                fp = os.path.join(cP, fn)
                okF = True
                if not (useCache and fU.exists(fp)):
                    # get a fresh copy from source
                    okF = fU.get(appendLoc, fp)
                    logger.debug("Fetched %r to %r", appendLoc, fp)
                # merge the fetched file into the current configuration
                ok = self.appendConfig(fp)
                # accumulate success over all appended assets
                ret = ret and ok and okF
    except Exception as e:
        logger.exception("Failing for option %r cachePath %r with %s", appendConfigOption, cachePath, str(e))
        ret = False
    #
    if not ret:
        logger.error("Fetching appended sections failing %r", appendLocL)
    return ret
def getConfigPath(self):
    """Return the configuration file path in use by this instance (may be None)."""
    return self.__myConfigPath
def getMockTopPath(self):
    """Return the mock top path prepended to path options (None when mocking is disabled)."""
    return self.__mockTopPath
def getDefaultSectionName(self):
    """Return the name of the section holding default option values."""
    return self.__defaultSectionName
def replaceSectionName(self, orgSectionName, replaceSectionName):
"""Set an replacement section name that will override the section name for input requests."""
try:
self.__sectionNameD[orgSectionName] = replaceSectionName
return True
except Exception:
return False
def getSectionNameReplacement(self, orgSectionName):
try:
return self.__sectionNameD[orgSectionName] if orgSectionName in self.__sectionNameD else orgSectionName
except Exception:
return orgSectionName
def importConfig(self, dObj):
"""Import configuration options from the input dictionary-like object.
Args:
dObj (object): Dictionary-like configuration object
Returns:
bool: True for success or False otherwise
"""
try:
if isinstance(dObj, dict):
self.__cD.update(dObj)
elif isinstance(dObj, ruamel.yaml.comments.CommentedMap):
self.__cD = dObj
elif isinstance(dObj, cp):
self.__cD.update(self.__extractDict(dObj))
else:
logger.error("Cannot import object type %r", type(dObj))
except Exception as e:
logger.exception("Failing with %s", str(e))
return False
def exportConfig(self, sectionName=None):
try:
cD = self.__extractDict(self.__cD) if isinstance(self.__cD, cp) else self.__cD
if sectionName:
return copy.deepcopy(cD[sectionName])
else:
return copy.deepcopy(cD)
except Exception as e:
logger.exception("Failing with %s", str(e))
return None
def appendConfig(self, filePath, configFormat=None, **kwargs):
    """Append configuration options from the input file to the current configuration.

    The file's format (perceived from its extension when not given explicitly)
    must agree with the format already in use.

    Args:
        filePath (str): configuration file path
        configFormat (str, optional): configuration file format (e.g. ini or yaml)
        **kwargs: keyword arguments passed to the format-specific readers

    Returns:
        bool: True for success or False otherwise
    """
    ok = False
    try:
        cf = configFormat if configFormat else self.__getConfigFormat(filePath, defaultConfig=self.__configFormat)
        tf, cD = self.__updateConfig(filePath, cf, **kwargs)
        if tf == cf:
            self.__cD.update(cD)
            ok = True
        else:
            logger.error("Configuration format inconstency %r .ne. %r", cf, tf)
    except Exception as e:
        logger.error("Appending %r (%r) failing with %s", filePath, cf, str(e))
    return ok
def __getConfigFormat(self, filePath, defaultConfig="ini"):
configFormat = defaultConfig
try:
# Perceive the format from the file path or set default to 'ini'
if filePath:
_, ext = os.path.splitext(filePath)
if ext[1:].lower() in ["yaml", "yml"]:
configFormat = "yaml"
elif ext[1:].lower() in ["ini"]:
configFormat = "ini"
except Exception as e:
logger.debug("Failing with %s", e)
return configFormat
def __updateConfig(self, filePath, configFormat, **kwargs):
    """Update the current configuration options with data from the input configuration file.

    Args:
        filePath (str): Configuration file path
        configFormat (str): Configuration file format (e.g. ini or yaml)
        **kwargs: key value arguments pass to import methods
                  rountTrip (bool): parse yaml to preserve context for roundtrip processing
                  importEnvironment (bool): include the environment as defaults values for 'ini'/'configparser' format files

    Returns:
        tuple(str, object): 'ini' or 'yaml' and configuration object
    """
    #
    cD = None
    try:
        cf = configFormat
        if cf.lower() in ["ini", "configparser"]:
            useEnv = kwargs.get("importEnvironment", False)
            cD = self.__readIniFile(filePath, useEnv=useEnv, **kwargs)
            # normalize the returned format label
            configFormat = "ini"
        elif cf.lower() in ["yaml"]:
            rt = kwargs.get("roundTrip", False)
            cD = self.__readYamlFile(filePath, roundTrip=rt)
            configFormat = "yaml"
    except Exception as e:
        logger.exception("Failing with filePath %r format %r with %s", filePath, configFormat, str(e))
    #
    return configFormat, cD
def writeConfig(self, filePath, configFormat=None, **kwargs):
    """Write the current configuration in the selected format.

    Args:
        filePath (str): Output configuration file path
        configFormat (str, optional): configuration format (e.g. 'ini' or 'yaml')
        **kwargs: key value arguments passed to export methods

    Returns:
        bool: True for success or False otherwise
    """
    cf = configFormat if configFormat else self.__configFormat
    #
    if cf == "ini":
        if not isinstance(self.__cD, cp):
            # convert the internal dictionary into a configparser object before writing
            cD = self.__createConfigParseObj(self.__cD, delimiter=";")
            ok = self.__writeIniFile(filePath, cD, **kwargs)
        else:
            ok = self.__writeIniFile(filePath, self.__cD, **kwargs)
    elif cf == "yaml":
        # yaml export works from a plain dictionary
        cD = self.__extractDict(self.__cD) if isinstance(self.__cD, cp) else self.__cD
        ok = self.__writeYamlFile(filePath, cD, **kwargs)
    else:
        ok = False
    return ok
def get(self, name, default=None, sectionName=None, tokenName="CONFIG_SUPPORT_TOKEN"):
    """Return configuration value of input configuration option. Option names beginning with
    leading underscore are treated as encrypted secrets. If an encrypted option is
    not found in the section this method will fallback to the value of the unqualified
    option.

    Args:
        name (str): configuration option name
        default (str, optional): default value returned if no configuration option is provided
        sectionName (str, optional): configuration section name, a simple key (default = defaultSectionName from object)
        tokenName (str,optional): configuration option holding name of environmental variable
                                  storing security key.

    Returns:
        str: configuration option value
    """
    logMissing = False
    ok = False
    mySection = sectionName if sectionName else self.__defaultSectionName
    mySection = self.getSectionNameReplacement(mySection)
    try:
        if "." in name:
            # dotted names address nested configuration options
            ok = self.__getKeyExists(self.__cD[mySection], name)
        else:
            ok = name in self.__cD[mySection]
    except Exception:
        ok = False
    #
    if ok:
        return self.__get(name, default=default, sectionName=sectionName, tokenName=tokenName)
    elif name.startswith("_"):
        # encrypted option not found - fall back to the unqualified (plain) option name
        return self.__get(name[1:], default=default, sectionName=sectionName, tokenName=tokenName)
    else:
        if logMissing:
            logger.debug("Missing config option %r (%r) assigned default value %r", name, mySection, default)
        return default
def __get(self, name, default=None, sectionName=None, tokenName="CONFIG_SUPPORT_TOKEN"):
    """Return configuration value of input configuration option. Option names beginning with
    leading underscore are treated as encrypted secrets.

    Args:
        name (str): configuration option name
        default (str, optional): default value returned if no configuration option is provided
        sectionName (str, optional): configuration section name, a simple key (default = defaultSectionName from object)
        tokenName (str,optional): configuration option holding name of environmental variable
                                  storing security key.

    Returns:
        str: configuration option value
    """
    logMissing = False
    val = default
    try:
        mySection = sectionName if sectionName else self.__defaultSectionName
        mySection = self.getSectionNameReplacement(mySection)
        if "." in name:
            # dotted names address nested configuration options
            val = self.__getKeyValue(self.__cD[mySection], name)
        else:
            val = self.__cD[mySection][name]
        #
        # ini/configparser values are uniformly returned as strings
        val = str(val) if self.__configFormat == "ini" else val
        if name.startswith("_") and isinstance(val, str):
            # leading underscore marks an encrypted secret - decrypt before returning
            val = self.__getSecretValue(name, val, mySection, tokenName)
    except Exception as e:
        if logMissing:
            logger.debug("Missing config option %r (%r) assigned default value %r (%s)", name, mySection, default, str(e))
    #
    # return a copy so callers cannot mutate the stored configuration
    return copy.deepcopy(val)
def getPath(self, name, default=None, sectionName=None, prefixName=None, prefixSectionName=None):
"""Return path associated with the input configuration option and an option prefix path.
This method supports mocking where the MOCK_TOP_PATH will be prepended to the configuration path.
Args:
name (str): configuration option name
default (str, optional): default value returned if no configuration option is provided
sectionName (str, optional): configuration section name, a simple key
prefixName(str, optional): optional configuration option for a prefix path
prefixSectionName(str, optional): optional configuration section name for a prefix path option (default = defaultSectionName from object)
Returns:
str: configuration path
"""
val = default
try:
val = self.get(name, default=default, sectionName=sectionName)
# don't prefix a fully qualified path or url
for st in ["/", "http://", "https://", "ftp://", "file://"]:
if val.startswith(st):
return val
#
myPrefixSectionName = prefixSectionName if prefixSectionName else self.__defaultSectionName
prefixPath = self.get(prefixName, default=None, sectionName=myPrefixSectionName) if prefixName else None
if prefixPath:
val = os.path.join(self.__mockTopPath, prefixPath, val) if self.__mockTopPath else os.path.join(prefixPath, val)
else:
val = os.path.join(self.__mockTopPath, val) if self.__mockTopPath else val
except Exception as e:
logger.debug("Missing config option %r (%r) assigned default value %r (%s)", name, sectionName, default, str(e))
#
return val
def __getSecretValue(self, name, val, sectionName, tokenName):
    """Decrypt the input option value using the hex key named by the token option.

    The token option (`tokenName`) names an environmental variable whose value
    is the hex-encoded symmetric key.  On any failure the input value is
    returned unchanged.
    """
    try:
        hexKey = self.getEnvValue(tokenName, sectionName=sectionName)
        if not hexKey:
            logger.error("Empty key for token %r processing %r and %r", tokenName, name, val)
        elif len(hexKey) < 32:
            logger.error("Bad key (%d) for token %r processing %r and %r", len(hexKey), tokenName, name, val)
        # NOTE(review): decryption is still attempted even when the key looks empty/short -
        # the except clause below then restores the original value; confirm this is intended.
        val = self.__decryptMessage(val, hexKey)
        # drop the key reference promptly
        hexKey = None
    except Exception as e:
        logger.debug("Failing processing %s using %r secret value with %s", name, tokenName, str(e))
    return val
def getSecret(self, name, default=None, sectionName=None, tokenName="CONFIG_SUPPORT_TOKEN"):
    """Return a decrypted value associated with the input sensitive configuration option.

    Args:
        name (str): configuration option name
        default (str, optional): default value returned if no configuration option is provided
        sectionName (str, optional): configuration section name, a simple key
        tokenName (str,optional): configuration option holding name of environmental variable
                                  storing security key.

    Returns:
        str: option value
    """
    val = default
    val = self.get(name, default=default, sectionName=sectionName)
    # get() already decrypts options named with a leading underscore;
    # decrypt here only for the unqualified form
    if not name.startswith("_") and val and isinstance(val, str):
        val = self.__getSecretValue(name, val, sectionName, tokenName)
    #
    return val
def getEnvValue(self, name, default=None, sectionName=None):
"""Return the value of the environmental variable named as the configuration option value.
Args:
name (str): configuration option name (value is environmental variable name)
default (str, optional): default value returned if no configuration option is provided
sectionName (str, optional): configuration section name, a simple key
Returns:
str: option(environmental variable) value
"""
val = default
try:
varName = self.get(name, default=None, sectionName=sectionName)
val = os.environ.get(varName, default)
except Exception as e:
logger.error("Failed processing environmental variable config option %r (%r) assigned default value %r (%s)", name, sectionName, default, str(e))
#
return val
def getList(self, name, default=None, sectionName=None, delimiter=","):
vL = default if default is not None else []
try:
val = self.get(name, default=default, sectionName=sectionName)
logger.debug("name %r sectionName %r val %r", name, sectionName, val)
if val:
if isinstance(val, (list, set, tuple)):
vL = list(val)
else:
vL = str(val).split(delimiter)
except Exception as e:
logger.debug("Missing config option list %r (%r) assigned default value %r (%s)", name, sectionName, default, str(e))
#
return vL
def getHelper(self, name, default=None, sectionName=None, **kwargs):
    """Return an instance of module/class corresponding to the configuration module path.

    Args:
        name (str): configuration option name
        default (str, optional): default return value
        sectionName (str, optional): configuration section name, a simple key
        **kwargs: key-value arguments passed to the module/class instance

    Returns:
        object: instance of module/class
    """
    val = default
    try:
        # the option value is a dotted module path (e.g. package.module.ClassName)
        val = self.get(name, default=default, sectionName=sectionName)
    except Exception as e:
        logger.error("Missing configuration option %r (%r) assigned default value %r (%s)", name, sectionName, default, str(e))
    #
    return self.__getHelper(val, **kwargs)
def __getHelper(self, modulePath, **kwargs):
    """Dynamically import `modulePath` and instantiate the class that shares the module's terminal name.

    Returns None on failure.
    """
    aObj = None
    try:
        aMod = __import__(modulePath, globals(), locals(), [""])
        sys.modules[modulePath] = aMod
        #
        # Strip off any leading path to the module before we instaniate the object.
        mpL = str(modulePath).split(".")
        moduleName = mpL[-1]
        #
        # convention: the class is named like the terminal module path component
        aObj = getattr(aMod, moduleName)(**kwargs)
    except Exception as e:
        logger.error("Failing to instance helper %r with %s", modulePath, str(e))
    return aObj
    def __readIniFile(self, configPath, useEnv=False, **kwargs):
        """Internal method to read INI-style configuration file using standard ConfigParser/configparser library.
        Args:
            configPath (str): Configuration file path
            useEnv (bool, optional): seed the parser from os.environ instead of starting empty
            **kwargs: (dict) passed to ConfigParser/configparser (currently unused)
        Returns:
            object: On success a ConfigParser dictionary-like object (possibly
            partially populated if read() fails after construction)
        """
        _ = kwargs
        if useEnv:
            # Note that environment variables are still lowercased by ConfigParser defaults handling.
            logger.debug("Using enviroment length %d", len(os.environ))
            configP = cp(os.environ, default_section=self.__defaultSectionName)
        else:
            configP = cp(default_section=self.__defaultSectionName)
        try:
            # This is to avoid case conversion of option names
            # configP.optionxform = str
            configP.optionxform = lambda option: option
            configP.sections()
            configP.read(configPath)
            return configP
        except Exception as e:
            logger.error("Failed reading INI configuration file %s with %s", configPath, str(e))
        return configP
def __readYamlFile(self, configPath, **kwargs):
"""Internal method to read YAML-style configuration file using ruamel.yaml library.
Args:
configPath (str): Configuration file path
**kwargs: (dict) passed to ConfigParser/configparser
Returns:
object: On success a ConfigParser dictionary-like object or an empty dictionary on Failure
"""
_ = kwargs
yaml = ruamel.yaml.YAML()
yaml.preserve_quotes = True
yaml.indent(mapping=4, sequence=6, offset=4)
yaml.explicit_start = True
rD = {}
try:
with open(configPath, "r", encoding="utf-8") as stream:
rD = yaml.load(stream)
except Exception as e:
logger.error("Failed reading YAML configuration file %s with %s", configPath, str(e))
return rD
def __writeIniFile(self, configPath, configObj, **kwargs):
"""Internal method to write INI-style configuration file using standard ConfigParser/configparser library.
Args:
configPath (str): Output file path
configObj (object): ConfigParser/configparser object
Returns:
bool: True for success or False otherwise
"""
_ = kwargs
try:
with open(configPath, "w", encoding="utf-8") as ofh:
configObj.write(ofh, space_around_delimiters=False)
return True
except Exception as e:
logger.error("Failed writing INI configuration file %s with %s", configPath, str(e))
return False
    def __writeYamlFile(self, configPath, mObj, **kwargs):
        """Internal method to write YAML-style configuration file using standard ruamel.yaml library.
        Args:
            configPath (str): Output file path
            mObj (mapping object): Mapping object or dictionary
            **kwargs: width (int, optional): maximum output line width, default 120
        Returns:
            bool: True for success or False otherwise
        """
        yaml = ruamel.yaml.YAML()
        yaml.preserve_quotes = True
        yaml.width = kwargs.get("width", 120)
        yaml.indent(mapping=4, sequence=6, offset=4)
        yaml.explicit_start = True
        try:
            #
            with open(configPath, "w", encoding="utf-8") as ofh:
                yaml.dump(mObj, ofh)
            return True
        except Exception as e:
            logger.error("Failed writing YAML configuration file %s with %s", configPath, str(e))
        return False
def __extractDict(self, configObj):
"""Internal method to copy the contents of the input ConfigParser object to a dictionary structure."""
sectDict = {}
#
defaults = configObj.defaults()
tD = {}
for key in defaults.keys():
tD[key] = defaults[key]
sectDict[self.__defaultSectionName] = tD
sections = configObj.sections()
logger.debug("Sections %r", sections)
for section in sections:
options = configObj.options(section)
tD = {}
for option in options:
tD[option] = configObj.get(section, option)
sectDict[section] = tD
logger.debug("Returning dictionary %r", sectDict.items())
return sectDict
    def __createConfigParseObj(self, dObj, delimiter=","):
        """Internal method to create a configparser object from a dictionary representation
        of configuration sections and objects.
        The dictionary object must conform to the simple configparser data model. For instance:
        d{'sectionName1': {'option1': value2, 'option2': value2, ... }, ... }

        Sequence values are flattened into a delimiter-joined string (INI has
        no list type); nested dict values cannot be represented and are skipped.
        """
        cpObj = cp()
        try:
            for sK, sV in dObj.items():
                # The default section already exists in a ConfigParser and may not be re-added.
                if sK != self.__defaultSectionName:
                    cpObj.add_section(sK)
                for oK, oV in sV.items():
                    if isinstance(oV, (list, tuple, set)):
                        cpObj.set(sK, oK, delimiter.join(oV))
                    elif isinstance(oV, (dict)):
                        # Nested mappings are silently dropped.
                        continue
                    else:
                        cpObj.set(sK, oK, oV)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        return cpObj
def __getKeyValue(self, dct, keyName):
"""Return the value of the corresponding key expressed in dot notation in the input dictionary object (nested)."""
try:
kys = keyName.split(".")
for key in kys:
try:
dct = dct[key]
except KeyError:
return None
return dct
except Exception as e:
logger.exception("Failing for key %r with %s", keyName, str(e))
return None
def __getKeyExists(self, dct, keyName):
"""Return the key expressed in dot notation is in the input dictionary object (nested)."""
try:
kys = keyName.split(".")
for key in kys:
try:
dct = dct[key]
except KeyError:
return False
return True
except Exception as e:
logger.exception("Failing for key %r with %s", keyName, str(e))
return False
    def dump(self):
        """Log every configuration section and its option values at INFO level (debugging aid)."""
        for section in self.__cD:
            logger.info("Configuration section: %s", section)
            for opt in self.__cD[section]:
                logger.info(" ++++ option %s : %r ", opt, self.__cD[section][opt])
    def __decryptMessage(self, msg, hexKey):
        """Decrypt the input message.
        Args:
            msg (str): input message (base64-encoded NaCl SecretBox ciphertext)
            hexKey (str): encryption key (hex-encoded symmetric key)
        Returns:
            (str): decrypted message text, or None on any failure
        """
        txt = None
        try:
            box = SecretBox(hexKey, encoder=HexEncoder)
            bMsg = base64.b64decode(msg)
            dcrMsg = box.decrypt(bMsg)
            logger.debug("type %r text %r", type(dcrMsg), dcrMsg)
            txt = dcrMsg.decode("utf-8")
        except Exception as e:
            # Deliberately quiet (debug level): decryption failures return None.
            logger.debug("Failing with %s", str(e))
        return txt
| 41.058906 | 157 | 0.610282 |
ace5e91d3dbd4573179ac9b7f41dd121c5e748c8 | 5,603 | py | Python | adapters.py | kristijanbartol/keypoint-algorithms-benchmark | d3bf05220fd697fececb293ba139ca5c563acc21 | [
"BSD-2-Clause"
] | 16 | 2020-07-29T06:27:11.000Z | 2022-01-27T20:08:20.000Z | adapters.py | kristijanbartol/hpatches-benchmark | d3bf05220fd697fececb293ba139ca5c563acc21 | [
"BSD-2-Clause"
] | null | null | null | adapters.py | kristijanbartol/hpatches-benchmark | d3bf05220fd697fececb293ba139ca5c563acc21 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
from os import listdir
from os.path import join, isfile
from const import *
DATASET_ROOT = '../../../hpatches-sequences-release/'
KPZ_FILENAME = 'kp.npz'
DESC_FILENAME = 'des.npz'
def adapt_lfnet():
algo_name = 'lfnet'
for folder in listdir(DATASET_ROOT):
print('Processing directory: {}'.format(folder))
folder_path = join(DATASET_ROOT, folder)
kp_path = join(folder_path, KPZ_FILENAME)
desc_path = join(folder_path, DESC_FILENAME)
results_dir = join(folder_path, 'out')
kps = [0.] * 6
descs = [0.] * 6
for img_result_filename in filter(lambda f: 'ppm.npz' in f, listdir(results_dir)):
# Take index from filename as filter() doesn't keep the order.
img_idx = int(img_result_filename[0])
img_result_path = join(results_dir, img_result_filename)
img_result = dict(np.load(img_result_path))
orig_img = cv2.imread(join(folder_path, '{}.ppm'.format(img_idx)))
processed_img = cv2.imread(join(results_dir, '{}.ppm'.format(img_idx)))
x_scale = float(orig_img.shape[1]) / float(processed_img.shape[1])
y_scale = float(orig_img.shape[0]) / float(processed_img.shape[0])
img_result['kpts'][:,0] *= x_scale
img_result['kpts'][:,1] *= y_scale
kps[img_idx - 1] = img_result['kpts']
descs[img_idx - 1] = img_result['descs']
kp = dict(np.load(kp_path, allow_pickle=True)) if isfile(kp_path) else dict()
desc = dict(np.load(desc_path, allow_pickle=True)) if isfile(desc_path) else dict()
kp[algo_name] = np.array(kps)
desc[ALGO_TEMPLATE.format(algo_name, algo_name)] = np.array(descs)
np.savez(kp_path, **kp)
np.savez(desc_path, **desc)
def adapt_superpoint():
algo_name = 'superpoint'
x_dim = 320.
y_dim = 240.
for folder in listdir(DATASET_ROOT):
print('Processing directory: {}'.format(folder))
folder_path = join(DATASET_ROOT, folder)
kp_path = join(folder_path, KPZ_FILENAME)
desc_path = join(folder_path, DESC_FILENAME)
results_dir = join(folder_path, algo_name)
kps = [0.] * 6
descs = [0.] * 6
for img_result_filename in filter(lambda f: 'ppm.npz' in f, listdir(results_dir)):
# Take index from filename as filter() doesn't keep the order.
img_idx = int(img_result_filename[0])
img_result_path = join(results_dir, img_result_filename)
img_result = dict(np.load(img_result_path))
orig_img = cv2.imread(join(folder_path, '{}.ppm'.format(img_idx)))
processed_img = cv2.imread(join(results_dir, '{}.ppm'.format(img_idx)))
x_scale = float(orig_img.shape[1]) / x_dim
y_scale = float(orig_img.shape[0]) / y_dim
img_result['kpts'][:,0] *= x_scale
img_result['kpts'][:,1] *= y_scale
kps[img_idx - 1] = img_result['kpts'].T
descs[img_idx - 1] = img_result['descs'].T
kp = dict(np.load(kp_path, allow_pickle=True)) if isfile(kp_path) else dict()
desc = dict(np.load(desc_path, allow_pickle=True)) if isfile(desc_path) else dict()
kp[algo_name] = np.array(kps)
desc[ALGO_TEMPLATE.format(algo_name, algo_name)] = np.array(descs)
np.savez(kp_path, **kp)
np.savez(desc_path, **desc)
def adapt_d2net():
algo_name = 'd2net'
x_dim = 320.
y_dim = 240.
dataset_root = '/home/kristijan/hpatches-sequences-release/i_ajuntament/hpatches-sequences-release'
for folder in listdir(dataset_root):
print('Processing directory: {}'.format(folder))
folder_path = join(dataset_root, folder)
kp_path = join(folder_path, KPZ_FILENAME)
desc_path = join(folder_path, DESC_FILENAME)
# Results dir is folder dir for D2Net.
results_dir = folder_path
kps = [0.] * 6
descs = [0.] * 6
for img_result_filename in filter(lambda f: 'd2-net' in f, listdir(results_dir)):
# Take index from filename as filter() doesn't keep the order.
img_idx = int(img_result_filename[0])
img_result_path = join(results_dir, img_result_filename)
img_result = dict(np.load(img_result_path))
#orig_img = cv2.imread(join(folder_path, '{}.ppm'.format(img_idx)))
#processed_img = cv2.imread(join(results_dir, '{}.ppm'.format(img_idx)))
#x_scale = float(orig_img.shape[1]) / x_dim
#y_scale = float(orig_img.shape[0]) / y_dim
#img_result['kpts'][:,0] *= x_scale
#img_result['kpts'][:,1] *= y_scale
print(img_result_path)
kps[img_idx - 1] = img_result['keypoints'][:, :2]
descs[img_idx - 1] = img_result['descriptors'][:, :2]
kp = dict(np.load(kp_path, allow_pickle=True)) if isfile(kp_path) else dict()
desc = dict(np.load(desc_path, allow_pickle=True)) if isfile(desc_path) else dict()
kp[algo_name] = np.array(kps)
desc[ALGO_TEMPLATE.format(algo_name, algo_name)] = np.array(descs)
np.savez(kp_path, **kp)
np.savez(desc_path, **desc)
if __name__ == '__main__':
import argparse
parser_of_args = argparse.ArgumentParser(description='Select algorithm to adapt')
parser_of_args.add_argument('--algorithm', type=str,
help='name of the algorithm')
args = parser_of_args.parse_args()
result = locals()['adapt_{}'.format(args.algorithm)]()
| 36.861842 | 103 | 0.619668 |
ace5ea4f2cadcd749f53da4bd0a370d88a7d5c4e | 274 | py | Python | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Integration_Lag1Trend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Integration_Lag1Trend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/artificial/transf_Integration/trend_Lag1Trend/cycle_12/ar_12/test_artificial_1024_Integration_Lag1Trend_12_12_20.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generate and benchmark one artificial series: 1024 daily points, Lag1Trend
# trend, cycle length 12, AR order 12, Integration transform, no noise.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Integration", sigma = 0.0, exog_count = 20, ar_order = 12);
ace5eb7bdb10a1d02c3e0c0b4de78a0f2b264c8f | 463 | py | Python | communication/migrations/0019_auto_20161021_0005.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | communication/migrations/0019_auto_20161021_0005.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | communication/migrations/0019_auto_20161021_0005.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('communication', '0018_auto_20161019_0034'),
]
operations = [
migrations.AlterModelOptions(
name='mailbox',
options={'verbose_name_plural': 'Mailboxes', 'permissions': (('harvest_mailbox', 'Can run mailharvest on mailbox'),)},
),
]
| 24.368421 | 130 | 0.639309 |
ace5ebdf760ce20ac67bc580d0432c99c8d56ef7 | 11,409 | py | Python | train_nn.py | alexanderlarin/3dgnn | b9162098851721fe1a3538f0857171ef97f69c73 | [
"MIT"
] | 2 | 2019-05-30T14:26:03.000Z | 2019-05-30T20:02:43.000Z | train_nn.py | alexanderlarin/3dgnn | b9162098851721fe1a3538f0857171ef97f69c73 | [
"MIT"
] | null | null | null | train_nn.py | alexanderlarin/3dgnn | b9162098851721fe1a3538f0857171ef97f69c73 | [
"MIT"
] | null | null | null | import os
import sys
import time
import logging
import cv2
import numpy as np
import torch
import torch.backends.cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from tqdm import tqdm, tqdm_notebook
import config
from datasets import nyudv2
from models import Model
torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.benchmark = True
logger = logging.getLogger('3dgnn')
LABEL_IDX = {'<UNK>': 0, 'beam': 1, 'board': 2, 'bookcase': 3, 'ceiling': 4, 'chair': 5, 'clutter': 6,
'column': 7, 'door': 8, 'floor': 9, 'sofa': 10, 'table': 11, 'wall': 12, 'window': 13}
IDX_LABEL = {0: '<UNK>', 1: 'beam', 2: 'board', 3: 'bookcase', 4: 'ceiling', 5: 'chair', 6: 'clutter',
7: 'column', 8: 'door', 9: 'floor', 10: 'sofa', 11: 'table', 12: 'wall', 13: 'window'}
MODELS_EXT = '.pth'
CHECK_POINT_SEP = '_'
def find_last_check_point(models_dir, check_point_prefix):
check_point_idx = 0
for filename in os.listdir(models_dir):
if not os.path.isfile(os.path.join(models_dir, filename)):
continue
name, ext = os.path.splitext(filename)
name_parts = name.split(CHECK_POINT_SEP)
if ext == MODELS_EXT and name_parts[0] == check_point_prefix:
idx = int(name_parts[1])
check_point_idx = max(check_point_idx, idx)
if check_point_idx != 0:
return check_point_idx + 1, os.path.join(models_dir, f'{check_point_prefix}{CHECK_POINT_SEP}{check_point_idx!s}{MODELS_EXT}')
return 1, ''
def train_nn(dataset_path, hha_dir, save_models_dir, num_epochs=50, batch_size=4,
from_last_check_point=False, check_point_prefix='checkpoint',
start_epoch=0, pre_train_model='', notebook=False):
progress = tqdm_notebook if notebook else tqdm
logger.info('Loading data...')
dataset_tr = nyudv2.Dataset(dataset_path, hha_dir, flip_prob=config.flip_prob, crop_type='Random', crop_size=config.crop_size)
dataloader_tr = DataLoader(dataset_tr, batch_size=batch_size, shuffle=True,
num_workers=config.workers_tr, drop_last=False, pin_memory=True)
dataset_va = nyudv2.Dataset(dataset_path, hha_dir, flip_prob=0.0, crop_type='Center', crop_size=config.crop_size)
dataloader_va = DataLoader(dataset_va, batch_size=batch_size, shuffle=False,
num_workers=config.workers_va, drop_last=False, pin_memory=True)
if from_last_check_point:
start_epoch, pre_train_model = find_last_check_point(save_models_dir, check_point_prefix)
cv2.setNumThreads(config.workers_tr)
logger.info('Preparing model...')
model = Model(config.nclasses, config.mlp_num_layers, config.use_gpu)
loss = nn.NLLLoss(reduce=not config.use_bootstrap_loss, weight=torch.FloatTensor(config.class_weights))
softmax = nn.Softmax(dim=1)
log_softmax = nn.LogSoftmax(dim=1)
if config.use_gpu:
model = model.cuda()
loss = loss.cuda()
softmax = softmax.cuda()
log_softmax = log_softmax.cuda()
optimizer = torch.optim.Adam([{'params': model.decoder.parameters()},
{'params': model.gnn.parameters(), 'lr': config.gnn_initial_lr}],
lr=config.base_initial_lr, betas=config.betas, eps=config.eps,
weight_decay=config.weight_decay)
if config.lr_schedule_type == 'exp':
def lambda_1(lambda_epoch):
return pow((1 - ((lambda_epoch - 1) / num_epochs)), config.lr_decay)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_1)
elif config.lr_schedule_type == 'plateau':
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=config.lr_decay,
patience=config.lr_patience)
else:
logger.error('Bad scheduler')
exit(1)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
logger.info("Number of trainable parameters: %d", params)
def get_current_learning_rates():
learning_rates = []
for param_group in optimizer.param_groups:
learning_rates.append(param_group['lr'])
return learning_rates
def eval_set(dataloader):
model.eval()
with torch.no_grad():
loss_sum = 0.0
init_tensor_value = np.zeros(14 ** 2)
if config.use_gpu:
confusion_matrix = torch.cuda.FloatTensor(init_tensor_value)
else:
confusion_matrix = torch.FloatTensor(init_tensor_value)
start_time = time.time()
for batch_idx, rgbd_label_xy in progress(enumerate(dataloader), total=len(dataloader), desc=f'Eval set'):
x = rgbd_label_xy[0]
xy = rgbd_label_xy[2]
target = rgbd_label_xy[1].long()
x = x.float()
xy = xy.float()
input = x.permute(0, 3, 1, 2).contiguous()
xy = xy.permute(0, 3, 1, 2).contiguous()
if config.use_gpu:
input = input.cuda()
xy = xy.cuda()
target = target.cuda()
output = model(input, gnn_iterations=config.gnn_iterations, k=config.gnn_k, xy=xy,
use_gnn=config.use_gnn)
if config.use_bootstrap_loss:
loss_per_pixel = loss.forward(log_softmax(output.float()), target)
topk, indices = torch.topk(loss_per_pixel.view(output.size()[0], -1),
int((config.crop_size ** 2) * config.bootstrap_rate))
loss_ = torch.mean(topk)
else:
loss_ = loss.forward(log_softmax(output.float()), target)
loss_sum += loss_
pred = output.permute(0, 2, 3, 1).contiguous()
pred = pred.view(-1, config.nclasses)
pred = softmax(pred)
pred_max_val, pred_arg_max = pred.max(1)
pairs = target.view(-1) * 14 + pred_arg_max.view(-1)
for i in range(14 ** 2):
cumu = pairs.eq(i).float().sum()
confusion_matrix[i] += cumu.item()
sys.stdout.write(" - Eval time: {:.2f}s \n".format(time.time() - start_time))
loss_sum /= len(dataloader)
confusion_matrix = confusion_matrix.cpu().numpy().reshape((14, 14))
class_iou = np.zeros(14)
confusion_matrix[0, :] = np.zeros(14)
confusion_matrix[:, 0] = np.zeros(14)
for i in range(1, 14):
class_iou[i] = confusion_matrix[i, i] / (
np.sum(confusion_matrix[i, :]) + np.sum(confusion_matrix[:, i]) - confusion_matrix[i, i])
return loss_sum.item(), class_iou, confusion_matrix
# Training parameter
logger.info(f'Num_epochs: {num_epochs}')
interval_to_show = 100
train_losses = []
eval_losses = []
if pre_train_model:
logger.info(f'Loading pre-train model {pre_train_model}... ')
model.load_state_dict(torch.load(pre_train_model))
else:
logger.info('Starting training from scratch...')
# Training
for epoch in progress(range(start_epoch, num_epochs + 1), desc='Training'):
batch_loss_avg = 0
if config.lr_schedule_type == 'exp':
scheduler.step(epoch)
for batch_idx, rgbd_label_xy in progress(enumerate(dataloader_tr), total=len(dataloader_tr),
desc=f'Epoch {epoch}'):
x = rgbd_label_xy[0]
target = rgbd_label_xy[1].long()
xy = rgbd_label_xy[2]
x = x.float()
xy = xy.float()
input = x.permute(0, 3, 1, 2).contiguous()
input = input.type(torch.FloatTensor)
if config.use_gpu:
input = input.cuda()
xy = xy.cuda()
target = target.cuda()
xy = xy.permute(0, 3, 1, 2).contiguous()
optimizer.zero_grad()
model.train()
output = model(input, gnn_iterations=config.gnn_iterations, k=config.gnn_k, xy=xy, use_gnn=config.use_gnn)
if config.use_bootstrap_loss:
loss_per_pixel = loss.forward(log_softmax(output.float()), target)
topk, indices = torch.topk(loss_per_pixel.view(output.size()[0], -1),
int((config.crop_size ** 2) * config.bootstrap_rate))
loss_ = torch.mean(topk)
else:
loss_ = loss.forward(log_softmax(output.float()), target)
loss_.backward()
optimizer.step()
batch_loss_avg += loss_.item()
if batch_idx % interval_to_show == 0 and batch_idx > 0:
batch_loss_avg /= interval_to_show
train_losses.append(batch_loss_avg)
logger.info("E%dB%d Batch loss average: %s", epoch, batch_idx, batch_loss_avg)
print('\rEpoch:{}, Batch:{}, loss average:{}'.format(epoch, batch_idx, batch_loss_avg))
batch_loss_avg = 0
batch_idx = len(dataloader_tr)
logger.info("E%dB%d Saving model...", epoch, batch_idx)
torch.save(model.state_dict(),
os.path.join(save_models_dir, f'{check_point_prefix}{CHECK_POINT_SEP}{epoch!s}{MODELS_EXT}'))
# Evaluation
eval_loss, class_iou, confusion_matrix = eval_set(dataloader_va)
eval_losses.append(eval_loss)
if config.lr_schedule_type == 'plateau':
scheduler.step(eval_loss)
print('Learning ...')
logger.info("E%dB%d Def learning rate: %s", epoch, batch_idx, get_current_learning_rates()[0])
print('Epoch{} Def learning rate: {}'.format(epoch, get_current_learning_rates()[0]))
logger.info("E%dB%d GNN learning rate: %s", epoch, batch_idx, get_current_learning_rates()[1])
print('Epoch{} GNN learning rate: {}'.format(epoch, get_current_learning_rates()[1]))
logger.info("E%dB%d Eval loss: %s", epoch, batch_idx, eval_loss)
print('Epoch{} Eval loss: {}'.format(epoch, eval_loss))
logger.info("E%dB%d Class IoU:", epoch, batch_idx)
print('Epoch{} Class IoU:'.format(epoch))
for cl in range(14):
logger.info("%+10s: %-10s" % (IDX_LABEL[cl], class_iou[cl]))
print('{}:{}'.format(IDX_LABEL[cl], class_iou[cl]))
logger.info("Mean IoU: %s", np.mean(class_iou[1:]))
print("Mean IoU: %.2f" % np.mean(class_iou[1:]))
logger.info("E%dB%d Confusion matrix:", epoch, batch_idx)
logger.info(confusion_matrix)
logger.info('Finished training!')
logger.info('Saving trained model...')
torch.save(model.state_dict(), os.path.join(save_models_dir, f'finish{MODELS_EXT}'))
eval_loss, class_iou, confusion_matrix = eval_set(dataloader_va)
logger.info('Eval loss: %s', eval_loss)
logger.info('Class IoU:')
for cl in range(14):
logger.info("%+10s: %-10s" % (IDX_LABEL[cl], class_iou[cl]))
logger.info(f'Mean IoU: {np.mean(class_iou[1:])}')
| 42.412639 | 133 | 0.600403 |
ace5ed1364472452d074efdeaa63e14810a473f6 | 13,064 | py | Python | gnomad_mitochondria/mutserve_batch/process_mutserv.py | rahulg603/gnomad-mitochondria | 20753e4f9d3d90a9abaf051aa14b4e28697632c3 | [
"MIT"
] | 1 | 2021-09-05T10:56:12.000Z | 2021-09-05T10:56:12.000Z | gnomad_mitochondria/mutserve_batch/process_mutserv.py | rahulg603/gnomad-mitochondria | 20753e4f9d3d90a9abaf051aa14b4e28697632c3 | [
"MIT"
] | 13 | 2021-08-25T13:44:53.000Z | 2022-01-10T15:23:49.000Z | gnomad_mitochondria/mutserve_batch/process_mutserv.py | rahulg603/gnomad-mitochondria | 20753e4f9d3d90a9abaf051aa14b4e28697632c3 | [
"MIT"
] | 2 | 2021-10-06T06:46:28.000Z | 2021-12-13T20:42:21.000Z | #!/usr/bin/env python
import argparse
import logging
import os
import pysam
import re
import statistics
import sys
from io import TextIOWrapper
from os.path import basename, splitext
from subprocess import Popen, PIPE, check_output
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s: %(asctime)s: %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger("run mutserv")
logger.setLevel(logging.INFO)
def evaluate_variant(
alt: str,
pos: int,
variant_level: float,
dp: int,
ref: str,
filters: str,
results_out: TextIOWrapper,
line_count: int,
num_lines: int,
input_vcf: TextIOWrapper,
mt_reference: str,
) -> None:
"""
Check if variant is a SNP or insertion (output as is) or deletion (initiate deletion function).
Note: Initiating the deletion function will collapse multiple deletion calls into one deletion when certain conditions are met. This functionality is needed as deletions are output at individuals bases, but if it seems likely that several deletions are actually one large deletion, we want to represent the multiple deletions as just one deletion.
:param alt: Alternate allele
:param pos: Position of the variant
:param variant_level: Heteroplasmy level of the variant
:param dp: Depth of coverage at the given position
:param ref: Reference allele
:param filters: Filter fields
:param results_out: Open filehandle where results should be written
:param line_count: Line number for current iteration (count for row in the VCF)
:param num_lines: Total number of lines in file
:param input_vcf: Open filehandle for input vcf
:param mt_reference: Path to mitochondria reference fasta
:return: None
"""
# If the alternate allele is "*", start the process to collapse the deletion, otherwise output the variant
if alt == "*":
initiate_deletion(
pos,
variant_level,
dp,
ref,
results_out,
line_count,
num_lines,
input_vcf,
mt_reference,
)
else:
results_out.write(
f"MT\t{pos}\t.\t{ref}\t{alt}\t.\t{filters}\tAF=.\tGT:AD:VL:DP\t.:.:{variant_level}:{dp}\n"
)
def format_variant_info(line_items: list) -> tuple:
    """
    Reformat VCF to pull out needed info to pass on to downstream functions and to correct types.

    :param line_items: Variant row of the VCF, with fields split into a list
    :return: Tuple of (position, reference allele, alternate allele,
        depth of coverage, variant level, filters)

    Fix vs. original: the docstring and return annotation claimed a list, but
    the function returns a 6-tuple (callers unpack it positionally).
    """
    pos = int(line_items[1])
    ref = line_items[3]
    alt = line_items[4]
    filters = line_items[6]
    sample_info = line_items[9]
    gt_fields = sample_info.split(":")
    gt = gt_fields[0]
    if gt == "1":
        # Homoplasmic call (GT:DP layout): variant level is implicitly 100%.
        dp = gt_fields[1]
        vl = 1.0
    else:
        # Heteroplasmic call (GT:VL:DP layout).
        dp = gt_fields[2]
        vl = gt_fields[1]
    dp = int(dp)
    vl = float(vl)
    return (pos, ref, alt, dp, vl, filters)
def close_deletion(
    first_deletion_position: int,
    num_deleted_bases: int,
    deletion_coverage_depths: list,
    deletion_variant_levels: list,
    mt_reference: str,
) -> tuple:
    """
    Close the deletion.
    Take input of single or multiple deletions and output one deletion variant to the VCF. If multiple deletions are collapsed into one deletion, the variant level and DP are the average of all positions that constitute the deletion.
    :param first_deletion_position: First position of the deletion (the anchor base before the deleted run)
    :param num_deleted_bases: Number of bases that are deleted
    :param deletion_coverage_depths: List of depth of coverage for all the positions of the deletion
    :param deletion_variant_levels: List of heteroplasmy levels for all the positions of the deletion
    :param mt_reference: Path to mitochondria reference fasta
    :return: tuple of (first deletion position, reference bases, deletion_alt
        (first base of the reference bases), average depth across the deletion
        positions, average variant level across the deletion positions, last
        position of the deletion)
    """
    end_deletion = first_deletion_position + num_deleted_bases
    # Fetch the anchor base plus the deleted run from the rCRS reference.
    fasta_open = pysam.Fastafile(mt_reference)
    ref_bases = fasta_open.fetch("rCRS", first_deletion_position - 1, end_deletion)
    deletion_alt = ref_bases[
        0
    ]  # Grab just the first position of the ref bases to report in the ALT column of the VCF
    # Calculate the average depth across all positions of the deletion
    deletion_dp = int(statistics.mean(deletion_coverage_depths))
    # Calculate the average variant level across all positions of the deletion
    deletion_variant_level = statistics.mean(deletion_variant_levels)
    return (
        first_deletion_position,
        ref_bases,
        deletion_alt,
        deletion_dp,
        deletion_variant_level,
        end_deletion,
    )
def initiate_deletion(
    pos: int,
    variant_level: float,
    dp: int,
    ref: str,
    results_out: TextIOWrapper,
    line_count: int,
    num_lines: int,
    input_vcf: TextIOWrapper,
    mt_reference: str,
) -> None:
    """
    Initiate a deletion.
    Check next variant and if it's a deletion at the consecutive position with a variant level +/- 10% of the current deletion,
    append that deletion to the current one (count as one larger deletion).
    For example, given three deletions at positions 2, 3, 4, such as (in format of POS REF ALT):
    2 A *
    3 A *
    4 C *
    If these deletions had similar variant levels, the three deletions would be reformatted to:
    1 TAAC T
    :param pos: Position
    :param variant_level: Heteroplasmy level
    :param dp: Depth of coverage at the given position
    :param ref: Reference allele
    :param results_out: Open filehandle where results should be written
    :param line_count: Line number for current iteration
    :param num_lines: Total number of lines in file
    :param input_vcf: Open filehandle for input vcf
    :param mt_reference: Path to mitochondria reference fasta
    :return: None

    Note: this function consumes further lines from *input_vcf*; when the
    extension stops, the terminating variant is itself dispatched through
    evaluate_variant() so no record is dropped.  The +/-10% tolerance is
    applied against the level of the most recently appended base, so levels
    may drift over a long deletion.
    """
    # Anchor base is the position immediately before the first deleted base.
    first_deletion_position = pos - 1
    num_deleted_bases = 1
    deletion_variant_levels = [variant_level]
    deletion_coverage_depths = [dp]
    deletion_pos = pos
    deletion_variant_level = variant_level
    # Close deletion if you've hit the last line of the VCF
    if line_count == num_lines:
        (
            first_deletion_position,
            ref_bases,
            deletion_alt,
            deletion_dp,
            deletion_variant_level,
            end_deletion,
        ) = close_deletion(
            first_deletion_position,
            num_deleted_bases,
            deletion_coverage_depths,
            deletion_variant_levels,
            mt_reference,
        )
        results_out.write(
            f"MT\t{first_deletion_position}\t.\t{ref_bases}\t{deletion_alt}\t.\tPASS\tAF=.\tGT:AD:VL:DP\t.:.:{deletion_variant_level}:{deletion_dp}\n"
        )
    else:
        for next_variant in input_vcf:
            line_count += 1
            next_variant = next_variant.rstrip()
            next_items = next_variant.split("\t")
            pos, ref, alt, dp, variant_level, filters = format_variant_info(next_items)
            # Extend the deletion if the bases are consecutive and the variant levels differ by no more than 10% heteroplasmy
            if (
                alt == "*"
                and pos == (deletion_pos + 1)
                and variant_level < (deletion_variant_level + 0.10)
                and variant_level > (deletion_variant_level - 0.10)
            ):
                deletion_pos = pos
                deletion_variant_level = variant_level
                num_deleted_bases += 1
                deletion_variant_levels.append(variant_level)
                deletion_coverage_depths.append(dp)
                # Close deletion if you've hit the last line of the VCF
                if line_count == num_lines:
                    (
                        first_deletion_position,
                        ref_bases,
                        deletion_alt,
                        deletion_dp,
                        deletion_variant_level,
                        end_deletion,
                    ) = close_deletion(
                        first_deletion_position,
                        num_deleted_bases,
                        deletion_coverage_depths,
                        deletion_variant_levels,
                        mt_reference,
                    )
                    results_out.write(
                        f"MT\t{first_deletion_position}\t.\t{ref_bases}\t{deletion_alt}\t.\tPASS\tAF=.\tGT:AD:VL:DP\t.:.:{deletion_variant_level}:{deletion_dp}\n"
                    )
            # When the bases of the deletion are not consecutive and/or the variant levels differ by more than 10% heteroplasmy, close the deletion
            else:
                (
                    first_deletion_position,
                    ref_bases,
                    deletion_alt,
                    deletion_dp,
                    deletion_variant_level,
                    end_deletion,
                ) = close_deletion(
                    first_deletion_position,
                    num_deleted_bases,
                    deletion_coverage_depths,
                    deletion_variant_levels,
                    mt_reference,
                )
                # Handle exception where deletion is at first reference position
                # From VCF spec: "must include the base before the event (which must be reflected in the POS field), unless the event occurs at position 1 on the contig in which case it must include the base after the event"
                # For example, a deletion of A at the first position would be represented as (in the order of CHROM POS REF ALT): chromosome_name 1 AG G
                if first_deletion_position == 0:
                    first_deletion_position = 1
                    one_over = end_deletion + 1
                    fasta_open = pysam.Fastafile(mt_reference)
                    ref_bases = fasta_open.fetch(
                        "rCRS", first_deletion_position - 1, one_over
                    )
                    deletion_alt = ref_bases[
                        0
                    ]  # Grab just the first position of the ref bases to report in the ALT column of the VCF
                results_out.write(
                    f"MT\t{first_deletion_position}\t.\t{ref_bases}\t{deletion_alt}\t.\tPASS\tAF=.\tGT:AD:VL:DP\t.:.:{deletion_variant_level}:{deletion_dp}\n"
                )
                # The variant that broke the deletion run still needs handling.
                evaluate_variant(
                    alt,
                    pos,
                    variant_level,
                    dp,
                    ref,
                    filters,
                    results_out,
                    line_count,
                    num_lines,
                    input_vcf,
                    mt_reference,
                )
                break
def main(args):  # noqa: D103
    """Reformat a mutserv/mtDNA-Server VCF for downstream combining.

    Reads ``args.input_file`` line by line, passes header lines (``#``)
    through unchanged, reformats each variant record via
    ``format_variant_info``, and hands it to ``evaluate_variant`` to
    classify (SNP / insertion / deletion) and collapse runs of
    consecutive single-base deletions.  Output is written to
    ``args.output_file``.
    """
    input_file = args.input_file
    output_file = args.output_file
    mt_reference = args.mt_reference
    # Count the lines in the file so evaluate_variant can tell when the
    # final VCF record is being processed (an open deletion must be
    # closed at the last line).
    wc_output = check_output(["wc", "-l", f"{input_file}"])
    # Split on any whitespace rather than split(" "): BSD/macOS `wc`
    # left-pads the count with spaces, which made split(" ")[0] return ""
    # and crash int().
    num_lines = int(wc_output.decode().split()[0])
    logger.info("Reformatting VCF...")
    # Reformat VCF to be compatible with the combine_vcfs.py script.
    # NOTE(review): line_count starts at 1 and is incremented *before*
    # each line is handled, so it holds (1-based line index + 1); this
    # matches the original behavior, but confirm the
    # `line_count == num_lines` last-line check in evaluate_variant is
    # intentional and not an off-by-one.
    line_count = 1
    # `with` guarantees both files are closed even if reformatting raises
    # (the original left results_out open on failure).
    with open(output_file, "w") as results_out, open(input_file, "r") as input_vcf:
        for line in input_vcf:
            line_count += 1
            if line.startswith("#"):
                # Header lines are passed through untouched.
                results_out.write(line)
            else:
                line = line.rstrip()
                items = line.split("\t")
                # Format the VCF content
                pos, ref, alt, dp, vl, filters = format_variant_info(items)
                # Evaluate the variant (check if it's a SNP, insertion, or
                # deletion) and collapse multiple deletions into one
                # deletion if necessary.
                evaluate_variant(
                    alt,
                    pos,
                    vl,
                    dp,
                    ref,
                    filters,
                    results_out,
                    line_count,
                    num_lines,
                    input_vcf,
                    mt_reference,
                )
if __name__ == "__main__":
p = argparse.ArgumentParser("This script runs mutserv and reformats the output")
p.add_argument(
"-i",
"--input-file",
required=True,
help="The VCF output by mtDNA-Server with multiallelic sites split",
)
p.add_argument(
"-o", "--output-file", required=True, help="Name to use for the output file"
)
p.add_argument(
"-r", "--mt-reference", required=True, help="Mitochondria rCRS reference fasta"
)
args = p.parse_args()
main(args)
| 36.8 | 351 | 0.604333 |
ace5edfeae5fa51a9c615912ed762dc2dbcef70b | 136,140 | py | Python | src/azure-cli/azure/cli/command_modules/storage/_params.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_params.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/storage/_params.py | susanshi/azure-cli | 11270e8c69d227a56c6d9563ed22837b0f056fb4 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.validators import get_default_location_from_resource_group
from azure.cli.core.commands.parameters import (tags_type, file_type, get_location_type, get_enum_type,
get_three_state_flag, edge_zone_type)
from azure.cli.core.local_context import LocalContextAttribute, LocalContextAction, ALL
from ._validators import (get_datetime_type, validate_metadata, get_permission_validator, get_permission_help_string,
resource_type_type, services_type, validate_entity, validate_select, validate_blob_type,
validate_included_datasets_validator, validate_custom_domain, validate_hns_migration_type,
validate_container_public_access,
validate_table_payload_format, add_progress_callback, process_resource_group,
storage_account_key_options, process_file_download_namespace, process_metric_update_namespace,
get_char_options_validator, validate_bypass, validate_encryption_source, validate_marker,
validate_storage_data_plane_list, validate_azcopy_upload_destination_url,
validate_azcopy_remove_arguments, as_user_validator, parse_storage_account,
validate_delete_retention_days, validate_container_delete_retention_days,
validate_file_delete_retention_days, validator_change_feed_retention_days,
validate_fs_public_access, validate_logging_version, validate_or_policy, validate_policy,
get_api_version_type, blob_download_file_path_validator, blob_tier_validator, validate_subnet,
validate_blob_name_for_upload)
def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statements, too-many-lines, too-many-branches
from argcomplete.completers import FilesCompleter
from six import u as unicode_string
from knack.arguments import ignore_type, CLIArgumentType
from azure.cli.core.commands.parameters import get_resource_name_completion_list
from .sdkutil import get_table_data_type
from .completers import get_storage_name_completion_list
t_base_blob_service = self.get_sdk('blob.baseblobservice#BaseBlobService')
t_file_service = self.get_sdk('file#FileService')
t_queue_service = self.get_sdk('queue#QueueService')
t_table_service = get_table_data_type(self.cli_ctx, 'table', 'TableService')
storage_account_type = CLIArgumentType(options_list='--storage-account',
help='The name or ID of the storage account.',
validator=parse_storage_account, id_part='name')
acct_name_type = CLIArgumentType(options_list=['--account-name', '-n'], help='The storage account name.',
id_part='name',
completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'),
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.GET]))
blob_name_type = CLIArgumentType(options_list=['--blob-name', '-b'], help='The blob name.',
completer=get_storage_name_completion_list(t_base_blob_service, 'list_blobs',
parent='container_name'))
container_name_type = CLIArgumentType(options_list=['--container-name', '-c'], help='The container name.',
completer=get_storage_name_completion_list(t_base_blob_service,
'list_containers'))
directory_type = CLIArgumentType(options_list=['--directory-name', '-d'], help='The directory name.',
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
file_name_type = CLIArgumentType(options_list=['--file-name', '-f'],
completer=get_storage_name_completion_list(t_file_service,
'list_directories_and_files',
parent='share_name'))
share_name_type = CLIArgumentType(options_list=['--share-name', '-s'], help='The file share name.',
completer=get_storage_name_completion_list(t_file_service, 'list_shares'))
table_name_type = CLIArgumentType(options_list=['--table-name', '-t'],
completer=get_storage_name_completion_list(t_table_service, 'list_tables'))
queue_name_type = CLIArgumentType(options_list=['--queue-name', '-q'], help='The queue name.',
completer=get_storage_name_completion_list(t_queue_service, 'list_queues'))
progress_type = CLIArgumentType(help='Include this flag to disable progress reporting for the command.',
action='store_true', validator=add_progress_callback)
socket_timeout_type = CLIArgumentType(help='The socket timeout(secs), used by the service to regulate data flow.',
type=int)
large_file_share_type = CLIArgumentType(
action='store_true', min_api='2019-04-01',
help='Enable the capability to support large file shares with more than 5 TiB capacity for storage account.'
'Once the property is enabled, the feature cannot be disabled. Currently only supported for LRS and '
'ZRS replication types, hence account conversions to geo-redundant accounts would not be possible. '
'For more information, please refer to https://go.microsoft.com/fwlink/?linkid=2086047.')
adds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2019-04-01',
arg_group='Azure Files Identity Based Authentication',
help='Enable Azure Files Active Directory Domain Service Authentication for '
'storage account. When --enable-files-adds is set to true, Azure Active '
'Directory Properties arguments must be provided.')
aadds_type = CLIArgumentType(arg_type=get_three_state_flag(), min_api='2018-11-01',
arg_group='Azure Files Identity Based Authentication',
help='Enable Azure Active Directory Domain Services authentication for Azure Files')
domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the primary domain that the AD DNS server is authoritative for. "
"Required when --enable-files-adds is set to True")
net_bios_domain_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the NetBIOS domain name. "
"Required when --enable-files-adds is set to True")
forest_name_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the Active Directory forest to get. "
"Required when --enable-files-adds is set to True")
domain_guid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the domain GUID. Required when --enable-files-adds is set to True")
domain_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID). Required when --enable-files-adds "
"is set to True")
azure_storage_sid_type = CLIArgumentType(min_api='2019-04-01', arg_group="Azure Active Directory Properties",
help="Specify the security identifier (SID) for Azure Storage. "
"Required when --enable-files-adds is set to True")
exclude_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
include_pattern_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these files where the name '
'matches the pattern list. For example: *.jpg;*.pdf;exactName. This '
'option supports wildcard characters (*)')
exclude_path_type = CLIArgumentType(arg_group='Additional Flags', help='Exclude these paths. This option does not '
'support wildcard characters (*). Checks relative path prefix. For example: '
'myFolder;myFolder/subDirName/file.pdf.')
include_path_type = CLIArgumentType(arg_group='Additional Flags', help='Include only these paths. This option does '
'not support wildcard characters (*). Checks relative path prefix. For example:'
'myFolder;myFolder/subDirName/file.pdf')
recursive_type = CLIArgumentType(options_list=['--recursive', '-r'], action='store_true',
help='Look into sub-directories recursively.')
sas_help = 'The permissions the SAS grants. Allowed values: {}. Do not use if a stored access policy is ' \
'referenced with --id that specifies this value. Can be combined.'
t_routing_choice = self.get_models('RoutingChoice', resource_type=ResourceType.MGMT_STORAGE)
routing_choice_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_enum_type(t_routing_choice),
help='Routing Choice defines the kind of network routing opted by the user.',
min_api='2019-06-01')
publish_microsoft_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether microsoft routing storage endpoints are to be published.')
publish_internet_endpoints_type = CLIArgumentType(
arg_group='Routing Preference', arg_type=get_three_state_flag(), min_api='2019-06-01',
help='A boolean flag which indicates whether internet routing storage endpoints are to be published.')
umask_type = CLIArgumentType(
help='When creating a file or directory and the parent folder does not have a default ACL, the umask restricts '
'the permissions of the file or directory to be created. The resulting permission is given by p & ^u, '
'where p is the permission and u is the umask. For more information, please refer to '
'https://docs.microsoft.com/azure/storage/blobs/data-lake-storage-access-control#umask.')
permissions_type = CLIArgumentType(
help='POSIX access permissions for the file owner, the file owning group, and others. Each class may be '
'granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) '
'and 4-digit octal notation (e.g. 0766) are supported. For more information, please refer to https://'
'docs.microsoft.com/azure/storage/blobs/data-lake-storage-access-control#levels-of-permission.')
timeout_type = CLIArgumentType(
help='Request timeout in seconds. Applies to each call to the service.', type=int
)
marker_type = CLIArgumentType(
help='A string value that identifies the portion of the list of containers to be '
'returned with the next listing operation. The operation returns the NextMarker value within '
'the response body if the listing operation did not return all containers remaining to be listed '
'with the current page. If specified, this generator will begin returning results from the point '
'where the previous generator stopped.')
num_results_type = CLIArgumentType(
default=5000, validator=validate_storage_data_plane_list,
help='Specify the maximum number to return. If the request does not specify '
'num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that '
'if the listing operation crosses a partition boundary, then the service will return a continuation token '
'for retrieving the remaining of the results. Provide "*" to return all.'
)
if_modified_since_type = CLIArgumentType(
help='Commence only if modified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
if_unmodified_since_type = CLIArgumentType(
help='Commence only if unmodified since supplied UTC datetime (Y-m-d\'T\'H:M\'Z\')',
type=get_datetime_type(False))
allow_shared_key_access_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-shared-key-access', '-k'], min_api='2019-04-01',
help='Indicate whether the storage account permits requests to be authorized with the account access key via '
'Shared Key. If false, then all requests, including shared access signatures, must be authorized with '
'Azure Active Directory (Azure AD). The default value is null, which is equivalent to true.')
sas_expiration_period_type = CLIArgumentType(
options_list=['--sas-expiration-period', '--sas-exp'], min_api='2021-02-01',
help='Expiration period of the SAS Policy assigned to the storage account, DD.HH:MM:SS.'
)
key_expiration_period_in_days_type = CLIArgumentType(
options_list=['--key-expiration-period-in-days', '--key-exp-days'], min_api='2021-02-01', type=int,
help='Expiration period in days of the Key Policy assigned to the storage account'
)
allow_cross_tenant_replication_type = CLIArgumentType(
arg_type=get_three_state_flag(), options_list=['--allow-cross-tenant-replication', '-r'], min_api='2021-04-01',
help='Allow or disallow cross AAD tenant object replication. The default interpretation is true for this '
'property.')
t_share_permission = self.get_models('DefaultSharePermission', resource_type=ResourceType.MGMT_STORAGE)
default_share_permission_type = CLIArgumentType(
options_list=['--default-share-permission', '-d'],
arg_type=get_enum_type(t_share_permission),
min_api='2020-08-01-preview',
arg_group='Azure Files Identity Based Authentication',
help='Default share permission for users using Kerberos authentication if RBAC role is not assigned.')
t_blob_tier = self.get_sdk('_generated.models._azure_blob_storage_enums#AccessTierOptional',
resource_type=ResourceType.DATA_STORAGE_BLOB)
t_rehydrate_priority = self.get_sdk('_generated.models._azure_blob_storage_enums#RehydratePriority',
resource_type=ResourceType.DATA_STORAGE_BLOB)
tier_type = CLIArgumentType(
arg_type=get_enum_type(t_blob_tier), min_api='2019-02-02',
help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob '
'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 '
'and this is only applicable to page blobs on premium storage accounts; For block blob, possible '
'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.'
)
rehydrate_priority_type = CLIArgumentType(
arg_type=get_enum_type(t_rehydrate_priority), options_list=('--rehydrate-priority', '-r'),
min_api='2019-02-02',
help='Indicate the priority with which to rehydrate an archived blob.')
action_type = CLIArgumentType(
help='The action of virtual network rule. Possible value is Allow.'
)
public_network_access_enum = self.get_sdk('models._storage_management_client_enums#PublicNetworkAccess',
resource_type=ResourceType.MGMT_STORAGE)
with self.argument_context('storage') as c:
c.argument('container_name', container_name_type)
c.argument('directory_name', directory_type)
c.argument('share_name', share_name_type)
c.argument('table_name', table_name_type)
c.argument('retry_wait', options_list=('--retry-interval',))
c.ignore('progress_callback')
c.argument('metadata', nargs='+',
help='Metadata in space-separated key=value pairs. This overwrites any existing metadata.',
validator=validate_metadata)
c.argument('timeout', help='Request timeout in seconds. Applies to each call to the service.', type=int)
with self.argument_context('storage', arg_group='Precondition') as c:
c.argument('if_modified_since', if_modified_since_type)
c.argument('if_unmodified_since', if_unmodified_since_type)
c.argument('if_match')
c.argument('if_none_match')
for item in ['delete', 'show', 'update', 'show-connection-string', 'keys', 'network-rule', 'revoke-delegation-keys', 'failover', 'hns-migration']: # pylint: disable=line-too-long
with self.argument_context('storage account {}'.format(item)) as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'])
c.argument('resource_group_name', required=False, validator=process_resource_group)
with self.argument_context('storage account blob-inventory-policy') as c:
c.ignore('blob_inventory_policy_name')
c.argument('resource_group_name', required=False, validator=process_resource_group)
c.argument('account_name',
help='The name of the storage account within the specified resource group. Storage account names '
'must be between 3 and 24 characters in length and use numbers and lower-case letters only.')
with self.argument_context('storage account blob-inventory-policy create') as c:
c.argument('policy', type=file_type, completer=FilesCompleter(),
help='The Storage Account Blob Inventory Policy, string in JSON format or json file path. See more '
'details in https://docs.microsoft.com/azure/storage/blobs/blob-inventory#inventory-policy.')
with self.argument_context('storage account check-name') as c:
c.argument('name', options_list=['--name', '-n'],
help='The name of the storage account within the specified resource group')
with self.argument_context('storage account delete') as c:
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], local_context_attribute=None)
with self.argument_context('storage account create', resource_type=ResourceType.MGMT_STORAGE) as c:
t_account_type, t_sku_name, t_kind, t_tls_version = \
self.get_models('AccountType', 'SkuName', 'Kind', 'MinimumTlsVersion',
resource_type=ResourceType.MGMT_STORAGE)
t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('location', get_location_type(self.cli_ctx), validator=get_default_location_from_resource_group)
c.argument('account_type', help='The storage account type', arg_type=get_enum_type(t_account_type))
c.argument('account_name', acct_name_type, options_list=['--name', '-n'], completer=None,
local_context_attribute=LocalContextAttribute(
name='storage_account_name', actions=[LocalContextAction.SET], scopes=[ALL]))
c.argument('kind', help='Indicate the type of storage account.',
arg_type=get_enum_type(t_kind),
default='StorageV2' if self.cli_ctx.cloud.profile == 'latest' else 'Storage')
c.argument('https_only', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow https traffic only to storage service if set to true. The default value is true.')
c.argument('https_only', arg_type=get_three_state_flag(), max_api='2018-11-01',
help='Allow https traffic only to storage service if set to true. The default value is false.')
c.argument('tags', tags_type)
c.argument('custom_domain', help='User domain assigned to the storage account. Name is the CNAME source.')
c.argument('sku', help='The storage account SKU.', arg_type=get_enum_type(t_sku_name, default='standard_ragrs'))
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('enable_hierarchical_namespace', arg_type=get_three_state_flag(),
options_list=['--enable-hierarchical-namespace', '--hns',
c.deprecate(target='--hierarchical-namespace', redirect='--hns', hide=True)],
help=" Allow the blob service to exhibit filesystem semantics. This property can be enabled only "
"when storage account kind is StorageV2.",
min_api='2018-02-01')
c.argument('encryption_key_type_for_table', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Table service. "Account": Table will be encrypted '
'with account-scoped encryption key. "Service": Table will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-table', '-t'])
c.argument('encryption_key_type_for_queue', arg_type=get_enum_type(['Account', 'Service']),
help='Set the encryption key type for Queue service. "Account": Queue will be encrypted '
'with account-scoped encryption key. "Service": Queue will always be encrypted with '
'service-scoped keys. Currently the default encryption key type is "Service".',
min_api='2019-06-01', options_list=['--encryption-key-type-for-queue', '-q'])
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
arg_type=get_three_state_flag(),
help='A boolean indicating whether or not the service applies a secondary layer of encryption with '
'platform managed keys for data at rest.')
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
c.argument('edge_zone', edge_zone_type, min_api='2020-08-01-preview')
c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity',
help='The identity type.')
c.argument('user_identity_id', arg_group='Identity',
help='The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is '
'permitted here.')
c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True)
c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True)
c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type)
c.argument('default_share_permission', default_share_permission_type)
c.argument('enable_nfs_v3', arg_type=get_three_state_flag(), is_preview=True, min_api='2021-01-01',
help='NFS 3.0 protocol support enabled if sets to true.')
c.argument('public_network_access', arg_type=get_enum_type(public_network_access_enum), min_api='2021-06-01',
help='Enable or disable public network access to the storage account. '
'Possible values include: `Enabled` or `Disabled`.')
with self.argument_context('storage account private-endpoint-connection',
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'],
help='The name of the private endpoint connection associated with the Storage Account.')
for item in ['approve', 'reject', 'show', 'delete']:
with self.argument_context('storage account private-endpoint-connection {}'.format(item),
resource_type=ResourceType.MGMT_STORAGE) as c:
c.argument('private_endpoint_connection_name', options_list=['--name', '-n'], required=False,
help='The name of the private endpoint connection associated with the Storage Account.')
c.extra('connection_id', options_list=['--id'],
help='The ID of the private endpoint connection associated with the Storage Account. You can get '
'it using `az storage account show`.')
c.argument('account_name', help='The storage account name.', required=False)
c.argument('resource_group_name', help='The resource group name of specified storage account.',
required=False)
c.argument('description', help='Comments for {} operation.'.format(item))
with self.argument_context('storage account update', resource_type=ResourceType.MGMT_STORAGE) as c:
t_tls_version = self.get_models('MinimumTlsVersion', resource_type=ResourceType.MGMT_STORAGE)
t_identity_type = self.get_models('IdentityType', resource_type=ResourceType.MGMT_STORAGE)
c.register_common_storage_account_options()
c.argument('sku', arg_type=get_enum_type(t_sku_name),
help='Note that the SKU name cannot be updated to Standard_ZRS, Premium_LRS or Premium_ZRS, '
'nor can accounts of those SKU names be updated to any other value')
c.argument('custom_domain',
help='User domain assigned to the storage account. Name is the CNAME source. Use "" to clear '
'existing value.',
validator=validate_custom_domain)
c.argument('use_subdomain', help='Specify whether to use indirect CNAME validation.',
arg_type=get_enum_type(['true', 'false']))
c.argument('tags', tags_type, default=None)
c.argument('enable_files_aadds', aadds_type)
c.argument('enable_files_adds', adds_type)
c.argument('enable_large_file_share', arg_type=large_file_share_type)
c.argument('domain_name', domain_name_type)
c.argument('net_bios_domain_name', net_bios_domain_name_type)
c.argument('forest_name', forest_name_type)
c.argument('domain_guid', domain_guid_type)
c.argument('domain_sid', domain_sid_type)
c.argument('azure_storage_sid', azure_storage_sid_type)
c.argument('routing_choice', routing_choice_type)
c.argument('publish_microsoft_endpoints', publish_microsoft_endpoints_type)
c.argument('publish_internet_endpoints', publish_internet_endpoints_type)
c.argument('allow_blob_public_access', arg_type=get_three_state_flag(), min_api='2019-04-01',
help='Allow or disallow public access to all blobs or containers in the storage account. '
'The default value for this property is null, which is equivalent to true. When true, containers '
'in the account may be configured for public access. Note that setting this property to true does '
'not enable anonymous access to any data in the account. The additional step of configuring the '
'public access setting for a container is required to enable anonymous access.')
c.argument('min_tls_version', arg_type=get_enum_type(t_tls_version),
help='The minimum TLS version to be permitted on requests to storage. '
'The default interpretation is TLS 1.0 for this property')
c.argument('allow_shared_key_access', allow_shared_key_access_type)
c.argument('identity_type', arg_type=get_enum_type(t_identity_type), arg_group='Identity',
help='The identity type.')
c.argument('user_identity_id', arg_group='Identity',
help='The key is the ARM resource identifier of the identity. Only 1 User Assigned identity is '
'permitted here.')
c.argument('key_expiration_period_in_days', key_expiration_period_in_days_type, is_preview=True)
c.argument('sas_expiration_period', sas_expiration_period_type, is_preview=True)
c.argument('allow_cross_tenant_replication', allow_cross_tenant_replication_type)
c.argument('default_share_permission', default_share_permission_type)
c.argument('public_network_access', arg_type=get_enum_type(public_network_access_enum), min_api='2021-06-01',
help='Enable or disable public network access to the storage account. '
'Possible values include: `Enabled` or `Disabled`.')
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, arg_group='Customer managed key', min_api='2017-06-01',
resource_type=ResourceType.MGMT_STORAGE) as c:
t_key_source = self.get_models('KeySource', resource_type=ResourceType.MGMT_STORAGE)
c.argument('encryption_key_name', help='The name of the KeyVault key.', )
c.argument('encryption_key_vault', help='The Uri of the KeyVault.')
c.argument('encryption_key_version',
help='The version of the KeyVault key to use, which will opt out of implicit key rotation. '
'Please use "" to opt in key auto-rotation again.')
c.argument('encryption_key_source',
arg_type=get_enum_type(t_key_source),
help='The default encryption key source',
validator=validate_encryption_source)
c.argument('key_vault_user_identity_id', options_list=['--key-vault-user-identity-id', '-u'],
min_api='2021-01-01',
help='Resource identifier of the UserAssigned identity to be associated with server-side '
'encryption on the storage account.')
for scope in ['storage account create', 'storage account update']:
with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE, min_api='2017-06-01',
arg_group='Network Rule') as c:
t_bypass, t_default_action = self.get_models('Bypass', 'DefaultAction',
resource_type=ResourceType.MGMT_STORAGE)
c.argument('bypass', nargs='+', validator=validate_bypass, arg_type=get_enum_type(t_bypass),
help='Bypass traffic for space-separated uses.')
c.argument('default_action', arg_type=get_enum_type(t_default_action),
help='Default action to apply when no rule matches.')
c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
c.argument('action', action_type)
with self.argument_context('storage account show-connection-string') as c:
c.argument('protocol', help='The default endpoint protocol.', arg_type=get_enum_type(['http', 'https']))
c.argument('sas_token', help='The SAS token to be used in the connection-string.')
c.argument('key_name', options_list=['--key'], help='The key to use.',
arg_type=get_enum_type(list(storage_account_key_options.keys())))
for item in ['blob', 'file', 'queue', 'table']:
c.argument('{}_endpoint'.format(item), help='Custom endpoint for {}s.'.format(item))
with self.argument_context('storage account encryption-scope') as c:
c.argument('account_name', help='The storage account name.')
c.argument('resource_group_name', validator=process_resource_group, required=False)
c.argument('encryption_scope_name', options_list=['--name', '-n'],
help='The name of the encryption scope within the specified storage account.')
# `storage account encryption-scope create/update`: key-source selection
# (Microsoft.Storage vs. Key Vault key URI) and infrastructure encryption.
for scope in ['storage account encryption-scope create', 'storage account encryption-scope update']:
    with self.argument_context(scope, resource_type=ResourceType.MGMT_STORAGE) as c:
        from ._validators import validate_encryption_key
        t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
        c.argument('key_source', options_list=['-s', '--key-source'],
                   arg_type=get_enum_type(t_encryption_key_source, default="Microsoft.Storage"),
                   help='The provider for the encryption scope.', validator=validate_encryption_key)
        c.argument('key_uri', options_list=['-u', '--key-uri'],
                   help='The object identifier for a key vault key object. When applied, the encryption scope will '
                        'use the key referenced by the identifier to enable customer-managed key support on this '
                        'encryption scope.')
        c.argument('require_infrastructure_encryption', options_list=['--require-infrastructure-encryption', '-i'],
                   arg_type=get_three_state_flag(), min_api='2021-01-01',
                   help='A boolean indicating whether or not the service applies a secondary layer of encryption '
                        'with platform managed keys for data at rest.')
# `storage account encryption-scope update`: allow switching the key source and
# enabling/disabling the scope's state.
with self.argument_context('storage account encryption-scope update') as c:
    # Fetch the validator and enum model locally instead of relying on
    # `validate_encryption_key` / `t_encryption_key_source` leaking out of the
    # preceding create/update for-loop — that leak is fragile and breaks if the
    # loop above is reordered or removed.
    from ._validators import validate_encryption_key
    t_encryption_key_source = self.get_models('EncryptionScopeSource', resource_type=ResourceType.MGMT_STORAGE)
    t_state = self.get_models("EncryptionScopeState", resource_type=ResourceType.MGMT_STORAGE)
    c.argument('key_source', options_list=['-s', '--key-source'],
               arg_type=get_enum_type(t_encryption_key_source),
               help='The provider for the encryption scope.', validator=validate_encryption_key)
    c.argument('state', arg_type=get_enum_type(t_state),
               help='Change the state the encryption scope. When disabled, '
               'all blob read/write operations using this encryption scope will fail.')
# `storage account keys list`: optionally expand Kerberos key types.
with self.argument_context('storage account keys list', resource_type=ResourceType.MGMT_STORAGE) as c:
    t_expand_key_type = self.get_models('ListKeyExpand', resource_type=ResourceType.MGMT_STORAGE)
    c.argument("expand", options_list=['--expand-key-type'], help='Specify the expanded key types to be listed.',
               arg_type=get_enum_type(t_expand_key_type), min_api='2019-04-01', is_preview=True)

# `storage account keys renew`: which access key (and optionally kerb key) to regenerate.
with self.argument_context('storage account keys renew', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('key_name', options_list=['--key'], help='The key options to regenerate.',
               arg_type=get_enum_type(list(storage_account_key_options.keys())))
    c.extra('key_type', help='The key type to regenerate. If --key-type is not specified, one of access keys will '
            'be regenerated by default.', arg_type=get_enum_type(['kerb']), min_api='2019-04-01')
    c.argument('account_name', acct_name_type, id_part=None)
# `storage account management-policy create`: lifecycle-management rules are
# supplied as a JSON document.
with self.argument_context('storage account management-policy create') as c:
    c.argument('policy', type=file_type, completer=FilesCompleter(),
               # Fixed docs URL slug: "managment" -> "management" (the misspelled
               # slug does not resolve on docs.microsoft.com).
               help='The Storage Account ManagementPolicies Rules, in JSON format. See more details in: '
                    'https://docs.microsoft.com/azure/storage/common/storage-lifecycle-management-concepts.')
# All management-policy verbs share the explicit account-name help.
for item in ['create', 'update', 'show', 'delete']:
    with self.argument_context('storage account management-policy {}'.format(item)) as c:
        c.argument('account_name', help='The name of the storage account within the specified resource group.')

with self.argument_context('storage account keys list') as c:
    c.argument('account_name', acct_name_type, id_part=None)

# `storage account network-rule`: IP-, VNet-, and resource-instance-based rules.
with self.argument_context('storage account network-rule', resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('ip_address', help='IPv4 address or CIDR range.')
    c.argument('subnet', help='Name or ID of subnet. If name is supplied, `--vnet-name` must be supplied.')
    c.argument('vnet_name', help='Name of a virtual network.', validator=validate_subnet)
    c.argument('action', action_type)
    c.argument('resource_id', help='The resource id to add in network rule.', arg_group='Resource Access Rule',
               min_api='2020-08-01-preview')
    c.argument('tenant_id', help='The tenant id to add in network rule.', arg_group='Resource Access Rule',
               min_api='2020-08-01-preview')
with self.argument_context('storage account blob-service-properties show',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)

# `storage account blob-service-properties update`: change feed, container/blob
# soft delete, restore policy, versioning, last-access tracking.
with self.argument_context('storage account blob-service-properties update',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('enable_change_feed', arg_type=get_three_state_flag(), min_api='2019-04-01',
               arg_group='Change Feed Policy')
    c.argument('change_feed_retention_days', is_preview=True,
               options_list=['--change-feed-retention-days', '--change-feed-days'],
               type=int, min_api='2019-06-01', arg_group='Change Feed Policy',
               validator=validator_change_feed_retention_days,
               help='Indicate the duration of changeFeed retention in days. '
                    'Minimum value is 1 day and maximum value is 146000 days (400 years). '
                    'A null value indicates an infinite retention of the change feed.'
                    '(Use `--enable-change-feed` without `--change-feed-days` to indicate null)')
    c.argument('enable_container_delete_retention',
               arg_type=get_three_state_flag(),
               options_list=['--enable-container-delete-retention', '--container-retention'],
               arg_group='Container Delete Retention Policy', min_api='2019-06-01',
               help='Enable container delete retention policy for container soft delete when set to true. '
                    'Disable container delete retention policy when set to false.')
    c.argument('container_delete_retention_days',
               options_list=['--container-delete-retention-days', '--container-days'],
               type=int, arg_group='Container Delete Retention Policy',
               min_api='2019-06-01', validator=validate_container_delete_retention_days,
               help='Indicate the number of days that the deleted container should be retained. The minimum '
                    'specified value can be 1 and the maximum value can be 365.')
    c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
               min_api='2018-07-01')
    c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
               validator=validate_delete_retention_days, min_api='2018-07-01')
    c.argument('enable_restore_policy', arg_type=get_three_state_flag(), arg_group='Restore Policy',
               min_api='2019-06-01', help="Enable blob restore policy when it set to true.")
    c.argument('restore_days', type=int, arg_group='Restore Policy',
               min_api='2019-06-01', help="The number of days for the blob can be restored. It should be greater "
                                          "than zero and less than Delete Retention Days.")
    c.argument('enable_versioning', arg_type=get_three_state_flag(), help='Versioning is enabled if set to true.',
               min_api='2019-06-01')
    c.argument('default_service_version', options_list=['--default-service-version', '-d'],
               type=get_api_version_type(), min_api='2018-07-01',
               help="Indicate the default version to use for requests to the Blob service if an incoming request's "
                    "version is not specified.")
    c.argument('enable_last_access_tracking', arg_type=get_three_state_flag(), min_api='2019-06-01',
               options_list=['--enable-last-access-tracking', '-t'],
               help='When set to true last access time based tracking policy is enabled.')
with self.argument_context('storage account file-service-properties show',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)

# `storage account file-service-properties update`: share soft delete and SMB
# protocol settings (multichannel, versions, auth, encryption).
with self.argument_context('storage account file-service-properties update',
                           resource_type=ResourceType.MGMT_STORAGE) as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('enable_delete_retention', arg_type=get_three_state_flag(), arg_group='Delete Retention Policy',
               min_api='2019-06-01', help='Enable file service properties for share soft delete.')
    c.argument('delete_retention_days', type=int, arg_group='Delete Retention Policy',
               validator=validate_file_delete_retention_days, min_api='2019-06-01',
               help='Indicate the number of days that the deleted item should be retained. The minimum specified '
                    'value can be 1 and the maximum value can be 365.')
    c.argument('enable_smb_multichannel', options_list=['--enable-smb-multichannel', '--mc'],
               arg_type=get_three_state_flag(), min_api='2020-08-01-preview', arg_group='SMB Setting',
               help='Set SMB Multichannel setting for file service. Applies to Premium FileStorage only.')
    c.argument('versions', arg_group='SMB Setting', min_api='2020-08-01-preview',
               help="SMB protocol versions supported by server. Valid values are SMB2.1, SMB3.0, "
                    "SMB3.1.1. Should be passed as a string with delimiter ';'.")
    c.argument('authentication_methods', options_list='--auth-methods', arg_group='SMB Setting',
               min_api='2020-08-01-preview',
               help="SMB authentication methods supported by server. Valid values are NTLMv2, Kerberos. "
                    "Should be passed as a string with delimiter ';'.")
    c.argument('kerberos_ticket_encryption', options_list=['--kerb-ticket-encryption', '-k'],
               arg_group='SMB Setting', min_api='2020-08-01-preview',
               help="Kerberos ticket encryption supported by server. Valid values are RC4-HMAC, AES-256. "
                    "Should be passed as a string with delimiter ';'.")
    c.argument('channel_encryption', arg_group='SMB Setting', min_api='2020-08-01-preview',
               help="SMB channel encryption supported by server. Valid values are AES-128-CCM, AES-128-GCM, "
                    "AES-256-GCM. Should be passed as a string with delimiter ';' ")
# `storage account generate-sas`: account-level SAS — services, resource types,
# permissions and validity window. `sas_token` is computed, so it is ignored.
with self.argument_context('storage account generate-sas') as c:
    t_account_permissions = self.get_sdk('common.models#AccountPermissions')
    c.register_sas_arguments()
    c.argument('services', type=services_type(self))
    c.argument('resource_types', type=resource_type_type(self))
    c.argument('expiry', type=get_datetime_type(True))
    c.argument('start', type=get_datetime_type(True))
    c.argument('account_name', acct_name_type, options_list=['--account-name'])
    c.argument('permission', options_list=('--permissions',),
               help='The permissions the SAS grants. Allowed values: {}. Can be combined.'.format(
                   get_permission_help_string(t_account_permissions)),
               validator=get_permission_validator(t_account_permissions))
    c.ignore('sas_token')
# Reusable argument types for object-replication (OR) policy commands.
or_policy_type = CLIArgumentType(
    options_list=['--policy', '-p'],
    help='The object replication policy definition between two storage accounts, in JSON format. '
         'Multiple rules can be defined in one policy.'
)
policy_id_type = CLIArgumentType(
    options_list=['--policy-id'],
    help='The ID of object replication policy or "default" if the policy ID is unknown. Policy Id will be '
         'auto-generated when setting on destination account. Required when setting on source account.'
)
rule_id_type = CLIArgumentType(
    options_list=['--rule-id', '-r'],
    help='Rule Id is auto-generated for each new rule on destination account. It is required '
         'for put policy on source account.'
)
# Renamed from the misspelled `prefix_math_type`; the old name is kept as an
# alias in case other registrations in this function still reference it.
prefix_match_type = CLIArgumentType(
    nargs='+', arg_group='Filters', options_list=['--prefix-match', '--prefix'],
    help='Optional. Filter the results to replicate only blobs whose names begin with the specified '
         'prefix.'
)
prefix_math_type = prefix_match_type
min_creation_time_type = CLIArgumentType(
    options_list=['--min-creation-time', '-t'], arg_group='Filters', type=get_datetime_type(True),
    help="Blobs created after the time will be replicated to the destination. It must be in datetime format "
         "'yyyy-MM-ddTHH:mm:ssZ'. Example: 2020-02-19T16:05:00Z")

# Arguments shared by every `storage account or-policy` subcommand.
with self.argument_context('storage account or-policy') as c:
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('object_replication_policy_id', policy_id_type)
    c.argument('policy_id', policy_id_type)
    c.argument('source_account', options_list=['--source-account', '-s'],
               help='The source storage account name or resource Id. Required when no --policy provided.')
    c.argument('destination_account', options_list=['--destination-account', '-d'],
               help='The destination storage account name or resource Id. Apply --account-name value as '
                    'destination account when there is no destination account provided in --policy and '
                    '--destination-account.')
    c.argument('properties', or_policy_type)
    c.argument('prefix_match', prefix_match_type)
    c.argument('min_creation_time', min_creation_time_type)
# Per-rule arguments for or-policy create/update.
for item in ['create', 'update']:
    with self.argument_context('storage account or-policy {}'.format(item),
                               arg_group="Object Replication Policy Rule") as c:
        c.argument('rule_id', help='Rule Id is auto-generated for each new rule on destination account. It is '
                                   'required for put policy on source account.')
        c.argument('source_container', options_list=['--source-container', '--scont'],
                   help='The source storage container name. Required when no --policy provided.')
        c.argument('destination_container', options_list=['--destination-container', '--dcont'],
                   help='The destination storage container name. Required when no --policy provided.')

with self.argument_context('storage account or-policy create') as c:
    c.argument('properties', or_policy_type, validator=validate_or_policy)

with self.argument_context('storage account or-policy rule') as c:
    c.argument('policy_id', policy_id_type)
    c.argument('source_container', options_list=['--source-container', '-s'],
               help='The source storage container name.')
    c.argument('destination_container', options_list=['--destination-container', '-d'],
               help='The destination storage container name.')
    c.argument('rule_id', rule_id_type)

# `storage account hns-migration start`: validate or actually run the
# hierarchical-namespace upgrade.
with self.argument_context('storage account hns-migration start') as c:
    c.argument('request_type', options_list=['--type', '--request-type'],
               arg_type=get_enum_type(['validation', 'upgrade']), validator=validate_hns_migration_type,
               help='Start a validation request for migration or start a migration request')
# Classic analytics logging: services are encoded as a character set
# (b=blob, q=queue, t=table; f=file for metrics).
for item in ['show', 'off']:
    with self.argument_context('storage logging {}'.format(item)) as c:
        c.extra('services', validator=get_char_options_validator('bqt', 'services'), default='bqt')

with self.argument_context('storage logging update') as c:
    c.extra('services', validator=get_char_options_validator('bqt', 'services'), options_list='--services',
            required=True)
    c.argument('log', validator=get_char_options_validator('rwd', 'log'))
    c.argument('retention', type=int)
    c.argument('version', type=float, validator=validate_logging_version)

with self.argument_context('storage metrics show') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bfqt')
    c.argument('interval', arg_type=get_enum_type(['hour', 'minute', 'both']))

with self.argument_context('storage metrics update') as c:
    c.extra('services', validator=get_char_options_validator('bfqt', 'services'), options_list='--services',
            required=True)
    c.argument('hour', validator=process_metric_update_namespace, arg_type=get_enum_type(['true', 'false']))
    c.argument('minute', arg_type=get_enum_type(['true', 'false']))
    c.argument('api', arg_type=get_enum_type(['true', 'false']))
    c.argument('retention', type=int)
# Arguments shared by every `storage blob` subcommand.
with self.argument_context('storage blob') as c:
    c.argument('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
    c.argument('destination_path', help='The destination path that will be prepended to the blob name.')

# `storage blob list`: paging (marker/num-results), prefix/delimiter filtering,
# and optional extra datasets to include.
with self.argument_context('storage blob list') as c:
    from ._validators import get_include_help_string
    t_blob_include = self.get_sdk('_generated.models._azure_blob_storage_enums#ListBlobsIncludeItem',
                                  resource_type=ResourceType.DATA_STORAGE_BLOB)
    c.register_container_arguments()
    c.argument('delimiter',
               help='When the request includes this parameter, the operation returns a BlobPrefix element in the '
                    'result list that acts as a placeholder for all blobs whose names begin with the same substring '
                    'up to the appearance of the delimiter character. The delimiter may be a single character or a '
                    'string.')
    c.argument('include', help="Specify one or more additional datasets to include in the response. "
                               "Options include: {}. Can be combined.".format(get_include_help_string(t_blob_include)),
               validator=validate_included_datasets_validator(include_class=t_blob_include))
    c.argument('marker', arg_type=marker_type)
    c.argument('num_results', arg_type=num_results_type)
    c.argument('prefix',
               help='Filter the results to return only blobs whose name begins with the specified prefix.')
    c.argument('show_next_marker', action='store_true',
               help='Show nextMarker in result when specified.')
# `storage blob generate-sas`: blob-level SAS — response header overrides,
# stored-access-policy name, permissions, and user-delegation signing.
with self.argument_context('storage blob generate-sas') as c:
    from .completers import get_storage_acl_name_completion_list
    t_blob_permissions = self.get_sdk('blob.models#BlobPermissions')
    c.register_sas_arguments()
    c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
                                     'using this shared access signature.')
    c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
                                           'accessed using this shared access signature.')
    c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
                                        'using this shared access signature.')
    c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
                                        'using this shared access signature.')
    c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
                                    'using this shared access signature.')
    c.argument('full_uri', action='store_true',
               help='Indicates that this command return the full blob URI and the shared access signature token.')
    c.argument('as_user', min_api='2018-11-09', action='store_true',
               validator=as_user_validator,
               help="Indicates that this command return the SAS signed with the user delegation key. "
                    "The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
    c.argument('id', options_list='--policy-name', validator=validate_policy,
               help='The name of a stored access policy within the container\'s ACL.',
               completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
                                                              'get_container_acl'))
    c.argument('permission', options_list='--permissions',
               help=sas_help.format(get_permission_help_string(t_blob_permissions)),
               validator=get_permission_validator(t_blob_permissions))
    c.ignore('sas_token')
# `storage blob restore`: point-in-time restore of blob ranges
# (management-plane operation on the account).
with self.argument_context('storage blob restore', resource_type=ResourceType.MGMT_STORAGE) as c:
    from ._validators import BlobRangeAddAction
    c.argument('blob_ranges', options_list=['--blob-range', '-r'], action=BlobRangeAddAction, nargs='+',
               # Fixed grammar in help text: "You need to two values" -> "You need two values".
               help='Blob ranges to restore. You need two values to specify start_range and end_range for each '
                    'blob range, e.g. -r blob1 blob2. Note: Empty means account start as start range value, and '
                    'means account end for end range.')
    c.argument('account_name', acct_name_type, id_part=None)
    c.argument('resource_group_name', required=False, validator=process_resource_group)
    c.argument('time_to_restore', type=get_datetime_type(True), options_list=['--time-to-restore', '-t'],
               help='Restore blob to the specified time, which should be UTC datetime in (Y-m-d\'T\'H:M:S\'Z\').')
# `storage blob rewrite`: re-encrypt/re-tier a blob by copying it onto itself
# from a source URL.
with self.argument_context('storage blob rewrite', resource_type=ResourceType.DATA_STORAGE_BLOB,
                           min_api='2020-04-08') as c:
    c.register_blob_arguments()
    c.register_precondition_options()
    c.argument('source_url', options_list=['--source-uri', '-u'],
               help='A URL of up to 2 KB in length that specifies a file or blob. The value should be URL-encoded '
                    'as it would appear in a request URI. If the source is in another account, the source must either '
                    'be public or must be authenticated via a shared access signature. If the source is public, no '
                    'authentication is required.')
    c.extra('lease', options_list='--lease-id',
            help='Required if the blob has an active lease. Value can be a BlobLeaseClient object '
                 'or the lease ID as a string.')
    c.extra('standard_blob_tier', arg_type=get_enum_type(t_blob_tier), options_list='--tier',
            help='A standard blob tier value to set the blob to. For this version of the library, '
                 'this is only applicable to block blobs on standard storage accounts.')
    c.extra('encryption_scope',
            help='A predefined encryption scope used to encrypt the data on the service. An encryption scope '
                 'can be created using the Management API and referenced here by name. If a default encryption scope '
                 'has been defined at the container, this value will override it if the container-level scope is '
                 'configured to allow overrides. Otherwise an error will be raised.')
with self.argument_context('storage blob update') as c:
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=True)

with self.argument_context('storage blob exists') as c:
    c.argument('blob_name', required=True)

with self.argument_context('storage blob url') as c:
    c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
    c.argument('snapshot', help='An string value that uniquely identifies the snapshot. The value of '
                                'this query parameter indicates the snapshot version.')

# `storage blob set-tier`: change access tier; rehydrate priority applies to
# archived blobs.
with self.argument_context('storage blob set-tier') as c:
    from azure.cli.command_modules.storage._validators import (blob_rehydrate_priority_validator)
    c.register_blob_arguments()
    c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(('block', 'page')))
    c.argument('tier', validator=blob_tier_validator)
    c.argument('rehydrate_priority', options_list=('--rehydrate-priority', '-r'),
               arg_type=get_enum_type(('High', 'Standard')), validator=blob_rehydrate_priority_validator,
               is_preview=True, help="Indicate the priority with which to rehydrate an archived blob. "
                                     "The priority can be set on a blob only once, default value is Standard.")
# Data-plane blob service properties: soft delete and static website hosting.
with self.argument_context('storage blob service-properties delete-policy update') as c:
    c.argument('enable', arg_type=get_enum_type(['true', 'false']), help='Enables/disables soft-delete.')
    c.argument('days_retained', type=int,
               help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')

with self.argument_context('storage blob service-properties update', min_api='2018-03-28') as c:
    c.argument('delete_retention', arg_type=get_three_state_flag(), arg_group='Soft Delete',
               help='Enables soft-delete.')
    c.argument('delete_retention_period', type=int, arg_group='Soft Delete',
               help='Number of days that soft-deleted blob will be retained. Must be in range [1,365].')
    c.argument('static_website', arg_group='Static Website', arg_type=get_three_state_flag(),
               help='Enables static-website.')
    c.argument('index_document', help='Represents the name of the index document. This is commonly "index.html".',
               arg_group='Static Website')
    c.argument('error_document_404_path', options_list=['--404-document'], arg_group='Static Website',
               help='Represents the path to the error document that should be shown when an error 404 is issued,'
                    ' in other words, when a browser requests a page that does not exist.')

with self.argument_context('storage blob show') as c:
    c.register_blob_arguments()
    c.register_precondition_options()
    c.extra('snapshot', help='The snapshot parameter is an opaque DateTime value that, when present, '
                             'specifies the blob snapshot to retrieve.')
    c.argument('lease_id', help='Required if the blob has an active lease.')
# `storage blob upload`: local file to blob, with content settings, blob type,
# tier, and encryption scope.
with self.argument_context('storage blob upload') as c:
    from ._validators import page_blob_tier_validator, validate_encryption_scope_client_params
    from .sdkutil import get_blob_types, get_blob_tier_names
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=False)
    c.register_blob_arguments()
    c.extra('blob_name', validator=validate_blob_name_for_upload)
    c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter())
    c.argument('max_connections', type=int)
    c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
               arg_type=get_enum_type(get_blob_types()))
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
    # TODO: Remove once #807 is complete. Smart Create Generation requires this parameter.
    # register_extra_cli_argument('storage blob upload', '_subscription_id', options_list=('--subscription',),
    #                             help=argparse.SUPPRESS)
    c.argument('tier', validator=page_blob_tier_validator,
               arg_type=get_enum_type(get_blob_tier_names(self.cli_ctx, 'PremiumPageBlobTier')),
               min_api='2017-04-17')
    c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
               help='A predefined encryption scope used to encrypt the data on the service.')
# Batch transfer commands: upload-batch / download / download-batch.
with self.argument_context('storage blob upload-batch') as c:
    from .sdkutil import get_blob_types
    t_blob_content_settings = self.get_sdk('blob.models#ContentSettings')
    c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group='Content Control')
    c.ignore('source_files', 'destination_container_name')
    c.argument('source', options_list=('--source', '-s'))
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('max_connections', type=int,
               help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
    c.argument('maxsize_condition', arg_group='Content Control')
    c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group='Content Control')
    c.argument('blob_type', options_list=('--type', '-t'), arg_type=get_enum_type(get_blob_types()))
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)

with self.argument_context('storage blob download') as c:
    c.argument('file_path', options_list=('--file', '-f'), type=file_type,
               completer=FilesCompleter(), validator=blob_download_file_path_validator)
    c.argument('max_connections', type=int)
    c.argument('start_range', type=int)
    c.argument('end_range', type=int)
    c.argument('validate_content', action='store_true', min_api='2016-05-31')
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)

with self.argument_context('storage blob download-batch') as c:
    c.ignore('source_container_name')
    c.argument('destination', options_list=('--destination', '-d'))
    c.argument('source', options_list=('--source', '-s'))
    c.extra('no_progress', progress_type)
    c.extra('socket_timeout', socket_timeout_type)
    c.argument('max_connections', type=int,
               help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
with self.argument_context('storage blob delete') as c:
    from .sdkutil import get_delete_blob_snapshot_type_names
    c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()))

with self.argument_context('storage blob delete-batch') as c:
    c.ignore('source_container_name')
    c.argument('source', options_list=('--source', '-s'))
    # NOTE(review): `get_delete_blob_snapshot_type_names` relies on the import in
    # the `storage blob delete` block above (function-scope imports persist).
    c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_blob_snapshot_type_names()),
               help='Required if the blob has associated snapshots.')
    c.argument('lease_id', help='The active lease id for the blob.')

# Blob lease lifecycle: acquire / break / change / release / renew.
with self.argument_context('storage blob lease') as c:
    c.argument('blob_name', arg_type=blob_name_type)

with self.argument_context('storage blob lease acquire') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.extra('lease_id', options_list='--proposed-lease-id', help='Proposed lease ID, in a GUID string format. '
            'The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format.')
    c.argument('lease_duration', help='Specify the duration of the lease, in seconds, or negative one (-1) for '
               'a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease '
               'duration cannot be changed using renew or change. Default is -1 (infinite lease)', type=int)

with self.argument_context('storage blob lease break') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.argument('lease_break_period', type=int,
               help="This is the proposed duration of seconds that the lease should continue before it is broken, "
                    "between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining "
                    "on the lease. If longer, the time remaining on the lease is used. A new lease will not be "
                    "available before the break period has expired, but the lease may be held for longer than the break "
                    "period. If this header does not appear with a break operation, a fixed-duration lease breaks after "
                    "the remaining lease period elapses, and an infinite lease breaks immediately.")

with self.argument_context('storage blob lease change') as c:
    c.register_precondition_options()
    c.register_blob_arguments()
    c.extra('proposed_lease_id', help='Proposed lease ID, in a GUID string format. The Blob service returns 400 '
            '(Invalid request) if the proposed lease ID is not in the correct format.', required=True)
    c.extra('lease_id', help='Required if the blob has an active lease.', required=True)

for item in ['release', 'renew']:
    with self.argument_context('storage blob lease {}'.format(item)) as c:
        c.register_precondition_options()
        c.register_blob_arguments()
        c.extra('lease_id', help='Required if the blob has an active lease.', required=True)
# `storage copy` (azcopy-backed): generic copy between local paths, Azure
# storage URLs and S3, with per-side container/blob/share extras and azcopy
# passthrough flags.
with self.argument_context('storage copy') as c:
    c.argument('destination',
               options_list=['--destination', '-d',
                             c.deprecate(target='--destination-local-path', redirect='--destination')],
               help="The path/url of copy destination. "
                    "It can be a local path, an url to azure storage server. If you provide destination parameter "
                    "here, you do not need to provide arguments in copy destination arguments group and copy "
                    "destination arguments will be deprecated in future.", required=False)
    c.argument('source',
               options_list=['--source', '-s',
                             c.deprecate(target='--source-local-path', redirect='--source')],
               help="The path/url of copy source. It can be a local"
                    " path, an url to azure storage server or AWS S3 buckets. If you provide source parameter here,"
                    " you do not need to provide arguments in copy source arguments group and copy source arguments"
                    " will be deprecated in future.", required=False)
    # Register the same set of location extras for both sides of the copy.
    for item in ['destination', 'source']:
        c.extra('{}_container'.format(item), arg_group='Copy {}'.format(item),
                help='Container name of copy {} storage account'.format(item))
        c.extra('{}_blob'.format(item), arg_group='Copy {}'.format(item),
                help='Blob name in blob container of copy {} storage account'.format(item))
        c.extra('{}_share'.format(item), arg_group='Copy {}'.format(item),
                help='File share name of copy {} storage account'.format(item))
        c.extra('{}_file_path'.format(item), arg_group='Copy {}'.format(item),
                help='File path in file share of copy {} storage account'.format(item))
    c.argument('account_name', acct_name_type, arg_group='Storage Account', id_part=None,
               options_list=['--account-name',
                             c.deprecate(target='--destination-account-name', redirect='--account-name')],
               help='Storage account name of copy destination')
    c.extra('source_account_name', arg_group='Copy source',
            help='Account name of copy source storage account.')
    c.extra('source_account_key', arg_group='Copy source',
            help='Account key of copy source storage account. Must be used in conjunction with source storage '
                 'account name.')
    c.extra('source_connection_string', arg_group='Copy source',
            options_list=['--source-connection-string', '--src-conn'],
            help='Connection string of source storage account.')
    c.extra('source_sas', arg_group='Copy source',
            help='Shared Access Signature (SAS) token of copy source. Must be used in conjunction with source '
                 'storage account name.')
    c.argument('put_md5', arg_group='Additional Flags', action='store_true',
               help='Create an MD5 hash of each file, and save the hash as the Content-MD5 property of the '
                    'destination blob/file.Only available when uploading.')
    c.argument('blob_type', arg_group='Additional Flags',
               arg_type=get_enum_type(["BlockBlob", "PageBlob", "AppendBlob"]),
               help='The type of blob at the destination.')
    c.argument('preserve_s2s_access_tier', arg_group='Additional Flags', arg_type=get_three_state_flag(),
               help='Preserve access tier during service to service copy. '
                    'Please refer to https://docs.microsoft.com/azure/storage/blobs/storage-blob-storage-tiers '
                    'to ensure destination storage account support setting access tier. In the cases that setting '
                    'access tier is not supported, please use `--preserve-s2s-access-tier false` to bypass copying '
                    'access tier. (Default true)')
    c.argument('exclude_pattern', exclude_pattern_type)
    c.argument('include_pattern', include_pattern_type)
    c.argument('exclude_path', exclude_path_type)
    c.argument('include_path', include_path_type)
    c.argument('recursive', recursive_type)
    c.argument('content_type', arg_group='Additional Flags', help="Specify content type of the file. ")
    c.argument('follow_symlinks', arg_group='Additional Flags', action='store_true',
               help='Follow symbolic links when uploading from local file system.')
    c.argument('cap_mbps', arg_group='Additional Flags', help="Caps the transfer rate, in megabits per second. "
               "Moment-by-moment throughput might vary slightly from the cap. "
               "If this option is set to zero, or it is omitted, the throughput isn't capped. ")
# `storage blob copy`: pre-condition arguments for both the source and the
# destination side, plus destination container/blob naming.
with self.argument_context('storage blob copy') as c:
    for item in ['destination', 'source']:
        c.argument('{}_if_modified_since'.format(item), arg_group='Pre-condition', arg_type=if_modified_since_type)
        c.argument('{}_if_unmodified_since'.format(item), arg_group='Pre-condition',
                   arg_type=if_unmodified_since_type)
        c.argument('{}_if_match'.format(item), arg_group='Pre-condition')
        c.argument('{}_if_none_match'.format(item), arg_group='Pre-condition')
    c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
    c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
               # Fixed help text: "If the exists" -> "If it exists".
               help='Name of the destination blob. If it exists, it will be overwritten.')
    c.argument('source_lease_id', arg_group='Copy Source')
# `storage blob copy start`: server-side copy with separate source/destination
# pre-conditions, leases, tier, tags, and sync enforcement.
with self.argument_context('storage blob copy start', resource_type=ResourceType.DATA_STORAGE_BLOB) as c:
    from ._validators import validate_source_url
    c.register_blob_arguments()
    c.register_precondition_options()
    c.register_precondition_options(prefix='source_')
    c.register_source_uri_arguments(validator=validate_source_url)
    c.ignore('incremental_copy')
    # The unprefixed pre-condition options apply to the destination blob, so
    # rename them with a --destination- prefix.
    c.argument('if_match', options_list=['--destination-if-match'])
    c.argument('if_modified_since', options_list=['--destination-if-modified-since'])
    c.argument('if_none_match', options_list=['--destination-if-none-match'])
    c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since'])
    c.argument('if_tags_match_condition', options_list=['--destination-tags-condition'])
    c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True,
               # Fixed help text: "If the exists" -> "If it exists".
               help='Name of the destination blob. If it exists, it will be overwritten.')
    c.argument('container_name', options_list=['--destination-container', '-c'], required=True,
               help='The container name.')
    c.extra('destination_lease', options_list='--destination-lease-id',
            # Fixed typo in help text: "estination blob" -> "destination blob".
            help='The lease ID specified for this header must match the lease ID of the destination blob. '
                 'If the request does not include the lease ID or it is not valid, the operation fails with status '
                 'code 412 (Precondition Failed).')
    c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source',
            help='Specify this to perform the Copy Blob operation only if the lease ID given matches the '
                 'active lease ID of the source blob.')
    c.extra('rehydrate_priority', rehydrate_priority_type)
    c.extra('requires_sync', arg_type=get_three_state_flag(),
            help='Enforce that the service will not return a response until the copy is complete.')
    c.extra('tier', tier_type)
    c.extra('tags', tags_type)
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
from azure.cli.command_modules.storage._validators import get_source_file_or_blob_service_client
c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
c.extra('source_account_name')
c.extra('source_account_key')
c.extra('source_uri')
c.argument('source_sas')
c.argument('source_container')
c.argument('source_share')
with self.argument_context('storage blob incremental-copy start') as c:
from azure.cli.command_modules.storage._validators import process_blob_source_uri
c.register_source_uri_arguments(validator=process_blob_source_uri, blob_only=True)
c.argument('destination_if_modified_since', arg_group='Pre-condition', arg_type=if_modified_since_type)
c.argument('destination_if_unmodified_since', arg_group='Pre-condition', arg_type=if_unmodified_since_type)
c.argument('destination_if_match', arg_group='Pre-condition')
c.argument('destination_if_none_match', arg_group='Pre-condition')
c.argument('container_name', container_name_type, options_list=('--destination-container', '-c'))
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If the exists, it will be overwritten.')
c.argument('source_lease_id', arg_group='Copy Source')
with self.argument_context('storage blob query') as c:
from ._validators import validate_text_configuration
c.register_blob_arguments()
c.register_precondition_options()
line_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
column_separator = CLIArgumentType(help="The string used to separate columns.", default=',')
quote_char = CLIArgumentType(help="The string used to quote a specific field.", default='"')
record_separator = CLIArgumentType(help="The string used to separate records.", default='\n')
escape_char = CLIArgumentType(help="The string used as an escape character. Default to empty.", default="")
has_header = CLIArgumentType(
arg_type=get_three_state_flag(),
help="Whether the blob data includes headers in the first line. "
"The default value is False, meaning that the data will be returned inclusive of the first line. "
"If set to True, the data will be returned exclusive of the first line.", default=False)
c.extra('lease', options_list='--lease-id',
help='Required if the blob has an active lease.')
c.argument('query_expression', help='The query expression in SQL. The maximum size of the query expression '
'is 256KiB. For more information about the expression syntax, please see '
'https://docs.microsoft.com/azure/storage/blobs/query-acceleration-sql-reference')
c.extra('input_format', arg_type=get_enum_type(['csv', 'json']), validator=validate_text_configuration,
help='Serialization type of the data currently stored in the blob. '
'The default is to treat the blob data as CSV data formatted in the default dialect.'
'The blob data will be reformatted according to that profile when blob format is specified. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('output_format', arg_type=get_enum_type(['csv', 'json']),
help='Output serialization type for the data stream. '
'By default the data will be returned as it is represented in the blob. '
'By providing an output format, the blob data will be reformatted according to that profile. '
'If you choose `json`, please specify `Output Json Text Configuration Arguments` accordingly; '
'If you choose `csv`, please specify `Output Delimited Text Configuration Arguments`.')
c.extra('in_line_separator',
arg_group='Input Json Text Configuration',
arg_type=line_separator)
c.extra('in_column_separator', arg_group='Input Delimited Text Configuration',
arg_type=column_separator)
c.extra('in_quote_char', arg_group='Input Delimited Text Configuration',
arg_type=quote_char)
c.extra('in_record_separator', arg_group='Input Delimited Text Configuration',
arg_type=record_separator)
c.extra('in_escape_char', arg_group='Input Delimited Text Configuration',
arg_type=escape_char)
c.extra('in_has_header', arg_group='Input Delimited Text Configuration',
arg_type=has_header)
c.extra('out_line_separator',
arg_group='Output Json Text Configuration',
arg_type=line_separator)
c.extra('out_column_separator', arg_group='Output Delimited Text Configuration',
arg_type=column_separator)
c.extra('out_quote_char', arg_group='Output Delimited Text Configuration',
arg_type=quote_char)
c.extra('out_record_separator', arg_group='Output Delimited Text Configuration',
arg_type=record_separator)
c.extra('out_escape_char', arg_group='Output Delimited Text Configuration',
arg_type=escape_char)
c.extra('out_has_header', arg_group='Output Delimited Text Configuration',
arg_type=has_header)
c.extra('result_file', help='Specify the file path to save result.')
c.ignore('input_config')
c.ignore('output_config')
    # storage blob sync: azcopy-backed sync from a local path to a container.
    with self.argument_context('storage blob sync') as c:
        c.extra('destination_container', options_list=['--container', '-c'], required=True,
                help='The sync destination container.')
        c.extra('destination_path', options_list=['--destination', '-d'],
                validator=validate_azcopy_upload_destination_url,
                help='The sync destination path.')
        c.argument('source', options_list=['--source', '-s'],
                   help='The source file path to sync from.')
        # 'destination' is assembled by the validator above, so hide the raw parameter.
        c.ignore('destination')
        c.argument('exclude_pattern', exclude_pattern_type)
        c.argument('include_pattern', include_pattern_type)
        c.argument('exclude_path', exclude_path_type)
    # storage container: common name/public-access arguments for container commands.
    with self.argument_context('storage container') as c:
        from .sdkutil import get_container_access_type_names
        c.argument('container_name', container_name_type, options_list=('--name', '-n'))
        c.argument('public_access', validator=validate_container_public_access,
                   arg_type=get_enum_type(get_container_access_type_names()),
                   help='Specifies whether data in the container may be accessed publicly.')
    # storage container create
    with self.argument_context('storage container create') as c:
        c.argument('container_name', container_name_type, options_list=('--name', '-n'), completer=None)
        c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
        c.argument('account_name', help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
        c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
                   arg_group='Encryption Policy', is_preview=True,
                   help='Default the container to use specified encryption scope for all writes.')
        c.argument('prevent_encryption_scope_override', options_list=['--prevent-encryption-scope-override', '-p'],
                   arg_type=get_three_state_flag(), arg_group='Encryption Policy', is_preview=True,
                   help='Block override of encryption scope from the container default.')
with self.argument_context('storage container delete') as c:
c.argument('fail_not_exist', help='Throw an exception if the container does not exist.')
c.argument('bypass_immutability_policy', action='store_true', help='Bypasses upcoming service behavior that '
'will block a container from being deleted if it has a immutability-policy. Specifying this will '
'ignore arguments aside from those used to identify the container ("--name", "--account-name").')
c.argument('lease_id', help="If specified, delete_container only succeeds if the container's lease is active "
"and matches this ID. Required if the container has an active lease.")
c.ignore('processed_resource_group')
c.ignore('processed_account_name')
c.ignore('mgmt_client')
    # storage container exists: container-level check; blob-specific params don't apply.
    with self.argument_context('storage container exists') as c:
        c.ignore('blob_name', 'snapshot')
    # Time-based retention (immutability) policy: create and extend share the same surface.
    for item in ['create', 'extend']:
        with self.argument_context('storage container immutability-policy {}'.format(item)) as c:
            c.argument('account_name',
                       help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
            c.argument('if_match', help="An ETag value, or the wildcard character (*). Specify this header to perform "
                                        "the operation only if the resource's ETag matches the value specified.")
            c.extra('allow_protected_append_writes', options_list=['--allow-protected-append-writes', '-w'],
                    arg_type=get_three_state_flag(), help='This property can only be changed for unlocked time-based '
                                                          'retention policies. When enabled, new blocks can be '
                                                          'written to an append blob while maintaining immutability '
                                                          'protection and compliance. Only new blocks can be added '
                                                          'and any existing blocks cannot be modified or deleted. '
                                                          'This property cannot be changed with '
                                                          'ExtendImmutabilityPolicy API.')
            c.extra('allow_protected_append_writes_all', options_list=['--allow-protected-append-writes-all',
                                                                       '--w-all'],
                    arg_type=get_three_state_flag(), help="This property can only be changed for unlocked time-based "
                                                          "retention policies. When enabled, new blocks can be written "
                                                          "to both 'Append and Block Blobs' while maintaining "
                                                          "immutability protection and compliance. "
                                                          "Only new blocks can be added and any existing blocks cannot "
                                                          "be modified or deleted. This property cannot be changed with"
                                                          " ExtendImmutabilityPolicy API. The "
                                                          "'allowProtectedAppendWrites' and "
                                                          "'allowProtectedAppendWritesAll' properties are mutually "
                                                          "exclusive.")
            c.extra('period', type=int, help='The immutability period for the blobs in the container since the policy '
                                             'creation, in days.')
            # The raw SDK 'parameters' object is built from the extras above.
            c.ignore('parameters')
    # storage container list
    with self.argument_context('storage container list') as c:
        c.argument('num_results', arg_type=num_results_type)
    # set-permission: signed identifiers are managed through the dedicated policy commands.
    with self.argument_context('storage container set-permission') as c:
        c.ignore('signed_identifiers')
    with self.argument_context('storage container lease') as c:
        c.argument('container_name', container_name_type)
    # ARM-aware completion/validation for account and resource group names.
    with self.argument_context('storage container') as c:
        c.argument('account_name', completer=get_resource_name_completion_list('Microsoft.Storage/storageAccounts'))
        c.argument('resource_group_name', required=False, validator=process_resource_group)
    with self.argument_context('storage container immutability-policy') as c:
        c.argument('immutability_period_since_creation_in_days', options_list='--period')
        c.argument('container_name', container_name_type)
    with self.argument_context('storage container legal-hold') as c:
        c.argument('container_name', container_name_type)
        c.argument('account_name',
                   help='Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT.')
        c.argument('tags', nargs='+',
                   help='Space-separated tags. Each tag should be 3 to 23 alphanumeric characters and is normalized '
                        'to lower case')
    # legal-hold set/clear share the --w-all flag.
    for item in ['set', 'clear']:
        with self.argument_context(f'storage container legal-hold {item}') as c:
            c.extra('allow_protected_append_writes_all', options_list=['--allow-protected-append-writes-all',
                                                                       '--w-all'],
                    arg_type=get_three_state_flag(),
                    help="When enabled, new blocks can be written to both Append and Block Blobs while maintaining "
                         "legal hold protection and compliance. Only new blocks can be added and any existing blocks "
                         "cannot be modified or deleted.")
    # storage container policy: stored access policy CRUD on a container's ACL.
    with self.argument_context('storage container policy') as c:
        from .completers import get_storage_acl_name_completion_list
        t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
        c.argument('container_name', container_name_type)
        c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
                   completer=get_storage_acl_name_completion_list(t_base_blob_service, 'container_name',
                                                                  'get_container_acl'))
        help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_container_permissions))
        c.argument('permission', options_list='--permissions', help=help_str,
                   validator=get_permission_validator(t_container_permissions))
        c.argument('start', type=get_datetime_type(True),
                   help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
        c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
    # All policy sub-commands accept a container lease ID.
    for item in ['create', 'delete', 'list', 'show', 'update']:
        with self.argument_context('storage container policy {}'.format(item)) as c:
            c.extra('lease_id', options_list='--lease-id', help='The container lease ID.')
    # storage container generate-sas
    with self.argument_context('storage container generate-sas') as c:
        from .completers import get_storage_acl_name_completion_list
        t_container_permissions = self.get_sdk('blob.models#ContainerPermissions')
        c.register_sas_arguments()
        c.argument('id', options_list='--policy-name', validator=validate_policy,
                   help='The name of a stored access policy within the container\'s ACL.',
                   completer=get_storage_acl_name_completion_list(t_container_permissions, 'container_name',
                                                                  'get_container_acl'))
        c.argument('permission', options_list='--permissions',
                   help=sas_help.format(get_permission_help_string(t_container_permissions)),
                   validator=get_permission_validator(t_container_permissions))
        c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed '
                                         'using this shared access signature.')
        c.argument('content_disposition', help='Response header value for Content-Disposition when resource is '
                                               'accessed using this shared access signature.')
        c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed '
                                            'using this shared access signature.')
        c.argument('content_language', help='Response header value for Content-Language when resource is accessed '
                                            'using this shared access signature.')
        c.argument('content_type', help='Response header value for Content-Type when resource is accessed '
                                        'using this shared access signature.')
        c.argument('as_user', min_api='2018-11-09', action='store_true',
                   validator=as_user_validator,
                   help="Indicates that this command return the SAS signed with the user delegation key. "
                        "The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
        c.ignore('sas_token')
with self.argument_context('storage container lease') as c:
c.argument('lease_duration', type=int)
c.argument('lease_break_period', type=int)
    # storage container list (track2 data plane)
    with self.argument_context('storage container list', resource_type=ResourceType.DATA_STORAGE_BLOB) as c:
        c.extra('timeout', timeout_type)
        c.argument('marker', arg_type=marker_type)
        c.argument('num_results', arg_type=num_results_type)
        c.argument('prefix',
                   help='Filter the results to return only blobs whose name begins with the specified prefix.')
        c.argument('include_metadata', arg_type=get_three_state_flag(),
                   help='Specify that container metadata to be returned in the response.')
        c.argument('show_next_marker', action='store_true', is_preview=True,
                   help='Show nextMarker in result when specified.')
        c.argument('include_deleted', arg_type=get_three_state_flag(), min_api='2020-02-10',
                   help='Specify that deleted containers to be returned in the response. This is for container restore '
                        'enabled account. The default value is `False`')
    # storage container restore: undelete a soft-deleted container.
    with self.argument_context('storage container restore') as c:
        c.argument('deleted_container_name', options_list=['--name', '-n'],
                   help='Specify the name of the deleted container to restore.')
        c.argument('deleted_container_version', options_list=['--deleted-version'],
                   help='Specify the version of the deleted container to restore.')
        c.extra('timeout', timeout_type)
    # storage container-rm: management-plane (ARM) container commands.
    with self.argument_context('storage container-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
        from .sdkutil import get_container_access_type_names
        c.argument('container_name', container_name_type, options_list=('--name', '-n'), id_part='child_name_2')
        c.argument('account_name', storage_account_type)
        c.argument('resource_group_name', required=False)
        c.argument('public_access', validator=validate_container_public_access,
                   arg_type=get_enum_type(get_container_access_type_names()),
                   help='Specify whether data in the container may be accessed publicly.')
        c.ignore('filter', 'maxpagesize')
    with self.argument_context('storage container-rm create', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('fail_on_exist', help='Throw an exception if the container already exists.')
        c.argument('enable_vlw', arg_type=get_three_state_flag(), min_api='2021-01-01', is_preview=True,
                   help='The object level immutability property of the container. The property is immutable and can '
                        'only be set to true at the container creation time. Existing containers must undergo a migration '
                        'process.')
    # Encryption-scope and NFSv3-squash options shared by create and update.
    for item in ['create', 'update']:
        with self.argument_context('storage container-rm {}'.format(item),
                                   resource_type=ResourceType.MGMT_STORAGE) as c:
            from ._validators import validate_container_nfsv3_squash
            t_root_squash = self.get_models('RootSquashType', resource_type=ResourceType.MGMT_STORAGE)
            c.argument('default_encryption_scope', options_list=['--default-encryption-scope', '-d'],
                       arg_group='Encryption Policy', min_api='2019-06-01',
                       help='Default the container to use specified encryption scope for all writes.')
            c.argument('deny_encryption_scope_override',
                       options_list=['--deny-encryption-scope-override', '--deny-override'],
                       arg_type=get_three_state_flag(), arg_group='Encryption Policy', min_api='2019-06-01',
                       help='Block override of encryption scope from the container default.')
            c.extra('root_squash', arg_type=get_enum_type(t_root_squash), min_api='2021-06-01',
                    help='Enable NFSv3 squash on blob container.', validator=validate_container_nfsv3_squash)
            # The two raw SDK flags are derived from --root-squash by the validator.
            c.ignore('enable_nfs_v3_root_squash')
            c.ignore('enable_nfs_v3_all_squash')
    with self.argument_context('storage container-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('account_name', storage_account_type, id_part=None)
        c.argument('include_deleted', action='store_true',
                   help='Include soft deleted containers when specified.')
    # storage share (data plane) and share-rm (management plane) arguments.
    with self.argument_context('storage share') as c:
        c.argument('share_name', share_name_type, options_list=('--name', '-n'))
    with self.argument_context('storage share-rm', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('resource_group_name', required=False)
        c.argument('account_name', storage_account_type)
        c.argument('share_name', share_name_type, options_list=('--name', '-n'), id_part='child_name_2')
        c.argument('expand', default=None)
        c.argument('x_ms_snapshot', options_list=['--snapshot'], is_preview=True,
                   help='The DateTime value that specifies the share snapshot to retrieve.')
        c.ignore('filter', 'maxpagesize')
    with self.argument_context('storage share-rm delete', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('include', default='none')
    with self.argument_context('storage share-rm update', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.ignore('x_ms_snapshot')
    # Quota/metadata/protocol options shared by create, update and snapshot.
    for item in ['create', 'update', 'snapshot']:
        with self.argument_context('storage share-rm {}'.format(item), resource_type=ResourceType.MGMT_STORAGE) as c:
            t_enabled_protocols, t_root_squash, t_access_tier = \
                self.get_models('EnabledProtocols', 'RootSquashType', 'ShareAccessTier',
                                resource_type=ResourceType.MGMT_STORAGE)
            c.argument('share_quota', type=int, options_list=['--quota', '-q'],
                       help='The maximum size of the share in gigabytes. Must be greater than 0, and less than or '
                            'equal to 5TB (5120). For Large File Shares, the maximum size is 102400.')
            c.argument('metadata', nargs='+',
                       help='Metadata in space-separated key=value pairs that is associated with the share. '
                            'This overwrites any existing metadata',
                       validator=validate_metadata)
            c.argument('enabled_protocols', arg_type=get_enum_type(t_enabled_protocols),
                       min_api='2019-06-01', help='Immutable property for file shares protocol. NFS protocol will be '
                       'only available for premium file shares (file shares in the FileStorage account type).')
            c.argument('root_squash', arg_type=get_enum_type(t_root_squash),
                       min_api='2019-06-01', help='Reduction of the access rights for the remote superuser.')
            c.argument('access_tier', arg_type=get_enum_type(t_access_tier), min_api='2019-06-01',
                       help='Access tier for specific share. GpV2 account can choose between TransactionOptimized '
                            '(default), Hot, and Cool. FileStorage account can choose Premium.')
    with self.argument_context('storage share-rm list', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('account_name', storage_account_type, id_part=None)
        c.argument('include_deleted', action='store_true',
                   help='Include soft deleted file shares when specified.')
        c.argument('include_snapshot', action='store_true',
                   help='Include file share snapshots when specified.')
    # storage share-rm restore: undelete a soft-deleted share.
    with self.argument_context('storage share-rm restore', resource_type=ResourceType.MGMT_STORAGE) as c:
        c.argument('deleted_version',
                   help='Identify the version of the deleted share that will be restored.')
        c.argument('share_name',
                   help='The file share name. Identify the name of the deleted share that will be restored.')
        c.argument('restored_name',
                   help='A new file share name to be restored. If not specified, deleted share name will be used.')
    # storage share url/list/exists
    with self.argument_context('storage share url') as c:
        c.argument('unc', action='store_true', help='Output UNC network path.')
        c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
    with self.argument_context('storage share list') as c:
        c.argument('num_results', arg_type=num_results_type)
    with self.argument_context('storage share exists') as c:
        c.ignore('directory_name', 'file_name')
    # storage share policy: stored access policy CRUD on a share's ACL.
    with self.argument_context('storage share policy') as c:
        from .completers import get_storage_acl_name_completion_list
        t_file_svc = self.get_sdk('file#FileService')
        t_share_permissions = self.get_sdk('file.models#SharePermissions')
        c.argument('container_name', share_name_type)
        c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
                   completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_share_acl'))
        help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_share_permissions))
        c.argument('permission', options_list='--permissions', help=help_str,
                   validator=get_permission_validator(t_share_permissions))
        c.argument('start', type=get_datetime_type(True),
                   help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
        c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
    with self.argument_context('storage share delete') as c:
        from .sdkutil import get_delete_file_snapshot_type_names
        c.argument('delete_snapshots', arg_type=get_enum_type(get_delete_file_snapshot_type_names()),
                   help='Specify the deletion strategy when the share has snapshots.')
    # storage share generate-sas
    with self.argument_context('storage share generate-sas') as c:
        from .completers import get_storage_acl_name_completion_list
        t_share_permissions = self.get_sdk('file.models#SharePermissions')
        c.register_sas_arguments()
        c.argument('id', options_list='--policy-name',
                   help='The name of a stored access policy within the share\'s ACL.',
                   completer=get_storage_acl_name_completion_list(t_share_permissions, 'share_name', 'get_share_acl'))
        c.argument('permission', options_list='--permissions',
                   help=sas_help.format(get_permission_help_string(t_share_permissions)),
                   validator=get_permission_validator(t_share_permissions))
        c.ignore('sas_token')
    # storage directory / storage file common arguments.
    with self.argument_context('storage directory') as c:
        c.argument('directory_name', directory_type, options_list=('--name', '-n'))
    with self.argument_context('storage directory exists') as c:
        c.ignore('file_name')
        c.argument('directory_name', required=True)
    with self.argument_context('storage file') as c:
        c.argument('file_name', file_name_type, options_list=('--name', '-n'))
        c.argument('directory_name', directory_type, required=False)
    with self.argument_context('storage file copy') as c:
        c.argument('share_name', share_name_type, options_list=('--destination-share', '-s'),
                   help='Name of the destination share. The share must exist.')
    with self.argument_context('storage file copy cancel') as c:
        c.register_path_argument(options_list=('--destination-path', '-p'))
    with self.argument_context('storage file delete') as c:
        c.register_path_argument()
    # storage file download
    with self.argument_context('storage file download') as c:
        c.register_path_argument()
        c.argument('file_path', options_list=('--dest',), type=file_type, required=False,
                   help='Path of the file to write to. The source filename will be used if not specified.',
                   validator=process_file_download_namespace, completer=FilesCompleter())
        c.argument('path', validator=None)  # validator called manually from process_file_download_namespace
        c.extra('no_progress', progress_type)
        c.argument('max_connections', type=int)
        c.argument('start_range', type=int)
        c.argument('end_range', type=int)
    with self.argument_context('storage file exists') as c:
        c.register_path_argument()
    # storage file generate-sas
    with self.argument_context('storage file generate-sas') as c:
        from .completers import get_storage_acl_name_completion_list
        c.register_path_argument()
        c.register_sas_arguments()
        t_file_svc = self.get_sdk('file.fileservice#FileService')
        t_file_permissions = self.get_sdk('file.models#FilePermissions')
        c.argument('id', options_list='--policy-name',
                   help='The name of a stored access policy within the container\'s ACL.',
                   completer=get_storage_acl_name_completion_list(t_file_svc, 'container_name', 'get_container_acl'))
        c.argument('permission', options_list='--permissions',
                   help=sas_help.format(get_permission_help_string(t_file_permissions)),
                   validator=get_permission_validator(t_file_permissions))
        c.ignore('sas_token')
    # storage file list
    with self.argument_context('storage file list') as c:
        from .completers import dir_path_completer
        c.argument('directory_name', options_list=('--path', '-p'), help='The directory path within the file share.',
                   completer=dir_path_completer)
        c.argument('num_results', arg_type=num_results_type)
    with self.argument_context('storage file metadata show') as c:
        c.register_path_argument()
    with self.argument_context('storage file metadata update') as c:
        c.register_path_argument()
    with self.argument_context('storage file resize') as c:
        c.register_path_argument()
        c.argument('content_length', options_list='--size')
    with self.argument_context('storage file show') as c:
        c.register_path_argument()
    # storage file update: content settings applied in update mode.
    with self.argument_context('storage file update') as c:
        t_file_content_settings = self.get_sdk('file.models#ContentSettings')
        c.register_path_argument()
        c.register_content_settings_argument(t_file_content_settings, update=True)
    # storage file upload: content settings are guessed from the local source file.
    with self.argument_context('storage file upload') as c:
        t_file_content_settings = self.get_sdk('file.models#ContentSettings')
        c.register_path_argument(default_file_param='local_file_path')
        c.register_content_settings_argument(t_file_content_settings, update=False, guess_from_file='local_file_path')
        c.argument('local_file_path', options_list='--source', type=file_type, completer=FilesCompleter())
        c.extra('no_progress', progress_type)
        c.argument('max_connections', type=int)
    with self.argument_context('storage file url') as c:
        c.register_path_argument()
        c.argument('protocol', arg_type=get_enum_type(['http', 'https'], 'https'), help='Protocol to use.')
    # storage file upload-batch
    # NOTE(review): t_file_content_settings here relies on the binding from the
    # 'storage file upload' block above (same function scope) — verify ordering if refactoring.
    with self.argument_context('storage file upload-batch') as c:
        from ._validators import process_file_upload_batch_parameters
        c.argument('source', options_list=('--source', '-s'), validator=process_file_upload_batch_parameters)
        c.argument('destination', options_list=('--destination', '-d'))
        c.argument('max_connections', arg_group='Download Control', type=int)
        c.argument('validate_content', action='store_true', min_api='2016-05-31')
        c.register_content_settings_argument(t_file_content_settings, update=False, arg_group='Content Settings')
        c.extra('no_progress', progress_type)
    # storage file download-batch
    with self.argument_context('storage file download-batch') as c:
        from ._validators import process_file_download_batch_parameters
        c.argument('source', options_list=('--source', '-s'), validator=process_file_download_batch_parameters)
        c.argument('destination', options_list=('--destination', '-d'))
        c.argument('max_connections', arg_group='Download Control', type=int)
        c.argument('validate_content', action='store_true', min_api='2016-05-31')
        c.extra('no_progress', progress_type)
    with self.argument_context('storage file delete-batch') as c:
        from ._validators import process_file_batch_source_parameters
        c.argument('source', options_list=('--source', '-s'), validator=process_file_batch_source_parameters)
    # storage file copy start
    with self.argument_context('storage file copy start') as c:
        from azure.cli.command_modules.storage._validators import validate_source_uri
        c.register_path_argument(options_list=('--destination-path', '-p'))
        c.register_source_uri_arguments(validator=validate_source_uri)
        c.extra('file_snapshot', default=None, arg_group='Copy Source',
                help='The file snapshot for the source storage account.')
    # storage file copy start-batch: all options below belong to the 'Copy Source' group.
    with self.argument_context('storage file copy start-batch', arg_group='Copy Source') as c:
        from ._validators import get_source_file_or_blob_service_client
        c.argument('source_client', ignore_type, validator=get_source_file_or_blob_service_client)
        c.extra('source_account_name')
        c.extra('source_account_key')
        c.extra('source_uri')
        c.argument('source_sas')
        c.argument('source_container')
        c.argument('source_share')
    # storage cors: services string is a combination of b(lob), f(ile), q(ueue), t(able).
    with self.argument_context('storage cors list') as c:
        c.extra('services', validator=get_char_options_validator('bfqt', 'services'), default='bqft',
                options_list='--services', required=False)
    with self.argument_context('storage cors add') as c:
        c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
                options_list='--services')
        c.argument('max_age')
        c.argument('origins', nargs='+')
        c.argument('methods', nargs='+',
                   arg_type=get_enum_type(['DELETE', 'GET', 'HEAD', 'MERGE', 'POST', 'OPTIONS', 'PUT']))
        c.argument('allowed_headers', nargs='+')
        c.argument('exposed_headers', nargs='+')
    with self.argument_context('storage cors clear') as c:
        c.extra('services', validator=get_char_options_validator('bfqt', 'services'), required=True,
                options_list='--services')
with self.argument_context('storage queue generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the share\'s ACL.',
completer=get_storage_acl_name_completion_list(t_queue_permissions, 'queue_name', 'get_queue_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_queue_permissions)),
validator=get_permission_validator(t_queue_permissions))
c.ignore('sas_token')
c.ignore('auth_mode')
with self.argument_context('storage queue') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'))
with self.argument_context('storage queue list') as c:
c.argument('include_metadata', help='Specify that queue metadata be returned in the response.')
c.argument('marker', arg_type=marker_type)
c.argument('num_results', arg_type=num_results_type)
c.argument('prefix', help='Filter the results to return only queues whose names '
'begin with the specified prefix.')
c.argument('show_next_marker', action='store_true',
help='Show nextMarker in result when specified.')
c.extra('timeout', help='Request timeout in seconds. Apply to each call to the service.', type=int)
with self.argument_context('storage queue create') as c:
c.argument('queue_name', queue_name_type, options_list=('--name', '-n'), completer=None)
with self.argument_context('storage queue policy') as c:
from .completers import get_storage_acl_name_completion_list
t_queue_permissions = self.get_sdk('queue.models#QueuePermissions')
c.argument('container_name', queue_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_queue_service, 'container_name', 'get_queue_acl'))
help_str = 'Allowed values: {}. Can be combined'.format(get_permission_help_string(t_queue_permissions))
c.argument('permission', options_list='--permissions', help=help_str,
validator=get_permission_validator(t_queue_permissions))
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
c.ignore('auth_mode')
with self.argument_context('storage message') as c:
c.argument('queue_name', queue_name_type)
c.argument('message_id', options_list='--id')
c.argument('content', type=unicode_string, help='Message content, up to 64KB in size.')
with self.argument_context('storage remove') as c:
from .completers import file_path_completer
c.extra('container_name', container_name_type, validator=validate_azcopy_remove_arguments)
c.extra('blob_name', options_list=('--name', '-n'), arg_type=blob_name_type)
c.extra('share_name', share_name_type, help='The file share name.')
c.extra('path', options_list=('--path', '-p'),
help='The path to the file within the file share.',
completer=file_path_completer)
c.argument('exclude_pattern', exclude_pattern_type)
c.argument('include_pattern', include_pattern_type)
c.argument('exclude_path', exclude_path_type)
c.argument('include_path', include_path_type)
c.argument('recursive', recursive_type)
c.ignore('destination')
c.ignore('service')
c.ignore('target')
with self.argument_context('storage table') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'))
with self.argument_context('storage table create') as c:
c.argument('table_name', table_name_type, options_list=('--name', '-n'), completer=None)
c.argument('fail_on_exist', help='Throw an exception if the table already exists.')
with self.argument_context('storage table policy') as c:
from ._validators import table_permission_validator
from .completers import get_storage_acl_name_completion_list
c.argument('container_name', table_name_type)
c.argument('policy_name', options_list=('--name', '-n'), help='The stored access policy name.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
help_str = 'Allowed values: (r)ead/query (a)dd (u)pdate (d)elete. Can be combined.'
c.argument('permission', options_list='--permissions', help=help_str, validator=table_permission_validator)
c.argument('start', type=get_datetime_type(True),
help='start UTC datetime (Y-m-d\'T\'H:M:S\'Z\'). Defaults to time of request.')
c.argument('expiry', type=get_datetime_type(True), help='expiration UTC datetime in (Y-m-d\'T\'H:M:S\'Z\')')
with self.argument_context('storage table generate-sas') as c:
from .completers import get_storage_acl_name_completion_list
c.register_sas_arguments()
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy within the table\'s ACL.',
completer=get_storage_acl_name_completion_list(t_table_service, 'table_name', 'get_table_acl'))
c.argument('permission', options_list='--permissions',
help=sas_help.format('(r)ead/query (a)dd (u)pdate (d)elete'),
validator=table_permission_validator)
c.ignore('sas_token')
with self.argument_context('storage entity') as c:
c.ignore('property_resolver')
c.argument('entity', options_list=('--entity', '-e'), validator=validate_entity, nargs='+')
c.argument('select', nargs='+', validator=validate_select,
help='Space-separated list of properties to return for each entity.')
with self.argument_context('storage entity insert') as c:
c.argument('if_exists', arg_type=get_enum_type(['fail', 'merge', 'replace']))
with self.argument_context('storage entity query') as c:
c.argument('accept', default='minimal', validator=validate_table_payload_format,
arg_type=get_enum_type(['none', 'minimal', 'full']),
help='Specifies how much metadata to include in the response payload.')
c.argument('marker', validator=validate_marker, nargs='+')
for item in ['create', 'show', 'delete', 'exists', 'metadata update', 'metadata show']:
with self.argument_context('storage fs {}'.format(item)) as c:
c.extra('file_system_name', options_list=['--name', '-n'],
help="File system name (i.e. container name).", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs create') as c:
from .sdkutil import get_fs_access_type_names
c.argument('public_access', arg_type=get_enum_type(get_fs_access_type_names()),
validator=validate_fs_public_access,
help="Specify whether data in the file system may be accessed publicly and the level of access.")
with self.argument_context('storage fs generate-sas') as c:
t_file_system_permissions = self.get_sdk('_models#FileSystemSasPermissions',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_sas_arguments()
c.argument('file_system', options_list=['--name', '-n'], help="File system name (i.e. container name).")
c.argument('id', options_list='--policy-name',
help='The name of a stored access policy.')
c.argument('permission', options_list='--permissions',
help=sas_help.format(get_permission_help_string(t_file_system_permissions)),
validator=get_permission_validator(t_file_system_permissions))
c.argument('cache_control', help='Response header value for Cache-Control when resource is accessed'
'using this shared access signature.')
c.argument('content_disposition', help='Response header value for Content-Disposition when resource is accessed'
'using this shared access signature.')
c.argument('content_encoding', help='Response header value for Content-Encoding when resource is accessed'
'using this shared access signature.')
c.argument('content_language', help='Response header value for Content-Language when resource is accessed'
'using this shared access signature.')
c.argument('content_type', help='Response header value for Content-Type when resource is accessed'
'using this shared access signature.')
c.argument('as_user', min_api='2018-11-09', action='store_true',
validator=as_user_validator,
help="Indicates that this command return the SAS signed with the user delegation key. "
"The expiry parameter and '--auth-mode login' are required if this argument is specified. ")
c.ignore('sas_token')
c.argument('full_uri', action='store_true',
help='Indicate that this command return the full blob URI and the shared access signature token.')
with self.argument_context('storage fs list') as c:
c.argument('include_metadata', arg_type=get_three_state_flag(),
help='Specify that file system metadata be returned in the response. The default value is "False".')
c.argument('name_starts_with', options_list=['--prefix'],
help='Filter the results to return only file systems whose names begin with the specified prefix.')
for item in ['create', 'show', 'delete', 'exists', 'move', 'metadata update', 'metadata show']:
with self.argument_context('storage fs directory {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help="File system name (i.e. container name).", required=True)
c.extra('directory_path', options_list=['--name', '-n'],
help="The name of directory.", required=True)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs directory create') as c:
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
with self.argument_context('storage fs directory list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help="File system name (i.e. container name).", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('path', help="Filter the results to return only paths under the specified path.")
c.argument('num_results', type=int, help='Specify the maximum number of results to return.')
with self.argument_context('storage fs directory move') as c:
c.argument('new_name', options_list=['--new-directory', '-d'],
help='The new directory name the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}".')
with self.argument_context('storage fs directory upload') as c:
from ._validators import validate_fs_directory_upload_destination_url
c.extra('destination_fs', options_list=['--file-system', '-f'], required=True,
help='The upload destination file system.')
c.extra('destination_path', options_list=['--destination-path', '-d'],
validator=validate_fs_directory_upload_destination_url,
help='The upload destination directory path. It should be an absolute path to file system. '
'If the specified destination path does not exist, a new directory path will be created.')
c.argument('source', options_list=['--source', '-s'],
help='The source file path to upload from.')
c.argument('recursive', recursive_type, help='Recursively upload files. If enabled, all the files '
'including the files in subdirectories will be uploaded.')
c.ignore('destination')
with self.argument_context('storage fs directory download') as c:
from ._validators import validate_fs_directory_download_source_url
c.extra('source_fs', options_list=['--file-system', '-f'], required=True,
help='The download source file system.')
c.extra('source_path', options_list=['--source-path', '-s'],
validator=validate_fs_directory_download_source_url,
help='The download source directory path. It should be an absolute path to file system.')
c.argument('destination', options_list=['--destination-path', '-d'],
help='The destination local directory path to download.')
c.argument('recursive', recursive_type, help='Recursively download files. If enabled, all the files '
'including the files in subdirectories will be downloaded.')
c.ignore('source')
with self.argument_context('storage fs file list') as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help="File system name (i.e. container name).", required=True)
c.argument('recursive', arg_type=get_three_state_flag(), default=True,
help='Look into sub-directories recursively when set to true.')
c.argument('exclude_dir', action='store_true',
help='List only files in the given file system.')
c.argument('path', help='Filter the results to return only paths under the specified path.')
c.argument('num_results', type=int, default=5000,
help='Specify the maximum number of results to return. If the request does not specify num_results '
'or specifies a value greater than 5,000, the server will return up to 5,000 items.')
c.argument('marker',
help='An opaque continuation token. This value can be retrieved from the next_marker field of a '
'previous generator object. If specified, this generator will begin returning results from this '
'point.')
c.argument('show_next_marker', action='store_true', is_preview=True,
help='Show nextMarker in result when specified.')
for item in ['create', 'show', 'delete', 'exists', 'upload', 'append', 'download', 'show', 'metadata update',
'metadata show']:
with self.argument_context('storage fs file {}'.format(item)) as c:
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name (i.e. container name).', required=True)
c.extra('path', options_list=['-p', '--path'], help="The file path in a file system.",
required=True)
c.extra('timeout', timeout_type)
c.argument('content', help='Content to be appended to file.')
with self.argument_context('storage fs file create') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('permissions', permissions_type)
c.extra('umask', umask_type)
c.extra('timeout', timeout_type)
with self.argument_context('storage fs file download') as c:
c.argument('destination_path', options_list=['--destination', '-d'], type=file_type,
help='The local file where the file or folder will be downloaded to. The source filename will be '
'used if not specified.')
c.argument('overwrite', arg_type=get_three_state_flag(),
help="Overwrite an existing file when specified. Default value is false.")
with self.argument_context('storage fs file move') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name (i.e. container name).', required=True)
c.extra('path', options_list=['-p', '--path'], required=True,
help="The original file path users want to move in a file system.")
c.argument('new_name', options_list=['--new-path'],
help='The new path the users want to move to. The value must have the following format: '
'"{filesystem}/{directory}/{subdirectory}/{file}".')
with self.argument_context('storage fs file upload') as c:
t_file_content_settings = self.get_sdk('_models#ContentSettings',
resource_type=ResourceType.DATA_STORAGE_FILEDATALAKE)
c.register_content_settings_argument(t_file_content_settings, update=False)
c.argument('local_path', options_list=['--source', '-s'],
help='Path of the local file to upload as the file content.')
c.argument('overwrite', arg_type=get_three_state_flag(), help="Overwrite an existing file when specified.")
c.argument('if_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag matches the value specified.")
c.argument('if_none_match', arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the operation "
"only if the resource's ETag does not match the value specified.")
c.argument('if_modified_since', arg_group='Precondition',
help="A Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('if_unmodified_since', arg_group='Precondition',
help="A Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').")
c.argument('permissions', permissions_type)
c.argument('umask', umask_type)
for item in ['set', 'show']:
with self.argument_context('storage fs access {}'.format(item)) as c:
from ._validators import validate_access_control
c.extra('file_system_name', options_list=['-f', '--file-system'],
help='File system name (i.e. container name).', required=True)
c.extra('directory_path', options_list=['-p', '--path'],
help='The path to a file or directory in the specified file system.', required=True)
c.argument('permissions', validator=validate_access_control)
c.ignore('upn')
for item in ['set-recursive', 'update-recursive', 'remove-recursive']:
with self.argument_context('storage fs access {}'.format(item)) as c:
c.register_fs_directory_arguments()
c.argument('acl', help='The value is a comma-separated list of access control entries. Each access control '
'entry (ACE) consists of a scope, a type, a user or group identifier, and permissions in the '
'format "[scope:][type]:[id]:[permissions]". For more information, please refer to '
'https://docs.microsoft.com/azure/storage/blobs/data-lake-storage-access-control.')
c.extra('continuation',
help='Optional continuation token that can be used to resume previously stopped operation.')
c.extra('batch_size', type=int, help='Optional. If data set size exceeds batch size then operation will '
'be split into multiple requests so that progress can be tracked. Batch size should be between 1 '
'and 2000. The default when unspecified is 2000.')
c.extra('max_batches', type=int, help='Optional. Define maximum number of batches that single change '
'Access Control operation can execute. If maximum is reached before all sub-paths are processed, '
'then continuation token can be used to resume operation. Empty value indicates that maximum '
'number of batches in unbound and operation continues till end.')
c.extra('continue_on_failure', arg_type=get_three_state_flag(),
help='If set to False, the operation will terminate quickly on encountering user errors (4XX). '
'If True, the operation will ignore user errors and proceed with the operation on other '
'sub-entities of the directory. Continuation token will only be returned when '
'--continue-on-failure is True in case of user errors. If not set the default value is False '
'for this.')
| 70.356589 | 183 | 0.652762 |
ace5f0b6d98c541c79aa35a7b927359ae41599c6 | 4,924 | py | Python | example/BtmUpPose/Multi3D/train/loss.py | ddddwee1/TorchSUL | 775832049564d8ee7c43e510b57bd716e0a746dd | [
"WTFPL"
] | 7 | 2019-12-14T12:23:36.000Z | 2021-11-16T00:25:13.000Z | example/BtmUpPose/Multi3D/train/loss.py | ddddwee1/TorchSUL | 775832049564d8ee7c43e510b57bd716e0a746dd | [
"WTFPL"
] | 1 | 2020-10-20T06:33:53.000Z | 2020-10-26T19:01:21.000Z | example/BtmUpPose/Multi3D/train/loss.py | ddddwee1/TorchSUL | 775832049564d8ee7c43e510b57bd716e0a746dd | [
"WTFPL"
] | 1 | 2021-08-24T09:09:36.000Z | 2021-08-24T09:09:36.000Z | import torch
from TorchSUL import Model as M
import torch.nn as nn
import config
class AELoss(M.Model):
    # Associative-embedding (AE) grouping loss for bottom-up multi-person pose.
    # `idmap` carries one identity-tag channel per joint (indexed [joint, y, x]);
    # `pts` carries keypoints indexed [instance, joint, (x, y, conf)].
    # Tags of joints belonging to one instance are pulled toward their mean;
    # mean tags of different instances are pushed apart.

    def inst_ae(self, idmap, pts, debugmap=None):
        """Compute (push_loss, pull_loss) for a single sample.

        idmap:    per-joint tag map, indexed [joint, y, x].
        pts:      keypoints [instance, joint, (x, y, conf)]; a joint is used
                  only when its confidence entry is > 0.
        debugmap: optional tensor with idmap's layout; sampled locations are
                  marked with 1 (debug/visualization aid).
        Returns two scalar tensors on idmap's device.
        """
        area = 0  # half-width of the square window sampled around each joint (0 => exact pixel only)
        pull_loss = 0
        conf = pts[:, :, 2]  # read confidences before the integer cast below
        pts = pts.long()     # integer pixel coordinates for map indexing
        tags_means = []      # one mean tag per instance that had >= 1 valid joint
        for i in range(config.max_inst):
            tags = []
            for j in range(config.num_pts):
                if conf[i, j] > 0:
                    x, y = pts[i, j, 0], pts[i, j, 1]
                    for xx in range(x - area, x + area + 1):
                        for yy in range(y - area, y + area + 1):
                            # skip window cells that fall outside the output map
                            if xx < 0 or xx >= config.out_size or yy < 0 or yy >= config.out_size:
                                continue
                            tag = idmap[j, yy, xx]
                            tags.append(tag)
                            if debugmap is not None:
                                debugmap[j, yy, xx] = 1
            if len(tags) == 0:
                # instance has no usable joints -> contributes nothing
                continue
            tags = torch.stack(tags)
            tags_mean = tags.mean()
            # pull term: variance of this instance's tags around their mean
            intra = torch.pow(tags - tags_mean, 2).mean()
            pull_loss = pull_loss + intra
            tags_means.append(tags_mean)
        num_tags = len(tags_means)
        if num_tags == 0:
            # no valid instance at all: both losses are zero
            return torch.zeros(1)[0].float().to(idmap.device), torch.zeros(1)[0].float().to(idmap.device)
        pull_loss = pull_loss / num_tags
        if num_tags == 1:
            # a single instance cannot be pushed against anything
            return torch.zeros(1)[0].float().to(idmap.device), pull_loss
        push_loss = 0
        # push term: exp(-d^2) is ~1 when two instance means collide and
        # decays toward 0 as they separate
        for i in range(num_tags):
            for j in range(num_tags):
                if i != j:
                    diff = torch.pow(tags_means[i] - tags_means[j], 2)
                    diff = torch.exp(-diff)
                    push_loss = push_loss + diff
        # each unordered pair was visited twice above, hence the 0.5 factor
        push_loss = push_loss * 0.5 / ((num_tags - 1) * num_tags)
        return push_loss, pull_loss

    def forward(self, idmap, pts, is_muco):
        """Batched inst_ae: returns (push, pull), each of shape [batch].

        NOTE(review): `is_muco` is accepted but never used here — presumably
        kept so all loss modules share one call signature; confirm before
        removing it.
        """
        bsize = idmap.shape[0]
        push_loss = []
        pull_loss = []
        for i in range(bsize):
            push, pull = self.inst_ae(idmap[i], pts[i])
            push_loss.append(push)
            pull_loss.append(pull)
        push_loss = torch.stack(push_loss)
        pull_loss = torch.stack(pull_loss)
        return push_loss, pull_loss
class DepthLoss(M.Model):
    # Root-depth regression loss: squared error between the predicted depth
    # map (only channel 0 is read: depout[0, y, x]) and a ground-truth depth
    # value, sampled in a small window around each annotated location.

    def inst_dep(self, depout, depth):
        """Depth loss for one sample.

        depout: predicted depth map; indexed [0, y, x] (single channel used).
        depth:  per-instance targets, indexed [instance, (x, y, depth)].
        Returns a scalar tensor; zero when no sample falls inside the map.
        """
        area = 1  # half-width of the 3x3 window sampled around each point
        pts = depth[:, :2]
        d = depth[:, 2]
        pts = pts.long()  # integer pixel coordinates for indexing
        total_loss = 0
        counter = 0  # number of window cells actually accumulated
        for i in range(depth.shape[0]):
            x, y = pts[i, 0], pts[i, 1]
            gt = d[i]
            for xx in range(x - area, x + area + 1):
                for yy in range(y - area, y + area + 1):
                    # ignore window cells outside the output map
                    if xx < 0 or xx >= config.out_size or yy < 0 or yy >= config.out_size:
                        continue
                    val = depout[0, yy, xx]
                    ls = torch.mean(torch.pow(val - gt, 2))
                    total_loss = total_loss + ls
                    counter += 1
        if counter == 0:
            return torch.zeros(1)[0].float().to(depout.device)
        else:
            total_loss = total_loss / counter
            return total_loss

    def forward(self, depout, depth, is_muco):
        """Per-sample depth losses, shape [batch].

        Samples flagged is_muco == 0 receive a zero loss instead of being
        evaluated — presumably those samples lack depth annotations (TODO:
        confirm against the dataset loader).
        """
        bsize = depout.shape[0]
        losses = []
        for i in range(bsize):
            if is_muco[i] == 0:
                losses.append(torch.zeros(1)[0].float().to(depout.device))
            else:
                ls = self.inst_dep(depout[i], depth[i])
                losses.append(ls)
        losses = torch.stack(losses)
        return losses
class DepthAllLoss(M.Model):
    # Per-joint depth regression loss. Unlike DepthLoss (single channel,
    # 3x3 window), this reads one depth channel per joint (depout[j, y, x],
    # j over config.num_pts - 1) at the exact annotated pixel (area = 0).

    def inst_dep(self, depout, depth):
        """Depth loss over all joints for one sample.

        depout: predicted depth maps, indexed [joint, y, x].
        depth:  targets indexed [instance, joint, (x, y, depth)].
        Returns a scalar tensor; zero when no sample falls inside the map.
        """
        area = 0  # exact pixel only
        pts = depth[:, :, :2]
        d = depth[:, :, 2]
        pts = pts.long()  # integer pixel coordinates for indexing
        total_loss = 0
        counter = 0  # number of cells actually accumulated
        for i in range(depth.shape[0]):
            # only the first num_pts - 1 joints are supervised here
            # (NOTE(review): the last joint appears intentionally excluded —
            # confirm which joint that is in config)
            for j in range(config.num_pts - 1):
                x, y = pts[i, j, 0], pts[i, j, 1]
                gt = d[i, j]
                for xx in range(x - area, x + area + 1):
                    for yy in range(y - area, y + area + 1):
                        # ignore cells outside the output map
                        if xx < 0 or xx >= config.out_size or yy < 0 or yy >= config.out_size:
                            continue
                        val = depout[j, yy, xx]
                        ls = torch.mean(torch.pow(val - gt, 2))
                        total_loss = total_loss + ls
                        counter += 1
        if counter == 0:
            return torch.zeros(1)[0].float().to(depout.device)
        else:
            total_loss = total_loss / counter
            return total_loss

    def forward(self, depout, depth, is_muco):
        """Per-sample losses, shape [batch]; is_muco == 0 samples get zero
        loss (same gating as DepthLoss.forward)."""
        bsize = depout.shape[0]
        losses = []
        for i in range(bsize):
            if is_muco[i] == 0:
                losses.append(torch.zeros(1)[0].float().to(depout.device))
            else:
                ls = self.inst_dep(depout[i], depth[i])
                losses.append(ls)
        losses = torch.stack(losses)
        return losses
class HmapLoss(M.Model):
    # Masked mean-squared-error heatmap loss. Pixels where `mask` is set are
    # excluded; for samples with is_muco == 0 only the channels listed in
    # config.muco_coco_idx are averaged, otherwise all channels are used.

    def forward(self, hmap, gt, mask, is_muco):
        """Return one scalar loss per batch element, shape [batch]."""
        # Per-pixel squared error, zeroed wherever the ignore mask is 1.
        sq_err = torch.pow(hmap - gt, 2) * (1 - mask.expand_as(hmap))
        # Average out the spatial dims -> [batch, channel].
        per_channel = sq_err.mean(dim=3).mean(dim=2)
        batch_losses = []
        for idx in range(hmap.shape[0]):
            if is_muco[idx] == 0:
                # restrict supervision to the shared channel subset
                selected = per_channel[idx, config.muco_coco_idx]
            else:
                selected = per_channel[idx]
            batch_losses.append(selected.mean())
        return torch.stack(batch_losses)
class ModelWithLoss(M.Model):
    # Wraps the pose network together with every training loss so a single
    # forward pass yields all loss terms plus the raw network outputs
    # (convenient for data-parallel training loops).

    def initialize(self, model):
        # TorchSUL-style initialize: build the loss modules and keep the
        # wrapped network.
        self.AE = AELoss()
        self.HM = HmapLoss()
        self.RDEP = DepthLoss()
        self.DEP = DepthAllLoss()
        self.model = model

    def forward(self, img, hmap, mask, pts, depth, depth_all, is_muco):
        """Run the network and all losses.

        Returns the loss terms (hm, push, pull, rdep, dep) followed by the
        raw outputs (outs, idout, depout, depallout).
        """
        mask = mask.unsqueeze(1)  # add a channel dim so the mask broadcasts over heatmap channels
        outs, idout, depout, depallout = self.model(img)
        push, pull = self.AE(idout, pts, is_muco)
        hm = self.HM(outs, hmap, mask, is_muco)
        rdep = self.RDEP(depout, depth, is_muco)
        dep = self.DEP(depallout, depth_all, is_muco)
        return hm, push, pull, rdep, dep, outs, idout, depout, depallout

    def run(self, img):
        # Inference helper: network outputs only, no losses.
        # NOTE(review): `forward` unpacks FOUR outputs from self.model(img)
        # but this unpacks TWO — if the model always returns four values this
        # raises ValueError. Confirm the model's eval-time return arity.
        outs, idout = self.model(img)
        return outs, idout
| 27.662921 | 96 | 0.631397 |
ace5f19b92e7137087aaf430f2a283c8ba4c496f | 9,897 | py | Python | intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dynamic_multicast_interface.py | Stienvdh/statrick | 7b092fc42171e226718a70a285a4b323f2f395ad | [
"MIT"
] | null | null | null | intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dynamic_multicast_interface.py | Stienvdh/statrick | 7b092fc42171e226718a70a285a4b323f2f395ad | [
"MIT"
] | null | null | null | intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_dynamic_multicast_interface.py | Stienvdh/statrick | 7b092fc42171e226718a70a285a4b323f2f395ad | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Metadata consumed by (older) Ansible tooling to describe module maturity/support.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}

# Module documentation (YAML in a string) rendered by `ansible-doc`.
# Typo fix: "overriden" -> "overridden" in the rc_succeeded/rc_failed descriptions.
DOCUMENTATION = '''
---
module: fmgr_dynamic_multicast_interface
short_description: no description
description:
    - This module is able to configure a FortiManager device.
    - Examples include all parameters and values which need to be adjusted to data sources before usage.

version_added: "2.10"
author:
    - Link Zheng (@chillancezen)
    - Jie Xue (@JieX19)
    - Frank Shen (@fshen01)
    - Hongbin Lu (@fgtdev-hblu)
notes:
    - Running in workspace locking mode is supported in this FortiManager module, the top
      level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
    - To create or update an object, use state present directive.
    - To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. you can also override
      the conditions to fail or succeed with parameters rc_failed and rc_succeeded

options:
    bypass_validation:
        description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
        required: false
        type: bool
        default: false
    workspace_locking_adom:
        description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
        required: false
        type: str
    workspace_locking_timeout:
        description: the maximum time in seconds to wait for other user to release the workspace lock
        required: false
        type: int
        default: 300
    state:
        description: the directive to create, update or delete an object
        type: str
        required: true
        choices:
          - present
          - absent
    rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
        type: list
        required: false
    rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
        type: list
        required: false
    adom:
        description: the parameter (adom) in requested url
        type: str
        required: true
    dynamic_multicast_interface:
        description: the top level parameters set
        required: false
        type: dict
        suboptions:
            default-mapping:
                type: str
                description: no description
                choices:
                    - 'disable'
                    - 'enable'
            defmap-intf:
                type: str
                description: no description
            description:
                type: str
                description: no description
            dynamic_mapping:
                description: no description
                type: list
                suboptions:
                    _scope:
                        description: no description
                        type: list
                        suboptions:
                            name:
                                type: str
                                description: no description
                            vdom:
                                type: str
                                description: no description
                    local-intf:
                        type: str
                        description: no description
            name:
                type: str
                description: no description

'''

# Usage example shown by `ansible-doc`; placeholder values must be replaced.
EXAMPLES = '''
 - hosts: fortimanager-inventory
   collections:
     - fortinet.fortimanager
   connection: httpapi
   vars:
      ansible_httpapi_use_ssl: True
      ansible_httpapi_validate_certs: False
      ansible_httpapi_port: 443
   tasks:
    - name: no description
      fmgr_dynamic_multicast_interface:
         bypass_validation: False
         workspace_locking_adom: <value in [global, custom adom including root]>
         workspace_locking_timeout: 300
         rc_succeeded: [0, -2, -3, ...]
         rc_failed: [-2, -3, ...]
         adom: <your own value>
         state: <value in [present, absent]>
         dynamic_multicast_interface:
            default-mapping: <value in [disable, enable]>
            defmap-intf: <value of string>
            description: <value of string>
            dynamic_mapping:
              -
                  _scope:
                    -
                        name: <value of string>
                        vdom: <value of string>
                  local-intf: <value of string>
            name: <value of string>

'''

# Documentation of the values the module returns to the caller.
RETURN = '''
request_url:
    description: The full url requested
    returned: always
    type: str
    sample: /sys/login/user
response_code:
    description: The status of api request
    returned: always
    type: int
    sample: 0
response_message:
    description: The descriptive message of the api response
    type: str
    returned: always
    sample: OK.

'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
    """Entry point: declare the argument spec for this auto-generated
    FortiManager module and dispatch the CRUD operation over the httpapi
    connection via NAPIManager."""
    # Collection-level API endpoints ({adom} is substituted from url_params).
    jrpc_urls = [
        '/pm/config/adom/{adom}/obj/dynamic/multicast/interface',
        '/pm/config/global/obj/dynamic/multicast/interface'
    ]

    # Per-object endpoints, addressed by the primary key ({interface}).
    perobject_jrpc_urls = [
        '/pm/config/adom/{adom}/obj/dynamic/multicast/interface/{interface}',
        '/pm/config/global/obj/dynamic/multicast/interface/{interface}'
    ]

    url_params = ['adom']
    module_primary_key = 'name'
    # AnsibleModule argument spec; mirrors the DOCUMENTATION block above.
    module_arg_spec = {
        'bypass_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'rc_succeeded': {
            'required': False,
            'type': 'list'
        },
        'rc_failed': {
            'required': False,
            'type': 'list'
        },
        'state': {
            'type': 'str',
            'required': True,
            'choices': [
                'present',
                'absent'
            ]
        },
        'adom': {
            'required': True,
            'type': 'str'
        },
        'dynamic_multicast_interface': {
            'required': False,
            'type': 'dict',
            'options': {
                'default-mapping': {
                    'required': False,
                    'choices': [
                        'disable',
                        'enable'
                    ],
                    'type': 'str'
                },
                'defmap-intf': {
                    'required': False,
                    'type': 'str'
                },
                'description': {
                    'required': False,
                    'type': 'str'
                },
                'dynamic_mapping': {
                    'required': False,
                    'type': 'list',
                    'options': {
                        '_scope': {
                            'required': False,
                            'type': 'list',
                            'options': {
                                'name': {
                                    'required': False,
                                    'type': 'str'
                                },
                                'vdom': {
                                    'required': False,
                                    'type': 'str'
                                }
                            }
                        },
                        'local-intf': {
                            'required': False,
                            'type': 'str'
                        }
                    }
                },
                'name': {
                    'required': True,
                    'type': 'str'
                }
            }
        }
    }

    # No extra cross-parameter validation rules for this module.
    params_validation_blob = []
    check_galaxy_version(module_arg_spec)
    module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'dynamic_multicast_interface'),
                           supports_check_mode=False)

    fmgr = None
    # A socket path is only present when run through the httpapi connection
    # plugin; this module cannot operate without it.
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
        fmgr.validate_parameters(params_validation_blob)
        # Perform the create/update/delete implied by the 'state' parameter.
        fmgr.process_curd()
    else:
        module.fail_json(msg='MUST RUN IN HTTPAPI MODE')

    module.exit_json(meta=module.params)


if __name__ == '__main__':
    main()
| 33.211409 | 153 | 0.535415 |
ace5f1f49fbcd5d1aa68cbccd781de9ef9c683ba | 1,079 | py | Python | examples/mach_cad_examples/example_inner_rotor_stator.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 6 | 2021-11-02T20:12:32.000Z | 2021-11-13T10:50:35.000Z | examples/mach_cad_examples/example_inner_rotor_stator.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 18 | 2021-11-29T20:14:55.000Z | 2022-03-02T07:17:37.000Z | examples/mach_cad_examples/example_inner_rotor_stator.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T00:52:38.000Z | 2022-01-29T00:52:38.000Z | import os
import sys

# Run relative to this file's location so the relative sys.path entry below
# resolves regardless of where the script is launched from.
os.chdir(os.path.dirname(__file__))
# add the directory immediately above this file's directory to path for module import
sys.path.append("../..")

import mach_cad.tools.magnet as mn
import mach_cad.model_obj as mo

# NOTE: removed three unused DimMillimeter locals (x, y, z) that were never
# referenced — leftover from another example.

# Cross-section of a 6-slot inner-rotor stator; angles in degrees,
# lengths in millimeters.
stator4 = mo.CrossSectInnerRotorStator(
    name="stator1",
    dim_alpha_st=mo.DimDegree(40),
    dim_alpha_so=mo.DimDegree(20),
    dim_r_si=mo.DimMillimeter(40),
    dim_d_so=mo.DimMillimeter(5),
    dim_d_sp=mo.DimMillimeter(10),
    dim_d_st=mo.DimMillimeter(15),
    dim_d_sy=mo.DimMillimeter(15),
    dim_w_st=mo.DimMillimeter(13),
    dim_r_st=mo.DimMillimeter(0),
    dim_r_sf=mo.DimMillimeter(0),
    dim_r_sb=mo.DimMillimeter(0),
    Q=6,
    location=mo.Location2D(),
)

# create an instance of the MagNet class (launches MagNet with a visible UI)
toolMn = mn.MagNet(visible=True)
toolMn.open()

# draw the stator cross-section in MagNet
c1 = stator4.draw(toolMn)
toolMn.view_all()
# select the drawn cross-section so a solid can be built from it
toolMn.prepare_section(c1)
ace5f202e53d01a5162a1f417e48c5cf9599166e | 21,005 | py | Python | allennlp_models/structured_prediction/models/srl.py | matt-peters/allennlp-models | cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e | [
"Apache-2.0"
] | 402 | 2020-03-11T22:58:35.000Z | 2022-03-29T09:05:27.000Z | allennlp_models/structured_prediction/models/srl.py | matt-peters/allennlp-models | cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e | [
"Apache-2.0"
] | 116 | 2020-03-11T01:26:57.000Z | 2022-03-25T13:03:56.000Z | allennlp_models/structured_prediction/models/srl.py | matt-peters/allennlp-models | cdd505ed539fdc2b82e4cc0a23eae4bfd3368e7e | [
"Apache-2.0"
] | 140 | 2020-03-11T00:51:35.000Z | 2022-03-29T09:05:36.000Z | from typing import Dict, List, TextIO, Optional, Any
import warnings
from overrides import overrides
import torch
from torch.nn.modules import Linear, Dropout
import torch.nn.functional as F
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn.util import get_lengths_from_binary_sequence_mask, viterbi_decode
from allennlp_models.structured_prediction.metrics.srl_eval_scorer import (
SrlEvalScorer,
DEFAULT_SRL_EVAL_PATH,
)
def write_bio_formatted_tags_to_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str],
):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
prediction : `List[str]`, required.
The predicted BIO labels.
gold_labels : `List[str]`, required.
The gold BIO labels.
"""
conll_formatted_predictions = convert_bio_tags_to_conll_format(prediction)
conll_formatted_gold_labels = convert_bio_tags_to_conll_format(gold_labels)
write_conll_formatted_tags_to_file(
prediction_file,
gold_file,
verb_index,
sentence,
conll_formatted_predictions,
conll_formatted_gold_labels,
)
def write_conll_formatted_tags_to_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
conll_formatted_predictions: List[str],
conll_formatted_gold_labels: List[str],
):
"""
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
conll_formatted_predictions : `List[str]`, required.
The predicted CoNLL-formatted labels.
conll_formatted_gold_labels : `List[str]`, required.
The gold CoNLL-formatted labels.
"""
verb_only_sentence = ["-"] * len(sentence)
if verb_index is not None:
verb_only_sentence[verb_index] = sentence[verb_index]
for word, predicted, gold in zip(
verb_only_sentence, conll_formatted_predictions, conll_formatted_gold_labels
):
prediction_file.write(word.ljust(15))
prediction_file.write(predicted.rjust(15) + "\n")
gold_file.write(word.ljust(15))
gold_file.write(gold.rjust(15) + "\n")
prediction_file.write("\n")
gold_file.write("\n")
def convert_bio_tags_to_conll_format(labels: List[str]):
"""
Converts BIO formatted SRL tags to the format required for evaluation with the
official CONLL 2005 perl script. Spans are represented by bracketed labels,
with the labels of words inside spans being the same as those outside spans.
Beginning spans always have a opening bracket and a closing asterisk (e.g. "(ARG-1*" )
and closing spans always have a closing bracket (e.g. "*)" ). This applies even for
length 1 spans, (e.g "(ARG-0*)").
A full example of the conversion performed:
[B-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, I-ARG-1, O]
[ "(ARG-1*", "*", "*", "*", "*)", "*"]
# Parameters
labels : `List[str]`, required.
A list of BIO tags to convert to the CONLL span based format.
# Returns
A list of labels in the CONLL span based format.
"""
sentence_length = len(labels)
conll_labels = []
for i, label in enumerate(labels):
if label == "O":
conll_labels.append("*")
continue
new_label = "*"
# Are we at the beginning of a new span, at the first word in the sentence,
# or is the label different from the previous one? If so, we are seeing a new label.
if label[0] == "B" or i == 0 or label[1:] != labels[i - 1][1:]:
new_label = "(" + label[2:] + new_label
# Are we at the end of the sentence, is the next word a new span, or is the next
# word not in a span? If so, we need to close the label span.
if i == sentence_length - 1 or labels[i + 1][0] == "B" or label[1:] != labels[i + 1][1:]:
new_label = new_label + ")"
conll_labels.append(new_label)
return conll_labels
@Model.register("srl")
class SemanticRoleLabeler(Model):
"""
This model performs semantic role labeling using BIO tags using Propbank semantic roles.
Specifically, it is an implementation of [Deep Semantic Role Labeling - What works
and what's next](https://www.aclweb.org/anthology/P17-1044).
This implementation is effectively a series of stacked interleaved LSTMs with highway
connections, applied to embedded sequences of words concatenated with a binary indicator
containing whether or not a word is the verbal predicate to generate predictions for in
the sentence. Additionally, during inference, Viterbi decoding is applied to constrain
the predictions to contain valid BIO sequences.
Specifically, the model expects and outputs IOB2-formatted tags, where the
B- tag is used in the beginning of every chunk (i.e. all chunks start with the B- tag).
# Parameters
vocab : `Vocabulary`, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : `TextFieldEmbedder`, required
Used to embed the `tokens` `TextField` we get as input to the model.
encoder : `Seq2SeqEncoder`
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
binary_feature_dim : `int`, required.
The dimensionality of the embedding of the binary verb predicate features.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
Used to initialize the model parameters.
label_smoothing : `float`, optional (default = `0.0`)
Whether or not to use label smoothing on the labels when computing cross entropy loss.
ignore_span_metric : `bool`, optional (default = `False`)
Whether to calculate span loss, which is irrelevant when predicting BIO for Open Information Extraction.
srl_eval_path : `str`, optional (default=`DEFAULT_SRL_EVAL_PATH`)
The path to the srl-eval.pl script. By default, will use the srl-eval.pl included with allennlp,
which is located at allennlp/tools/srl-eval.pl . If `None`, srl-eval.pl is not used.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
binary_feature_dim: int,
embedding_dropout: float = 0.0,
initializer: InitializerApplicator = InitializerApplicator(),
label_smoothing: float = None,
ignore_span_metric: bool = False,
srl_eval_path: str = DEFAULT_SRL_EVAL_PATH,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
if srl_eval_path is not None:
# For the span based evaluation, we don't want to consider labels
# for verb, because the verb index is provided to the model.
self.span_metric = SrlEvalScorer(srl_eval_path, ignore_classes=["V"])
else:
self.span_metric = None
self.encoder = encoder
# There are exactly 2 binary features for the verb predicate embedding.
self.binary_feature_embedding = Embedding(
num_embeddings=2, embedding_dim=binary_feature_dim
)
self.tag_projection_layer = TimeDistributed(
Linear(self.encoder.get_output_dim(), self.num_classes)
)
self.embedding_dropout = Dropout(p=embedding_dropout)
self._label_smoothing = label_smoothing
self.ignore_span_metric = ignore_span_metric
check_dimensions_match(
text_field_embedder.get_output_dim() + binary_feature_dim,
encoder.get_input_dim(),
"text embedding dim + verb indicator embedding dim",
"encoder input dim",
)
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
verb_indicator: torch.LongTensor,
tags: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`, required
The output of `TextField.as_array()`, which should typically be passed directly to a
`TextFieldEmbedder`. This output is a dictionary mapping keys to `TokenIndexer`
tensors. At its most basic, using a `SingleIdTokenIndexer` this is : `{"tokens":
Tensor(batch_size, num_tokens)}`. This dictionary will have the same keys as were used
for the `TokenIndexers` when you created the `TextField` representing your
sequence. The dictionary is designed to be passed directly to a `TextFieldEmbedder`,
which knows how to combine different word representations into a single vector per
token in your input.
verb_indicator: `torch.LongTensor`, required.
An integer `SequenceFeatureField` representation of the position of the verb
in the sentence. This should have shape (batch_size, num_tokens) and importantly, can be
all zeros, in the case that the sentence has no verbal predicate.
tags : `torch.LongTensor`, optional (default = `None`)
A torch tensor representing the sequence of integer gold class labels
of shape `(batch_size, num_tokens)`
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
metadata containg the original words in the sentence and the verb to compute the
frame for, under 'words' and 'verb' keys, respectively.
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
unnormalised log probabilities of the tag classes.
class_probabilities : `torch.FloatTensor`
A tensor of shape `(batch_size, num_tokens, tag_vocab_size)` representing
a distribution of the tag classes per word.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.embedding_dropout(self.text_field_embedder(tokens))
mask = get_text_field_mask(tokens)
embedded_verb_indicator = self.binary_feature_embedding(verb_indicator.long())
# Concatenate the verb feature onto the embedded text. This now
# has shape (batch_size, sequence_length, embedding_dim + binary_feature_dim).
embedded_text_with_verb_indicator = torch.cat(
[embedded_text_input, embedded_verb_indicator], -1
)
batch_size, sequence_length, _ = embedded_text_with_verb_indicator.size()
encoded_text = self.encoder(embedded_text_with_verb_indicator, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view(
[batch_size, sequence_length, self.num_classes]
)
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
# We need to retain the mask in the output dictionary
# so that we can crop the sequences to remove padding
# when we do viterbi inference in self.make_output_human_readable.
output_dict["mask"] = mask
if tags is not None:
loss = sequence_cross_entropy_with_logits(
logits, tags, mask, label_smoothing=self._label_smoothing
)
if not self.ignore_span_metric and self.span_metric is not None and not self.training:
batch_verb_indices = [
example_metadata["verb_index"] for example_metadata in metadata
]
batch_sentences = [example_metadata["words"] for example_metadata in metadata]
# Get the BIO tags from make_output_human_readable()
# TODO (nfliu): This is kind of a hack, consider splitting out part
# of make_output_human_readable() to a separate function.
batch_bio_predicted_tags = self.make_output_human_readable(output_dict).pop("tags")
batch_conll_predicted_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_predicted_tags
]
batch_bio_gold_tags = [
example_metadata["gold_tags"] for example_metadata in metadata
]
batch_conll_gold_tags = [
convert_bio_tags_to_conll_format(tags) for tags in batch_bio_gold_tags
]
self.span_metric(
batch_verb_indices,
batch_sentences,
batch_conll_predicted_tags,
batch_conll_gold_tags,
)
output_dict["loss"] = loss
words, verbs = zip(*[(x["words"], x["verb"]) for x in metadata])
if metadata is not None:
output_dict["words"] = list(words)
output_dict["verb"] = list(verbs)
return output_dict
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does constrained viterbi decoding on class probabilities output in :func:`forward`. The
constraint simply specifies that the output tags must be a valid BIO sequence. We add a
`"tags"` key to the dictionary with the result.
"""
all_predictions = output_dict["class_probabilities"]
sequence_lengths = get_lengths_from_binary_sequence_mask(output_dict["mask"]).data.tolist()
if all_predictions.dim() == 3:
predictions_list = [
all_predictions[i].detach().cpu() for i in range(all_predictions.size(0))
]
else:
predictions_list = [all_predictions]
all_tags = []
transition_matrix = self.get_viterbi_pairwise_potentials()
start_transitions = self.get_start_transitions()
for predictions, length in zip(predictions_list, sequence_lengths):
max_likelihood_sequence, _ = viterbi_decode(
predictions[:length], transition_matrix, allowed_start_transitions=start_transitions
)
tags = [
self.vocab.get_token_from_index(x, namespace="labels")
for x in max_likelihood_sequence
]
all_tags.append(tags)
output_dict["tags"] = all_tags
return output_dict
def get_metrics(self, reset: bool = False):
if self.ignore_span_metric:
# Return an empty dictionary if ignoring the
# span metric
return {}
else:
metric_dict = self.span_metric.get_metric(reset=reset)
# This can be a lot of metrics, as there are 3 per class.
# we only really care about the overall metrics, so we filter for them here.
return {x: y for x, y in metric_dict.items() if "overall" in x}
def get_viterbi_pairwise_potentials(self):
"""
Generate a matrix of pairwise transition potentials for the BIO labels.
The only constraint implemented here is that I-XXX labels must be preceded
by either an identical I-XXX tag or a B-XXX tag. In order to achieve this
constraint, pairs of labels which do not satisfy this constraint have a
pairwise potential of -inf.
# Returns
transition_matrix : `torch.Tensor`
A `(num_labels, num_labels)` matrix of pairwise potentials.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
transition_matrix = torch.zeros([num_labels, num_labels])
for i, previous_label in all_labels.items():
for j, label in all_labels.items():
# I labels can only be preceded by themselves or
# their corresponding B tag.
if i != j and label[0] == "I" and not previous_label == "B" + label[1:]:
transition_matrix[i, j] = float("-inf")
return transition_matrix
def get_start_transitions(self):
"""
In the BIO sequence, we cannot start the sequence with an I-XXX tag.
This transition sequence is passed to viterbi_decode to specify this constraint.
# Returns
start_transitions : `torch.Tensor`
The pairwise potentials between a START token and
the first token of the sequence.
"""
all_labels = self.vocab.get_index_to_token_vocabulary("labels")
num_labels = len(all_labels)
start_transitions = torch.zeros(num_labels)
for i, label in all_labels.items():
if label[0] == "I":
start_transitions[i] = float("-inf")
return start_transitions
default_predictor = "semantic_role_labeling"
def write_to_conll_eval_file(
prediction_file: TextIO,
gold_file: TextIO,
verb_index: Optional[int],
sentence: List[str],
prediction: List[str],
gold_labels: List[str],
):
"""
.. deprecated:: 0.8.4
The `write_to_conll_eval_file` function was deprecated in favor of the
identical `write_bio_formatted_tags_to_file` in version 0.8.4.
Prints predicate argument predictions and gold labels for a single verbal
predicate in a sentence to two provided file references.
The CoNLL SRL format is described in
[the shared task data README](https://www.lsi.upc.edu/~srlconll/conll05st-release/README).
This function expects IOB2-formatted tags, where the B- tag is used in the beginning
of every chunk (i.e. all chunks start with the B- tag).
# Parameters
prediction_file : `TextIO`, required.
A file reference to print predictions to.
gold_file : `TextIO`, required.
A file reference to print gold labels to.
verb_index : `Optional[int]`, required.
The index of the verbal predicate in the sentence which
the gold labels are the arguments for, or None if the sentence
contains no verbal predicate.
sentence : `List[str]`, required.
The word tokens.
prediction : `List[str]`, required.
The predicted BIO labels.
gold_labels : `List[str]`, required.
The gold BIO labels.
"""
warnings.warn(
"The 'write_to_conll_eval_file' function has been deprecated in favor of "
"the identical 'write_bio_formatted_tags_to_file' function.",
DeprecationWarning,
)
write_bio_formatted_tags_to_file(
prediction_file, gold_file, verb_index, sentence, prediction, gold_labels
)
| 42.693089 | 112 | 0.666365 |
ace5f20614e01e0cff507bbfd8c3d68a81a987bd | 1,106 | py | Python | authapp/migrations/0004_auto_20211227_1317.py | Roman-R2/gb-django-shop-opltimization | f99d5e81653ec4b50640476308380ec2984aa061 | [
"MIT"
] | null | null | null | authapp/migrations/0004_auto_20211227_1317.py | Roman-R2/gb-django-shop-opltimization | f99d5e81653ec4b50640476308380ec2984aa061 | [
"MIT"
] | null | null | null | authapp/migrations/0004_auto_20211227_1317.py | Roman-R2/gb-django-shop-opltimization | f99d5e81653ec4b50640476308380ec2984aa061 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-27 08:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authapp', '0003_shopuserprofile'),
]
operations = [
migrations.AlterField(
model_name='shopuser',
name='avatar',
field=models.ImageField(blank=True, upload_to='users', verbose_name='Аватар'),
),
migrations.AlterField(
model_name='shopuserprofile',
name='about_me',
field=models.TextField(blank=True, max_length=512, null=True, verbose_name='Обо мне'),
),
migrations.AlterField(
model_name='shopuserprofile',
name='gender',
field=models.CharField(choices=[('M', 'Мужчина'), ('F', 'Женщина'), ('U', 'Неизвестно')], default='U', max_length=1, verbose_name='Пол'),
),
migrations.AlterField(
model_name='shopuserprofile',
name='tagline',
field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Тэги'),
),
]
| 32.529412 | 149 | 0.587703 |
ace5f243d550479bd41ac75ebadf5b92a3524fbe | 999 | py | Python | test/test_gzip_encoder.py | mmontagna/generic-encoders | 3344a6dfb6173feb0f8ac6102ac01d5a4425c99c | [
"MIT"
] | 5 | 2018-07-06T05:30:51.000Z | 2021-03-04T05:09:35.000Z | test/test_gzip_encoder.py | mmontagna/generic-encoders | 3344a6dfb6173feb0f8ac6102ac01d5a4425c99c | [
"MIT"
] | 4 | 2018-07-03T15:09:33.000Z | 2018-08-11T04:06:01.000Z | test/test_gzip_encoder.py | mmontagna/generic-encoders | 3344a6dfb6173feb0f8ac6102ac01d5a4425c99c | [
"MIT"
] | null | null | null | import unittest
import six
from generic_encoders import gzip_encoder
class TestGzipEncoder(unittest.TestCase):
def test_gzip_encoder_bytes(self):
string = b"some string 123"
self.assertEqual(string,
gzip_encoder.decode(
gzip_encoder.encode(string)
))
def test_string_throws_exception_on_p3(self):
string = "some string 123"
if not six.PY2:
with self.assertRaises(TypeError) as context:
gzip_encoder.encode(string)
else:
gzip_encoder.encode(string)
def test_throws_exception_when_encode_passed_bogus_type(self):
string = 123
with self.assertRaises(TypeError) as context:
gzip_encoder.encode(string)
def test_throws_exception_when_decode_passed_bogus_type(self):
string = 123
with self.assertRaises(TypeError) as context:
gzip_encoder.decode(string)
if __name__ == '__main__':
unittest.main() | 27.75 | 66 | 0.65966 |
ace5f2fd78f1cf388f9de4a3ebe5537b85be062c | 711 | py | Python | setup.py | karbassi/finch-robot | f017ef9ba3f897f2c556e8dc35e2cac4d89b5b1e | [
"MIT"
] | null | null | null | setup.py | karbassi/finch-robot | f017ef9ba3f897f2c556e8dc35e2cac4d89b5b1e | [
"MIT"
] | null | null | null | setup.py | karbassi/finch-robot | f017ef9ba3f897f2c556e8dc35e2cac4d89b5b1e | [
"MIT"
] | null | null | null | from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='finch-robot',
version='0.1',
description='Finch robot USB connection',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Linguistic',
],
keywords='finch robot',
url='http://github.com/karbassi/finch-robot',
author='Ali Karbassi',
author_email='ali@karbassi.com',
license='MIT',
packages=['finch-robot'],
include_package_data=True,
zip_safe=False)
| 26.333333 | 51 | 0.599156 |
ace5f3e27687642a5b1c10a36a877268070839be | 54,645 | py | Python | tests/test_passive_optical_scanner_model.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | [
"Apache-2.0"
] | null | null | null | tests/test_passive_optical_scanner_model.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | [
"Apache-2.0"
] | null | null | null | tests/test_passive_optical_scanner_model.py | bgorr/instrupy | e3dca871ce2dcd2ef279898fcc36bf9d18f0c243 | [
"Apache-2.0"
] | null | null | null | """Unit tests for instrupy.passive_optical_sensor_model
Tests:
* test_from_json_basic
* test_calculate_integration_time
* test_calculate_number_of_signal_electrons
* test_radianceWithEarthAsReflector
* test_radianceWithEarthAsBlackBodyRadiator
* test_planck_photon_integral
* test_calc_data_metrics_smad_truth: Test with SMAD 3rd ed truth data (firesat example).
For the below tests refer to the following article more context:
V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations," 2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.
* test_calc_data_metrics_TIRSBand1_precomputed: Model instrument with TIRS Band 1 specs (IR, pushbroom), Landsat-8 orbit and test with the results as computed on 4 April 2021.
* test_calc_data_metrics_OLIBlueBand_precomputed: Model instrument with OLI Blue band specs (Optical, pushbroom), Landsat-8 orbit and test with the results as computed on 4 April 2021.
* test_calc_data_metrics_MODISBand10_precomputed: Model instrument with MODIS Band 10 specs (Optical, whiskbroom), Aqua orbit and test with the results as computed on 4 April 2021.
* test_calc_data_metrics_MODISBand1_precomputed: Model instrument with MODIS Band 1 specs (Optical, whiskbroom), Aqua orbit and test with the results as computed on 4 April 2021.
* test_calc_data_metrics_CCAMBlueBand_precomputed: Model instrument with CCAM Blue Band specs (Matrix, optical), Aqua orbit and test with the results as computed on 4 April 2021.
.. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
"""
import unittest
import json
import numpy as np
import sys, os
import math
from instrupy.passive_optical_scanner_model import ScanTech, PassiveOpticalScannerModel, AtmosphericLossModel
from instrupy.util import Orientation, SphericalGeometry, ViewGeometry, Maneuver
firesat_json = '{"@type": "Passive Optical Scanner",' \
'"name": "FireSat",' \
'"mass": 28,' \
'"volume": 0.12,' \
'"power": 32,' \
'"fieldOfViewGeometry": {' \
' "shape": "RECTanGULAR",' \
' "angleHeight": 0.628,' \
' "angleWidth": 115.8' \
' },' \
'"scanTechnique": "WhiskBROOM",' \
'"orientation": {' \
' "referenceFrame": "SC_BODY_FIXED",' \
' "convention": "SIDE_loOK",' \
' "sideLookAngle": 0' \
' },' \
'"dataRate": 85,' \
'"numberDetectorRows": 256,' \
'"numberDetectorCols": 1,' \
'"detectorWidth": 30e-6,' \
'"focalLength": 0.7,' \
'"operatingWavelength": 4.2e-6,' \
'"bandwidth": 1.9e-6,' \
'"quantumEff": 0.5,' \
'"targetBlackBodyTemp": 290,' \
'"bitsPerPixel": 8,' \
'"opticsSysEff": 0.75,' \
'"numOfReadOutE": 25,' \
'"apertureDia": 0.26,' \
'"Fnum": 2.7,' \
'"atmosLossModel": "LOWTRAN7"}'
class TestPassiveOpticalScannerModel(unittest.TestCase):
def assertNearlyZeroErrorFraction(self,a,b,fraction=0.01,msg=None):
if abs(a-b) > abs(fraction*a):
if msg is None:
self.fail("The given numbers %s and %s are not near each other."%(a,b))
else:
self.fail(msg)
def test_from_json_basic(self):
# Test: Typical case
firesat = PassiveOpticalScannerModel.from_json(firesat_json)
self.assertEqual(firesat._type, "Passive Optical Scanner")
self.assertIsNotNone(firesat._id) # default random-id assigned
self.assertIsInstance(firesat.name, str)
self.assertEqual(firesat.name, "FireSat")
self.assertIsInstance(firesat.mass, float)
self.assertEqual(firesat.mass, 28)
self.assertIsInstance(firesat.volume, float)
self.assertEqual(firesat.volume, 0.12)
self.assertIsInstance(firesat.power, float)
self.assertEqual(firesat.power, 32)
self.assertIsInstance(firesat.dataRate, float)
self.assertEqual(firesat.dataRate, 85)
self.assertIsInstance(firesat.orientation, Orientation)
self.assertEqual(firesat.orientation, Orientation.from_dict({'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerSeq1':1, 'eulerSeq2':2, 'eulerSeq3':3, 'eulerAngle1':0, 'eulerAngle2':0, 'eulerAngle3':0}))
self.assertIsInstance(firesat.fieldOfView, ViewGeometry)
self.assertEqual(firesat.fieldOfView, ViewGeometry.from_dict({'orientation': {'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerSeq1':1, 'eulerSeq2':2, 'eulerSeq3':3, 'eulerAngle1':0, 'eulerAngle2':0, 'eulerAngle3':0},
'sphericalGeometry':{'shape':'rectangular', 'angleHeight':0.628, 'angleWidth':115.8}}))
self.assertIsInstance(firesat.sceneFieldOfView, ViewGeometry)
self.assertEqual(firesat.sceneFieldOfView, firesat.fieldOfView)
self.assertIsNone(firesat.maneuver)
self.assertIsNone(firesat.fieldOfRegard)
self.assertIsNone(firesat.pointingOption)
self.assertIsInstance(firesat.scanTechnique, str)
self.assertEqual(firesat.scanTechnique, "WHISKBROOM")
self.assertIsInstance(firesat.detectorWidth, float)
self.assertEqual(firesat.detectorWidth, 30e-6)
self.assertIsInstance(firesat.focalLength, float)
self.assertEqual(firesat.focalLength, 0.7)
self.assertIsInstance(firesat.operatingWavelength, float)
self.assertEqual(firesat.operatingWavelength, 4.2e-6)
self.assertIsInstance(firesat.bandwidth, float)
self.assertEqual(firesat.bandwidth, 1.9e-6)
self.assertIsInstance(firesat.quantumEff, float)
self.assertEqual(firesat.quantumEff, 0.5)
self.assertIsInstance(firesat.targetBlackBodyTemp, float)
self.assertEqual(firesat.targetBlackBodyTemp, 290)
self.assertIsInstance(firesat.bitsPerPixel, int)
self.assertEqual(firesat.bitsPerPixel, 8)
self.assertIsInstance(firesat.opticsSysEff, float)
self.assertEqual(firesat.opticsSysEff, 0.75)
self.assertIsInstance(firesat.numOfReadOutE, float)
self.assertEqual(firesat.numOfReadOutE, 25)
self.assertIsInstance(firesat.apertureDia, float)
self.assertEqual(firesat.apertureDia, 0.26)
self.assertIsInstance(firesat.Fnum, float)
self.assertEqual(firesat.Fnum, 2.7)
self.assertIsInstance(firesat.atmosLossModel, AtmosphericLossModel)
self.assertTrue(firesat.atmosLossModel, AtmosphericLossModel.LOWTRAN7)
self.assertIsInstance(firesat, PassiveOpticalScannerModel)
# Test the setting of default parameters
o = PassiveOpticalScannerModel.from_json('{"@type": "Passive Optical Scanner",'
'"fieldOfViewGeometry": {'
' "shape": "RECTanGULAR",'
' "angleHeight": 0.628,'
' "angleWidth": 115.8'
' },'
'"scanTechnique": "WhiskBROOM",'
'"numberDetectorRows": 256,'
'"numberDetectorCols": 1,'
'"detectorWidth": 30e-6,'
'"focalLength": 0.7,'
'"operatingWavelength": 4.2e-6,'
'"bandwidth": 1.9e-6,'
'"quantumEff": 0.5,'
'"opticsSysEff": 0.75,'
'"numOfReadOutE": 25,'
'"apertureDia": 0.26,'
'"Fnum": 2.7}')
self.assertIsNotNone(o._id)
self.assertEqual(o.orientation, Orientation.from_dict({'referenceFrame': 'SC_BODY_FIXED', 'convention': 'EULER', 'eulerSeq1':1, 'eulerSeq2':2, 'eulerSeq3':3, 'eulerAngle1':0, 'eulerAngle2':0, 'eulerAngle3':0}))
self.assertEqual(o.fieldOfView, o.sceneFieldOfView)
self.assertEqual(firesat.targetBlackBodyTemp, 290)
self.assertIsNone(o.atmosLossModel)
# Test of an un-supported field-of-view specification.
with self.assertRaises(Exception):
o = PassiveOpticalScannerModel.from_json('{"@type": "Passive Optical Scanner",'
'"name": "FireSat",'
'"mass": 28,'
'"volume": 0.12,'
'"power": 32,'
'"fieldOfViewGeometry": {'
' "shape": "CIRCULAR",'
' "diameter": 60,'
' },'
'"scanTechnique": "WhiskBROOM",'
'"orientation": {'
' "referenceFrame": "SC_BODY_FIXED",'
' "convention": "SIDE_loOK",'
' "sideLookAngle": 0'
' },'
'"dataRate": 85,'
'"numberDetectorRows": 256,'
'"numberDetectorCols": 1,'
'"detectorWidth": 30e-6,'
'"focalLength": 0.7,'
'"operatingWavelength": 4.2e-6,'
'"bandwidth": 1.9e-6,'
'"quantumEff": 0.5,'
'"targetBlackBodyTemp": 290,'
'"bitsPerPixel": 8,'
'"opticsSysEff": 0.75,'
'"numOfReadOutE": 25,'
'"apertureDia": 0.26,'
'"Fnum": 2.7,'
'"atmosLossModel": "LOWTRAN7"}')
# Test of an improper scanning technique specification
with self.assertRaises(Exception):
o = PassiveOpticalScannerModel.from_json('{"@type": "Passive Optical Scanner",'
'"name": "FireSat",'
'"mass": 28,'
'"volume": 0.12,'
'"power": 32,'
'"fieldOfViewGeometry": {'
' "shape": "RECTanGULAR",'
' "angleHeight": 0.628,'
' "angleWidth": 115.8'
' },'
'"scanTechnique": "Broombroom",'
'"orientation": {'
' "referenceFrame": "SC_BODY_FIXED",'
' "convention": "SIDE_loOK",'
' "sideLookAngle": 0'
' },'
'"dataRate": 85,'
'"numberDetectorRows": 256,'
'"numberDetectorCols": 1,'
'"detectorWidth": 30e-6,'
'"focalLength": 0.7,'
'"operatingWavelength": 4.2e-6,'
'"bandwidth": 1.9e-6,'
'"quantumEff": 0.5,'
'"targetBlackBodyTemp": 290,'
'"bitsPerPixel": 8,'
'"opticsSysEff": 0.75,'
'"numOfReadOutE": 25,'
'"apertureDia": 0.26,'
'"Fnum": 2.7,'
'"atmosLossModel": "LOWTRAN7"}')
# Test of an PUSHBROOM scanning technique specification and more than one :code:`numberDetectorRows` specification.
with self.assertRaises(Exception):
o = PassiveOpticalScannerModel.from_json('{"@type": "Passive Optical Scanner",'
'"name": "FireSat",'
'"mass": 28,'
'"volume": 0.12,'
'"power": 32,'
'"fieldOfViewGeometry": {'
' "shape": "RECTanGULAR",'
' "angleHeight": 0.628,'
' "angleWidth": 115.8'
' },'
'"scanTechnique": "Pushbroom",'
'"orientation": {'
' "referenceFrame": "SC_BODY_FIXED",'
' "convention": "SIDE_loOK",'
' "sideLookAngle": 0'
' },'
'"dataRate": 85,'
'"numberDetectorRows": 10,'
'"numberDetectorCols": 10,'
'"detectorWidth": 30e-6,'
'"focalLength": 0.7,'
'"operatingWavelength": 4.2e-6,'
'"bandwidth": 1.9e-6,'
'"quantumEff": 0.5,'
'"targetBlackBodyTemp": 290,'
'"bitsPerPixel": 8,'
'"opticsSysEff": 0.75,'
'"numOfReadOutE": 25,'
'"apertureDia": 0.26,'
'"Fnum": 2.7,'
'"atmosLossModel": "LOWTRAN7"}')
# Test of an WHISKBROOM scanning technique specification and more than one :code:`numberDetectorCols` specification.
with self.assertRaises(Exception):
o = PassiveOpticalScannerModel.from_json('{"@type": "Passive Optical Scanner",'
'"name": "FireSat",'
'"mass": 28,'
'"volume": 0.12,'
'"power": 32,'
'"fieldOfViewGeometry": {'
' "shape": "RECTanGULAR",'
' "angleHeight": 0.628,'
' "angleWidth": 115.8'
' },'
'"scanTechnique": "WhiskBROOM",'
'"orientation": {'
' "referenceFrame": "SC_BODY_FIXED",'
' "convention": "SIDE_loOK",'
' "sideLookAngle": 0'
' },'
'"dataRate": 85,'
'"numberDetectorRows": 10,'
'"numberDetectorCols": 10,'
'"detectorWidth": 30e-6,'
'"focalLength": 0.7,'
'"operatingWavelength": 4.2e-6,'
'"bandwidth": 1.9e-6,'
'"quantumEff": 0.5,'
'"targetBlackBodyTemp": 290,'
'"bitsPerPixel": 8,'
'"opticsSysEff": 0.75,'
'"numOfReadOutE": 25,'
'"apertureDia": 0.26,'
'"Fnum": 2.7,'
'"atmosLossModel": "LOWTRAN7"}')
def test_planck_photon_integral(self):
    """Validate the Planck photon-radiance integral against online truth data."""
    # Test trivial case with 0 wavelength
    self.assertAlmostEqual(PassiveOpticalScannerModel.planck_photon_integral(0,290), 0)
    """ Tests using online calculator from <https://www.opticsthewebsite.com/OpticsCalculators.aspx> as truth data.
        Note that the online calculator requires minimum wavelength to be set as 1e-9 um which is nearly 0 wavelength.
    """
    # Arguments are (wavelength [m], blackbody temperature [K]); the 1e4 factor
    # presumably converts the calculator's per-cm^2 values to per-m^2 — TODO confirm.
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.planck_photon_integral(12e-6,1500), 1.46801e+20 * 1e4)
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.planck_photon_integral(2e-6,500), 3.36875e+14 * 1e4)
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.planck_photon_integral(500e-6,45), 4.10754e+15 * 1e4)
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.planck_photon_integral(1e9,45), 4.40891e+15 * 1e4) # specifying 1e9 m as wavelength is to get approximately the radiance over entire spectrum
def test_radianceWithEarthAsBlackBodyRadiator(self):
    """ Tests using online calculator from <https://www.opticsthewebsite.com/OpticsCalculators.aspx> as truth data for in-band radiance calculation.
        Note that the online calculator requires minimum wavelength to be set as 1e-9 um which is nearly 0 wavelength.
    """
    # Arguments appear to be (center wavelength [m], bandwidth [m], BB temperature [K],
    # incidence angle [rad]); e.g. 16.25 um center with 12.5 um bandwidth spans 10-22.5 um.
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, 0, atmos_loss_model=None), 5.08113e+17 * 1e4) # 10 um to 22.5 um at 290 K, 0 deg incidence angle
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(6e-6, 4e-6, 180.5, 0, atmos_loss_model=None), 6.74299e+14 * 1e4)
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(0.5e-6, 0.8e-6, 270, 0, atmos_loss_model=None), 2.74990e-5 * 1e4)
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(140e-6, 40e-6, 330, 0, atmos_loss_model=None), 1.77269e+16 * 1e4)
    # Tests with 10 um to 22.5 um at 290 K at different incidence angles.
    # Radiance scales as cos(incidence), symmetric in sign, and the angle is
    # evidently treated modulo 2*pi (hence the +2*pi / -8*pi offsets below).
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, 0.1, atmos_loss_model=None), 5.08113e+17 * 1e4 * np.cos(0.1))
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, -0.1, atmos_loss_model=None), 5.08113e+17 * 1e4 * np.cos(0.1))
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, 0.5 + 2*3.141, atmos_loss_model=None), 5.08113e+17 * 1e4 * np.cos(0.5))
    self.assertNearlyZeroErrorFraction(PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, -0.5 - 8*3.141, atmos_loss_model=None), 5.08113e+17 * 1e4 * np.cos(0.5))
    # Tests with unrealistic observation incidence angles (beyond +/- 90 deg).
    with self.assertRaises(Exception):
        PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, np.deg2rad(91), atmos_loss_model=None)
    with self.assertRaises(Exception):
        PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, np.deg2rad(-91), atmos_loss_model=None)
    with self.assertRaises(Exception):
        PassiveOpticalScannerModel.radiance_with_earth_as_bb_radiator(16.25e-6, 12.5e-6, 290, np.deg2rad(-91 + 360*8), atmos_loss_model=None)
def test_radianceWithEarthAsReflector(self):
    """ Initialize parameters which ensure that the target (ground-pixel) and observer (satelltie) and the Sun have a LOS geometry.
        This is verified using GMAT (Orbit View animation).
    """
    tObs_JDUT1 = 2451623.999630 # spring equinox day
    obs_pos_km = [6577.848345501363, -9.521529479781905e-013, 2394.141003279681] # satelltie is in the XZ plane
    tar_pos_km = [6577.848345501363 - 622, -9.521529479781905e-013, 2394.141003279681] # satellite altitude is 622 km and target is in the XZ plane
    obs_area_m2 = 1
    """ Test: Reflected energy far-outside visible wavelengths must be near 0.
    """
    # 0.5 nm (X-ray/UV) and 1 m (radio) bands carry negligible reflected solar energy.
    self.assertAlmostEqual(PassiveOpticalScannerModel.radiance_with_earth_as_reflector(0.5e-9, 0.2e-9, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'), 0)
    self.assertAlmostEqual(PassiveOpticalScannerModel.radiance_with_earth_as_reflector(1, 1e-2, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'), 0, places = 3)
    """ Test: Reflected energy for visible wavelengths must be greater than that of other wavelengths, keeping bandwidth same.
    """
    self.assertGreater(PassiveOpticalScannerModel.radiance_with_earth_as_reflector(0.5e-6, 0.2e-6, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'), PassiveOpticalScannerModel.radiance_with_earth_as_reflector(6e-6, 0.2e-6, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7')) # longer wavelengths than visible
    self.assertGreater(PassiveOpticalScannerModel.radiance_with_earth_as_reflector(0.5e-6, 0.2e-6, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'), PassiveOpticalScannerModel.radiance_with_earth_as_reflector(0.25e-6, 0.2e-6, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7')) # shorter wavelengths than visible
    """
    Test with another observer position, one where the observer sees the target pixel (of fixed area) at a larger angle.
    """
    opWav_m = 0.5e-6
    bw_m = 0.2e-6
    obs2_pos_km = [6893.654271085462, -9.186593534864809e-013, 1215.537243668513]
    # Larger view angle at the second observer position means less radiance collected.
    self.assertGreater(PassiveOpticalScannerModel.radiance_with_earth_as_reflector(opWav_m, bw_m, tObs_JDUT1, obs_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'), PassiveOpticalScannerModel.radiance_with_earth_as_reflector(opWav_m, bw_m, tObs_JDUT1, obs2_pos_km, tar_pos_km, obs_area_m2, atmos_loss_model='LOWTRAN7'))
def test_calculate_number_of_signal_electrons(self):
    """Check the signal-electron count against the SMAD FireSat example."""
    # Test: Truth data from SMAD 3rd edition, Table 9-15.
    opWav_m = 4.2e-6        # operating (center) wavelength
    bw_m = 1.9e-6           # bandwidth
    bbT_K = 290             # target blackbody temperature
    apDia_m = 0.2626        # aperture diameter
    opTrns = 0.75           # optics transmittance
    QE = 0.5                # detector quantum efficiency
    tObs_JDUT1 = 2451623.999630 # date corresponds to satellite over target at day-time.
    obs_pos_km = [6378+700, 0, 0] # satellite on the X-axis at altitude 700 km
    tar_pos_km = [6378, 0, 0]
    pixelArea_m2 = 30.0519 * 30.0519
    Ti_s = 24.1827e-6       # integration time
    # The InstruPy computed value must be greater than truth value since SMAD does not
    # consider the solar energy reflected off the Earth, and the date corresponds to
    # the satellite being over the target at day-time.
    self.assertGreater(PassiveOpticalScannerModel.calculate_number_of_signal_electrons(opWav_m, bw_m, bbT_K, apDia_m, opTrns, QE, tObs_JDUT1, obs_pos_km, tar_pos_km, pixelArea_m2, Ti_s, atmos_loss_model='LOWTRAN7'), 8286.104444633884)
def test_calculate_integration_time(self):
    """Exercise integration-time computation for all three scan techniques.

    Positional arguments appear to be (scan technique, #detector rows,
    #detector columns, access duration, along-track FOV fraction) — TODO
    confirm against the function signature.
    """
    # Test: PUSHBROOM scanning — integration time equals the full access time
    # regardless of the number of cross-track detector columns.
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.PUSHBROOM, 1, 1, 12.5, 1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.PUSHBROOM, 1, 150, 12.5, 0.1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.PUSHBROOM, 1, 150, 12.5, 0.1, angle_width_deg=30), 12.5)
    # check max exposure time functionality (caps the computed integration time)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.PUSHBROOM, 1, 150, 12.5, 0.1, max_det_exp_time=5), 5)
    with self.assertRaises(Exception): # Exception expected if number of detector rows in along-track direction is not 1.
        PassiveOpticalScannerModel.calculate_integration_time(ScanTech.PUSHBROOM, 10, 150, 12.5, 1)
    # Test: WHISKBROOM scanning — time is divided across the cross-track sweep
    # (angle_width_deg / along-track fraction).
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 1, 1, 12.5, 1, max_det_exp_time=None, angle_width_deg=30), 12.5/30)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 1, 1, 12.5, 0.1, max_det_exp_time=None, angle_width_deg=30), 12.5/300)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 20, 1, 12.5, 0.1, max_det_exp_time=None, angle_width_deg=30), 12.5/300)
    # check max exposure time functionality
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 20, 1, 12.5, 0.1, max_det_exp_time=(12.5/3000), angle_width_deg=30), 12.5/3000)
    with self.assertRaises(Exception): # Exception expected if number of detector columns in cross-track direction is not 1.
        PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 10, 150, 12.5, 1, max_det_exp_time=None, angle_width_deg=30)
    with self.assertRaises(Exception): # Exception expected if cross-track-fov is not specified
        PassiveOpticalScannerModel.calculate_integration_time(ScanTech.WHISKBROOM, 20, 1, 12.5, 0.1, max_det_exp_time=None, angle_width_deg=None)
    # Test: MATRIX_IMAGER scanning — full access time regardless of detector grid.
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 1, 1, 12.5, 1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 1, 1, 12.5, 0.1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 20, 1, 12.5, 0.1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 1, 20, 12.5, 0.1), 12.5)
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 20, 20, 12.5, 0.1, 30), 12.5)
    # check max exposure time functionality
    self.assertAlmostEqual(PassiveOpticalScannerModel.calculate_integration_time(ScanTech.MATRIX_IMAGER, 1, 20, 12.5, 0.1, max_det_exp_time=1.2), 1.2)
def test_calc_data_metrics_smad_truth(self):
    """ Test: Truth data from SMAD 3rd edition, Table 9-15. Note that the inputs are made so that they are consistent with the example.
        Further note that SMAD does not consider the solar energy reflected off the Earth in their calculations.
    """
    # firesat_json is a module-level fixture defined elsewhere in this file.
    firesat = PassiveOpticalScannerModel.from_json(firesat_json)
    epoch_JDUT1 = 2451623.999630
    sc_orbit_state = {'time [JDUT1]':epoch_JDUT1, 'x [km]': 7078.137, 'y [km]': 0, 'z [km]': 0, 'vx [km/s]': 0, 'vy [km/s]': 7.5, 'vz [km/s]': 0} # equatorial orbit, altitude about 700 km
    target_coords = {'lat [deg]': 0, 'lon [deg]': 0} # lat = 0, lon = 0 corresponds to [6378, 0, 0] km in ECI for observer position, check using Matlab function: eci2lla([6378, 0, 0] ,[2000 3 20 11 59 28.000])
    obsv_metrics = firesat.calc_data_metrics(sc_orbit_state, target_coords)
    # Loose bounds consistent with the SMAD example ...
    self.assertAlmostEqual(obsv_metrics["ground pixel along-track resolution [m]"], 30, delta=3)
    self.assertAlmostEqual(obsv_metrics["ground pixel cross-track resolution [m]"], 30, delta=3)
    # A (positive) deviation is expected since SMAD does not consider the solar
    # energy reflected off the Earth.
    self.assertGreater(obsv_metrics["SNR"], 88)
    self.assertGreater(obsv_metrics["dynamic range"], 332.9)
    self.assertAlmostEqual(obsv_metrics["noise-equivalent delta T [K]"], 0.3 , delta=0.01)
    # ... and an exact regression pin of the currently computed values.
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 31.34, 'ground pixel cross-track resolution [m]': 32.91, 'SNR': 148.15, 'dynamic range': 902.27, 'noise-equivalent delta T [K]': 0.30609})
    # @TODO, make test with satellite in night region, thus no reflected energy of Sun. Result should match SMAD.
def test_calc_data_metrics_TIRSBand1_precomputed(self):
    """ Model instrument with TIRS Band 1 specs (IR, pushbroom) and test with the results as computed on 4 April 2021.
        Refer to the following article for more context:
        V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations,"
        2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.

    .. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
    """
    landsat_tirs_band1_dict = {
        "@type": "Passive Optical Scanner",
        "name": "Landsat 8 TIRS Band1",
        "mass": 236,
        "volume": 0.261,
        "power": 380,
        "fieldOfViewGeometry": {
            "shape": "RECTANGULAR",
            "angleHeight": 0.0081,
            "angleWidth": 15
        },
        "scanTechnique": "PUSHBROOM",
        "orientation": {
            "referenceFrame": "SC_BODY_FIXED",
            "convention": "REF_FRAME_ALIGNED"
        },
        "dataRate": 384,
        "numberDetectorRows": 1,
        "numberDetectorCols": 1850,
        "detectorWidth": 25e-6,
        "focalLength": 0.178,
        "operatingWavelength": 10.9e-6,
        "bandwidth": 0.6e-6,
        "quantumEff": 0.025,
        "targetBlackBodyTemp": 290,
        "bitsPerPixel": 12,
        "opticsSysEff": 0.60 ,
        "numOfReadOutE": 20,
        "apertureDia": 0.1085366,
        "Fnum": 1.64,
        "maxDetectorExposureTime": 3.49e-3,
        "atmosLossModel": "LOWTRAN7",
        "_comments": ["Above is Total payload data-rate not just off the TIRS.",
                      "numReadOutE is guessed."]
    }
    landsat_tirs_band1 = PassiveOpticalScannerModel.from_dict(landsat_tirs_band1_dict)
    # landsat 8 orbit at 10 Apr 2021 14:24:17.819 UTC
    sc_orbit_state = {'time [JDUT1]':2459315.100208333, 'x [km]': -7012.215259847972, 'y [km]': 981.6284579029395, 'z [km]': 16.62328546479549,
                      'vx [km/s]': 0.1664588472531363, 'vy [km/s]': 1.055747095699285, 'vz [km/s]': 7.426472416008381 }
    target_coords = {'lat [deg]': 0.01942147899019397 , 'lon [deg]': 117.1899962481559} # nadir position of satellite
    obsv_metrics = landsat_tirs_band1.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 98.78, 'ground pixel cross-track resolution [m]': 98.92, 'SNR': 1507.48, 'dynamic range': 113645.23, 'noise-equivalent delta T [K]': 0.04162})
    # disable LOWTRAN7 atmospheric loss model and evaluate results;
    # SNR and dynamic range increase since no in-band atmospheric loss is applied.
    landsat_tirs_band1_dict["atmosLossModel"] = None
    landsat_tirs_band1 = PassiveOpticalScannerModel.from_dict(landsat_tirs_band1_dict)
    obsv_metrics = landsat_tirs_band1.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 98.78, 'ground pixel cross-track resolution [m]': 98.92, 'SNR': 1610.69, 'dynamic range': 129736.13, 'noise-equivalent delta T [K]': 0.03895})
def test_calc_data_metrics_OLIBlueBand_precomputed(self):
    """ Model instrument with OLI Blue band specs (Optical, pushbroom) and test with the results as computed on 4 April 2021.
        Refer to the following article for more context:
        V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations,"
        2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.

    .. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
    """
    landsat_oli_blue_dict = { "@type": "Passive Optical Scanner",
        "name": "Landsat 8 OLI Blue band",
        "mass": 1,
        "volume": 1,
        "power": 1,
        "fieldOfViewGeometry": {
            "shape": "RECTANGULAR",
            "angleHeight": 0.00244080020725731,
            "angleWidth": 15
        },
        "scanTechnique": "PUSHBROOM",
        "dataRate": 384,
        "numberDetectorRows": 1,
        "numberDetectorCols": 6146,
        "detectorWidth": 36e-6,
        "focalLength": 845.1e-3,
        "operatingWavelength": 482e-9,
        "bandwidth": 65e-9,
        "quantumEff": 0.85,
        "targetBlackBodyTemp": 290,
        "bitsPerPixel": 12,
        "opticsSysEff": 0.90 ,
        "numOfReadOutE": 8,
        "apertureDia": 0.1320,
        "Fnum": 6.4,
        "maxDetectorExposureTime": 3.6e-3,
        "atmosLossModel": "LOWTRAN7",
        "_comments": ["Above is Total payload data-rate not just off the OLI.",
                      "Mass, power and volume are simply wrong.",
                      "numReadOutE is guessed."]
    }
    landsat_oli_blue = PassiveOpticalScannerModel.from_dict(landsat_oli_blue_dict)
    # landsat 8 orbit at 10 Apr 2021 14:24:17.819 UTC (NIGHT time)
    sc_orbit_state = {'time [JDUT1]':2459315.100208333, 'x [km]': -7012.215259847972, 'y [km]': 981.6284579029395, 'z [km]': 16.62328546479549,
                      'vx [km/s]': 0.1664588472531363, 'vy [km/s]': 1.055747095699285, 'vz [km/s]': 7.426472416008381 }
    target_coords = {'lat [deg]': 0.01942147899019397 , 'lon [deg]': 117.1899962481559} # nadir position of satellite
    obsv_metrics = landsat_oli_blue.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics['ground pixel along-track resolution [m]'], 29.96)
    self.assertEqual(obsv_metrics['ground pixel cross-track resolution [m]'], 30.0)
    # Night-time pass of an optical band: no reflected solar signal, hence zero
    # SNR/dynamic range and a meaningless (huge) noise-equivalent delta T.
    self.assertEqual(obsv_metrics['SNR'], 0.0)
    self.assertEqual(obsv_metrics['dynamic range'], 0.0)
    assert math.isclose(obsv_metrics['noise-equivalent delta T [K]'], 1318917697165785.8, rel_tol=1e-9)
    # 10 Apr 2021 15:07:22.788 (Day time)
    sc_orbit_state = {'time [JDUT1]':2459315.130127315, 'x [km]': 6512.435033854175, 'y [km]': -511.354859668807, 'z [km]': 2713.225164499847,
                      'vx [km/s]': 2.748229230268995, 'vy [km/s]': -1.377488737466059, 'vz [km/s]': -6.850883979753837 }
    target_coords = {'lat [deg]': 22.7948561533915 , 'lon [deg]': -70.13495405345812} # nadir position of satellite
    obsv_metrics = landsat_oli_blue.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 29.67, 'ground pixel cross-track resolution [m]': 29.73, 'SNR': 1065.8, 'dynamic range': 141998.68, 'noise-equivalent delta T [K]': np.inf})
    # disable LOWTRAN7 atmospheric loss model and evaluate results
    landsat_oli_blue_dict["atmosLossModel"] = None
    landsat_oli_blue = PassiveOpticalScannerModel.from_dict(landsat_oli_blue_dict)
    obsv_metrics = landsat_oli_blue.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 29.67, 'ground pixel cross-track resolution [m]': 29.73, 'SNR': 1281.85, 'dynamic range': 205400.19, 'noise-equivalent delta T [K]': np.inf})
def test_calc_data_metrics_MODISBand10_precomputed(self):
    """ Model instrument with MODIS Band 10 specs (Optical, whiskbroom) and test with the results as computed on 4 April 2021.
        Refer to the following article for more context:
        V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations,"
        2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.

    .. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
    """
    modis_band10_dict = {
        "@type": "Passive Optical Scanner",
        "name": "MODIS Band10",
        "mass": 274,
        "volume": 1.6,
        "power": 162.5,
        "fieldOfViewGeometry": {
            "shape": "RECTANGULAR",
            "angleHeight": 0.812366806011266,
            "angleWidth": 110
        },
        "scanTechnique": "WHISKBROOM",
        "dataRate": 6.2,
        "numberDetectorRows": 10,
        "numberDetectorCols": 1,
        "detectorWidth": 540e-6,
        "focalLength": 380.859e-3,
        "operatingWavelength": 490e-9,
        "bandwidth": 10e-9,
        "quantumEff": 0.33,
        "targetBlackBodyTemp": 300,
        "bitsPerPixel": 12,
        "opticsSysEff": 0.8,
        "numOfReadOutE": 25,
        "apertureDia": 0.1778,
        "Fnum": 2.1421,
        "maxDetectorExposureTime": 323.333e-6,
        "atmosLossModel": "LOWTRAN7",
        "_comments": ["purpose is for observation of surface/ cloud temperature(note target temp)",
                      "quantumEff, opticsSysEff, numofReadoutE are guessed."]
    }
    modis_band10 = PassiveOpticalScannerModel.from_dict(modis_band10_dict)
    # Aqua orbit at 10 Apr 2021 15:07:56.800 UTC (NIGHT time)
    sc_orbit_state = {'time [JDUT1]':2459315.130520833, 'x [km]': -5054.315202286442, 'y [km]': -4878.491479401228, 'z [km]': 883.5310463297755,
                      'vx [km/s]': -1.417318347731835, 'vy [km/s]': 0.1319708892386859, 'vz [km/s]': -7.367383505358474 }
    target_coords = {'lat [deg]': 7.127116160568699 , 'lon [deg]': 158.1924750010043} # nadir position of satellite
    obsv_metrics = modis_band10.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics['ground pixel along-track resolution [m]'], 996.11)
    self.assertEqual(obsv_metrics['ground pixel cross-track resolution [m]'], 997.19)
    # Night-time pass of an optical band: no reflected solar signal.
    self.assertEqual(obsv_metrics['SNR'], 0.0)
    self.assertEqual(obsv_metrics['dynamic range'], 0.0)
    assert math.isclose(obsv_metrics['noise-equivalent delta T [K]'], 224514118246476.62, rel_tol=1e-9)
    # 10 Apr 2021 15:55:53.269 (Day time)
    sc_orbit_state = {'time [JDUT1]':2459315.1638078704, 'x [km]': 4904.051098680667, 'y [km]': 4868.949787679997, 'z [km]': -1516.567875770611,
                      'vx [km/s]': 1.903191094106026, 'vy [km/s]': 0.3436316797910688, 'vz [km/s]': 7.255195566766275 }
    target_coords = {'lat [deg]': -12.36694967995247 , 'lon [deg]': -33.02510031498068} # nadir position of satellite
    obsv_metrics = modis_band10.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 988.98, 'ground pixel cross-track resolution [m]': 989.93, 'SNR': 3254.27, 'dynamic range': 423635.37, 'noise-equivalent delta T [K]': np.inf})
    # disable LOWTRAN7 atmospheric loss model and evaluate results
    modis_band10_dict["atmosLossModel"] = None
    modis_band10 = PassiveOpticalScannerModel.from_dict(modis_band10_dict)
    obsv_metrics = modis_band10.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 988.98, 'ground pixel cross-track resolution [m]': 989.93, 'SNR': 3892.58, 'dynamic range': 606110.78, 'noise-equivalent delta T [K]': np.inf})
def test_calc_data_metrics_MODISBand1_precomputed(self):
    """ Model instrument with MODIS Band 1 specs (Optical, whiskbroom) and test with the results as computed on 4 April 2021.
        Refer to the following article for more context:
        V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations,"
        2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.

    .. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
    """
    modis_band1_dict = {
        "@type": "Passive Optical Scanner",
        "name": "MODIS Band1",
        "mass": 274,
        "volume": 1.6,
        "power": 162.5,
        "fieldOfViewGeometry": {
            "shape": "RECTANGULAR",
            "angleHeight": 0.812366806011266,
            "angleWidth": 110
        },
        "scanTechnique": "WHISKBROOM",
        "dataRate": 6.2,
        "numberDetectorRows": 40,
        "numberDetectorCols": 1,
        "detectorWidth": 135e-6,
        "focalLength": 380.859e-3,
        "operatingWavelength": 645e-9,
        "bandwidth": 50e-9,
        "quantumEff": 0.33,
        "targetBlackBodyTemp": 300,
        "bitsPerPixel": 12,
        "opticsSysEff": 0.8,
        "numOfReadOutE": 25,
        "apertureDia": 0.1778,
        "Fnum": 2.1421,
        "maxDetectorExposureTime": 73.3e-6,
        "atmosLossModel": "LOWTRAN7",
        "_comments": ["purpose for surface/ cloud temperature(note target temp)",
                      "quantumEff, opticsSysEff, numofReadoutE are guessed."]
    }
    modis_band1 = PassiveOpticalScannerModel.from_dict(modis_band1_dict)
    # Aqua orbit at 10 Apr 2021 15:07:56.800 UTC (NIGHT time)
    sc_orbit_state = {'time [JDUT1]':2459315.130520833, 'x [km]': -5054.315202286442, 'y [km]': -4878.491479401228, 'z [km]': 883.5310463297755,
                      'vx [km/s]': -1.417318347731835, 'vy [km/s]': 0.1319708892386859, 'vz [km/s]': -7.367383505358474 }
    target_coords = {'lat [deg]': 7.127116160568699 , 'lon [deg]': 158.1924750010043} # nadir position of satellite
    obsv_metrics = modis_band1.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics['ground pixel along-track resolution [m]'], 249.03)
    self.assertEqual(obsv_metrics['ground pixel cross-track resolution [m]'], 249.3)
    # Night-time pass of an optical band: no reflected solar signal.
    self.assertEqual(obsv_metrics['SNR'], 0.0)
    self.assertEqual(obsv_metrics['dynamic range'], 0.0)
    assert math.isclose(obsv_metrics['noise-equivalent delta T [K]'], 10124816467.16259, rel_tol=1e-9)
    # 10 Apr 2021 15:55:53.269 (Day time)
    sc_orbit_state = {'time [JDUT1]':2459315.1638078704, 'x [km]': 4904.051098680667, 'y [km]': 4868.949787679997, 'z [km]': -1516.567875770611,
                      'vx [km/s]': 1.903191094106026, 'vy [km/s]': 0.3436316797910688, 'vz [km/s]': 7.255195566766275 }
    target_coords = {'lat [deg]': -12.36694967995247 , 'lon [deg]': -33.02510031498068} # nadir position of satellite
    obsv_metrics = modis_band1.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 247.24, 'ground pixel cross-track resolution [m]': 247.48, 'SNR': 986.24, 'dynamic range': 38932.1, 'noise-equivalent delta T [K]': np.inf})
    # disable LOWTRAN7 atmospheric loss model and evaluate results
    modis_band1_dict["atmosLossModel"] = None
    modis_band1 = PassiveOpticalScannerModel.from_dict(modis_band1_dict)
    obsv_metrics = modis_band1.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 247.24, 'ground pixel cross-track resolution [m]': 247.48, 'SNR': 1085.11, 'dynamic range': 47123.85, 'noise-equivalent delta T [K]': np.inf})
def test_calc_data_metrics_CCAMBlueBand_precomputed(self):
    """ Model instrument with CCAM Blue Band specs (Matrix, optical) and test with the results as computed on 4 April 2021.
        Refer to the following article for more context:
        V. Ravindra and S. Nag, "Instrument Data Metrics Evaluator for Tradespace Analysis of Earth Observing Constellations,"
        2020 IEEE Aerospace Conference, Big Sky, MT, USA, 2020.

    .. note:: The results using the LOWTRAN7 model are computed at resolution wav_step_percm = 5.
    """
    ccam_blue_band_dict = {
        "@type": "Passive Optical Scanner",
        "name": "CCAM",
        "fieldOfViewGeometry": {
            "shape": "RECTANGULAR",
            "angleHeight": 1.2,
            "angleWidth": 1.2
        },
        "scanTechnique": "MATRIX_IMAGER",
        "numberDetectorRows": 2048,
        "numberDetectorCols": 2048,
        "detectorWidth": 5.5e-6,
        "focalLength": 520e-3,
        "operatingWavelength": 470e-9,
        "bandwidth": 150e-9,
        "quantumEff": 0.40,
        "targetBlackBodyTemp": 290,
        "opticsSysEff": 0.6,
        "numOfReadOutE": 13,
        "apertureDia": 94.6e-3,
        "Fnum": 5.5,
        "maxDetectorExposureTime": 678e-6,
        "atmosLossModel": "LOWTRAN7"
    }
    ccam_blue_band = PassiveOpticalScannerModel.from_dict(ccam_blue_band_dict)
    # Aqua orbit at 10 Apr 2021 15:07:56.800 UTC (NIGHT time)
    sc_orbit_state = {'time [JDUT1]':2459315.130520833, 'x [km]': -5054.315202286442, 'y [km]': -4878.491479401228, 'z [km]': 883.5310463297755,
                      'vx [km/s]': -1.417318347731835, 'vy [km/s]': 0.1319708892386859, 'vz [km/s]': -7.367383505358474 }
    target_coords = {'lat [deg]': 7.127116160568699 , 'lon [deg]': 158.1924750010043} # nadir position of satellite
    obsv_metrics = ccam_blue_band.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics['ground pixel along-track resolution [m]'], 7.43)
    self.assertEqual(obsv_metrics['ground pixel cross-track resolution [m]'], 7.44)
    # Night-time pass of an optical band: no reflected solar signal.
    self.assertEqual(obsv_metrics['SNR'], 0.0)
    self.assertEqual(obsv_metrics['dynamic range'], 0.0)
    assert math.isclose(obsv_metrics['noise-equivalent delta T [K]'], 2302356852773662.0, rel_tol=1e-9)
    # 10 Apr 2021 15:55:53.269 (Day time)
    sc_orbit_state = {'time [JDUT1]':2459315.1638078704, 'x [km]': 4904.051098680667, 'y [km]': 4868.949787679997, 'z [km]': -1516.567875770611,
                      'vx [km/s]': 1.903191094106026, 'vy [km/s]': 0.3436316797910688, 'vz [km/s]': 7.255195566766275 }
    target_coords = {'lat [deg]': -12.36694967995247 , 'lon [deg]': -33.02510031498068} # nadir position of satellite
    obsv_metrics = ccam_blue_band.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 7.38, 'ground pixel cross-track resolution [m]': 7.38, 'SNR': 63.3, 'dynamic range': 320.71, 'noise-equivalent delta T [K]': np.inf})
    # disable LOWTRAN7 atmospheric loss model and evaluate results
    ccam_blue_band_dict["atmosLossModel"] = None
    ccam_blue_band = PassiveOpticalScannerModel.from_dict(ccam_blue_band_dict)
    obsv_metrics = ccam_blue_band.calc_data_metrics(sc_orbit_state, target_coords)
    self.assertEqual(obsv_metrics, {'ground pixel along-track resolution [m]': 7.38, 'ground pixel cross-track resolution [m]': 7.38, 'SNR': 79.01, 'dynamic range': 492.91, 'noise-equivalent delta T [K]': np.inf})
| 73.94452 | 372 | 0.521695 |
ace5f5187daf297031068830107bfff6ef3bcc6d | 17,308 | py | Python | train_deep_sdf.py | nihalsid/DeepSDF | 765b7dd8b8721cbf1017eb9a226bad7726d3c974 | [
"MIT"
] | null | null | null | train_deep_sdf.py | nihalsid/DeepSDF | 765b7dd8b8721cbf1017eb9a226bad7726d3c974 | [
"MIT"
] | null | null | null | train_deep_sdf.py | nihalsid/DeepSDF | 765b7dd8b8721cbf1017eb9a226bad7726d3c974 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import torch
import torch.utils.data as data_utils
import signal
import sys
import os
import logging
import math
import json
import numpy as np
import time
import torch.nn.functional as F
import deep_sdf
import deep_sdf.workspace as ws
class LearningRateSchedule:
    """Abstract interface for per-epoch learning-rate schedules."""

    def get_learning_rate(self, epoch):
        """Return the learning rate to use at `epoch` (implemented by subclasses)."""
        pass
class ConstantLearningRateSchedule(LearningRateSchedule):
    """Schedule that keeps the learning rate fixed for all epochs."""

    def __init__(self, value):
        # value: the constant learning rate to return for every epoch.
        self.value = value

    def get_learning_rate(self, epoch):
        """Return the constant rate; `epoch` is ignored."""
        return self.value
class StepLearningRateSchedule(LearningRateSchedule):
    """Step decay: multiply the initial rate by `factor` once every `interval` epochs."""

    def __init__(self, initial, interval, factor):
        self.initial = initial
        self.interval = interval
        self.factor = factor

    def get_learning_rate(self, epoch):
        """Return initial * factor^(number of completed decay intervals)."""
        num_decays = epoch // self.interval
        return self.initial * (self.factor ** num_decays)
class WarmupLearningRateSchedule(LearningRateSchedule):
    """Linear warmup: ramp from `initial` to `warmed_up` over `length` epochs, then hold."""

    def __init__(self, initial, warmed_up, length):
        self.initial = initial
        self.warmed_up = warmed_up
        self.length = length

    def get_learning_rate(self, epoch):
        """Return the linearly interpolated rate, clamped to `warmed_up` after warmup."""
        if epoch > self.length:
            # Warmup finished: hold the final rate.
            return self.warmed_up
        fraction = epoch / self.length
        return self.initial + (self.warmed_up - self.initial) * fraction
def get_learning_rate_schedules(specs):
    """Build the list of learning-rate schedules described in the experiment specs.

    specs["LearningRateSchedule"] is a list of dicts, each selecting a schedule
    "Type" ("Step", "Warmup", or "Constant") together with its hyper-parameters.
    Returns one schedule object per entry, in order. Raises Exception on an
    unknown type.
    """
    # Fix: the original reused the name `schedule_specs` for both the list and
    # the loop item, shadowing the list inside its own loop; distinct names are
    # used here for clarity (behavior unchanged).
    schedules = []
    for schedule_spec in specs["LearningRateSchedule"]:
        schedule_type = schedule_spec["Type"]
        if schedule_type == "Step":
            schedules.append(
                StepLearningRateSchedule(
                    schedule_spec["Initial"],
                    schedule_spec["Interval"],
                    schedule_spec["Factor"],
                )
            )
        elif schedule_type == "Warmup":
            schedules.append(
                WarmupLearningRateSchedule(
                    schedule_spec["Initial"],
                    schedule_spec["Final"],
                    schedule_spec["Length"],
                )
            )
        elif schedule_type == "Constant":
            schedules.append(ConstantLearningRateSchedule(schedule_spec["Value"]))
        else:
            raise Exception(
                'no known learning rate schedule of type "{}"'.format(
                    schedule_type
                )
            )
    return schedules
def save_model(experiment_directory, filename, decoder, epoch):
    """Write a decoder checkpoint (weights + epoch) under the experiment's model-params dir."""
    # The True flag asks the workspace helper to create the directory if absent
    # — presumably; confirm against ws.get_model_params_dir.
    destination = os.path.join(
        ws.get_model_params_dir(experiment_directory, True), filename
    )
    checkpoint = {"epoch": epoch, "model_state_dict": decoder.state_dict()}
    torch.save(checkpoint, destination)
def save_optimizer(experiment_directory, filename, optimizer, epoch):
    """Write an optimizer checkpoint (state + epoch) under the experiment's optimizer-params dir."""
    destination = os.path.join(
        ws.get_optimizer_params_dir(experiment_directory, True), filename
    )
    checkpoint = {"epoch": epoch, "optimizer_state_dict": optimizer.state_dict()}
    torch.save(checkpoint, destination)
def load_optimizer(experiment_directory, filename, optimizer):
    """Restore optimizer state from a checkpoint file; return the epoch it was saved at.

    Raises Exception if the checkpoint file does not exist.
    """
    state_path = os.path.join(
        ws.get_optimizer_params_dir(experiment_directory), filename
    )
    if not os.path.isfile(state_path):
        raise Exception(
            'optimizer state dict "{}" does not exist'.format(state_path)
        )
    checkpoint = torch.load(state_path)
    optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    return checkpoint["epoch"]
def save_latent_vectors(experiment_directory, filename, latent_vec, epoch):
    """Write the latent-code embedding's state dict (+ epoch) under the latent-codes dir."""
    destination = os.path.join(
        ws.get_latent_codes_dir(experiment_directory, True), filename
    )
    payload = {"epoch": epoch, "latent_codes": latent_vec.state_dict()}
    torch.save(payload, destination)
# TODO: duplicated in workspace
def load_latent_vectors(experiment_directory, filename, lat_vecs):
    """Load saved latent codes into the `lat_vecs` embedding; return the saved epoch.

    Supports two on-disk formats: the current one (an nn.Embedding state dict)
    and a legacy one where "latent_codes" is a raw tensor. Raises Exception if
    the file is missing or the legacy tensor's dimensions don't match `lat_vecs`.
    """
    full_filename = os.path.join(
        ws.get_latent_codes_dir(experiment_directory), filename
    )
    if not os.path.isfile(full_filename):
        raise Exception('latent state file "{}" does not exist'.format(full_filename))
    data = torch.load(full_filename)
    if isinstance(data["latent_codes"], torch.Tensor):
        # for backwards compatibility: legacy checkpoints stored a raw tensor.
        # Indexing size()[0] and size()[2] implies the legacy shape was
        # (num_codes, 1, code_dim) — presumably; verify against old checkpoints.
        if not lat_vecs.num_embeddings == data["latent_codes"].size()[0]:
            raise Exception(
                "num latent codes mismatched: {} vs {}".format(
                    lat_vecs.num_embeddings, data["latent_codes"].size()[0]
                )
            )
        if not lat_vecs.embedding_dim == data["latent_codes"].size()[2]:
            raise Exception("latent code dimensionality mismatch")
        # Copy each saved code row into the embedding's weight matrix.
        for i, lat_vec in enumerate(data["latent_codes"]):
            lat_vecs.weight.data[i, :] = lat_vec
    else:
        lat_vecs.load_state_dict(data["latent_codes"])
    return data["epoch"]
def save_logs(
    experiment_directory,
    loss_log,
    lr_log,
    timing_log,
    lat_mag_log,
    param_mag_log,
    epoch,
):
    """Persist all training logs (loss, lr, timing, magnitudes) to the experiment's log file."""
    log_payload = {
        "epoch": epoch,
        "loss": loss_log,
        "learning_rate": lr_log,
        "timing": timing_log,
        "latent_magnitude": lat_mag_log,
        "param_magnitude": param_mag_log,
    }
    torch.save(log_payload, os.path.join(experiment_directory, ws.logs_filename))
def load_logs(experiment_directory):
    """Load the saved training logs.

    Returns a tuple (loss, learning_rate, timing, latent_magnitude,
    param_magnitude, epoch). Raises Exception if the log file is missing.
    """
    log_path = os.path.join(experiment_directory, ws.logs_filename)
    if not os.path.isfile(log_path):
        raise Exception('log file "{}" does not exist'.format(log_path))
    data = torch.load(log_path)
    # Order mirrors the dict written by save_logs, with epoch last.
    keys = (
        "loss",
        "learning_rate",
        "timing",
        "latent_magnitude",
        "param_magnitude",
        "epoch",
    )
    return tuple(data[key] for key in keys)
def clip_logs(loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, epoch):
    """Truncate all logs back to `epoch` entries.

    The per-iteration loss log is clipped proportionally (its length per
    epoch is inferred from the per-epoch lr log). ``param_mag_log`` is
    truncated in place, as before.
    """
    iters_each_epoch = len(loss_log) // len(lr_log)
    clipped_loss = loss_log[:iters_each_epoch * epoch]
    clipped_lr = lr_log[:epoch]
    clipped_timing = timing_log[:epoch]
    clipped_lat_mag = lat_mag_log[:epoch]
    for key in param_mag_log:
        param_mag_log[key] = param_mag_log[key][:epoch]  # mutate in place
    return (clipped_loss, clipped_lr, clipped_timing, clipped_lat_mag,
            param_mag_log)
def get_spec_with_default(specs, key, default):
    """Return ``specs[key]``, or ``default`` when the key is absent.

    ``specs`` is the dict loaded from specs.json; the hand-rolled
    try/except KeyError is exactly ``dict.get``.
    """
    return specs.get(key, default)
def get_mean_latent_vector_magnitude(latent_vectors):
    """Mean L2 norm over the rows of the embedding's weight matrix."""
    weights = latent_vectors.weight.data.detach()
    return torch.norm(weights, dim=1).mean()
def append_parameter_magnitudes(param_mag_log, model):
    """Append each named parameter's L2 norm to its history in param_mag_log."""
    for name, param in model.named_parameters():
        # strip the DataParallel "module." prefix (only when something follows it)
        if len(name) > 7 and name.startswith("module."):
            name = name[7:]
        history = param_mag_log.setdefault(name, [])
        history.append(param.data.norm().item())
def main_function(experiment_directory, continue_from, batch_split):
    """Train a DeepSDF auto-decoder as configured by the experiment's specs.json.

    experiment_directory: directory holding specs.json; checkpoints and logs
        are written back into it.
    continue_from: snapshot name (without ".pth") to resume from, or None.
    batch_split: number of sub-batches each batch is split into; gradients
        are accumulated across them (for memory-constrained training).
    """
    logging.debug("running " + experiment_directory)
    specs = ws.load_experiment_specifications(experiment_directory)
    logging.info("Experiment description: \n" + specs["Description"])
    data_source = specs["DataSource"]
    train_split_file = specs["TrainSplit"]
    # dynamically import the decoder architecture module named in the specs
    arch = __import__("networks." + specs["NetworkArch"], fromlist=["Decoder"])
    logging.debug(specs["NetworkSpecs"])
    latent_size = specs["CodeLength"]
    # epochs at which numbered snapshots are written
    checkpoints = list(
        range(
            specs["SnapshotFrequency"],
            specs["NumEpochs"] + 1,
            specs["SnapshotFrequency"],
        )
    )
    for checkpoint in specs["AdditionalSnapshots"]:
        checkpoints.append(checkpoint)
    checkpoints.sort()
    lr_schedules = get_learning_rate_schedules(specs)
    grad_clip = get_spec_with_default(specs, "GradientClipNorm", None)
    if grad_clip is not None:
        logging.debug("clipping gradients to max norm {}".format(grad_clip))
    def save_latest(epoch):
        # overwrite the rolling "latest" snapshot (model + optimizer + codes)
        save_model(experiment_directory, "latest.pth", decoder, epoch)
        save_optimizer(experiment_directory, "latest.pth", optimizer_all, epoch)
        save_latent_vectors(experiment_directory, "latest.pth", lat_vecs, epoch)
    def save_checkpoints(epoch):
        # write an epoch-numbered snapshot (model + optimizer + codes)
        save_model(experiment_directory, str(epoch) + ".pth", decoder, epoch)
        save_optimizer(experiment_directory, str(epoch) + ".pth", optimizer_all, epoch)
        save_latent_vectors(experiment_directory, str(epoch) + ".pth", lat_vecs, epoch)
    def signal_handler(sig, frame):
        # allow a clean Ctrl-C stop
        logging.info("Stopping early...")
        sys.exit(0)
    def adjust_learning_rate(lr_schedules, optimizer, epoch):
        # one schedule per param group (index 0: decoder, index 1: latent codes)
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = lr_schedules[i].get_learning_rate(epoch)
    def empirical_stat(latent_vecs, indices):
        # mean/variance of the selected latent vectors (not used in the loop below)
        lat_mat = torch.zeros(0).cuda()
        for ind in indices:
            lat_mat = torch.cat([lat_mat, latent_vecs[ind]], 0)
        mean = torch.mean(lat_mat, 0)
        var = torch.var(lat_mat, 0)
        return mean, var
    signal.signal(signal.SIGINT, signal_handler)
    num_samp_per_scene = specs["SamplesPerScene"]
    scene_per_batch = specs["ScenesPerBatch"]
    clamp_dist = specs["ClampingDistance"]
    minT = -clamp_dist
    maxT = clamp_dist
    enforce_minmax = True
    do_code_regularization = get_spec_with_default(specs, "CodeRegularization", True)
    code_reg_lambda = get_spec_with_default(specs, "CodeRegularizationLambda", 1e-4)
    code_bound = get_spec_with_default(specs, "CodeBound", None)
    decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).cuda()
    logging.info("training with {} GPU(s)".format(torch.cuda.device_count()))
    # if torch.cuda.device_count() > 1:
    decoder = torch.nn.DataParallel(decoder)
    num_epochs = specs["NumEpochs"]
    log_frequency = get_spec_with_default(specs, "LogFrequency", 10)
    with open(train_split_file, "r") as f:
        train_split = json.load(f)
    sdf_dataset = deep_sdf.data.SDFSamples(
        data_source, train_split, num_samp_per_scene, load_ram=False
    )
    num_data_loader_threads = get_spec_with_default(specs, "DataLoaderThreads", 1)
    logging.debug("loading data with {} threads".format(num_data_loader_threads))
    sdf_loader = data_utils.DataLoader(
        sdf_dataset,
        batch_size=scene_per_batch,
        shuffle=True,
        num_workers=num_data_loader_threads,
        drop_last=False,
    )
    logging.debug("torch num_threads: {}".format(torch.get_num_threads()))
    num_scenes = len(sdf_dataset)
    logging.info("There are {} scenes".format(num_scenes))
    logging.debug(decoder)
    # one learnable latent code per training scene
    lat_vecs = torch.nn.Embedding(num_scenes, latent_size, max_norm=code_bound)
    torch.nn.init.normal_(
        lat_vecs.weight.data,
        0.0,
        get_spec_with_default(specs, "CodeInitStdDev", 1.0) / math.sqrt(latent_size),
    )
    logging.debug(
        "initialized with mean magnitude {}".format(
            get_mean_latent_vector_magnitude(lat_vecs)
        )
    )
    loss_l1 = torch.nn.L1Loss(reduction="sum")
    optimizer_all = torch.optim.Adam(
        [
            {
                "params": decoder.parameters(),
                "lr": lr_schedules[0].get_learning_rate(0),
            },
            {
                "params": lat_vecs.parameters(),
                "lr": lr_schedules[1].get_learning_rate(0),
            },
        ]
    )
    loss_log = []
    lr_log = []
    lat_mag_log = []
    timing_log = []
    param_mag_log = {}
    start_epoch = 1
    if continue_from is not None:
        logging.info('continuing from "{}"'.format(continue_from))
        lat_epoch = load_latent_vectors(
            experiment_directory, continue_from + ".pth", lat_vecs
        )
        model_epoch = ws.load_model_parameters(
            experiment_directory, continue_from, decoder
        )
        optimizer_epoch = load_optimizer(
            experiment_directory, continue_from + ".pth", optimizer_all
        )
        loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, log_epoch = load_logs(
            experiment_directory
        )
        # logs may be ahead of the snapshot; clip them back to the model epoch
        if not log_epoch == model_epoch:
            loss_log, lr_log, timing_log, lat_mag_log, param_mag_log = clip_logs(
                loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, model_epoch
            )
        if not (model_epoch == optimizer_epoch and model_epoch == lat_epoch):
            raise RuntimeError(
                "epoch mismatch: {} vs {} vs {} vs {}".format(
                    model_epoch, optimizer_epoch, lat_epoch, log_epoch
                )
            )
        start_epoch = model_epoch + 1
        logging.debug("loaded")
    logging.info("starting from epoch {}".format(start_epoch))
    logging.info(
        "Number of decoder parameters: {}".format(
            sum(p.data.nelement() for p in decoder.parameters())
        )
    )
    logging.info(
        "Number of shape code parameters: {} (# codes {}, code dim {})".format(
            lat_vecs.num_embeddings * lat_vecs.embedding_dim,
            lat_vecs.num_embeddings,
            lat_vecs.embedding_dim,
        )
    )
    # ---- main training loop ----
    for epoch in range(start_epoch, num_epochs + 1):
        start = time.time()
        logging.info("epoch {}...".format(epoch))
        decoder.train()
        adjust_learning_rate(lr_schedules, optimizer_all, epoch)
        for sdf_data, indices in sdf_loader:
            # Process the input data
            # each row is (x, y, z, sdf)
            sdf_data = sdf_data.reshape(-1, 4)
            num_sdf_samples = sdf_data.shape[0]
            sdf_data.requires_grad = False
            xyz = sdf_data[:, 0:3]
            sdf_gt = sdf_data[:, 3].unsqueeze(1)
            if enforce_minmax:
                sdf_gt = torch.clamp(sdf_gt, minT, maxT)
            # split into `batch_split` chunks for gradient accumulation
            xyz = torch.chunk(xyz, batch_split)
            indices = torch.chunk(
                indices.unsqueeze(-1).repeat(1, num_samp_per_scene).view(-1),
                batch_split,
            )
            sdf_gt = torch.chunk(sdf_gt, batch_split)
            batch_loss = 0.0
            optimizer_all.zero_grad()
            for i in range(batch_split):
                batch_vecs = lat_vecs(indices[i])
                # decoder input: per-sample latent code concatenated with xyz
                input = torch.cat([batch_vecs, xyz[i]], dim=1)
                # NN optimization
                pred_sdf = decoder(input)
                if enforce_minmax:
                    pred_sdf = torch.clamp(pred_sdf, minT, maxT)
                chunk_loss = loss_l1(pred_sdf, sdf_gt[i].cuda()) / num_sdf_samples
                # chunk_loss = F.binary_cross_entropy_with_logits(pred_sdf, sdf_gt[i].cuda(), reduction='sum') / num_sdf_samples
                if do_code_regularization:
                    # L2 penalty on latent codes, ramped up over the first 100 epochs
                    l2_size_loss = torch.sum(torch.norm(batch_vecs, dim=1))
                    reg_loss = (
                        code_reg_lambda * min(1, epoch / 100) * l2_size_loss
                    ) / num_sdf_samples
                    chunk_loss = chunk_loss + reg_loss.cuda()
                chunk_loss.backward()
                batch_loss += chunk_loss.item()
            logging.debug("loss = {}".format(batch_loss))
            loss_log.append(batch_loss)
            if grad_clip is not None:
                torch.nn.utils.clip_grad_norm_(decoder.parameters(), grad_clip)
            optimizer_all.step()
        end = time.time()
        seconds_elapsed = end - start
        timing_log.append(seconds_elapsed)
        lr_log.append([schedule.get_learning_rate(epoch) for schedule in lr_schedules])
        lat_mag_log.append(get_mean_latent_vector_magnitude(lat_vecs))
        append_parameter_magnitudes(param_mag_log, decoder)
        if epoch in checkpoints:
            save_checkpoints(epoch)
        if epoch % log_frequency == 0:
            save_latest(epoch)
            save_logs(
                experiment_directory,
                loss_log,
                lr_log,
                timing_log,
                lat_mag_log,
                param_mag_log,
                epoch,
            )
if __name__ == "__main__":
    # CLI entry point: parse arguments and hand off to main_function.
    import argparse
    arg_parser = argparse.ArgumentParser(description="Train a DeepSDF autodecoder")
    arg_parser.add_argument(
        "--experiment",
        "-e",
        dest="experiment_directory",
        required=True,
        help="The experiment directory. This directory should include "
        + "experiment specifications in 'specs.json', and logging will be "
        + "done in this directory as well.",
    )
    arg_parser.add_argument(
        "--continue",
        "-c",
        dest="continue_from",
        help="A snapshot to continue from. This can be 'latest' to continue"
        + "from the latest running snapshot, or an integer corresponding to "
        + "an epochal snapshot.",
    )
    arg_parser.add_argument(
        "--batch_split",
        dest="batch_split",
        default=1,
        help="This splits the batch into separate subbatches which are "
        + "processed separately, with gradients accumulated across all "
        + "subbatches. This allows for training with large effective batch "
        + "sizes in memory constrained environments.",
    )
    # shared flags (e.g. logging verbosity) provided by the deep_sdf package
    deep_sdf.add_common_args(arg_parser)
    args = arg_parser.parse_args()
    deep_sdf.configure_logging(args)
    # batch_split arrives as a string from argparse (no type=); cast here
    main_function(args.experiment_directory, args.continue_from, int(args.batch_split))
| 29.089076 | 128 | 0.627976 |
ace5f56c077ec9167712bec55726862ea8cdf55d | 15,987 | py | Python | samples/client/petstore/python/petstore_api/api/store_api.py | kymbalon/openapi-generator | 8327a920408314aacb7d00f64285ae88e9195633 | [
"Apache-2.0"
] | 2 | 2019-03-26T11:04:18.000Z | 2021-01-03T10:54:10.000Z | samples/client/petstore/python/petstore_api/api/store_api.py | kymbalon/openapi-generator | 8327a920408314aacb7d00f64285ae88e9195633 | [
"Apache-2.0"
] | 7 | 2021-03-01T21:26:03.000Z | 2022-02-27T10:10:20.000Z | samples/client/petstore/python/petstore_api/api/store_api.py | kymbalon/openapi-generator | 8327a920408314aacb7d00f64285ae88e9195633 | [
"Apache-2.0"
] | 4 | 2019-04-08T17:06:09.000Z | 2020-06-09T18:16:08.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from petstore_api.api_client import ApiClient
from petstore_api.exceptions import (
ApiTypeError,
ApiValueError
)
class StoreApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    # Generated client for the petstore /store endpoints. Each operation has a
    # convenience wrapper plus a *_with_http_info variant that does the work;
    # prefer regenerating from the OpenAPI spec over hand-editing this file.

    def __init__(self, api_client=None):
        # fall back to a default-configured ApiClient when none is injected
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def delete_order(self, order_id, **kwargs):  # noqa: E501
        """Delete purchase order by ID  # noqa: E501
        For valid response try integer IDs with value < 1000. Anything above 1000 or nonintegers will generate API errors  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_order(order_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str order_id: ID of the order that needs to be deleted (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_order_with_http_info(order_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_order_with_http_info(order_id, **kwargs)  # noqa: E501
            return data

    def delete_order_with_http_info(self, order_id, **kwargs):  # noqa: E501
        """Delete purchase order by ID  # noqa: E501
        For valid response try integer IDs with value < 1000. Anything above 1000 or nonintegers will generate API errors  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_order_with_http_info(order_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str order_id: ID of the order that needs to be deleted (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        all_params = ['order_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # reject unknown keyword arguments early
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_order" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if ('order_id' not in local_var_params or
                local_var_params['order_id'] is None):
            raise ApiValueError("Missing the required parameter `order_id` when calling `delete_order`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/store/order/{order_id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_inventory(self, **kwargs):  # noqa: E501
        """Returns pet inventories by status  # noqa: E501
        Returns a map of status codes to quantities  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inventory(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: dict(str, int)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_inventory_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_inventory_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_inventory_with_http_info(self, **kwargs):  # noqa: E501
        """Returns pet inventories by status  # noqa: E501
        Returns a map of status codes to quantities  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_inventory_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :return: dict(str, int)
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_inventory" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501
        return self.api_client.call_api(
            '/store/inventory', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, int)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def get_order_by_id(self, order_id, **kwargs):  # noqa: E501
        """Find purchase order by ID  # noqa: E501
        For valid response try integer IDs with value <= 5 or > 10. Other values will generated exceptions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_order_by_id(order_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int order_id: ID of pet that needs to be fetched (required)
        :return: Order
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_order_by_id_with_http_info(order_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_order_by_id_with_http_info(order_id, **kwargs)  # noqa: E501
            return data

    def get_order_by_id_with_http_info(self, order_id, **kwargs):  # noqa: E501
        """Find purchase order by ID  # noqa: E501
        For valid response try integer IDs with value <= 5 or > 10. Other values will generated exceptions  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_order_by_id_with_http_info(order_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int order_id: ID of pet that needs to be fetched (required)
        :return: Order
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        all_params = ['order_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_order_by_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'order_id' is set
        if ('order_id' not in local_var_params or
                local_var_params['order_id'] is None):
            raise ApiValueError("Missing the required parameter `order_id` when calling `get_order_by_id`")  # noqa: E501

        # client-side range validation mirroring the spec's minimum/maximum
        if 'order_id' in local_var_params and local_var_params['order_id'] > 5:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `order_id` when calling `get_order_by_id`, must be a value less than or equal to `5`")  # noqa: E501
        if 'order_id' in local_var_params and local_var_params['order_id'] < 1:  # noqa: E501
            raise ApiValueError("Invalid value for parameter `order_id` when calling `get_order_by_id`, must be a value greater than or equal to `1`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        if 'order_id' in local_var_params:
            path_params['order_id'] = local_var_params['order_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/store/order/{order_id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)

    def place_order(self, body, **kwargs):  # noqa: E501
        """Place an order for a pet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.place_order(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param Order body: order placed for purchasing the pet (required)
        :return: Order
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.place_order_with_http_info(body, **kwargs)  # noqa: E501
        else:
            (data) = self.place_order_with_http_info(body, **kwargs)  # noqa: E501
            return data

    def place_order_with_http_info(self, body, **kwargs):  # noqa: E501
        """Place an order for a pet  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.place_order_with_http_info(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param Order body: order placed for purchasing the pet (required)
        :return: Order
                 If the method is called asynchronously,
                 returns the request thread.
        """
        local_var_params = locals()
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method place_order" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in local_var_params or
                local_var_params['body'] is None):
            raise ApiValueError("Missing the required parameter `body` when calling `place_order`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in local_var_params:
            body_params = local_var_params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/xml', 'application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        return self.api_client.call_api(
            '/store/order', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Order',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
| 38.803398 | 174 | 0.618377 |
ace5f660a10e7c672d36f1e426f425c4d9c5d192 | 971 | py | Python | tests/step_defs/test_retirement_steps.py | K-eli-wtech/tau-pytest-bdd | bfa0052610eb29ba420a79848cec79228946e9b8 | [
"Apache-2.0"
] | null | null | null | tests/step_defs/test_retirement_steps.py | K-eli-wtech/tau-pytest-bdd | bfa0052610eb29ba420a79848cec79228946e9b8 | [
"Apache-2.0"
] | null | null | null | tests/step_defs/test_retirement_steps.py | K-eli-wtech/tau-pytest-bdd | bfa0052610eb29ba420a79848cec79228946e9b8 | [
"Apache-2.0"
] | null | null | null | from pytest_bdd import scenario, parsers, given, when, then
from retirement import *
# Custom parse types for parsers.cfparse step patterns ("{year:Number}").
EXTRA_TYPES = {
    'Number': int,
}
# Converters applied to scenario-outline example values ("<year>", "<month>").
CONVERTERS = {
    'year': int,
    'month': int,
}
# Bind the test to its Gherkin scenario. Without the "@" the scenario(...)
# call merely builds a decorator that is thrown away, so the feature file is
# never exercised and test_retire_year passes as an empty no-op.
@scenario('../features/retire_age.feature', 'Birth year entered in range')
def test_retire_year():
    """Scenario: Birth year entered in range (retire_age.feature)."""
    pass
@given('The input is numbers', target_fixture='input_valid')
def input_valid():
    """No-op Given step: exposes the 'input_valid' fixture with no value."""
    pass
@when(parsers.cfparse('Birth "{year:Number}" is input', extra_types=EXTRA_TYPES))
@when('Birth "<year>" is input')
def check_year(year):
    """When step: assert the entered birth year validates, then report success."""
    assert _validate_age_year(year)
    return True
@when(parsers.cfparse('Birth "{month:Number}" is input', extra_types=EXTRA_TYPES))
@when('Birth "<month>" is input')
def check_month(month):
    """When step: assert the entered birth month validates, then report success."""
    assert _validate_birth_month(month)
    return True
@then('The correct year and month is listed for age to retire along with how many years that will be')
def output_correct():
    """No-op Then step: outcome checking happens in the When steps above."""
    pass
| 21.108696 | 102 | 0.709578 |
ace5f762df5cd9d9150df574bc247f2171055dc5 | 6,021 | py | Python | configs/representation/ssst/ssst_r18_t_a1_nc_sgd_cos_50e_r2_1xNx2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/ssst/ssst_r18_t_a1_nc_sgd_cos_50e_r2_1xNx2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/ssst/ssst_r18_t_a1_nc_sgd_cos_50e_r2_1xNx2_k400.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | # model settings
# Self-supervised tracker hyper-parameters (referenced by the configs below).
temperature = 0.2
with_norm = True
query_dim = 128
# model settings: ResNet-18 backbone + self-attention plugin + SimSiam head
model = dict(
    type='SimSiamBaseSTSNTracker',
    backbone=dict(
        type='ResNet',
        pretrained=None,
        depth=18,
        out_indices=(3, ),
        # strides=(1, 2, 1, 1),
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        zero_init_residual=True),
    # cls_head=None,
    # patch_head=None,
    att_plugin=dict(
        type='SelfAttention',
        dropout=0.,
        matmul_norm=True,
        use_residual=True,
        downsample=2),
    img_head=dict(
        type='SimSiamHead',
        in_channels=512,
        norm_cfg=dict(type='SyncBN'),
        num_projection_fcs=3,
        projection_mid_channels=512,
        projection_out_channels=512,
        num_predictor_fcs=2,
        predictor_mid_channels=128,
        predictor_out_channels=512,
        with_norm=True,
        loss_feat=dict(type='CosineSimLoss', negative=False),
        spatial_type='avg'))
# model training and testing settings
train_cfg = dict(
    att_indices=(1, ),
    self_as_value=True,
    pred_frame_index=0,
    target_frame_index=-1,
    target_att=True)
test_cfg = dict(
    precede_frames=20,
    topk=10,
    temperature=0.2,
    strides=(1, 2, 1, 1),
    out_indices=(2, 3),
    neighbor_range=24,
    with_first=True,
    with_first_neighbor=True,
    output_dir='eval_results')
# dataset settings
# Train on Kinetics-400 videos; evaluate label propagation on DAVIS 2017 val.
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# training pipeline: two independently-augmented single-frame clips per video
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=2),
    # dict(type='DuplicateFrames', times=2, as_clip=False),
    # dict(type='Frame2Clip'),
    # dict(
    #     type='AppendFrames',
    #     num_frames=1,
    #     frame_interval=8,
    #     temporal_jitter=False),
    dict(type='DecordDecode'),
    dict(
        type='RandomResizedCrop',
        area_range=(0.2, 1.),
        same_across_clip=False,
        same_on_clip=False,
        same_frame_indices=None),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(
        type='Flip',
        flip_ratio=0.5,
        same_across_clip=False,
        same_on_clip=False,
        same_frame_indices=None),
    # dict(
    #     type='ColorJitter',
    #     brightness=0.4,
    #     contrast=0.4,
    #     saturation=0.4,
    #     hue=0.1,
    #     p=0.8,
    #     same_across_clip=False,
    #     same_on_clip=False),
    # dict(
    #     type='RandomGrayScale',
    #     p=0.2,
    #     same_across_clip=False,
    #     same_on_clip=False),
    # dict(
    #     type='RandomGaussianBlur',
    #     p=0.5,
    #     same_across_clip=False,
    #     same_on_clip=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# validation pipeline: full frame sequences plus the first-frame seg map
val_pipeline = [
    dict(type='SequentialSampleFrames', frame_interval=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 480), keep_ratio=True),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(
        type='Collect',
        keys=['imgs', 'ref_seg_map'],
        meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
    dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
    videos_per_gpu=128,
    workers_per_gpu=16,
    val_workers_per_gpu=1,
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_prefix,
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type_val,
        ann_file=ann_file_val,
        data_prefix=data_prefix_val,
        data_root=data_root_val,
        anno_prefix=anno_prefix_val,
        pipeline=val_pipeline,
        test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: cosine-annealed LR, stepped per iteration (by_epoch=False)
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
#     policy='step',
#     warmup='linear',
#     warmup_iters=100,
#     warmup_ratio=0.001,
#     step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
# evaluate DAVIS J&F every epoch; keep the checkpoint with the best score
evaluation = dict(
    interval=1,
    metrics='davis',
    key_indicator='feat_1.J&F-Mean',
    rule='greater')
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
        dict(
            type='WandbLoggerHook',
            init_kwargs=dict(
                project='mmaction2',
                name='{{fileBasenameNoExtension}}',
                resume=True,
                tags=['ssst'],
                dir='wandb/{{fileBasenameNoExtension}}',
                config=dict(
                    model=model,
                    train_cfg=train_cfg,
                    test_cfg=test_cfg,
                    data=data))),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
ace5f7a845baa1453a311b3c558c0b5e203dde06 | 323 | py | Python | language/type/rule/formation.py | jedhsu/language | 3772a4a0ff287e1fc5ebefc716b8d91928d04c72 | [
"MIT"
] | null | null | null | language/type/rule/formation.py | jedhsu/language | 3772a4a0ff287e1fc5ebefc716b8d91928d04c72 | [
"MIT"
] | null | null | null | language/type/rule/formation.py | jedhsu/language | 3772a4a0ff287e1fc5ebefc716b8d91928d04c72 | [
"MIT"
] | null | null | null | """
*Function Formation* A, B: Type _proves_ A -> B: Type

An element here amounts to a construction taking an element of A
to an element of B. In the case of mere propositions, an element
of the function type is a proof of the implication, a mapping
of a warrant for A to a warrant for B. (Corfield 39)
| 29.363636 | 66 | 0.69969 |
ace5f87be1f71ed937c1d367fe5755cbcdd41052 | 3,715 | py | Python | awsf_cwl_v1/aws_decode_run_json.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/aws_decode_run_json.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | awsf_cwl_v1/aws_decode_run_json.py | pkerpedjiev/tibanna | 8d8333bc7757076914c2bafbd68ee24c4ad611f6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import json
import sys
downloadlist_filename = "download_command_list.txt"
input_yml_filename = "inputs.yml"
env_filename = "env_command_list.txt"
INPUT_DIR = "/data1/input"
# read json file
with open(sys.argv[1], 'r') as json_file:
Dict = json.load(json_file)
# create a download command list file from the information in json
Dict_input = Dict["Job"]["Input"]
with open(downloadlist_filename, 'w') as f_download:
for category in ["Input_files_data", "Secondary_files_data"]:
keys = Dict_input[category].keys()
for i in range(0, len(Dict_input[category])):
DATA_BUCKET = Dict_input[category][keys[i]]["dir"]
PROFILE = Dict_input[category][keys[i]].get("profile", '')
PROFILE_FLAG = "--profile " + PROFILE if PROFILE else ''
if isinstance(Dict_input[category][keys[i]]["path"], list):
for file in Dict_input[category][keys[i]]["path"]:
DATA_FILE = file
download_cmd = "aws s3 cp s3://{0}/{1} {2}/{1} {3}\n".format(DATA_BUCKET,
DATA_FILE,
INPUT_DIR,
PROFILE_FLAG)
f_download.write(download_cmd)
else:
DATA_FILE = Dict_input[category][keys[i]]["path"]
download_cmd = "aws s3 cp s3://{0}/{1} {2}/{1} {3}\n".format(DATA_BUCKET,
DATA_FILE,
INPUT_DIR,
PROFILE_FLAG)
f_download.write(download_cmd)
# create an input yml file for cwl-runner
with open(input_yml_filename, 'w') as f_yml:
inputs = Dict_input.copy()
yml = {}
for category in ["Input_parameters"]:
for item, value in inputs[category].iteritems():
yml[item] = value
for category in ["Input_files_data"]:
for item in inputs[category].keys():
v = inputs[category][item]
if 'dir' in v:
del v['dir']
if 'profile' in v:
del v['profile']
if isinstance(v['path'], list):
v2 = []
for i in range(0, len(v['path'])):
v2.append({"class": v['class'], "path": INPUT_DIR + '/' + v['path'][i]})
v = v2
yml[item] = v
else:
v['path'] = INPUT_DIR + '/' + v['path']
yml[item] = v.copy()
json.dump(yml, f_yml, indent=4, sort_keys=True)
# create a file that defines environmental variables
# I have to use these variables after this script finishes running.
# I didn't use os.environ + os.system('bash') because that would remove the other
# env variables set before this script started running.
with open(env_filename, 'w') as f_env:
f_env.write("CWL_URL={}\n".format(Dict["Job"]["App"]["cwl_url"]))
# main cwl to be run (the other cwl files will be called by this one)
f_env.write("MAIN_CWL={}\n".format(Dict["Job"]["App"]["main_cwl"]))
# list of cwl files in an array delimited by a space
f_env.write("CWL_FILES=\"{}\"\n".format(' '.join(Dict["Job"]["App"]["other_cwl_files"].split(','))))
f_env.write("OUTBUCKET={}\n".format(Dict["Job"]["Output"]["output_bucket_directory"]))
f_env.write("PUBLIC_POSTRUN_JSON={}\n".format('1' if Dict["config"].get('public_postrun_json', False) else '0'))
| 48.246753 | 116 | 0.522746 |
ace5f886513d5d29f2958deca9d43e5fa597f12c | 1,201 | py | Python | tfx/orchestration/interactive/notebook_extensions/skip.py | NaxoAI/tfx | 811e4c1cc0f7903d73d151b9d4f21f79f6013d4a | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/interactive/notebook_extensions/skip.py | NaxoAI/tfx | 811e4c1cc0f7903d73d151b9d4f21f79f6013d4a | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/interactive/notebook_extensions/skip.py | NaxoAI/tfx | 811e4c1cc0f7903d73d151b9d4f21f79f6013d4a | [
"Apache-2.0"
] | 1 | 2019-10-06T03:39:58.000Z | 2019-10-06T03:39:58.000Z | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom magic for marking cells to be skipped during pipeline export."""
from __future__ import print_function
from IPython.core.magic import cell_magic
from IPython.core.magic import Magics
from IPython.core.magic import magics_class
@magics_class
class SkipMagics(Magics):
    """IPython magics container providing the ``%%skip_for_export`` cell magic."""

    @cell_magic
    def skip_for_export(self, line, cell):
        """Run `cell` normally in the interactive session.

        Cells marked with this magic execute as usual here; the
        export-to-pipeline step recognizes the marker and omits them.
        `line` (text after the magic name) is ignored.
        """
        # Execute the cell normally for now. During export to pipeline, this cell
        # will be skipped.
        self.shell.ex(cell)
        print('This cell will be skipped during export to pipeline.')
def load_ipython_extension(ipython):
    """Entry point invoked by ``%load_ext``; registers the skip magics."""
    ipython.register_magics(SkipMagics)
| 33.361111 | 77 | 0.768526 |
ace5fa786c1f6f81addf45a928e7cc855baaa2ca | 8,075 | py | Python | contrib/devtools/update-translations.py | lipcoin/lipcoin | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | lipcoin/lipcoin | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | [
"MIT"
] | null | null | null | contrib/devtools/update-translations.py | lipcoin/lipcoin | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'lipcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    '''Abort unless the current working directory is the repository root.

    The root is detected by the presence of a .git directory.  Both
    diagnostics now go to stderr (the first previously went to stdout
    by mistake) and the process exits with status 1 on failure.
    '''
    if not os.path.exists('.git'):
        print('No .git directory found', file=sys.stderr)
        print('Execute this script at the root of the repository', file=sys.stderr)
        sys.exit(1)
def fetch_all_translations():
    '''Pull every translation from Transifex via the ``tx`` command-line tool.

    Exits the process with status 1 when the tool reports failure.
    '''
    returncode = subprocess.call([TX, 'pull', '-f', '-a'])
    if returncode:
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Return the character following each ``%`` in *s*, in order.

    A ``%`` as the very last character makes the lookup run past the
    end of the string and raise IndexError; callers rely on that to
    detect malformed translations.
    '''
    specifiers = []
    search_from = 0
    while True:
        idx = s.find('%', search_from)
        if idx < 0:
            return specifiers
        specifiers.append(s[idx + 1])
        search_from = idx + 2
def split_format_specifiers(specifiers):
    '''Partition *specifiers* into Qt-style numeric ones and strprintf ones.

    Qt placeholders (%1..%9) may appear in any order, so they are
    returned as a set; strprintf placeholders are positional and keep
    their original order in a list.
    '''
    qt_digits = {'1', '2', '3', '4', '5', '6', '7', '8', '9'}
    numeric = [c for c in specifiers if c in qt_digits]
    other = [c for c in specifiers if c not in qt_digits]
    # If both numeric format specifiers and "others" are used, assume we're dealing
    # with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
    # only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
    # any kind of escaping that would be necessary for strprintf. Without this, this function
    # would wrongly detect '%)' as a printf format specifier.
    if numeric:
        other = []
    # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric), other
def sanitize_string(s):
    '''Collapse newlines so *s* prints on a single diagnostic line.'''
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    '''Return True if *translation* uses the same format specifiers as *source*.

    On mismatch or parse failure a human-readable message is appended to
    *errors* and False is returned.  *numerus* marks plural-form messages,
    which are allowed to drop a lone %n specifier entirely.
    '''
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        # a trailing '%' in the translation raises IndexError here
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    else:
        if source_f != translation_f:
            if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
                # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
                return True
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    '''Yield (filename, filepath) pairs for every translation in LOCALE_DIR.

    Only '*.ts<suffix>' entries are considered and the source-language
    file is skipped.  The yielded name has *suffix* stripped again, so
    the pair always refers to the plain .ts name.
    '''
    wanted_ending = '.ts' + suffix
    skipped = SOURCE_LANG + suffix
    for entry in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not entry.endswith(wanted_ending) or entry == skipped:
            continue
        name = entry[:-len(suffix)] if suffix else entry
        yield (name, os.path.join(LOCALE_DIR, name))
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')

def remove_invalid_characters(s):
    '''Return *s* (bytes) with XML-illegal control bytes stripped out.

    Every C0 control byte except newline (\\x0a) and carriage return
    (\\x0d) is removed; such bytes make the XML parser fail.
    '''
    cleaned = FIX_RE.sub(b'', s)
    return cleaned
# Optional override of ElementTree's cdata escaping so our output matches
# Qt's own serialization (only used for cleaner comparison diffs; disabled
# by default).  postprocess_translations() stores the original escaper here.
_orig_escape_cdata = None
def escape_cdata(text):
    '''Escape cdata like ElementTree, then additionally quote ' and ".'''
    escaped = _orig_escape_cdata(text)
    for raw, entity in (("'", '&apos;'), ('"', '&quot;')):
        escaped = escaped.replace(raw, entity)
    return escaped
def postprocess_translations(reduce_diff_hacks=False):
    '''Validate and clean up every fetched .ts file in place.

    Each file is renamed to <name>.orig, parsed, checked and rewritten:
    control bytes that break the XML parser are stripped, translations
    whose format specifiers mismatch the source are cleared and marked
    'unfinished', location tags and unfinished messages are dropped, and
    files with fewer than MIN_NUM_MESSAGES messages are not rewritten.

    When reduce_diff_hacks is True, ElementTree's cdata escaping is
    monkey-patched (see escape_cdata) and the serialized XML is tweaked
    to better match Qt's own output, minimizing diffs.

    Returns True if any invalid translation was encountered.
    '''
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        # remember ElementTree's original escaper so escape_cdata can chain to it
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    # Pipeline: verify cwd is the repo root, download fresh .ts files from
    # Transifex, then clean them up in place for committing.
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
| 38.636364 | 124 | 0.635789 |
ace5fad5e6f497df25744fbfdbb8f0775ac3c9b9 | 52 | py | Python | EduData/download_data/__init__.py | nnnyt/EduData | 1827f12167a68f15776cd303ce550814633f1256 | [
"MIT"
] | null | null | null | EduData/download_data/__init__.py | nnnyt/EduData | 1827f12167a68f15776cd303ce550814633f1256 | [
"MIT"
] | null | null | null | EduData/download_data/__init__.py | nnnyt/EduData | 1827f12167a68f15776cd303ce550814633f1256 | [
"MIT"
] | null | null | null | # coding: utf-8
# create by tongshiwei on 2019-8-16
| 17.333333 | 35 | 0.711538 |
ace5fd37cbe02628c965e54a7cce66390a4fe8fb | 9,230 | py | Python | sympy/functions/special/tests/test_zeta_functions.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z | sympy/functions/special/tests/test_zeta_functions.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z | sympy/functions/special/tests/test_zeta_functions.py | shilpiprd/sympy | 556e9c61b31d0d5f101cd56b43e843fbf3bcf121 | [
"BSD-3-Clause"
] | 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z | from sympy import (Symbol, zeta, nan, Rational, Float, pi, dirichlet_eta, log,
zoo, expand_func, polylog, lerchphi, S, exp, sqrt, I,
exp_polar, polar_lift, O, stieltjes, Abs, Sum, oo, riemann_xi)
from sympy.core.function import ArgumentIndexError
from sympy.functions.combinatorial.numbers import bernoulli, factorial
from sympy.testing.pytest import raises
from sympy.testing.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx, verify_numerically)
x = Symbol('x')
a = Symbol('a')
b = Symbol('b', negative=True)
z = Symbol('z')
s = Symbol('s')
def test_zeta_eval():
assert zeta(nan) is nan
assert zeta(x, nan) is nan
assert zeta(0) == Rational(-1, 2)
assert zeta(0, x) == S.Half - x
assert zeta(0, b) == S.Half - b
assert zeta(1) is zoo
assert zeta(1, 2) is zoo
assert zeta(1, -7) is zoo
assert zeta(1, x) is zoo
assert zeta(2, 1) == pi**2/6
assert zeta(2) == pi**2/6
assert zeta(4) == pi**4/90
assert zeta(6) == pi**6/945
assert zeta(2, 2) == pi**2/6 - 1
assert zeta(4, 3) == pi**4/90 - Rational(17, 16)
assert zeta(6, 4) == pi**6/945 - Rational(47449, 46656)
assert zeta(2, -2) == pi**2/6 + Rational(5, 4)
assert zeta(4, -3) == pi**4/90 + Rational(1393, 1296)
assert zeta(6, -4) == pi**6/945 + Rational(3037465, 2985984)
assert zeta(oo) == 1
assert zeta(-1) == Rational(-1, 12)
assert zeta(-2) == 0
assert zeta(-3) == Rational(1, 120)
assert zeta(-4) == 0
assert zeta(-5) == Rational(-1, 252)
assert zeta(-1, 3) == Rational(-37, 12)
assert zeta(-1, 7) == Rational(-253, 12)
assert zeta(-1, -4) == Rational(119, 12)
assert zeta(-1, -9) == Rational(539, 12)
assert zeta(-4, 3) == -17
assert zeta(-4, -8) == 8772
assert zeta(0, 1) == Rational(-1, 2)
assert zeta(0, -1) == Rational(3, 2)
assert zeta(0, 2) == Rational(-3, 2)
assert zeta(0, -2) == Rational(5, 2)
assert zeta(
3).evalf(20).epsilon_eq(Float("1.2020569031595942854", 20), 1e-19)
def test_zeta_series():
assert zeta(x, a).series(a, 0, 2) == \
zeta(x, 0) - x*a*zeta(x + 1, 0) + O(a**2)
def test_dirichlet_eta_eval():
assert dirichlet_eta(0) == S.Half
assert dirichlet_eta(-1) == Rational(1, 4)
assert dirichlet_eta(1) == log(2)
assert dirichlet_eta(2) == pi**2/12
assert dirichlet_eta(4) == pi**4*Rational(7, 720)
def test_riemann_xi_eval():
assert riemann_xi(2) == pi/6
assert riemann_xi(0) == Rational(1, 2)
assert riemann_xi(1) == Rational(1, 2)
assert riemann_xi(3).rewrite(zeta) == 3*zeta(3)/(2*pi)
assert riemann_xi(4) == pi**2/15
def test_rewriting():
assert dirichlet_eta(x).rewrite(zeta) == (1 - 2**(1 - x))*zeta(x)
assert zeta(x).rewrite(dirichlet_eta) == dirichlet_eta(x)/(1 - 2**(1 - x))
assert zeta(x).rewrite(dirichlet_eta, a=2) == zeta(x)
assert verify_numerically(dirichlet_eta(x), dirichlet_eta(x).rewrite(zeta), x)
assert verify_numerically(zeta(x), zeta(x).rewrite(dirichlet_eta), x)
assert zeta(x, a).rewrite(lerchphi) == lerchphi(1, x, a)
assert polylog(s, z).rewrite(lerchphi) == lerchphi(z, s, 1)*z
assert lerchphi(1, x, a).rewrite(zeta) == zeta(x, a)
assert z*lerchphi(z, s, 1).rewrite(polylog) == polylog(s, z)
def test_derivatives():
from sympy import Derivative
assert zeta(x, a).diff(x) == Derivative(zeta(x, a), x)
assert zeta(x, a).diff(a) == -x*zeta(x + 1, a)
assert lerchphi(
z, s, a).diff(z) == (lerchphi(z, s - 1, a) - a*lerchphi(z, s, a))/z
assert lerchphi(z, s, a).diff(a) == -s*lerchphi(z, s + 1, a)
assert polylog(s, z).diff(z) == polylog(s - 1, z)/z
b = randcplx()
c = randcplx()
assert td(zeta(b, x), x)
assert td(polylog(b, z), z)
assert td(lerchphi(c, b, x), x)
assert td(lerchphi(x, b, c), x)
raises(ArgumentIndexError, lambda: lerchphi(c, b, x).fdiff(2))
raises(ArgumentIndexError, lambda: lerchphi(c, b, x).fdiff(4))
raises(ArgumentIndexError, lambda: polylog(b, z).fdiff(1))
raises(ArgumentIndexError, lambda: polylog(b, z).fdiff(3))
def myexpand(func, target):
    '''Check that expand_func transforms *func* as expected.

    If *target* is given, require the expansion to equal it exactly.
    With target=None, require that the expression changed at all and
    that the original and the expansion agree numerically (within
    1e-10) at a random complex point for each free symbol.
    '''
    expanded = expand_func(func)
    if target is not None:
        return expanded == target
    if expanded == func: # it didn't expand
        return False
    # check to see that the expanded and original evaluate to the same value
    subs = {}
    for a in func.free_symbols:
        subs[a] = randcplx()
    return abs(func.subs(subs).n()
               - expanded.replace(exp_polar, exp).subs(subs).n()) < 1e-10
def test_polylog_expansion():
assert polylog(s, 0) == 0
assert polylog(s, 1) == zeta(s)
assert polylog(s, -1) == -dirichlet_eta(s)
assert polylog(s, exp_polar(I*pi*Rational(4, 3))) == polylog(s, exp(I*pi*Rational(4, 3)))
assert polylog(s, exp_polar(I*pi)/3) == polylog(s, exp(I*pi)/3)
assert myexpand(polylog(1, z), -log(1 - z))
assert myexpand(polylog(0, z), z/(1 - z))
assert myexpand(polylog(-1, z), z/(1 - z)**2)
assert ((1-z)**3 * expand_func(polylog(-2, z))).simplify() == z*(1 + z)
assert myexpand(polylog(-5, z), None)
def test_polylog_series():
assert polylog(1, z).series(z, n=5) == z + z**2/2 + z**3/3 + z**4/4 + O(z**5)
assert polylog(1, sqrt(z)).series(z, n=3) == z/2 + z**2/4 + sqrt(z)\
+ z**(S(3)/2)/3 + z**(S(5)/2)/5 + O(z**3)
# https://github.com/sympy/sympy/issues/9497
assert polylog(S(3)/2, -z).series(z, 0, 5) == -z + sqrt(2)*z**2/4\
- sqrt(3)*z**3/9 + z**4/8 + O(z**5)
def test_issue_8404():
i = Symbol('i', integer=True)
assert Abs(Sum(1/(3*i + 1)**2, (i, 0, S.Infinity)).doit().n(4)
- 1.122) < 0.001
def test_polylog_values():
assert polylog(2, 2) == pi**2/4 - I*pi*log(2)
assert polylog(2, S.Half) == pi**2/12 - log(2)**2/2
for z in [S.Half, 2, (sqrt(5)-1)/2, -(sqrt(5)-1)/2, -(sqrt(5)+1)/2, (3-sqrt(5))/2]:
assert Abs(polylog(2, z).evalf() - polylog(2, z, evaluate=False).evalf()) < 1e-15
z = Symbol("z")
for s in [-1, 0]:
for _ in range(10):
assert verify_numerically(polylog(s, z), polylog(s, z, evaluate=False),
z, a=-3, b=-2, c=S.Half, d=2)
assert verify_numerically(polylog(s, z), polylog(s, z, evaluate=False),
z, a=2, b=-2, c=5, d=2)
from sympy import Integral
assert polylog(0, Integral(1, (x, 0, 1))) == -S.Half
def test_lerchphi_expansion():
assert myexpand(lerchphi(1, s, a), zeta(s, a))
assert myexpand(lerchphi(z, s, 1), polylog(s, z)/z)
# direct summation
assert myexpand(lerchphi(z, -1, a), a/(1 - z) + z/(1 - z)**2)
assert myexpand(lerchphi(z, -3, a), None)
# polylog reduction
assert myexpand(lerchphi(z, s, S.Half),
2**(s - 1)*(polylog(s, sqrt(z))/sqrt(z)
- polylog(s, polar_lift(-1)*sqrt(z))/sqrt(z)))
assert myexpand(lerchphi(z, s, 2), -1/z + polylog(s, z)/z**2)
assert myexpand(lerchphi(z, s, Rational(3, 2)), None)
assert myexpand(lerchphi(z, s, Rational(7, 3)), None)
assert myexpand(lerchphi(z, s, Rational(-1, 3)), None)
assert myexpand(lerchphi(z, s, Rational(-5, 2)), None)
# hurwitz zeta reduction
assert myexpand(lerchphi(-1, s, a),
2**(-s)*zeta(s, a/2) - 2**(-s)*zeta(s, (a + 1)/2))
assert myexpand(lerchphi(I, s, a), None)
assert myexpand(lerchphi(-I, s, a), None)
assert myexpand(lerchphi(exp(I*pi*Rational(2, 5)), s, a), None)
def test_stieltjes():
assert isinstance(stieltjes(x), stieltjes)
assert isinstance(stieltjes(x, a), stieltjes)
# Zero'th constant EulerGamma
assert stieltjes(0) == S.EulerGamma
assert stieltjes(0, 1) == S.EulerGamma
# Not defined
assert stieltjes(nan) is nan
assert stieltjes(0, nan) is nan
assert stieltjes(-1) is S.ComplexInfinity
assert stieltjes(1.5) is S.ComplexInfinity
assert stieltjes(z, 0) is S.ComplexInfinity
assert stieltjes(z, -1) is S.ComplexInfinity
def test_stieltjes_evalf():
assert abs(stieltjes(0).evalf() - 0.577215664) < 1E-9
assert abs(stieltjes(0, 0.5).evalf() - 1.963510026) < 1E-9
assert abs(stieltjes(1, 2).evalf() + 0.072815845 ) < 1E-9
def test_issue_10475():
a = Symbol('a', extended_real=True)
b = Symbol('b', extended_positive=True)
s = Symbol('s', zero=False)
assert zeta(2 + I).is_finite
assert zeta(1).is_finite is False
assert zeta(x).is_finite is None
assert zeta(x + I).is_finite is None
assert zeta(a).is_finite is None
assert zeta(b).is_finite is None
assert zeta(-b).is_finite is True
assert zeta(b**2 - 2*b + 1).is_finite is None
assert zeta(a + I).is_finite is True
assert zeta(b + 1).is_finite is True
assert zeta(s + 1).is_finite is True
def test_issue_14177():
n = Symbol('n', positive=True, integer=True)
assert zeta(2*n) == (-1)**(n + 1)*2**(2*n - 1)*pi**(2*n)*bernoulli(2*n)/factorial(2*n)
assert zeta(-n) == (-1)**(-n)*bernoulli(n + 1)/(n + 1)
n = Symbol('n')
assert zeta(2*n) == zeta(2*n) # As sign of z (= 2*n) is not determined
| 34.830189 | 93 | 0.594583 |
ace60013ba2055b59033c8aa220c04af1a24d6c1 | 11,955 | py | Python | NetSurfP/plotcomparaison_netsurfP.py | najmacherrad/master_thesis | 4a5c68d6dddb98548ff93105a330e21148a1fa8d | [
"MIT"
] | 1 | 2019-01-18T02:01:59.000Z | 2019-01-18T02:01:59.000Z | NetSurfP/plotcomparaison_netsurfP.py | najmacherrad/master_thesis | 4a5c68d6dddb98548ff93105a330e21148a1fa8d | [
"MIT"
] | null | null | null | NetSurfP/plotcomparaison_netsurfP.py | najmacherrad/master_thesis | 4a5c68d6dddb98548ff93105a330e21148a1fa8d | [
"MIT"
] | null | null | null | # NetSurfP
#Compare results between wild type and mutant
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
from scipy import stats
from pylab import plot, show, savefig, xlim, figure, \
hold, ylim, legend, boxplot, setp, axes
def getColumn(filename, column, deli):
    """Return one column of a delimited text file as a list of strings.

    Arguments:
        filename: path of the file to read
        column: zero-based index of the column to extract
        deli: field delimiter character (e.g. '\\t')

    The header row, if any, is included.  The original version passed
    ``open(filename)`` straight to ``csv.reader`` and leaked the file
    handle; the ``with`` block closes it deterministically.  Rows with
    fewer than ``column + 1`` fields raise IndexError, as before.
    """
    with open(filename) as handle:
        return [row[column] for row in csv.reader(handle, delimiter=deli)]
#Import files
file_wt = 'netsurfpresults_wt.csv'
file_mut = 'netsurfpresults_mut.csv'
#-----------------------------------------------------------------------------
# RSA
#-----------------------------------------------------------------------------
#----------------
# SCATTER PLOT
RSA_wt = getColumn(file_wt,4,'\t')
RSA_mut = getColumn(file_mut,4,'\t')
RSA_wt.pop(0)
RSA_mut.pop(0)
x,y=[],[]
for i in range(0,len(RSA_wt)):
if RSA_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(RSA_wt[i]))
for i in range(0,len(RSA_mut)):
if RSA_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(RSA_mut[i]))
fig = plt.figure()
a=b=[0,0.2,0.3,0.4,0.5,0.6,0.9]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,0.9)
plt.ylim(0,0.9)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('RSA_wtVSmut.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1)
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Solvent accessibility predicted values')
plt.ylabel('Frequency')
plt.xlim(0,0.9)
plt.ylim(0,4)
plt.legend(loc='upper right')
fig.savefig('histo_netsurfp_missense_wtVSmut.png')
# STATS
miss=[]
[miss.append(x - y) for x, y in zip(x, y)]
#KOLMOGOROV-SMINORV TEST:
stats.kstest(miss,'norm') # (D,pvalue) = (0.44913569824019062, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) # (T, pvalue) = (10720.0, 0.01473848472842257)
#So we reject H0 -> There is a significant difference between wt and mut
#-----------------------------------------------------------------------------
# RSA ENVIRONNEMENT
#-----------------------------------------------------------------------------
#-----------------
# SCATTER PLOT
RSA_wt = getColumn(file_wt,5,'\t')
RSA_mut = getColumn(file_mut,5,'\t')
RSA_wt.pop(0)
RSA_mut.pop(0)
x,y=[],[]
for i in range(0,len(RSA_wt)):
if RSA_wt[i]=='NA':
x.append(np.nan)
else:
x.append(float(RSA_wt[i]))
for i in range(0,len(RSA_mut)):
if RSA_mut[i]=='NA':
y.append(np.nan)
else:
y.append(float(RSA_mut[i]))
fig = plt.figure()
a=b=[0,0.2,0.3,0.4,0.5,0.6,0.9]
plt.scatter(x, y,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,0.9)
plt.ylim(0,0.9)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('RSA_envt_wtVSmut.jpg')
#----------------
# PROBABILITY DENSITY CURVE
fig = figure()
mu1, std1 = stats.norm.fit(x)
mu2, std2 = stats.norm.fit(y)
bins = np.linspace(0, 99, 30)
xmin1, xmax1 = plt.xlim()
xmin2, xmax2 = plt.xlim()
x1 = np.linspace(xmin1, xmax1, 100)
x2 = np.linspace(xmin2, xmax2, 100)
p1 = stats.norm.pdf(x1, mu1, std1) #Probability density function
p2 = stats.norm.pdf(x2, mu2, std2)
plt.plot(x1, p1, 'k',label='Wild types (fit results: mu=%.2f,std=%.2f)'%(mu1, std1))
plt.plot(x2, p2, 'r',label='Deleterious DIDA mutants \n(fit results: mu=%.2f,std=%.2f)'%(mu2, std2))
plt.xlabel('Solvent accessibility predicted values')
plt.ylabel('Frequency')
plt.xlim(0,0.9)
plt.ylim(0,5)
plt.legend(loc='upper right')
fig.savefig('histo_netsurfp_missense_envt_wtVSmut.png')
# STATS
miss=[]
[miss.append(a_i - b_i) for a_i, b_i in zip(x, y)]
#KOLMOGOROV-SMINORV:
stats.kstest(miss,'norm') #(D,pvalue) = (0.47876635892857411, 0.0)
#So we reject H0 -> not normal distribution
#WILCOXON TEST:
stats.wilcoxon(miss) #-> (T, pvalue) = (13107.5, 0.17394709845400314)
#So we do not reject H0 -> There is no significant difference between wt and mut
#-----------------------------------------------------------------------------
# OUTLIERS FOR RSA (12)
#-----------------------------------------------------------------------------
RSA_wt = getColumn(file_wt,4,'\t')
RSA_mut = getColumn(file_mut,4,'\t')
RSA_wt.pop(0)
RSA_mut.pop(0)
RSAe_wt = getColumn(file_wt,5,'\t')
RSAe_mut = getColumn(file_mut,5,'\t')
RSAe_wt.pop(0)
RSAe_mut.pop(0)
variant_liste = getColumn(file_wt,0,'\t')
output = open('netsurfp_outliers.csv','w')
output.write('ID,RSA_wt,RSA_mut,difference,RSA_envt_wt,RSA_envt_mut,difference_envt\n')
for i in range(0,len(RSA_wt)):
for j in range(0,len(RSA_mut)):
if i==j:
if RSA_wt[i]!='NA'and RSA_mut[j]!='NA':
if (abs(float(RSA_wt[i])-float(RSA_mut[j]))) > 0.1:
output.write(variant_liste[i+1] + ',' + RSA_wt[i] + ',' + RSA_mut[j] + ',' + str(abs(float(RSA_wt[i])-float(RSA_mut[j]))) + ',' + RSAe_wt[i] + ',' + RSAe_mut[i] + ',' + str(abs(float(RSAe_wt[i])-float(RSAe_mut[j]))) + '\n')
output.close()
#-----------------------------------------------------------------------------
# RSA depending on Z-score
#-----------------------------------------------------------------------------
#-----------------
# SCATTER PLOT
Zscore_wt = getColumn(file_wt,6,'\t')
Zscore_mut = getColumn(file_mut,6,'\t')
Zscore_wt.pop(0)
Zscore_mut.pop(0)
RSA_wt = getColumn(file_wt,4,'\t')
RSA_mut = getColumn(file_mut,4,'\t')
RSA_wt.pop(0)
RSA_mut.pop(0)
ID = getColumn(file_wt,0,'\t')
ID.pop(0)
x_pos,x_neg,y_pos,y_neg=[],[],[],[]
IDwt_pos,IDwt_neg = [],[]
for i in range(0,len(RSA_wt)):
if float(Zscore_wt[i])>=0:
x_pos.append(float(RSA_wt[i]))
IDwt_pos.append(ID[i])
else:
x_neg.append(float(RSA_wt[i]))
IDwt_neg.append(ID[i])
IDmut_pos,IDmut_neg = [],[]
for i in range(0,len(RSA_mut)):
if ID[i] in IDwt_pos:
y_pos.append(float(RSA_mut[i]))
IDmut_pos.append(ID[i])
else:
y_neg.append(float(RSA_mut[i]))
IDmut_neg.append(ID[i])
# Z-score > 0 for wild types
fig = plt.figure()
a=b=[0,0,0.9]
plt.scatter(x_pos, y_pos,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,0.9)
plt.ylim(0,0.9)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('RSA_wtVSmut_zscore_pos.jpg')
#outliers (4)
output = open('netsurfp_outliers_zscore_pos.csv','w')
output.write('ID,RSA_wt,RSA_mut,difference\n')
for i in range(0,len(x_pos)):
for j in range(0,len(y_pos)):
if i==j:
if (abs(float(x_pos[i])-float(y_pos[j]))) > 0.1:
output.write(IDwt_pos[i] + ',' + str(x_pos[i]) + ',' + str(y_pos[j]) + ',' + str(abs(float(x_pos[i])-float(y_pos[j]))) + '\n')
output.close()
# Z-score < 0 fot wild types
fig = plt.figure()
a=b=[0,0,0.9]
plt.scatter(x_neg, y_neg,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,0.9)
plt.ylim(0,0.9)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('RSA_wtVSmut_zscore_neg.jpg')
#-----------------------------------------------------------------------------
# SECONDARY STRUCTURE
#-----------------------------------------------------------------------------
#Proba_a_helix
pa_wt = getColumn(file_wt,9,'\t')
pa_mut = getColumn(file_mut,9,'\t')
pa_wt.pop(0)
pa_mut.pop(0)
xa,ya=[],[]
for i in range(0,len(pa_wt)):
if pa_wt[i]=='NA':
xa.append(np.nan)
else:
xa.append(pa_wt[i])
for i in range(0,len(pa_mut)):
if pa_mut[i]=='NA':
ya.append(np.nan)
else:
ya.append(pa_mut[i])
fig = plt.figure()
a=b=[0,1]
plt.scatter(xa, ya,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('Proba_a_helix_wtVSmut.jpg')
#---------------------------
#Proba_b_strand
pb_wt = getColumn(file_wt,10,'\t')
pb_mut = getColumn(file_mut,10,'\t')
pb_wt.pop(0)
pb_mut.pop(0)
xb,yb=[],[]
for i in range(0,len(pb_wt)):
if pb_wt[i]=='NA':
xb.append(np.nan)
else:
xb.append(pb_wt[i])
for i in range(0,len(pb_mut)):
if pb_mut[i]=='NA':
yb.append(np.nan)
else:
yb.append(pb_mut[i])
fig = plt.figure()
a=b=[0,1]
plt.scatter(xb, yb,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('Proba_b_strand_wtVSmut.jpg')
#-----------------------------
#Proba_coil
pc_wt = getColumn(file_wt,11,'\t')
pc_mut = getColumn(file_mut,11,'\t')
pc_wt.pop(0)
pc_mut.pop(0)
xc,yc=[],[]
for i in range(0,len(pc_wt)):
if pc_wt[i]=='NA':
xc.append(np.nan)
else:
xc.append(pc_wt[i])
for i in range(0,len(pc_mut)):
if pc_mut[i]=='NA':
yc.append(np.nan)
else:
yc.append(pc_mut[i])
fig = plt.figure()
a=b=[0,1]
plt.scatter(xc, yc,edgecolor = 'none', c= 'k')
plt.plot(a,b,'r-')
plt.grid('on')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('Wild types')
plt.ylabel('Deleterious DIDA mutants')
fig.savefig('Proba_coil_wtVSmut.jpg')
#-----------------------------
# BAR PLOTS
# Change probability by letter A (alpha-helix), B (beta-strand) or C (coil)
struct_wt = []
for i in range(0,len(xa)):
proba = max(xa[i],xb[i],xc[i])
if proba == xa[i]:
struct_wt.append('A')
elif proba == xb[i]:
struct_wt.append('B')
elif proba == xc[i]:
struct_wt.append('C')
struct_mut = []
for i in range(0,len(ya)):
proba = max(ya[i],yb[i],yc[i])
if proba == ya[i]:
struct_mut.append('A')
elif proba == yb[i]:
struct_mut.append('B')
elif proba == yc[i]:
struct_mut.append('C')
# difference
df1 = pd.read_csv(file_wt,'\t')
df1['struct_wt']=struct_wt
df2 = pd.read_csv(file_mut,'\t')
df2['struct_mut']=struct_mut
df3 = pd.concat([df1['ID'],df1['Gene_name'],df1['Variant_effect'],df1['struct_wt'],
df2['struct_mut']], axis=1)
df1List = df3['struct_wt'].tolist()
df2List = df3['struct_mut'].tolist()
newlist=[]
for i in range(0,len(df1List)):
for j in range(0,len(df2List)):
if i==j:
if df1List[i]!=df2List[i]:
newlist.append(str(df1List[i])+' to '+ str(df2List[i]))
else:
newlist.append('0')
df3['difference']=''
df3['difference'] = newlist
df3 = df3[df3.difference != '0']
df3.to_csv('netsurfp_diff_struct.csv', index=False)
dfdiff = df3.groupby(['difference']).count()
dfdiff = dfdiff.drop(['Gene_name','Variant_effect','struct_wt','struct_mut'], 1)
dfdiff.plot(kind='bar',legend=False,color='k')
plt.ylabel('Number of variants')
plt.savefig('barplotdiff_struct_netsurfp.png')
# Comparison wild types VS Deleterious DIDA mutants
N = 3
ind = np.arange(N) # the x locations for the groups
width = 0.30 # the width of the bars
fig, ax = plt.subplots()
wt = (struct_wt.count('A'),struct_wt.count('B'),struct_wt.count('C'))
rects1 = ax.bar(ind, wt, width, color='grey')
mut = (struct_mut.count('A'),struct_mut.count('B'),struct_mut.count('C'))
rects2 = ax.bar(ind + width, mut, width, color='r')
ax.set_ylabel('Number of variants')
ax.set_xticks(ind + width)
ax.set_xticklabels(('A', 'B','C'))
ax.set_xlabel('Secondary structure prediction')
ax.legend((rects1[0], rects2[0]), ('Wild types', 'Deleterious DIDA mutants'),loc='upper center')
fig.savefig('barplot_netsurfp_struct_missense.png')
stats.chi2_contingency(np.column_stack((wt,mut)))
#(0.70700945913170021, 0.70222267137184713, 2, array([[ 108.5, 108.5],[ 38. , 38. ],[ 94.5, 94.5]]))
| 29.087591 | 243 | 0.589795 |
ace600c146cfe01010fd0c44816040c2106b1d82 | 2,598 | py | Python | posCouche/formeTS.py | jhillairet/posCouche | e64f46a4d7998a5f008923760f3442a64e881c66 | [
"CECILL-B"
] | 1 | 2021-05-29T16:02:01.000Z | 2021-05-29T16:02:01.000Z | posCouche/formeTS.py | jhillairet/posCouche | e64f46a4d7998a5f008923760f3442a64e881c66 | [
"CECILL-B"
] | null | null | null | posCouche/formeTS.py | jhillairet/posCouche | e64f46a4d7998a5f008923760f3442a64e881c66 | [
"CECILL-B"
] | 1 | 2017-10-05T12:48:09.000Z | 2017-10-05T12:48:09.000Z | # -*- coding: utf-8 -*-
"""
@author: J.Hillairet
"""
import pywed as pw
import numpy as np
import os
def vacuum_vessel(shot):
    """
    Get the coordinates of the Tore Supra / WEST vacuum vessel.

    R_wall, Z_wall = vacuum_vessel(shot)

    Arguments:
        - shot: Tore Supra or WEST shot number (positive integer)
    Returns:
        - R_wall: radius of the vacuum chamber walls [m]
        - Z_wall: height of the vacuum chamber walls [m]

    TODO: once WEST will have started, get the final vacuum vessel coordinates
    """
    if shot <= 0 or not isinstance(shot, int):
        raise ValueError('Shot number should be a positive integer')
    if shot < 50000:
        # Tore Supra vacuum chamber profile, read from the APOLO database.
        wall = pw.tsmat(shot, 'APOLO;+Parametres;Paroi')
        return wall[:, 0], wall[:, 1]
    # WEST vacuum chamber profile, shipped as a text file next to this module.
    # Build an absolute path so it works even when launched from another dir.
    west_profile = os.path.dirname(__file__) + os.sep + 'WEST_vacuum_vessel.txt'
    R_wall, Z_wall = np.loadtxt(west_profile, skiprows=1, unpack=True)
    return R_wall, Z_wall
def LCFS(shot):
    """
    Get the coordinates of the LCFS (last closed flux surface) vs time.

    R_ext, Z_ext, t = LCFS(shot)

    Arguments:
        shot: Tore Supra or WEST shot number
    Returns:
        R_ext: radius of LCFS [m]
        Z_ext: height of LCFS [m]
        t: time [s]
    """
    if shot <= 0 or not isinstance(shot, int):
        raise ValueError('Shot number should be a positive integer')
    if shot < 28540:
        raise ValueError('Shot number should be larger than 28540')
    # Minor radius vs time from the GRHO signal.
    minor_radius, t = pw.tsbase(shot, 'GRHO', nargout=2)
    t = t[:, 0]
    # Poloidal profile (assumed circular): 24 points, one every 15 degrees.
    theta = np.arange(0, 24 * 15, 15) * np.pi / 180
    R0 = 2.42  # plasma centre major radius used by the circular model [m]
    R_ext = R0 + minor_radius * np.cos(theta)
    Z_ext = minor_radius * np.sin(theta)
    # Close the contour by repeating the first poloidal point.
    R_ext = np.column_stack((R_ext, R_ext[:, 0]))
    Z_ext = np.column_stack((Z_ext, Z_ext[:, 0]))
    return R_ext, Z_ext, t
# Below a test code which is run only if this file is executed directly
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # Vessel outline: a shot number >= 50000 selects the WEST profile.
    R_wall, Z_wall = vacuum_vessel(50001)
    # LCFS time series for an example Tore Supra shot.
    R_ext, Z_ext, t = LCFS(47979)
    fig, ax = plt.subplots(1,1)
    ax.plot(R_wall, Z_wall, 'k', lw=2)
    ax.axis('equal')
    # plasma profile at the middle of the shot
    R_e = R_ext[int(len(R_ext)/2)]
    Z_e = Z_ext[int(len(R_ext)/2)]
    ax.plot(R_e, Z_e, 'b')
| 28.549451 | 97 | 0.600847 |
ace600c4287214e8536406296694832e6a340978 | 36 | py | Python | hello.py | csagar131/automate-stuffs-py | 903c888a5187a9887dea09520781cd3f7743b423 | [
"BSL-1.0"
] | null | null | null | hello.py | csagar131/automate-stuffs-py | 903c888a5187a9887dea09520781cd3f7743b423 | [
"BSL-1.0"
] | null | null | null | hello.py | csagar131/automate-stuffs-py | 903c888a5187a9887dea09520781cd3f7743b423 | [
"BSL-1.0"
] | null | null | null | #! python
# Minimal smoke-test script: print a greeting to stdout.
print('hello laptop')
| 9 | 22 | 0.611111 |
ace6020bd5e3e538255c6f4b6e2bff9e2caf3595 | 422 | py | Python | spikeforest/spikeforest/spikewidgets/example_datasets/real.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:19.000Z | 2021-09-23T01:07:19.000Z | spikeforest/spikeforest/spikewidgets/example_datasets/real.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | null | null | null | spikeforest/spikeforest/spikewidgets/example_datasets/real.py | mhhennig/spikeforest | 5b4507ead724af3de0be5d48a3b23aaedb0be170 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:21.000Z | 2021-09-23T01:07:21.000Z | from spikeforest import SFMdaRecordingExtractor
import numpy as np
def real(name='franklab_tetrode', download=True):
    """Return (recording_extractor, sorting) for a named real dataset.

    The second tuple element is always None (no ground-truth sorting).
    Raises Exception for any dataset name other than 'franklab_tetrode'.
    """
    if name != 'franklab_tetrode':
        raise Exception('Unrecognized name for real dataset: ' + name)
    dataset_dir = 'kbucket://b5ecdf1474c5/datasets/neuron_paper/franklab_tetrode'
    recording = SFMdaRecordingExtractor(dataset_directory=dataset_dir, download=download)
    return (recording, None)
| 35.166667 | 80 | 0.727488 |
ace602822976ae8c4417245434acbb1ea50a0324 | 527 | py | Python | examples/mnist/train_dist.py | TARTRL/TLaunch | 198dada129f2143b6f626a50b82d45575f4c1115 | [
"Apache-2.0"
] | 18 | 2021-12-19T09:43:17.000Z | 2021-12-30T06:09:03.000Z | examples/mnist/train_dist.py | TARTRL/TLaunch | 198dada129f2143b6f626a50b82d45575f4c1115 | [
"Apache-2.0"
] | null | null | null | examples/mnist/train_dist.py | TARTRL/TLaunch | 198dada129f2143b6f626a50b82d45575f4c1115 | [
"Apache-2.0"
] | 1 | 2022-01-14T06:20:20.000Z | 2022-01-14T06:20:20.000Z | import sys
from tlaunch import lp_ssh
from base_trainer import MultiGPUTrainer
def main(argv):
    # Build a Launchpad program with one MultiGPUTrainer node per host, then
    # launch all of them over SSH inside a tmux session.
    program = lp_ssh.Program('mnist_distributed')
    for host_index, host in enumerate(['host1','host2','host3','host4']):
        # host_index appears to act as this worker's rank within the job —
        # assumption from usage here; confirm against MultiGPUTrainer.
        ssh_node = lp_ssh.SSHNode(MultiGPUTrainer, argv, host_index).to_host(host)
        program.add_node(ssh_node, label=host + '_MultiGPUTrainer')
    lp_ssh.launch(program, terminal='ssh_tmux_session')
if __name__ == '__main__':
    from absl import flags
    FLAGS = flags.FLAGS
    # Initialize absl flags with an empty argv so later flag access is valid.
    FLAGS([""])
    main(sys.argv[1:])
| 25.095238 | 78 | 0.732448 |
ace60557c52b2c811296ae1365edbb9d61e8e732 | 2,249 | py | Python | macs.py | shell909090/pyoui | d71e5e9eaed2c62710c02dceb180ded423918877 | [
"BSD-3-Clause"
] | 2 | 2019-04-10T00:03:53.000Z | 2019-04-10T16:10:29.000Z | macs.py | shell909090/pyoui | d71e5e9eaed2c62710c02dceb180ded423918877 | [
"BSD-3-Clause"
] | null | null | null | macs.py | shell909090/pyoui | d71e5e9eaed2c62710c02dceb180ded423918877 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2019-04-09
@author: Shell.Xu
@copyright: 2019, Shell.Xu <shell909090@gmail.com>
@license: BSD-3-clause
'''
from __future__ import absolute_import, division,\
print_function, unicode_literals
import sys
import gzip
import base64
import getopt
import binascii
from os import path
OUI_PATH = path.join(
path.dirname(path.abspath(__file__)),
'ouis.gz')
def base64mac(mac):
    """Re-encode the 3-byte OUI (first six hex digits) of *mac* as base64.

    Any characters after the sixth are appended verbatim, so the result is
    the encoded vendor prefix followed by the untouched device part.
    """
    oui_bytes = binascii.a2b_hex(mac[:6])
    encoded_prefix = base64.b64encode(oui_bytes).decode('latin')
    return encoded_prefix + mac[6:]
def compress():
    """Read raw `{MAC}{NAME}` lines from stdin and print the compact table.

    Output is one line per vendor, sorted by vendor name:
    `name|<encoded mac>|<encoded mac>...`
    """
    import re
    entry_pattern = re.compile('{(?P<mac>.*)}{(?P<name>.*)}')
    by_vendor = {}
    for raw_line in sys.stdin:
        match = entry_pattern.match(raw_line.strip())
        if not match:
            # Malformed input line: abort (same bare Exception as before).
            raise Exception()
        mac, vendor = match.groups()
        vendor = vendor.strip('." \t')
        by_vendor.setdefault(vendor, []).append(base64mac(mac))
    for vendor, macs in sorted(by_vendor.items(), key=lambda item: item[0]):
        print('|'.join([vendor] + macs))
def lookup(mac):
    """Linear scan of the gzipped OUI table for *mac*.

    Accepts 'aa:bb:cc…' / 'AA-BB-CC…' style input; returns the vendor name of
    the first matching prefix, or the string 'not found'.
    """
    normalized = mac.upper().replace(':', '').replace('-', '')
    bmac = base64mac(normalized)
    with gzip.open(OUI_PATH, mode='rt', encoding='utf-8') as table:
        for record in table:
            vendor, *prefixes = record.strip().split('|')
            if any(bmac.startswith(prefix) for prefix in prefixes):
                return vendor
    return 'not found'
class MacIndex(object):
    """In-memory index over the gzipped OUI table for fast MAC lookups.

    __init__ loads the whole table once, bucketing (vendor, prefix-remainder)
    pairs by the first four characters of each base64-encoded OUI prefix;
    __getitem__ then only scans a single bucket instead of the whole file.
    """

    def __init__(self):
        # idx: first 4 chars of encoded prefix -> [(vendor, rest-of-prefix), ...]
        self.idx = {}
        with gzip.open(OUI_PATH, mode='rt', encoding='utf-8') as fi:
            for line in fi:
                name, *macs = line.strip().split('|')
                for m in macs:
                    self.idx.setdefault(m[:4], []).append((name, m[4:]))

    def __getitem__(self, mac):
        """Return the vendor name for *mac*, or 'not found'."""
        # Normalize "aa:bb:cc…" / "AA-BB-CC…" to plain upper-case hex, then
        # encode it the same way the table entries were encoded.
        bmac = base64mac(mac.upper().replace(':', '').replace('-', ''))
        idx, bmac = bmac[:4], bmac[4:]
        l = self.idx.get(idx)
        if not l:
            return 'not found'
        for name, off in l:
            if bmac.startswith(off):
                return name
        # Bug fix: this path previously fell off the end and returned None
        # when the bucket existed but no prefix matched; return the same
        # sentinel as every other miss path (and as lookup()).
        return 'not found'
def main():
    """usage: macs.py [-h] [-c] [MAC ...]

    -h  print this help text
    -c  compress raw OUI entries read from stdin into the table format
    Otherwise, each positional argument is looked up in the bundled OUI table.
    """
    # Parse flags; the remaining positional args are MAC addresses to resolve.
    optlist, args = getopt.getopt(sys.argv[1:], 'ch')
    optdict = dict(optlist)
    if '-h' in optdict:
        # -h prints main.__doc__, so the docstring above doubles as help text.
        print(main.__doc__)
        return
    if '-c' in optdict:
        # compress() reads stdin and prints the table; it returns None.
        return compress()
    for m in args:
        print(lookup(m))
if __name__ == '__main__':
main()
| 25.269663 | 78 | 0.552245 |
ace6055aa6b589b4cb0439e022e237fc508d62c5 | 261 | py | Python | apps/methodologies/admin.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/methodologies/admin.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/methodologies/admin.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from apps.methodologies import models
# Register your models here.
# Expose all methodology models in the Django admin using the default
# ModelAdmin options (no custom admin classes).
admin.site.register(models.Methodology)
admin.site.register(models.Task)
admin.site.register(models.ProjectMethodology)
admin.site.register(models.ProjectTask)
| 29 | 46 | 0.83908 |
ace6078a7f81e7794c66e2173cc4924b2af3602f | 3,827 | py | Python | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_55.py | Shumaro/openweave-core | 0f1388d2d887208d59c3409bd73d5939727c8bda | [
"Apache-2.0"
] | null | null | null | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_55.py | Shumaro/openweave-core | 0f1388d2d887208d59c3409bd73d5939727c8bda | [
"Apache-2.0"
] | null | null | null | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_55.py | Shumaro/openweave-core | 0f1388d2d887208d59c3409bd73d5939727c8bda | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between nodes.
# I12: Mutual Subscribe: Initiator Continuous Events. Publisher in initiator aborts
# M36: Stress Mutual Subscribe: Initiator Continuous Events. Publisher in initiator aborts
#
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_mutual_subscribe_55(weave_wdm_next_test_base):
    # Test cases I12 / M36: WDM mutual subscribe where the initiator produces
    # continuous (Security) events and the publisher inside the initiator
    # aborts the subscription.
    def test_weave_wdm_next_mutual_subscribe_55(self):
        # Argument dict consumed by weave_wdm_next_test_base below.
        wdm_next_args = {}
        wdm_next_args['wdm_option'] = "mutual_subscribe"
        # Client (initiator) side configuration.
        wdm_next_args['total_client_count'] = 4
        wdm_next_args['final_client_status'] = 3
        wdm_next_args['timer_client_period'] = 15000
        wdm_next_args['test_client_iterations'] = 5
        wdm_next_args['test_client_delay'] = 35000
        wdm_next_args['enable_client_flip'] = 0
        # Server (responder) side configuration.
        wdm_next_args['total_server_count'] = 0
        wdm_next_args['final_server_status'] = 4
        wdm_next_args['timer_server_period'] = 0
        wdm_next_args['enable_server_flip'] = 0
        # The initiator emits Security events every 2000 ms during the run.
        wdm_next_args['client_event_generator'] = 'Security'
        wdm_next_args['client_inter_event_period'] = 2000
        # Expected log regexes paired with their required occurrence counts.
        wdm_next_args['client_log_check'] = [('Client\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
                                             ('Handler\[0\] \[ALIVE\] AbortSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations']),
                                             ('Client->kEvent_OnNotificationProcessed', 1),
                                             ('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
                                             ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
        wdm_next_args['server_log_check'] = [('TimerEventHandler Ref\(\d+\) Timeout', wdm_next_args['test_client_iterations']),
                                             ('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
                                             ('Client\[0\] \[(ALIVE|CONFM)\] HandleSubscriptionTerminated', wdm_next_args['test_client_iterations']),
                                             ('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
                                             ('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
        # Tag and human-readable case names derive from the class name
        # (the [19:] slice strips the common 'test_weave_wdm_next' prefix).
        wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
        wdm_next_args['test_case_name'] = ['I12: Mutual Subscribe: Initiator Continuous Events. Publisher in initiator aborts',
                                           'M36: Stress Mutual Subscribe: Initiator Continuous Events. Publisher in initiator aborts']
        print 'test file: ' + self.__class__.__name__
        print "weave-wdm-next test I12 and M36"
        super(test_weave_wdm_next_mutual_subscribe_55, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
| 48.443038 | 160 | 0.645675 |
ace6083b7d76c33f5ac66b1f72c3dafdc936fea8 | 1,845 | py | Python | setup.py | bmello4688/lean-cli | 20024db4d56ebb9ad8a149d1120baa94f96bfe0a | [
"Apache-2.0"
] | null | null | null | setup.py | bmello4688/lean-cli | 20024db4d56ebb9ad8a149d1120baa94f96bfe0a | [
"Apache-2.0"
] | null | null | null | setup.py | bmello4688/lean-cli | 20024db4d56ebb9ad8a149d1120baa94f96bfe0a | [
"Apache-2.0"
] | null | null | null | import os
import re
from setuptools import find_packages, setup
current_dir = os.path.abspath(os.path.dirname(__file__))
def read(relative_path: str) -> str:
    """Return the text content of *relative_path* relative to this file's directory.

    The file is decoded as UTF-8 explicitly: README.md can contain non-ASCII
    characters, and relying on the platform default encoding breaks installs
    on systems where that default is not UTF-8 (e.g. some Windows setups).
    """
    with open(os.path.join(current_dir, relative_path), encoding="utf-8") as fh:
        return fh.read()
def get_version() -> str:
    """Extract the package version from lean/__init__.py without importing it."""
    init_contents = read("lean/__init__.py")
    # The package declares its version as: __version__ = "x.y.z"
    match = re.search(r"^__version__ = \"([^\"]+)\"", init_contents, re.M)
    return match.group(1)
# Production dependencies
install_requires = [
    "click~=7.1.2",
    "requests~=2.25.1",
    "json5~=0.9.5",
    "docker~=4.4.1",
    "rich~=9.10.0",
    "dependency-injector~=4.20.1",
    "pydantic~=1.7.3",
    "python-dateutil~=2.8.1",
    "setuptools",
    "quantconnect-stubs"
]
# Package metadata and build configuration for the `lean` CLI.
setup(
    name="lean",
    version=get_version(),
    description="A CLI aimed at making it easier to run QuantConnect's LEAN engine locally and in the cloud",
    author="QuantConnect",
    author_email="support@quantconnect.com",
    url="https://github.com/QuantConnect/lean-cli",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    packages=find_packages(include=["lean", "lean.*"]),
    package_data={
        # Ship the bundled SSH assets alongside the package code.
        "lean": ["ssh/*"]
    },
    entry_points={
        # Installs the `lean` console command pointing at lean.main:main.
        "console_scripts": ["lean=lean.main:main"]
    },
    install_requires=install_requires,
    python_requires=">= 3.6",
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Financial and Insurance Industry",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9"
    ]
)
| 28.828125 | 109 | 0.63252 |
ace60859ea0747fed3cc9b8dce25ac26bff20620 | 6,522 | py | Python | soccer/gameplay/evaluation/shooting.py | bopas2/robocup-software | 2b926983e745e109db3630a8c919cb1d4651891d | [
"Apache-2.0"
] | null | null | null | soccer/gameplay/evaluation/shooting.py | bopas2/robocup-software | 2b926983e745e109db3630a8c919cb1d4651891d | [
"Apache-2.0"
] | null | null | null | soccer/gameplay/evaluation/shooting.py | bopas2/robocup-software | 2b926983e745e109db3630a8c919cb1d4651891d | [
"Apache-2.0"
] | null | null | null | import constants
import robocup
import main
## Find the chance of a shot succeeding by looking at pass distance and what robots are in the way
# The total goal angle as well as the percent covered is taken into account
# @param from_point The Point the shot is coming from
# @param excluded_robots A list of robots that shouldn't be counted as obstacles to this shot
# @return a value from zero to one that estimates the probability of the shot succeeding
def eval_shot(from_point, excluded_robots=None):
    # Bug fix: the default used to be a shared mutable list ([]). None is used
    # as the sentinel instead; callers passing a list are unaffected.
    if excluded_robots is None:
        excluded_robots = []
    kick_eval = robocup.KickEvaluator(main.system_state())
    for r in excluded_robots:
        kick_eval.add_excluded_robot(r)
    point, chance = kick_eval.eval_pt_to_opp_goal(from_point)
    return chance
## Shoot through a formation of enemy robots at a target
#
# @param target_pos: the target to shoot at
# @param max_shooting_angle: The largest angle we will search to find a gap
# @param robot_offset: Max angle offset from an enemy robot we will shoot
# @param dist_from_point: radius of the search circle around the ball
# @return a point
def find_gap(target_pos=constants.Field.TheirGoalSegment.center(), max_shooting_angle=60, robot_offset=8, dist_from_point=.75):
    if (not main.ball().valid):
        return target_pos

    # Find the hole in the defenders to kick at
    # The limit is 20 cm so any point past it should be defenders right there
    win_eval = robocup.WindowEvaluator(main.system_state())

    # 500 cm min circle distance plus the robot width
    test_distance = dist_from_point + constants.Robot.Radius

    # +- max offset to dodge ball
    max_angle = max_shooting_angle * constants.DegreesToRadians

    # How much left and right of a robot to give
    # Dont make this too big or it will always go far to the right or left of the robots
    robot_angle_offset = robot_offset * constants.DegreesToRadians

    zero_point = robocup.Point(0, 0)

    # Limit the angle so as we get closer, we dont miss the goal completely as much
    goal_vector = target_pos - main.ball().pos
    max_length_vector = robocup.Point(constants.Field.Length, constants.Field.Width)
    goal_limit = (goal_vector.mag() / max_length_vector.mag()) * max_angle

    # Limit on one side so we dont directly kick out of bounds
    # Add in the angle from the sideline to the target
    field_limit = (1 - abs(main.ball().pos.x) / (constants.Field.Width / 2)) * max_angle
    field_limit = field_limit + goal_vector.angle_between(robocup.Point(0, 1))

    # Limit the angle based on the opponent robots to try and always minimize the
    left_robot_limit = 0
    right_robot_limit = 0

    for robot in main.their_robots():
        ball_to_bot = robot.pos - main.ball().pos
        # Add an extra radius as wiggle room
        # kick eval already deals with the wiggle room so it isn't needed there
        if (ball_to_bot.mag() <= test_distance + constants.Robot.Radius):
            angle = goal_vector.angle_between(ball_to_bot)
            # Try and rotate onto the goal vector
            # if we actually do, then the robot is to the right of the ball vector
            ball_to_bot.rotate(zero_point, angle)
            if (ball_to_bot.angle_between(goal_vector) < 0.01):
                right_robot_limit = max(right_robot_limit, angle + robot_angle_offset)
            else:
                left_robot_limit = max(left_robot_limit, angle + robot_angle_offset)
        else:
            win_eval.add_excluded_robot(robot)

    # Angle limit on each side of the bot->goal vector
    left_angle = max_angle
    right_angle = max_angle

    # Make sure we limit the correct side due to the field
    if main.ball().pos.x < 0:
        left_angle = min(left_angle, field_limit)
    else:
        right_angle = min(right_angle, field_limit)

    # Limit due to goal
    left_angle = min(left_angle, goal_limit)
    right_angle = min(right_angle, goal_limit)

    # Limit to just over the robots
    # Bug fix: these used to be `is not 0` identity comparisons, which only
    # work by accident for small ints and raise SyntaxWarning on CPython 3.8+.
    if (left_robot_limit != 0):
        left_angle = min(left_angle, left_robot_limit)
    if (right_robot_limit != 0):
        right_angle = min(right_angle, right_robot_limit)

    # Get the angle that we need to rotate the target angle behind the defenders
    # since kick eval doesn't support a nonsymmetric angle around a target
    rotate_target_angle = (left_angle + -right_angle)/2
    target_width = (left_angle + right_angle)

    target_point = goal_vector.normalized() * test_distance
    target_point.rotate(zero_point, rotate_target_angle)

    windows, window = win_eval.eval_pt_to_pt(main.ball().pos, target_point + main.ball().pos, target_width)

    # Test draw points
    target_point.rotate(zero_point, target_width/2)
    p1 = target_point + main.ball().pos
    target_point.rotate(zero_point, -target_width)
    p2 = target_point + main.ball().pos
    p3 = main.ball().pos
    main.system_state().draw_polygon([p1, p2, p3], (0, 0, 255), "Free Kick search zone")

    # NOTE(review): is_opponent_blocking is computed but never used below —
    # confirm whether it should influence the shot choice or be removed.
    is_opponent_blocking = False
    for robot in main.their_robots():
        if (goal_vector.dist_to(robot.pos) < constants.Robot.Radius and
                (main.ball().pos - robot.pos).mag() < test_distance):
            is_opponent_blocking = True

    # Vector from ball position to the goal
    ideal_shot = (target_pos - main.ball().pos).normalized()

    # If on our side of the field and there are enemy robots around us,
    # prioritize passing forward vs passing towards their goal
    # Would have to change this if we are not aiming for their goal
    if main.ball().pos.y < constants.Field.Length / 2 and len(windows) > 1:
        ideal_shot = robocup.Point(0, 1)

    main.system_state().draw_line(robocup.Line(main.ball().pos, target_pos), (0, 255, 0), "Target Point")

    # Weights for determining best shot
    k1 = 1.5  # Weight of closeness to ideal shot
    k2 = 1  # Weight of shot chance

    # Iterate through all possible windows to find the best possible shot
    if windows:
        best_shot = window.segment.center()
        best_weight = 0
        for wind in windows:
            pos_to_wind = (wind.segment.center() - main.ball().pos).normalized()
            dot_prod = pos_to_wind.dot(ideal_shot)
            weight = k1 * dot_prod + k2 * wind.shot_success
            if weight > best_weight:
                best_weight = weight
                best_shot = wind.segment.center()
        main.system_state().draw_line(robocup.Line(main.ball().pos, best_shot), (255, 255, 0), "Target Shot")
        # NOTE(review): the weighted best_shot selected above is drawn but then
        # immediately overridden here — this looks like leftover debug/test
        # code; confirm whether the override should be removed. Behavior is
        # intentionally left unchanged in this pass.
        best_shot = robocup.Point(0,1) + main.ball().pos
        return best_shot
    else:
        return constants.Field.TheirGoalSegment.center()
| 41.807692 | 127 | 0.694419 |
ace608644594ea4bab7419bedcf45be1d8cc3a4b | 7,336 | py | Python | sphinx/builders/latex/constants.py | daobook/sphinx | ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe | [
"BSD-2-Clause"
] | null | null | null | sphinx/builders/latex/constants.py | daobook/sphinx | ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe | [
"BSD-2-Clause"
] | 1,662 | 2015-01-02T11:45:27.000Z | 2015-01-03T12:21:29.000Z | sphinx/builders/latex/constants.py | daobook/sphinx | ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe | [
"BSD-2-Clause"
] | null | null | null | """constants for LaTeX builder."""
from typing import Any, Dict
PDFLATEX_DEFAULT_FONTPKG = r'''
\usepackage{tgtermes}
\usepackage{tgheros}
\renewcommand{\ttdefault}{txtt}
'''
PDFLATEX_DEFAULT_FONTSUBSTITUTION = r'''
\expandafter\ifx\csname T@LGR\endcsname\relax
\else
% LGR was declared as font encoding
\substitutefont{LGR}{\rmdefault}{cmr}
\substitutefont{LGR}{\sfdefault}{cmss}
\substitutefont{LGR}{\ttdefault}{cmtt}
\fi
\expandafter\ifx\csname T@X2\endcsname\relax
\expandafter\ifx\csname T@T2A\endcsname\relax
\else
% T2A was declared as font encoding
\substitutefont{T2A}{\rmdefault}{cmr}
\substitutefont{T2A}{\sfdefault}{cmss}
\substitutefont{T2A}{\ttdefault}{cmtt}
\fi
\else
% X2 was declared as font encoding
\substitutefont{X2}{\rmdefault}{cmr}
\substitutefont{X2}{\sfdefault}{cmss}
\substitutefont{X2}{\ttdefault}{cmtt}
\fi
'''
XELATEX_DEFAULT_FONTPKG = r'''
\setmainfont{FreeSerif}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Italic,
BoldFont = *Bold,
BoldItalicFont = *BoldItalic
]
\setsansfont{FreeSans}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]
\setmonofont{FreeMono}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]
'''
XELATEX_GREEK_DEFAULT_FONTPKG = (XELATEX_DEFAULT_FONTPKG +
'\n\\newfontfamily\\greekfont{FreeSerif}' +
'\n\\newfontfamily\\greekfontsf{FreeSans}' +
'\n\\newfontfamily\\greekfonttt{FreeMono}')
LUALATEX_DEFAULT_FONTPKG = XELATEX_DEFAULT_FONTPKG
DEFAULT_SETTINGS: Dict[str, Any] = {
'latex_engine': 'pdflatex',
'papersize': '',
'pointsize': '',
'pxunit': '.75bp',
'classoptions': '',
'extraclassoptions': '',
'maxlistdepth': '',
'sphinxpkgoptions': '',
'sphinxsetup': '',
'fvset': '\\fvset{fontsize=auto}',
'passoptionstopackages': '',
'geometry': '\\usepackage{geometry}',
'inputenc': '',
'utf8extra': '',
'cmappkg': '\\usepackage{cmap}',
'fontenc': '\\usepackage[T1]{fontenc}',
'amsmath': '\\usepackage{amsmath,amssymb,amstext}',
'multilingual': '',
'babel': '\\usepackage{babel}',
'polyglossia': '',
'fontpkg': PDFLATEX_DEFAULT_FONTPKG,
'fontsubstitution': PDFLATEX_DEFAULT_FONTSUBSTITUTION,
'substitutefont': '',
'textcyrillic': '',
'textgreek': '\\usepackage{textalpha}',
'fncychap': '\\usepackage[Bjarne]{fncychap}',
'hyperref': ('% Include hyperref last.\n'
'\\usepackage{hyperref}\n'
'% Fix anchor placement for figures with captions.\n'
'\\usepackage{hypcap}% it must be loaded after hyperref.\n'
'% Set up styles of URL: it should be placed after hyperref.\n'
'\\urlstyle{same}'),
'contentsname': '',
'extrapackages': '',
'preamble': '',
'title': '',
'release': '',
'author': '',
'releasename': '',
'makeindex': '\\makeindex',
'shorthandoff': '',
'maketitle': '\\sphinxmaketitle',
'tableofcontents': '\\sphinxtableofcontents',
'atendofbody': '',
'printindex': '\\printindex',
'transition': '\n\n\\bigskip\\hrule\\bigskip\n\n',
'figure_align': 'htbp',
'tocdepth': '',
'secnumdepth': '',
}
ADDITIONAL_SETTINGS: Dict[Any, Dict[str, Any]] = {
'pdflatex': {
'inputenc': '\\usepackage[utf8]{inputenc}',
'utf8extra': ('\\ifdefined\\DeclareUnicodeCharacter\n'
'% support both utf8 and utf8x syntaxes\n'
' \\ifdefined\\DeclareUnicodeCharacterAsOptional\n'
' \\def\\sphinxDUC#1{\\DeclareUnicodeCharacter{"#1}}\n'
' \\else\n'
' \\let\\sphinxDUC\\DeclareUnicodeCharacter\n'
' \\fi\n'
' \\sphinxDUC{00A0}{\\nobreakspace}\n'
' \\sphinxDUC{2500}{\\sphinxunichar{2500}}\n'
' \\sphinxDUC{2502}{\\sphinxunichar{2502}}\n'
' \\sphinxDUC{2514}{\\sphinxunichar{2514}}\n'
' \\sphinxDUC{251C}{\\sphinxunichar{251C}}\n'
' \\sphinxDUC{2572}{\\textbackslash}\n'
'\\fi'),
},
'xelatex': {
'latex_engine': 'xelatex',
'polyglossia': '\\usepackage{polyglossia}',
'babel': '',
'fontenc': ('\\usepackage{fontspec}\n'
'\\defaultfontfeatures[\\rmfamily,\\sffamily,\\ttfamily]{}'),
'fontpkg': XELATEX_DEFAULT_FONTPKG,
'fvset': '\\fvset{fontsize=\\small}',
'fontsubstitution': '',
'textgreek': '',
'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
'{\\leavevmode\\nobreak\\ }'),
},
'lualatex': {
'latex_engine': 'lualatex',
'polyglossia': '\\usepackage{polyglossia}',
'babel': '',
'fontenc': ('\\usepackage{fontspec}\n'
'\\defaultfontfeatures[\\rmfamily,\\sffamily,\\ttfamily]{}'),
'fontpkg': LUALATEX_DEFAULT_FONTPKG,
'fvset': '\\fvset{fontsize=\\small}',
'fontsubstitution': '',
'textgreek': '',
'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
'{\\leavevmode\\nobreak\\ }'),
},
'platex': {
'latex_engine': 'platex',
'babel': '',
'classoptions': ',dvipdfmx',
'fontpkg': PDFLATEX_DEFAULT_FONTPKG,
'fontsubstitution': '',
'textgreek': '',
'fncychap': '',
'geometry': '\\usepackage[dvipdfm]{geometry}',
},
'uplatex': {
'latex_engine': 'uplatex',
'babel': '',
'classoptions': ',dvipdfmx',
'fontpkg': PDFLATEX_DEFAULT_FONTPKG,
'fontsubstitution': '',
'textgreek': '',
'fncychap': '',
'geometry': '\\usepackage[dvipdfm]{geometry}',
},
# special settings for latex_engine + language_code
('xelatex', 'fr'): {
# use babel instead of polyglossia by default
'polyglossia': '',
'babel': '\\usepackage{babel}',
},
('xelatex', 'zh'): {
'polyglossia': '',
'babel': '\\usepackage{babel}',
'fontenc': '\\usepackage{xeCJK}',
# set formatcom=\xeCJKVerbAddon to prevent xeCJK from adding extra spaces in
# fancyvrb Verbatim environment.
'fvset': '\\fvset{fontsize=\\small,formatcom=\\xeCJKVerbAddon}',
},
('xelatex', 'el'): {
'fontpkg': XELATEX_GREEK_DEFAULT_FONTPKG,
},
}
SHORTHANDOFF = r'''
\ifdefined\shorthandoff
\ifnum\catcode`\=\string=\active\shorthandoff{=}\fi
\ifnum\catcode`\"=\active\shorthandoff{"}\fi
\fi
'''
| 35.100478 | 87 | 0.527808 |
ace6089659afd32a1c268e4657f4c18b2bec2faf | 743 | py | Python | skorch/exceptions.py | dnouri/skorch | bce1299e54926b8596f3c57c8bc2091389fe20a3 | [
"BSD-3-Clause"
] | 1,881 | 2017-10-12T10:06:35.000Z | 2019-03-19T06:08:03.000Z | skorch/exceptions.py | sthagen/skorch-dev-skorch | 233f650c2cbca787a6ddb34f6dc79b0cc23e117a | [
"BSD-3-Clause"
] | 322 | 2017-10-11T10:51:31.000Z | 2019-03-18T12:09:23.000Z | skorch/exceptions.py | sthagen/skorch-dev-skorch | 233f650c2cbca787a6ddb34f6dc79b0cc23e117a | [
"BSD-3-Clause"
] | 167 | 2017-10-24T17:07:31.000Z | 2019-03-10T08:54:51.000Z | """Contains skorch-specific exceptions and warnings."""
from sklearn.exceptions import NotFittedError
# NOTE(review): SkorchException derives from BaseException, not Exception, so
# a plain `except Exception` will NOT catch skorch errors — confirm that this
# is intentional before changing it.
class SkorchException(BaseException):
    """Base skorch exception."""
# Also inherits sklearn's NotFittedError so sklearn-style handlers catch it.
class NotInitializedError(SkorchException, NotFittedError):
    """Module is not initialized, please call the ``.initialize``
    method or train the model by calling ``.fit(...)``.
    """
class SkorchAttributeError(SkorchException):
    """An attribute was set incorrectly on a skorch net."""
# Warnings derive from UserWarning (not SkorchException) so that the standard
# warnings machinery handles them.
class SkorchWarning(UserWarning):
    """Base skorch warning."""
class DeviceWarning(SkorchWarning):
    """A problem with a device (e.g. CUDA) was detected."""
class SkorchTrainingImpossibleError(SkorchException):
    """The net cannot be used for training"""
| 23.967742 | 65 | 0.724092 |
ace609ac31861bed605e2ad24f723d9cef474ee7 | 1,827 | py | Python | torch/package/_directory_reader.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 24 | 2020-11-02T21:25:12.000Z | 2022-03-17T07:20:33.000Z | torch/package/_directory_reader.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 1 | 2021-06-25T22:00:31.000Z | 2021-06-25T22:00:31.000Z | torch/package/_directory_reader.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 12 | 2020-11-06T05:00:37.000Z | 2022-01-30T19:17:36.000Z | import os.path
from glob import glob
from typing import Any, List
import torch
# All concrete CPU storage classes that packaged tensors may deserialize into.
_storages: List[Any] = [
    torch.DoubleStorage,
    torch.FloatStorage,
    torch.LongStorage,
    torch.IntStorage,
    torch.ShortStorage,
    torch.CharStorage,
    torch.ByteStorage,
    torch.BoolStorage,
]
# Map each storage's element dtype (probed via a zero-length instance) back to
# its storage class, for get_storage_from_record lookups.
_dtype_to_storage = {data_type(0).dtype: data_type for data_type in _storages}
# because get_storage_from_record returns a tensor!?
class _HasStorage(object):
def __init__(self, storage):
self._storage = storage
def storage(self):
return self._storage
class DirectoryReader(object):
    """
    Class to allow PackageImporter to operate on unzipped packages. Methods
    copy the behavior of the internal PyTorchFileReader class (which is used for
    accessing packages in all other cases).

    N.B.: ScriptObjects are not depickleable or accessible via this DirectoryReader
    class due to ScriptObjects requiring an actual PyTorchFileReader instance.
    """

    def __init__(self, directory):
        self.directory = directory

    def get_record(self, name):
        """Return the raw bytes of the record stored at *name*."""
        with open(f"{self.directory}/{name}", "rb") as record_file:
            return record_file.read()

    def get_storage_from_record(self, name, numel, dtype):
        """Memory-map the record *name* as a storage of *numel* elements of *dtype*."""
        storage_cls = _dtype_to_storage[dtype]
        record_path = f"{self.directory}/{name}"
        return _HasStorage(storage_cls.from_file(filename=record_path, size=numel))

    def has_record(self, path):
        """True when *path* exists as a regular file inside the package directory."""
        return os.path.isfile(os.path.join(self.directory, path))

    def get_all_records(self):
        """List every file (not directory) under the package root, relative to it."""
        prefix_len = len(self.directory) + 1
        return [
            entry[prefix_len:]
            for entry in glob(f"{self.directory}/**", recursive=True)
            if not os.path.isdir(entry)
        ]
| 29 | 83 | 0.677066 |
ace60b8bd2d60fa83e5a28822ef7f41e55957469 | 9,672 | py | Python | cytomod/io.py | Yairsep-zz/CytoMod | cc14d6e75710ac36916901a1a98994bce72fa8d9 | [
"MIT"
] | 2 | 2020-08-16T15:00:17.000Z | 2020-11-16T07:34:37.000Z | cytomod/io.py | Yairsep/CytoMod | cc14d6e75710ac36916901a1a98994bce72fa8d9 | [
"MIT"
] | null | null | null | cytomod/io.py | Yairsep/CytoMod | cc14d6e75710ac36916901a1a98994bce72fa8d9 | [
"MIT"
] | 1 | 2022-03-15T08:40:09.000Z | 2022-03-15T08:40:09.000Z | import pandas as pd
from os.path import join as opj
from hclusterplot import plotHColCluster
import cytomod as cy
from cytomod import plotting as cyplot
import palettable
from custom_legends import colorLegend
import matplotlib.pyplot as plt
import numpy as np
import os
__all__ = ['write_modules', 'plot_modules']
def write_modules(clust_object, folder):
    """Write every table held by *clust_object* to CSV files under *folder*.

    File names follow the pattern '<clust_object.name>_<suffix>.csv'.
    Exported tables:
      rCyDf  - raw log-concentrations (cytokines as columns, sample IDs as index)
      cyDf   - same data with missing values filled (if needed), log-transformed
               and possibly normalized
      dmatDf - pairwise cytokine distance matrix (cytokines on both axes)
      pwrel  - pairwise cluster reliability (as a distance) from a bootstrap
      labels - cluster label per cytokine
      modDf / rModDf - summarized module variables (normalized / raw)
    """
    def _dump(table, suffix):
        # Single place that builds the '<name>_<suffix>.csv' output path.
        table.to_csv(opj(folder, '{name}_{suffix}.csv'.format(
            name=clust_object.name, suffix=suffix)))

    _dump(clust_object.rCyDf, 'raw_log-conc')
    _dump(clust_object.cyDf, 'normalized_conc')
    _dump(clust_object.dmatDf, 'dmat')
    _dump(clust_object.pwrel, 'pwrel_dmat')
    _dump(clust_object.labels, 'cluster_labels')
    _dump(clust_object.modDf, 'normalized_modules')
    _dump(clust_object.rModDf, 'raw_modules')
def plot_modules(clust_object, folder):
    '''Plot cytomod object modules information.

    Writes into *folder*: a hierarchical-clustering heatmap, a pairwise
    reliability heatmap, a module color legend, per-module correlation
    plots and a 2-D cytokine embedding.

    NOTE(review): this definition is shadowed by the second ``plot_modules``
    defined below (same name, extra ``heatmap_figsize`` parameter, different
    output file names), so this version is dead code at import time.
    '''
    """Hierarchical clustering heatmap"""
    plt.figure(41, figsize=(15.5, 9.5))
    # colInds = plotHColCluster(ds[s].cyDf, method='complete', metric='pearson-signed', col_labels=ds[s].labels, col_dmat=ds[s].dmatDf)
    colInds = plotHColCluster(clust_object.cyDf, method='complete', metric='pearson-signed',
                              col_labels=clust_object.labels,
                              save_path=os.path.join(folder, '%s_hierchical_clust_heatmap.png' % clust_object.name))
    # plt.figure(41).savefig(os.path.join(folder, '%s_hierchical_clust_heatmap.png' % clust_object.name))
    """Heatmaps of pairwise reliability"""
    # 1 - pwrel turns bootstrap reliability into a distance for the dendrogram.
    plt.figure(43, figsize=(15.5, 9.5))
    colInds = cyplot.plotHierClust(1 - clust_object.pwrel, clust_object.Z, labels=clust_object.labels,
                                   titleStr='Pairwise reliability (%s)' % clust_object.name.replace('_', ' '), vRange=(0, 1))
    plt.figure(43).savefig(os.path.join(folder, '%s_pwrel.png' % clust_object.name), dpi=300)
    """color_label_legend"""
    plt.figure(48, figsize=(3, 3))
    plt.clf()
    axh = plt.subplot(1, 1, 1)
    # Set3_6 provides six qualitative colors — assumes at most 6 modules; TODO confirm
    colorLegend(palettable.colorbrewer.qualitative.Set3_6.mpl_colors, ['%s' % s for s in clust_object.modDf.columns],
                loc=10)
    # Hide spines and ticks so only the legend remains visible.
    axh.spines['right'].set_color('none')
    axh.spines['left'].set_color('none')
    axh.spines['top'].set_color('none')
    axh.spines['bottom'].set_color('none')
    axh.set_xticks([])
    axh.set_yticks([])
    axh.set_facecolor('white')
    plt.figure(48).savefig(os.path.join(folder, 'color_label_legend.png'), dpi=300)
    """Plot intra-module correlation"""
    # NOTE(review): figure 50 is reused for all modules, so each saved file
    # contains everything drawn so far (compare plot_module_correl below,
    # which uses one figure per module).
    plt.figure(50, figsize=(15, 9))
    for lab in list(cy.labels2modules(clust_object.labels, clust_object.dropped).keys()):
        cyplot.plotModuleCorr(clust_object.cyDf, clust_object.labels, lab, dropped=clust_object.dropped)
        plt.figure(50).savefig(os.path.join(folder, '%s_mod_corr_%s.png' % (clust_object.name, lab)), dpi=300)
    """Cytokine embedding"""
    plt.figure(901, figsize=(13, 9.7))
    cyplot.plotModuleEmbedding(clust_object.dmatDf, clust_object.labels, method='kpca')
    colors = palettable.colorbrewer.get_map('Set1', 'qualitative', len(np.unique(clust_object.labels))).mpl_colors
    colorLegend(colors, ['%s%1.0f' % (clust_object.sampleStr, i) for i in np.unique(clust_object.labels)],
                loc='lower left')
    plt.figure(901).savefig(os.path.join(folder, '%sembed.png' % clust_object.name), dpi=300)
def plot_modules(clust_object, folder, heatmap_figsize=(15.5, 9.5)):
    '''Plot cytomod object modules information.

    Writes into *folder*: a hierarchical-clustering heatmap, a pairwise
    reliability heatmap, a module color legend, per-module correlation
    plots and a 2-D cytokine embedding.

    NOTE(review): this redefinition shadows the earlier ``plot_modules``
    above; this one adds ``heatmap_figsize`` and uses different file names.
    '''
    """Hierarchical clustering heatmap"""
    plt.figure(41, figsize=heatmap_figsize)
    # colInds = plotHColCluster(ds[s].cyDf, method='complete', metric='pearson-signed', col_labels=ds[s].labels, col_dmat=ds[s].dmatDf)
    colInds = plotHColCluster(clust_object.cyDf, method='complete', metric='pearson-signed',
                              col_labels=clust_object.labels,
                              save_path=os.path.join(folder, '%s_hier.png' % clust_object.name))
    # plt.figure(41).savefig(os.path.join(folder, '%s_hier.png' % clust_object.name))
    """Heatmaps of pairwise reliability"""
    # 1 - pwrel turns bootstrap reliability into a distance for the dendrogram.
    plt.figure(43, figsize=heatmap_figsize)
    colInds = cyplot.plotHierClust(1 - clust_object.pwrel, clust_object.Z, labels=clust_object.labels,
                                   titleStr='Pairwise reliability (%s)' % clust_object.name.replace('_', ' '), vRange=(0, 1))
    plt.figure(43).savefig(os.path.join(folder, '%s_reliability.png' % clust_object.name), dpi=300)
    """color_label_legend"""
    plt.figure(48, figsize=(3, 3))
    plt.clf()
    axh = plt.subplot(1, 1, 1)
    # Set3_6 provides six qualitative colors — assumes at most 6 modules; TODO confirm
    colorLegend(palettable.colorbrewer.qualitative.Set3_6.mpl_colors, ['%s' % s for s in clust_object.modDf.columns],
                loc=10)
    # Hide spines and ticks so only the legend remains visible.
    axh.spines['right'].set_color('none')
    axh.spines['left'].set_color('none')
    axh.spines['top'].set_color('none')
    axh.spines['bottom'].set_color('none')
    axh.set_xticks([])
    axh.set_yticks([])
    axh.set_facecolor('white')
    plt.figure(48).savefig(os.path.join(folder, '%s_color_label_legend.png' % clust_object.name), dpi=300)
    """Plot intra-module correlation"""
    # NOTE(review): figure 50 is reused for all modules, so each saved file
    # contains everything drawn so far (compare plot_module_correl below).
    plt.figure(50, figsize=(15, 9))
    for lab in list(cy.labels2modules(clust_object.labels, clust_object.dropped).keys()):
        cyplot.plotModuleCorr(clust_object.cyDf, clust_object.labels, lab, dropped=clust_object.dropped)
        plt.figure(50).savefig(os.path.join(folder, '%s_modules_correlations_%s.png' % (clust_object.name, lab)), dpi=300)
    """Cytokine embedding"""
    plt.figure(901, figsize=(13, 9.7))
    cyplot.plotModuleEmbedding(clust_object.dmatDf, clust_object.labels, method='kpca')
    colors = palettable.colorbrewer.get_map('Set1', 'qualitative', len(np.unique(clust_object.labels))).mpl_colors
    colorLegend(colors, ['%s%1.0f' % (clust_object.sampleStr, i) for i in np.unique(clust_object.labels)],
                loc='lower left')
    plt.figure(901).savefig(os.path.join(folder, '%s_embedding.png' % clust_object.name), dpi=300)
def plot_clustering_heatmap(clust_object, folder, figsize=(15.5, 9.5)):
    """Save the hierarchical clustering heatmap of the cytokine data.

    Clusters the columns of ``clust_object.cyDf`` (complete linkage,
    signed-Pearson distance) and writes the heatmap PNG into *folder*.
    """
    plt.figure(41, figsize=figsize)
    # colInds = plotHColCluster(ds[s].cyDf, method='complete', metric='pearson-signed', col_labels=ds[s].labels, col_dmat=ds[s].dmatDf)
    colInds = plotHColCluster(clust_object.cyDf, method='complete', metric='pearson-signed',
                              col_labels=clust_object.labels, figsize=figsize,
                              save_path=os.path.join(folder, '%s_hierchical_clust_heatmap.png' % clust_object.name))
    # plt.figure(41).savefig(os.path.join(folder, '%s_hierchical_clust_heatmap.png' % clust_object.name))
def plot_reliability(clust_object, folder, figsize=(15.5, 9.5)):
    """Save the heatmap of pairwise bootstrap cluster reliability."""
    plt.figure(43, figsize=figsize)
    # 1 - pwrel turns reliability into a distance so reliable pairs cluster.
    colInds = cyplot.plotHierClust(1 - clust_object.pwrel, clust_object.Z, labels=clust_object.labels,
                                   titleStr='Pairwise reliability (%s)' % clust_object.name.replace('_', ' '), vRange=(0, 1))
    plt.figure(43).savefig(os.path.join(folder, '%s_reliability.png' % clust_object.name), dpi=300)
def plot_color_legend(clust_object, folder):
    """Save a standalone legend mapping module names to their colors."""
    plt.figure(48, figsize=(3, 3))
    plt.clf()
    axh = plt.subplot(1, 1, 1)
    # Set3_6 provides six qualitative colors — assumes at most 6 modules; TODO confirm
    colorLegend(palettable.colorbrewer.qualitative.Set3_6.mpl_colors, ['%s' % s for s in clust_object.modDf.columns],
                loc=10)
    # Hide spines, ticks and background so only the legend is visible.
    axh.spines['right'].set_color('none')
    axh.spines['left'].set_color('none')
    axh.spines['top'].set_color('none')
    axh.spines['bottom'].set_color('none')
    axh.set_xticks([])
    axh.set_yticks([])
    axh.set_facecolor('white')
    plt.figure(48).savefig(os.path.join(folder, '%s_color_label_legend.png' % clust_object.name), dpi=300)
def plot_module_correl(clust_object, folder):
    """Save one intra-module correlation figure per module."""
    i = 0
    for lab in list(cy.labels2modules(clust_object.labels, clust_object.dropped).keys()):
        # A fresh figure number (50 + i) per module avoids drawing modules
        # on top of each other.
        plt.figure(50+i, figsize=(15, 9))
        cyplot.plotModuleCorr(clust_object.cyDf, clust_object.labels, lab, dropped=clust_object.dropped)
        plt.figure(50+i).savefig(os.path.join(folder, '%s_modules_correlations_%s.png' % (clust_object.name, lab)), dpi=300)
        i += 1
def plot_cy_embedding(clust_object, folder):
    """Save a 2-D kernel-PCA embedding of the cytokines, colored by cluster."""
    plt.figure(901, figsize=(13, 9.7))
    cyplot.plotModuleEmbedding(clust_object.dmatDf, clust_object.labels, method='kpca')
    # One Set1 color per unique cluster label.
    colors = palettable.colorbrewer.get_map('Set1', 'qualitative', len(np.unique(clust_object.labels))).mpl_colors
    colorLegend(colors, ['%s%1.0f' % (clust_object.sampleStr, i) for i in np.unique(clust_object.labels)],
                loc='lower left')
    plt.figure(901).savefig(os.path.join(folder, '%s_embedding.png' % clust_object.name), dpi=300) | 54.337079 | 135 | 0.691998 |
ace60cb62451f3ee73bb86efc7985ba3e57266b3 | 7,010 | py | Python | lib/pymedphys/_base/delivery.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 207 | 2019-01-29T09:53:04.000Z | 2022-03-26T10:34:03.000Z | lib/pymedphys/_base/delivery.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 1,209 | 2019-01-29T07:52:27.000Z | 2022-03-31T20:11:24.000Z | lib/pymedphys/_base/delivery.py | ethanio12345/pymedphys | cb34c992de8d442eced3385018a194364060092d | [
"Apache-2.0"
] | 58 | 2019-03-29T09:06:18.000Z | 2022-03-24T07:44:22.000Z | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from collections import namedtuple
from typing import Dict, List, Tuple, Type, TypeVar, Union
from pymedphys._imports import numpy as np
from pymedphys._utilities.controlpoints import (
remove_irrelevant_control_points,
to_tuple,
)
# https://stackoverflow.com/a/44644576/3912576
# Create a generic variable that can be 'Parent', or any subclass.
DeliveryGeneric = TypeVar("DeliveryGeneric", bound="DeliveryBase")
DeliveryNamedTuple = namedtuple(
"Delivery", ["monitor_units", "gantry", "collimator", "mlc", "jaw"]
)
class DeliveryBase(DeliveryNamedTuple):
    """Immutable container for a radiotherapy delivery.

    Fields (from the namedtuple): monitor_units, gantry, collimator, mlc,
    jaw.  ``__new__`` converts every field into nested tuples so instances
    are hashable, which the ``functools.lru_cache``-decorated methods rely
    on.

    NOTE(review): ``lru_cache`` on instance methods keeps every instance
    alive for the lifetime of the cache (the cache keys on ``self``).
    """

    @property
    def mu(self):
        """Alias for ``monitor_units``."""
        return self.monitor_units

    @classmethod
    def combine(cls, *args):
        """Build one delivery from one or more delivery-like tuples."""
        first = cls(*args[0])
        if len(args) == 1:
            return first

        return first.merge(*args[1::])

    def merge(self: DeliveryGeneric, *args: DeliveryGeneric) -> DeliveryGeneric:
        """Concatenate this delivery with *args* into a single delivery.

        All fields are concatenated along axis 0; monitor units are rebuilt
        as a cumulative sum of the per-step increments (negative jumps at
        the seams between deliveries are clipped to 0).
        """
        cls = type(self)
        separate: List[DeliveryGeneric] = [self] + [*args]
        collection: Dict[str, Tuple] = {}

        for delivery_data in separate:
            for field in delivery_data._fields:  # pylint: disable = no-member
                try:
                    collection[field] = np.concatenate(
                        [collection[field], getattr(delivery_data, field)], axis=0
                    )
                except KeyError:
                    # First delivery seen for this field.
                    collection[field] = getattr(delivery_data, field)

        # Rebuild cumulative MU: clip the negative diffs produced where one
        # delivery's MU resets to 0 after another ends.
        mu = np.concatenate([[0], np.diff(collection["monitor_units"])])
        mu[mu < 0] = 0
        collection["monitor_units"] = np.cumsum(mu)

        merged = cls(**collection)

        return merged

    def __new__(cls, *args, **kwargs):
        # Convert all inputs to nested tuples (hashable, immutable).
        new_args = (to_tuple(arg) for arg in args)
        new_kwargs = {key: to_tuple(item) for key, item in kwargs.items()}
        return super().__new__(cls, *new_args, **new_kwargs)

    @classmethod
    def _empty(cls: Type[DeliveryGeneric]) -> DeliveryGeneric:
        """Return a delivery with no control points (empty field tuples)."""
        return cls(
            tuple(),
            tuple(),
            tuple(),
            tuple((tuple((tuple(), tuple())),)),
            tuple((tuple(), tuple())),
        )

    @functools.lru_cache()
    def _filter_cps(self):
        """Return a copy with irrelevant control points removed (cached)."""
        cls = type(self)
        return cls(*remove_irrelevant_control_points(*self))

    @functools.lru_cache()
    def _mask_by_gantry(
        self,
        angles: Union[Tuple, float, int],
        gantry_tolerance=3,
        allow_missing_angles=False,
    ):
        """Split the delivery into one sub-delivery per gantry angle."""
        try:
            _ = iter(angles)  # type: ignore
            iterable_angles = tuple(angles)  # type: ignore
        except TypeError:
            # Not iterable, assume just one angle provided
            iterable_angles = tuple((angles,))

        masks = self._gantry_angle_masks(
            iterable_angles, gantry_tolerance, allow_missing_angles=allow_missing_angles
        )

        all_masked_delivery_data = tuple(
            self._apply_mask_to_delivery_data(mask) for mask in masks
        )

        return all_masked_delivery_data

    @functools.lru_cache()
    def _metersets(self, gantry_angles, gantry_tolerance):
        """Return the final monitor-unit value for each gantry angle.

        Angles whose mask matches no control points are skipped
        (IndexError on the empty monitor_units tuple).
        """
        all_masked_delivery_data = self._mask_by_gantry(
            gantry_angles, gantry_tolerance, allow_missing_angles=True
        )

        metersets = []
        for delivery_data in all_masked_delivery_data:
            try:
                metersets.append(delivery_data.monitor_units[-1])
            except IndexError:
                continue

        return tuple(metersets)

    def _extract_one_gantry_angle(
        self: DeliveryGeneric, gantry_angle, gantry_tolerance=3
    ) -> DeliveryGeneric:
        """Return the sub-delivery whose gantry is within tolerance of *gantry_angle*."""
        near_angle = self._gantry_angle_mask(gantry_angle, gantry_tolerance)

        return self._apply_mask_to_delivery_data(near_angle)

    def _gantry_angle_masks(
        self, gantry_angles, gantry_tol, allow_missing_angles=False
    ):
        """Build one boolean control-point mask per requested gantry angle.

        Raises ValueError if a mask is non-contiguous (two beams share one
        gantry angle) and AssertionError if some control points are not
        covered by exactly one angle (unless *allow_missing_angles*).
        """
        masks = [
            self._gantry_angle_mask(gantry_angle, gantry_tol)
            for gantry_angle in gantry_angles
        ]
        for mask in masks:
            if np.all(mask == 0):
                continue

            # TODO: Apply mask by more than just gantry angle to appropriately
            # extract beam index even when multiple beams have the same gantry
            # angle
            # A single contiguous True-run has exactly two 0/1 edges; more
            # edges mean the same angle occurs in separate beams.
            is_duplicate_gantry_angles = (
                np.sum(np.abs(np.diff(np.concatenate([[0], mask, [0]])))) != 2
            )

            if is_duplicate_gantry_angles:
                raise ValueError("Duplicate gantry angles not yet supported")

        try:
            assert np.all(
                np.sum(masks, axis=0) == 1
            ), "Not all beams were captured by the gantry tolerance of " " {}".format(
                gantry_tol
            )
        except AssertionError:
            if not allow_missing_angles:
                print("Allowable gantry angles = {}".format(gantry_angles))
                gantry = np.array(self.gantry, copy=False)
                out_of_tolerance = np.unique(
                    gantry[np.sum(masks, axis=0) == 0]
                ).tolist()
                print(
                    "The gantry angles out of tolerance were {}".format(
                        out_of_tolerance
                    )
                )

                raise

        return masks

    def _gantry_angle_mask(self, gantry_angle, gantry_angle_tol):
        """Boolean mask of control points within tolerance of one angle.

        Asserts that the matching control points are contiguous.
        """
        near_angle = np.abs(np.array(self.gantry) - gantry_angle) <= gantry_angle_tol
        assert np.all(np.diff(np.where(near_angle)[0]) == 1)

        return near_angle

    def _apply_mask_to_delivery_data(self: DeliveryGeneric, mask) -> DeliveryGeneric:
        """Return a new delivery with *mask* applied to every field.

        Monitor units are re-zeroed to start at 0 (rounded to 7 decimals);
        an empty mask yields an empty delivery unchanged.
        """
        cls = type(self)
        new_delivery_data = []
        for item in self:
            new_delivery_data.append(np.array(item)[mask])

        new_monitor_units = new_delivery_data[0]
        try:
            first_monitor_unit_item = new_monitor_units[0]
        except IndexError:
            return cls(*new_delivery_data)

        new_delivery_data[0] = np.round(
            np.array(new_delivery_data[0], copy=False) - first_monitor_unit_item,
            decimals=7,
        )

        return cls(*new_delivery_data)

    def _strip_delivery_data(self: DeliveryGeneric, skip_size) -> DeliveryGeneric:
        """Return a new delivery keeping only every *skip_size*-th control point."""
        cls = type(self)
        new_delivery_data = []
        for item in self:
            new_delivery_data.append(np.array(item)[::skip_size])

        return cls(*new_delivery_data)
| 32.009132 | 88 | 0.611698 |
ace60ce03c50c1ea5c4733309f5a411acecb4dc3 | 1,169 | py | Python | onnx_tf/handlers/backend/concat.py | yangchengtest/onnx-tf-atlas | 7c6772399256cd14b37a1ac4d7bad948583b0034 | [
"Apache-2.0"
] | 1 | 2020-06-04T14:16:39.000Z | 2020-06-04T14:16:39.000Z | onnx_tf/handlers/backend/concat.py | yangchengtest/onnx-tf-atlas | 7c6772399256cd14b37a1ac4d7bad948583b0034 | [
"Apache-2.0"
] | 1 | 2020-06-10T06:53:21.000Z | 2020-06-12T08:19:23.000Z | onnx_tf/handlers/backend/concat.py | yangchengtest/onnx-tf-atlas | 7c6772399256cd14b37a1ac4d7bad948583b0034 | [
"Apache-2.0"
] | null | null | null | import copy
import tensorflow as tf
from onnx_tf.handlers.backend_handler import BackendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_func
@onnx_op("Concat")
@tf_func(tf.concat)
class Concat(BackendHandler):
    """Backend handler for the ONNX ``Concat`` operator.

    The TF concat axis is chosen from the requested input layout:
    axis 1 (channels) for NCHW, the trailing axis for 4-D tensors in
    channels-last mode, and -1 otherwise.
    """

    @classmethod
    def _common(cls, node, **kwargs):
        tensor_dict = kwargs["tensor_dict"]
        values = [tensor_dict[name] for name in node.inputs]
        layout = kwargs.get("input_format", "NCHW")
        attrs = copy.deepcopy(node.attrs)
        if layout == "NCHW":
            # Channels-first: always concatenate along the channel axis.
            attrs["axis"] = 1
        else:
            first_input = tensor_dict[node.inputs[0]]
            rank = len(first_input.get_shape().as_list())
            print ("input_shape:", rank)
            # Channels-last: 4-D tensors concatenate on the last (channel)
            # axis; any other rank falls back to -1.
            attrs["axis"] = rank - 1 if rank == 4 else -1
        return [cls.make_tensor_from_onnx_node(node, inputs=[values], attrs=attrs)]

    @classmethod
    def version_1(cls, node, **kwargs):
        return cls._common(node, **kwargs)

    @classmethod
    def version_4(cls, node, **kwargs):
        return cls._common(node, **kwargs)

    @classmethod
    def version_11(cls, node, **kwargs):
        return cls._common(node, **kwargs)
| 27.186047 | 79 | 0.668948 |
ace60d4558305f150dbb560abf1a493d87b69ab7 | 661 | py | Python | s3upload.py | tomcz/aws_py | 46c2e4930e45426a8a1328625a07b95cb15fda9f | [
"MIT"
] | 2 | 2016-05-05T22:40:21.000Z | 2016-06-27T13:23:43.000Z | s3upload.py | tomcz/aws_py | 46c2e4930e45426a8a1328625a07b95cb15fda9f | [
"MIT"
] | null | null | null | s3upload.py | tomcz/aws_py | 46c2e4930e45426a8a1328625a07b95cb15fda9f | [
"MIT"
] | 3 | 2016-06-14T14:18:18.000Z | 2020-01-20T14:02:11.000Z | from properties import loadcredentials
from optparse import OptionParser
from s3client import S3Client
import os
# Command-line entry point (Python 2 script): upload one file to an S3 bucket.
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option("-f", dest="file", help="File to upload", metavar="FILE")
parser.add_option("-b", dest="bucket", help="Destination bucket", metavar="BUCKET")
options, args = parser.parse_args()
# Both -f and -b are required; otherwise show usage and exit normally.
if options.file and options.bucket:
    client = S3Client(loadcredentials())
    # The file's basename becomes the S3 object key.
    object_key = os.path.basename(options.file)
    print 'Sending', options.file, 'to', options.bucket, 'as', object_key
    client.createObject(options.bucket, object_key, options.file)
else:
    parser.print_help()
| 34.789474 | 83 | 0.74584 |
ace60d78f46511325d10ef5c5f941df09757d655 | 669 | py | Python | library/forms.py | dkquocbao/python_exercise | 32f63e54632a5cd6f77c3dee716f10ab766fbc2c | [
"bzip2-1.0.6"
] | null | null | null | library/forms.py | dkquocbao/python_exercise | 32f63e54632a5cd6f77c3dee716f10ab766fbc2c | [
"bzip2-1.0.6"
] | null | null | null | library/forms.py | dkquocbao/python_exercise | 32f63e54632a5cd6f77c3dee716f10ab766fbc2c | [
"bzip2-1.0.6"
] | null | null | null | from django.forms import ModelForm
from django import forms
from library.models import Category, Book
# Book availability states, used as the choices for CreateBookForm.status.
AVAILABLE = 0
BORROWED = 1
STATUS_CHOICES = (
    (AVAILABLE, 'Available'),
    (BORROWED, 'Borrowed'),
)
class CreateCategoryForm(ModelForm):
    """Model form for creating a Category (name and slug fields)."""

    class Meta:
        model = Category
        fields = ('name', 'slug')
class CreateBookForm(ModelForm):
    """Model form for creating a Book with status rendered as checkboxes.

    NOTE(review): MultipleChoiceField cleans to a list of selected values —
    confirm the Book.status model field accepts a list.
    """
    status = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
                                       choices=STATUS_CHOICES)

    class Meta:
        model = Book
        fields = ('name', 'categories', 'status', 'quantity')
        widgets = {
            # Override the default widget for categories with a dropdown.
            'categories': forms.Select(),
        }
| 23.068966 | 75 | 0.617339 |
ace60d7a33133b7466e58282619ed8b175ffce4d | 5,692 | py | Python | examples/FasterRCNN/convert_d2/convert_d2.py | wdings/Mask-RCNN | 8d5ae5cc2cfcf2e4e53b4d1064ac9e727f736d09 | [
"Apache-2.0"
] | 1 | 2019-10-15T04:16:12.000Z | 2019-10-15T04:16:12.000Z | examples/FasterRCNN/convert_d2/convert_d2.py | YikangGui/tensorpack | a6ca79c320ba109d61acf14e56768bd7b273871b | [
"Apache-2.0"
] | 6 | 2020-01-28T23:03:24.000Z | 2022-02-10T01:21:18.000Z | examples/FasterRCNN/convert_d2/convert_d2.py | YikangGui/tensorpack | a6ca79c320ba109d61acf14e56768bd7b273871b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import numpy as np
import pickle
from detectron2.config import get_cfg
def convert_config(cfg):
    """Translate a detectron2 config into tensorpack FasterRCNN overrides.

    Returns a list of ``(key, value)`` pairs in tensorpack's config
    namespace.  Only ResNet-50/101 with C4 or FPN (optionally cascade)
    heads and ROIAlignV2 poolers are supported; anything else trips an
    assertion.
    """
    ret = []

    ret.append(("MODE_MASK", cfg.MODEL.MASK_ON))
    has_fpn = "fpn" in cfg.MODEL.BACKBONE.NAME
    ret.append(("MODE_FPN", has_fpn))
    if not has_fpn:
        # we only support C4 and FPN
        assert cfg.MODEL.ROI_HEADS.NAME == "Res5ROIHeads"
    else:
        ret.append(("FPN.CASCADE", cfg.MODEL.ROI_HEADS.NAME == "CascadeROIHeads"))
        assert len(cfg.MODEL.ROI_BOX_CASCADE_HEAD.IOUS) == 3
    depth = cfg.MODEL.RESNETS.DEPTH
    assert depth in [50, 101], depth
    if depth == 101:
        ret.append(("BACKBONE.RESNET_NUM_BLOCKS", [3, 4, 23, 3]))
    ret.append(("BACKBONE.STRIDE_1X1", cfg.MODEL.RESNETS.STRIDE_IN_1X1))
    # Channel order of the pixel statistics is reversed here — presumably
    # BGR (detectron2) to RGB (tensorpack); confirm against PREPROC usage.
    ret.append(("PREPROC.PIXEL_MEAN", cfg.MODEL.PIXEL_MEAN[::-1]))
    ret.append(("PREPROC.PIXEL_STD", cfg.MODEL.PIXEL_STD[::-1]))

    assert cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE == "ROIAlignV2"
    assert cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE == "ROIAlignV2"
    return ret
def convert_weights(d, cfg):
    """Convert a detectron2 state dict *d* into tensorpack variable names.

    Every converted tensor is popped from *d*; at the end *d* must be
    empty (apart from anchor buffers), so unexpected leftover weights
    fail loudly.  Returns a dict of tensorpack-named numpy arrays.
    """
    has_fpn = "fpn" in cfg.MODEL.BACKBONE.NAME
    ret = {}

    def _convert_conv(src, dst):
        # Permute conv kernel axes (0,1,2,3) -> (2,3,1,0) to match TF's
        # (h, w, in, out) layout, and map any attached norm parameters.
        src_w = d.pop(src + ".weight").transpose(2, 3, 1, 0)
        ret[dst + "/W"] = src_w
        if src + ".norm.weight" in d:  # has norm
            ret[dst + "/bn/gamma"] = d.pop(src + ".norm.weight")
            ret[dst + "/bn/beta"] = d.pop(src + ".norm.bias")
            ret[dst + "/bn/variance/EMA"] = d.pop(src + ".norm.running_var")
            ret[dst + "/bn/mean/EMA"] = d.pop(src + ".norm.running_mean")
        if src + ".bias" in d:
            ret[dst + "/b"] = d.pop(src + ".bias")

    def _convert_fc(src, dst):
        # Fully-connected weights are transposed to (in, out).
        ret[dst + "/W"] = d.pop(src + ".weight").transpose()
        ret[dst + "/b"] = d.pop(src + ".bias")

    if has_fpn:
        backbone_prefix = "backbone.bottom_up."
    else:
        backbone_prefix = "backbone."
    _convert_conv(backbone_prefix + "stem.conv1", "conv0")
    for grpid in range(4):
        if not has_fpn and grpid == 3:
            # In C4 models the last ResNet group lives under roi_heads.
            backbone_prefix = "roi_heads."
        for blkid in range([3, 4, 6 if cfg.MODEL.RESNETS.DEPTH == 50 else 23, 3][grpid]):
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv1",
                          f"group{grpid}/block{blkid}/conv1")
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv2",
                          f"group{grpid}/block{blkid}/conv2")
            _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.conv3",
                          f"group{grpid}/block{blkid}/conv3")
            if blkid == 0:
                # Only the first block of each group has a projection shortcut.
                _convert_conv(backbone_prefix + f"res{grpid + 2}.{blkid}.shortcut",
                              f"group{grpid}/block{blkid}/convshortcut")

    if has_fpn:
        for lvl in range(2, 6):
            _convert_conv(f"backbone.fpn_lateral{lvl}", f"fpn/lateral_1x1_c{lvl}")
            _convert_conv(f"backbone.fpn_output{lvl}", f"fpn/posthoc_3x3_p{lvl}")

    # RPN:
    _convert_conv("proposal_generator.rpn_head.conv", "rpn/conv0")
    _convert_conv("proposal_generator.rpn_head.objectness_logits", "rpn/class")
    _convert_conv("proposal_generator.rpn_head.anchor_deltas", "rpn/box")

    # bbox predictor
    def _convert_box_predictor(src, dst):
        if cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG:
            _convert_fc(src + ".bbox_pred", dst + "/box")
        else:
            # Prepend a duplicate of the first 4 box-regression rows —
            # presumably a placeholder for the background class slot in
            # tensorpack's per-class layout; confirm against FastRCNN code.
            v = d.pop(src + ".bbox_pred.bias")
            ret[dst + "/box/b"] = np.concatenate((v[:4], v))
            v = d.pop(src + ".bbox_pred.weight")
            ret[dst + "/box/W"] = np.concatenate((v[:4, :], v), axis=0).transpose()
        _convert_fc(src + ".cls_score", dst + "/class")

        # Move the last class column (background in detectron2) to the
        # front, where tensorpack expects it.
        num_class = ret[dst + "/class/W"].shape[1] - 1
        idxs = np.concatenate(((num_class, ), np.arange(num_class)))
        ret[dst + "/class/W"] = ret[dst + "/class/W"][:, idxs]
        ret[dst + "/class/b"] = ret[dst + "/class/b"][idxs]

    # Fast R-CNN: box head
    has_cascade = cfg.MODEL.ROI_HEADS.NAME == "CascadeROIHeads"
    if has_cascade:
        assert cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        for k in range(3):
            for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
                _convert_fc(f"roi_heads.box_head.{k}.fc{i+1}", f"cascade_rcnn_stage{k+1}/head/fc{i+6}")
            _convert_box_predictor(f"roi_heads.box_predictor.{k}", f"cascade_rcnn_stage{k+1}/outputs")
    else:
        for i in range(cfg.MODEL.ROI_BOX_HEAD.NUM_FC):
            _convert_fc(f"roi_heads.box_head.fc{i+1}", f"fastrcnn/fc{i+6}")
        _convert_box_predictor("roi_heads.box_predictor", "fastrcnn/outputs" if has_fpn else "fastrcnn")

    # mask head
    for fcn in range(cfg.MODEL.ROI_MASK_HEAD.NUM_CONV):
        _convert_conv(f"roi_heads.mask_head.mask_fcn{fcn+1}", f"maskrcnn/fcn{fcn}")
    _convert_conv("roi_heads.mask_head.deconv", "maskrcnn/deconv")
    _convert_conv("roi_heads.mask_head.predictor", "maskrcnn/conv")

    # Anchor buffers carry no learned weights — drop them.
    for k in list(d.keys()):
        if "cell_anchors" in k:
            d.pop(k)
    assert len(d) == 0, d.keys()
    return ret
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--d2-config")  # detectron2 YAML config file
    parser.add_argument("--d2-pkl")     # detectron2 pickled model weights
    parser.add_argument("--output")     # destination file for np.savez_compressed
    args = parser.parse_args()

    cfg = get_cfg()
    cfg.merge_from_file(args.d2_config)
    tp_cfg = convert_config(cfg)
    # Print the overrides as quoted KEY=VALUE tokens (spaces stripped) so
    # they can be pasted onto a tensorpack command line.
    for k, v in tp_cfg:
        print("'{}={}'".format(k, v).replace(' ', ''), end=' ')

    with open(args.d2_pkl, "rb") as f:
        d2_dict = pickle.load(f)["model"]
    tp_dict = convert_weights(d2_dict, cfg)
    np.savez_compressed(args.output, **tp_dict)
| 39.255172 | 104 | 0.60383 |
ace60e0cb9dbc642660a124f32a3870387e4235a | 6,507 | py | Python | Arena.py | taylor-santos/alpha-zero-general | 2f22d68bd5337de56dbf0482229e301c16377f04 | [
"MIT"
] | null | null | null | Arena.py | taylor-santos/alpha-zero-general | 2f22d68bd5337de56dbf0482229e301c16377f04 | [
"MIT"
] | null | null | null | Arena.py | taylor-santos/alpha-zero-general | 2f22d68bd5337de56dbf0482229e301c16377f04 | [
"MIT"
] | null | null | null | import logging
import random
from subprocess import Popen, PIPE
from tqdm import tqdm
log = logging.getLogger(__name__)
class Arena():
    """
    An Arena class where any 2 agents can be pit against each other.
    """

    def __init__(self, AlphaPlayer, MinimaxPlayer, game, display=None):
        """
        Input:
            AlphaPlayer, MinimaxPlayer: agent functions returning an action.
                In playGame they are called as player(board); in
                playGameVsMinimax as player(board, proc, verbose=...).
            game: Game object
            display: a function that takes board as input and prints it (e.g.
                     display in othello/OthelloGame). Is necessary for verbose
                     mode.

        see othello/OthelloPlayers.py for an example. See pit.py for pitting
        human players/other baselines with each other.
        """
        self.AlphaPlayer = AlphaPlayer
        self.MinimaxPlayer = MinimaxPlayer
        self.game = game
        self.display = display

    def playGameVsMinimax(self, startPlayer, proc, verbose=False):
        """
        Executes one episode of a game; both agents receive *proc*, the
        Popen handle of the external minimax engine.

        Returns:
            either
                winner: 1 if startPlayer won, -1 if startPlayer lost
            or
                draw result returned from the game that is neither 1, -1, nor 0.
        """
        # Indexed with curPlayer + 1: slot 0 -> player -1 (MinimaxPlayer),
        # slot 2 -> player +1 (AlphaPlayer).
        players = [self.MinimaxPlayer, None, self.AlphaPlayer]
        curPlayer = startPlayer
        board = self.game.getInitBoard()
        it = 0
        while self.game.getGameEnded(board, curPlayer) == 0:
            it += 1
            # assert self.display
            # print("Turn ", str(it), "Player ", str(curPlayer))
            # self.display(board)
            action = players[curPlayer + 1](self.game.getCanonicalForm(board, curPlayer), proc, verbose=verbose)

            valids = self.game.getValidMoves(self.game.getCanonicalForm(board, curPlayer), 1)

            if valids[action] == 0:
                log.error(f'Action {action} is not valid!')
                log.debug(f'valids = {valids}')
                assert valids[action] > 0
            if verbose:
                # Action printed as (action // 9, action % 9) — presumably a
                # 9x9 board coordinate; confirm against the Game class.
                print('(', int(action/9), ', ', action%9, ')', sep='')
            board, curPlayer = self.game.getNextState(board, curPlayer, action)
        # assert self.display
        # print("Game over: Turn ", str(it), "Result ", str(self.game.getGameEnded(board, 1)))
        # self.display(board)
        # Result is taken from startPlayer's perspective.
        return self.game.getGameEnded(board, startPlayer)

    def playGame(self, verbose=False):
        """
        Executes one episode of a game; agents are called with the board only.

        Returns:
            either
                winner: player who won the game (1 if player1, -1 if player2)
            or
                draw result returned from the game that is neither 1, -1, nor 0.
        """
        players = [self.MinimaxPlayer, None, self.AlphaPlayer]
        curPlayer = 1
        board = self.game.getInitBoard()
        it = 0
        while self.game.getGameEnded(board, curPlayer) == 0:
            it += 1
            if verbose:
                assert self.display
                print("Turn ", str(it), "Player ", str(curPlayer))
                self.display(board)
            action = players[curPlayer + 1](self.game.getCanonicalForm(board, curPlayer))

            valids = self.game.getValidMoves(self.game.getCanonicalForm(board, curPlayer), 1)

            if valids[action] == 0:
                log.error(f'Action {action} is not valid!')
                log.debug(f'valids = {valids}')
                assert valids[action] > 0
            if verbose:
                print('(', int(action/9), ', ', action%9, ')', sep='')
            board, curPlayer = self.game.getNextState(board, curPlayer, action)
        if verbose:
            assert self.display
            print("Game over: Turn ", str(it), "Result ", str(self.game.getGameEnded(board, 1)))
            self.display(board)
        # Convert the result to player1's perspective.
        return curPlayer * self.game.getGameEnded(board, curPlayer)

    def playGamesVsMinimax(self, num, depth, verbose=False):
        """
        Plays num games against the external minimax engine: in the first
        half AlphaZero moves first (engine passed 'O'), in the second half
        the engine moves first (passed 'X').  A fresh 'ultimatettt.exe'
        subprocess with search depth *depth* and a random seed is spawned
        for every game.

        Returns (each a 2-element list [first half, second half]):
            oneWon: games won by AlphaZero
            twoWon: games won by the minimax engine
            draws: games won by nobody
        """
        num = int(num / 2)
        oneWon = [0, 0]
        twoWon = [0, 0]
        draws = [0, 0]
        for _ in tqdm(range(num), desc="Arena.playGames (1)"):
            # AlphaZero starts, playing as O
            proc = Popen(['ultimatettt.exe', 'O', str(depth), str(random.randint(1, 1000000))], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            gameResult = self.playGameVsMinimax(1, proc, verbose=verbose)
            if gameResult == 1:
                oneWon[0] += 1
            elif gameResult == -1:
                twoWon[0] += 1
            else:
                draws[0] += 1

        # self.AlphaPlayer, self.MinimaxPlayer = self.MinimaxPlayer, self.AlphaPlayer

        for _ in tqdm(range(num), desc="Arena.playGames (2)"):
            # Minimax starts, playing as X
            proc = Popen(['ultimatettt.exe', 'X', str(depth), str(random.randint(1, 1000000))], stdin=PIPE, stdout=PIPE, stderr=PIPE)
            # startPlayer=-1, so gameResult is from Minimax's perspective:
            # -1 here means AlphaZero won.
            gameResult = self.playGameVsMinimax(-1, proc, verbose=verbose)
            if gameResult == -1:
                oneWon[1] += 1
            elif gameResult == 1:
                twoWon[1] += 1
            else:
                draws[1] += 1

        return oneWon, twoWon, draws

    def playGames(self, num, verbose=False):
        """
        Plays num games in which the AlphaPlayer agent moves first in the
        first num/2 games and second in the remaining num/2 games (the two
        player attributes are swapped between halves, and the win counters
        below are inverted accordingly).

        NOTE(review): the swap of self.AlphaPlayer/self.MinimaxPlayer is
        never undone, so the instance is left with the agents exchanged.

        Returns:
            oneWon: games won by the agent passed as AlphaPlayer
            twoWon: games won by the agent passed as MinimaxPlayer
            draws: games won by nobody
        """
        num = int(num / 2)
        oneWon = 0
        twoWon = 0
        draws = 0
        for _ in tqdm(range(num), desc="Arena.playGames (1)"):
            gameResult = self.playGame(verbose=verbose)
            if gameResult == 1:
                oneWon += 1
            elif gameResult == -1:
                twoWon += 1
            else:
                draws += 1

        self.AlphaPlayer, self.MinimaxPlayer = self.MinimaxPlayer, self.AlphaPlayer

        for _ in tqdm(range(num), desc="Arena.playGames (2)"):
            gameResult = self.playGame(verbose=verbose)
            # After the swap, -1 corresponds to a win for the original
            # AlphaPlayer agent.
            if gameResult == -1:
                oneWon += 1
            elif gameResult == 1:
                twoWon += 1
            else:
                draws += 1

        return oneWon, twoWon, draws
| 35.950276 | 133 | 0.546027 |
ace60f1107cd3cd78672c41c920ff740ce72b674 | 26,093 | py | Python | features/ppt_customformats/helpers.py | pyro-team/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 12 | 2019-05-31T02:57:26.000Z | 2022-03-26T09:40:50.000Z | features/ppt_customformats/helpers.py | mrflory/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 27 | 2021-11-27T16:33:19.000Z | 2022-03-27T17:47:26.000Z | features/ppt_customformats/helpers.py | pyro-team/bkt-toolbox | bbccba142a81ca0a46056f2bcda75899979158a5 | [
"MIT"
] | 3 | 2019-06-12T10:59:20.000Z | 2020-04-21T15:13:50.000Z | # -*- coding: utf-8 -*-
'''
Created on 2018-05-29
@author: Florian Stallmann
'''
from __future__ import absolute_import
import logging
from collections import OrderedDict
from functools import wraps
import bkt.library.powerpoint as pplib
def textframe_group_check(func):
    """Decorator that makes a TextFrame2 operation group-aware.

    If the textframe's parent shape is a group, *func* is applied to the
    ``TextFrame2`` of every shape inside the group; otherwise it is applied
    to the given textframe directly.  If anything goes wrong during the
    check (e.g. a COM attribute failure), the error is logged and *func*
    is applied to the original textframe as a fallback.
    """
    @wraps(func)
    def wrapper(cls, textframe_obj, *args, **kwargs):
        try:
            shape = textframe_obj.Parent
            if shape.Type == pplib.MsoShapeType["msoGroup"]:
                logging.debug("customformats: found group")
                for shp in shape.GroupItems:
                    func(cls, shp.TextFrame2, *args, **kwargs)
            else:
                logging.debug("customformats: found non-group")
                func(cls, textframe_obj, *args, **kwargs)
        except Exception:
            # Was a bare ``except:`` — narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            logging.exception("customformats: group check failed")
            # NOTE(review): if the failure happens mid-way through a group
            # iteration, some group items may receive the operation twice.
            func(cls, textframe_obj, *args, **kwargs)
    return wrapper
class ShapeFormats(object):
always_keep_theme_color = True
always_consider_indentlevels = True
    @classmethod
    def mult_setattr(cls, obj, name, value):
        """Set a dotted attribute path (e.g. ``"Fill.ForeColor.RGB"``) on *obj*.

        The pseudo leaf attribute ``BKTColor`` expects *value* as
        ``[ObjectThemeColor, Brightness, RGB]`` and writes theme color and
        brightness (falling back to the stored RGB when
        ``always_keep_theme_color`` is False and the colors differ).
        Raises AttributeError if an intermediate attribute is missing and
        ValueError if the final assignment fails.
        """
        logging.debug("mult_setattr: setting %s = %s", name, value)
        attrs = name.split(".")
        # Walk down to the object that holds the final attribute.
        # NOTE: the loop variable shadows the ``name`` parameter.
        for name in attrs[:-1]:
            try:
                obj = getattr(obj, name)
            except:
                raise AttributeError("Cannot find attribute %s" % name)
        try:
            if attrs[-1] == "BKTColor":
                obj.ObjectThemeColor = value[0]
                obj.Brightness = value[1]
                # If theme color is different from saved RGB color, use RGB color instead
                if not cls.always_keep_theme_color and obj.RGB != value[2]:
                    obj.RGB = value[2]
            else:
                setattr(obj, attrs[-1], value)
        except ValueError:
            # bkt.helpers.exception_as_message("Cannot set %s = %s" % (attrs[-1], value))
            raise ValueError("Cannot set %s = %s" % (attrs[-1], value))
    @classmethod
    def _write_color_to_array(cls, dict_ref, color_obj, arr_key):
        """Serialize a color object into *dict_ref* under ``arr_key``-prefixed keys.

        Theme colors are stored as ``BKTColor`` = [ObjectThemeColor,
        Brightness, RGB] so they can be restored theme-aware; plain colors
        are stored as RGB.  TintAndShade is always saved.
        """
        if color_obj.Type == pplib.MsoColorType['msoColorTypeScheme'] and color_obj.ObjectThemeColor > 0:
            dict_ref['%s.BKTColor' % arr_key] = [color_obj.ObjectThemeColor, float(color_obj.Brightness), color_obj.RGB]
            # dict_ref['%s.ObjectThemeColor' % arr_key] = color_obj.ObjectThemeColor
            # dict_ref['%s.Brightness' % arr_key] = float(color_obj.Brightness)
        else:
            dict_ref['%s.RGB' % arr_key] = color_obj.RGB
            # dict_ref['%s.RGB' % arr_key] = color_obj.RGB #always use RGB for now as it can be used cross-presentations
        dict_ref['%s.TintAndShade' % arr_key] = float(color_obj.TintAndShade)
    @classmethod
    def _get_indentlevels(cls, textframe_object, what): #paragraph or font
        """Collect paragraph or font formats per indent level of a textframe.

        Returns an OrderedDict keyed by indent level as string ("0"-"5"),
        where "0" is an internal fallback format (first-level paragraphs
        without a visible bullet, or the first paragraph/whole range if no
        such paragraph exists).

        NOTE(review): relies on ``range(0, 6).remove(...)`` — this only
        works where ``range`` returns a list (IronPython/Python 2).
        """
        indent_levels = OrderedDict()
        if textframe_object.TextRange.Paragraphs().Count > 0:
            # at least one paragraph
            indent_level_range = range(0,6) #indent levels 0 to 5, whereas 0 is used as internal fallback format!
            for par in textframe_object.TextRange.Paragraphs():
                indent_level = par.ParagraphFormat.IndentLevel
                if indent_level == 1 and par.ParagraphFormat.Bullet.Visible == 0:
                    indent_level = 0 #fallback indent level
                if indent_level in indent_level_range:
                    # First paragraph seen for this level defines its format.
                    indent_level_range.remove(indent_level)
                    indent_levels[str(indent_level)] = cls._get_indentlevel_formats(par, what)
            if 0 in indent_level_range:
                #fallback not yet defined
                indent_levels["0"] = cls._get_indentlevel_formats(textframe_object.TextRange.Paragraphs(1,1), what)
        else:
            indent_levels["0"] = cls._get_indentlevel_formats(textframe_object.TextRange, what)
        return indent_levels
    @classmethod
    def _get_indentlevel_formats(cls, textrange_object, what):
        """Dispatch format extraction: paragraph format or font of a textrange."""
        if what == "paragraph":
            return cls._get_paragraphformat(textrange_object.ParagraphFormat)
        else:
            return cls._get_font(textrange_object.Font)
@classmethod
@textframe_group_check
def _set_indentlevels(cls, textframe_object, what, indentlevels_dict):
    """Apply per-indent-level formats previously collected by _get_indentlevels."""
    apply_per_level = (
        cls.always_consider_indentlevels
        and textframe_object.TextRange.Paragraphs().Count > 0
    )
    if not apply_per_level:
        # apply the fallback format to the whole text range
        cls._set_indentlevel_formats(textframe_object.TextRange, what, indentlevels_dict["0"])
        return
    for par in textframe_object.TextRange.Paragraphs():
        level_key = str(par.ParagraphFormat.IndentLevel)
        # unknown levels and bullet-less level-1 paragraphs use fallback "0"
        if level_key not in indentlevels_dict or (level_key == "1" and par.ParagraphFormat.Bullet.Visible == 0):
            level_key = "0"
        cls._set_indentlevel_formats(par, what, indentlevels_dict[level_key])
@classmethod
def _set_indentlevel_formats(cls, textrange_object, what, what_dict):
    """Apply either a paragraph-format dict or a font dict to *textrange_object*."""
    if what == "paragraph":
        cls._set_paragraphformat(textrange_object.ParagraphFormat, what_dict)
    else:
        cls._set_font(textrange_object.Font, what_dict)
@classmethod
def _get_type(cls, shape):
tmp = OrderedDict()
if shape.Connector == -1:
tmp['ConnectorFormat.Type'] = shape.ConnectorFormat.Type
else:
#for connectors, autoshapetype is -2 and throws error setting this value
tmp['AutoShapeType'] = shape.AutoShapeType
tmp['VerticalFlip'] = shape.VerticalFlip #method
tmp['HorizontalFlip'] = shape.HorizontalFlip #method
tmp['Adjustments'] = [
float(shape.adjustments.item[i])
for i in range(1,shape.adjustments.count+1)
]
return tmp
@classmethod
def _set_type(cls, shape, type_dict):
logging.debug("customformats: set type")
if shape.Connector == -1 and "ConnectorFormat.Type" in type_dict:
shape.ConnectorFormat.Type = type_dict["ConnectorFormat.Type"]
elif "AutoShapeType" in type_dict:
shape.AutoShapeType = type_dict["AutoShapeType"]
if shape.VerticalFlip != type_dict["VerticalFlip"]:
shape.Flip(1) #msoFlipVertical
if shape.HorizontalFlip != type_dict["HorizontalFlip"]:
shape.Flip(0) #msoFlipHorizontal
for i in range(1,shape.adjustments.count+1):
try:
shape.adjustments.item[i] = type_dict["Adjustments"][i-1]
except:
continue
@classmethod
def _get_fill(cls, fill_object):
    """Serialize a FillFormat into an OrderedDict.

    Key insertion order is significant: _set_fill replays the keys in
    order, so method-like keys (Background/Pattern/Solid/Gradient*) must
    come before the color and transparency attributes they would reset.
    """
    tmp = OrderedDict()
    if fill_object.Visible == -1:
        tmp['Visible'] = -1
        if fill_object.Type == pplib.MsoFillType['msoFillBackground']:
            tmp['Background'] = True #method!
        elif fill_object.Type == pplib.MsoFillType['msoFillPatterned']:
            tmp['Pattern'] = fill_object.Pattern #read-only attribute!
            cls._write_color_to_array(tmp, fill_object.ForeColor, 'ForeColor')
            cls._write_color_to_array(tmp, fill_object.BackColor, 'BackColor')
        elif fill_object.Type == pplib.MsoFillType['msoFillGradient']:
            save_color_stops = True
            if fill_object.GradientColorType == 1: #msoGradientOneColor
                tmp['GradientOneColor'] = [
                    fill_object.GradientStyle,
                    fill_object.GradientVariant,
                    float(fill_object.GradientDegree),
                ]
            elif fill_object.GradientColorType == 2: #msoGradientTwoColors
                tmp['GradientTwoColor'] = [
                    fill_object.GradientStyle,
                    fill_object.GradientVariant,
                ]
            elif fill_object.GradientColorType == 3: #msoGradientPresetColors
                tmp['GradientPresetColor'] = [
                    fill_object.GradientStyle,
                    fill_object.GradientVariant,
                    fill_object.PresetGradientType,
                ]
                save_color_stops = False #no need to save color stops for preset gradients
            elif fill_object.GradientColorType == 4: #msoGradientMultiColor
                tmp['GradientMultiColor'] = [
                    fill_object.GradientStyle,
                    fill_object.GradientVariant,
                ]
            else:
                raise ValueError('unkown gradient type')
            #NOTE: If angle is changed (for linear gradients), style can be -2 and variant 0 which are invalid values! This is handled is the setter function.
            if save_color_stops:
                tmp['GradientStops'] = []
                for stop in fill_object.GradientStops:
                    stop_dict = OrderedDict()
                    stop_dict["Position"] = float(stop.Position)
                    cls._write_color_to_array(stop_dict, stop.Color, 'Color')
                    stop_dict["Transparency"] = float(stop.Transparency) #IMPORTANT: Set Transparency after color, because color resets transparency
                    tmp['GradientStops'].append(stop_dict)
            tmp['RotateWithObject'] = fill_object.RotateWithObject
            try:
                #angle is only accessible for certain gradient types/styles/variants...
                tmp['GradientAngle'] = float(fill_object.GradientAngle)
            except:
                pass
        #elif fill_object.Type == pplib.MsoFillType['msoFillTextured']:
        # Textures in VBA is broken, property PresetTexture always returns -2
        else:
            # fallback for solid (and unsupported textured) fills
            tmp['Solid'] = True #method!
            cls._write_color_to_array(tmp, fill_object.ForeColor, 'ForeColor')
            tmp['Transparency'] = float(fill_object.Transparency)
            # tmp['Type'] = fill_object.Type #read-only attribute!
    else:
        tmp['Visible'] = 0
    return tmp
@classmethod
def _set_fill(cls, fill_object, fill_dict):
    """Restore a FillFormat from a dict written by _get_fill.

    Keys are applied in insertion order: method-like keys (Background,
    Pattern, Solid, Gradient*) re-initialize the fill first, then plain
    attributes are applied via mult_setattr.
    """
    logging.debug("customformats: set fill")
    for key, value in fill_dict.items():
        if key == "Pattern":
            fill_object.Patterned(value)
        elif key == "Background":
            fill_object.Background()
        elif key == "Solid":
            fill_object.Solid()
        elif key == "GradientOneColor":
            # saved style/variant can be -2/0 for custom angles; clamp to valid minimum 1
            fill_object.OneColorGradient(max(1, value[0]), max(1, value[1]), value[2])  # style, variant, degree
        elif key == "GradientTwoColor":
            fill_object.TwoColorGradient(max(1, value[0]), max(1, value[1]))  # style, variant
        elif key == "GradientPresetColor":
            fill_object.PresetGradient(max(1, value[0]), max(1, value[1]), value[2])  # style, variant, preset-gradient-type
        elif key == "GradientMultiColor":
            # multi-color gradients cannot be created directly via VBA; start
            # from a two-color gradient, the stops are adjusted below
            fill_object.TwoColorGradient(max(1, value[0]), max(1, value[1]))  # style, variant
        elif key == "GradientStops":
            # FIX: the old forward loop tested "i > len(value)" (off-by-one,
            # leaving one surplus stop) and deleted by ascending index while
            # the 1-based COM collection shrank, skipping stops.  Delete the
            # surplus stops backwards so indices stay valid.
            cur_stops = fill_object.GradientStops.Count
            for stop_index in range(cur_stops, len(value), -1):
                fill_object.GradientStops.Delete(stop_index)
            cur_stops = min(cur_stops, len(value))
            # existing stops are deliberately left untouched (the gradient
            # method above already recreated them); only missing stops are
            # appended and filled with the saved attributes
            for i in range(cur_stops, len(value)):
                fill_object.GradientStops.Insert(1, 1.0)  # rgb, position
                stop_object = fill_object.GradientStops[i + 1]
                for k, v in value[i].items():
                    cls.mult_setattr(stop_object, k, v)
        else:
            cls.mult_setattr(fill_object, key, value)
@classmethod
def _get_line(cls, line_object):
tmp = OrderedDict()
if line_object.Visible == -1:
#NOTE: Line gradient not supported via VBA
tmp['Visible'] = -1
cls._write_color_to_array(tmp, line_object.ForeColor, 'ForeColor')
cls._write_color_to_array(tmp, line_object.BackColor, 'BackColor')
tmp['Style'] = line_object.Style
tmp['DashStyle'] = line_object.DashStyle
tmp['Weight'] = float(line_object.Weight)
tmp['Transparency'] = max(0, float(line_object.Transparency)) #NOTE: transparency can be -2.14748e+09 if line gradient is active
# tmp['InsetPen'] = line_object.InsetPen #NOTE: this property is not accessible via UI as it was default until PPT97
#the following properties are relevant for connectors and special shapes, e.g. freeform-line. other shapes will throw ValueError
tmp['BeginArrowheadLength'] = line_object.BeginArrowheadLength
tmp['BeginArrowheadStyle'] = line_object.BeginArrowheadStyle
tmp['BeginArrowheadWidth'] = line_object.BeginArrowheadWidth
tmp['EndArrowheadLength'] = line_object.EndArrowheadLength
tmp['EndArrowheadStyle'] = line_object.EndArrowheadStyle
tmp['EndArrowheadWidth'] = line_object.EndArrowheadWidth
else:
tmp['Visible'] = 0
return tmp
@classmethod
def _set_line(cls, line_object, line_dict):
    """Restore a LineFormat from a dict written by _get_line."""
    logging.debug("customformats: set line")
    for attr_path, attr_value in line_dict.items():
        cls.mult_setattr(line_object, attr_path, attr_value)
@classmethod
def _get_shadow(cls, shadow_object):
tmp = OrderedDict()
if shadow_object.Visible == -1:
tmp['Visible'] = -1
if shadow_object.Type != -2: #msoShadowMixed
tmp['Type'] = shadow_object.Type
cls._write_color_to_array(tmp, shadow_object.ForeColor, 'ForeColor')
else:
tmp['Style'] = shadow_object.Style
cls._write_color_to_array(tmp, shadow_object.ForeColor, 'ForeColor')
tmp['Size'] = float(shadow_object.Size)
tmp['Blur'] = float(shadow_object.Blur)
tmp['OffsetX'] = float(shadow_object.OffsetX)
tmp['OffsetY'] = float(shadow_object.OffsetY)
tmp['Transparency'] = float(shadow_object.Transparency)
else:
tmp['Visible'] = 0
return tmp
@classmethod
def _set_shadow(cls, shadow_object, shadow_dict):
    """Restore a ShadowFormat from a dict written by _get_shadow."""
    logging.debug("customformats: set shadow")
    for attr_path, attr_value in shadow_dict.items():
        cls.mult_setattr(shadow_object, attr_path, attr_value)
@classmethod
def _get_glow(cls, glow_object):
tmp = OrderedDict()
if glow_object.Radius > 0:
cls._write_color_to_array(tmp, glow_object.Color, 'Color')
tmp['Radius'] = float(glow_object.Radius)
tmp['Transparency'] = float(glow_object.Transparency)
else:
tmp['Radius'] = 0.0
return tmp
@classmethod
def _set_glow(cls, glow_object, glow_dict):
    """Restore a GlowFormat from a dict written by _get_glow."""
    logging.debug("customformats: set glow")
    for attr_path, attr_value in glow_dict.items():
        cls.mult_setattr(glow_object, attr_path, attr_value)
@classmethod
def _get_softedge(cls, softedge_object):
tmp = OrderedDict()
if softedge_object.Radius > 0:
tmp['Type'] = softedge_object.Type
tmp['Radius'] = float(softedge_object.Radius)
else:
tmp['Radius'] = 0.0
return tmp
@classmethod
def _set_softedge(cls, softedge_object, softedge_dict):
    """Restore a SoftEdgeFormat from a dict written by _get_softedge."""
    logging.debug("customformats: set softedge")
    for attr_path, attr_value in softedge_dict.items():
        cls.mult_setattr(softedge_object, attr_path, attr_value)
@classmethod
def _get_reflection(cls, reflection_object):
tmp = OrderedDict()
if reflection_object.Type > 0:
tmp['Type'] = reflection_object.Type
tmp['Blur'] = float(reflection_object.Blur)
tmp['Offset'] = float(reflection_object.Offset)
tmp['Size'] = float(reflection_object.Size)
tmp['Transparency'] = float(reflection_object.Transparency)
else:
tmp['Type'] = 0
return tmp
@classmethod
def _set_reflection(cls, reflection_object, reflection_dict):
    """Restore a ReflectionFormat from a dict written by _get_reflection."""
    logging.debug("customformats: set reflection")
    for attr_path, attr_value in reflection_dict.items():
        cls.mult_setattr(reflection_object, attr_path, attr_value)
@classmethod
def _get_textframe(cls, textframe_object):
tmp = OrderedDict()
tmp['HorizontalAnchor'] = textframe_object.HorizontalAnchor
tmp['VerticalAnchor'] = textframe_object.VerticalAnchor
tmp['Orientation'] = textframe_object.Orientation
tmp['AutoSize'] = textframe_object.AutoSize
tmp['WordWrap'] = textframe_object.WordWrap
tmp['MarginBottom'] = float(textframe_object.MarginBottom)
tmp['MarginLeft'] = float(textframe_object.MarginLeft)
tmp['MarginRight'] = float(textframe_object.MarginRight)
tmp['MarginTop'] = float(textframe_object.MarginTop)
tmp['Column.Number'] = textframe_object.Column.Number
tmp['Column.Spacing'] = float(textframe_object.Column.Spacing)
return tmp
@classmethod
def _set_textframe(cls, textframe_object, textframe_dict):
    """Restore text frame layout from a dict written by _get_textframe."""
    logging.debug("customformats: set textframe")
    for attr_path, attr_value in textframe_dict.items():
        cls.mult_setattr(textframe_object, attr_path, attr_value)
@classmethod
def _get_font(cls, font_object):
    """Serialize a font: basic attributes plus fill/line/effect sub-formats.

    Key order matters for _set_font, which applies keys in insertion order.
    """
    tmp = OrderedDict()
    # basic font settings
    tmp['Name'] = font_object.Name
    tmp['Size'] = float(font_object.Size)
    tmp['Bold'] = font_object.Bold
    tmp['Italic'] = font_object.Italic
    tmp['UnderlineStyle'] = font_object.UnderlineStyle
    if font_object.UnderlineColor.Type > 0:
        cls._write_color_to_array(tmp, font_object.UnderlineColor, 'UnderlineColor')
    tmp['Caps'] = font_object.Caps
    tmp['Strike'] = font_object.Strike
    tmp['Kerning'] = float(font_object.Kerning)
    tmp['Spacing'] = float(font_object.Spacing)
    # fill, line and all effect sub-objects, each via its own serializer
    for sub_format in ('Fill', 'Line', 'Glow', 'Reflection', 'Shadow'):
        getter = getattr(cls, '_get_%s' % sub_format.lower())
        tmp[sub_format] = getter(getattr(font_object, sub_format))
    # NOTE: the Highlight property is not accessible via UI and cannot be
    # disabled via VBA, so it is deliberately not serialized
    return tmp
@classmethod
def _set_font(cls, font_object, font_dict):
    """Restore a font from a dict written by _get_font.

    Sub-format errors (fill/line/shadow/glow/reflection) are logged and
    skipped, so a single failing effect does not abort the whole restore.
    All other keys are applied directly via mult_setattr.
    """
    logging.debug("customformats: set font")
    # FIX: replaces five copy-pasted try/except blocks (each with a bare
    # "except:") by a dispatch table with a single, narrower handler
    sub_setters = {
        "Fill": cls._set_fill,
        "Line": cls._set_line,
        "Shadow": cls._set_shadow,
        "Glow": cls._set_glow,
        "Reflection": cls._set_reflection,
    }
    for key, value in font_dict.items():
        setter = sub_setters.get(key)
        if setter is not None:
            try:
                setter(getattr(font_object, key), value)
            except Exception:
                # message format matches the previous per-key log lines
                logging.error("customformats: error in setting font %s" % key.lower())
        else:
            cls.mult_setattr(font_object, key, value)
@classmethod
def _get_paragraphformat(cls, parfor_object):
    """Serialize a paragraph format into an OrderedDict.

    Covers alignment, line spacing, bullet settings, indents and tab
    stops.  Bullet sub-keys are only stored as far as they apply to the
    bullet type; key insertion order matters for _set_paragraphformat.
    """
    tmp = OrderedDict()
    tmp['Alignment'] = parfor_object.Alignment
    tmp['BaselineAlignment'] = parfor_object.BaselineAlignment
    tmp['LineRuleAfter'] = parfor_object.LineRuleAfter
    tmp['SpaceAfter'] = float(parfor_object.SpaceAfter)
    tmp['LineRuleBefore'] = parfor_object.LineRuleBefore
    tmp['SpaceBefore'] = float(parfor_object.SpaceBefore)
    tmp['LineRuleWithin'] = parfor_object.LineRuleWithin
    tmp['SpaceWithin'] = float(parfor_object.SpaceWithin)
    #Bullet points
    if parfor_object.Bullet.Visible == -1:
        tmp['Bullet.Visible'] = -1
        btype = parfor_object.Bullet.Type
        tmp['Bullet.Type'] = btype
        if btype == 1: #ppBulletUnnumbered
            tmp['Bullet.Character'] = parfor_object.Bullet.Character
        elif btype == 2: #ppBulletNumbered
            tmp['Bullet.Style'] = parfor_object.Bullet.Style
            tmp['Bullet.StartValue'] = parfor_object.Bullet.StartValue
        tmp['Bullet.RelativeSize'] = float(parfor_object.Bullet.RelativeSize)
        # font of the bullet character is only stored if it differs from text
        if parfor_object.Bullet.UseTextFont == -1:
            tmp['Bullet.UseTextFont'] = -1
        else:
            tmp['Bullet.Font.Name'] = parfor_object.Bullet.Font.Name
        if parfor_object.Bullet.UseTextColor == -1:
            tmp['Bullet.UseTextColor'] = -1
        else:
            cls._write_color_to_array(tmp, parfor_object.Bullet.Font.Fill.ForeColor, 'Bullet.Font.Fill.ForeColor')
    else:
        tmp['Bullet.Type'] = 0
        tmp['Bullet.Visible'] = 0
    tmp['FirstLineIndent'] = float(parfor_object.FirstLineIndent)
    tmp['LeftIndent'] = float(parfor_object.LeftIndent)
    tmp['RightIndent'] = float(parfor_object.RightIndent)
    tmp['HangingPunctuation'] = parfor_object.HangingPunctuation
    tmp['TabStops.DefaultSpacing'] = float(parfor_object.TabStops.DefaultSpacing)
    # each tab stop serialized as (type, position) tuple
    tmp['TabStops.Items'] = [(ts.type, float(ts.position)) for ts in parfor_object.TabStops]
    return tmp
@classmethod
def _set_paragraphformat(cls, parfor_object, parfor_dict):
    """Restore a paragraph format from a dict written by _get_paragraphformat."""
    logging.debug("customformats: set parformat")
    for key, value in parfor_dict.items():
        if key != 'TabStops.Items':
            cls.mult_setattr(parfor_object, key, value)
            continue
        # replace all tab stops: clear the existing ones, then re-add the saved ones
        for tabstop in list(iter(parfor_object.TabStops)):
            tabstop.Clear()
        for ts_type, ts_pos in value:
            parfor_object.TabStops.Add(ts_type, ts_pos)
@classmethod
def _get_size(cls, shape):
tmp = OrderedDict()
tmp['Width'] = float(shape.Width)
tmp['Height'] = float(shape.Height)
tmp['LockAspectRatio'] = shape.LockAspectRatio
return tmp
@classmethod
def _set_size(cls, shape, size_dict):
logging.debug("customformats: set size")
shape.LockAspectRatio = 0
shape.Width = size_dict["Width"]
shape.Height = size_dict["Height"]
shape.LockAspectRatio = size_dict["LockAspectRatio"]
@classmethod
def _get_position(cls, shape):
tmp = OrderedDict()
tmp['Left'] = float(shape.Left)
tmp['Top'] = float(shape.Top)
tmp['Rotation'] = float(shape.Rotation)
return tmp
@classmethod
def _set_position(cls, shape, position_dict):
logging.debug("customformats: set position")
shape.Left = position_dict["Left"]
shape.Top = position_dict["Top"]
shape.Rotation = position_dict["Rotation"] | 47.184448 | 162 | 0.573181 |
ace60f702881b1951d9820ac5cdbd359208cdf75 | 416 | py | Python | src/dispatch/plugins/bases/conference.py | axellaurelut/dispatch | 338482d59846dda9aff14e761045b374725ab1bd | [
"Apache-2.0"
] | 3,417 | 2020-02-23T22:54:47.000Z | 2022-03-31T13:01:01.000Z | src/dispatch/plugins/bases/conference.py | axellaurelut/dispatch | 338482d59846dda9aff14e761045b374725ab1bd | [
"Apache-2.0"
] | 607 | 2020-02-24T14:27:02.000Z | 2022-03-30T19:15:39.000Z | src/dispatch/plugins/bases/conference.py | axellaurelut/dispatch | 338482d59846dda9aff14e761045b374725ab1bd | [
"Apache-2.0"
] | 359 | 2020-02-24T19:04:43.000Z | 2022-03-29T06:48:12.000Z | """
.. module: dispatch.plugins.bases.conference
:platform: Unix
:copyright: (c) 2019 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from dispatch.plugins.base import Plugin
class ConferencePlugin(Plugin):
    """Base class for dispatch conference plugins."""

    # Plugin type identifier used by the dispatch plugin registry.
    type = "conference"

    def create(self, items, **kwargs):
        """Create a conference for the given items; concrete plugins must override this."""
        raise NotImplementedError
| 26 | 62 | 0.706731 |
ace60f7195b3ae822d7c61c61e03fb37dbbc211d | 3,240 | py | Python | app/recipe/tests/tests_tags_api.py | sleonvaz/recipe-app-api | 673953dad7a15274de2f1fd9101bbc3d110dbf3b | [
"MIT"
] | null | null | null | app/recipe/tests/tests_tags_api.py | sleonvaz/recipe-app-api | 673953dad7a15274de2f1fd9101bbc3d110dbf3b | [
"MIT"
] | null | null | null | app/recipe/tests/tests_tags_api.py | sleonvaz/recipe-app-api | 673953dad7a15274de2f1fd9101bbc3d110dbf3b | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
# URL of the tag list/create endpoint of the recipe app.
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the publicly available tags API (unauthenticated access)."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Retrieving tags must be rejected for anonymous clients."""
        response = self.client.get(TAGS_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTest(TestCase):
    """Tests for the tags API that require an authenticated user."""

    def setUp(self):
        self.user = get_user_model().objects.create_user(
            'test@gmail.com',
            '123456'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """All tags are returned, ordered by name descending."""
        Tag.objects.create(user=self.user, name="Vegan")
        Tag.objects.create(user=self.user, name="Dessert")

        response = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only tags belonging to the authenticated user are returned."""
        other_user = get_user_model().objects.create_user(
            'other@gmail.com',
            '123456'
        )
        Tag.objects.create(user=other_user, name="Fruity")
        own_tag = Tag.objects.create(user=self.user, name="Comfort Food")

        response = self.client.get(TAGS_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data[0]['name'], own_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid payload creates the tag for the current user."""
        payload = {'name': 'Test tag'}
        self.client.post(TAGS_URL, payload)

        self.assertTrue(
            Tag.objects.filter(user=self.user, name=payload['name']).exists()
        )

    def test_create_tag_invalid(self):
        """POSTing an empty name is rejected with HTTP 400."""
        response = self.client.post(TAGS_URL, {'name': ''})

        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_tags_assigned_to_recipes(self):
        """With assigned_only=1 only tags attached to a recipe are returned."""
        tag_used = Tag.objects.create(user=self.user, name="Breakfast")
        tag_unused = Tag.objects.create(user=self.user, name="Lunch")
        recipe = Recipe.objects.create(
            title='Coriander eggs',
            time_minutes=30,
            price=5.00,
            user=self.user
        )
        recipe.tags.add(tag_used)

        response = self.client.get(TAGS_URL, {'assigned_only': 1})

        self.assertIn(TagSerializer(tag_used).data, response.data)
        self.assertNotIn(TagSerializer(tag_unused).data, response.data)
| 30.857143 | 71 | 0.644753 |
ace60f72670551b9aae0872e9f371f522dabb8ae | 13,181 | py | Python | google/cloud/retail_v2/services/completion_service/async_client.py | renovate-bot/python-retail | da0e20f4b683b550c9964afb9df97a3d82892128 | [
"Apache-2.0"
] | null | null | null | google/cloud/retail_v2/services/completion_service/async_client.py | renovate-bot/python-retail | da0e20f4b683b550c9964afb9df97a3d82892128 | [
"Apache-2.0"
] | null | null | null | google/cloud/retail_v2/services/completion_service/async_client.py | renovate-bot/python-retail | da0e20f4b683b550c9964afb9df97a3d82892128 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.retail_v2.types import completion_service
from google.cloud.retail_v2.types import import_config
from .transports.base import CompletionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import CompletionServiceGrpcAsyncIOTransport
from .client import CompletionServiceClient
class CompletionServiceAsyncClient:
    """Auto-completion service for retail.
    This feature is only available for users who have Retail Search
    enabled. Please submit a form
    `here <https://cloud.google.com/contact>`__ to contact cloud sales
    if you are interested in using Retail Search.
    """

    # NOTE: auto-generated GAPIC client; it delegates all configuration and
    # resource-path helpers to the synchronous CompletionServiceClient.
    _client: CompletionServiceClient

    DEFAULT_ENDPOINT = CompletionServiceClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = CompletionServiceClient.DEFAULT_MTLS_ENDPOINT

    # Resource path builders/parsers, shared with the synchronous client.
    catalog_path = staticmethod(CompletionServiceClient.catalog_path)
    parse_catalog_path = staticmethod(CompletionServiceClient.parse_catalog_path)
    common_billing_account_path = staticmethod(
        CompletionServiceClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        CompletionServiceClient.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(CompletionServiceClient.common_folder_path)
    parse_common_folder_path = staticmethod(
        CompletionServiceClient.parse_common_folder_path
    )
    common_organization_path = staticmethod(
        CompletionServiceClient.common_organization_path
    )
    parse_common_organization_path = staticmethod(
        CompletionServiceClient.parse_common_organization_path
    )
    common_project_path = staticmethod(CompletionServiceClient.common_project_path)
    parse_common_project_path = staticmethod(
        CompletionServiceClient.parse_common_project_path
    )
    common_location_path = staticmethod(CompletionServiceClient.common_location_path)
    parse_common_location_path = staticmethod(
        CompletionServiceClient.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            CompletionServiceAsyncClient: The constructed client.
        """
        return CompletionServiceClient.from_service_account_info.__func__(CompletionServiceAsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            CompletionServiceAsyncClient: The constructed client.
        """
        return CompletionServiceClient.from_service_account_file.__func__(CompletionServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    # Alias kept for backwards compatibility with older client versions.
    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> CompletionServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            CompletionServiceTransport: The transport used by the client instance.
        """
        return self._client.transport

    get_transport_class = functools.partial(
        type(CompletionServiceClient).get_transport_class, type(CompletionServiceClient)
    )

    def __init__(
        self,
        *,
        credentials: ga_credentials.Credentials = None,
        transport: Union[str, CompletionServiceTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the completion service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.CompletionServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All setup is delegated to the synchronous client wrapper.
        self._client = CompletionServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )

    async def complete_query(
        self,
        request: Union[completion_service.CompleteQueryRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> completion_service.CompleteQueryResponse:
        r"""Completes the specified prefix with keyword suggestions.
        This feature is only available for users who have Retail Search
        enabled. Please submit a form
        `here <https://cloud.google.com/contact>`__ to contact cloud
        sales if you are interested in using Retail Search.

        Args:
            request (Union[google.cloud.retail_v2.types.CompleteQueryRequest, dict]):
                The request object. Auto-complete parameters.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.retail_v2.types.CompleteQueryResponse:
                Response of the auto-complete query.
        """
        # Create or coerce a protobuf request object.
        request = completion_service.CompleteQueryRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.complete_query,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("catalog", request.catalog),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def import_completion_data(
        self,
        request: Union[import_config.ImportCompletionDataRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation_async.AsyncOperation:
        r"""Bulk import of processed completion dataset.
        Request processing may be synchronous. Partial updating is not
        supported.
        This feature is only available for users who have Retail Search
        enabled. Please submit a form
        `here <https://cloud.google.com/contact>`__ to contact cloud
        sales if you are interested in using Retail Search.

        Args:
            request (Union[google.cloud.retail_v2.types.ImportCompletionDataRequest, dict]):
                The request object. Request message for
                ImportCompletionData methods.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.retail_v2.types.ImportCompletionDataResponse` Response of the
                [ImportCompletionDataRequest][google.cloud.retail.v2.ImportCompletionDataRequest].

                   If the long running operation is done, this message
                   is returned by the
                   google.longrunning.Operations.response field if the
                   operation is successful.
        """
        # Create or coerce a protobuf request object.
        request = import_config.ImportCompletionDataRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.import_completion_data,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            import_config.ImportCompletionDataResponse,
            metadata_type=import_config.ImportMetadata,
        )

        # Done; return the response.
        return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Close the underlying transport/channel on context-manager exit.
        await self.transport.close()
try:
    # Report the installed google-cloud-retail version in client telemetry headers.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-retail",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from source); fall back to defaults.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

__all__ = ("CompletionServiceAsyncClient",)
| 41.844444 | 146 | 0.687125 |
ace610a9e4bedf4818c7b820178efb23cd6be7ab | 495 | py | Python | demo/zip_two_files.py | neerajchhimwal/gradio | 97a2cb6cbd6babfb08bd31d79784a15df91fc324 | [
"Apache-2.0"
] | 5,481 | 2019-05-27T06:18:02.000Z | 2022-03-31T20:33:43.000Z | demo/zip_two_files.py | neerajchhimwal/gradio | 97a2cb6cbd6babfb08bd31d79784a15df91fc324 | [
"Apache-2.0"
] | 652 | 2019-06-18T20:16:03.000Z | 2022-03-31T19:36:16.000Z | demo/zip_two_files.py | neerajchhimwal/gradio | 97a2cb6cbd6babfb08bd31d79784a15df91fc324 | [
"Apache-2.0"
] | 366 | 2019-07-03T00:32:02.000Z | 2022-03-31T11:32:01.000Z | import gradio as gr
from zipfile import ZipFile
def zip_two_files(file1, file2):
with ZipFile('tmp.zip', 'w') as zipObj:
zipObj.write(file1.name, "file1")
zipObj.write(file2.name, "file2")
return "tmp.zip"
iface = gr.Interface(
zip_two_files,
["file", "file"],
"file",
examples=[
["images/1.jpg", "images/2.jpg"],
["files/titanic.csv", "audio/cantina.wav"],
]
)
iface.test_launch()
if __name__ == "__main__":
iface.launch()
| 19.8 | 51 | 0.60202 |
ace610e01df5da43145f91f7a5d52cc4207150ca | 2,561 | py | Python | pybamboo/tests/test_base.py | makinacorpus/pybamboo | 8e176e51413d44546e8ca7e933ac1dde8451a1ec | [
"BSD-3-Clause"
] | null | null | null | pybamboo/tests/test_base.py | makinacorpus/pybamboo | 8e176e51413d44546e8ca7e933ac1dde8451a1ec | [
"BSD-3-Clause"
] | null | null | null | pybamboo/tests/test_base.py | makinacorpus/pybamboo | 8e176e51413d44546e8ca7e933ac1dde8451a1ec | [
"BSD-3-Clause"
] | null | null | null | import os
import time
import unittest
from pybamboo.connection import Connection, DEFAULT_BAMBOO_URL
class TestBase(unittest.TestCase):
class MockResponse(object):
pass
CSV_FILE = os.getcwd() + '/tests/fixtures/good_eats.csv'
JSON_FILE = os.getcwd() + '/tests/fixtures/good_eats.json'
SCHEMA_FILE = os.getcwd() + '/tests/fixtures/good_eats.schema.json'
AUX_CSV_FILE = os.getcwd() + '/tests/fixtures/good_eats_aux.csv'
NUM_COLS = 15
NUM_ROWS = 19
# use this to forward tests to a different bamboo instance
TEST_BAMBOO_URL = os.environ.get('TEST_BAMBOO_URL', DEFAULT_BAMBOO_URL)
# change this only to test while offline
DEFAULT_BAMBOO_URL = os.environ.get('DEFAULT_BAMBOO_URL',
DEFAULT_BAMBOO_URL)
VERSION_KEYS = [
'version',
'description',
'branch',
'commit',
'version_major',
'version_minor'
]
def setUp(self):
self.bamboo_url = self.TEST_BAMBOO_URL
self.connection = Connection(self.bamboo_url)
self.default_connection = Connection(DEFAULT_BAMBOO_URL)
# these two datasets (if created) will automatically
# get deleted by the test harness
# NOTE: do not reuse these names for tests, they
# should only be created through the helper functions
self.dataset = None
self.aux_dataset = None
# add any additional datasets should be added
# to this list and they will be deleted as well
self.datasets_to_delete = []
def tearDown(self):
self._delete_datasets()
def _cleanup(self, dataset):
self.wait(3) # give some time
self.datasets_to_delete.append(dataset)
def _delete_dataset(self, dataset):
if hasattr(dataset, 'has_aggs_to_remove')\
and dataset.has_aggs_to_remove:
self.wait(3) # wait for them to finish
aggs = dataset.get_aggregate_datasets()
for group, agg in aggs.iteritems():
agg.delete()
dataset.delete()
def _delete_datasets(self):
if self.dataset:
self._delete_dataset(self.dataset)
if self.aux_dataset:
self._delete_dataset(self.aux_dataset)
for dataset in self.datasets_to_delete:
self._delete_dataset(dataset)
def assert_keys_in_dict(self, keys, d):
d_keys = d.keys()
for k in keys:
self.assertTrue(k in d_keys)
def wait(self, seconds=5):
time.sleep(seconds)
| 31.617284 | 75 | 0.636861 |
ace610f8d7b98750f00dbdb49d96977180fc6dd7 | 935 | py | Python | app/helpers.py | teamgivn/givnapp | 5daf19ca24a794eb92e5b7b5dcb0397b5d0429e1 | [
"MIT"
] | 1 | 2015-04-21T09:07:11.000Z | 2015-04-21T09:07:11.000Z | app/helpers.py | teamgivn/givnapp | 5daf19ca24a794eb92e5b7b5dcb0397b5d0429e1 | [
"MIT"
] | null | null | null | app/helpers.py | teamgivn/givnapp | 5daf19ca24a794eb92e5b7b5dcb0397b5d0429e1 | [
"MIT"
] | null | null | null | from os import urandom, path
from datetime import datetime
from functools import wraps
import json
from flask import session, g, redirect, current_app, jsonify
from flask.ext.login import login_required as flask_ext_login_required
from flask.ext.login import current_user as flask_ext_current_user
from flask.ext.mail import Message
from app import mail, db
login_required = flask_ext_login_required
current_user = flask_ext_current_user
def send_email(recipients, subject, body):
"""
Send the awaiting for confirmation mail to the user.
"""
msg = Message(subject=subject, sender=current_app.config['MAIL_SENDER'], recipients=recipients)
msg.body = body
# msg.html = '<b>HTML</b> body'
with current_app.app_context():
mail.send(msg)
return True
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in current_app.config['ALLOWED_EXTENSIONS'] | 33.392857 | 99 | 0.747594 |
ace6114a7de83b9c01165c708c5d7dfb2a8013ff | 2,926 | py | Python | curses/menu_exp.py | Wrench56/Chat | e0812732fe86c6842118d0f6eafc545caa444d8b | [
"MIT"
] | null | null | null | curses/menu_exp.py | Wrench56/Chat | e0812732fe86c6842118d0f6eafc545caa444d8b | [
"MIT"
] | null | null | null | curses/menu_exp.py | Wrench56/Chat | e0812732fe86c6842118d0f6eafc545caa444d8b | [
"MIT"
] | null | null | null | #menu
import sys,os
import curses
def draw_menu(stdscr):
k = 0
cursor_x = 0
cursor_y = 0
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
# Loop where k is the last character pressed
while (k != ord('q')):
# Initialization
stdscr.clear()
height, width = stdscr.getmaxyx()
if k == curses.KEY_DOWN:
cursor_y = cursor_y + 1
elif k == curses.KEY_UP:
cursor_y = cursor_y - 1
elif k == curses.KEY_RIGHT:
cursor_x = cursor_x + 1
elif k == curses.KEY_LEFT:
cursor_x = cursor_x - 1
cursor_x = max(0, cursor_x)
cursor_x = min(width-1, cursor_x)
cursor_y = max(0, cursor_y)
cursor_y = min(height-1, cursor_y)
# Declaration of strings
title = "Curses example"[:width-1]
subtitle = "Written by Clay McLeod"[:width-1]
keystr = "Last key pressed: {}".format(k)[:width-1]
statusbarstr = "Press 'q' to exit | STATUS BAR | Pos: {}, {}".format(cursor_x, cursor_y)
if k == 0:
keystr = "No key press detected..."[:width-1]
# Centering calculations
start_x_title = int((width // 2) - (len(title) // 2) - len(title) % 2)
start_x_subtitle = int((width // 2) - (len(subtitle) // 2) - len(subtitle) % 2)
start_x_keystr = int((width // 2) - (len(keystr) // 2) - len(keystr) % 2)
start_y = int((height // 2) - 2)
# Rendering some text
whstr = "Width: {}, Height: {}".format(width, height)
stdscr.addstr(0, 0, whstr, curses.color_pair(1))
# Render status bar
stdscr.attron(curses.color_pair(3))
stdscr.addstr(height-1, 0, statusbarstr)
stdscr.addstr(height-1, len(statusbarstr), " " * (width - len(statusbarstr) - 1))
stdscr.attroff(curses.color_pair(3))
# Turning on attributes for title
stdscr.attron(curses.color_pair(2))
stdscr.attron(curses.A_BOLD)
# Rendering title
stdscr.addstr(start_y, start_x_title, title)
# Turning off attributes for title
stdscr.attroff(curses.color_pair(2))
stdscr.attroff(curses.A_BOLD)
# Print rest of text
stdscr.addstr(start_y + 1, start_x_subtitle, subtitle)
stdscr.addstr(start_y + 3, (width // 2) - 2, '-' * 4)
stdscr.addstr(start_y + 5, start_x_keystr, keystr)
stdscr.move(cursor_y, cursor_x)
# Refresh the screen
stdscr.refresh()
# Wait for next input
k = stdscr.getch()
def main():
curses.wrapper(draw_menu)
if __name__ == "__main__":
main() | 31.462366 | 96 | 0.592276 |
ace611a751735ea9fb1fea8ae0e540d58f575624 | 409 | py | Python | src/blogProject_main/wsgi.py | SleepNoMore/django_simple_blog_site | b44c54123f661db1c00c188d2b39246db45bce0b | [
"MIT"
] | null | null | null | src/blogProject_main/wsgi.py | SleepNoMore/django_simple_blog_site | b44c54123f661db1c00c188d2b39246db45bce0b | [
"MIT"
] | null | null | null | src/blogProject_main/wsgi.py | SleepNoMore/django_simple_blog_site | b44c54123f661db1c00c188d2b39246db45bce0b | [
"MIT"
] | null | null | null | """
WSGI config for blogProject_main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'blogProject_main.settings')
application = get_wsgi_application()
| 24.058824 | 78 | 0.794621 |
ace611bc92c9ca89b58863cd1cac0b2b92afd586 | 17,875 | py | Python | yt_dlp/extractor/rcs.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/rcs.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 53 | 2017-04-12T19:53:18.000Z | 2022-02-22T10:33:13.000Z | yt_dlp/extractor/rcs.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
ExtractorError,
js_to_json,
base_url,
url_basename,
urljoin,
)
class RCSBaseIE(InfoExtractor):
# based on VideoPlayerLoader.prototype.getVideoSrc
# and VideoPlayerLoader.prototype.transformSrc from
# https://js2.corriereobjects.it/includes2013/LIBS/js/corriere_video.sjs
_ALL_REPLACE = {
'media2vam.corriere.it.edgesuite.net':
'media2vam-corriere-it.akamaized.net',
'media.youreporter.it.edgesuite.net':
'media-youreporter-it.akamaized.net',
'corrierepmd.corriere.it.edgesuite.net':
'corrierepmd-corriere-it.akamaized.net',
'media2vam-corriere-it.akamaized.net/fcs.quotidiani/vr/videos/':
'video.corriere.it/vr360/videos/',
'.net//': '.net/',
}
_MP4_REPLACE = {
'media2vam.corbologna.corriere.it.edgesuite.net':
'media2vam-bologna-corriere-it.akamaized.net',
'media2vam.corfiorentino.corriere.it.edgesuite.net':
'media2vam-fiorentino-corriere-it.akamaized.net',
'media2vam.cormezzogiorno.corriere.it.edgesuite.net':
'media2vam-mezzogiorno-corriere-it.akamaized.net',
'media2vam.corveneto.corriere.it.edgesuite.net':
'media2vam-veneto-corriere-it.akamaized.net',
'media2.oggi.it.edgesuite.net':
'media2-oggi-it.akamaized.net',
'media2.quimamme.it.edgesuite.net':
'media2-quimamme-it.akamaized.net',
'media2.amica.it.edgesuite.net':
'media2-amica-it.akamaized.net',
'media2.living.corriere.it.edgesuite.net':
'media2-living-corriere-it.akamaized.net',
'media2.style.corriere.it.edgesuite.net':
'media2-style-corriere-it.akamaized.net',
'media2.iodonna.it.edgesuite.net':
'media2-iodonna-it.akamaized.net',
'media2.leitv.it.edgesuite.net':
'media2-leitv-it.akamaized.net',
}
_MIGRATION_MAP = {
'videoamica-vh.akamaihd': 'amica',
'media2-amica-it.akamaized': 'amica',
'corrierevam-vh.akamaihd': 'corriere',
'media2vam-corriere-it.akamaized': 'corriere',
'cormezzogiorno-vh.akamaihd': 'corrieredelmezzogiorno',
'media2vam-mezzogiorno-corriere-it.akamaized': 'corrieredelmezzogiorno',
'corveneto-vh.akamaihd': 'corrieredelveneto',
'media2vam-veneto-corriere-it.akamaized': 'corrieredelveneto',
'corbologna-vh.akamaihd': 'corrieredibologna',
'media2vam-bologna-corriere-it.akamaized': 'corrieredibologna',
'corfiorentino-vh.akamaihd': 'corrierefiorentino',
'media2vam-fiorentino-corriere-it.akamaized': 'corrierefiorentino',
'corinnovazione-vh.akamaihd': 'corriereinnovazione',
'media2-gazzanet-gazzetta-it.akamaized': 'gazzanet',
'videogazzanet-vh.akamaihd': 'gazzanet',
'videogazzaworld-vh.akamaihd': 'gazzaworld',
'gazzettavam-vh.akamaihd': 'gazzetta',
'media2vam-gazzetta-it.akamaized': 'gazzetta',
'videoiodonna-vh.akamaihd': 'iodonna',
'media2-leitv-it.akamaized': 'leitv',
'videoleitv-vh.akamaihd': 'leitv',
'videoliving-vh.akamaihd': 'living',
'media2-living-corriere-it.akamaized': 'living',
'media2-oggi-it.akamaized': 'oggi',
'videooggi-vh.akamaihd': 'oggi',
'media2-quimamme-it.akamaized': 'quimamme',
'quimamme-vh.akamaihd': 'quimamme',
'videorunning-vh.akamaihd': 'running',
'media2-style-corriere-it.akamaized': 'style',
'style-vh.akamaihd': 'style',
'videostyle-vh.akamaihd': 'style',
'media2-stylepiccoli-it.akamaized': 'stylepiccoli',
'stylepiccoli-vh.akamaihd': 'stylepiccoli',
'doveviaggi-vh.akamaihd': 'viaggi',
'media2-doveviaggi-it.akamaized': 'viaggi',
'media2-vivimilano-corriere-it.akamaized': 'vivimilano',
'vivimilano-vh.akamaihd': 'vivimilano',
'media2-youreporter-it.akamaized': 'youreporter'
}
_MIGRATION_MEDIA = {
'advrcs-vh.akamaihd': '',
'corriere-f.akamaihd': '',
'corrierepmd-corriere-it.akamaized': '',
'corrprotetto-vh.akamaihd': '',
'gazzetta-f.akamaihd': '',
'gazzettapmd-gazzetta-it.akamaized': '',
'gazzprotetto-vh.akamaihd': '',
'periodici-f.akamaihd': '',
'periodicisecure-vh.akamaihd': '',
'videocoracademy-vh.akamaihd': ''
}
def _get_video_src(self, video):
mediaFiles = video.get('mediaProfile').get('mediaFile')
src = {}
# audio
if video.get('mediaType') == 'AUDIO':
for aud in mediaFiles:
# todo: check
src['mp3'] = aud.get('value')
# video
else:
for vid in mediaFiles:
if vid.get('mimeType') == 'application/vnd.apple.mpegurl':
src['m3u8'] = vid.get('value')
if vid.get('mimeType') == 'video/mp4':
src['mp4'] = vid.get('value')
# replace host
for t in src:
for s, r in self._ALL_REPLACE.items():
src[t] = src[t].replace(s, r)
for s, r in self._MP4_REPLACE.items():
src[t] = src[t].replace(s, r)
# switch cdn
if 'mp4' in src and 'm3u8' in src:
if ('-lh.akamaihd' not in src.get('m3u8')
and 'akamai' in src.get('mp4')):
if 'm3u8' in src:
matches = re.search(r'(?:https*:)?\/\/(?P<host>.*)\.net\/i(?P<path>.*)$', src.get('m3u8'))
src['m3u8'] = 'https://vod.rcsobjects.it/hls/%s%s' % (
self._MIGRATION_MAP[matches.group('host')],
matches.group('path').replace(
'///', '/').replace(
'//', '/').replace(
'.csmil', '.urlset'
)
)
if 'mp4' in src:
matches = re.search(r'(?:https*:)?\/\/(?P<host>.*)\.net\/i(?P<path>.*)$', src.get('mp4'))
if matches:
if matches.group('host') in self._MIGRATION_MEDIA:
vh_stream = 'https://media2.corriereobjects.it'
if src.get('mp4').find('fcs.quotidiani_!'):
vh_stream = 'https://media2-it.corriereobjects.it'
src['mp4'] = '%s%s' % (
vh_stream,
matches.group('path').replace(
'///', '/').replace(
'//', '/').replace(
'/fcs.quotidiani/mediacenter', '').replace(
'/fcs.quotidiani_!/mediacenter', '').replace(
'corriere/content/mediacenter/', '').replace(
'gazzetta/content/mediacenter/', '')
)
else:
src['mp4'] = 'https://vod.rcsobjects.it/%s%s' % (
self._MIGRATION_MAP[matches.group('host')],
matches.group('path').replace('///', '/').replace('//', '/')
)
if 'mp3' in src:
src['mp3'] = src.get('mp3').replace(
'media2vam-corriere-it.akamaized.net',
'vod.rcsobjects.it/corriere')
if 'mp4' in src:
if src.get('mp4').find('fcs.quotidiani_!'):
src['mp4'] = src.get('mp4').replace('vod.rcsobjects', 'vod-it.rcsobjects')
if 'm3u8' in src:
if src.get('m3u8').find('fcs.quotidiani_!'):
src['m3u8'] = src.get('m3u8').replace('vod.rcsobjects', 'vod-it.rcsobjects')
if 'geoblocking' in video.get('mediaProfile'):
if 'm3u8' in src:
src['m3u8'] = src.get('m3u8').replace('vod.rcsobjects', 'vod-it.rcsobjects')
if 'mp4' in src:
src['mp4'] = src.get('mp4').replace('vod.rcsobjects', 'vod-it.rcsobjects')
if 'm3u8' in src:
if src.get('m3u8').find('csmil') and src.get('m3u8').find('vod'):
src['m3u8'] = src.get('m3u8').replace('.csmil', '.urlset')
return src
def _create_formats(self, urls, video_id):
formats = []
formats = self._extract_m3u8_formats(
urls.get('m3u8'), video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False)
if urls.get('mp4'):
formats.append({
'format_id': 'http-mp4',
'url': urls['mp4']
})
self._sort_formats(formats)
return formats
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
if 'cdn' not in mobj.groupdict():
raise ExtractorError('CDN not found in url: %s' % url)
# for leitv/youreporter/viaggi don't use the embed page
if ((mobj.group('cdn') not in ['leitv.it', 'youreporter.it'])
and (mobj.group('vid') == 'video')):
url = 'https://video.%s/video-embed/%s' % (mobj.group('cdn'), video_id)
page = self._download_webpage(url, video_id)
video_data = None
# look for json video data url
json = self._search_regex(
r'''(?x)url\s*=\s*(["'])
(?P<url>
(?:https?:)?//video\.rcs\.it
/fragment-includes/video-includes/.+?\.json
)\1;''',
page, video_id, group='url', default=None)
if json:
if json.startswith('//'):
json = 'https:%s' % json
video_data = self._download_json(json, video_id)
# if json url not found, look for json video data directly in the page
else:
# RCS normal pages and most of the embeds
json = self._search_regex(
r'[\s;]video\s*=\s*({[\s\S]+?})(?:;|,playlist=)',
page, video_id, default=None)
if not json and 'video-embed' in url:
page = self._download_webpage(url.replace('video-embed', 'video-json'), video_id)
json = self._search_regex(
r'##start-video##({[\s\S]+?})##end-video##',
page, video_id, default=None)
if not json:
# if no video data found try search for iframes
emb = RCSEmbedsIE._extract_url(page)
if emb:
return {
'_type': 'url_transparent',
'url': emb,
'ie_key': RCSEmbedsIE.ie_key()
}
if json:
video_data = self._parse_json(
json, video_id, transform_source=js_to_json)
if not video_data:
raise ExtractorError('Video data not found in the page')
formats = self._create_formats(
self._get_video_src(video_data), video_id)
description = (video_data.get('description')
or clean_html(video_data.get('htmlDescription'))
or self._html_search_meta('description', page))
uploader = video_data.get('provider') or mobj.group('cdn')
return {
'id': video_id,
'title': video_data.get('title'),
'description': description,
'uploader': uploader,
'formats': formats
}
class RCSEmbedsIE(RCSBaseIE):
_VALID_URL = r'''(?x)
https?://(?P<vid>video)\.
(?P<cdn>
(?:
rcs|
(?:corriere\w+\.)?corriere|
(?:gazzanet\.)?gazzetta
)\.it)
/video-embed/(?P<id>[^/=&\?]+?)(?:$|\?)'''
_TESTS = [{
'url': 'https://video.rcs.it/video-embed/iodonna-0001585037',
'md5': '623ecc8ffe7299b2d0c1046d8331a9df',
'info_dict': {
'id': 'iodonna-0001585037',
'ext': 'mp4',
'title': 'Sky Arte racconta Madonna nella serie "Artist to icon"',
'description': 'md5:65b09633df9ffee57f48b39e34c9e067',
'uploader': 'rcs.it',
}
}, {
# redownload the page changing 'video-embed' in 'video-json'
'url': 'https://video.gazzanet.gazzetta.it/video-embed/gazzanet-mo05-0000260789',
'md5': 'a043e3fecbe4d9ed7fc5d888652a5440',
'info_dict': {
'id': 'gazzanet-mo05-0000260789',
'ext': 'mp4',
'title': 'Valentino Rossi e papà Graziano si divertono col drifting',
'description': 'md5:a8bf90d6adafd9815f70fc74c0fc370a',
'uploader': 'rcd',
}
}, {
'url': 'https://video.corriere.it/video-embed/b727632a-f9d0-11ea-91b0-38d50a849abb?player',
'match_only': True
}, {
'url': 'https://video.gazzetta.it/video-embed/49612410-00ca-11eb-bcd8-30d4253e0140',
'match_only': True
}]
@staticmethod
def _sanitize_urls(urls):
# add protocol if missing
for i, e in enumerate(urls):
if e.startswith('//'):
urls[i] = 'https:%s' % e
# clean iframes urls
for i, e in enumerate(urls):
urls[i] = urljoin(base_url(e), url_basename(e))
return urls
@staticmethod
def _extract_urls(webpage):
entries = [
mobj.group('url')
for mobj in re.finditer(r'''(?x)
(?:
data-frame-src=|
<iframe[^\n]+src=
)
(["'])
(?P<url>(?:https?:)?//video\.
(?:
rcs|
(?:corriere\w+\.)?corriere|
(?:gazzanet\.)?gazzetta
)
\.it/video-embed/.+?)
\1''', webpage)]
return RCSEmbedsIE._sanitize_urls(entries)
@staticmethod
def _extract_url(webpage):
urls = RCSEmbedsIE._extract_urls(webpage)
return urls[0] if urls else None
class RCSIE(RCSBaseIE):
_VALID_URL = r'''(?x)https?://(?P<vid>video|viaggi)\.
(?P<cdn>
(?:
corrieredelmezzogiorno\.
|corrieredelveneto\.
|corrieredibologna\.
|corrierefiorentino\.
)?corriere\.it
|(?:gazzanet\.)?gazzetta\.it)
/(?!video-embed/).+?/(?P<id>[^/\?]+)(?=\?|/$|$)'''
_TESTS = [{
'url': 'https://video.corriere.it/sport/formula-1/vettel-guida-ferrari-sf90-mugello-suo-fianco-c-elecrerc-bendato-video-esilarante/b727632a-f9d0-11ea-91b0-38d50a849abb',
'md5': '0f4ededc202b0f00b6e509d831e2dcda',
'info_dict': {
'id': 'b727632a-f9d0-11ea-91b0-38d50a849abb',
'ext': 'mp4',
'title': 'Vettel guida la Ferrari SF90 al Mugello e al suo fianco c\'è Leclerc (bendato): il video è esilarante',
'description': 'md5:93b51c9161ac8a64fb2f997b054d0152',
'uploader': 'Corriere Tv',
}
}, {
# video data inside iframe
'url': 'https://viaggi.corriere.it/video/norvegia-il-nuovo-ponte-spettacolare-sopra-la-cascata-di-voringsfossen/',
'md5': 'da378e4918d2afbf7d61c35abb948d4c',
'info_dict': {
'id': '5b7cd134-e2c1-11ea-89b3-b56dd0df2aa2',
'ext': 'mp4',
'title': 'La nuova spettacolare attrazione in Norvegia: il ponte sopra Vøringsfossen',
'description': 'md5:18b35a291f6746c0c8dacd16e5f5f4f8',
'uploader': 'DOVE Viaggi',
}
}, {
'url': 'https://video.gazzetta.it/video-motogp-catalogna-cadute-dovizioso-vale-rossi/49612410-00ca-11eb-bcd8-30d4253e0140?vclk=Videobar',
'md5': 'eedc1b5defd18e67383afef51ff7bdf9',
'info_dict': {
'id': '49612410-00ca-11eb-bcd8-30d4253e0140',
'ext': 'mp4',
'title': 'Dovizioso, il contatto con Zarco e la caduta. E anche Vale finisce a terra',
'description': 'md5:8c6e905dc3b9413218beca11ebd69778',
'uploader': 'AMorici',
}
}, {
'url': 'https://video.corriere.it/video-360/metro-copenaghen-tutta-italiana/a248a7f0-e2db-11e9-9830-af2de6b1f945',
'match_only': True
}]
class RCSVariousIE(RCSBaseIE):
_VALID_URL = r'''(?x)https?://www\.
(?P<cdn>
leitv\.it|
youreporter\.it
)/(?:[^/]+/)?(?P<id>[^/]+?)(?:$|\?|/)'''
_TESTS = [{
'url': 'https://www.leitv.it/benessere/mal-di-testa-come-combatterlo-ed-evitarne-la-comparsa/',
'md5': '92b4e63667b8f95acb0a04da25ae28a1',
'info_dict': {
'id': 'mal-di-testa-come-combatterlo-ed-evitarne-la-comparsa',
'ext': 'mp4',
'title': 'Cervicalgia e mal di testa, il video con i suggerimenti dell\'esperto',
'description': 'md5:ae21418f34cee0b8d02a487f55bcabb5',
'uploader': 'leitv.it',
}
}, {
'url': 'https://www.youreporter.it/fiume-sesia-3-ottobre-2020/',
'md5': '8dccd436b47a830bab5b4a88232f391a',
'info_dict': {
'id': 'fiume-sesia-3-ottobre-2020',
'ext': 'mp4',
'title': 'Fiume Sesia 3 ottobre 2020',
'description': 'md5:0070eef1cc884d13c970a4125063de55',
'uploader': 'youreporter.it',
}
}]
| 41.764019 | 177 | 0.523413 |
ace6127feee5bfcf6534039a98184e544641fdee | 1,822 | py | Python | altimeter/aws/resource/eks/cluster.py | elliotsegler/altimeter | c3924524938b4bae86b1acda2a4fc3f79ac523ff | [
"MIT"
] | 48 | 2019-11-06T03:20:53.000Z | 2022-02-22T21:10:45.000Z | altimeter/aws/resource/eks/cluster.py | elliotsegler/altimeter | c3924524938b4bae86b1acda2a4fc3f79ac523ff | [
"MIT"
] | 27 | 2020-01-07T23:48:30.000Z | 2022-02-26T00:24:04.000Z | altimeter/aws/resource/eks/cluster.py | elliotsegler/altimeter | c3924524938b4bae86b1acda2a4fc3f79ac523ff | [
"MIT"
] | 21 | 2019-12-20T03:06:35.000Z | 2021-12-15T23:26:00.000Z | """Resource for Clusters"""
from typing import Type
from botocore.client import BaseClient
from botocore.exceptions import ClientError
from altimeter.aws.resource.resource_spec import ListFromAWSResult
from altimeter.aws.resource.eks import EKSResourceSpec
from altimeter.core.graph.field.scalar_field import ScalarField
from altimeter.core.graph.schema import Schema
class EKSClusterResourceSpec(EKSResourceSpec):
"""Resource for Clusters"""
type_name = "cluster"
schema = Schema(ScalarField("Name"),)
@classmethod
def list_from_aws(
cls: Type["EKSClusterResourceSpec"], client: BaseClient, account_id: str, region: str
) -> ListFromAWSResult:
"""Return a dict of dicts of the format:
{'cluster_1_arn': {cluster_1_dict},
'cluster_2_arn': {cluster_2_dict},
...}
Where the dicts represent results from list_clusters."""
clusters = {}
try:
paginator = client.get_paginator("list_clusters")
for resp in paginator.paginate():
for cluster_name in resp.get("clusters", []):
resource_arn = cls.generate_arn(
account_id=account_id, region=region, resource_id=cluster_name
)
clusters[resource_arn] = {"Name": cluster_name}
except ClientError as c_e:
response_error = getattr(c_e, "response", {}).get("Error", {})
error_code = response_error.get("Code", "")
if error_code != "AccessDeniedException":
raise c_e
error_msg = response_error.get("Message", "")
if error_msg != f"Account {account_id} is not authorized to use this service":
raise c_e
return ListFromAWSResult(resources=clusters)
| 37.958333 | 93 | 0.63831 |
ace613432d3b27b9b2996005bec2da353a240f28 | 44,244 | py | Python | comtrade.py | dparrini/python-comtrade | cc0db59562432bb0bd20ca6cd134ebf62c1885ad | [
"MIT"
] | 39 | 2019-01-15T09:50:41.000Z | 2022-02-27T23:28:42.000Z | comtrade.py | yathivunnam/python-comtrade | cc0db59562432bb0bd20ca6cd134ebf62c1885ad | [
"MIT"
] | 27 | 2019-04-18T19:14:17.000Z | 2022-03-11T08:07:31.000Z | comtrade.py | yathivunnam/python-comtrade | cc0db59562432bb0bd20ca6cd134ebf62c1885ad | [
"MIT"
] | 32 | 2019-04-18T07:12:59.000Z | 2022-03-03T23:41:54.000Z | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2018 David Rodrigues Parrini
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import array
import datetime as dt
import errno
import io
import math
import os
import re
import struct
import sys
import warnings
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
# COMTRADE standard revisions (values as they appear in the CFG "rev_year" field)
REV_1991 = "1991"
REV_1999 = "1999"
REV_2013 = "2013"

# DAT file format types
TYPE_ASCII = "ASCII"
TYPE_BINARY = "BINARY"
TYPE_BINARY32 = "BINARY32"
TYPE_FLOAT32 = "FLOAT32"

# Special values
# Sentinel for a missing timestamp: all bits set in a 32-bit field.
TIMESTAMP_MISSING = 0xFFFFFFFF

# CFF headers
# Case-insensitive match for CFF section markers such as
# "--- file type: CFG ---" or "--- file type: DAT BINARY: 1234 ---".
# Captures: (1) section type, (2) optional data format, (3) optional byte count.
CFF_HEADER_REXP = r"(?i)--- file type: ([a-z]+)(?:\s+([a-z0-9]+)(?:\s*\:\s*([0-9]+))?)? ---$"

# common separator character of data fields of CFG and ASCII DAT files
SEPARATOR = ","

# timestamp regular expression
# Date "xx/yy/zzzz" (field order depends on the standard revision) and time
# "hh:mm:ss" with an optional fraction of second of up to 12 digits.
re_date = re.compile(r"([0-9]{1,2})/([0-9]{1,2})/([0-9]{2,4})")
re_time = re.compile(r"([0-9]{1,2}):([0-9]{2}):([0-9]{2})(\.([0-9]{1,12}))?")

# Non-standard revision warning
WARNING_UNKNOWN_REVISION = "Unknown standard revision \"{}\""
# Date time with nanoseconds resolution warning
WARNING_DATETIME_NANO = "Unsupported datetime objects with nanoseconds \
resolution. Using truncated values."
# Date time with year 0, month 0 and/or day 0.
WARNING_MINDATE = "Missing date values. Using minimum values: {}."
def _read_sep_values(line, expected: int = -1, default: str = ''):
    """Split a comma separated line into stripped field values.

    When `expected` is -1 or the line has exactly `expected` fields, the
    stripped fields are returned as a tuple.  Otherwise a list of exactly
    `expected` items is returned, truncating extra fields and padding
    missing ones with `default`.
    """
    fields = tuple(cell.strip() for cell in line.split(SEPARATOR))
    if expected == -1 or len(fields) == expected:
        return fields
    normalized = []
    for position in range(expected):
        normalized.append(fields[position] if position < len(fields) else default)
    return normalized
def _preallocate_values(array_type, size, use_numpy_arrays):
    """Allocate a zero-filled buffer with `size` elements.

    `array_type` is an `array.array` type code ("f" for float samples,
    "i" for integer samples).  When numpy is installed and
    `use_numpy_arrays` is true, a numpy array of the matching dtype is
    returned instead of a stdlib `array.array`.
    """
    if HAS_NUMPY and use_numpy_arrays:
        numpy_dtypes = {"f": "float32", "i": "int32"}
        return numpy.zeros(size, dtype=numpy_dtypes[array_type])
    return array.array(array_type, [0] * size)
def _prevent_null(str_value: str, value_type: type, default_value):
if len(str_value.strip()) == 0:
return default_value
else:
return value_type(str_value)
def _get_date(date_str: str) -> tuple:
    """Extract the three numeric fields of a "xx/yy/zzzz" date string.

    The fields are returned in their textual order; the caller decides
    whether the layout is dd/mm/yyyy or mm/dd/yyyy (it depends on the
    standard revision).  Returns (0, 0, 0) when the string does not match
    the expected pattern.
    """
    match = re_date.match(date_str)
    if match is None:
        return 0, 0, 0
    return int(match.group(1)), int(match.group(2)), int(match.group(3))
def _get_time(time_str: str, ignore_warnings: bool = False) -> tuple:
    """Parse a "hh:mm:ss[.f...]" time field.

    Returns a tuple (hour, minute, second, microsecond, in_nanoseconds)
    where `in_nanoseconds` tells whether the fraction of second was given
    with more than 6 digits (nanoseconds or finer resolution).  Values
    finer than microseconds are truncated, since datetime does not
    support them, and a warning is emitted unless `ignore_warnings`.

    Returns (0, 0, 0, 0, False) when the string does not match the
    expected format, mirroring _get_date's zeroed fallback.
    """
    m = re_time.match(time_str)
    if m is None:
        # Consistent with _get_date: an unparsable field yields zeroed
        # values instead of an implicit None that would break unpacking.
        return 0, 0, 0, 0, False
    hour = int(m.group(1))
    minute = int(m.group(2))
    second = int(m.group(3))
    # Group 5 holds the fraction-of-second digits; it is None when the
    # optional ".ffffff" part is absent (e.g. "10:30:00"), which must be
    # treated as a zero fraction instead of crashing on len(None).
    fracsec_str = m.group(5) or ""
    # Pad fraction of seconds with 0s to the right: up to 6 digits it is
    # a microsecond count, beyond that it is normalized to 9 digits.
    if len(fracsec_str) <= 6:
        fracsec_str = fill_with_zeros_to_the_right(fracsec_str, 6)
    else:
        fracsec_str = fill_with_zeros_to_the_right(fracsec_str, 9)
    frac_second = int(fracsec_str)
    in_nanoseconds = len(fracsec_str) > 6
    microsecond = frac_second
    if in_nanoseconds:
        # Nanoseconds resolution is not supported by datetime module, so it's
        # converted to integer below.
        if not ignore_warnings:
            warnings.warn(Warning(WARNING_DATETIME_NANO))
        microsecond = int(microsecond * 1E-3)
    return hour, minute, second, microsecond, in_nanoseconds
def fill_with_zeros_to_the_right(number_str: str, width: int) -> str:
    """Right-pad `number_str` with "0" characters up to `width` characters.

    Strings already `width` characters long or longer are returned
    unchanged.  Used to normalize fraction-of-second digit strings.
    """
    # str.ljust implements exactly this pad-or-leave-unchanged behavior.
    return number_str.ljust(width, "0")
def _read_timestamp(timestamp_line: str, rev_year: str, ignore_warnings: bool = False) -> tuple:
    """Parse a comma separated "date,time" pair from a CFG file.

    Returns a tuple (timestamp, nanosec) where timestamp is a naive
    datetime.datetime and nanosec tells whether the fraction of second
    was given with nanoseconds resolution.

    The date field order depends on `rev_year` (mm/dd/yyyy for the 1991
    revision, dd/mm/yyyy otherwise).  Empty strings, empty pairs or
    zeroed date fields are clamped to datetime's minimum valid values
    (year 1, month 1, day 1) and a warning is emitted unless
    `ignore_warnings` is set.
    """
    # Zeroed defaults; zero date fields are clamped to minimums below.
    day, month, year, hour, minute, second, microsecond = (0,)*7
    nanosec = False
    if len(timestamp_line.strip()) > 0:
        values = _read_sep_values(timestamp_line, 2)
        if len(values) >= 2:
            date_str, time_str = values[0:2]
            if len(date_str.strip()) > 0:
                # 1991 Format Uses mm/dd/yyyy format
                if rev_year == REV_1991:
                    month, day, year = _get_date(date_str)

                # Modern Formats Use dd/mm/yyyy format
                else:
                    day, month, year = _get_date(date_str)

            if len(time_str.strip()) > 0:
                hour, minute, second, microsecond, \
                    nanosec = _get_time(time_str, ignore_warnings)

    # datetime rejects year/month/day of 0, so missing components are
    # replaced with the minimum valid values and the substitution is
    # reported once, after the timestamp is built.
    using_min_data = False
    if year <= 0:
        year = dt.MINYEAR
        using_min_data = True
    if month <= 0:
        month = 1
        using_min_data = True
    if day <= 0:
        day = 1
        using_min_data = True
    # Timezone info unsupported
    tzinfo = None
    timestamp = dt.datetime(year, month, day, hour, minute, second,
                            microsecond, tzinfo)
    if not ignore_warnings and using_min_data:
        warnings.warn(Warning(WARNING_MINDATE.format(str(timestamp))))
    return timestamp, nanosec
def _file_is_utf8(file_path):
    """Whether the file at `file_path` looks UTF-8 encoded.

    Opens the file with the platform default encoding and delegates the
    detection to _stream_is_utf8.  Returns False when the file does not
    exist.
    """
    # EAFP: opening directly avoids the exists()/open() race condition
    # (the file could disappear between the check and the open).
    try:
        with open(file_path, "r") as file:
            return _stream_is_utf8(file)
    except FileNotFoundError:
        # Same result the previous os.path.exists() guard produced.
        return False
def _stream_is_utf8(stream):
try:
contents = stream.readlines()
except UnicodeDecodeError as exception:
return True
return False
class Cfg:
    """Parses and stores Comtrade's CFG data.

    Holds station/device identification, channel descriptions, sampling
    information and timestamps read from a COMTRADE configuration (CFG)
    file, exposing them through read-only properties.
    """

    # time base units, expressed in seconds
    TIME_BASE_NANOSEC = 1E-9
    TIME_BASE_MICROSEC = 1E-6
def __init__(self, **kwargs):
"""
Cfg object constructor.
Keyword arguments:
ignore_warnings -- whether warnings are displayed in stdout
(default: False)
"""
self.filename = ""
# implicit data
self._time_base = self.TIME_BASE_MICROSEC
# Default CFG data
self._station_name = ""
self._rec_dev_id = ""
self._rev_year = 2013
self._channels_count = 0
self._analog_channels = []
self._status_channels = []
self._analog_count = 0
self._status_count = 0
self._frequency = 0.0
self._nrates = 1
self._sample_rates = []
self._timestamp_critical = False
self._start_timestamp = dt.datetime(1900, 1, 1)
self._trigger_timestamp = dt.datetime(1900, 1, 1)
self._ft = TYPE_ASCII
self._time_multiplier = 1.0
# 2013 standard revision information
# time_code,local_code = 0,0 means local time is UTC
self._time_code = 0
self._local_code = 0
# tmq_code,leapsec
self._tmq_code = 0
self._leap_second = 0
if "ignore_warnings" in kwargs:
self.ignore_warnings = kwargs["ignore_warnings"]
else:
self.ignore_warnings = False
    @property
    def station_name(self) -> str:
        """Return the recording device's station name."""
        return self._station_name

    @property
    def rec_dev_id(self) -> str:
        """Return the recording device id."""
        return self._rec_dev_id

    @property
    def rev_year(self) -> int:
        """Return the COMTRADE standard revision year."""
        return self._rev_year

    @property
    def channels_count(self) -> int:
        """Return the total number of channels (analog + status)."""
        return self._channels_count

    @property
    def analog_channels(self) -> list:
        """Return the analog channels list with complete channel description."""
        return self._analog_channels

    @property
    def status_channels(self) -> list:
        """Return the status channels list with complete channel description."""
        return self._status_channels

    @property
    def analog_count(self) -> int:
        """Return the number of analog channels."""
        return self._analog_count

    @property
    def status_count(self) -> int:
        """Return the number of status channels."""
        return self._status_count

    @property
    def time_base(self) -> float:
        """Return the time base (defaults to TIME_BASE_MICROSEC)."""
        return self._time_base

    @property
    def frequency(self) -> float:
        """Return the measured line frequency in Hertz."""
        return self._frequency

    @property
    def ft(self) -> str:
        """Return the expected DAT file format (e.g. ASCII or BINARY)."""
        return self._ft

    @property
    def timemult(self) -> float:
        """Return the DAT time multiplier (Default = 1)."""
        return self._time_multiplier

    @property
    def timestamp_critical(self) -> bool:
        """Returns whether the DAT file must contain non-zero
        timestamp values."""
        return self._timestamp_critical

    @property
    def start_timestamp(self) -> dt.datetime:
        """Return the recording start time stamp as a datetime object."""
        return self._start_timestamp

    @property
    def trigger_timestamp(self) -> dt.datetime:
        """Return the trigger time stamp as a datetime object."""
        return self._trigger_timestamp

    @property
    def nrates(self) -> int:
        """Return the number of different sample rates within the DAT file."""
        return self._nrates

    @property
    def sample_rates(self) -> list:
        """
        Return a list with pairs describing the number of samples for a given
        sample rate.
        """
        return self._sample_rates

    # Deprecated properties - Changed "Digital" for "Status"
    @property
    def digital_channels(self) -> list:
        """Deprecated alias for status_channels: returns the status channel
        description list and emits a FutureWarning unless warnings are
        ignored."""
        if not self.ignore_warnings:
            warnings.warn(FutureWarning("digital_channels is deprecated, "
                                        "use status_channels instead."))
        return self._status_channels
@property
def digital_count(self) -> int:
"""Returns the number of status channels."""
if not self.ignore_warnings:
warnings.warn(FutureWarning("digital_count is deprecated, "
"use status_count instead."))
return self._status_count
def load(self, filepath, **user_kwargs):
"""Load and read a CFG file contents."""
self.filepath = filepath
if os.path.isfile(self.filepath):
kwargs = {}
if "encoding" not in user_kwargs and _file_is_utf8(self.filepath):
kwargs["encoding"] = "utf-8"
elif "encoding" in user_kwargs:
kwargs["encoding"] = user_kwargs["encoding"]
with open(self.filepath, "r", **kwargs) as cfg:
self._read_io(cfg)
else:
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
self.filepath)
def read(self, cfg_lines):
"""Read CFG-format data of a FileIO or StringIO object."""
if type(cfg_lines) is str:
self._read_io(io.StringIO(cfg_lines))
else:
self._read_io(cfg_lines)
def _read_io(self, cfg):
"""Read CFG-format lines and stores its data."""
line_count = 0
self._nrates = 1
self._sample_rates = []
self._analog_channels = []
self._status_channels = []
# First line
line = cfg.readline()
# station, device, and comtrade standard revision information
packed = _read_sep_values(line)
if 3 == len(packed):
# only 1999 revision and above has the standard revision year
self._station_name, self._rec_dev_id, self._rev_year = packed
self._rev_year = self._rev_year.strip()
if self._rev_year not in (REV_1991, REV_1999, REV_2013):
if not self.ignore_warnings:
msg = WARNING_UNKNOWN_REVISION.format(self._rev_year)
warnings.warn(Warning(msg))
else:
self._station_name, self._rec_dev_id = packed
self._rev_year = REV_1991
line_count = line_count + 1
# Second line
line = cfg.readline()
# number of channels and its type
totchn, achn, schn = _read_sep_values(line, 3, '0')
self._channels_count = int(totchn)
self._analog_count = int(achn[:-1])
self._status_count = int(schn[:-1])
self._analog_channels = [None]*self._analog_count
self._status_channels = [None]*self._status_count
line_count = line_count + 1
# Analog channel description lines
for ichn in range(self._analog_count):
line = cfg.readline()
packed = _read_sep_values(line, 13, '0')
# unpack values
n, name, ph, ccbm, uu, a, b, skew, cmin, cmax, \
primary, secondary, pors = packed
# type conversion
n = int(n)
a = float(a)
b = _prevent_null(b, float, 0.0)
skew = _prevent_null(skew, float, 0.0)
cmin = float(cmin)
cmax = float(cmax)
primary = float(primary)
secondary = float(secondary)
self.analog_channels[ichn] = AnalogChannel(n, a, b, skew,
cmin, cmax, name, uu, ph, ccbm, primary, secondary, pors)
line_count = line_count + 1
# Status channel description lines
for ichn in range(self._status_count):
line = cfg.readline()
# unpack values
packed = _read_sep_values(line, 5, '0')
n, name, ph, ccbm, y = packed
# type conversion
n = int(n)
y = _prevent_null(y, int, 0) # TODO: actually a critical data. In the future add a warning.
self.status_channels[ichn] = StatusChannel(n, name, ph, ccbm, y)
line_count = line_count + 1
# Frequency line
line = cfg.readline()
if len(line.strip()) > 0:
self._frequency = float(line.strip())
line_count = line_count + 1
# Nrates line
# number of different sample rates
line = cfg.readline()
self._nrates = int(line.strip())
if self._nrates == 0:
self._nrates = 1
self._timestamp_critical = True
else:
self._timestamp_critical = False
line_count = line_count + 1
for inrate in range(self._nrates):
line = cfg.readline()
# each sample rate
samp, endsamp = _read_sep_values(line)
samp = float(samp)
endsamp = int(endsamp)
self.sample_rates.append([samp, endsamp])
line_count = line_count + 1
# First data point time and time base
line = cfg.readline()
ts_str = line.strip()
self._start_timestamp, nanosec = _read_timestamp(
ts_str,
self.rev_year,
self.ignore_warnings
)
self._time_base = self._get_time_base(nanosec)
line_count = line_count + 1
# Event data point and time base
line = cfg.readline()
ts_str = line.strip()
self._trigger_timestamp, nanosec = _read_timestamp(
ts_str,
self.rev_year,
self.ignore_warnings
)
self._time_base = min([self.time_base, self._get_time_base(nanosec)])
line_count = line_count + 1
# DAT file type
line = cfg.readline()
self._ft = line.strip()
line_count = line_count + 1
# Timestamp multiplication factor
if self._rev_year in (REV_1999, REV_2013):
line = cfg.readline().strip()
if len(line) > 0:
self._time_multiplier = float(line)
else:
self._time_multiplier = 1.0
line_count = line_count + 1
# time_code and local_code
if self._rev_year == REV_2013:
line = cfg.readline()
if line:
self._time_code, self._local_code = _read_sep_values(line)
line_count = line_count + 1
line = cfg.readline()
# time_code and local_code
self._tmq_code, self._leap_second = _read_sep_values(line)
line_count = line_count + 1
def _get_time_base(self, using_nanoseconds: bool):
"""
Return the time base, which is based on the fractionary part of the
seconds in a timestamp (00.XXXXX).
"""
if using_nanoseconds:
return self.TIME_BASE_NANOSEC
else:
return self.TIME_BASE_MICROSEC
class Comtrade:
    """Parses and stores Comtrade data."""
    # extensions
    EXT_CFG = "cfg"
    EXT_DAT = "dat"
    EXT_INF = "inf"
    EXT_HDR = "hdr"
    # format specific
    ASCII_SEPARATOR = ","

    def __init__(self, **kwargs):
        """
        Comtrade object constructor.
        Keyword arguments:
        ignore_warnings -- whether warnings are displayed in stdout
            (default: False).
        use_numpy_arrays -- store sample values in numpy arrays instead of
            array.array (default: False).
        """
        self.file_path = ""
        self._cfg = Cfg(**kwargs)
        # Default CFG data
        self._analog_channel_ids = []
        self._analog_phases = []
        self._status_channel_ids = []
        self._status_phases = []
        self._timestamp_critical = False
        # Data types
        if "use_numpy_arrays" in kwargs:
            self._use_numpy_arrays = kwargs["use_numpy_arrays"]
        else:
            self._use_numpy_arrays = False
        # DAT file data
        self._time_values = _preallocate_values("f", 0, self._use_numpy_arrays)
        self._analog_values = []
        self._status_values = []
        # Additional CFF data (or additional comtrade files)
        self._hdr = None
        self._inf = None
        if "ignore_warnings" in kwargs:
            self.ignore_warnings = kwargs["ignore_warnings"]
        else:
            self.ignore_warnings = False

    @property
    def station_name(self) -> str:
        """Return the recording device's station name."""
        return self._cfg.station_name

    @property
    def rec_dev_id(self) -> str:
        """Return the recording device id."""
        return self._cfg.rec_dev_id

    @property
    def rev_year(self) -> int:
        """Return the COMTRADE revision year."""
        return self._cfg.rev_year

    @property
    def cfg(self) -> Cfg:
        """Return the underlying CFG class instance."""
        return self._cfg

    @property
    def hdr(self):
        """Return the HDR file contents."""
        return self._hdr

    @property
    def inf(self):
        """Return the INF file contents."""
        return self._inf

    @property
    def analog_channel_ids(self) -> list:
        """Returns the analog channels name list."""
        return self._analog_channel_ids

    @property
    def analog_phases(self) -> list:
        """Returns the analog phase name list."""
        return self._analog_phases

    @property
    def status_channel_ids(self) -> list:
        """Returns the status channels name list."""
        return self._status_channel_ids

    @property
    def status_phases(self) -> list:
        """Returns the status phase name list."""
        return self._status_phases

    @property
    def time(self) -> list:
        """Return the time values list."""
        return self._time_values

    @property
    def analog(self) -> list:
        """Return the analog channel values bidimensional list."""
        return self._analog_values

    @property
    def status(self) -> list:
        """Return the status channel values bidimensional list."""
        return self._status_values

    @property
    def total_samples(self) -> int:
        """Return the total number of samples (per channel)."""
        return self._total_samples

    @property
    def frequency(self) -> float:
        """Return the measured line frequency in Hertz."""
        return self._cfg.frequency

    @property
    def start_timestamp(self):
        """Return the recording start time stamp as a datetime object."""
        return self._cfg.start_timestamp

    @property
    def trigger_timestamp(self):
        """Return the trigger time stamp as a datetime object."""
        return self._cfg.trigger_timestamp

    @property
    def channels_count(self) -> int:
        """Return the number of channels, total."""
        return self._cfg.channels_count

    @property
    def analog_count(self) -> int:
        """Return the number of analog channels."""
        return self._cfg.analog_count

    @property
    def status_count(self) -> int:
        """Return the number of status channels."""
        return self._cfg.status_count

    @property
    def trigger_time(self) -> float:
        """Return relative trigger time in seconds."""
        stt = self._cfg.start_timestamp
        trg = self._cfg.trigger_timestamp
        tdiff = trg - stt
        # Equivalent to tdiff.total_seconds(); kept as the explicit sum.
        tsec = (tdiff.days*60*60*24) + tdiff.seconds + (tdiff.microseconds*1E-6)
        return tsec

    @property
    def time_base(self) -> float:
        """Return the time base."""
        return self._cfg.time_base

    @property
    def ft(self) -> str:
        """Return the expected DAT file format."""
        return self._cfg.ft

    # Deprecated properties - Changed "Digital" for "Status"
    @property
    def digital_channel_ids(self) -> list:
        """Returns the status channels name list."""
        if not self.ignore_warnings:
            warnings.warn(FutureWarning("digital_channel_ids is deprecated, use status_channel_ids instead."))
        return self._status_channel_ids

    @property
    def digital(self) -> list:
        """Returns the status channels bidimensional values list."""
        if not self.ignore_warnings:
            warnings.warn(FutureWarning("digital is deprecated, use status instead."))
        return self._status_values

    @property
    def digital_count(self) -> int:
        """Returns the number of status channels."""
        if not self.ignore_warnings:
            warnings.warn(FutureWarning("digital_count is deprecated, use status_count instead."))
        return self._cfg.status_count

    def _get_dat_reader(self):
        """Return the DatReader subclass instance matching self.ft.

        Raises on an unsupported DAT file format.
        """
        # case insensitive comparison of file format
        dat = None
        ft_upper = self.ft.upper()
        dat_kwargs = {"use_numpy_arrays": self._use_numpy_arrays}
        if ft_upper == TYPE_ASCII:
            dat = AsciiDatReader(**dat_kwargs)
        elif ft_upper == TYPE_BINARY:
            dat = BinaryDatReader(**dat_kwargs)
        elif ft_upper == TYPE_BINARY32:
            dat = Binary32DatReader(**dat_kwargs)
        elif ft_upper == TYPE_FLOAT32:
            dat = Float32DatReader(**dat_kwargs)
        else:
            dat = None
            raise Exception("Not supported data file format: {}".format(self.ft))
        return dat

    def read(self, cfg_lines, dat_lines_or_bytes) -> None:
        """
        Read CFG and DAT files contents. Expects FileIO or StringIO objects.
        """
        self._cfg.read(cfg_lines)
        # channel ids
        self._cfg_extract_channels_ids(self._cfg)
        # channel phases
        self._cfg_extract_phases(self._cfg)
        dat = self._get_dat_reader()
        dat.read(dat_lines_or_bytes, self._cfg)
        # copy dat object information
        self._dat_extract_data(dat)

    def _cfg_extract_channels_ids(self, cfg) -> None:
        # Cache channel names from the parsed CFG.
        self._analog_channel_ids = [channel.name for channel in cfg.analog_channels]
        self._status_channel_ids = [channel.name for channel in cfg.status_channels]

    def _cfg_extract_phases(self, cfg) -> None:
        # Cache channel phase labels from the parsed CFG.
        self._analog_phases = [channel.ph for channel in cfg.analog_channels]
        self._status_phases = [channel.ph for channel in cfg.status_channels]

    def _dat_extract_data(self, dat) -> None:
        # Adopt the DAT reader's parsed sample arrays.
        self._time_values = dat.time
        self._analog_values = dat.analog
        self._status_values = dat.status
        self._total_samples = dat.total_samples

    def load(self, cfg_file, dat_file = None, **kwargs) -> None:
        """
        Load CFG, DAT, INF, and HDR files. Each must be a FileIO or StringIO
        object. dat_file, inf_file, and hdr_file are optional (Default: None).
        cfg_file is the cfg file path, including its extension.
        dat_file is optional, and may be set if the DAT file name differs from
        the CFG file name.
        Keyword arguments:
        inf_file -- optional INF file path (Default = None)
        hdr_file -- optional HDR file path (Default = None)
        """
        if "inf_file" in kwargs:
            inf_file = kwargs["inf_file"]
        else:
            inf_file = None
        if "hdr_file" in kwargs:
            hdr_file = kwargs["hdr_file"]
        else:
            hdr_file = None
        # which extension: CFG or CFF?
        file_ext = cfg_file[-3:].upper()
        if file_ext == "CFG":
            # basename keeps the trailing dot, so appending EXT_* yields
            # e.g. "name.dat".
            basename = cfg_file[:-3]
            # if not informed, infer dat_file with cfg_file
            if dat_file is None:
                dat_file = cfg_file[:-3] + self.EXT_DAT
            if inf_file is None:
                inf_file = basename + self.EXT_INF
            if hdr_file is None:
                hdr_file = basename + self.EXT_HDR
            # load both cfg and dat
            file_kwargs = {}
            if "encoding" in kwargs:
                file_kwargs["encoding"] = kwargs["encoding"]
            self._load_cfg_dat(cfg_file, dat_file, **file_kwargs)
            # Load additional inf and hdr files, if they exist.
            self._load_inf(inf_file, **file_kwargs)
            self._load_hdr(hdr_file, **file_kwargs)
        elif file_ext == "CFF":
            # check if the CFF file exists
            self._load_cff(cfg_file)
        else:
            # NOTE(review): typo "intead" and the raw-string prefix leaves
            # literal backslashes in the message — candidate fix:
            # 'Expected CFG file path, got instead "{}".'
            raise Exception(r"Expected CFG file path, got intead \"{}\".".format(cfg_file))

    def _load_cfg_dat(self, cfg_filepath, dat_filepath, **kwargs):
        self._cfg.load(cfg_filepath, **kwargs)
        # channel ids
        self._cfg_extract_channels_ids(self._cfg)
        # channel phases
        self._cfg_extract_phases(self._cfg)
        dat = self._get_dat_reader()
        dat.load(dat_filepath, self._cfg, **kwargs)
        # copy dat object information
        self._dat_extract_data(dat)

    def _load_inf(self, inf_file, **kwargs):
        if os.path.exists(inf_file):
            # NOTE(review): the UTF-8 sniff is done on self.file_path (set to
            # "" in __init__ and never updated here), not on inf_file — this
            # looks like it should be _file_is_utf8(inf_file); confirm.
            if "encoding" not in kwargs and _file_is_utf8(self.file_path):
                kwargs["encoding"] = "utf-8"
            with open(inf_file, 'r', **kwargs) as file:
                self._inf = file.read()
                if len(self._inf) == 0:
                    self._inf = None
        else:
            self._inf = None

    def _load_hdr(self, hdr_file, **kwargs):
        if os.path.exists(hdr_file):
            # NOTE(review): same self.file_path vs hdr_file mismatch as in
            # _load_inf — confirm intent.
            if "encoding" not in kwargs and _file_is_utf8(self.file_path):
                kwargs["encoding"] = "utf-8"
            with open(hdr_file, 'r', **kwargs) as file:
                self._hdr = file.read()
                if len(self._hdr) == 0:
                    self._hdr = None
        else:
            self._hdr = None

    def _load_cff(self, cff_file_path: str, **kwargs):
        """Load a combined CFF file: CFG, DAT, HDR and INF sections in one file."""
        # stores each file type lines
        cfg_lines = []
        dat_lines = []
        hdr_lines = []
        inf_lines = []
        # file type: CFG, HDR, INF, DAT
        ftype = None
        # file format: ASCII, BINARY, BINARY32, FLOAT32
        fformat = None
        if "encoding" not in kwargs and _file_is_utf8(cff_file_path):
            kwargs["encoding"] = "utf-8"
        # Number of bytes for binary/float dat
        fbytes = 0
        with open(cff_file_path, "r", **kwargs) as file:
            header_re = re.compile(CFF_HEADER_REXP)
            last_match = None
            line_number = 0
            line = file.readline()
            while line != "":
                line_number += 1
                mobj = header_re.match(line.strip().upper())
                if mobj is not None:
                    # Section header ("--- file type: ... ---"): switch state.
                    last_match = mobj
                    groups = last_match.groups()
                    ftype = groups[0]
                    if len(groups) > 1:
                        fformat = last_match.groups()[1]
                        fbytes_obj = last_match.groups()[2]
                        fbytes = int(fbytes_obj) if fbytes_obj is not None else 0
                elif last_match is not None and ftype == "CFG":
                    cfg_lines.append(line.strip())
                elif last_match is not None and ftype == "DAT":
                    if fformat == TYPE_ASCII:
                        dat_lines.append(line.strip())
                    else:
                        # Binary DAT payload: stop text reading; it is
                        # re-read as bytes below.
                        break
                elif last_match is not None and ftype == "HDR":
                    hdr_lines.append(line.strip())
                elif last_match is not None and ftype == "INF":
                    inf_lines.append(line.strip())
                line = file.readline()
        if fformat == TYPE_ASCII:
            # process ASCII CFF data
            self.read("\n".join(cfg_lines), "\n".join(dat_lines))
        else:
            # read dat bytes
            # NOTE(review): assumes the binary DAT payload is the last fbytes
            # bytes of the file — confirm against the CFF layout in use.
            total_bytes = os.path.getsize(cff_file_path)
            cff_bytes_read = total_bytes - fbytes
            with open(cff_file_path, "rb") as file:
                file.read(cff_bytes_read)
                dat_bytes = file.read(fbytes)
            self.read("\n".join(cfg_lines), dat_bytes)
        # stores additional data
        self._hdr = "\n".join(hdr_lines)
        if len(self._hdr) == 0:
            self._hdr = None
        self._inf = "\n".join(inf_lines)
        if len(self._inf) == 0:
            self._inf = None

    def cfg_summary(self):
        """Returns the CFG attributes summary string."""
        header_line = "Channels (total,A,D): {}A + {}D = {}"
        sample_line = "Sample rate of {} Hz to the sample #{}"
        interval_line = "From {} to {} with time mult. = {}"
        format_line = "{} format"
        lines = [header_line.format(self.analog_count, self.status_count,
                                    self.channels_count),
                 "Line frequency: {} Hz".format(self.frequency)]
        for i in range(self._cfg.nrates):
            rate, points = self._cfg.sample_rates[i]
            lines.append(sample_line.format(rate, points))
        lines.append(interval_line.format(self.start_timestamp,
                                          self.trigger_timestamp,
                                          self._cfg.timemult))
        lines.append(format_line.format(self.ft))
        return "\n".join(lines)
class Channel:
    """Holds common channel description data."""

    def __init__(self, n=1, name='', ph='', ccbm=''):
        """Create a channel: index *n*, channel *name*, phase *ph* and
        circuit component *ccbm*."""
        self.n = n
        self.name = name
        self.ph = ph
        self.ccbm = ccbm

    def __str__(self):
        """Render the channel as its comma-separated CFG fields."""
        parts = (str(self.n), self.name, self.ph, self.ccbm)
        return ",".join(parts)
class StatusChannel(Channel):
    """Holds status (digital) channel description data."""

    def __init__(self, n: int, name='', ph='', ccbm='', y=0):
        """StatusChannel class constructor.

        n -- channel index; name/ph/ccbm -- as in Channel;
        y -- normal state of the status channel.
        """
        # BUGFIX: the original re-assigned n, name, ph and ccbm after
        # super().__init__ already stored them; the duplicates are removed.
        super().__init__(n, name, ph, ccbm)
        self.y = y

    def __str__(self):
        fields = [str(self.n), self.name, self.ph, self.ccbm, str(self.y)]
        # BUGFIX: the original built `fields` but never returned it, so
        # str() yielded None-related errors; return the joined string.
        return ','.join(fields)
class AnalogChannel(Channel):
    """Holds analog channel description data."""

    def __init__(self, n: int, a: float, b=0.0, skew=0.0, cmin=-32767,
                 cmax=32767, name='', uu='', ph='', ccbm='', primary=1.0,
                 secondary=1.0, pors='P'):
        """AnalogChannel class constructor.

        n -- channel index; a/b -- conversion factor (value = a*x + b);
        skew -- channel time skew; cmin/cmax -- data range limits;
        uu -- units; primary/secondary -- transformer ratio values;
        pors -- primary ('P') or secondary ('S') scaling.
        """
        # BUGFIX(cleanup): the original assigned name, uu, n, ph and ccbm
        # redundantly (name and uu twice each) after super().__init__;
        # only the attributes not handled by the base class remain here.
        super().__init__(n, name, ph, ccbm)
        self.a = a
        self.b = b
        self.skew = skew
        self.cmin = cmin
        self.cmax = cmax
        # misc
        self.uu = uu
        self.primary = primary
        self.secondary = secondary
        self.pors = pors

    def __str__(self):
        fields = [str(self.n), self.name, self.ph, self.ccbm, self.uu,
                  str(self.a), str(self.b), str(self.skew), str(self.cmin),
                  str(self.cmax), str(self.primary), str(self.secondary),
                  self.pors]
        return ','.join(fields)
class DatReader:
    """Abstract DatReader class. Used to parse DAT file contents."""
    # File open mode; binary subclasses override with "rb".
    read_mode = "r"

    def __init__(self, **kwargs):
        """DatReader class constructor.

        Keyword arguments:
        use_numpy_arrays -- store samples in numpy arrays instead of
            array.array (default: False).
        """
        if "use_numpy_arrays" in kwargs:
            self._use_numpy_arrays = kwargs["use_numpy_arrays"]
        else:
            self._use_numpy_arrays = False
        self.file_path = ""
        self._content = None
        self._cfg = None
        self.time = _preallocate_values("f", 0, self._use_numpy_arrays)
        self.analog = []
        self.status = []
        self._total_samples = 0

    @property
    def total_samples(self):
        """Return the total samples (per channel)."""
        return self._total_samples

    def load(self, dat_filepath, cfg, **kwargs):
        """Load a DAT file and parse its contents.

        Raises FileNotFoundError when dat_filepath is not an existing file.
        """
        self.file_path = dat_filepath
        self._content = None
        if os.path.isfile(self.file_path):
            # extract CFG file information regarding data dimensions
            self._cfg = cfg
            self._preallocate()
            # UTF-8 sniffing only makes sense in text mode.
            if "encoding" not in kwargs and self.read_mode != "rb" and \
                    _file_is_utf8(self.file_path):
                kwargs["encoding"] = "utf-8"
            with open(self.file_path, self.read_mode, **kwargs) as contents:
                self.parse(contents)
        else:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT),
                                    self.file_path)

    def read(self, dat_lines, cfg):
        """
        Read a DAT file contents, expecting a list of string or FileIO object.
        """
        self.file_path = None
        self._content = dat_lines
        self._cfg = cfg
        self._preallocate()
        self.parse(dat_lines)

    def _preallocate(self):
        """Size the time/analog/status buffers from the CFG sample counts."""
        # read from the cfg file the number of samples in the dat file
        steps = self._cfg.sample_rates[-1][1]  # last samp field
        self._total_samples = steps
        # analog and status count
        analog_count = self._cfg.analog_count
        status_count = self._cfg.status_count
        # preallocate analog and status values
        self.time = _preallocate_values("f", steps, self._use_numpy_arrays)
        self.analog = [None] * analog_count
        self.status = [None] * status_count
        # preallocate each channel values with zeros
        for i in range(analog_count):
            self.analog[i] = _preallocate_values("f", steps,
                                                 self._use_numpy_arrays)
        for i in range(status_count):
            self.status[i] = _preallocate_values("i", steps,
                                                 self._use_numpy_arrays)

    def _get_samp(self, n) -> float:
        """Get the sampling rate for a sample n (1-based index)."""
        # TODO: make tests.
        # NOTE(review): last_sample_rate is never updated inside the loop,
        # so samples beyond the last endsamp fall back to 1.0 — confirm
        # this default is intended.
        last_sample_rate = 1.0
        for samp, endsamp in self._cfg.sample_rates:
            if n <= endsamp:
                return samp
        return last_sample_rate

    def _get_time(self, n: int, ts_value: float, time_base: float,
                  time_multiplier: float):
        """Return the time in seconds of sample *n*.

        Uses the CFG sample rate when the DAT timestamp is missing or
        non-critical; otherwise scales the raw timestamp value.
        """
        # TODO: add option to enforce dat file timestamp, when available.
        # TODO: make tests.
        ts = 0  # NOTE(review): dead assignment, never read before return.
        sample_rate = self._get_samp(n)
        if not self._cfg.timestamp_critical or ts_value == TIMESTAMP_MISSING:
            # if the timestamp is missing, use calculated.
            if sample_rate != 0.0:
                return (n - 1) / sample_rate
            else:
                raise Exception("Missing timestamp and no sample rate "
                                "provided.")
        else:
            # Use provided timestamp if its not missing
            return ts_value * time_base * time_multiplier

    def parse(self, contents):
        """Virtual method, parse DAT file contents."""
        pass
class AsciiDatReader(DatReader):
    """ASCII format DatReader subclass."""

    def __init__(self, **kwargs):
        # Call the initialization for the inherited class
        super().__init__(**kwargs)
        self.ASCII_SEPARATOR = SEPARATOR
        self.DATA_MISSING = ""

    def parse(self, contents):
        """Parse a ASCII file contents.

        Each line is: sample number, timestamp, analog values, status values.
        Analog raw values are scaled in place with the CFG a/b factors.
        """
        analog_count = self._cfg.analog_count
        status_count = self._cfg.status_count
        time_mult = self._cfg.timemult
        time_base = self._cfg.time_base
        # auxiliary vectors (channels gains and offsets)
        a = [x.a for x in self._cfg.analog_channels]
        b = [x.b for x in self._cfg.analog_channels]
        # extract lines
        if type(contents) is str:
            lines = contents.splitlines()
        else:
            lines = contents
        line_number = 0
        for line in lines:
            line_number = line_number + 1
            # Ignore any rows beyond the CFG-declared sample count.
            if line_number > self._total_samples:
                break
            values = line.strip().split(self.ASCII_SEPARATOR)
            n = int(values[0])
            # Read time
            ts_val = float(values[1])
            ts = self._get_time(n, ts_val, time_base, time_mult)
            # Columns 2..analog_count+1 are analog; the trailing
            # status_count columns are status values.
            avalues = [float(x)*a[i] + b[i] for i, x in enumerate(values[2:analog_count+2])]
            svalues = [int(x) for x in values[len(values)-status_count:]]
            # store
            self.time[line_number-1] = ts
            for i in range(analog_count):
                self.analog[i][line_number - 1] = avalues[i]
            for i in range(status_count):
                self.status[i][line_number - 1] = svalues[i]
class BinaryDatReader(DatReader):
    """16-bit binary format DatReader subclass."""

    def __init__(self, **kwargs):
        """Configure 16-bit signed-integer binary sample reading."""
        # Call the initialization for the inherited class
        super().__init__(**kwargs)
        # Field widths in bytes within each DAT row.
        self.ANALOG_BYTES = 2
        self.STATUS_BYTES = 2
        self.TIME_BYTES = 4
        self.SAMPLE_NUMBER_BYTES = 4
        # maximum negative value, used as the missing-sample sentinel
        self.DATA_MISSING = 0xFFFF
        self.read_mode = "rb"
        # "L" is 4 bytes on some platforms and 8 on others; pick whichever
        # code gives 4-byte sample-number and timestamp fields.
        if struct.calcsize("L") == 4:
            self.STRUCT_FORMAT = "LL {acount:d}h {dcount:d}H"
            self.STRUCT_FORMAT_ANALOG_ONLY = "LL {acount:d}h"
            self.STRUCT_FORMAT_STATUS_ONLY = "LL {dcount:d}H"
        else:
            self.STRUCT_FORMAT = "II {acount:d}h {dcount:d}H"
            self.STRUCT_FORMAT_ANALOG_ONLY = "II {acount:d}h"
            self.STRUCT_FORMAT_STATUS_ONLY = "II {dcount:d}H"

    def get_reader_format(self, analog_channels, status_bytes):
        """Return the struct format string for one DAT row.

        analog_channels -- number of analog channels in the row.
        status_bytes -- total bytes used by the packed status words.
        """
        # Number of status fields of 2 bytes based on the total number of
        # bytes.
        dcount = math.floor(status_bytes / 2)
        # Check the file configuration
        if int(status_bytes) > 0 and int(analog_channels) > 0:
            return self.STRUCT_FORMAT.format(acount=analog_channels,
                                             dcount=dcount)
        elif int(analog_channels) > 0:
            # Analog channels only.
            return self.STRUCT_FORMAT_ANALOG_ONLY.format(acount=analog_channels)
        # Status channels only.
        # BUGFIX: the template expects the "dcount" key; the original passed
        # acount=dcount, which raised KeyError for status-only recordings.
        return self.STRUCT_FORMAT_STATUS_ONLY.format(dcount=dcount)

    def parse(self, contents):
        """Parse DAT binary file contents.

        Rows are fixed-size records: sample number, timestamp, 16-bit
        analog values, then 16-channel status bit groups.
        """
        time_mult = self._cfg.timemult
        time_base = self._cfg.time_base
        achannels = self._cfg.analog_count
        schannel = self._cfg.status_count
        # auxiliary vectors (channels gains and offsets)
        a = [x.a for x in self._cfg.analog_channels]
        b = [x.b for x in self._cfg.analog_channels]
        # Status channels are packed 16 to a 2-byte word.
        dbytes = self.STATUS_BYTES * math.ceil(schannel / 16.0)
        groups_of_16bits = math.floor(dbytes / self.STATUS_BYTES)
        # Struct format.
        row_reader = struct.Struct(self.get_reader_format(achannels, dbytes))
        if isinstance(contents, io.TextIOBase) or \
                isinstance(contents, io.BufferedIOBase):
            # Read all buffer contents
            contents = contents.read()
        for irow, values in enumerate(row_reader.iter_unpack(contents)):
            # Stop once the CFG-declared sample count is reached.
            if irow >= self.total_samples:
                break
            # Sample number and time stamp
            n = values[0]
            ts_val = values[1]
            self.time[irow] = self._get_time(n, ts_val, time_base, time_mult)
            # Extract analog channel values.
            for ichannel in range(achannels):
                yint = values[ichannel + 2]
                self.analog[ichannel][irow] = a[ichannel] * yint + b[ichannel]
            # Extract status channel values.
            for igroup in range(groups_of_16bits):
                group = values[achannels + 2 + igroup]
                # for each group of 16 bits, extract the status channels
                maxchn = min([(igroup + 1) * 16, schannel])
                for ichannel in range(igroup * 16, maxchn):
                    chnindex = ichannel - igroup * 16
                    mask = 0b01 << chnindex
                    self.status[ichannel][irow] = (group & mask) >> chnindex
class Binary32DatReader(BinaryDatReader):
    """32-bit binary format DatReader subclass."""

    def __init__(self, **kwargs):
        """Configure 32-bit signed-integer binary sample reading."""
        super().__init__(**kwargs)
        self.ANALOG_BYTES = 4
        # Pick struct codes with 4-byte header fields and 4-byte signed
        # analog values, depending on the platform size of "L".
        if struct.calcsize("L") == 4:
            full_fmt = "LL {acount:d}l {dcount:d}H"
            analog_fmt = "LL {acount:d}l"
        else:
            full_fmt = "II {acount:d}i {dcount:d}H"
            analog_fmt = "II {acount:d}i"
        self.STRUCT_FORMAT = full_fmt
        self.STRUCT_FORMAT_ANALOG_ONLY = analog_fmt
        # maximum negative value (missing-sample sentinel)
        self.DATA_MISSING = 0xFFFFFFFF
class Float32DatReader(BinaryDatReader):
    """Single precision (float) binary format DatReader subclass."""

    def __init__(self, **kwargs):
        """Configure IEEE-754 single-precision binary sample reading."""
        super().__init__(**kwargs)
        self.ANALOG_BYTES = 4
        # Header code depends on the platform size of "L"; analog values
        # are 4-byte floats ("f") either way.
        header_code = "LL" if struct.calcsize("L") == 4 else "II"
        self.STRUCT_FORMAT = header_code + " {acount:d}f {dcount:d}H"
        self.STRUCT_FORMAT_ANALOG_ONLY = header_code + " {acount:d}f"
        # Maximum negative value (missing-sample sentinel)
        self.DATA_MISSING = sys.float_info.min
| 34.138889 | 110 | 0.59034 |
ace6149bdc6b98ad8d9cf7b6cefba9d877e046b4 | 9,223 | py | Python | mypy/semanal_newtype.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | 2 | 2020-06-20T14:11:12.000Z | 2020-10-12T07:11:32.000Z | mypy/semanal_newtype.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | mypy/semanal_newtype.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | null | null | null | """Semantic analysis of NewType definitions.
This is conceptually part of mypy.semanal (semantic analyzer pass 2).
"""
from typing import Tuple, Optional
from mypy.types import (
Type, Instance, CallableType, NoneType, TupleType, AnyType, PlaceholderType,
TypeOfAny, get_proper_type
)
from mypy.nodes import (
AssignmentStmt, NewTypeExpr, CallExpr, NameExpr, RefExpr, Context, StrExpr, BytesExpr,
UnicodeExpr, Block, FuncDef, Argument, TypeInfo, Var, SymbolTableNode, MDEF, ARG_POS,
PlaceholderNode
)
from mypy.semanal_shared import SemanticAnalyzerInterface
from mypy.options import Options
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.typeanal import check_for_explicit_any, has_any_from_unimported_type
from mypy.messages import MessageBuilder, format_type
from mypy.errorcodes import ErrorCode
from mypy import errorcodes as codes
class NewTypeAnalyzer:
    def __init__(self,
                 options: Options,
                 api: SemanticAnalyzerInterface,
                 msg: MessageBuilder) -> None:
        """Store the mypy options, the semantic-analyzer facade used for
        symbol-table operations, and the message builder used to report
        errors from the NewType checks."""
        self.options = options
        self.api = api
        self.msg = msg
    def process_newtype_declaration(self, s: AssignmentStmt) -> bool:
        """Check if s declares a NewType; if yes, store it in symbol table.

        Return True if it's a NewType declaration. The current target may be
        deferred as a side effect if the base type is not ready, even if
        the return value is True.

        The logic in this function mostly copies the logic for visit_class_def()
        with a single (non-Generic) base.
        """
        name, call = self.analyze_newtype_declaration(s)
        if name is None or call is None:
            return False
        # OK, now we know this is a NewType. But the base type may be not ready yet,
        # add placeholder as we do for ClassDef.
        fullname = self.api.qualified_name(name)
        if (not call.analyzed or
                isinstance(call.analyzed, NewTypeExpr) and not call.analyzed.info):
            # Start from labeling this as a future class, as we do for normal ClassDefs.
            placeholder = PlaceholderNode(fullname, s, s.line, becomes_typeinfo=True)
            self.api.add_symbol(name, placeholder, s, can_defer=False)
        old_type, should_defer = self.check_newtype_args(name, call, s)
        old_type = get_proper_type(old_type)
        if not call.analyzed:
            call.analyzed = NewTypeExpr(name, old_type, line=call.line, column=call.column)
        if old_type is None:
            if should_defer:
                # Base type is not ready.
                self.api.defer()
            return True
        # Create the corresponding class definition if the aliased type is subtypeable
        if isinstance(old_type, TupleType):
            newtype_class_info = self.build_newtype_typeinfo(name, old_type,
                                                            old_type.partial_fallback)
            newtype_class_info.tuple_type = old_type
        elif isinstance(old_type, Instance):
            if old_type.type.is_protocol:
                self.fail("NewType cannot be used with protocol classes", s)
            newtype_class_info = self.build_newtype_typeinfo(name, old_type, old_type)
        else:
            # Not subtypeable: report and fall back to an Any-based class.
            if old_type is not None:
                message = "Argument 2 to NewType(...) must be subclassable (got {})"
                self.fail(message.format(format_type(old_type)), s, code=codes.VALID_NEWTYPE)
                # Otherwise the error was already reported.
            old_type = AnyType(TypeOfAny.from_error)
            object_type = self.api.named_type('__builtins__.object')
            newtype_class_info = self.build_newtype_typeinfo(name, old_type, object_type)
            newtype_class_info.fallback_to_any = True
        check_for_explicit_any(old_type, self.options, self.api.is_typeshed_stub_file, self.msg,
                               context=s)
        if self.options.disallow_any_unimported and has_any_from_unimported_type(old_type):
            self.msg.unimported_type_becomes_any("Argument 2 to NewType(...)", old_type, s)
        # If so, add it to the symbol table.
        assert isinstance(call.analyzed, NewTypeExpr)
        # As we do for normal classes, create the TypeInfo only once, then just
        # update base classes on next iterations (to get rid of placeholders there).
        if not call.analyzed.info:
            call.analyzed.info = newtype_class_info
        else:
            call.analyzed.info.bases = newtype_class_info.bases
        self.api.add_symbol(name, call.analyzed.info, s)
        newtype_class_info.line = s.line
        return True
def analyze_newtype_declaration(self,
                                s: AssignmentStmt) -> Tuple[Optional[str], Optional[CallExpr]]:
    """Return the NewType call expression if `s` is a newtype declaration or None otherwise."""
    lvalues = s.lvalues
    rvalue = s.rvalue
    # A newtype declaration is exactly ``Name = typing.NewType(...)``.
    looks_like_newtype = (
        len(lvalues) == 1
        and isinstance(lvalues[0], NameExpr)
        and isinstance(rvalue, CallExpr)
        and isinstance(rvalue.callee, RefExpr)
        and rvalue.callee.fullname == 'typing.NewType'
    )
    if not looks_like_newtype:
        return None, None
    name = lvalues[0].name
    if s.type:
        self.fail("Cannot declare the type of a NewType declaration", s)
    existing = self.api.current_symbol_table().get(name)
    # Give a better error message than generic "Name already defined".
    if (existing
            and not isinstance(existing.node, PlaceholderNode)
            and not rvalue.analyzed):
        self.fail("Cannot redefine '%s' as a NewType" % name, s)
    # This dummy NewTypeExpr marks the call as sufficiently analyzed; it will be
    # overwritten later with a fully complete NewTypeExpr if there are no other
    # errors with the NewType() call.
    return name, rvalue
def check_newtype_args(self, name: str, call: CallExpr,
                       context: Context) -> Tuple[Optional[Type], bool]:
    """Analyze base type in NewType call.

    Return a tuple (type, should defer): the analyzed base type (or None if
    the call is malformed) and whether the caller should defer because the
    base type is not fully analyzed yet.
    """
    has_failed = False
    args, arg_kinds = call.args, call.arg_kinds
    if len(args) != 2 or arg_kinds[0] != ARG_POS or arg_kinds[1] != ARG_POS:
        self.fail("NewType(...) expects exactly two positional arguments", context)
        return None, False
    # Check first argument: must be a string literal matching the assigned name.
    if not isinstance(args[0], (StrExpr, BytesExpr, UnicodeExpr)):
        self.fail("Argument 1 to NewType(...) must be a string literal", context)
        has_failed = True
    elif args[0].value != name:
        msg = "String argument 1 '{}' to NewType(...) does not match variable name '{}'"
        self.fail(msg.format(args[0].value, name), context)
        has_failed = True
    # Check second argument: must translate to a valid type expression.
    msg = "Argument 2 to NewType(...) must be a valid type"
    try:
        unanalyzed_type = expr_to_unanalyzed_type(args[1])
    except TypeTranslationError:
        self.fail(msg, context)
        return None, False
    # We want to use our custom error message (see above), so we suppress
    # the default error message for invalid types here.
    old_type = get_proper_type(self.api.anal_type(unanalyzed_type,
                                                  report_invalid_types=False))
    should_defer = False
    # A missing or placeholder result means the base type is not ready yet.
    if old_type is None or isinstance(old_type, PlaceholderType):
        should_defer = True
    # The caller of this function assumes that if we return a Type, it's always
    # a valid one. So, we translate AnyTypes created from errors into None.
    if isinstance(old_type, AnyType) and old_type.is_from_error:
        self.fail(msg, context)
        return None, False
    return None if has_failed else old_type, should_defer
def build_newtype_typeinfo(self, name: str, old_type: Type, base_type: Instance) -> TypeInfo:
    """Create the synthetic TypeInfo backing a NewType declaration.

    Starts from a fresh TypeInfo built on ``base_type`` and attaches an
    ``__init__(self, item: old_type) -> None`` so the NewType is callable
    with a single value of the base type.
    """
    info = self.api.basic_new_typeinfo(name, base_type)
    info.is_newtype = True

    # Add __init__ method taking a single positional ``item`` argument.
    args = [Argument(Var('self'), NoneType(), None, ARG_POS),
            self.make_argument('item', old_type)]
    signature = CallableType(
        arg_types=[Instance(info, []), old_type],
        arg_kinds=[arg.kind for arg in args],
        arg_names=['self', 'item'],
        ret_type=NoneType(),
        fallback=self.api.named_type('__builtins__.function'),
        name=name)
    init_func = FuncDef('__init__', args, Block([]), typ=signature)
    init_func.info = info
    init_func._fullname = self.api.qualified_name(name) + '.__init__'
    info.names['__init__'] = SymbolTableNode(MDEF, init_func)
    return info
# Helpers
def make_argument(self, name: str, type: Type) -> Argument:
    """Build a positional function argument named *name* with the given type."""
    var = Var(name)
    return Argument(var, type, None, ARG_POS)
def fail(self, msg: str, ctx: Context, *, code: Optional[ErrorCode] = None) -> None:
    """Report an error at *ctx* by delegating to the semantic-analyzer API."""
    self.api.fail(msg, ctx, code=code)
| 44.771845 | 99 | 0.634826 |
ace61502e89f3cbb739e88a2304216311cd75843 | 592 | py | Python | code/t1/fsave_to_fsave5.py | ejcorn/fir_pca_22q | 8347e6c0c178167102c90ec4e582fa4f5b375fe0 | [
"MIT"
] | 1 | 2021-04-20T02:47:38.000Z | 2021-04-20T02:47:38.000Z | code/t1/fsave_to_fsave5.py | ejcorn/fir_pca_22q | 8347e6c0c178167102c90ec4e582fa4f5b375fe0 | [
"MIT"
] | null | null | null | code/t1/fsave_to_fsave5.py | ejcorn/fir_pca_22q | 8347e6c0c178167102c90ec4e582fa4f5b375fe0 | [
"MIT"
] | null | null | null | import os
import os
import subprocess

# Point FreeSurfer at the cluster-wide subjects directory and work inside the
# directory that holds the surface maps to be resampled.
os.environ['SUBJECTS_DIR'] = '/cbica/software/external/freesurfer/centos7/5.3.0/subjects/'
os.chdir('/cbica/home/cornblae/ecornblath/brain_states_22q/data/Sunetal2018_Fig1Maps/')

# FreeSurfer hemisphere code -> filename prefix used by the .mgh maps.
hemis = {'lh': 'left', 'rh': 'right'}
# Metric map suffixes (presumably cortical thickness / surface area -- confirm).
metrics = ['CT', 'SA']

# iterate through hemispheres and metrics and convert to fsaverage5
for hemi_key, hemi_name in hemis.items():
    for metric in metrics:
        src = hemi_name + '-' + metric + '-beta.mgh'
        trg = hemi_name + '-' + metric + '-beta-fsaverage5.mgh'
        # Pass an argument list to subprocess.run instead of building a shell
        # command string with os.system: no shell quoting pitfalls, and
        # check=True stops the pipeline if mri_surf2surf fails (os.system's
        # exit status was previously discarded).
        subprocess.run(
            ['mri_surf2surf',
             '--hemi', hemi_key,
             '--srcsubject', 'fsaverage',
             '--srcsurfval', src,
             '--trgsubject', 'fsaverage5',
             '--trgsurfval', trg],
            check=True,
        )
ace615f640017c718e6969346b918695179a9022 | 3,154 | py | Python | django_cradmin/tests/test_uicontainer/test_blocklist.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 11 | 2015-07-05T16:57:58.000Z | 2020-11-24T16:58:19.000Z | django_cradmin/tests/test_uicontainer/test_blocklist.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 91 | 2015-01-08T22:38:13.000Z | 2022-02-10T10:25:27.000Z | django_cradmin/tests/test_uicontainer/test_blocklist.py | appressoas/django_cradmin | 0f8715afdfe1ad32e46033f442e622aecf6a4dec | [
"BSD-3-Clause"
] | 3 | 2016-12-07T12:19:24.000Z | 2018-10-03T14:04:18.000Z | import htmls
from django import test
from django_cradmin import uicontainer
class TestBlocklistItemTitle(test.TestCase):
    """Rendering tests for ``uicontainer.blocklist.BlocklistItemTitle``."""

    def test_sanity(self):
        title = uicontainer.blocklist.BlocklistItemTitle(html_tag='h1').bootstrap()
        parsed = htmls.S(title.render())
        self.assertTrue(parsed.exists('h1'))

    def test_default_css_classes(self):
        title = uicontainer.blocklist.BlocklistItemTitle(html_tag='h1').bootstrap()
        with self.settings(DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES=False):
            parsed = htmls.S(title.render())
            self.assertEqual(parsed.one('h1')['class'], 'blocklist__itemtitle')

    def test_no_text(self):
        title = uicontainer.blocklist.BlocklistItemTitle(html_tag='h1').bootstrap()
        parsed = htmls.S(title.render())
        self.assertEqual(parsed.one('h1').alltext_normalized, '')

    def test_has_text(self):
        title = uicontainer.blocklist.BlocklistItemTitle(html_tag='h1', text='test').bootstrap()
        parsed = htmls.S(title.render())
        self.assertEqual(parsed.one('h1').alltext_normalized, 'test')
class TestBlocklistItem(test.TestCase):
    """Rendering tests for ``uicontainer.blocklist.BlocklistItem``."""

    def test_sanity(self):
        item = uicontainer.blocklist.BlocklistItem().bootstrap()
        parsed = htmls.S(item.render())
        self.assertTrue(parsed.exists('div'))

    def test_default_css_classes(self):
        item = uicontainer.blocklist.BlocklistItem().bootstrap()
        with self.settings(DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES=False):
            parsed = htmls.S(item.render())
            self.assertEqual(parsed.one('div')['class'], 'blocklist__item')

    def test_no_text(self):
        item = uicontainer.blocklist.BlocklistItem().bootstrap()
        parsed = htmls.S(item.render())
        self.assertFalse(parsed.exists('div p'))

    def test_has_text(self):
        item = uicontainer.blocklist.BlocklistItem(text='test').bootstrap()
        parsed = htmls.S(item.render())
        self.assertEqual(parsed.one('div p').alltext_normalized, 'test')
class TestBlocklist(test.TestCase):
    """Rendering tests for ``uicontainer.blocklist.Blocklist``."""

    def test_sanity(self):
        blocklist = uicontainer.blocklist.Blocklist().bootstrap()
        parsed = htmls.S(blocklist.render())
        self.assertTrue(parsed.exists('div'))

    def test_default_css_classes(self):
        blocklist = uicontainer.blocklist.Blocklist().bootstrap()
        with self.settings(DJANGO_CRADMIN_INCLUDE_TEST_CSS_CLASSES=False):
            parsed = htmls.S(blocklist.render())
            self.assertEqual(parsed.one('div')['class'], 'blocklist')

    def test_with_children(self):
        blocklist = uicontainer.blocklist.Blocklist(
            children=[
                uicontainer.blocklist.BlocklistItem(text='a'),
                uicontainer.blocklist.BlocklistItem(text='b'),
            ]
        ).bootstrap()
        parsed = htmls.S(blocklist.render())
        child_text = [
            child.alltext_normalized
            for child in parsed.list('.blocklist__item')
        ]
        self.assertEqual(child_text[0], 'a')
        self.assertEqual(child_text[1], 'b')
| 42.053333 | 100 | 0.687698 |
ace617a21353833561faaf44c8c4dfa557d2de3f | 2,896 | py | Python | src/zvt/domain/quotes/__init__.py | jjandnn/zvt | 91e305f67b19c9ece88255ab7d6a8a7008d22e26 | [
"MIT"
] | null | null | null | src/zvt/domain/quotes/__init__.py | jjandnn/zvt | 91e305f67b19c9ece88255ab7d6a8a7008d22e26 | [
"MIT"
] | null | null | null | src/zvt/domain/quotes/__init__.py | jjandnn/zvt | 91e305f67b19c9ece88255ab7d6a8a7008d22e26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from sqlalchemy import String, Column, Float
from zvt.contract import Mixin
class KdataCommon(Mixin):
    # Common candlestick (kdata) columns shared by the concrete kdata schemas below.
    provider = Column(String(length=32))
    code = Column(String(length=32))
    name = Column(String(length=32))
    # Enum constraint is not extendable
    # level = Column(Enum(IntervalLevel, values_callable=enum_value))
    level = Column(String(length=32))
    # open price
    open = Column(Float)
    # close price
    close = Column(Float)
    # high price
    high = Column(Float)
    # low price
    low = Column(Float)
    # trade volume
    volume = Column(Float)
    # trade turnover (amount)
    turnover = Column(Float)
    # change percentage
    change_pct = Column(Float)
    # turnover rate
    turnover_rate = Column(Float)
class TickCommon(Mixin):
    # Common column set for tick-level data schemas.
    provider = Column(String(length=32))
    code = Column(String(length=32))
    name = Column(String(length=32))
    level = Column(String(length=32))
    order = Column(String(length=32))
    price = Column(Float)
    volume = Column(Float)
    turnover = Column(Float)
    direction = Column(String(length=32))
    order_type = Column(String(length=32))
class BlockKdataCommon(KdataCommon):
    """Block kdata; adds no columns beyond ``KdataCommon``."""
    pass
class IndexKdataCommon(KdataCommon):
    """Index kdata; adds no columns beyond ``KdataCommon``."""
    pass
class IndexusKdataCommon(KdataCommon):
    """US index kdata; adds no columns beyond ``KdataCommon``."""
    pass
class EtfKdataCommon(KdataCommon):
    """ETF kdata columns."""
    # NOTE(review): KdataCommon already declares turnover_rate; this
    # re-declaration looks redundant -- confirm before removing.
    turnover_rate = Column(Float)
    # ETF cumulative net value (7-day annualized yield for money-market ETFs)
    cumulative_net_value = Column(Float)
class StockKdataCommon(KdataCommon):
    """Stock kdata; adds no columns beyond ``KdataCommon``."""
    pass
class StockusKdataCommon(KdataCommon):
    """US stock kdata; adds no columns beyond ``KdataCommon``."""
    pass
class StockhkKdataCommon(KdataCommon):
    """HK stock kdata; adds no columns beyond ``KdataCommon``."""
    pass
# the __all__ is generated
__all__ = [
"KdataCommon",
"TickCommon",
"BlockKdataCommon",
"IndexKdataCommon",
"IndexusKdataCommon",
"EtfKdataCommon",
"StockKdataCommon",
"StockusKdataCommon",
"StockhkKdataCommon",
]
# __init__.py structure:
# common code of the package
# export interface in __all__ which contains __all__ of its sub modules
# import all from submodule trade_day
from .trade_day import *
from .trade_day import __all__ as _trade_day_all
__all__ += _trade_day_all
# import all from submodule indexus
from .indexus import *
from .indexus import __all__ as _indexus_all
__all__ += _indexus_all
# import all from submodule stockhk
from .stockhk import *
from .stockhk import __all__ as _stockhk_all
__all__ += _stockhk_all
# import all from submodule stockus
from .stockus import *
from .stockus import __all__ as _stockus_all
__all__ += _stockus_all
# import all from submodule index
from .index import *
from .index import __all__ as _index_all
__all__ += _index_all
# import all from submodule etf
from .etf import *
from .etf import __all__ as _etf_all
__all__ += _etf_all
# import all from submodule stock
from .stock import *
from .stock import __all__ as _stock_all
__all__ += _stock_all
# import all from submodule block
from .block import *
from .block import __all__ as _block_all
__all__ += _block_all
| 20.394366 | 71 | 0.715815 |
ace618e501f62911d1bf0bb585713cd7f3beb654 | 98,147 | py | Python | test/sql/test_external_traversal.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | null | null | null | test/sql/test_external_traversal.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | null | null | null | test/sql/test_external_traversal.py | petit87/sqlalchemy | 67d674bd63ca36ac32b23f96e2b19e9dac6b0863 | [
"MIT"
] | null | null | null | import pickle
import re
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import case
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import extract
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import null
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import tuple_
from sqlalchemy import union
from sqlalchemy.sql import ClauseElement
from sqlalchemy.sql import column
from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.sql import operators
from sqlalchemy.sql import table
from sqlalchemy.sql import util as sql_util
from sqlalchemy.sql import visitors
from sqlalchemy.sql.elements import _clone
from sqlalchemy.sql.expression import _from_objects
from sqlalchemy.sql.visitors import ClauseVisitor
from sqlalchemy.sql.visitors import cloned_traverse
from sqlalchemy.sql.visitors import CloningVisitor
from sqlalchemy.sql.visitors import ReplacingCloningVisitor
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing.schema import eq_clause_element
A = B = t1 = t2 = t3 = table1 = table2 = table3 = table4 = None
class TraversalTest(
    fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
    """test ClauseVisitor's traversal, particularly its
    ability to copy and modify a ClauseElement in place."""

    @classmethod
    def setup_test_class(cls):
        # Rebind the module-level placeholders A and B to the two test
        # ClauseElement subclasses defined below.
        global A, B

        # establish two fictitious ClauseElements.
        # define deep equality semantics as well as deep
        # identity semantics.
        class A(ClauseElement):
            # Leaf element holding a plain ``expr`` value.
            __visit_name__ = "a"
            _traverse_internals = []

            def __init__(self, expr):
                self.expr = expr

            def is_other(self, other):
                # Identity comparison (as opposed to __eq__'s value comparison).
                return other is self

            __hash__ = ClauseElement.__hash__

            def __eq__(self, other):
                return other.expr == self.expr

            def __ne__(self, other):
                return other.expr != self.expr

            def __str__(self):
                return "A(%s)" % repr(self.expr)

        class B(ClauseElement):
            # Composite element containing child elements in ``items``.
            __visit_name__ = "b"

            def __init__(self, *items):
                self.items = items

            def is_other(self, other):
                # Deep identity: same object and pairwise-identical children.
                if other is not self:
                    return False
                for i1, i2 in zip(self.items, other.items):
                    if i1 is not i2:
                        return False
                return True

            __hash__ = ClauseElement.__hash__

            def __eq__(self, other):
                # Deep value equality over children.
                for i1, i2 in zip(self.items, other.items):
                    if i1 != i2:
                        return False
                return True

            def __ne__(self, other):
                for i1, i2 in zip(self.items, other.items):
                    if i1 != i2:
                        return True
                return False

            def _copy_internals(self, clone=_clone, **kw):
                # Clone children so cloned_traverse produces new objects.
                self.items = [clone(i, **kw) for i in self.items]

            def get_children(self, **kwargs):
                return self.items

            def __str__(self):
                return "B(%s)" % repr([str(i) for i in self.items])

    def test_test_classes(self):
        """Sanity-check the A/B test classes' equality vs. identity semantics."""
        a1 = A("expr1")
        struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
        struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
        struct3 = B(
            a1, A("expr2"), B(A("expr1b"), A("expr2bmodified")), A("expr3")
        )

        assert a1.is_other(a1)
        assert struct.is_other(struct)
        assert struct == struct2
        assert struct != struct3
        assert not struct.is_other(struct2)
        assert not struct.is_other(struct3)

    def test_clone(self):
        """CloningVisitor produces an equal but not identical structure."""
        struct = B(
            A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
        )

        class Vis(CloningVisitor):
            def visit_a(self, a):
                pass

            def visit_b(self, b):
                pass

        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct == s2
        assert not struct.is_other(s2)

    def test_no_clone(self):
        """Plain ClauseVisitor traverses in place; identity is preserved."""
        struct = B(
            A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
        )

        class Vis(ClauseVisitor):
            def visit_a(self, a):
                pass

            def visit_b(self, b):
                pass

        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct == s2
        assert struct.is_other(s2)

    def test_clone_anon_label(self):
        """A clone of a Grouping keeps the original's anonymous label name."""
        from sqlalchemy.sql.elements import Grouping

        c1 = Grouping(literal_column("q"))
        s1 = select(c1)

        class Vis(CloningVisitor):
            def visit_grouping(self, elem):
                pass

        vis = Vis()
        s2 = vis.traverse(s1)
        eq_(list(s2.selected_columns)[0]._anon_name_label, c1._anon_name_label)

    # NOTE(review): ("conv_to_unique") and ("none") below are plain strings,
    # not 1-tuples; testing.combinations apparently accepts both forms.
    @testing.combinations(
        ("clone",), ("pickle",), ("conv_to_unique"), ("none"), argnames="meth"
    )
    @testing.combinations(
        ("name with space",),
        ("name with [brackets]",),
        ("name with~~tildes~~",),
        argnames="name",
    )
    def test_bindparam_key_proc_for_copies(self, meth, name):
        r"""test :ticket:`6249`.

        The key of the bindparam needs spaces and other characters
        escaped out for the POSTCOMPILE regex to work correctly.

        Currently, the bind key reg is::

            re.sub(r"[%\(\) \$\[\]]", "_", name)

        and the compiler postcompile reg is::

            re.sub(r"\__[POSTCOMPILE_(\S+)\]", process_expanding, self.string)

        Interestingly, brackets in the name seems to work out.

        """
        expr = column(name).in_([1, 2, 3])

        if meth == "clone":
            expr = visitors.cloned_traverse(expr, {}, {})
        elif meth == "pickle":
            expr = pickle.loads(pickle.dumps(expr))
        elif meth == "conv_to_unique":
            expr.right.unique = False
            expr.right._convert_to_unique()

        # Expected escaped token: same substitution the bind key processor uses.
        token = re.sub(r"[%\(\) \$\[\]]", "_", name)

        self.assert_compile(
            expr,
            '"%(name)s" IN (:%(token)s_1_1, '
            ":%(token)s_1_2, :%(token)s_1_3)" % {"name": name, "token": token},
            render_postcompile=True,
            dialect="default",
        )

    def test_expanding_in_bindparam_safe_to_clone(self):
        """A shallow _clone shares the expanding bindparam without conflict."""
        expr = column("x").in_([1, 2, 3])

        expr2 = expr._clone()

        # shallow copy, bind is used twice
        is_(expr.right, expr2.right)

        stmt = and_(expr, expr2)
        self.assert_compile(
            stmt, "x IN (__[POSTCOMPILE_x_1]) AND x IN (__[POSTCOMPILE_x_1])"
        )
        self.assert_compile(
            stmt, "x IN (1, 2, 3) AND x IN (1, 2, 3)", literal_binds=True
        )

    def test_traversal_size(self):
        """Test :ticket:`6304`.

        Testing that _iterate_from_elements returns only unique FROM
        clauses; overall traversal should be short and all items unique.

        """
        t = table("t", *[column(x) for x in "pqrxyz"])

        s1 = select(t.c.p, t.c.q, t.c.r, t.c.x, t.c.y, t.c.z).subquery()

        s2 = (
            select(s1.c.p, s1.c.q, s1.c.r, s1.c.x, s1.c.y, s1.c.z)
            .select_from(s1)
            .subquery()
        )

        s3 = (
            select(s2.c.p, s2.c.q, s2.c.r, s2.c.x, s2.c.y, s2.c.z)
            .select_from(s2)
            .subquery()
        )

        tt = list(s3.element._iterate_from_elements())
        eq_(tt, [s2])

        total = list(visitors.iterate(s3))

        # before the bug was fixed, this was 750
        eq_(len(total), 25)

        # every element in the traversal is unique
        seen = set()
        for elem in visitors.iterate(s3):
            assert elem not in seen
            seen.add(elem)
        eq_(len(seen), 25)

    def test_change_in_place(self):
        """CloningVisitor mutations affect only the clone; in-place
        ClauseVisitor mutation (not shown here) would modify the original."""
        struct = B(
            A("expr1"), A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3")
        )
        struct2 = B(
            A("expr1"),
            A("expr2modified"),
            B(A("expr1b"), A("expr2b")),
            A("expr3"),
        )
        struct3 = B(
            A("expr1"),
            A("expr2"),
            B(A("expr1b"), A("expr2bmodified")),
            A("expr3"),
        )

        class Vis(CloningVisitor):
            def visit_a(self, a):
                if a.expr == "expr2":
                    a.expr = "expr2modified"

            def visit_b(self, b):
                pass

        vis = Vis()
        s2 = vis.traverse(struct)
        assert struct != s2
        assert not struct.is_other(s2)
        assert struct2 == s2

        class Vis2(CloningVisitor):
            def visit_a(self, a):
                if a.expr == "expr2b":
                    a.expr = "expr2bmodified"

            def visit_b(self, b):
                pass

        vis2 = Vis2()
        s3 = vis2.traverse(struct)
        assert struct != s3
        assert struct3 == s3

    def test_visit_name(self):
        """Subclasses inherit __visit_name__ and traverse like their base."""
        # override fns in testlib/schema.py
        from sqlalchemy import Column

        class CustomObj(Column):
            pass

        assert CustomObj.__visit_name__ == Column.__visit_name__ == "column"

        foo, bar = CustomObj("foo", String), CustomObj("bar", String)
        bin_ = foo == bar
        set(ClauseVisitor().iterate(bin_))
        assert set(ClauseVisitor().iterate(bin_)) == set([foo, bar, bin_])
class BinaryEndpointTraversalTest(fixtures.TestBase):
    """test the special binary product visit"""

    def _assert_traversal(self, expr, expected):
        """Run visit_binary_product over *expr* and compare the recorded
        (operator, left, right) triples against *expected*."""
        canary = []

        def visit(binary, l, r):
            canary.append((binary.operator, l, r))
            print(binary.operator, l, r)

        sql_util.visit_binary_product(visit, expr)
        eq_(canary, expected)

    def test_basic(self):
        a, b = column("a"), column("b")
        self._assert_traversal(a == b, [(operators.eq, a, b)])

    def test_with_tuples(self):
        # tuple_ comparisons expand into the cartesian product of endpoints,
        # while nested == comparisons are visited as-is.
        a, b, c, d, b1, b1a, b1b, e, f = (
            column("a"),
            column("b"),
            column("c"),
            column("d"),
            column("b1"),
            column("b1a"),
            column("b1b"),
            column("e"),
            column("f"),
        )
        expr = tuple_(a, b, b1 == tuple_(b1a, b1b == d), c) > tuple_(
            func.go(e + f)
        )
        self._assert_traversal(
            expr,
            [
                (operators.gt, a, e),
                (operators.gt, a, f),
                (operators.gt, b, e),
                (operators.gt, b, f),
                (operators.eq, b1, b1a),
                (operators.eq, b1b, d),
                (operators.gt, c, e),
                (operators.gt, c, f),
            ],
        )

    def test_composed(self):
        # arithmetic and function calls on either side also expand into
        # per-endpoint pairs.
        a, b, e, f, q, j, r = (
            column("a"),
            column("b"),
            column("e"),
            column("f"),
            column("q"),
            column("j"),
            column("r"),
        )
        expr = and_((a + b) == q + func.sum(e + f), and_(j == r, f == q))
        self._assert_traversal(
            expr,
            [
                (operators.eq, a, q),
                (operators.eq, a, e),
                (operators.eq, a, f),
                (operators.eq, b, q),
                (operators.eq, b, e),
                (operators.eq, b, f),
                (operators.eq, j, r),
                (operators.eq, f, q),
            ],
        )

    def test_subquery(self):
        # a scalar subquery is treated as a single opaque endpoint; its
        # interior comparison is not expanded.
        a, b, c = column("a"), column("b"), column("c")
        subq = select(c).where(c == a).scalar_subquery()
        expr = and_(a == b, b == subq)
        self._assert_traversal(
            expr, [(operators.eq, a, b), (operators.eq, b, subq)]
        )
class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
"""test copy-in-place behavior of various ClauseElements."""
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
global t1, t2, t3
t1 = table("table1", column("col1"), column("col2"), column("col3"))
t2 = table("table2", column("col1"), column("col2"), column("col3"))
t3 = Table(
"table3",
MetaData(),
Column("col1", Integer),
Column("col2", Integer),
)
def test_binary(self):
clause = t1.c.col2 == t2.c.col2
eq_(str(clause), str(CloningVisitor().traverse(clause)))
def test_binary_anon_label_quirk(self):
t = table("t1", column("col1"))
f = t.c.col1 * 5
self.assert_compile(
select(f), "SELECT t1.col1 * :col1_1 AS anon_1 FROM t1"
)
f._anon_name_label
a = t.alias()
f = sql_util.ClauseAdapter(a).traverse(f)
self.assert_compile(
select(f), "SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1"
)
@testing.combinations(
(lambda t1: t1.c.col1, "t1_1.col1"),
(lambda t1: t1.c.col1 == "foo", "t1_1.col1 = :col1_1"),
(
lambda t1: case((t1.c.col1 == "foo", "bar"), else_=t1.c.col1),
"CASE WHEN (t1_1.col1 = :col1_1) THEN :param_1 ELSE t1_1.col1 END",
),
argnames="case, expected",
)
@testing.combinations(False, True, argnames="label_")
@testing.combinations(False, True, argnames="annotate")
def test_annotated_label_cases(self, case, expected, label_, annotate):
"""test #6550"""
t1 = table("t1", column("col1"))
a1 = t1.alias()
expr = case(t1=t1)
if label_:
expr = expr.label(None)
if annotate:
expr = expr._annotate({"foo": "bar"})
adapted = sql_util.ClauseAdapter(a1).traverse(expr)
self.assert_compile(adapted, expected)
@testing.combinations((null(),), (true(),))
def test_dont_adapt_singleton_elements(self, elem):
"""test :ticket:`6259`"""
t1 = table("t1", column("c1"))
stmt = select(t1.c.c1, elem)
wherecond = t1.c.c1.is_(elem)
subq = stmt.subquery()
adapted_wherecond = sql_util.ClauseAdapter(subq).traverse(wherecond)
stmt = select(subq).where(adapted_wherecond)
self.assert_compile(
stmt,
"SELECT anon_1.c1, anon_1.anon_2 FROM (SELECT t1.c1 AS c1, "
"%s AS anon_2 FROM t1) AS anon_1 WHERE anon_1.c1 IS %s"
% (str(elem), str(elem)),
dialect="default_enhanced",
)
def test_adapt_funcs_etc_on_identity_one(self):
"""Adapting to a function etc. will adapt if its on identity"""
t1 = table("t1", column("c1"))
elem = func.foobar()
stmt = select(t1.c.c1, elem)
wherecond = t1.c.c1 == elem
subq = stmt.subquery()
adapted_wherecond = sql_util.ClauseAdapter(subq).traverse(wherecond)
stmt = select(subq).where(adapted_wherecond)
self.assert_compile(
stmt,
"SELECT anon_1.c1, anon_1.foobar_1 FROM (SELECT t1.c1 AS c1, "
"foobar() AS foobar_1 FROM t1) AS anon_1 "
"WHERE anon_1.c1 = anon_1.foobar_1",
dialect="default_enhanced",
)
def test_adapt_funcs_etc_on_identity_two(self):
"""Adapting to a function etc. will not adapt if they are different"""
t1 = table("t1", column("c1"))
elem = func.foobar()
elem2 = func.foobar()
stmt = select(t1.c.c1, elem)
wherecond = t1.c.c1 == elem2
subq = stmt.subquery()
adapted_wherecond = sql_util.ClauseAdapter(subq).traverse(wherecond)
stmt = select(subq).where(adapted_wherecond)
self.assert_compile(
stmt,
"SELECT anon_1.c1, anon_1.foobar_1 FROM (SELECT t1.c1 AS c1, "
"foobar() AS foobar_1 FROM t1) AS anon_1 "
"WHERE anon_1.c1 = foobar()",
dialect="default_enhanced",
)
def test_join(self):
clause = t1.join(t2, t1.c.col2 == t2.c.col2)
c1 = str(clause)
assert str(clause) == str(CloningVisitor().traverse(clause))
class Vis(CloningVisitor):
def visit_binary(self, binary):
binary.right = t2.c.col3
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3))
def test_aliased_column_adapt(self):
t1.select()
aliased = t1.select().alias()
aliased2 = t1.alias()
adapter = sql_util.ColumnAdapter(aliased)
f = select(*[adapter.columns[c] for c in aliased2.c]).select_from(
aliased
)
s = select(aliased2).select_from(aliased)
eq_(str(s), str(f))
f = select(adapter.columns[func.count(aliased2.c.col1)]).select_from(
aliased
)
eq_(
str(select(func.count(aliased2.c.col1)).select_from(aliased)),
str(f),
)
def test_aliased_cloned_column_adapt_inner(self):
clause = select(t1.c.col1, func.foo(t1.c.col2).label("foo"))
c_sub = clause.subquery()
aliased1 = select(c_sub.c.col1, c_sub.c.foo).subquery()
aliased2 = clause
aliased2.selected_columns.col1, aliased2.selected_columns.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# fixed by [ticket:2419]. the inside columns
# on aliased3 have _is_clone_of pointers to those of
# aliased2. corresponding_column checks these
# now.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select(*[adapter.columns[c] for c in aliased2._raw_columns])
f2 = select(*[adapter.columns[c] for c in aliased3._raw_columns])
eq_(str(f1), str(f2))
def test_aliased_cloned_column_adapt_exported(self):
clause = select(t1.c.col1, func.foo(t1.c.col2).label("foo")).subquery()
aliased1 = select(clause.c.col1, clause.c.foo).subquery()
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select(*[adapter.columns[c] for c in aliased2.c])
f2 = select(*[adapter.columns[c] for c in aliased3.c])
eq_(str(f1), str(f2))
def test_aliased_cloned_schema_column_adapt_exported(self):
clause = select(t3.c.col1, func.foo(t3.c.col2).label("foo")).subquery()
aliased1 = select(clause.c.col1, clause.c.foo).subquery()
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select(*[adapter.columns[c] for c in aliased2.c])
f2 = select(*[adapter.columns[c] for c in aliased3.c])
eq_(str(f1), str(f2))
def test_labeled_expression_adapt(self):
lbl_x = (t3.c.col1 == 1).label("x")
t3_alias = t3.alias()
adapter = sql_util.ColumnAdapter(t3_alias)
lblx_adapted = adapter.traverse(lbl_x)
is_not(lblx_adapted._element, lbl_x._element)
lblx_adapted = adapter.traverse(lbl_x)
self.assert_compile(
select(lblx_adapted.self_group()),
"SELECT (table3_1.col1 = :col1_1) AS x FROM table3 AS table3_1",
)
self.assert_compile(
select(lblx_adapted.is_(True)),
"SELECT (table3_1.col1 = :col1_1) IS 1 AS anon_1 "
"FROM table3 AS table3_1",
)
def test_cte_w_union(self):
t = select(func.values(1).label("n")).cte("t", recursive=True)
t = t.union_all(select(t.c.n + 1).where(t.c.n < 100))
s = select(func.sum(t.c.n))
from sqlalchemy.sql.visitors import cloned_traverse
cloned = cloned_traverse(s, {}, {})
self.assert_compile(
cloned,
"WITH RECURSIVE t(n) AS "
"(SELECT values(:values_1) AS n "
"UNION ALL SELECT t.n + :n_1 AS anon_1 "
"FROM t "
"WHERE t.n < :n_2) "
"SELECT sum(t.n) AS sum_1 FROM t",
)
def test_aliased_cte_w_union(self):
t = (
select(func.values(1).label("n"))
.cte("t", recursive=True)
.alias("foo")
)
t = t.union_all(select(t.c.n + 1).where(t.c.n < 100))
s = select(func.sum(t.c.n))
from sqlalchemy.sql.visitors import cloned_traverse
cloned = cloned_traverse(s, {}, {})
self.assert_compile(
cloned,
"WITH RECURSIVE foo(n) AS (SELECT values(:values_1) AS n "
"UNION ALL SELECT foo.n + :n_1 AS anon_1 FROM foo "
"WHERE foo.n < :n_2) SELECT sum(foo.n) AS sum_1 FROM foo",
)
def test_text(self):
clause = text("select * from table where foo=:bar").bindparams(
bindparam("bar")
)
c1 = str(clause)
class Vis(CloningVisitor):
def visit_textclause(self, text):
text.text = text.text + " SOME MODIFIER=:lala"
text._bindparams["lala"] = bindparam("lala")
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == c1 + " SOME MODIFIER=:lala"
assert list(clause._bindparams.keys()) == ["bar"]
assert set(clause2._bindparams.keys()) == set(["bar", "lala"])
def test_select(self):
s2 = select(t1)
s2_assert = str(s2)
s3_assert = str(select(t1).where(t1.c.col2 == 7))
class Vis(CloningVisitor):
def visit_select(self, select):
select.where.non_generative(select, t1.c.col2 == 7)
s3 = Vis().traverse(s2)
assert str(s3) == s3_assert
assert str(s2) == s2_assert
print(str(s2))
print(str(s3))
class Vis(ClauseVisitor):
def visit_select(self, select):
select.where.non_generative(select, t1.c.col2 == 7)
Vis().traverse(s2)
assert str(s2) == s3_assert
s4_assert = str(select(t1).where(and_(t1.c.col2 == 7, t1.c.col3 == 9)))
class Vis(CloningVisitor):
def visit_select(self, select):
select.where.non_generative(select, t1.c.col3 == 9)
s4 = Vis().traverse(s3)
print(str(s3))
print(str(s4))
assert str(s4) == s4_assert
assert str(s3) == s3_assert
s5_assert = str(select(t1).where(and_(t1.c.col2 == 7, t1.c.col1 == 9)))
class Vis(CloningVisitor):
def visit_binary(self, binary):
if binary.left is t1.c.col3:
binary.left = t1.c.col1
binary.right = bindparam("col1", unique=True)
s5 = Vis().traverse(s4)
print(str(s4))
print(str(s5))
assert str(s5) == s5_assert
assert str(s4) == s4_assert
def test_union(self):
u = union(t1.select(), t2.select())
u2 = CloningVisitor().traverse(u)
eq_(str(u), str(u2))
eq_(
[str(c) for c in u2.selected_columns],
[str(c) for c in u.selected_columns],
)
u = union(t1.select(), t2.select())
cols = [str(c) for c in u.selected_columns]
u2 = CloningVisitor().traverse(u)
eq_(str(u), str(u2))
eq_([str(c) for c in u2.selected_columns], cols)
s1 = select(t1).where(t1.c.col1 == bindparam("id_param"))
s2 = select(t2)
u = union(s1, s2)
u2 = u.params(id_param=7)
u3 = u.params(id_param=10)
eq_(str(u), str(u2))
eq_(str(u2), str(u3))
eq_(u2.compile().params, {"id_param": 7})
eq_(u3.compile().params, {"id_param": 10})
def test_params_elements_in_setup_joins(self):
"""test #7055"""
meta = MetaData()
X = Table("x", meta, Column("a", Integer), Column("b", Integer))
Y = Table("y", meta, Column("a", Integer), Column("b", Integer))
s1 = select(X.c.a).where(X.c.b == bindparam("xb")).alias("s1")
jj = (
select(Y)
.join(s1, Y.c.a == s1.c.a)
.where(Y.c.b == bindparam("yb"))
.alias("s2")
)
params = {"xb": 42, "yb": 33}
sel = select(Y).select_from(jj).params(params)
eq_(
[
eq_clause_element(bindparam("yb", value=33)),
eq_clause_element(bindparam("xb", value=42)),
],
sel._generate_cache_key()[1],
)
def test_params_on_expr_against_subquery(self):
    """test #7489: params() applied to a criteria expression that embeds
    a subquery replaces the subquery's bound value under the same key."""
    meta = MetaData()
    b = Table("b", meta, Column("id", Integer), Column("data", String))
    subq = select(b.c.id).where(b.c.data == "some data").subquery()
    criteria = b.c.id == subq.c.id
    stmt = select(b).where(criteria)
    # anonymous parameter key generated for the "some data" literal
    param_key = stmt._generate_cache_key()[1][0].key
    self.assert_compile(
        stmt,
        "SELECT b.id, b.data FROM b, (SELECT b.id AS id "
        "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id",
        checkparams={"data_1": "some data"},
    )
    eq_(
        [
            eq_clause_element(bindparam(param_key, value="some data")),
        ],
        stmt._generate_cache_key()[1],
    )
    # re-binding via params() swaps in the new value; SQL is unchanged
    stmt = select(b).where(criteria.params({param_key: "some other data"}))
    self.assert_compile(
        stmt,
        "SELECT b.id, b.data FROM b, (SELECT b.id AS id "
        "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id",
        checkparams={"data_1": "some other data"},
    )
    eq_(
        [
            eq_clause_element(
                bindparam(param_key, value="some other data")
            ),
        ],
        stmt._generate_cache_key()[1],
    )
def test_params_subqueries_in_joins_one(self):
    """test #7055: params() on a nested join of aliased subqueries
    propagates into each subquery's bound parameters (cache key)."""
    meta = MetaData()
    Pe = Table(
        "pe",
        meta,
        Column("c", Integer),
        Column("p", Integer),
        Column("pid", Integer),
    )
    S = Table(
        "s",
        meta,
        Column("c", Integer),
        Column("p", Integer),
        Column("sid", Integer),
    )
    Ps = Table("ps", meta, Column("c", Integer), Column("p", Integer))
    params = {"pid": 42, "sid": 33}
    pe_s = select(Pe).where(Pe.c.pid == bindparam("pid")).alias("pe_s")
    s_s = select(S).where(S.c.sid == bindparam("sid")).alias("s_s")
    # nested join shape: ps JOIN (pe_s JOIN s_s)
    jj = join(
        Ps,
        join(pe_s, s_s, and_(pe_s.c.c == s_s.c.c, pe_s.c.p == s_s.c.p)),
        and_(Ps.c.c == pe_s.c.c, Ps.c.p == Ps.c.p),
    ).params(params)
    # both bound values must appear in the join's cache key
    eq_(
        [
            eq_clause_element(bindparam("pid", value=42)),
            eq_clause_element(bindparam("sid", value=33)),
        ],
        jj._generate_cache_key()[1],
    )
def test_params_subqueries_in_joins_two(self):
    """test #7055: same as _one but with a left-deep chained join
    (ps JOIN pe_s, then JOIN s_s) rather than a nested join."""
    meta = MetaData()
    Pe = Table(
        "pe",
        meta,
        Column("c", Integer),
        Column("p", Integer),
        Column("pid", Integer),
    )
    S = Table(
        "s",
        meta,
        Column("c", Integer),
        Column("p", Integer),
        Column("sid", Integer),
    )
    Ps = Table("ps", meta, Column("c", Integer), Column("p", Integer))
    params = {"pid": 42, "sid": 33}
    pe_s = select(Pe).where(Pe.c.pid == bindparam("pid")).alias("pe_s")
    s_s = select(S).where(S.c.sid == bindparam("sid")).alias("s_s")
    # chained join shape: (ps JOIN pe_s) JOIN s_s
    jj = (
        join(Ps, pe_s, and_(Ps.c.c == pe_s.c.c, Ps.c.p == Ps.c.p))
        .join(s_s, and_(Ps.c.c == s_s.c.c, Ps.c.p == s_s.c.p))
        .params(params)
    )
    # both bound values must appear in the join's cache key
    eq_(
        [
            eq_clause_element(bindparam("pid", value=42)),
            eq_clause_element(bindparam("sid", value=33)),
        ],
        jj._generate_cache_key()[1],
    )
def test_in(self):
    """IN expressions clone to an equivalent construct."""
    original = t1.c.col1.in_(["foo", "bar"])
    cloned = CloningVisitor().traverse(original)
    assert str(original) == str(cloned)
def test_over(self):
    """OVER (window) expressions clone cleanly and remain reachable
    via visitors.iterate()."""
    original = func.row_number().over(order_by=t1.c.col1)
    cloned = CloningVisitor().traverse(original)
    assert str(original) == str(cloned)
    assert original in visitors.iterate(original, {})
def test_within_group(self):
    """WITHIN GROUP expressions clone cleanly and remain reachable
    via visitors.iterate()."""
    original = func.row_number().within_group(t1.c.col1)
    cloned = CloningVisitor().traverse(original)
    assert str(original) == str(cloned)
    assert original in visitors.iterate(original, {})
def test_funcfilter(self):
    """FILTER-ed aggregate expressions clone to an equivalent construct."""
    original = func.count(1).filter(t1.c.col1 > 1)
    cloned = CloningVisitor().traverse(original)
    assert str(original) == str(cloned)
def test_adapt_union(self):
    """ClauseAdapter built on a UNION alias replaces the base table
    with the alias itself."""
    aliased_union = union(
        t1.select().where(t1.c.col1 == 4),
        t1.select().where(t1.c.col1 == 5),
    ).alias()
    adapter = sql_util.ClauseAdapter(aliased_union)
    assert adapter.traverse(t1) is aliased_union
def test_bindparams(self):
    """test that unique bindparams change their name upon clone()
    to prevent conflicts"""
    # explicit anonymous unique bindparam: clone renames :param_1 -> :param_2
    s = select(t1).where(t1.c.col1 == bindparam(None, unique=True)).alias()
    s2 = CloningVisitor().traverse(s).alias()
    s3 = select(s).where(s.c.col2 == s2.c.col2)
    self.assert_compile(
        s3,
        "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) "
        "AS anon_1, "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
        "AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 "
        "WHERE anon_1.col2 = anon_2.col2",
    )
    # implicit bindparam from a literal comparison also uniquifies
    # (:col1_1 vs :col1_2)
    s = select(t1).where(t1.c.col1 == 4).alias()
    s2 = CloningVisitor().traverse(s).alias()
    s3 = select(s).where(s.c.col2 == s2.c.col2)
    self.assert_compile(
        s3,
        "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) "
        "AS anon_1, "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
        "AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 "
        "WHERE anon_1.col2 = anon_2.col2",
    )
def test_extract(self):
    """EXTRACT expressions survive cloning; cloning does not mutate
    the original statement."""
    s = select(extract("foo", t1.c.col1).label("col1"))
    self.assert_compile(
        s, "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1"
    )
    s2 = CloningVisitor().traverse(s).alias()
    s3 = select(s2.c.col1)
    # original statement must be unchanged after the clone
    self.assert_compile(
        s, "SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1"
    )
    self.assert_compile(
        s3,
        "SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM "
        "table1.col1) AS col1 FROM table1) AS anon_1",
    )
@testing.emits_warning(".*replaced by another column with the same key")
def test_alias(self):
    """Cloning / no-op adaption of a SELECT involving aliases leaves
    the original untouched and compiles identically each time."""
    subq = t2.select().alias("subq")
    s = select(t1.c.col1, subq.c.col1).select_from(
        t1, subq, t1.join(subq, t1.c.col1 == subq.c.col2)
    )
    orig = str(s)
    s2 = CloningVisitor().traverse(s)
    eq_(orig, str(s))
    eq_(str(s), str(s2))
    # cloning the clone is also stable
    s4 = CloningVisitor().traverse(s2)
    eq_(orig, str(s))
    eq_(str(s), str(s2))
    eq_(str(s), str(s4))
    # adapting against an unrelated table is a no-op
    s3 = sql_util.ClauseAdapter(table("foo")).traverse(s)
    eq_(orig, str(s))
    eq_(str(s), str(s3))
    s4 = sql_util.ClauseAdapter(table("foo")).traverse(s3)
    eq_(orig, str(s))
    eq_(str(s), str(s3))
    eq_(str(s), str(s4))
    # alias-of-alias clones the same way
    subq = subq.alias("subq")
    s = select(t1.c.col1, subq.c.col1).select_from(
        t1,
        subq,
        t1.join(subq, t1.c.col1 == subq.c.col2),
    )
    s5 = CloningVisitor().traverse(s)
    eq_(str(s), str(s5))
def test_correlated_select(self):
    """A CloningVisitor that mutates the inner SELECT's WHERE clause
    in place preserves correlation against the enclosing query."""
    s = (
        select(literal_column("*"))
        .where(t1.c.col1 == t2.c.col1)
        .select_from(t1, t2)
        .correlate(t2)
    )

    class Vis(CloningVisitor):
        def visit_select(self, select):
            # append an extra criterion onto the cloned select, in place
            select.where.non_generative(select, t1.c.col2 == 7)

    self.assert_compile(
        select(t2).where(t2.c.col1 == Vis().traverse(s).scalar_subquery()),
        "SELECT table2.col1, table2.col2, table2.col3 "
        "FROM table2 WHERE table2.col1 = "
        "(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
        "AND table1.col2 = :col2_1)",
    )
def test_this_thing(self):
    """ClauseAdapter rewrites the base table inside an anonymously
    aliased subquery to the new alias."""
    s = select(t1).where(t1.c.col1 == "foo").alias()
    s2 = select(s.c.col1)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM (SELECT "
        "table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3 FROM table1 WHERE "
        "table1.col1 = :col1_1) AS anon_1",
    )
    t1a = t1.alias()
    # adapt: table1 inside the subquery becomes table1 AS table1_1
    s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM (SELECT "
        "table1_1.col1 AS col1, table1_1.col2 AS "
        "col2, table1_1.col3 AS col3 FROM table1 "
        "AS table1_1 WHERE table1_1.col1 = "
        ":col1_1) AS anon_1",
    )
def test_this_thing_using_setup_joins_one(self):
    """Adaption reaches into a subquery that was built with
    join_from(), rewriting the joined-from table to the alias."""
    s = select(t1).join_from(t1, t2, t1.c.col1 == t2.c.col2).subquery()
    s2 = select(s.c.col1).join_from(t3, s, t3.c.col2 == s.c.col1)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM table3 JOIN (SELECT table1.col1 AS "
        "col1, table1.col2 AS col2, table1.col3 AS col3 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2) AS anon_1 "
        "ON table3.col2 = anon_1.col1",
    )
    t1a = t1.alias()
    # table1 inside the inner join becomes table1 AS table1_1
    s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM table3 JOIN (SELECT table1_1.col1 AS "
        "col1, table1_1.col2 AS col2, table1_1.col3 AS col3 "
        "FROM table1 AS table1_1 JOIN table2 ON table1_1.col1 = "
        "table2.col2) AS anon_1 ON table3.col2 = anon_1.col1",
    )
def test_this_thing_using_setup_joins_two(self):
    """Adapting a subquery against a JOIN target splices the join
    into the subquery's FROM clause."""
    s = select(t1.c.col1).join(t2, t1.c.col1 == t2.c.col2).subquery()
    s2 = select(s.c.col1)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM (SELECT table1.col1 AS col1 "
        "FROM table1 JOIN table2 ON table1.col1 = table2.col2) AS anon_1",
    )
    t1alias = t1.alias("t1alias")
    j = t1.join(t1alias, t1.c.col1 == t1alias.c.col2)
    # adapting to "t1 JOIN t1alias" expands t1 into that join inside
    # the subquery
    vis = sql_util.ClauseAdapter(j)
    s2 = vis.traverse(s2)
    self.assert_compile(
        s2,
        "SELECT anon_1.col1 FROM (SELECT table1.col1 AS col1 "
        "FROM table1 JOIN table1 AS t1alias "
        "ON table1.col1 = t1alias.col2 "
        "JOIN table2 ON table1.col1 = table2.col2) AS anon_1",
    )
def test_this_thing_using_setup_joins_three(self):
    """Adapting a SELECT to the very join it is built from is a no-op,
    even when further joins are added afterwards, and is idempotent."""
    j = t1.join(t2, t1.c.col1 == t2.c.col2)
    s1 = select(j)
    s2 = s1.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s2,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
    # adapting against the join itself changes nothing
    vis = sql_util.ClauseAdapter(j)
    s3 = vis.traverse(s1)
    s4 = s3.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s4,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
    # applying the adapter a second time is equally a no-op
    s5 = vis.traverse(s3)
    s6 = s5.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s6,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
def test_this_thing_using_setup_joins_four(self):
    """An identity replacement_traverse (replacing nothing) leaves a
    SELECT-from-join fully functional, including subsequent joins."""
    j = t1.join(t2, t1.c.col1 == t2.c.col2)
    s1 = select(j)
    # the join lives in _setup_joins, not _from_obj
    assert not s1._from_obj
    s2 = s1.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s2,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
    # identity traversal: clone structure without replacing anything
    s3 = visitors.replacement_traverse(s1, {}, lambda elem: None)
    s4 = s3.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s4,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
    # and again, on the already-traversed statement
    s5 = visitors.replacement_traverse(s3, {}, lambda elem: None)
    s6 = s5.join(t3, t1.c.col1 == t3.c.col1)
    self.assert_compile(
        s6,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col2 JOIN table3 "
        "ON table3.col1 = table1.col1",
    )
def test_select_fromtwice_one(self):
    """A correlated subquery that references both a table and its alias
    compiles identically before and after cloning."""
    t1a = t1.alias()
    s = (
        select(1)
        .where(t1.c.col1 == t1a.c.col1)
        .select_from(t1a)
        .correlate(t1a)
    )
    s = select(t1).where(t1.c.col1 == s.scalar_subquery())
    self.assert_compile(
        s,
        "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
        "WHERE table1.col1 = "
        "(SELECT 1 FROM table1, table1 AS table1_1 "
        "WHERE table1.col1 = table1_1.col1)",
    )
    # cloning must not disturb correlation or alias naming
    s = CloningVisitor().traverse(s)
    self.assert_compile(
        s,
        "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
        "WHERE table1.col1 = "
        "(SELECT 1 FROM table1, table1 AS table1_1 "
        "WHERE table1.col1 = table1_1.col1)",
    )
def test_select_fromtwice_two(self):
    """A subquery correlated to the base table, selecting from a named
    alias, compiles identically after a ReplacingCloningVisitor pass."""
    s = select(t1).where(t1.c.col1 == "foo").alias()
    s2 = (
        select(1).where(t1.c.col1 == s.c.col1).select_from(s).correlate(t1)
    )
    s3 = select(t1).where(t1.c.col1 == s2.scalar_subquery())
    self.assert_compile(
        s3,
        "SELECT table1.col1, table1.col2, table1.col3 "
        "FROM table1 WHERE table1.col1 = "
        "(SELECT 1 FROM "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3 FROM table1 "
        "WHERE table1.col1 = :col1_1) "
        "AS anon_1 WHERE table1.col1 = anon_1.col1)",
    )
    # a replacing clone (with no replacements) must preserve the SQL
    s4 = ReplacingCloningVisitor().traverse(s3)
    self.assert_compile(
        s4,
        "SELECT table1.col1, table1.col2, table1.col3 "
        "FROM table1 WHERE table1.col1 = "
        "(SELECT 1 FROM "
        "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3 FROM table1 "
        "WHERE table1.col1 = :col1_1) "
        "AS anon_1 WHERE table1.col1 = anon_1.col1)",
    )
def test_select_setup_joins_adapt_element_one(self):
    """Adapting a SELECT-with-join replaces the left join element with
    the alias; the original statement is untouched."""
    stmt = select(t1).join(t2, t1.c.col1 == t2.c.col2)
    t1_aliased = t1.alias()
    adapted = sql_util.ClauseAdapter(t1_aliased).traverse(stmt)
    self.assert_compile(
        stmt,
        "SELECT table1.col1, table1.col2, table1.col3 "
        "FROM table1 JOIN table2 ON table1.col1 = table2.col2",
    )
    self.assert_compile(
        adapted,
        "SELECT table1_1.col1, table1_1.col2, table1_1.col3 "
        "FROM table1 AS table1_1 JOIN table2 "
        "ON table1_1.col1 = table2.col2",
    )
def test_select_setup_joins_adapt_element_two(self):
    """Adapting a join_from() statement rewrites the 'from' side of the
    join; the original statement is untouched."""
    stmt = select(literal_column("1")).join_from(
        t1, t2, t1.c.col1 == t2.c.col2
    )
    t1_aliased = t1.alias()
    adapted = sql_util.ClauseAdapter(t1_aliased).traverse(stmt)
    self.assert_compile(
        stmt, "SELECT 1 FROM table1 JOIN table2 ON table1.col1 = table2.col2"
    )
    self.assert_compile(
        adapted,
        "SELECT 1 FROM table1 AS table1_1 "
        "JOIN table2 ON table1_1.col1 = table2.col2",
    )
def test_select_setup_joins_adapt_element_three(self):
    """Adapting a join_from() statement rewrites the 'to' side of the
    join; the original statement is untouched."""
    stmt = select(literal_column("1")).join_from(
        t1, t2, t1.c.col1 == t2.c.col2
    )
    t2_aliased = t2.alias()
    adapted = sql_util.ClauseAdapter(t2_aliased).traverse(stmt)
    self.assert_compile(
        stmt, "SELECT 1 FROM table1 JOIN table2 ON table1.col1 = table2.col2"
    )
    self.assert_compile(
        adapted,
        "SELECT 1 FROM table1 "
        "JOIN table2 AS table2_1 ON table1.col1 = table2_1.col2",
    )
def test_select_setup_joins_straight_clone(self):
    """A plain CloningVisitor pass over a SELECT-with-join produces an
    identical statement."""
    stmt = select(t1).join(t2, t1.c.col1 == t2.c.col2)
    cloned = CloningVisitor().traverse(stmt)
    expected = (
        "SELECT table1.col1, table1.col2, table1.col3 "
        "FROM table1 JOIN table2 ON table1.col1 = table2.col2"
    )
    self.assert_compile(stmt, expected)
    self.assert_compile(cloned, expected)
class ColumnAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
    """Tests for sql_util.ColumnAdapter: per-column translation,
    result memoization, and wrap()/chain() composition semantics."""

    __dialect__ = "default"

    @classmethod
    def setup_test_class(cls):
        # module-level table fixtures shared by all tests in this class
        global t1, t2
        t1 = table(
            "table1",
            column("col1"),
            column("col2"),
            column("col3"),
            column("col4"),
        )
        t2 = table("table2", column("col1"), column("col2"), column("col3"))

    def test_traverse_memoizes_w_columns(self):
        """traverse() stores its result so .columns returns the same object."""
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select(t1a.c.col1).label("x")
        expr_adapted = adapter.traverse(expr)
        is_not(expr, expr_adapted)
        is_(adapter.columns[expr], expr_adapted)

    def test_traverse_memoizes_w_itself(self):
        """Repeated traverse() of the same expression returns one object."""
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select(t1a.c.col1).label("x")
        expr_adapted = adapter.traverse(expr)
        is_not(expr, expr_adapted)
        is_(adapter.traverse(expr), expr_adapted)

    def test_columns_memoizes_w_itself(self):
        """Repeated .columns lookups of the same expression return one object."""
        t1a = t1.alias()
        adapter = sql_util.ColumnAdapter(t1a, anonymize_labels=True)
        expr = select(t1a.c.col1).label("x")
        expr_adapted = adapter.columns[expr]
        is_not(expr, expr_adapted)
        is_(adapter.columns[expr], expr_adapted)

    def test_wrapping_fallthrough(self):
        """wrap() consults the outer adapter then falls through to the
        wrapped one; chain() consults the first adapter first."""
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(t1a)
        s1 = (
            select(t1a.c.col1, t2a.c.col1)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .alias()
        )
        a2 = sql_util.ColumnAdapter(s1)
        a3 = a2.wrap(a1)
        a4 = a1.wrap(a2)
        a5 = a1.chain(a2)
        # t1.c.col1 -> s1.c.t1a_col1
        # adapted by a2
        is_(a3.columns[t1.c.col1], s1.c.t1a_col1)
        is_(a4.columns[t1.c.col1], s1.c.t1a_col1)
        # chaining can't fall through because a1 grabs it
        # first
        is_(a5.columns[t1.c.col1], t1a.c.col1)
        # t2.c.col1 -> s1.c.t2a_col1
        # adapted by a2
        is_(a3.columns[t2.c.col1], s1.c.t2a_col1)
        is_(a4.columns[t2.c.col1], s1.c.t2a_col1)
        # chaining, t2 hits s1
        is_(a5.columns[t2.c.col1], s1.c.t2a_col1)
        # t1.c.col2 -> t1a.c.col2
        # fallthrough to a1
        is_(a3.columns[t1.c.col2], t1a.c.col2)
        is_(a4.columns[t1.c.col2], t1a.c.col2)
        # chaining hits a1
        is_(a5.columns[t1.c.col2], t1a.c.col2)
        # t2.c.col2 -> t2.c.col2
        # fallthrough to no adaption
        is_(a3.columns[t2.c.col2], t2.c.col2)
        is_(a4.columns[t2.c.col2], t2.c.col2)

    def test_wrapping_ordering(self):
        """illustrate an example where order of wrappers matters.

        This test illustrates both the ordering being significant
        as well as a scenario where multiple translations are needed
        (e.g. wrapping vs. chaining).
        """
        stmt = (
            select(t1.c.col1, t2.c.col1)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .subquery()
        )
        sa = stmt.alias()
        stmt2 = select(t2, sa).subquery()
        a1 = sql_util.ColumnAdapter(stmt)
        a2 = sql_util.ColumnAdapter(stmt2)
        a2_to_a1 = a2.wrap(a1)
        a1_to_a2 = a1.wrap(a2)
        # when stmt2 and stmt represent the same column
        # in different contexts, order of wrapping matters
        # t2.c.col1 via a2 is stmt2.c.col1; then ignored by a1
        is_(a2_to_a1.columns[t2.c.col1], stmt2.c.col1)
        # t2.c.col1 via a1 is stmt.c.table2_col1; a2 then
        # sends this to stmt2.c.table2_col1
        is_(a1_to_a2.columns[t2.c.col1], stmt2.c.table2_col1)
        # check that these aren't the same column
        is_not(stmt2.c.col1, stmt2.c.table2_col1)
        # for mutually exclusive columns, order doesn't matter
        is_(a2_to_a1.columns[t1.c.col1], stmt2.c.table1_col1)
        is_(a1_to_a2.columns[t1.c.col1], stmt2.c.table1_col1)
        is_(a2_to_a1.columns[t2.c.col2], stmt2.c.col2)

    def test_wrapping_multiple(self):
        """illustrate that wrapping runs both adapters"""
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(t1a)
        a2 = sql_util.ColumnAdapter(t2a)
        a3 = a2.wrap(a1)
        stmt = select(t1.c.col1, t2.c.col2)
        self.assert_compile(
            a3.traverse(stmt),
            "SELECT t1a.col1, t2a.col2 FROM table1 AS t1a, table2 AS t2a",
        )
        # chaining does too because these adapters don't share any
        # columns
        a4 = a2.chain(a1)
        self.assert_compile(
            a4.traverse(stmt),
            "SELECT t1a.col1, t2a.col2 FROM table1 AS t1a, table2 AS t2a",
        )

    def test_wrapping_inclusions(self):
        """test wrapping and inclusion rules together,
        taking into account multiple objects with equivalent hash identity."""
        t1a = t1.alias(name="t1a")
        t2a = t2.alias(name="t2a")
        a1 = sql_util.ColumnAdapter(
            t1a, include_fn=lambda col: "a1" in col._annotations
        )
        s1 = (
            select(t1a, t2a)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .alias()
        )
        a2 = sql_util.ColumnAdapter(
            s1, include_fn=lambda col: "a2" in col._annotations
        )
        a3 = a2.wrap(a1)
        # annotated copies of t1.c.col1 / t2.c.col1: same hash identity,
        # different annotations steer which adapter accepts them
        c1a1 = t1.c.col1._annotate(dict(a1=True))
        c1a2 = t1.c.col1._annotate(dict(a2=True))
        c1aa = t1.c.col1._annotate(dict(a1=True, a2=True))
        c2a1 = t2.c.col1._annotate(dict(a1=True))
        c2a2 = t2.c.col1._annotate(dict(a2=True))
        c2aa = t2.c.col1._annotate(dict(a1=True, a2=True))
        is_(a3.columns[c1a1], t1a.c.col1)
        is_(a3.columns[c1a2], s1.c.t1a_col1)
        is_(a3.columns[c1aa], s1.c.t1a_col1)
        # not covered by a1, accepted by a2
        is_(a3.columns[c2aa], s1.c.t2a_col1)
        # not covered by a1, accepted by a2
        is_(a3.columns[c2a2], s1.c.t2a_col1)
        # not covered by a1, rejected by a2
        is_(a3.columns[c2a1], c2a1)
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
    """Create the shared three-column table fixtures used module-wide."""
    global t1, t2
    names = ("col1", "col2", "col3")
    t1 = table("table1", *[column(name) for name in names])
    t2 = table("table2", *[column(name) for name in names])
def test_correlation_on_clone(self):
    """Correlation must be computed against the full _cloned_set of
    each FROM element, so adapted/cloned subqueries keep correlating
    the same way as the originals."""
    t1alias = t1.alias("t1alias")
    t2alias = t2.alias("t2alias")
    vis = sql_util.ClauseAdapter(t1alias)
    s = (
        select(literal_column("*"))
        .select_from(t1alias, t2alias)
        .scalar_subquery()
    )
    froms = list(s._iterate_from_elements())
    assert t2alias in froms
    assert t1alias in froms
    self.assert_compile(
        select(literal_column("*")).where(t2alias.c.col1 == s),
        "SELECT * FROM table2 AS t2alias WHERE "
        "t2alias.col1 = (SELECT * FROM table1 AS "
        "t1alias)",
    )
    s = vis.traverse(s)
    froms = list(s._iterate_from_elements())
    assert t2alias in froms  # present because it was not cloned
    assert t1alias in froms  # present because the adapter placed
    # it there and was also not cloned
    # correlate list on "s" needs to take into account the full
    # _cloned_set for each element in _froms when correlating
    self.assert_compile(
        select(literal_column("*")).where(t2alias.c.col1 == s),
        "SELECT * FROM table2 AS t2alias WHERE "
        "t2alias.col1 = (SELECT * FROM table1 AS "
        "t1alias)",
    )
    # explicit correlate(t2alias): same behavior before/after adaption
    # and after a straight clone
    s = (
        select(literal_column("*"))
        .select_from(t1alias, t2alias)
        .correlate(t2alias)
        .scalar_subquery()
    )
    self.assert_compile(
        select(literal_column("*")).where(t2alias.c.col1 == s),
        "SELECT * FROM table2 AS t2alias WHERE "
        "t2alias.col1 = (SELECT * FROM table1 AS "
        "t1alias)",
    )
    s = vis.traverse(s)
    self.assert_compile(
        select(literal_column("*")).where(t2alias.c.col1 == s),
        "SELECT * FROM table2 AS t2alias WHERE "
        "t2alias.col1 = (SELECT * FROM table1 AS "
        "t1alias)",
    )
    s = CloningVisitor().traverse(s)
    self.assert_compile(
        select(literal_column("*")).where(t2alias.c.col1 == s),
        "SELECT * FROM table2 AS t2alias WHERE "
        "t2alias.col1 = (SELECT * FROM table1 AS "
        "t1alias)",
    )
    # implicit correlation of t1 in a scalar subquery: adaption to
    # t1alias must carry the correlation over
    s = (
        select(literal_column("*"))
        .where(t1.c.col1 == t2.c.col1)
        .scalar_subquery()
    )
    self.assert_compile(
        select(t1.c.col1, s),
        "SELECT table1.col1, (SELECT * FROM table2 "
        "WHERE table1.col1 = table2.col1) AS "
        "anon_1 FROM table1",
    )
    vis = sql_util.ClauseAdapter(t1alias)
    s = vis.traverse(s)
    self.assert_compile(
        select(t1alias.c.col1, s),
        "SELECT t1alias.col1, (SELECT * FROM "
        "table2 WHERE t1alias.col1 = table2.col1) "
        "AS anon_1 FROM table1 AS t1alias",
    )
    s = CloningVisitor().traverse(s)
    self.assert_compile(
        select(t1alias.c.col1, s),
        "SELECT t1alias.col1, (SELECT * FROM "
        "table2 WHERE t1alias.col1 = table2.col1) "
        "AS anon_1 FROM table1 AS t1alias",
    )
    # same again but with explicit correlate(t1)
    s = (
        select(literal_column("*"))
        .where(t1.c.col1 == t2.c.col1)
        .correlate(t1)
        .scalar_subquery()
    )
    self.assert_compile(
        select(t1.c.col1, s),
        "SELECT table1.col1, (SELECT * FROM table2 "
        "WHERE table1.col1 = table2.col1) AS "
        "anon_1 FROM table1",
    )
    vis = sql_util.ClauseAdapter(t1alias)
    s = vis.traverse(s)
    self.assert_compile(
        select(t1alias.c.col1, s),
        "SELECT t1alias.col1, (SELECT * FROM "
        "table2 WHERE t1alias.col1 = table2.col1) "
        "AS anon_1 FROM table1 AS t1alias",
    )
    s = CloningVisitor().traverse(s)
    self.assert_compile(
        select(t1alias.c.col1, s),
        "SELECT t1alias.col1, (SELECT * FROM "
        "table2 WHERE t1alias.col1 = table2.col1) "
        "AS anon_1 FROM table1 AS t1alias",
    )
def test_adapt_select_w_unlabeled_fn(self):
    """An unlabeled function column can still be located among the
    subquery's exported columns by both adapter flavors."""
    expr = func.count(t1.c.col1)
    stmt = select(t1, expr)
    self.assert_compile(
        stmt,
        "SELECT table1.col1, table1.col2, table1.col3, "
        "count(table1.col1) AS count_1 FROM table1",
    )
    stmt2 = select(stmt.subquery())
    self.assert_compile(
        stmt2,
        "SELECT anon_1.col1, anon_1.col2, anon_1.col3, anon_1.count_1 "
        "FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
        "table1.col3 AS col3, count(table1.col1) AS count_1 "
        "FROM table1) AS anon_1",
    )
    # the function corresponds to the fourth exported column
    is_(
        stmt2.selected_columns[3],
        stmt2.selected_columns.corresponding_column(expr),
    )
    # ClauseAdapter and ColumnAdapter both resolve the raw expression
    # to that exported column
    is_(
        sql_util.ClauseAdapter(stmt2).replace(expr),
        stmt2.selected_columns[3],
    )
    column_adapter = sql_util.ColumnAdapter(stmt2)
    is_(column_adapter.columns[expr], stmt2.selected_columns[3])
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_correlate_except_on_clone(self, use_adapt_from):
    """test [ticket:4537]: adapting a statement whose subquery uses
    correlate_except() must expand the exception list to cover the
    adaptation target (here, t1 expanded into "t1 JOIN t1alias")."""
    t1alias = t1.alias("t1alias")
    j = t1.join(t1alias, t1.c.col1 == t1alias.c.col2)
    if use_adapt_from:
        vis = sql_util.ClauseAdapter(j, adapt_from_selectables=[t1])
    else:
        vis = sql_util.ClauseAdapter(j)
    # "control" subquery - uses correlate which has worked w/ adaption
    # for a long time
    control_s = (
        select(t2.c.col1)
        .where(t2.c.col1 == t1.c.col1)
        .correlate(t2)
        .scalar_subquery()
    )
    # test subquery - given only t1 and t2 in the enclosing selectable,
    # will do the same thing as the "control" query since the correlation
    # works out the same
    s = (
        select(t2.c.col1)
        .where(t2.c.col1 == t1.c.col1)
        .correlate_except(t1)
        .scalar_subquery()
    )
    # use both subqueries in statements
    control_stmt = select(control_s, t1.c.col1, t2.c.col1).select_from(
        t1.join(t2, t1.c.col1 == t2.c.col1)
    )
    stmt = select(s, t1.c.col1, t2.c.col1).select_from(
        t1.join(t2, t1.c.col1 == t2.c.col1)
    )
    # they are the same
    self.assert_compile(
        control_stmt,
        "SELECT "
        "(SELECT table2.col1 FROM table1 "
        "WHERE table2.col1 = table1.col1) AS anon_1, "
        "table1.col1, table2.col1 AS col1_1 "
        "FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col1",
    )
    self.assert_compile(
        stmt,
        "SELECT "
        "(SELECT table2.col1 FROM table1 "
        "WHERE table2.col1 = table1.col1) AS anon_1, "
        "table1.col1, table2.col1 AS col1_1 "
        "FROM table1 "
        "JOIN table2 ON table1.col1 = table2.col1",
    )
    # now test against the adaption of "t1" into "t1 JOIN t1alias".
    # note in the control case, we aren't actually testing that
    # Select is processing the "correlate" list during the adaption
    # since we aren't adapting the "correlate"
    self.assert_compile(
        vis.traverse(control_stmt),
        "SELECT "
        "(SELECT table2.col1 FROM "
        "table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
        "WHERE table2.col1 = table1.col1) AS anon_1, "
        "table1.col1, table2.col1 AS col1_1 "
        "FROM table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
        "JOIN table2 ON table1.col1 = table2.col1",
    )
    # but here, correlate_except() does have the thing we're adapting
    # so whatever is in there has to be expanded out to include
    # the adaptation target, in this case "t1 JOIN t1alias".
    self.assert_compile(
        vis.traverse(stmt),
        "SELECT "
        "(SELECT table2.col1 FROM "
        "table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
        "WHERE table2.col1 = table1.col1) AS anon_1, "
        "table1.col1, table2.col1 AS col1_1 "
        "FROM table1 JOIN table1 AS t1alias ON table1.col1 = t1alias.col2 "
        "JOIN table2 ON table1.col1 = table2.col1",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_correlate_except_with_mixed_tables(self, use_adapt_from):
    """test [ticket:6060]: without adapt_from_selectables, adapting to
    a subquery that itself mentions table2 wrongly pulls table2 out of
    the correlated subquery; with it, only t1 is adapted."""
    stmt = select(
        t1.c.col1,
        select(func.count(t2.c.col1))
        .where(t2.c.col1 == t1.c.col1)
        .correlate_except(t2)
        .scalar_subquery(),
    )
    self.assert_compile(
        stmt,
        "SELECT table1.col1, "
        "(SELECT count(table2.col1) AS count_1 FROM table2 "
        "WHERE table2.col1 = table1.col1) AS anon_1 "
        "FROM table1",
    )
    subq = (
        select(t1)
        .join(t2, t1.c.col1 == t2.c.col1)
        .where(t2.c.col2 == "x")
        .subquery()
    )
    if use_adapt_from:
        vis = sql_util.ClauseAdapter(subq, adapt_from_selectables=[t1])
    else:
        vis = sql_util.ClauseAdapter(subq)
    if use_adapt_from:
        # restricted adaption: only t1 references become anon_1
        self.assert_compile(
            vis.traverse(stmt),
            "SELECT anon_1.col1, "
            "(SELECT count(table2.col1) AS count_1 FROM table2 WHERE "
            "table2.col1 = anon_1.col1) AS anon_2 "
            "FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 JOIN table2 ON table1.col1 = "
            "table2.col1 WHERE table2.col2 = :col2_1) AS anon_1",
        )
    else:
        # here's the buggy version. table2 gets yanked out of the
        # correlated subquery also. AliasedClass now uses
        # adapt_from_selectables in all cases
        self.assert_compile(
            vis.traverse(stmt),
            "SELECT anon_1.col1, "
            "(SELECT count(table2.col1) AS count_1 FROM table2, "
            "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 JOIN table2 ON "
            "table1.col1 = table2.col1 WHERE table2.col2 = :col2_1) AS "
            "anon_1 WHERE table2.col1 = anon_1.col1) AS anon_2 "
            "FROM (SELECT table1.col1 AS col1, table1.col2 AS col2, "
            "table1.col3 AS col3 FROM table1 JOIN table2 "
            "ON table1.col1 = table2.col1 "
            "WHERE table2.col2 = :col2_1) AS anon_1",
        )
@testing.fails_on_everything_except()
def test_joins_dont_adapt(self):
    # adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
    # make much sense. ClauseAdapter doesn't make any changes if
    # it's against a straight join.
    # NOTE: decorated fails_on_everything_except() — this documents
    # intentionally-unsupported behavior; the assertion is expected
    # to fail on every backend.
    users = table("users", column("id"))
    addresses = table("addresses", column("id"), column("user_id"))
    ualias = users.alias()
    s = (
        select(func.count(addresses.c.id))
        .where(users.c.id == addresses.c.user_id)
        .correlate(users)
    )
    s = sql_util.ClauseAdapter(ualias).traverse(s)
    j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)
    self.assert_compile(
        sql_util.ClauseAdapter(j1).traverse(s),
        "SELECT count(addresses.id) AS count_1 "
        "FROM addresses WHERE users_1.id = "
        "addresses.user_id",
    )
def test_prev_entities_adapt(self):
    """test #6503: with_only_columns() keeps previously-established
    join entities adaptable, so the users alias propagates into the
    ON clause."""
    m = MetaData()
    users = Table("users", m, Column("id", Integer, primary_key=True))
    addresses = Table(
        "addresses",
        m,
        Column("id", Integer, primary_key=True),
        Column("user_id", ForeignKey("users.id")),
    )
    ualias = users.alias()
    # join first, then replace the column list
    s = select(users).join(addresses).with_only_columns(addresses.c.id)
    s = sql_util.ClauseAdapter(ualias).traverse(s)
    self.assert_compile(
        s,
        "SELECT addresses.id FROM users AS users_1 "
        "JOIN addresses ON users_1.id = addresses.user_id",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_1(self, use_adapt_from):
    """Adapting a labeled aggregate rebases its FROM objects onto the
    alias, with or without adapt_from_selectables."""
    t1alias = t1.alias("t1alias")
    adapter = (
        sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
        if use_adapt_from
        else sql_util.ClauseAdapter(t1alias)
    )
    labeled = adapter.traverse(func.count(t1.c.col1).label("foo"))
    assert list(_from_objects(labeled)) == [t1alias]
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_2(self, use_adapt_from):
    """An explicit select_from(t1) is rewritten to the alias."""
    t1alias = t1.alias("t1alias")
    adapter = (
        sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
        if use_adapt_from
        else sql_util.ClauseAdapter(t1alias)
    )
    stmt = select(literal_column("*")).select_from(t1)
    self.assert_compile(
        adapter.traverse(stmt),
        "SELECT * FROM table1 AS t1alias",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_3(self, use_adapt_from):
    """t1 references in the WHERE clause are adapted; t2 is untouched."""
    t1alias = t1.alias("t1alias")
    adapter = (
        sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
        if use_adapt_from
        else sql_util.ClauseAdapter(t1alias)
    )
    stmt = select(literal_column("*")).where(t1.c.col1 == t2.c.col2)
    self.assert_compile(
        adapter.traverse(stmt),
        "SELECT * FROM table1 AS t1alias, table2 "
        "WHERE t1alias.col1 = table2.col2",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_4(self, use_adapt_from):
    """Same as _3 with an explicit select_from(t1, t2) FROM list."""
    t1alias = t1.alias("t1alias")
    adapter = (
        sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
        if use_adapt_from
        else sql_util.ClauseAdapter(t1alias)
    )
    stmt = (
        select(literal_column("*"))
        .where(t1.c.col1 == t2.c.col2)
        .select_from(t1, t2)
    )
    self.assert_compile(
        adapter.traverse(stmt),
        "SELECT * FROM table1 AS t1alias, table2 "
        "WHERE t1alias.col1 = table2.col2",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_5(self, use_adapt_from):
    """An adapted subquery correlating t1 correlates to the alias in
    the enclosing statement (only table2 remains in its FROM list)."""
    t1alias = t1.alias("t1alias")
    if use_adapt_from:
        vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
    else:
        vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        select(t1alias, t2).where(
            t1alias.c.col1
            == vis.traverse(
                select(literal_column("*"))
                .where(t1.c.col1 == t2.c.col2)
                .select_from(t1, t2)
                .correlate(t1)
                .scalar_subquery()
            )
        ),
        "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 "
        "FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
        "(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)",
    )
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_table_to_alias_6(self, use_adapt_from):
    """Correlating t2 instead: the adapted subquery keeps the alias in
    its FROM list while table2 correlates outward."""
    t1alias = t1.alias("t1alias")
    if use_adapt_from:
        vis = sql_util.ClauseAdapter(t1alias, adapt_from_selectables=[t1])
    else:
        vis = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        select(t1alias, t2).where(
            t1alias.c.col1
            == vis.traverse(
                select(literal_column("*"))
                .where(t1.c.col1 == t2.c.col2)
                .select_from(t1, t2)
                .correlate(t2)
                .scalar_subquery()
            )
        ),
        "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
        "table2.col1 AS col1_1, table2.col2 AS col2_1, "
        "table2.col3 AS col3_1 "
        "FROM table1 AS t1alias, table2 "
        "WHERE t1alias.col1 = "
        "(SELECT * FROM table1 AS t1alias "
        "WHERE t1alias.col1 = table2.col2)",
    )
def test_table_to_alias_7(self):
    """Column references inside a searched CASE are adapted."""
    t1alias = t1.alias("t1alias")
    adapter = sql_util.ClauseAdapter(t1alias)
    adapted = adapter.traverse(
        case((t1.c.col1 == 5, t1.c.col2), else_=t1.c.col1)
    )
    self.assert_compile(
        adapted,
        "CASE WHEN (t1alias.col1 = :col1_1) THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_8(self):
    """Column references inside a simple (value-based) CASE are adapted."""
    t1alias = t1.alias("t1alias")
    adapter = sql_util.ClauseAdapter(t1alias)
    adapted = adapter.traverse(
        case((5, t1.c.col2), value=t1.c.col1, else_=t1.c.col1)
    )
    self.assert_compile(
        adapted,
        "CASE t1alias.col1 WHEN :param_1 THEN "
        "t1alias.col2 ELSE t1alias.col1 END",
    )
def test_table_to_alias_9(self):
    """A named derived-table alias compiles with its given name."""
    derived = select(literal_column("*")).select_from(t1).alias("foo")
    self.assert_compile(
        derived.select(),
        "SELECT foo.* FROM (SELECT * FROM table1) AS foo",
    )
def test_table_to_alias_10(self):
    """Adaption reaches inside a named derived-table alias."""
    derived = select(literal_column("*")).select_from(t1).alias("foo")
    t1alias = t1.alias("t1alias")
    adapter = sql_util.ClauseAdapter(t1alias)
    self.assert_compile(
        adapter.traverse(derived.select()),
        "SELECT foo.* FROM (SELECT * FROM table1 AS t1alias) AS foo",
    )
    def test_table_to_alias_11(self):
        """Baseline rendering of an aliased SELECT * subquery.

        NOTE(review): this is byte-for-byte identical to
        test_table_to_alias_9 — likely a copy/paste leftover; consider
        removing it or varying the scenario it covers.
        """
        s = select(literal_column("*")).select_from(t1).alias("foo")
        self.assert_compile(
            s.select(), "SELECT foo.* FROM (SELECT * FROM table1) " "AS foo"
        )
    def test_table_to_alias_12(self):
        """Adapting a labeled aggregate keeps the label text but swaps in the alias."""
        t1alias = t1.alias("t1alias")
        vis = sql_util.ClauseAdapter(t1alias)
        ff = vis.traverse(func.count(t1.c.col1).label("foo"))
        self.assert_compile(
            select(ff),
            "SELECT count(t1alias.col1) AS foo FROM " "table1 AS t1alias",
        )
        # the adapted label's FROM objects now point at the alias, not table1
        assert list(_from_objects(ff)) == [t1alias]

    # def test_table_to_alias_2(self):
    # TODO: self.assert_compile(vis.traverse(select(func.count(t1.c
    # .col1).l abel('foo')), clone=True), "SELECT
    # count(t1alias.col1) AS foo FROM table1 AS t1alias")
def test_table_to_alias_13(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select(literal_column("*")).where(t1.c.col1 == t2.c.col2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"AS t2alias WHERE t1alias.col1 = "
"t2alias.col2",
)
def test_table_to_alias_14(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select("*").where(t1.c.col1 == t2.c.col2).select_from(t1, t2)
),
"SELECT * FROM table1 AS t1alias, table2 "
"AS t2alias WHERE t1alias.col1 = "
"t2alias.col2",
)
def test_table_to_alias_15(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
select(t1alias, t2alias).where(
t1alias.c.col1
== vis.traverse(
select("*")
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t1)
.scalar_subquery()
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"t2alias.col1 AS col1_1, t2alias.col2 AS col2_1, "
"t2alias.col3 AS col3_1 "
"FROM table1 AS t1alias, table2 AS t2alias "
"WHERE t1alias.col1 = "
"(SELECT * FROM table2 AS t2alias "
"WHERE t1alias.col1 = t2alias.col2)",
)
def test_table_to_alias_16(self):
t1alias = t1.alias("t1alias")
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias("t2alias")
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
t2alias.select().where(
t2alias.c.col2
== vis.traverse(
select("*")
.where(t1.c.col1 == t2.c.col2)
.select_from(t1, t2)
.correlate(t2)
.scalar_subquery()
)
),
"SELECT t2alias.col1, t2alias.col2, t2alias.col3 "
"FROM table2 AS t2alias WHERE t2alias.col2 = "
"(SELECT * FROM table1 AS t1alias WHERE "
"t1alias.col1 = t2alias.col2)",
)
def test_include_exclude(self):
m = MetaData()
a = Table(
"a",
m,
Column("id", Integer, primary_key=True),
Column(
"xxx_id",
Integer,
ForeignKey("a.id", name="adf", use_alter=True),
),
)
e = a.c.id == a.c.xxx_id
assert str(e) == "a.id = a.xxx_id"
b = a.alias()
e = sql_util.ClauseAdapter(
b,
include_fn=lambda x: x in set([a.c.id]),
equivalents={a.c.id: set([a.c.id])},
).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
    def test_recursive_equivalents(self):
        """A circular equivalents mapping must not recurse forever in the adapter."""
        m = MetaData()
        a = Table("a", m, Column("x", Integer), Column("y", Integer))
        b = Table("b", m, Column("x", Integer), Column("y", Integer))
        c = Table("c", m, Column("x", Integer), Column("y", Integer))
        # force a recursion overflow, by linking a.c.x<->c.c.x, and
        # asking for a nonexistent col. corresponding_column should prevent
        # endless depth.
        adapt = sql_util.ClauseAdapter(
            b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])}
        )
        assert adapt._corresponding_column(a.c.x, False) is None
    def test_multilevel_equivalents(self):
        """Equivalents two hops away still resolve to the adapted alias column."""
        m = MetaData()
        a = Table("a", m, Column("x", Integer), Column("y", Integer))
        b = Table("b", m, Column("x", Integer), Column("y", Integer))
        c = Table("c", m, Column("x", Integer), Column("y", Integer))
        alias = select(a).select_from(a.join(b, a.c.x == b.c.x)).alias()
        # two levels of indirection from c.x->b.x->a.x, requires recursive
        # corresponding_column call
        adapt = sql_util.ClauseAdapter(
            alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])}
        )
        assert adapt._corresponding_column(a.c.x, False) is alias.c.x
        assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def test_join_to_alias(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
c = Table(
"c",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
d = Table(
"d",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
j1 = a.outerjoin(b)
j2 = (
select(j1)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
j3 = c.join(j2, j2.c.b_id == c.c.bid)
j4 = j3.outerjoin(d)
self.assert_compile(
j4,
"c JOIN (SELECT a.id AS a_id, b.id AS "
"b_id, b.aid AS b_aid FROM a LEFT OUTER "
"JOIN b ON a.id = b.aid) AS anon_1 ON anon_1.b_id = c.bid "
"LEFT OUTER JOIN d ON anon_1.a_id = d.aid",
)
j5 = (
j3.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery("foo")
)
j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]
# this statement takes c join(a join b), wraps it inside an
# aliased "select * from c join(a join b) AS foo". the outermost
# right side "left outer join d" stays the same, except "d"
# joins against foo.a_id instead of plain "a_id"
self.assert_compile(
j6,
"(SELECT c.id AS c_id, c.bid AS c_bid, "
"anon_1.a_id AS anon_1_a_id, anon_1.b_id AS anon_1_b_id, "
"anon_1.b_aid AS "
"anon_1_b_aid FROM c JOIN (SELECT a.id AS a_id, "
"b.id AS b_id, b.aid AS b_aid FROM a LEFT "
"OUTER JOIN b ON a.id = b.aid) AS anon_1 ON anon_1.b_id = "
"c.bid) AS foo LEFT OUTER JOIN d ON "
"foo.anon_1_a_id = d.aid",
)
    def test_derived_from(self):
        """Spot-check is_derived_from() across tables, aliases, subqueries and clones."""
        assert select(t1).is_derived_from(t1)
        assert not select(t2).is_derived_from(t1)
        # derivation is one-directional: a table is not derived from its SELECT
        assert not t1.is_derived_from(select(t1))
        assert t1.alias().is_derived_from(t1)
        s1 = select(t1, t2).alias("foo")
        s2 = select(s1).limit(5).offset(10).alias()
        assert s2.is_derived_from(s1)
        # derivation must survive cloning
        s2 = s2._clone()
        assert s2.is_derived_from(s1)
def test_aliasedselect_to_aliasedselect_straight(self):
# original issue from ticket #904
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
self.assert_compile(
sql_util.ClauseAdapter(s2).traverse(s1),
"SELECT foo.col1, foo.col2, foo.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 "
"AS col2, table1.col3 AS col3 FROM table1) "
"AS foo LIMIT :param_1 OFFSET :param_2",
{"param_1": 5, "param_2": 10},
)
def test_aliasedselect_to_aliasedselect_join(self):
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
self.assert_compile(
sql_util.ClauseAdapter(s2).traverse(j).select(),
"SELECT anon_1.col1, anon_1.col2, "
"anon_1.col3, table2.col1 AS col1_1, table2.col2 AS col2_1, "
"table2.col3 AS col3_1 FROM (SELECT foo.col1 AS "
"col1, foo.col2 AS col2, foo.col3 AS col3 "
"FROM (SELECT table1.col1 AS col1, "
"table1.col2 AS col2, table1.col3 AS col3 "
"FROM table1) AS foo LIMIT :param_1 OFFSET "
":param_2) AS anon_1 LEFT OUTER JOIN "
"table2 ON anon_1.col1 = table2.col1",
{"param_1": 5, "param_2": 10},
)
@testing.combinations((True,), (False,), argnames="use_adapt_from")
def test_aliasedselect_to_aliasedselect_join_nested_table(
self, use_adapt_from
):
"""test the logic in clauseadapter regarding not traversing aliases.
adapt_from_selectables case added to test #6762, which is a regression
from #6060
"""
s1 = select(t1).alias("foo")
s2 = select(s1).limit(5).offset(10).alias()
talias = t1.alias("bar")
# here is the problem. s2 is derived from s1 which is derived
# from t1
assert s2.is_derived_from(t1)
# however, s2 is not derived from talias, which *is* derived from t1
assert not s2.is_derived_from(talias)
# therefore, talias gets its table replaced, except for a rule
# we added to ClauseAdapter to stop traversal if the selectable is
# not derived from an alias of a table. This rule was previously
# in Alias._copy_internals().
j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)
if use_adapt_from:
vis = sql_util.ClauseAdapter(s2, adapt_from_selectables=[s1])
else:
vis = sql_util.ClauseAdapter(s2)
self.assert_compile(
vis.traverse(j).select(),
"SELECT anon_1.col1, anon_1.col2, "
"anon_1.col3, bar.col1 AS col1_1, bar.col2 AS col2_1, "
"bar.col3 AS col3_1 "
"FROM (SELECT foo.col1 AS col1, foo.col2 "
"AS col2, foo.col3 AS col3 FROM (SELECT "
"table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1) AS foo "
"LIMIT :param_1 OFFSET :param_2) AS anon_1 "
"LEFT OUTER JOIN table1 AS bar ON "
"anon_1.col1 = bar.col1",
{"param_1": 5, "param_2": 10},
)
    def test_functions(self):
        """Function arguments are adapted both standalone and inside a SELECT."""
        self.assert_compile(
            sql_util.ClauseAdapter(t1.alias()).traverse(func.count(t1.c.col1)),
            "count(table1_1.col1)",
        )
        s = select(func.count(t1.c.col1))
        self.assert_compile(
            sql_util.ClauseAdapter(t1.alias()).traverse(s),
            "SELECT count(table1_1.col1) AS count_1 "
            "FROM table1 AS table1_1",
        )
    def test_table_valued_column(self):
        """test #6775

        A table-valued reference (the whole row passed as a function
        argument) must be adapted to the alias name as well.
        """
        stmt = select(func.some_json_func(t1.table_valued()))
        self.assert_compile(
            stmt,
            "SELECT some_json_func(table1) AS some_json_func_1 FROM table1",
        )
        self.assert_compile(
            sql_util.ClauseAdapter(t1.alias()).traverse(stmt),
            "SELECT some_json_func(table1_1) AS some_json_func_1 "
            "FROM table1 AS table1_1",
        )
def test_recursive(self):
metadata = MetaData()
a = Table("a", metadata, Column("id", Integer, primary_key=True))
b = Table(
"b",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
c = Table(
"c",
metadata,
Column("id", Integer, primary_key=True),
Column("bid", Integer, ForeignKey("b.id")),
)
d = Table(
"d",
metadata,
Column("id", Integer, primary_key=True),
Column("aid", Integer, ForeignKey("a.id")),
)
u = union(
a.join(b).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
a.join(d).select().set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
).alias()
self.assert_compile(
sql_util.ClauseAdapter(u).traverse(
select(c.c.bid).where(c.c.bid == u.c.b_aid)
),
"SELECT c.bid "
"FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
"AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1 "
"WHERE c.bid = anon_1.b_aid",
)
def test_label_anonymize_one(self):
t1a = t1.alias()
adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label("expr")
expr_adapted = adapter.traverse(expr)
stmt = select(expr, expr_adapted).order_by(expr, expr_adapted)
self.assert_compile(
stmt,
"SELECT "
"(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
"AS expr, "
"(SELECT table1_1.col2 FROM table1 AS table1_1 "
"WHERE table1_1.col3 = :col3_2) AS anon_1 "
"ORDER BY expr, anon_1",
)
def test_label_anonymize_two(self):
t1a = t1.alias()
adapter = sql_util.ClauseAdapter(t1a, anonymize_labels=True)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label(None)
expr_adapted = adapter.traverse(expr)
stmt = select(expr, expr_adapted).order_by(expr, expr_adapted)
self.assert_compile(
stmt,
"SELECT "
"(SELECT table1.col2 FROM table1 WHERE table1.col3 = :col3_1) "
"AS anon_1, "
"(SELECT table1_1.col2 FROM table1 AS table1_1 "
"WHERE table1_1.col3 = :col3_2) AS anon_2 "
"ORDER BY anon_1, anon_2",
)
def test_label_anonymize_three(self):
t1a = t1.alias()
adapter = sql_util.ColumnAdapter(
t1a, anonymize_labels=True, allow_label_resolve=False
)
expr = select(t1.c.col2).where(t1.c.col3 == 5).label(None)
l1 = expr
is_(l1._order_by_label_element, l1)
eq_(l1._allow_label_resolve, True)
expr_adapted = adapter.traverse(expr)
l2 = expr_adapted
is_(l2._order_by_label_element, l2)
eq_(l2._allow_label_resolve, False)
l3 = adapter.traverse(expr)
is_(l3._order_by_label_element, l3)
eq_(l3._allow_label_resolve, False)
class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
global table1, table2, table3, table4
def _table(name):
return table(name, column("col1"), column("col2"), column("col3"))
table1, table2, table3, table4 = [
_table(name) for name in ("table1", "table2", "table3", "table4")
]
def test_splice(self):
t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
j = (
t1.join(t2, t1.c.col1 == t2.c.col1)
.join(t3, t2.c.col1 == t3.c.col1)
.join(t4, t4.c.col1 == t1.c.col1)
)
s = select(t1).where(t1.c.col2 < 5).alias()
self.assert_compile(
sql_util.splice_joins(s, j),
"(SELECT table1.col1 AS col1, table1.col2 "
"AS col2, table1.col3 AS col3 FROM table1 "
"WHERE table1.col2 < :col2_1) AS anon_1 "
"JOIN table2 ON anon_1.col1 = table2.col1 "
"JOIN table1 AS table1_1 ON table2.col1 = "
"table1_1.col1 JOIN table2 AS table2_1 ON "
"table2_1.col1 = anon_1.col1",
)
def test_stop_on(self):
t1, t2, t3 = table1, table2, table3
j1 = t1.join(t2, t1.c.col1 == t2.c.col1)
j2 = j1.join(t3, t2.c.col1 == t3.c.col1)
s = select(t1).select_from(j1).alias()
self.assert_compile(
sql_util.splice_joins(s, j2),
"(SELECT table1.col1 AS col1, table1.col2 "
"AS col2, table1.col3 AS col3 FROM table1 "
"JOIN table2 ON table1.col1 = table2.col1) "
"AS anon_1 JOIN table2 ON anon_1.col1 = "
"table2.col1 JOIN table3 ON table2.col1 = "
"table3.col1",
)
self.assert_compile(
sql_util.splice_joins(s, j2, j1),
"(SELECT table1.col1 AS col1, table1.col2 "
"AS col2, table1.col3 AS col3 FROM table1 "
"JOIN table2 ON table1.col1 = table2.col1) "
"AS anon_1 JOIN table3 ON table2.col1 = "
"table3.col1",
)
def test_splice_2(self):
t2a = table2.alias()
t3a = table3.alias()
j1 = table1.join(t2a, table1.c.col1 == t2a.c.col1).join(
t3a, t2a.c.col2 == t3a.c.col2
)
t2b = table4.alias()
j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3)
self.assert_compile(
sql_util.splice_joins(table1, j1),
"table1 JOIN table2 AS table2_1 ON "
"table1.col1 = table2_1.col1 JOIN table3 "
"AS table3_1 ON table2_1.col2 = "
"table3_1.col2",
)
self.assert_compile(
sql_util.splice_joins(table1, j2),
"table1 JOIN table4 AS table4_1 ON " "table1.col3 = table4_1.col3",
)
self.assert_compile(
sql_util.splice_joins(sql_util.splice_joins(table1, j1), j2),
"table1 JOIN table2 AS table2_1 ON "
"table1.col1 = table2_1.col1 JOIN table3 "
"AS table3_1 ON table2_1.col2 = "
"table3_1.col2 JOIN table4 AS table4_1 ON "
"table1.col3 = table4_1.col3",
)
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"""tests the generative capability of Select"""
__dialect__ = "default"
@classmethod
def setup_test_class(cls):
global t1, t2
t1 = table("table1", column("col1"), column("col2"), column("col3"))
t2 = table("table2", column("col1"), column("col2"), column("col3"))
def test_columns(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
select_copy = s.add_columns(column("yyy"))
self.assert_compile(
select_copy,
"SELECT table1.col1, table1.col2, " "table1.col3, yyy FROM table1",
)
is_not(s.selected_columns, select_copy.selected_columns)
is_not(s._raw_columns, select_copy._raw_columns)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
def test_froms(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
select_copy = s.select_from(t2)
self.assert_compile(
select_copy,
"SELECT table1.col1, table1.col2, "
"table1.col3 FROM table1, table2",
)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
def test_prefixes(self):
s = t1.select()
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
select_copy = s.prefix_with("FOOBER")
self.assert_compile(
select_copy,
"SELECT FOOBER table1.col1, table1.col2, "
"table1.col3 FROM table1",
)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, " "table1.col3 FROM table1"
)
def test_execution_options(self):
s = select().execution_options(foo="bar")
s2 = s.execution_options(bar="baz")
s3 = s.execution_options(foo="not bar")
# The original select should not be modified.
eq_(s.get_execution_options(), dict(foo="bar"))
# s2 should have its execution_options based on s, though.
eq_(s2.get_execution_options(), dict(foo="bar", bar="baz"))
eq_(s3.get_execution_options(), dict(foo="not bar"))
def test_invalid_options(self):
assert_raises(
exc.ArgumentError, select().execution_options, compiled_cache={}
)
assert_raises(
exc.ArgumentError,
select().execution_options,
isolation_level="READ_COMMITTED",
)
# this feature not available yet
def _NOTYET_test_execution_options_in_kwargs(self):
s = select(execution_options=dict(foo="bar"))
s2 = s.execution_options(bar="baz")
# The original select should not be modified.
assert s._execution_options == dict(foo="bar")
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo="bar", bar="baz")
# this feature not available yet
def _NOTYET_test_execution_options_in_text(self):
s = text("select 42", execution_options=dict(foo="bar"))
assert s._execution_options == dict(foo="bar")
class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests the generative capability of Insert, Update"""
__dialect__ = "default"
# fixme: consolidate converage from elsewhere here and expand
@classmethod
def setup_test_class(cls):
global t1, t2
t1 = table("table1", column("col1"), column("col2"), column("col3"))
t2 = table("table2", column("col1"), column("col2"), column("col3"))
def test_prefixes(self):
i = t1.insert()
self.assert_compile(
i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)",
)
gen = i.prefix_with("foober")
self.assert_compile(
gen,
"INSERT foober INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)",
)
self.assert_compile(
i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)",
)
i2 = t1.insert().prefix_with("squiznart")
self.assert_compile(
i2,
"INSERT squiznart INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)",
)
gen2 = i2.prefix_with("quux")
self.assert_compile(
gen2,
"INSERT squiznart quux INTO "
"table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)",
)
def test_add_kwarg(self):
i = t1.insert()
compile_state = i._compile_state_factory(i, None)
eq_(compile_state._dict_parameters, None)
i = i.values(col1=5)
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(compile_state._dict_parameters, {"col1": 5})
i = i.values(col2=7)
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(
compile_state._dict_parameters, {"col1": 5, "col2": 7}
)
def test_via_tuple_single(self):
i = t1.insert()
compile_state = i._compile_state_factory(i, None)
eq_(compile_state._dict_parameters, None)
i = i.values((5, 6, 7))
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(
compile_state._dict_parameters,
{"col1": 5, "col2": 6, "col3": 7},
)
def test_kw_and_dict_simultaneously_single(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
r"Can't pass positional and kwargs to values\(\) simultaneously",
i.values,
{"col1": 5},
col2=7,
)
def test_via_tuple_multi(self):
i = t1.insert()
compile_state = i._compile_state_factory(i, None)
eq_(compile_state._dict_parameters, None)
i = i.values([(5, 6, 7), (8, 9, 10)])
compile_state = i._compile_state_factory(i, None)
eq_(
compile_state._dict_parameters,
{"col1": 5, "col2": 6, "col3": 7},
)
eq_(compile_state._has_multi_parameters, True)
eq_(
compile_state._multi_parameters,
[
{"col1": 5, "col2": 6, "col3": 7},
{"col1": 8, "col2": 9, "col3": 10},
],
)
def test_inline_values_single(self):
i = t1.insert().values({"col1": 5})
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(compile_state._dict_parameters, {"col1": 5})
is_(compile_state._has_multi_parameters, False)
def test_inline_values_multi(self):
i = t1.insert().values([{"col1": 5}, {"col1": 6}])
compile_state = i._compile_state_factory(i, None)
# multiparams are not converted to bound parameters
eq_(compile_state._dict_parameters, {"col1": 5})
# multiparams are not converted to bound parameters
eq_(compile_state._multi_parameters, [{"col1": 5}, {"col1": 6}])
is_(compile_state._has_multi_parameters, True)
def _compare_param_dict(self, a, b):
if list(a) != list(b):
return False
from sqlalchemy.types import NullType
for a_k, a_i in a.items():
b_i = b[a_k]
# compare BindParameter on the left to
# literal value on the right
assert a_i.compare(literal(b_i, type_=NullType()))
def test_add_dictionary(self):
i = t1.insert()
compile_state = i._compile_state_factory(i, None)
eq_(compile_state._dict_parameters, None)
i = i.values({"col1": 5})
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(compile_state._dict_parameters, {"col1": 5})
is_(compile_state._has_multi_parameters, False)
i = i.values({"col1": 6})
# note replaces
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(compile_state._dict_parameters, {"col1": 6})
is_(compile_state._has_multi_parameters, False)
i = i.values({"col2": 7})
compile_state = i._compile_state_factory(i, None)
self._compare_param_dict(
compile_state._dict_parameters, {"col1": 6, "col2": 7}
)
is_(compile_state._has_multi_parameters, False)
def test_add_kwarg_disallowed_multi(self):
i = t1.insert()
i = i.values([{"col1": 5}, {"col1": 7}])
i = i.values(col2=7)
assert_raises_message(
exc.InvalidRequestError,
"Can't mix single and multiple VALUES formats",
i.compile,
)
def test_cant_mix_single_multi_formats_dict_to_list(self):
i = t1.insert().values(col1=5)
i = i.values([{"col1": 6}])
assert_raises_message(
exc.InvalidRequestError,
"Can't mix single and multiple VALUES "
"formats in one INSERT statement",
i.compile,
)
def test_cant_mix_single_multi_formats_list_to_dict(self):
i = t1.insert().values([{"col1": 6}])
i = i.values({"col1": 5})
assert_raises_message(
exc.InvalidRequestError,
"Can't mix single and multiple VALUES "
"formats in one INSERT statement",
i.compile,
)
def test_erroneous_multi_args_dicts(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values,
{"col1": 5},
{"col1": 7},
)
def test_erroneous_multi_args_tuples(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values,
(5, 6, 7),
(8, 9, 10),
)
def test_erroneous_multi_args_plus_kw(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
r"Can't pass positional and kwargs to values\(\) simultaneously",
i.values,
[{"col1": 5}],
col2=7,
)
def test_update_no_support_multi_values(self):
u = t1.update()
u = u.values([{"col1": 5}, {"col1": 7}])
assert_raises_message(
exc.InvalidRequestError,
"UPDATE construct does not support multiple parameter sets.",
u.compile,
)
def test_update_no_support_multi_constructor(self):
stmt = t1.update().values([{"col1": 5}, {"col1": 7}])
assert_raises_message(
exc.InvalidRequestError,
"UPDATE construct does not support multiple parameter sets.",
stmt.compile,
)
| 33.577489 | 79 | 0.553568 |
ace61959ae523b3434fc9ada61f0ee9de69f0ac3 | 4,799 | py | Python | Version0.2/Python/Preprocessing/writeapp.py | rloekvh/Exasim | c794431e8b1eff902c2ffad8182d1a9b53339c0d | [
"MIT"
] | 37 | 2020-12-09T20:24:36.000Z | 2022-02-18T17:19:23.000Z | Version0.2/Python/Preprocessing/writeapp.py | rloekvh/Exasim | c794431e8b1eff902c2ffad8182d1a9b53339c0d | [
"MIT"
] | 25 | 2020-11-25T20:37:33.000Z | 2022-02-25T15:53:11.000Z | Version0.2/Python/Preprocessing/writeapp.py | rloekvh/Exasim | c794431e8b1eff902c2ffad8182d1a9b53339c0d | [
"MIT"
] | 8 | 2020-11-30T15:34:06.000Z | 2022-01-09T21:06:00.000Z | from numpy import *
def writeapp(app, filename):
    """Serialize the Exasim ``app`` configuration dict into a raw binary file.

    File layout: len(nsize), nsize, ndims, then each data block (flags,
    problem settings, boundary data, ...) written as float64 in Fortran
    (column-major) order.  ``app`` is mutated in place (the written fields
    are replaced by their flattened/concatenated arrays) and returned.

    :param app: dict of solver/application settings (Exasim "app" struct).
    :param filename: path of the binary file to create.
    :return: the mutated ``app`` dict.
    :raises ValueError: if app['nco'] does not equal size(app['vindx'], 1).
    """
    app['flag'] = array(app['flag'])
    app['problem'] = array(app['problem'])
    app['factor'] = array(app['factor'])
    app['solversparam'] = array(app['solversparam'])

    appname = 0

    # Prepend the scalar switches to the corresponding user-supplied arrays.
    tmp = array([app['tdep'], app['wave'], app['linearproblem'],
                 app['debugmode'], app['matvecorder'], app['GMRESortho'],
                 app['preconditioner'], app['precMatrixType'],
                 app['NLMatrixType'], app['runmode'], app['tdfunc'],
                 app['source'], app['modelnumber']])
    app['flag'] = concatenate([tmp, app['flag']])

    tmp = array([app['hybrid'], appname, app['temporalscheme'], app['torder'],
                 app['nstage'], app['convStabMethod'], app['diffStabMethod'],
                 app['rotatingFrame'], app['viscosityModel'], app['SGSmodel'],
                 app['ALE'], app['AV'], app['linearsolver'], app['NLiter'],
                 app['linearsolveriter'], app['GMRESrestart'], app['RBdim'],
                 app['saveSolFreq'], app['saveSolOpt'], app['timestepOffset'],
                 app['stgNmode'], app['saveSolBouFreq'], app['ibs']])
    app['problem'] = concatenate([tmp, app['problem']])

    tmp = array([app['time']])
    app['factor'] = concatenate([tmp, app['factor']])

    tmp = array([app['NLtol'], app['linearsolvertol'], app['matvectol'],
                 app['NLparam']])
    app['solversparam'] = concatenate([tmp, app['solversparam']])

    app['flag'] = array(app['flag']).flatten('F')
    app['problem'] = array(app['problem'])
    app['factor'] = array(app['factor'])
    app['solversparam'] = array(app['solversparam'])

    # Dimension header; entries 14..39 are reserved and stay zero.
    ndims = zeros((40, 1))
    ndims[0] = app['mpiprocs']  # number of processors
    ndims[1] = app['nd']
    ndims[2] = 0
    ndims[3] = 0
    ndims[4] = 0
    ndims[5] = app['nc']
    ndims[6] = app['ncu']
    ndims[7] = app['ncq']
    ndims[8] = app['ncp']
    ndims[9] = app['nco']
    ndims[10] = app['nch']
    ndims[11] = app['ncx']
    ndims[12] = app['nce']
    ndims[13] = app['ncw']

    if app['nco'] != app['vindx'].shape[0]:
        # BUG FIX: the original called an undefined `error(...)` function,
        # which would itself raise NameError; raise a proper exception.
        raise ValueError("app.nco must be equal to size(app.vindx,1)")

    # nsize records the length of each block that follows; entries 13..15
    # are reserved and stay zero.
    nsize = zeros((16, 1))
    nsize[0] = size(ndims)
    nsize[1] = size(app['flag'])          # size of flag
    nsize[2] = size(app['problem'])       # size of physics
    nsize[3] = size(app['uinf'])          # boundary data
    nsize[4] = size(app['dt'])            # number of time steps
    nsize[5] = size(app['factor'])        # size of factor
    nsize[6] = size(app['physicsparam'])  # number of physical parameters
    nsize[7] = size(app['solversparam'])  # number of solver parameters
    nsize[8] = size(app['tau'])           # number of stabilization parameters
    nsize[9] = size(app['stgdata'])
    nsize[10] = size(app['stgparam'])
    nsize[11] = size(app['stgib'])
    nsize[12] = size(app['vindx'])

    print("Writing app into file...")
    with open(filename, 'wb') as fileID:
        array(size(nsize), dtype=float64).tofile(fileID)
        nsize.astype('float64').tofile(fileID)
        if nsize[0] > 0:
            ndims.astype('float64').tofile(fileID)
        # Data blocks, written in the fixed order the reader expects;
        # block i is written only when nsize[i] > 0 (same as the original).
        write_order = ['flag', 'problem', 'uinf', 'dt', 'factor',
                       'physicsparam', 'solversparam', 'tau', 'stgdata',
                       'stgparam', 'stgib', 'vindx']
        for i, key in enumerate(write_order, start=1):
            if nsize[i] > 0:
                data = array(app[key]).flatten(order='F')
                if key == 'vindx':
                    data = data - 1  # convert to 0-based indexing
                app[key] = data
                data.astype('float64').tofile(fileID)
    return app
| 45.704762 | 436 | 0.575537 |
ace61a456d90638f9e398cf260c0c4d57f6d42c2 | 1,505 | py | Python | gcpdiag/queries/artifact_registry_stub.py | GoogleCloudPlatform/gcpdiag | 1fb20974c80b54c145cb4281d8b254a0ad59667d | [
"Apache-2.0"
] | 63 | 2021-09-28T16:29:19.000Z | 2022-03-30T02:01:15.000Z | gcpdiag/queries/artifact_registry_stub.py | GoogleCloudPlatform/gcpdiag | 1fb20974c80b54c145cb4281d8b254a0ad59667d | [
"Apache-2.0"
] | 10 | 2021-10-06T11:59:44.000Z | 2022-03-24T16:41:38.000Z | gcpdiag/queries/artifact_registry_stub.py | GoogleCloudPlatform/gcpdiag | 1fb20974c80b54c145cb4281d8b254a0ad59667d | [
"Apache-2.0"
] | 20 | 2021-09-28T18:38:29.000Z | 2022-03-24T10:19:56.000Z | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Stub API calls used in gcf.py for testing.
Instead of doing real API calls, we return test JSON data.
"""
import re
from gcpdiag.queries import apis_stub
# pylint: disable=unused-argument
# pylint: disable=invalid-name
INCORRECT_RESOURCE_ERROR = ('incorrect resource format. Use '
'projects/*/locations/*/repositories/')
class ArtifactRegistryApiStub:
  """Stub that mimics the Artifact Registry API client surface for tests.

  The resource-path accessors are chainable no-ops returning the stub
  itself; getIamPolicy returns canned JSON via apis_stub.RestCallStub.
  """

  def projects(self):
    return self

  def locations(self):
    return self

  def repositories(self):
    return self

  def getIamPolicy(self, resource: str) -> apis_stub.RestCallStub:
    match = re.match(
        r'projects/([^/]+)/locations/([^/]+)/repositories/([^/]+)', resource)
    # Reject anything that is not a full repository resource path.
    if not match:
      raise ValueError(INCORRECT_RESOURCE_ERROR)
    return apis_stub.RestCallStub(match.group(1), 'artifact-registry-policy')
| 29.509804 | 76 | 0.706977 |
ace61a7a8535ea4e14455f63719042e6442986a3 | 15,436 | py | Python | Applied Neural Nets/p05.py | RobertGageStroud/Portfolio | 7e2f915cd34eb7f818b94398ba261f65c7135db9 | [
"MIT"
] | null | null | null | Applied Neural Nets/p05.py | RobertGageStroud/Portfolio | 7e2f915cd34eb7f818b94398ba261f65c7135db9 | [
"MIT"
] | null | null | null | Applied Neural Nets/p05.py | RobertGageStroud/Portfolio | 7e2f915cd34eb7f818b94398ba261f65c7135db9 | [
"MIT"
] | 1 | 2020-10-27T16:58:32.000Z | 2020-10-27T16:58:32.000Z | import numpy as np
def mlp_check_dimensions(x, y, ws, bs):
"""
Return True if the dimensions in double_u and beta agree.
:param x: a list of lists representing the x matrix.
:param y: a list output values.
:param ws: a list of weight matrices (one for each layer)
:param bs: a list of biases (one for each layer)
:return: True if the dimensions of x, y, ws and bs match
"""
## W rows should equal X columns, b col should equal W col
result = True
if len(ws) != len(bs):
return False
if len(x[0]) != len(ws[0]):
return False
if len(x) != len(y):
return False
if len(y[0]) != len(bs[len(bs) - 1][0]):
return False
for layer in range(len(ws)):
if len(ws[layer][0]) != len(bs[layer][0]):
return False
if layer == 0:
pass
else:
prev_w = ws[layer - 1]
if len(ws[layer]) != len(prev_w[0]):
return False
return result
def mlp_net_input(h, w, b):
    """Compute the linear (pre-activation) net input z = h @ w + b.

    :param h: activations from the previous layer (n_samples x n_in).
    :param w: weight matrix for this layer (n_in x n_units).
    :param b: bias row-vector for this layer (1 x n_units).
    :return: the net input for this layer (n_samples x n_units).
    """
    affinities = np.dot(h, w)
    return np.add(affinities, b)
def mlp_tanh(z):
    """Hyperbolic tangent activation ("squashing" function), elementwise.

    :param z: scalar or array-like of net inputs.
    :return: tanh(z), with the same shape as z.
    """
    squashed = np.tanh(z)
    return squashed
def mlp_softmax(z):
    """
    Return the row-wise softmax of z using a numerically stable approach.

    Stability comes from subtracting each row's own maximum before
    exponentiating: np.exp can then never overflow, and every row keeps
    at least one exp(0) == 1 term in its denominator.

    Note: the previous implementation subtracted the single GLOBAL max
    (flat argmax over the whole matrix) from every row. Rows whose values
    sat far below that global max underflowed entirely to exp() == 0,
    making the denominator 0 and producing NaN probabilities.

    :param z: a list of lists (or 2-D array) of real numbers.
    :return: a 2-D numpy array of probabilities, same shape as z;
             each row sums to 1.
    """
    scores = np.asarray(z, dtype=float)
    # Per-row max, not global max — see docstring for why this matters.
    shifted = scores - scores.max(axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)
def mlp_feed_layer(h, w, b, phi):
    """
    Return the output of a single network layer: phi(h.w + b).
    :param h: The input to the layer (output of the previous layer).
    :param w: The weight matrix for this layer.
    :param b: The bias row-vector for this layer.
    :param phi: The activation function for this layer (e.g. tanh, softmax).
    :return: The activated output of this layer.
    """
    # Affine net input, then the layer's nonlinearity.
    net_input = np.add(np.dot(h, w), b)
    return phi(net_input)
def mlp_feed_forward(x, ws, bs, phis):
    """
    Push x through every layer and collect each layer's output.
    :param x: The input matrix to the network.
    :param ws: The list of weight matrices for layers 1 to l.
    :param bs: The list of bias vectors for layers 1 to l.
    :param phis: The list of activation functions for layers 1 to l.
    :return: The list [h^(0)=x, h^(1), ..., h^(l)] of layer outputs.
    """
    outputs = [x]
    activation = x
    for layer_index in range(len(ws)):
        activation = mlp_feed_layer(
            activation, ws[layer_index], bs[layer_index], phis[layer_index])
        outputs.append(activation)
    return outputs
def mlp_predict_proba(x, ws, bs, phis):
    """
    Return the matrix of class probabilities for input matrix x.
    :param x: The input matrix to the network.
    :param ws: The list of weight matrices for layers 1 to l.
    :param bs: The list of bias vectors for layers 1 to l.
    :param phis: The list of activation functions for layers 1 to l.
    :return: The output matrix of probabilities (the last layer's output,
             assuming the final activation is softmax).
    """
    # Run the full forward pass; the final layer's output is the
    # probability matrix. (Removed a stray dead `pass` statement.)
    layer_outputs = mlp_feed_forward(x, ws, bs, phis)
    return layer_outputs[len(layer_outputs) - 1]
def mlp_predict(x, ws, bs, phis):
    """
    Return the vector of predicted class labels for input matrix x.
    :param x: The input matrix to the network.
    :param ws: The list of weight matrices for layers 1 to l.
    :param bs: The list of bias vectors for layers 1 to l.
    :param phis: The list of activation functions for layers 1 to l.
    :return: A 1-D float array of class labels (row-wise argmax).
    """
    probabilities = mlp_predict_proba(x, ws, bs, phis)
    # Row-wise argmax picks the most probable class; cast to float to
    # match the dtype the original np.empty()-based implementation produced.
    return np.argmax(probabilities, axis=1).astype(float)
def mlp_data_prep():
    """
    Load and prepare the MNIST-1000 dataset from the working directory.

    Shuffles the data with a fixed seed, splits 800/200 into train/test,
    one-hot encodes the labels, and standardizes each feature using the
    TRAINING set's mean and std (applied to both splits).

    :return: x_train, x_test, y_train, y_test, y_matrix_train, y_matrix_test
    """
    # Raw features and integer labels; assumes both .npy files exist in cwd.
    x_data = np.load('x_mnist1000.npy')
    y_data = np.load('y_mnist1000.npy')
    # One-hot encode the 10 digit classes.
    y_matrix = np.zeros((len(y_data), 10))
    y_matrix[(range(len(x_data)), y_data)] = 1
    y_matrix = y_matrix.astype(int)
    # Fixed seed so the shuffle (and therefore the split) is reproducible.
    np.random.seed(1)
    ran_index = np.random.permutation(range(0, len(x_data)))
    y_train_ran = [y_data[i] for i in ran_index]
    y_train_ran = np.asarray(y_train_ran)
    x_ran = [x_data[i] for i in ran_index]
    x_ran = np.asarray(x_ran)
    # NOTE(review): leftover debugging observation from the author.
    num = 0
    #x_ran = np.random.permutation(x_data)
    y_matrix_ran = [y_matrix[i] for i in ran_index]
    y_matrix_ran = np.asarray(y_matrix_ran)
    """
    for index in range(len(ran_index)):
        y_train_ran[index] = (y_data[ran_index[index]])
        x_ran[index] = x_data[ran_index[index]]
        y_matrix_ran[index] = (y_matrix[ran_index[index]])
        num += 1
    """
    # 800 training samples, 200 test samples.
    x_train = x_ran[0:800]
    y_matrix_train = y_matrix_ran[0:800]
    y_train = y_train_ran[0:800]
    x_test = x_ran[800:]
    y_matrix_test = y_matrix_ran[800:]
    y_test = y_train_ran[800:]
    # Transpose so each row is one feature across all samples, then
    # standardize per feature with the training mean/std.
    x_train = np.transpose(x_train)
    x_test = np.transpose(x_test)
    for row in range(len(x_train)):
        mean = np.mean(x_train[row])
        std = np.std(x_train[row])
        # Constant features (std == 0) would divide by zero; use 1 instead.
        if std == 0:
            std = 1
        for col in range(len(x_train[0])):
            add = np.subtract(x_train[row][col], mean)
            x_train[row][col] = np.divide(add,std)
            # The test split has only 200 samples (columns after transpose),
            # so only the first 200 column indices apply to it.
            if col < 200:
                add2 = np.subtract(x_test[row][col], mean)
                x_test[row][col] = np.divide(add2, std)
    # Restore samples-as-rows orientation before returning.
    x_train = np.transpose(x_train)
    x_test = np.transpose(x_test)
    return x_train, x_test, y_train, y_test, y_matrix_train, y_matrix_test
############################################### Program 5 Start ##############################################################
def mlp_cost(x, y, ws, bs, phis, alpha):
    """
    Return the mean cross-entropy cost with an L2 regularization term.
    :param x: a list of lists representing the x matrix.
    :param y: a list of lists of one-hot encoded output values.
    :param ws: a list of weight matrices (one for each layer)
    :param bs: a list of biases (one for each layer)
    :param phis: a list of activation functions
    :param alpha: the hyperparameter controlling regularization strength
    :return: the scalar cost J(ws, bs)
    """
    # L2 penalty: (alpha / 2) * sum of squared weights over all layers.
    sum_of_squares = 0
    for layer in range(len(ws)):
        sum_of_squares += np.sum(np.power(ws[layer], 2))
    sum_of_squares = np.multiply(sum_of_squares, np.divide(alpha, 2))
    prob = mlp_predict_proba(x, ws, bs, phis)
    # Small epsilon keeps np.log finite when a predicted probability is 0.
    epsilon = .00000001
    log_prob = np.log(np.add(prob, epsilon))
    # Cross-entropy: only the true class's log-probability survives the
    # elementwise product with the one-hot y.  (Renamed the accumulator so
    # it no longer shadows the builtin `sum`.)
    cross_entropy_total = np.sum(np.multiply(y, log_prob))
    cost = np.multiply(cross_entropy_total, np.divide(-1, len(x)))
    return np.add(cost, sum_of_squares)
def mlp_propagate_error(x, y, ws, bs, phis, hs):
    """
    Backpropagate the error and return dJ/dz^(k) for each layer k=1..l.
    :param x: a list of lists representing the x matrix.
    :param y: a list of lists of one-hot encoded output values.
    :param ws: a list of weight matrices (one for each layer)
    :param bs: a list of biases (one for each layer)
    :param phis: a list of activation functions
    :param hs: a list of outputs for each layer including h^(0) = x
    :return: A list of gradients of J with respect to z^(k) for k=1..l
    """
    # Output-layer delta for softmax + cross-entropy: (P - Y) / m.
    P = hs[len(hs) - 1]
    diff = np.subtract(P, y)
    D_l = np.divide(diff, len(x))
    result = []
    result.append(D_l)
    D_k = np.copy(D_l)
    # Walk backwards through the hidden layers; `thing` is the tanh
    # derivative 1 - h^2 evaluated at that layer's output.
    for layers in range((len(ws) - 1) ,0, -1):
        thing = np.power(hs[layers], 2)
        thing = np.subtract(1, thing)
        transpose = np.transpose(ws[layers])
        # Propagate the delta through the layer's weights, then apply
        # the elementwise activation derivative.
        D_k = np.dot(D_k, transpose)
        D_k = np.multiply(D_k, thing)
        result.append(D_k)
    # Deltas were accumulated output-to-input; reverse to layer order 1..l.
    result.reverse()
    return result
def mlp_gradient(x, y, ws, bs, phis, alpha):
    """
    Return the gradients of the cost with respect to the weights and biases.
    :param x: a list of lists representing the x matrix.
    :param y: a list of lists of one-hot encoded output values.
    :param ws: a list of weight matrices (one for each layer)
    :param bs: a list of biases (one for each layer)
    :param phis: a list of activation functions
    :param alpha: the L2 regularization hyperparameter
    :return: (weight gradients, bias gradients), one entry per layer
    """
    # Forward pass for layer outputs, then backward pass for deltas.
    hs = mlp_feed_forward(x, ws, bs, phis)
    D = mlp_propagate_error(x, y, ws, bs, phis, hs)
    result_w = []
    result_b = []
    # First layer's weight gradient: x^T . D^(1) plus the L2 term alpha*w.
    w_1 = np.dot(np.transpose(x), D[0])
    step = np.multiply(alpha, ws[0])
    w_1 = np.add(w_1, step)
    w_1 = np.ndarray.tolist(w_1)
    result_w.append(w_1)
    # Remaining layers: h^(k)^T . D^(k) plus the L2 term.
    for layers in range(1, len(ws)):
        w_2 = np.dot(np.transpose(hs[layers]), D[layers])
        w_2 = np.add(w_2, np.multiply(alpha, ws[layers]))
        result_w.append(w_2)
    # Bias gradient is the column sum of each layer's delta (ones^T . D).
    for layers in range(len(ws)):
        ones = np.ones((len(x), 1))
        b_1 = np.dot(np.transpose(ones), D[layers])
        result_b.append(b_1)
    # NOTE(review): this reshape flattens the per-layer gradient list into a
    # single (1, -1) array, which looks inconsistent with how
    # mlp_gradient_descent indexes w_gradient[layers] — verify intent.
    result_w = np.reshape(result_w, (1, -1))
    return result_w, result_b
def mlp_initialize(layer_widths):
    """
    Randomly initialize weights, biases and activation functions for a
    network whose per-layer node counts are given in layer_widths.

    Weights and biases are drawn from N(0, 0.1). The last layer gets a
    softmax activation; every earlier layer gets tanh.

    :param layer_widths: a list of layer widths (input layer first)
    :return: a list of weights, a list of biases, and a list of phis,
             one for each layer
    """
    result_w = []
    result_b = []
    result_phi = []
    length = len(layer_widths)
    # One (in, out) weight matrix and (1, out) bias per consecutive pair.
    for layer in range(length):
        if layer < len(layer_widths) - 1:
            result_w.append(np.random.normal(0, 0.1, (layer_widths[layer], layer_widths[layer + 1]) ))
            result_b.append(np.random.normal(0, 0.1, (1, layer_widths[layer + 1])))
    # Activations: tanh for hidden layers, softmax for the output layer.
    for layer in range(length - 1):
        if layer == length - 2:
            result_phi.append(mlp_softmax)
        else:
            result_phi.append(mlp_tanh)
    # Edge case: a single-entry width list is treated as the output width
    # of a one-layer network over 784 (MNIST) input features.
    if length == 1:
        result_w.append(np.random.normal(0, 0.1, (784 , layer_widths[0])))
        result_b.append(np.random.normal(0, 0.1, (1, layer_widths[0])))
        result_phi.append(mlp_softmax)
    """
    else:
        result_w.append(np.random.normal(0, 0.1, (layer_widths[len(layer_widths) - 1], 10)))
        result_b.append(np.random.normal(0, 0.1, (1, 10)))
    """
    return result_w, result_b, result_phi
def mlp_gradient_descent(x, y, ws0, bs0, phis, alpha, eta, n_iter):
    """Uses gradient descent to estimate the weights, ws, and biases, bs, that reduce the cost.
    :param x: a list of lists representing the x matrix.
    :param y: a list of lists of one-hot encoded output values.
    :param ws0: a list of initial weight matrices (one for each layer)
    :param bs0: a list of initial biases (one for each layer)
    :param phis: a list of activation functions
    :param alpha: the hyperparameter controlling regularization
    :param eta: the learning rate
    :param n_iter: the number of iterations
    :return: the estimated weights, the estimated biases
    """
    # Copy so the caller's initial parameters are not mutated in place.
    weights = ws0.copy()
    bias = bs0.copy()
    for index in range(n_iter):
        # Cost is recomputed (and printed) each iteration purely to
        # monitor convergence; it is not otherwise used here.
        cost = mlp_cost(x, y, weights, bias, phis, alpha)
        print(cost)
        w_gradient, b_gradient = mlp_gradient(x, y, weights, bias, phis, alpha)
        # Standard update: theta <- theta - eta * dJ/dtheta, per layer.
        for layers in range(len(weights)):
            weight_product = np.multiply(eta, w_gradient[layers])
            weights[layers] = np.subtract(weights[layers], weight_product)
            bias_product = np.multiply(eta, b_gradient[layers])
            bias[layers] = np.subtract(bias[layers], bias_product)
    return weights, bias
def mlp_run_mnist():
    """
    Prepare the data from the local directory and run gradient descent to estimate the parameters on the training data.
    Uses a learning rate of 0.2, regularization term of 0.05, 450 nodes in a single hidden layer, and 300 iterations of gradient descent.
    :return: x_train, y_matrix_train, y_train, x_test, y_matrix_test, y_test, ws0, bs0, ws_hat, bs_hat, train_acc, test_acc
    """
    x_train, x_test, y_train, y_test, y_matrix_train, y_matrix_test = mlp_data_prep()
    # Fixed hyperparameters per the assignment spec.
    eta = 0.2
    alpha = 0.05
    n_iter = 300
    # 784 MNIST pixels -> 450 hidden tanh units -> 10 softmax outputs.
    layer_widths = [784, 450, 10]
    ws0, bs0, phis = mlp_initialize(layer_widths)
    ws_hat, bs_hat = mlp_gradient_descent(x_train, y_matrix_train, ws0, bs0, phis, alpha, eta, n_iter)
    # Accuracy = fraction of predicted labels matching the true labels.
    y_hat_train = mlp_predict(x_train, ws_hat, bs_hat, phis)
    train_acc = (y_hat_train == y_train).mean()
    y_hat_test = mlp_predict(x_test, ws_hat, bs_hat, phis)
    test_acc = (y_hat_test == y_test).mean()
    return x_train, y_matrix_train, y_train, x_test, y_matrix_test, y_test, ws0, bs0, ws_hat, bs_hat, train_acc, test_acc
# --- Smoke test on a tiny synthetic network, then the full MNIST run. ---
np.random.seed(1)
# 10 samples, 8 input features, 5 hidden units, 3 output classes.
m, n0, n1, n2 = 10, 8, 5, 3
x = 2 * np.random.rand(m, n0) - 1
w1 = np.random.randn(n0, n1)
b1 = 0.1*np.random.randn(1, n1)
w2 = np.random.randn(n1, n2)
b2 = 0.1*np.random.randn(1, n2)
phi1 = mlp_tanh
phi2 = mlp_softmax
ws = [w1, w2]
bs = [b1, b2]
phis = [phi1, phi2]
p = mlp_predict_proba(x, ws, bs, phis)
# Sample labels from the network's own predicted distribution
# (inverse-CDF sampling via the row-wise cumulative sum).
r = np.random.rand(m, 1)
y = np.argmax(p.cumsum(axis=1) > r, axis=1)
y_matrix = np.zeros((m, n2))
y_matrix[(range(m), y)] = 1
alpha = 0.01
hs = mlp_feed_forward(x, ws, bs, phis)
ds = mlp_propagate_error(x, y_matrix, ws, bs, phis, hs)
# Full MNIST training run; prints fitted parameters and accuracies.
x_train, y_matrix_train, y_train, x_test, y_matrix_test, y_test, ws0, bs0,ws_hat, bs_hat, train_acc, test_acc = mlp_run_mnist()
print(ws_hat[0])
print(ws_hat[1])
print(bs_hat[0])
print(bs_hat[1])
print(train_acc)
print(test_acc) | 40.835979 | 139 | 0.622506 |
ace61aa4acf192e36ef9b1a8e6b537ddc09cdbb0 | 2,045 | py | Python | recognition/arcface_paddle/tools/export.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 12,377 | 2017-12-04T02:46:57.000Z | 2022-03-31T16:48:31.000Z | recognition/arcface_paddle/tools/export.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 1,851 | 2017-12-05T05:41:23.000Z | 2022-03-30T13:06:22.000Z | recognition/arcface_paddle/tools/export.py | qaz734913414/insightface | 4101fe608ca1d38604a23d53f32314ce8a28fe79 | [
"MIT"
] | 4,198 | 2017-12-05T02:57:19.000Z | 2022-03-30T10:29:37.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import argparse
def str2bool(v):
    """Interpret v as a boolean flag: 'true', 't' and '1' (any case) are True."""
    truthy_values = ("true", "t", "1")
    return str(v).lower() in truthy_values
def parse_args():
    """Parse and return the command-line options for the face-model exporter."""
    parser = argparse.ArgumentParser(description='Paddle Face Exporter')

    # Model settings.
    parser.add_argument(
        '--is_static',
        type=str2bool,
        default='False',
        help='whether to use static mode')
    parser.add_argument(
        '--export_type',
        type=str,
        default='paddle',
        help='export type, paddle or onnx')
    parser.add_argument(
        '--backbone',
        type=str,
        default='FresResNet50',
        help='backbone network')
    parser.add_argument(
        '--embedding_size', type=int, default=512, help='embedding size')

    # Input/output locations.
    parser.add_argument(
        '--checkpoint_dir',
        type=str,
        default='MS1M_v3_arcface/FresResNet50/24/',
        help='checkpoint direcotry')
    parser.add_argument(
        '--output_dir',
        type=str,
        default='MS1M_v3_arcface/FresResNet50/exported_model',
        help='export output direcotry')

    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Pick the static- or dynamic-graph exporter; static mode must be
    # enabled globally in Paddle before the static module is imported.
    if args.is_static:
        import paddle
        paddle.enable_static()
        from static.export import export
    else:
        from dynamic.export import export
    assert args.export_type in ['paddle', 'onnx']
    export(args)
| 28.013699 | 74 | 0.659169 |
ace61af769b4a7a6cf96b3be80d4af4bdfc36f51 | 464 | py | Python | csv_example/csv_ex1.py | rhuey-eqix/pynet-ons | 7c7ce2d1aa263afa7701b2495b0e237110775a1d | [
"Apache-2.0"
] | 1 | 2021-01-11T23:17:26.000Z | 2021-01-11T23:17:26.000Z | csv_example/csv_ex1.py | rhuey-eqix/pynet-ons | 7c7ce2d1aa263afa7701b2495b0e237110775a1d | [
"Apache-2.0"
] | null | null | null | csv_example/csv_ex1.py | rhuey-eqix/pynet-ons | 7c7ce2d1aa263afa7701b2495b0e237110775a1d | [
"Apache-2.0"
] | 7 | 2020-07-21T17:15:08.000Z | 2021-12-14T01:13:56.000Z | #!/usr/bin/env python
"""Read a CSV file. Use the first line as a header line. Return a dictionary."""
import csv
def main():
    """Read a CSV file, treating the first line as a header, and print each row as a dict."""
    file_name = "test_net_devices.csv"
    with open(file_name) as csv_file:
        # DictReader maps each data row onto the header's column names.
        for record in csv.DictReader(csv_file):
            print(dict(record))
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
| 23.2 | 84 | 0.62931 |
ace61b4aa909c0dadd6f419279fd821a680894ce | 1,564 | py | Python | tutorials/01-basics/linear_regression/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | tutorials/01-basics/linear_regression/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | tutorials/01-basics/linear_regression/main.py | johnwu0604/pytorch-tutorial | bdbc283a0b79620d9b582f1c4d2c2220a853b856 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset: 15 (x, y) pairs for 1-D regression.
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

# Linear regression model: a single fully-connected layer y = Wx + b.
model = nn.Linear(input_size, output_size)

# Loss and optimizer: mean squared error with plain SGD.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    # Convert numpy arrays to torch tensors
    inputs = torch.from_numpy(x_train)
    targets = torch.from_numpy(y_train)

    # Forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # Backward and optimize; zero_grad clears gradients accumulated
    # from the previous step before backpropagating.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch+1) % 5 == 0:
        print ('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

# Plot the graph; detach() severs the autograd graph before numpy conversion.
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the model checkpoint
torch.save(model.state_dict(), './outputs/model.ckpt')
| 27.928571 | 86 | 0.61445 |
ace61c6a81b3372631abe1431a72e202506628af | 7,651 | bzl | Python | model/text_proto_assets.bzl | bhaktideshmukh/oppia-android | 94626909570ddbbd06d2cd691b49f357b986db0f | [
"Apache-2.0"
] | null | null | null | model/text_proto_assets.bzl | bhaktideshmukh/oppia-android | 94626909570ddbbd06d2cd691b49f357b986db0f | [
"Apache-2.0"
] | null | null | null | model/text_proto_assets.bzl | bhaktideshmukh/oppia-android | 94626909570ddbbd06d2cd691b49f357b986db0f | [
"Apache-2.0"
] | null | null | null | """
Macros for preparing & creating textproto-converted assets to include in any module.
"""
load("@rules_proto//proto:defs.bzl", "ProtoInfo")
def _extract_proto_sources(deps):
    """
    Returns the list of proto source files that make up the specified list of proto dependencies.
    The returned list includes transitive dependencies.
    """

    # Each proto_library target exposes its full transitive source closure
    # through the ProtoInfo provider; flatten them all into one list.
    sources = []
    for dependency in deps:
        sources.extend(dependency[ProtoInfo].transitive_sources.to_list())
    return sources
def _gen_binary_proto_from_text_impl(ctx):
    # Rule implementation: shells out to protoc in --encode mode to turn the
    # declared text proto into its binary wire-format equivalent.
    # See: https://docs.bazel.build/versions/master/skylark/lib/actions.html#declare_file.
    output_file = ctx.outputs.output_file
    input_file = ctx.attr.input_file.files.to_list()[0].short_path
    input_proto_files = _extract_proto_sources(ctx.attr.proto_deps)

    # See 'protoc --help' for specifics on the arguments passed to the tool for converting text
    # proto to binary, and expected stdin/stdout configurations. Note that the actual proto files
    # are passed to the compiler since it requires them in order to transcode the text proto file.
    command_path = ctx.executable._protoc_tool.path
    proto_directory_path_args = ["--proto_path=%s" % file.dirname for file in input_proto_files]
    proto_file_names = [file.basename for file in input_proto_files]
    # protoc reads the text proto on stdin and writes binary to stdout, hence
    # the '<' / '>' redirections appended to the argv below.
    arguments = [command_path] + [
        "--encode %s" % ctx.attr.proto_type_name,
    ] + proto_directory_path_args + proto_file_names + [
        "< %s" % input_file,
        "> %s" % output_file.path,
    ]

    # Reference: https://docs.bazel.build/versions/master/skylark/lib/actions.html#run. This
    # actually runs the proto compiler to perform the conversion. Note that this needs to use
    # run_shell() instead of run() because it requires input redirection.
    ctx.actions.run_shell(
        outputs = [output_file],
        inputs = ctx.files.input_file + input_proto_files,
        tools = [ctx.executable._protoc_tool],
        command = " ".join(arguments),
        mnemonic = "GenerateBinaryProtoFromText",
    )
    return DefaultInfo(
        files = depset([output_file]),
        runfiles = ctx.runfiles(files = [output_file]),
    )
# Custom Starlark rule for running the proto compiler in encode mode to convert a text proto to
# binary. The custom rule allows this to be done as part of the build graph so that binary files
# never need to be checked into the repository.
_gen_binary_proto_from_text = rule(
    attrs = {
        # The .textproto file to convert.
        "input_file": attr.label(
            allow_files = True,
            mandatory = True,
        ),
        # The declared binary output (.pb) path.
        "output_file": attr.output(
            mandatory = True,
        ),
        # proto_library targets supplying the message definitions.
        "proto_deps": attr.label_list(
            allow_empty = False,
            mandatory = True,
        ),
        # Fully-qualified message type, e.g. 'model.Exploration'.
        "proto_type_name": attr.string(mandatory = True),
        "_protoc_tool": attr.label(
            # This was partly inspired by https://stackoverflow.com/a/39138074.
            executable = True,
            cfg = "host",
            default = "@protobuf_tools//:protoc",
        ),
    },
    implementation = _gen_binary_proto_from_text_impl,
)
def gen_binary_proto_from_text(name, proto_type_name, input_file, output_file, proto_deps):
    """
    Converts a single text-format proto file to its binary wire format.

    Args:
        name: str. A unique name to identify this generation. This can be built directly using
            Bazel like any other build rule.
        proto_type_name: str. The fully-qualified proto message type being converted
            (e.g. 'model.Exploration').
        input_file: file. The path to the text proto file being converted.
        output_file: file. The output path for the generated binary proto file.
        proto_deps: list of targets. proto_library dependencies needed for the conversion.
            Generally only the proto file corresponding to the type is needed, since
            proto_library pulls in transitive dependencies automatically.

    Returns:
        str. The path to the newly generated binary file (same as output_file).
    """
    _gen_binary_proto_from_text(
        name = name,
        input_file = input_file,
        output_file = output_file,
        proto_deps = proto_deps,
        proto_type_name = proto_type_name,
    )
    return output_file
def _generate_single_asset_proto_binary(
        name,
        proto_file_name,
        proto_dep_name,
        proto_type_name,
        asset_dir,
        proto_dep_bazel_target_prefix,
        proto_package):
    """
    Converts one asset's text proto ('<asset_dir>/<proto_file_name>.textproto')
    into a binary asset ('<asset_dir>/<proto_file_name>.pb').

    Args:
        name: str. The name of this target.
        proto_file_name: str. The file name (without extension) of the text proto under the
            assets directory that will be converted.
        proto_dep_name: str. The name of the proto library containing the definition being
            converted to binary.
        proto_type_name: str. The name of the proto type being converted in the text proto.
        asset_dir: str. The path to the assets directory holding the textproto files,
            e.g. 'src/main/assets'.
        proto_dep_bazel_target_prefix: str. The path to the library containing the proto_dep,
            e.g. '//model'.
        proto_package: str. The name of the proto package, e.g. 'model'.

    Returns:
        str. The path to the newly generated binary file.
    """
    text_proto_path = "{}/{}.textproto".format(asset_dir, proto_file_name)
    binary_proto_path = "{}/{}.pb".format(asset_dir, proto_file_name)
    proto_library_target = "{}:{}_proto".format(proto_dep_bazel_target_prefix, proto_dep_name)
    qualified_type_name = "{}.{}".format(proto_package, proto_type_name)
    return gen_binary_proto_from_text(
        name = "generate_binary_proto_for_text_proto_{}".format(name),
        input_file = text_proto_path,
        output_file = binary_proto_path,
        proto_deps = [proto_library_target],
        proto_type_name = qualified_type_name,
    )
def generate_proto_binary_assets(
        name,
        names,
        proto_dep_name,
        proto_type_name,
        name_prefix,
        asset_dir,
        proto_dep_bazel_target_prefix,
        proto_package):
    """
    Converts a list of text proto assets to binary assets.

    Args:
        name: str. The name of this target.
        names: list of str. The text proto file names under the assets directory to convert.
        proto_dep_name: str. See _generate_single_asset_proto_binary.
        proto_type_name: str. See _generate_single_asset_proto_binary.
        name_prefix: str. A prefix to attach to the name of each generated target.
        asset_dir: str. See _generate_single_asset_proto_binary.
        proto_dep_bazel_target_prefix: str. See _generate_single_asset_proto_binary.
        proto_package: str. See _generate_single_asset_proto_binary.

    Returns:
        list of str. The new proto binary asset files that were generated.
    """
    converted_assets = []
    for asset_name in names:
        converted_assets.append(_generate_single_asset_proto_binary(
            name = "%s_%s" % (name_prefix, asset_name),
            proto_file_name = asset_name,
            proto_dep_name = proto_dep_name,
            proto_type_name = proto_type_name,
            asset_dir = asset_dir,
            proto_dep_bazel_target_prefix = proto_dep_bazel_target_prefix,
            proto_package = proto_package,
        ))
    return converted_assets
| 41.134409 | 100 | 0.672461 |
ace61cba1a44434d4141e5654575c7267777e483 | 1,306 | py | Python | get_rising.py | sameerchandra/get_rising_earthporn | de19e611f23522e5ec4ad6f11d8f286d02d0703c | [
"MIT"
] | null | null | null | get_rising.py | sameerchandra/get_rising_earthporn | de19e611f23522e5ec4ad6f11d8f286d02d0703c | [
"MIT"
] | null | null | null | get_rising.py | sameerchandra/get_rising_earthporn | de19e611f23522e5ec4ad6f11d8f286d02d0703c | [
"MIT"
] | null | null | null | import os
import praw
import requests
def get_rising_images(subreddit):
    """Yield direct image URLs (jpg/png/jpeg) from r/earthporn's rising posts.

    Note: the `subreddit` argument is currently unused; the subreddit name
    is hard-coded below, matching the original behavior.
    """
    reddit = praw.Reddit('Download top pics of Day from earthporn')
    for submission in reddit.get_subreddit('earthporn').get_rising():
        link = submission.url
        if link.endswith(('jpg', 'png', 'jpeg')):
            yield link
def download_it(url):
    """
    Download the image at *url* into ~/sceneries, skipping files that are
    already present.

    The saved name is 'sceneries_' plus the last 10 characters of the URL's
    final path segment.

    :param url: direct URL of the image to fetch.
    """
    url_chars = url.split('/')[-1][-10:]
    file_name = 'sceneries_{chars}'.format(chars=url_chars)
    home_dir = os.path.expanduser('~')
    path = os.path.join(home_dir, 'sceneries')
    # exist_ok avoids the check-then-create race the old
    # os.path.exists() + os.mkdir() sequence was vulnerable to.
    os.makedirs(path, exist_ok=True)
    save_path = os.path.join(path, file_name)
    if os.path.exists(save_path):
        print ("{file_name} already downloaded".format(file_name=file_name))
        return
    print ("Downloading to {save_path}".format(save_path=save_path))
    # Stream in 1 KiB chunks so large images are never held fully in memory.
    r = requests.get(url, stream=True)
    with open(save_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
# Entry point: fetch every rising image URL and download each one.
if __name__ == "__main__":
    for img_url in get_rising_images('earthporn'):
        download_it(img_url)
| 23.321429 | 77 | 0.582695 |
ace61cbdf53e310c65e332e0c1a8b7dbf7c16909 | 8,384 | py | Python | dashboard/haAdmin/ha_instances/views.py | tsm55555/NCU-HASS | f98d6d82a86b172f99bb0f088dd73c7bed2a7de8 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T00:32:33.000Z | 2022-03-11T00:32:33.000Z | dashboard/haAdmin/ha_instances/views.py | tsm55555/NCU-HASS | f98d6d82a86b172f99bb0f088dd73c7bed2a7de8 | [
"BSD-3-Clause"
] | null | null | null | dashboard/haAdmin/ha_instances/views.py | tsm55555/NCU-HASS | f98d6d82a86b172f99bb0f088dd73c7bed2a7de8 | [
"BSD-3-Clause"
] | 1 | 2022-01-04T11:55:10.000Z | 2022-01-04T11:55:10.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
#from django.utils.datastructures import SortedDict
from collections import OrderedDict as SortedDict
from django.core.urlresolvers import reverse_lazy
from django.core.urlresolvers import reverse
from horizon import exceptions
from horizon import tables
from horizon import forms
from openstack_dashboard import api
from openstack_dashboard.dashboards.haAdmin.ha_instances import tables as project_tables
from openstack_dashboard.dashboards.haAdmin.ha_instances\
import forms as project_forms
from openstack_dashboard.REST.RESTClient import RESTClient
server = RESTClient.get_instance()
class Response(object):
    """Lightweight wrapper around an HA REST reply (code, message, data)."""

    def __init__(self, code, message=None, data=None):
        # message and data are optional; some endpoints return only a code.
        self.code, self.message, self.data = code, message, data
class AddView(forms.ModalFormView):
    # Horizon modal form view for adding an instance to HA protection.
    form_class = project_forms.AddForm
    template_name = 'haAdmin/ha_instances/create.html'
    # Redirect back to the HA instances table on success.
    success_url = reverse_lazy('horizon:haAdmin:ha_instances:index')
    submit_label = _("Add")
    submit_url = "horizon:haAdmin:ha_instances:add_to_protection"
    page_title = _("Add Instance To Protection")
class UpdateView(forms.ModalFormView):
    # Horizon modal form view for renaming/editing an HA-protected instance.
    form_class = project_forms.UpdateForm
    form_id = "update_instance_form"
    modal_header = _("Edit Instance")
    template_name = 'haAdmin/ha_instances/update.html'
    success_url = reverse_lazy("horizon:haAdmin:ha_instances:index")
    page_title = _("Update Instance")
    submit_label = _("Save Changes")
    submit_url = "horizon:haAdmin:ha_instances:update"

    def get_context_data(self, **kwargs):
        # Inject the instance id and the per-instance submit URL so the
        # template posts back to the right endpoint.
        context = super(UpdateView, self).get_context_data(**kwargs)
        args = (self.kwargs['instance_id'],)
        context["instance_id"] = self.kwargs['instance_id']
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def _get_object(self, *args, **kwargs):
        # Fetch the Nova server object; redirect to the index with an error
        # message if the lookup fails.
        instance_id = self.kwargs['instance_id']
        try:
            return api.nova.server_get(self.request, instance_id)
        except Exception:
            redirect = reverse("horizon:haAdmin:ha_instances:index")
            msg = _('Unable to retrieve instance details.')
            exceptions.handle(self.request, msg, redirect=redirect)

    def get_initial(self):
        # Pre-populate the form with the instance's current name.
        initial = super(UpdateView, self).get_initial()
        initial.update({'instance_id': self.kwargs['instance_id'],
                        'name': getattr(self._get_object(), 'name', '')})
        return initial
class IndexView(tables.DataTableView):
    # Table view listing every HA-protected instance across all clusters.
    table_class = project_tables.InstancesTable
    template_name = 'haAdmin/ha_instances/index.html'
    page_title = _("HA_Instances")

    #def has_more_data(self, table):
        #return self._more

    def get_data(self):
        # NOTE(review): authUrl appears unused — the module-level `server`
        # RESTClient singleton is queried instead; confirm and remove.
        authUrl = "http://user:0928759204@127.0.0.1:61209"
        #server = xmlrpclib.ServerProxy(authUrl)
        clusters = server.list_cluster()["data"]
        instances = []
        # Collect every instance of every HA cluster, enriched with the
        # corresponding Nova server object.
        for cluster in clusters:
            name = cluster["cluster_name"]
            _cluster_instances = server.list_instance(name)
            _cluster_instances = Response(code=_cluster_instances["code"], message=_cluster_instances["message"], data=_cluster_instances["data"])
            result = _cluster_instances.code
            cluster_instances = _cluster_instances.data.get("instanceList")
            if result == 'succeed':
                if cluster_instances != "":
                    #cluster_instances = cluster_instances.split(",")
                    for _instance in cluster_instances:
                        instance_id = _instance["id"]
                        try:
                            instance = api.nova.server_get(self.request, instance_id)
                            # Remember which cluster the instance belongs to.
                            instance.cluster_name = name
                            # instance.cluster_id = uuid
                            instances.append(instance)
                        except Exception:
                            msg = _('Unable to retrieve instance list.')
                            exceptions.handle(self.request, msg)
        marker = self.request.GET.get(
            project_tables.InstancesTable._meta.pagination_param, None)
        search_opts = self.get_filters({'marker': marker, 'paginate': True})
        # Gather our tenants to correlate against IDs
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _('Unable to retrieve instance project information.')
            exceptions.handle(self.request, msg)
        # Translate a project-name filter into a tenant_id search option.
        if 'project' in search_opts:
            ten_filter_ids = [t.id for t in tenants
                              if t.name == search_opts['project']]
            del search_opts['project']
            if len(ten_filter_ids) > 0:
                search_opts['tenant_id'] = ten_filter_ids[0]
            else:
                self._more = False
                return []
        """
        try:
            instances, self._more = api.nova.server_list(
                self.request,
                search_opts=search_opts,
                all_tenants=True)
        except Exception:
            self._more = False
            exceptions.handle(self.request,
                              _('Unable to retrieve instance list.'))
        """
        if instances:
            try:
                api.network.servers_update_addresses(self.request, instances,
                                                     all_tenants=True)
            except Exception:
                exceptions.handle(
                    self.request,
                    message=_('Unable to retrieve IP addresses from Neutron.'),
                    ignore=True)
        # Gather our flavors to correlate against IDs
        try:
            flavors = api.nova.flavor_list(self.request)
        except Exception:
            # If fails to retrieve flavor list, creates an empty list.
            flavors = []
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        tenant_dict = SortedDict([(t.id, t) for t in tenants])
        count = 1 # to count instances number
        # Loop through instances to get flavor and tenant info.
        for inst in instances:
            flavor_id = inst.flavor["id"]
            try:
                if flavor_id in full_flavors:
                    inst.full_flavor = full_flavors[flavor_id]
                else:
                    # If the flavor_id is not in full_flavors list,
                    # gets it via nova api.
                    inst.full_flavor = api.nova.flavor_get(
                        self.request, flavor_id)
            except Exception:
                msg = _('Unable to retrieve instance size information.')
                exceptions.handle(self.request, msg)
            tenant = tenant_dict.get(inst.tenant_id, None)
            inst.number = count
            inst.tenant_name = getattr(tenant, "name", None)
            cluster_name = inst.cluster_name
            #result, node_list = server.listNode(cluster_id).split(";")
            cluster_node = server.list_node(cluster_name)
            cluster_node = Response(code=cluster_node["code"], message=cluster_node["message"], data=cluster_node["data"])
            result = cluster_node.code
            node_list = cluster_node.data.get("nodeList")
            cluster_nodes = []
            for node in node_list:
                cluster_nodes.append(node["node_name"])
            # A cluster with a single node cannot fail over, so the
            # instance is only partially protected.
            if len(cluster_nodes) == 1:
                inst.protection = "Incomplete Protected"
            else:
                inst.protection = "Protected"
            count = count +1
        return instances

    def get_filters(self, filters):
        # Merge the table's active API-side filter (if any) into `filters`.
        filter_action = self.table._meta._filter_action
        if filter_action:
            filter_field = self.table.get_filter_field()
            if filter_action.is_api_filter(filter_field):
                filter_string = self.table.get_filter_string()
                if filter_field and filter_string:
                    filters[filter_field] = filter_string
        return filters
| 40.307692 | 139 | 0.638001 |
ace61cfd4d614a0bede7c7fecfd791ba1fd96607 | 4,749 | py | Python | drl_agents/elegantrl_models_wt.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 214 | 2021-11-08T17:06:11.000Z | 2022-03-31T18:29:48.000Z | drl_agents/elegantrl_models_wt.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 51 | 2021-11-14T19:11:02.000Z | 2022-03-30T20:23:08.000Z | drl_agents/elegantrl_models_wt.py | eitin-infant/FinRL-Meta | 4c94011e58425796e7e2e5c1bf848afd65c828d6 | [
"MIT"
] | 110 | 2021-11-03T07:41:40.000Z | 2022-03-31T03:23:38.000Z | # RL models from elegantrl
import torch
from finrl_meta.env_future_trading.wt4elegantrl.elegantrl.agent import AgentDDPG, AgentPPO, AgentSAC, AgentTD3, AgentA2C
from finrl_meta.env_future_trading.wt4elegantrl.elegantrl.run import Arguments, train_and_evaluate
MODELS = {"ddpg": AgentDDPG, "td3": AgentTD3, "sac": AgentSAC, "ppo": AgentPPO, "a2c": AgentA2C}
OFF_POLICY_MODELS = ["ddpg", "td3", "sac"]
ON_POLICY_MODELS = ["ppo", "a2c"]
"""MODEL_KWARGS = {x: config.__dict__[f"{x.upper()}_PARAMS"] for x in MODELS.keys()}
NOISE = {
"normal": NormalActionNoise,
"ornstein_uhlenbeck": OrnsteinUhlenbeckActionNoise,
}"""
class DRLAgent:
    """Provides implementations for DRL algorithms

    Attributes
    ----------
        env: gym environment class
            user-defined class

    Methods
    -------
        get_model()
            setup DRL algorithms
        train_model()
            train DRL algorithms in a train dataset
            and output the trained model
        DRL_prediction()
            make a prediction in a test dataset and get results
    """

    def __init__(self, env):
        self.env = env

    def get_model(self, model_name, model_kwargs):
        """Build an ElegantRL ``Arguments`` configuration for *model_name*.

        :param model_name: one of the keys of ``MODELS``
            ("ddpg", "td3", "sac", "ppo", "a2c")
        :param model_kwargs: dict of hyper-parameters, or None for defaults
        :return: a configured ``Arguments`` instance
        :raises NotImplementedError: if *model_name* is not in ``MODELS``
        :raises ValueError: if *model_kwargs* misses a required key
        """
        # Bug fix: validate the name *before* indexing MODELS, so an unknown
        # algorithm raises NotImplementedError instead of a bare KeyError.
        if model_name not in MODELS:
            raise NotImplementedError("NotImplementedError")
        env = self.env
        agent = MODELS[model_name]()
        model = Arguments(env, agent)
        # Off-policy agents (DDPG/TD3/SAC) use a replay buffer; PPO/A2C do not.
        model.if_off_policy = model_name in OFF_POLICY_MODELS
        # Environment description consumed by ElegantRL.
        model.max_step = 3000
        model.state_dim = 39
        model.action_dim = 3
        model.if_discrete = False
        model.target_return = 5  # inf
        if model_kwargs is not None:
            try:
                model.learning_rate = model_kwargs["learning_rate"]
                model.batch_size = model_kwargs["batch_size"]
                model.gamma = model_kwargs["gamma"]
                model.seed = model_kwargs["seed"]
                model.net_dim = model_kwargs["net_dimension"]
                model.target_step = model_kwargs["target_step"]
                model.eval_gap = model_kwargs["eval_time_gap"]
            except BaseException:
                raise ValueError(
                    "Fail to read arguments, please check 'model_kwargs' input."
                )
        return model

    def train_model(self, model, cwd, total_timesteps=5000):
        """Train *model*, writing checkpoints and logs under *cwd*.

        :param model: ``Arguments`` instance from :meth:`get_model`
        :param cwd: working directory for checkpoints/logs
        :param total_timesteps: training budget (ElegantRL break step)
        """
        model.cwd = cwd
        model.break_step = total_timesteps
        train_and_evaluate(model)

    @staticmethod
    def DRL_prediction(model_name, cwd, net_dimension, environment):
        """Load a trained agent from *cwd* and roll it out on *environment*.

        :param model_name: algorithm key, must be in ``MODELS``
        :param cwd: directory holding the saved agent weights
        :param net_dimension: network width used when the agent was trained
        :param environment: test environment exposing reset()/step()
        :return: list of total asset values, one per step plus the initial value
        :raises NotImplementedError: if *model_name* is unknown
        :raises ValueError: if the saved agent cannot be loaded
        """
        if model_name not in MODELS:
            raise NotImplementedError("NotImplementedError")
        model = MODELS[model_name]()
        environment.env_num = 1
        args = Arguments(env=environment, agent=model)
        args.if_off_policy = model_name in OFF_POLICY_MODELS
        args.agent = model
        args.env = environment
        # args.agent.if_use_cri_target = True  ## Not needed for test

        # load agent
        try:
            state_dim = environment.state_dim
            action_dim = environment.action_dim

            agent = args.agent
            net_dim = net_dimension

            agent.init(net_dim, state_dim, action_dim)
            agent.save_or_load_agent(cwd=cwd, if_save=False)
            act = agent.act
            device = agent.device
        except BaseException:
            raise ValueError("Fail to load agent!")

        # test on the testing env
        _torch = torch
        state = environment.reset()
        # Bug fix: give episode_return a default so the final print cannot
        # raise NameError when max_step is 0.
        episode_return = 0.0
        episode_returns = list()  # the cumulative_return / initial_account
        episode_total_assets = list()
        episode_total_assets.append(environment.initial_total_asset)
        with _torch.no_grad():
            for i in range(environment.max_step):
                s_tensor = _torch.as_tensor((state,), dtype=torch.float32, device=device)
                a_tensor = act(s_tensor)  # action_tanh = act.forward()
                action = (
                    a_tensor.detach().cpu().numpy()[0]
                )  # not need detach(), because with torch.no_grad() outside
                state, reward, done, _ = environment.step(action)

                total_asset = environment.assets
                episode_total_assets.append(total_asset)
                episode_return = total_asset / environment.initial_total_asset
                episode_returns.append(episode_return)

                if done:
                    break
        print("Test Finished!")
        # return episode total_assets on testing data
        print("episode_return", episode_return)
        return episode_total_assets
| 37.101563 | 120 | 0.610444 |
ace61d356205e12f60b81a3b470ee2eff38a7458 | 278 | py | Python | pdoing/conftest.py | hamzabouissi/pdoing | 00e541c5f82f4027809a6ae4d172c5a2172fd8be | [
"MIT"
] | null | null | null | pdoing/conftest.py | hamzabouissi/pdoing | 00e541c5f82f4027809a6ae4d172c5a2172fd8be | [
"MIT"
] | null | null | null | pdoing/conftest.py | hamzabouissi/pdoing | 00e541c5f82f4027809a6ae4d172c5a2172fd8be | [
"MIT"
] | null | null | null | import pytest
from pdoing.users.models import User
from pdoing.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    # Point Django's MEDIA_ROOT at a per-test temporary directory so tests
    # never write uploaded files into the real media folder. autouse=True
    # applies this to every test without an explicit request.
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    """Return a freshly created ``User`` built by ``UserFactory``."""
    return UserFactory()
| 18.533333 | 52 | 0.773381 |
ace61e04d126ff1f1edb43ded4ac6e1f596b2af8 | 13,027 | py | Python | Python/preprocessing/abs_preprocessor.py | yuanmingze/OmniPhotos | e871feb085bbf3f363c22bd0e1636865f959a54f | [
"Apache-2.0"
] | 1 | 2021-04-18T09:30:49.000Z | 2021-04-18T09:30:49.000Z | Python/preprocessing/abs_preprocessor.py | reubenlindroos/OmniPhotos | de62590edc9caf1cfbd1c833bb9176993a10a579 | [
"Apache-2.0"
] | null | null | null | Python/preprocessing/abs_preprocessor.py | reubenlindroos/OmniPhotos | de62590edc9caf1cfbd1c833bb9176993a10a579 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Define the AbsPreprocessor class.

This module defines the AbsPreprocessor base class used by the
preprocessing pipeline.

Typical usage example:

  preprocessor = AbsPreprocessor(args)
  preprocessor.generating_config_files()

"""
import inspect
import os
import pathlib
import re
import PIL
import yaml
import ffmpeg
from abs_ui import AbsUI
class AbsPreprocessor:
    """Base class generating the config files for OmniPhotos."""

    # Shared output interface used by show_info / show_image.
    abs_ui = AbsUI()

    def __init__(self, args):
        """
        Load all configuration and set up the runtime environment (directories).

        :param args: parameters from the CLI as a dict (e.g. vars(namespace))
        """
        self.config = None
        self.show_infor_interval = 10  # the interval of output information & show images
        self.root_dir = None  # the root directory: folder of config_omniphotos.yaml
        self.load_config(args)

        self.input_type_list = ["image", "video"]
        self.trajectory_tool_list = ["all", "openvslam", "colmap"]
        self.image_type_list = ["panoramic", "perspective"]
        self.current_dir = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe())))  # current directory

        # the trajectory reconstruction input images (rotated, selected)
        self.traj_input_images_dir = self.root_dir / "trajectory_images"
        # OmniPhotos ready images from traj_input_images_dir folder
        self.op_images_dir = self.root_dir / "Input"
        self.original_image_list = []  # storing all original image filenames
        self.input_path = self.root_dir / self.config["preprocessing.input_path"]

        # check input data type
        self.input_type = self.config["preprocessing.input_type"]
        if self.input_type not in self.input_type_list:
            msg = "Input data type is {}, which is not supported.".format(self.input_type)
            self.show_info(msg, "error")

        # get the basic information for input frame info
        self.frame_width = -1
        self.frame_height = -1
        self.frame_number = -1
        self.frame_fps = -1
        self.load_origin_data_info()
        if self.input_type == "image":
            self.frame_fps = 1

        # the input image index range of OmniPhotos
        self.image_start_idx = self.config["preprocessing.frame_index_start"]
        self.image_end_idx = self.config["preprocessing.frame_index_end"]
        if self.image_start_idx < 0 or self.image_start_idx > self.frame_number:
            self.show_info("preprocessing.frame_index_start set error", "error")
        if self.image_end_idx == -1:
            # -1 means "use everything up to the last frame"
            self.image_end_idx = self.frame_number - 1
        if self.image_end_idx >= self.frame_number:
            self.show_info("preprocessing.frame_index_end set error", "error")
        if self.image_start_idx > self.image_end_idx:
            self.show_info("image_start_id is larger than image_end_idx", "error")

        # create the images list
        self.original_filename_expression = self.config["preprocessing.original_filename_expression"]
        self.op_filename_expression = self.config["preprocessing.op_filename_expression"]
        self.trajectory_images_list = []  # the processed original image file names
        self.op_image_list = []  # the OmniPhotos-ready image file names
        for idx in range(self.image_start_idx, self.image_end_idx + 1):
            self.trajectory_images_list.append(self.original_filename_expression % idx)
            self.op_image_list.append(self.op_filename_expression % idx)
        self.image_list = self.op_image_list

        # check the type of input image & trajectory reconstruction tool
        self.image_type = self.config["preprocessing.colmap.input_camera_type"]
        if self.image_type not in self.image_type_list:
            msg = "Reconstruction tool is {}, which is not supported.".format(self.image_type)
            self.show_info(msg, "error")
        self.trajectory_tool = self.config["preprocessing.trajectory_tool"].lower()
        if self.trajectory_tool not in self.trajectory_tool_list:
            msg = "Reconstruction tool is {}, which is not support now.".format(self.trajectory_tool)
            self.show_info(msg, "error")

        # set-up trajectory reconstruction run-time environment
        self.image_perspective_output_path = self.root_dir / "colmap_perspective"
        self.traj_output_directory_path = self.root_dir / "Capture"
        self.output_directory_path_colmap = self.traj_output_directory_path / "COLMAP"
        self.output_directory_path_ovslam = self.traj_output_directory_path / "openvslam"
        self.openvslam_config_file = self.root_dir / "config.yaml"

        # COLMAP / openvslam necessary files
        self.openvslam_output_file_list = ["map.msg", "frame_trajectory.txt"]
        self.openvslam_essential_file_list = ["map.msg", "frame_trajectory_with_filename.txt", "cameras.txt"]
        self.colmap_essential_file_list = ["points3D.txt", "images.txt", "cameras.txt", "full_model_ba_points3D.ply"]

        # default setting
        self.ffmpeg_thread_number = 3  # multi-thread thread number configuration
        self.cache_folder_name = None
        self.check_config()

    def load_config(self, args):
        """
        Load configuration from a *.yaml file and merge in CLI options.

        CLI options that are set (not None) override the corresponding
        "preprocessing."-prefixed entry from the YAML file.

        :param args: CLI input options (dict)
        """
        # load YAML config file
        config_file = args["config_file"]
        config_file_path = pathlib.Path(config_file)
        if config_file == "" or not config_file_path.exists():
            # Bug fix: the message previously used args.config_file, which
            # raises AttributeError because args is a dict.
            msg = "config_file path is wrong: {}".format(config_file)
            self.show_info(msg, "error")

        # set the root folder
        self.root_dir = config_file_path.parents[0]
        with open(config_file_path, "r") as yaml_config_file_handle:
            config_str = yaml_config_file_handle.read()
            self.config = yaml.load(config_str, Loader=yaml.CLoader)

        # report unset configuration entries
        for term in self.config:
            if self.config[term] is None:
                self.show_info("Preprocessing yaml file options {} are not set.".format(term), "error")

        # get config from CLI parameters, to replace the same one in YAML file.
        # Bug fix: the condition was inverted -- set CLI values were skipped
        # and unset (None) ones overwrote the YAML configuration.
        for term_key, term_value in args.items():
            if term_value is None:
                self.show_info("Value {} is not set".format(term_key))
                continue
            self.config["preprocessing." + term_key] = term_value

        # check the config, check the essential parameters
        for term_key, term_value in self.config.items():
            msg = ""
            if term_value is None:
                msg = 'Variable {} : {} is not set'.format(term_key, str(term_value))
            elif term_key.find('path') != -1:
                term_value = pathlib.Path(term_value)
                if not term_value.exists():
                    msg = 'File {} : {} is not exist'.format(term_key, str(term_value))
            elif term_key.find('directory') != -1:
                term_value = pathlib.Path(term_value)
                if not term_value.exists() or not term_value.is_dir():
                    msg = 'Directory {} : {} is not exist'.\
                        format(term_key, str(term_value))
            if msg != "":
                self.show_info(msg, "info")

    def check_config(self):
        """
        Check the setting of optional variables and report the default value
        used when an option is missing from the configuration.
        """
        setting_list = {"preprocessing.ffmpeg_thread_number": self.ffmpeg_thread_number,
                        "preprocessing.cache_folder_name": self.cache_folder_name}
        for key in setting_list:
            try:
                self.config[key]
            except KeyError:
                self.show_info("{} not set, use default setting {}".format(key, setting_list[key]))

    def load_origin_data_info(self):
        """
        Get the basic information (size, fps, frame count) of the input data.
        """
        # load data
        if self.input_type == 'video':
            # get video information
            probe = ffmpeg.probe(str(self.input_path))
            video_stream = next((stream for stream in probe['streams']
                                 if stream['codec_type'] == 'video'), None)
            self.frame_width = int(video_stream['width'])
            self.frame_height = int(video_stream['height'])
            # NOTE(review): eval() on ffprobe output ("num/den") -- works for
            # well-formed probe data, but is unsafe on untrusted input;
            # consider fractions.Fraction instead.
            self.frame_fps = int(eval(video_stream['r_frame_rate']))
            self.frame_number = int(video_stream['nb_frames'])
        elif self.input_type == 'image':
            if not self.input_path.is_dir():
                msg = "The images input path {} is not a folder.".format(self.input_path)
                raise RuntimeError(msg)
            self.get_image_file_list(self.input_path, self.original_image_list)
            self.frame_width, self.frame_height = \
                PIL.Image.open(str(self.input_path / self.original_image_list[0])).size
            self.frame_number = len(self.original_image_list)
        else:
            msg = 'Input_type error : {}'.format(self.input_type)
            self.show_info(msg, "error")

        msg = 'Input data type is {}. {}: width is {}, height is {}, FPS is {}, Frame number is {}'
        msg = msg.format(self.input_type, self.input_path, self.frame_width,
                         self.frame_height, self.frame_fps, self.frame_number)
        self.show_info(msg)

    def get_image_file_list(self, image_directory, image_file_list):
        """
        Collect all image file names (*.png / *.jpg / *.jpeg) in a directory.

        :param image_directory: the directory containing images
        :param image_file_list: output list, replaced in place with the
            sorted file names
        """
        file_list = []
        for (_, _, files) in os.walk(image_directory):
            for filename in files:
                # endswith accepts a tuple of suffixes.
                if filename.endswith(('.png', '.jpg', '.jpeg')):
                    file_list.append(filename)
        if len(file_list) == 0:
            msg = "There do not have images in {}".format(image_directory)
            self.show_info(msg, "error")
        file_list.sort()
        # Replace the caller's list contents in place.
        image_file_list[:] = file_list

    def dir_make(self, directory):
        """
        Check the existence of a directory; create it if missing.

        :param directory: the directory path
        :type directory: str or pathlib.Path
        """
        if isinstance(directory, str):
            directory_path = pathlib.Path(directory)
        elif isinstance(directory, pathlib.Path):
            directory_path = directory
        else:
            msg = "Directory is neither str nor pathlib.Path {}".format(directory)
            self.show_info(msg, "error")
            return
        if not directory_path.exists():
            msg = "Reconstruction output directory {} do not exist, and make a new output directory"\
                .format(directory)
            directory_path.mkdir()
            self.show_info(msg)

    def instance_template(self, input_template_path, output_path, replace_dict):
        """
        Generate a config file from a template by keyword replacement.

        :param input_template_path: the path of the config file template
        :param output_path: the path of the output config file
        :param replace_dict: {pattern: value} mapping; patterns are regular
            expressions, so special characters (e.g. $) must be escaped
        """
        # read config template file
        with open(input_template_path, 'r') as file_handle:
            content = file_handle.read()
        # replace content
        for key, value in replace_dict.items():
            content = re.sub(key, value, content, flags=re.MULTILINE)
        # output config file
        with open(str(output_path), 'w') as file_handle:
            file_handle.write(content)

    def show_info(self, info_string, level="info"):
        """
        Show info in console & UI.

        :param info_string: information string
        :param level: the level of information, should be ["info", "error"]
        """
        self.abs_ui.show_info(info_string, level)

    def show_image(self, image_data):
        """
        Show an image in a window.

        :param image_data: image data in numpy array
        :type image_data: numpy.array
        """
        self.abs_ui.show_image(image_data)
| 42.571895 | 148 | 0.633837 |
ace61fa8abeac88bd9297dedd20e85c67d6f2e37 | 4,762 | py | Python | friends/views.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 2 | 2020-09-08T12:51:56.000Z | 2021-08-18T15:27:52.000Z | friends/views.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | 1 | 2021-12-13T20:43:35.000Z | 2021-12-13T20:43:35.000Z | friends/views.py | ghostforpy/bonds-docker | fda77225b85264cb4ba06b15ff63bc807858425a | [
"MIT"
] | null | null | null | from django.shortcuts import render # , get_object_or_404
from django.core.exceptions import ObjectDoesNotExist
from django.http import JsonResponse
from django.urls import reverse
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from .models import UserFriendsRequests
from bonds.users.models import User
from django.db.models import Q
# Create your views here.
@login_required
def friends_list(request):
    """Render the friends page: current friends plus incoming and outgoing
    friend requests of the logged-in user."""
    friends = request.user.friends.friends.all()
    request_friends_from = request.user.friend_request_from.all()
    # Materialise the incoming requests *before* clearing their "new" flag:
    # list() forces queryset evaluation now, so the template still sees which
    # requests were new when the page was requested.
    request_friends_to = list(request.user.friend_request_to.all())
    request.user.friend_request_to.all().update(new=False)
    return render(request,
                  'friends/list.html',
                  {'friends': friends,
                   'request_friends_to': request_friends_to,
                   'request_friends_from': request_friends_from,
                   'user': request.user,
                   })
@require_POST
@login_required
def friends_delete(request, id):
    """Remove the user with primary key *id* from the current user's friends.

    Returns a JSON payload describing the outcome, including the URL that
    lets the client offer a new friend request to the removed user.
    """
    try:
        target = User.objects.get(id=id)
        send_request_url = reverse('friends:friends_send_request',
                                   args=[target.id])
        request.user.friends.friends.remove(target.friends)
        return JsonResponse({'status': 'friend_deleted', 'url': send_request_url})
    except ObjectDoesNotExist:
        # No user with that id exists.
        return JsonResponse({'status': 'no_friend_id'})
    except ValueError:
        # The user was not in the friends relation.
        return JsonResponse({'status': 'no_user_in_friends'})
@require_POST
@login_required
def friends_request_accept(request, id):
    """Accept the friend request *id* addressed to the current user."""
    try:
        friend_request = UserFriendsRequests.objects.get(id=id)
        if friend_request.user_to != request.user:
            # Only the addressee may accept a request.
            return JsonResponse({'status': 'no_valid'})
        friend_request.accept = True
        delete_url = reverse('friends:friends_delete',
                             args=[friend_request.user_from.id])
        status = friend_request.save()
        return JsonResponse({'status': status, 'url': delete_url})
    except ObjectDoesNotExist:
        return JsonResponse({'status': 'no_friend_request_id'})
@require_POST
@login_required
def friends_request_reject(request, id):
    """Reject the friend request *id* addressed to the current user.

    On success returns the status from ``save()`` plus the URL that allows
    the client to send the requester a new friend request later.
    """
    try:
        friends_request = UserFriendsRequests.objects.get(id=id)
        if friends_request.user_to == request.user:
            friends_request.reject = True
            url = reverse('friends:friends_send_request',
                          args=[friends_request.user_from.id])
            status = friends_request.save()
            return JsonResponse({'status': status, 'url': url})
        else:
            # Only the addressee may reject a request.
            return JsonResponse({'status': 'no_valid'})
    except ObjectDoesNotExist:
        # Removed a stray debug print('1') left over from development.
        return JsonResponse({'status': 'no_friend_request_id'})
@require_POST
@login_required
def friends_request_cancel(request, id):
    """Cancel (delete) an outgoing friend request made by the current user."""
    try:
        pending = UserFriendsRequests.objects.get(id=id)
        if pending.user_from != request.user:
            # Only the sender may cancel their own request.
            return JsonResponse({'status': 'no_valid'})
        resend_url = reverse('friends:friends_send_request',
                             args=[pending.user_to.id])
        pending.delete()
        return JsonResponse({'status': 'request_canceled', 'url': resend_url})
    except ObjectDoesNotExist:
        return JsonResponse({'status': 'no_friend_request_id'})
@require_POST
@login_required
def friends_send_request(request, id):
    """Send a friend request from the current user to user *id*.

    If a request already exists in either direction, respond with
    'already_exist' plus accept/reject URLs; otherwise create a new request
    and return its status and cancel URL.
    """
    try:
        user = User.objects.get(id=id)
        try:
            # Look for an existing request in either direction.
            # NOTE(review): .get() raises MultipleObjectsReturned if duplicate
            # requests exist between the two users -- not handled here.
            friend_request = UserFriendsRequests.\
                objects.filter(Q(user_from=request.user,
                                 user_to=user) |
                               Q(user_from=user,
                                 user_to=request.user))
            friend_request = friend_request.get()
            content = {'status': 'already_exist'}
            content['url_accept'] = reverse('friends:friends_request_accept',
                                            args=[friend_request.id])
            content['url_reject'] = reverse('friends:friends_request_reject',
                                            args=[friend_request.id])
        except ObjectDoesNotExist:
            # No existing request in either direction: create a fresh one.
            new_friend_request = UserFriendsRequests.objects.create(
                user_from=request.user, user_to=user)
            status = new_friend_request.save()
            content = {'status': status}
            if status == 'request_saved':
                content['url'] = reverse('friends:friends_request_cancel',
                                         args=[new_friend_request.id])
        return JsonResponse(content)
    except ObjectDoesNotExist:
        # The target user id does not exist.
        return JsonResponse({'status': 'no_user_id'})
| 38.715447 | 77 | 0.627257 |
ace61fd8c51d7a5a6cf0dd496f05b45e7f2aed39 | 1,858 | py | Python | env/Lib/site-packages/plotly/validators/sankey/hoverlabel/_font.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 7 | 2022-01-16T12:28:16.000Z | 2022-03-04T15:31:45.000Z | packages/python/plotly/plotly/validators/sankey/hoverlabel/_font.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 14 | 2021-10-20T23:33:47.000Z | 2021-12-21T04:50:37.000Z | packages/python/plotly/plotly/validators/sankey/hoverlabel/_font.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 1 | 2021-11-29T22:55:05.000Z | 2021-11-29T22:55:05.000Z | import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `sankey.hoverlabel.font` compound
    # property: delegates all validation to CompoundValidator using the
    # `Font` data class and the property help text below.
    def __init__(self, plotly_name="font", parent_name="sankey.hoverlabel", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            size

            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
""",
            ),
            **kwargs
        )
| 39.531915 | 86 | 0.54521 |
ace620475b63b52c77df552bdc09e91d814d9ed2 | 4,730 | py | Python | main.py | riandeoliveira/mern-stack-project-starter | 06b6425e1e2309f4d3c84ba205740de640e7045c | [
"MIT"
] | null | null | null | main.py | riandeoliveira/mern-stack-project-starter | 06b6425e1e2309f4d3c84ba205740de640e7045c | [
"MIT"
] | null | null | null | main.py | riandeoliveira/mern-stack-project-starter | 06b6425e1e2309f4d3c84ba205740de640e7045c | [
"MIT"
] | null | null | null | import pyautogui
import pyperclip
import time
print('========== MERN STACK PROJECT STARTER ==========\n')
print('by Rian Oliveira\n')
# Pega os dados necessários do usuário.
USERNAME = input('Nome de usuário do GitHub: ')
PROJECT_NAME = input('Nome do projeto: ')
DB_NAME = input('Nome do banco de dados: ')
PROJECT_LOCATION = input('Local onde ficará o projeto na sua máquina: ')
print('\nIniciando projeto, aguarde...')
print('Por favor, não mexa no teclado e mouse enquanto o programa estiver rodando.')
# Abre o navegador.
time.sleep(5)
pyautogui.hotkey('win', 'd')
time.sleep(0.5)
pyautogui.hotkey('win')
time.sleep(0.5)
pyautogui.write('Microsoft Edge')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.hotkey('win', 'up')
# Vai até o perfil do GitHub do usuário.
time.sleep(0.5)
pyautogui.write('https://github.com/new')
time.sleep(0.5)
pyautogui.press('enter')
# Cria um novo repositório.
time.sleep(2)
pyautogui.press('tab')
time.sleep(0.5)
pyautogui.write(PROJECT_NAME)
time.sleep(0.5)
for i in range(9):
pyautogui.press('tab')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(5)
pyautogui.hotkey('win', 'd')
# Abre o terminal.
time.sleep(0.5)
pyautogui.press('win')
time.sleep(0.5)
pyautogui.write('Git Bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(2)
pyautogui.hotkey('win', 'up')
# Inicia o banco de dados MongoDB.
time.sleep(0.5)
pyautogui.write('mongod')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(3)
pyautogui.hotkey('win', 'd')
# Abre o terminal.
time.sleep(0.5)
pyautogui.press('win')
time.sleep(0.5)
pyautogui.write('Git Bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(2)
pyautogui.hotkey('win', 'up')
# Entra no MongoDB.
time.sleep(0.5)
pyautogui.write('mongo')
time.sleep(0.5)
pyautogui.press('enter')
# Cria um novo banco de dados e uma collection de exemplo.
time.sleep(0.5)
pyautogui.write('use ' + DB_NAME)
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('db.example.insertOne({ "name" : "example" })')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('show dbs')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('db.example.find().pretty()')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(3)
pyautogui.hotkey('ctrl', 'c')
# Cria o diretório do projeto.
time.sleep(0.5)
pyperclip.copy('cd ' + PROJECT_LOCATION)
time.sleep(0.5)
pyautogui.hotkey('ctrl', 'v')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('mkdir ' + PROJECT_NAME)
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('cd ' + PROJECT_NAME)
time.sleep(0.5)
pyautogui.press('enter')
# Abre o VSCode.
time.sleep(0.5)
pyautogui.write('code .')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(5)
pyautogui.hotkey('win', 'up')
# Clona o repositório do GitHub que contém o template a ser utilizado.
time.sleep(2)
pyautogui.hotkey('ctrl', '"')
time.sleep(2)
pyautogui.write('git clone https://github.com/riandeoliveira/mern-stack-template .')
time.sleep(0.5)
pyautogui.press('enter')
# Remove o repositório git do template.
time.sleep(10)
pyautogui.write('bash')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('rm -rf .git')
time.sleep(0.5)
pyautogui.press('enter')
# Inicia um novo repositório git.
time.sleep(0.5)
pyautogui.write('git init')
time.sleep(0.5)
pyautogui.press('enter')
# Configura a branch de master para main.
time.sleep(5)
pyautogui.write('git branch -M main')
time.sleep(0.5)
pyautogui.press('enter')
# Adiciona o repositório do GitHub pertencente ao projeto.
time.sleep(1)
pyautogui.write("git remote add origin 'https://github.com/" + USERNAME + "/" + PROJECT_NAME + "'")
time.sleep(0.5)
pyautogui.press('enter')
# Adiciona todo o template ao repositório local.
time.sleep(3)
pyautogui.write('git add .')
time.sleep(0.5)
pyautogui.press('enter')
# Faz commit de todo o projeto.
time.sleep(5)
pyautogui.write("git commit -m 'Initial commit'")
time.sleep(0.5)
pyautogui.press('enter')
# Faz um push, mandando o projeto para o GitHub.
time.sleep(5)
pyautogui.write('git push origin main')
time.sleep(0.5)
pyautogui.press('enter')
# Instala as dependências do back-end.
time.sleep(5)
pyautogui.write('cd server')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('npm install')
time.sleep(0.5)
pyautogui.press('enter')
# Instala as dependências do front-end.
time.sleep(30)
pyautogui.write('cd ..')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('cd client')
time.sleep(0.5)
pyautogui.press('enter')
time.sleep(0.5)
pyautogui.write('npm install')
time.sleep(0.5)
pyautogui.press('enter')
print('\n================================================') | 23.186275 | 99 | 0.713742 |
ace6205465fc4cf0c622c051b32f2ebd10e9d66a | 11,203 | py | Python | blockchain_adversary.py | prathyushpv/blockchain | e63d256247eca7608b215587ac2384353318149f | [
"MIT"
] | null | null | null | blockchain_adversary.py | prathyushpv/blockchain | e63d256247eca7608b215587ac2384353318149f | [
"MIT"
] | null | null | null | blockchain_adversary.py | prathyushpv/blockchain | e63d256247eca7608b215587ac2384353318149f | [
"MIT"
] | null | null | null | import hashlib
import json
from time import time
from urllib.parse import urlparse
from uuid import uuid4
import requests
from flask import Flask, jsonify, request
from random import seed
from random import random
from random import shuffle
import threading
from time import localtime
from time import sleep
from datetime import datetime
from os import system
import logging
# Silence Flask's per-request access logging (werkzeug); only real errors show.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)

# NOTE(review): module-level tuning knobs used later in the file; `p` looks
# like a probability/ratio and `interval` a delay in seconds -- confirm
# against their usage sites.
p = 1
interval = 3
class Blockchain:
    def __init__(self):
        # Pending transactions to be included in the next mined block.
        self.current_transactions = []
        # The chain itself: an ordered list of block dicts.
        self.chain = []
        # Addresses ("host:port") of peer nodes in the network.
        self.nodes = set()

        # Create the genesis block
        self.new_block(previous_hash='1', proof=100)
def register_node(self, address):
"""
Add a new node to the list of nodes
:param address: Address of node. Eg. 'http://192.168.0.5:5000'
"""
parsed_url = urlparse(address)
if parsed_url.netloc:
self.nodes.add(parsed_url.netloc)
elif parsed_url.path:
# Accepts an URL without scheme like '192.168.0.5:5000'.
self.nodes.add(parsed_url.path)
else:
raise ValueError('Invalid URL')
def valid_chain(self, chain):
"""
Determine if a given blockchain is valid
:param chain: A blockchain
:return: True if valid, False if not
"""
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
# print(f'{last_block}')
# print(f'{block}')
# print("\n-----------\n")
# Check that the hash of the block is correct
last_block_hash = self.hash(last_block)
if block['previous_hash'] != last_block_hash:
return False
# Check that the Proof of Work is correct
if not self.valid_proof(last_block['proof'], block['proof'], last_block_hash):
return False
last_block = block
current_index += 1
return True
    def resolve_conflicts(self):
        """
        This is our consensus algorithm, it resolves conflicts
        by replacing our chain with the longest one in the network.

        :return: True if our chain was replaced, False if not
        """

        neighbours = list(self.nodes)
        # Poll peers in a random order so no node is consistently favoured.
        shuffle(neighbours)
        new_chain = None

        # We're only looking for chains longer than ours
        max_length = len(self.chain)

        # Grab and verify the chains from all the nodes in our network
        for node in neighbours:
            response = requests.get(f'http://{node}/chain')

            if response.status_code == 200:
                length = response.json()['length']
                chain = response.json()['chain']

                # Check if the length is longer and the chain is valid
                if length > max_length and self.valid_chain(chain):
                    max_length = length
                    new_chain = chain

        # Replace our chain if we discovered a new, valid chain longer than ours
        if new_chain:
            self.chain = new_chain
            return True

        return False
def successful_mines(self):
neighbours = self.nodes
count = 0
# Grab and verify the chains from all the nodes in our network
for node in neighbours:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
length = response.json()['length']
# Check if the length is longer and the chain is valid
if length > len(self.chain):
count = count + 1
return count
def new_block(self, proof, previous_hash):
"""
Create a new Block in the Blockchain
:param proof: The proof given by the Proof of Work algorithm
:param previous_hash: Hash of previous Block
:return: New Block
"""
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1]),
}
# Reset the current list of transactions
self.current_transactions = []
self.chain.append(block)
return block
def new_transaction(self, sender, recipient, amount):
"""
Creates a new transaction to go into the next mined Block
:param sender: Address of the Sender
:param recipient: Address of the Recipient
:param amount: Amount
:return: The index of the Block that will hold this transaction
"""
self.current_transactions.append({
'sender': sender,
'recipient': recipient,
'amount': amount,
})
return self.last_block['index'] + 1
    @property
    def last_block(self):
        # Convenience accessor for the chain tip (most recently added block).
        return self.chain[-1]
    @staticmethod
    def hash(block):
        """
        Creates a SHA-256 hash of a Block
        :param block: Block (dict)
        :return: hex digest string
        """
        # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes
        block_string = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(block_string).hexdigest()
    def proof_of_work(self, last_block):
        """
        Simple Proof of Work Algorithm:
        - Find a number p' such that hash(pp') contains leading 4 zeroes
        - Where p is the previous proof, and p' is the new proof
        :param last_block: <dict> last Block
        :return: <int> the proof found
        """
        last_proof = last_block['proof']
        last_hash = self.hash(last_block)
        # Brute-force search: increment until valid_proof() accepts.
        proof = 0
        while self.valid_proof(last_proof, proof, last_hash) is False:
            proof += 1
        return proof
@staticmethod
def valid_proof(last_proof, proof, last_hash):
"""
Validates the Proof
:param last_proof: <int> Previous Proof
:param proof: <int> Current Proof
:param last_hash: <str> The hash of the Previous Block
:return: <bool> True if correct, False if not.
"""
guess = f'{last_proof}{proof}{last_hash}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
def common_chain_length(self):
neighbours = self.nodes
chains = []
# Grab and verify the chains from all the nodes in our network
for node in neighbours:
response = requests.get(f'http://{node}/chain')
if response.status_code == 200:
chain = response.json()['chain']
chains.append(chain)
cur = 0
while cur < len(self.chain):
same = True
for chain in chains:
if chain and chain[cur] != self.chain[cur]:
same = False
break
if not same:
break
cur = cur + 1
return cur + 1
# Instantiate the Flask application that serves this node's HTTP API.
app = Flask(__name__)
# Generate a globally unique address for this node; mining rewards are
# credited to this identifier.
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain shared by all request handlers and threads.
blockchain = Blockchain()
def mine_func(port):
    """Attempt to mine one block, succeeding with probability ``p``.

    ``p`` is a module-level value defined elsewhere in this file --
    presumably the per-attempt mining success probability; TODO confirm.

    :param port: this node's HTTP port (used only for the log message)
    :return: dict describing either the failure or the forged block
    """
    r = random()
    if r >= p:
        response = {
            'message': "Failed to mine"
        }
        return response
    print(str(port-5000) + ": Success!")
    # We run the proof of work algorithm to get the next proof...
    last_block = blockchain.last_block
    proof = blockchain.proof_of_work(last_block)
    # We must receive a reward for finding the proof.
    # The sender is "0" to signify that this node has mined a new coin.
    blockchain.new_transaction(
        sender="0",
        recipient=node_identifier,
        amount=1,
    )
    # Forge the new Block by adding it to the chain
    previous_hash = blockchain.hash(last_block)
    block = blockchain.new_block(proof, previous_hash)
    response = {
        'message': "New Block Forged",
        'index': block['index'],
        'transactions': block['transactions'],
        'proof': block['proof'],
        'previous_hash': block['previous_hash'],
    }
    return response
@app.route('/mine', methods=['GET'])
def mine():
    """HTTP endpoint: attempt to mine one block on this node.

    Bug fix: ``mine_func`` expects this node's port number (it logs
    ``port - 5000`` on success), but the Flask ``request`` object was
    being passed instead, which raised a TypeError whenever the mine
    attempt succeeded.  The port is now parsed from the request's Host
    header (falling back to 80 when no explicit port is present).
    """
    host = request.host
    node_port = int(host.split(':')[1]) if ':' in host else 80
    response = mine_func(node_port)
    return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
    """Accept a JSON transaction and queue it for the next block."""
    payload = request.get_json()
    # Reject the request unless every mandatory field is present.
    for field in ('sender', 'recipient', 'amount'):
        if field not in payload:
            return 'Missing values', 400
    # Create a new Transaction and report which block will include it.
    index = blockchain.new_transaction(payload['sender'], payload['recipient'], payload['amount'])
    response = {'message': f'Transaction will be added to Block {index}'}
    return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
    """Return this node's entire chain together with its length."""
    body = {
        'chain': blockchain.chain,
        'length': len(blockchain.chain),
    }
    return jsonify(body), 200
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
    """Register a JSON list of peer node addresses with this node."""
    payload = request.get_json()
    peers = payload.get('nodes')
    if peers is None:
        return "Error: Please supply a valid list of nodes", 400
    for peer in peers:
        blockchain.register_node(peer)
    response = {
        'message': 'New nodes have been added',
        'total_nodes': list(blockchain.nodes),
    }
    return jsonify(response), 201
def consensus_func():
    """Run the longest-chain consensus and report the outcome."""
    if blockchain.resolve_conflicts():
        response = {
            'message': 'Our chain was replaced',
            'new_chain': blockchain.chain
        }
    else:
        response = {
            'message': 'Our chain is authoritative',
            'chain': blockchain.chain
        }
    return response
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
    """HTTP endpoint wrapping consensus_func()."""
    return jsonify(consensus_func()), 200
def try_mine(port):
    """Background loop: whenever the wall-clock second is a multiple of
    the mining interval, poll the network and mine as the adversary if
    exactly one peer has out-mined us.

    Relies on the module-level ``interval`` value (defined elsewhere in
    this file) and the global ``blockchain``.

    :param port: this node's HTTP port (used only for log messages)
    """
    # Give the web server thread time to come up before talking to peers.
    sleep(10)
    blockchain.common_chain_length()
    while True:
        sec = localtime().tm_sec
        if sec % interval == 0:
            sleep(0.5)
            count = blockchain.successful_mines()
            print("COUNT : %d" % count)
            # Bug fix: the original used "count is 1", which compares
            # object identity rather than value (and is a SyntaxWarning
            # on modern Python); use == for integer comparison.
            if count == 1:
                print("Adversary mining")
                mine_func(port)
                consensus_func()
            sleep(1)
if __name__ == '__main__':
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
    args = parser.parse_args()
    port = args.port
    #app.run(host='0.0.0.0', port=port)
    # Run the Flask API and the background mining loop concurrently.
    # Both threads are non-daemon, so the process keeps running until
    # externally terminated.
    webapp = threading.Thread(target=app.run, args=('0.0.0.0', port))
    mining = threading.Thread(target=try_mine, args=(port,))
    webapp.start()
    mining.start()
    #app.run(host='0.0.0.0', port=port)
| 27.458333 | 95 | 0.587253 |
ace6209f71f96676b87a6c046a4fc77bed100062 | 1,449 | py | Python | mmdet/models/builder.py | Brym-Gyimah/mmdetection | d5d749afe57c77e2ec4500395faed3566fdfedae | [
"Apache-2.0"
] | 20,190 | 2018-09-10T01:11:53.000Z | 2022-03-31T22:31:33.000Z | mmdet/models/builder.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 6,736 | 2018-09-17T09:45:51.000Z | 2022-03-31T22:54:10.000Z | mmdet/models/builder.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 7,837 | 2018-09-11T02:58:23.000Z | 2022-03-31T22:31:38.000Z | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
MODELS = Registry('models', parent=MMCV_MODELS)
# All component categories share the single MODELS registry; the aliases
# below exist only for readability at the build_* call sites.
BACKBONES = MODELS
NECKS = MODELS
ROI_EXTRACTORS = MODELS
SHARED_HEADS = MODELS
HEADS = MODELS
LOSSES = MODELS
DETECTORS = MODELS
def build_backbone(cfg):
    """Build backbone from a config dict whose ``type`` names a registered class."""
    return BACKBONES.build(cfg)
def build_neck(cfg):
    """Build neck from a config dict whose ``type`` names a registered class."""
    return NECKS.build(cfg)
def build_roi_extractor(cfg):
    """Build roi extractor from a config dict whose ``type`` names a registered class."""
    return ROI_EXTRACTORS.build(cfg)
def build_shared_head(cfg):
    """Build shared head from a config dict whose ``type`` names a registered class."""
    return SHARED_HEADS.build(cfg)
def build_head(cfg):
    """Build head from a config dict whose ``type`` names a registered class."""
    return HEADS.build(cfg)
def build_loss(cfg):
    """Build loss from a config dict whose ``type`` names a registered class."""
    return LOSSES.build(cfg)
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build detector."""
    legacy_args_used = train_cfg is not None or test_cfg is not None
    if legacy_args_used:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    # Each config may come either via the argument or inside ``cfg``,
    # never both.
    assert cfg.get('train_cfg') is None or train_cfg is None, \
        'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, \
        'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return DETECTORS.build(cfg, default_args=default_args)
| 24.15 | 71 | 0.692202 |
ace62149953bcd5f520270d3a3a98088195e99cc | 136 | py | Python | baekjoon/python/printNReverse.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | baekjoon/python/printNReverse.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | baekjoon/python/printNReverse.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | # Print N reverse
# https://www.acmicpc.net/problem/2742
# Read N from stdin and print N..1, one number per line.
print('\n'.join(str(x) for x in range(int(input()), 0, -1)))
| 27.2 | 74 | 0.617647 |
ace6219511564bcb95ae897120eb44a2b83968c8 | 6,834 | py | Python | tests/components/philips_js/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/philips_js/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/philips_js/test_config_flow.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Test the Philips TV config flow."""
from unittest.mock import ANY, patch
from haphilipsjs import PairingFailure
from pytest import fixture
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.philips_js.const import CONF_ALLOW_NOTIFY, DOMAIN
from . import (
MOCK_CONFIG,
MOCK_CONFIG_PAIRED,
MOCK_PASSWORD,
MOCK_SYSTEM_UNPAIRED,
MOCK_USERINPUT,
MOCK_USERNAME,
)
from tests.common import MockConfigEntry
@fixture(autouse=True, name="mock_setup_entry")
def mock_setup_entry_fixture():
    """Disable component setup.

    Autouse fixture: patches the integration's setup/unload entry points
    so no test in this module starts the real component.  Yields the
    setup mock so tests can assert how often it was invoked.
    """
    with patch(
        "homeassistant.components.philips_js.async_setup_entry", return_value=True
    ) as mock_setup_entry, patch(
        "homeassistant.components.philips_js.async_unload_entry", return_value=True
    ):
        yield mock_setup_entry
@fixture
async def mock_tv_pairable(mock_tv):
    """Return a mock TV that is pairable.

    Configures the shared ``mock_tv`` fixture as an unpaired API v6 set
    using digest-auth pairing over a secured transport, with pairing
    request/grant calls that succeed and yield mock credentials.
    """
    mock_tv.system = MOCK_SYSTEM_UNPAIRED
    mock_tv.pairing_type = "digest_auth_pairing"
    mock_tv.api_version = 6
    mock_tv.api_version_detected = 6
    mock_tv.secured_transport = True
    mock_tv.pairRequest.return_value = {}
    mock_tv.pairGrant.return_value = MOCK_USERNAME, MOCK_PASSWORD
    return mock_tv
async def test_form(hass, mock_setup_entry):
    """Test the happy path: the user form leads to a created config entry."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result2 = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    await hass.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == "Philips TV (1234567890)"
    assert result2["data"] == MOCK_CONFIG
    # The integration was set up exactly once for the new entry.
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass, mock_tv):
    """Test we handle cannot connect error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A TV whose system info cannot be fetched signals a connection failure.
    mock_tv.system = None
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], MOCK_USERINPUT
    )
    assert result["type"] == "form"
    assert result["errors"] == {"base": "cannot_connect"}
async def test_form_unexpected_error(hass, mock_tv):
    """Test we handle unexpected exceptions."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any non-connection exception should surface as the generic error.
    mock_tv.getSystem.side_effect = Exception("Unexpected exception")
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], MOCK_USERINPUT
    )
    assert result["type"] == "form"
    assert result["errors"] == {"base": "unknown"}
async def test_pairing(hass, mock_tv_pairable, mock_setup_entry):
    """Test the full pairing flow: user form, PIN form, created entry."""
    mock_tv = mock_tv_pairable
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    # An unpaired TV triggers the PIN step.
    assert result["type"] == "form"
    assert result["errors"] == {}
    mock_tv.setTransport.assert_called_with(True)
    mock_tv.pairRequest.assert_called()
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    assert result == {
        "flow_id": ANY,
        "type": "create_entry",
        "description": None,
        "description_placeholders": None,
        "handler": "philips_js",
        "result": ANY,
        "title": "55PUS7181/12 (ABCDEFGHIJKLF)",
        "data": MOCK_CONFIG_PAIRED,
        "version": 1,
        "options": {},
    }
    await hass.async_block_till_done()
    assert len(mock_setup_entry.mock_calls) == 1
async def test_pair_request_failed(hass, mock_tv_pairable, mock_setup_entry):
    """Test the flow aborts when the initial pairing request fails."""
    mock_tv = mock_tv_pairable
    mock_tv.pairRequest.side_effect = PairingFailure({})
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    assert result == {
        "flow_id": ANY,
        "description_placeholders": {"error_id": None},
        "handler": "philips_js",
        "reason": "pairing_failure",
        "type": "abort",
    }
async def test_pair_grant_failed(hass, mock_tv_pairable, mock_setup_entry):
    """Test PIN-grant failures: an invalid PIN re-shows the form, any
    other pairing failure aborts the flow."""
    mock_tv = mock_tv_pairable
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"],
        MOCK_USERINPUT,
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    mock_tv.setTransport.assert_called_with(True)
    mock_tv.pairRequest.assert_called()
    # Test with invalid pin
    mock_tv.pairGrant.side_effect = PairingFailure({"error_id": "INVALID_PIN"})
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    assert result["type"] == "form"
    assert result["errors"] == {"pin": "invalid_pin"}
    # Test with unexpected failure
    mock_tv.pairGrant.side_effect = PairingFailure({})
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {"pin": "1234"}
    )
    assert result == {
        "flow_id": ANY,
        "description_placeholders": {"error_id": None},
        "handler": "philips_js",
        "reason": "pairing_failure",
        "type": "abort",
    }
async def test_options_flow(hass):
    """Test config flow options."""
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="123456",
        data=MOCK_CONFIG_PAIRED,
    )
    config_entry.add_to_hass(hass)
    assert await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
    result = await hass.config_entries.options.async_init(config_entry.entry_id)
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "init"
    result = await hass.config_entries.options.async_configure(
        result["flow_id"], user_input={CONF_ALLOW_NOTIFY: True}
    )
    # Submitting the form persists the option on the entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert config_entry.options == {CONF_ALLOW_NOTIFY: True}
ace6223094abab33a7c315c96339d3ea2ab31af1 | 2,048 | py | Python | api/src/hasura.py | atlekbai/CRM | 3dcd87edfc3b7645aec043b13a04046b23fa569d | [
"MIT"
] | 1 | 2020-03-16T06:39:09.000Z | 2020-03-16T06:39:09.000Z | api/src/hasura.py | atlekbai/CRM | 3dcd87edfc3b7645aec043b13a04046b23fa569d | [
"MIT"
] | 1 | 2021-04-30T21:09:25.000Z | 2021-04-30T21:09:25.000Z | api/src/hasura.py | atlekbai/CRM | 3dcd87edfc3b7645aec043b13a04046b23fa569d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Tlekbai Ali
# @Date: 2019-07-18 16:52:03
# @Last Modified by: Tlekbai Ali
# @Last Modified time: 2019-09-07 11:36:56
"""Hasura module
This module includes Hasura class to interact with hasura engine.
Examples of usage:
Create Hasura instance:
hasura = Hasura("https://<hasura_endpoint>", "<secret_key>")
Query data from hasura:
query = "{user(where: {githubLogin: {_eq: \\\"atlekbai\\\"}})\
{id, attrs, created_at}}"
response = hasura.query(query)
data = response["data"]["user"]
Commit mutations. Insert variable requires user_id:int and
updated_at:timestamp:
insert = "mutation {insert_users(objects: \
{
user_id: %d, \
updated_at: \\\"%s\\\"\
}) {affected_rows}}"
response = hasura.query(insert % (10, "2019-09-21T15:00:00"))
affected_rows = response["data"]["affected_rows"]
"""
import json
from urllib.request import Request, urlopen
class Hasura:
    """Class to wrap interactions with hasura
    """
    def __init__(self, address, secret):
        """
        Args:
            address: url address to hasura
            secret: secret key to access hasura
        """
        # NOTE(review): ``address`` is stored but never used by query();
        # the request URL below is hardcoded.  Confirm whether query()
        # should use self.address instead.
        self.address = address
        self.secret = secret
    def query(self, string):
        """Function accepts queries and mutations.
        Args:
            string: query string (quotes must already be escaped for JSON,
                as shown in the module docstring examples)
        Returns:
            json: response object containing all specified in string fields
        """
        # NOTE(review): the body is built by string concatenation, so the
        # caller must pre-escape quotes/newlines; json.dumps would break
        # the existing pre-escaped call convention.
        request_body = '{"query":"' + string + '"}'
        data = str.encode(request_body)
        # NOTE(review): hardcoded internal IP with a Host-header override
        # (presumably for reverse-proxy routing) -- ignores self.address;
        # verify before reusing this class against another endpoint.
        req = Request("http://172.31.15.138/v1/graphql", data=data)
        req.add_header("X-Hasura-Admin-Secret", self.secret)
        req.add_header("Host", "crm.alem.school")
        content = urlopen(req)
        response = json.load(content)
        return response
| 30.567164 | 75 | 0.552734 |
ace62247665f23dfb61d16df749a579703573ad8 | 12,305 | py | Python | utils/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | 1 | 2021-12-21T11:59:57.000Z | 2021-12-21T11:59:57.000Z | utils/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | null | null | null | utils/__init__.py | Mrhsk/cpsc2021 | dcb7fb23edf7df79549279d053e4a8cadab3b268 | [
"MIT"
] | null | null | null | """
"""
import os, sys
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
"""
copied from the official baseline entry repository
"""
import json
import numpy as np
import sys
import matplotlib.pyplot as plt
import pandas as pd
import peakutils
from sklearn import preprocessing
from scipy import signal
"""
Written by: Xingyao Wang, Chengyu Liu
School of Instrument Science and Engineering
Southeast University, China
chengyu@seu.edu.cn
"""
def p_t_qrs(ecg_original, fs=1000, gr=1):
    """Pan-Tompkins QRS detector.

    Band-pass filters, differentiates, squares and moving-window
    integrates the ECG, then applies adaptive signal/noise thresholds
    with search-back to locate R-peaks.

    :param ecg_original: 1-D array of ECG samples
    :param fs: sampling frequency in Hz (a dedicated two-stage filter is
        used when fs == 200, matching the classic algorithm)
    :param gr: unused here -- presumably a plotting flag carried over
        from the original MATLAB implementation; TODO confirm
    :return: array of R-peak sample indices located on the band-passed signal
    """
    delay = 0
    skip = 0
    m_selected_RR = 0
    mean_RR = 0
    ser_back = 0
    if (fs == 200):
        # Low pass and High pass
        # Low pass
        wn = 12 * 2 / fs
        N = 3
        a, b = signal.butter(N, wn, 'low')
        ecg_l = signal.filtfilt(a, b, ecg_original)
        ecg_l = ecg_l / max(abs(ecg_l))
        ecg_l = np.around(ecg_l, decimals=4)
        # High pass
        wn = 5 * 2 / fs
        N = 3
        a, b = signal.butter(N, wn, 'high')
        ecg_h = signal.filtfilt(a, b, ecg_original)
        ecg_h = ecg_h / max(abs(ecg_h))
    else:
        # Bandpass (5-15 Hz passband keeps QRS energy, rejects baseline
        # wander and T waves)
        f1 = 5
        f2 = 15
        wn = []
        wn.append(f1 * 2 / fs)
        wn.append(f2 * 2 / fs)
        N = 3
        a, b = signal.butter(N, wn, 'bandpass')
        ecg_h = signal.filtfilt(a, b, ecg_original)
        ecg_h = ecg_h / max(abs(ecg_h))
    # Derivative (5-point FIR approximation, resampled to fs)
    int_c = (5 - 1) / (fs * 1 / 40)
    x = np.arange(1,6)
    xp = np.dot(np.array([1, 2, 0, -2, -1]), (1 / 8) * fs)
    fp = np.arange(1,5+int_c,int_c)
    b = np.interp(fp, x, xp)
    ecg_d = signal.filtfilt(b, 1, ecg_h)
    ecg_d = ecg_d / max(ecg_d)
    # Squaring and Moving average (150 ms integration window)
    ecg_s = np.power(ecg_d, 2)
    ecg_m = np.convolve(ecg_s ,np.ones(int(np.around(0.150*fs)))/np.around(0.150*fs))
    delay = delay + np.around(0.150*fs) / 2
    # Fiducial Marks: candidate peaks at least 200 ms apart
    locs = peakutils.indexes(ecg_m, thres=0, min_dist=np.around(0.2 * fs))
    pks = ecg_m[locs[:]]
    # Init other parameters
    LLp = len(pks)
    qrs_c = np.zeros(LLp)
    qrs_i = np.zeros(LLp)
    qrs_i_raw = np.zeros(LLp)
    qrs_amp_raw= np.zeros(LLp)
    nois_c = np.zeros(LLp)
    nois_i = np.zeros(LLp)
    SIGL_buf = np.zeros(LLp)
    NOISL_buf = np.zeros(LLp)
    SIGL_buf1 = np.zeros(LLp)
    NOISL_buf1 = np.zeros(LLp)
    THRS_buf1 = np.zeros(LLp)
    THRS_buf = np.zeros(LLp)
    # Init training phase: thresholds learned from the first 2 seconds
    THR_SIG = max(ecg_m[0:2*fs])*1/3
    THR_NOISE = np.mean(ecg_m[0:2*fs])*1/2
    SIG_LEV= THR_SIG
    NOISE_LEV = THR_NOISE
    # Init bandpass filter threshold (parallel thresholds on ecg_h)
    THR_SIG1 = max(ecg_h[0:2*fs])*1/3
    THR_NOISE1 = np.mean(ecg_h[0:2*fs])*1/2
    SIG_LEV1 = THR_SIG1
    NOISE_LEV1 = THR_NOISE1
    # Thresholding and decision rule
    Beat_C = -1
    Beat_C1 = -1
    Noise_Count = 0
    for i in range(LLp):
        # Locate the corresponding peak in the band-passed signal within
        # a 150 ms window before the integrated-signal peak.
        if ((locs[i] - np.around(0.150*fs)) >= 1 and (locs[i] <= len(ecg_h))):
            _start = locs[i] - np.around(0.15*fs).astype(int)
            _ = ecg_h[_start:locs[i]]
            y_i = max(_)
            x_i = np.argmax(_)
        else:
            if i == 0:
                y_i = max(ecg_h[0:locs[i]])
                x_i = np.argmax(ecg_h[0:locs[i]])
                ser_back = 1
            elif (locs[i] >= len(ecg_h)):
                _ = ecg_h[locs[i] - np.around(0.150*fs).astype(int):]
                y_i = max(_)
                x_i = np.argmax(_)
        # Update the heart_rate estimate from the last 8 RR intervals;
        # irregular beats lower the thresholds, regular ones latch the RR.
        if (Beat_C >= 9):
            diffRR = np.diff(qrs_i[Beat_C-8:Beat_C])
            mean_RR = np.mean(diffRR)
            comp = qrs_i[Beat_C] - qrs_i[Beat_C-1]
            if ((comp <= 0.92*mean_RR) or (comp >= 1.16*mean_RR)):
                THR_SIG = 0.5*(THR_SIG)
                THR_SIG1 = 0.5*(THR_SIG1)
            else:
                m_selected_RR = mean_RR
        # Pick the RR reference used by the search-back so a QRS is not missed
        if m_selected_RR:
            test_m = m_selected_RR
        elif (mean_RR and m_selected_RR == 0):
            test_m = mean_RR
        else:
            test_m = 0
        if test_m:
            # Search-back: if no beat for 1.66*RR, look for a missed peak
            # between the last beat and the current candidate.
            if ((locs[i] - qrs_i[Beat_C]) >= np.around(1.66*test_m)):
                _start = int(qrs_i[Beat_C] + np.around(0.20*fs))
                _end = int(locs[i] - np.around(0.20*fs))
                pks_temp = max(ecg_m[_start:_end+1])
                locs_temp = np.argmax(ecg_m[_start:_end+1])
                locs_temp = qrs_i[Beat_C] + np.around(0.20*fs) + locs_temp - 1
                if (pks_temp > THR_NOISE):
                    Beat_C += 1
                    qrs_c[Beat_C] = pks_temp
                    qrs_i[Beat_C] = locs_temp
                    if (locs_temp <= len(ecg_h)):
                        _start = int(locs_temp - np.around(0.150*fs))
                        _end = int(locs_temp + 1)
                        y_i_t = max(ecg_h[_start:_end])
                        x_i_t = np.argmax(ecg_h[_start:_end])
                    else:
                        _ = locs_temp - np.around(0.150*fs)
                        y_i_t = max(ecg_h[_:])
                        x_i_t = np.argmax(ecg_h[_:])
                    if (y_i_t > THR_NOISE1):
                        Beat_C1 += 1
                        qrs_i_raw[Beat_C1] = locs_temp - np.around(0.150*fs) + (x_i_t - 1)
                        qrs_amp_raw[Beat_C1] = y_i_t
                        SIG_LEV1 = 0.25*y_i_t + 0.75*SIG_LEV1
                    not_nois = 1
                    SIG_LEV = 0.25*pks_temp + 0.75*SIG_LEV
            else:
                not_nois = 0
        # Find noise and QRS peaks
        if (pks[i] >= THR_SIG):
            # Candidates within 360 ms of the last beat are checked by
            # slope ratio to reject T waves.
            if (Beat_C >= 3):
                if ((locs[i] - qrs_i[Beat_C]) <= np.around(0.3600*fs)):
                    _start = locs[i] - np.around(0.075*fs).astype('int')
                    Slope1 = np.mean(np.diff(ecg_m[_start:locs[i]]))
                    _start = int(qrs_i[Beat_C] - np.around(0.075*fs))
                    _end = int(qrs_i[Beat_C])
                    Slope2 = np.mean(np.diff(ecg_m[_start:_end]))
                    if abs(Slope1) <= abs(0.5*(Slope2)):
                        nois_c[Noise_Count] = pks[i]
                        nois_i[Noise_Count] = locs[i]
                        Noise_Count += 1
                        skip = 1
                        NOISE_LEV1 = 0.125*y_i + 0.875*NOISE_LEV1
                        NOISE_LEV = 0.125*pks[i] + 0.875*NOISE_LEV
                    else:
                        skip = 0
            if (skip == 0):
                Beat_C += 1
                qrs_c[Beat_C] = pks[i]
                qrs_i[Beat_C] = locs[i]
                if (y_i >= THR_SIG1):
                    Beat_C1 += 1
                    if ser_back:
                        qrs_i_raw[Beat_C1] = x_i
                    else:
                        qrs_i_raw[Beat_C1] = locs[i] - np.around(0.150*fs) + (x_i - 1)
                    qrs_amp_raw[Beat_C1] = y_i
                    SIG_LEV1 = 0.125*y_i + 0.875*SIG_LEV1
                SIG_LEV = 0.125*pks[i] + 0.875*SIG_LEV
        elif ((THR_NOISE <= pks[i]) and (pks[i] < THR_SIG)):
            NOISE_LEV1 = 0.125*y_i + 0.875*NOISE_LEV1
            NOISE_LEV = 0.125*pks[i] + 0.875*NOISE_LEV
        elif (pks[i] < THR_NOISE):
            nois_c[Noise_Count] = pks[i]
            nois_i[Noise_Count] = locs[i]
            NOISE_LEV1 = 0.125*y_i + 0.875*NOISE_LEV1
            NOISE_LEV = 0.125*pks[i] + 0.875*NOISE_LEV
            Noise_Count += 1
        # Adjust the threshold with SNR (running signal/noise averages)
        if (NOISE_LEV != 0 or SIG_LEV != 0):
            THR_SIG = NOISE_LEV + 0.25*(abs(SIG_LEV - NOISE_LEV))
            THR_NOISE = 0.5*(THR_SIG)
        if (NOISE_LEV1 != 0 or SIG_LEV1 != 0):
            THR_SIG1 = NOISE_LEV1 + 0.25*(abs(SIG_LEV1 - NOISE_LEV1))
            THR_NOISE1 = 0.5*(THR_SIG1)
        SIGL_buf[i] = SIG_LEV
        NOISL_buf[i] = NOISE_LEV
        THRS_buf[i] = THR_SIG
        SIGL_buf1[i] = SIG_LEV1
        NOISL_buf1[i] = NOISE_LEV1
        THRS_buf1[i] = THR_SIG1
        skip = 0
        not_nois = 0
        ser_back = 0
    # Adjust lengths: trim the preallocated arrays to the beats found
    qrs_i_raw = qrs_i_raw[0:Beat_C1+1]
    qrs_amp_raw = qrs_amp_raw[0:Beat_C1+1]
    qrs_c = qrs_c[0:Beat_C+1]
    qrs_i = qrs_i[0:Beat_C+1]
    return qrs_i_raw
def qrs_detect(ECG, fs):
    """Detect R-peaks over an arbitrarily long ECG recording.

    Short records are processed in one pass; longer ones in 5-minute
    windows overlapping by 5 s, trimming detections near the window
    edges to avoid boundary artefacts.

    :param ECG: 1-D array of ECG samples
    :param fs: sampling frequency in Hz
    :return: array of R-peak sample indices
    """
    winsize = 5 * fs * 60 # 5-minute sliding window
    #winsize = 10 * fs # 10-second sliding window
    NB_SAMP = len(ECG)
    peaks = []
    if NB_SAMP < winsize:
        peaks.extend(p_t_qrs(ECG, fs))
        peaks = np.array(peaks)
        peaks = np.delete(peaks, np.where(peaks >= NB_SAMP-2*fs)[0]) # drop R-peaks in the last 2 s
    else:
        # 5-minute sliding-window detection; consecutive windows overlap by 5 s
        count = NB_SAMP // winsize
        for j in range(count+1):
            if j == 0:
                ecg_data = ECG[j*winsize: (j+1)*winsize]
                peak = p_t_qrs(ecg_data, fs)
                peak = np.array(peak)
                peak = np.delete(peak, np.where(peak >= winsize-2*fs)[0]).tolist() # drop R-peaks in the last 2 s of this window
                peaks.extend(map(lambda n: n+j*winsize, peak))
            elif j == count:
                ecg_data = ECG[j*winsize-5*fs: ]
                if len(ecg_data) == 0:
                    pass
                else:
                    peak = p_t_qrs(ecg_data, fs)
                    peak = np.array(peak)
                    peak = np.delete(peak, np.where(peak <= 2*fs)[0]).tolist() # drop R-peaks in the first 2 s of the final partial window
                    peaks.extend(map(lambda n: n+j*winsize-5*fs, peak))
            else:
                ecg_data = ECG[j*winsize-5*fs: (j+1)*winsize]
                peak = p_t_qrs(ecg_data, fs)
                peak = np.array(peak)
                peak = np.delete(peak, np.where((peak <= 2*fs) | (peak >= winsize-2*fs))[0]).tolist() # drop R-peaks in the first and last 2 s of interior windows
                peaks.extend(map(lambda n: n+j*winsize-5*fs, peak))
    peaks = np.array(peaks)
    peaks = np.sort(peaks)
    dp = np.abs(np.diff(peaks))
    # Keep a peak only if it is at least 0.2 s after its predecessor
    # (removes near-duplicate detections from overlapping windows).
    final_peaks = peaks[np.where(dp >= 0.2*fs)[0]+1]
    return final_peaks
def sampen(rr_seq, max_temp_len, r):
    """Sample entropy of an RR-interval sequence.

    rr_seq: segment of the RR intervals series
    max_temp_len: maximum template length
    r: initial value of the tolerance matching
    Returns (e, A, B): entropy estimate per template length, plus the
    match counts (numerator A, denominator B) used to compute it.
    """
    length = len(rr_seq)
    # run/lastrun hold the current matching streak length for each lag.
    lastrun = np.zeros((1,length))
    run = np.zeros((1,length))
    # A[m]: matches of template length m+1; B[m]: matches extendable by one.
    A = np.zeros((max_temp_len,1))
    B = np.zeros((max_temp_len,1))
    p = np.zeros((max_temp_len,1))
    e = np.zeros((max_temp_len,1))
    for i in range(length - 1):
        nj = length - i - 1
        for jj in range(nj):
            j = jj + i + 2
            if np.abs(rr_seq[j-1] - rr_seq[i]) < r:
                run[0, jj] = lastrun[0, jj] + 1
                am1 = float(max_temp_len)
                br1 = float(run[0,jj])
                M1 = min(am1,br1)
                for m in range(int(M1)):
                    A[m] = A[m] + 1
                    if j < length:
                        B[m] = B[m]+1
            else:
                run[0, jj] = 0
        for j in range(nj):
            lastrun[0, j] = run[0,j]
    N = length * (length - 1) / 2
    p[0] = A[0] / N
    # float_info.min guards against log(0) when there are no matches.
    e[0] = -1 * np.log(p[0] + sys.float_info.min)
    for m in range(max_temp_len-1):
        p[m+1]=A[m+1]/B[m]
        e[m+1]=-1*np.log(p[m+1])
    return e, A, B
def comp_cosEn(rr_segment):
    """Coefficient of sample entropy (COSEn) of an RR-interval segment.

    Grows the matching tolerance until the longest template has enough
    matches, then converts the sample entropy to COSEn.  Returns
    (cosEn, sentropy); cosEn is -1000 when no tolerance produced enough
    matches.
    """
    r = 0.03 # initial value of the tolerance matching
    max_temp_len = 2 # maximum template length
    min_num_count = 5 # minimum numerator count
    dr = 0.001 # tolerance matching increment
    match_num = np.ones((max_temp_len,1)) # number of matches for m=1,2,...,M
    match_num = -1000 * match_num
    # Widen the tolerance until the longest template has enough matches.
    while match_num[max_temp_len-1,0] < min_num_count:
        e, match_num, B = sampen(rr_segment, max_temp_len, r)
        r = r + dr
    if match_num[max_temp_len-1, 0] != -1000:
        mRR = np.mean(rr_segment)
        # COSEn = SampEn + ln(2r) - ln(mean RR); r-dr is the last r used.
        cosEn = e[max_temp_len-1, 0] + np.log(2 * (r-dr)) - np.log(mRR)
    else:
        cosEn = -1000
    sentropy = e[max_temp_len-1, 0]
    return cosEn, sentropy
def load_dict(filename):
    """Read *filename* (JSON) and return its decoded content."""
    with open(filename, "r") as fp:
        return json.load(fp)
def save_dict(filename, dic):
    """Serialize *dic* to *filename* as JSON (non-ASCII kept verbatim)."""
    with open(filename, 'w') as fp:
        json.dump(dic, fp, ensure_ascii=False)
| 32.55291 | 115 | 0.50191 |
ace622944b9a226024300698440f0175c0920026 | 3,929 | py | Python | security/rijndael/gcc/python_script/init_layout.py | swl02/mibench | 9ebebd8d69ea46d0719df1aaa2d045721c552ef1 | [
"MIT"
] | null | null | null | security/rijndael/gcc/python_script/init_layout.py | swl02/mibench | 9ebebd8d69ea46d0719df1aaa2d045721c552ef1 | [
"MIT"
] | null | null | null | security/rijndael/gcc/python_script/init_layout.py | swl02/mibench | 9ebebd8d69ea46d0719df1aaa2d045721c552ef1 | [
"MIT"
] | null | null | null | import sys
import random
def write_inst(inst, new_inst):
    """Append a ``.word`` checker directive (value plus magic suffix 977)
    after *inst* and return the combined text."""
    return "{}\t.word\t{}977\n".format(inst, new_inst)
def is_cfi(inst):
    """Return True if *inst* contains any control-flow mnemonic.

    This is a plain substring test (like the original find() chain), so
    any line containing one of the character sequences matches -- e.g.
    the bare "j" entry matches any line with a 'j' in it.
    """
    mnemonics = ("jal", "jalr", "beq", "bne", "ble", "bgt", "bltu",
                 "bgeu", "blt", "bleu", "blez", "bgez", "j", "call",
                 "jr", "ret", "bgtu", "bge")
    return any(m in inst for m in mnemonics)
def is_label(inst):
    """True when the line contains a ':' (i.e. defines an assembler label)."""
    return ':' in inst
def is_main(inst):
    """True when the line defines the ``main:`` label.

    The other entry points listed in the original (encrypt, decrypt,
    set_key, ...) were commented out and are intentionally not matched.
    """
    return 'main:' in inst
def is_data_seg(inst):
    """True when the line contains an assembler directive (data/metadata),
    as opposed to an executable instruction."""
    directives = (".dword", ".size", ".text", ".option", ".globl",
                  ".file", ".byte", ".align", ".section", ".string",
                  ".comm", ".type")
    return any(d in inst for d in directives)
def is_libc_fn(fn_name):
    """True when *fn_name* is one of the shared-libc routines that must
    be called with integrity checking disabled."""
    return fn_name in {
        "fseek", "fgetpos", "fwrite", "fread", "printf", "fclose",
        "fopen", "puts", "__locale_ctype_ptr",
    }
# Rewrite an assembly file: insert check-word directives after labels and
# control-flow instructions, and toggle the integrity-checking CSR around
# main() and libc calls.  Usage: init_layout.py <name> <first_chk_id>
counter = 0
chk = []
gap = 0
prev_inst = ""
modified_stream = []
# with open('../chk_table/' + sys.argv[1] + '.chk','r') as fp:
#     chk = fp.readlines()
chk_counter = int(sys.argv[2])
with open('../assembly_folder/' + sys.argv[1] + '.s','r') as fp:
    stream = fp.readlines()
# The loop processes the PREVIOUS line each iteration (so the next line
# can be inspected), then appends it.
for inst in stream:
    # enable integrity checking (assert csr)
    if is_main(prev_inst) :
        prev_inst = prev_inst + "\tcsrwi\t0xff,1\n"
    #call subroutine for shared library glibc etc
    if prev_inst.find("call") != -1:
        call,fn_name = prev_inst.split()
        if is_libc_fn(fn_name):
            # print(prev_inst)
            prev_inst = "\tcsrwi\t0xff,0\n" + prev_inst + "\tcsrwi\t0xff,1\n"
    if not (is_data_seg(inst)):
        # adding chk instruction
        if is_label(prev_inst) or is_cfi(prev_inst):
            if not (is_label(inst) and is_cfi(prev_inst)):
                if (prev_inst.find(".LC0") == -1):
                    prev_inst = write_inst(prev_inst,hex(chk_counter))
                    chk_counter = chk_counter + 1
    if (is_label(inst) and not(is_cfi(prev_inst) or is_data_seg(prev_inst))):
        # print(prev_inst)
        prev_inst = prev_inst + '\tj\t' + inst[:-2] + '\n'
    # disable integrity checking (deassert csr)
    if inst.find("ret") != -1 or inst.find("jr") != -1:
        prev_inst = prev_inst + "\tcsrwi\t0xff,0\n"
    #adding it to the modified one
    modified_stream.append(prev_inst)
    #iterating
    prev_inst = inst
    counter = counter + 1
# Bug fix: prev_inst lags one line behind, so the final input line was
# never appended and silently dropped from the output; flush it here.
modified_stream.append(prev_inst)
with open('../assembly_folder/modified_'+ sys.argv[1] + '.s','w') as fp:
    fp.writelines(modified_stream)
| 29.541353 | 85 | 0.500891 |
ace622c639e531eaa5e0bb281d7af32963e6f8fd | 889 | py | Python | venv/lib/python3.6/site-packages/csvimport/monkeypatch_tzinfo.py | odenyirechristopher/instagramapp | abd3d125f49f4a7a65c95d902024f59e60bcfc65 | [
"Unlicense"
] | 2 | 2021-04-09T14:04:41.000Z | 2021-04-29T07:50:58.000Z | venv/lib/python3.6/site-packages/csvimport/monkeypatch_tzinfo.py | odenyirechristopher/instagramapp | abd3d125f49f4a7a65c95d902024f59e60bcfc65 | [
"Unlicense"
] | 5 | 2020-06-02T16:10:56.000Z | 2022-01-13T02:47:34.000Z | venv/lib/python3.6/site-packages/csvimport/monkeypatch_tzinfo.py | odenyirechristopher/instagramapp | abd3d125f49f4a7a65c95d902024f59e60bcfc65 | [
"Unlicense"
] | 1 | 2020-05-31T20:23:38.000Z | 2020-05-31T20:23:38.000Z | """ Monkeypatch so that dates outside of 1970-2037 can be imported for
the DB backends that do not handle this
"""
import time
def _isdst(self, dt):
""" Monkeypatch from https://code.djangoproject.com/ticket/3418
since sqlite and other backends still have this issue in django 1.6
"""
year = dt.year
if int(year) < 1970:
year = 1970
if int(year) > 2037:
year = 2037
tt = (year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)
try:
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
except OverflowError:
pass
raise Exception("Cannot process dates from %s" % year)
# Best-effort monkeypatch: replace Django's DST check with the clamped
# version above.  Skipped silently when Django (or this class) is absent.
try:
    from django.utils.timezone import ReferenceLocalTimezone
    ReferenceLocalTimezone._isdst = _isdst
except Exception:
    # Bug fix: narrowed the bare ``except:`` (which also swallowed
    # KeyboardInterrupt/SystemExit) to ``except Exception`` while keeping
    # the deliberate don't-patch-if-absent behaviour.
    # Don't patch it if it isn't there to be patched!
    pass
| 26.939394 | 85 | 0.645669 |
ace622fe033d8e4e2ac782973c8bd618f27f57f5 | 95,378 | py | Python | Wrapping/Python/paraview/simple.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | 2 | 2019-09-27T08:04:34.000Z | 2019-10-16T22:30:54.000Z | Wrapping/Python/paraview/simple.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | Wrapping/Python/paraview/simple.py | sakjain92/paraview | f3af0cd9f6750e24ad038eac573b870c88d6b7dd | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | r"""simple is a module for using paraview server manager in Python. It
provides a simple convenience layer to functionality provided by the
C++ classes wrapped to Python as well as the servermanager module.
A simple example::
from paraview.simple import *
# Create a new sphere proxy on the active connection and register it
# in the sources group.
sphere = Sphere(ThetaResolution=16, PhiResolution=32)
# Apply a shrink filter
shrink = Shrink(sphere)
# Turn the visiblity of the shrink object on.
Show(shrink)
# Render the scene
Render()
"""
#==============================================================================
#
# Program: ParaView
# Module: simple.py
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#==============================================================================
from __future__ import absolute_import, division, print_function
import paraview
from paraview import servermanager
import paraview._backwardscompatibilityhelper
# Bring OutputPort in our namespace.
from paraview.servermanager import OutputPort
import sys
if sys.version_info >= (3,):
xrange = range
def GetParaViewVersion():
    """Returns the version of the ParaView build as a paraview._version
    (major, minor) object."""
    major = servermanager.vtkSMProxyManager.GetVersionMajor()
    minor = servermanager.vtkSMProxyManager.GetVersionMinor()
    return paraview._version(major, minor)
def GetParaViewSourceVersion():
    """Returns the paraview source version string, e.g.
    'paraview version x.x.x, Date: YYYY-MM-DD'."""
    version_string = servermanager.vtkSMProxyManager.GetParaViewSourceVersion()
    return version_string
#==============================================================================
# Client/Server Connection methods
#==============================================================================
def Disconnect(ns=None, force=True):
    """Free the current active session.

    `ns` is the namespace (dict) from which the generated proxy-creation
    helper functions are removed; defaults to this module's globals.
    When `force` is False and the build supports multiple simultaneous
    sessions, the call is a no-op (it is an internal disconnect request
    that multi-server setups must ignore)."""
    if not ns:
        ns = globals()
    supports_simutaneous_connections =\
        servermanager.vtkProcessModule.GetProcessModule().GetMultipleSessionsSupport()
    if not force and supports_simutaneous_connections:
        # This is an internal Disconnect request that doesn't need to happen in
        # multi-server setup. Ignore it.
        return
    if servermanager.ActiveConnection:
        _remove_functions(ns)
        servermanager.Disconnect()
        import gc
        # Reclaim proxies orphaned by closing the session.
        gc.collect()
# -----------------------------------------------------------------------------
def Connect(ds_host=None, ds_port=11111, rs_host=None, rs_port=11111):
    """Creates a connection to a server. Example usage::

    > Connect("amber") # Connect to a single server at default port
    > Connect("amber", 12345) # Connect to a single server at port 12345
    > Connect("amber", 11111, "vis_cluster", 11111) # connect to data server, render server pair"""
    # Tear down any existing session first (no-op for multi-session builds).
    Disconnect(globals(), False)
    connection = servermanager.Connect(ds_host, ds_port, rs_host, rs_port)
    _initializeSession(connection)
    # Re-inject the proxy-creation helper functions bound to the new session.
    _add_functions(globals())
    return connection
# -----------------------------------------------------------------------------
def ReverseConnect(port=11111):
    """Create a reverse connection to a server. Listens on port and waits for
    an incoming connection from the server."""
    # Tear down any existing session first (no-op for multi-session builds).
    Disconnect(globals(), False)
    connection = servermanager.ReverseConnect(port)
    _initializeSession(connection)
    # Re-inject the proxy-creation helper functions bound to the new session.
    _add_functions(globals())
    return connection
#==============================================================================
# Multi-servers
#==============================================================================
def SetActiveConnection(connection=None, ns=None):
    """Set the active connection. If the process was run without multi-server
    enabled and this method is called with a non-None argument while an
    ActiveConnection is present, it will raise a RuntimeError."""
    if not ns:
        ns = globals()
    if servermanager.ActiveConnection != connection:
        # Swap the generated helper functions: they are bound to the
        # connection they were created for.
        _remove_functions(ns)
        servermanager.SetActiveConnection(connection)
        _add_functions(ns)
#==============================================================================
# Views and Layout methods
#==============================================================================
def CreateView(view_xml_name, detachedFromLayout=False, **params):
    """Creates and returns the specified proxy view based on its name/label.
    If detachedFromLayout is true, the view will not be grabbed by the layout
    hence not visible unless it is attached later. Remaining keyword
    arguments are applied as view properties."""
    newview = servermanager._create_view(view_xml_name)
    if not newview:
        raise RuntimeError ("Failed to create requested view", view_xml_name)

    # "registrationName" (legacy spelling: "guiName") names the proxy in the
    # proxy manager; it is not a view property, so strip it from params.
    if "registrationName" in params:
        registrationName = params.pop("registrationName")
    elif "guiName" in params:
        registrationName = params.pop("guiName")
    else:
        registrationName = None

    pipeline = servermanager.ParaViewPipelineController()
    pipeline.PreInitializeProxy(newview)
    SetProperties(newview, **params)
    pipeline.PostInitializeProxy(newview)
    if detachedFromLayout:
        newview.SMProxy.SetAnnotation("ParaView::DetachedFromLayout", "true")
    pipeline.RegisterViewProxy(newview, registrationName)

    # setup an interactor if current process support interaction if an
    # interactor hasn't already been set. This overcomes the problem where VTK
    # segfaults if the interactor is created after the window was created.
    newview.MakeRenderWindowInteractor(True)
    return newview
# -----------------------------------------------------------------------------
def CreateRenderView(detachedFromLayout=False, **params):
    """Create standard 3D render view ("RenderView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("RenderView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateXYPlotView(detachedFromLayout=False, **params):
    """Create XY plot chart view ("XYChartView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("XYChartView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateXYPointPlotView(detachedFromLayout=False, **params):
    """Create XY point plot chart view ("XYPointChartView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("XYPointChartView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateBarChartView(detachedFromLayout=False, **params):
    """Create bar chart view ("XYBarChartView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("XYBarChartView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateComparativeRenderView(detachedFromLayout=False, **params):
    """Create comparative render view ("ComparativeRenderView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("ComparativeRenderView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateComparativeXYPlotView(detachedFromLayout=False, **params):
    """Create comparative XY plot chart view ("ComparativeXYPlotView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("ComparativeXYPlotView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateComparativeBarChartView(detachedFromLayout=False, **params):
    """Create comparative bar chart view ("ComparativeBarChartView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("ComparativeBarChartView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def CreateParallelCoordinatesChartView(detachedFromLayout=False, **params):
    """Create parallel coordinates chart view ("ParallelCoordinatesChartView"
    proxy). See CreateView for arguments documentation"""
    return CreateView("ParallelCoordinatesChartView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def Create2DRenderView(detachedFromLayout=False, **params):
    """Create the standard 3D render view with the 2D interaction mode
    turned ON ("2DRenderView" proxy).
    See CreateView for arguments documentation"""
    return CreateView("2DRenderView", detachedFromLayout, **params)
# -----------------------------------------------------------------------------
def GetRenderView():
    """Returns the active view if there is one. Else creates and returns a
    new render view."""
    view = active_objects.view
    if view:
        return view
    # No active view -- a render view may still exist somewhere; locate it
    # before resorting to creating a brand new one.
    existing = servermanager.GetRenderView()
    return existing if existing else CreateRenderView()
# -----------------------------------------------------------------------------
def GetRenderViews():
    """Returns all render views as a list."""
    return servermanager.GetRenderViews()
def GetViews(viewtype=None):
    """Returns all views. If viewtype is specified, only the views of the
    specified type are returned"""
    registered = servermanager.ProxyManager().GetProxiesInGroup("views").values()
    return [aProxy for aProxy in registered
            if aProxy.IsA("vtkSMViewProxy") and
               (viewtype is None or aProxy.GetXMLName() == viewtype)]
# -----------------------------------------------------------------------------
def SetViewProperties(view=None, **params):
    """Sets one or more properties of the given view. If an argument
    is not provided, the active view is used. Pass a list of
    property_name=value pairs to this function to set property values.
    For example::

        SetViewProperties(Background=[1, 0, 0], UseImmediateMode=0)
    """
    target = view if view else active_objects.view
    SetProperties(target, **params)
# -----------------------------------------------------------------------------
def Render(view=None):
    """Renders the given view (default value is active view). On the first
    render after module import, also resets the camera (unless
    _DisableFirstRenderCameraReset was called). Returns the view."""
    if not view:
        view = active_objects.view
    if not view:
        raise AttributeError ("view cannot be None")
    # setup an interactor if current process support interaction if an
    # interactor hasn't already been set. This overcomes the problem where VTK
    # segfaults if the interactor is created after the window was created.
    view.MakeRenderWindowInteractor(True)
    view.StillRender()
    if _funcs_internals.first_render:
        # Not all views have a ResetCamera method
        try:
            view.ResetCamera()
            view.StillRender()
        except AttributeError: pass
        _funcs_internals.first_render = False
    return view
# -----------------------------------------------------------------------------
def RenderAllViews():
    """Render every view known to the session."""
    for aview in GetViews():
        Render(aview)
# -----------------------------------------------------------------------------
def Interact(view=None):
    """Call this method to start interacting with a view. This method will
    block till the interaction is done.

    Raises ValueError if no view can be determined and RuntimeError if the
    local process cannot support interaction (note: it raises rather than
    silently returning)."""
    if not view:
        view = active_objects.view
    if not view:
        raise ValueError ("view argument cannot be None")
    if not view.MakeRenderWindowInteractor(False):
        raise RuntimeError ("Configuration doesn't support interaction.")
    # NOTE(review): "Staring" typo below is in a runtime string; left as-is.
    paraview.print_debug_info("Staring interaction. Use 'q' to quit.")
    # Views like ComparativeRenderView require that Render() is called before
    # the Interaction is begun. Hence we call a Render() before start the
    # interactor loop. This also avoids the case where there are pending updates
    # and thus the interaction will be begun on stale datasets.
    Render(view)
    view.GetInteractor().Start()
# -----------------------------------------------------------------------------
def ResetCamera(view=None):
    """Resets the settings of the camera to preserve orientation but include
    the whole scene. If an argument is not provided, the active view is
    used."""
    target = view if view else active_objects.view
    # Not every view type implements both methods; probe before calling.
    if hasattr(target, "ResetCamera"):
        target.ResetCamera()
    if hasattr(target, "ResetDisplay"):
        target.ResetDisplay()
    Render(target)
# -----------------------------------------------------------------------------
def CreateLayout(name=None):
    """Create a new layout with no active view."""
    new_layout = servermanager.misc.ViewLayout(registrationGroup="layouts")
    if name:
        RenameLayout(name, new_layout)
    return new_layout
# -----------------------------------------------------------------------------
def RemoveLayout(proxy=None):
    """Remove the provided layout; if none is provided, remove the layout
    containing the active view. If it is the last layout, a new one is
    created with the same name as the removed one."""
    pxm = servermanager.ProxyManager()
    target = proxy if proxy else GetLayout()
    name = pxm.GetProxyName('layouts', target)
    pxm.UnRegisterProxy('layouts', name, target)
    # Never leave the session without any layout at all.
    if not GetLayouts():
        CreateLayout(name)
# -----------------------------------------------------------------------------
def GetLayouts():
    """Returns the layout proxies on the active session, keyed on
    (name, id) tuples. Layout proxies are used to place views in a grid."""
    return servermanager.ProxyManager().GetProxiesInGroup("layouts")
# -----------------------------------------------------------------------------
def GetLayout(view=None):
    """Return the layout containing the given view, if any.
    If no view is specified, the active view is used. Returns None when the
    view belongs to no layout."""
    target = view if view else GetActiveView()
    if not target:
        raise RuntimeError ("No active view was found.")
    # GetViewLocation() returns -1 when the view is not in that layout.
    for layout in GetLayouts().values():
        if layout.GetViewLocation(target) != -1:
            return layout
    return None
def GetLayoutByName(name):
    """Return the first layout with the given name, if any."""
    # Group keys are (name, id) tuples; match on the name part only.
    for key, layout in GetLayouts().items():
        if key[0] == name:
            return layout
    return None
def GetViewsInLayout(layout=None):
    """Returns a list of views in the given layout. If no layout is specified,
    the layout for the active view is used, if possible."""
    target = layout if layout else GetLayout()
    if not target:
        raise RuntimeError ("Layout couldn't be determined. Please specify a valid layout.")
    contained = []
    for aview in GetViews():
        if target.GetViewLocation(aview) != -1:
            contained.append(aview)
    return contained
# -----------------------------------------------------------------------------
def RemoveViewsAndLayouts():
    """Delete all render views and unregister every layout proxy on the
    active session."""
    pxm = servermanager.ProxyManager()
    layouts = pxm.GetProxiesInGroup("layouts")

    for view in GetRenderViews():
        Delete(view)

    # Layouts cannot go through the regular Delete() path; unregister them
    # directly with the proxy manager. (Dropped the unused local `proxy`
    # that the previous version assigned without using.)
    for name, id in layouts:
        pxm.UnRegisterProxy('layouts', name, layouts[(name, id)])
#==============================================================================
# XML State management
#==============================================================================
def LoadState(filename, connection=None, **extraArgs):
    """Load a ParaView statefile. All existing views and layouts are removed
    first. Extra keyword arguments, if any, are applied as properties on the
    state-loading options proxy (e.g. to override data file locations).
    After loading, the first render view (if any) is made active."""
    RemoveViewsAndLayouts()

    pxm = servermanager.ProxyManager()
    proxy = pxm.NewProxy('options', 'LoadStateOptions')

    # Use a short-circuiting `and` (the previous bitwise `&` evaluated both
    # operands, so a None proxy crashed on PrepareToLoad before the
    # None-check could take effect).
    if (proxy is not None) and proxy.PrepareToLoad(filename):
        if (proxy.HasDataFiles() and (extraArgs is not None)):
            pyproxy = servermanager._getPyProxy(proxy)
            SetProperties(pyproxy, **extraArgs)
        proxy.Load()

    # Try to set the new view active
    if len(GetRenderViews()) > 0:
        SetActiveView(GetRenderViews()[0])
# -----------------------------------------------------------------------------
def SaveState(filename):
    """Save the current proxy state to a ParaView statefile (.pvsm)."""
    servermanager.SaveState(filename)
#==============================================================================
# Representation methods
#==============================================================================
def GetRepresentation(proxy=None, view=None):
    """Given a pipeline object and view, returns the corresponding representation object.
    If pipeline object and view are not specified, active objects are used.
    When no representation exists yet, one is created via the pipeline
    controller."""
    if not view:
        view = active_objects.view
    if not view:
        raise ValueError ("view argument cannot be None.")
    if not proxy:
        proxy = active_objects.source
    if not proxy:
        raise ValueError ("proxy argument cannot be None.")
    rep = servermanager.GetRepresentation(proxy, view)
    if not rep:
        # No existing representation: have the controller create one.
        controller = servermanager.ParaViewPipelineController()
        return controller.Show(proxy, proxy.Port, view)
    return rep
# -----------------------------------------------------------------------------
def GetDisplayProperties(proxy=None, view=None):
    """Given a pipeline object and view, returns the corresponding representation object.
    If pipeline object and/or view are not specified, active objects are used.

    Convenience alias for GetRepresentation()."""
    return GetRepresentation(proxy, view)
# -----------------------------------------------------------------------------
def Show(proxy=None, view=None, **params):
    """Turns the visibility of a given pipeline object on in the given view.
    If pipeline object and/or view are not specified, active objects are used.
    Remaining keyword arguments are set as properties on the returned
    representation proxy."""
    if proxy is None:
        proxy = GetActiveSource()
    if proxy is None:
        raise RuntimeError ("Show() needs a proxy argument or that an active source is set.")
    if not view:
        # if there's no active view, controller.Show() will create a new
        # preferred view if possible.
        view = active_objects.view
    controller = servermanager.ParaViewPipelineController()
    rep = controller.Show(proxy, proxy.Port, view)
    if rep is None:
        raise RuntimeError ("Could not create a representation object for proxy %s" % proxy.GetXMLLabel())
    # Apply any representation properties passed as keyword arguments.
    for param, value in params.items():
        setattr(rep, param, value)
    return rep
# -----------------------------------------------------------------------------
def Hide(proxy=None, view=None):
    """Turns the visibility of a given pipeline object off in the given view.
    If pipeline object and/or view are not specified, active objects are used."""
    target = proxy if proxy else active_objects.source
    if not target:
        raise ValueError ("proxy argument cannot be None when no active source is present.")
    targetview = view if view else active_objects.view
    controller = servermanager.ParaViewPipelineController()
    controller.Hide(target, target.Port, targetview)
# -----------------------------------------------------------------------------
def HideAll(view=None):
    """Hide all pipeline sources in the given view.
    If view is not specified, the active view is used."""
    target = view if view else active_objects.view
    servermanager.ParaViewPipelineController().HideAll(target)
# -----------------------------------------------------------------------------
def SetDisplayProperties(proxy=None, view=None, **params):
    """Sets one or more display properties of the given pipeline object. If an
    argument is not provided, the active source is used. Pass a list of
    property_name=value pairs to this function to set property values.
    For example::

        SetDisplayProperties(Color=[1, 0, 0], LineWidth=2)
    """
    SetProperties(GetDisplayProperties(proxy, view), **params)
# -----------------------------------------------------------------------------
def ColorBy(rep=None, value=None, separate=False):
    """Set scalar color. This will automatically setup the color maps and others
    necessary state for the representations. 'rep' must be the display
    properties proxy i.e. the value returned by GetDisplayProperties() function.
    If none is provided the display properties for the active source will be
    used, if possible. Set separate to True in order to use a separate color
    map for this representation.

    `value` may be None (disable scalar coloring), an array name, an
    (association, array name) pair, or an (association, array name,
    component name) triple; the component name "Magnitude" maps to -1."""
    rep = rep if rep else GetDisplayProperties()
    if not rep:
        raise ValueError ("No display properties can be determined.")

    rep.UseSeparateColorMap = separate
    association = rep.ColorArrayName.GetAssociation()
    arrayname = rep.ColorArrayName.GetArrayName()
    component = None
    if value is None:
        rep.SetScalarColoring(None, servermanager.GetAssociationFromString(association))
        return
    if not isinstance(value, tuple) and not isinstance(value, list):
        value = (value,)
    if len(value) == 1:
        arrayname = value[0]
    elif len(value) >= 2:
        association = value[0]
        arrayname = value[1]
    if len(value) == 3:
        # component name provided
        componentName = value[2]
        if componentName == "Magnitude":
            component = -1
        else:
            # BUGFIX: `array` was previously left unbound (NameError) for
            # associations other than POINTS/CELLS, as was the loop index
            # for zero-component arrays.
            array = None
            if association == "POINTS":
                array = rep.Input.PointData.GetArray(arrayname)
            if association == "CELLS":
                array = rep.Input.CellData.GetArray(arrayname)
            if array:
                # look for a component with the requested name ...
                for i in range(0, array.GetNumberOfComponents()):
                    if componentName == array.GetComponentName(i):
                        component = i
                        break
                else:
                    # ... none found: try to use the name as an int index
                    try:
                        component = int(componentName)
                    except ValueError:
                        pass
    if component is None:
        rep.SetScalarColoring(arrayname, servermanager.GetAssociationFromString(association))
    else:
        rep.SetScalarColoring(arrayname, servermanager.GetAssociationFromString(association), component)
    rep.RescaleTransferFunctionToDataRange()
# -----------------------------------------------------------------------------
def _DisableFirstRenderCameraReset():
    """Disable the first render camera reset. Normally a ResetCamera is called
    automatically when Render is called for the first time after importing
    this module."""
    # Render() consults this flag before performing its one-time camera reset.
    _funcs_internals.first_render = False
#==============================================================================
# Proxy handling methods
#==============================================================================
def SetProperties(proxy=None, **params):
    """Sets one or more properties of the given pipeline object. If an argument
    is not provided, the active source is used. Pass a list of property_name=value
    pairs to this function to set property values. For example::

        SetProperties(Center=[1, 2, 3], Radius=3.5)
    """
    target = proxy if proxy else active_objects.source
    known = target.ListProperties()
    for name, value in params.items():
        if name not in known:
            raise AttributeError("object has no property %s" % name)
        target.SetPropertyWithName(name, value)
# -----------------------------------------------------------------------------
def GetProperty(*arguments, **keywords):
"""Get one property of the given pipeline object. If keywords are used,
you can set the proxy and the name of the property that you want to get
as shown in the following example::
GetProperty({proxy=sphere, name="Radius"})
If arguments are used, then you have two cases:
- if only one argument is used that argument will be
the property name.
- if two arguments are used then the first one will be
the proxy and the second one the property name.
Several example are given below::
GetProperty({name="Radius"})
GetProperty({proxy=sphereProxy, name="Radius"})
GetProperty( sphereProxy, "Radius" )
GetProperty( "Radius" )
"""
name = None
proxy = None
for key in keywords:
if key == "name":
name = keywords[key]
if key == "proxy":
proxy = keywords[key]
if len(arguments) == 1 :
name = arguments[0]
if len(arguments) == 2 :
proxy = arguments[0]
name = arguments[1]
if not name:
raise RuntimeError ("Expecting at least a property name as input. Otherwise keyword could be used to set 'proxy' and property 'name'")
if not proxy:
proxy = active_objects.source
return proxy.GetProperty(name)
# -----------------------------------------------------------------------------
def GetDisplayProperty(*arguments, **keywords):
    """Same as GetProperty, except that if no 'proxy' is passed, it will use
    the active display properties, rather than the active source"""
    proxy = keywords.get("proxy", None)
    name = keywords.get("name", None)
    # Positional arguments take precedence over keywords.
    if len(arguments) == 1:
        name = arguments[0]
    if len(arguments) == 2:
        proxy = arguments[0]
        name = arguments[1]
    if not proxy:
        proxy = GetDisplayProperties()
    return GetProperty(proxy, name)
# -----------------------------------------------------------------------------
def GetViewProperty(*arguments, **keywords):
    """Same as GetProperty, except that if no 'proxy' is passed, it will use
    the active view properties, rather than the active source"""
    proxy = keywords.get("proxy", None)
    name = keywords.get("name", None)
    # Positional arguments take precedence over keywords.
    if len(arguments) == 1:
        name = arguments[0]
    if len(arguments) == 2:
        proxy = arguments[0]
        name = arguments[1]
    if not proxy:
        proxy = GetViewProperties()
    return GetProperty(proxy, name)
# -----------------------------------------------------------------------------
def GetViewProperties(view=None):
    """Same as GetActiveView(), this API is provided just for consistency with
    GetDisplayProperties()."""
    # NOTE(review): the `view` argument is accepted but ignored -- the active
    # view is always returned.
    return GetActiveView()
#==============================================================================
# ServerManager methods
#==============================================================================
def RenameProxy(proxy, group, newName):
    """Renames the given proxy within the given proxy-manager group."""
    pxm = servermanager.ProxyManager()
    current = pxm.GetProxyName(group, proxy)
    if not current or newName == current:
        return
    # Register under the new name before dropping the old one so the proxy
    # never becomes unreferenced during the rename.
    pxm.RegisterProxy(group, newName, proxy)
    pxm.UnRegisterProxy(group, current, proxy)
def RenameSource(newName, proxy=None):
    """Renames the given source. If the given proxy is not registered
    in the sources group this method will have no effect. If no source is
    provided, the active source is used."""
    target = proxy if proxy else GetActiveSource()
    RenameProxy(target, "sources", newName)
def RenameView(newName, proxy=None):
    """Renames the given view. If the given proxy is not registered
    in the views group this method will have no effect. If no view is
    provided, the active view is used."""
    target = proxy if proxy else GetActiveView()
    RenameProxy(target, "views", newName)
def RenameLayout(newName, proxy=None):
    """Renames the given layout. If the given proxy is not registered
    in the layouts group this method will have no effect. If no layout is
    provided, the active layout is used."""
    target = proxy if proxy else GetLayout()
    RenameProxy(target, "layouts", newName)
# -----------------------------------------------------------------------------
def FindSource(name):
    """
    Return a proxy based on the name that was used to register it
    into the ProxyManager.

    Example usage::

       Cone(guiName='MySuperCone')
       Show()
       Render()
       myCone = FindSource('MySuperCone')
    """
    return servermanager.ProxyManager().GetProxy("sources", name)
def FindView(name):
    """
    Return a view proxy based on the name that was used to register it
    into the ProxyManager.

    Example usage::

       CreateRenderView(guiName='RenderView1')
       myView = FindView('RenderView1')
    """
    return servermanager.ProxyManager().GetProxy("views", name)
def GetActiveViewOrCreate(viewtype):
    """
    Returns the active view, if the active view is of the given type,
    otherwise creates a new view of the requested type."""
    current = GetActiveView()
    if current is not None and current.GetXMLName() == viewtype:
        return current
    created = CreateView(viewtype)
    if not created:
        raise RuntimeError ("Failed to create/locate the specified view")
    return created
def FindViewOrCreate(name, viewtype):
    """
    Returns the view, if a view with the given name exists and is of
    the given type, otherwise creates a new view of the requested type."""
    found = FindView(name)
    if found is not None and found.GetXMLName() == viewtype:
        return found
    created = CreateView(viewtype)
    if not created:
        raise RuntimeError ("Failed to create/locate the specified view")
    return created
def LocateView(displayProperties=None):
    """
    Given a displayProperties object i.e. the object returned by
    GetDisplayProperties() or Show() functions, this function will locate a view
    to which the displayProperties object corresponds. Returns None if no
    such view is found."""
    if displayProperties is None:
        displayProperties = GetDisplayProperties()
    if displayProperties is None:
        raise ValueError ("'displayProperties' must be set")
    for aview in GetViews():
        # Some view types have no Representations property; skip those.
        try:
            if displayProperties in aview.Representations:
                return aview
        except AttributeError:
            pass
    return None
# -----------------------------------------------------------------------------
def GetSources():
    """Returns the dictionary of all registered pipeline sources, keyed on
    (name, id) tuples. (Use FindSource to look a source up by name.)"""
    return servermanager.ProxyManager().GetProxiesInGroup("sources")
# -----------------------------------------------------------------------------
def GetRepresentations():
    """Returns all representations (display properties) registered with the
    proxy manager, keyed on (name, id) tuples."""
    return servermanager.ProxyManager().GetProxiesInGroup("representations")
# -----------------------------------------------------------------------------
def UpdatePipeline(time=None, proxy=None):
    """Updates (executes) the given pipeline object for the given time as
    necessary (i.e. if it did not already execute). If no source is provided,
    the active source is used instead."""
    if not proxy:
        proxy = active_objects.source
    # `is not None` (not truthiness) so that time 0.0 -- a perfectly valid
    # timestep -- is still forwarded to the pipeline.
    if time is not None:
        proxy.UpdatePipeline(time)
    else:
        proxy.UpdatePipeline()
# -----------------------------------------------------------------------------
def Delete(proxy=None):
    """Deletes the given pipeline object or the active source if no argument
    is specified."""
    target = proxy if proxy else active_objects.source
    if not target:
        raise RuntimeError ("Could not locate proxy to 'Delete'")
    servermanager.ParaViewPipelineController().UnRegisterProxy(target)
#==============================================================================
# Active Source / View / Camera / AnimationScene
#==============================================================================
def GetActiveView():
    """Returns the active view (may be None if no view is active)."""
    return active_objects.view
# -----------------------------------------------------------------------------
def SetActiveView(view):
    """Sets the active view."""
    # Delegates to the module-wide active-objects tracker.
    active_objects.view = view
# -----------------------------------------------------------------------------
def GetActiveSource():
    """Returns the active source (may be None if no source is active)."""
    return active_objects.source
# -----------------------------------------------------------------------------
def SetActiveSource(source):
    """Sets the active source."""
    # Delegates to the module-wide active-objects tracker.
    active_objects.source = source
# -----------------------------------------------------------------------------
def GetActiveCamera():
    """Returns the active camera for the active view. The returned object
    is an instance of vtkCamera."""
    # Raises AttributeError if there is no active view (GetActiveView()
    # returns None in that case).
    return GetActiveView().GetActiveCamera()
#==============================================================================
# I/O methods
#==============================================================================
def OpenDataFile(filename, **extraArgs):
    """Creates a reader to read the give file, if possible.
    This uses extension matching to determine the best reader possible.
    If a reader cannot be identified, then this returns None.

    `filename` may also be a list of files (a file series); readability is
    checked against the first file in the list. Extra keyword arguments are
    forwarded to the reader's constructor."""
    session = servermanager.ActiveConnection.Session
    reader_factor = servermanager.vtkSMProxyManager.GetProxyManager().GetReaderFactory()
    if reader_factor.GetNumberOfRegisteredPrototypes() == 0:
        reader_factor.UpdateAvailableReaders()
    first_file = filename
    # isinstance (not `type(...) == list`) also accepts list subclasses.
    if isinstance(filename, list):
        first_file = filename[0]
    if not reader_factor.TestFileReadability(first_file, session):
        msg = "File not readable: %s " % first_file
        raise RuntimeError (msg)
    if not reader_factor.CanReadFile(first_file, session):
        msg = "File not readable. No reader found for '%s' " % first_file
        raise RuntimeError (msg)
    prototype = servermanager.ProxyManager().GetPrototypeProxy(
      reader_factor.GetReaderGroup(), reader_factor.GetReaderName())
    xml_name = paraview.make_name_valid(prototype.GetXMLLabel())
    reader_func = _create_func(xml_name, servermanager.sources)
    # Set the reader's filename property (name discovered from the prototype).
    pname = servermanager.vtkSMCoreUtilities.GetFileNameProperty(prototype)
    if pname:
        extraArgs[pname] = filename
    reader = reader_func(**extraArgs)
    return reader
# -----------------------------------------------------------------------------
def ReloadFiles(proxy=None):
    """Forces the `proxy` to reload the data files. If no `proxy` is provided,
    active source is used."""
    target = proxy if proxy else GetActiveSource()
    helper = servermanager.vtkSMReaderReloadHelper()
    return helper.ReloadFiles(target.SMProxy)
def ExtendFileSeries(proxy=None):
    """For a reader `proxy` that supports reading files series, detect any new
    files added to the series and update the reader's filename property.
    If no `proxy` is provided, active source is used."""
    reader = proxy if proxy else GetActiveSource()
    helper = servermanager.vtkSMReaderReloadHelper()
    return helper.ExtendFileSeries(reader.SMProxy)
# -----------------------------------------------------------------------------
def ImportCinema(filename, view=None):
    """Import a cinema database. This can potentially create multiple
    sources/filters for visualizable objects in the Cinema database.
    Returns True on success. If view is provided, then the cinema sources
    are shown in that view as indicated in the database.
    """
    try:
        from vtkmodules.vtkPVCinemaReader import vtkSMCinemaDatabaseImporter
    except ImportError:
        # Cinema support is optional; report failure instead of raising.
        return False
    importer = vtkSMCinemaDatabaseImporter()
    session = servermanager.ActiveConnection.Session
    return importer.ImportCinema(filename, session, view)
# -----------------------------------------------------------------------------
def CreateWriter(filename, proxy=None, **extraArgs):
    """Creates a writer that can write the data produced by the source proxy in
    the given file format (identified by the extension). If no source is
    provided, then the active source is used. This doesn't actually write the
    data, it simply creates the writer and returns it."""
    if not filename:
        raise RuntimeError ("filename must be specified")
    # NOTE(review): `session` is never used below; kept from the original as
    # it also fails early when no connection is active — confirm before removing.
    session = servermanager.ActiveConnection.Session
    factory = servermanager.vtkSMProxyManager.GetProxyManager().GetWriterFactory()
    if factory.GetNumberOfRegisteredPrototypes() == 0:
        factory.UpdateAvailableWriters()
    source = proxy if proxy else GetActiveSource()
    if not source:
        raise RuntimeError ("Could not locate source to write")
    smwriter = factory.CreateWriter(filename, source.SMProxy, source.Port)
    smwriter.UnRegister(None)
    pywriter = servermanager._getPyProxy(smwriter)
    if pywriter and extraArgs:
        SetProperties(pywriter, **extraArgs)
    return pywriter
def SaveData(filename, proxy=None, **extraArgs):
    """Save data produced by 'proxy' in a file. If no proxy is specified the
    active source is used. Properties to configure the writer can be passed in
    as keyword arguments. Example usage::
        SaveData("sample.pvtp", source0)
        SaveData("sample.csv", FieldAssociation="Points")
    """
    writer = CreateWriter(filename, proxy, **extraArgs)
    if writer:
        writer.UpdateVTKObjects()
        # Triggering the writer's pipeline performs the actual write.
        writer.UpdatePipeline()
        del writer
    else:
        raise RuntimeError ("Could not create writer for specified file or data type")
# -----------------------------------------------------------------------------
def WriteImage(filename, view=None, **params):
    """::deprecated:: 4.2
    Use :func:`SaveScreenshot` instead.
    """
    target = view if view else active_objects.view
    writer = params.get('Writer', None)
    mag = int(params.get('Magnification', 1))
    if not writer:
        writer = _find_writer(filename)
    target.WriteImage(filename, writer, mag)
# -----------------------------------------------------------------------------
def _SaveScreenshotLegacy(filename,
        view=None, layout=None, magnification=None, quality=None, **params):
    """Backwards-compatibility shim for `SaveScreenshot` supporting the
    pre-5.4 keyword arguments (view/layout/magnification/quality). Converts
    those arguments into the new `ImageResolution`/`ImageQuality` form and
    forwards to `SaveScreenshot`.

    :raises ValueError: if both `view` and `layout` are given, or if neither
        is given and there is no active view.
    """
    if view is not None and layout is not None:
        raise ValueError ("both view and layout cannot be specified")
    viewOrLayout = view if view else layout
    viewOrLayout = viewOrLayout if viewOrLayout else GetActiveView()
    if not viewOrLayout:
        raise ValueError ("view or layout needs to be specified")
    # int(None) raises TypeError, so an unspecified magnification falls back
    # to 1; non-positive values are also clamped to 1.
    try:
        magnification = int(magnification) if int(magnification) > 0 else 1
    except TypeError:
        magnification = 1
    # An unspecified quality becomes -1 ("use format default").
    try:
        quality = int(quality)
    except TypeError:
        quality = -1
    # convert magnification to image resolution.
    if viewOrLayout.IsA("vtkSMViewProxy"):
        size = viewOrLayout.ViewSize
    else:
        assert(viewOrLayout.IsA("vtkSMViewLayoutProxy"))
        # Layout extent is (xmin, xmax, ymin, ymax); convert to width/height.
        exts = [0] * 4
        viewOrLayout.GetLayoutExtent(exts)
        size = [exts[1]-exts[0]+1, exts[3]-exts[2]+1]
    imageResolution = (size[0]*magnification, size[1]*magnification)
    # convert quality to ImageQuality
    imageQuality = quality
    # now, call the new API
    return SaveScreenshot(filename, viewOrLayout,
        ImageResolution=imageResolution,
        ImageQuality=imageQuality)
def SaveScreenshot(filename, viewOrLayout=None, **params):
    """Save screenshot for a view or layout (collection of views) to an image.
    `SaveScreenshot` is used to save the rendering results to an image.
    **Parameters**
    filename (str)
      Name of the image file to save to. The filename extension is used to
      determine the type of image file generated. Supported extensions are
      `png`, `jpg`, `tif`, `bmp`, and `ppm`.
    viewOrLayout (``proxy``, optional):
      The view or layout to save image from, defaults to None. If None, then
      the active view is used, if available. To save image from a single
      view, this must be set to a view, to save an image from all views in a
      layout, pass the layout.
    **Keyword Parameters (optional)**
    ImageResolution (tuple(int, int))
      A 2-tuple to specify the output image resolution in pixels as
      `(width, height)`. If not specified, the view (or layout) size is
      used.
    FontScaling (str)
      Specify whether to scale fonts proportionally (`"Scale fonts
      proportionally"`) or not (`"Do not scale fonts"`). Defaults to
      `"Scale fonts proportionally"`.
    SeparatorWidth (int)
      When saving multiple views in a layout, specify the width (in
      approximate pixels) for a separator between views in the generated
      image.
    SeparatorColor (tuple(float, float, float))
      Specify the color for separator between views, if applicable.
    OverrideColorPalette (:obj:str, optional)
      Name of the color palette to use, if any. If none specified, current
      color palette remains unchanged.
    StereoMode (str)
      Stereo mode to use, if any. Available values are `"No stereo"`,
      `"Red-Blue"`, `"Interlaced"`, `"Left Eye Only"`, `"Right Eye Only"`,
      `"Dresden"`, `"Anaglyph"`, `"Checkerboard"`,
      `"Side-by-Side Horizontal"`, and the default `"No change"`.
    TransparentBackground (int)
      Set to 1 (or True) to save an image with background set to alpha=0, if
      supported by the output image format.
    In addition, several format-specific keyword parameters can be specified.
    The format is chosen based on the file extension.
    For JPEG (`*.jpg`), the following parameters are available (optional)
    Quality (int) [0, 100]
      Specify the JPEG compression quality. `0` is low quality (maximum
      compression) and `100` is high quality (least compression).
    Progressive (int):
      Set to 1 (or True) to save progressive JPEG.
    For PNG (`*.png`), the following parameters are available (optional)
    CompressionLevel (int) [0, 9]
      Specify the *zlib* compression level. `0` is no compression, while `9` is
      maximum compression.
    **Legacy Parameters**
    Prior to ParaView version 5.4, the following parameters were available
    and are still supported. However, they cannot be used together with
    other keyword parameters documented earlier.
    view (proxy)
      Single view to save image from.
    layout (proxy)
      Layout to save image from.
    magnification (int)
      Magnification factor to use to save the output image. The current view
      (or layout) size is scaled by the magnification factor provided.
    quality (int)
      Output image quality, a number in the range [0, 100].
    ImageQuality (int)
      For ParaView 5.4, this parameter was available, however it is ignored
      starting with ParaView 5.5. Instead, it is recommended to use
      format-specific quality parameters based on the file format being used.
    """
    # Let's handle backwards compatibility.
    # Previous API for this method took the following arguments:
    # SaveScreenshot(filename, view=None, layout=None, magnification=None, quality=None)
    # If we notice any of the old arguments, call legacy method.
    if "view" in params or "layout" in params or \
        "magnification" in params or \
        "quality" in params:
        # since in previous variant, view was a positional param,
        # we handle that too.
        if "view" in params:
            view = params.get("view")
            del params["view"]
        else:
            view = viewOrLayout
        return _SaveScreenshotLegacy(filename, view=view, **params)
    # use active view if no view or layout is specified.
    viewOrLayout = viewOrLayout if viewOrLayout else GetActiveView()
    if not viewOrLayout:
        raise ValueError("A view or layout must be specified.")
    controller = servermanager.ParaViewPipelineController()
    options = servermanager.misc.SaveScreenshot()
    controller.PreInitializeProxy(options)
    # Exactly one of Layout/View is set, depending on what was passed in.
    options.Layout = viewOrLayout if viewOrLayout.IsA("vtkSMViewLayoutProxy") else None
    options.View = viewOrLayout if viewOrLayout.IsA("vtkSMViewProxy") else None
    options.SaveAllViews = True if viewOrLayout.IsA("vtkSMViewLayoutProxy") else False
    # this will choose the correct format.
    options.UpdateDefaultsAndVisibilities(filename)
    controller.PostInitializeProxy(options)
    # explicitly process format properties: forward any keyword argument that
    # matches a property on the chosen format proxy (e.g. JPEG Quality).
    formatProxy = options.Format
    formatProperties = formatProxy.ListProperties()
    for prop in formatProperties:
        if prop in params:
            formatProxy.SetPropertyWithName(prop, params[prop])
            del params[prop]
    # ImageQuality is obsolete since 5.5; warn and drop it.
    if "ImageQuality" in params:
        import warnings
        warnings.warn("'ImageQuality' is deprecated and will be ignored.", DeprecationWarning)
        del params["ImageQuality"]
    SetProperties(options, **params)
    return options.WriteImage(filename)
# -----------------------------------------------------------------------------
def SaveAnimation(filename, viewOrLayout=None, scene=None, **params):
    """Save animation as a movie file or series of images.
    `SaveAnimation` is used to save an animation as a movie file (avi or ogv) or
    a series of images.
    **Parameters**
    filename (str)
      Name of the output file. The extension is used to determine the type
      of the output. Supported extensions are `png`, `jpg`, `tif`, `bmp`,
      and `ppm`. Based on platform (and build) configuration, `avi` and
      `ogv` may be supported as well.
    viewOrLayout (``proxy``, optional)
      The view or layout to save image from, defaults to None. If None, then
      the active view is used, if available. To save image from a single
      view, this must be set to a view, to save an image from all views in a
      layout, pass the layout.
    scene (``proxy``, optional)
      Animation scene to save. If None, then the active scene returned by
      `GetAnimationScene` is used.
    **Keyword Parameters (optional)**
    `SaveAnimation` supports all keyword parameters supported by
    `SaveScreenshot`. In addition, the following parameters are supported:
    FrameRate (int):
      Frame rate in frames per second for the output. This only affects the
      output when generated movies (`avi` or `ogv`), and not when saving the
      animation out as a series of images.
    FrameWindow (tuple(int,int))
      To save a part of the animation, provide the range in frames or
      timesteps index.
    In addition, several format-specific keyword parameters can be specified.
    The format is chosen based on the file extension.
    For Image-based file-formats that save series of images e.g. PNG, JPEG,
    following parameters are available.
    SuffixFormat (string):
      Format string used to convert the frame number to file name suffix.
    FFMPEG avi file format supports following parameters.
    Compression (int)
      Set to 1 or True to enable compression.
    Quality:
      When compression is 1 (or True), this specifies the compression
      quality. `0` is worst quality (smallest file size) and `2` is best
      quality (largest file size).
    VideoForWindows (VFW) avi file format supports following parameters.
    Quality:
      This specifies the compression quality. `0` is worst quality
      (smallest file size) and `2` is best quality (largest file size).
    OGG/Theora file format supports following parameters.
    Quality:
      This specifies the compression quality. `0` is worst quality
      (smallest file size) and `2` is best quality (largest file size).
    UseSubsampling:
      When set to 1 (or True), the video will be encoded using 4:2:0
      subsampling for the color channels.
    **Obsolete Parameters**
    DisconnectAndSave (int):
      This mode is no longer supported as of ParaView 5.5, and will be
      ignored.
    ImageQuality (int)
      For ParaView 5.4, this parameter was available, however it is ignored
      starting with ParaView 5.5. Instead, it is recommended to use
      format-specific quality parameters based on the file format being used.
    """
    # use active view if no view or layout is specified.
    viewOrLayout = viewOrLayout if viewOrLayout else GetActiveView()
    if not viewOrLayout:
        raise ValueError("A view or layout must be specified.")
    scene = scene if scene else GetAnimationScene()
    if not scene:
        raise RuntimeError("Missing animation scene.")
    # DisconnectAndSave is obsolete since 5.5; warn and drop it.
    if "DisconnectAndSave" in params:
        import warnings
        warnings.warn("'DisconnectAndSave' is deprecated and will be ignored.", DeprecationWarning)
        del params["DisconnectAndSave"]
    controller = servermanager.ParaViewPipelineController()
    options = servermanager.misc.SaveAnimation()
    controller.PreInitializeProxy(options)
    options.AnimationScene = scene
    # Exactly one of Layout/View is set, depending on what was passed in.
    options.Layout = viewOrLayout if viewOrLayout.IsA("vtkSMViewLayoutProxy") else None
    options.View = viewOrLayout if viewOrLayout.IsA("vtkSMViewProxy") else None
    options.SaveAllViews = True if viewOrLayout.IsA("vtkSMViewLayoutProxy") else False
    # this will choose the correct format.
    options.UpdateDefaultsAndVisibilities(filename)
    controller.PostInitializeProxy(options)
    # explicitly process format properties: forward any keyword argument that
    # matches a property on the chosen format proxy (e.g. FFMPEG Quality).
    formatProxy = options.Format
    formatProperties = formatProxy.ListProperties()
    for prop in formatProperties:
        if prop in params:
            formatProxy.SetPropertyWithName(prop, params[prop])
            del params[prop]
    # ImageQuality is obsolete since 5.5; warn and drop it.
    if "ImageQuality" in params:
        import warnings
        warnings.warn("'ImageQuality' is deprecated and will be ignored.", DeprecationWarning)
        del params["ImageQuality"]
    SetProperties(options, **params)
    return options.WriteAnimation(filename)
def WriteAnimation(filename, **params):
    """
    ::deprecated:: 5.3
    Use :func:`SaveAnimation` instead.
    This function can still be used to save an animation, but using
    :func: `SaveAnimation` is strongly recommended as it provides more
    flexibility.
    The following parameters are currently supported.
    **Parameters**
    filename (str)
      Name of the output file.
    **Keyword Parameters (optional)**
    Magnification (int):
      Magnification factor for the saved animation.
    Quality (int)
      int in range [0,2].
    FrameRate (int)
      Frame rate.
    The following parameters are no longer supported and are ignored:
    Subsampling, BackgroundColor, FrameRate, StartFileCount, PlaybackTimeWindow
    """
    newparams = {}
    # this method simply tries to provide legacy behavior: translate the old
    # keyword arguments into the SaveAnimation API.
    scene = GetAnimationScene()
    newparams["scene"] = scene
    # previously, scene saved all views and only worked well if there was 1
    # layout, so do that.
    layout = GetLayout()
    newparams["viewOrLayout"] = layout
    if "Magnification" in params:
        # Magnification scales the current layout size into ImageResolution.
        magnification = params["Magnification"]
        exts = [0] * 4
        layout.GetLayoutExtent(exts)
        size = [exts[1]-exts[0]+1, exts[3]-exts[2]+1]
        imageResolution = (size[0]*magnification, size[1]*magnification)
        newparams["ImageResolution"] = imageResolution
    if "Quality" in params:
        # convert quality (0=worst, 2=best) to imageQuality (0 = worst, 100 = best)
        quality = int(params["Quality"])
        imageQuality = int(100 * quality/2.0)
        newparams["ImageQuality"] = imageQuality
    if "FrameRate" in params:
        newparams["FrameRate"] = int(params["FrameRate"])
    return SaveAnimation(filename, **newparams)
def WriteAnimationGeometry(filename, view=None):
    """Save the animation geometry from a specific view to the file specified.
    The animation geometry is written out as a PVD file. If no view is
    specified, the active view will be used if possible.

    :raises ValueError: if no view is given and there is no active view.
    """
    view = view if view else GetActiveView()
    if not view:
        raise ValueError ("Please specify the view to use")
    scene = GetAnimationScene()
    writer = servermanager.vtkSMAnimationSceneGeometryWriter()
    writer.SetFileName(filename)
    writer.SetAnimationScene(scene.SMProxy)
    writer.SetViewModule(view.SMProxy)
    writer.Save()
#==============================================================================
# Lookup Table / Scalarbar methods
#==============================================================================
# -----------------------------------------------------------------------------
def HideUnusedScalarBars(view=None):
    """Hides all unused scalar bars from the view. A scalar bar is considered
    used when some data shown in the view is colored via the transfer function
    that bar displays."""
    target = view if view else active_objects.view
    if not target:
        raise ValueError ("'view' argument cannot be None with no active is present.")
    mgr = servermanager.vtkSMTransferFunctionManager()
    return mgr.UpdateScalarBars(target.SMProxy, mgr.HIDE_UNUSED_SCALAR_BARS)
def HideScalarBarIfNotNeeded(lut, view=None):
    """Hides the given scalar bar if it is not used by any of the displayed data."""
    target = view if view else active_objects.view
    if not target:
        raise ValueError ("'view' argument cannot be None with no active present.")
    mgr = servermanager.vtkSMTransferFunctionManager()
    return mgr.HideScalarBarIfNotNeeded(lut.SMProxy, target.SMProxy)
def UpdateScalarBars(view=None):
    """Hides all unused scalar bars and shows all used ones. A scalar bar is
    considered used when some data shown in the view is colored via the
    transfer function that bar displays."""
    target = view if view else active_objects.view
    if not target:
        raise ValueError ("'view' argument cannot be None with no active is present.")
    mgr = servermanager.vtkSMTransferFunctionManager()
    flags = mgr.HIDE_UNUSED_SCALAR_BARS | mgr.SHOW_USED_SCALAR_BARS
    return mgr.UpdateScalarBars(target.SMProxy, flags)
def UpdateScalarBarsComponentTitle(ctf, representation=None):
    """Update the component titles of all scalar bars using the provided lookup
    table. The representation is used to recover the array from which the
    component title was obtained. If None is provided, the representation of
    the active source in the active view is used."""
    rep = representation
    if not rep:
        view = active_objects.view
        proxy = active_objects.source
        if not view:
            raise ValueError ("'representation' argument cannot be None with no active view.")
        if not proxy:
            raise ValueError ("'representation' argument cannot be None with no active source.")
        rep = GetRepresentation(view, proxy)
    mgr = servermanager.vtkSMTransferFunctionManager()
    return mgr.UpdateScalarBarsComponentTitle(ctf.SMProxy, rep.SMProxy)
def GetScalarBar(ctf, view=None):
    """Returns the scalar bar for color transfer function in the given view.
    If view is None, the active view will be used, if possible.
    This will either return an existing scalar bar or create a new one."""
    if not view:
        view = active_objects.view
    if not view:
        raise ValueError ("'view' argument cannot be None when no active view is present")
    mgr = servermanager.vtkSMTransferFunctionManager()
    return servermanager._getPyProxy(
        mgr.GetScalarBarRepresentation(ctf.SMProxy, view.SMProxy))
# -----------------------------------------------------------------------------
def GetColorTransferFunction(arrayname, representation=None, separate=False, **params):
    """Return the color transfer function used to map the data array with the
    given name to colors, creating one if none exists yet. When `separate` is
    True (or the representation uses a separate color map), the transfer
    function is scoped to the given representation via a decorated array name."""
    if representation and (separate or representation.UseSeparateColorMap):
        arrayname = ("%s%s_%s" % ("Separate_", representation.SMProxy.GetGlobalIDAsString(), arrayname))
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Missing active session")
    session = servermanager.ActiveConnection.Session
    mgr = servermanager.vtkSMTransferFunctionManager()
    lut = servermanager._getPyProxy(
        mgr.GetColorTransferFunction(arrayname, session.GetSessionProxyManager()))
    SetProperties(lut, **params)
    return lut
def GetOpacityTransferFunction(arrayname, representation=None, separate=False, **params):
    """Return the opacity transfer function used to map the data array with the
    given name to opacity, creating one if none exists yet. When `separate` is
    True (or the representation uses a separate color map), the transfer
    function is scoped to the given representation via a decorated array name."""
    if representation and (separate or representation.UseSeparateColorMap):
        arrayname = ("%s%s_%s" % ("Separate_", representation.SMProxy.GetGlobalIDAsString(), arrayname))
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Missing active session")
    session = servermanager.ActiveConnection.Session
    mgr = servermanager.vtkSMTransferFunctionManager()
    otf = servermanager._getPyProxy(
        mgr.GetOpacityTransferFunction(arrayname, session.GetSessionProxyManager()))
    SetProperties(otf, **params)
    return otf
# -----------------------------------------------------------------------------
def ImportPresets(filename):
    """Import presets from a file. The file can be in the legacy color map xml
    format or in the new JSON format. Returns True on success."""
    return servermanager.vtkSMTransferFunctionPresets().ImportPresets(filename)
# -----------------------------------------------------------------------------
def CreateLookupTable(**params):
    """Create, register, and return a lookup table. Keyword arguments, if any,
    are applied as properties on the new lookup table."""
    controller = servermanager.ParaViewPipelineController()
    lut = servermanager.rendering.PVLookupTable()
    controller.InitializeProxy(lut)
    SetProperties(lut, **params)
    controller.RegisterColorTransferFunctionProxy(lut)
    return lut
# -----------------------------------------------------------------------------
def CreatePiecewiseFunction(**params):
    """Create, register, and return a piecewise function. Keyword arguments,
    if any, are applied as properties on the new function."""
    controller = servermanager.ParaViewPipelineController()
    pwf = servermanager.piecewise_functions.PiecewiseFunction()
    controller.InitializeProxy(pwf)
    SetProperties(pwf, **params)
    controller.RegisterOpacityTransferFunction(pwf)
    return pwf
# -----------------------------------------------------------------------------
def GetLookupTableForArray(arrayname, num_components, **params):
    """Return (creating if needed) a lookup table for the named array. Keyword
    arguments initialize the LUT when a new one is created.
    *** DEPRECATED ***: Use GetColorTransferFunction instead.
    Note: `num_components` is kept for backward compatibility and is no
    longer used."""
    return GetColorTransferFunction(arrayname, **params)
# global lookup table reader instance
# the user can use the simple api below
# rather than creating a lut reader themselves
_lutReader = None
def _GetLUTReaderInstance():
    """Internal api. Return the lookup table reader singleton, creating it
    on first use."""
    global _lutReader
    if _lutReader is not None:
        return _lutReader
    import lookuptable
    _lutReader = lookuptable.vtkPVLUTReader()
    return _lutReader
# -----------------------------------------------------------------------------
def AssignLookupTable(arrayObject, LUTName, rangeOveride=[]):
    """Assign a lookup table, selected by name, to an array. The array may be
    obtained from a ParaView source's point or cell data. The lookup tables
    available in ParaView's GUI are loaded by default; see GetLookupTableNames
    for the available names and LoadLookupTable to load a custom table.
    NOTE(review): `rangeOveride` keeps its historical spelling and mutable
    default for interface compatibility; it is only passed through here."""
    reader = _GetLUTReaderInstance()
    return reader.GetLUT(arrayObject, LUTName, rangeOveride)
# -----------------------------------------------------------------------------
def GetLookupTableNames():
    """Return a list of the currently available lookup table names. Any of
    these names may be used to assign a lookup table to an array; see
    AssignLookupTable."""
    reader = _GetLUTReaderInstance()
    return reader.GetLUTNames()
# -----------------------------------------------------------------------------
def LoadLookupTable(fileName):
    """Read the lookup tables in the named file and append them to the global
    collection of lookup tables. The newly loaded tables may then be used with
    the AssignLookupTable function."""
    reader = _GetLUTReaderInstance()
    return reader.Read(fileName)
# -----------------------------------------------------------------------------
def CreateScalarBar(**params):
    """Create and return a scalar bar widget. Append the widget to a render
    view's representations to display it; the widget must have a valid lookup
    table before it is added to a view. The lookup table (and other
    properties) may be passed as keyword arguments::
        lt = MakeBlueToRedLT(3.5, 7.5)
        bar = CreateScalarBar(LookupTable=lt, Title="Velocity")
        GetRenderView().Representations.append(bar)
    By default the returned widget is selectable and resizable.
    """
    widget = servermanager.rendering.ScalarBarWidgetRepresentation()
    servermanager.Register(widget)
    widget.Selectable = 1
    widget.Resizable = 1
    widget.Enabled = 1
    widget.Title = "Scalars"
    SetProperties(widget, **params)
    return widget
# -----------------------------------------------------------------------------
# TODO: Change this to take the array name and number of components. Register
# the lt under the name ncomp.array_name
def MakeBlueToRedLT(min, max):
    """Create a lookup table over the scalar range [min, max] that maps the
    low end to blue and the high end to red."""
    # RGB points are flat 4-tuples: (scalar, r, g, b). Blue at the minimum,
    # red at the maximum.
    points = [min, 0, 0, 1, max, 1, 0, 0]
    return CreateLookupTable(RGBPoints=points, ColorSpace="HSV")
#==============================================================================
# CameraLink methods
#==============================================================================
def AddCameraLink(viewProxy, viewProxyOther, linkName):
    """Create a camera link between two view proxies, registered under the
    given name. If a link with that name already exists it is removed first.
    If `viewProxyOther` is None, the active view is used."""
    if not viewProxyOther:
        viewProxyOther = GetActiveView()
    link = servermanager.vtkSMCameraLink()
    # Link both directions so either camera drives the other.
    link.AddLinkedProxy(viewProxy.SMProxy, 1)
    link.AddLinkedProxy(viewProxyOther.SMProxy, 2)
    link.AddLinkedProxy(viewProxyOther.SMProxy, 1)
    link.AddLinkedProxy(viewProxy.SMProxy, 2)
    RemoveCameraLink(linkName)
    servermanager.ProxyManager().RegisterLink(linkName, link)
# -----------------------------------------------------------------------------
def RemoveCameraLink(linkName):
    """Remove a camera link with the given name."""
    pxm = servermanager.ProxyManager()
    pxm.UnRegisterLink(linkName)
#==============================================================================
# Animation methods
#==============================================================================
def GetTimeKeeper():
    """Returns the time-keeper for the active session. The timekeeper is often
    used to manage time step information known to the ParaView application."""
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Missing active session")
    controller = servermanager.ParaViewPipelineController()
    return controller.FindTimeKeeper(servermanager.ActiveConnection.Session)
def GetAnimationScene():
    """Returns the application-wide animation scene. ParaView has only one
    global animation scene; this method provides access to it. Users are free
    to create additional animation scenes directly, but those scenes won't be
    shown in the ParaView GUI."""
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Missing active session")
    controller = servermanager.ParaViewPipelineController()
    return controller.GetAnimationScene(servermanager.ActiveConnection.Session)
# -----------------------------------------------------------------------------
def AnimateReader(reader=None, view=None, filename=None):
    """Utility function that animates over all time steps of the given reader
    in the given view. If the optional filename is provided, a movie is
    created (its type depends on the filename extension)."""
    reader = reader if reader else active_objects.source
    view = view if view else active_objects.view
    return servermanager.AnimateReader(reader, view, filename)
# -----------------------------------------------------------------------------
def _GetRepresentationAnimationHelper(sourceproxy):
    """Internal method that returns the representation animation helper for a
    source proxy, or None when the proxy is not a registered source or no
    helper exists (helpers are created during RegisterPipelineProxy())."""
    # ascertain that proxy is a source proxy
    if sourceproxy not in GetSources().values():
        return None
    for candidate in servermanager.ProxyManager():
        if candidate.GetXMLName() == "RepresentationAnimationHelper" and \
           candidate.GetProperty("Source").IsProxyAdded(sourceproxy.SMProxy):
            return candidate
    return None
# -----------------------------------------------------------------------------
def GetAnimationTrack(propertyname_or_property, index=None, proxy=None):
    """Returns an animation cue for the property. If one doesn't exist then a
    new one will be created.
    Typical usage::
        track = GetAnimationTrack("Center", 0, sphere) or
        track = GetAnimationTrack(sphere.GetProperty("Radius")) or
        # this returns the track to animate visibility of the active source in
        # all views.
        track = GetAnimationTrack("Visibility")
    For animating properties on implicit planes etc., use the following
    signatures::
        track = GetAnimationTrack(slice.SliceType.GetProperty("Origin"), 0) or
        track = GetAnimationTrack("Origin", 0, slice.SliceType)

    :param propertyname_or_property: property name (str) or a
        servermanager.Property instance.
    :param index: element index to animate; None animates all elements.
    :param proxy: proxy owning the property; defaults to the active source.
    :raises TypeError: for invalid proxy/property arguments.
    :raises AttributeError: when the property cannot be located.
    """
    if not proxy:
        proxy = GetActiveSource()
    if not isinstance(proxy, servermanager.Proxy):
        raise TypeError ("proxy must be a servermanager.Proxy instance")
    if isinstance(propertyname_or_property, str):
        propertyname = propertyname_or_property
    elif isinstance(propertyname_or_property, servermanager.Property):
        # A Property instance carries both its name and its owning proxy.
        prop = propertyname_or_property
        propertyname = prop.Name
        proxy = prop.Proxy
    else:
        raise TypeError ("propertyname_or_property must be a string or servermanager.Property")
    # To handle the case where the property is actually a "display" property, in
    # which case we are actually animating the "RepresentationAnimationHelper"
    # associated with the source.
    if propertyname in ["Visibility", "Opacity"]:
        proxy = _GetRepresentationAnimationHelper(proxy)
    if not proxy or not proxy.GetProperty(propertyname):
        raise AttributeError ("Failed to locate property %s" % propertyname)
    scene = GetAnimationScene()
    for cue in scene.Cues:
        try:
            if cue.AnimatedProxy == proxy and\
               cue.AnimatedPropertyName == propertyname:
                # Identity comparison: None is a sentinel for "any element".
                if index is None or index == cue.AnimatedElement:
                    return cue
        except AttributeError:
            # Some cues (e.g. camera cues) lack these attributes; skip them.
            pass
    # matching animation track wasn't found, create a new one.
    cue = KeyFrameAnimationCue()
    cue.AnimatedProxy = proxy
    cue.AnimatedPropertyName = propertyname
    if index is not None:
        cue.AnimatedElement = index
    scene.Cues.append(cue)
    return cue
# -----------------------------------------------------------------------------
def GetCameraTrack(view=None):
    """Returns the camera animation track for the given view. If no view is
    specified, active view will be used. If no existing camera animation track
    is found, a new one will be created."""
    view = view if view else GetActiveView()
    if not view:
        raise ValueError ("No view specified")
    scene = GetAnimationScene()
    # Reuse an existing camera cue for this view when one exists.
    for existing in scene.Cues:
        if existing.AnimatedProxy == view and \
           existing.GetXMLName() == "CameraAnimationCue":
            return existing
    track = CameraAnimationCue()
    track.AnimatedProxy = view
    scene.Cues.append(track)
    return track
# -----------------------------------------------------------------------------
def GetTimeTrack():
    """Returns the animation track used to control the time requested from all
    readers/filters during playback. This is the "TimeKeeper - Time" track
    shown in ParaView's 'Animation View'."""
    scene = GetAnimationScene()
    if not scene:
        raise RuntimeError ("Missing animation scene")
    controller = servermanager.ParaViewPipelineController()
    return controller.GetTimeAnimationTrack(scene)
#==============================================================================
# Plugin Management
#==============================================================================
def LoadXML(xmlstring, ns=None):
    """Parse and process a server manager XML string, then add any newly
    defined constructor functions to *ns*.

    If you loaded the simple module with ``from paraview.simple import *``,
    pass ``globals()`` as the second argument::

        LoadXML(xmlstring, globals())

    Otherwise the new functions will not appear in the global namespace."""
    target_ns = ns if ns else globals()
    servermanager.LoadXML(xmlstring)
    _add_functions(target_ns)
# -----------------------------------------------------------------------------
def LoadPlugin(filename, remote=True, ns=None):
    """Load a ParaView plugin and update this module with new constructors,
    if any. *remote* (default ``True``) selects where the plugin is loaded:
    on the server (``True``) or on the client (``False``).

    If you loaded the simple module with ``from paraview.simple import *``,
    pass ``globals()`` as an argument::

        LoadPlugin("myplugin", False, globals()) # to load on client
        LoadPlugin("myplugin", True, globals())  # to load on server
        LoadPlugin("myplugin", ns=globals())     # to load on server

    Otherwise the new functions will not appear in the global namespace."""
    target_ns = ns if ns else globals()
    servermanager.LoadPlugin(filename, remote)
    _add_functions(target_ns)
# -----------------------------------------------------------------------------
def LoadDistributedPlugin(pluginname, remote=True, ns=None):
    """Load a plugin that is distributed with the executable, using the
    plugin manager's knowledge of distributed plugins to locate its shared
    library. Raises RuntimeError if there is no active session or the
    plugin cannot be found."""
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Cannot load a plugin without a session.")
    plugin_mgr = servermanager.vtkSMProxyManager.GetProxyManager().GetPluginManager()
    # Query the plugin inventory on whichever side the plugin should load.
    if remote:
        info = plugin_mgr.GetRemoteInformation(servermanager.ActiveConnection.Session)
    else:
        info = plugin_mgr.GetLocalInformation()
    for idx in range(info.GetNumberOfPlugins()):
        if info.GetPluginName(idx) == pluginname:
            return LoadPlugin(info.GetPluginFileName(idx), remote, ns)
    raise RuntimeError ("Plugin '%s' not found" % pluginname)
#==============================================================================
# Custom Filters Management
#==============================================================================
def LoadCustomFilters(filename, ns=None):
    """Load a custom filter XML file and update this module with new
    constructors, if any. If you loaded the simple module with
    ``from paraview.simple import *``, pass ``globals()`` as *ns* so the
    new functions appear in your namespace."""
    servermanager.ProxyManager().SMProxyManager.LoadCustomProxyDefinitions(filename)
    target_ns = ns if ns else globals()
    _add_functions(target_ns)
#==============================================================================
# Selection Management
#==============================================================================
def _select(seltype, query=None, proxy=None):
    """Internal helper: attach a selection-query source of field type
    *seltype* to *proxy* (the active source when *proxy* is None) and
    return it. A query of None selects every element."""
    target = proxy if proxy else GetActiveSource()
    if not target:
        raise RuntimeError ("No active source was found.")
    # "id >= 0" is true for every element, i.e. select-all.
    effective_query = query if query else "id >= 0"
    # The query source is deliberately NOT registered with the proxy manager.
    selection = servermanager.sources.SelectionQuerySource()
    selection.FieldType = seltype
    selection.QueryString = str(effective_query)
    target.SMProxy.SetSelectionInput(target.Port, selection.SMProxy, 0)
    return selection
# -----------------------------------------------------------------------------
def SelectCells(query=None, proxy=None):
    """Select the cells satisfying *query* on *proxy*. A query of None
    selects all cells; a proxy of None uses the active source."""
    return _select(seltype="CELL", query=query, proxy=proxy)
# -----------------------------------------------------------------------------
def SelectPoints(query=None, proxy=None):
    """Select the points satisfying *query* on *proxy*. A query of None
    selects all points; a proxy of None uses the active source."""
    return _select(seltype="POINT", query=query, proxy=proxy)
# -----------------------------------------------------------------------------
def ClearSelection(proxy=None):
    """Clear the selection on *proxy* (the active source when None).
    Raises RuntimeError when no source is available."""
    target = proxy if proxy else GetActiveSource()
    if not target:
        raise RuntimeError ("No active source was found.")
    target.SMProxy.SetSelectionInput(target.Port, None, 0)
#==============================================================================
# Dynamic lights.
#==============================================================================
def CreateLight():
    """Create and register a new light proxy that is not attached to any
    view, returning its Python wrapper."""
    proxy_manager = servermanager.ProxyManager()
    raw_light = proxy_manager.NewProxy("additional_lights", "Light")
    pipeline = servermanager.ParaViewPipelineController()
    pipeline.SMController.RegisterLightProxy(raw_light, None)
    return servermanager._getPyProxy(raw_light)
def AddLight(view=None):
    """Create a new light and append it to *view*'s AdditionalLights (the
    active view when *view* is None).

    Returns the light proxy as held by the view (fetched back via
    GetLight), or None when the view is not a render view. Raises
    ValueError when no view is available."""
    view = view if view else GetActiveView()
    if not view:
        raise ValueError ("No 'view' was provided and no active view was found.")
    # BUGFIX: the previous guard was `view.IsA(...) is False`. VTK's Python
    # wrapping returns an int from IsA(), and `0 is False` is never true, so
    # the identity test made the guard dead code. Use truthiness instead.
    if not view.IsA("vtkSMRenderViewProxy"):
        return
    lightproxy = CreateLight()
    # Rebuild the list and reassign so the property setter sees the change.
    nowlights = [l for l in view.AdditionalLights]
    nowlights.append(lightproxy)
    view.AdditionalLights = nowlights
    # This is not the same as returning lightproxy: fetch it back from the
    # view so the caller receives the instance the view actually holds.
    return GetLight(len(view.AdditionalLights) - 1, view)
def RemoveLight(light):
    """Detach *light* from the view that owns it (if any) and unregister
    the light proxy. Raises ValueError when *light* is falsy."""
    if not light:
        raise ValueError ("No 'light' was provided.")
    owner = GetViewForLight(light)
    if owner:
        # Sanity check: the view returned must be a render view that has lights.
        if (not owner.IsA("vtkSMRenderViewProxy")) or (len(owner.AdditionalLights) < 1):
            raise RuntimeError ("View retrieved inconsistent with owning a 'light'.")
        owner.AdditionalLights = [l for l in owner.AdditionalLights if l != light]
    # Unregister even when the light was not attached to any view.
    pipeline = servermanager.ParaViewPipelineController()
    pipeline.SMController.UnRegisterProxy(light.SMProxy)
def GetLight(number, view=None):
    """Return light *number* previously added to *view* (the active view
    when None), or None when the index is out of range."""
    target = view if view else active_objects.view
    count = len(target.AdditionalLights)
    if 0 <= number < count:
        return target.AdditionalLights[number]
    return None
def GetViewForLight(proxy):
    """Return the render view whose AdditionalLights contains the light
    *proxy*, or None when no current view owns it."""
    # Walk the light's consumers looking for an owning render view.
    for idx in range(proxy.GetNumberOfConsumers()):
        candidate = proxy.GetConsumerProxy(idx).GetTrueParentProxy()
        if candidate.IsA("vtkSMRenderViewProxy") and proxy in candidate.AdditionalLights:
            return candidate
    return None
#==============================================================================
# Materials.
#==============================================================================
def GetMaterialLibrary():
    """Return the material library for the active session. Raises
    RuntimeError when there is no active session."""
    if not servermanager.ActiveConnection:
        raise RuntimeError ("Missing active session")
    pipeline = servermanager.ParaViewPipelineController()
    return pipeline.FindMaterialLibrary(servermanager.ActiveConnection.Session)
#==============================================================================
# Miscellaneous functions.
#==============================================================================
def Show3DWidgets(proxy=None):
    """Request the application (when the environment supports it) to show
    the 3D widget(s) for *proxy* (the active source when None)."""
    target = proxy if proxy else GetActiveSource()
    if not target:
        raise ValueError ("No 'proxy' was provided and no active source was found.")
    _Invoke3DWidgetUserEvent(target, "ShowWidget")
def Hide3DWidgets(proxy=None):
    """Request the application (when the environment supports it) to hide
    the 3D widget(s) for *proxy* (the active source when None)."""
    target = proxy if proxy else GetActiveSource()
    if not target:
        raise ValueError ("No 'proxy' was provided and no active source was found.")
    _Invoke3DWidgetUserEvent(target, "HideWidget")
def _Invoke3DWidgetUserEvent(proxy, event):
    """Internal helper used by Show3DWidgets/Hide3DWidgets: fire *event* as
    a 'UserEvent' on *proxy* and, recursively, on the current values of its
    proxy-list properties."""
    if not proxy:
        return
    proxy.InvokeEvent('UserEvent', event)
    # Compatibility with 5.0 and earlier: callers used to pass the filter
    # proxy (e.g. Clip) rather than the proxy that actually has the widget
    # (e.g. Clip.ClipType). Recurse into proxy-list property values so the
    # event reaches the widget-bearing proxies too.
    for smproperty in proxy:
        if smproperty.FindDomain("vtkSMProxyListDomain"):
            _Invoke3DWidgetUserEvent(smproperty.GetData(), event)
def ExportView(filename, view=None, **params):
    """Export *view* (the active view when None) to *filename*.

    The exporter is selected from the filename; extra keyword arguments
    are applied as properties on the exporter proxy before writing.
    Raises ValueError when no view/filename is available and RuntimeError
    when no exporter supports the file."""
    view = view if view else GetActiveView()
    if not view:
        raise ValueError ("No 'view' was provided and no active view was found.")
    if not filename:
        raise ValueError ("No filename specified")
    # ensure that the view is up-to-date.
    view.StillRender()
    helper = servermanager.vtkSMViewExportHelper()
    proxy = helper.CreateExporter(filename, view.SMProxy)
    if not proxy:
        # BUGFIX: previously two arguments were passed to RuntimeError, which
        # yields a tuple-style message; format the filename into the string.
        raise RuntimeError ("Failed to create exporter for '%s'" % filename)
    proxy.UnRegister(None)
    proxy = servermanager._getPyProxy(proxy)
    SetProperties(proxy, **params)
    proxy.Write()
    del proxy
    del helper
def ResetProperty(propertyName, proxy=None, restoreFromSettings=True):
    """Reset the property named *propertyName* on *proxy* (the active source
    when None) to its default value. When *restoreFromSettings* is True, any
    value stored in the application settings is applied over the default.
    Does nothing when the proxy has no such property."""
    if proxy is None:
        proxy = GetActiveSource()
    propertyToReset = proxy.SMProxy.GetProperty(propertyName)
    if propertyToReset is not None:
        propertyToReset.ResetToDefault()
        if restoreFromSettings:
            settings = paraview.servermanager.vtkSMSettings.GetInstance()
            settings.GetPropertySetting(propertyToReset)
        proxy.SMProxy.UpdateVTKObjects()
#==============================================================================
# Usage and demo code set
#==============================================================================
def demo1():
    """
    Simple demo that create the following pipeline::
       sphere - shrink +
                cone   + > append
    """
    # Sphere of radius 2 with 32 theta subdivisions; becomes the active source.
    sphere = Sphere(Radius=2, ThetaResolution=32)
    # Shrink the sphere. Input could be omitted to use the active source.
    shrunk = Shrink(Input=sphere)
    cone = Cone()
    # Append the shrunk sphere and the cone into one dataset.
    combined = AppendDatasets()
    combined.Input = [shrunk, cone]
    # Show and render. The argument to Show() is optional since the append
    # filter is now the active object.
    Show(combined)
    Render()
# -----------------------------------------------------------------------------
def demo2(fname="/Users/berk/Work/ParaView/ParaViewData/Data/disk_out_ref.ex2"):
    """This demo shows the use of readers, data information and display
    properties."""
    # Create the Exodus reader for the given file.
    reader = ExodusIIReader(FileName=fname)
    # List the available point arrays and load all of them.
    available_arrays = reader.PointVariables.Available
    print (available_arrays)
    reader.PointVariables = available_arrays
    # Show the reader output as a wireframe on a lighter background.
    Show(reader)
    SetDisplayProperties(Representation = "Wireframe")
    SetViewProperties(Background = [0.4, 0.4, 0.6])
    Render()
    # Tilt the camera; see the vtkCamera documentation for the parameters.
    GetActiveCamera().Elevation(45)
    Render()
    # The reader has executed, so its output point data is now available.
    point_data = reader[0].PointData
    # Print every point array along with its per-component value ranges.
    print ('Number of point arrays:', len(point_data))
    for index in range(len(point_data)):
        array_info = point_data[index]
        print ("----------------")
        print ("Array:", index, " ", array_info.Name, ":")
        component_count = array_info.GetNumberOfComponents()
        print ("Number of components:", component_count)
        for comp in range(component_count):
            print ("Range:", array_info.GetRange(comp))
    # Color the geometry by the "Pres" point array using a blue-to-red
    # lookup table: min (0.00678) maps to blue, max (0.0288) to red.
    SetDisplayProperties(LookupTable = MakeBlueToRedLT(0.00678, 0.0288))
    SetDisplayProperties(ColorArrayName = ("POINTS", "Pres"))
    Render()
#==============================================================================
# Set of Internal functions
#==============================================================================
def _initializeSession(connection):
    """Internal: initialize a newly created session. Called automatically by
    the connection APIs in this module whenever a session is created; users
    do not need to call it directly."""
    if not connection:
        raise RuntimeError ("'connection' cannot be empty.")
    pipeline = servermanager.ParaViewPipelineController()
    pipeline.InitializeSession(connection.Session)
def _create_func(key, module, skipRegisteration=False):
    """Internal function: build and return a constructor function for the
    proxy type named *key* found in *module*. When *skipRegisteration* is
    True the created proxies are not registered with the proxy manager."""
    def CreateObject(*input, **params):
        """This function creates a new proxy. For pipeline objects that accept inputs,
        all non-keyword arguments are assumed to be inputs. All keyword arguments are
        assumed to be property,value pairs and are passed to the new proxy."""
        # Create a controller instance.
        controller = servermanager.ParaViewPipelineController()
        # Instantiate the actual object from the given module.
        px = paraview._backwardscompatibilityhelper.GetProxy(module, key)
        # preinitialize the proxy.
        controller.PreInitializeProxy(px)
        # Make sure non-keyword arguments are valid
        for inp in input:
            if inp != None and not isinstance(inp, servermanager.Proxy):
                if px.GetProperty("Input") != None:
                    raise RuntimeError ("Expecting a proxy as input.")
                else:
                    raise RuntimeError ("This function does not accept non-keyword arguments.")
        # Assign inputs
        inputName = servermanager.vtkSMCoreUtilities.GetInputPropertyName(px.SMProxy, 0)
        if px.GetProperty(inputName) != None:
            if len(input) > 0:
                px.SetPropertyWithName(inputName, input)
            else:
                # If no input is specified, try the active pipeline object
                if px.GetProperty(inputName).GetRepeatable() and active_objects.get_selected_sources():
                    px.SetPropertyWithName(inputName, active_objects.get_selected_sources())
                elif active_objects.source:
                    px.SetPropertyWithName(inputName, active_objects.source)
        else:
            if len(input) > 0:
                raise RuntimeError ("This function does not expect an input.")
        # 'registrationName'/'guiName' are consumed here (removed from params)
        # rather than being forwarded to the proxy as properties.
        registrationName = None
        for nameParam in ['registrationName', 'guiName']:
            if nameParam in params:
                registrationName = params[nameParam]
                del params[nameParam]
        # Pass all the named arguments as property,value pairs
        SetProperties(px, **params)
        # post initialize
        controller.PostInitializeProxy(px)
        if not skipRegisteration:
            # Register the proxy with the proxy manager (assuming we are only using
            # these functions for pipeline proxies or animation proxies.
            if isinstance(px, servermanager.SourceProxy):
                controller.RegisterPipelineProxy(px, registrationName)
            elif px.GetXMLGroup() == "animation":
                controller.RegisterAnimationProxy(px)
        return px
    return CreateObject
# -----------------------------------------------------------------------------
def _create_doc(new, old):
"Internal function."
import string
res = new + '\n'
ts = []
strpd = old.split('\n')
for s in strpd:
ts.append(s.lstrip())
res += ' '.join(ts)
res += '\n'
return res
# -----------------------------------------------------------------------------
def _func_name_valid(name):
"Internal function."
valid = True
for c in name:
if c == '(' or c ==')':
valid = False
break
return valid
# -----------------------------------------------------------------------------
def _get_proxymodules_to_import(connection):
"""
used in _add_functions, _get_generated_proxies, and _remove_functions to get
modules to import proxies from.
"""
if connection and connection.Modules:
modules = connection.Modules
return [modules.filters, modules.sources, modules.writers, modules.animation]
else:
return []
def _add_functions(g):
    """Internal: populate namespace *g* with constructor functions for every
    proxy type known to the active connection. Existing names in *g* are
    never overwritten; proxies created via the writers module are not
    registered with the proxy manager."""
    if not servermanager.ActiveConnection:
        return
    activeModule = servermanager.ActiveConnection.Modules
    for m in _get_proxymodules_to_import(servermanager.ActiveConnection):
        # Skip registering proxies in certain modules (currently only writers)
        skipRegisteration = m is activeModule.writers
        for key, cl in m.__dict__.items():
            if isinstance(cl, str):
                continue
            if key not in g and _func_name_valid(key):
                g[key] = _create_func(key, m, skipRegisteration)
                # Direct attribute assignment replaces the previous
                # exec() on a %-formatted string: same effect (cl is
                # m.<key>), but safer and faster.
                g[key].__doc__ = _create_doc(cl.__doc__, g[key].__doc__)
# -----------------------------------------------------------------------------
def _get_generated_proxies():
    """Internal: return the names of all proxy constructors generated for
    the active connection's proxy modules."""
    names = []
    for module in _get_proxymodules_to_import(servermanager.ActiveConnection):
        for key, value in module.__dict__.items():
            if not isinstance(value, str) and _func_name_valid(key):
                names.append(key)
    return names
# -----------------------------------------------------------------------------
def _remove_functions(g):
    """Internal: remove from namespace *g* every proxy-constructor name
    contributed by the active connection's proxy modules."""
    for module in _get_proxymodules_to_import(servermanager.ActiveConnection):
        for key, value in module.__dict__.items():
            if not isinstance(value, str) and key in g:
                del g[key]
# -----------------------------------------------------------------------------
def _find_writer(filename):
"Internal function."
extension = None
parts = filename.split('.')
if len(parts) > 1:
extension = parts[-1]
else:
raise RuntimeError ("Filename has no extension, please specify a write")
if extension == 'png':
return 'vtkPNGWriter'
elif extension == 'bmp':
return 'vtkBMPWriter'
elif extension == 'ppm':
return 'vtkPNMWriter'
elif extension == 'tif' or extension == 'tiff':
return 'vtkTIFFWriter'
elif extension == 'jpg' or extension == 'jpeg':
return 'vtkJPEGWriter'
else:
raise RuntimeError ("Cannot infer filetype from extension:", extension)
# -----------------------------------------------------------------------------
def _switchToActiveConnectionCallback(caller, event):
    """Observer callback invoked when the ServerManager's active session
    changes; mirrors the change into this module's Python state."""
    # servermanager can be None while the interpreter is shutting down.
    if not servermanager:
        return
    session = servermanager.vtkSMProxyManager.GetProxyManager().GetActiveSession()
    SetActiveConnection(servermanager.GetConnectionFromSession(session))
#==============================================================================
# Set of Internal classes
#==============================================================================
class _active_session_observer:
    """Installs an observer on the proxy manager that keeps this module's
    active connection in sync with the ServerManager's active session."""
    def __init__(self):
        proxy_manager = servermanager.vtkSMProxyManager.GetProxyManager()
        self.ObserverTag = proxy_manager.AddObserver(
            proxy_manager.ActiveSessionChanged,
            _switchToActiveConnectionCallback)
    def __del__(self):
        # Guard against module teardown order during interpreter shutdown.
        if servermanager and servermanager.vtkSMProxyManager:
            servermanager.vtkSMProxyManager.GetProxyManager().RemoveObserver(self.ObserverTag)
# -----------------------------------------------------------------------------
class _active_objects(object):
    """This class manages the active objects (source and view). The active
    objects are shared between Python and the user interface. This class
    is for internal use. Use the :ref:`SetActiveSource`,
    :ref:`GetActiveSource`, :ref:`SetActiveView`, and :ref:`GetActiveView`
    methods for setting and getting active objects."""
    def __get_selection_model(self, name, session=None):
        """Internal method: return the proxy-selection model registered under
        *name* for *session*, creating and registering one if needed."""
        if session and session != servermanager.ActiveConnection.Session:
            raise RuntimeError ("Try to set an active object with invalid active connection.")
        pxm = servermanager.ProxyManager(session)
        model = pxm.GetSelectionModel(name)
        if not model:
            model = servermanager.vtkSMProxySelectionModel()
            pxm.RegisterSelectionModel(name, model)
        return model
    def set_view(self, view):
        "Sets the active view."
        active_view_model = self.__get_selection_model("ActiveView")
        if view:
            active_view_model = self.__get_selection_model("ActiveView", view.GetSession())
            active_view_model.SetCurrentProxy(view.SMProxy,
                active_view_model.CLEAR_AND_SELECT)
        else:
            active_view_model = self.__get_selection_model("ActiveView")
            active_view_model.SetCurrentProxy(None,
                active_view_model.CLEAR_AND_SELECT)
    def get_view(self):
        "Returns the active view."
        return servermanager._getPyProxy(
            self.__get_selection_model("ActiveView").GetCurrentProxy())
    def set_source(self, source):
        "Sets the active source."
        active_sources_model = self.__get_selection_model("ActiveSources")
        if source:
            # 3 == CLEAR_AND_SELECT
            active_sources_model = self.__get_selection_model("ActiveSources", source.GetSession())
            active_sources_model.SetCurrentProxy(source.SMProxy,
                active_sources_model.CLEAR_AND_SELECT)
        else:
            active_sources_model = self.__get_selection_model("ActiveSources")
            active_sources_model.SetCurrentProxy(None,
                active_sources_model.CLEAR_AND_SELECT)
    def __convert_proxy(self, px):
        """Internal method: wrap *px* as a Python proxy. Non-source proxies
        are wrapped as an OutputPort of their source proxy, preserving the
        port index."""
        if not px:
            return None
        if px.IsA("vtkSMSourceProxy"):
            return servermanager._getPyProxy(px)
        else:
            return servermanager.OutputPort(
                servermanager._getPyProxy(px.GetSourceProxy()),
                px.GetPortIndex())
    def get_source(self):
        "Returns the active source."
        return self.__convert_proxy(
            self.__get_selection_model("ActiveSources").GetCurrentProxy())
    def get_selected_sources(self):
        "Returns the set of sources selected in the pipeline browser."
        model = self.__get_selection_model("ActiveSources")
        proxies = []
        # BUGFIX: use 'range' instead of Python-2-only 'xrange' so this
        # works on Python 3 as well (identical behavior on Python 2).
        for i in range(model.GetNumberOfSelectedProxies()):
            proxies.append(self.__convert_proxy(model.GetSelectedProxy(i)))
        return proxies
    view = property(get_view, set_view)
    source = property(get_source, set_source)
# -----------------------------------------------------------------------------
class _funcs_internals:
    """Internal mutable state shared by the helper functions in this module."""
    # NOTE(review): presumably flipped to False after the first render so a
    # one-time setup (e.g. camera reset) can be performed -- the consumer is
    # outside this chunk, confirm against Render()/Show().
    first_render = True
#==============================================================================
# Start the session and initialize the ServerManager
#==============================================================================
# NOTE(review): "satelite" matches the attribute spelling this code reads from
# paraview.options -- do not "fix" the spelling here independently.
if not paraview.options.satelite:
    # Keep this module's active connection in sync with the UI's session.
    active_session_observer = _active_session_observer()
    if not servermanager.ActiveConnection:
        Connect()
    else:
        # A connection already exists: just expose its proxy constructors.
        _add_functions(globals())
    # Shared tracker for the active source/view (see _active_objects).
    active_objects = _active_objects()
| 40.057959 | 142 | 0.614974 |
ace6233962108172de7ac2686bf7d40cd6263d2b | 1,976 | py | Python | programs/cp.py | RaInta/PyOS | 0e38faba3f3b9958316f77b2163118ec8eb8845f | [
"MIT"
] | null | null | null | programs/cp.py | RaInta/PyOS | 0e38faba3f3b9958316f77b2163118ec8eb8845f | [
"MIT"
] | null | null | null | programs/cp.py | RaInta/PyOS | 0e38faba3f3b9958316f77b2163118ec8eb8845f | [
"MIT"
] | null | null | null | # PyOS
# Made for Python 2.7
# programs/cp.py
# Import Libraries
# PyOS Scripts
import internal.extra
import os
import shutil
def displayCwdFiles():
"""Get files and folders in current working directory and display them prettily."""
file_list = os.listdir(os.getcwd())
# Make a list of all files in current folder
for Idx, file_name in enumerate(file_list):
print("| {0} | {1} ".format(Idx, file_name))
return file_list
def getFileOrigin(file_list):
    """Get input for file to be copied, and validate that it exists in the
    current folder (or filesystem).

    Accepts either a filename or the index of an entry in *file_list*;
    pressing Enter defaults to the first entry."""
    # BUGFIX: valid numeric choices are 0 .. len-1, but the prompt previously
    # advertised len as the upper bound.
    origin_file = raw_input("[0-" + str(len(file_list) - 1) + "] > ")
    # Check to see if input was a valid number
    if origin_file.isdigit() and (int(origin_file) >= 0) and (int(origin_file) <= len(file_list) - 1):
        origin_file = file_list[int(origin_file)]
    # Default file to copy is first element of the list
    if origin_file == "":
        origin_file = file_list[0]
    return origin_file
def app():
    """Interactive copy command: prompt for a source file (by name or index),
    validate it exists, prompt for a destination (Enter = '<name>.bak'), and
    copy the file with shutil.copy."""
    print(internal.extra.colors.OKGREEN + "Copying files: " + internal.extra.colors.ENDC)
    print(internal.extra.colors.BOLD + "File to copy from (enter filename or number of file): " + internal.extra.colors.ENDC)
    # List the current directory so the user can pick a file by index.
    file_list = displayCwdFiles()
    origin_file = getFileOrigin(file_list)
    # Validate chosen file to copy
    while not os.path.isfile(origin_file):
        print(internal.extra.colors.BOLD + "Warning! file " + origin_file + " does not yet exist.\n\n" + internal.extra.colors.ENDC)
        origin_file = getFileOrigin(file_list)
    print(internal.extra.colors.BOLD + origin_file + " selected for copying." + internal.extra.colors.ENDC)
    # raw_input: this script targets Python 2 (see file header).
    target_file = raw_input(internal.extra.colors.BOLD + "Enter filename to copy to [Enter to backup]:" + internal.extra.colors.ENDC)
    if target_file == "":
        target_file = origin_file + ".bak"
    print("Target file: " + target_file + " selected.")
    shutil.copy(origin_file, target_file)
| 41.166667 | 133 | 0.688765 |
ace62364ea2cd509baf2c5c03777d2bd2b9a7127 | 495 | py | Python | upload_files.py | tingletech/aws-as | 9950faaffe7c3f85cabf4b75fa5eef91c2f1d765 | [
"Unlicense"
] | 2 | 2016-02-26T16:58:57.000Z | 2021-07-09T01:12:21.000Z | upload_files.py | tingletech/aws-as | 9950faaffe7c3f85cabf4b75fa5eef91c2f1d765 | [
"Unlicense"
] | null | null | null | upload_files.py | tingletech/aws-as | 9950faaffe7c3f85cabf4b75fa5eef91c2f1d765 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import boto
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('tag', nargs=1)
args = parser.parse_args()
tag = args.tag[0]
c = boto.connect_s3()
fn = 'archivesspace.' + tag + '.zip'
fn_public = 'public-files/'
url_upload_public = c.generate_url(10800, 'PUT', 'archivesspace', fn_public + fn, headers={'x-amz-acl': 'public-read'})
print 'curl --request PUT --upload-file archivesspace.zip -H \'x-amz-acl: public-read\' "' + url_upload_public + '"'
| 27.5 | 119 | 0.70101 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.