id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4800359 | <reponame>Dorothylyly/SAAT
import cv2
import imageio
import numpy as np
import os
from model import generate_model
import torchvision.transforms as trn
import torch
import argparse
from mean import get_mean, get_std
from spatial_transforms import (
Compose, Normalize, Scale, CenterCrop, CornerCrop, ToTensor)
def extract_feats(file_path, net, filenames, frame_num, batch_size, save_path):
    """Extract 3D CNN features for each video and save them as .npy files.

    Args:
        file_path: Directory containing the video files.
        net: Pre-trained 3D CNN (e.g. ResNeXt-101 trained on Kinetics) used
            as the feature extractor.
        filenames: Iterable of video file names. NOTE: the caller already
            slices this list; the original code sliced again with the
            undefined globals `start_idx`/`end_idx`, which raised NameError.
        frame_num: Number of clip centers sampled uniformly from each video.
        batch_size: Number of 16-frame clips per forward pass.
        save_path: Directory where per-video `<name>.npy` files are written.
    """
    net.eval()
    mean = get_mean(255, dataset='kinetics')
    std = get_std(255)
    transform = Compose([trn.ToPILImage(),
                         Scale(112),
                         CornerCrop(112, 'c'),
                         ToTensor(),
                         Normalize(mean, std)])
    print("Network loaded")
    # Read videos and extract features in batches.
    for file in filenames:
        feat_file = os.path.join(save_path, file[:-4] + '.npy')
        if os.path.exists(feat_file):
            # Skip videos already processed in a previous run.
            continue
        vid = imageio.get_reader(os.path.join(file_path, file), 'ffmpeg')
        curr_frames = []
        for frame in vid:
            if len(frame.shape) < 3:
                # Grayscale frame: replicate the channel to get an HxWx3
                # image. (The original `np.repeat(frame, 3)` flattens the
                # array and breaks the PIL transform.)
                frame = np.stack([frame] * 3, axis=-1)
            curr_frames.append(transform(frame).unsqueeze(0))
        curr_frames = torch.cat(curr_frames, dim=0)
        print("Shape of frames: {0}".format(curr_frames.shape))
        idx = np.linspace(0, len(curr_frames) - 1, frame_num).astype(int)
        print("Captured {} clips: {}".format(len(idx), curr_frames.shape))
        curr_feats = []
        for i in range(0, len(idx), batch_size):
            # Take a 16-frame window centered on each sampled index, clamped
            # so it stays inside the video. (The original `x-8:x+8` produced
            # short/empty slices near the start, making torch.cat fail for
            # batch_size > 1.)
            curr_batch = []
            for x in idx[i:i + batch_size]:
                start = min(max(int(x) - 8, 0), max(len(curr_frames) - 16, 0))
                curr_batch.append(curr_frames[start:start + 16, ...].unsqueeze(0))
            curr_batch = torch.cat(curr_batch, dim=0).cuda()
            # Network expects (batch, channels, frames, H, W); frames were
            # stacked as (batch, frames, channels, H, W), hence the transpose.
            out = net(curr_batch.transpose(1, 2))
            curr_feats.append(out.detach().cpu())
            print("Appended {} features {}".format(i + 1, out.shape))
        curr_feats = torch.cat(curr_feats, 0)
        del out
        np.save(feat_file, curr_feats.numpy())
        print("Saved file {}\nExiting".format(file[:-4] + '.npy'))
if __name__ == "__main__":
    def _str2bool(v):
        """Parse a command-line boolean.

        argparse's `type=bool` is broken: bool('False') is True because any
        non-empty string is truthy. Accept common spellings instead.
        """
        return str(v).lower() in ('true', '1', 'yes', 'y')

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='resnext')
    parser.add_argument('--model_depth', type=int, default=101)
    parser.add_argument('--pretrain_path', type=str, default='./checkpoints/resnext-101-kinetics.pth')
    parser.add_argument('--n_classes', type=int, default=400)
    parser.add_argument('--n_finetune_classes', type=int, default=400)
    parser.add_argument('--ft_begin_index', type=int, default=0)
    parser.add_argument('--resnet_shortcut', type=str, default='B')
    parser.add_argument('--resnext_cardinality', type=int, default=32)
    parser.add_argument('--sample_size', type=int, default=112)
    parser.add_argument('--sample_duration', type=int, default=16)
    # _str2bool instead of `type=bool`: bool('False') would be True.
    parser.add_argument('--no_cuda', type=_str2bool, default=False)
    parser.add_argument('--no_train', type=_str2bool, default=True)
    parser.add_argument('--file_path', type=str, default='./Data')
    parser.add_argument('--dataset_name', type=str, default='YouTubeClips')
    parser.add_argument('--frame_per_video', type=int, default=28)
    parser.add_argument('--start_idx', type=int, default=0)
    parser.add_argument('--end_idx', type=int, default=1)
    parser.add_argument('--batch_size', type=int, default=1)
    opt = parser.parse_args()
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    model, _ = generate_model(opt)
    namelist = os.listdir(os.path.join(opt.file_path, opt.dataset_name))
    save_path = os.path.join(opt.file_path, 'Feature_3D')
    # Make sure the output directory exists before extract_feats tries to
    # write .npy files into it.
    os.makedirs(save_path, exist_ok=True)
    extract_feats(opt.file_path, model, namelist[opt.start_idx:opt.end_idx],
                  opt.frame_per_video, opt.batch_size, save_path)
| StarcoderdataPython |
6627648 | <reponame>y-akinobu/puppy
# NOTE(review): this looks like Puppy/transpiled DSL code rather than plain
# Python — `Rectangle`, `Ball` and the bare `true` literal are not defined
# here and would raise NameError under a stock interpreter; presumably the
# hosting runtime injects them. TODO: confirm against the Puppy environment.
# Static floor rectangle and a bouncing ball sprite.
B = Rectangle(500, 950, width=1000, height=100, isStatic=true)
A = Ball(100,100,strokeStyle="yellow",lineWidth=30,width=100,height=100,fillStyle="green")
print("Hello")
def suzume_collision():
    """Collision-start callback wired to the `suzume` sprite below."""
    print("Bomb!")
def suzume_clicked():
    """Click callback wired to the `suzume` sprite below."""
    print("Chun")
# Circle sprite with click / collision callbacks (DSL-extended keywords).
suzume = Circle(500,100,image='bird.png',width=270,clicked=suzume_clicked,collisionStart=suzume_collision)
for x in [100,200,300,400]:
    # NOTE(review): `font`/`fontStyle` are not keywords of Python's built-in
    # print — presumably the DSL overrides print; the loop variable x is
    # unused, so this just prints four times. TODO: confirm intent.
    print('Hi!!', font='48px Arial',fontStyle='green')
| StarcoderdataPython |
333686 | <reponame>josephsnyder/VistA-1
#
# This file is part of WinPexpect. WinPexpect is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# WinPexpect is copyright (c) 2008-2010 by the WinPexpect authors. See the
# file "AUTHORS" for a complete overview.
import os
import sys
import pywintypes
import itertools
import random
import time
import signal
from Queue import Queue, Empty
from threading import Thread, Lock
from pexpect import spawn, ExceptionPexpect, EOF, TIMEOUT
from subprocess import list2cmdline
from msvcrt import open_osfhandle
from win32api import (SetHandleInformation, GetCurrentProcess, OpenProcess,
PostMessage, SendMessage,
CloseHandle, GetCurrentThread, STD_INPUT_HANDLE)
from win32pipe import CreateNamedPipe, ConnectNamedPipe
from win32process import (STARTUPINFO, CreateProcess, CreateProcessAsUser,
GetExitCodeProcess, TerminateProcess, ExitProcess,
GetWindowThreadProcessId)
from win32event import WaitForSingleObject, INFINITE
from win32security import (LogonUser, OpenThreadToken, OpenProcessToken,
GetTokenInformation, TokenUser, ACL_REVISION_DS,
ConvertSidToStringSid, ConvertStringSidToSid,
SECURITY_ATTRIBUTES, SECURITY_DESCRIPTOR, ACL,
LookupAccountName)
from win32file import CreateFile, ReadFile, WriteFile, INVALID_HANDLE_VALUE, FILE_SHARE_READ
from win32console import (GetStdHandle, KEY_EVENT, ENABLE_WINDOW_INPUT, ENABLE_MOUSE_INPUT,
ENABLE_ECHO_INPUT, ENABLE_LINE_INPUT, ENABLE_PROCESSED_INPUT,
ENABLE_MOUSE_INPUT)
from win32con import (HANDLE_FLAG_INHERIT, STARTF_USESTDHANDLES,
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE, SW_HIDE,
PIPE_ACCESS_DUPLEX, WAIT_OBJECT_0, WAIT_TIMEOUT,
LOGON32_PROVIDER_DEFAULT, LOGON32_LOGON_INTERACTIVE,
TOKEN_ALL_ACCESS, GENERIC_READ, GENERIC_WRITE,
OPEN_EXISTING, PROCESS_ALL_ACCESS, MAXIMUM_ALLOWED,
LEFT_CTRL_PRESSED,RIGHT_CTRL_PRESSED,
WM_CHAR, VK_RETURN, WM_KEYDOWN, WM_KEYUP)
from win32gui import EnumWindows
from winerror import (ERROR_PIPE_BUSY, ERROR_HANDLE_EOF, ERROR_BROKEN_PIPE,
ERROR_ACCESS_DENIED)
from pywintypes import error as WindowsError
# Compatibility with Python < 2.6: fall back to a plain attribute-bag class
# when collections.namedtuple is unavailable.
try:
    from collections import namedtuple
except ImportError:
    def namedtuple(name, fields):
        # Minimal stand-in: a class whose attributes all default to None.
        # Unlike the real namedtuple it is not a tuple subclass, but callers
        # in this module only use it for attribute storage.
        d = dict(zip(fields, [None]*len(fields)))
        return type(name, (object,), d)
# Compatibility with Python 3: pywin32's ReadFile/WriteFile deal in bytes on
# Python 3, while the rest of this module works with str. Wrap them so pipe
# traffic is transparently ASCII-encoded/decoded.
if sys.version_info[0] == 3:
    _WriteFile = WriteFile
    def WriteFile(handle, s):
        # Encode outgoing text; assumes the stub protocol is pure ASCII.
        return _WriteFile(handle, s.encode('ascii'))
    _ReadFile = ReadFile
    def ReadFile(handle, size):
        # Decode incoming bytes, preserving the (err, data) return shape.
        err, data = _ReadFile(handle, size)
        return err, data.decode('ascii')
def split_command_line(cmdline):
    """Split a command line into a command and its arguments according to
    the rules of the Microsoft C runtime.

    Returns a list of argument strings. Raises ValueError when the command
    line is ill-formed (e.g. an unterminated quoted section).
    """
    # http://msdn.microsoft.com/en-us/library/ms880421
    s_free, s_in_quotes, s_in_escape = range(3)
    # `state` is used as a mutable attribute bag. The original declared an
    # unused `escape_level` field while dynamically assigning
    # `escape_count`; the field list now matches actual usage.
    state = namedtuple('state',
                       ('current', 'previous', 'escape_count', 'argument'))
    state.current = s_free
    state.previous = s_free
    state.escape_count = 0
    state.argument = []
    result = []
    for c in itertools.chain(cmdline, ['EOI']):  # Mark End of Input
        if state.current == s_free:
            if c == '"':
                state.current = s_in_quotes
                state.previous = s_free
            elif c == '\\':
                state.current = s_in_escape
                state.previous = s_free
                state.escape_count = 1
            elif c in (' ', '\t', 'EOI'):
                # Whitespace/EOI ends an argument; a closing quote may have
                # produced an (empty) argument even if no chars were added.
                if state.argument or state.previous != s_free:
                    result.append(''.join(state.argument))
                    del state.argument[:]
            else:
                state.argument.append(c)
        elif state.current == s_in_quotes:
            if c == '"':
                state.current = s_free
                state.previous = s_in_quotes
            elif c == '\\':
                state.current = s_in_escape
                state.previous = s_in_quotes
                state.escape_count = 1
            else:
                state.argument.append(c)
        elif state.current == s_in_escape:
            if c == '\\':
                state.escape_count += 1
            elif c == '"':
                # 2n backslashes before a quote -> n backslashes, quote is a
                # delimiter; 2n+1 -> n backslashes plus a literal quote.
                nbs, escaped_delim = divmod(state.escape_count, 2)
                state.argument.append(nbs * '\\')
                if escaped_delim:
                    state.argument.append('"')
                    state.current = state.previous
                else:
                    if state.previous == s_in_quotes:
                        state.current = s_free
                    else:
                        state.current = s_in_quotes
                state.previous = s_in_escape
            else:
                # Backslashes not followed by a quote are literal.
                state.argument.append(state.escape_count * '\\')
                state.argument.append(c)
                state.current = state.previous
                state.previous = s_in_escape
    if state.current != s_free:
        raise ValueError('Illegal command line.')
    return result

# Joining is the inverse operation; the MS C runtime quoting rules are
# already implemented by subprocess.list2cmdline.
join_command_line = list2cmdline
def which(command):
    """Locate *command* on the Windows search path.

    Mirrors the shell lookup: every directory in %Path% is combined with
    every extension in %Pathext%, and the first executable match wins.
    The leading '' entries mean the bare command (and the extension-less
    name) are tried first. Returns None when nothing matches.
    """
    dirs = [''] + os.environ.get('Path', '').split(os.pathsep)
    exts = [''] + os.environ.get('Pathext', '.exe;.com;.bat;.cmd').split(os.pathsep)
    for directory in dirs:
        for extension in exts:
            candidate = os.path.join(directory, command) + extension
            if os.access(candidate, os.X_OK):
                return candidate
    return None
def _read_header(handle, bufsize=4096):
    """INTERNAL: read a stub header from *handle*.

    Keeps reading until the blank-line terminator ('\\n\\n') has arrived and
    returns everything read so far (terminator included, possibly followed
    by extra data from the same read).
    """
    chunks = []
    while True:
        _, data = ReadFile(handle, bufsize)
        chunks.append(data)
        buffered = ''.join(chunks)
        if '\n\n' in buffered:
            return buffered
def _parse_header(header):
"""INTERNAL: pass the stub header format."""
parsed = {}
lines = header.split('\n')
for line in lines:
if not line:
break
p1 = line.find('=')
if p1 == -1:
if line.startswith(' '): # Continuation
if key is None:
raise ValueError, 'Continuation on first line.'
input[key] += '\n' + line[1:]
else:
raise ValueError, 'Expecting key=value format'
key = line[:p1]
parsed[key] = line[p1+1:]
return parsed
def _quote_header(s):
"""INTENAL: quote a string to be used in a stub header."""
return s.replace('\n', '\n ')
def _get_current_sid():
    """INTERNAL: return the SID of the user owning the current thread.

    Prefers the thread token (set when impersonating); falls back to the
    process token when the thread has no token of its own.
    """
    try:
        token = OpenThreadToken(GetCurrentThread(), MAXIMUM_ALLOWED, True)
    except WindowsError:
        # No thread token -> not impersonating; use the process token.
        token = OpenProcessToken(GetCurrentProcess(), MAXIMUM_ALLOWED)
    sid = GetTokenInformation(token, TokenUser)[0]
    return sid
def _lookup_sid(domain, username):
    """INTERNAL: look up the SID for *username* in *domain*.

    Thin wrapper over LookupAccountName; returns only the SID element of
    its (sid, domain, type) result tuple.
    """
    return LookupAccountName(domain, username)[0]
def _create_security_attributes(*sids, **kwargs):
    """INTERNAL: create a SECURITY_ATTRIBUTES structure.

    Positional arguments are SIDs; each one gets an access-allowed ACE in
    the DACL. Keyword arguments:
        inherit: whether handles created with these attributes are
            inheritable by child processes (default 0).
        access: access mask granted per ACE (default GENERIC_READ|GENERIC_WRITE).
    """
    inherit = kwargs.get('inherit', 0)
    access = kwargs.get('access', GENERIC_READ|GENERIC_WRITE)
    attr = SECURITY_ATTRIBUTES()
    attr.bInheritHandle = inherit
    desc = SECURITY_DESCRIPTOR()
    dacl = ACL()
    for sid in sids:
        dacl.AddAccessAllowedAce(ACL_REVISION_DS, access, sid)
    desc.SetSecurityDescriptorDacl(True, dacl, False)
    attr.SECURITY_DESCRIPTOR = desc
    return attr
def _create_named_pipe(template, sids=None):
    """INTERNAL: create a uniquely named pipe.

    Returns (pipe_handle, pipe_name). When *sids* is given, the pipe's DACL
    grants access to exactly those SIDs; otherwise default security is
    used. Retries with a fresh random name while the name is already taken
    (ERROR_PIPE_BUSY), giving up after 100 attempts.
    """
    if sids is None:
        sattrs = None
    else:
        sattrs = _create_security_attributes(*sids)
    for i in range(100):
        name = template % random.randint(0, 999999)
        try:
            pipe = CreateNamedPipe(name, PIPE_ACCESS_DUPLEX,
                                   0, 1, 1, 1, 100000, sattrs)
            # The pipe handle must not leak into child processes.
            SetHandleInformation(pipe, HANDLE_FLAG_INHERIT, 0)
        except WindowsError as e:
            # Modernized from the py2-only `except WindowsError, e` form,
            # consistent with the `as e` handlers used elsewhere in the file.
            if e.winerror != ERROR_PIPE_BUSY:
                raise
        else:
            return pipe, name
    raise ExceptionPexpect('Could not create pipe after 100 attempts.')
def _stub(cmd_name, stdin_name, stdout_name, stderr_name):
    """INTERNAL: Stub process that starts up the real child process.

    Runs inside a freshly created console. It connects back to the parent
    over the four named pipes, reads the command to execute from the
    command pipe, spawns the real child with its std handles redirected to
    the pipes, reports status (or the failure message) over the command
    pipe, and exits.
    """
    # Open the 4 pipes (command, stdin, stdout, stderr). All must be marked
    # inheritable so the real child receives the redirected std handles.
    cmd_pipe = CreateFile(cmd_name, GENERIC_READ|GENERIC_WRITE, 0, None,
                          OPEN_EXISTING, 0, None)
    SetHandleInformation(cmd_pipe, HANDLE_FLAG_INHERIT, 1)
    stdin_pipe = CreateFile(stdin_name, GENERIC_READ, 0, None,
                            OPEN_EXISTING, 0, None)
    SetHandleInformation(stdin_pipe, HANDLE_FLAG_INHERIT, 1)
    stdout_pipe = CreateFile(stdout_name, GENERIC_WRITE, 0, None,
                             OPEN_EXISTING, 0, None)
    SetHandleInformation(stdout_pipe, HANDLE_FLAG_INHERIT, 1)
    stderr_pipe = CreateFile(stderr_name, GENERIC_WRITE, 0, None,
                             OPEN_EXISTING, 0, None)
    SetHandleInformation(stderr_pipe, HANDLE_FLAG_INHERIT, 1)
    # Learn what we need to do. (`request` was called `input` in the
    # original, shadowing the builtin.)
    header = _read_header(cmd_pipe)
    request = _parse_header(header)
    if 'command' not in request or 'args' not in request:
        ExitProcess(2)  # protocol error: parent did not send a command
    # http://msdn.microsoft.com/en-us/library/ms682499(VS.85).aspx
    startupinfo = STARTUPINFO()
    startupinfo.dwFlags |= STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW
    startupinfo.hStdInput = stdin_pipe
    startupinfo.hStdOutput = stdout_pipe
    startupinfo.hStdError = stderr_pipe
    startupinfo.wShowWindow = SW_HIDE
    # Grant access so that our parent can open its grandchild.
    if 'parent_sid' in request:
        mysid = _get_current_sid()
        parent = ConvertStringSidToSid(request['parent_sid'])
        sattrs = _create_security_attributes(mysid, parent,
                                             access=PROCESS_ALL_ACCESS)
    else:
        sattrs = None
    try:
        res = CreateProcess(request['command'], request['args'], sattrs, None,
                            True, CREATE_NEW_CONSOLE, os.environ, os.getcwd(),
                            startupinfo)
    except WindowsError as e:
        # Report the failure to the parent before exiting.
        message = _quote_header(str(e))
        WriteFile(cmd_pipe, 'status=error\nmessage=%s\n\n' % message)
        ExitProcess(3)
    else:
        pid = res[2]
    # Pass back results and exit.
    err, nbytes = WriteFile(cmd_pipe, 'status=ok\npid=%s\n\n' % pid)
    ExitProcess(0)
class ChunkBuffer(object):
    """Hold one chunk of data and serve it out through successive reads.

    `add` replaces the stored chunk and rewinds the cursor, `read` returns
    the next `size` characters (possibly fewer near the end), and len()
    reports how much is left unread.
    """

    def __init__(self, chunk=''):
        self.add(chunk)

    def add(self, chunk):
        """Store a new chunk and reset the read position to its start."""
        self.chunk = chunk
        self.offset = 0

    def read(self, size):
        """Return up to `size` characters and advance the read position."""
        start = self.offset
        self.offset = start + size
        return self.chunk[start:self.offset]

    def __len__(self):
        """Number of unread characters remaining (never negative)."""
        remaining = len(self.chunk) - self.offset
        return remaining if remaining > 0 else 0
def run(command, timeout=-1, withexitstatus=False, events=None, extra_args=None,
        logfile=None, cwd=None, env=None, stub=None):
    """Run *command*, wait for it to finish, and return all output as a string.

    STDERR is included in the output. If the full path to the command is not
    given, the path is searched. If 'withexitstatus' is true, returns a tuple
    (command_output, exitstatus); otherwise just command_output.

    The run() function can often be used instead of creating a spawn
    instance. For example::

        from winpexpect import run
        run("ssh user@example.com 'ls -l'",
            events={'(?i)password': 'secret\\n'})

    The 'events' argument is a dictionary mapping patterns to responses:
    whenever a pattern is seen in the output, the associated response string
    is sent (include newlines yourself if Enter is required). A response may
    also be a callable taking one dict argument (the locals() of run(), so
    event_count, child and extra_args are accessible). A callback may return
    True to stop the run, or a string to send to the child. 'extra_args' is
    not used directly by run(); it only provides a way to pass data to a
    callback through the locals dictionary.
    """
    if timeout == -1:
        # BUG FIX: the original hard-coded stub=None here, silently ignoring
        # the caller's `stub` argument.
        child = winspawn(command, maxread=2000, logfile=logfile, cwd=cwd,
                         env=env, stub=stub)
    else:
        child = winspawn(command, timeout=timeout, maxread=2000,
                         logfile=logfile, cwd=cwd, env=env, stub=stub)
    if events is not None:
        # list() keeps keys/values indexable on Python 3 (dict views) while
        # remaining a no-op on Python 2.
        patterns = list(events.keys())
        responses = list(events.values())
    else:
        patterns = None  # We assume that EOF or TIMEOUT will save us.
        responses = None
    child_result_list = []
    event_count = 0
    while 1:
        try:
            index = child.expect(patterns)
            # NOTE: `basestring` keeps the original Python 2 semantics
            # (str + unicode); this branch is py2-only as written.
            if isinstance(child.after, basestring):
                child_result_list.append(child.before + child.after)
            else:
                # child.after may have been a TIMEOUT or EOF; don't cat those.
                child_result_list.append(child.before)
            if isinstance(responses[index], basestring):
                child.send(responses[index])
            elif callable(responses[index]):
                # BUG FIX: the original tested types.FunctionType but never
                # imported `types` (NameError on any callback); callable()
                # also accepts bound methods and lambdas.
                callback_result = responses[index](locals())
                if isinstance(callback_result, basestring):
                    child.send(callback_result)
                elif callback_result:
                    # Callback asked us to stop: drain until EOF and leave.
                    child.expect(EOF)
                    break
            else:
                child.terminate()
                raise TypeError('The callback must be a string or function type.')
            event_count = event_count + 1
        except TIMEOUT:
            child_result_list.append(child.before)
            child.terminate()
            break
        except EOF:
            child_result_list.append(child.before)
            child.close()
            break
    child_result = ''.join(child_result_list)
    if withexitstatus:
        child.wait()
        return (child_result, child.exitstatus)
    else:
        return child_result
class winspawn(spawn):
"""A version of pexpect.spawn for the Windows platform. """
# The Windows version of spawn is quite different when compared to the
# Posix version.
#
# The first difference is that it's not possible on Windows to select()
# on a file descriptor that corresponds to a file or a pipe. Therefore,
# to do non-blocking I/O, we need to use threads.
#
# Secondly, there is no way to pass /only/ the file descriptors
# corresponding to the redirected stdin/out/err to the child. Either all
# inheritable file descriptors are passed, or none. We solve this by
# indirectly executing our child via a stub for which we close all file
# descriptors. The stub communicates back to us via a named pipe.
#
# Finally, Windows does not have ptys. It does have the concept of a
# "Console" though but it's much less sophisticated. This code runs the
# child in a new console by passing the flag CREATE_NEW_CONSOLE to
# CreateProcess(). We create a new console for our child because this
# way it cannot interfere with the current console, and it is also
# possible to run the main program without a console (e.g. a Windows
# service).
#
# NOTE:
# Some special application will identify the input type. If its input handle
# is not the stdin, the child process will disable the interactive mode.
# For example: To run python as interactive mode, we should do like below:
# child = winspawn('python', ['-i'])
# option '-i' will force the python into interactive mode
#
pipe_buffer = 4096
pipe_template = r'\\.\pipe\winpexpect-%06d'
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
username=None, domain=None, password=<PASSWORD>, stub=None):
"""Constructor."""
self.username = username
self.domain = domain
self.password = password
self.stub = stub
self.child_hwnd = None
self.child_handle = None
self.child_output = Queue()
self.user_input = Queue()
self.chunk_buffer = ChunkBuffer()
self.stdout_handle = None
self.stdout_eof = False
self.stdout_reader = None
self.stderr_handle = None
self.stderr_eof = False
self.stderr_reader = None
self.stdin_reader = None # stdin of parent console
self.stdin_handle = None # stdin of parent console
self.interrupted = False
super(winspawn, self).__init__(command, args, timeout=timeout,
maxread=maxread, searchwindowsize=searchwindowsize,
logfile=logfile, cwd=cwd, env=env)
def __del__(self):
try:
self.terminate()
except WindowsError:
pass
def _spawn(self, command, args=None):
"""Start the child process. If args is empty, command will be parsed
according to the rules of the MS C runtime, and args will be set to
the parsed args."""
if args:
args = args[:] # copy
args.insert(0, command)
else:
args = split_command_line(command)
command = args[0]
self.command = command
self.args = args
command = which(self.command)
if command is None:
raise ExceptionPexpect, 'Command not found: %s' % self.command
args = join_command_line(self.args)
# Create the pipes
sids = [_get_current_sid()]
if self.username and self.password:
sids.append(_lookup_sid(self.domain, self.username))
cmd_pipe, cmd_name = _create_named_pipe(self.pipe_template, sids)
stdin_pipe, stdin_name = _create_named_pipe(self.pipe_template, sids)
stdout_pipe, stdout_name = _create_named_pipe(self.pipe_template, sids)
stderr_pipe, stderr_name = _create_named_pipe(self.pipe_template, sids)
startupinfo = STARTUPINFO()
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
if self.stub == None or not getattr(sys, 'frozen', False):
# python = os.path.join(sys.exec_prefix, 'python.exe')
python = sys.executable
self_dir = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
pycmd = 'import sys; sys.path.insert(0, r"%s"); import winpexpect; winpexpect._stub(r"%s", r"%s", r"%s", r"%s")' \
% (self_dir, cmd_name, stdin_name, stdout_name, stderr_name)
pyargs = join_command_line([python, '-c', pycmd])
else:
python = self.stub
pyargs = join_command_line([python, cmd_name, stdin_name, stdout_name, stderr_name])
# Create a new token or run as the current process.
if self.username and self.password:
token = LogonUser(self.username, self.domain, self.password,
LOGON32_LOGON_INTERACTIVE, LOGON32_PROVIDER_DEFAULT)
res = CreateProcessAsUser(token, python, pyargs, None, None,
False, CREATE_NEW_CONSOLE, self.env,
self.cwd, startupinfo)
else:
token = None
res = CreateProcess(python, pyargs, None, None, False,
CREATE_NEW_CONSOLE, self.env, self.cwd,
startupinfo)
child_handle = res[0]
res[1].Close() # don't need thread handle
ConnectNamedPipe(cmd_pipe)
ConnectNamedPipe(stdin_pipe)
ConnectNamedPipe(stdout_pipe)
ConnectNamedPipe(stderr_pipe)
# Tell the stub what to do and wait for it to exit
WriteFile(cmd_pipe, 'command=%s\n' % command)
WriteFile(cmd_pipe, 'args=%s\n' % args)
if token:
parent_sid = ConvertSidToStringSid(_get_current_sid())
WriteFile(cmd_pipe, 'parent_sid=%s\n' % str(parent_sid))
WriteFile(cmd_pipe, '\n')
header = _read_header(cmd_pipe)
output = _parse_header(header)
if output['status'] != 'ok':
m = 'Child did not start up correctly. '
m += output.get('message', '')
raise ExceptionPexpect, m
self.pid = int(output['pid'])
self.child_handle = OpenProcess(PROCESS_ALL_ACCESS, False, self.pid)
WaitForSingleObject(child_handle, INFINITE)
# Start up the I/O threads
self.child_fd = open_osfhandle(stdin_pipe.Detach(), 0) # for pexpect
self.stdout_handle = stdout_pipe
self.stdout_reader = Thread(target=self._child_reader,
args=(self.stdout_handle,))
self.stdout_reader.start()
self.stderr_handle = stderr_pipe
self.stderr_reader = Thread(target=self._child_reader,
args=(self.stderr_handle,))
self.stderr_reader.start()
# find the handle of the child console window
find_hwnds = []
def cb_comparewnd (hwnd, lparam):
_, pid = GetWindowThreadProcessId(hwnd)
if pid == self.pid:
find_hwnds.append(hwnd)
return True
tmfind = time.time()
while True:
EnumWindows(cb_comparewnd, None)
if find_hwnds:
self.child_hwnd = find_hwnds[0]
break
if time.time() - tmfind > self.timeout:
raise ExceptionPexpect, 'Did not find child console window'
self.terminated = False
self.closed = False
def terminate(self, force=False):
"""Terminate the child process. This also closes all the file
descriptors."""
if self.child_handle is None or self.terminated:
return
self.__terminate(force)
self.close()
self.wait()
self.terminated = True
def close(self):
"""Close all communications channels with the child."""
if self.closed:
return
self.interrupted = True
if self.stdin_reader:
CloseHandle(self.stdin_handle)
self.stdin_reader.join()
os.close(self.child_fd)
CloseHandle(self.stdout_handle)
CloseHandle(self.stderr_handle)
# Now the threads are ready to be joined.
self.stdout_reader.join()
self.stderr_reader.join()
self.closed = True
def wait(self, timeout=None):
"""Wait until the child exits. If timeout is not specified this
blocks indefinately. Otherwise, timeout specifies the number of
seconds to wait."""
if self.exitstatus is not None:
return
if timeout is None:
timeout = INFINITE
else:
timeout = 1000 * timeout
ret = WaitForSingleObject(self.child_handle, timeout)
if ret == WAIT_TIMEOUT:
raise TIMEOUT, 'Timeout exceeded in wait().'
self.exitstatus = GetExitCodeProcess(self.child_handle)
return self.exitstatus
def isalive(self):
"""Return True if the child is alive, False otherwise."""
if self.exitstatus is not None:
return False
ret = WaitForSingleObject(self.child_handle, 0)
if ret == WAIT_OBJECT_0:
self.exitstatus = GetExitCodeProcess(self.child_handle)
return False
return True
def kill(self, signo):
"""The signal.CTRL_C_EVENT and signal.CTRL_BREAK_EVENT signals is
avaiable under windows from Python3.2. Any other value for sig will
cause the process to be unconditionally killed by the TerminateProcess
API,"""
if sys.version_info[0] == 3 and sys.version_info[1] >= 2:
super().kill(signo)
else:
raise ExceptionPexpect, 'Signals are not availalbe on Windows'
def __terminate(self, force=False):
"""This forces a child process to terminate. It starts nicely with
signal.CTRL_C_EVENT and signal.CTRL_BREAK_EVENT. If "force" is True
then moves onto TerminateProcess. This returns True if the child
was terminated. This returns False if the child could not be terminated.
For python earlier than 3.2, force parameter will be ignored and
TerminateProcess will be always used"""
if not self.isalive():
return True
if sys.version_info[0] == 3 and sys.version_info[1] >= 2:
try:
self.kill(signal.CTRL_C_EVENT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.CTRL_BREAK_EVENT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
# any value other than CTRL_C_EVENT and signal.CTRL_BREAK_EVENT
# will terminate the process by killed by the TerminateProcess
self.kill(123)
time.sleep(self.delayafterterminate)
return (not self.isalive())
return False
except OSError as e:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
return (not self.isalive())
else:
try:
TerminateProcess(self.child_handle, 1)
time.sleep(self.delayafterterminate)
return (not self.isalive())
except WindowsError, e:
# ERROR_ACCESS_DENIED (also) happens when the child has already
# exited.
return (e.winerror == ERROR_ACCESS_DENIED and not self.isalive())
def direct_send(self, s):
"""Some subprocess is using the getche() to get the input, the most
common case is the password input. The getche() doesn't listen at
the stdin. So the send() doesn't work on this case. Here we will send
the string to the console window by windows message: WM_KEYDOWN,
WM_KEYUP, WM_CHAR.
There is another way available to implement the direct-send function.
That is attach the child console from the stub process and write the
console input directly. Here is the implement steps:
1. In the stub process add below code and don't exit the stub process.
def _string2records(s):
records = []
for c in s:
rec = win32console.PyINPUT_RECORDType(KEY_EVENT)
rec.KeyDown = True
rec.RepeatCount = 1
rec.Char = c
rec.VirtualKeyCode = ord(c)
records.append(rec)
rec = win32console.PyINPUT_RECORDType(KEY_EVENT)
rec.KeyDown = False
rec.RepeatCount = 1
rec.Char = c
rec.VirtualKeyCode = ord(c)
records.append(rec)
return records
while True:
header = _read_header(cmd_pipe)
input = _parse_header(header)
if input['command'] == 'send':
try:
win32console.AttachConsole(pid)
s = input['string']
stdin_handle = GetStdHandle(STD_INPUT_HANDLE)
records = _string2records(s)
nrecords = stdin_handle.WriteConsoleInput(records)
win32console.FreeConsole()
except WindowsError as e:
message = _quote_header(str(e))
WriteFile(cmd_pipe,
'status=error\nmessage=%s\n\n' % message)
else:
WriteFile(cmd_pipe,
'status=ok\nnbytes=%d\n\n' % nrecords)
2. The stub executable must be win32gui type, using "pythonw.exe"
instead of "python.exe"
3. direct_send function can be implemented as below:
WriteFile(self.stub_pipe, 'command=send\nstring=%s\n\n' % s)
header = _read_header(self.stub_pipe)
output = _parse_header(header)
if output['status'] != 'ok':
m = 'send string failed: '
m += output.get('message', '')
raise ExceptionPexpect(m)
4. This way can not send the CRLF(don't know the reason). For send
the CRLF, we still need the SendMessage like the direct_sendline do.
Finally, I choose to use the windows message solution, just because
it looks like much simple than the attach-console solution.
"""
self._input_log(s)
for c in s:
PostMessage(self.child_hwnd, WM_CHAR, ord(c), 1)
def direct_sendline(self, s):
self.direct_send(s)
self._input_log('\r\n')
PostMessage(self.child_hwnd, WM_KEYDOWN, VK_RETURN, 0x001C0001)
PostMessage(self.child_hwnd, WM_KEYUP, VK_RETURN, 0xC01C0001)
def interact(self, escape_character = chr(29), input_filter = None, output_filter = None):
# Flush the buffer.
self.stdin_reader = Thread(target=self._stdin_reader)
self.stdin_reader.start()
self.interrupted = False
try:
while self.isalive():
data = self._interact_read(self.stdin_handle)
if data != None:
if input_filter: data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
os.write(self.child_fd, data.encode('ascii'))
break
os.write(self.child_fd, data.encode('ascii'))
data = self._interact_read(self.child_fd)
if data != None:
if output_filter: data = output_filter(data)
self._output_log(data)
if sys.stdout not in (self.logfile, self.logfile_read):
# interactive mode, the child output will be always output to stdout
sys.stdout.write(data)
# child exited, read all the remainder output
while self.child_output.qsize():
handle, status, data = self.child_output.get(block=False)
if status != 'data':
break
self._output_log(data)
if sys.stdout not in (self.logfile, self.logfile_read):
sys.stdout.write(data)
except KeyboardInterrupt:
self.interrupted = True
self.terminate()
return
self.close()
def _output_log(self, data):
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
if self.logfile_read is not None:
self.logfile_read.write(data)
self.logfile_read.flush()
def _input_log(self, data):
if self.logfile is not None:
self.logfile.write (data)
self.logfile.flush()
if self.logfile_send is not None:
self.logfile_send.write (data)
self.logfile_send.flush()
def _interact_read(self, fd):
"""This is used by the interact() method.
"""
data = None
try:
if fd == self.stdin_handle:
data = self.user_input.get(block=False)
else:
handle, status, data = self.child_output.get(timeout=0.1)
if status == 'eof':
self._set_eof(handle)
raise EOF, 'End of file in interact_read().'
elif status == 'error':
self._set_eof(handle)
raise OSError, data
except Exception as e:
data = None
return data
def _stdin_reader(self):
"""INTERNAL: Reader thread that reads stdin for user interaction"""
self.stdin_handle = GetStdHandle(STD_INPUT_HANDLE)
self.stdin_handle.SetConsoleMode(ENABLE_LINE_INPUT|ENABLE_ECHO_INPUT|ENABLE_MOUSE_INPUT|
ENABLE_WINDOW_INPUT|ENABLE_MOUSE_INPUT|ENABLE_PROCESSED_INPUT)
# Remove flag: ENABLE_PROCESSED_INPUT to deal with the ctrl-c myself
try:
while not self.interrupted:
ret = WaitForSingleObject(self.stdin_handle, 1000)
if ret == WAIT_OBJECT_0:
records = self.stdin_handle.PeekConsoleInput(1)
rec = records[0]
if rec.EventType == KEY_EVENT:
if not rec.KeyDown or ord(rec.Char) == 0:
self.stdin_handle.FlushConsoleInputBuffer()
continue
else:
# discard the events: FOCUS_EVENT/WINDOW_BUFFER_SIZE_EVENT/MENU_EVENT,
self.stdin_handle.FlushConsoleInputBuffer()
continue
err, data = ReadFile(self.stdin_handle, self.maxread)
#print('read finished:', [hex(ord(i)) for i in data], err)
self.user_input.put(data)
except Exception as e:
pass
    def _child_reader(self, handle):
        """INTERNAL: Reader thread that reads stdout/stderr of the child
        process.

        Pushes ``(handle, status, data)`` tuples onto ``self.child_output``,
        where status is 'data', 'eof' (broken pipe = child closed its end) or
        'error' (data carries the winerror code).  Exits after eof/error.
        """
        status = 'data'
        while not self.interrupted:
            try:
                err, data = ReadFile(handle, self.maxread)
                assert err == 0 # not expecting error w/o overlapped io
            except WindowsError, e:
                if e.winerror == ERROR_BROKEN_PIPE:
                    # Normal end-of-stream: the child closed the pipe.
                    status = 'eof'
                    data = ''
                else:
                    status = 'error'
                    data = e.winerror
            self.child_output.put((handle, status, data))
            if status != 'data':
                # Report eof/error once, then stop this reader thread.
                break
def _set_eof(self, handle):
"""INTERNAL: mark a file handle as end-of-file."""
if handle == self.stdout_handle:
self.stdout_eof = True
elif handle == self.stderr_handle:
self.stderr_eof = True
    def read_nonblocking(self, size=1, timeout=-1):
        """INTERNAL: Non blocking read.

        Returns up to *size* bytes of child output.  Raises TIMEOUT when no
        data arrives within *timeout* seconds (-1 = use self.timeout), EOF at
        end of stream, and OSError when a reader thread reported an error.
        """
        # Serve already-buffered data first so short reads never block.
        if len(self.chunk_buffer):
            return self.chunk_buffer.read(size)
        if self.stdout_eof and self.stderr_eof:
            # Both pipes closed; the reader threads have drained the queue.
            assert self.child_output.qsize() == 0
            return ''
        if timeout == -1:
            # -1 means "use the instance-wide default timeout".
            timeout = self.timeout
        try:
            handle, status, data = self.child_output.get(timeout=timeout)
        except Empty:
            raise TIMEOUT, 'Timeout exceeded in read_nonblocking().'
        if status == 'data':
            self.chunk_buffer.add(data)
        elif status == 'eof':
            self._set_eof(handle)
            raise EOF, 'End of file in read_nonblocking().'
        elif status == 'error':
            self._set_eof(handle)
            raise OSError, data
        buf = self.chunk_buffer.read(size)
        self._output_log(buf)
        return buf
| StarcoderdataPython |
3425593 | <reponame>cavayangtao/rmtt_ros<gh_stars>0
#!/usr/bin/env python3
# coding=utf-8
import rospy
import os
import cv2
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import std_msgs.msg
from cv_bridge import CvBridge
from std_msgs.msg import Empty
import math
import sys
import mediapipe as mp
# Velocity preset per gesture: (linear.x, linear.y, linear.z, angular.z).
# All other Twist components are always zero.
_GESTURE_VELOCITIES = {
    "stop":     (0.0, 0.0, 0.0, 0.0),
    "up":       (0.0, 0.0, 0.2, 0.0),
    "down":     (0.0, 0.0, -0.2, 0.0),
    "left":     (0.0, -0.2, 0.0, 0.0),
    "right":    (0.0, 0.2, 0.0, 0.0),
    "turn":     (0.0, 0.0, 0.0, 0.2),
    "backward": (-0.2, 0.0, 0.0, 0.0),
    "forward":  (0.2, 0.0, 0.0, 0.0),
}


def _make_twist(linear_x, linear_y, linear_z, angular_z):
    """Build a Twist with the given linear x/y/z and yaw rate; all else 0."""
    speed = Twist()
    speed.linear.x = linear_x
    speed.linear.y = linear_y
    speed.linear.z = linear_z
    speed.angular.x = 0.0
    speed.angular.y = 0.0
    speed.angular.z = angular_z
    return speed


def callback(msg):
    """Image callback: detect a hand gesture and publish the matching command.

    Movement gestures publish a Twist on /cmd_vel via the global ``pub``;
    the "land off" gesture publishes an Empty on /land instead.  Frames with
    no recognised gesture publish nothing.  The annotated frame is always
    shown in an OpenCV window.  (Replaces a 9-way copy-pasted if/elif chain
    with a dispatch table.)
    """
    frame = bridge.imgmsg_to_cv2(msg)
    frame, gest = detect(frame)
    if gest == "land off":
        # NOTE(review): creating the publisher inside the callback matches the
        # original code, but a just-created rospy Publisher can drop this
        # message before subscribers connect -- consider hoisting it to main.
        land_pub = rospy.Publisher('/land', Empty, queue_size=1)
        land_pub.publish(Empty())
    elif gest in _GESTURE_VELOCITIES:
        pub.publish(_make_twist(*_GESTURE_VELOCITIES[gest]))
    cv2.imshow('Frame', frame)
    cv2.waitKey(1)
def angle(v1,v2):
v1_x=v1[0]
v1_y=v1[1]
v2_x=v2[0]
v2_y=v2[1]
try:
angle_= math.degrees(math.acos((v1_x*v2_x+v1_y*v2_y)/(((v1_x**2+v1_y**2)**0.5)*((v2_x**2+v2_y**2)**0.5))))
except:
angle_ =65535.
if angle_ > 180.:
angle_ = 65535.
return angle_
def hand_angle(hand_):
    """Bend angle (degrees) of each finger, as a list:
    [thumb, index, middle, ring, pinky].

    For every finger the angle is measured between the vector
    wrist(0) -> finger root landmark and the vector joint -> fingertip,
    using MediaPipe's 21-landmark hand layout.
    """
    # (root, joint, tip) landmark indices per finger, thumb first.
    finger_joints = ((2, 3, 4), (6, 7, 8), (10, 11, 12), (14, 15, 16), (18, 19, 20))
    wrist_x, wrist_y = int(hand_[0][0]), int(hand_[0][1])
    angle_list = []
    for root, joint, tip in finger_joints:
        toward_root = (wrist_x - int(hand_[root][0]),
                       wrist_y - int(hand_[root][1]))
        toward_tip = (int(hand_[joint][0]) - int(hand_[tip][0]),
                      int(hand_[joint][1]) - int(hand_[tip][1]))
        angle_list.append(angle(toward_root, toward_tip))
    return angle_list
def h_gesture(hand_, angle_list):
    """Map finger-bend angles (plus landmark positions) to a gesture name.

    *angle_list* is [thumb, index, middle, ring, pinky] in degrees;
    *hand_* provides landmark coordinates for disambiguating direction.
    Returns one of "land off", "stop", "left", "right", "turn", "up",
    "down", "backward", "forward", or None when nothing matches (including
    any 65535 "invalid angle" sentinel in the list).
    """
    thr_angle = 65.        # above this a finger counts as bent
    thr_angle_thumb = 53.  # bent threshold for the thumb
    thr_angle_s = 49.      # below this a finger counts as straight
    if 65535. in angle_list:
        return None
    thumb, index, middle, ring, pinky = angle_list
    mid_ring_bent = middle > thr_angle and ring > thr_angle
    # Branch order matters and mirrors the original elif chain.
    if thumb > thr_angle_thumb and index > thr_angle and mid_ring_bent and pinky > thr_angle:
        return "land off"          # closed fist
    if all(a < thr_angle_s for a in angle_list):
        return "stop"              # open palm
    if thumb < thr_angle_s and index < thr_angle_s and mid_ring_bent and pinky > thr_angle:
        # Two fingers up: index tip vs. knuckle x decides the side.
        if hand_[8][0] > hand_[6][0]:
            return "left"
        if hand_[8][0] < hand_[6][0]:
            return "right"
        return None
    if thumb < thr_angle_s and index < thr_angle_s and mid_ring_bent and pinky < thr_angle_s:
        return "turn"
    if thumb > 5 and index < thr_angle_s and mid_ring_bent and pinky > thr_angle:
        # Thumb tip vs. knuckle y decides up/down (image y grows downward).
        if hand_[4][1] > hand_[2][1]:
            return "down"
        if hand_[4][1] < hand_[2][1]:
            return "up"
        return None
    if thumb > thr_angle_thumb and index < thr_angle_s and middle < thr_angle_s \
            and ring > thr_angle and pinky > thr_angle:
        if hand_[4][1] > hand_[2][1]:
            return "backward"
        if hand_[4][1] < hand_[2][1]:
            return "forward"
    return None
def detect(frame):
    """Run MediaPipe hand detection on a BGR *frame*.

    Returns ``(annotated_frame, gesture)`` where the frame has been
    converted to RGB and annotated with landmarks plus the gesture label,
    and *gesture* is the name from :func:`h_gesture` or None.
    """
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    gesture_str = None
    results = hands.process(frame)
    if results.multi_hand_landmarks:
        height, width = frame.shape[0], frame.shape[1]
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            # Convert the 21 normalised landmarks to pixel coordinates.
            hand_local = [
                (hand_landmarks.landmark[i].x * width,
                 hand_landmarks.landmark[i].y * height)
                for i in range(21)
            ]
            if hand_local:
                gesture_str = h_gesture(hand_local, hand_angle(hand_local))
                cv2.putText(frame, gesture_str, (0, 100), 0, 1.3, (0, 0, 255), 3)
    return frame, gesture_str
if __name__ == '__main__':
    # MediaPipe hand tracking in video mode, limited to a single hand.
    mp_drawing = mp.solutions.drawing_utils
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(
        static_image_mode=False,
        max_num_hands=1,
        min_detection_confidence=0.75,
        min_tracking_confidence=0.75)
    bridge = CvBridge()
    # ROS wiring: camera frames in on /image_raw, velocity commands out on
    # /cmd_vel.  callback() uses bridge/hands/pub as module globals.
    rospy.init_node('gesture', anonymous=True)
    sub = rospy.Subscriber('/image_raw', Image, callback)
    pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
    rospy.spin()
| StarcoderdataPython |
8029223 | <gh_stars>0
class AsyncOperationManager(object):
    """ Provides concurrency management for classes that support asynchronous method calls. This class cannot be inherited. """

    def ZZZ(self):
        """hardcoded/mock instance of the class"""
        return AsyncOperationManager()

    # Shared mock instance; assigned right after the class body below.
    # BUG FIX: the original ``instance = ZZZ()`` raised at import time --
    # ZZZ requires a ``self`` argument, and the class name is not bound yet
    # while its own body is executing.
    instance = None
    """hardcoded/returns an instance of the class"""

    @staticmethod
    def CreateOperation(userSuppliedState):
        """
        CreateOperation(userSuppliedState: object) -> AsyncOperation
        Returns an System.ComponentModel.AsyncOperation for tracking the duration of a particular asynchronous operation.
        userSuppliedState: An object used to associate a piece of client state,such as a task ID,with a particular asynchronous operation.
        Returns: An System.ComponentModel.AsyncOperation that you can use to track the duration of an asynchronous method invocation.
        """
        pass

    SynchronizationContext = None
    __all__ = [
        'CreateOperation',
    ]


# Create the shared mock instance now that the class object exists.
AsyncOperationManager.instance = AsyncOperationManager()
| StarcoderdataPython |
8148174 | import re
from typing import Callable, List, Tuple, Union
from urllib.parse import parse_qsl, urlparse, urlsplit, urlunsplit, unquote_plus
import requests
from bs4 import BeautifulSoup
from tld import get_fld
from w3lib.url import url_query_cleaner
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "0.0.8"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class NoMoreQS:
    """No more query string: strip tracking query parameters from URLs."""

    # Extra HTTP headers used when the strict cleaner fetches the page.
    # Class-level on purpose: shared by every instance.
    headers: dict = {}

    def __init__(self,
                 include_flds: Union[List[str], Tuple[str]] = (),
                 exclude_flds: Union[List[str], Tuple[str]] = (),
                 strict: bool = True):
        """
        Parameters
        ----------
        include_flds : Union[List[str], Tuple[str]], optional
            first-level domains that are always cleaned strictly,
            by default ()
        exclude_flds : Union[List[str], Tuple[str]], optional
            first-level domains that only ever lose ``fbclid``,
            by default ()
        strict : bool, optional
            default cleaning mode for all other domains, by default True
        """
        self.include_flds = include_flds
        self.exclude_flds = exclude_flds
        self.strict = strict

    def clean(self, url: str, cookies: dict = None, decode_percent_encode: bool = False) -> str:
        """
        Clean *url*.

        Parameters
        ----------
        url : str
            Any useable url.
        cookies : dict, optional
            cookies for the request.  (The mutable ``{}`` default was
            replaced by ``None`` for safety; behaviour is unchanged.)
        decode_percent_encode : bool, optional
            decode percent-encoding in the result

        Returns
        -------
        str
            cleaned url; ``fbclid`` is always removed.
        """
        fld = get_fld(url)
        # Default cleaner follows the instance-wide mode.
        cleaner: Callable = _super_cleaner if self.strict else _fbclid_cleaner
        # Per-domain overrides.  (The original's local flags were named
        # ``is_allowed_fld``/``is_not_allowed_fld`` with inverted meanings.)
        if fld in self.exclude_flds:
            # Excluded domains only ever lose fbclid.
            cleaner = _fbclid_cleaner
        if fld in self.include_flds:
            # Included domains always get the strict cleaner (wins over exclude).
            cleaner = _super_cleaner
        cleaned_url = cleaner(url, headers=self.headers,
                              cookies=cookies if cookies is not None else {})
        if decode_percent_encode:
            return unquote_plus(cleaned_url)
        return cleaned_url

    @staticmethod
    def remove_fbclid(url: str, decode_percent_encode: bool = False) -> str:
        """
        Remove only ``fbclid`` -- the gentle alternative when the full power
        of the super cleaner is not wanted.

        Parameters
        ----------
        url : str
            Any useable url.
        decode_percent_encode : bool, optional
            decode percent-encoding in the result

        Returns
        -------
        str
            cleaned url; fbclid is always removed.
        """
        cleaned_url = _fbclid_cleaner(url)
        if decode_percent_encode:
            return unquote_plus(cleaned_url)
        return cleaned_url
def _super_cleaner(url: str, headers: dict = {}, cookies: dict = {}) -> str:
    """
    super cleaner

    Fetches the page and considers three candidate URLs -- the original
    (fbclid-stripped), the ``<link rel=canonical>`` href and the ``og:url``
    meta -- ranked by ascending (path length, query-string count, -netloc
    length); the first candidate with a non-empty path wins.

    Parameters
    ----------
    url : str
        Any useable url.
    headers : dict, optional
        Optional headers ``request`` takes.
    cookies : dict, optional
        Optional cookies ``request`` takes.

    Returns
    -------
    str
        cleaned url, fbclid is always be cleaned.
    """
    # Preserve the fragment: candidate URLs from the page may not carry it.
    fragment = urlparse(url).fragment
    url = _fbclid_cleaner(url)
    page = _get_page(url, headers, cookies)
    if not page:
        # Not HTML or fetch failed: the fbclid-stripped URL is the best we have.
        return url
    canonical_url = _get_canonical_url(page)
    og_url = _get_og_url(page)
    origin_path_len = len(urlparse(url).path)
    canonical_path_len = len(urlparse(canonical_url).path)
    og_path_len = len(urlparse(og_url).path)
    origin_qs_len = count_qs_length(url)
    canonical_qs_len = count_qs_length(canonical_url)
    og_qs_len = count_qs_length(og_url)
    # Order weights: path_len -> qs_len -> -(netloc)
    candidate_urls = sorted([
        (origin_path_len, origin_qs_len, -len(urlparse(url).netloc), url),
        (canonical_path_len, canonical_qs_len, -len(urlparse(canonical_url).netloc), canonical_url),
        (og_path_len, og_qs_len, -len(urlparse(og_url).netloc), og_url)
    ])
    # sorted() is ascending: the first entry with a non-empty path wins.
    for path_len, _, _, the_url in candidate_urls:
        if path_len:
            if fragment:
                # Re-attach the fragment preserved above.
                url_components = urlsplit(the_url)
                url_components = url_components._replace(fragment=fragment)
                the_url = urlunsplit(url_components)
            return the_url
    return url
def _fbclid_cleaner(url: str, **kwargs) -> str:
    """
    Strip only the ``fbclid`` query parameter from *url*.

    Parameters
    ----------
    url : str
        Any useable url.

    Returns
    -------
    str
        cleaned url; fragments are kept, fbclid is always removed.
    """
    # NOTE: ("fbclid") is a plain string, not a tuple; url_query_cleaner
    # accepts both forms.  Extra **kwargs keep the signature interchangeable
    # with _super_cleaner.
    cleaned = url_query_cleaner(url, ("fbclid"), remove=True, keep_fragments=True)
    # url_query_cleaner can leave a dangling '#' behind an empty fragment.
    return cleaned[:-1] if cleaned.endswith("#") else cleaned
def _get_canonical_url(page: BeautifulSoup) -> str:
    """
    Return the href of ``<link rel="canonical">`` with fbclid stripped.

    Parameters
    ----------
    page : BeautifulSoup
        BeautifulSoup object

    Returns
    -------
    str
        the canonical link, or '' when the page does not declare one
    """
    tag = page.select_one("link[rel='canonical']")
    return _fbclid_cleaner(tag["href"]) if tag else ''
def _get_og_url(page: BeautifulSoup) -> str:
    """
    Return the ``og:url`` meta content with fbclid stripped.

    Parameters
    ----------
    page : BeautifulSoup
        BeautifulSoup object

    Returns
    -------
    str
        the og:url value, or '' when the page does not declare one
    """
    og_url_selector = "meta[property='og:url']"
    # Prefer the well-formed location in <head>, but tolerate pages that
    # put the meta tag in <body>.
    og_url = page.select_one(f"head > {og_url_selector}")
    if not og_url:
        og_url = page.select_one(f"body > {og_url_selector}")
    if not og_url:
        return ''
    return _fbclid_cleaner(og_url["content"])
def _get_page(url: str, headers: dict = {}, cookies: dict = {}) -> BeautifulSoup:
    """
    Fetch *url* and return it parsed as BeautifulSoup.

    Parameters
    ----------
    url : str
        a useable url
    headers : dict, optional
        headers, by default {} (read-only here, so the shared default is safe)
    cookies : dict, optional
        cookies, by default {}

    Returns
    -------
    BeautifulSoup
        the parsed page, or ``False`` for non-HTML content and HTTP errors
        (callers only rely on truthiness).
    """
    # Follow redirects first so we parse (and report) the final location.
    response = requests.head(url, allow_redirects=True)
    url = response.url
    content_type = response.headers["content-type"]
    if not re.search("text/html", content_type):
        return False
    response = requests.get(url, headers=headers, cookies=cookies)
    # BUG FIX: was ``> 400``, which treated HTTP 400 (Bad Request) as success.
    if response.status_code >= 400:
        return False
    page = BeautifulSoup(response.text, "lxml")
    return page
def parse_url_qs_to_dict(url: str, as_set=False) -> Union[dict, set]:
    """
    Parse *url*'s query string into a dict, or a set of its keys.

    Parameters
    ----------
    url : str
        validate url
    as_set : bool, optional
        return only the parameter names as a ``set``

    Returns
    -------
    Union[dict, set]
        ``{}`` for a falsy *url* (even when *as_set* is requested).
    """
    if not url:
        return {}
    pairs = dict(parse_qsl(urlparse(url).query))
    if as_set:
        return set(pairs)
    return pairs
def count_qs_length(url: str) -> int:
    """Number of distinct query-string parameters in *url* (0 for a falsy url)."""
    if not url:
        return 0
    return len(parse_url_qs_to_dict(url))
def qs_delta(original_url: str, cleaned_url: str) -> set:
    """
    Query-string keys present in *original_url* but removed from *cleaned_url*.

    Parameters
    ----------
    original_url : str
    cleaned_url : str

    Returns
    -------
    set
        set query string delta

    NOTE(review): a falsy *original_url* makes parse_url_qs_to_dict return a
    dict, and ``dict - set`` raises TypeError -- same as the original code.
    """
    before = parse_url_qs_to_dict(original_url, as_set=True)
    after = parse_url_qs_to_dict(cleaned_url, as_set=True)
    return before - after
| StarcoderdataPython |
5150287 | # Generated by Django 3.2.5 on 2021-07-27 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes Object.parent and adds the
    # s3_dirname text column (existing rows are backfilled with '' via the
    # temporary default, which is then dropped by preserve_default=False).
    dependencies = [
        ("connector_s3", "0016_alter_object_options"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="object",
            name="parent",
        ),
        migrations.AddField(
            model_name="object",
            name="s3_dirname",
            field=models.TextField(default=""),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
230552 | <filename>Widget testing/KivyMD Buttons/FlatButton.py
from kivy.uix.screenmanager import Screen
from kivymd.app import MDApp
from kivymd.uix.button import MDRectangleFlatButton
class MyApp(MDApp):
    """Minimal KivyMD demo: a single centred rectangle flat button."""

    def build(self):
        root = Screen()
        button = MDRectangleFlatButton(
            text="WithSJ",
            pos_hint={"center_x": 0.5, "center_y": 0.5},
        )
        root.add_widget(button)
        return root
# Launch the Kivy application when this file is executed directly.
if __name__ == "__main__":
    MyApp().run()
139271 | <filename>bldr/utils.py
import os
import pwd
from pathlib import Path
from pkg_resources import resource_filename
class BLDRError(Exception):
    """Base error for BLDR operations; carries a message and an exit code."""

    def __init__(self, msg: str, exitcode: int = 1) -> None:
        # exitcode is the process exit status the CLI should terminate with.
        self.msg, self.exitcode = msg, exitcode

    def __str__(self) -> str:
        return self.msg
class BLDRSetupFailed(BLDRError):
    """Raised when BLDR initialisation fails; prefixes the message accordingly."""

    def __init__(self, msg: str, exitcode: int = 1) -> None:
        super().__init__('Setting up BLDR failed: {}'.format(msg), exitcode)
def get_resource(path: str) -> Path:
    """Path of a bundled data file under the ``bldr`` package's ``data`` dir."""
    data_relative = Path('data', path)
    return Path(resource_filename('bldr', str(data_relative)))
def escape_docker_image_tag(tag: str) -> str:
    """Return *tag* with ':' and '/' replaced by '-' for use in an image tag."""
    return tag.translate(str.maketrans(':/', '--'))
def get_home_dir() -> Path:
    """Home directory of the real user, honouring sudo.

    When running as root under sudo, resolve the invoking user's home via
    SUDO_UID instead of root's.
    """
    running_sudo_as_root = 'SUDO_UID' in os.environ and os.geteuid() == 0
    uid = int(os.environ['SUDO_UID']) if running_sudo_as_root else os.getuid()
    return Path(pwd.getpwuid(uid).pw_dir)
def get_config_file_paths():
    """Candidate bldr config file locations, in the original lookup order."""
    home_dir = get_home_dir()
    return [
        Path('/etc/bldr.conf'),
        home_dir.joinpath('.config/bldr.conf'),
        home_dir.joinpath('.bldr.conf'),
        Path('bldr.conf'),
    ]
| StarcoderdataPython |
1791318 | # -*- coding: utf-8 -*-
'''
Created on 2015-8-21
@author: hustcc
'''
from flask.globals import request, session
# get / post data
def get_parameter(key, default=None):
    '''
    Fetch a request parameter for GET and POST requests; other HTTP
    methods simply yield *default*.
    '''
    if request.method == 'POST':
        # Form-encoded POST body.
        return request.form.get(key, default)
    if request.method == 'GET':
        # URL query string.
        return request.args.get(key, default)
    return default
# Fetch the logged-in user stored in the session ({} when anonymous).
def get_login_user():
    return session.get('u_id', {})
# Mark *user* as logged in by storing it in the session.
def login_user(user):
    session['u_id'] = user
# Log the user out by removing auth data from the session.
def logout():
    # Drop the OAuth token and the logged-in user; missing keys are ignored.
    session.pop('oauth_token', None)
    session.pop('u_id', None)
| StarcoderdataPython |
1878685 | <reponame>oom-debugger/GraphZoo-1<gh_stars>1-10
"""Base manifold"""
from torch.nn import Parameter
from typing import Tuple
import torch
class Manifold(object):
    """
    Abstract class to define operations on a manifold.

    Subclasses implement the Riemannian primitives (exp/log maps,
    projections, parallel transport, ...).  The ``c`` argument carried by
    most operations is the manifold's curvature parameter.
    """
    def __init__(self):
        super().__init__()
        # Numerical-stability epsilon available to subclasses.
        # NOTE(review): 10e-8 equals 1e-7 -- confirm 1e-8 was not intended.
        self.eps = 10e-8
    def sqdist(self, p1, p2, c):
        """Squared distance between pairs of points"""
        raise NotImplementedError
    def egrad2rgrad(self, p, dp, c):
        """Converts Euclidean Gradient to Riemannian Gradients"""
        raise NotImplementedError
    def proj(self, p, c):
        """Projects point p on the manifold"""
        raise NotImplementedError
    def proj_tan(self, u, p, c):
        """Projects u on the tangent space of p"""
        raise NotImplementedError
    def proj_tan0(self, u, c):
        """Projects u on the tangent space of the origin"""
        raise NotImplementedError
    def expmap(self, u, p, c):
        """Exponential map of u at point p"""
        raise NotImplementedError
    def logmap(self, p1, p2, c):
        """Logarithmic map of point p1 at point p2"""
        raise NotImplementedError
    def expmap0(self, u, c):
        """Exponential map of u at the origin"""
        raise NotImplementedError
    def logmap0(self, p, c):
        """Logarithmic map of point p at the origin"""
        raise NotImplementedError
    def mobius_add(self, x, y, c, dim=-1):
        """Adds points x and y (Mobius addition along *dim*)"""
        raise NotImplementedError
    def mobius_matvec(self, m, x, c):
        """Performs hyperboic martrix-vector multiplication"""
        raise NotImplementedError
    def init_weights(self, w, c, irange=1e-5):
        """Initializes random weigths on the manifold"""
        raise NotImplementedError
    def inner(self, p, c, u, v=None, keepdim=False):
        """Inner product for tangent vectors at point x (v defaults to u)"""
        raise NotImplementedError
    def ptransp(self, x, y, u, c):
        """Parallel transport of u from x to y"""
        raise NotImplementedError
    def ptransp0(self, x, u, c):
        """Parallel transport of u from the origin to y"""
        raise NotImplementedError
    def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """
        Perform a retraction from point :math:`x` with given direction :math:`u`

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            transported point
        """
        raise NotImplementedError
    def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        r"""
        Perform vector transport :math:`\mathfrak{T}_{x\to y}(v)`.

        Parameters
        ----------
        x : torch.Tensor
            start point on the manifold
        y : torch.Tensor
            target point on the manifold
        v : torch.Tensor
            tangent vector at point :math:`x`

        Returns
        -------
        torch.Tensor
            transported tensor
        """
        raise NotImplementedError
    def retr_transp(
        self, x: torch.Tensor, u: torch.Tensor, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Perform a retraction + vector transport at once.

        Parameters
        ----------
        x : torch.Tensor
            point on the manifold
        u : torch.Tensor
            tangent vector at point :math:`x`
        v : torch.Tensor
            tangent vector at point :math:`x` to be transported

        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor]
            transported point and vectors

        Notes
        -----
        Sometimes this is a far more optimal way to preform retraction + vector transport
        """
        # Default composition; subclasses may override with a fused version.
        y = self.retr(x, u)
        v_transp = self.transp(x, y, v)
        return y, v_transp
class ManifoldParameter(Parameter):
    """
    Subclass of torch.nn.Parameter for Riemannian optimization.
    """
    def __new__(cls, data, requires_grad, manifold, c):
        # Tensor construction ignores the manifold arguments; they are
        # attached as plain attributes in __init__ below.
        return Parameter.__new__(cls, data, requires_grad)
    def __init__(self, data, requires_grad, manifold, c):
        # Curvature of the manifold this parameter lives on.
        self.c = c
        # Manifold object providing the Riemannian operations for optimizers.
        self.manifold = manifold
    def __repr__(self):
        # super(Parameter, self) deliberately skips Parameter.__repr__ so the
        # manifold name prefixes the underlying tensor repr.
        return '{} Parameter containing:\n'.format(self.manifold.name) + super(Parameter, self).__repr__()
| StarcoderdataPython |
6641427 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os.path
import pickle
import random
import re
import traceback
from spidery.utils.func import write_file, cap_sentence, num_to_alpha
from .device_type import DeviceType
UA_BIN = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ua.bin')
class Agent:
    """Queryable user-agent database backed by the bundled ``ua.bin`` pickle.

    Construct with optional filter keywords (name, brand, type, ...) and
    call :meth:`get_random` to draw one matching user-agent string.
    """

    # Load the pickled records once per process.  The ``with`` block fixes a
    # leaked file handle (the original used ``pickle.load(open(UA_BIN, 'rb'))``
    # and never closed the file).  ua.bin ships with the package, so the
    # pickle input is trusted; never point UA_BIN at untrusted data.
    with open(UA_BIN, 'rb') as _ua_file:
        user_agents = pickle.load(_ua_file)
    del _ua_file

    def __init__(self, **kwargs):
        """Pre-filter the database according to the given criteria.

        Criteria left as None are ignored; ``type`` defaults to
        DeviceType.BROWSER and ``is_crawler`` to False.
        """
        params = {
            'name': kwargs.get('name', None),
            'brand': kwargs.get('brand', None),
            'brand_code': kwargs.get('brand_code', None),
            'code': kwargs.get('code', None),
            'type': kwargs.get('type', DeviceType.BROWSER),
            'category': kwargs.get('category', None),
            'engine': kwargs.get('engine', None),
            'family': kwargs.get('family', None),
            'family_code': kwargs.get('family_code', None),
            'family_vendor': kwargs.get('family_vendor', None),
            'is_crawler': kwargs.get('is_crawler', False),
        }
        self._filtered = self._parse_kwargs(**params)

    def get_random(self):
        """Return one random matching user-agent string, or None if no match."""
        return random.choice(self._filtered) if len(self._filtered) else None

    def __gen_class__(self):
        """Developer helper (not used at runtime): dump the distinct field
        values found in ua.bin to text files and generate Enum source files
        from them."""
        # C_K: top-level field name -> set of string values seen.
        # C_C: nested-dict field name -> set of string values seen.
        C_K = {}
        C_C = {}
        for i, x in enumerate(self.user_agents):
            for kk, kv in x.items():
                print({type(kv): kv})
                C_K[kk] = C_K.get(kk) if kk in C_K.keys() else set()
                if type(kv) == str and kv:
                    C_K[kk].add(kv)
                if type(kv) == dict:
                    for ck, cv in kv.items():
                        C_C[ck] = C_C.get(ck) if ck in C_C.keys() else set()
                        if type(cv) == str and cv:
                            C_C[ck].add(cv)
        print(C_C.keys())
        # Dump field names plus one value-list file per field.
        write_file('A.txt', str('\n').join(C_K.keys()))
        for k, v in C_K.items():
            if len(v):
                write_file(f'A-{k}.txt', str('\n').join(list(v)))
        write_file('B.txt', str('\n').join(C_C.keys()))
        for k, v in C_C.items():
            if len(v):
                write_file(f'B-{k}.txt', str('\n').join(list(v)))
        # Second pass: turn each value-list file into an Enum module.
        al = ['A.txt', 'B.txt']
        for x in al:
            print(x)
            if os.path.isfile(x):
                ls = open(x).read().splitlines()
                h = x.rstrip('.txt')
                for c in ls:
                    cx = f'{h}-{c}.txt'
                    print({cx: os.path.isfile(cx)})
                    if os.path.isfile(cx):
                        # Derive a CamelCase class name and snake_case file name.
                        ad = str(re.sub("[^0-9a-zA-Z]", " ", c, 0, re.IGNORECASE)).capitalize()
                        ad = str(re.sub("[^0-9a-zA-Z]", "", cap_sentence(ad.strip()), 0, re.IGNORECASE))
                        an = str(re.sub("[^0-9a-zA-Z]", "_", c, 0, re.IGNORECASE))
                        fn = f'{str(an).lower()}.py'
                        ss = open(cx).read().splitlines()
                        aa = f"""from enum import Enum\n\nclass {ad}(Enum):"""
                        cuks = set()
                        for ln in ss:
                            # Enum member names must be unique identifiers that
                            # do not start with a digit; leading digits are
                            # spelled out via num_to_alpha.
                            cuk = str(re.sub("[^0-9a-zA-Z]", "_", ln, 0, re.IGNORECASE)).upper()
                            if cuk in cuks:
                                continue
                            match = re.search(r"^(\d+)([^\n]+)?", cuk, re.IGNORECASE)
                            if match:
                                c_a, c_b = match.groups()
                                mod = str('_').join(num_to_alpha(c_a).split(','))
                                mods = [mod,
                                        str(re.sub("[^0-9a-zA-Z]", "_", c_b, 0, re.IGNORECASE)).upper()] if c_b else [
                                    mod]
                                cuk = str('_').join(mods).upper()
                                cuk = re.sub("(_){1,}", r"\1", cuk, 0, re.IGNORECASE)
                            aa += f"""\n\t{cuk}='{ln}'"""
                            cuks.add(cuk)
                        write_file(fn, aa)

    def _parse_kwargs(self, **kwargs):
        """Return the 'ua' strings of all records matching every criterion.

        A criterion of None is skipped.  Boolean criteria are matched inside
        each record's nested dicts; other values match either a top-level
        field or any value of a nested dict.  Errors are logged and yield an
        empty result rather than propagating.
        """
        flag = []
        try:
            current = self.user_agents
            for k, v in kwargs.items():
                try:
                    # Enum criteria compare by their underlying value.
                    v = v.value if hasattr(v, 'value') else v
                    if v is None:
                        continue
                    if type(v) == bool:
                        filtered = []
                        for x in current:
                            for vv in x.values():
                                if type(vv) == dict and k in vv.keys():
                                    if vv[k] == v:
                                        filtered.append(x)
                    else:
                        filtered = [x for x in current if
                                    k in x.keys() and x[k] and (
                                        v in x[k].values() if type(x[k]) == dict else x[k] == v)]
                    current = filtered
                except Exception as error:
                    logging.exception(
                        ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
            flag = [x.get('ua') for x in current]
        except Exception as error:
            logging.exception(
                ''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
        finally:
            return flag
if __name__ == '__main__':
    # Quick manual check: print two random user agents from the full database.
    ag = Agent()
    print(ag.get_random())
    print(ag.get_random())
| StarcoderdataPython |
240808 | import torch
import torch.nn as nn
from torchvision import models
def double_conv(in_channels, out_channels):
    """Two stacked 3x3 same-padding convolutions, each followed by ReLU.

    The first conv maps in_channels -> out_channels, the second keeps
    out_channels; spatial size is preserved (padding=1).
    """
    layers = []
    for channels in (in_channels, out_channels):
        layers.append(nn.Conv2d(channels, out_channels, 3, padding=1))
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style).

    The shortcut is the identity when shapes already match; otherwise a
    1x1 strided conv + batch norm projects the input.
    """

    # Output-channel multiplier (1 for basic blocks, >1 for bottlenecks).
    expansion = 1

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        expanded = out_channels * BasicBlock.expansion
        # residual function
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, expanded, kernel_size=3,
                      padding=1, bias=False),
            nn.BatchNorm2d(expanded),
        )
        # shortcut
        if stride == 1 and in_channels == expanded:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, expanded, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(expanded),
            )

    def forward(self, x):
        return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResNet(nn.Module):
    """ResNet encoder with a U-Net-style decoder for dense prediction.

    The encoder follows the standard ResNet layout (7x7/2 stem, max-pool,
    four residual stages); the decoder repeatedly upsamples, concatenates
    the matching encoder feature map and applies a double conv.

    Fixes relative to the original:
    * ``self.ayer`` / ``self.layer`` typos -> ``self.make_layer`` (the class
      previously raised AttributeError in ``__init__``).
    * ``conv2_x`` uses stride 1, as in a standard ResNet; with stride 2 the
      decoder's skip-connection tensors can never match in size.
    * ``forward`` now returns the output tensor (it previously fell off the
      end and returned None).
    """

    def __init__(self, in_channel, out_channel, block, num_block):
        """
        :param in_channel: channels of the input image
        :param out_channel: channels of the produced prediction map
        :param block: residual block class exposing an ``expansion`` attribute
        :param num_block: blocks per stage, e.g. [3, 4, 6, 3] for ResNet-34
        """
        super().__init__()
        self.in_channels = 64
        # Stem: 7x7 stride-2 conv halves the resolution.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Residual stages; only stages 3-5 downsample.
        self.conv2_x = self.make_layer(block, 64, num_block[0], 1)
        self.conv3_x = self.make_layer(block, 128, num_block[1], 2)
        self.conv4_x = self.make_layer(block, 256, num_block[2], 2)
        self.conv5_x = self.make_layer(block, 512, num_block[3], 2)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Decoder convs consume [upsampled, skip] concatenations.
        self.dconv_up3 = double_conv(256 + 512, 256)
        self.dconv_up2 = double_conv(128 + 256, 128)
        self.dconv_up1 = double_conv(128 + 64, 64)
        self.dconv_last = nn.Sequential(
            nn.Conv2d(128, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(64, out_channel, 1)
        )

    def make_layer(self, block, out_channels, num_blocks, stride):
        """Stack *num_blocks* residual blocks; only the first uses *stride*."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        # Encoder path, keeping the skip features.
        conv1 = self.conv1(x)
        temp = self.maxpool(conv1)
        conv2 = self.conv2_x(temp)
        conv3 = self.conv3_x(conv2)
        conv4 = self.conv4_x(conv3)
        bottle = self.conv5_x(conv4)
        # Decoder path: upsample, concatenate the skip, double conv.
        x = self.upsample(bottle)
        x = torch.cat([x, conv4], dim=1)
        x = self.dconv_up3(x)
        x = self.upsample(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.dconv_up2(x)
        x = self.upsample(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.dconv_up1(x)
        x = self.upsample(x)
        x = torch.cat([x, conv1], dim=1)
        out = self.dconv_last(x)
        return out
def resnet34(in_channel, out_channel):
    """Build the ResNet-34 flavour ([3, 4, 6, 3] BasicBlocks) of the model."""
    model = ResNet(in_channel, out_channel, BasicBlock, [3, 4, 6, 3])
    return model
| StarcoderdataPython |
9693226 | <gh_stars>1-10
import os
import sys
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def resource_path(another_way):
    """Resolve *another_way* to an absolute-ish path that works both frozen
    (PyInstaller .exe) and when run from a plain interpreter."""
    try:
        # Inside a PyInstaller onefile bundle, sys._MEIPASS points at the
        # temporary extraction directory created at runtime.
        base_dir = sys._MEIPASS
    except Exception:
        # Plain interpreter run: resolve relative to this source file.
        base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, another_way)
def test_driver1():
    """Smoke test: start a headless Chrome 89 via the bundled chromedriver."""
    options = Options()
    options.add_argument("--log-level=3")
    options.add_argument("--headless")
    # NOTE(review): Chrome's switch is "user-agent=..." (lower case); this
    # capitalised form is likely passed through as an unknown switch -- verify.
    options.add_argument(
        "User-Agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36"
    )
    # resource_path resolves the driver both frozen (PyInstaller) and unfrozen.
    driver = webdriver.Chrome(resource_path("./chrome89.exe"), options=options)
    driver.implicitly_wait(5)
| StarcoderdataPython |
94724 | <reponame>digitalinteraction/openmovement-python<filename>src/openmovement/load/base_data.py
"""
Base class for timeseries data loader
"""
from abc import ABC, abstractmethod
class BaseData(ABC):
    """Abstract base class for timeseries data loaders.

    Instances support the context-manager protocol and iteration over the
    raw sample values.
    """
    def __init__(self, filename, verbose=False):
        """
        Construct a data object from a file.
        :param filename: The path to the source file.
        :param verbose: Output more detailed information.
        """
        self.filename = filename
        self.verbose = verbose
        pass
    # Nothing to do at start of 'with'
    def __enter__(self):
        return self
    # Close handle at end of 'with'
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    # Close handle when destructed
    def __del__(self):
        self.close()
    # Iterate over the raw sample values (see get_sample_values()).
    def __iter__(self):
        return iter(self.get_sample_values())
    @abstractmethod
    def close(self):
        # Release any underlying resources; must be safe to call repeatedly
        # (invoked from both __exit__ and __del__).
        pass
    @abstractmethod
    def get_sample_values(self):
        """
        Get the sample values as a single ndarray.
        :returns: An ndarray of (time, accel_x, accel_y, accel_z) or (time, accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z)
        where 'time' is in seconds since the epoch.
        """
        pass
    @abstractmethod
    def get_samples(self, use_datetime64=True):
        """
        Return a DataFrame for (time, accel_x, accel_y, accel_z) or (time, accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z)
        :param use_datetime64: (Default) time is in datetime64[ns]; otherwise in seconds since the epoch.
        """
        pass
    # Time of first sample (seconds since epoch)
    @abstractmethod
    def get_start_time(self):
        pass
    # Nominal sample rate in Hz.
    @abstractmethod
    def get_sample_rate(self):
        pass
    # The total number of samples (only an estimate if not all loaded)
    @abstractmethod
    def get_num_samples(self):
        pass
| StarcoderdataPython |
5117362 | from fastai.text.all import *
import torch
from transformers import GPT2TokenizerFast, GPT2LMHeadModel
import pandas as pd
import sys
# Shared GPT-2 tokenizer used by tokenize() and generate() below.
pretrained_weights = 'gpt2'
tokenizer = GPT2TokenizerFast.from_pretrained(pretrained_weights)
def tokenize(text):
    """Encode *text* into a 1-D tensor of GPT-2 token ids."""
    pieces = tokenizer.tokenize(text)
    return tensor(tokenizer.convert_tokens_to_ids(pieces))
class TransformersTokenizer(Transform):
    # fastai Transform adapter around a HuggingFace tokenizer.
    def __init__(self, tokenizer): self.tokenizer = tokenizer
    def encodes(self, x):
        # Already-encoded tensors pass through untouched; raw strings are tokenized.
        return x if isinstance(x, Tensor) else tokenize(x)
    def decodes(self, x): return TitledStr(self.tokenizer.decode(x.cpu().numpy()))
class DropOutput(Callback):
    # fastai callback: keep only the logits from the HF model's output tuple.
    def after_pred(self):self.learn.pred=self.pred[0]
# Artist tags; each has a fine-tuned learner saved at models/<artist>.pkl.
artists=['kanye','beiber','ariana','drake','miley','21pilots','adele','21savage','backstreet','coldplay','edsheeran','eminem','linkinpark','weeknd','taylor']
def generate(prompt, model):
    """Sample up to 90 tokens from the fine-tuned GPT-2 model named *model*, seeded with *prompt*."""
    learn = load_learner('models/{}.pkl'.format(model))
    prompt_ids = tokenizer.encode(prompt)
    inp = tensor(prompt_ids)[None]  # add a batch dimension; #.cuda() if a GPU is available
    preds = learn.model.generate(
        inp,
        do_sample=True,
        max_length=90,
        min_length=5,
        top_k=40,
        num_return_sequences=1,
    )
    decoded = tokenizer.decode(preds[0].cpu().tolist())
    return decoded.replace("\"\"", "").replace("\n\n", "\n")
# Smoke-test every artist model with a dummy prompt.
for i in artists:
    print(i)
    print(generate("Test ",i).replace("\"\"",""))
    print()
| StarcoderdataPython |
10586 | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): Model/Input/Int32Scalar/Output/Example look like names injected
# by the Android NNAPI test generator harness, not defined in this file — confirm.
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
pad0 = Int32Scalar("pad0", 0)  # zero padding on every side
act = Int32Scalar("act", 0)  # activation: none
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D",
                        i1, f1, b1,
                        pad0, pad0, pad0, pad0,
                        stride, stride,
                        cm, act).To(output)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
          [10, 21, 10, 22, 10, 23,
           10, 24, 10, 25, 10, 26,
           10, 27, 10, 28, 10, 29],
          f1:
          [.25, 0, .2, 0,
           .25, 0, 0, .3,
           .25, 0, 0, 0,
           .25, .1, 0, 0],
          b1:
          [1, 2, 3, 4]}
# Expected result is (i1 (conv) f1) + b1.
# filter usage (channelMultiplier == 2 maps each input channel to 2 filters):
# in_ch1 * f_1 --> output_d1
# in_ch1 * f_2 --> output_d2
# in_ch2 * f_3 --> output_d3
# in_ch3 * f_4 --> output_d4
output0 = {output: # output 0
           [11, 3, 7.2, 10.6,
            11, 3, 7.4, 10.9,
            11, 3, 7.8, 11.5,
            11, 3, 8.0, 11.8]}
# Instantiate an example
Example((input0, output0))
| StarcoderdataPython |
4810257 | <filename>atividade2/util.py
# -*- coding: utf-8 -*-
'''
Helper methods used by more than one exercise live here.
'''
# Method definitions
def truncar(valor):
    """Clamp *valor* to the valid 8-bit pixel range [0.0, 255.0].

    Values below 0.0 saturate to 0.0, values above 255.0 saturate to 255.0,
    and anything in between is returned unchanged.  Replaces the original
    if/elif chain with the idiomatic min/max clamp.
    """
    return min(max(valor, 0.0), 255.0)
| StarcoderdataPython |
8169798 | <reponame>harry-consulting/SAEF1
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from model_bakery import baker
from users.models import User, ObjectPermission
from util.test_util import ClientLoginTestCase, ClientLoginDatalakeTestCase
class UserManagerTests(TestCase):
    """Tests for the custom user manager's create_user/create_superuser factories."""

    def test_create_user(self):
        # A plain user starts inactive with no staff/superuser privileges.
        user = User.objects.create_user(email="<EMAIL>", password="<PASSWORD>")
        self.assertEqual(user.email, "<EMAIL>")
        self.assertFalse(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_superuser(self):
        # A superuser is active and carries both staff and superuser flags.
        admin_user = User.objects.create_superuser(email="<EMAIL>", password="<PASSWORD>")
        self.assertEqual(admin_user.email, "<EMAIL>")
        self.assertTrue(admin_user.is_active)
        self.assertTrue(admin_user.is_staff)
        self.assertTrue(admin_user.is_superuser)
def create_basic_groups():
    """Create the SAEF organization plus a group tree: All -> (Admin, I.T. -> Team A).

    :returns: the (all_group, admin_group, it_group, team_a) group instances.
    """
    baker.make("users.Organization", name="SAEF")
    all_group = baker.make("users.OrganizationGroup", name="All")
    admin_group = baker.make("users.OrganizationGroup", name="Admin", parent=all_group)
    it_group = baker.make("users.OrganizationGroup", name="I.T.", parent=all_group)
    team_a = baker.make("users.OrganizationGroup", name="Team A", parent=it_group)
    return all_group, admin_group, it_group, team_a
class ObjectPermissionTests(ClientLoginTestCase):
    """Tests for ObjectPermission helpers (string form, target object, permitted groups)."""

    def setUp(self):
        super(ObjectPermissionTests, self).setUp()
        self.all_group, self.admin_group, self.it_group, self.team_a = create_basic_groups()
        # NOTE(review): ObjectPermission rows appear to be auto-created when the
        # job is created (a signal, presumably) — confirm against the models app.
        self.job = baker.make("jobs.Job", name="test", owner=self.user)
        self.level_2_perm = ObjectPermission.objects.filter(can_update=True).first()

    def test_get_permission_string(self):
        self.assertEqual("Can View/Update/Delete/Execute", self.level_2_perm.get_permission_string())

    def test_get_object(self):
        self.assertEqual(self.job, self.level_2_perm.get_object())

    def test_get_permitted_groups(self):
        self.all_group.object_permissions.add(self.level_2_perm)
        self.it_group.object_permissions.add(self.level_2_perm)
        # Note that the "Admin" groups gets the permissions by default.
        self.assertEqual(["Admin", "All", "I.T."], self.level_2_perm.get_permitted_groups())
class OrganizationGroupTests(ClientLoginTestCase):
    """Tests for OrganizationGroup tree traversal, membership and permission inheritance."""

    def setUp(self):
        super(OrganizationGroupTests, self).setUp()
        self.all_group, self.admin_group, self.it_group, self.team_a = create_basic_groups()

    def test_get_ancestors(self):
        # Team A sits under I.T. which sits under All.
        self.assertCountEqual([self.it_group, self.all_group], self.team_a.get_ancestors())

    def test_get_all_children(self):
        self.assertCountEqual([self.admin_group, self.it_group, self.team_a], self.all_group.get_all_children())

    def test_get_members(self):
        """Members of a group should include the members of the child groups."""
        user_2 = baker.make("users.User")
        self.user.organization_groups.add(self.it_group)
        user_2.organization_groups.add(self.team_a)
        self.assertCountEqual([self.user, user_2], self.it_group.get_members())

    def test_get_permissions(self):
        """Permissions of a group should include the permissions of the groups hierarchically above the group."""
        baker.make("jobs.Job", name="test", owner=self.user)
        self.all_group.object_permissions.set(ObjectPermission.objects.all())
        self.assertCountEqual(list(ObjectPermission.objects.all()), self.it_group.get_permissions())
class UserTests(ClientLoginDatalakeTestCase):
    """Tests for User permission lookups, combining group-level and user-level grants."""

    def setUp(self):
        super(UserTests, self).setUp()
        self.all_group, self.admin_group, self.it_group, self.team_a = create_basic_groups()
        self.user.organization_groups.add(self.all_group)
        self.user.organization_groups.add(self.it_group)

    def set_permissions(self, user, group):
        # Grant job_1's permissions through the group and job_2's directly to the user.
        job_1 = baker.make("jobs.Job", name="test 1", owner=user)
        job_2 = baker.make("jobs.Job", name="test 2", owner=user)
        group.object_permissions.set(ObjectPermission.objects.filter(object_id=job_1.id))
        user.object_permissions.set(ObjectPermission.objects.filter(object_id=job_2.id))
        return job_1, job_2

    def create_objects(self, user):
        """Create a connection, dataset and job object and return the created level 2 permissions that the owner has."""
        connection = baker.make("datasets.Connection", name="test", owner=user, type="POSTGRES")
        dataset = baker.make("datasets.Dataset", name="test", owner=user, type="TABLE")
        job = baker.make("jobs.Job", name="test", owner=user)
        connection_perm = ObjectPermission.objects.get(content_type=ContentType.objects.get(model="connection"),
                                                       object_id=connection.id, can_update=True)
        dataset_perm = ObjectPermission.objects.get(content_type=ContentType.objects.get(model="dataset"),
                                                    object_id=dataset.id, can_update=True)
        job_perm = ObjectPermission.objects.get(content_type=ContentType.objects.get(model="job"),
                                                object_id=job.id, can_update=True)
        return connection_perm, dataset_perm, job_perm

    def test_get_group_names(self):
        self.assertCountEqual(["All", "I.T."], sorted(self.user.get_group_names()))

    def test_get_group_object_permissions(self):
        """Should only include the permissions of the users groups and not the users own permissions."""
        job_1, job_2 = self.set_permissions(self.user, self.it_group)
        self.assertCountEqual(list(ObjectPermission.objects.filter(object_id=job_1.id)),
                              self.user.get_group_object_permissions())

    def test_get_all_object_permissions(self):
        # Union of group-level and user-level grants.
        self.set_permissions(self.user, self.it_group)
        self.assertCountEqual(list(ObjectPermission.objects.all()), self.user.get_all_object_permissions())

    def test_has_permission(self):
        job_1, job_2 = self.set_permissions(self.user, self.it_group)
        job_3 = baker.make("jobs.Job", name="test")  # no permissions granted on this one
        self.assertFalse(self.user.has_permission("update_job", job_3))
        self.assertTrue(self.user.has_permission("update_job", job_1))

    def test_get_grouped_permissions(self):
        connection_perm, dataset_perm, job_perm = self.create_objects(self.user)
        expected_grouped_permissions = {"connection (level 2)": [connection_perm], "dataset (level 2)": [dataset_perm],
                                        "job (level 2)": [job_perm]}
        self.assertEqual(expected_grouped_permissions, self.user.get_grouped_permissions())

    def test_get_grouped_permission_ids(self):
        connection_perm, dataset_perm, job_perm = self.create_objects(self.user)
        expected_grouped_permission_ids = {"connection": {"level_2": [connection_perm.object_id]},
                                           "dataset": {"level_2": [dataset_perm.object_id]},
                                           "job": {"level_2": [job_perm.object_id]}}
        self.assertEqual(expected_grouped_permission_ids, self.user.get_grouped_permission_ids())
| StarcoderdataPython |
3331156 | <reponame>fyancy/Meta-Learning-in-Fault-Diagnosis
"""
Relation Networks programmed by <NAME>. (2021/8/30)
"""
import torch
import numpy as np
import learn2learn as l2l
import visdom
import os
import time
from Models.RelationNet.relation_model import encoder_net, relation_net
from Datasets.cwru_data import MAML_Dataset
from my_utils.train_utils import accuracy
from my_utils.init_utils import weights_init2
# Visdom dashboard for live loss/accuracy curves; requires a running visdom server.
vis = visdom.Visdom(env='yancy_meta')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class RelationNet_learner(object):
    """Episodic trainer/evaluator for Relation Networks on fault-diagnosis data.

    Wraps an embedding CNN (``encoder_net``) and a relation head
    (``relation_net``) and drives meta-training, validation and testing over
    N-way/K-shot tasks sampled with learn2learn.

    Bug fix versus the original: ``model_save`` printed the literal text
    ``(unknown)`` instead of the path the checkpoint was written to.
    """

    def __init__(self, ways):
        self.feature = encoder_net(in_chn=1, hidden_chn=64, cb_num=4).to(device)
        # Flattened embedding size: a 1024-long signal shrunk by 4 conv blocks
        # (/2**4) and by two further /2 stages, times 64 channels.
        embed_size = (1024//2**4)//2**2*64
        self.relation = relation_net(hidden_chn=64, embed_size=embed_size, h_size=256).to(device)
        self.ways = ways

    def fast_adapt(self, batch, loss_fun, query_num, shots, ways):
        """Run one episodic task and return ``(loss, accuracy)`` on its query set."""
        data, labels = batch
        data, labels = data.to(device), labels.to(device)
        # Sort by label so support/query indices can be computed arithmetically.
        sort = torch.sort(labels)
        data = data.squeeze(0)[sort.indices].squeeze(0)
        labels = labels.squeeze(0)[sort.indices].squeeze(0)
        support_indices = np.zeros(data.size(0), dtype=bool)
        selection = np.arange(ways) * (shots + query_num)  # 0, shot+q, 2*(shot+q), ...
        for offset in range(shots):
            support_indices[selection + offset] = True  # first `shots` items of each way
        query_indices = torch.from_numpy(~support_indices)
        support_indices = torch.from_numpy(support_indices)
        embeddings = self.feature(data)
        support = embeddings[support_indices]  # (n_support, chn, length)
        query = embeddings[query_indices]  # (n_query, chn, length)
        labels = labels[query_indices].long()  # (n_query)
        # Class prototype = mean embedding over the support shots of each way.
        support = support.reshape(ways, shots, *support.shape[-2:]).mean(dim=1)  # (ways, chn, length)
        support = support.unsqueeze(0).repeat(query.shape[0], 1, 1, 1)  # (n_q, ways, chn, length)
        query = query.unsqueeze(1).repeat(1, ways, 1, 1)  # (n_q, ways, chn, length)
        # Concatenate each (prototype, query) pair along channels and score it.
        relation_pairs = torch.cat((support, query), 2).reshape(query.shape[0]*ways, -1, query.shape[-1])
        scores = self.relation(relation_pairs).reshape(-1, ways)  # (n_q, ways)
        error = loss_fun(scores, labels)
        acc = accuracy(scores, labels)
        return error, acc

    @staticmethod
    def build_tasks(mode='train', ways=10, shots=5, num_tasks=100, filter_labels=None):
        """Build a learn2learn TaskDataset of episodic tasks with 2*shots samples
        (support + query) per way, remapped to labels 0..n-1 per task."""
        dataset = l2l.data.MetaDataset(MAML_Dataset(mode=mode, ways=ways))
        new_ways = len(filter_labels) if filter_labels is not None else ways
        assert shots * 2 * new_ways <= dataset.__len__() // ways * new_ways, "Reduce the number of shots!"
        tasks = l2l.data.TaskDataset(dataset, task_transforms=[
            l2l.data.transforms.FusedNWaysKShots(dataset, new_ways, 2 * shots, filter_labels=filter_labels),
            l2l.data.transforms.LoadData(dataset),
            # Do not keep the original labels; use (0, ..., n-1) shuffled per task.
            l2l.data.transforms.RemapLabels(dataset, shuffle=True),
            # Re-order samples so the remapped labels are consecutive.
            l2l.data.transforms.ConsecutiveLabels(dataset),
        ], num_tasks=num_tasks)
        return tasks

    def model_save(self, path):
        """Save the feature/relation state dicts to *path* ('(1)' appended if taken)."""
        filename = path+'(1)' if os.path.exists(path) else path
        state_dict = {
            'feature': self.feature.state_dict(),
            'relation': self.relation.state_dict(),
        }
        torch.save(state_dict, filename)
        # Fixed: report the actual checkpoint path instead of a placeholder.
        print(f'Save model at: {filename}')

    def train(self, save_path, shots):
        """Meta-train with periodic validation; after epoch 200 interactively
        offer to stop or checkpoint every second epoch."""
        train_ways = valid_ways = self.ways
        query_num = shots
        print(f"{train_ways}-ways, {shots}-shots for training ...")
        train_tasks = self.build_tasks('train', train_ways, shots, 1000, None)
        valid_tasks = self.build_tasks('validation', valid_ways, shots, 50, None)
        self.feature.apply(weights_init2)
        self.relation.apply(weights_init2)
        # Relation head learns 10x faster than the encoder.
        optimizer_f = torch.optim.Adam(self.feature.parameters(), lr=0.0001, weight_decay=2e-5)
        optimizer_r = torch.optim.Adam(self.relation.parameters(), lr=0.001, weight_decay=2e-5)
        lr_scheduler_r = torch.optim.lr_scheduler.ExponentialLR(optimizer_r, gamma=0.99)
        loss_fun = torch.nn.CrossEntropyLoss()
        Epochs = 10000
        Episodes = 40
        counter = 0
        for ep in range(Epochs):
            # 1) training:
            t0 = time.time()
            self.feature.train(), self.relation.train()
            meta_train_error = 0.0
            meta_train_accuracy = 0.0
            for epi in range(Episodes):
                batch = train_tasks.sample()
                loss, acc = self.fast_adapt(batch, loss_fun, query_num, shots, train_ways)
                meta_train_error += loss.item()
                meta_train_accuracy += acc.item()
                optimizer_f.zero_grad()
                optimizer_r.zero_grad()
                loss.backward()
                # Clip gradients to stabilise episodic training.
                torch.nn.utils.clip_grad_norm_(self.feature.parameters(), 0.5)
                torch.nn.utils.clip_grad_norm_(self.relation.parameters(), 0.5)
                optimizer_f.step()
                optimizer_r.step()
            lr_scheduler_r.step()
            t1 = time.time()
            print(f'*** Time /epoch {t1-t0:.3f} ***')
            print(f'epoch {ep+1}, train, loss: {meta_train_error/Episodes:.3f}, '
                  f'acc: {meta_train_accuracy/Episodes:.3f}')
            # 2) validation:
            self.feature.eval(), self.relation.eval()
            meta_valid_error = 0.0
            meta_valid_accuracy = 0.0
            for i, batch in enumerate(valid_tasks):
                with torch.no_grad():
                    loss, acc = self.fast_adapt(batch, loss_fun, query_num, shots, train_ways)
                    meta_valid_error += loss.item()
                    meta_valid_accuracy += acc.item()
            print(f'epoch {ep + 1}, validation, loss: {meta_valid_error / len(valid_tasks):.4f}, '
                  f'acc: {meta_valid_accuracy / len(valid_tasks):.4f}\n')
            # Live curves on the visdom dashboard.
            vis.line(Y=[[meta_train_error / Episodes, meta_valid_error / len(valid_tasks)]], X=[counter],
                     update=None if counter == 0 else 'append', win='Loss_RelationNet',
                     opts=dict(legend=['train', 'val'], title='Loss_RelationNet'))
            vis.line(Y=[[meta_train_accuracy / Episodes, meta_valid_accuracy / len(valid_tasks)]], X=[counter],
                     update=None if counter == 0 else 'append', win='Acc_RelationNet',
                     opts=dict(legend=['train', 'val'], title='Acc_RelationNet'))
            counter += 1
            # Interactive checkpointing after a warm-up of 200 epochs.
            if (ep+1) >= 200 and (ep+1) % 2 == 0:
                if input('\n== Stop training? == (y/n)\n').lower() == 'y':
                    new_save_path = save_path + rf'_ep{ep + 1}'
                    self.model_save(new_save_path)
                    break
                elif input('\n== Save model? == (y/n)\n').lower() == 'y':
                    new_save_path = save_path + rf'_ep{ep + 1}'
                    self.model_save(new_save_path)

    def test(self, load_path, shots):
        """Load a checkpoint and report mean loss/accuracy over 1000 test tasks."""
        state_dict = torch.load(load_path)
        self.feature.load_state_dict(state_dict['feature'])
        self.relation.load_state_dict(state_dict['relation'])
        print(f'Load Model successfully from [{load_path}]...')
        test_ways = self.ways
        query_num = shots
        print(f"{test_ways}-ways, {shots}-shots for testing ...")
        test_tasks = self.build_tasks('test', test_ways, shots, 1000, None)
        loss_fun = torch.nn.CrossEntropyLoss()
        self.feature.eval(), self.relation.eval()
        meta_valid_error = 0.0
        meta_valid_accuracy = 0.0
        t0 = time.time()
        for i, batch in enumerate(test_tasks):
            with torch.no_grad():
                loss, acc = self.fast_adapt(batch, loss_fun, query_num, shots, test_ways)
                meta_valid_error += loss.item()
                meta_valid_accuracy += acc.item()
        t1 = time.time()
        print(f"*** Time for {len(test_tasks)} tasks: {t1 - t0:.4f} (s)")
        print(f'Testing, loss: {meta_valid_error / len(test_tasks):.4f}, '
              f'acc: {meta_valid_accuracy / len(test_tasks):.4f}')
if __name__ == "__main__":
from my_utils.init_utils import seed_torch
seed_torch(2021)
# Net = RelationNet_learner(ways=10) # T1
Net = RelationNet_learner(ways=4) # T2
if input('Train? y/n\n').lower() == 'y':
# path = r"G:\model_save\meta_learning\RelationNet\5shot\RelationNet_C30"
# Net.train(save_path=path, shots=5)
# path = r"G:\model_save\meta_learning\RelationNet\1shot\RelationNet_C30"
# Net.train(save_path=path, shots=1)
# path = r"G:\model_save\meta_learning\RelationNet\5shot\RelationNet_T2"
# Net.train(save_path=path, shots=5)
path = r"G:\model_save\meta_learning\RelationNet\1shot\RelationNet_T2"
Net.train(save_path=path, shots=1)
if input('Test? y/n\n').lower() == 'y':
# load_path = r"G:\model_save\meta_learning\RelationNet\5shot\RelationNet_C30_ep200"
# Net.test(load_path, shots=5)
# load_path = r"G:\model_save\meta_learning\RelationNet\1shot\RelationNet_C30_ep252"
# Net.test(load_path, shots=1)
# load_path = r"G:\model_save\meta_learning\RelationNet\5shot\RelationNet_T2_ep394"
# Net.test(load_path, shots=5)
load_path = r"G:\model_save\meta_learning\RelationNet\1shot\RelationNet_T2_ep284"
Net.test(load_path, shots=1)
| StarcoderdataPython |
4937767 | # -*- coding: utf-8 -*-
# !/usr/bin/env python3
class ImportError(Exception):
    """Raised when required third-party packages are missing.

    NOTE(review): this deliberately(?) shadows the builtin ImportError within
    this module — confirm that is intended.
    """

    def __init__(self, fails):
        hint = " " * 24 + "$ pip3 install <package>"
        message = "You need to install the following packages:" + str(fails) + "\n" + hint
        super().__init__(message)
class UnsupportedHashingAlgorythm(Exception):
    """Raised when *function* is asked to use a hashing algorithm it does not support.

    Bug fix: the original called ``super(ImportError, self).__init__`` — but this
    class does not inherit from the local ImportError, so constructing it raised
    ``TypeError``.  It also passed the message as several positional arguments
    (print-style); they are now joined into a single string.
    """

    def __init__(self, function, algorythm, supported):
        message = ("The " + str(function) + " does not support the " + str(algorythm)
                   + ".\nSupported algorythms: " + str(supported))
        super().__init__(message)
class StrangeError(Exception):
    """Raised when an undefined error *e* occurs inside *function*.

    Bug fix: same wrong ``super(ImportError, self)`` call as the sibling class
    (raised ``TypeError`` on construction), fixed to ``super()``, with the
    print-style argument list joined into one message ('occured' typo fixed).
    """

    def __init__(self, function, e):
        super().__init__(str(function) + " - An undefined error occurred!\n" + str(e))
| StarcoderdataPython |
117065 | <filename>cloud2/test.py
from multiprocessing import Queue
import cv2
if __name__ == '__main__':
    # Grab a single frame from the default camera, flip it 180 degrees and
    # display it until a key is pressed.
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    frame = cv2.rotate(frame, cv2.ROTATE_180)
    cv2.imshow('image', frame)
    cv2.waitKey(0)
    # Earlier Queue experiment, kept for reference:
    # q = Queue(maxsize=1)
    # print('Add first message')
    # q.put([1, 2])
    # # print('Add second message')
    # # q.put([3, 4])
    #
    # print('Getting message')
    # a = q.get()
    # print('A', a)
| StarcoderdataPython |
1974941 | # coding: utf-8
"""Logging tools, built upon those from the logging standard library."""
import logging
import functools
import inspect
import os
from yaptools import check_type_validity
# Map from level-name strings (as accepted by Logger.log) to stdlib logging constants.
LOGGING_LEVELS = {
    'debug': logging.DEBUG, 'info': logging.INFO,
    'warning': logging.WARNING, 'warn': logging.WARN,
    'error': logging.ERROR, 'fatal': logging.FATAL,
    'critical': logging.CRITICAL
}
class Logger(logging.Logger):
    """Class adding a handler builder to logging.Logger.

    Bug fix: ``logging.handlers`` is a submodule that is NOT imported by a bare
    ``import logging``; the 'queue' handler kind previously raised
    ``AttributeError`` unless another library had imported it first.
    """

    def __init__(self, name=None, folder='.', level=1, config=None):
        """Initialize the logger and set up its handlers.
        name : optional logger name (str, default None)
        folder : reference folder to write log files to (default '.')
        level : minimum and default logging level (int, default 1)
        config : optional list of dict specifying handlers' configuration
        (see below for details -- default: stream handler)
        Each dict in handlers may contain the following keys:
        kind : kind of handler (required, either 'stream', 'file' or 'queue')
        path : file path ('file' kind) or queue object ('queue' kind)
        levels : optional list of record levels (int or str) to accept
        """
        # Check basic arguments validity and primarily setup the logger.
        check_type_validity(name, (str, type(None)), 'name')
        check_type_validity(level, int, 'level')
        level = max(0, level)
        super().__init__(name, level)
        # Set up and assign the folder attribute, creating the folder if needed.
        if not os.path.exists(folder):
            os.makedirs(folder)
        elif not os.path.isdir(folder):
            raise ValueError("'%s' is not a folder." % folder)
        self.folder = os.path.abspath(folder)
        # Set up and assign the config attribute.
        check_type_validity(config, (list, type(None)), 'config')
        if config is None:
            config = [{'kind': 'stream'}]
        self.handlers_config = config
        # Named levels plus a 'default' alias that resolves to this logger's level.
        self._levels = LOGGING_LEVELS.copy()
        self._levels['default'] = self.level
        if not self.hasHandlers():
            self.setup()

    def setup(self):
        """Set up the logger's handlers from self.handlers_config."""
        for config in self.handlers_config:
            handler = self.build_handler(**config)
            self.addHandler(handler)

    def build_handler(self, kind, path=None, levels=None):
        """Return a logging handler of given configuration.
        kind : kind of handler, either 'stream', 'file' or 'queue'
        path : None, file path or queue (depending on kind)
        levels : optional restrictive list of record levels to accept
        """
        # Set up the handler.
        if kind == 'stream':
            handler = logging.StreamHandler()
        elif kind == 'file':
            assert path is not None, 'FileHandler requires a path.'
            handler = logging.FileHandler(path, mode='a')
        elif kind == 'queue':
            # Fixed: import the handlers submodule explicitly before use.
            import logging.handlers
            assert hasattr(path, 'put_nowait'), 'invalid Queue to handle.'
            handler = logging.handlers.QueueHandler(path)
        else:
            raise ValueError('Invalid handler kind: "%s"' % kind)
        # Add formatter and optional levels filter.
        if levels is not None:
            levels_filter = LevelsFilter(levels)
            handler.addFilter(levels_filter)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s :{} %(message)s'.format(
                '' if self.name == '' else ' %s :' % self.name
            )
        ))
        return handler

    def log(self, level, msg, *args, **kwargs):
        """Log 'msg' with the severity 'level' (int or str).
        See logging.log documentation for additional options.
        """
        if isinstance(level, str):
            # Unknown level names silently fall back to the logger's own level.
            level = self._levels.get(level, self.level)
        elif not isinstance(level, int):
            warning = (
                "Invalid 'level' argument passed to 'log': %s (type %s)."
                % (level, type(level).__name__)
            )
            super().log(logging.WARNING, warning)
            level = self.level
        super().log(level, msg, *args, **kwargs)
class LoggedObject:
    """Abstract class implementing logging methods.
    A class inheriting from this one will (and must) be dotted with
    a yaptools.logger.Logger 'logger' attribute which can be used
    through the shortcut method 'log' and whose default settings
    can be set by overriding the '_default_logger' property.
    The 'logger' attribute will also be made serializable, meaning
    that multiprocessed instances of inheriting classes will be able
    to commonly log through it.
    Usage:
    >>> class Foo(LoggedObject):
    ...     def __init__(self, x, logger=None):
    ...         self.x = x
    ...         super().__init__(logger)
    ...     @property
    ...     def _default_logger(self):  # Overriding this method is optional.
    ...         # optionally define a name, folder, level and/or configuration
    ...         return Logger(name, folder, level, config)
    ...
    >>> foo = Foo(42)
    >>> foo.log(foo.x, level='info')
    <asctime> : <foo.logger name> : 42
    """

    def __init__(self, logger=None):
        """Initialize the logger attribute."""
        if logger is None:
            self.logger = self._default_logger
        elif isinstance(logger, Logger):
            self.logger = logger
        else:
            raise TypeError("Invalid 'logger' type: %s." % type(logger))

    @property
    def _default_logger(self):
        """Return a default logger. Meant to be overriden."""
        return Logger()

    def __getstate__(self):
        """On pickling, remove the logger system."""
        statedict = self.__dict__.copy()
        statedict['_logger_name'] = self.logger.name
        statedict['_logger_folder'] = self.logger.folder
        statedict['_logger_config'] = self.logger.handlers_config.copy()
        del statedict['logger']
        return statedict

    def __setstate__(self, statedict):
        """On unpickling, restore the logger system."""
        # Bug fix: the config list was previously passed positionally into the
        # 'level' parameter of Logger(name, folder, level, config), which made
        # unpickling raise a type error; pass it by keyword instead.
        self.logger = Logger(
            statedict['_logger_name'], statedict['_logger_folder'],
            config=statedict['_logger_config']
        )
        del statedict['_logger_name']
        del statedict['_logger_folder']
        del statedict['_logger_config']
        self.__dict__.update(statedict)

    def log(self, msg, level='default'):
        """Log a given `msg` string with the severity `level` (str or int)."""
        self.logger.log(level, msg)

    def log_exception(self, exception, level='error'):
        """Log a given exception with a given severity level."""
        msg = '%s: %s.' % (type(exception), ';'.join(map(str, exception.args)))
        self.log(msg, level)
def loggedmethod(method):
    """Decorator for LoggedObject methods that guarantees exception logging.

    If the wrapped method raises, the exception's type, location and arguments
    are logged through the object's `log` method at 'error' level, and the
    exception is then re-raised unchanged.  Useful where unexpected errors do
    not interrupt execution but must still be notified.

    Usage:
    >>> class Foo(LoggedObject):
    ...     @loggedmethod
    ...     def bar(self, x):
    ...         if not isinstance(x, str);
    ...             raise TypeError('Expected "x" to be a str.')
    ...         self.log(x)
    ...
    >>> foo = Foo()
    >>> foo.bar('Some string.')
    <asctime> : Some string.
    >>> foo.bar(42)
    <asctime> : TypeError at `Foo.bar`: Expected "x" to be a str.
    TypeError: Expected "x" to be a str.
    """
    # Only methods (functions taking 'self') may be decorated.
    parameters = inspect.signature(method).parameters
    if 'self' not in parameters:
        raise RuntimeError(
            "Attempt at decorating a function with no 'self' argument "
            + "using '@logged_method'."
        )

    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        """Wrapped method ensuring exceptions logging before raising."""
        try:
            return method(self, *args, **kwargs)
        except Exception as exception:
            location = getattr(method, '__qualname__', method.__name__)
            details = ';'.join(map(str, exception.args))
            message = "%s at '%s': %s" % (
                type(exception).__name__, location, details
            )
            self.log(msg=message, level='error')
            raise exception

    return wrapper
class LevelsFilter(logging.Filter):
    """Logging filter that passes only records from an explicit allow-list of levels."""

    def __init__(self, levels):
        """Set up levels to filter out, provided as a list of int or str."""
        super().__init__()
        # A single level may be passed bare; normalise it to a one-item list.
        if isinstance(levels, (int, str)):
            levels = [levels]
        check_type_validity(levels, list, 'levels')
        self.levels = [self.format_level(lvl) for lvl in levels]

    @staticmethod
    def format_level(level):
        """Return the integer value of a given logging level (int or str)."""
        if isinstance(level, int):
            return level
        if not isinstance(level, str):
            raise TypeError(
                "Expected 'level' to be of type int or str, not %s."
                % type(level).__name__
            )
        if level not in LOGGING_LEVELS.keys():
            raise KeyError("Invalid level name: '%s'." % level)
        return LOGGING_LEVELS[level]

    def filter(self, record):
        """Return whether a given record passes the filter or not."""
        return record.levelno in self.levels
| StarcoderdataPython |
1616605 | <reponame>MatteoRomiti/Lightning-Network-Deanonymization<filename>src/load_data.py
# This script loads data to be imported from other scripts
import pandas as pd
import time
from utils import read_json, level1_folder, level2_folder, results_folder
from sort_mapping_entities import star_file, snake_file, collector_file, proxy_file
# Paths to the pre-processed inputs: LN topology CSVs (level 2), on-chain
# JSON dumps (level 1), clustering heuristics and their sorted mappings.
input_file_1 = level2_folder + 'channel.csv'
input_file_2 = level2_folder + 'node.csv'
input_file_3 = level2_folder + 'ip_address.csv'
input_file_4 = level1_folder + 'funding_txs.json'
input_file_5 = level1_folder + 'funding_address_entity.json'
input_file_6 = level1_folder + 'funded_address_settlement_txs.json'
input_file_7 = level1_folder + 'settlement_address_entity.json'
input_file_8 = level1_folder + 'settlement_addresses.json'
input_file_9 = level1_folder + 'settlement_txs.json'
input_file_10 = star_file
input_file_11 = snake_file
input_file_12 = collector_file
input_file_12b = proxy_file
input_file_13 = results_folder + 'star_sorted_mapping.json'
input_file_14 = results_folder + 'snake_sorted_mapping.json'
input_file_15 = results_folder + 'collector_sorted_mapping.json'
input_file_15b = results_folder + 'proxy_sorted_mapping.json'
def df_to_dicts_set(df, invert=False):
    """Build forward and reverse many-to-many lookups from a two-column DataFrame.

    :param df: DataFrame whose rows are (node, value) pairs.
    :param invert: when True, treat the columns as (value, node) instead.
    :returns: tuple (node_x, x_node) of plain dicts mapping each first-column
        key to the set of second-column values and vice versa.

    Idiom improvement: ``dict.setdefault`` replaces the manual membership checks.
    """
    node_x = dict()
    x_node = dict()
    for row in df.values:
        first, second = row[0], row[1]
        if invert:
            first, second = second, first
        node_x.setdefault(first, set()).add(second)
        x_node.setdefault(second, set()).add(first)
    return node_x, x_node
def create_mapping(components_df, component_sorted_mapping):
    """Group entity ids by the sorted id of their component.

    :param components_df: DataFrame whose rows are (component, entity) pairs.
    :param component_sorted_mapping: dict mapping a component id to its sorted id.
    :returns: dict mapping each sorted component id to a list of its entity ids.
    """
    component_entities = dict()
    for row in components_df.values:
        component_id, entity_id = int(row[0]), int(row[1])
        sorted_id = int(component_sorted_mapping[component_id])
        component_entities.setdefault(sorted_id, []).append(entity_id)
    return component_entities
def invert_mapping_component(component_entities):
    """Invert a {component: [entities]} mapping into {entity: component}."""
    return {
        entity: component
        for component, entities in component_entities.items()
        for entity in entities
    }
def replace_ids(mapping_dict, address_entity):
    """Replace, in place, entity ids present in *mapping_dict* by their negated mapped id.

    Negative values avoid collisions with the standard (positive) entity IDs.
    Returns the mutated *address_entity* dict for convenience.
    """
    for address, entity in address_entity.items():
        if entity not in mapping_dict:
            continue
        address_entity[address] = -mapping_dict[entity]
    return address_entity
def mapping(csv_file, json_file, fae, sae):
    """Rewrite the funding/settlement address->entity dicts so that entities
    belonging to a clustered component use the component's (negative) sorted id.

    :param csv_file: CSV of (component, entity) rows.
    :param json_file: JSON mapping component id -> sorted component id.
    :param fae: funding address -> entity dict (mutated).
    :param sae: settlement address -> entity dict (mutated).
    :returns: the updated (fae, sae) pair.
    """
    # modify address-entity mapping: entities->components (negative IDs)
    components_df = pd.read_csv(csv_file)
    component_sorted_mapping = read_json(json_file, True)
    # mapping
    component_entities = create_mapping(components_df, component_sorted_mapping)
    entity_component = invert_mapping_component(component_entities)
    fae = replace_ids(entity_component, fae)
    sae = replace_ids(entity_component, sae)
    return fae, sae
def use_stars(fae, sae):
    """Apply the star-heuristic clustering to the address->entity dicts."""
    print('use_stars')
    return mapping(input_file_10, input_file_13, fae, sae)
def use_snakes(fae, sae):
    """Apply the snake-heuristic clustering to the address->entity dicts."""
    print('use_snakes')
    return mapping(input_file_11, input_file_14, fae, sae)
def use_collectors(fae, sae):
    """Apply the collector-heuristic clustering to the address->entity dicts."""
    print('use_collectors')
    return mapping(input_file_12, input_file_15, fae, sae)
def use_proxies(fae, sae):
    """Apply the proxy-heuristic clustering to the address->entity dicts."""
    print('use_proxies')
    return mapping(input_file_12b, input_file_15b, fae, sae)
def set_mapping(fae, sae, och):
    """Apply the clustering heuristics enabled by the *och* option dict.

    :param och: dict with boolean flags 'stars', 'snakes', 'proxies'
        ('collectors' is currently disabled below).
    """
    if och['stars']:
        fae, sae = use_stars(fae, sae)
    if och['snakes']:
        fae, sae = use_snakes(fae, sae)
    # if och['collectors']:
    #     fae, sae = use_collectors(fae, sae)
    if och['proxies']:
        fae, sae = use_proxies(fae, sae)
    return fae, sae
############### LN Data ###############
channels = pd.read_csv(input_file_1)
nodes = pd.read_csv(input_file_2)
ip_addresses = pd.read_csv(input_file_3)
# Map each LN node id to the set of channel ids it participates in.
node_channels = dict()
for channel in channels.values:
    c, n1, n2 = channel
    if n1 not in node_channels:
        node_channels[n1] = set()
    node_channels[n1].add(c)
    if n2 not in node_channels:
        node_channels[n2] = set()
    node_channels[n2].add(c)
# Bidirectional node<->alias and node<->IP lookups.
node_alias, alias_node = df_to_dicts_set(nodes)
node_ip, ip_node = df_to_dicts_set(ip_addresses)
############### BTC Data ###############
# On-chain JSON dumps produced by the level-1 pre-processing step.
funding_txs = read_json(input_file_4)
funding_address_entity = read_json(input_file_5)
funded_address_settlement_txs = read_json(input_file_6)
settlement_address_entity = read_json(input_file_7)
settlement_addresses = read_json(input_file_8)
settlement_txs_list = read_json(input_file_9)
settlement_txs_hashes = [el['tx_hash'] for el in settlement_txs_list]
# Nodes on-chain activity
# for each node, create a list of timestamps of
# openings, closings and first/last_activity
node_openings_closings = dict()
for node, chnls in node_channels.items():
    node_openings_closings[node] = {'openings': [], 'closings': []}
    for chnl in chnls:
        # A channel id is "<funding_tx_hash>:<output_index>".
        tx_hsh, out_index = chnl.split(':')
        t_open = funding_txs[tx_hsh]['timestamp']
        node_openings_closings[node]['openings'].append(t_open)
        # A closing timestamp of 0 marks a channel that is still open.
        t_closed = 0
        funded_address = funding_txs[tx_hsh]['outputs'][int(out_index)][
            'address']
        stxs = funded_address_settlement_txs[funded_address]
        if stxs:
            t_closed = stxs[0]['timestamp']
        node_openings_closings[node]['closings'].append(t_closed)
    node_openings_closings[node]['first_activity'] = min(
        node_openings_closings[node]['openings'])
    node_openings_closings[node]['last_activity'] = max(
        max(node_openings_closings[node]['openings']),
        max(node_openings_closings[node]['closings']))
    if min(node_openings_closings[node]['closings']) == 0:
        # still open -> now
        node_openings_closings[node]['last_activity'] = int(time.time())
6563763 | from ... import microbuild
# Uses @microbuild.task form instead of @microbuild.task() form.
@microbuild.task
def clean():
    """Build task registered with microbuild; currently a no-op.

    Presumably intended to remove build artifacts -- TODO implement.
    """
    pass
| StarcoderdataPython |
3253747 | #!packages/bin/python3
import sys
import os
import pickle
sys.path.append('scripts/')
from arduino_comms import Database, Monitor
from comms_emulate import EmuSystem
# EDIT ARDUINO PORT VALUE FOUND ON RPI
PORT = '/dev/ttyACM0'  # serial device of the Arduino on the Raspberry Pi
# --------------------------- SETUP SYSTEM ----------------------------------- #
if __name__ == '__main__':
    # If built on RPi, create the real serial monitor class.
    # On any failure (e.g. no Arduino attached) fall back to the emulator.
    try:
        mon = Monitor(PORT)
    except Exception:
        # Deliberately broad: any serial/hardware error means "no real board".
        mon = EmuSystem()
    # Create base objects for the system & fill database with null values.
    # Context managers ensure the pickle files are flushed and closed
    # (the original passed open(...) directly and leaked the handles).
    with open('dbBuffer.p', 'wb') as buffer_file:
        pickle.dump([], buffer_file)
    db = Database('datadb')
    db.createTable()
    db.insertData([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    with open('monitor.p', 'wb') as monitor_file:
        pickle.dump(mon, monitor_file)
    with open('noteBuffer.p', 'wb') as note_file:
        pickle.dump([], note_file)
| StarcoderdataPython |
3412563 | <gh_stars>1-10
# this file is an adaptation from the work at mozilla deepspeech github.com/mozilla/DeepSpeech
import itertools
import kenlm
from heapq import heapify
from os.path import abspath, exists
import numpy as np
from pattern3.metrics import levenshtein
from util.ctc_util import get_alphabet
# the LER is just the Levenshtein/edit distance
ler = levenshtein
def wer(ground_truth, prediction):
    """Word Error Rate (WER): edit distance between the two texts on word level."""
    truth_words = ground_truth.split()
    predicted_words = prediction.split()
    return ler(truth_words, predicted_words)
def ler_norm(ground_truth, prediction):
    """Normalized LER: the LER divided by the ground-truth length (at least 1).

    Note: only the ground truth's length is used as denominator, so the result
    is only guaranteed to be in [0, 1] when the prediction is not longer than
    the ground truth.
    """
    denominator = float(max(len(ground_truth), 1))
    return ler(ground_truth, prediction) / denominator
def wer_norm(ground_truth, prediction):
    """Normalized WER: the WER divided by the number of words in the ground truth."""
    word_count = float(len(ground_truth.split()))
    return wer(ground_truth, prediction) / word_count
def wers(ground_truths, predictions):
    """Element-wise normalized WER for paired lists, as a NumPy array."""
    assert len(ground_truths) > 0, f'ERROR: no ground truths provided!'
    assert len(ground_truths) == len(predictions), f'ERROR: # of ground truths does not match # of predictions!'
    rates = [wer_norm(truth, hypo) for truth, hypo in zip(ground_truths, predictions)]
    return np.array(rates)
def lers(ground_truths, predictions):
    """Element-wise (unnormalized) LER for paired lists, as a NumPy array."""
    assert len(ground_truths) > 0, f'ERROR: no ground truths provided!'
    assert len(ground_truths) == len(predictions), f'ERROR: # of ground truths does not match # of predictions!'
    distances = [ler(truth, hypo) for truth, hypo in zip(ground_truths, predictions)]
    return np.array(distances)
def lers_norm(ground_truths, predictions):
    """Element-wise normalized LER for paired lists, as a NumPy array."""
    assert len(ground_truths) > 0, f'ERROR: no ground truths provided!'
    assert len(ground_truths) == len(predictions), f'ERROR: # of ground truths does not match # of predictions!'
    rates = [ler_norm(truth, hypo) for truth, hypo in zip(ground_truths, predictions)]
    return np.array(rates)
def load_lm(lm_path):
    """Load a KenLM n-gram language model from disk, caching it per path.

    :param lm_path: path to the KenLM model file
    :return: the (possibly cached) kenlm.Model instance
    :raises ValueError: if no file exists at the given path
    """
    global LANGUAGE_MODELS
    if lm_path in LANGUAGE_MODELS:
        lm = LANGUAGE_MODELS[lm_path]
        print(f'using cached LM ({lm.order}-gram):')
        return lm
    lm_abs_path = abspath(lm_path)
    if not exists(lm_abs_path):
        raise ValueError(f'ERROR: LM not found at {lm_abs_path}')
    print(f'loading LM from {lm_abs_path}...', end='')
    lm = kenlm.Model(lm_abs_path)
    print(f'done! Loaded {lm.order}-gram model.')
    # cache under the original (non-absolute) path, matching the lookup above
    LANGUAGE_MODELS[lm_path] = lm
    return lm
def load_vocab(vocab_path):
    """Return the LM vocabulary at ``vocab_path`` as a list of words, caching per path.

    :raises ValueError: if no file exists at the given path
    """
    global LM_VOCABS
    cached = LM_VOCABS.get(vocab_path)
    if cached is not None:
        print(f'using cached LM vocab ({len(cached)} words)')
        return cached
    lm_vocab_abs_path = abspath(vocab_path)
    if not exists(lm_vocab_abs_path):
        raise ValueError(f'ERROR: LM vocabulary not found at {lm_vocab_abs_path}')
    with open(lm_vocab_abs_path) as vocab_file:
        print(f'loading LM vocab from {lm_vocab_abs_path}...', end='')
        lm_vocab = vocab_file.read().split()
        print(f'done! Loaded {len(lm_vocab)} words.')
        LM_VOCABS[vocab_path] = lm_vocab
    return lm_vocab
def score(word_list, lm):
    """
    Use LM to calculate a log10-based probability for a given sentence (as a list of words)
    :param word_list: sentence as a list of word-strings
    :param lm: KenLM model instance used for scoring
    :return: log10 probability of the sentence (no BOS/EOS markers added)
    """
    return lm.score(' '.join(word_list), bos=False, eos=False)
def correction(sentence, language, lm=None, vocab=None):
    """
    Get most probable spelling correction for a given sentence.

    A beam search over per-word candidate corrections is performed: for each
    word the candidates (edit distance <= 2, restricted to the LM vocabulary)
    are appended to every partial hypothesis, hypotheses are scored with the
    LM, and only the best `beam_width` are kept.

    :param sentence: sentence to correct (whitespace-separated words)
    :param language: the language of the sentence
    :param lm: n-gram LM to use to score sentences
    :param vocab: vocabulary of LM to use for spell checking
    :return: the highest-scoring corrected sentence
    """
    # the message previously claimed only 'en'/'de' were allowed
    assert language in ['en', 'de', 'fr', 'it', 'es'], \
        'language must be one of [\'en\', \'de\', \'fr\', \'it\', \'es\']'
    if not lm or not vocab:
        # without an LM and vocabulary no correction is possible
        return sentence
    alphabet = get_alphabet(language)
    beam_width = 1024
    layer = [(0, [])]  # list of (negative LM score, word list) pairs
    for word in sentence.split():
        layer = [(-score(hypothesis + [word_c], lm), hypothesis + [word_c])
                 for word_c in candidate_words(word, vocab, alphabet)
                 for _, hypothesis in layer]
        # Keep only the `beam_width` best (lowest negative score) hypotheses.
        # BUG FIX: the previous code ran heapify() and then took a plain
        # slice; slicing a heap does NOT yield its smallest elements.
        layer = sorted(layer)[:beam_width]
    return ' '.join(layer[0][1])
def candidate_words(word, lm_vocab, alphabet):
    """
    Generate possible spelling corrections for a given word.

    Preference order: the word itself if known, else known words at edit
    distance 1, else known words at edit distance 2, else the word unchanged.
    The more expensive edit sets are only computed when the cheaper ones
    produce no known candidate.
    :param word: single word as a string
    :return: set (or single-element list) of candidate corrections
    """
    candidates = known_words([word], lm_vocab)
    if candidates:
        return candidates
    candidates = known_words(edits_1(word, alphabet), lm_vocab)
    if candidates:
        return candidates
    candidates = known_words(edits_2(word, alphabet), lm_vocab)
    if candidates:
        return candidates
    return [word]
def known_words(word_list, lm_vocab):
    """
    Keep only the words that occur in the given vocabulary.
    :param word_list: iterable of word-strings
    :param lm_vocab: vocabulary to check membership against
    :return: set of unique words that appear in the vocabulary
    """
    return {word for word in word_list if word in lm_vocab}
def edits_1(word_str, alphabet):
    """
    Generate all words with edit distance 1 from the given word:
    deletions, adjacent swaps, replacements and insertions (in that order).
    Generators are used for performance reasons.
    :param word_str: single word as a string
    :param alphabet: the alphabet to use for the language
    :return: generator over all possible words with edit distance 1
    """
    variant_groups = (deletes(word_str),
                      swaps(word_str),
                      replaces(word_str, alphabet),
                      inserts(word_str, alphabet))
    return itertools.chain.from_iterable(variant_groups)
def splits(word_str):
    """
    Generate every split of the word into a (head, tail) pair, including the
    empty head and the empty tail
    (e.g. 'abc' -> ('', 'abc'), ('a', 'bc'), ('ab', 'c'), ('abc', '')).
    :param word_str: the word as string
    :return: generator of (head, tail) tuples
    """
    for cut in range(len(word_str) + 1):
        yield word_str[:cut], word_str[cut:]
def deletes(word_str):
    """
    Generate every variant of the word with exactly one character removed
    (e.g. 'abc' -> 'bc', 'ac', 'ab').
    :param word_str: the word as string
    :return: generator of one-character-deletion variants
    """
    return (word_str[:i] + word_str[i + 1:] for i in range(len(word_str)))
def swaps(word_str):
    """
    Generate every variant of the word with two adjacent characters swapped
    (e.g. 'abc' -> 'bac', 'acb').
    :param word_str: the word as string
    :return: generator of adjacent-swap variants
    """
    return (word_str[:i] + word_str[i + 1] + word_str[i] + word_str[i + 2:]
            for i in range(len(word_str) - 1))
def replaces(word_str, alphabet):
    """
    Generate every variant of the word with one character replaced by each
    letter of the alphabet
    (e.g. 'abc' -> 'bbc', 'cbc', ..., 'aac', 'acc', ..., 'aba', 'abb', ...).
    :param word_str: the word as string
    :param alphabet: the alphabet to use for the language
    :return: generator of single-replacement variants
    """
    return (word_str[:i] + letter + word_str[i + 1:]
            for i in range(len(word_str))
            for letter in alphabet)
def inserts(words_str, alphabet):
    """
    Generate every variant of the word with one character inserted at each
    position; identical elements may be produced
    (e.g. 'abc' -> 'aabc', 'babc', ..., 'abac', ..., 'abca', 'abcb', ...).
    :param words_str: the word as string
    :param alphabet: the alphabet to use for the language
    :return: generator of single-insertion variants
    """
    return (words_str[:i] + letter + words_str[i:]
            for i in range(len(words_str) + 1)
            for letter in alphabet)
def edits_2(word, alphabet):
    """
    Generate all words with edit distance 2 from the given word by applying
    edits_1 twice; duplicates may be produced.
    :param word: the word as string
    :param alphabet: the alphabet to use for the language
    :return: generator over all edit-distance-2 variants
    """
    for first_edit in edits_1(word, alphabet):
        yield from edits_1(first_edit, alphabet)
# globals
# module-level caches keyed by path so repeated load_lm()/load_vocab() calls
# do not re-read the same model/vocabulary from disk
LANGUAGE_MODELS = {}
LM_VOCABS = {}
| StarcoderdataPython |
1711884 | <reponame>nailgun/seedbox<gh_stars>10-100
import itertools
import json
from flask import request
from seedbox import models
def render(node, indent=False):
    """Render the Ignition JSON config for the given node.

    :param node: node model instance (provides cluster, disks, role flags)
    :param indent: if True, pretty-print the JSON with indentation
    :return: JSON string
    """
    return IgnitionConfig(node).render(indent)
class IgnitionConfig(object):
    """Builds the CoreOS Ignition (v2.0.0) JSON configuration for one node."""
    def __init__(self, node):
        # node: the machine to configure; its cluster drives shared settings
        self.node = node
        self.cluster = node.cluster
    def render(self, indent=False):
        """Serialize the full config to JSON (pretty-printed when indent=True)."""
        content = self.get_content()
        if indent:
            return json.dumps(content, indent=2)
        else:
            return json.dumps(content, separators=(',', ':'))
    def get_content(self):
        """Assemble the Ignition dict from all applicable package classes."""
        # each package contributes files plus systemd/networkd units
        packages = [P(self.node, request.url_root) for P in self.get_package_classes()]
        files = list(itertools.chain.from_iterable(p.get_files() for p in packages))
        units = list(itertools.chain.from_iterable(p.get_units() for p in packages))
        networkd_units = list(itertools.chain.from_iterable(p.get_networkd_units() for p in packages))
        ssh_keys = self.get_ssh_keys()
        return {
            'ignition': {
                'version': '2.0.0',
                'config': {},
            },
            'storage': self.get_storage_config(files),
            'networkd': {
                'units': networkd_units
            },
            'passwd': {
                # same authorized keys for both root and the 'core' user
                'users': [{
                    'name': 'root',
                    'sshAuthorizedKeys': ssh_keys,
                }, {
                    'name': 'core',
                    'sshAuthorizedKeys': ssh_keys,
                }],
            },
            'systemd': {
                'units': units
            },
        }
    def get_package_classes(self):
        """Select the config 'packages' for this node based on its roles/flags."""
        from .system import SystemPackage
        if self.node.maintenance_mode:
            # maintenance mode: bare system configuration only
            return [SystemPackage]
        from .credentials import CredentialsPackage
        from .flannel import FlannelPackage
        packages = [
            SystemPackage,
            CredentialsPackage,
            FlannelPackage,
        ]
        if self.cluster.install_dnsmasq:
            from .dnsmasq import DnsmasqPackage
            packages += [
                DnsmasqPackage,
            ]
        if self.node.is_etcd_server:
            from .etcd_server import EtcdServerPackage
            packages += [
                EtcdServerPackage,
            ]
        if self.node.is_k8s_schedulable or self.node.is_k8s_master:
            from .kubeconfig import KubeconfigPackage
            from .kubelet import KubeletPackage
            from .kube_proxy import KubeProxyPackage
            packages += [
                KubeconfigPackage,
                KubeletPackage,
                KubeProxyPackage,
            ]
        if self.node.is_k8s_master:
            from .k8s_master_manifests import K8sMasterManifestsPackage
            packages += [
                K8sMasterManifestsPackage,
            ]
        return packages
    def get_storage_config(self, files):
        """Build the 'storage' section: disks to wipe, filesystems and files."""
        disks = []
        filesystems = []
        config = {
            'disks': disks,
            'filesystems': filesystems,
            'files': files,
        }
        if self.node.maintenance_mode:
            # never touch disks while in maintenance mode
            return config
        root_fs = False
        for disk in self.node.disks.filter_by(wipe_next_boot=True):
            partitions = []
            disks += [{
                'device': disk.device,
                'wipeTable': True,
                'partitions': partitions,
            }]
            for partition in disk.partitions.all():
                if partition.size_mibs:
                    # convert MiB to device sectors for Ignition
                    size_sectors = partition.size_mibs * 1024 * 1024 // disk.sector_size_bytes
                else:
                    size_sectors = 0
                partitions += [{
                    'number': partition.number,
                    'start': 0,
                    'size': size_sectors,
                    'label': partition.label,
                }]
                filesystems += [partition2ignitionfs(partition)]
                if partition.is_root:
                    root_fs = True
        if not root_fs:
            # root was not on a wiped disk; still declare its filesystem
            filesystems += [partition2ignitionfs(self.node.root_partition)]
        return config
    def get_ssh_keys(self):
        """Collect the non-empty SSH public keys of all cluster users."""
        return [user.ssh_key for user in self.cluster.users.filter(models.User.ssh_key != '')]
def partition2ignitionfs(partition):
    """Build the Ignition 'filesystems' entry for a single partition.

    The filesystem is always (re)created with force and labelled after the
    partition; the root partition additionally carries the name 'root'.
    """
    entry = {
        'mount': {
            'device': partition.device,
            'format': partition.format,
            'create': {
                'force': True,
                'options': [f'-L{partition.label}'],
            },
        },
    }
    if partition.is_root:
        entry['name'] = 'root'
    return entry
| StarcoderdataPython |
1925021 | #!/usr/bin/env python3
from flask import Flask, request, render_template
import pickle
import yaml
import ruamel.yaml
app = Flask(__name__)
# module-level credential-like fixture (username + hex token); presumably the
# demo/CTF secret for this intentionally vulnerable application
global_variable = ['global_user', '454CA7B2A26E50D8C51572C4D8A023693DDC404F4C563C98DD15818DF83D4F9R']
@app.route('/', methods=['GET'])
def index():
    """Intentionally vulnerable endpoint (code-injection / deserialization demo).

    SECURITY: 'parameter' is attacker-controlled request input and is fed to
    eval(), pickle.loads(), yaml.load() and ruamel.yaml.load() below -- each
    of these allows arbitrary code execution on untrusted data. Do not reuse
    this pattern outside of a deliberately vulnerable lab application.
    """
    local_variable = ['local_user', '29C8D2387B3EF2B306855AB19988AE08BB0F558366F5D29AF16732B91D52B62F']
    parameter = request.values.get('parameter')
    parameter1 = eval(parameter)  # RCE: evaluates arbitrary Python expressions
    parameter2 = pickle.loads(parameter)  # RCE: unpickles untrusted data
    parameter3 = yaml.load(parameter)  # RCE: unsafe load without SafeLoader
    parameter4 = ruamel.yaml.load(parameter)  # RCE: same issue in ruamel.yaml
    res = render_template('index.html', parameter=parameter1)
    return res
if __name__ == "__main__":
    # SECURITY: binds on all interfaces; combined with the vulnerable route
    # above this exposes remote code execution to the whole network
    app.run(host='0.0.0.0', port=8000)
| StarcoderdataPython |
8024100 | #!/usr/bin/env python3
# date: 2019.11.07
#
from bs4 import BeautifulSoup as BS
# Demo: selecting <p> tags by absence of attributes with BeautifulSoup.
text = '<p class="A">text A</p> <p>text B</p> <p id="C">text C</p> <p data="D">text D</p>'
soup = BS(text, 'html.parser')
# --- without class and id, via keyword filters
# `class` is reserved keyword so BS uses `class_`
all_items = soup.find_all('p', class_=False, id=False)
for item in all_items:
    print(item.text)
# --- without class and id, same filter expressed as an attrs dict
all_items = soup.find_all('p', {'class': False, 'id': False})
for item in all_items:
    print(item.text)
# --- without any attributes at all (also excludes the data="D" paragraph,
# unlike the two variants above)
all_items = soup.find_all('p')
for item in all_items:
    if not item.attrs:
        print(item.text)
| StarcoderdataPython |
6670477 | import os
secrets_directory = "/run/secrets"
def get(secret_name, default_value=None):
    """
    Get a docker secret
    :param secret_name: file name of the secret under the secrets directory
    :param default_value: value returned when the secret file cannot be read
    :return: the raw file contents, or default_value on read failure
    :raises ValueError: if the secret cannot be read and no default is given
    """
    secret_path = os.path.join(secrets_directory, secret_name)
    try:
        with open(secret_path, 'r') as handle:
            return handle.read()
    except IOError:
        if default_value is None:
            raise ValueError(f"Unable to read secret {secret_name}")
        return default_value
| StarcoderdataPython |
1951949 |
### WEB SERVER IMPORTS ###
from flask import Flask
from flask import json
from flask import request
### OTHER IMPORTS ###
import json
from libs import req
import os
import time
from datetime import datetime, timedelta
import hmac, hashlib
###########################
### LOADING SETTINGS
from config import mist_conf
from config import slack_conf
from config import msteams_conf
from config import color_config
from config import event_channels
from config import updown_channels
from config import alarm_channels
from libs.slack import Slack
slack = Slack(slack_conf)
from libs.msteams import Teams
teams = Teams(msteams_conf)
from libs.audit import audit
from libs.device_event import device_event
from libs.alarm import alarm
###########################
### LOGGING SETTINGS
try:
    from config import debug
except ImportError:
    # no debug flag configured -> default to quiet mode
    debug = False
try:
    from config import log_level
except ImportError:
    # default verbosity when config does not define log_level
    log_level = 6
finally:
    from libs.debug import Console
    console = Console(log_level)
###########################
### VARS
try:
    from config import port as server_port
except ImportError:
    # fall back to the default MWTT listening port
    server_port = 51361
###########################
### FUNCTIONS
def _title(topic, time):
return "{0} - {1}".format(time, topic)
def _get_time(event):
if "timestamp" in event:
dt = datetime.fromtimestamp(event["timestamp"])
else:
dt = datetime.now()
return "{0} UTC".format(dt)
def new_event(topic, event):
    """Format a single webhook event and dispatch it to Slack/Teams.

    Dispatch by topic: 'audits', 'device-events', 'device-updowns' and
    'alarms' are formatted by their dedicated handlers; any other topic falls
    back to a raw key/value dump of the event.
    """
    console.info("{0}".format(topic))
    message = []
    for key in event:
        console.info("%s: %s\r" %(key, event[key]))
        message.append("%s: %s" %(key, event[key]))
    # optional per-topic message color
    if topic in color_config:
        color = color_config[topic]
    else:
        color = None
    if topic == "audits":
        level, text, actions = audit(mist_host, approved_admins, event)
    elif topic == "device-events":
        level, text, actions = device_event(mist_host, event_channels, event)
    elif topic == "device-updowns":
        level, text, actions = device_event(mist_host, updown_channels, event)
    elif topic == "alarms":
        level, text, actions = alarm(mist_host, alarm_channels, event)
    else:
        # unknown topic: forward the raw key/value lines
        text = []
        level = "unknown"
        actions = []
        for mpart in message:
            text.append(mpart)
    # NOTE: local 'time' shadows the imported time module inside this function
    time = _get_time(event)
    if slack_conf["enabled"]: slack.send_manual_message(_title(topic, time), text, level, color, actions)
    if msteams_conf["enabled"]: teams.send_manual_message(topic, time, text, level, color, actions)
    # NOTE(review): leftover debug prints below -- consider removing
    print(event)
    print(topic)
    print(message)
    print("------")
###########################
### CONF FUNCTIONS
def load_conf(value):
    """Fetch a required key from the global mist_conf dict with console feedback.

    Prints a green check on success; on a missing key prints a red cross and
    terminates the process with exit code 255.
    """
    print("Loading {0} ".format(value).ljust(79, "."), end="", flush=True)
    if value in mist_conf:
        print("\033[92m\u2714\033[0m")  # green check mark
        return mist_conf[value]
    else:
        print('\033[31m\u2716\033[0m')  # red cross mark
        exit(255)
def display_conf():
    """Print the loaded configuration, partially masking the API token."""
    print("Mist Hist: {0}".format(mist_host))
    # only the first and last 6 characters of the API token are shown
    print("API Token: {0}........{1}".format(apitoken[:6], apitoken[len(apitoken)-6:]))
    # NOTE(review): this prints the full webhook secret to the console
    print("Webhook Secret: {0}".format(mist_secret))
    print("MWTT URI: {0}".format(server_uri))
    print("Ignored Sites: {0}".format(site_id_ignored))
    print("Approved Admins: {0}".format(approved_admins))
    print("Debug Mode: {0}".format(debug))
###########################
### ENTRY POINT
# load all required settings up front; load_conf() exits on any missing key
print("Loading configuration ".center(80,"_"))
apitoken = load_conf("apitoken")
mist_host= load_conf("mist_host")
mist_secret= load_conf("mist_secret")
server_uri = load_conf("server_uri")
site_id_ignored= load_conf("site_id_ignored")
approved_admins= load_conf("approved_admins")
# (sic: 'Configuraiton' typo is in the output string; left unchanged here)
print("Configuraiton loaded".center(80, "_"))
display_conf()
app = Flask(__name__)
@app.route(server_uri, methods=["POST"])
def postJsonHandler():
    """Receive a Mist webhook POST, verify its HMAC-SHA1 signature and
    dispatch every contained event.

    Returns ('', 200) after processing when the signature is valid (or when
    no webhook secret is configured), ('', 401) otherwise.
    """
    signature = request.headers['X-Mist-Signature'] if "X-Mist-Signature" in request.headers else None
    if mist_secret is None:
        # No secret configured: accept without verification. BUG FIX: the old
        # code computed str.encode(mist_secret) first, which crashed with a
        # TypeError before this case could ever be reached.
        authorized = True
    else:
        key = str.encode(mist_secret)
        digester = hmac.new(key, request.data, hashlib.sha1).hexdigest()
        # constant-time comparison to avoid timing attacks; a missing
        # signature header is rejected
        authorized = signature is not None and hmac.compare_digest(signature, digester)
    if authorized:
        content = request.get_json()
        if debug: print(content)
        topic = content["topic"]
        events = content["events"]
        for event in events:
            new_event(topic, event)
        return '', 200
    else:
        return '', 401
if __name__ == '__main__':
    print("Starting Server".center(80, "_"))
    # listen on all interfaces on the configured port; Flask debug disabled
    app.run(debug=False, host='0.0.0.0', port=server_port)
| StarcoderdataPython |
9699829 | <gh_stars>10-100
from grano.core import db, url_for
from grano.model.common import UUIDBase
from grano.model.property import Property, PropertyBase
class Entity(db.Model, UUIDBase, PropertyBase):
    """A graph entity (node) within a project, carrying versioned properties."""
    __tablename__ = 'grano_entity'
    # id of the canonical entity when this record is a duplicate
    same_as = db.Column(db.Unicode, db.ForeignKey('grano_entity.id'),
                        nullable=True)
    project_id = db.Column(db.Integer, db.ForeignKey('grano_project.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('grano_account.id'))
    schema_id = db.Column(db.Integer, db.ForeignKey('grano_schema.id'),
                          index=True)
    # denormalized relation counts, maintained elsewhere
    degree_in = db.Column(db.Integer)
    degree_out = db.Column(db.Integer)
    degree = db.Column(db.Integer)
    inbound = db.relationship('Relation', lazy='dynamic', backref='target',
                              primaryjoin='Entity.id==Relation.target_id',
                              cascade='all, delete, delete-orphan')
    outbound = db.relationship('Relation', lazy='dynamic', backref='source',
                               primaryjoin='Entity.id==Relation.source_id',
                               cascade='all, delete, delete-orphan')
    properties = db.relationship(Property, backref='entity',
                                 order_by=Property.created_at.desc(),
                                 cascade='all, delete, delete-orphan',
                                 lazy='joined')
    @property
    def names(self):
        # all 'name' properties, newest first (per the relationship ordering)
        return [p for p in self.properties if p.name == 'name']
    @classmethod
    def by_name(cls, project, name, only_active=False):
        """Return the first entity in the project with the given name, or None."""
        q = cls.by_name_many(project, name, only_active=only_active)
        return q.first()
    @classmethod
    def by_name_many(cls, project, name, only_active=False):
        """Query all canonical (non-duplicate) entities in the project by name."""
        q = db.session.query(cls)
        q = q.filter(cls.same_as == None) # noqa
        q = q.filter(cls.project == project)
        q = cls._filter_property(q, 'name', name, only_active=only_active)
        return q
    def to_dict_index(self):
        """ Convert an entity to the REST API form. """
        data = {
            'id': self.id,
            'degree': self.degree,
            'degree_in': self.degree_in,
            'degree_out': self.degree_out,
            'project': self.project.to_dict_short(),
            'schema': self.schema.to_dict_index(),
            'api_url': url_for('entities_api.view', id=self.id),
            'properties': {}
        }
        for prop in self.active_properties:
            name, prop = prop.to_dict_kv()
            data['properties'][name] = prop
        if self.same_as:
            # expose the canonical entity for duplicates
            data['same_as'] = self.same_as
            data['same_as_url'] = url_for('entities_api.view', id=self.same_as)
        return data
    def to_dict(self):
        """ Full serialization of the entity. """
        data = self.to_dict_index()
        data['created_at'] = self.created_at
        data['updated_at'] = self.updated_at
        # only link relation listings that are non-empty
        if data['degree_in'] > 0:
            data['inbound_url'] = url_for('relations_api.index', target=self.id)
        if data['degree_out'] > 0:
            data['outbound_url'] = url_for('relations_api.index', source=self.id)
        return data
    def to_index(self):
        """ Convert an entity to a form appropriate for search indexing. """
        data = self.to_dict()
        data['names'] = []
        for prop in self.properties:
            if prop.name == 'name':
                data['names'].append(prop.value)
        return data
| StarcoderdataPython |
9633911 | import os
import csv
from sklearn.datasets import make_spd_matrix
import autograd.numpy as np
from numpy import linalg as la, random as rnd
import pymanopt
from pymanopt.manifolds import Sphere
from algorithms import ConjugateGradient, BetaTypes
def create_cost(A):
    """Return a pymanopt autograd cost function x -> <x, A x> for matrix A.

    Over the unit sphere this is the Rayleigh quotient of A; minimizing it
    finds the eigenvector of the smallest eigenvalue.
    """
    @pymanopt.function.Autograd
    def cost(x):
        return np.inner(x, A @ x)
    return cost
if __name__ == "__main__":
    experiment_name = 'rayleigh'
    n_exp = 10  # number of random problem instances
    if not os.path.isdir('result'):
        os.makedirs('result')
    path = os.path.join('result', experiment_name + '.csv')
    n = 10  # problem dimension
    for i in range(n_exp):
        # random symmetric positive-definite matrix defines the cost
        matrix = make_spd_matrix(n)
        cost = create_cost(matrix)
        manifold = Sphere(n)
        problem = pymanopt.Problem(manifold, cost=cost, egrad=None)
        res_list = []
        # compare every conjugate-gradient beta rule on the same problem
        for beta_type in BetaTypes:
            solver = ConjugateGradient(beta_type=beta_type, maxiter=10000)
            res = solver.solve(problem)
            # res[1]/res[2]: solver metrics from the custom ConjugateGradient
            # implementation -- TODO confirm their meaning there
            res_list.append(res[1])
            res_list.append(res[2])
        # append one CSV row per problem instance
        with open(path, 'a') as f:
            writer = csv.writer(f)
writer.writerow(res_list) | StarcoderdataPython |
9794226 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Tests for reflinks script."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
from scripts.reflinks import XmlDumpPageGenerator, ReferencesRobot, main
from tests import join_xml_data_path
from tests.aspects import unittest, TestCase, ScriptMainTestCase
class TestXMLPageGenerator(TestCase):
    """Test XML Page generator."""
    # site configuration under test; dry=True keeps the tests offline
    family = 'wikipedia'
    code = 'en'
    dry = True
    def test_non_bare_ref_urls(self):
        """Test pages without bare references are not processed."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('article-pear-0.10.xml'),
            start='Pear',
            namespaces=[0, 1],
            site=self.get_site())
        pages = list(gen)
        self.assertEqual(len(pages), 0)
    def test_simple_bare_refs(self):
        """Test simple bare references in multiple namespaces."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake page',
            namespaces=[0, 1],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_namespace_empty_list(self):
        """Test namespaces=[] processes all namespaces."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start=u'Fake page',
            namespaces=[],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_namespace_None(self):
        """Test namespaces=None processes all namespaces."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake page',
            namespaces=None,
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_namespace_string_ids(self):
        """Test namespaces with ids as string."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake page',
            namespaces=["0", "1"],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_namespace_names(self):
        """Test namespaces with namespace names."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake page',
            namespaces=["Talk"],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Talk:Fake page', ),
                                  site=self.get_site())
    def test_start_with_underscore(self):
        """Test with underscore in start page title."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake_page',
            namespaces=[0, 1],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_without_start(self):
        """Test without a start page title."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start=None,
            namespaces=[0, 1],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
    def test_start_prefix(self):
        """Test with a prefix as a start page title."""
        gen = XmlDumpPageGenerator(
            filename=join_xml_data_path('dummy-reflinks.xml'),
            start='Fake',
            namespaces=[0, 1],
            site=self.get_site())
        pages = list(gen)
        self.assertPagelistTitles(pages, (u'Fake page', u'Talk:Fake page'),
                                  site=self.get_site())
class TestReferencesBotConstructor(ScriptMainTestCase):
    """
    Test reflinks with run() removed.
    These tests can't verify the order of the pages in the XML
    as the constructor is given a preloading generator.
    See APISite.preloadpages for details.
    """
    family = 'wikipedia'
    code = 'en'
    def setUp(self):
        """Set up the script by patching the bot class."""
        # monkeypatch the bot so main() only records the arguments it would
        # have constructed ReferencesRobot with, instead of running it
        super(TestReferencesBotConstructor, self).setUp()
        self._original_constructor = ReferencesRobot.__init__
        self._original_run = ReferencesRobot.run
        ReferencesRobot.__init__ = dummy_constructor
        ReferencesRobot.run = lambda self: None
    def tearDown(self):
        """Tear down the test by undoing the bot class patch."""
        ReferencesRobot.__init__ = self._original_constructor
        ReferencesRobot.run = self._original_run
        super(TestReferencesBotConstructor, self).tearDown()
    def test_xml_simple(self):
        """Test the generator without any narrowing."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'))
        gen = self.constructor_args[0]
        self.assertPageTitlesCountEqual(gen, [u'Fake page', u'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_one_namespace(self):
        """Test the generator using one namespace id."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:1')
        gen = self.constructor_args[0]
        pages = list(gen)
        self.assertPagelistTitles(pages, [u'Talk:Fake page'],
                                  site=self.get_site())
    def test_xml_multiple_namespace_ids(self):
        """Test the generator using multiple separate namespaces parameters."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:0', '-namespace:1', '-xmlstart:Fake page')
        gen = self.constructor_args[0]
        self.assertPageTitlesCountEqual(gen, [u'Fake page', u'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_multiple_namespace_ids_2(self):
        """Test the generator using multiple namespaces in one parameter."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:0,1', '-xmlstart:Fake page')
        gen = self.constructor_args[0]
        self.assertPageTitlesCountEqual(gen, [u'Fake page', u'Talk:Fake page'],
                                        site=self.get_site())
    def test_xml_start_prefix(self):
        """Test the generator using a start partial page."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:1', '-xmlstart:Fake')
        gen = self.constructor_args[0]
        pages = list(gen)
        self.assertPagelistTitles(pages, [u'Talk:Fake page'],
                                  site=self.get_site())
    def test_xml_start_underscore(self):
        """Test the generator using a start page with an underscore."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:1', '-xmlstart:Fake_page')
        gen = self.constructor_args[0]
        pages = list(gen)
        self.assertPagelistTitles(pages, [u'Talk:Fake page'],
                                  site=self.get_site())
    def test_xml_namespace_name(self):
        """Test the generator using a namespace name."""
        main('-xml:' + join_xml_data_path('dummy-reflinks.xml'),
             '-namespace:Talk', '-xmlstart:Fake page')
        gen = self.constructor_args[0]
        pages = list(gen)
        self.assertPagelistTitles(pages, [u'Talk:Fake page'],
                                  site=self.get_site())
def dummy_constructor(self, *args, **kwargs):
    """A constructor faking the actual constructor.

    Stores the call arguments on the test class so individual tests can
    inspect the generator that main() would have passed to ReferencesRobot.
    """
    TestReferencesBotConstructor.constructor_args = args
    TestReferencesBotConstructor.constructor_kwargs = kwargs
# allow running this test module directly
if __name__ == '__main__':  # pragma: no cover
    unittest.main()
| StarcoderdataPython |
5029265 | <filename>sitdown/views.py
import datetime
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict, OrderedDict, UserDict
from functools import total_ordering
from typing import List
from sitdown.core import Plottable, MutationSet
class MonthSet(UserDict, Plottable):
    """An ordered Dictionary-like object of Month: MonthBin. Ordered by month
    """
    def __init__(self, mutations, description="Unnamed"):
        """
        Parameters
        ----------
        mutations: List[Mutations]
            The mutations in this dataset
        description: str, optional
            Human-readable name used in __str__ and plot labels
        """
        super().__init__(self)
        self.mutations = mutations
        self.description = description
        # group mutations by calendar month
        months = defaultdict(list)
        for mutation in mutations:
            months[Month(mutation.date)].append(mutation)
        # chronologically ordered month -> bin mapping
        self.data = OrderedDict()
        for x in sorted(list(months.keys())):
            self.data[x] = MonthBin(mutations=months[x], month=x)
    def __str__(self):
        return f"Dataset {self.description}"
    def months(self):
        """List of all months in this series, sorted by date
        Returns
        -------
        List[Month]
        """
        return list(self.data.keys())
    def bins(self):
        """List of all MonthBins in this series, sorted by date
        Returns
        -------
        List[MonthBin]
        """
        return list(self.data.values())
    def sums(self):
        """Summed amount of all mutations per month, sorted by date
        Returns
        -------
        List[Decimal]
        """
        return [x.sum() for x in self.bins()]
    @property
    def min_month(self):
        # implicitly returns None for an empty set
        if self.months():
            return self.months()[0]
    @property
    def max_month(self):
        # implicitly returns None for an empty set
        if self.months():
            return self.months()[-1]
    @staticmethod
    def get_month_range(from_month, to_month):
        """Get all months in between min and max months. For consistent plotting
        Parameters
        ----------
        from_month: Month
            Start with this month.
        to_month: Month
            End with this month
        Returns
        -------
        List[Month]
            Empty list when either bound is missing
        """
        if from_month and to_month:
            return [x for x in month_iterator(from_month, to_month)]
        else:
            return []
    def get_series(self, from_month=None, to_month=None):
        """Get MonthBins for each month between the months given, creating empty bins if there are no mutations
        Parameters
        ----------
        from_month: Month, optional
            Start with this month. Defaults to first month in the mutations
        to_month: Month, optional
            End with this month. Defaults to last month in the mutations
        Returns
        -------
        MonthSeries
            series of bins for each month
        """
        return MonthSeries.from_month_set(
            self, from_month=from_month, to_month=to_month
        )
    def plot(self, ax=None):
        """Plot this mutations per month as a bar graph
        Parameters
        ----------
        ax: matplotlib.Axes, optional
            plot into this axes. Defaults to None, in which case a new axes will be created
            for this plot
        Returns
        -------
        matplotlib.Axes
            The axes into which this plot has been made
        """
        if not ax:
            _, ax = plt.subplots()
        months = self.months()
        sum_values = [x.sum() for x in self.bins()]
        # one bar per month, labeled with its Month string
        ind = np.arange(len(months))
        ax.bar(x=ind, height=sum_values)
        ax.set_ylabel(f"{self.description} (Euro)")
        ax.set_xlabel("Month")
        ax.grid(which="both", axis="y")
        ax.set_xticks(ind)
        ax.set_xticklabels([str(x) for x in months])
        return ax
class MonthSeries(MonthSet):
    """A MonthSet that is guaranteed to have all consecutive months between min and max
    Months without mutations will just have empty month bins"""
    def __init__(
        self, mutations, description="Unnamed", from_month=None, to_month=None
    ):
        """Create a consecutive series of month bins with the given mutations.
        When from_month and/or to_month arg given, cut or pad with empty month bins if needed
        Parameters
        ----------
        mutations: List[Mutations]
            The mutations in this dataset
        description: str, optional
            Human-readable name used in __str__ and plot labels
        from_month: Month, optional
            Start with this month. Defaults to first month in the mutations
        to_month: Month, optional
            End with this month. Defaults to last month in the mutations
        """
        super().__init__(mutations, description)
        # MonthSet might have missing months. Make into range
        self.data = self.make_into_series(self.data, from_month, to_month)
    @classmethod
    def from_month_set(cls, month_set, from_month=None, to_month=None):
        """Create a MonthSeries from A MonthSet.
        For efficient casting from MonthSet. Without needing to sort all mutations again
        Parameters
        ----------
        month_set: MonthSet
            The MonthSet instance to create MonthSeries from
        from_month: Month, optional
            Start with this month. Defaults to first month in the mutations
        to_month: Month, optional
            End with this month. Defaults to last month in the mutations
        """
        if not from_month:
            from_month = month_set.min_month
        if not to_month:
            to_month = month_set.max_month
        series = cls(mutations=month_set.mutations, description=month_set.description)
        # reuse the already-binned data instead of re-sorting all mutations
        series.data = series.make_into_series(
            month_set.data, from_month=from_month, to_month=to_month
        )
        return series
    def make_into_series(self, bin_dict, from_month, to_month):
        """Make the given dictionary of bins into a series of consecutive months
        Parameters
        ----------
        bin_dict: OrderedDict[Month, MonthBin]:
            A collection of month bins. Does not need to be ordered or consecutive
        from_month: Month, optional
            Start with this month. Defaults to first month in the mutations
        to_month: Month, optional
            End with this month. Defaults to last month in the mutations
        Returns
        -------
        OrderedDict[Month, MonthBin]
        """
        if not bin_dict:
            return bin_dict  # handle empty input dict
        if not from_month:
            from_month = self.min_month
        if not to_month:
            to_month = self.max_month
        bin_dict_series = OrderedDict()
        # pad months without mutations with empty bins
        for month in self.get_month_range(from_month, to_month):
            if month in bin_dict:
                bin_dict_series[month] = bin_dict[month]
            else:
                bin_dict_series[month] = MonthBin(mutations=[], month=month)
        return bin_dict_series
@total_ordering
class Month:
    """A calendar month: like ``datetime.date``, but the day is always 1.

    Instances compare by month (``total_ordering`` fills in the rest from
    ``__lt__``/``__eq__``) and are hashable, so they can be dict keys.
    """
    def __init__(self, date):
        """
        Parameters
        ----------
        date: datetime.date or str
            When a date-like object, only the year and month are used.
            When a string, it needs to have yyyy/mm format

        Raises
        ------
        ValueError
            When `date` is neither a str nor a datetime.date, or a string
            does not parse as yyyy/mm
        """
        # isinstance instead of `type() ==`: this also accepts
        # datetime.datetime, which is a subclass of datetime.date.
        # Check str first, since str is unrelated to date.
        if isinstance(date, str):
            self.date = datetime.datetime.strptime(date, "%Y/%m").date()
        elif isinstance(date, datetime.date):
            # Normalize to the first day of the month.
            self.date = datetime.date(year=date.year, month=date.month, day=1)
        else:
            raise ValueError(
                f"parameter date needs to be str or datetime.date, found {type(date)}"
            )
    def __str__(self):
        return f"{self.date.year}/{self.date.month}"
    def __lt__(self, other):
        return self.date < other.date
    def __eq__(self, other):
        return self.date == other.date
    def __hash__(self):
        # Defining __eq__ disables inherited hashing; keep hash consistent
        # with equality by hashing the normalized date.
        return hash(self.date)
class MonthBin:
    """All mutations that fall within one single month."""
    def __init__(self, mutations, month: Month):
        """
        Parameters
        ----------
        mutations: List[mutations]
            every mutation belonging to this month
        month: Month
            the month this bin represents
        """
        self.mutations = mutations
        self.month = month
    def __len__(self):
        return len(self.mutations)
    def __iter__(self):
        return iter(self.mutations)
    @property
    def date(self):
        """This bin's month as a plain date.

        Returns
        -------
        datetime.date
            The month as a date, set on the 1st of that month
        """
        return self.month.date
    def __str__(self):
        return f"Bin {self.month}"
    def __lt__(self, other):
        return self.month < other.month
    def sum(self) -> float:
        """Total of all mutation amounts in this bin."""
        return sum(mutation.amount for mutation in self.mutations)
    def sum_in(self) -> float:
        """Total of the positive (incoming) amounts in this bin."""
        return sum(mutation.amount for mutation in self.mutations if mutation.amount > 0)
    def sum_out(self) -> float:
        """Total of the negative (outgoing) amounts in this bin."""
        return sum(mutation.amount for mutation in self.mutations if mutation.amount < 0)
def month_iterator(start_month, end_month):
    """Yield every Month from start_month up to and including end_month."""
    month = start_month
    while month <= end_month:
        yield month
        # Advance one month, rolling December over into January of next year.
        year, month_nr = month.date.year, month.date.month
        if month_nr == 12:
            year, month_nr = year + 1, 1
        else:
            month_nr += 1
        month = Month(datetime.date(year=year, month=month_nr, day=1))
class MonthMatrix(Plottable, UserDict):
    """Holds MonthSeries for a number of categories. Dictionary of str: MonthSeries.

    All contained series are padded to the same overall month range, so they
    can be rendered as aligned stacked bars.
    """
    def __init__(self, filtered_data_list):
        """
        Parameters
        ----------
        filtered_data_list: List[MutationSet]
            One MutationSet per category; its description becomes the dict key
        """
        super().__init__()
        # Separate each mutations list into months
        sets = [MonthSet(mutations=x.mutations, description=x.description) for x in filtered_data_list]
        # determine the full month range of all sets
        self.min_month = min([x.min_month for x in sets])
        self.max_month = max([x.max_month for x in sets])
        # make all sets into series of the same length
        series = {x.description: MonthSeries.from_month_set(x, self.min_month, self.max_month) for x in sets}
        self.data = series
    def descriptions(self):
        # Category names, in insertion order.
        return list(self.data.keys())
    def get_month_range(self):
        """Get all months in between min and max months. For consistent plotting.

        Returns
        -------
        List[Month]
        """
        return [x for x in month_iterator(self.min_month, self.max_month)]
    def matrix(self):
        """Data per month, per category as a 2D array

        NOTE(review): this method references ``self.per_month_list`` and
        ``per_month.get_series(...)``, neither of which is defined on this
        class -- calling it will raise AttributeError. Looks like dead code
        from an earlier revision; confirm before relying on it.

        Returns
        -------
        Dict[Month, List[MonthBin]]
        """
        matrix = {}
        for per_month in self.per_month_list:
            matrix[per_month.description] = per_month.get_series(
                self.min_month, self.max_month
            )
        return matrix
    def plot(self, ax=None):
        """Plot this matrix as a stacked graph

        NOTE(review): the ``ax`` argument is currently ignored -- everything
        is drawn through the ``plt`` state machine -- and nothing is returned
        despite the docstring below; confirm whether callers use either.

        Parameters
        ----------
        ax: matplotlib.Axes, optional
            plot into this axes. Defaults to None, in which case a new axes will be created
            for this plot

        Returns
        -------
        matplotlib.Axes
            The axes into which this plot has been made
        """
        # get sums for each series, truncated to whole amounts
        sums = {x: [int(z) for z in y.sums()] for x, y in self.data.items()}
        months = self.get_month_range()
        ind = np.arange(len(months))  # the x locations for the bars
        # work out bottom, height for each series by stacking for each month
        test = 1  # NOTE(review): dead leftover variable, never read
        # start with a height of 0
        current_height = [0] * len(months)
        handles = []
        for series_sums in sums.values():
            handles.append(plt.bar(ind, series_sums, width=0.8, bottom=current_height))
            # Next series starts on top of everything drawn so far.
            current_height = piece_wise_add(current_height, series_sums)
        # arange for len(months)
        # plt.bar(ind, series, width, bottom)
        plt.ylabel('amount')
        #plt.title('Scores by group and gender')
        plt.xticks(ind, [str(x.date.strftime("%b `%y")) for x in months])
        #plt.yticks(np.arange(0, 81, 10))
        plt.legend(reversed([x[0] for x in handles]), reversed(self.descriptions()))
def piece_wise_add(list_a, list_b):
    """Element-wise sum of two lists.

    Pairs elements up with zip, so the result is as long as the shorter
    input. Implemented in plain Python (no numpy) so it also works with
    types such as Decimal that numpy handles poorly.
    """
    summed = []
    for left, right in zip(list_a, list_b):
        summed.append(left + right)
    return summed
| StarcoderdataPython |
3598722 | # -*- coding: utf-8 -*-
"""
@date: 2022/5/9 下午3:48
@file: spoc.py
@author: zj
@description:
"""
import torch
spatial_weight_cache = dict()
def get_spatial_weight(h, w):
    """Return a (1, 1, h, w) Gaussian spatial weight map with center prior.

    The weight peaks (value 1.0) at the spatial center of the (h, w) grid and
    decays with a Gaussian of sigma = min(h, w) / 6, so the shorter side spans
    roughly +/- 3 sigma. Results are memoized per (h, w) in
    ``spatial_weight_cache``.

    Parameters
    ----------
    h, w: int
        Height and width of the feature map.

    Returns
    -------
    torch.Tensor
        Weight map of shape (1, 1, h, w), ready to broadcast over
        (N, C, h, w) feature maps.
    """
    if (h, w) not in spatial_weight_cache:
        sigma = min(h, w) / 2.0 / 3.0
        # torch.arange instead of the legacy torch.Tensor(range(...))
        # constructor; y is a column vector so x and y broadcast to (h, w).
        x = torch.arange(w, dtype=torch.float32)
        y = torch.arange(h, dtype=torch.float32)[:, None]
        weight = torch.exp(-((x - (w - 1) / 2.0) ** 2 + (y - (h - 1) / 2.0) ** 2) / 2.0 / (sigma ** 2))
        # Add singleton batch and channel dimensions.
        spatial_weight_cache[(h, w)] = weight[None, None, :, :]
    return spatial_weight_cache[(h, w)]
| StarcoderdataPython |
6703218 | import RPi.GPIO as GPIO
# Named GPIO to physical pin mapping and mains plug with color of wire and default relay state (Normally Open, Normally Closed)
# Schema per entry:
#   name    -- human-readable device label
#   gpio    -- BCM GPIO number driving the relay channel
#   color   -- wire color on the mains plug
#   plug    -- mains plug socket number (None when not on the mains block)
#   pin     -- relay-board channel number
#   default -- GPIO level applied as the initial/safe state (GPIO.LOW / GPIO.HIGH)
#   state   -- runtime state, starts as None (presumably filled in by the
#              controller at runtime -- confirm)
PINS = {
    'heat' : {
        "name" : "Heat Mat",
        "gpio" : 23,
        "color" : "grey",
        "plug" : 4,
        "pin" : 1,
        "default" : GPIO.LOW,
        "state" : None
    },
    'aerator' : {
        "name" : "Aerator",
        "gpio" : 24,
        "color" : "brown",
        "plug" : 3,
        "pin" : 2,
        "default" : GPIO.LOW,
        "state" : None
    },
    'light' : {
        "name" : "LED Light Array",
        "gpio" : 17,
        "color" : "red",
        "plug" : 2,
        "pin" : 3,
        "default" : GPIO.LOW,
        "state" : None
    },
    'psu' : {
        "name" : "Power Supply",
        "gpio" : 27,
        "color" : "orange",
        "plug" : 1,
        "pin" : 4,
        "default" : GPIO.LOW,
        "state" : None
    },
    'pump' : {
        "name" : "Water Pump",
        "gpio" : 20,
        "color" : "green",
        "plug" : None,
        "pin" : 1,
        "default" : GPIO.HIGH,
        "state" : None
    },
    'fan' : {
        "name" : "Exhaust Fan (Top)",
        "gpio" : 21,
        "color" : "yellow",
        "plug" : None,
        "pin" : 2,
        "default" : GPIO.HIGH,
        "state" : None
    }
}
| StarcoderdataPython |
12865703 | """
Defines caching before for user preferences
"""
import jwt
import time
from cachetools import TTLCache
from typing import Optional
class CredentialCache(TTLCache):
    """
    Subclass of TTLCache that temporarily stores and retrieves user login
    credentials under the single key "access_token".

    Arguments:
        TTLCache {TTLCache} -- A TTLCache object
    """
    def cache_key(self, key):
        """
        Adds an access token to the cache.

        Arguments:
            key {str} -- Google access token (a JWT).
        """
        self["access_token"] = key
    def get_key(self) -> Optional[str]:
        """
        Retrieve the token from the cache, or None when absent or expired.

        The JWT is decoded without signature verification, purely to read its
        "exp" claim. NOTE(review): ``verify=False`` is the PyJWT 1.x calling
        convention; PyJWT 2.x requires
        ``jwt.decode(token, options={"verify_signature": False})`` -- confirm
        the pinned PyJWT version.
        """
        if "access_token" in self and self["access_token"]:
            try:
                decode = jwt.decode(self["access_token"], verify=False)
                exp = decode["exp"]
                if time.time() > exp:
                    print(
                        "Your token has expired! Please log in to the web portal and get a new token."
                    )
                    # Drop the expired token; the return below then yields None.
                    self["access_token"] = None
                return self["access_token"]
            except jwt.exceptions.DecodeError:
                # NOTE(review): an undecodable token is reported but still
                # returned below if truthy -- confirm that is intended.
                print("This token is not a valid JWT!")
                if self["access_token"]:
                    return self["access_token"]
                else:
                    return None
| StarcoderdataPython |
11349661 | #!/usr/bin/python
#
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import Dataset
import torchvision.transforms as T
import numpy as np
import h5py, json
import PIL
from .utils import imagenet_preprocess, Resize
sg_task = True
def conv_src_to_target(voc_s, voc_t):
    """Map object indices of the source vocab to indices of the target vocab.

    Object names shared by both vocabularies act as the join key: for every
    (name, index) in the source vocab, the result maps that source index to
    the target vocab's index for the same name.
    """
    source_names = voc_s['object_name_to_idx']
    target_names = voc_t['object_name_to_idx']
    return {idx: target_names[name] for name, idx in source_names.items()}
class SceneGraphWithPairsDataset(Dataset):
    """Dataset yielding (target, source) image pairs with their scene graphs.

    Target samples come from ``h5_path`` / ``image_dir``; the paired source
    samples are read from a sibling ``source`` directory using the same file
    names.  Source object labels are remapped into the target vocabulary.
    """
    def __init__(self, vocab, h5_path, image_dir, image_size=(256, 256),
                 normalize_images=True, max_objects=10, max_samples=None,
                 include_relationships=True, use_orphaned_objects=True,
                 mode='train', clean_repeats=True):
        super(SceneGraphWithPairsDataset, self).__init__()
        assert mode in ["train", "eval", "auto_withfeats", "auto_nofeats", "reposition", "remove", "replace"]
        # The source dataset lives in a "source" directory that is a sibling
        # of the directory containing h5_path; each holds its own vocab.json.
        CLEVR_target_dir = os.path.split(h5_path)[0]
        CLEVR_SRC_DIR = os.path.join(os.path.split(CLEVR_target_dir)[0], 'source')
        vocab_json_s = os.path.join(CLEVR_SRC_DIR, "vocab.json")
        vocab_json_t = os.path.join(CLEVR_target_dir, "vocab.json")
        with open(vocab_json_s, 'r') as f:
            vocab_src = json.load(f)
        with open(vocab_json_t, 'r') as f:
            vocab_t = json.load(f)
        self.mode = mode
        self.image_dir = image_dir
        self.image_source_dir = os.path.join(os.path.split(image_dir)[0], 'source')
        # Source h5 file is expected to share h5_path's basename.
        src_h5_path = os.path.join(self.image_source_dir, os.path.split(h5_path)[-1])
        print(self.image_dir, src_h5_path)
        self.image_size = image_size
        self.vocab = vocab
        self.vocab_src = vocab_src
        self.vocab_t = vocab_t
        self.num_objects = len(vocab['object_idx_to_name'])
        self.use_orphaned_objects = use_orphaned_objects
        self.max_objects = max_objects
        self.max_samples = max_samples
        self.include_relationships = include_relationships
        self.evaluating = mode != 'train'
        self.clean_repeats = clean_repeats
        transform = [Resize(image_size), T.ToTensor()]
        if normalize_images:
            transform.append(imagenet_preprocess())
        self.transform = T.Compose(transform)
        # Load the entire target h5 file into memory as int tensors.
        self.data = {}
        with h5py.File(h5_path, 'r') as f:
            for k, v in f.items():
                if k == 'image_paths':
                    self.image_paths = list(v)
                else:
                    self.data[k] = torch.IntTensor(np.asarray(v))
        # Same for the paired source h5 file.
        self.data_src = {}
        with h5py.File(src_h5_path, 'r') as f:
            for k, v in f.items():
                if k == 'image_paths':
                    self.image_paths_src = list(v)
                else:
                    self.data_src[k] = torch.IntTensor(np.asarray(v))
    def __len__(self):
        # Number of images, optionally capped by max_samples.
        num = self.data['object_names'].size(0)
        if self.max_samples is not None:
            return min(self.max_samples, num)
        return num
    def __getitem__(self, index):
        """
        Returns a tuple of:
        - image: FloatTensor of shape (C, H, W)
        - objs: LongTensor of shape (num_objs,)
        - boxes: FloatTensor of shape (num_objs, 4) giving boxes for objects in
          (x0, y0, x1, y1) format, in a [0, 1] coordinate system.
        - triples: LongTensor of shape (num_triples, 3) where triples[t] = [i, p, j]
          means that (objs[i], p, objs[j]) is a triple.
        The same four items are additionally returned for the paired source
        image (image_src, objs_src, boxes_src, triples_src).
        """
        img_path = os.path.join(self.image_dir, self.image_paths[index])
        # NOTE(review): the source path reuses self.image_paths (the target
        # list), not self.image_paths_src -- confirm both directories contain
        # identically named files.
        img_source_path = os.path.join(self.image_source_dir, self.image_paths[index])
        src_to_target_obj = conv_src_to_target(self.vocab_src, self.vocab_t)
        with open(img_path, 'rb') as f:
            with PIL.Image.open(f) as image:
                WW, HH = image.size
                image = self.transform(image.convert('RGB'))
        with open(img_source_path, 'rb') as f:
            with PIL.Image.open(f) as image_src:
                #WW, HH = image.size
                image_src = self.transform(image_src.convert('RGB'))
        H, W = self.image_size
        # Figure out which objects appear in relationships and which don't
        obj_idxs_with_rels = set()
        obj_idxs_without_rels = set(range(self.data['objects_per_image'][index].item()))
        for r_idx in range(self.data['relationships_per_image'][index]):
            s = self.data['relationship_subjects'][index, r_idx].item()
            o = self.data['relationship_objects'][index, r_idx].item()
            obj_idxs_with_rels.add(s)
            obj_idxs_with_rels.add(o)
            obj_idxs_without_rels.discard(s)
            obj_idxs_without_rels.discard(o)
        obj_idxs = list(obj_idxs_with_rels)
        obj_idxs_without_rels = list(obj_idxs_without_rels)
        # Keep at most max_objects objects (one slot is reserved for the
        # special __image__ object); optionally pad with orphaned objects.
        if len(obj_idxs) > self.max_objects - 1:
            obj_idxs = obj_idxs[:self.max_objects]
        if len(obj_idxs) < self.max_objects - 1 and self.use_orphaned_objects:
            num_to_add = self.max_objects - 1 - len(obj_idxs)
            num_to_add = min(num_to_add, len(obj_idxs_without_rels))
            obj_idxs += obj_idxs_without_rels[:num_to_add]
        num_objs = len(obj_idxs) + 1
        objs = torch.LongTensor(num_objs).fill_(-1)
        # Default box spans the whole image (used by the __image__ object).
        boxes = torch.FloatTensor([[0, 0, 1, 1]]).repeat(num_objs, 1)
        obj_idx_mapping = {}
        for i, obj_idx in enumerate(obj_idxs):
            objs[i] = self.data['object_names'][index, obj_idx].item()
            x, y, w, h = self.data['object_boxes'][index, obj_idx].tolist()
            # Normalize pixel boxes into [0, 1] relative coordinates.
            x0 = float(x) / WW
            y0 = float(y) / HH
            x1 = float(x + w) / WW
            y1 = float(y + h) / HH
            boxes[i] = torch.FloatTensor([x0, y0, x1, y1])
            obj_idx_mapping[obj_idx] = i
        # The last object will be the special __image__ object
        objs[num_objs - 1] = self.vocab['object_name_to_idx']['__image__']
        triples = []
        for r_idx in range(self.data['relationships_per_image'][index].item()):
            if not self.include_relationships:
                break
            s = self.data['relationship_subjects'][index, r_idx].item()
            p = self.data['relationship_predicates'][index, r_idx].item()
            o = self.data['relationship_objects'][index, r_idx].item()
            # Remap h5 object indices to positions in the objs tensor; drop
            # triples whose endpoints were cut by the max_objects limit.
            s = obj_idx_mapping.get(s, None)
            o = obj_idx_mapping.get(o, None)
            if s is not None and o is not None:
                if self.clean_repeats and [s, p, o] in triples:
                    continue
                triples.append([s, p, o])
        # Add dummy __in_image__ relationships for all objects
        in_image = self.vocab['pred_name_to_idx']['__in_image__']
        for i in range(num_objs - 1):
            triples.append([i, in_image, num_objs - 1])
        triples = torch.LongTensor(triples)
        # Source image: identical procedure on data_src, with object labels
        # remapped into the target vocabulary via src_to_target_obj.
        # NOTE(review): source boxes are normalized with the *target* image's
        # WW/HH -- confirm source and target images share dimensions.
        # Figure out which objects appear in relationships and which don't
        obj_idxs_with_rels_src = set()
        obj_idxs_without_rels_src = set(range(self.data_src['objects_per_image'][index].item()))
        for r_idx in range(self.data_src['relationships_per_image'][index]):
            s = self.data_src['relationship_subjects'][index, r_idx].item()
            o = self.data_src['relationship_objects'][index, r_idx].item()
            obj_idxs_with_rels_src.add(s)
            obj_idxs_with_rels_src.add(o)
            obj_idxs_without_rels_src.discard(s)
            obj_idxs_without_rels_src.discard(o)
        obj_idxs_src = list(obj_idxs_with_rels_src)
        obj_idxs_without_rels_src = list(obj_idxs_without_rels_src)
        if len(obj_idxs_src) > self.max_objects - 1:
            obj_idxs_src = obj_idxs_src[:self.max_objects]
        if len(obj_idxs_src) < self.max_objects - 1 and self.use_orphaned_objects:
            num_to_add = self.max_objects - 1 - len(obj_idxs_src)
            num_to_add = min(num_to_add, len(obj_idxs_without_rels_src))
            obj_idxs_src += obj_idxs_without_rels_src[:num_to_add]
        num_objs_src = len(obj_idxs_src) + 1
        objs_src = torch.LongTensor(num_objs_src).fill_(-1)
        boxes_src = torch.FloatTensor([[0, 0, 1, 1]]).repeat(num_objs_src, 1)
        obj_idx_mapping_src = {}
        for i, obj_idx in enumerate(obj_idxs_src):
            objs_src[i] = src_to_target_obj[self.data_src['object_names'][index, obj_idx].item()]
            x, y, w, h = self.data_src['object_boxes'][index, obj_idx].tolist()
            x0 = float(x) / WW
            y0 = float(y) / HH
            x1 = float(x + w) / WW
            y1 = float(y + h) / HH
            boxes_src[i] = torch.FloatTensor([x0, y0, x1, y1])
            obj_idx_mapping_src[obj_idx] = i
        # The last object will be the special __image__ object
        objs_src[num_objs_src - 1] = self.vocab_src['object_name_to_idx']['__image__']
        triples_src = []
        for r_idx in range(self.data_src['relationships_per_image'][index].item()):
            if not self.include_relationships:
                break
            s = self.data_src['relationship_subjects'][index, r_idx].item()
            p = self.data_src['relationship_predicates'][index, r_idx].item()
            o = self.data_src['relationship_objects'][index, r_idx].item()
            s = obj_idx_mapping_src.get(s, None)
            o = obj_idx_mapping_src.get(o, None)
            if s is not None and o is not None:
                if self.clean_repeats and [s, p, o] in triples_src:
                    continue
                triples_src.append([s, p, o])
        # Add dummy __in_image__ relationships for all objects
        in_image = self.vocab_src['pred_name_to_idx']['__in_image__']
        for i in range(num_objs_src - 1):
            triples_src.append([i, in_image, num_objs_src - 1])
        triples_src = torch.LongTensor(triples_src)
        return image, image_src, objs, objs_src, boxes, boxes_src, triples, triples_src
def collate_fn_withpairs(batch):
    """
    Collate function to be used when wrapping a SceneGraphWithPairsDataset in a
    DataLoader. Returns a tuple of the following:
    - imgs, imgs_src: target and source FloatTensors of shape (N, C, H, W)
    - objs, objs_src: target and source LongTensors of shape (num_objs,) giving categories for all objects
    - boxes, boxes_src: target and source FloatTensors of shape (num_objs, 4) giving boxes for all objects
    - triples, triples_src: target and source FloatTensors of shape (num_triples, 3) giving all triples, where
      triples[t] = [i, p, j] means that [objs[i], p, objs[j]] is a triple
    - obj_to_img: LongTensor of shape (num_objs,) mapping objects to images;
      obj_to_img[i] = n means that objs[i] belongs to imgs[n]
    - triple_to_img: LongTensor of shape (num_triples,) mapping triples to images;
      triple_to_img[t] = n means that triples[t] belongs to imgs[n]
    - imgs_masked: FloatTensor of shape (N, 4, H, W)
    """
    # batch is a list, and each element is (image, objs, boxes, triples)
    all_imgs, all_imgs_src, all_objs, all_objs_src, all_boxes, all_boxes_src, all_triples, all_triples_src = [], [], [], [], [], [], [], []
    all_obj_to_img, all_triple_to_img = [], []
    all_imgs_masked = []
    obj_offset = 0
    for i, (img, image_src, objs, objs_src, boxes, boxes_src, triples, triples_src) in enumerate(batch):
        all_imgs.append(img[None])
        all_imgs_src.append(image_src[None])
        num_objs, num_triples = objs.size(0), triples.size(0)
        all_objs.append(objs)
        all_objs_src.append(objs_src)
        all_boxes.append(boxes)
        all_boxes_src.append(boxes_src)
        # Clone before shifting indices so the dataset's cached tensors are
        # not mutated across epochs.
        triples = triples.clone()
        triples_src = triples_src.clone()
        # Shift per-image object indices into the flattened batch index space.
        triples[:, 0] += obj_offset
        triples[:, 2] += obj_offset
        all_triples.append(triples)
        # NOTE(review): source triples are shifted by the *target* object
        # offset; this is only correct when every sample has
        # num_objs == num_objs_src -- confirm.
        triples_src[:, 0] += obj_offset
        triples_src[:, 2] += obj_offset
        all_triples_src.append(triples_src)
        all_obj_to_img.append(torch.LongTensor(num_objs).fill_(i))
        all_triple_to_img.append(torch.LongTensor(num_triples).fill_(i))
        # prepare input 4-channel image
        # initialize mask channel with zeros
        masked_img = image_src.clone()
        mask = torch.zeros_like(masked_img)
        mask = mask[0:1,:,:]
        masked_img = torch.cat([masked_img, mask], 0)
        all_imgs_masked.append(masked_img[None])
        obj_offset += num_objs
    all_imgs_masked = torch.cat(all_imgs_masked)
    all_imgs = torch.cat(all_imgs)
    all_imgs_src = torch.cat(all_imgs_src)
    all_objs = torch.cat(all_objs)
    all_objs_src = torch.cat(all_objs_src)
    all_boxes = torch.cat(all_boxes)
    all_boxes_src = torch.cat(all_boxes_src)
    all_triples = torch.cat(all_triples)
    all_triples_src = torch.cat(all_triples_src)
    all_obj_to_img = torch.cat(all_obj_to_img)
    all_triple_to_img = torch.cat(all_triple_to_img)
    out = (all_imgs, all_imgs_src, all_objs, all_objs_src, all_boxes, all_boxes_src, all_triples, all_triples_src,
           all_obj_to_img, all_triple_to_img, all_imgs_masked)
    return out
| StarcoderdataPython |
8175079 | <reponame>xswz8015/infra<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: chromeperf/pinpoint/comparison.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='chromeperf/pinpoint/comparison.proto',
package='chromeperf.pinpoint',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n$chromeperf/pinpoint/comparison.proto\x12\x13\x63hromeperf.pinpoint\"\xef\x01\n\nComparison\x12=\n\x06result\x18\x01 \x01(\x0e\x32-.chromeperf.pinpoint.Comparison.CompareResult\x12\x0f\n\x07p_value\x18\x02 \x01(\x01\x12\x15\n\rlow_threshold\x18\x03 \x01(\x01\x12\x16\n\x0ehigh_threshold\x18\x04 \x01(\x01\"b\n\rCompareResult\x12\x1e\n\x1a\x43OMPARE_RESULT_UNSPECIFIED\x10\x00\x12\r\n\tDIFFERENT\x10\x01\x12\x08\n\x04SAME\x10\x02\x12\x0b\n\x07UNKNOWN\x10\x03\x12\x0b\n\x07PENDING\x10\x04\x62\x06proto3'
)
_COMPARISON_COMPARERESULT = _descriptor.EnumDescriptor(
name='CompareResult',
full_name='chromeperf.pinpoint.Comparison.CompareResult',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='COMPARE_RESULT_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DIFFERENT', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SAME', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PENDING', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=203,
serialized_end=301,
)
_sym_db.RegisterEnumDescriptor(_COMPARISON_COMPARERESULT)
_COMPARISON = _descriptor.Descriptor(
name='Comparison',
full_name='chromeperf.pinpoint.Comparison',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='chromeperf.pinpoint.Comparison.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='p_value', full_name='chromeperf.pinpoint.Comparison.p_value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='low_threshold', full_name='chromeperf.pinpoint.Comparison.low_threshold', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='high_threshold', full_name='chromeperf.pinpoint.Comparison.high_threshold', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_COMPARISON_COMPARERESULT,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=301,
)
_COMPARISON.fields_by_name['result'].enum_type = _COMPARISON_COMPARERESULT
_COMPARISON_COMPARERESULT.containing_type = _COMPARISON
DESCRIPTOR.message_types_by_name['Comparison'] = _COMPARISON
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Comparison = _reflection.GeneratedProtocolMessageType('Comparison', (_message.Message,), {
'DESCRIPTOR' : _COMPARISON,
'__module__' : 'chromeperf.pinpoint.comparison_pb2'
# @@protoc_insertion_point(class_scope:chromeperf.pinpoint.Comparison)
})
_sym_db.RegisterMessage(Comparison)
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
11287244 | import os
import sys
import numpy as np
from random import shuffle
assert len(sys.argv) == 3, "Usage: Python util.py [index authora] [index authorb]" ;
def process(a, capa=None, capb=None):
    """Split documents from 'data.csv' into fixed-size word windows.

    Reads 'data.csv' (header line, then one "<text>,<author>" row per line),
    chops each selected text into non-overlapping 15-word windows and writes
    them one window per line:
      - rows whose author equals `a` go to '<a>.out' (at most `capa` rows),
      - rows from other authors go to 'many.out' (up to `capb` + 1 rows per
        author index; author indices are assumed < 51).

    Parameters
    ----------
    a: int or str
        Author index whose rows go into the dedicated '<a>.out' file.
    capa: int, optional
        Maximum number of rows taken for author `a`. None means unlimited.
    capb: int, optional
        Per-author row cap for the 'many.out' pool. None disables the pool.
    """
    a = str(a)
    step = 15
    start = step // 2
    stop = 1000 - 3 * start  # texts are assumed to hold ~1000 tokens
    if capa is None:
        capa = -1  # counts down past zero, so never hits the `== 0` stop
    if capb is None:
        capb = -1  # counts start at 0 > -1, so nothing is collected
    counts = [0] * 51  # per-author row counter for the 'many' pool
    # Context managers: the output files were previously opened without ever
    # being closed, leaking the handles (and risking unflushed data).
    with open(a + '.out', 'w') as aout, open('many.out', 'w') as bout, \
            open('data.csv', 'r') as f:
        f.readline()  # skip the CSV header
        data = f.readline().strip().split(',')
        while data != ['']:
            if data[1] == a and capa != 0:
                words = data[0].split(' ')
                # range() over ints replaces the needless np.arange here.
                for i in range(start, stop, step):
                    aout.write(''.join(word + ' ' for word in words[i:i + step]) + '\n')
                capa -= 1
            elif counts[int(data[1])] <= capb:
                ind = int(data[1])
                words = data[0].split(' ')
                for i in range(start, stop, step):
                    bout.write(''.join(word + ' ' for word in words[i:i + step]) + '\n')
                counts[ind] += 1
            data = f.readline().strip().split(',')
def split(name, a, is0, train=0.9, dev=0.05, test=0.05):
    """Shuffle the lines of '<a>.out' and split them into train/dev/test files.

    Writes '../data/<name>/<name>.{train,dev,test}.<is0>', one sample per
    line, creating the output directory tree when needed.

    Parameters
    ----------
    name: str
        Dataset name; used for the output directory and the file prefix.
    a: int or str
        Basename of the input file ('<a>.out').
    is0: int
        Class-label suffix for the output files; must be 0 or 1.
    train, dev, test: float
        Fraction of the data for each split; must sum to 1.0.
    """
    # Compare with a tolerance: the old exact `== 1.0` check fails for
    # perfectly valid splits such as 0.7 + 0.2 + 0.1, whose float sum is
    # 0.9999999999999999 -- which is exactly how this script calls split().
    assert abs(train + dev + test - 1.0) < 1e-9, "Split does not add up to 1.0"
    assert is0 in (0, 1), "Need bool"
    out_dir = os.path.join("..", "data", name)
    # makedirs(exist_ok=True) also creates the ../data parent when absent,
    # where the old os.mkdir would raise.
    os.makedirs(out_dir, exist_ok=True)
    prefix = os.path.join(out_dir, name)
    a = str(a)
    with open(a + '.out', 'r') as f:  # close the input file when done
        adata = f.read().strip().split('\n')
    shuffle(adata)
    total = len(adata)
    n_train = int(total * train)
    remainder = total - n_train
    # dev/test split the remainder proportionally; test gets the leftovers.
    n_dev = int(remainder * (dev / (dev + test)))
    n_test = remainder - n_dev
    print("Test {} Dev {} Train {}".format(n_test, n_dev, n_train))
    splits = [
        ('.train', adata[:n_train]),
        ('.dev', adata[n_train:n_train + n_dev]),
        ('.test', adata[n_train + n_dev:]),
    ]
    for suffix, lines in splits:
        with open(prefix + suffix + '.' + str(is0), 'w') as f:
            for line in lines:
                f.write(line + '\n')
# Script entry: build a "<author a> vs many" authorship dataset.
# NOTE(review): sys.argv[2] is validated at the top of the script but never
# used here -- confirm intent.
a = int(sys.argv[1])
# Windows for author `a`, plus capped windows for all other authors.
process(a,capb=180)
name = str(a) + '-many'
# Class 0 = the single author, class 1 = the pooled "many" authors.
split(name,a,0,train=0.7,test=0.2,dev=0.1)
split(name,'many',1,train=0.7,test=0.2,dev=0.1)
# Remove the intermediate window files.
os.system('rm {}'.format(str(a) + '.out'))
os.system('rm {}'.format('many' + '.out'))
| StarcoderdataPython |
8000814 | """
Unit tests for the ska_tmc_cdm.schemas.subarray_node.configure.sdp module.
"""
import pytest
from ska_tmc_cdm.messages.subarray_node.configure import SDPConfiguration
from ska_tmc_cdm.schemas.subarray_node.configure.sdp import SDPConfigurationSchema
from ... import utils
VALID_JSON = """
{
"interface": "https://schema.skao.int/ska-sdp-configure/0.3",
"scan_type": "science_A"
}
"""
VALID_OBJECT = SDPConfiguration(scan_type="science_A")
@pytest.mark.parametrize(
    "schema_cls,instance,modifier_fn,valid_json,invalid_json",
    [
        (SDPConfigurationSchema,
         VALID_OBJECT,
         None,
         VALID_JSON,
         None),
    ],
)
def test_sdpconfiguration_serialisation_and_validation(
    schema_cls, instance, modifier_fn, valid_json, invalid_json
):
    """
    Verifies that the SDP configuration schema marshals, unmarshals, and
    validates correctly.

    Renamed from test_releaseresources_serialisation_and_validation: the old
    name was a copy-paste leftover from the release-resources test module and
    misdescribed what this module exercises.
    """
    utils.test_schema_serialisation_and_validation(
        schema_cls, instance, modifier_fn, valid_json, invalid_json
    )
| StarcoderdataPython |
1804934 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.WorldTicketType import WorldTicketType
class WorldOfflineDataInfo(object):
    """Value object for the Alipay "world offline data" payload.

    Field values are exposed as plain properties; ``to_alipay_dict`` /
    ``from_alipay_dict`` convert between instances and the wire-format dicts
    used by the Alipay OpenAPI gateway.

    The original SDK-generated version repeated the same serialization
    boilerplate for all 14 fields; this version drives both conversions from
    one field list.  Unlike the original, ``to_alipay_dict`` no longer
    mutates ``available_ticket_types`` in place while serializing.
    """

    # Wire-format keys, in the order the SDK declared them.
    _KEYS = (
        'auth_mode',
        'available_ticket_types',
        'cert_type',
        'config',
        'data_from',
        'expire_time',
        'offline_data',
        'private_key',
        'qrcode',
        'script_mac',
        'script_name',
        'script_type',
        'upload_raw_code',
        'use_script',
    )

    def __init__(self):
        # All fields start unset; falsy fields are omitted from the
        # serialized dict.
        for key in self._KEYS:
            setattr(self, '_' + key, None)

    @property
    def auth_mode(self):
        return self._auth_mode
    @auth_mode.setter
    def auth_mode(self, value):
        self._auth_mode = value
    @property
    def available_ticket_types(self):
        return self._available_ticket_types
    @available_ticket_types.setter
    def available_ticket_types(self, value):
        # Coerce a wire-format list of dicts into WorldTicketType objects;
        # non-list values are ignored, matching the generated SDK behavior.
        if isinstance(value, list):
            self._available_ticket_types = [
                i if isinstance(i, WorldTicketType) else WorldTicketType.from_alipay_dict(i)
                for i in value
            ]
    @property
    def cert_type(self):
        return self._cert_type
    @cert_type.setter
    def cert_type(self, value):
        self._cert_type = value
    @property
    def config(self):
        return self._config
    @config.setter
    def config(self, value):
        self._config = value
    @property
    def data_from(self):
        return self._data_from
    @data_from.setter
    def data_from(self, value):
        self._data_from = value
    @property
    def expire_time(self):
        return self._expire_time
    @expire_time.setter
    def expire_time(self, value):
        self._expire_time = value
    @property
    def offline_data(self):
        return self._offline_data
    @offline_data.setter
    def offline_data(self, value):
        self._offline_data = value
    @property
    def private_key(self):
        return self._private_key
    @private_key.setter
    def private_key(self, value):
        self._private_key = value
    @property
    def qrcode(self):
        return self._qrcode
    @qrcode.setter
    def qrcode(self, value):
        self._qrcode = value
    @property
    def script_mac(self):
        return self._script_mac
    @script_mac.setter
    def script_mac(self, value):
        self._script_mac = value
    @property
    def script_name(self):
        return self._script_name
    @script_name.setter
    def script_name(self, value):
        self._script_name = value
    @property
    def script_type(self):
        return self._script_type
    @script_type.setter
    def script_type(self, value):
        self._script_type = value
    @property
    def upload_raw_code(self):
        return self._upload_raw_code
    @upload_raw_code.setter
    def upload_raw_code(self, value):
        self._upload_raw_code = value
    @property
    def use_script(self):
        return self._use_script
    @use_script.setter
    def use_script(self, value):
        self._use_script = value

    @staticmethod
    def _serialize(value):
        """Convert one field value to its wire representation."""
        if isinstance(value, list):
            return [WorldOfflineDataInfo._serialize(i) for i in value]
        if hasattr(value, 'to_alipay_dict'):
            return value.to_alipay_dict()
        return value

    def to_alipay_dict(self):
        """Serialize this object to a dict, omitting unset (falsy) fields."""
        params = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if value:
                params[key] = self._serialize(value)
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire-format dict.

        Returns None for an empty or missing input, mirroring the generated
        SDK behavior. Values are assigned through the property setters, so
        e.g. ticket-type dicts are coerced into WorldTicketType objects.
        """
        if not d:
            return None
        o = WorldOfflineDataInfo()
        for key in WorldOfflineDataInfo._KEYS:
            if key in d:
                setattr(o, key, d[key])
        return o
| StarcoderdataPython |
11301387 | <reponame>hrnoh/SpeechSplit<filename>conversion.py
import torch
import pickle
import numpy as np
from hparams import hparams
from utils import pad_seq_to_2
from utils import quantize_f0_numpy
from model import Generator_3 as Generator
from model import Generator_6 as F0_Converter
import matplotlib.pyplot as plt
import os
import glob

# Number of frames every utterance is padded to before entering the model.
out_len = 408
device = 'cuda:1'

# Restore the trained SpeechSplit generator in eval mode.
G = Generator(hparams).eval().to(device)
g_checkpoint = torch.load('run/models/234000-G.ckpt', map_location=lambda storage, loc: storage)
G.load_state_dict(g_checkpoint['model'])

# metadata entries look like (utterance id, speaker embedding, feature file
# name) -- TODO confirm against the preprocessing script.
with open('/hd0/speechsplit/preprocessed/spmel/train.pkl', "rb") as f:
    metadata = pickle.load(f)

# ---- source utterance ----
sbmt_i = metadata[0]
emb_org = torch.from_numpy(sbmt_i[1]).unsqueeze(0).to(device)

root_dir = "/hd0/speechsplit/preprocessed/spmel"   # mel-spectrograms
feat_dir = "/hd0/speechsplit/preprocessed/raptf0"  # RAPT f0 contours

# mel-spectrogram, f0 contour load
x_org = np.load(os.path.join(root_dir, sbmt_i[2]))
f0_org = np.load(os.path.join(feat_dir, sbmt_i[2]))
len_org = x_org.shape[0]

# Pad the mel to the fixed model length; quantize the padded f0 to one-hot.
uttr_org_pad, len_org_pad = pad_seq_to_2(x_org[np.newaxis,:,:], out_len)
uttr_org_pad = torch.from_numpy(uttr_org_pad).to(device)
f0_org_pad = np.pad(f0_org, (0, out_len-len_org), 'constant', constant_values=(0, 0))
f0_org_quantized = quantize_f0_numpy(f0_org_pad)[0]
f0_org_onehot = f0_org_quantized[np.newaxis, :, :]
f0_org_onehot = torch.from_numpy(f0_org_onehot).to(device)
uttr_f0_org = torch.cat((uttr_org_pad, f0_org_onehot), dim=-1)

# ---- target utterance ----
sbmt_j = metadata[1]
emb_trg = torch.from_numpy(sbmt_j[1]).unsqueeze(0).to(device)
#x_trg, f0_trg, len_trg, uid_trg = sbmt_j[2]
x_trg = np.load(os.path.join(root_dir, sbmt_j[2]))
f0_trg = np.load(os.path.join(feat_dir, sbmt_j[2]))
# BUG FIX: previously read x_org.shape[0] (copy/paste from the source side);
# the target length must come from the target spectrogram.
len_trg = x_trg.shape[0]

# Not used for now (target-side padding/quantization kept for reference):
# uttr_trg_pad, len_trg_pad = pad_seq_to_2(x_trg[np.newaxis,:,:], out_len)
# uttr_trg_pad = torch.from_numpy(uttr_trg_pad).to(device)
# f0_trg_pad = np.pad(f0_trg, (0, out_len-len_trg), 'constant', constant_values=(0, 0))
# f0_trg_quantized = quantize_f0_numpy(f0_trg_pad)[0]
# f0_trg_onehot = f0_trg_quantized[np.newaxis, :, :]
# f0_trg_onehot = torch.from_numpy(f0_trg_onehot).to(device)

# Reconstruction with the source speaker embedding (no conversion yet).
x_identic_val = G(uttr_f0_org, uttr_org_pad, emb_org)
output = x_identic_val.squeeze().cpu().detach().numpy()

plt.imshow(output.T)
# BUG FIX: invert_yaxis() used to be called after plt.show(), where it had no
# visible effect; flip the axis before displaying so low mel bins sit at the
# bottom.
plt.gca().invert_yaxis()
plt.show()
11293588 | <filename>Lists/Sort_With_ Sorted.py
# Demonstrates sorted(): it returns a NEW sorted list and leaves the input
# intact, unlike list.sort(), which sorts in place and returns None.
animals = ['chicken', 'cow', 'snail', 'elephant']
print(animals) # ['chicken', 'cow', 'snail', 'elephant']
s = sorted(animals)
print(s) # ['chicken', 'cow', 'elephant', 'snail']
print(animals) # ['chicken', 'cow', 'snail', 'elephant'] -- original unchanged
# reverse/key: sort by length, longest first.
r = sorted(animals, reverse=True, key=len)
print(r) # ['elephant', 'chicken', 'snail', 'cow']
print(animals) # ['chicken', 'cow', 'snail', 'elephant']
# sort vs. sorted
# The sort() method will sort a list in-place and return None.
# The built-in sorted() function will return the sorted list and leave the original list intact.

# key sort with sorted
# To sort the list according to length using sorted
animals = ['snail', 'cow', 'elephant', 'chicken']
animals_in_abc = sorted(animals)
print(animals)
print(animals_in_abc)
animals_by_length = sorted(animals, key=len)
print(animals_by_length)
# Expected output (previously pasted here as bare list expressions, which
# were evaluated and silently discarded at runtime):
# ['snail', 'cow', 'elephant', 'chicken']
# ['chicken', 'cow', 'elephant', 'snail']
# ['cow', 'snail', 'chicken', 'elephant']

# Sorting characters of a string: sorted() accepts any iterable and always
# returns a list; join to get a string back.
letters = 'axzb'
print(letters) # 'axzb'
s = sorted(letters)
print(s) # ['a', 'b', 'x', 'z']
print(letters) # 'axzb'
r = ''.join(sorted(letters))
print(r) # abxz
5010044 | <reponame>exogen/80sheep
import base64
import logging
# Names exported by ``from <module> import *``.
__all__ = ['String', 'Base32', 'Set', 'Flag', 'Delimited', 'Integer',
           'Boolean', 'BitField', 'Parameter', 'ParameterCollection']

# Module-level logger, named after this module.
log = logging.getLogger(__name__)
class ParameterType(object):
    """Base class for parameter codecs: identity encode/decode plus slicing."""

    def encode(self, value):
        """Return *value* unchanged (identity encoding)."""
        return value

    def decode(self, data):
        """Return *data* unchanged (identity decoding)."""
        return data

    def extract_data(self, data):
        """Slice the first element of *data* with this codec's slice.

        NOTE(review): relies on ``self.slice`` being provided by a subclass
        or the owning Parameter -- ParameterType itself never sets it.
        """
        first = data[0]
        return first[self.slice]
class String(ParameterType):
    """Pass-through codec for plain string values (identity encode/decode)."""
    pass
class Base32(ParameterType):
    """Codec that stores byte strings as RFC 4648 base32 text.

    BUG FIX: this module previously never imported ``base64``, so both
    methods raised NameError the first time they were called.
    """

    def encode(self, value):
        """Yield *value* encoded as base32."""
        yield base64.b32encode(value)

    def decode(self, data):
        """Return the byte string decoded from base32 *data*."""
        return base64.b32decode(data)
class Set(ParameterType):
    """Codec for unordered collections of values.

    ``type``, when given, is a callable applied to every decoded element.
    ``finder`` is stored for callers; it is not used inside this class.
    """

    def __init__(self, finder=None, type=None):
        self.finder = finder
        self.type = type

    def decode(self, data):
        """Decode *data* (an iterable) into a set, coercing each element."""
        log.debug("Decoding %r...", data)
        convert = self.type
        decoded = set()
        for item in data:
            decoded.add(convert(item) if convert is not None else item)
        log.debug("Decoded: %r", decoded)
        return decoded

    def encode(self, values):
        """Yield each member rendered as a unicode string."""
        for item in values:
            yield unicode(item)

    def extract_data(self, data):
        """Slice *data* directly (unlike the base class, no [0] indexing)."""
        return data[self.slice]
class Flag(ParameterType):
    """Codec for presence flags: truthy -> empty string, falsy -> None."""

    def encode(self, value):
        """Yield '' when the flag is set, None when it is not."""
        yield '' if value else None

    def decode(self, data):
        """Any stored value at all means the flag was present."""
        return True
class Delimited(ParameterType):
    """Codec that joins/splits values around a delimiter (default ',')."""

    def __init__(self, delimiter=','):
        self.delimiter = delimiter

    def encode(self, value):
        """Return the items joined into one delimited unicode string.

        NOTE(review): returns directly while sibling codecs yield -- confirm
        callers handle both shapes.
        """
        return self.delimiter.join(unicode(item) for item in value)

    def decode(self, data):
        """Split the stored string back into a list of substrings."""
        return data.split(self.delimiter)
class Integer(ParameterType):
    """Codec for integral values stored as strings."""

    def encode(self, value):
        """Yield the integer rendered as a unicode string."""
        yield unicode(value)

    def decode(self, data):
        """Parse the stored string back to an int."""
        return int(data)
class Boolean(ParameterType):
    """Codec for booleans stored as '1' (true) / '0' (false)."""

    def encode(self, value):
        """Yield '1' for truthy values, '0' for falsy ones."""
        if value:
            yield '1'
        else:
            yield '0'

    def decode(self, data):
        """Return False only for the exact string '0', True otherwise.

        BUG FIX: the previous ``data in '0'`` was a substring test, which
        wrongly returned False for the empty string and True for '00'.
        """
        return data != '0'
class BitField(ParameterType):
    """Codec packing a subset of ``bits`` (an ordered list of flags) into an int.

    Position ``i`` in ``bits`` corresponds to bit value ``1 << i``.
    """

    def __init__(self, bits):
        self.bits = bits

    def encode(self, value):
        """Return the decimal string of the OR of the members' bit values.

        Items not found in ``bits`` are silently ignored.
        """
        packed = 0
        for item in value:
            try:
                index = self.bits.index(item)
            except ValueError:
                pass
            else:
                # BUG FIX: was ``packed |= index + 1``, which collides for
                # index >= 2 (index 2 gave 0b11 instead of 0b100) and broke
                # the round trip with decode() below.
                packed |= 1 << index
        return str(packed)

    def decode(self, data):
        """Rebuild the set of flag items from the stored decimal string."""
        packed = int(data)
        members = set()
        for i, item in enumerate(self.bits):
            if packed & (1 << i):
                members.add(item)
        return members
class Parameter(object):
    """Data descriptor tying a wire key to a ParameterType codec.

    Per-instance values live in a dict stored on the owning instance under
    ``STATE_ATTR``, keyed by the Parameter object itself.
    """

    STATE_ATTR = '_param_state'

    def __init__(self, key, type=String(), default=None):
        # Accept either a codec instance or a codec class/factory.
        if not isinstance(type, ParameterType) and callable(type):
            type = type()
        self.key = key
        self.type = type
        self.default = default
        self.slice = slice(None)

    def __get__(self, instance, owner):
        if instance is None:
            return self
        state = instance.__dict__.get(self.STATE_ATTR, {})
        return state.get(self, self.default)

    def __set__(self, instance, value):
        instance.__dict__.setdefault(self.STATE_ATTR, {})[self] = value

    def __getitem__(self, item):
        # NOTE(review): mutates the shared descriptor (class-level state),
        # not a per-instance copy, and returns None -- confirm intentional.
        self.slice = item

    def encode(self, value):
        """Delegate encoding to the attached codec."""
        return self.type.encode(value)

    def decode(self, data):
        """Delegate decoding to the attached codec."""
        return self.type.decode(data)

    def extract_data(self, data):
        """Delegate raw extraction to the attached codec."""
        return self.type.extract_data(data)
class ParameterMapper(object):
    """Placeholder; not referenced anywhere in this module yet."""
    pass
class ParameterCollection(object):
    """Registry of Parameter objects, also indexed by their wire key."""

    def __init__(self):
        self.params = set()
        self.keys = {}

    def add(self, param):
        """Register *param* and append it to the bucket for its key."""
        self.params.add(param)
        bucket = self.keys.setdefault(param.key, [])
        bucket.append(param)

    def __contains__(self, param):
        return param in self.params
class DeclarativeParameterMeta(type):
    """Metaclass hook for collecting Parameter declarations.

    The collection logic is currently disabled, so class creation behaves
    exactly like ``type``.
    """

    def __new__(cls, name, bases, attrs):
        return type.__new__(cls, name, bases, attrs)
| StarcoderdataPython |
6643021 | import pandas as pd
# NOTE(review): declared but never populated or read in this script --
# presumably a leftover; candidate for removal.
crime_csvs = [
]
def load_and_reshape_police(filename):
    """Load a StatCan police-strength CSV and pivot it to a Year x Region table.

    Keeps only the 'Police officers per 100,000 population' rows, then pivots
    so each GEO becomes a column and REF_DATE (renamed 'Year') the index.
    """
    raw = pd.read_csv(filename, usecols=['REF_DATE', 'GEO', 'Statistics', 'VALUE'])
    per_capita = raw[raw["Statistics"] == "Police officers per 100,000 population"]
    table = per_capita.pivot(index='REF_DATE', columns='GEO', values='VALUE')
    table.index.rename('Year', inplace=True)
    return table
# Build one Year x Province table per provincial police-strength extract
# (StatCan table 35-10-0076-01), then join them column-wise.
police_dfs = [load_and_reshape_police(f) for f in [
    'data/3510007601_databaseLoadingData_NB.csv',
    'data/3510007601_databaseLoadingData_NL.csv',
    'data/3510007601_databaseLoadingData_NS.csv',
    'data/3510007601_databaseLoadingData_ON.csv',
    'data/3510007601_databaseLoadingData_PE.csv',
    'data/3510007601_databaseLoadingData_QC.csv']]
police = pd.concat(police_dfs, axis=1)
def load_and_reshape_crime(filename):
    """Load a StatCan crime-rate CSV and pivot it to a Year x Region table.

    Keeps only the 'Rate per 100,000 population' rows, pivots GEO values into
    columns, drops the trailing word from each column name (e.g. a bracketed
    geography code), and renames the index to 'Year'.
    """
    raw = pd.read_csv(filename, usecols=['REF_DATE', 'GEO', 'Statistics', 'VALUE'])
    rates = raw[raw["Statistics"] == "Rate per 100,000 population"]
    table = rates.pivot(index='REF_DATE', columns='GEO', values='VALUE')
    table.rename(columns=lambda name: ' '.join(name.split()[:-1]), inplace=True)
    table.index.rename('Year', inplace=True)
    return table
# Build one Year x Province table per provincial crime-rate extract
# (StatCan table 35-10-0177-01) and join them column-wise; column names have
# their trailing word removed inside load_and_reshape_crime().
crime_dfs = [load_and_reshape_crime(f) for f in [
    'data/3510017701_databaseLoadingData_NB.csv',
    'data/3510017701_databaseLoadingData_NL.csv',
    'data/3510017701_databaseLoadingData_NS.csv',
    'data/3510017701_databaseLoadingData_ON.csv',
    'data/3510017701_databaseLoadingData_PE.csv',
    'data/3510017701_databaseLoadingData_QC.csv'
]]
crime = pd.concat(crime_dfs, axis=1)
354163 | <reponame>adamcharnock/factorio-status-ui<gh_stars>1-10
#!/usr/bin/python
import asyncio
import socket
import struct
import sys
import logging
from factorio_status_ui.state import application_config
# Source RCON packet type ids. Note that the auth response and the command
# request legitimately share value 2 in this protocol.
MESSAGE_TYPE_AUTH = 3
MESSAGE_TYPE_AUTH_RESP = 2
MESSAGE_TYPE_COMMAND = 2
MESSAGE_TYPE_RESP = 0
# Fixed request id used for every packet this client sends.
MESSAGE_ID = 0
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
# Raised when the TCP connection to the RCON server fails or is refused.
class RconConnectionError(Exception): pass
# Raised when connecting, sending, or receiving exceeds the configured timeout.
class RconTimeoutError(Exception): pass
# Raised when the server rejects the RCON password (auth response id == -1).
class RconAuthenticatedFailed(Exception): pass
async def send_message(writer, command_string, message_type):
    """Package *command_string* into an RCON packet and send it on *writer*.

    Packet layout: int32 size (excluding the size field itself), int32 id,
    int32 type, UTF-8 body, two NUL terminators.

    Raises RconTimeoutError if draining the writer exceeds the configured
    timeout.
    """
    logger.debug('Send message to RCON server: {}'.format(command_string))
    try:
        # BUG FIX: the packet length was previously computed from
        # len(command_string) (characters) while the payload was packed as
        # UTF-8 bytes; struct silently truncated any non-ASCII command.
        body = command_string.encode('utf8')
        # id (4) + type (4) + body + two NUL terminators.
        message_size = 4 + 4 + len(body) + 2
        message_format = '=lll{}s2s'.format(len(body))
        packed_message = struct.pack(
            message_format, message_size, MESSAGE_ID, message_type, body, b'\x00\x00'
        )
        writer.write(packed_message)
        await asyncio.wait_for(
            writer.drain(),
            timeout=application_config.rcon_timeout
        )
    except asyncio.TimeoutError:
        raise RconTimeoutError('Timeout sending RCON message. type={}, command={}'.format(message_type, command_string))
async def get_response(reader):
    """Read one RCON response packet from *reader* and unpack it.

    Returns ``(body_bytes, response_id, response_type)`` with trailing NULs
    and newlines stripped from the body.

    Raises RconTimeoutError on timeout and RconConnectionError if the stream
    closes mid-packet.
    """
    try:
        # BUG FIX: StreamReader.read(n) may legally return fewer than n bytes;
        # use readexactly() so partial TCP reads cannot corrupt the unpack.
        # The 4-byte size read is now also bounded by the timeout.
        size_data = await asyncio.wait_for(
            reader.readexactly(4),
            timeout=application_config.rcon_timeout
        )
        response_size, = struct.unpack('=l', size_data)
        # Body = size - id(4) - type(4) - 1 trailing NUL kept separate below.
        message_format = '=ll{}s1s'.format(response_size - 9)
        response_data = await asyncio.wait_for(
            reader.readexactly(response_size),
            timeout=application_config.rcon_timeout
        )
        response_id, response_type, response_string, response_dummy \
            = struct.unpack(message_format, response_data)
        response_string = response_string.rstrip(b'\x00\n')
        return response_string, response_id, response_type
    except asyncio.IncompleteReadError:
        raise RconConnectionError('RCON connection closed mid-response')
    except asyncio.TimeoutError:
        raise RconTimeoutError('Timeout receiving RCON response')
class RconConnection():
    """Async context manager wrapping an authenticated RCON session.

    ``async with RconConnection() as conn`` opens a TCP connection to the
    host/port from ``application_config``, authenticates with the configured
    password, and exposes ``run_command()``.
    """

    async def __aenter__(self):
        """Connect and authenticate; returns self on success.

        Raises RconTimeoutError, RconConnectionError, or
        RconAuthenticatedFailed depending on the failure mode.
        """
        logger.debug('Authenticating with RCON server {}:{} using password "{}"'.format(
            application_config.rcon_host,
            application_config.rcon_port,
            application_config.rcon_password,
        ))
        try:
            self.reader, self.writer = await asyncio.wait_for(
                asyncio.open_connection(application_config.rcon_host, application_config.rcon_port),
                timeout=application_config.rcon_timeout
            )
        except asyncio.TimeoutError:
            raise RconTimeoutError('Timeout connecting to RCON server {}:{}'.format(
                application_config.rcon_host,
                application_config.rcon_port
            ))
        except ConnectionRefusedError:
            raise RconConnectionError('Server {} refused attempted RCON connection on port {}'.format(
                application_config.rcon_host,
                application_config.rcon_port
            ))
        # Authenticate; the server echoes id -1 when the password is wrong.
        await send_message(self.writer, application_config.rcon_password, MESSAGE_TYPE_AUTH)
        response_string, response_id, response_type = await get_response(self.reader)
        if response_id == -1:
            raise RconAuthenticatedFailed('Failed to authenticate with RCON server {}:{} using password "{}"'.format(
                application_config.rcon_host,
                application_config.rcon_port,
                application_config.rcon_password,
            ))
        else:
            logger.debug('Successfully authenticated with RCON server')
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # NOTE(review): close() is not awaited with wait_closed(); pending
        # data may still be in flight when the context exits -- confirm OK.
        self.writer.close()

    async def run_command(self, command: str):
        """Send *command* and return the response body (bytes)."""
        await send_message(self.writer, command, MESSAGE_TYPE_COMMAND)
        response_string, response_id, response_type = await get_response(self.reader)
        # See: https://developer.valvesoftware.com/wiki/Source_RCON_Protocol#Multiple-packet_Responses
        # Basically we get an empty packet after each response
        if command.startswith('/config'):
            # ServerConfig commands seem to be multi-packet responses
            await get_response(self.reader)
        logger.debug('RCON command "{}" executed, answer : {}'.format(command, response_string))
        return response_string
| StarcoderdataPython |
5158991 | import typing
def main() -> None:
    """Read 'W A B' from stdin and print the gap between the two intervals.

    Treats [a, a+w] and [b, b+w] as ranges of width w starting at the two
    inputs (order-insensitive) and prints max(0, gap) between them.

    BUG FIX: the annotation was ``typing.NoReturn``, but this function
    returns normally; it is ``None``.
    """
    w, a, b = map(int, input().split())
    if a > b:
        a, b = b, a
    print(max(0, b - (a + w)))
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
6648944 | """
A simple CLI to deploy models to the spam detection API.
It supports:
- tagging models with human-readable names
- deploying by tag
- rollback
"""
import argparse
from typing import List, Optional, Sequence, TextIO
# Single character separating key from value in a DB line; neither field may
# contain it.
_DB_RESERVED_CHAR = "="


def db_set(*, db: TextIO, key: str, value: str) -> None:
    """Append one ``key=value`` record to *db* (an open, writable text stream).

    Raises ValueError if either field contains the reserved separator, which
    would corrupt the line format.
    """
    if _DB_RESERVED_CHAR in key:
        raise ValueError(f"'key' cannot contain '{_DB_RESERVED_CHAR}'")
    elif _DB_RESERVED_CHAR in value:
        raise ValueError(f"'value' cannot contain '{_DB_RESERVED_CHAR}'")

    # CONSISTENCY FIX: use the constant rather than a hard-coded '=' so the
    # written format tracks _DB_RESERVED_CHAR (which db_find splits on).
    line = f"{key}{_DB_RESERVED_CHAR}{value}\n"
    db.write(line)
def db_find(*, db: TextIO, key: str) -> Optional[str]:
    """Return the most recent value stored for *key*, or None if absent.

    Linear scan over every line ever appended (last write wins). O(N) in the
    number of insertions -- fine for this toy system where N stays tiny.
    """
    latest: Optional[str] = None
    for line in db.readlines():
        name, stored = line.strip().split(_DB_RESERVED_CHAR)
        if name == key:
            latest = stored
    return latest
def tag_model(tag: str, model_sha: str) -> None:
    """Associate human-readable *tag* with *model_sha*.

    This method will always overwrite any existing tag.
    """
    # TODO: not yet implemented -- presumably should persist via db_set.
    ...
def deploy_model(tag: str) -> None:
    """Deploy the model currently associated with *tag* (not yet implemented)."""
    ...
def rollback_model(tag: str) -> None:
    """Roll back the deployed model for *tag* to its previous version.

    Swaps back the pointed-at model and reloads the model API server.
    """
    # TODO: not yet implemented.
    ...
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Parse CLI arguments and dispatch to the matching sub-command handler.

    Returns 0 on success; argparse exits with status 2 on bad usage.

    Fixes: handlers were attached via set_defaults(func=...) but never
    invoked; a missing sub-command previously crashed with a bare
    AssertionError instead of a usage error.
    """
    parser = argparse.ArgumentParser(prog="PROG")
    # required=True makes argparse itself reject a missing sub-command.
    subparsers = parser.add_subparsers(
        dest="subparser_name", required=True, help="sub-command help"
    )

    deploy_parser = subparsers.add_parser("deploy", help="deploy a tagged model")
    deploy_parser.add_argument("--tag", type=str, required=True, help="tag of the model to deploy")

    tag_parser = subparsers.add_parser("tag", help="attach a human-readable tag to a model")
    tag_parser.add_argument("--tag", type=str, required=True, help="tag name")
    tag_parser.add_argument("--model", type=str, required=True, help="model identifier to tag")

    args = parser.parse_args(argv)

    if args.subparser_name == "deploy":
        print("DEPLOY")
        deploy_model(args.tag)
    elif args.subparser_name == "tag":
        print("TAG")
        tag_model(args.tag, args.model)
    else:
        # Unreachable: the subparser is required above.
        raise AssertionError
    return 0
if __name__ == "__main__":
    # Exit the process with main()'s return code when run as a script.
    raise SystemExit(main())
| StarcoderdataPython |
9789640 | <filename>tests/functional/context/validation/test_potential_types.py
import pytest
from vyper.context.types.indexable.sequence import ArrayDefinition
from vyper.context.types.value.address import AddressDefinition
from vyper.context.types.value.boolean import BoolDefinition
from vyper.context.types.value.numeric import Int128Definition
from vyper.context.validation.utils import get_possible_types_from_node
from vyper.exceptions import (
ArrayIndexException,
InvalidOperation,
InvalidReference,
StructureException,
TypeMismatch,
UndeclaredDefinition,
UnknownAttribute,
)
# Pairs of literal operands reused across the parametrized tests below.
# String/decimal entries are source-code snippets, not Python values.
INTEGER_LITERALS = [(42, 31337), (-1, 1), (69, 2 ** 128)]
DECIMAL_LITERALS = [("4.2", "-1.337")]
BOOL_LITERALS = [(True, False), (True, True), (False, False)]
STRING_LITERALS = [("'hi'", "'there'"), ("'foo'", "'bar'"), ("'longer'", "'short'")]
def test_attribute(build_node, namespace):
    """A ``self.<name>`` attribute resolves to the member's declared type."""
    ast_node = build_node("self.foo")
    expected = Int128Definition()
    with namespace.enter_scope():
        namespace["self"].add_member("foo", expected)
        assert get_possible_types_from_node(ast_node) == [expected]
def test_attribute_missing_self(build_node, namespace):
    """Referencing a storage member without ``self.`` raises InvalidReference."""
    ast_node = build_node("foo")
    with namespace.enter_scope():
        namespace["self"].add_member("foo", Int128Definition())
        with pytest.raises(InvalidReference):
            get_possible_types_from_node(ast_node)
def test_attribute_not_in_self(build_node, namespace):
    """``self.<name>`` raises InvalidReference when only a plain name exists."""
    ast_node = build_node("self.foo")
    with namespace.enter_scope():
        namespace["foo"] = Int128Definition()
        with pytest.raises(InvalidReference):
            get_possible_types_from_node(ast_node)
def test_attribute_unknown(build_node, namespace):
    """An unknown attribute on an address value raises UnknownAttribute."""
    ast_node = build_node("foo.bar")
    with namespace.enter_scope():
        namespace["foo"] = AddressDefinition()
        with pytest.raises(UnknownAttribute):
            get_possible_types_from_node(ast_node)
def test_attribute_not_member_type(build_node, namespace):
    """Attribute access on a type without members raises StructureException."""
    ast_node = build_node("foo.bar")
    with namespace.enter_scope():
        namespace["foo"] = Int128Definition()
        with pytest.raises(StructureException):
            get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("op", "+-*/%")
@pytest.mark.parametrize("left,right", INTEGER_LITERALS + DECIMAL_LITERALS)
def test_binop(build_node, namespace, op, left, right):
    """Arithmetic on same-kind numeric literals types without raising."""
    ast_node = build_node(f"{left}{op}{right}")
    with namespace.enter_scope():
        get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("op", "+-*/%")
@pytest.mark.parametrize("left,right", [(42, "2.3"), (-1, 2 ** 128)])
def test_binop_type_mismatch(build_node, namespace, op, left, right):
    """Mixing int/decimal operands (or out-of-range pairs) raises TypeMismatch."""
    ast_node = build_node(f"{left}{op}{right}")
    with namespace.enter_scope():
        with pytest.raises(TypeMismatch):
            get_possible_types_from_node(ast_node)
def test_binop_invalid_decimal_pow(build_node, namespace):
    """Exponentiation between decimals is rejected with InvalidOperation."""
    ast_node = build_node("2.1 ** 2.1")
    with namespace.enter_scope():
        with pytest.raises(InvalidOperation):
            get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("left, right", STRING_LITERALS + BOOL_LITERALS)
@pytest.mark.parametrize("op", "+-*/%")
def test_binop_invalid_op(build_node, namespace, op, left, right):
    """Arithmetic on strings or booleans raises InvalidOperation."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        with pytest.raises(InvalidOperation):
            get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("left, right", BOOL_LITERALS)
@pytest.mark.parametrize("op", ["and", "or"])
def test_boolop(build_node, namespace, op, left, right):
    """'and'/'or' of two bools types as exactly one BoolDefinition."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        possible = get_possible_types_from_node(ast_node)
        assert len(possible) == 1
        assert isinstance(possible[0], BoolDefinition)
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS + STRING_LITERALS)
@pytest.mark.parametrize("op", ["and", "or"])
def test_boolop_invalid_op(build_node, namespace, op, left, right):
    """'and'/'or' on non-boolean operands raises InvalidOperation."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        with pytest.raises(InvalidOperation):
            get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
def test_compare_lt_gt(build_node, namespace, op, left, right):
    """Ordering comparisons on numerics type as a single BoolDefinition."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        possible = get_possible_types_from_node(ast_node)
        assert len(possible) == 1
        assert isinstance(possible[0], BoolDefinition)
@pytest.mark.parametrize(
    "left, right", INTEGER_LITERALS + DECIMAL_LITERALS + BOOL_LITERALS + STRING_LITERALS
)
@pytest.mark.parametrize("op", ["==", "!="])
def test_compare_eq_ne(build_node, namespace, op, left, right):
    """Equality comparisons accept every literal kind and yield bool."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        possible = get_possible_types_from_node(ast_node)
        assert len(possible) == 1
        assert isinstance(possible[0], BoolDefinition)
@pytest.mark.parametrize("left, right", BOOL_LITERALS + STRING_LITERALS)
@pytest.mark.parametrize("op", ["<", "<=", ">", ">="])
def test_compare_invalid_op(build_node, namespace, op, left, right):
    """Ordering comparisons on bools or strings raise InvalidOperation."""
    ast_node = build_node(f"{left} {op} {right}")
    with namespace.enter_scope():
        with pytest.raises(InvalidOperation):
            get_possible_types_from_node(ast_node)
def test_name(build_node, namespace):
    """A bare name resolves to the type it was declared with."""
    ast_node = build_node("foo")
    expected = Int128Definition()
    namespace["foo"] = expected
    assert get_possible_types_from_node(ast_node) == [expected]
def test_name_unknown(build_node, namespace):
    """An undeclared name raises UndeclaredDefinition."""
    ast_node = build_node("foo")
    with pytest.raises(UndeclaredDefinition):
        get_possible_types_from_node(ast_node)
@pytest.mark.parametrize("left, right", INTEGER_LITERALS + DECIMAL_LITERALS + BOOL_LITERALS)
def test_list(build_node, namespace, left, right):
    """A two-element literal list types as one or more ArrayDefinitions."""
    ast_node = build_node(f"[{left}, {right}]")
    with namespace.enter_scope():
        possible = get_possible_types_from_node(ast_node)
        assert possible
        for candidate in possible:
            assert isinstance(candidate, ArrayDefinition)
def test_subscript(build_node, namespace):
    """Indexing an array yields the array's element type."""
    ast_node = build_node("foo[1]")
    element = Int128Definition()
    namespace["foo"] = ArrayDefinition(element, 3)
    assert get_possible_types_from_node(ast_node) == [element]
def test_subscript_out_of_bounds(build_node, namespace):
    """Indexing past the declared length raises ArrayIndexException."""
    ast_node = build_node("foo[5]")
    namespace["foo"] = ArrayDefinition(Int128Definition(), 3)
    with pytest.raises(ArrayIndexException):
        get_possible_types_from_node(ast_node)
def test_subscript_negative(build_node, namespace):
    """A negative index raises ArrayIndexException."""
    ast_node = build_node("foo[-1]")
    namespace["foo"] = ArrayDefinition(Int128Definition(), 3)
    with pytest.raises(ArrayIndexException):
        get_possible_types_from_node(ast_node)
def test_tuple(build_node, namespace):
    """A tuple expression's value_type lists its members' types in order."""
    ast_node = build_node("(foo, bar)")
    namespace["foo"] = Int128Definition()
    namespace["bar"] = AddressDefinition()
    possible = get_possible_types_from_node(ast_node)
    assert possible[0].value_type == [namespace["foo"], namespace["bar"]]
def test_tuple_subscript(build_node, namespace):
    """Indexing a tuple expression yields the member type at that position."""
    ast_node = build_node("(foo, bar)[1]")
    namespace["foo"] = Int128Definition()
    namespace["bar"] = AddressDefinition()
    assert get_possible_types_from_node(ast_node) == [namespace["bar"]]
| StarcoderdataPython |
8083393 | <filename>pyEOM/datasets/predefined/MODIS/MYD13Q1.py
__author__ = 'we32zac'
from pyEOM.datasets import Dataset as DatasetAbs
class Dataset(DatasetAbs):
    """Static metadata for the MODIS Aqua MYD13Q1 product (16-day VI, 250 m).

    Everything here is declarative configuration consumed by the pyEOM
    download/ingest pipeline; no computation happens in this class.
    """

    # Product identity and source location on the LP DAAC data server.
    shortname = 'MYD13Q1'
    platform = 'Aqua'
    collection = '005'
    rastertype = 'Tile'
    timeInterval = 'P16D'  # ISO 8601 duration: one composite every 16 days
    host = 'http://e4ftl01.cr.usgs.gov'
    dir = '/MODIS_Composites/MOLA/MYD13Q1.005'
    sources = ['LPDAAC']

    def getDownloadInfo(self):
        """Return the download descriptor dict expected by the downloader."""
        return dict(shortname=self.shortname, platform=self.platform, collection=self.collection, rastertype=self.rastertype, host=self.host, directory=self.dir, sources=self.sources)

    def getBands(self):
        """Return the full band-name -> metadata mapping."""
        return self.bands

    def getThematicBands(self):
        """Return the science bands (EVI and NDVI)."""
        return [self.bands['EVI'], self.bands['NDVI']]

    def getQualityBands(self):
        """Return the quality bands (pixel reliability and QC flags)."""
        return [self.bands['PR'], self.bands['QC']]

    # Per-band metadata: SDS name, nodata/scale, and catalogue/WMS/WCS text.
    bands = dict(PR={
        'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days pixel reliability',
        'nodata': -1,
        'scale': 1,
        'offset': None,
        'imagetype': 'qualityInformation',
        'identifier': 'MODIS_MYD13_Q1_PR_Series',
        'title': '16-daily Vegetation Indices Pixel Reliability from MODIS Aqua',
        'abstract': 'Pixel Reliability from time-series of 16-daily Aqua MODIS Vegetation Indices at 250 m spatial resolution. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLT/).',
        'keywords': 'MODIS,Aqua,Quality,Pixel,Reliability,Vegetation,NDVI,EVI,Global,16-daily,Series',
        'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLT/) and processed with GDAL 1.9.0.',
        'datasetname': 'Pixel Reliability',
        'datatype': 'RASTER',
        'resolution': 250.0,
        'layername': 'myd13q1_pr',
        'templates': 'template_header_evi.html',
        'wcs_description': 'MODIS Aqua VI Pixel Reliability 16-daily',
        'wms_description': 'MODIS Aqua VI Pixel Reliability 16-daily',
        'colormap': 'vi_pr_colormap.map',
        'resolution_unit': 'm',
        'unit': 'Index'
    },QC={
        'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days VI Quality',
        'nodata': 65535,
        'scale': 1,
        'offset': None,
        'imagetype': 'qualityInformation',
        'identifier': 'MODIS_MYD13_Q1_QC_Series',
        'title': '16-daily Vegetation Indices Quality from MODIS Aqua',
        'abstract': 'Quality data from time-series of 16-daily Aqua MODIS Vegetation Indices at 250 m spatial resolution. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLT/).',
        'keywords': 'MODIS,Aqua,Quality,Vegetation,NDVI,EVI,Global,16-daily,Series',
        'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLT/) and processed with GDAL 1.9.0.',
        'datasetname': 'Quality Flags',
        'datatype': 'RASTER',
        'resolution': 250.0,
        'layername': 'myd13q1_qc',
        'templates': 'template_header_evi.html',
        'wcs_description': 'MODIS Aqua VI Quality 16-daily',
        'wms_description': 'MODIS Aqua VI Quality 16-daily',
        'colormap': 'vi_qc_colormap.map',
        'resolution_unit': 'm',
        'unit': 'Index'
    },NDVI={
        'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days NDVI',
        'nodata': -3000,
        'scale': 0.0001,
        'offset': None,
        'imagetype': 'thematicClassification',
        'identifier': 'MODIS_MYD13_Q1_NDVI_Series',
        'title': '16-daily Normalized Difference Vegetation Index from MODIS Aqua',
        'abstract': 'Time-series of 16-daily Aqua MODIS Normalized Difference Vegetation Index (NDVI) at 250 m spatial resolution. To retrieve actual values a scale factor of 0.0001 has to be applied. The unscaled nodata value is encoded as 0. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
        'keywords': 'MODIS,Aqua,Siberia,NDVI,Normalized Difference Vegetation Index,Vegetation,Index,Global,16-daily,Series',
        'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
        'datasetname': 'Normalized Difference Vegetation Index',
        'datatype': 'RASTER',
        'resolution': 250.0,
        'layername': 'myd13q1_ndvi',
        'templates': 'template_header_evi.html',
        'wcs_description': 'MODIS Aqua NDVI 16-daily',
        'wms_description': 'MODIS Aqua NDVI 16-daily',
        'colormap': 'ndvi_colorbar.map',
        'resolution_unit': 'm',
        'unit': 'None'
    },EVI={
        'name': 'MODIS_Grid_16DAY_250m_500m_VI:250m 16 days EVI',
        'nodata': -3000,
        'scale': 0.0001,
        'offset': None,
        'imagetype': 'thematicClassification',
        'identifier': 'MODIS_MYD13_Q1_EVI_Series',
        'title': '16-daily Enhanced Vegetation Index from MODIS Aqua',
        'abstract': 'Time-series of 16-daily Aqua MODIS Enhanced Vegetation Index (EVI) at 250 m spatial resolution. To retrieve actual values a scale factor of 0.0001 has to be applied. The unscaled nodata value is encoded as 0. Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/).',
        'keywords': 'MODIS,Aqua,Siberia,EVI,Enhanced Vegetation Index,Vegetation,Index,Global,16-daily,Series',
        'lineage': 'Original MODIS data retrieved from the Land Processes Distributed Active Archive Center (ftp://e4ftl01.cr.usgs.gov/MOLA/) and processed with GDAL 1.9.0.',
        'datasetname': 'Enhanced Vegetation Index',
        'datatype': 'RASTER',
        'resolution': 250.0,
        'layername': 'myd13q1_evi',
        'templates': 'template_header_evi.html',
        'wcs_description': 'MODIS Aqua EVI 16-daily',
        'wms_description': 'MODIS Aqua EVI 16-daily',
        'colormap': 'evi_colorbar.map',
        'resolution_unit': 'm',
        'unit': 'None'
    }
    )
65210 | <filename>modules/guidance.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 1 22:24:28 2018
@author: jvdgoltz
"""
import numpy as np
from cvxopt import matrix, solvers, sparse, spdiag, spmatrix
def solve(x0,targetalt,targetvel,amax,dt=0.5,t_max=12*60):
    """Solve a discrete-time guidance problem as a QP via cvxopt.

    Each time step contributes 8 decision variables (6 states + 2 control
    accelerations) and 6 equality rows enforcing the double-integrator
    dynamics; acceleration magnitudes are box-bounded by ``amax``.

    Parameters:
        x0: length-6 initial state vector.
            NOTE(review): ordering appears to be position (2), velocity (2),
            acceleration (2) based on the A matrix below -- confirm.
        targetalt, targetvel: target altitude and velocity in the reference.
        amax: per-step acceleration bound; steps beyond len(amax) are forced
            to zero acceleration.
        dt: integration step (seconds).
        t_max: horizon length (seconds).

    Returns:
        (t, x): the time grid and the solution reshaped to (len(t), 8).
    """
    # Reference for the quadratic objective; the trailing 9.81 pairs with the
    # gravity entry in beq below -- presumably a gravity compensation term.
    r = np.array([0,targetalt,targetvel,0,0,0,0,9.81])
    t = np.arange(0,t_max+dt,dt)
    # Inequality Constraints
    # Rows 0..2N-1: nonnegativity of the first two state components;
    # rows 2N..6N-1: +/- bounds on the two acceleration variables.
    G = spmatrix(np.zeros([1,6*len(t)]), range(6*len(t)), range(6*len(t)), (6*len(t),8*len(t)))
    h = np.zeros([6*len(t),1])
    for i in range(len(t)):
        G[2*i:2*i+2,8*i:8*i+2] = -np.eye(2)
        G[2*len(t)+4*i:2*len(t)+4*i+4,8*i+6:8*i+8] = np.array([[1,0],[-1,0],[0,1],[0,-1]])
        if i<len(amax):
            h[2*len(t)+4*i:2*len(t)+4*i+4,0] = np.ones(4)*amax[i]
        else:
            # Past the supplied thrust profile, force zero acceleration.
            h[2*len(t)+4*i:2*len(t)+4*i+4,0] = np.zeros(4)
    h = matrix(h)
    # Equality Constraints
    # A, B: discrete double-integrator dynamics x_{k+1} = A x_k + B u_k.
    A = np.array([[1,0,dt,0,0,0],
                  [0,1,0,dt,0,0],
                  [0,0,1,0,dt,0],
                  [0,0,0,1,0,dt],
                  [0,0,0,0,0,0],
                  [0,0,0,0,0,0]])
    B = np.array([[0,0],
                  [0,0],
                  [0,0],
                  [0,0],
                  [1,0],
                  [0,1]])
    Aeq = spmatrix(np.zeros([1,6*len(t)]),range(6*len(t)),range(6*len(t)),(6*len(t),8*len(t)))
    Aeq[:6,:6] = sparse(matrix(np.eye(6)))
    # Each block row couples step i's state/control to step i+1's state:
    # [-A  -B  I] over a 14-column window.
    c = sparse(matrix(np.concatenate((-A,-B,np.eye(6)),axis=1)))
    for i in range(len(t)-1):
        Aeq[6*i+6:6*i+12,8*i:8*i+14] = c
    beq = np.zeros([6*len(t),1])
    beq[:6,0] = x0
    for i in range(len(t)):
        # Constant -9.81 on the 6th dynamics row: gravity acting on the
        # vertical acceleration channel.
        beq[6*i+5,0] = -9.81
    beq = matrix(beq)
    # Objective
    # Quadratic penalty 2*I per step; the first component is unweighted.
    Q = matrix(2*np.eye(8))
    Q[0,0] = 0
    QQ = sparse(Q)
    for i in range(len(t)-1):
        QQ = spdiag([QQ,Q])
    # Linear term replicates -r'Q for every step.
    p = -r.T.dot(Q)
    pp = matrix(np.kron(np.ones([1,len(t)]), p).T)
    sol = solvers.qp(QQ, pp, G, h, Aeq, beq)
    x = np.array(sol['x']).reshape((-1,8))
    return t,x
1774693 | # AUTOGENERATED! DO NOT EDIT! File to edit: source_nbs/06_read_write_tfrecord.ipynb (unless otherwise specified).
# Public API of this autogenerated module (edit the source notebook instead).
__all__ = ['serialize_fn', 'make_tfrecord_local', 'make_tfrecord_pyspark', 'make_tfrecord', 'chain_processed_data',
           'write_tfrecord', 'make_feature_desc', 'reshape_tensors_in_dataset', 'add_loss_multiplier',
           'set_shape_for_dataset', 'get_dummy_features', 'add_dummy_features_to_dataset', 'read_tfrecord']
# Cell
import json
import os
from fastcore.basics import partial
from glob import glob
from typing import Dict, Iterator, Callable
import tempfile
from loguru import logger
import numpy as np
import tensorflow as tf
from fastcore.basics import listify
from .bert_preprocessing.create_bert_features import create_multimodal_bert_features
from .special_tokens import EVAL, TRAIN
from .params import Params
from .utils import get_is_pyspark
# Cell
def _float_list_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature (float_list)."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
def _float_feature(value):
    """Wrap a single float in a tf.train.Feature (one-element float_list)."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)
def _int64_list_feature(value):
    """Wrap an iterable of ints/bools/enums in a tf.train.Feature (int64_list)."""
    int64_list = tf.train.Int64List(value=value)
    return tf.train.Feature(int64_list=int64_list)
def _int64_feature(value):
    """Wrap a single int/bool/enum in a tf.train.Feature (one-element int64_list)."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature (one-element bytes_list)."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _bytes_list_feature(value):
    """Wrap an iterable of bytes objects in a tf.train.Feature (bytes_list)."""
    bytes_list = tf.train.BytesList(value=value)
    return tf.train.Feature(bytes_list=bytes_list)
def serialize_fn(features: dict, return_feature_desc=False):
    """Serialize a feature dict into a ``tf.train.Example`` proto string.

    Each feature is stored together with a companion ``<name>_shape``
    int64 feature so the flattened values can be restored to their
    original shape at parse time (see ``reshape_tensors_in_dataset``).

    Args:
        features: mapping of feature name -> list / np.ndarray / numeric
            scalar / str / bytes.
        return_feature_desc: if True, also return a description dict with
            the dtype name of every serialized feature plus a
            ``<name>_shape_value`` entry holding its static shape (the
            leading dim of arrays is recorded as None, i.e. dynamic).

    Returns:
        Serialized Example bytes, or ``(bytes, feature_desc)`` when
        ``return_feature_desc`` is True.
    """
    features_tuple = {}
    feature_desc = {}
    for feature_name, feature in features.items():
        if type(feature) is list:
            feature = np.array(feature)
        if type(feature) is np.ndarray:
            if issubclass(feature.dtype.type, np.integer):
                features_tuple[feature_name] = _int64_list_feature(
                    feature.flatten())
                feature_desc[feature_name] = 'int64'
            else:
                # any non-integer array is stored as float32
                features_tuple[feature_name] = _float_list_feature(
                    feature.flatten())
                feature_desc[feature_name] = 'float32'
            features_tuple['{}_shape'.format(
                feature_name)] = _int64_list_feature(feature.shape)
            feature_desc['{}_shape'.format(
                feature_name)] = 'int64'
            # record the static shape with a dynamic (None) first dim so
            # variable-length leading dimensions still parse correctly
            if len(feature.shape) > 1:
                feature_desc['{}_shape_value'.format(feature_name)] = [
                    None] + list(feature.shape[1:])
            else:
                feature_desc['{}_shape_value'.format(feature_name)] = [
                    None for _ in feature.shape]
        # np.float was removed in NumPy 1.24; np.floating covers python
        # floats and numpy float scalars, as the original check intended
        elif np.issubdtype(type(feature), np.floating):
            features_tuple[feature_name] = _float_feature(feature)
            features_tuple['{}_shape'.format(
                feature_name)] = _int64_list_feature([])
            feature_desc[feature_name] = 'float32'
            feature_desc['{}_shape'.format(
                feature_name)] = 'int64'
            feature_desc['{}_shape_value'.format(feature_name)] = []
        elif np.issubdtype(type(feature), np.integer):
            features_tuple[feature_name] = _int64_feature(feature)
            features_tuple['{}_shape'.format(
                feature_name)] = _int64_list_feature([])
            feature_desc[feature_name] = 'int64'
            feature_desc['{}_shape'.format(
                feature_name)] = 'int64'
            feature_desc['{}_shape_value'.format(feature_name)] = []
        else:
            # everything else is treated as a (byte)string scalar
            if isinstance(feature, str):
                feature = feature.encode('utf8')
            features_tuple[feature_name] = _bytes_feature(feature)
            features_tuple['{}_shape'.format(
                feature_name)] = _int64_list_feature([])
            feature_desc[feature_name] = 'string'
            feature_desc['{}_shape'.format(
                feature_name)] = 'int64'
            feature_desc['{}_shape_value'.format(feature_name)] = []
    example_proto = tf.train.Example(
        features=tf.train.Features(feature=features_tuple)).SerializeToString()
    if return_feature_desc:
        return example_proto, feature_desc
    return example_proto
# Cell
def make_tfrecord_local(data_list, output_dir, serialize_fn, mode='train', example_per_file=100000, prefix='', **kwargs) -> int:
    """
    Write ``data_list`` as sharded TFRecord files under ``output_dir/prefix``
    and return the total number of records written.

    A ``<mode>_feature_desc.json`` sidecar describing the serialized
    features is written next to the first shard.
    """
    # create output tfrecord path
    os.makedirs(os.path.join(
        output_dir, prefix), exist_ok=True)

    def _write_fn(d_list, path, serialize_fn, mode='train'):
        # Write one shard; also dump the feature description once
        # (only if it does not already exist next to the shards).
        logger.debug('Writing {}'.format(path))
        feature_desc_path = os.path.join(os.path.dirname(
            path), '{}_feature_desc.json'.format(mode))
        with tf.io.TFRecordWriter(path) as writer:
            for features in d_list:
                example, feature_desc = serialize_fn(
                    features, return_feature_desc=True)
                writer.write(example)
        if not os.path.exists(feature_desc_path):
            json.dump(feature_desc, open(
                feature_desc_path, 'w', encoding='utf8'))
    _write_part_fn = partial(_write_fn, serialize_fn=serialize_fn, mode=mode)
    x = []
    total_count = 0
    shard_count = 0
    for idx, example in enumerate(data_list):
        total_count += 1
        x.append(example)
        if idx % example_per_file == 0 and idx:  # pragma: no cover
            path = os.path.join(
                output_dir, prefix, '{}_{:05d}.tfrecord'.format(mode, shard_count))
            shard_count += 1
            _write_part_fn(d_list=x, path=path)
            x = []
    # flush the remaining examples; they were already counted inside the
    # loop, so do NOT add len(x) again (the old code double-counted them,
    # inflating the data_num reported to training).
    if x:
        path = os.path.join(
            output_dir, prefix, '{}_{:05d}.tfrecord'.format(mode, shard_count))
        _write_part_fn(d_list=x, path=path)
    return total_count
def make_tfrecord_pyspark(data_list, output_dir: str, serialize_fn: Callable, mode='train', example_per_file=100000, prefix='', **kwargs) -> int:
    """
    Serialize an RDD of feature dicts to TFRecords on HDFS and return an
    *estimated* total record count (derived from a 1% sample).
    Also aggregates the per-record feature descriptions into a single
    '<mode>_feature_desc.json' uploaded next to the records.
    """
    from .pyspark_utils import Hdfs, repar_rdd
    from pyspark import RDD
    # write RDD to TFRecords
    # ref: https://github.com/yahoo/TensorFlowOnSpark/blob/master/examples/mnist/mnist_data_setup.py
    # just for type hint
    data_list: RDD = data_list
    # since single record might not contain all problem labels
    # we create feature desc for all record and aggregate
    # TODO: poor performance, optimize this
    feat_desc_tfrecord_tuple_rdd = data_list.map(
        lambda x: serialize_fn(x, return_feature_desc=True)
    )
    feat_desc_tfrecord_tuple_rdd = feat_desc_tfrecord_tuple_rdd.cache()
    # NOTE(review): the count is estimated by sampling 1% and scaling by
    # 100, so the reported record count can deviate from the true count.
    rdd_count = int(feat_desc_tfrecord_tuple_rdd.sample(
        False, 0.01).count() * 100)
    feat_desc_tfrecord_tuple_rdd = repar_rdd(
        rdd=feat_desc_tfrecord_tuple_rdd,
        rdd_count=rdd_count,
        example_per_par=example_per_file
    )
    # key every feature desc with 0 so reduceByKeyLocally can merge them
    # all into a single dict below
    feature_desc_pair_rdd = feat_desc_tfrecord_tuple_rdd.map(
        lambda x: (0, x[1]))
    tfrecord_rdd = feat_desc_tfrecord_tuple_rdd.map(
        lambda x: (bytearray(x[0]), None))
    tfrecord_rdd.saveAsNewAPIHadoopFile(
        path=output_dir,
        outputFormatClass="org.tensorflow.hadoop.io.TFRecordFileOutputFormat",
        keyClass="org.apache.hadoop.io.BytesWritable",
        valueClass="org.apache.hadoop.io.NullWritable"
    )
    # create feature desc
    def _update_dict(ld: dict, rd: dict) -> dict:
        ld.update(rd)
        return ld
    feature_desc = feature_desc_pair_rdd.reduceByKeyLocally(_update_dict)[0]
    local_feature_desc_path = '{}_feature_desc.json'.format(mode)
    json.dump(feature_desc, open(local_feature_desc_path, 'w'), indent=4)
    hdfs_client = Hdfs()
    hdfs_client.copyFromLocalFile(
        local_feature_desc_path,
        os.path.join(output_dir, local_feature_desc_path))
    return rdd_count
def make_tfrecord(data_list, output_dir, serialize_fn, mode='train', example_per_file=100000, prefix='', **kwargs):
    """Dispatch tfrecord writing to the pyspark or local implementation."""
    common_kwargs = dict(data_list=data_list,
                         serialize_fn=serialize_fn,
                         mode=mode,
                         example_per_file=example_per_file,
                         prefix=prefix,
                         **kwargs)
    if get_is_pyspark():
        # pyspark output goes under <pyspark_dir>/<mode>
        pyspark_output_dir = os.path.join(kwargs['pyspark_dir'], mode)
        return make_tfrecord_pyspark(output_dir=pyspark_output_dir,
                                     **common_kwargs)
    return make_tfrecord_local(output_dir=output_dir, **common_kwargs)
# Cell
def chain_processed_data(problem_preproc_gen_dict: Dict[str, Iterator]) -> Iterator:
    """Merge per-problem preprocessed examples into a single stream.

    With a single problem the generator is returned untouched; under
    pyspark the RDDs are joined; otherwise every generator is materialized
    and the i-th records of all problems are merged into one dict (their
    shared keys must agree positionally).
    """
    # problem chunk size is 1, return generator directly
    if len(problem_preproc_gen_dict) == 1:
        return next(iter(problem_preproc_gen_dict.values()))
    if get_is_pyspark():
        from pyspark import RDD
        from .pyspark_utils import join_dict_of_rdd
        rdd = join_dict_of_rdd(rdd_dict=problem_preproc_gen_dict)
        return rdd
    logger.warning('Chaining problems with & may consume a lot of memory if'
                   ' data is not pyspark RDD.')
    # materialize every generator up front; records are merged by position
    data_dict = {}
    column_list = []
    for pro in problem_preproc_gen_dict:
        data_dict[pro] = listify(problem_preproc_gen_dict[pro])
        try:
            column_list.append(list(data_dict[pro][0].keys()))
        except IndexError:
            raise IndexError("Problem {} has no data".format(pro))
    # get intersection and use as ensure features are the same
    join_key = list(set(column_list[0]).intersection(*column_list[1:]))
    flat_data_list = []
    first_problem = next(iter(problem_preproc_gen_dict.keys()))
    # NOTE(review): pop(0) on a list is O(n), making this merge loop
    # quadratic for large datasets; a deque or running index would avoid it.
    while data_dict[first_problem]:
        d = {}
        for pro in data_dict:
            if not d:
                d = data_dict[pro].pop(0)
            else:
                # shared keys must match the record popped for the first
                # problem at the same position
                for k in join_key:
                    assert d[k] == data_dict[pro][0][k], 'At iteration {}, feature {} not align. Expected {}, got: {}'.format(
                        len(flat_data_list), k, d[k], data_dict[pro][0][k]
                    )
                d.update(data_dict[pro].pop(0))
        flat_data_list.append(d)
    return flat_data_list
def write_tfrecord(params: Params, replace=False):
    """Write TFRecord for every problem chunk
    Output location: params.tmp_file_dir
    For each chunk, the per-problem generators are chained and written for
    both TRAIN and EVAL; the TRAIN record count is stored as the chunk's
    'data_num' problem info. Under pyspark the problem info file is merged
    and uploaded to HDFS afterwards.
    Arguments:
        params {params} -- params
    Keyword Arguments:
        replace {bool} -- Whether to replace existing tfrecord (default: {False})
    """
    read_data_fn_dict = params.read_data_fn
    # NOTE(review): path_list is never used — dead variable.
    path_list = []
    for problem_list in params.problem_chunk:
        problem_str = '_'.join(sorted(problem_list))
        file_dir = os.path.join(params.tmp_file_dir, problem_str)
        if params.pyspark_output_path is not None:
            pyspark_dir = os.path.join(params.pyspark_output_path, problem_str)
        else:
            pyspark_dir = None
        # skip chunks that already have records unless replace is requested
        if not os.path.exists(file_dir) or replace:
            for mode in [TRAIN, EVAL]:
                problem_preproc_gen_dict = {}
                for p in problem_list:
                    problem_preproc_gen_dict[p] = read_data_fn_dict[p](
                        params=params, mode=mode)
                chained_data = chain_processed_data(problem_preproc_gen_dict)
                total_count = make_tfrecord(data_list=chained_data, output_dir=file_dir,
                                            mode=mode, serialize_fn=serialize_fn, pyspark_dir=pyspark_dir,
                                            example_per_file=params.example_per_file)
                if mode == TRAIN:
                    params.set_problem_info(
                        problem=problem_str, info_name='data_num', info=total_count)
        if get_is_pyspark():
            from .pyspark_utils import Hdfs, get_text_file_from_executor
            # upload problem_info if pyspark
            local_problem_info_path = params.get_problem_info_path(problem_str)
            tempfile_name = tempfile.NamedTemporaryFile().name
            get_text_file_from_executor(local_problem_info_path, tempfile_name)
            params.merge_problem_info_file(tempfile_name)
            Hdfs().copyFromLocalFile(local_problem_info_path, pyspark_dir)
# Cell
def make_feature_desc(feature_desc_dict: dict):
    """Translate a serialized feature-type dict into tf.io parse specs.

    Entries with unrecognized type strings (e.g. 'string' or the
    '<name>_shape_value' shape lists) are skipped, exactly like before.
    """
    dtype_by_name = {'int64': tf.int64, 'float32': tf.float32}
    feature_desc = {}
    for name, type_str in feature_desc_dict.items():
        if type_str in dtype_by_name:
            feature_desc[name] = tf.io.VarLenFeature(dtype_by_name[type_str])
    return feature_desc
def reshape_tensors_in_dataset(example, feature_desc_dict: dict):
    """Densify every parsed feature and reshape it to its recorded shape.
    The companion ``<name>_shape`` features written by ``serialize_fn`` are
    consumed here and removed from the example afterwards.
    Arguments:
        example {Example} -- Example
    Returns:
        Example -- Example
    """
    # VarLenFeature parsing yields SparseTensors; densify them first
    for feature_key in example:
        example[feature_key] = tf.sparse.to_dense(example[feature_key])
    @tf.function
    def _reshape_tensor(tensor: tf.Tensor, shape_tensor: tf.Tensor, shape_tensor_in_dict: tf.Tensor):
        """
        avoid empty tensor reshape error
        we need to fill tensor with zeros to make sure
        that loss multiplier aligns with features correctly
        """
        if tf.equal(tf.size(tensor), 0):
            # scalar shape feature -> fall back to the static shape from
            # the feature description dict
            if tf.equal(tf.size(shape_tensor), 0):
                return tf.zeros(shape=shape_tensor_in_dict, dtype=tensor.dtype)
            else:
                return tf.zeros(shape=shape_tensor, dtype=tensor.dtype)
        return tf.reshape(tensor, shape=shape_tensor)
    # reshape every data feature using its stored '<name>_shape' companion
    for feature_key in example:
        if '_shape' in feature_key:
            continue
        shape_tensor = example['{}_shape'.format(feature_key)]
        shape_tensor_in_dict = tf.convert_to_tensor(
            feature_desc_dict[feature_key+'_shape_value'], dtype=tf.int32)
        example[feature_key] = _reshape_tensor(
            example[feature_key], shape_tensor, shape_tensor_in_dict)
    # the helper shape features are no longer needed once reshaped
    for feature_key in list(example.keys()):
        if '_shape' in feature_key:
            del example[feature_key]
    return example
def add_loss_multiplier(example, problem):  # pragma: no cover
    """Ensure *example* carries a `<problem>_loss_multiplier` entry.

    Missing multipliers default to a scalar int32 constant of 1.
    """
    multiplier_key = '{}_loss_multiplier'.format(problem)
    if multiplier_key not in example:
        example[multiplier_key] = tf.constant(
            value=1, shape=(), dtype=tf.int32)
    return example
def set_shape_for_dataset(example, feature_desc_dict):  # pragma: no cover
    """Apply the recorded static shape to every tensor in *example*."""
    for name, tensor in example.items():
        tensor.set_shape(
            feature_desc_dict['{}_shape_value'.format(name)])
    return example
def get_dummy_features(dataset_dict, feature_desc_dict):
    """Build zero-valued placeholder tensors for non-shared features.

    Dummy features guarantee that every sampled problem yields a feature
    dict with the same keys.

    Example:
        problem A: {'input_ids': [1,2,3], 'A_label_ids': 1}
        problem B: {'input_ids': [1,2,3], 'B_label_ids': 2}
        dummy features: {'A_label_ids': 0, 'B_label_ids': 0}

    Arguments:
        dataset_dict {dict} -- dict of datasets of all problems
        feature_desc_dict {dict} -- feature description with
            '<name>_shape_value' static shapes (None dims become 1)

    Returns:
        dict -- feature name -> zero tensor of matching dtype/shape
    """
    per_problem_keys = [list(ds.element_spec.keys())
                        for ds in dataset_dict.values()]
    shared_keys = set(per_problem_keys[0]).intersection(*per_problem_keys[1:])
    dummy_features = {}
    for problem_dataset in dataset_dict.values():
        for name, spec in problem_dataset.element_spec.items():
            if name in shared_keys:
                continue
            static_shape = [
                1 if dim is None else dim
                for dim in feature_desc_dict.get(
                    '{}_shape_value'.format(name), [])]
            dummy_features[name] = tf.cast(
                tf.constant(shape=static_shape, value=0), spec.dtype)
    return dummy_features
def add_dummy_features_to_dataset(example, dummy_features):  # pragma: no cover
    """Fill *example* with zero-valued dummies for any missing feature.

    feature dict without dummy:
        {'input_ids': [1,2,3], 'A_label_ids': 1}
    feature dict with dummy:
        {'input_ids': [1,2,3], 'A_label_ids': 1, 'B_label_ids': 0}

    Arguments:
        example {data example} -- dataset example
        dummy_features {dict} -- dict of dummy tensors
    """
    missing = (name for name in dummy_features if name not in example)
    for name in missing:
        example[name] = tf.identity(dummy_features[name])
    return example
def read_tfrecord(params: Params, mode: str):
    """Read and parse TFRecords for every problem chunk.

    The returned datasets are parsed, densified, reshaped, carry
    per-problem loss multipliers and dummy features so every problem
    yields feature dicts with identical keys.

    Arguments:
        params {params} -- params
        mode {str} -- mode, train, eval or predict

    Returns:
        dict -- dict with keys: problem name, values: dataset
    """
    dataset_dict = {}
    all_feature_desc_dict = {}
    for problem_list in params.problem_chunk:
        problem = '_'.join(sorted(problem_list))
        file_dir = os.path.join(params.tmp_file_dir, problem)
        # pyspark writes shards under <dir>/<mode>/part* with the feature
        # desc beside them; the local writer uses <dir>/<mode>_*.tfrecord
        local_mode_feature_desc_path = os.path.join(
            file_dir, '{}_feature_desc.json'.format(mode))
        if not os.path.exists(local_mode_feature_desc_path):
            tfrecord_path_list = glob(os.path.join(
                file_dir, mode, 'part*'))
            with open(os.path.join(
                    file_dir, mode, '{}_feature_desc.json'.format(mode))) as f:
                feature_desc_dict = json.load(f)
        else:
            tfrecord_path_list = glob(os.path.join(
                file_dir, '{}_*.tfrecord'.format(mode)))
            with open(local_mode_feature_desc_path) as f:
                feature_desc_dict = json.load(f)
        all_feature_desc_dict.update(feature_desc_dict)
        feature_desc = make_feature_desc(feature_desc_dict)
        dataset = tf.data.TFRecordDataset(
            tfrecord_path_list, num_parallel_reads=tf.data.experimental.AUTOTUNE)
        # when using hvd, we need to shard dataset
        if params.use_horovod:
            import horovod.tensorflow.keras as hvd
            dataset = dataset.shard(hvd.size(), hvd.rank())
        # bind loop-local values as lambda defaults so every dataset keeps
        # its own feature desc (avoids the late-binding closure pitfall)
        dataset = dataset.map(
            lambda x, fd=feature_desc: tf.io.parse_single_example(
                serialized=x, features=fd),
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
        # static shapes were stored with None for dynamic dims; replace
        # None with 1 so empty tensors can be zero-filled when reshaping
        feature_desc_dict_replace_none = {}
        for name, desc in feature_desc_dict.items():
            if not isinstance(desc, list):
                feature_desc_dict_replace_none[name] = desc
            else:
                feature_desc_dict_replace_none[name] = [
                    i if i is not None else 1 for i in desc]
        dataset = dataset.map(
            lambda x, fd=feature_desc_dict_replace_none: reshape_tensors_in_dataset(x, fd),
            num_parallel_calls=tf.data.experimental.AUTOTUNE).map(  # pylint: disable=no-member
                lambda x, fd=feature_desc_dict: set_shape_for_dataset(x, fd),
                num_parallel_calls=tf.data.experimental.AUTOTUNE  # pylint: disable=no-member
        )
        for p in problem_list:
            # p=p binds the current problem name for the deferred call
            dataset = dataset.map(lambda x, p=p: add_loss_multiplier(x, p),
                                  num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset_dict[problem] = dataset
    # add dummy features so all problems share the same feature keys
    dummy_features = get_dummy_features(dataset_dict, all_feature_desc_dict)
    for problem in params.get_problem_chunk(as_str=True):
        dataset_dict[problem] = dataset_dict[problem].map(
            lambda x: add_dummy_features_to_dataset(x, dummy_features),
            num_parallel_calls=tf.data.experimental.AUTOTUNE
        )
    return dataset_dict
| StarcoderdataPython |
1602466 | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 23:08:16 2020
@author: ninjaac
"""
# Poisson distribution
import math
def poision_D(k, lamp):
    """Poisson pmf: probability of exactly `k` events at rate `lamp`."""
    numerator = (lamp ** k) * math.exp(-lamp)
    return numerator / math.factorial(k)
# Example: P(X = 5) at rate 2.5, printed to 3 decimal places.
result=poision_D(5,2.5)
print('%.3f'%result)
# Poisson challenge 2: read two mean values and print the expected cost
# using E[X^2] = lambda + lambda^2 for a Poisson variable
# (the 160/128 and 40 constants presumably come from the challenge text).
averageX, averageY = [float(num) for num in input().split(" ")]
# Cost
CostX = 160 + 40*(averageX + averageX**2)
CostY = 128 + 40*(averageY + averageY**2)
print(round(CostX, 3))
print(round(CostY, 3)) | StarcoderdataPython |
4877270 | <reponame>ferrumie/multi-pay<filename>api/extenal/ravepayment.py<gh_stars>0
import os
from api.exceptions import FlutterException
from api.payment import PaymentInterface
from api.request import Request
from transaction.models import Transaction
class RavePayment(Request, PaymentInterface):
    '''
    Flutterwave (Rave) payment backend.
    Extends the custom Request class: `pay` creates a hosted payment link,
    `verify` confirms a transaction and records it locally.
    '''
    def __init__(self):
        # Base API URL comes from the environment; None if unset —
        # NOTE(review): no validation that the variable exists.
        url = os.getenv("FLUTTERWAVE_API_URL")
        super(RavePayment, self).__init__(base=url)
    def pay(self, payload):
        """Create a Flutterwave payment and return its link/status/id.
        `payload` must contain: user (with id/first_name/last_name/email),
        tx_ref, amount, title, logo, description, redirect_url, currency
        and api_key.
        """
        user = payload.get("user")
        tx_ref = payload.get("tx_ref")
        amount = payload.get("amount")
        title = payload.get("title")
        logo = payload.get('logo')
        description = payload.get("description")
        redirect_url = payload.get("redirect_url")
        currency = payload.get('currency')
        api_key = payload.get('api_key')
        # rebinds `payload` to the request body sent to Flutterwave
        payload = {
            "user_id": user.id,
            "tx_ref": tx_ref,
            "amount": str(amount),
            "currency": currency,
            "meta": {
                "user_id": user.id
            },
            "payment_options": "card, account, banktransfer, ussd, barter, credit, payattitude, paga",
            "redirect_url": redirect_url,
            "customer": {
                "name": f'{user.first_name} {user.last_name}',
                "email": user.email
            },
            "customizations": {
                "title": title,
                "logo": logo,
                "description": description
            }
        }
        self.method = 'post'
        self.api = 'payments'
        self.headers['Authorization'] = f'Bearer {api_key}'
        self.data = payload
        # NOTE(review): this assignment is immediately overwritten — dead code.
        response = dict()
        response = super(RavePayment, self).send()
        # Extracting Transaction id from the link
        link = response['data']['link']
        link_list = link.split('/')
        transaction_id = link_list[-1]
        res = {
            "link": response['data']['link'],
            "status": response['status'],
            "transaction_id": transaction_id
        }
        return res
    def verify(self, payload):
        """Verify a transaction with Flutterwave and persist it locally.
        Creates a Transaction row the first time a given (user,
        transaction_id) pair is verified. Any failure — including a missing
        transaction_id — is re-raised as FlutterException.
        """
        user = payload.get("user")
        api_key = payload.get('api_key')
        transaction_id = payload.get("transaction_id")
        transaction_ref = payload.get("transaction_ref")
        method = payload.get("method")
        self.method = 'get'
        # NOTE(review): transaction_id is interpolated here before the
        # `if transaction_id` guard below — confirm intended.
        self.api = f'transactions/{transaction_id}/verify'
        self.headers['Authorization'] = f'Bearer {api_key}'
        response = dict()
        try:
            if transaction_id:
                response = super(RavePayment, self).send()
                tran = Transaction.objects.filter(user=user).filter(transaction_id=transaction_id)
                if not tran:
                    # first time this transaction is seen: record it
                    transaction = {
                        'amount': response['data']['amount'],
                        'transaction_id': transaction_id,
                        'transaction_ref': transaction_ref,
                        'platform': method,
                        'user': user,
                        'status': response['status'],
                        'payment_type': response['data']['payment_type'],
                        'account_id': response['data']['account_id']
                    }
                    transact = Transaction.objects.create(**transaction)
                    transact.save()
                return response
            raise ValueError({"message": "Transaction id is required"})
        except Exception as e:
            # broad catch by design: every failure surfaces as FlutterException
            raise FlutterException(str(e))
| StarcoderdataPython |
12839355 | <reponame>leo60228/everestbot<gh_stars>1-10
import time
import config
import discord
from discord.ext import commands
class EverestPins:
def __init__(self, bot):
self.bot = bot
@commands.command()
async def ahorn(self, ctx):
embed = discord.Embed(title="Ahorn Downloads",
url="https://github.com/CelestialCartographers/Ahorn",
description=("• [install_ahorn.jl](https://raw.githubusercontent.com/CelestialCartographers/Ahorn/master/install_ahorn.jl): Cross-platform (Windows, macOS, and Linux)\n"
"• [Ahorn for Windows](https://thoas.feralhosting.com/oddstr13/sharex/file/setup-Ahorn-0.0.2.exe): Windows-only quick installer"))
embed.set_thumbnail(url="https://github.com/CelestialCartographers/Ahorn/blob/master/docs/logo-256.png?raw=true")
await ctx.send(embed=embed)
@commands.command()
async def wiki(self, ctx):
await ctx.send(embed=discord.Embed(title="Everest Wiki", url="https://github.com/EverestAPI/Resources/wiki"))
@commands.command()
async def modstruct(self, ctx):
await ctx.send(embed=discord.Embed(title="Modstruct Tutorial", url="https://everestapi.github.io/tutorials/modstruct.html", description="Tutorial on how to package your maps"))
@commands.command()
async def codemods(self, ctx):
await ctx.send(embed=discord.Embed(title="Codemodding Tutorial", url="https://everestapi.github.io/tutorials/firstcodemod.html", description="Tutorial on how to modify the game's code"))
@commands.command()
async def piracy(self, ctx):
await ctx.send("Everest does not support pirated copies of the game. These are banned on the server, and are often too outdated for many maps. Please purchase the game legitimately.")
@commands.command()
async def tutorials(self, ctx):
embed = discord.Embed(title="Tutorial List",
description=("• [How do I package my maps?](https://everestapi.github.io/tutorials/modstruct.html)\n"
"• [How do I modify the game's code?](https://everestapi.github.io/tutorials/firstcodemod.html)\n"
"• [How do I install mods on PC?](https://github.com/EverestAPI/Resources/wiki/How-do-I-install-mods-on-PC%3F)\n"
"• [How do I make maps on PC?](https://github.com/EverestAPI/Resources/wiki/How-do-I-make-maps-on-PC%3F)\n"
"• [How do I play Celeste with others over the internet? (GhostNet)](https://github.com/EverestAPI/Resources/wiki/How-do-I-play-Celeste-with-others-over-the-internet%3F-%28GhostNet%29)\n"
"• [What's going on with mods on Nintendo Switch?](https://github.com/EverestAPI/Resources/wiki/What%27s-going-on-with-mods-on-Nintendo-Switch%3F)"))
embed.set_thumbnail(url="https://everestapi.github.io/logo.png")
await ctx.send(embed=embed)
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    cog = EverestPins(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
142492 | from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
## Expiring, syncing
########################################
class SyncTest(SQLObject):
    """Minimal table used to exercise expire()/sync(); the single column is
    mapped to DB column 'name_col', and alternateID=True generates the
    SyncTest.byName() lookup used by the test below."""
    name = StringCol(length=50, alternateID=True, dbName='name_col')
def test_expire():
    """expire() drops cached column values so the next attribute access
    re-reads the row; sync() refreshes them immediately."""
    setupClass(SyncTest)
    SyncTest(name='bob')
    SyncTest(name='tim')
    conn = SyncTest._connection
    b = SyncTest.byName('bob')
    # change the row behind SQLObject's back; the cached value survives
    conn.query("UPDATE sync_test SET name_col = 'robert' WHERE id = %i"
               % b.id)
    assert b.name == 'bob'
    b.expire()
    # after expire(), the next access reloads from the database
    assert b.name == 'robert'
    conn.query("UPDATE sync_test SET name_col = 'bobby' WHERE id = %i"
               % b.id)
    b.sync()
    assert b.name == 'bobby'
| StarcoderdataPython |
3539427 | <reponame>DavidBitner/Aprendizado-Python
def sum_of_first_odds(start, count):
    """Sum `count` consecutive odd numbers beginning at `start`.

    An even `start` is bumped to the next odd number first, matching the
    original inline loop.
    """
    if start % 2 == 0:
        start += 1
    return sum(range(start, start + 2 * count, 2))


def main():
    """Read a case count, then one `x y` pair per case, and print the sum
    of the y consecutive odd numbers starting at x (rounded up to odd)."""
    n = int(input())
    for _ in range(0, n):
        A, B = input().split(" ")
        print(sum_of_first_odds(int(A), int(B)))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
5149589 | from .gpu import set_gpu, run_and_release
from .keras_tuner_hiplot.kt2hip import fetch_my_experiment
from .debug import inspect_distances
from .visualize import (visualize_distance_distribution, plot_history,
visualize_pairs, visualize_distances)
| StarcoderdataPython |
1625069 | <filename>corehq/apps/tzmigration/templatetags/tzmigration.py
from __future__ import absolute_import
from django import template
from corehq.apps.domain_migration_flags.api import get_migration_status
from corehq.apps.tzmigration.api import TZMIGRATION_SLUG
register = template.Library()
@register.filter
def tzmigration_status(request):
if request.domain:
return get_migration_status(request.domain, TZMIGRATION_SLUG)
else:
return None
| StarcoderdataPython |
102338 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from npdl import activations
def test_activation():
    """The abstract Activation base must refuse forward/derivative calls
    and stringify to its class name."""
    from npdl.activations import Activation
    base_act = Activation()
    with pytest.raises(NotImplementedError):
        base_act.forward((10, 10))
    with pytest.raises(NotImplementedError):
        base_act.derivative()
    assert str(base_act) == 'Activation'
def test_get():
    """activations.get rejects non-string and unknown-name lookups."""
    for bad_lookup in (1, 'l'):
        with pytest.raises(ValueError):
            activations.get(bad_lookup)
class TestActivations(object):
    """Smoke-test each named activation fetched through `activations.get`:
    forward output range/shape and derivative shape on a fixed 4x6 input.
    NOTE(review): checks like `0. <= np.all(f_res) <= 1.` compare the
    *boolean* result of np.all against the bounds, so they are weaker than
    they look — `np.all((0 <= f_res) & (f_res <= 1))` was probably meant.
    """
    @pytest.mark.parametrize('activation',
                             ['sigmoid',
                              'tanh',
                              'relu',
                              'linear',
                              'softmax',
                              'elliot',
                              'SymmetricElliot',
                              'SoftPlus',
                              'SoftSign'])
    def test_activation(self, activation):
        # `input` shadows the builtin, kept for byte-identical behavior.
        input = np.arange(24).reshape((4, 6))
        npdl_act = activations.get(activation)
        if activation == 'sigmoid':
            f_res = npdl_act.forward(input)
            assert 0. <= np.all(f_res) <= 1.
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'tanh':
            f_res = npdl_act.forward(input)
            assert -1. <= np.all(f_res) <= 1.0
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'relu':
            f_res = npdl_act.forward(input)
            assert np.all(f_res) >= 0.
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative()) <= 1.
        elif activation == 'linear':
            f_res = npdl_act.forward(input)
            assert np.allclose(f_res, input)
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative()) == 1.
        elif activation == 'softmax':
            f_res = npdl_act.forward(input)
            assert 0. <= np.all(f_res) <= 1.0
            assert npdl_act.derivative().shape == input.shape
            assert np.all(npdl_act.derivative()) == 1.
        elif activation == 'elliot':
            f_res = npdl_act.forward(input)
            assert f_res.shape == input.shape
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'SymmetricElliot':
            f_res = npdl_act.forward(input)
            assert f_res.shape == input.shape
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'SoftPlus':
            f_res = npdl_act.forward(input)
            assert f_res.shape == input.shape
            assert npdl_act.derivative().shape == input.shape
        elif activation == 'SoftSign':
            f_res = npdl_act.forward(input)
            assert f_res.shape == input.shape
            assert npdl_act.derivative().shape == input.shape
| StarcoderdataPython |
4827265 | # -*- coding: utf-8 -*-
"""
Created on 06 Jan 2021 16:57:17
@author: jiahuei
python -m unittest coco_caption/test_coco_caption.py
"""
import unittest
import os
from coca.coco_caption.eval import evaluate_caption_json
from coca.data.mscoco import MscocoDataset
from .paths import TEST_DATA_DIRPATH
class TestCocoCaption(unittest.TestCase):
    """Regression test: caption metrics computed on a fixed result file
    must match previously recorded values."""
    # Metric keys expected in the score dict returned by the evaluator.
    METRICS = ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4", "METEOR", "ROUGE_L", "CIDEr", "SPICE"]
    def test_mscoco_score(self):
        # Evaluate a checkpointed caption JSON against MSCOCO annotations.
        scores, scores_detailed, coco_eval = evaluate_caption_json(
            res_file=os.path.join(TEST_DATA_DIRPATH, "caption_00156000.json"), ann_file=MscocoDataset.ANNOTATION_FILE
        )
        scores = [round(scores[_], 3) for _ in self.METRICS]
        self.assertEqual(
            scores, [0.806, 0.655, 0.514, 0.398, 0.288, 0.584, 1.311, 0.220], "Scores are different from expected."
        )
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
6434974 | <filename>tests/unit/factory/test_table.py
__author__ = "<NAME>, <NAME>"
__credits__ = "<NAME>"
import unittest
from nose.plugins.attrib import attr
import os
from jnpr.junos import Device
from jnpr.junos.factory.table import Table
from mock import patch
from lxml import etree
from jnpr.junos.op.phyport import PhyPortTable
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr('unit')
class TestFactoryTable(unittest.TestCase):
    """Unit tests for jnpr.junos.factory Table.
    Device RPCs are mocked: `_mock_manager` redirects each RPC call to a
    canned XML reply loaded from the local `rpc-reply/` directory, keyed
    by the RPC's tag name.
    """
    @patch('ncclient.manager.connect')
    def setUp(self, mock_connect):
        # Fake the NETCONF connect so Device.open() never touches the network.
        mock_connect.side_effect = self._mock_manager
        self.dev = Device(host='1.1.1.1', user='rick', password='<PASSWORD>',
                          gather_facts=False)
        self.dev.open()
        self.table = Table(dev=self.dev)
        self.ppt = PhyPortTable(self.dev)
    def test_config_constructor(self):
        self.assertTrue(isinstance(self.table.D, Device))
    def test_table_hostname(self):
        self.assertEqual(self.table.hostname, '1.1.1.1')
    def test_table_is_container(self):
        self.assertTrue(self.table.is_container)
    def test_table_repr_xml_none(self):
        # Before any get(), the table has no XML backing.
        self.assertEqual(repr(self.table), 'Table:1.1.1.1 - Table empty')
    def test_table_view_setter_ValueError(self):
        try:
            self.table.view = 'test'
        except Exception as ex:
            self.assertEqual(ex.__class__, ValueError)
    @patch('jnpr.junos.Device.execute')
    def test_keys_RuntimeError(self, mock_execute):
        # A non-str/non-list ITEM_NAME_XPATH must raise at keys() time.
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.ppt.ITEM_NAME_XPATH = 1
        self.assertRaises(RuntimeError, self.ppt.keys)
    @patch('jnpr.junos.Device.execute')
    def test_keys__keys_composite(self, mock_execute):
        # Composite xpath keys yield tuples; missing fields become None.
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.ppt.ITEM_NAME_XPATH = ['name', 'missing', 'mtu']
        self.assertEqual(self.ppt.keys(),
                         [('ge-0/0/0', None, '1514'), ('ge-0/0/1', None, '1514')])
    @patch('jnpr.junos.Device.execute')
    def test_keys__keys_pipe(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.lldp = LLDPNeighborTable(self.dev)
        self.lldp.get()
        self.assertEqual(self.lldp.keys(), ['et-0/0/48', 'et-0/0/49', 'xe-0/0/13'])
    @patch('jnpr.junos.Device.execute')
    def test_table_repr_xml_not_none(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.table.xml = self.ppt.xml
        self.table.ITEM_XPATH = self.ppt.ITEM_XPATH
        self.assertEqual(repr(self.table), 'Table:1.1.1.1: 2 items')
    @patch('jnpr.junos.Device.execute')
    def test_table_get_keys_values(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertEqual(self.ppt.keys(), ['ge-0/0/0', 'ge-0/0/1'])
        self.assertEqual(len(self.ppt.values()), 2)
        # values() must also work when no view is set
        self.ppt.view = None
        self.assertEqual(len(self.ppt.values()), 2)
    @patch('jnpr.junos.Device.execute')
    def test_table__getitem__(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertEqual(self.ppt[0].ITEM_NAME_XPATH, 'name')
    @patch('jnpr.junos.Device.execute')
    def test_table__getitem__slice(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertEqual(self.ppt[:1][0].__class__.__name__, 'PhyPortView')
    @patch('jnpr.junos.Device.execute')
    def test_table__getitem__tuple(self, mock_execute):
        # Tuple keys that don't match any item return None.
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertEqual(self.ppt[('ge-0/0/0',)], None)
    @patch('jnpr.junos.Device.execute')
    def test_table__contains__(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertTrue('ge-0/0/0' in self.ppt)
    @patch('jnpr.junos.Device.execute')
    def test_table_items(self, mock_execute):
        mock_execute.side_effect = self._mock_manager
        self.ppt.get('ge-0/0/0')
        self.assertEqual(len(self.ppt.items()[1][1]), 8)
    def test_table_get_return_none(self):
        self.assertEqual(self.table.get('ge-0/0/0'), None)
    def test_table_get_RuntimeError(self):
        self.assertRaises(RuntimeError, self.table._keys)
    @patch('jnpr.junos.Device.execute')
    @patch('__builtin__.file')
    def test_table_savexml(self, mock_file, mock_execute):
        # NOTE(review): patches the Python 2 builtin `file`; this test is
        # Py2-only as written.
        mock_execute.side_effect = self._mock_manager
        self.ppt.xml = etree.XML('<root><a>test</a></root>')
        self.ppt.savexml('/vasr/tmssp/foo.xml', hostname=True, append='test')
        mock_file.assert_called_once_with('/vasr/tmssp/foo_1.1.1.1_test.xml',
                                          'w')
        self.ppt.savexml('/vasr/tmssp/foo.xml', hostname=True, timestamp=True)
        self.assertEqual(mock_file.call_count, 2)
    # Load a canned rpc-reply XML file and convert it through the device
    # handler's reply transform into the element the code under test expects.
    def _read_file(self, fname):
        from ncclient.xml_ import NCElement
        fpath = os.path.join(os.path.dirname(__file__),
                             'rpc-reply', fname)
        foo = open(fpath).read()
        rpc_reply = NCElement(foo, self.dev._conn.
                              _device_handler.transform_reply())\
            ._NCElement__doc[0]
        return rpc_reply
    # Stand-in for both ncclient.manager.connect and Device.execute:
    # kwargs -> build a fake Manager; args -> answer the RPC with the
    # canned file named after the RPC tag.
    def _mock_manager(self, *args, **kwargs):
        if kwargs:
            if 'normalize' in kwargs and args:
                return self._read_file(args[0].tag + '.xml')
            device_params = kwargs['device_params']
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
        if args:
            return self._read_file(args[0].tag + '.xml')
| StarcoderdataPython |
3450029 | from flask.testing import FlaskClient
from tests import endpoint
def test_widgets_return_models(client: FlaskClient, admin_login: dict):
resp = client.get(endpoint('/widgets'))
assert resp.status_code == 401
assert resp.json['error'] == 'Token is missing!'
resp = client.get(endpoint('/widgets'), headers=admin_login)
assert resp.status_code == 200
assert isinstance(resp.json, list)
assert len(resp.json) == 9
assert resp.json[0]['class'] == 'dal.models.Project'
assert len(resp.json[0]['fields']) == 5
assert len(resp.json[0]['relationships']) == 1
assert resp.json[0]['relationships'][0]['class'] == 'dal.models.Room'
assert resp.json[0]['relationships'][0]['name'] == 'rooms'
def test_widget_create_widget(client: FlaskClient, admin_login: dict):
    """POST /widgets validates the payload, then stores company-wide widgets in
    CompanyProfile.settings and private widgets in the user's preferences."""
    from dal.models import CompanyProfile
    from dal.models import User
    # Empty payload: every required field is reported.
    resp = client.post(endpoint('/widgets'), json={}, headers=admin_login)
    assert resp.status_code == 400
    assert 'description' in resp.json['error']
    assert 'name' in resp.json['error']
    assert 'schema' in resp.json['error']
    # Name with spaces is rejected by the name validator.
    schema = {
        'name': 'Something else',
        'description': 'A description to show',
        'schema': []
    }
    resp = client.post(endpoint('/widgets'), json=schema, headers=admin_login)
    assert resp.status_code == 400
    assert resp.json['error']['name'] == 'Name may consists of letters, dashes and underscores'
    # Valid company-wide widget definition.
    schema = {
        'name': 'schema_name',
        'description': 'A description to show',
        'schema': {
            'model': 'dal.models.Balance',
            'conditions': [{'AND': [{'column': 'dal.models.Balance.due_date', 'value': 'today', 'comparator': 'lt'}]}],
            'limit': 1,
            'order_dir': 'desc',
            'order_by': 'dal.models.Balance.due_date',
            'fields': [
                'dal.models.Balance.balance',
                'dal.models.Balance.due_date',
                'dal.models.Tenant.last_name',
                'dal.models.RentalAgreement.id',
                'dal.models.Room.name'
            ],
            'relationships': [
                'dal.models.RentalAgreement', 'dal.models.TenantHistory', 'dal.models.Tenant',
                'dal.models.Room'
            ]
        }
    }
    resp = client.post(endpoint('/widgets'), json=schema, headers=admin_login)
    assert resp.status_code == 200
    # Company-wide widgets land in CompanyProfile.settings['widgets'].
    c = CompanyProfile.query.first()
    assert 'widgets' in c.settings
    assert 'schema_name' in c.settings['widgets']
    assert c.settings['widgets']['schema_name']['name'] == 'schema_name'
    assert c.settings['widgets']['schema_name']['description'] == 'A description to show'
    assert c.settings['widgets']['schema_name']['schema']['model'] == 'dal.models.Balance'
    # private=True widgets are stored per-user instead.
    schema2 = {
        'name': 'new_users',
        'private': True,
        'description': 'A 2ns description',
        'schema': {
            'model': 'dal.models.User',
            'conditions': [{'AND': [{'column': 'dal.models.User.created_at', 'value': 'today', 'comparator': 'le'}]}],
            'fields': ['dal.models.User.id', 'dal.models.User.first_name', 'dal.models.User.last_name']
        }
    }
    resp = client.post(endpoint('/widgets'), json=schema2, headers=admin_login)
    assert resp.status_code == 200
    # NOTE(review): '<EMAIL>' is a dataset redaction artifact — restore the
    # real admin fixture address before running this test.
    admin = User.query.filter_by(email='<EMAIL>').first()
    assert 'widgets' in admin.attributes.preferences
    assert 'new_users' in admin.attributes.preferences['widgets']
    assert admin.attributes.preferences['widgets']['new_users']['name'] == 'new_users'
    assert admin.attributes.preferences['widgets']['new_users']['description'] == 'A 2ns description'
    assert admin.attributes.preferences['widgets']['new_users']['schema']['model'] == 'dal.models.User'
def test_run_widget(client: FlaskClient, admin_login: dict):
    """Widgets stored earlier in the module can be executed by name."""
    resp = client.get(endpoint('/widgets/dont-exist'), headers=admin_login)
    assert resp.status_code == 404
    # Company-wide widget.
    resp = client.get(endpoint('/widgets/schema_name'), headers=admin_login)
    assert resp.status_code == 200
    assert type(resp.json) == list
    # Private (per-user) widget is addressed with ?type=private.
    resp = client.get(endpoint('/widgets/new_users?type=private'), headers=admin_login)
    assert resp.status_code == 200
    assert type(resp.json) == list
    assert len(resp.json) == 1
| StarcoderdataPython |
224984 | <reponame>Emily3403/Emily_password<filename>src/emily_password/share/config.py
#!/usr/bin/env python3.10
import json
import os
import string
import sys
import base64
from emily_password.share.constants import *
from emily_password.share.utils import *
# Application name (used for config/data paths).
my_name = "emily_password"

# Maps internal config keys to the labels shown to the user.
# NOTE(review): several values below ("<PASSWORD>", "Password <PASSWORD>")
# look like dataset redaction artifacts — restore the real labels before use.
config_name_mapping = {
    "encrypted_file_name": "Encrypted File Name",
    "salt_file_name": "Salt File Name",
    "temp_file_name": "Temporary File Name",
    "is_demo": "Toggle Demo Mode",
    "demo_prefix": "Demo Prefix",
    "password_contents": "Password Content",
    "password_length": "Password <PASSWORD>",
    "show_password_num_initial": "Number of Passwords to show",
    "exit": "Exit button",
    "website": "Enter the Website",
    "username": "Enter the Username",
    "email": "Enter the E-Mail",
    "password": "<PASSWORD>",
}
class Config:
    """Shared wrapper around config.json plus the encrypted/salt data files.

    All state lives in class-level (name-mangled) attributes, so every
    instance shares the same configuration, file contents and password; the
    early return in __init__ makes repeated construction a no-op.
    """

    # Shared state: parsed config.json, raw data-file contents, master password.
    __config = {}
    __file_contents = {}
    __password = None

    def __init__(self, content: Dict[str, str] = None, password: str = None):
        if password is not None:
            Config.__password = password
        # Prevent multiple file reads
        if Config.__config and Config.__file_contents:
            return
        content = content or {}
        try:
            with open(path(config_file_name)) as f:
                Config.__config.update(json.load(f))
        except FileNotFoundError:
            print("I could not find the config.json file. Please regenerate it!")
        # Config.__config |= content

        def load_file_into_config(name, mode="r", is_demo=False):
            # Reads "<demo_prefix><name>_file_name" into __file_contents under
            # the key "demo_<name>" (demo mode) or "<name>".
            try:
                with open(path(Config.__config["demo_prefix"] * is_demo + Config.__config[name + "_file_name"]), mode) as f:
                    # TODO: Why??
                    # the_name = f.read()
                    # try:
                    #     the_name = the_name.decode()
                    # except ValueError:
                    #     pass
                    Config.__file_contents |= {"demo_" * is_demo + name: f.read()}
            except FileNotFoundError:
                # Missing data files are tolerated (fresh install).
                # Config.__file_contents |= {"demo_" * is_demo + name: ""}
                pass

        load_file_into_config("encrypted", "rb")
        load_file_into_config("salt", "rb")
        load_file_into_config("encrypted", "rb", is_demo=True)
        load_file_into_config("salt", "rb", is_demo=True)
        # Remember the demo flag so set_demo/restore_demo can toggle it back.
        Config.__config |= {"backup_demo": Config.__config["is_demo"]}

    @classmethod
    def from_file(cls):
        """Alternate constructor that (re)loads config.json explicitly."""
        with open(path(config_file_name)) as f:
            return cls(json.load(f))

    def save(self):
        """Persist the current configuration back to config.json."""
        with open(path(config_file_name), "w+") as f:
            f.write(self.to_json())

    @staticmethod
    def password():
        return Config.__password

    @staticmethod
    def encrypted_file_contents():
        # Demo mode reads the "demo_encrypted" entry instead.
        return Config.__file_contents[Config.demo() * "demo_" + "encrypted"]

    @staticmethod
    def legacy_file_contents():
        # NOTE(review): "legacy" is never loaded by __init__ — this raises
        # KeyError as written; confirm whether legacy support was removed.
        return Config.__file_contents["legacy"]

    @staticmethod
    def salt():
        # NOTE(review): unlike encrypted_file_contents(), this ignores demo
        # mode ("demo_salt") — confirm that is intended.
        return Config.__file_contents["salt"]

    @staticmethod
    def demo():
        return Config.__config["is_demo"]

    @staticmethod
    def set_demo(value: bool):
        Config.__config["backup_demo"] = Config.__config["is_demo"]
        Config.__config["is_demo"] = value

    @staticmethod
    def restore_demo():
        Config.__config["is_demo"] =Config.__config["backup_demo"]

    @staticmethod
    def encrypted_file():
        # Demo mode prepends the demo prefix to the file name.
        return Config.demo() * Config.__config["demo_prefix"] + Config.__config["encrypted_file_name"]

    @staticmethod
    def legacy_file():
        # NOTE(review): "legacy_file_name" is not in the default config — confirm.
        return Config.__config["legacy_file_name"]

    @staticmethod
    def salt_file():
        return Config.__config["salt_file_name"]

    def __getitem__(self, item):
        return Config.__config[item]

    @staticmethod
    def __setitem__(key, value):
        Config.__config[key] = value

    def __delitem__(self, key):
        del Config.__config[key]

    def __len__(self):
        return len(Config.__config)

    def __iter__(self):
        return iter(Config.__config)

    def to_json(self):
        """Return the configuration serialized as pretty-printed JSON."""
        return json.dumps(Config.__config, indent=4)
def generate_default_config():
    """Write a fresh config.json containing the factory-default settings."""
    default_config = {
        "encrypted_file_name": "PasswordsEncrypted.txt",
        "salt_file_name": "PasswordsSalt.txt",
        "temp_file_name": "PasswordsTemp.txt",
        "is_demo": True,
        "demo_prefix": "Demo",
        "password_contents": string.printable[:94],  # printable chars minus whitespace controls
        "password_length": 63,
        "show_password_num_initial": 13,
        # Keyboard keys that trigger each entry field in the UI.
        "password_input_mapping": {
            "exit": "esc",
            "website": "end",
            "username": "home",
            "email": "page_down",
            # NOTE(review): "<PASSWORD>" is a redaction artifact — restore the real key name.
            "password": "<PASSWORD>",
        }
    }
    config = Config(default_config)
    config.save()
def make_files_from_config():
    """Create the data files referenced by the config. Currently a stub."""
    # TODO: implement file creation (see commented sketch below).
    config = Config()
    # if not os.path.exists((filename := path(config["salt_file"]))):
    #     with open(filename, "wb+") as f:
    #         f.write(os.urandom(salt_length))
def setup_config():
    """First-run setup: create directories, default config.json and data files."""
    ensure_directories()
    generate_default_config()
    make_files_from_config()
# Running this module directly (re)generates the default configuration.
if __name__ == '__main__':
    setup_config()
    # try:
    #     config
    # except NameError:
    #     config = Config()
| StarcoderdataPython |
1711988 | """Unit test to test get list of data."""
from unittest import TestCase
from adefa import cli
from adefa.tests import runner
import mock
@mock.patch('adefa.cli.print_api_response')
class TestList(TestCase):
    """Unit test class to test get list of data."""

    def test_list(self, mocked_print):
        """Every `list <item>` subcommand exits 0 and prints an API response."""
        items = ['devices', 'projects', 'groups', 'uploads', 'runs', 'jobs']
        for pos, item in enumerate(items):
            # Fresh API client mock per subcommand.
            cli.client = mock.MagicMock()
            if pos > 1:
                # NOTE(review): items after the first two appear to prompt for
                # an id via click.prompt — confirmed only by the patch here.
                with mock.patch('click.prompt') as mocked_click:
                    result = runner.invoke(cli.list, [item])
                self.assertTrue(mocked_click.called)
            else:
                result = runner.invoke(cli.list, [item])
            self.assertTrue(mocked_print.called)
            self.assertEqual(result.exit_code, 0)
| StarcoderdataPython |
1925729 | <reponame>fadamsyah/final-project
import numpy as np
import pandas as pd
import numba as nb
from controller_2D import Controller_v1
import rospy
from pkg_ta.msg import Control
from nav_msgs.msg import Odometry
freq = 10 # Hz — control-loop publish rate
# Reference path, loaded once at import time.
waypoints_np = np.load('waypoints/waypoints_interpolated.npy')
# In the Arduino, CW is positive and CCW is negative
# On the other hand, in the controller algorithm, CCW is positive and CW is negative
max_steer = 35.; min_steer = -28. # For the path following control algorithm ~ degree
max_steer_arduino = 28.; min_steer_arduino = -35. # For the Arduino ~ degree
max_brake = 2.8; max_throttle = 1.
kp = 0.15; ki = 0.03; kd = 0.0  # longitudinal PID gains
ff_long = np.array([0.0, 0.0]) # no feed-forward
sat_long = np.array([-np.abs(max_brake), np.abs(max_throttle)])  # [min, max] longitudinal command
sat_lat = np.array([min_steer, max_steer]) * np.pi / 180.  # steering saturation, radians
# Latest vehicle state; updated by the odometry subscriber callback.
state = {'x': 0., 'y': 0., 'yaw': 0., 'v': 0.}
@nb.njit()
def to_euler(x, y, z, w):
    """Convert a quaternion (x, y, z, w) to np.array([roll, pitch, yaw]).

    Adapted from the Coursera self-driving-car course material.
    """
    sinr_cosp = 2 * (w * x + y * z)
    cosr_cosp = 1 - 2 * (x**2 + y**2)
    sinp = 2 * (w * y - z * x)
    siny_cosp = 2 * (w * z + x * y)
    cosy_cosp = 1 - 2 * (y**2 + z**2)
    return np.array([np.arctan2(sinr_cosp, cosr_cosp),
                     np.arcsin(sinp),
                     np.arctan2(siny_cosp, cosy_cosp)])
# Warm up the JIT so the first control cycle is not delayed by compilation.
_ = to_euler(1.5352300785980803e-15, -1.3393747145983517e-15, -0.7692164172827881, 0.638988343698562)
def main():
    """ROS node: subscribe to filtered odometry, run the 2D path-following
    controller at `freq` Hz and publish actuation + debug data on /control_signal."""
    # Create the controller object
    # NOTE(review): 2.5, 1.0, 2.5, 0.01 are positional lateral-controller
    # parameters — confirm their meaning against the Controller_v1 signature.
    controller = Controller_v1(kp, ki, kd, ff_long, sat_long,
                               2.5, 1.0, 2.5, 0.01, sat_lat,
                               waypoints_np)

    # Odometry callback: refresh the module-level `state` dict in place.
    def callback(data):
        global state
        q = data.pose.pose.orientation
        euler = to_euler(q.x, q.y, q.z, q.w)
        state['x'] = data.pose.pose.position.x
        state['y'] = data.pose.pose.position.y
        state['yaw'] = euler[2]
        state['v'] = np.sqrt(data.twist.twist.linear.x**2 + data.twist.twist.linear.y**2) # m/s

    rospy.init_node('control')
    rospy.Subscriber('/odometry/filtered_map', Odometry, callback)
    pub = rospy.Publisher('/control_signal', Control, queue_size=1)
    rate = rospy.Rate(freq) # Hz

    msg = Control()
    msg.header.frame_id = 'path_following_control'
    msg.header.seq = 0
    msg.header.stamp = rospy.Time.now()
    # Seed last_time one nominal period back so the first delta_t is sane.
    last_time = msg.header.stamp.to_sec() - 1./freq
    while not rospy.is_shutdown():
        # Calculate the actual sampling time (wall-clock, not nominal 1/freq).
        msg.header.stamp = rospy.Time.now()
        delta_t = msg.header.stamp.to_sec() - last_time
        last_time = msg.header.stamp.to_sec()

        # Calculate the control signal
        long, lat = controller.calculate_control_signal(delta_t, state['x'],
                                                        state['y'], state['v'],
                                                        state['yaw'])
        # Get the error profile
        err = controller.get_error()
        # Get the reference
        ref = controller.get_instantaneous_setpoint()

        # Send the message
        msg.header.seq += 1
        # Sign flip: the controller is CCW-positive, the Arduino CW-positive
        # (see the steering-limit constants at the top of the file).
        msg.action_steer = max(min(-lat*180/np.pi, max_steer_arduino), min_steer_arduino) # lat ~ radian
        # Positive `long` drives the throttle, negative `long` the brake.
        msg.action_throttle = max(min(long, max_throttle), 0.)
        msg.action_brake = max(min(-long, max_brake), 0.)
        msg.error_speed = err[0]
        msg.error_lateral = err[1]
        msg.error_yaw = err[2]
        msg.actual_x = state['x']
        msg.actual_y = state['y']
        msg.actual_yaw = state['yaw']
        msg.actual_speed = state['v']
        msg.wp_idx = controller.get_closest_index()
        msg.ref_x = ref[0]
        msg.ref_y = ref[1]
        msg.ref_yaw = ref[2]
        msg.ref_speed = ref[3]
        msg.ref_curvature = ref[4]
        pub.publish(msg)
        rate.sleep()
# Standard ROS entry point: a ROSInterruptException on shutdown is expected.
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
3410759 | """
Script to find and fix problematic jobs for Slurm.
"""
import sys
import re
import pymysql
import argparse
import subprocess
DB_CONFIG_FILENAME = '/etc/slurm-llnl/slurmdbd.conf'
KILL_SCRIPT_FILENAME = '/tmp/SlurmFixer-kill-orphans.sh'
CLUSTER_NAME = 'linux'  # prefix of the "<cluster>_job_table" accounting table
# list job IDS for currently running processes without printing a header
QUEUE_LIST_COMMAND = ['squeue', '-h', '-o', '"%A"']
NODE_LIST_COMMAND = ['sinfo', '-h', '-N', '-o', '%N']
EXPAND_NODE_NAMES_BASE_COMMAND = ['scontrol', 'show', 'hostname']
# Service accounts whose processes are never treated as orphans.
SKIP_USERS = [
    'root',
    'postfix',
    'ntp',
    'rpc',
    'rpcuser',
    'dbus',
    'munge',
    'ganglia',
    'nscd',
    '68',
]
# Width of the '=' separator rows in the orphan report.
FIND_ORPHAN_DASHES = 80
class Config(object):
    """
    Parses the slurmdbd.conf config file and exposes the storage (MySQL)
    connection settings as attributes.
    """
    def __init__(self, filename):
        """
        Setup config based on slurm database config filename.
        :param filename: str: path to slurmdbd.conf
        """
        with open(filename, 'r') as infile:
            config_dict = Config.get_config_dictionary(infile)
        self.host = config_dict['StorageHost']
        self.port = config_dict['StoragePort']
        self.user = config_dict['StorageUser']
        # Restored key: the previous 'Storage<PASSWORD>' literal was a
        # redaction artifact; slurmdbd.conf names this parameter StoragePass.
        self.password = config_dict['StoragePass']
        self.db_name = config_dict['StorageLoc']

    @staticmethod
    def get_config_dictionary(infile):
        """
        Return a dictionary of key/value pairs based on lines in a file.
        Lines that are blank or start with '#' are skipped.
        :param infile: file to read text from.
        :return: dict: settings from the file
        """
        result = {}
        for line in infile.readlines():
            line = line.strip()
            if line.startswith("#") or not line:
                continue
            # Split on the FIRST '=' only, so values containing '='
            # (e.g. passwords) survive intact. Lines without '=' map to "".
            key, _, value = line.partition("=")
            result[key.strip()] = value.strip()
        return result
def get_db_connection(config_filename):
    """
    Create a database connection to the mysql database associated with slurm.
    :param config_filename: path to slurmdbd.conf
    :return: pymysql connection (rows returned as dicts via DictCursor)
    """
    config = Config(config_filename)
    # StoragePort may be empty/missing in slurmdbd.conf; pymysql wants int or None.
    port = int(config.port) if config.port else None
    return pymysql.connect(host=config.host,
                           user=config.user,
                           # Restored: the redacted '<PASSWORD>' token was invalid syntax.
                           password=config.password,
                           db=config.db_name,
                           port=port,
                           cursorclass=pymysql.cursors.DictCursor)
def find_unfinished_jobs(db):
    """
    Find jobs in the slurm database that have no end time or state isn't one of the finished states.
    :param db: database connection to query against
    :return: results from the query (list of row dicts, oldest first)
    """
    with db.cursor() as cursor:
        # NOTE(review): presumably state < 3 covers pending/running/suspended
        # in slurmdb accounting — verify against the slurm state enum.
        sql = "select *, from_unixtime(time_start) as start " \
              " from {}_job_table where state < 3 or time_end = 0" \
              " order by time_start".format(CLUSTER_NAME)
        cursor.execute(sql)
        result = cursor.fetchall()
    return result
def find_running_jobs():
    """Return the ids (ints) of jobs squeue currently reports as running."""
    output = subprocess.check_output(QUEUE_LIST_COMMAND).decode("utf-8")
    job_ids = []
    # squeue's '"%A"' format wraps each id in literal quotes; strip them.
    for raw_line in output.split("\n"):
        if raw_line:
            job_ids.append(int(raw_line.replace('"', '')))
    return job_ids
def find_bad_jobs(db):
    """
    Return a list of jobs that are not finished according to the database
    but squeue doesn't think they are running.
    :param db: database connection to query against
    :return: list of jobs, where each job is a dict
    """
    running_jobs = set(find_running_jobs())
    # Anything the accounting DB thinks is unfinished but squeue no longer
    # reports is a stale record.
    return [job for job in find_unfinished_jobs(db)
            if job['id_job'] not in running_jobs]
def find_bad():
    """
    Print out jobs that have finished but the slurm database is unaware they have finished.
    """
    db = get_db_connection(DB_CONFIG_FILENAME)
    print_bad_job_line("JOBID", "STARTED", "ACCOUNT", "USERID", "STATE", "JOB NAME")
    for job in find_bad_jobs(db):
        try:
            print_bad_job_line(str(job['id_job']), str(job['start']),
                               job['account'], str(job['id_user']),
                               str(job['state']), job['job_name'])
        except (KeyError, TypeError) as err:
            # Previously a bare `except: pass`, which also swallowed
            # KeyboardInterrupt. Skip malformed rows but say so.
            sys.stderr.write("Skipping unprintable job row: {}\n".format(err))
def print_bad_job_line(job_id, start, account, user_id, state, job_name):
    """
    Print one row of the bad-job report in aligned columns.
    Note: the state column is printed before the user column on purpose —
    both the header call and the data calls pass arguments in signature order.
    """
    padded_columns = [job_id.ljust(10), start.ljust(20), account.ljust(12),
                      state.ljust(5), user_id.ljust(10), job_name]
    print(" ".join(padded_columns))
def fix_bad():
    """
    Print out SQL that will fix the slurm database for jobs that have finished
    but are not updated in the slurm database. The SQL is printed, not executed.
    """
    db = get_db_connection(DB_CONFIG_FILENAME)
    fix_bad_jobs(db)
def fix_bad_jobs(db):
    """
    Print SQL statements that mark each stale job finished (state 5,
    time_end just after time_start). Statements are printed — not executed —
    so an operator can review them before running.
    :param db: database connection used to look up the stale jobs
    """
    bad_jobs = find_bad_jobs(db)
    print("Fixing", len(bad_jobs), "jobs.")
    # The previous version opened a cursor here but never used it; the SQL is
    # only printed, so no cursor is needed.
    for job in bad_jobs:
        sql = "update {}_job_table " \
              " set state = 5, time_end = time_start + 1 " \
              " where id_job = {};".format(CLUSTER_NAME, job['id_job'])
        print(sql)
def find_orphans():
    """
    Print processes running under non-service accounts on nodes where squeue
    has no matching job for that user, and write a kill script for them.
    """
    # Build the list of orphans: a process is an orphan when its (user, node)
    # pair has no corresponding entry in squeue's output.
    node_names = get_node_names()
    running_user_node_names = set(get_running_user_node_names())
    orphan_list = []
    for node_name, user, pid, cmd in get_node_processes(node_names):
        try:
            user_node_name = "{}|{}".format(user, node_name)
            if user_node_name not in running_user_node_names:
                orphan_list.append((node_name, user, pid, cmd))
        except Exception:
            # Narrowed from a bare `except:`; also fixed the misplaced period
            # that used to follow the newline in this message.
            sys.stderr.write("Failed to check node {}.\n".format(node_name))
    create_kill_orphan_script(orphan_list, KILL_SCRIPT_FILENAME)

    # Print out results as an aligned table.
    print("")
    print_find_orphans_dashes()
    print_orphan("NODE", "USER", "PID", "CMD")
    print_find_orphans_dashes()
    for node_name, user, pid, cmd in orphan_list:
        print_orphan(node_name, user, pid, cmd)
    print_find_orphans_dashes()
    print("")
    print("Script to kill orphans written to {}".format(KILL_SCRIPT_FILENAME))
    print("")
def print_find_orphans_dashes(width=None):
    """
    Print a horizontal '=' separator row for the orphan-process table.
    :param width: number of '=' characters; defaults to FIND_ORPHAN_DASHES
        (resolved at call time, so existing no-argument callers are unchanged).
    """
    if width is None:
        width = FIND_ORPHAN_DASHES
    print("=" * width)
def create_kill_orphan_script(orphan_list, output_filename):
    """
    Write a shell script containing one `ssh <node> kill <pid>` line per
    orphan process.
    :param orphan_list: [(node_name, user, pid, cmd)] processes to kill
    :param output_filename: str: path the script is written to
    """
    kill_lines = ['ssh {} kill {}\n'.format(node_name, pid)
                  for node_name, _user, pid, _cmd in orphan_list]
    with open(output_filename, 'w') as script_file:
        script_file.writelines(kill_lines)
def print_orphan(node_name, user, pid, cmd):
    """Print one orphan-process row in aligned columns."""
    row = " ".join([node_name.ljust(20), user.ljust(10), pid.ljust(8), cmd])
    print(row)
def get_node_names():
    """
    Return the composite node-name string for the cluster, as reported by
    sinfo (e.g. "node[01-10]"). Note: returns a single string, not a list.
    """
    # Removed an unused `node_names = []` local that was never populated.
    composite_names_str = subprocess.check_output(NODE_LIST_COMMAND).decode("utf-8").strip()
    return composite_names_str
def get_node_processes(composite_node_name):
    """
    Get a list of (node_name, user, pid, command) of all processes running on a node.
    Processes owned by service accounts in SKIP_USERS are filtered out.
    :param composite_node_name: str: composite name of the nodes we will ssh to and get process listing.
    :return: [(node_name, user, pid, command), ...]: processes running on the nodes
    """
    node_processes = []
    ps_command = ["sudo", "pdsh", "-w", composite_node_name, "ps", "-e", "--no-headers", "-o", "\"%U|%p|%a\""]
    lines = subprocess.check_output(ps_command).decode("utf-8").strip().split('\n')
    for line in lines:
        # pdsh prefixes each line with "node:"; fields are '|'-separated.
        # Split at most twice so commands containing '|' are not truncated.
        parts = [part.strip() for part in line.strip().split('|', 2)]
        node_name_and_user_parts = parts[0]
        pid = parts[1]
        cmd = parts[2]
        node_name, user = [part.strip() for part in node_name_and_user_parts.split(':')]
        if user not in SKIP_USERS:
            node_processes.append((node_name, user, pid, cmd))
    return node_processes
def name_string_to_list(composite_names):
    """
    Split a comma-separated slurm composite node-name string.
    Commas inside square brackets belong to a single composite name
    (e.g. "n[1,2]") and are therefore not treated as separators.
    :param composite_names: str: a comma separated nodename list
    :return: list of composite node names
    """
    # A name is a run of non-comma characters, where a bracketed group
    # may contain anything up to its closing bracket.
    return re.findall(r'(?:[^,\[]|\[[^\]]*\])+', composite_names)
def get_running_user_node_names():
    """
    Get a list of 'user|node_name' for all running processes.
    Uses squeue's '%u|%N' format so each line pairs a user with a node list.
    :return: [str]: list of 'user|node_name'
    """
    squeue_cmd = ["squeue", "-o", "%u|%N"]
    lines = subprocess.check_output(squeue_cmd).decode("utf-8").strip().split('\n')
    return lines
def add_sub_command(child_parsers, name, help, func):
    """
    Register one sub-command on the given subparsers object, dispatching to
    *func* via the parsed ``func`` attribute.
    """
    sub_parser = child_parsers.add_parser(name, help=help)
    sub_parser.set_defaults(func=func)
def main():
    """Parse the sub-command from argv and dispatch to the matching handler."""
    parser = argparse.ArgumentParser()
    child_parsers = parser.add_subparsers(help='commands')
    add_sub_command(child_parsers, 'find_bad', 'Find bad(unfinished) jobs', find_bad)
    add_sub_command(child_parsers, 'fix_bad', 'Fix bad(unfinished) jobs', fix_bad)
    add_sub_command(child_parsers, 'find_orphans', 'Find orphaned/rogue processes', find_orphans)
    parsed_args = parser.parse_args()
    # Sub-commands are optional on Python 3: no sub-command means no .func.
    if hasattr(parsed_args, 'func'):
        parsed_args.func()
    else:
        parser.print_help()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11329628 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import sys
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from ..file_utils import add_end_docstrings, is_tf_available, is_torch_available
from ..modelcard import ModelCard
from ..tokenization_utils import PreTrainedTokenizer, TruncationStrategy
from ..utils import logging
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TFAutoModel
if is_torch_available():
import torch
from ..models.auto.modeling_auto import AutoModel
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def infer_framework_from_model(model, model_classes: Optional[Dict[str, type]] = None, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).

    If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
    :obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
    Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.

    If both frameworks are installed and available for :obj:`model`, PyTorch is selected.

    Args:
        model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
        model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
            A mapping framework ("pt"/"tf") to model class.
        revision (:obj:`str`, `optional`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
            identifier allowed by git.

    Returns:
        :obj:`Tuple`: A tuple framework, model.
    """
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    # Bug fix: model_classes defaults to None but was dereferenced with .get()
    # below, which crashed for string checkpoints when no mapping was supplied.
    if model_classes is None:
        model_classes = {}
    if isinstance(model, str):
        if is_torch_available() and not is_tf_available():
            model_class = model_classes.get("pt", AutoModel)
            model = model_class.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model_class = model_classes.get("tf", TFAutoModel)
            model = model_class.from_pretrained(model, revision=revision)
        else:
            # Both frameworks installed: try PyTorch first, fall back to TF.
            try:
                model_class = model_classes.get("pt", AutoModel)
                model = model_class.from_pretrained(model, revision=revision)
            except OSError:
                model_class = model_classes.get("tf", TFAutoModel)
                model = model_class.from_pretrained(model, revision=revision)

    # TF model classes are conventionally prefixed with "TF".
    framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
    return framework, model
def get_framework(model, revision: Optional[str] = None):
    """
    Select framework (TensorFlow or PyTorch) to use.

    Deprecated (per the warning below): use :func:`infer_framework_from_model`
    instead, which also returns the instantiated model.

    Args:
        model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
            the model name). If no specific model is provided, defaults to using PyTorch.
    """
    warnings.warn(
        "`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
        FutureWarning,
    )
    if not is_tf_available() and not is_torch_available():
        raise RuntimeError(
            "At least one of TensorFlow 2.0 or PyTorch should be installed. "
            "To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
            "To install PyTorch, read the instructions at https://pytorch.org/."
        )
    if isinstance(model, str):
        # Instantiates the model purely to inspect its class; the instance is discarded.
        if is_torch_available() and not is_tf_available():
            model = AutoModel.from_pretrained(model, revision=revision)
        elif is_tf_available() and not is_torch_available():
            model = TFAutoModel.from_pretrained(model, revision=revision)
        else:
            try:
                model = AutoModel.from_pretrained(model, revision=revision)
            except OSError:
                model = TFAutoModel.from_pretrained(model, revision=revision)

    framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
    return framework
def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
    """
    Select a default model to use for a given task. Defaults to pytorch if ambiguous.

    Args:
        targeted_task (:obj:`Dict`):
            Dictionary representing the given task; must contain a "default" entry.
        framework (:obj:`str`, None):
            "pt", "tf" or None when the framework has not been decided yet.
        task_options (:obj:`Any`, None):
            Extra key fully specifying the task (e.g. (SRC, TGT) languages for translation).

    Returns:
        :obj:`str`: The model identifier of the default model for this pipeline.
    """
    # When only one framework is installed there is nothing to choose.
    if is_torch_available() and not is_tf_available():
        framework = "pt"
    elif is_tf_available() and not is_torch_available():
        framework = "tf"

    defaults = targeted_task["default"]
    if task_options:
        if task_options not in defaults:
            raise ValueError("The task does not provide any default models for options {}".format(task_options))
        default_models = defaults[task_options]["model"]
    elif "model" in defaults:
        default_models = defaults["model"]
    else:
        # XXX This error message needs to be updated to be more generic if more tasks are going to become
        # parametrized
        raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')

    if framework is None:
        framework = "pt"
    return default_models[framework]
class PipelineException(Exception):
    """
    Raised by a :class:`~transformers.Pipeline` when handling __call__.

    Args:
        task (:obj:`str`): The task of the pipeline.
        model (:obj:`str`): The model used by the pipeline.
        reason (:obj:`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        # The base Exception carries the human-readable reason; task and
        # model are retained so error handlers can report context.
        super().__init__(reason)
        self.model = model
        self.task = task
class ArgumentHandler(ABC):
    """
    Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
    Subclasses normalize the various accepted call signatures into model inputs.
    """

    @abstractmethod
    def __call__(self, *args, **kwargs):
        raise NotImplementedError()
class PipelineDataFormat:
    """
    Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently includes:

    - JSON
    - CSV
    - stdin/stdout (pipe)

    :obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
    columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    SUPPORTED_FORMATS = ["json", "csv", "pipe"]

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite: bool = False,
    ):
        self.output_path = output_path
        self.input_path = input_path
        # No column given -> single pseudo-column "" (truthy list of one entry).
        self.column = column.split(",") if column is not None else [""]
        self.is_multi_columns = len(self.column) > 1

        # Multi-column spec "kwarg=dataset_column" becomes (kwarg, column)
        # pairs; a bare name maps to itself.
        if self.is_multi_columns:
            self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]

        if output_path is not None and not overwrite:
            if exists(abspath(self.output_path)):
                raise OSError("{} already exists on disk".format(self.output_path))

        if input_path is not None:
            if not exists(abspath(self.input_path)):
                raise OSError("{} doesnt exist on disk".format(self.input_path))

    @abstractmethod
    def __iter__(self):
        raise NotImplementedError()

    @abstractmethod
    def save(self, data: Union[dict, List[dict]]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.

        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.
        """
        raise NotImplementedError()

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        """
        Save the provided data object as a pickle-formatted binary data on the disk.

        Args:
            data (:obj:`dict` or list of :obj:`dict`): The data to store.

        Returns:
            :obj:`str`: Path where the data has been saved.
        """
        # Reuse output_path's base name with a ".pickle" extension.
        path, _ = os.path.splitext(self.output_path)
        binary_path = os.path.extsep.join((path, "pickle"))

        with open(binary_path, "wb+") as f_output:
            pickle.dump(data, f_output)

        return binary_path

    @staticmethod
    def from_str(
        format: str,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ) -> "PipelineDataFormat":
        """
        Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
        :obj:`format`.

        Args:
            format: (:obj:`str`):
                The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
            output_path (:obj:`str`, `optional`):
                Where to save the outgoing data.
            input_path (:obj:`str`, `optional`):
                Where to look for the input data.
            column (:obj:`str`, `optional`):
                The column to read.
            overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to overwrite the :obj:`output_path`.

        Returns:
            :class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
        """
        if format == "json":
            return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "csv":
            return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        elif format == "pipe":
            return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
        else:
            raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
class CsvPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using CSV data format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

    def __iter__(self):
        # Stream rows; yield a kwargs dict for multi-column specs, otherwise
        # the single requested column's value.
        with open(self.input_path, "r") as f:
            for row in csv.DictReader(f):
                if self.is_multi_columns:
                    yield {k: row[c] for k, c in self.column}
                else:
                    yield row[self.column[0]]

    def save(self, data: List[dict]):
        """
        Save the provided data object with the representation for the current
        :class:`~transformers.pipelines.PipelineDataFormat`.

        Args:
            data (:obj:`List[dict]`): The data to store.
        """
        with open(self.output_path, "w") as f:
            if not data:
                return
            # Header is taken from the keys of the first record.
            writer = csv.DictWriter(f, list(data[0].keys()))
            writer.writeheader()
            writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
    """
    Support for pipelines using JSON file format.

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __init__(
        self,
        output_path: Optional[str],
        input_path: Optional[str],
        column: Optional[str],
        overwrite=False,
    ):
        super().__init__(output_path, input_path, column, overwrite=overwrite)

        # The whole input file is loaded eagerly at construction time.
        with open(input_path, "r") as f:
            self._entries = json.load(f)

    def __iter__(self):
        for entry in self._entries:
            if self.is_multi_columns:
                # Map dataset columns to pipeline kwargs per the column spec.
                yield {k: entry[c] for k, c in self.column}
            else:
                yield entry[self.column[0]]

    def save(self, data: dict):
        """
        Save the provided data object in a json file.

        Args:
            data (:obj:`dict`): The data to store.
        """
        with open(self.output_path, "w") as f:
            json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
    r"""
    Read data from piped input to the python process. For multi columns data, columns should separated by \t

    If columns are provided, then the output will be a dictionary with {column_x: value_x}

    Args:
        output_path (:obj:`str`, `optional`): Where to save the outgoing data.
        input_path (:obj:`str`, `optional`): Where to look for the input data.
        column (:obj:`str`, `optional`): The column to read.
        overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to overwrite the :obj:`output_path`.
    """

    def __iter__(self):
        """Yield lines read from stdin, splitting tab-separated rows into columns."""
        for line in sys.stdin:
            if "\t" not in line:
                # No dictionary to map arguments: pass the raw line through.
                yield line
                continue
            fields = line.split("\t")
            if self.column:
                # Dictionary to map arguments: pair each column alias with its field.
                yield {kwargs: value for (kwargs, _), value in zip(self.column, fields)}
            else:
                yield tuple(fields)

    def save(self, data: dict):
        """
        Print the data.

        Args:
            data (:obj:`dict`): The data to store.
        """
        print(data)

    def save_binary(self, data: Union[dict, List[dict]]) -> str:
        # Binary output cannot go to stdout; an explicit file path is required.
        if self.output_path is None:
            raise KeyError(
                "When using piped input on pipeline outputting large object requires an output file path. "
                "Please provide such output path through --output argument."
            )
        return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
# Shared docstring fragment appended to every pipeline class via
# ``add_end_docstrings`` below; it is runtime documentation text, so its
# content must stay stable for the rendered API docs.
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no model
is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on
the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
    """
    The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
    different pipelines.

    Base class implementing pipelined operations. Pipeline workflow is defined as a sequence of the following
    operations:

        Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output

    Pipeline supports running on CPU or GPU through the device argument (see below).

    Some pipeline, like for instance :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'` )
    output large tensor object as nested-lists. In order to avoid dumping such large structure as textual data we
    provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
    pickle format.
    """

    # Subclasses may override this with the names used by their ArgumentHandler.
    default_input_names = None

    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        task: str = "",
        args_parser: ArgumentHandler = None,
        device: int = -1,
        binary_output: bool = False,
    ):
        # NOTE(review): ``args_parser`` is accepted but not stored here —
        # presumably consumed by subclasses; confirm before removing.
        if framework is None:
            # Guess "pt" vs "tf" from the model object itself.
            framework, model = infer_framework_from_model(model)

        self.task = task
        self.model = model
        self.tokenizer = tokenizer
        self.modelcard = modelcard
        self.framework = framework
        # TF keeps a plain device ordinal; PyTorch gets a torch.device
        # ("cpu" for any negative ordinal, otherwise the CUDA device id).
        self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
        self.binary_output = binary_output

        # Special handling: move the PyTorch model onto the requested GPU.
        if self.framework == "pt" and self.device.type == "cuda":
            self.model = self.model.to(self.device)

        # Update config with task specific parameters
        task_specific_params = self.model.config.task_specific_params
        if task_specific_params is not None and task in task_specific_params:
            self.model.config.update(task_specific_params.get(task))

    def save_pretrained(self, save_directory: str):
        """
        Save the pipeline's model and tokenizer.

        Args:
            save_directory (:obj:`str`):
                A path to the directory where to saved. It will be created if it doesn't exist.
        """
        if os.path.isfile(save_directory):
            logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
            return
        os.makedirs(save_directory, exist_ok=True)

        self.model.save_pretrained(save_directory)
        self.tokenizer.save_pretrained(save_directory)
        # The model card is optional; only persist it when present.
        if self.modelcard is not None:
            self.modelcard.save_pretrained(save_directory)

    def transform(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)

    def predict(self, X):
        """
        Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
        """
        return self(X=X)

    @contextmanager
    def device_placement(self):
        """
        Context Manager allowing tensor allocation on the user-specified device in framework agnostic way.

        Returns:
            Context manager

        Examples::

            # Explicitly ask for tensor allocation on CUDA device :0
            pipe = pipeline(..., device=0)
            with pipe.device_placement():
                # Every framework specific tensor allocation will be done on the request device
                output = pipe(...)
        """
        if self.framework == "tf":
            # For TF, self.device is still the raw ordinal passed to __init__.
            with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
                yield
        else:
            if self.device.type == "cuda":
                torch.cuda.set_device(self.device)
            yield

    def ensure_tensor_on_device(self, **inputs):
        """
        Ensure PyTorch tensors are on the specified device.

        Args:
            inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.

        Return:
            :obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
        """
        return {name: tensor.to(self.device) for name, tensor in inputs.items()}

    def check_model_type(self, supported_models: Union[List[str], dict]):
        """
        Check if the model class is in supported by the pipeline.

        Args:
            supported_models (:obj:`List[str]` or :obj:`dict`):
                The list of models supported by the pipeline, or a dictionary with model class values.
        """
        if not isinstance(supported_models, list):  # Create from a model mapping
            supported_models = [item[1].__name__ for item in supported_models.items()]
        if self.model.__class__.__name__ not in supported_models:
            raise PipelineException(
                self.task,
                self.model.base_model_prefix,
                f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
            )

    def _parse_and_tokenize(
        self, inputs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs
    ):
        """
        Parse arguments and tokenize
        """
        # Parse arguments; return_tensors matches the active framework ("pt"/"tf").
        inputs = self.tokenizer(
            inputs,
            add_special_tokens=add_special_tokens,
            return_tensors=self.framework,
            padding=padding,
            truncation=truncation,
        )
        return inputs

    def __call__(self, *args, **kwargs):
        # Tokenize then run the framework-specific forward pass.
        inputs = self._parse_and_tokenize(*args, **kwargs)
        return self._forward(inputs)

    def _forward(self, inputs, return_tensors=False):
        """
        Internal framework specific forward dispatching

        Args:
            inputs: dict holding all the keyword arguments for required by the model forward method.
            return_tensors: Whether to return native framework (pt/tf) tensors rather than numpy array

        Returns:
            Numpy array
        """
        # Encode for forward
        with self.device_placement():
            if self.framework == "tf":
                # TODO trace model
                predictions = self.model(inputs.data, training=False)[0]
            else:
                with torch.no_grad():
                    inputs = self.ensure_tensor_on_device(**inputs)
                    # [0] keeps only the primary model output (e.g. logits).
                    predictions = self.model(**inputs)[0].cpu()

        if return_tensors:
            return predictions
        else:
            return predictions.numpy()
| StarcoderdataPython |
6656503 | """How we configure our Interact Client connection."""
import os
import json
class Configuration:
    """How our Interact REST client is configured.

    ``pod`` is validated on assignment.  ``login_url`` and ``api_url`` are
    computed from ``pod`` and ``api_version`` on every access, so they can
    never go stale when either component changes after construction (the
    previous implementation cached ``login_url`` and its ``api_url`` setter
    returned a value instead of storing one).  All remaining settings are
    plain attributes; the trivial property boilerplate added no behavior.
    """

    # Pods known to host Interact instances.
    _SUPPORTED_PODS = ('2', '5')

    def __init__(
        self,
        pod,
        api_version,
        api_folder,
        api_list,
        profile_extension_table_alias,
        supplemental_table_alias,
        primary_key_alias,
        riid_generator_length,
        caste_nonstr_to_str,
        local_content_library_folder,
        remote_content_library_folder,
        test_campaign_name,
        test_local_content_library_folder,
        test_remote_content_library_folder,
        user_cerficates=False,
    ):
        """Initialize the Interact Configuration.

        Args:
            pod: Responsys pod identifier; only '2' and '5' are accepted.
            api_version: REST API version used to build the endpoint URLs.
            user_cerficates: (sic) name kept for backward compatibility.
        """
        self.pod = pod  # validated by the property setter below
        self.api_version = api_version
        self.api_folder = api_folder
        self.api_list = api_list
        self.profile_extension_table_alias = profile_extension_table_alias
        self.supplemental_table_alias = supplemental_table_alias
        self.primary_key_alias = primary_key_alias
        self.riid_generator_length = riid_generator_length
        self.caste_nonstr_to_str = caste_nonstr_to_str
        self.test_campaign_name = test_campaign_name
        self.user_cerficates = user_cerficates
        self.local_content_library_folder = local_content_library_folder
        self.test_local_content_library_folder = test_local_content_library_folder
        self.remote_content_library_folder = remote_content_library_folder
        self.test_remote_content_library_folder = test_remote_content_library_folder
        # Previously a property that raised AttributeError until assigned;
        # default to None so reads before assignment are safe.
        self.test_email_address = None

    def __repr__(self):
        """Text representation."""
        return "Configuration"

    @property
    def pod(self):
        """Get pod."""
        return self.__pod

    @pod.setter
    def pod(self, pod):
        """Set the pod.

        Only known pods are 2 and 5.

        Raises:
            ValueError: if ``pod`` is not one of the supported pods.
        """
        if str(int(pod)) in self._SUPPORTED_PODS:
            self.__pod = pod
        else:
            raise ValueError('Only pods 2 and 5 are supported.')

    @property
    def api_url(self):
        """Relative API url partial, e.g. ``rest/api/v1.3``."""
        return 'rest/api/v{a}'.format(a=self.api_version)

    @property
    def login_url(self):
        """Full authentication endpoint derived from ``pod`` and ``api_version``."""
        return 'http://login{p}.responsys.net/rest/api/v{a}/auth/token'.format(
            p=self.pod,
            a=self.api_version)
# login_url = f'http://login{pod}.responsys.net/rest/api/v{api_version}/'
def from_json(f):
    """Load configuration from json."""
    # Read the whole document first, then map its keys onto the constructor.
    with open(f) as config_file:
        user_config = json.load(config_file)
    return Configuration(
        pod=user_config["pod"],
        api_version=user_config["api_version"],
        api_folder=user_config["api_folder"],
        api_list=user_config["api_list"],
        profile_extension_table_alias=user_config["profile_extension_table_alias"],
        supplemental_table_alias=user_config["supplemental_table_alias"],
        primary_key_alias=user_config["primary_key_alias"],
        riid_generator_length=user_config["riid_generator_length"],
        caste_nonstr_to_str=user_config["caste_nonstr_to_str"],
        local_content_library_folder=user_config["local_content_library_folder"],
        remote_content_library_folder=user_config["remote_content_library_folder"],
        test_campaign_name=user_config["test_campaign_name"],
        test_local_content_library_folder=user_config["test_local_content_library_folder"],
        test_remote_content_library_folder=user_config["test_remote_content_library_folder"],
    )
def auto():
    """Find and load the first ``config.json`` under the current directory.

    Walks the whole tree (the previous version broke out of the walk after
    the first directory, so only ``.`` itself was ever searched) and opens
    the file via its full path (previously the bare filename was opened,
    which failed for any match in a subdirectory).

    Returns:
        Configuration built from the first ``config.json`` found, or None
        when no such file exists anywhere under the current directory.

    Raises:
        ValueError: if a found config file cannot be parsed.
    """
    # traverse root directory looking for credentials
    for root, dirs, files in os.walk("."):
        if 'config.json' in files:
            path = os.path.join(root, 'config.json')
            try:
                return from_json(path)
            except ValueError:
                raise ValueError('Could not open {f}'.format(f=path))
    return None
| StarcoderdataPython |
4862714 | import os
import sys
import glob
import sconstest.eval
def usage():
    """Print command-line usage for this script.

    Uses the parenthesized form of print so the module is valid under both
    Python 2 (single-argument print statement) and Python 3.
    """
    print("graph_scaling.py <scons|make|both> results_idx <build|update>")
# Time-result filename prefixes per build tool; main() rewrites these to the
# "*_update*" variants when the third CLI argument is "update".
prefixes = {'scons' : 'scons_cleanbuild',
            'make' : 'make_cleanbuild'}
# Project sizes, in ascending order.
ford = ['small', 'middle', 'large', 'vlarge', 'vvlarge']
# Number of CPP files generated for each project size (used as x-axis values).
files = {'small' : 5000,
         'middle' : 10000,
         'large' : 20000,
         'vlarge' : 30000,
         'vvlarge' : 40000}
def timefile(idx, project, build):
    """ Helper function, constructing the path to the current time result file. """
    result_dir = 'results%s' % idx
    filename = prefixes[build] + '.times'
    return os.path.join(result_dir, project, filename)
def main():
    # Entry point: read timing results and plot build-time scaling curves.
    # Expects argv: <scons|make|both> results_idx <build|update> (see usage()).
    if len(sys.argv) < 4:
        usage()
        sys.exit(0)

    # NOTE(review): argv[2] is named ``project`` but per usage() it is the
    # results index; it is indeed passed to timefile() as ``idx`` below and
    # only used as a label in the output filename — confirm naming intent.
    project = sys.argv[2]
    builds = sys.argv[1]
    both = False
    if builds == "both":
        builds = ['make', 'scons']
        both = True
    else:
        builds = [builds]

    ptitle = "Clean build"
    if sys.argv[3] == "update":
        # Switch module-level prefixes to the incremental-update result files.
        prefixes['scons'] = 'scons_update_implicit'
        prefixes['make'] = 'make_update'
        ptitle = "Update"

    # Loop over all result folders
    flist = glob.glob('results*')  # NOTE(review): computed but never used
    xdata = []
    ydata = []
    if both:
        # One curve per build tool when comparing make against scons.
        ca = sconstest.eval.DataCurve()
        ca.info.color = 'red'
        ca.info.title = 'make'
        cb = sconstest.eval.DataCurve()
        cb.info.color = 'green'
        cb.info.title = 'scons'
    for i in range(len(ford)):
        if both:
            # Pick project for 'make'
            r, u, s = sconstest.eval.getTimeData(timefile(sys.argv[2], ford[i], builds[0]))
            if r != 0.0:
                ca.xdata.append(files[ford[i]])
                ca.ydata.append(r)
            else:
                print "Is zero for %d (make)!" % (i+1)
            # Pick project for 'scons'
            r, u, s = sconstest.eval.getTimeData(timefile(sys.argv[2], ford[i], builds[1]))
            if r != 0.0:
                cb.xdata.append(files[ford[i]])
                cb.ydata.append(r)
            else:
                print "Is zero for %d (scons)!" % (i+1)
        else:
            # Pick project
            r, u, s = sconstest.eval.getTimeData(timefile(sys.argv[2], ford[i], sys.argv[1]))
            if r != 0.0:
                # Store time (r is the real/wall time; u and s are unused here)
                xdata.append(files[ford[i]])
                ydata.append(r)
            else:
                print "Is zero for %d!" % (i+1)

    if both:
        if not os.path.isdir('scaling'):
            os.makedirs('scaling')
        sconstest.eval.plotDataCurves([ca, cb], 'scaling/scaling_%s_%s.png' % (project, sys.argv[3]), ptitle, 'CPP files', legend=True)
    else:
        sconstest.eval.plotData(xdata, ydata, 'scaling_%s.png' % project, 'Linear scaling', 'CPP files')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
196590 | <reponame>p-p-m/nodeconductor
from __future__ import unicode_literals
import functools
import datetime
import logging
import time
import calendar
from django.db import models as django_models
from django.db import transaction, IntegrityError
from django.db.models import Q
from django.conf import settings as django_settings
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils import timezone
from django_fsm import TransitionNotAllowed
import django_filters
from rest_framework import exceptions
from rest_framework import filters
from rest_framework import mixins
from rest_framework import permissions, status
from rest_framework import viewsets, views
from rest_framework.response import Response
from rest_framework.decorators import detail_route, list_route
import reversion
from nodeconductor.core import mixins as core_mixins
from nodeconductor.core import models as core_models
from nodeconductor.core import exceptions as core_exceptions
from nodeconductor.core import serializers as core_serializers
from nodeconductor.core.filters import DjangoMappingFilterBackend, CategoryFilter, SynchronizationStateFilter
from nodeconductor.core.models import SynchronizationStates
from nodeconductor.core.utils import sort_dict, datetime_to_timestamp
from nodeconductor.iaas import models
from nodeconductor.iaas import serializers
from nodeconductor.iaas import tasks
from nodeconductor.iaas.serializers import ServiceSerializer
from nodeconductor.iaas.serializers import QuotaTimelineStatsSerializer
from nodeconductor.iaas.log import event_logger
from nodeconductor.quotas import filters as quota_filters
from nodeconductor.structure import filters as structure_filters
from nodeconductor.structure.views import UpdateOnlyByPaidCustomerMixin
from nodeconductor.structure.managers import filter_queryset_for_user
from nodeconductor.structure.models import ProjectRole, Project, Customer, ProjectGroup, CustomerRole
# Module-level logger for this views module.
logger = logging.getLogger(__name__)

# Truthy when a Zabbix server address is present under
# NODECONDUCTOR['MONITORING']['ZABBIX'] in Django settings; None otherwise.
ZABBIX_ENABLED = getattr(django_settings, 'NODECONDUCTOR', {}).get('MONITORING', {}).get('ZABBIX', {}).get('server')
def schedule_transition():
    """Decorator factory for instance-operation views (start/stop/resize/...).

    The wrapped view runs inside a transaction that checks permissions, lets
    the view choose an operation, and flips the instance into the matching
    ``schedule_*`` FSM state.  Only after the transaction commits is the
    corresponding celery task fired and the event-log record emitted — this
    ordering is deliberate so the task never sees uncommitted state.
    """
    def decorator(view_fn):
        @functools.wraps(view_fn)
        def wrapped(self, request, *args, **kwargs):
            supported_operations = {
                # code: (scheduled_celery_task, instance_marker_state)
                'start': ('schedule_starting', tasks.schedule_starting),
                'stop': ('schedule_stopping', tasks.schedule_stopping),
                'restart': ('schedule_restarting', tasks.schedule_restarting),
                'destroy': ('schedule_deletion', tasks.schedule_deleting),
                'flavor change': ('schedule_resizing', tasks.resize_flavor),
                'disk extension': ('schedule_resizing', tasks.extend_disk),
            }

            # Define them in inner scope but call when transaction complete
            response, processing_task, logger_info = None, None, None
            try:
                with transaction.atomic():
                    instance = self.get_object()
                    membership = instance.cloud_project_membership

                    # Only project administrators and staff may run operations.
                    is_admin = membership.project.has_user(request.user, ProjectRole.ADMINISTRATOR)
                    if not is_admin and not request.user.is_staff:
                        raise exceptions.PermissionDenied()

                    # Important! We are passing back the instance from current transaction to a view
                    options = view_fn(self, request, instance, *args, **kwargs)

                    if isinstance(options, tuple):
                        # Expecting operation, logger_info and optional celery_kwargs from a view
                        operation, logger_info = options[:2]
                        celery_kwargs = options[2] if len(options) >= 3 else {}

                        change_instance_state, processing_task = supported_operations[operation]
                        transition = getattr(instance, change_instance_state)
                        transition()

                        instance.save(update_fields=['state'])
                    else:
                        # Break execution by return from a view
                        response = options
                        raise RuntimeError
            except TransitionNotAllowed:
                # The FSM rejected the state change (e.g. stop while offline).
                message = "Performing %s operation from instance state '%s' is not allowed"
                return Response({'status': message % (operation, instance.get_state_display())},
                                status=status.HTTP_409_CONFLICT)
            except IntegrityError:
                return Response({'status': '%s was not scheduled' % operation},
                                status=status.HTTP_400_BAD_REQUEST)
            except RuntimeError:
                # Sentinel path: the view returned a ready-made Response.
                assert isinstance(response, Response)
                return response
            else:
                # Call celery task AFTER transaction has been commited
                processing_task.delay(instance.uuid.hex, **celery_kwargs)

                if logger_info is not None:
                    event_logger.instance.info(
                        logger_info['message'],
                        event_type=logger_info['event_type'],
                        event_context=logger_info['event_context'])

            return Response({'status': '%s was scheduled' % operation},
                            status=status.HTTP_202_ACCEPTED)

        return wrapped
    return decorator
class InstanceFilter(django_filters.FilterSet):
    """Query-parameter filtering and ordering for the instance list endpoint.

    Most filters traverse cloud_project_membership to project/customer fields;
    ``distinct=True`` avoids duplicate rows from the M2M project-group joins.
    """

    project_group_name = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_name = django_filters.CharFilter(
        name='cloud_project_membership__project__name',
        distinct=True,
        lookup_type='icontains',
    )

    project_group = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__uuid',
        distinct=True,
    )

    project = django_filters.CharFilter(
        name='cloud_project_membership__project__uuid',
        distinct=True,
        lookup_type='icontains',
    )

    customer = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__uuid',
        distinct=True,
    )

    customer_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__name',
        distinct=True,
        lookup_type='icontains',
    )

    customer_native_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__native_name',
        distinct=True,
        lookup_type='icontains',
    )

    customer_abbreviation = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__abbreviation',
        distinct=True,
        lookup_type='icontains',
    )

    template_name = django_filters.CharFilter(
        name='template__name',
        lookup_type='icontains',
    )

    name = django_filters.CharFilter(lookup_type='icontains')

    state = django_filters.NumberFilter()

    description = django_filters.CharFilter(
        lookup_type='icontains',
    )

    # In order to return results when an invalid value is specified
    strict = False

    class Meta(object):
        model = models.Instance
        fields = [
            'name',
            'customer',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
            'state',
            'project_name',
            'project_group_name',
            'project',
            'project_group',
            'template_name',
            'start_time',
            'cores',
            'ram',
            'system_volume_size',
            'data_volume_size',
            'description',
            'created',
            'type',
            'backend_id',
        ]
        # Sortable columns exposed via the ?o= query parameter.
        order_by = [
            'name',
            '-name',
            'state',
            '-state',
            'cloud_project_membership__project__customer__name',
            '-cloud_project_membership__project__customer__name',
            'cloud_project_membership__project__customer__native_name',
            '-cloud_project_membership__project__customer__native_name',
            'cloud_project_membership__project__customer__abbreviation',
            '-cloud_project_membership__project__customer__abbreviation',
            'cloud_project_membership__project__name',
            '-cloud_project_membership__project__name',
            'cloud_project_membership__project__project_groups__name',
            '-cloud_project_membership__project__project_groups__name',
            'template__name',
            '-template__name',
            '-cores',
            'ram',
            '-ram',
            'system_volume_size',
            '-system_volume_size',
            'data_volume_size',
            '-data_volume_size',
            'created',
            '-created',
            'type',
            '-type',
            'installation_state',
            '-installation_state',
        ]
        # Maps friendly ?o= names to real ORM paths (consumed by
        # DjangoMappingFilterBackend).
        order_by_mapping = {
            # Proper field naming
            'customer_name': 'cloud_project_membership__project__customer__name',
            'customer_native_name': 'cloud_project_membership__project__customer__native_name',
            'customer_abbreviation': 'cloud_project_membership__project__customer__abbreviation',
            'project_name': 'cloud_project_membership__project__name',
            'project_group_name': 'cloud_project_membership__project__project_groups__name',
            'template_name': 'template__name',
            # Backwards compatibility
            'project__customer__name': 'cloud_project_membership__project__customer__name',
            'project__name': 'cloud_project_membership__project__name',
            'project__project_groups__name': 'cloud_project_membership__project__project_groups__name',
        }
class InstanceViewSet(UpdateOnlyByPaidCustomerMixin,
                      mixins.CreateModelMixin,
                      mixins.RetrieveModelMixin,
                      mixins.UpdateModelMixin,
                      mixins.ListModelMixin,
                      viewsets.GenericViewSet):
    """List of VM instances that are accessible by this user.

    http://nodeconductor.readthedocs.org/en/latest/api/api.html#vm-instance-management
    """

    class PaidControl:
        # Path used by UpdateOnlyByPaidCustomerMixin to locate the customer.
        customer_path = 'cloud_project_membership__cloud__customer'

    queryset = models.Instance.objects.all()
    serializer_class = serializers.InstanceSerializer
    lookup_field = 'uuid'
    filter_backends = (structure_filters.GenericRoleFilter, DjangoMappingFilterBackend)
    permission_classes = (permissions.IsAuthenticated,
                          permissions.DjangoObjectPermissions)
    filter_class = InstanceFilter
    def get_queryset(self):
        # Base queryset, then two custom refinements on top of filter_class:
        # NULL-safe ordering by start_time and installation_state filtering.
        queryset = super(InstanceViewSet, self).get_queryset()

        order = self.request.query_params.get('o', None)
        if order == 'start_time':
            # Push NULL start_time rows first by sorting on a computed flag.
            queryset = queryset.extra(select={
                'is_null': 'CASE WHEN start_time IS NULL THEN 0 ELSE 1 END'}) \
                .order_by('is_null', 'start_time')
        elif order == '-start_time':
            queryset = queryset.extra(select={
                'is_null': 'CASE WHEN start_time IS NULL THEN 0 ELSE 1 END'}) \
                .order_by('-is_null', '-start_time')

        # XXX: Hack. This filtering should be refactored in NC-580
        installation_states = self.request.query_params.getlist('installation_state')
        if installation_states:
            query = Q()
            for installation_state in installation_states:
                if installation_state == 'FAIL':
                    # Offline instances count as failed regardless of their
                    # recorded installation_state.
                    query |= ~Q(state=models.Instance.States.ONLINE) | Q(installation_state=installation_state)
                else:
                    query |= Q(state=models.Instance.States.ONLINE, installation_state=installation_state)
            queryset = queryset.filter(query)
        return queryset
def get_serializer_class(self):
if self.request.method == 'POST':
return serializers.InstanceCreateSerializer
elif self.request.method in ('PUT', 'PATCH'):
return serializers.InstanceUpdateSerializer
return super(InstanceViewSet, self).get_serializer_class()
    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        # Extend the DRF default context with the requesting user.
        context = super(InstanceViewSet, self).get_serializer_context()
        context['user'] = self.request.user
        return context
    def initial(self, request, *args, **kwargs):
        # Pre-dispatch guards: reject mutating actions on instances that are
        # mid-transition before the action handler ever runs.
        if self.action in ('update', 'partial_update', 'destroy'):
            instance = self.get_object()
            if instance and instance.state not in instance.States.STABLE_STATES:
                raise core_exceptions.IncorrectStateException(
                    'Modification allowed in stable states only')

        # TODO: Replace it with schedule_transition and common transition flow
        elif self.action in ('stop', 'start', 'resize'):
            instance = self.get_object()
            if instance and instance.state == instance.States.PROVISIONING_SCHEDULED:
                raise core_exceptions.IncorrectStateException(
                    'Provisioning scheduled. Disabled modifications.')

        return super(InstanceViewSet, self).initial(request, *args, **kwargs)
    def perform_create(self, serializer):
        # SLA level is inherited from the chosen template at creation time.
        serializer.validated_data['agreed_sla'] = serializer.validated_data['template'].sla_level

        # check if connected cloud_project_membership is in a sane state - fail modification operation otherwise
        membership = serializer.validated_data['cloud_project_membership']
        if membership.state == core_models.SynchronizationStates.ERRED:
            raise core_exceptions.IncorrectStateException(
                detail='Cannot modify an instance if it is connected to a cloud project membership in erred state.'
            )

        # Enforce the customer's resource quota before creating anything.
        membership.project.customer.validate_quota_change({'nc_resource_count': 1}, raise_exception=True)

        instance = serializer.save()

        event_logger.instance.info(
            'Virtual machine {instance_name} creation has been scheduled.',
            event_type='iaas_instance_creation_scheduled',
            event_context={'instance': instance})

        # Actual backend provisioning happens asynchronously.
        tasks.provision_instance.delay(instance.uuid.hex, backend_flavor_id=instance.flavor.backend_id)
    def perform_update(self, serializer):
        # Refuse updates while the membership is in an erred sync state.
        membership = self.get_object().cloud_project_membership
        if membership.state == core_models.SynchronizationStates.ERRED:
            raise core_exceptions.IncorrectStateException(
                detail='Cannot modify an instance if it is connected to a cloud project membership in erred state.'
            )

        instance = serializer.save()

        event_logger.instance.info(
            'Virtual machine {instance_name} has been updated.',
            event_type='iaas_instance_update_succeeded',
            event_context={'instance': instance})

        # Propagate (possibly changed) security groups to the backend.
        from nodeconductor.iaas.tasks import push_instance_security_groups
        push_instance_security_groups.delay(instance.uuid.hex)
@detail_route(methods=['post'])
@schedule_transition()
def stop(self, request, instance, uuid=None):
logger_info = dict(
message='Virtual machine {instance_name} has been scheduled to stop.',
event_type='iaas_instance_stop_scheduled',
event_context={'instance': instance}
)
return 'stop', logger_info
    @detail_route(methods=['post'])
    @schedule_transition()
    def start(self, request, instance, uuid=None):
        # Returns (operation, logger_info); schedule_transition performs the
        # FSM change, fires the celery task and emits the event-log record.
        logger_info = dict(
            message='Virtual machine {instance_name} has been scheduled to start.',
            event_type='iaas_instance_start_scheduled',
            event_context={'instance': instance}
        )
        return 'start', logger_info
    @detail_route(methods=['post'])
    @schedule_transition()
    def restart(self, request, instance, uuid=None):
        # Returns (operation, logger_info); schedule_transition performs the
        # FSM change, fires the celery task and emits the event-log record.
        logger_info = dict(
            message='Virtual machine {instance_name} has been scheduled to restart.',
            event_type='iaas_instance_restart_scheduled',
            event_context={'instance': instance}
        )
        return 'restart', logger_info
    @schedule_transition()
    def destroy(self, request, instance, uuid):
        # check if deletion is allowed
        # TODO: it duplicates the signal check, but signal-based is useless when deletion is done in bg task
        # TODO: come up with a better way for checking
        try:
            from nodeconductor.iaas.handlers import prevent_deletion_of_instances_with_connected_backups
            prevent_deletion_of_instances_with_connected_backups(None, instance)
        except django_models.ProtectedError as e:
            # Connected backups protect the instance from deletion.
            return Response({'detail': e.args[0]}, status=status.HTTP_409_CONFLICT)

        logger_info = dict(
            message='Virtual machine {instance_name} has been scheduled to deletion.',
            event_type='iaas_instance_deletion_scheduled',
            event_context={'instance': instance}
        )
        return 'destroy', logger_info
    @detail_route(methods=['post'])
    @schedule_transition()
    def resize(self, request, instance, uuid=None):
        """Resize an offline instance: either switch to a new flavor or extend the data disk.

        Returns a tuple consumed by @schedule_transition: note that the flavor
        branch returns a 3-tuple (with task kwargs) while the disk branch
        returns a 2-tuple — presumably the decorator handles both arities.
        """
        if instance.state != models.Instance.States.OFFLINE:
            return Response({'detail': 'Instance must be offline'},
                            status=status.HTTP_409_CONFLICT)
        serializer = serializers.InstanceResizeSerializer(instance, data=request.data)
        serializer.is_valid(raise_exception=True)
        flavor = serializer.validated_data.get('flavor')
        # Serializer makes sure that exactly one of the branches will match
        if flavor is not None:
            # Flavor change: the new flavor must belong to the same cloud as the instance.
            instance_cloud = instance.cloud_project_membership.cloud
            if flavor.cloud != instance_cloud:
                return Response({'flavor': "New flavor is not within the same cloud"},
                                status=status.HTTP_400_BAD_REQUEST)
            # System volume size does not get updated since some backends
            # do not support resizing of a root volume
            # instance.system_volume_size = flavor.disk
            instance.ram = flavor.ram
            instance.cores = flavor.cores
            instance.flavor_name = flavor.name
            instance.save(update_fields=['ram', 'cores', 'flavor_name'])
            event_logger.instance_flavor.info(
                'Virtual machine {instance_name} has been scheduled to change flavor.',
                event_type='iaas_instance_flavor_change_scheduled',
                event_context={'instance': instance, 'flavor': flavor}
            )
            return 'flavor change', None, dict(flavor_uuid=flavor.uuid.hex)
        else:
            # Disk extension: only growing the data volume is allowed.
            new_size = serializer.validated_data['disk_size']
            if new_size <= instance.data_volume_size:
                return Response({'disk_size': "Disk size must be strictly greater than the current one"},
                                status=status.HTTP_400_BAD_REQUEST)
            instance.data_volume_size = new_size
            instance.save(update_fields=['data_volume_size'])
            event_logger.instance_volume.info(
                'Virtual machine {instance_name} has been scheduled to extend disk.',
                event_type='iaas_instance_volume_extension_scheduled',
                event_context={'instance': instance, 'volume_size': new_size}
            )
            return 'disk extension', None
@detail_route()
def usage(self, request, uuid):
# XXX: hook. Should be removed after zabbix refactoring
if not ZABBIX_ENABLED:
raise Http404()
instance = self.get_object()
if not instance.backend_id or instance.state in (models.Instance.States.PROVISIONING_SCHEDULED,
models.Instance.States.PROVISIONING):
raise Http404()
hour = 60 * 60
now = time.time()
data = {
'start_timestamp': request.query_params.get('from', int(now - hour)),
'end_timestamp': request.query_params.get('to', int(now)),
'segments_count': request.query_params.get('datapoints', 6),
'item': request.query_params.get('item'),
}
serializer = serializers.UsageStatsSerializer(data=data)
serializer.is_valid(raise_exception=True)
stats = serializer.get_stats([instance], is_paas=instance.type == models.Instance.Services.PAAS)
# Hack that adds zero as start points
created_ts = datetime_to_timestamp(instance.created)
for stat in stats:
if stat['from'] >= created_ts and stat['to'] - created_ts < hour / 2 and 'value' not in stat:
stat['value'] = 0
return Response(stats, status=status.HTTP_200_OK)
@detail_route()
def calculated_usage(self, request, uuid):
"""
Find max or min utilization of cpu, memory and storage of the instance within timeframe.
"""
# XXX: hook. Should be removed after zabbix refactoring
if not ZABBIX_ENABLED:
raise Http404()
instance = self.get_object()
if not instance.backend_id:
return Response({'detail': 'calculated usage is not available for instance without backend_id'},
status=status.HTTP_409_CONFLICT)
default_start = timezone.now() - datetime.timedelta(hours=1)
timestamp_interval_serializer = core_serializers.TimestampIntervalSerializer(data={
'start': request.query_params.get('from', datetime_to_timestamp(default_start)),
'end': request.query_params.get('to', datetime_to_timestamp(timezone.now()))
})
timestamp_interval_serializer.is_valid(raise_exception=True)
filter_data = timestamp_interval_serializer.get_filter_data()
start = datetime_to_timestamp(filter_data['start'])
end = datetime_to_timestamp(filter_data['end'])
mapped = {
'items': request.query_params.getlist('item'),
'method': request.query_params.get('method'),
}
serializer = serializers.CalculatedUsageSerializer(data={k: v for k, v in mapped.items() if v})
serializer.is_valid(raise_exception=True)
results = serializer.get_stats(instance, start, end)
return Response(results, status=status.HTTP_200_OK)
@detail_route(methods=['post'])
def assign_floating_ip(self, request, uuid):
"""
Assign floating IP to the instance.
"""
instance = self.get_object()
serializer = serializers.AssignFloatingIpSerializer(instance, data=request.data)
serializer.is_valid(raise_exception=True)
if not instance.cloud_project_membership.external_network_id:
return Response({'detail': 'External network ID of the cloud project membership is missing.'},
status=status.HTTP_409_CONFLICT)
elif instance.cloud_project_membership.state in SynchronizationStates.UNSTABLE_STATES:
return Response({'detail': 'Cloud project membership of instance should be in stable state.'},
status=status.HTTP_409_CONFLICT)
elif instance.state in models.Instance.States.UNSTABLE_STATES:
raise core_exceptions.IncorrectStateException(
detail='Cannot add floating IP to instance in unstable state.')
tasks.assign_floating_ip.delay(serializer.validated_data['floating_ip_uuid'], uuid)
return Response({'detail': 'Assigning floating IP to the instance has been scheduled.'},
status=status.HTTP_202_ACCEPTED)
class TemplateFilter(django_filters.FilterSet):
    """Query-parameter filters for the VM template list endpoint."""
    # Case-insensitive substring match on the template name.
    name = django_filters.CharFilter(
        lookup_type='icontains',
    )
    # Filters by OS category (a grouping of OS values declared on the model).
    os_type = CategoryFilter(
        categories=models.Template.OsTypes.CATEGORIES
    )
    # Matched against the related application type's slug, not its display name.
    application_type = django_filters.CharFilter(
        name='application_type__slug',
    )
    class Meta(object):
        model = models.Template
        fields = (
            'os',
            'os_type',
            'name',
            'type',
            'application_type',
            'is_active',
        )
class TemplateViewSet(viewsets.ModelViewSet):
    """
    List of VM templates that are accessible by this user.
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#templates
    """
    queryset = models.Template.objects.all()
    serializer_class = serializers.TemplateSerializer
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    lookup_field = 'uuid'
    filter_backends = (DjangoMappingFilterBackend,)
    filter_class = TemplateFilter
    def get_serializer_class(self):
        # Write operations use a dedicated creation serializer.
        if self.request.method in ('POST', 'PUT', 'PATCH'):
            return serializers.TemplateCreateSerializer
        return super(TemplateViewSet, self).get_serializer_class()
    def get_queryset(self):
        """Optionally narrow GET listings to templates with images in a given cloud."""
        queryset = super(TemplateViewSet, self).get_queryset()
        if self.request.method != 'GET':
            return queryset
        cloud_uuid = self.request.query_params.get('cloud')
        if cloud_uuid is None:
            return queryset
        visible_clouds = filter_queryset_for_user(models.Cloud.objects.all(), self.request.user)
        try:
            cloud = visible_clouds.get(uuid=cloud_uuid)
        except models.Cloud.DoesNotExist:
            # Unknown (or invisible) cloud: expose no templates at all.
            return queryset.none()
        return queryset.filter(images__cloud=cloud)
class TemplateLicenseViewSet(viewsets.ModelViewSet):
    """List of template licenses that are accessible by this user.
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#template-licenses
    """
    queryset = models.TemplateLicense.objects.all()
    serializer_class = serializers.TemplateLicenseSerializer
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    lookup_field = 'uuid'
    def initial(self, request, *args, **kwargs):
        super(TemplateLicenseViewSet, self).initial(request, *args, **kwargs)
        # Only staff may manage licenses; the aggregated 'stats' action stays available to all.
        if self.action != 'stats' and not self.request.user.is_staff:
            raise Http404
    def get_queryset(self):
        queryset = super(TemplateLicenseViewSet, self).get_queryset()
        if 'customer' in self.request.query_params:
            customer_uuid = self.request.query_params['customer']
            queryset = queryset.filter(templates__images__cloud__customer__uuid=customer_uuid)
        return queryset
    def _filter_queryset(self, queryset):
        """Narrow an InstanceLicense queryset by optional customer/name/type query params."""
        if 'customer' in self.request.query_params:
            customer_uuid = self.request.query_params['customer']
            queryset = queryset.filter(instance__cloud_project_membership__project__customer__uuid=customer_uuid)
        if 'name' in self.request.query_params:
            queryset = queryset.filter(template_license__name=self.request.query_params['name'])
        if 'type' in self.request.query_params:
            queryset = queryset.filter(template_license__license_type=self.request.query_params['type'])
        return queryset
    @list_route()
    def stats(self, request):
        """Aggregate license usage counts, grouped by the ?aggregate=... parameters."""
        queryset = filter_queryset_for_user(models.InstanceLicense.objects.all(), request.user)
        queryset = self._filter_queryset(queryset)
        aggregate_parameters = self.request.query_params.getlist('aggregate', [])
        aggregate_parameter_to_field_map = {
            'project': [
                'instance__cloud_project_membership__project__uuid',
                'instance__cloud_project_membership__project__name',
            ],
            'project_group': [
                'instance__cloud_project_membership__project__project_groups__uuid',
                'instance__cloud_project_membership__project__project_groups__name',
            ],
            'customer': [
                'instance__cloud_project_membership__project__customer__uuid',
                'instance__cloud_project_membership__project__customer__name',
                'instance__cloud_project_membership__project__customer__abbreviation',
            ],
            'type': ['template_license__license_type'],
            'name': ['template_license__name'],
        }
        aggregate_fields = []
        for aggregate_parameter in aggregate_parameters:
            if aggregate_parameter not in aggregate_parameter_to_field_map:
                return Response('Licenses statistics can not be aggregated by %s' % aggregate_parameter,
                                status=status.HTTP_400_BAD_REQUEST)
            aggregate_fields += aggregate_parameter_to_field_map[aggregate_parameter]
        queryset = queryset.values(*aggregate_fields).annotate(count=django_models.Count('id', distinct=True))
        # This hack can be removed when https://code.djangoproject.com/ticket/16735 will be closed
        # Replace databases paths by normal names. Ex: instance__project__uuid is replaced by project_uuid
        name_replace_map = {
            'instance__cloud_project_membership__project__uuid': 'project_uuid',
            'instance__cloud_project_membership__project__name': 'project_name',
            'instance__cloud_project_membership__project__project_groups__uuid': 'project_group_uuid',
            'instance__cloud_project_membership__project__project_groups__name': 'project_group_name',
            'instance__cloud_project_membership__project__customer__uuid': 'customer_uuid',
            'instance__cloud_project_membership__project__customer__name': 'customer_name',
            'instance__cloud_project_membership__project__customer__abbreviation': 'customer_abbreviation',
            'template_license__license_type': 'type',
            'template_license__name': 'name',
        }
        for d in queryset:
            # .items() instead of the Python 2-only .iteritems(): consistent with the
            # rest of this module and forward-compatible with Python 3.
            for db_name, output_name in name_replace_map.items():
                if db_name in d:
                    d[output_name] = d[db_name]
                    del d[db_name]
        # XXX: hack for portal only. (Provide project group data if aggregation was done by project)
        if 'project' in aggregate_parameters and 'project_group' not in aggregate_parameters:
            for item in queryset:
                project = Project.objects.get(uuid=item['project_uuid'])
                if project.project_group is not None:
                    item['project_group_uuid'] = project.project_group.uuid.hex
                    item['project_group_name'] = project.project_group.name
        return Response(queryset)
class ResourceFilter(django_filters.FilterSet):
    """Filtering and ordering options for the read-only resource (instance) listing."""
    project_group_name = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_name = django_filters.CharFilter(
        name='cloud_project_membership__project__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_uuid = django_filters.CharFilter(
        name='cloud_project_membership__project__uuid'
    )
    # FIXME: deprecated, use project_group_name instead
    project_groups = django_filters.CharFilter(
        name='cloud_project_membership__project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    name = django_filters.CharFilter(lookup_type='icontains')
    customer = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__uuid'
    )
    customer_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__name',
        lookup_type='icontains',
    )
    customer_abbreviation = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__abbreviation',
        lookup_type='icontains',
    )
    customer_native_name = django_filters.CharFilter(
        name='cloud_project_membership__project__customer__native_name',
        lookup_type='icontains',
    )
    template_name = django_filters.CharFilter(
        name='template__name',
        lookup_type='icontains',
    )
    # agreed_sla lives on the instance itself; actual_sla comes from related SLA records.
    agreed_sla = django_filters.NumberFilter()
    actual_sla = django_filters.NumberFilter(
        name='slas__value',
        distinct=True,
    )
    class Meta(object):
        model = models.Instance
        fields = [
            'name',
            'template_name',
            'customer',
            'customer_name',
            'customer_native_name',
            'customer_abbreviation',
            'project_name',
            'project_uuid',
            'project_groups',
            'agreed_sla',
            'actual_sla',
        ]
        order_by = [
            'name',
            'template__name',
            'cloud_project_membership__project__customer__name',
            'cloud_project_membership__project__customer__abbreviation',
            'cloud_project_membership__project__customer__native_name',
            'cloud_project_membership__project__name',
            'cloud_project_membership__project__project_groups__name',
            'agreed_sla',
            'slas__value',
            # desc
            '-name',
            '-template__name',
            '-cloud_project_membership__project__customer__name',
            '-cloud_project_membership__project__customer__abbreviation',
            '-cloud_project_membership__project__customer__native_name',
            '-cloud_project_membership__project__name',
            '-cloud_project_membership__project__project_groups__name',
            '-agreed_sla',
            '-slas__value',
        ]
        # Maps user-facing ordering keys to ORM paths (consumed by DjangoMappingFilterBackend).
        order_by_mapping = {
            # Proper field naming
            'customer_name': 'cloud_project_membership__project__customer__name',
            'customer_abbreviation': 'cloud_project_membership__project__customer__abbreviation',
            'customer_native_name': 'cloud_project_membership__project__customer__native_name',
            'project_name': 'cloud_project_membership__project__name',
            'project_group_name': 'cloud_project_membership__project__project_groups__name',
            'template_name': 'template__name',
            'actual_sla': 'slas__value',
            # Backwards compatibility
            'project__customer__name': 'cloud_project_membership__project__customer__name',
            'project__name': 'cloud_project_membership__project__name',
            'project__project_groups__name': 'cloud_project_membership__project__project_groups__name',
        }
# XXX: This view has to be rewritten or removed after haystack implementation
class ResourceViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only listing of instances as generic 'resources' for SLA reporting."""
    queryset = models.Instance.objects.exclude(
        state=models.Instance.States.DELETING,
    )
    serializer_class = ServiceSerializer
    lookup_field = 'uuid'
    filter_backends = (structure_filters.GenericRoleFilter, DjangoMappingFilterBackend)
    filter_class = ResourceFilter
    def get_queryset(self):
        # Show only resources created on or before the last day of the requested period.
        period = self._get_period()
        if '-' in period:
            year, month = map(int, period.split('-'))
        else:
            year, month = int(period), 12
        last_day = calendar.monthrange(year, month)[1]
        cutoff = datetime.date(year, month, last_day)
        return super(ResourceViewSet, self).get_queryset().filter(created__lte=cutoff)
    def _get_period(self):
        """Return the requested 'YYYY-MM' period, defaulting to the current month."""
        period = self.request.query_params.get('period')
        if period is not None:
            return period
        today = datetime.date.today()
        return '%s-%s' % (today.year, today.month)
    def get_serializer_context(self):
        """
        Extra context provided to the serializer class.
        """
        context = super(ResourceViewSet, self).get_serializer_context()
        context['period'] = self._get_period()
        return context
    @detail_route()
    def events(self, request, uuid):
        """Return SLA state-change events of this resource for the requested period."""
        resource = self.get_object()
        # TODO: this should use a generic resource model
        history = get_object_or_404(
            models.InstanceSlaHistory, instance__uuid=resource.uuid, period=self._get_period())
        history_events = list(history.events.all().order_by('-timestamp').values('timestamp', 'state'))
        serializer = serializers.SlaHistoryEventSerializer(data=history_events, many=True)
        serializer.is_valid(raise_exception=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class ResourceStatsView(views.APIView):
    """Staff-only statistics for a cloud backend identified by its auth URL."""
    def _check_user(self, request):
        # Backend-wide statistics are restricted to staff users.
        if not request.user.is_staff:
            raise exceptions.PermissionDenied()
    def get(self, request, format=None):
        self._check_user(request)
        auth_url = request.query_params.get('auth_url')
        # TODO: auth_url should be coming as a reference to NodeConductor object. Consider introducing this concept.
        if auth_url is None:
            return Response(
                {'detail': 'GET parameter "auth_url" has to be defined'},
                status=status.HTTP_400_BAD_REQUEST,
            )
        cloud = models.Cloud.objects.filter(auth_url=auth_url).first()
        if cloud is None:
            return Response(
                {'detail': 'No clouds with auth url: %s' % auth_url},
                status=status.HTTP_400_BAD_REQUEST,
            )
        memberships = models.CloudProjectMembership.objects.filter(cloud__auth_url=auth_url)
        quota_values = models.CloudProjectMembership.get_sum_of_quotas_as_dict(
            memberships, ('vcpu', 'ram', 'storage'), fields=['limit'])
        stats = cloud.get_statistics()
        # For backward compatibility these legacy key names are kept:
        stats['vcpu_quota'] = quota_values['vcpu']
        stats['storage_quota'] = quota_values['storage']
        stats['memory_quota'] = quota_values['ram']
        return Response(sort_dict(stats), status=status.HTTP_200_OK)
class CustomerStatsView(views.APIView):
    """Per-customer counts of projects, project groups and instances visible to the user."""
    def get(self, request, format=None):
        def visible_count(queryset):
            # Count only objects the requesting user is allowed to see.
            return filter_queryset_for_user(queryset, request.user).count()
        customer_statistics = []
        for customer in filter_queryset_for_user(Customer.objects.all(), request.user):
            customer_statistics.append({
                'name': customer.name,
                'abbreviation': customer.abbreviation,
                'projects': visible_count(Project.objects.filter(customer=customer)),
                'project_groups': visible_count(ProjectGroup.objects.filter(customer=customer)),
                'instances': visible_count(
                    models.Instance.objects.filter(cloud_project_membership__project__customer=customer)),
            })
        return Response(customer_statistics, status=status.HTTP_200_OK)
class UsageStatsView(views.APIView):
    """Usage datapoints aggregated by customer, project group or project."""
    aggregate_models = {
        'customer': {'model': Customer, 'path': models.Instance.Permissions.customer_path},
        'project_group': {'model': ProjectGroup, 'path': models.Instance.Permissions.project_group_path},
        'project': {'model': Project, 'path': models.Instance.Permissions.project_path},
    }
    def _get_aggregate_queryset(self, request, aggregate_model_name):
        """Return aggregate-root objects of the chosen kind visible to the user."""
        model = self.aggregate_models[aggregate_model_name]['model']
        return filter_queryset_for_user(model.objects.all(), request.user)
    def _get_aggregate_filter(self, aggregate_model_name, obj):
        """Build the Instance filter kwargs that narrow instances to one aggregate root."""
        path = self.aggregate_models[aggregate_model_name]['path']
        return {path: obj}
    def get(self, request, format=None):
        # XXX: hook. Should be removed after zabbix refactoring
        if not ZABBIX_ENABLED:
            raise Http404()
        usage_stats = []
        aggregate_model_name = request.query_params.get('aggregate', 'customer')
        # Membership test directly on the dict — no need for .keys().
        if aggregate_model_name not in self.aggregate_models:
            return Response(
                'Get parameter "aggregate" can take only this values: %s' % ', '.join(self.aggregate_models.keys()),
                status=status.HTTP_400_BAD_REQUEST)
        # This filters out the things we group by (aka aggregate root) to those that can be seen
        # by currently logged in user.
        aggregate_queryset = self._get_aggregate_queryset(request, aggregate_model_name)
        if 'uuid' in request.query_params:
            aggregate_queryset = aggregate_queryset.filter(uuid=request.query_params['uuid'])
        # This filters out the vm Instances to those that can be seen
        # by currently logged in user. This is done within each aggregate root separately.
        visible_instances = filter_queryset_for_user(models.Instance.objects.all(), request.user)
        hour = 60 * 60
        # Capture 'now' once so every aggregate shares the same default time window
        # (consistent with InstanceViewSet.usage, which also samples time once).
        now = time.time()
        for aggregate_object in aggregate_queryset:
            # Narrow down the instance scope to aggregate root.
            instances = visible_instances.filter(
                **self._get_aggregate_filter(aggregate_model_name, aggregate_object))
            if instances:
                data = {
                    'start_timestamp': request.query_params.get('from', int(now - hour)),
                    'end_timestamp': request.query_params.get('to', int(now)),
                    'segments_count': request.query_params.get('datapoints', 6),
                    'item': request.query_params.get('item'),
                }
                serializer = serializers.UsageStatsSerializer(data=data)
                serializer.is_valid(raise_exception=True)
                stats = serializer.get_stats(instances)
                usage_stats.append({'name': aggregate_object.name, 'datapoints': stats})
            else:
                usage_stats.append({'name': aggregate_object.name, 'datapoints': []})
        return Response(usage_stats, status=status.HTTP_200_OK)
class FlavorViewSet(viewsets.ReadOnlyModelViewSet):
    """List of VM instance flavors that are accessible by this user.
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#flavor-management
    """
    queryset = models.Flavor.objects.all()
    serializer_class = serializers.FlavorSerializer
    lookup_field = 'uuid'
    # GenericRoleFilter presumably scopes visibility to the requesting user's roles — see structure_filters.
    filter_backends = (structure_filters.GenericRoleFilter,)
class CloudFilter(django_filters.FilterSet):
    """Query-parameter filters for the cloud list endpoint."""
    name = django_filters.CharFilter(lookup_type='icontains')
    customer = django_filters.CharFilter(
        name='customer__uuid',
    )
    customer_name = django_filters.CharFilter(
        lookup_type='icontains',
        name='customer__name',
    )
    customer_native_name = django_filters.CharFilter(
        lookup_type='icontains',
        name='customer__native_name',
    )
    project = django_filters.CharFilter(
        name='cloudprojectmembership__project__uuid',
        distinct=True,
    )
    # project_uuid is alias of project for consistency with structure filters
    project_uuid = django_filters.CharFilter(
        name='cloudprojectmembership__project__uuid',
        distinct=True,
    )
    project_name = django_filters.CharFilter(
        name='cloudprojectmembership__project__name',
        lookup_type='icontains',
        distinct=True,
    )
    class Meta(object):
        model = models.Cloud
        fields = [
            'name',
            'customer',
            'customer_name',
            'customer_native_name',
            'project',
            'project_name',
        ]
class CloudViewSet(UpdateOnlyByPaidCustomerMixin,
                   core_mixins.UpdateOnlyStableMixin,
                   viewsets.ModelViewSet):
    """List of clouds that are accessible by this user.
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#cloud-model
    """
    class PaidControl:
        # Attribute path used by UpdateOnlyByPaidCustomerMixin to find the paying customer.
        customer_path = 'customer'
    queryset = models.Cloud.objects.all().prefetch_related('flavors')
    serializer_class = serializers.CloudSerializer
    lookup_field = 'uuid'
    permission_classes = (
        permissions.IsAuthenticated,
        permissions.DjangoObjectPermissions,
    )
    filter_backends = (structure_filters.GenericRoleFilter, filters.DjangoFilterBackend)
    filter_class = CloudFilter
    def _can_create_or_update_cloud(self, serializer):
        """Only staff or owners of the target customer may create/update a cloud."""
        if self.request.user.is_staff:
            return True
        if serializer.validated_data['customer'].has_user(self.request.user, CustomerRole.OWNER):
            return True
        # Explicit False instead of the previous implicit None fall-through.
        return False
    def perform_create(self, serializer):
        if not self._can_create_or_update_cloud(serializer):
            raise exceptions.PermissionDenied()
        # XXX This is a hack as sync_services expects only IN_SYNC objects and newly created cloud is created
        # with SYNCING_SCHEDULED
        cloud = serializer.save(state=SynchronizationStates.IN_SYNC)
        tasks.sync_services.delay([cloud.uuid.hex])
    def perform_update(self, serializer):
        if not self._can_create_or_update_cloud(serializer):
            raise exceptions.PermissionDenied()
        super(CloudViewSet, self).perform_update(serializer)
class CloudProjectMembershipFilter(quota_filters.QuotaFilterSetMixin, django_filters.FilterSet):
    """Filtering and ordering options for cloud-project membership listings."""
    cloud = django_filters.CharFilter(
        name='cloud__uuid',
    )
    project = django_filters.CharFilter(
        name='project__uuid',
    )
    project_name = django_filters.CharFilter(
        name='project__name',
        distinct=True,
        lookup_type='icontains',
    )
    project_group = django_filters.CharFilter(
        name='project__project_groups__uuid',
    )
    project_group_name = django_filters.CharFilter(
        name='project__project_groups__name',
        distinct=True,
        lookup_type='icontains',
    )
    # Quota filters match against the membership's quota limits (see quota_filters).
    ram = quota_filters.QuotaFilter(
        quota_name='ram',
        quota_field='limit',
    )
    vcpu = quota_filters.QuotaFilter(
        quota_name='vcpu',
        quota_field='limit',
    )
    storage = quota_filters.QuotaFilter(
        quota_name='storage',
        quota_field='limit',
    )
    max_instances = quota_filters.QuotaFilter(
        quota_name='max_instances',
        quota_field='limit',
    )
    class Meta(object):
        model = models.CloudProjectMembership
        fields = [
            'cloud',
            'project', 'project_name',
            'project_group', 'project_group_name',
            'ram', 'vcpu', 'storage', 'max_instances',
            'tenant_id',
        ]
        order_by = [
            'project__name',
            '-project__name',
            'project__project_groups__name',
            '-project__project_groups__name',
            'quotas__limit__ram',
            '-quotas__limit__ram',
            'quotas__limit__vcpu',
            '-quotas__limit__vcpu',
            'quotas__limit__storage',
            '-quotas__limit__storage',
            'quotas__limit__max_instances',
            '-quotas__limit__max_instances',
            'quotas__limit',
            '-quotas__limit',
        ]
        # Maps user-facing ordering keys to ORM/quota paths (used by DjangoMappingFilterBackend).
        order_by_mapping = {
            'project_name': 'project__name',
            'project_group_name': 'project__project_groups__name',
            'vcpu': 'quotas__limit__vcpu',
            'ram': 'quotas__limit__ram',
            'max_instances': 'quotas__limit__max_instances',
            'storage': 'quotas__limit__storage',
        }
class CloudProjectMembershipViewSet(UpdateOnlyByPaidCustomerMixin,
                                    mixins.CreateModelMixin,
                                    mixins.RetrieveModelMixin,
                                    mixins.DestroyModelMixin,
                                    mixins.ListModelMixin,
                                    core_mixins.UpdateOnlyStableMixin,
                                    viewsets.GenericViewSet):
    """
    List of project-cloud connections
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#link-cloud-to-a-project
    """
    class PaidControl:
        # Attribute path used by UpdateOnlyByPaidCustomerMixin to find the paying customer.
        customer_path = 'cloud__customer'
    queryset = models.CloudProjectMembership.objects.all()
    serializer_class = serializers.CloudProjectMembershipSerializer
    filter_backends = (structure_filters.GenericRoleFilter, DjangoMappingFilterBackend)
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    filter_class = CloudProjectMembershipFilter
    def perform_create(self, serializer):
        """Create the link and kick off its backend synchronization in the background."""
        membership = serializer.save()
        tasks.sync_cloud_membership.delay(membership.pk, is_membership_creation=True)
    @detail_route(methods=['post'])
    def set_quotas(self, request, **kwargs):
        """Staff-only: schedule pushing new quota limits for the membership to the backend."""
        if not request.user.is_staff:
            raise exceptions.PermissionDenied()
        instance = self.get_object()
        if instance.state != core_models.SynchronizationStates.IN_SYNC:
            return Response({'detail': 'Cloud project membership must be in sync state for setting quotas'},
                            status=status.HTTP_409_CONFLICT)
        serializer = serializers.CloudProjectMembershipQuotaSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = dict(serializer.validated_data)
        if data.get('max_instances') is not None:
            # Derive volume/snapshot quotas from the instance limit via configurable ratios.
            quotas = django_settings.NODECONDUCTOR.get('OPENSTACK_QUOTAS_INSTANCE_RATIOS', {})
            volume_ratio = quotas.get('volumes', 4)
            snapshots_ratio = quotas.get('snapshots', 20)
            data['volumes'] = volume_ratio * data['max_instances']
            data['snapshots'] = snapshots_ratio * data['max_instances']
        instance.schedule_syncing()
        instance.save()
        tasks.push_cloud_membership_quotas.delay(instance.pk, quotas=data)
        return Response({'status': 'Quota update was scheduled'},
                        status=status.HTTP_202_ACCEPTED)
    @detail_route(methods=['post'])
    def import_instance(self, request, **kwargs):
        """Schedule import of an existing backend VM (project admin or staff only)."""
        membership = self.get_object()
        is_admin = membership.project.has_user(request.user, ProjectRole.ADMINISTRATOR)
        if not is_admin and not request.user.is_staff:
            raise exceptions.PermissionDenied()
        if membership.state == core_models.SynchronizationStates.ERRED:
            return Response({'detail': 'Cloud project membership must be in non-erred state for instance import to work'},
                            status=status.HTTP_409_CONFLICT)
        serializer = serializers.CloudProjectMembershipLinkSerializer(data=request.data,
                                                                      context={'membership': membership})
        serializer.is_valid(raise_exception=True)
        instance_id = serializer.validated_data['id']
        # Template is optional; when absent the import task receives template_id=None.
        template = serializer.validated_data.get('template')
        template_id = template.uuid.hex if template else None
        tasks.import_instance.delay(membership.pk, instance_id=instance_id, template_id=template_id)
        event_logger.instance_import.info(
            'Virtual machine with backend id {instance_id} has been scheduled for import.',
            event_type='iaas_instance_import_scheduled',
            event_context={'instance_id': instance_id})
        return Response({'status': 'Instance import was scheduled'},
                        status=status.HTTP_202_ACCEPTED)
    @detail_route(methods=['post', 'delete'])
    def external_network(self, request, pk=None):
        """Create (POST) or delete (DELETE) the membership's external network, asynchronously."""
        if request.method == 'DELETE':
            membership = self.get_object()
            if membership.external_network_id:
                tasks.delete_external_network.delay(pk)
                return Response({'status': 'External network deletion has been scheduled.'},
                                status=status.HTTP_202_ACCEPTED)
            else:
                # NOTE(review): an HTTP 204 response should carry no body — confirm
                # clients do not rely on this payload before changing it.
                return Response({'status': 'External network does not exist.'},
                                status=status.HTTP_204_NO_CONTENT)
        serializer = serializers.ExternalNetworkSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        tasks.create_external_network.delay(pk, serializer.data)
        return Response({'status': 'External network creation has been scheduled.'},
                        status=status.HTTP_202_ACCEPTED)
    @detail_route(methods=['post'])
    def allocate_floating_ip(self, request, pk=None):
        """
        Allocate floating IP from external network.
        """
        membership = self.get_object()
        if not membership.external_network_id:
            return Response({'detail': 'Cloud project membership should have an external network ID.'},
                            status=status.HTTP_409_CONFLICT)
        elif membership.state in core_models.SynchronizationStates.UNSTABLE_STATES:
            raise core_exceptions.IncorrectStateException(
                detail='Cloud project membership must be in stable state.')
        tasks.allocate_floating_ip.delay(pk)
        return Response({'detail': 'Floating IP allocation has been scheduled.'},
                        status=status.HTTP_202_ACCEPTED)
class SecurityGroupFilter(django_filters.FilterSet):
    """Query-parameter filters for the security group list endpoint."""
    name = django_filters.CharFilter(
        name='name',
        lookup_type='icontains',
    )
    description = django_filters.CharFilter(
        name='description',
        lookup_type='icontains',
    )
    cloud = django_filters.CharFilter(
        name='cloud_project_membership__cloud__uuid',
    )
    project = django_filters.CharFilter(
        name='cloud_project_membership__project__uuid',
    )
    # Filters on the group's synchronization state (see SynchronizationStateFilter).
    state = SynchronizationStateFilter()
    class Meta(object):
        model = models.SecurityGroup
        fields = [
            'name',
            'description',
            'cloud',
            'project',
            'state',
        ]
class SecurityGroupViewSet(core_mixins.UpdateOnlyStableMixin, viewsets.ModelViewSet):
    """
    List of security groups
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#security-group-management
    """
    queryset = models.SecurityGroup.objects.all()
    serializer_class = serializers.SecurityGroupSerializer
    lookup_field = 'uuid'
    permission_classes = (permissions.IsAuthenticated,
                          permissions.DjangoObjectPermissions)
    filter_class = SecurityGroupFilter
    filter_backends = (structure_filters.GenericRoleFilter, filters.DjangoFilterBackend,)
    def perform_create(self, serializer):
        # Persist first, then ask the backend to create the group asynchronously.
        group = serializer.save()
        tasks.create_security_group.delay(group.uuid.hex)
    def perform_update(self, serializer):
        super(SecurityGroupViewSet, self).perform_update(serializer)
        group = self.get_object()
        group.schedule_syncing()
        group.save()
        tasks.update_security_group.delay(serializer.instance.uuid.hex)
    def destroy(self, request, *args, **kwargs):
        # Mark the group as syncing and delegate the actual backend removal to a task.
        group = self.get_object()
        group.schedule_syncing()
        group.save()
        tasks.delete_security_group.delay(group.uuid.hex)
        return Response({'status': 'Deletion was scheduled'}, status=status.HTTP_202_ACCEPTED)
class IpMappingFilter(django_filters.FilterSet):
    """Query-parameter filters for the IP mapping list endpoint."""
    project = django_filters.CharFilter(
        name='project__uuid',
    )
    class Meta(object):
        model = models.IpMapping
        fields = [
            'project',
            'private_ip',
            'public_ip',
        ]
class IpMappingViewSet(viewsets.ModelViewSet):
    """
    List of mappings between public IPs and private IPs
    http://nodeconductor.readthedocs.org/en/latest/api/api.html#ip-mappings
    """
    queryset = models.IpMapping.objects.all()
    serializer_class = serializers.IpMappingSerializer
    lookup_field = 'uuid'
    filter_backends = (structure_filters.GenericRoleFilter, filters.DjangoFilterBackend,)
    permission_classes = (permissions.IsAuthenticated,
                          permissions.DjangoObjectPermissions)
    filter_class = IpMappingFilter
class FloatingIPFilter(django_filters.FilterSet):
    """Filter floating IPs by project UUID, cloud UUID and status."""
    # Both lookups traverse the cloud_project_membership relation.
    project = django_filters.CharFilter(
        name='cloud_project_membership__project__uuid',
    )
    cloud = django_filters.CharFilter(
        name='cloud_project_membership__cloud__uuid',
    )
    class Meta(object):
        model = models.FloatingIP
        fields = [
            'project',
            'cloud',
            'status',
        ]
class FloatingIPViewSet(viewsets.ReadOnlyModelViewSet):
    """
    List of floating ips
    """
    # Read-only viewset: floating IPs are not created or modified via this API.
    queryset = models.FloatingIP.objects.all()
    serializer_class = serializers.FloatingIPSerializer
    lookup_field = 'uuid'
    permission_classes = (permissions.IsAuthenticated, permissions.DjangoObjectPermissions)
    filter_backends = (structure_filters.GenericRoleFilter, filters.DjangoFilterBackend)
    filter_class = FloatingIPFilter
class QuotaStatsView(views.APIView):
    """Aggregate quota limits and usage over a set of memberships."""
    def get(self, request, format=None):
        # Validate the aggregate query params and resolve the memberships
        # visible to the requesting user.
        serializer = serializers.StatsAggregateSerializer(data=request.query_params)
        serializer.is_valid(raise_exception=True)
        memberships = serializer.get_memberships(request.user)
        sum_of_quotas = models.CloudProjectMembership.get_sum_of_quotas_as_dict(
            memberships, ['vcpu', 'ram', 'storage', 'max_instances'])
        return Response(sum_of_quotas, status=status.HTTP_200_OK)
# XXX: This view is deprecated. It has to be replaced with quotas history endpoints
class QuotaTimelineStatsView(views.APIView):
    """
    Count quota usage and limit history statistics
    """

    def get(self, request, format=None):
        stats = self.get_stats(request)
        return Response(stats, status=status.HTTP_200_OK)

    def get_quota_scopes(self, request):
        """Return the cloud project memberships visible to the user."""
        serializer = serializers.StatsAggregateSerializer(data=request.query_params)
        serializer.is_valid(raise_exception=True)
        scopes = serializer.get_memberships(request.user)
        return scopes

    def get_stats(self, request):
        """Build a list of ``{'from': ts, 'to': ts, '<item>_limit': n, '<item>_usage': n}``
        dicts, one per interval, ordered from oldest to newest.
        """
        mapped = {
            'start_time': request.query_params.get('from'),
            'end_time': request.query_params.get('to'),
            'interval': request.query_params.get('interval'),
            'item': request.query_params.get('item'),
        }
        data = {key: val for (key, val) in mapped.items() if val}
        serializer = QuotaTimelineStatsSerializer(data=data)
        serializer.is_valid(raise_exception=True)
        scopes = self.get_quota_scopes(request)
        date_points = self.get_date_points(
            start_time=serializer.validated_data['start_time'],
            end_time=serializer.validated_data['end_time'],
            interval=serializer.validated_data['interval']
        )
        # The timeline is walked backwards because reversion history is
        # iterated from newest to oldest in get_stats_for_scope.
        # Materialize the pairs: `dates` is indexed and iterated several
        # times below (a bare zip() iterator only supports that on Python 2).
        reversed_dates = date_points[::-1]
        dates = list(zip(reversed_dates[:-1], reversed_dates[1:]))
        items = [serializer.validated_data['item']] if 'item' in serializer.validated_data else serializer.ITEM_CHOICES
        stats = [{'from': datetime_to_timestamp(start), 'to': datetime_to_timestamp(end)} for start, end in dates]

        def _add(*args):
            # Element-wise sum of (limit, usage) pairs; None means "no data".
            args = [arg if arg is not None else (0, 0) for arg in args]
            return [sum(q) for q in zip(*args)]

        for item in items:
            item_stats = [self.get_stats_for_scope(item, scope, dates) for scope in scopes]
            if not item_stats:
                # No visible scopes: nothing to aggregate for this item.
                continue
            item_stats = map(_add, *item_stats)
            for date_item_stats, date_stats in zip(item_stats, stats):
                limit, usage = date_item_stats
                date_stats['{}_limit'.format(item)] = limit
                date_stats['{}_usage'.format(item)] = usage
        return stats[::-1]

    def get_stats_for_scope(self, quota_name, scope, dates):
        """Return ``[(limit, usage), ...]`` for each (end, start) pair in ``dates``."""
        stats_data = []
        quota = scope.quotas.get(name=quota_name)
        # NOTE(review): was select_related('reversion'); the Version model's
        # FK is named 'revision' (see the filter below and version.revision
        # accesses), so 'reversion' would raise a FieldError.
        versions = reversion.get_for_object(quota).select_related('revision').filter(
            revision__date_created__lte=dates[0][0]).iterator()
        version = None
        for end, start in dates:
            try:
                # Advance to the newest version recorded at or before `end`.
                while version is None or version.revision.date_created > end:
                    version = next(versions)
                stats_data.append((version.object_version.object.limit, version.object_version.object.usage))
            except StopIteration:
                # No older versions exist; earlier intervals have no data.
                break
        return stats_data

    def get_date_points(self, start_time, end_time, interval):
        """Return ascending datetimes covering [start_time, end_time] at the
        given interval ('hour', 'day', 'week' or 'month').
        """
        if interval == 'hour':
            start_point = start_time.replace(second=0, minute=0, microsecond=0)
            interval = datetime.timedelta(hours=1)
        elif interval == 'day':
            start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
            interval = datetime.timedelta(days=1)
        elif interval == 'week':
            start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
            interval = datetime.timedelta(days=7)
        elif interval == 'month':
            start_point = start_time.replace(hour=0, second=0, minute=0, microsecond=0)
            interval = datetime.timedelta(days=30)
        else:
            # Serializer choices should prevent this; fail loudly if not.
            raise ValueError('Unsupported interval: {}'.format(interval))
        points = [start_time]
        current_point = start_point
        while current_point <= end_time:
            points.append(current_point)
            current_point += interval
        if points[-1] != end_time:
            points.append(end_time)
        return [p for p in points if start_time <= p <= end_time]
| StarcoderdataPython |
11382234 | <filename>douban/douban/itemdang.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DangdangItem(scrapy.Item):
    """Container for the fields scraped from a Dangdang book listing."""
    _id = scrapy.Field()
    title = scrapy.Field()
    comments = scrapy.Field()  # presumably review count — confirm against spider
    time = scrapy.Field()  # presumably publication date — confirm against spider
    press = scrapy.Field()  # publisher
    price = scrapy.Field()
    discount = scrapy.Field()
    category1 = scrapy.Field()  # fine-grained (sub) category
    category2 = scrapy.Field()  # coarse (top-level) category
# class PicItem(scrapy.Item):
# pic = scrapy.Item()
# link = scrapy.Item() | StarcoderdataPython |
192967 | # coding: utf-8
"""
flask_oauthlib.provider.oauth2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Implements OAuth2 provider support for Flask.
:copyright: (c) 2013 - 2014 by <NAME>.
"""
import os
import logging
import datetime
from functools import wraps
from flask import request, url_for, jsonify, json
from flask import redirect, abort
from werkzeug import cached_property
from werkzeug.utils import import_string
from oauthlib import oauth2
from oauthlib.oauth2 import RequestValidator, Server
from oauthlib.common import to_unicode, Request
from ..utils import extract_params, decode_base64, create_response
__all__ = ('OAuth2Provider', 'OAuth2RequestValidator')
log = logging.getLogger('flask_oauthlib')
class OAuth2Provider(object):
    """Provide secure services using OAuth2.
    The server should provide an authorize handler and a token handler,
    But before the handlers are implemented, the server should provide
    some getters for the validation.
    Like many other Flask extensions, there are two usage modes. One is
    binding the Flask app instance::
        app = Flask(__name__)
        oauth = OAuth2Provider(app)
    The second possibility is to bind the Flask app later::
        oauth = OAuth2Provider()
        def create_app():
            app = Flask(__name__)
            oauth.init_app(app)
            return app
    Configure :meth:`tokengetter` and :meth:`tokensetter` to get and
    set tokens. Configure :meth:`grantgetter` and :meth:`grantsetter`
    to get and set grant tokens. Configure :meth:`clientgetter` to
    get the client.
    Configure :meth:`usergetter` if you need password credential
    authorization.
    With everything ready, implement the authorization workflow:
    * :meth:`authorize_handler` for consumer to confirm the grant
    * :meth:`token_handler` for client to exchange access token
    And now you can protect the resource with scopes::
        @app.route('/api/user')
        @oauth.require_oauth('email', 'username')
        def user(oauth):
            return jsonify(oauth.user)
    """

    def __init__(self, app=None):
        self._before_request_funcs = []
        self._after_request_funcs = []
        if app:
            self.init_app(app)

    def init_app(self, app):
        """
        This callback can be used to initialize an application for the
        oauth provider instance.
        """
        self.app = app
        app.extensions = getattr(app, 'extensions', {})
        app.extensions['oauthlib.provider.oauth2'] = self

    @cached_property
    def error_uri(self):
        """The error page URI.
        When something turns error, it will redirect to this error page.
        You can configure the error page URI with Flask config::
            OAUTH2_PROVIDER_ERROR_URI = '/error'
        You can also define the error page by a named endpoint::
            OAUTH2_PROVIDER_ERROR_ENDPOINT = 'oauth.error'
        """
        error_uri = self.app.config.get('OAUTH2_PROVIDER_ERROR_URI')
        if error_uri:
            return error_uri
        error_endpoint = self.app.config.get('OAUTH2_PROVIDER_ERROR_ENDPOINT')
        if error_endpoint:
            return url_for(error_endpoint)
        return '/oauth/errors'

    @cached_property
    def server(self):
        """
        All in one endpoints. This property is created automatically
        if you have implemented all the getters and setters.
        However, if you are not satisfied with the getter and setter,
        you can create a validator with :class:`OAuth2RequestValidator`::
            class MyValidator(OAuth2RequestValidator):
                def validate_client_id(self, client_id):
                    # do something
                    return True
        And assign the validator for the provider::
            oauth._validator = MyValidator()
        """
        expires_in = self.app.config.get('OAUTH2_PROVIDER_TOKEN_EXPIRES_IN')
        token_generator = self.app.config.get(
            'OAUTH2_PROVIDER_TOKEN_GENERATOR', None
        )
        if token_generator and not callable(token_generator):
            token_generator = import_string(token_generator)
        if hasattr(self, '_validator'):
            return Server(
                self._validator,
                token_expires_in=expires_in,
                token_generator=token_generator,
            )
        if hasattr(self, '_clientgetter') and \
                hasattr(self, '_tokengetter') and \
                hasattr(self, '_tokensetter') and \
                hasattr(self, '_grantgetter') and \
                hasattr(self, '_grantsetter'):
            usergetter = None
            if hasattr(self, '_usergetter'):
                usergetter = self._usergetter
            validator = OAuth2RequestValidator(
                clientgetter=self._clientgetter,
                tokengetter=self._tokengetter,
                grantgetter=self._grantgetter,
                usergetter=usergetter,
                tokensetter=self._tokensetter,
                grantsetter=self._grantsetter,
            )
            self._validator = validator
            return Server(
                validator,
                token_expires_in=expires_in,
                token_generator=token_generator,
            )
        raise RuntimeError('application not bound to required getters')

    def before_request(self, f):
        """Register functions to be invoked before accessing the resource.
        The function accepts nothing as parameters, but you can get
        information from `Flask.request` object. It is usually useful
        for setting limitation on the client request::
            @oauth.before_request
            def limit_client_request():
                client_id = request.values.get('client_id')
                if not client_id:
                    return
                client = Client.get(client_id)
                if over_limit(client):
                    return abort(403)
                track_request(client)
        """
        self._before_request_funcs.append(f)
        return f

    def after_request(self, f):
        """Register functions to be invoked after accessing the resource.
        The function accepts ``valid`` and ``request`` as parameters,
        and it should return a tuple of them::
            @oauth.after_request
            def valid_after_request(valid, oauth):
                if oauth.user in black_list:
                    return False, oauth
                return valid, oauth
        """
        self._after_request_funcs.append(f)
        return f

    def clientgetter(self, f):
        """Register a function as the client getter.
        The function accepts one parameter `client_id`, and it returns
        a client object with at least these information:
        - client_id: A random string
        - client_secret: A random string
        - client_type: A string represents if it is `confidential`
        - redirect_uris: A list of redirect uris
        - default_redirect_uri: One of the redirect uris
        - default_scopes: Default scopes of the client
        The client may contain more information, which is suggested:
        - allowed_grant_types: A list of grant types
        - allowed_response_types: A list of response types
        - validate_scopes: A function to validate scopes
        Implement the client getter::
            @oauth.clientgetter
            def get_client(client_id):
                client = get_client_model(client_id)
                # Client is an object
                return client
        """
        self._clientgetter = f
        return f

    def usergetter(self, f):
        """Register a function as the user getter.
        This decorator is only required for password credential
        authorization::
            @oauth.usergetter
            def get_user(username=username, password=password,
                         *args, **kwargs):
                return get_user_by_username(username, password)
        """
        self._usergetter = f
        return f

    def tokengetter(self, f):
        """Register a function as the token getter.
        The function accepts an `access_token` or `refresh_token` parameters,
        and it returns a token object with at least these information:
        - access_token: A string token
        - refresh_token: A string token
        - client_id: ID of the client
        - scopes: A list of scopes
        - expires: A `datetime.datetime` object
        - user: The user object
        The implementation of tokengetter should accept two parameters,
        one is access_token the other is refresh_token::
            @oauth.tokengetter
            def bearer_token(access_token=None, refresh_token=None):
                if access_token:
                    return get_token(access_token=access_token)
                if refresh_token:
                    return get_token(refresh_token=refresh_token)
                return None
        """
        self._tokengetter = f
        return f

    def tokensetter(self, f):
        """Register a function to save the bearer token.
        The setter accepts two parameters at least, one is token,
        the other is request::
            @oauth.tokensetter
            def set_token(token, request, *args, **kwargs):
                save_token(token, request.client, request.user)
        The parameter token is a dict, that looks like::
            {
                u'access_token': u'<KEY>',
                u'token_type': u'Bearer',
                u'expires_in': 3600,
                u'scope': u'email address'
            }
        The request is an object, that contains an user object and a
        client object.
        """
        self._tokensetter = f
        return f

    def grantgetter(self, f):
        """Register a function as the grant getter.
        The function accepts `client_id`, `code` and more::
            @oauth.grantgetter
            def grant(client_id, code):
                return get_grant(client_id, code)
        It returns a grant object with at least these information:
        - delete: A function to delete itself
        """
        self._grantgetter = f
        return f

    def grantsetter(self, f):
        """Register a function to save the grant code.
        The function accepts `client_id`, `code`, `request` and more::
            @oauth.grantsetter
            def set_grant(client_id, code, request, *args, **kwargs):
                save_grant(client_id, code, request.user, request.scopes)
        """
        self._grantsetter = f
        return f

    def authorize_handler(self, f):
        """Authorization handler decorator.
        This decorator will sort the parameters and headers out, and
        pre validate everything::
            @app.route('/oauth/authorize', methods=['GET', 'POST'])
            @oauth.authorize_handler
            def authorize(*args, **kwargs):
                if request.method == 'GET':
                    # render a page for user to confirm the authorization
                    return render_template('oauthorize.html')
                confirm = request.form.get('confirm', 'no')
                return confirm == 'yes'
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            # raise if server not implemented
            server = self.server
            uri, http_method, body, headers = extract_params()
            if request.method == 'GET':
                log.debug('Found redirect_uri %s.', request.args)
                try:
                    ret = server.validate_authorization_request(
                        uri, http_method, body, headers
                    )
                    scopes, credentials = ret
                    kwargs['scopes'] = scopes
                    kwargs.update(credentials)
                    return f(*args, **kwargs)
                except oauth2.FatalClientError as e:
                    log.debug('Fatal client error %r', e)
                    return redirect(e.in_uri(self.error_uri))
            if request.method == 'POST':
                # The body may be form-encoded or JSON; get_json(silent=True)
                # returns None instead of raising for non-JSON bodies.
                json_body = request.get_json(silent=True) or {}
                redirect_uri = request.values.get('redirect_uri', None) or \
                    json_body.get('redirect_uri', None)
                if not f(*args, **kwargs):
                    # denied by user
                    e = oauth2.AccessDeniedError()
                    return redirect(e.in_uri(redirect_uri))
                return self.confirm_authorization_request()
        return decorated

    def confirm_authorization_request(self):
        """When consumer confirm the authorization."""
        server = self.server
        # Credentials may come from form/query values or from a JSON body;
        # get_json(silent=True) is None for non-JSON requests.
        json_body = request.get_json(silent=True) or {}
        scope = request.values.get('scope') or json_body.get('scope', None)
        scopes = scope.split() if scope else []
        credentials = dict(
            client_id=request.values.get('client_id', None) or json_body.get('client_id', None),
            redirect_uri=request.values.get('redirect_uri', None) or json_body.get('redirect_uri', None),
            response_type=request.values.get('response_type', None) or json_body.get('response_type', None),
            state=request.values.get('state', None) or json_body.get('state', None)
        )
        log.debug('Fetched credentials from request %r.', credentials)
        redirect_uri = credentials.get('redirect_uri')
        log.debug('Found redirect_uri %s.', redirect_uri)
        uri, http_method, body, headers = extract_params()
        try:
            ret = server.create_authorization_response(
                uri, http_method, body, headers, scopes, credentials)
            log.debug('Authorization successful.')
            if credentials.get('state') == 'json':
                log.debug('Request made with JSON, return JSON as well')
                """
                Since we are using the Location return here, we need to grab
                the results and then deserialize it from URL format to JSON
                format. We do this because we are returning JSON.
                """
                json_document = {}
                serialized_data = ret[0].get('Location').replace(redirect_uri + '#', '').split('&')
                for element in serialized_data:
                    new_object = element.split('=')
                    json_document[new_object[0]] = new_object[1]
                return jsonify(json_document), 200
            else:
                return create_response(*ret)
        except oauth2.FatalClientError as e:
            return redirect(e.in_uri(self.error_uri))
        except oauth2.OAuth2Error as e:
            return redirect(e.in_uri(redirect_uri))

    def token_handler(self, f):
        """Access/refresh token handler decorator.
        The decorated function should return a dictionary or None as
        the extra credentials for creating the token response.
        You can control the access method with standard flask route mechanism.
        If you only allow the `POST` method::
            @app.route('/oauth/token', methods=['POST'])
            @oauth.token_handler
            def access_token():
                return None
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            server = self.server
            uri, http_method, body, headers = extract_params()
            credentials = f(*args, **kwargs) or {}
            log.debug('Fetched extra credentials, %r.', credentials)
            ret = server.create_token_response(
                uri, http_method, body, headers, credentials
            )
            return create_response(*ret)
        return decorated

    def require_oauth(self, *scopes):
        """Protect resource with specified scopes."""
        def wrapper(f):
            @wraps(f)
            def decorated(*args, **kwargs):
                for func in self._before_request_funcs:
                    func()
                server = self.server
                uri, http_method, body, headers = extract_params()
                valid, req = server.verify_request(
                    uri, http_method, body, headers, scopes
                )
                for func in self._after_request_funcs:
                    valid, req = func(valid, req)
                if not valid:
                    return abort(403)
                return f(*((req,) + args), **kwargs)
            return decorated
        return wrapper
class OAuth2RequestValidator(RequestValidator):
    """Subclass of Request Validator.
    :param clientgetter: a function to get client object
    :param tokengetter: a function to get bearer token
    :param tokensetter: a function to save bearer token
    :param grantgetter: a function to get grant token
    :param grantsetter: a function to save grant token
    """

    def __init__(self, clientgetter, tokengetter, grantgetter,
                 usergetter=None, tokensetter=None, grantsetter=None):
        self._clientgetter = clientgetter
        self._tokengetter = tokengetter
        self._usergetter = usergetter
        self._tokensetter = tokensetter
        self._grantgetter = grantgetter
        self._grantsetter = grantsetter

    def client_authentication_required(self, request, *args, **kwargs):
        # Only these grant types require the client to authenticate itself.
        return request.grant_type in ('password', 'refresh_token')

    def authenticate_client(self, request, *args, **kwargs):
        """Authenticate itself in other means.
        Other means is described in `Section 3.2.1`_.
        .. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
        """
        auth = request.headers.get('Authorization', None)
        log.debug('Authenticate client %r', auth)
        if auth:
            try:
                # HTTP Basic: "Basic base64(client_id:client_secret)"
                _, s = auth.split(' ')
                client_id, client_secret = decode_base64(s).split(':')
                client_id = to_unicode(client_id, 'utf-8')
                client_secret = to_unicode(client_secret, 'utf-8')
            except Exception as e:
                log.debug('Authenticate client failed with exception: %r', e)
                return False
        else:
            client_id = request.client_id
            client_secret = request.client_secret
        client = self._clientgetter(client_id)
        if not client:
            log.debug('Authenticate client failed, client not found.')
            return False
        request.client = client
        if client.client_secret != client_secret:
            log.debug('Authenticate client failed, secret not match.')
            return False
        if client.client_type != 'confidential':
            log.debug('Authenticate client failed, not confidential.')
            return False
        log.debug('Authenticate client success.')
        return True

    def authenticate_client_id(self, client_id, request, *args, **kwargs):
        """Authenticate a non-confidential client.
        :param client_id: Client ID of the non-confidential client
        :param request: The Request object passed by oauthlib
        """
        log.debug('Authenticate client %r.', client_id)
        client = request.client or self._clientgetter(client_id)
        if not client:
            log.debug('Authenticate failed, client not found.')
            return False
        # attach client on request for convenience
        request.client = client
        # authenticate non-confidential client_type only
        # most of the clients are of public client_type
        if client.client_type == 'confidential':
            log.debug('Authenticate client failed, confidential client.')
            return False
        return True

    def confirm_redirect_uri(self, client_id, code, redirect_uri, client,
                             *args, **kwargs):
        """Ensure client is authorized to redirect to the redirect_uri.
        This method is used in the authorization code grant flow. It will
        compare redirect_uri and the one in grant token strictly, you can
        add a `validate_redirect_uri` function on grant for a customized
        validation.
        """
        log.debug('Confirm redirect uri for client %r and code %r.',
                  client_id, code)
        grant = self._grantgetter(client_id=client_id, code=code)
        if not grant:
            log.debug('Grant not found.')
            return False
        if hasattr(grant, 'validate_redirect_uri'):
            return grant.validate_redirect_uri(redirect_uri)
        log.debug('Compare redirect uri for grant %r and %r.',
                  grant.redirect_uri, redirect_uri)
        if os.environ.get('DEBUG') and redirect_uri is None:
            # For testing
            return True
        return grant.redirect_uri == redirect_uri

    def get_original_scopes(self, refresh_token, request, *args, **kwargs):
        """Get the list of scopes associated with the refresh token.
        This method is used in the refresh token grant flow. We return
        the scope of the token to be refreshed so it can be applied to the
        new access token.
        """
        log.debug('Obtaining scope of refreshed token.')
        tok = self._tokengetter(refresh_token=refresh_token)
        if not tok:
            # Token was not found; validate_refresh_token should already
            # have rejected it, but avoid an AttributeError here.
            return []
        return tok.scopes

    def confirm_scopes(self, refresh_token, scopes, request, *args, **kwargs):
        """Ensures the requested scope matches the scope originally granted
        by the resource owner. If the scope is omitted it is treated as equal
        to the scope originally granted by the resource owner.
        DEPRECATION NOTE: This method will cease to be used in oauthlib>0.4.2,
        future versions of ``oauthlib`` use the validator method
        ``get_original_scopes`` to determine the scope of the refreshed token.
        """
        if not scopes:
            log.debug('Scope omitted for refresh token %r', refresh_token)
            return True
        log.debug('Confirm scopes %r for refresh token %r',
                  scopes, refresh_token)
        tok = self._tokengetter(refresh_token=refresh_token)
        return set(tok.scopes) == set(scopes)

    def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
        """Default redirect_uri for the given client."""
        request.client = request.client or self._clientgetter(client_id)
        redirect_uri = request.client.default_redirect_uri
        log.debug('Found default redirect uri %r', redirect_uri)
        return redirect_uri

    def get_default_scopes(self, client_id, request, *args, **kwargs):
        """Default scopes for the given client."""
        request.client = request.client or self._clientgetter(client_id)
        scopes = request.client.default_scopes
        log.debug('Found default scopes %r', scopes)
        return scopes

    def invalidate_authorization_code(self, client_id, code, request,
                                      *args, **kwargs):
        """Invalidate an authorization code after use.
        We keep the temporary code in a grant, which has a `delete`
        function to destroy itself.
        """
        log.debug('Destroy grant token for client %r, %r', client_id, code)
        grant = self._grantgetter(client_id=client_id, code=code)
        if grant:
            grant.delete()

    def save_authorization_code(self, client_id, code, request,
                                *args, **kwargs):
        """Persist the authorization code."""
        log.debug(
            'Persist authorization code %r for client %r',
            code, client_id
        )
        request.client = request.client or self._clientgetter(client_id)
        self._grantsetter(client_id, code, request, *args, **kwargs)
        return request.client.default_redirect_uri

    def save_bearer_token(self, token, request, *args, **kwargs):
        """Persist the Bearer token."""
        log.debug('Save bearer token %r', token)
        self._tokensetter(token, request, *args, **kwargs)
        return request.client.default_redirect_uri

    def validate_bearer_token(self, token, scopes, request):
        """Validate access token.
        :param token: A string of random characters
        :param scopes: A list of scopes
        :param request: The Request object passed by oauthlib
        The validation validates:
        1) if the token is available
        2) if the token has expired
        3) if the scopes are available
        """
        log.debug('Validate bearer token %r', token)
        tok = self._tokengetter(access_token=token)
        if not tok:
            log.debug('Bearer token not found.')
            return False
        # validate expires
        if datetime.datetime.utcnow() > tok.expires:
            log.debug('Bearer token is expired.')
            return False
        # validate scopes
        if not set(tok.scopes).issuperset(set(scopes)):
            log.debug('Bearer token scope not valid.')
            return False
        request.access_token = tok
        request.user = tok.user
        request.scopes = scopes
        if hasattr(tok, 'client'):
            request.client = tok.client
        elif hasattr(tok, 'client_id'):
            request.client = self._clientgetter(tok.client_id)
        return True

    def validate_client_id(self, client_id, request, *args, **kwargs):
        """Ensure client_id belong to a valid and active client."""
        log.debug('Validate client %r', client_id)
        client = request.client or self._clientgetter(client_id)
        if client:
            # attach client to request object
            request.client = client
            return True
        return False

    def validate_code(self, client_id, code, client, request, *args, **kwargs):
        """Ensure the grant code is valid."""
        log.debug(
            'Validate code for client %r and code %r', client_id, code
        )
        grant = self._grantgetter(client_id=client_id, code=code)
        if not grant:
            log.debug('Grant not found.')
            return False
        if hasattr(grant, 'expires') and \
                datetime.datetime.utcnow() > grant.expires:
            log.debug('Grant is expired.')
            return False
        request.state = kwargs.get('state')
        request.user = grant.user
        request.scopes = grant.scopes
        return True

    def validate_grant_type(self, client_id, grant_type, client, request,
                            *args, **kwargs):
        """Ensure the client is authorized to use the grant type requested.
        It will allow any of the four grant types (`authorization_code`,
        `password`, `client_credentials`, `refresh_token`) by default.
        Implemented `allowed_grant_types` for client object to authorize
        the request.
        It is suggested that `allowed_grant_types` should contain at least
        `authorization_code` and `refresh_token`.
        """
        if self._usergetter is None and grant_type == 'password':
            log.debug('Password credential authorization is disabled.')
            return False
        if grant_type not in ('authorization_code', 'password',
                              'client_credentials', 'refresh_token'):
            return False
        if hasattr(client, 'allowed_grant_types'):
            return grant_type in client.allowed_grant_types
        if grant_type == 'client_credentials':
            if hasattr(client, 'user'):
                request.user = client.user
                return True
            log.debug('Client should has a user property')
            return False
        return True

    def validate_redirect_uri(self, client_id, redirect_uri, request,
                              *args, **kwargs):
        """Ensure client is authorized to redirect to the redirect_uri.
        This method is used in the authorization code grant flow and also
        in implicit grant flow. It will detect if redirect_uri in client's
        redirect_uris strictly, you can add a `validate_redirect_uri`
        function on grant for a customized validation.
        """
        # Reuse the client already attached to the request if present
        # (was a `=` typo: `request.client = request.client = ...`).
        request.client = request.client or self._clientgetter(client_id)
        client = request.client
        if hasattr(client, 'validate_redirect_uri'):
            return client.validate_redirect_uri(redirect_uri)
        return redirect_uri in client.redirect_uris

    def validate_refresh_token(self, refresh_token, client, request,
                               *args, **kwargs):
        """Ensure the token is valid and belongs to the client
        This method is used by the authorization code grant indirectly by
        issuing refresh tokens, resource owner password credentials grant
        (also indirectly) and the refresh token grant.
        """
        token = self._tokengetter(refresh_token=refresh_token)
        if token and token.client_id == client.client_id:
            # Make sure the request object contains user and client_id
            request.client_id = token.client_id
            request.user = token.user
            return True
        return False

    def validate_response_type(self, client_id, response_type, client, request,
                               *args, **kwargs):
        """Ensure client is authorized to use the response type requested.
        It will allow any of the two (`code`, `token`) response types by
        default. Implemented `allowed_response_types` for client object
        to authorize the request.
        """
        if response_type not in ('code', 'token'):
            return False
        if hasattr(client, 'allowed_response_types'):
            return response_type in client.allowed_response_types
        return True

    def validate_scopes(self, client_id, scopes, client, request,
                        *args, **kwargs):
        """Ensure the client is authorized access to requested scopes."""
        if hasattr(client, 'validate_scopes'):
            return client.validate_scopes(scopes)
        return set(client.default_scopes).issuperset(set(scopes))

    def validate_user(self, username, password, client, request,
                      *args, **kwargs):
        """Ensure the username and password is valid.
        Attach user object on request for later using.
        """
        log.debug('Validating username %r and password %r',
                  username, password)
        if self._usergetter is not None:
            user = self._usergetter(
                username, password, client, request, *args, **kwargs
            )
            if user:
                request.user = user
                return True
            return False
        log.debug('Password credential authorization is disabled.')
        return False
| StarcoderdataPython |
161875 | <reponame>ScottehMax/pyshowdown
import asyncio
import configparser
import importlib
import os
import ssl
import sys
from http.cookies import SimpleCookie
from typing import Optional, List, Dict, TYPE_CHECKING
import aiohttp
from pyshowdown import connection, message
if TYPE_CHECKING:
from pyshowdown.plugins.plugin import BasePlugin
from pyshowdown.room import Room
class Client:
def __init__(
self,
host: str,
port: int,
path: str,
ssl_context: Optional[ssl.SSLContext] = None,
):
"""Client class constructor.
Args:
host (str): Hostname or IP address of the server.
port (int): The port number of the server.
path (str): The path to the server.
ssl_context (ssl.SSLContext, optional): The SSL context. Defaults to None.
"""
self.conn = connection.Connection(host, port, path, ssl_context=ssl_context)
self.connected = False
self.cookies: Optional[SimpleCookie[str]] = None
self.load_config()
self.plugins: List["BasePlugin"] = []
self.load_plugins()
self.rooms: Dict[str, "Room"] = {}
def load_config(self) -> None:
"""Load config from config.ini."""
self.config = configparser.ConfigParser()
self.config.read("config.ini")
self.username = self.config["user"]["username"]
self.password = self.config["user"]["password"]
self.plugin_dir = self.config["user"].get("plugin_dir", "system")
self.plugin_list = self.config["user"].get("plugins").split(",")
async def connect(self) -> None:
"""Connect to the server."""
await self.conn.connect()
async def keep_connected(self) -> None:
"""Keeps the client connected to the server."""
self.connected = False
timeout = 1
while not self.connected:
try:
await asyncio.sleep(timeout)
await self.connect()
self.connected = True
timeout = 1
await self.receive_forever()
except Exception as e:
print(e)
timeout += 1
async def close(self) -> None:
"""Close the connection."""
await self.conn.close()
async def send(self, room: str, message: str) -> None:
"""Sends message to the server.
Args:
message (str): The message to send.
"""
m = f"{room}|{message}"
print(">> " + m)
await self.conn.send(m)
async def send_pm(self, user: str, message: str) -> None:
"""Sends a private message to the user.
Args:
user (str): The user to send the message to.
message (str): The message to send.
"""
await self.send("", f"/w {user}, {message}")
async def receive(self) -> aiohttp.WSMessage:
"""Receives data from the server.
Returns:
aiohttp.WSMessage: The data received.
"""
return await self.conn.receive()
async def receive_forever(self) -> None:
"""Receives data from the server forever.
Raises:
ConnectionError: If no connection is established.
"""
if self.conn.ws is None:
raise ConnectionError("Not connected to server.")
async for ws_message in self.conn.ws:
if ws_message.type == aiohttp.WSMsgType.TEXT:
message: str = ws_message.data
if message:
# some messages are actually multiple messages
# separated by a newline
messages = message.split("\n")
if messages and messages[0] and messages[0][0] == ">":
room = messages.pop(0)[1:]
else:
room = ""
for single_message in messages:
if single_message:
await self.handle_message(room, single_message)
self.connected = False
def load_plugins(self) -> None:
    """Loads all the plugins from the directory set in config.

    It should first import them, then instantiate the class
    which is a subclass of BasePlugin.
    """
    print("Loading plugins...")
    # always load the system plugins
    sys.path.append(os.path.join(os.path.dirname(__file__), "plugins"))
    if self.plugin_dir != "system":
        sys.path.append(self.plugin_dir)
    for plugin_name in self.plugin_list:
        try:
            # Each plugin module must expose a setup(client) callable
            # returning an iterable of plugin instances.
            plugin_module = importlib.import_module(plugin_name)
            plugins = plugin_module.setup(self)
            for plugin in plugins:
                self.plugins.append(plugin)
        except Exception as e:
            # A broken plugin must not prevent the others from loading.
            print("Error loading plugin {}: {}".format(plugin_name, e))
async def handle_message(self, room: str, msg_str: str) -> None:
    """Handles a message from the server.

    Iterates through all the loaded plugins, determines whether
    any of them can handle the message, and if so, calls the
    response method of the plugin.

    Args:
        room (str): The room the message was sent from.
        msg_str (str): The message received.
    """
    print("<< " + msg_str)
    m = message.Message(room, msg_str)
    for plugin in self.plugins:
        matched = await plugin.match(m)
        if matched:
            resp = await plugin.response(m)
            if resp:
                # Private messages are answered privately; everything
                # else goes back to the room it came from.
                if m.type == "pm":
                    await self.send_pm(m.sender, resp)
                else:
                    await self.send(m.room, resp)
async def join(self, room: str) -> None:
    """Join the given room on the server.

    Args:
        room (str): The room to join.
    """
    command = f"/join {room}"
    await self.send("", command)
async def leave(self, room: str) -> None:
    """Leave the given room.

    Args:
        room (str): The room to leave.
    """
    command = "/leave"
    await self.send(room, command)
def __str__(self) -> str:
    """Return a human-readable description of the client.

    Returns:
        str: ``Client(<host>, <port>, <path>)``.
    """
    conn = self.conn
    return f"Client({conn.host}, {conn.port}, {conn.path})"
def __repr__(self) -> str:
    """Returns a representation of the client.

    Same text as ``__str__``; kept separate so ``repr()`` works in
    containers and debuggers.

    Returns:
        str: The representation of the client.
    """
    return self.__str__()
| StarcoderdataPython |
11329156 | <filename>alab_management/__init__.py
"""
Managing everything in the autonomous lab.
"""
__version__ = "0.4.1"
from .device_view.device import BaseDevice, add_device
from .sample_view import Sample, SamplePosition
from .task_view.task import BaseTask, add_task
from .utils.module_ops import import_task_definitions, import_device_definitions
| StarcoderdataPython |
208679 | <filename>qtt/__init__.py
""" Quantum Technology Toolbox
The QTT package contains functionality for the tuning and calibration of spin-qubits. The package is
divided into subpacakges:
- Measurements: functionality to perform measurements on devices
- Algorithms: functionality to analyse measurements
- Simulation: contains simulations of quantom dot systems
- Tools: misc tools
- Gui: Several gui element for visualization of data
- Instrument drivers: contains QCoDeS drivers for various instruments
For more information see https://github.com/qutech-delft/qtt
"""
# flake8: noqa (we don't need the "<...> imported but unused" error)
import copy
import warnings
import importlib
import distutils
import distutils.version
import qcodes
import qtt.utilities.tools
import qtt.data
import qtt.algorithms
import qtt.measurements
import qtt.exceptions
from qtt.version import __version__
from qtt.measurements.storage import save_state, load_state
try:
import pyqtgraph
import qtt.gui.live_plotting
import qtt.gui.parameterviewer
from qtt.gui.parameterviewer import createParameterWidget
from qtt.gui.dataviewer import DataViewer
except ImportError:
# no gui available
warnings.warn('pyqtgraph could not be imported, gui elements not available')
#%% Check packages
def check_version(version, module=qcodes, optional = False, install_message=None):
    """ Check whether a module has the correct (minimum) version.

    Args:
        version (str): minimum required version string.
        module (module or str): an already-imported module, or the name of
            a module to import.
        optional (bool): if True, problems only emit a warning instead of
            raising.
        install_message (str): printed before raising when the module
            cannot be imported.

    Raises:
        Exception: if the module cannot be loaded, has no ``__version__``
            attribute, or is older than ``version`` (and not optional).
    """
    if isinstance(module, str):
        try:
            m = importlib.import_module(module)
            module = m
        except ModuleNotFoundError:
            if optional:
                warnings.warn('optional package %s is not available' % module, qtt.exceptions.MissingOptionalPackageWarning)
                return
            else:
                if install_message is not None:
                    print(install_message)
                raise Exception('could not load module %s' % module)
    # modules without __version__ cannot be checked at all
    mversion = getattr(module, '__version__', None)
    if mversion is None:
        raise Exception(' module %s has no __version__ attribute' % (module,))

    if distutils.version.StrictVersion(mversion) < distutils.version.StrictVersion(version):
        if optional:
            warnings.warn('package %s has incorrect version' % module, qtt.exceptions.PackageVersionWarning)
        else:
            raise Exception(' from %s need version %s (version is %s)' % (module, version, mversion))
# we make an explicit check on versions, since people often upgrade their installation without upgrading the required packages
check_version('1.0', 'qtpy')
check_version('0.18', 'scipy')
check_version('0.1', 'colorama')
check_version('0.1', 'redis', optional=True)
check_version('0.1.10', qcodes) # version of qcodes required
check_version('3.0', 'Polygon', install_message= "use command 'pip install Polygon3' to install the package")
#%% Load often used constructions
from qtt.gui.live_plotting import start_measurement_control
@qtt.utilities.tools.rdeprecated(expire='Aug 1 2018')
def start_dataviewer():
    """ Create and show a DataViewer window (deprecated helper).

    Returns:
        DataViewer: the window that was created.
    """
    from qtt.gui.dataviewer import DataViewer
    dv = DataViewer()
    dv.show()
    return dv
#%% Add hook to abort measurement
# connect to redis server
_redis_connection = None
try:
import redis
_redis_connection = redis.Redis(host='127.0.0.1', port=6379)
_redis_connection.set('qtt_abort_running_measurement', 0)
except:
_redis_connection = None
def _abort_measurement(value=None):
    """ Return True if the currently running measurement should be aborted """
    # NOTE(review): despite the docstring this returns an int (0 or the
    # stored flag value), not a bool; callers rely on truthiness.
    if _redis_connection is None:
        # No redis server available: never signal an abort.
        return 0
    if value is not None:
        # Optionally update the abort flag before reading it back.
        _redis_connection.set('qtt_abort_running_measurement', value)
    v = _redis_connection.get('qtt_abort_running_measurement')
    if v is None:
        v = 0
    return int(v)
def reset_abort(value = 0):
    """ reset qtt_abort_running_measurement

    Args:
        value: new value for the abort flag (0 means "do not abort").
    """
    # Guard against a missing redis server, consistent with
    # _abort_measurement(); previously this raised AttributeError when
    # _redis_connection was None.
    if _redis_connection is None:
        return
    _redis_connection.set('qtt_abort_running_measurement', value)
def _redisStrValue(var = 'qtt_live_value1'):
    """ Return live control value retrieved from redis server
    and convert to string

    Args:
        var (str): name of the redis key to read.

    Returns:
        str or int: decoded value, or 0 when no redis server is
        available or the key does not exist (legacy sentinel).
    """
    if _redis_connection is None:
        return 0
    v = _redis_connection.get(var)
    # Previously a missing key crashed on None.decode(); fall back to
    # the same sentinel used when redis is unavailable.
    if v is None:
        return 0
    return v.decode('utf-8')
def _redisStrSet(value, var = 'qtt_live_value1'):
    """ Set live control value on redis server

    Args:
        value: value to store.
        var (str): name of the redis key to write.
    """
    # Silently ignore writes when no redis server is available,
    # consistent with the other redis helpers in this module.
    if _redis_connection is None:
        return
    _redis_connection.set(var, value)
liveValue = _redisStrValue
liveValueSet = _redisStrSet
abort_measurements = _abort_measurement
# patch the qcodes abort function
qcodes.loops.abort_measurements = _abort_measurement
qtt._dummy_mc = []
#%% Override default location formatter
from qcodes.data.location import FormatLocation
FormatLocation.default_fmt = '{date}/{time}_{name}_{label}'
qcodes.DataSet.location_provider = FormatLocation(
fmt='{date}/{time}_{name}_{label}', record={'name': 'qtt', 'label': 'generic'})
def set_location_name(name, verbose=1):
    """ Set the 'name' tag used by the qcodes location provider.

    Args:
        name (str): tag recorded in the location of new datasets.
        verbose (int): if nonzero, print the new tag.
    """
    if verbose:
        print('setting location name tag to %s' % name)
    qcodes.DataSet.location_provider.base_record['name'] = name
#%%
def _copy_to_str(x, memo):
    """ deepcopy dispatch helper: "copy" an object by returning str(x).

    The unused ``memo`` argument is required by the
    ``copy._deepcopy_dispatch`` calling convention.
    """
    return str(x)
# black magic to make qcodes objects work with deepcopy
from qcodes import Parameter, Instrument, StandardParameter, ManualParameter, Station
for c in [Parameter, Instrument, StandardParameter, ManualParameter, Station]:
copy._deepcopy_dispatch[c] = _copy_to_str
# make a qcodes instrument pickable
qcodes.Instrument.__getstate__ = lambda self: str(self)
qcodes.Parameter.__getstate__ = lambda self: str(self)
def _setstate(self, d):
    """ Unpickle hook for qcodes Instrument/Parameter objects.

    Only the name survives serialization (``__getstate__`` pickles the
    object as a string), so restore the name and replace ``get`` with a
    stub that raises when called.
    """
    self.name = d
    self._instrument = None

    def _get():
        # The original get callable cannot be restored from a string.
        print('instrument %s was serialized, no get available' % self.name)
        raise Exception('no get function defined')
    self.get = _get
qcodes.Instrument.__setstate__ = _setstate
qcodes.Parameter.__setstate__ = _setstate
#%% Enhance the qcodes functionality
try:
from qtpy.QtCore import Qt
from qtpy import QtWidgets
from qcodes.plots.pyqtgraph import QtPlot
    def _qtt_keyPressEvent(self, e):
        ''' Patch to add a callback to the QtPlot figure window '''
        # Pressing "P" copies the current figure to a PowerPoint slide.
        if e.key() == Qt.Key_P:
            print('key P pressed: copy figure window to powerpoint')
            qtt.utilities.tools.addPPTslide(fig=self)
        # Fall through to the default Qt key handling.
        super(QtPlot, self).keyPressEvent(e)
# update the keypress callback function
QtPlot.keyPressEvent = _qtt_keyPressEvent
except:
pass
#%% Enhance the qcodes functionality
try:
import pyqtgraph as pg
    def _copyToClipboard(self):
        ''' Copy the current image to the system clipboard '''
        # Grab the window contents as a pixmap and put it on the clipboard.
        app = pg.mkQApp()
        clipboard = app.clipboard()
        clipboard.setPixmap(self.win.grab())
QtPlot.copyToClipboard = _copyToClipboard
except:
pass
| StarcoderdataPython |
12185 | if __name__ == "__main__":
print("Nothing yet...")
| StarcoderdataPython |
364773 | <reponame>ltowarek/budget-supervisor
# coding: utf-8
"""
Salt Edge Account Information API
API Reference for services # noqa: E501
OpenAPI spec version: 5.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Merchant(object):
    """Swagger-generated model describing a merchant.

    NOTE: This class is auto generated by the swagger code generator
    program.  Do not edit the class manually.
    """

    # swagger_types: attribute name -> attribute type.
    # attribute_map: attribute name -> JSON key in the API definition.
    swagger_types = {
        'id': 'str',
        'names': 'list[MerchantNames]',
        'address': 'MerchantAddress',
        'contact': 'list[MerchantContact]'
    }

    attribute_map = {
        'id': 'id',
        'names': 'names',
        'address': 'address',
        'contact': 'contact'
    }

    def __init__(self, id=None, names=None, address=None, contact=None):  # noqa: E501
        """Merchant - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._names = None
        self._address = None
        self._contact = None
        self.discriminator = None
        # Route the arguments through the property setters so the
        # not-None validation runs on construction too.
        self.id = id
        self.names = names
        self.address = address
        self.contact = contact

    @property
    def id(self):
        """The `id` of the merchant.

        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Set the `id` of the merchant (must not be None)."""
        if id is None:
            raise ValueError("Invalid value for `id`, must not be `None`")  # noqa: E501
        self._id = id

    @property
    def names(self):
        """Names used for the merchant: company, corporation, brand,
        franchise or any other entity participating in the transaction.

        :rtype: list[MerchantNames]
        """
        return self._names

    @names.setter
    def names(self, names):
        """Set the merchant names (must not be None)."""
        if names is None:
            raise ValueError("Invalid value for `names`, must not be `None`")  # noqa: E501
        self._names = names

    @property
    def address(self):
        """The address of the merchant.

        :rtype: MerchantAddress
        """
        return self._address

    @address.setter
    def address(self, address):
        """Set the merchant address (must not be None)."""
        if address is None:
            raise ValueError("Invalid value for `address`, must not be `None`")  # noqa: E501
        self._address = address

    @property
    def contact(self):
        """Contact points via which the merchant can be reached, e.g.
        website, phone or social media.

        :rtype: list[MerchantContact]
        """
        return self._contact

    @contact.setter
    def contact(self, contact):
        """Set the merchant contact list (must not be None)."""
        if contact is None:
            raise ValueError("Invalid value for `contact`, must not be `None`")  # noqa: E501
        self._contact = contact

    @staticmethod
    def _serialize(value):
        """Recursively convert a value to plain dict/list primitives."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {attr: self._serialize(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(Merchant, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Merchant) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
8189911 | <filename>hic3defdr/analysis/alternatives.py<gh_stars>0
"""
Experimental module exposing variants of the HiC3DeFDR model for benchmarking
purposes.
"""
import numpy as np
import scipy.stats as stats
from hic3defdr.analysis import HiC3DeFDR
from hic3defdr.util.printing import eprint
from hic3defdr.util.dispersion import mme_per_pixel
from hic3defdr.util.clusters import load_clusters
from hic3defdr.util.parallelization import parallel_apply
import hic3defdr.util.dispersion as dispersion
def poisson_fit_mu_hat(raw, f):
    """Fit the Poisson mean per pixel as an f-weighted average of raw/f.

    Parameters
    ----------
    raw : np.ndarray
        Raw counts, one row per pixel, one column per replicate.
    f : np.ndarray
        Correction factors, same shape as ``raw``.

    Returns
    -------
    np.ndarray
        Estimated mean for each pixel (row).
    """
    normalized = raw / f
    return np.average(normalized, weights=f, axis=1)
def poisson_logpmf(x, mu):
    """Log of the Poisson probability mass function at ``x`` with mean ``mu``."""
    return stats.poisson.logpmf(x, mu)
def poisson_lrt(raw, f, design, refit_mu=True):
    """Poisson likelihood ratio test for differences across conditions.

    Parameters
    ----------
    raw : np.ndarray
        Raw counts, shape (n_pixels, n_samples).
    f : np.ndarray
        Correction factors, shape (n_pixels, n_samples).
    design : np.ndarray
        Boolean design matrix, shape (n_samples, n_conditions); each
        column selects the samples belonging to one condition.
    refit_mu : bool
        If True, estimate means with the f-weighted fit
        (``poisson_fit_mu_hat``); otherwise use plain means of raw/f.

    Returns
    -------
    tuple
        ``(pvalues, llr, mu_hat_null, mu_hat_alt)`` where ``mu_hat_alt``
        has shape (n_pixels, n_conditions).
    """
    if refit_mu:
        mu_hat_null = poisson_fit_mu_hat(raw, f)
        mu_hat_alt = np.array(
            [poisson_fit_mu_hat(raw[:, design[:, c]], f[:, design[:, c]])
             for c in range(design.shape[1])]
        ).T
    else:
        mu_hat_null = np.mean(raw / f, axis=1)
        # Transpose to (n_pixels, n_conditions) to match the refit
        # branch; previously the missing .T made the np.dot below fail
        # (mismatched inner dimensions) and returned mu_hat_alt in a
        # different orientation than the refit_mu=True branch.
        mu_hat_alt = np.array(
            [np.mean(raw[:, design[:, c]] / f[:, design[:, c]], axis=1)
             for c in range(design.shape[1])]).T
    # broadcast the per-condition means back to per-sample columns
    mu_hat_alt_wide = np.dot(mu_hat_alt, design.T)
    null_ll = np.sum(poisson_logpmf(raw, mu_hat_null[:, None] * f), axis=1)
    alt_ll = np.sum(poisson_logpmf(raw, mu_hat_alt_wide * f), axis=1)
    llr = null_ll - alt_ll
    # LRT statistic is -2 * (null_ll - alt_ll); df = n_conditions - 1
    pvalues = stats.chi2(design.shape[1] - 1).sf(-2 * llr)
    return pvalues, llr, mu_hat_null, mu_hat_alt
class Poisson3DeFDR(HiC3DeFDR):
    """Variant of HiC3DeFDR that models counts as Poisson.

    Under a Poisson model the dispersion is identically zero, so
    ``estimate_disp`` writes all-zero dispersions and ``lrt`` uses the
    Poisson likelihood ratio test instead of the NB one.
    """

    def estimate_disp(self, estimator='qcml', frac=None, auto_frac_factor=15.,
                      weighted_lowess=True, n_threads=-1):
        """Write all-zero dispersions (Poisson: variance equals mean).

        All keyword arguments are accepted only for interface
        compatibility with ``HiC3DeFDR.estimate_disp`` and are ignored.
        """
        eprint('estimating dispersion')
        estimator = dispersion.__dict__[estimator] \
            if estimator in dispersion.__dict__ else estimator
        eprint(' loading data')
        disp_idx, _ = self.load_data('disp_idx', 'all')
        # NOTE(review): row/col/scaled are loaded but unused below
        # (offsets from the row load is needed for save_data) — confirm
        # load_data has no required side effects before removing.
        row, offsets = self.load_data('row', 'all', idx=disp_idx)
        col, _ = self.load_data('col', 'all', idx=disp_idx)
        scaled, _ = self.load_data('scaled', 'all', idx=disp_idx)
        eprint(' computing pixel-wise mean per condition')
        disp_per_dist = np.zeros((self.dist_thresh_max+1, self.design.shape[1]))
        disp = np.zeros((disp_idx.sum(), self.design.shape[1]))

        def disp_fn(mean):
            # Poisson model: dispersion is zero everywhere.
            return np.zeros_like(mean)

        for c, cond in enumerate(self.design.columns):
            self.save_disp_fn(cond, disp_fn)
        eprint(' saving estimated dispersions to disk')
        self.save_data(disp, 'disp', offsets)
        self.save_data(disp_per_dist, 'disp_per_dist')

    def lrt(self, chrom=None, refit_mu=True, n_threads=-1, verbose=True):
        """Run the Poisson LRT for one chromosome or for all of them.

        Parameters
        ----------
        chrom : str or None
            Chromosome to process; None processes all (optionally in
            parallel across ``n_threads``).
        refit_mu : bool
            Passed through to ``poisson_lrt``.
        """
        if chrom is None:
            if n_threads:
                parallel_apply(
                    self.lrt,
                    [{'chrom': c, 'refit_mu': refit_mu, 'verbose': False}
                     for c in self.chroms],
                    n_threads=n_threads
                )
            else:
                for chrom in self.chroms:
                    self.lrt(chrom=chrom, refit_mu=refit_mu)
            return
        eprint('running LRT for chrom %s' % chrom)
        eprint(' loading data', skip=not verbose)
        bias = self.load_bias(chrom)
        size_factors = self.load_data('size_factors', chrom)
        row = self.load_data('row', chrom)
        col = self.load_data('col', chrom)
        raw = self.load_data('raw', chrom)
        disp_idx = self.load_data('disp_idx', chrom)
        eprint(' computing LRT results', skip=not verbose)
        # combined correction factor: both bias vectors times size factors
        f = bias[row, :][disp_idx, :] * bias[col, :][disp_idx, :] * \
            size_factors[disp_idx, :]
        # pass the caller's refit_mu through; previously this was
        # hard-coded to True, silently ignoring the argument
        pvalues, llr, mu_hat_null, mu_hat_alt = poisson_lrt(
            raw[disp_idx, :], f, self.design.values, refit_mu=refit_mu)
        if self.loop_patterns:
            eprint(' making loop_idx', skip=not verbose)
            # union of all cluster pixels across all loop patterns
            loop_pixels = set().union(
                *sum((load_clusters(pattern.replace('<chrom>', chrom))
                      for pattern in self.loop_patterns.values()), []))
            loop_idx = np.array([True if pixel in loop_pixels else False
                                 for pixel in zip(row[disp_idx],
                                                  col[disp_idx])])
            self.save_data(loop_idx, 'loop_idx', chrom)
        eprint(' saving results to disk', skip=not verbose)
        self.save_data(pvalues, 'pvalues', chrom)
        self.save_data(llr, 'llr', chrom)
        self.save_data(mu_hat_null, 'mu_hat_null', chrom)
        self.save_data(mu_hat_alt, 'mu_hat_alt', chrom)
class Unsmoothed3DeFDR(HiC3DeFDR):
    """Variant of HiC3DeFDR that uses raw per-pixel MME dispersion
    estimates without any smoothing across distance."""

    def estimate_disp(self, estimator='qcml', frac=None, auto_frac_factor=15.,
                      weighted_lowess=True, n_threads=-1):
        # note: all kwargs are ignored
        eprint('estimating dispersion')
        eprint(' loading data')
        disp_idx, _ = self.load_data('disp_idx', 'all')
        # NOTE(review): row/col are loaded but unused (only offsets from
        # the row load is needed for save_data) — confirm load_data has
        # no required side effects before removing.
        row, offsets = self.load_data('row', 'all', idx=disp_idx)
        col, _ = self.load_data('col', 'all', idx=disp_idx)
        scaled, _ = self.load_data('scaled', 'all', idx=disp_idx)
        eprint(' computing pixel-wise mean per condition')
        disp = np.zeros((disp_idx.sum(), self.design.shape[1]))
        for c, cond in enumerate(self.design.columns):
            eprint(' estimating dispersion for condition %s' % cond)
            # per-pixel method-of-moments estimate, floored at 1e-7 to
            # avoid zero/negative dispersions downstream
            disp[:, c] = np.maximum(mme_per_pixel(
                scaled[:, self.design[cond]]), 1e-7)
        eprint(' saving estimated dispersions to disk')
        self.save_data(disp, 'disp', offsets)
class Global3DeFDR(HiC3DeFDR):
    """Variant of HiC3DeFDR that fits a single global dispersion per
    condition (no distance dependence)."""

    def estimate_disp(self, estimator='qcml', frac=None, auto_frac_factor=15.,
                      weighted_lowess=True, n_threads=-1):
        # note: all kwargs except estimator are ignored
        eprint('estimating dispersion')
        # resolve a string estimator name against the dispersion module,
        # otherwise assume a callable was passed directly
        estimator = dispersion.__dict__[estimator] \
            if estimator in dispersion.__dict__ else estimator
        eprint(' loading data')
        disp_idx, disp_idx_offsets = self.load_data('disp_idx', 'all')
        loop_idx, _ = self.load_data('loop_idx', 'all')
        row, offsets = self.load_data('row', 'all', idx=disp_idx)
        col, _ = self.load_data('col', 'all', idx=disp_idx)
        raw, _ = self.load_data('raw', 'all', idx=disp_idx)
        # assemble the combined correction factor (bias product times
        # size factors) chromosome by chromosome
        f = np.ones_like(raw, dtype=float)
        for i, chrom in enumerate(self.chroms):
            chrom_slice = slice(offsets[i], offsets[i+1])
            row_chrom = row[chrom_slice]
            col_chrom = col[chrom_slice]
            disp_idx_chrom = disp_idx[disp_idx_offsets[i]:disp_idx_offsets[i+1]]
            bias = self.load_bias(chrom)
            size_factors = self.load_data('size_factors', chrom)[disp_idx_chrom]
            f[chrom_slice] = bias[row_chrom, :] * bias[col_chrom, :] \
                * size_factors
        disp = np.zeros((disp_idx.sum(), self.design.shape[1]))
        disp_per_dist = np.zeros((self.dist_thresh_max+1, self.design.shape[1]))
        for c, cond in enumerate(self.design.columns):
            eprint(' estimating dispersion for condition %s' % cond)
            # one scalar dispersion per condition, fitted on loop pixels
            global_disp = estimator(raw[loop_idx, :][:, self.design[cond]],
                                    f=f[loop_idx, :][:, self.design[cond]])
            disp[:, c] = global_disp
            disp_per_dist[:, c] = global_disp

            def disp_fn(mean):
                # NOTE(review): closes over global_disp (late binding);
                # relies on save_disp_fn capturing the value immediately
                # in each iteration — confirm.
                return np.ones_like(mean) * global_disp
            self.save_disp_fn(cond, disp_fn)
        eprint(' saving estimated dispersions to disk')
        self.save_data(disp, 'disp', offsets)
        self.save_data(disp_per_dist, 'disp_per_dist')
| StarcoderdataPython |
228653 | <filename>setup.py
import setuptools
# Use the README as the long description shown on the PyPI project page.
with open('README.md', mode='r') as fh:
    long_description = fh.read()

setuptools.setup(
    name='dicom-factory',
    version='0.0.4',
    author='<NAME>',
    author_email='<EMAIL>',
    description='DICOM data generator for (mainly) testing purposes',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/jdecid/DICOM-Factory',
    # automatically pick up all packages in the repository
    packages=setuptools.find_packages(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    # NOTE(review): '>3' excludes Python 3.0 itself; '>=3' is probably
    # what was intended — confirm before changing.
    python_requires='>3'
)
| StarcoderdataPython |
5163194 | <reponame>pauliacomi/adsutils
"""Define and perform conversions between different units used."""
from pygaps.utilities.exceptions import ParameterError
_MOLAR_UNITS = {
"mmol": 0.001,
"mol": 1,
"kmol": 1000,
"cm3(STP)": 4.461e-5,
"ml(STP)": 4.461e-5,
}
_MASS_UNITS = {
'amu': 1.66054e-27,
'mg': 0.001,
'cg': 0.01,
'dg': 0.1,
'g': 1,
'kg': 1000,
}
_VOLUME_UNITS = {
'cm3': 1,
'mL': 1,
'dm3': 1e3,
'L': 1e3,
'm3': 1e6,
}
_PRESSURE_UNITS = {
"Pa": 1,
"kPa": 1000,
"MPa": 1000000,
"mbar": 100,
"bar": 100000,
"atm": 101325,
"mmHg": 133.322,
"torr": 133.322,
}
_TEMPERATURE_UNITS = {
"K": -273.15,
"°C": 273.15,
}
def _check_unit(unit, units, utype):
    """Validate that ``unit`` is a known unit of kind ``utype``.

    Raises ParameterError when no unit is given or when the unit is not
    one of the keys of ``units``.
    """
    if not unit:
        raise ParameterError("Specify units to convert.")
    if unit in units:
        return
    viable = list(units.keys())
    raise ParameterError(
        f"Unit selected for {utype} ({unit}) is not an option. "
        f"Viable units are {viable}"
    )
def c_unit(unit_list, value, unit_from, unit_to, sign=1):
    """
    Convert units based on their proportions in a dictionary.

    Parameters
    ----------
    unit_list : dict
        The dictionary with the units and their relationship.
    value : float
        The value to convert.
    unit_from : str
        Unit from which to convert.
    unit_to : str
        Unit to which to convert.
    sign : int
        1 for a direct conversion, -1 for the inverse conversion.

    Returns
    -------
    float
        Value converted as requested.

    Raises
    ------
    ``ParameterError``
        If the unit selected is not an option.

    """
    _check_unit(unit_to, unit_list, 'conversion')
    _check_unit(unit_from, unit_list, 'conversion')

    return value * \
        (unit_list[unit_from] / unit_list[unit_to]) ** sign
| StarcoderdataPython |
4827278 | #! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Add RTP task jobid entry to the M&C database with a start_time of "now".
This script can be used for either single obsid tasks or multiple obsid tasks. For
multiple obsid tasks, pass the list of obsids to the `--obsid_list` parameter.
This script adds entries with a start_time of "now" meaning that the job was just
started. To update the table with past times, use the appropriate methods on the
MCSession object.
"""
from astropy.time import Time
import hera_mc.mc as mc
import hera_mc.utils as mcutils
if __name__ == "__main__":
    parser = mc.get_mc_argument_parser()
    parser.add_argument(
        "filename",
        type=str,
        help=(
            "file processed by RTP corresponding to obsid, or obsid_start for "
            "multiple obsid tasks."
        ),
    )
    parser.add_argument(
        "task_name", type=str, help="RTP task name"
    )
    parser.add_argument(
        "job_id", type=int, help="Slurm Job ID of the RTP task."
    )
    parser.add_argument(
        "--file_list",
        dest="file_list",
        nargs='+',
        type=str,
        default=None,
        help="List of files included in this task, only used for multiple obsid tasks. "
        "Will add entries to the `rtp_task_multiple_track` and "
        "`rtp_task_multiple_resource_record` tables rather than to the "
        "`rtp_task_jobid` table."
    )
    args = parser.parse_args()

    # extract obsid from input file
    obsid = mcutils.get_obsid_from_file(args.filename)

    if args.file_list is not None:
        # extract obsid for each file
        obsid_list = []
        for filename in args.file_list:
            oid = mcutils.get_obsid_from_file(filename)
            obsid_list.append(oid)

    db = mc.connect_to_mc_db(args)
    with db.sessionmaker() as session:
        if args.file_list is not None:
            for oid in obsid_list:
                # check to see if this has already been added
                rows = session.get_rtp_task_multiple_track(
                    obsid_start=obsid, task_name=args.task_name, obsid=oid
                )
                if len(rows) == 0:
                    # add the mapping
                    session.add_rtp_task_multiple_track(
                        obsid_start=obsid,
                        task_name=args.task_name,
                        obsid=oid,
                    )
            # record the jobid once for the whole multiple-obsid task
            session.add_rtp_task_multiple_jobid(
                obsid_start=obsid,
                task_name=args.task_name,
                start_time=Time.now(),
                job_id=args.job_id,
            )
        else:
            # single-obsid task: one entry in rtp_task_jobid
            session.add_rtp_task_jobid(
                obsid=obsid,
                task_name=args.task_name,
                start_time=Time.now(),
                job_id=args.job_id,
            )
| StarcoderdataPython |
149555 | from enum import Enum
"""
AUTOR: <NAME>
"""
class EnvironmentMetric(Enum):
    """Enumeration of possible environment metrics in the cell matrix"""
    # Distance metrics used when measuring separation between cells.
    EUCLIDEAN = 'Euclidean'
    MANHATTAN = 'Manhattan'
| StarcoderdataPython |
11276327 | <reponame>shrev/mydig-webservice-new
# Memex cluster oozie url - http://10.1.94.54:11000/oozie
import requests
class OozieJobs(object):
    """Thin client for submitting and managing Oozie workflow jobs via
    the Oozie REST API."""

    def __init__(self, oozie_url='https://oozie.memexproxy.com/'):
        """
        Args:
            oozie_url (str): base URL of the Oozie server.
        """
        self.oozie_url = oozie_url

    def submit_oozie_jobs(self, property_dict):
        """Submit and immediately start a workflow job.

        Args:
            property_dict (dict): job configuration property names/values.

        Returns:
            requests.Response: the Oozie server response.
        """
        # rstrip avoids a double slash when oozie_url ends with '/'
        oozie_url = self.oozie_url.rstrip('/') + "/v1/jobs?action=start"
        headers = {'Content-Type': 'application/xml'}
        payload = OozieJobs.create_worfklow_xml(property_dict)
        print(payload)
        response = requests.post(oozie_url, data=payload, headers=headers)
        return response

    def manage_job(self, job_id, action):
        """Change the state of an existing job.

        Args:
            job_id (str): Oozie job id.
            action (str): one of 'start', 'suspend', 'resume', 'kill'
                and 'rerun'.

        Returns:
            requests.Response: the Oozie server response.
        """
        oozie_url = '{}/v1/job/{}?action={}'.format(
            self.oozie_url.rstrip('/'), job_id, action)
        response = requests.put(oozie_url)
        return response

    @staticmethod
    def append_property_toXML(XML, name, value):
        """Append a <property> element for (name, value) to the XML string.

        NOTE: name/value are not XML-escaped; callers must not pass
        markup characters.
        """
        XML += "<property><name>{}</name><value>{}</value></property>".format(name, value)
        return XML

    @staticmethod
    def create_worfklow_xml(property_dict):
        """Build an Oozie job configuration XML document from a dict.

        The method name keeps its historical spelling for backward
        compatibility; new code should use ``create_workflow_xml``.
        """
        payload = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><configuration>"
        for name, value in property_dict.items():
            payload = OozieJobs.append_property_toXML(payload, name, value)
        payload += "</configuration>"
        return payload

    # Correctly-spelled alias for new callers.
    create_workflow_xml = create_worfklow_xml
if __name__ == '__main__':
    # Example/manual test: submit an image-processing workflow for a
    # single day on the Memex cluster.
    property_dict = dict()
    property_dict["user.name"] = "skaraman"
    property_dict["oozie.wf.application.path"] = "<hdfs_path>"
    property_dict["jobTracker"] = "memex-rm.xdata.data-tactics-corp.com:8032"
    property_dict["nameNode"] = "hdfs://memex"
    property_dict["DAYTOPROCESS"] = "2017-04-02"
    property_dict["TABLE_SHA1"] = "escorts_images_sha1_infos_ext_dev"
    property_dict["TABLE_UPDATE"] = "escorts_images_updates_dev"
    property_dict["ES_DOMAIN"] = "escorts"
    oj = OozieJobs()
    oj.submit_oozie_jobs(property_dict)
"""
Sample config.xml
<configuration>
<property>
<name>user.name</name>
<value>rkanter</value>
</property>
<property>
<name>oozie.wf.application.path</name>
<value>${nameNode}/user/${user.name}/${examplesRoot}/apps/no-op</value>
</property>
<property>
<name>queueName</name>
<value>default</value>
</property>
<property>
<name>nameNode</name>
<value>hdfs://localhost:8020</value>
</property>
<property>
<name>jobTracker</name>
<value>localhost:8021</value>
</property>
<property>
<name>examplesRoot</name>
<value>examples</value>
</property>
</configuration>
"""
| StarcoderdataPython |
1858415 | <filename>invest_natcap/sdr/sdr.py
"""InVEST Sediment Delivery Ratio (SDR) module"""
import os
import csv
import logging
from osgeo import gdal
from osgeo import ogr
import numpy
import pygeoprocessing.geoprocessing
import pygeoprocessing.routing
import pygeoprocessing.routing.routing_core
logging.basicConfig(format='%(asctime)s %(name)-20s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.sdr.sdr')
def execute(args):
"""This function invokes the SDR model given
URI inputs of files. It may write log, warning, or error messages to
stdout.
args - a python dictionary with at the following possible entries:
args['workspace_dir'] - a uri to the directory that will write output
and other temporary files during calculation. (required)
args['results_suffix'] - a string to append to any output file name (optional)
args['dem_uri'] - a uri to a digital elevation raster file (required)
args['erosivity_uri'] - a uri to an input raster describing the
rainfall eroisivity index (required)
args['erodibility_uri'] - a uri to an input raster describing soil
erodibility (required)
args['lulc_uri'] - a uri to a land use/land cover raster whose
LULC indexes correspond to indexs in the biophysical table input.
Used for determining soil retention and other biophysical
properties of the landscape. (required)
args['watersheds_uri'] - a uri to an input shapefile of the watersheds
of interest as polygons. (required)
args['biophysical_table_uri'] - a uri to an input CSV file with
biophysical information about each of the land use classes.
args['threshold_flow_accumulation'] - an integer describing the number
of upstream cells that must flow int a cell before it's considered
part of a stream. required if 'stream_uri' is not provided.
args['k_param'] - k calibration parameter (see user's guide for values)
args['sdr_max'] - the max value the SDR can be
args['ic_0_param'] - ic_0 calibration parameter (see user's guide for
values)
args['drainage_uri'] - An optional GIS raster dataset mask, that
indicates areas that drain to the watershed. Format is that 1's
indicate drainage areas and 0's or nodata indicate areas with no
additional drainage. This model is most accurate when the drainage
raster aligns with the DEM.
args['_prepare'] - (optional) The preprocessed set of data created by the
sdr._prepare call. This argument could be used in cases where the
call to this function is scripted and can save a significant amount
of runtime.
returns nothing."""
#append a _ to the suffix if it's not empty and doens't already have one
try:
file_suffix = args['results_suffix']
if file_suffix != "" and not file_suffix.startswith('_'):
file_suffix = '_' + file_suffix
except KeyError:
file_suffix = ''
csv_dict_reader = csv.DictReader(open(args['biophysical_table_uri'], 'rU'))
biophysical_table = {}
for row in csv_dict_reader:
biophysical_table[int(row['lucode'])] = row
#Test to see if c or p values are outside of 0..1
for table_key in ['usle_c', 'usle_p']:
for (lulc_code, table) in biophysical_table.iteritems():
try:
float_value = float(table[table_key])
if float_value < 0 or float_value > 1:
raise Exception(
'Value should be within range 0..1 offending value '
'table %s, lulc_code %s, value %s' % (
table_key, str(lulc_code), str(float_value)))
except ValueError as e:
raise Exception(
'Value is not a floating point value within range 0..1 '
'offending value table %s, lulc_code %s, value %s' % (
table_key, str(lulc_code), table[table_key]))
intermediate_dir = os.path.join(args['workspace_dir'], 'intermediate')
output_dir = os.path.join(args['workspace_dir'], 'output')
#Sets up the intermediate and output directory structure for the workspace
pygeoprocessing.geoprocessing.create_directories([output_dir, intermediate_dir])
#check if we've already prepared the DEM
if '_prepare' in args:
preprocessed_data = args['_prepare']
else:
preprocessed_data = _prepare(**args)
aligned_dem_uri = preprocessed_data['aligned_dem_uri']
aligned_erosivity_uri = preprocessed_data['aligned_erosivity_uri']
aligned_erodibility_uri = preprocessed_data['aligned_erodibility_uri']
thresholded_slope_uri = preprocessed_data['thresholded_slope_uri']
flow_accumulation_uri = preprocessed_data['flow_accumulation_uri']
flow_direction_uri = preprocessed_data['flow_direction_uri']
ls_uri = preprocessed_data['ls_uri']
#this section is to align the lulc with the prepared data, we need to make
#a garbage tempoary dem to conform to the align_dataset_list API that
#requires as many outputs as inputs
aligned_lulc_uri = os.path.join(intermediate_dir, 'aligned_lulc.tif')
out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
preprocessed_data['aligned_dem_uri'])
tmp_dem_uri = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.align_dataset_list(
[aligned_dem_uri, args['lulc_uri']], [tmp_dem_uri, aligned_lulc_uri],
['nearest'] * 2, out_pixel_size, 'dataset',
0, dataset_to_bound_index=0, aoi_uri=args['watersheds_uri'])
os.remove(tmp_dem_uri)
#classify streams from the flow accumulation raster
LOGGER.info("Classifying streams from flow accumulation raster")
stream_uri = os.path.join(intermediate_dir, 'stream%s.tif' % file_suffix)
pygeoprocessing.routing.stream_threshold(flow_accumulation_uri,
float(args['threshold_flow_accumulation']), stream_uri)
stream_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(stream_uri)
dem_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(args['dem_uri'])
if 'drainage_uri' in args and args['drainage_uri'] != '':
def add_drainage(stream, drainage):
return numpy.where(drainage == 1, 1, stream)
stream_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(stream_uri)
#add additional drainage to the stream
drainage_uri = os.path.join(output_dir, 'drainage%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.vectorize_datasets(
[stream_uri, args['drainage_uri']], add_drainage, drainage_uri,
gdal.GDT_Byte, stream_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
stream_uri = drainage_uri
#Calculate the W factor
LOGGER.info('calculate per pixel W')
original_w_factor_uri = os.path.join(
intermediate_dir, 'w_factor%s.tif' % file_suffix)
thresholded_w_factor_uri = os.path.join(
intermediate_dir, 'thresholded_w_factor%s.tif' % file_suffix)
#map lulc to biophysical table
lulc_to_c = dict(
[(lulc_code, float(table['usle_c'])) for
(lulc_code, table) in biophysical_table.items()])
lulc_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(aligned_lulc_uri)
w_nodata = -1.0
pygeoprocessing.geoprocessing.reclassify_dataset_uri(
aligned_lulc_uri, lulc_to_c, original_w_factor_uri, gdal.GDT_Float64,
w_nodata, exception_flag='values_required')
def threshold_w(w_val):
'''Threshold w to 0.001'''
w_val_copy = w_val.copy()
nodata_mask = w_val == w_nodata
w_val_copy[w_val < 0.001] = 0.001
w_val_copy[nodata_mask] = w_nodata
return w_val_copy
pygeoprocessing.geoprocessing.vectorize_datasets(
[original_w_factor_uri], threshold_w, thresholded_w_factor_uri,
gdal.GDT_Float64, w_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
cp_factor_uri = os.path.join(
intermediate_dir, 'cp_factor%s.tif' % file_suffix)
lulc_to_cp = dict(
[(lulc_code, float(table['usle_c']) * float(table['usle_p'])) for
(lulc_code, table) in biophysical_table.items()])
cp_nodata = -1.0
pygeoprocessing.geoprocessing.reclassify_dataset_uri(
aligned_lulc_uri, lulc_to_cp, cp_factor_uri, gdal.GDT_Float64,
cp_nodata, exception_flag='values_required')
LOGGER.info('calculating rkls')
rkls_uri = os.path.join(output_dir, 'rkls%s.tif' % file_suffix)
calculate_rkls(
ls_uri, aligned_erosivity_uri, aligned_erodibility_uri,
stream_uri, rkls_uri)
LOGGER.info('calculating USLE')
usle_uri = os.path.join(output_dir, 'usle%s.tif' % file_suffix)
nodata_rkls = pygeoprocessing.geoprocessing.get_nodata_from_uri(rkls_uri)
nodata_cp = pygeoprocessing.geoprocessing.get_nodata_from_uri(cp_factor_uri)
nodata_usle = -1.0
def mult_rkls_cp(rkls, cp_factor, stream):
return numpy.where((rkls == nodata_rkls) | (cp_factor == nodata_cp),
nodata_usle, rkls * cp_factor * (1 - stream))
pygeoprocessing.geoprocessing.vectorize_datasets(
[rkls_uri, cp_factor_uri, stream_uri], mult_rkls_cp, usle_uri,
gdal.GDT_Float64, nodata_usle, out_pixel_size, "intersection",
dataset_to_align_index=0, aoi_uri=args['watersheds_uri'],
vectorize_op=False)
#calculate W_bar
zero_absorption_source_uri = pygeoprocessing.geoprocessing.temporary_filename()
loss_uri = pygeoprocessing.geoprocessing.temporary_filename()
#need this for low level route_flux function
pygeoprocessing.geoprocessing.make_constant_raster_from_base_uri(
aligned_dem_uri, 0.0, zero_absorption_source_uri)
flow_accumulation_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
flow_accumulation_uri)
w_accumulation_uri = os.path.join(
intermediate_dir, 'w_accumulation%s.tif' % file_suffix)
s_accumulation_uri = os.path.join(
intermediate_dir, 's_accumulation%s.tif' % file_suffix)
for factor_uri, accumulation_uri in [
(thresholded_w_factor_uri, w_accumulation_uri),
(thresholded_slope_uri, s_accumulation_uri)]:
LOGGER.info("calculating %s", accumulation_uri)
pygeoprocessing.routing.route_flux(
flow_direction_uri, aligned_dem_uri, factor_uri,
zero_absorption_source_uri, loss_uri, accumulation_uri, 'flux_only',
aoi_uri=args['watersheds_uri'])
LOGGER.info("calculating w_bar")
w_bar_uri = os.path.join(intermediate_dir, 'w_bar%s.tif' % file_suffix)
w_bar_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(w_accumulation_uri)
s_bar_uri = os.path.join(intermediate_dir, 's_bar%s.tif' % file_suffix)
s_bar_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(s_accumulation_uri)
for bar_nodata, accumulation_uri, bar_uri in [
(w_bar_nodata, w_accumulation_uri, w_bar_uri),
(s_bar_nodata, s_accumulation_uri, s_bar_uri)]:
LOGGER.info("calculating %s", accumulation_uri)
def bar_op(base_accumulation, flow_accumulation):
return numpy.where(
(base_accumulation != bar_nodata) & (flow_accumulation != flow_accumulation_nodata),
base_accumulation / flow_accumulation, bar_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[accumulation_uri, flow_accumulation_uri], bar_op, bar_uri,
gdal.GDT_Float32, bar_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculating d_up')
d_up_uri = os.path.join(intermediate_dir, 'd_up%s.tif' % file_suffix)
cell_area = out_pixel_size ** 2
d_up_nodata = -1.0
def d_up(w_bar, s_bar, flow_accumulation):
"""Calculate the d_up index
w_bar * s_bar * sqrt(upstream area) """
d_up_array = w_bar * s_bar * numpy.sqrt(flow_accumulation * cell_area)
return numpy.where(
(w_bar != w_bar_nodata) & (s_bar != s_bar_nodata) &
(flow_accumulation != flow_accumulation_nodata), d_up_array,
d_up_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[w_bar_uri, s_bar_uri, flow_accumulation_uri], d_up, d_up_uri,
gdal.GDT_Float32, d_up_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculate WS factor')
ws_factor_inverse_uri = os.path.join(
intermediate_dir, 'ws_factor_inverse%s.tif' % file_suffix)
ws_nodata = -1.0
slope_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
preprocessed_data['thresholded_slope_uri'])
def ws_op(w_factor, s_factor):
#calculating the inverse so we can use the distance to stream factor function
return numpy.where(
(w_factor != w_nodata) & (s_factor != slope_nodata),
1.0 / (w_factor * s_factor), ws_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[thresholded_w_factor_uri, thresholded_slope_uri], ws_op, ws_factor_inverse_uri,
gdal.GDT_Float32, ws_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculating d_dn')
d_dn_uri = os.path.join(intermediate_dir, 'd_dn%s.tif' % file_suffix)
pygeoprocessing.routing.routing_core.distance_to_stream(
flow_direction_uri, stream_uri, d_dn_uri, factor_uri=ws_factor_inverse_uri)
LOGGER.info('calculate ic')
ic_factor_uri = os.path.join(intermediate_dir, 'ic_factor%s.tif' % file_suffix)
ic_nodata = -9999.0
d_up_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(d_up_uri)
d_dn_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(d_dn_uri)
def ic_op(d_up, d_dn):
nodata_mask = (d_up == d_up_nodata) | (d_dn == d_dn_nodata)
return numpy.where(
nodata_mask, ic_nodata, numpy.log10(d_up/d_dn))
pygeoprocessing.geoprocessing.vectorize_datasets(
[d_up_uri, d_dn_uri], ic_op, ic_factor_uri,
gdal.GDT_Float32, ic_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculate sdr')
sdr_factor_uri = os.path.join(intermediate_dir, 'sdr_factor%s.tif' % file_suffix)
sdr_nodata = -9999.0
k = float(args['k_param'])
ic_0 = float(args['ic_0_param'])
sdr_max = float(args['sdr_max'])
def sdr_op(ic_factor, stream):
nodata_mask = (ic_factor == ic_nodata)
sdr = numpy.where(
nodata_mask, sdr_nodata, sdr_max/(1+numpy.exp((ic_0-ic_factor)/k)))
#mask out the stream layer
return numpy.where(stream == 1, 0.0, sdr)
pygeoprocessing.geoprocessing.vectorize_datasets(
[ic_factor_uri, stream_uri], sdr_op, sdr_factor_uri,
gdal.GDT_Float32, sdr_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculate sed export')
sed_export_uri = os.path.join(output_dir, 'sed_export%s.tif' % file_suffix)
sed_export_nodata = -1.0
def sed_export_op(usle, sdr):
nodata_mask = (usle == nodata_usle) | (sdr == sdr_nodata)
return numpy.where(
nodata_mask, sed_export_nodata, usle * sdr)
pygeoprocessing.geoprocessing.vectorize_datasets(
[usle_uri, sdr_factor_uri], sed_export_op, sed_export_uri,
gdal.GDT_Float32, sed_export_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculate sediment retention index')
def sediment_index_op(rkls, usle, sdr_factor):
nodata_mask = (rkls == nodata_rkls) | (usle == nodata_usle) | (sdr_factor == sdr_nodata)
return numpy.where(
nodata_mask, nodata_sed_retention_index, (rkls - usle) * sdr_factor / sdr_max)
nodata_sed_retention_index = -1
sed_retention_index_uri = os.path.join(
output_dir, 'sed_retention_index%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.vectorize_datasets(
[rkls_uri, usle_uri, sdr_factor_uri], sediment_index_op, sed_retention_index_uri,
gdal.GDT_Float32, nodata_sed_retention_index, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('calculate sediment retention')
d_up_bare_soil_uri = os.path.join(intermediate_dir, 'd_up_bare_soil%s.tif' % file_suffix)
d_up_nodata = -1.0
def d_up_bare_soil_op(s_bar, flow_accumulation):
"""Calculate the d_up index for bare soil
1.0 * s_bar * sqrt(upstream area) """
d_up_array = s_bar * numpy.sqrt(flow_accumulation * cell_area)
return numpy.where(
(s_bar != s_bar_nodata) &
(flow_accumulation != flow_accumulation_nodata), d_up_array,
d_up_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[s_bar_uri, flow_accumulation_uri], d_up_bare_soil_op, d_up_bare_soil_uri,
gdal.GDT_Float32, d_up_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
#when calculating d_dn_bare the c factors are all 1,
#so we invert just s, then accumulate it downstream
s_factor_inverse_uri = os.path.join(
intermediate_dir, 's_factor_inverse%s.tif' % file_suffix)
s_nodata = -1.0
def s_op(s_factor):
#calculating the inverse so we can use the distance to stream factor function
return numpy.where(s_factor != slope_nodata, 1.0 / s_factor, s_nodata)
pygeoprocessing.geoprocessing.vectorize_datasets(
[thresholded_slope_uri], s_op, s_factor_inverse_uri,
gdal.GDT_Float32, s_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
d_dn_bare_soil_uri = os.path.join(intermediate_dir, 'd_dn_bare_soil%s.tif' % file_suffix)
d_up_nodata = -1.0
pygeoprocessing.routing.routing_core.distance_to_stream(
flow_direction_uri, stream_uri, d_dn_bare_soil_uri, factor_uri=s_factor_inverse_uri)
ic_factor_bare_soil_uri = os.path.join(
intermediate_dir, 'ic_factor_bare_soil%s.tif' % file_suffix)
ic_bare_soil_nodata = -9999.0
def ic_bare_soil_op(d_up_bare_soil, d_dn_bare_soil):
nodata_mask = (d_up_bare_soil == d_up_nodata) | (d_dn_bare_soil == d_dn_nodata)
return numpy.where(
nodata_mask, ic_nodata, numpy.log10(d_up_bare_soil/d_dn_bare_soil))
pygeoprocessing.geoprocessing.vectorize_datasets(
[d_up_bare_soil_uri, d_dn_bare_soil_uri], ic_bare_soil_op, ic_factor_bare_soil_uri,
gdal.GDT_Float32, ic_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
sdr_factor_bare_soil_uri = os.path.join(intermediate_dir, 'sdr_factor_bare_soil%s.tif' % file_suffix)
def sdr_bare_soil_op(ic_bare_soil_factor, stream):
nodata_mask = (ic_bare_soil_factor == ic_nodata)
sdr_bare_soil = numpy.where(
nodata_mask, sdr_nodata, sdr_max/(1+numpy.exp((ic_0-ic_bare_soil_factor)/k)))
#mask out the stream layer
return numpy.where(stream == 1, 0.0, sdr_bare_soil)
pygeoprocessing.geoprocessing.vectorize_datasets(
[ic_factor_bare_soil_uri, stream_uri], sdr_bare_soil_op, sdr_factor_bare_soil_uri,
gdal.GDT_Float32, sdr_nodata, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
def sediment_retention_bare_soil_op(rkls, usle, stream_factor, sdr_factor, sdr_factor_bare_soil):
nodata_mask = (
(rkls == nodata_rkls) | (usle == nodata_usle) |
(stream_factor == stream_nodata) | (sdr_factor == sdr_nodata) |
(sdr_factor_bare_soil == sdr_nodata))
return numpy.where(
nodata_mask, nodata_sediment_retention,
(rkls * sdr_factor_bare_soil - usle * sdr_factor) * (1 - stream_factor))
nodata_sediment_retention = -1
sed_retention_bare_soil_uri = os.path.join(
intermediate_dir, 'sed_retention%s.tif' % file_suffix)
pygeoprocessing.geoprocessing.vectorize_datasets(
[rkls_uri, usle_uri, stream_uri, sdr_factor_uri, sdr_factor_bare_soil_uri],
sediment_retention_bare_soil_op, sed_retention_bare_soil_uri,
gdal.GDT_Float32, nodata_sediment_retention, out_pixel_size, "intersection",
dataset_to_align_index=0, vectorize_op=False)
LOGGER.info('generating report')
esri_driver = ogr.GetDriverByName('ESRI Shapefile')
field_summaries = {
'usle_tot': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(usle_uri, args['watersheds_uri'], 'ws_id').total,
'sed_export': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(sed_export_uri, args['watersheds_uri'], 'ws_id').total,
'sed_retent': pygeoprocessing.geoprocessing.aggregate_raster_values_uri(sed_retention_bare_soil_uri, args['watersheds_uri'], 'ws_id').total,
}
original_datasource = ogr.Open(args['watersheds_uri'])
watershed_output_datasource_uri = os.path.join(output_dir, 'watershed_results_sdr%s.shp' % file_suffix)
#If there is already an existing shapefile with the same name and path, delete it
#Copy the input shapefile into the designated output folder
if os.path.isfile(watershed_output_datasource_uri):
os.remove(watershed_output_datasource_uri)
datasource_copy = esri_driver.CopyDataSource(original_datasource, watershed_output_datasource_uri)
layer = datasource_copy.GetLayer()
for field_name in field_summaries:
field_def = ogr.FieldDefn(field_name, ogr.OFTReal)
layer.CreateField(field_def)
#Initialize each feature field to 0.0
for feature_id in xrange(layer.GetFeatureCount()):
feature = layer.GetFeature(feature_id)
for field_name in field_summaries:
try:
ws_id = feature.GetFieldAsInteger('ws_id')
feature.SetField(field_name, float(field_summaries[field_name][ws_id]))
except KeyError:
LOGGER.warning('unknown field %s' % field_name)
feature.SetField(field_name, 0.0)
#Save back to datasource
layer.SetFeature(feature)
original_datasource.Destroy()
datasource_copy.Destroy()
for ds_uri in [zero_absorption_source_uri, loss_uri]:
try:
os.remove(ds_uri)
except OSError as e:
LOGGER.warn("couldn't remove %s because it's still open", ds_uri)
LOGGER.warn(e)
def calculate_ls_factor(
        flow_accumulation_uri, slope_uri, aspect_uri, ls_factor_uri, ls_nodata):
    """Calculate the length-slope (LS) factor raster.

    Implements Equation 3 from "Extension and validation of a geographic
    information system-based method for calculating the Revised Universal
    Soil Loss Equation length-slope factor for erosion risk assessments in
    large watersheds".  All raster inputs must have the same dimensions and
    projection and have square cells.

    Parameters:
        flow_accumulation_uri - uri to a single band raster of type float
            that indicates the contributing area at the inlet of a grid cell
        slope_uri - uri to a single band raster of type float that indicates
            the slope at a pixel given as a percent
        aspect_uri - uri to a single band raster of type float that indicates
            the direction that slopes are facing in terms of radians east and
            increase clockwise: pi/2 is north, pi is west, 3pi/2 south and
            0 or 2pi is east
        ls_factor_uri - (output) path where the LS raster will be written
        ls_nodata - nodata value used for the output rasters

    Side effects: debug rasters for each term of the equation (m, beta,
    slope factor, xi) are also written into the directory containing
    ls_factor_uri.

    returns nothing"""
    flow_accumulation_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
        flow_accumulation_uri)
    slope_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(slope_uri)
    aspect_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(aspect_uri)
    #Assumes that cells are square
    cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        flow_accumulation_uri)
    cell_area = cell_size ** 2

    #m exponent lookup: Table 1 in
    #"InVEST Sediment Model_modifications_10-01-2012_RS.docx" in the
    #FT Team dropbox.  Slope thresholds are in percent.
    slope_table = [1., 3.5, 5., 9.]
    exponent_table = [0.2, 0.3, 0.4, 0.5]

    def _m_exponent(percent_slope, slope_in_radians):
        """Return the per-pixel m exponent from the slope lookup table;
        slopes above the last table entry keep beta/(1+beta)."""
        beta = ((numpy.sin(slope_in_radians) / 0.0896) /
                (3 * numpy.sin(slope_in_radians)**0.8 + 0.56))
        m_exp = beta / (1 + beta)
        #BUGFIX: iterate from the largest threshold down so every pixel ends
        #up with the exponent of the *smallest* matching slope class.  The
        #previous ascending loop let the 9% bucket overwrite all smaller
        #classes, assigning m=0.5 to every slope <= 9%.
        for i in reversed(range(4)):
            m_exp[percent_slope <= slope_table[i]] = exponent_table[i]
        return m_exp

    def ls_factor_function(aspect_angle, percent_slope, flow_accumulation):
        """Calculate the ls factor for a block of pixels.

        aspect_angle - flow direction in radians
        percent_slope - slope in terms of percent
        flow_accumulation - upstream pixels at this point

        returns the ls_factor calculation for this point"""
        #Skip the calculation wherever any of the inputs are nodata
        nodata_mask = (
            (aspect_angle == aspect_nodata) | (percent_slope == slope_nodata) |
            (flow_accumulation == flow_accumulation_nodata))
        #The aspect direction can range from 0 to 2PI, but this term only
        #measures the length of the flow path across the pixel, so take the
        #absolute value of each trigonometric function to keep the
        #computation in the first quadrant
        xij = (numpy.abs(numpy.sin(aspect_angle)) +
               numpy.abs(numpy.cos(aspect_angle)))
        contributing_area = (flow_accumulation - 1) * cell_area
        #Divide percent_slope by 100 to convert the percent to a ratio
        #before taking the arctangent
        slope_in_radians = numpy.arctan(percent_slope / 100.0)
        #From Equation 4 in "Extension and validation of a geographic
        #information system ..."
        slope_factor = numpy.where(
            percent_slope < 9.0,
            10.8 * numpy.sin(slope_in_radians) + 0.03,
            16.8 * numpy.sin(slope_in_radians) - 0.5)
        m_exp = _m_exponent(percent_slope, slope_in_radians)
        #The length part of the ls_factor:
        l_factor = (
            ((contributing_area + cell_area)**(m_exp + 1) -
             contributing_area ** (m_exp + 1)) /
            ((cell_size ** (m_exp + 2)) * (xij**m_exp) * (22.13**m_exp)))
        #From the McCool paper "as a final check against excessively long
        #slope length calculations ... cap of 333m"
        l_factor[l_factor > 333] = 333
        #This is the ls_factor
        return numpy.where(nodata_mask, ls_nodata, l_factor * slope_factor)

    #Call vectorize datasets to calculate the ls_factor
    dataset_uri_list = [aspect_uri, slope_uri, flow_accumulation_uri]
    pygeoprocessing.geoprocessing.vectorize_datasets(
        dataset_uri_list, ls_factor_function, ls_factor_uri, gdal.GDT_Float32,
        ls_nodata, cell_size, "intersection", dataset_to_align_index=0,
        vectorize_op=False)

    #Debug rasters: each term of the LS equation written alongside the LS
    #output so they can be inspected independently.
    base_directory = os.path.dirname(ls_factor_uri)
    xi_uri = os.path.join(base_directory, "xi.tif")
    s_factor_uri = os.path.join(base_directory, "slope_factor.tif")
    beta_uri = os.path.join(base_directory, "beta.tif")
    m_uri = os.path.join(base_directory, "m.tif")

    def m_op(aspect_angle, percent_slope, flow_accumulation):
        """m exponent per pixel; same lookup as the LS calculation."""
        return _m_exponent(
            percent_slope, numpy.arctan(percent_slope / 100.0))

    def beta_op(aspect_angle, percent_slope, flow_accumulation):
        """beta term used to derive the m exponent."""
        slope_in_radians = numpy.arctan(percent_slope / 100.0)
        return ((numpy.sin(slope_in_radians) / 0.0896) /
                (3 * numpy.sin(slope_in_radians)**0.8 + 0.56))

    def s_factor_op(aspect_angle, percent_slope, flow_accumulation):
        """Slope factor term (Equation 4)."""
        slope_in_radians = numpy.arctan(percent_slope / 100.0)
        return numpy.where(
            percent_slope < 9.0,
            10.8 * numpy.sin(slope_in_radians) + 0.03,
            16.8 * numpy.sin(slope_in_radians) - 0.5)

    def xi_op(aspect_angle, percent_slope, flow_accumulation):
        """Flow path length term across the pixel."""
        return (numpy.abs(numpy.sin(aspect_angle)) +
                numpy.abs(numpy.cos(aspect_angle)))

    for debug_op, debug_uri in [
            (m_op, m_uri), (beta_op, beta_uri),
            (s_factor_op, s_factor_uri), (xi_op, xi_uri)]:
        pygeoprocessing.geoprocessing.vectorize_datasets(
            dataset_uri_list, debug_op, debug_uri, gdal.GDT_Float32,
            ls_nodata, cell_size, "intersection", dataset_to_align_index=0,
            vectorize_op=False)
def calculate_rkls(
        ls_factor_uri, erosivity_uri, erodibility_uri, stream_uri,
        rkls_uri):
    """Compute per-pixel potential soil loss using RKLS (the revised
    universal soil loss equation without the C or P terms).

    ls_factor_uri - GDAL uri with the LS factor pre-calculated
    erosivity_uri - GDAL uri with per pixel erosivity
    erodibility_uri - GDAL uri with per pixel erodibility
    stream_uri - GDAL uri marking stream locations
        (0 is no stream, 1 stream)
    rkls_uri - path on disk for the resulting potential soil loss raster

    returns nothing"""
    _get_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri
    ls_factor_nodata = _get_nodata(ls_factor_uri)
    erosivity_nodata = _get_nodata(erosivity_uri)
    erodibility_nodata = _get_nodata(erodibility_uri)
    stream_nodata = _get_nodata(stream_uri)
    usle_nodata = -1.0
    cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(
        ls_factor_uri)
    #square-meter cell area converted to hectares for the USLE units
    cell_area_ha = cell_size ** 2 / 10000.0

    def rkls_function(ls_factor, erosivity, erodibility, stream):
        """Per-block RKLS: ls_factor * erosivity * erodibility * cell area
        (ha).  Stream pixels get 0 (no potential soil loss); pixels where
        any input is nodata get usle_nodata."""
        invalid = (
            (ls_factor == ls_factor_nodata) |
            (erosivity == erosivity_nodata) |
            (erodibility == erodibility_nodata) |
            (stream == stream_nodata))
        soil_loss = ls_factor * erosivity * erodibility * cell_area_ha
        return numpy.where(
            invalid, usle_nodata,
            numpy.where(stream == 1, 0.0, soil_loss))

    #Align against index 3 (the stream raster) since it is the most likely
    #to already be aligned with the LULC-derived rasters.
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [ls_factor_uri, erosivity_uri, erodibility_uri, stream_uri],
        rkls_function, rkls_uri, gdal.GDT_Float32,
        usle_nodata, cell_size, "intersection", dataset_to_align_index=3,
        vectorize_op=False)
def _prepare(**args):
    """Preprocess the static DEM-derived data that goes into the SDR model.

    These products are unlikely to change when running a batch process, so
    they are computed once into a 'prepared_data' directory under the
    workspace.

    args['dem_uri'] - dem layer
    args['erosivity_uri'] - erosivity data that will be used to align and
        precalculate rkls
    args['erodibility_uri'] - erodibility data that will be used to align
        and precalculate rkls
    args['watersheds_uri'] - watershed polygons used as the area of
        interest when aligning the rasters
    args['workspace_dir'] - output directory for the generated rasters

    return a dictionary with the keys:
        'aligned_dem_uri' - input dem aligned with the rest of the inputs
        'aligned_erosivity_uri' - input erosivity aligned with the inputs
        'aligned_erodibility_uri' - input erodability aligned with the
            inputs
        'thresholded_slope_uri' - slope converted to m/m and clamped to
            [0.005, 1.0]
        'flow_accumulation_uri' - d-infinity flow accumulation
        'flow_direction_uri' - d-infinity flow direction
        'ls_uri' - precalculated LS factor raster
    """
    out_pixel_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(args['dem_uri'])
    intermediate_dir = os.path.join(args['workspace_dir'], 'prepared_data')
    if not os.path.exists(intermediate_dir):
        os.makedirs(intermediate_dir)
    # Tile the DEM (256x256 blocks) before routing.
    tiled_dem_uri = os.path.join(intermediate_dir, 'tiled_dem.tif')
    pygeoprocessing.geoprocessing.tile_dataset_uri(args['dem_uri'], tiled_dem_uri, 256)
    aligned_dem_uri = os.path.join(intermediate_dir, 'aligned_dem.tif')
    aligned_erosivity_uri = os.path.join(
        intermediate_dir, 'aligned_erosivity.tif')
    aligned_erodibility_uri = os.path.join(
        intermediate_dir, 'aligned_erodibility.tif')
    # Align dem/erosivity/erodibility to a common grid, clipped to the
    # watersheds AOI.
    input_list = [tiled_dem_uri, args['erosivity_uri'], args['erodibility_uri']]
    dataset_out_uri_list = [
        aligned_dem_uri, aligned_erosivity_uri, aligned_erodibility_uri]
    pygeoprocessing.geoprocessing.align_dataset_list(
        input_list, dataset_out_uri_list,
        ['nearest'] * len(dataset_out_uri_list), out_pixel_size, 'intersection',
        0, aoi_uri=args['watersheds_uri'])
    #Calculate slope
    LOGGER.info("Calculating slope")
    original_slope_uri = os.path.join(intermediate_dir, 'slope.tif')
    thresholded_slope_uri = os.path.join(
        intermediate_dir, 'thresholded_slope.tif')
    pygeoprocessing.geoprocessing.calculate_slope(aligned_dem_uri, original_slope_uri)
    slope_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(original_slope_uri)
    def threshold_slope(slope):
        """Convert slope from percent to m/m and clamp to [0.005, 1.0] as
        described in Cavalli et al., 2013."""
        slope_copy = slope / 100
        nodata_mask = slope == slope_nodata
        slope_copy[slope_copy < 0.005] = 0.005
        slope_copy[slope_copy > 1.0] = 1.0
        # Restore nodata pixels that the clamping above may have modified.
        slope_copy[nodata_mask] = slope_nodata
        return slope_copy
    pygeoprocessing.geoprocessing.vectorize_datasets(
        [original_slope_uri], threshold_slope, thresholded_slope_uri,
        gdal.GDT_Float64, slope_nodata, out_pixel_size, "intersection",
        dataset_to_align_index=0, vectorize_op=False)
    #Calculate flow accumulation
    LOGGER.info("calculating flow accumulation")
    flow_accumulation_uri = os.path.join(
        intermediate_dir, 'flow_accumulation.tif')
    flow_direction_uri = os.path.join(
        intermediate_dir, 'flow_direction.tif')
    pygeoprocessing.routing.flow_direction_d_inf(aligned_dem_uri, flow_direction_uri)
    pygeoprocessing.routing.flow_accumulation(
        flow_direction_uri, aligned_dem_uri, flow_accumulation_uri)
    #Calculate LS term
    LOGGER.info('calculate ls term')
    ls_uri = os.path.join(intermediate_dir, 'ls.tif')
    ls_nodata = -1.0
    # NOTE(review): the d-infinity flow direction raster is passed as the
    # aspect input and the *un-thresholded* slope (percent) is used here,
    # not the clamped m/m slope above -- presumably intentional; confirm.
    calculate_ls_factor(
        flow_accumulation_uri, original_slope_uri, flow_direction_uri, ls_uri,
        ls_nodata)
    return {
        'aligned_dem_uri': aligned_dem_uri,
        'aligned_erosivity_uri': aligned_erosivity_uri,
        'aligned_erodibility_uri': aligned_erodibility_uri,
        'thresholded_slope_uri': thresholded_slope_uri,
        'flow_accumulation_uri': flow_accumulation_uri,
        'flow_direction_uri': flow_direction_uri,
        'ls_uri': ls_uri,
    }
from rest_framework import serializers
from django.contrib.auth import authenticate
from django.contrib.auth.password_validation import validate_password
from .models import *
from ..jobs.models import *
from ..departments.models import *
class EmployeeRegisterSerializer(serializers.ModelSerializer):
    """Create a new Employee account.

    The account username is supplied via the ``employee_number`` field.
    The initial password is derived from the employee number plus the date
    of birth and is stored hashed via ``set_password`` (never in
    plaintext).
    """
    employee_number = serializers.CharField(source='username')
    job_id = serializers.SlugRelatedField(slug_field='job_id', queryset=Job.objects.all(), allow_null=True)
    supervisor = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=True)
    department_id = serializers.SlugRelatedField(slug_field='department_id', queryset=Department.objects.all(), allow_null=True)

    class Meta:
        model = Employee
        fields = ['employee_id','employee_number', 'sa_no', 'email', 'job_id', 'supervisor', 'department_id', 'department_role', 'company_id', 'date_employed', 'employee_status',
        'salary', 'sick_leave_count', 'vac_leave_count', 'name', 'nickname', 'other_name', 'employee_image', 'civil_status', 'citizenship', 'gender', 'weight', 'height',
        'date_of_birth', 'place_of_birth', 'city_address', 'prov_address', 'tel_no', 'cel_no', 'religion', 'acr_no', 'acr_date', 'dept_labor_no', 'dept_labor_date',
        'tin_no', 'sss_no', 'pagibig_no', 'philhealth_no']

    def validate(self, data):
        """Collect every uniqueness/blank error and report them together.

        Raises serializers.ValidationError with {'errors': [...]} listing
        all problems found, matching the original behavior of accumulating
        messages instead of failing on the first one.
        """
        errors = []
        # Uniqueness checks use .filter(...).exists() rather than .get():
        # .get() raises an *uncaught* MultipleObjectsReturned as soon as
        # duplicates exist (e.g. several employees with a blank sa_no),
        # which the old try/except DoesNotExist pattern did not handle.
        if Employee.objects.filter(username=data['username']).exists():
            errors.append('Employee number is already taken.')
        sa_no = data['sa_no']
        if Employee.objects.filter(sa_no=sa_no).exists():
            errors.append('S.A. number is already taken.')
        elif not sa_no:
            errors.append('S.A. number must not be blank.')
        if not data['date_of_birth']:
            errors.append('Date of Birth must not be blank.')
        email = data['email']
        if Employee.objects.filter(email=email).exists():
            errors.append('Email Address is already taken.')
        elif not email:
            errors.append('Email Address must not be blank.')
        if errors:
            raise serializers.ValidationError({'errors': errors})
        return data

    def create(self, validated_data):
        """Instantiate and persist the employee with a hashed default password.

        The previous implementation copied all ~35 validated keys by hand
        (raising KeyError for any absent optional field) and also passed
        the plaintext password to the model constructor before immediately
        overwriting it with set_password; both are avoided here.
        """
        # Default password: employee number concatenated with date of birth.
        password = validated_data['username'] + str(validated_data['date_of_birth'])
        new_employee = Employee(**validated_data)
        # Hash the password; the raw value is never stored on the model.
        new_employee.set_password(password)
        new_employee.save()
        return new_employee
class EmployeeHRLoginSerializer(serializers.ModelSerializer):
    """Login serializer restricted to employees with the HR department role."""
    employee_number = serializers.CharField(source='username', write_only=True)

    class Meta:
        model = Employee
        fields = ['employee_id', 'employee_number', 'password']
        extra_kwargs = {
            'employee_number': {'write_only': True},
            'password': {'write_only': True},
        }

    def validate(self, data):
        """Authenticate the credentials; only HR accounts may proceed.

        On success, attaches the authenticated instance as data['employee'].
        Guard clauses replace the original nested if/else pyramid; behavior
        (including every error message) is unchanged.
        """
        employee = authenticate(
            username=data['username'], password=data['password'])
        if not employee:
            raise serializers.ValidationError(
                {'error': "Incorrect login credentials."})
        if not employee.is_active:
            raise serializers.ValidationError(
                {'error': "Account is no longer valid."})
        try:
            # Accessing department_role may raise AttributeError when the
            # account has no role assigned yet.
            role = employee.department_role
        except AttributeError:
            raise serializers.ValidationError(
                {'error': "User has no Department role yet."})
        if role != 'HR':
            raise serializers.ValidationError(
                {'error': "Only HR is allowed to login."})
        data['employee'] = employee
        return data
class EmployeeLoginSerializer(serializers.ModelSerializer):
    """Login serializer for any active employee (no role restriction)."""
    employee_number = serializers.CharField(source='username', write_only=True)

    class Meta:
        model = Employee
        fields = ['employee_id', 'employee_number', 'password']
        extra_kwargs = {
            'employee_number': {'write_only': True},
            'password': {'write_only': True},
        }

    def validate(self, data):
        """Authenticate the credentials and attach data['employee'].

        Raises serializers.ValidationError for bad credentials or an
        inactive account.  The original code wrapped the assignment below
        in a try/except AttributeError that could never fire (dict item
        assignment does not raise AttributeError); the dead branch has
        been removed.
        """
        employee = authenticate(
            username=data['username'], password=data['password'])
        if not employee:
            raise serializers.ValidationError(
                {'errors': "Incorrect login credentials."})
        if not employee.is_active:
            raise serializers.ValidationError(
                {'errors': "Account is no longer valid."})
        data['employee'] = employee
        return data
# Minimal serializer used to validate an employee UUID in authentication checks.
class IsAuthenticatedSerializer(serializers.Serializer):
    employee = serializers.UUIDField()
class EmployeeUpdateSerializer(serializers.ModelSerializer):
    """Update serializer for Employee records.

    update() copies every provided field onto the instance; fields absent from
    the payload keep their current value (same semantics as the original
    per-field `validated_data.get(field, current)` assignments, without the
    36-line copy-paste block).
    """
    job_id = serializers.SlugRelatedField(slug_field='job_id', queryset=Job.objects.all(), allow_null=True)
    supervisor = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=True)
    department_id = serializers.SlugRelatedField(slug_field='department_id', queryset=Department.objects.all(), allow_null=True)
    class Meta:
        model = Employee
        fields = ['employee_id', 'email', 'job_id', 'supervisor', 'department_id', 'department_role', 'company_id', 'date_employed', 'employee_status', 'salary',
            'sick_leave_count', 'vac_leave_count', 'name', 'nickname', 'other_name', 'employee_image', 'civil_status', 'citizenship', 'gender',
            'weight', 'height', 'date_of_birth','place_of_birth', 'city_address', 'prov_address', 'tel_no', 'cel_no', 'religion', 'acr_no', 'acr_date',
            'dept_labor_no', 'dept_labor_date', 'tin_no', 'sss_no', 'pagibig_no', 'philhealth_no', 'is_active']
    # Fields update() is allowed to write. 'employee_id' is deliberately
    # excluded: the original update() never reassigned the identifier.
    _UPDATABLE_FIELDS = (
        'email', 'job_id', 'supervisor', 'department_id', 'department_role',
        'company_id', 'date_employed', 'employee_status', 'salary',
        'sick_leave_count', 'vac_leave_count', 'name', 'nickname', 'other_name',
        'employee_image', 'civil_status', 'citizenship', 'gender', 'weight',
        'height', 'date_of_birth', 'place_of_birth', 'city_address',
        'prov_address', 'tel_no', 'cel_no', 'religion', 'acr_no', 'acr_date',
        'dept_labor_no', 'dept_labor_date', 'tin_no', 'sss_no', 'pagibig_no',
        'philhealth_no', 'is_active',
    )
    def update(self, instance, validated_data):
        """Apply the submitted fields to *instance*, save it, and return it."""
        for field in self._UPDATABLE_FIELDS:
            setattr(instance, field, validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
# Compact listing serializer: the subset of Employee fields shown in tables.
class EmployeeViewSerializer(serializers.ModelSerializer):
    # Exposed as "employee_number"; backed by the model's username field.
    employee_number = serializers.CharField(source='username')
    class Meta:
        model = Employee
        fields = ['employee_id', 'employee_number', 'email', 'name', 'cel_no', 'department_id', 'department_role', 'company_id', 'employee_status']
# Detail serializer: full read-only view of one Employee, with related
# objects rendered as their slug values instead of primary keys.
class EmployeeRetrieveSerializer(serializers.ModelSerializer):
    job_id = serializers.SlugRelatedField(slug_field='job_id', read_only=True)
    supervisor = serializers.SlugRelatedField(slug_field='employee_id', read_only=True)
    department_id = serializers.SlugRelatedField(slug_field='department_id', read_only=True)
    employee_number = serializers.CharField(source='username')
    class Meta:
        model = Employee
        fields = ['employee_id', 'employee_number', 'sa_no', 'email', 'job_id', 'supervisor', 'department_id', 'department_role', 'company_id', 'date_employed',
            'employee_status', 'salary', 'sick_leave_count', 'vac_leave_count', 'name', 'nickname', 'other_name', 'employee_image', 'civil_status', 'citizenship',
            'gender', 'weight', 'height', 'date_of_birth', 'place_of_birth', 'city_address', 'prov_address', 'tel_no', 'cel_no', 'religion', 'acr_no', 'acr_date',
            'dept_labor_no', 'dept_labor_date', 'tin_no', 'sss_no', 'pagibig_no', 'philhealth_no', 'is_active']
class ChangePasswordSerializer(serializers.ModelSerializer):
    """Change the authenticated employee's password.

    Fixes two corrupted dictionary keys left by an automated redaction pass
    ('<PASSWORD>password' -> 'old_password', '<PASSWORD>' -> 'password1'),
    which made both validate() and create() raise KeyError at runtime.
    """
    old_password = serializers.CharField()
    password1 = serializers.CharField()
    password2 = serializers.CharField()
    class Meta:
        model = Employee
        fields = ['old_password', 'password1', 'password2']
    def validate(self, data):
        """Collect every validation failure, then raise them together."""
        old_password = data['old_password']
        password1 = data['password1']
        password2 = data['password2']
        employee = self.context['request'].user
        errors = []
        # The two new passwords must agree.
        if password1 != password2:
            errors.append('Password does not match.')
        # Run Django's configured password validators.
        try:
            validate_password(password1)
        except Exception:  # narrowed from a bare except, which also caught SystemExit
            errors.append(
                'Passwords should be at least 8 characters consisted of characters, letters and numbers, and cannot be too common.')
        # The supplied old password must match the current credential.
        if not employee.check_password(old_password):
            errors.append('Old password is incorrect.')
        # Disallow reusing the current password.
        if employee.check_password(password1):
            errors.append('You cannot use your old password as new password.')
        if errors:
            raise serializers.ValidationError({'errors': errors})
        data['employee'] = employee
        return data
    def create(self, validated_data):
        """Hash and store the new password on the employee, then return it."""
        employee = validated_data['employee']
        employee.set_password(validated_data['password1'])
        employee.save()
        return employee
# Education
# Full-field read/write serializer for an employee's education record.
class EmployeeEducationSerializer(serializers.ModelSerializer):
    # Related employee addressed by its employee_id slug rather than pk.
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeEducation
        fields = '__all__'
class EmployeeEducationCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeEducation; at most one record per employee."""
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeEducation
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        Uses .exists() instead of .get(): .get() raises MultipleObjectsReturned
        (an unhandled 500) if duplicate rows already exist in the table.
        """
        if EmployeeEducation.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Employee Education Record already existed.']})
        return data
# Update serializer: education fields only (the owning employee_id is fixed).
class EmployeeEducationUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeEducation
        fields = ['primary_school', 'primary_address', 'primary_grad', 'sec_school', 'sec_address', 'sec_grad',
            'col_school', 'col_address', 'col_degree', 'col_grad', 'grad_school', 'grad_address', 'grad_degree', 'grad_grad',
            'others']
# Job History
# Full-field serializer; employee addressed by its employee_id slug.
class EmployeeJobHistorySerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeJobHistory
        fields = '__all__'
# Exams Taken
class EmployeeExamsTakenSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeExamsTaken
        fields = '__all__'
# Seminars Taken
class EmployeeSeminarsTakenSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeSeminarsTaken
        fields = '__all__'
# Skills
class EmployeeSkillsSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeSkills
        fields = '__all__'
class EmployeeSkillsCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeSkills; at most one record per employee."""
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeSkills
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeSkills.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Employee Skills Record already existed.']})
        return data
# Update serializer: skill fields only (owning employee_id is fixed).
class EmployeeSkillsUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeSkills
        fields = ['skill_name']
# Family Background
# Full-field serializer for the employee's family-background record.
class EmployeeFamilySerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeFamily
        fields = '__all__'
class EmployeeFamilyCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeFamily; at most one record per employee."""
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeFamily
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeFamily.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Employee Family Record already existed.']})
        return data
# Update serializer: family fields only (owning employee_id is fixed).
class EmployeeFamilyUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeFamily
        fields = ['father_name', 'father_birth', 'father_age', 'father_occu', 'father_employer',
            'mother_name', 'mother_birth', 'mother_age', 'mother_occu', 'mother_employer',
            'family_address', 'family_contact_no']
# Sibling
# One row per sibling; an employee may have several.
class EmployeeSiblingSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeSibling
        fields = '__all__'
# Married
# Spouse details for married employees.
class EmployeeMarriedSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeMarried
        fields = '__all__'
class EmployeeMarriedCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeMarried; at most one record per employee."""
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeMarried
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeMarried.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Employee Married Record already existed.']})
        return data
# Update serializer: spouse fields only (owning employee_id is fixed).
class EmployeeMarriedUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeMarried
        fields = ['spouse_name', 'spouse_address', 'spouse_birth', 'spouse_age', 'spouse_occupation', 'spouse_employer']
# Children
# One row per child; an employee may have several.
class EmployeeChildrenSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeChildren
        fields = '__all__'
# Medical History
class EmployeeMedicalHistorySerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeMedicalHistory
        fields = '__all__'
class EmployeeMedicalHistoryCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeMedicalHistory; one record per employee.

    The original create() re-listed seven fields by hand and silently dropped
    any other submitted field — notably 'last_checkup_place', which the sibling
    Update serializer exposes. Passing validated_data straight through fixes
    that while keeping identical behavior for the fields it did copy.
    """
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeMedicalHistory
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeMedicalHistory.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Medical History Record already existed.']})
        return data
    def create(self, validated_data):
        """Create the record from every validated field, dropping none."""
        return EmployeeMedicalHistory.objects.create(**validated_data)
# Update serializer: medical fields only (owning employee_id is fixed).
class EmployeeMedicalHistoryUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeMedicalHistory
        fields = ['had_illness', 'illness_details', 'hospitalized', 'hospitalized_details',
            'last_checkup_purpose', 'last_checkup_place', 'last_checkup_date', 'distinguishing_marks']
# NOTE(review): this view serializer is identical to
# EmployeeMedicalHistorySerializer above — possibly redundant; confirm intent.
class EmployeeMedicalHistoryViewSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeMedicalHistory
        fields = '__all__'
# Reference
# One row per character reference.
class EmployeeReferenceSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeReference
        fields = '__all__'
# Organization
class EmployeeOrganizationSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeOrganization
        fields = '__all__'
# Offense
class EmployeeOffenseSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeOffense
        fields = '__all__'
class EmployeeOffenseCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeOffense; at most one record per employee.

    The original create() re-listed five fields by hand and silently dropped
    the termination/revocation/injunction/arrest record fields that the sibling
    Update serializer exposes; passing validated_data through fixes that.
    """
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeOffense
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeOffense.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Record already existed.']})
        return data
    def create(self, validated_data):
        """Create the record from every validated field, dropping none."""
        return EmployeeOffense.objects.create(**validated_data)
# Update serializer: offense fields only (owning employee_id is fixed).
class EmployeeOffenseUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeOffense
        fields = ['convicted', 'offense_details', 'offense_court', 'date_filed',
            'termination_record', 'revocation_record', 'injunction_record', 'arrest_record']
# In case of Emergency
# Emergency-contact details for one employee.
class EmployeeEmergencySerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeEmergency
        fields = '__all__'
class EmployeeEmergencyCreateSerializer(serializers.ModelSerializer):
    """Create serializer for EmployeeEmergency; at most one record per employee."""
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeEmergency
        fields = '__all__'
    def validate(self, data):
        """Reject creation when a record already exists for this employee.

        .exists() replaces .get(), which raises MultipleObjectsReturned (500)
        if duplicates are already present.
        """
        if EmployeeEmergency.objects.filter(employee_id=data['employee_id']).exists():
            raise serializers.ValidationError({'errors': ['Record already existed.']})
        return data
# Update serializer: contact-person fields only (owning employee_id is fixed).
class EmployeeEmergencyUpdateSerializer(serializers.ModelSerializer):
    class Meta:
        model = EmployeeEmergency
        fields = ['person_name', 'person_relationship', 'person_address', 'person_phone']
# Signature
# Stored signature image/record for one employee.
class EmployeeSignatureSerializer(serializers.ModelSerializer):
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeSignature
        fields = '__all__'
# Documents
class EmployeeDocumentsSerializer(serializers.ModelSerializer):
    """Full-field serializer for employee document records.

    Fix: the original `fields` line had a dataset artifact fused onto it
    (`'__all__' | StarcoderdataPython`), which raised NameError at import.
    """
    employee_id = serializers.SlugRelatedField(slug_field='employee_id', queryset=Employee.objects.all(), allow_null=False)
    class Meta:
        model = EmployeeDocuments
        fields = '__all__'
'''
Here we import the following libraries:
1 - requests: To get the information from the source;
2 - pandas: To have better organization of the openings and endings songs
of the animes, as well as its authors;
3 - graphGenerator: Function that generates the graph.
'''
import requests
import pandas as pd
from .graphGenerator import graphGenerator
def _names(entries, key):
    """Return the *key* value of every entry as a list of strings."""
    return [str(entry[key]) for entry in entries]


def _split_theme(theme):
    """Split one Jikan theme string into a (title, band) pair.

    Reproduces the original parsing rules exactly: the title is everything
    before the first ", by" / " by" / " (" marker with quotes stripped and
    angle brackets converted to parentheses; the band is the text after the
    "by" marker, truncated at the first " (".
    """
    title = str(theme)
    title = title.split(", by")[0]
    title = title.split(" by")[0]
    title = title.split(" (")[0]
    title = title.replace("\"", "")
    title = title.replace("<", "(")
    title = title.replace(">", ")")
    band = str(theme)
    if ")\"" in band:
        band = band.split(")\" by ")[1]
    else:
        band = band.split(" by ")[1]
    band = band.split(" (")[0]
    return title, band


def _theme_frame(themes):
    """Build a Music/Band DataFrame from a list of Jikan theme strings."""
    pairs = [_split_theme(theme) for theme in themes]
    return pd.DataFrame(data={"Music": [p[0] for p in pairs],
                              "Band": [p[1] for p in pairs]})


# Error messages for the failure statuses Jikan embeds in the JSON payload.
_API_ERRORS = {
    400: "Error 400: Bad Request\n",
    404: "Error 404: Anime Not Found\n",
    405: "Error 405: Method Not Allowed\n",
    429: "Error 429: Too Many Requests\n",
    500: "Error 500: Internal Server Error\n",
}


# Function responsible for getting the anime data requested.
def getAnimeData(animeID):
    """Fetch one anime from the Jikan v3 API and store it via graphGenerator.

    animeID -- the MyAnimeList numeric id of the anime.
    All errors (API status codes, network failures, parsing problems) are
    caught at the bottom and printed, matching the original behavior.
    """
    try:
        # Simple message to inform the user about what's happening.
        print("Obtaining anime " + str(animeID) + "...\n")
        # NOTE(review): requests.get has no timeout — a stalled connection
        # hangs forever; consider adding one after confirming callers.
        source = "https://api.jikan.moe/v3/anime/" + str(animeID)
        data = requests.get(source)
        jsonData = data.json()
        # Jikan reports request failures inside the JSON body via "status".
        if "status" in jsonData and jsonData["status"] in _API_ERRORS:
            raise Exception(_API_ERRORS[jsonData["status"]])
        # Fields that are always present in a successful response.
        animeData = {
            "Title": jsonData["title"],
            "Synopsis": jsonData["synopsis"],
            "URL": jsonData["url"],
            "Format": jsonData["type"],
            "Source": jsonData["source"],
            "Episodes": jsonData["episodes"],
            "Rating": jsonData["rating"]
        }
        # Air dates come as ISO timestamps; keep only the date part.
        if jsonData["aired"]["from"] != None:
            animeData["Start Date"] = jsonData["aired"]["from"].split("T")[0]
        else:
            animeData["Start Date"] = "Start date not available."
        if jsonData["aired"]["to"] != None:
            animeData["End Date"] = jsonData["aired"]["to"].split("T")[0]
        else:
            animeData["End Date"] = "End date not available."
        # Optional scalar fields: stored only when present/non-null.
        if jsonData["trailer_url"] != None:
            animeData["Trailer"] = jsonData["trailer_url"]
        if jsonData["title_english"] != None:
            animeData["Title English"] = jsonData["title_english"]
        if jsonData["broadcast"] != None:
            animeData["Broadcast"] = jsonData["broadcast"]
        # Related works: each list may be absent or empty; adaptations keep
        # their URL, the others keep the MyAnimeList id.
        related = jsonData["related"]
        if related.get("Adaptation"):
            animeData["Adaptation"] = _names(related["Adaptation"], "url")
        if related.get("Prequel"):
            animeData["Prequel"] = _names(related["Prequel"], "mal_id")
        if related.get("Side story"):
            animeData["Side Story"] = _names(related["Side story"], "mal_id")
        if related.get("Sequel"):
            animeData["Sequel"] = _names(related["Sequel"], "mal_id")
        # Companies and genres: lists of objects whose "name" is kept.
        if jsonData.get("producers"):
            animeData["Producers"] = _names(jsonData["producers"], "name")
        if jsonData.get("licensors"):
            animeData["Licensors"] = _names(jsonData["licensors"], "name")
        if jsonData.get("studios"):
            animeData["Studios"] = _names(jsonData["studios"], "name")
        if jsonData.get("genres"):
            animeData["Genres"] = _names(jsonData["genres"], "name")
        # Opening/ending themes: identical parsing, now shared via helpers.
        if jsonData.get("opening_themes"):
            animeData["Openings"] = _theme_frame(jsonData["opening_themes"])
        if jsonData.get("ending_themes"):
            animeData["Endings"] = _theme_frame(jsonData["ending_themes"])
        # Persist everything in the graph; animeID becomes the unique URI.
        graphGenerator(animeID, animeData)
    # If anything goes wrong the program is redirected here.
    except Exception as e:
        print(e)
from .resnet import res50
from .resnet_cifar import res32_cifar
#!/usr/bin/env python3
#######
# Imports and functions
#######
import json
import pandas as pd
import argparse
def get_arguments():
    """Parse and return the command-line options (-in/--input, -out/--output)."""
    parser = argparse.ArgumentParser(description='')
    option_specs = (
        ("-in", "--input", "name of input file"),
        ("-out", "--output", "name of output file"),
    )
    for short_flag, long_flag, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, help=help_text, required=True, type=str)
    return parser.parse_args()
# Parse the CLI options once at import time; the rest of the script is flat.
args = get_arguments()
# Input: tab-separated prediction matrix; output: path for the crisp matrix.
pred_file = args.input
output_name = args.output
def prob2crisp(name):
    """Turn a probabilistic model name into its crisp counterpart.

    The model name is a '|'-separated string whose last segment tags the
    model kind; that trailing segment is replaced with 'c' (crisp).
    """
    segments = name.split('|')
    return '|'.join(segments[:-1] + ['c'])
def qc_prediction(PREDICTION_C):
    """Normalize one 'TUMOR:SUBTYPE' prediction label.

    1. Strip a leading 'class' artifact:  'classACC:2' -> 'ACC:2'
    2. Qualify a bare subtype with the tumor name:  'ACC:2' -> 'ACC:ACC_2'
       (left untouched when the tumor name already appears in the subtype,
       e.g. 'ACC:ACC_2').

    Fix: the original used re.sub with the raw subtype as the pattern, which
    (a) broke on regex metacharacters in the subtype and (b) replaced every
    occurrence of the subtype substring, corrupting labels such as 'ACC:C'.
    Rebuilding the string avoids both problems.
    """
    if PREDICTION_C.startswith('class'):
        PREDICTION_C = PREDICTION_C[len('class'):]
    tumor, subtype = PREDICTION_C.split(':', 1)
    if tumor not in subtype:
        PREDICTION_C = tumor + ':' + tumor + '_' + subtype
    return PREDICTION_C
#####
# Read in
#####
# Read in file
print(pred_file)
raw_pred = pd.read_csv(pred_file, sep='\t')
# raw_pred = pd.read_csv(pred_file, skiprows=4178, sep='\t', index_col=0)
#raw_pred
#####
# Create template of new file of crisp predictions
#####
# First four columns are sample metadata; they seed the output matrix.
matrix_crisp = raw_pred.iloc[:, :4]
#check format Labels col. ACC:2 -> ACC:ACC_2
tmp = []
for i in raw_pred["Label"]:
    if i != qc_prediction(i):
        i = qc_prediction(i)
        tmp.append(i)
    else:
        tmp.append(i)
#add Label col to matrix
matrix_crisp['Label']= tmp
#matrix_crisp
######
# Create crisp matrix
######
# create df of just model predictions
# Columns 5+ hold one prediction column per model.
df = raw_pred.iloc[:,5:]
models = df.columns
# NOTE(review): col_ct/row_ct are debug counters that are never read —
# candidates for removal.
col_ct = 0 # debugging
row_ct = 0 # debugging
for m in models:
    #print("###", i, "###")
    # get crisp label from probabilites in a given cell
    new_col = []
    for row in df[m]:
        # Normalize each cell's label in place (see qc_prediction).
        row = qc_prediction(row)
        # contents_dict = json.loads(row)
        # for k,v in contents_dict.items():
        #     if k=='classification':
        #         #print(v)
        #         subtype_crisp = max(v, key=v.get)
        #         subtype_crisp = qc_prediction(subtype_crisp)
        #         #print(subtype_crisp)
        #         new_col.append(subtype_crisp)
        new_col.append(row)
        row_ct+=1
    #add crisp to matrix_crisp
    # i = prob2crisp(m)
    # matrix_crisp[i] = new_col
    matrix_crisp[m] = new_col
    col_ct+=1
#rename col
matrix_crisp=matrix_crisp.rename(columns = {'Unnamed: 0':'Sample_ID'})
# save output
matrix_crisp.to_csv(output_name, sep='\t', index=False)
print('created ', output_name)
# Time: O(n)
# Space: O(n)
class Solution:
    def maxDistToClosest(self, seats: List[int]) -> int:
        """Return the largest distance from an empty seat to its nearest
        occupied seat (LeetCode 849), via two running-distance sweeps."""
        n = len(seats)
        dist_left = [float('inf')] * n
        dist_right = [float('inf')] * n
        # Left-to-right: distance to the nearest seat on the left.
        running = float('inf')
        for idx, seat in enumerate(seats):
            running = 0 if seat == 1 else running + 1
            dist_left[idx] = running
        # Right-to-left: distance to the nearest seat on the right.
        running = float('inf')
        for idx in range(n - 1, -1, -1):
            running = 0 if seats[idx] == 1 else running + 1
            dist_right[idx] = running
        # Each position's closest seat is the nearer of the two sides.
        return max(min(l, r) for l, r in zip(dist_left, dist_right))
# Time: O(n)
# Space: O(n)
class Solution:
    def maxDistToClosest(self, seats: List[int]) -> int:
        """Two-sweep variant of LeetCode 849.

        Fixes: removed the leftover `print(arr)` debug statement that dumped
        the whole array once per empty seat, and replaced the '#' string
        sentinel with None.
        """
        arr = [float('inf')] * len(seats)
        # Forward sweep: distance to the nearest occupied seat on the left.
        last_seat = None  # index of the most recently seen occupied seat
        for index, seat in enumerate(seats):
            if seat:
                last_seat = index
                arr[index] = 0
            elif last_seat is not None:
                arr[index] = index - last_seat
        # Backward sweep: tighten with the nearest occupied seat on the right.
        last_seat = None
        for index in reversed(range(len(seats))):
            if seats[index]:
                last_seat = index
            elif last_seat is not None:
                arr[index] = min(arr[index], last_seat - index)
        return max(arr)
import flask
import functools
def login_required(method):
    """View decorator: let logged-in users through, redirect everyone else.

    A visitor counts as logged in when 'username' is present in the Flask
    session; otherwise a message is flashed and they are sent to the 'login'
    view.
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        if 'username' not in flask.session:
            flask.flash("A login is required to see the page!")
            return flask.redirect(flask.url_for('login'))
        return method(*args, **kwargs)
    return wrapper
# repository: simonw/optfunc
import unittest
import optfunc
from StringIO import StringIO
class TestOptFunc(unittest.TestCase):
def test_three_positional_args(self):
has_run = [False]
def func(one, two, three):
has_run[0] = True
# Should only have the -h help option
parser, required_args = optfunc.func_to_optionparser(func)
self.assertEqual(len(parser.option_list), 1)
self.assertEqual(str(parser.option_list[0]), '-h/--help')
# Should have three required args
self.assertEqual(required_args, ['one', 'two', 'three'])
# Running it with the wrong number of arguments should cause an error
for argv in (
['one'],
['one', 'two'],
['one', 'two', 'three', 'four'],
):
e = StringIO()
optfunc.run(func, argv, stderr=e)
self.assert_('Required 3 arguments' in e.getvalue(), e.getvalue())
self.assertEqual(has_run[0], False)
# Running it with the right number of arguments should be fine
e = StringIO()
optfunc.run(func, ['one', 'two', 'three'], stderr=e)
self.assertEqual(e.getvalue(), '')
self.assertEqual(has_run[0], True)
def test_one_arg_one_option(self):
has_run = [False]
def func(one, option=''):
has_run[0] = (one, option)
# Should have -o option as well as -h option
parser, required_args = optfunc.func_to_optionparser(func)
self.assertEqual(len(parser.option_list), 2)
strs = [str(o) for o in parser.option_list]
self.assert_('-h/--help' in strs)
self.assert_('-o/--option' in strs)
# Should have one required arg
self.assertEqual(required_args, ['one'])
# Should execute
self.assert_(not has_run[0])
optfunc.run(func, ['the-required', '-o', 'the-option'])
self.assert_(has_run[0])
self.assertEqual(has_run[0], ('the-required', 'the-option'))
# Option should be optional
has_run[0] = False
optfunc.run(func, ['required2'])
self.assert_(has_run[0])
self.assertEqual(has_run[0], ('required2', ''))
    def test_options_are_correctly_named(self):
        # Each keyword argument maps to a long option plus a short initial.
        def func1(one, option='', verbose=False):
            pass
        parser, required_args = optfunc.func_to_optionparser(func1)
        strs = [str(o) for o in parser.option_list]
        self.assertEqual(strs, ['-h/--help', '-o/--option', '-v/--verbose'])
    def test_option_with_hyphens(self):
        # Underscores in parameter names become hyphens in the long option.
        def func2(option_with_hyphens=True):
            pass
        parser, required_args = optfunc.func_to_optionparser(func2)
        strs = [str(o) for o in parser.option_list]
        self.assertEqual(strs, ['-h/--help', '-o/--option-with-hyphens'])
    # NOTE(review): "inital" is a typo for "initial" in this test name.
    def test_options_with_same_inital_use_next_letter(self):
        # When two options share a first letter, the second falls back to the
        # next unused letter of its name.
        def func1(one, version='', verbose=False):
            pass
        parser, required_args = optfunc.func_to_optionparser(func1)
        strs = [str(o) for o in parser.option_list]
        self.assertEqual(strs, ['-h/--help', '-v/--version', '-e/--verbose'])
        def func2(one, host=''):
            pass
        parser, required_args = optfunc.func_to_optionparser(func2)
        strs = [str(o) for o in parser.option_list]
        self.assertEqual(strs, ['-h/--help', '-o/--host'])
    def test_short_option_can_be_named_explicitly(self):
        # A 'x_name' parameter pins the short option to '-x'.
        def func1(one, option='', q_verbose=False):
            pass
        parser, required_args = optfunc.func_to_optionparser(func1)
        strs = [str(o) for o in parser.option_list]
        self.assertEqual(strs, ['-h/--help', '-o/--option', '-q/--verbose'])
        e = StringIO()
        optfunc.run(func1, ['one', '-q'], stderr=e)
        self.assertEqual(e.getvalue().strip(), '')
    def test_notstrict(self):
        "@notstrict tells optfunc to tolerate missing required arguments"
        def strict_func(one):
            pass
        e = StringIO()
        optfunc.run(strict_func, [], stderr=e)
        self.assertEqual(e.getvalue().strip(), 'Required 1 arguments, got 0')
        @optfunc.notstrict
        def notstrict_func(one):
            pass
        e = StringIO()
        optfunc.run(notstrict_func, [], stderr=e)
        self.assertEqual(e.getvalue().strip(), '')
    def test_arghelp(self):
        "@arghelp('foo', 'help about foo') sets help text for parameters"
        @optfunc.arghelp('foo', 'help about foo')
        def foo(foo = False):
            pass
        parser, required_args = optfunc.func_to_optionparser(foo)
        opt = parser.option_list[1]
        self.assertEqual(str(opt), '-f/--foo')
        self.assertEqual(opt.help, 'help about foo')
    def test_multiple_invalid_subcommand(self):
        "With multiple subcommands, invalid first arg should raise an error"
        def one(arg):
            pass
        def two(arg):
            pass
        def three(arg):
            pass
        # Invalid first argument should raise an error
        e = StringIO()
        optfunc.run([one, two], ['three'], stderr=e)
        self.assertEqual(
            e.getvalue().strip(), "Unknown command: try 'one' or 'two'"
        )
        e = StringIO()
        optfunc.run([one, two, three], ['four'], stderr=e)
        self.assertEqual(
            e.getvalue().strip(),
            "Unknown command: try 'one', 'two' or 'three'"
        )
        # No argument at all should raise an error
        e = StringIO()
        optfunc.run([one, two, three], [], stderr=e)
        self.assertEqual(
            e.getvalue().strip(),
            "Unknown command: try 'one', 'two' or 'three'"
        )
def test_multiple_valid_subcommand_invalid_argument(self):
"Subcommands with invalid arguments should report as such"
def one(arg):
executed.append(('one', arg))
def two(arg):
executed.append(('two', arg))
e = StringIO()
executed = []
optfunc.run([one, two], ['one'], stderr=e)
self.assertEqual(
e.getvalue().strip(), 'one: Required 1 arguments, got 0'
)
def test_multiple_valid_subcommand_valid_argument(self):
"Subcommands with valid arguments should execute as expected"
def one(arg):
executed.append(('one', arg))
def two(arg):
executed.append(('two', arg))
e = StringIO()
executed = []
optfunc.run([one, two], ['two', 'arg!'], stderr=e)
self.assertEqual(e.getvalue().strip(), '')
self.assertEqual(executed, [('two', 'arg!')])
def test_run_class(self):
class Class:
def __init__(self, one, option=''):
self.has_run = [(one, option)]
class NoInitClass:
pass
# Should execute
e = StringIO()
c = optfunc.run(Class, ['the-required', '-o', 'the-option'], stderr=e)
self.assertEqual(e.getvalue().strip(), '')
self.assert_(c.has_run[0])
self.assertEqual(c.has_run[0], ('the-required', 'the-option'))
# Option should be optional
c = None
e = StringIO()
c = optfunc.run(Class, ['required2'], stderr=e)
self.assertEqual(e.getvalue().strip(), '')
self.assert_(c.has_run[0])
self.assertEqual(c.has_run[0], ('required2', ''))
# Classes without init should work too
c = None
e = StringIO()
c = optfunc.run(NoInitClass, [], stderr=e)
self.assert_(c)
self.assertEqual(e.getvalue().strip(), '')
def test_stdin_special_argument(self):
consumed = []
def func(stdin):
consumed.append(stdin.read())
class FakeStdin(object):
def read(self):
return "hello"
optfunc.run(func, stdin=FakeStdin())
self.assertEqual(consumed, ['hello'])
def test_stdout_special_argument(self):
def upper(stdin, stdout):
stdout.write(stdin.read().upper())
class FakeStdin(object):
def read(self):
return "hello"
class FakeStdout(object):
written = ''
def write(self, w):
self.written = w
stdout = FakeStdout()
self.assertEqual(stdout.written, '')
optfunc.run(upper, stdin=FakeStdin(), stdout=stdout)
self.assertEqual(stdout.written, 'HELLO')
def test_stderr_special_argument(self):
def upper(stderr):
stderr.write('an error')
class FakeStderr(object):
written = ''
def write(self, w):
self.written = w
stderr = FakeStderr()
self.assertEqual(stderr.written, '')
optfunc.run(upper, stderr=stderr)
self.assertEqual(stderr.written, 'an error')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1987565 | <filename>client-tests/testWorkspaceService.py
import unittest
from biokbase.auth.auth_token import get_token
from biokbase.workspaceService.Client import workspaceService
from datetime import datetime
import os
import subprocess
class TestWorkspaces(unittest.TestCase):
    """Integration tests for the workspaceService JSON-RPC API.

    Talks to a live service on http://localhost:7058 as the ``kbasetest``
    user; every test creates its own uniquely named workspace in setUp.
    Fixes over the original: modern unittest assertion names
    (``assertEqual``/``assertTrue`` instead of the deprecated
    ``assertEquals``/``assert_`` aliases), ``tearDownClass`` taking ``cls``,
    a raw-string regex pattern, a corrected testMove docstring, and removal
    of unused local aliases.
    """
    @classmethod
    def setUpClass(cls):
        """Authenticate once and sweep any leftover state from earlier runs."""
        token_obj = get_token(username='kbasetest', password='<PASSWORD>')
        cls.token = token_obj['access_token']
        test_dir = os.path.dirname(os.path.abspath(__file__))
        cleanup_file = os.path.join(test_dir, 'cleanup.pl')
        subprocess.call(['perl', cleanup_file])
    def setUp(self):
        """Create a client and a fresh, uniquely named workspace."""
        self.impl = workspaceService('http://localhost:7058')
        # FIXME: Right now you can't delete so we'll create a new one each time.
        self.ws_name = "testWS_%s" % datetime.utcnow().strftime('%s%f')
        self.conf = {"workspace": self.ws_name, "default_permission": "a",
                     "auth": self.__class__.token}
        self.ws_meta = self.impl.create_workspace(self.conf)
    def testCreate(self):
        """
        Test Workspace Creation
        """
        impl = self.impl
        ws_name = self.ws_name
        ws_meta = self.ws_meta
        self.assertEqual(ws_meta[0], ws_name)
        self.assertEqual(ws_meta[1], 'kbasetest')
        self.assertEqual(ws_meta[3], 0)
        self.assertEqual(ws_meta[4], 'a')
        self.assertEqual(ws_meta[5], 'a')
        impl.delete_workspace({"workspace": ws_name, "auth": self.__class__.token})
    def testRevert(self):
        """
        Test revert object
        """
        impl = self.impl
        ws_name = self.ws_name
        data1 = {"name": "testgenome3", "string": "ACACGATTACA"}
        test_object3 = {
            "id": "test_object_id3",
            "type": "Genome",
            "data": data1,
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        # Save test object
        obj_meta1 = impl.save_object(test_object3)
        # Get the object version
        ver = obj_meta1[3]
        obj = impl.get_object({"workspace": ws_name, "id": "test_object_id3", "type": "Genome", "auth": self.__class__.token})
        # Make sure version matches
        self.assertEqual(obj['metadata'][3], ver)
        data2 = {"bogus": "data"}
        # Update the data field
        test_object3['data'] = data2
        impl.save_object(test_object3)
        ver += 1
        obj = impl.get_object({"workspace": ws_name, "id": "test_object_id3", "type": "Genome", "auth": self.__class__.token})
        # Make sure version is incremented
        self.assertEqual(obj['metadata'][3], ver)
        # Make sure new data is stored, and old data is no longer present
        self.assertEqual(obj['data']['bogus'], 'data')
        self.assertIn("bogus", obj['data'].keys())
        self.assertNotIn("name", obj['data'].keys())
        self.assertNotIn("string", obj['data'].keys())
        impl.revert_object({"workspace": ws_name, "id": "test_object_id3", "type": "Genome", "auth": self.__class__.token})
        obj = impl.get_object({"workspace": ws_name, "id": "test_object_id3", "type": "Genome", "auth": self.__class__.token})
        ver += 1
        # Make sure version is incremented
        self.assertEqual(obj['metadata'][3], ver)
        # Make sure old data is reverted, and new data is no longer present
        self.assertEqual(obj['data']['name'], 'testgenome3')
        self.assertEqual(obj['data']['string'], 'ACACGATTACA')
        self.assertNotIn("bogus", obj['data'].keys())
        self.assertIn("name", obj['data'].keys())
        self.assertIn("string", obj['data'].keys())
    def testClone(self):
        """
        Test Workspace Cloning
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object1 = {
            "id": "test_object_id1",
            "type": "Genome",
            "data": {"name": "testgenome1", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        impl.save_object(test_object1)
        clone_ws_name = "clone_%s" % ws_name
        clone = impl.clone_workspace({
            "new_workspace": clone_ws_name,
            "current_workspace": ws_name,
            "default_permission": "n",
            "auth": self.__class__.token
        })
        self.assertEqual(clone[0], clone_ws_name)
        # The cloned workspace must contain a copy of the saved object.
        self.assertTrue(impl.has_object({
            "workspace": clone_ws_name,
            "id": "test_object_id1",
            "type": "Genome",
            "auth": self.__class__.token
        }))
        impl.delete_workspace({"workspace": ws_name, "auth": self.__class__.token})
        impl.delete_workspace({"workspace": clone_ws_name, "auth": self.__class__.token})
    def testDelete(self):
        """
        Test Workspace Delete
        """
        self.impl.delete_workspace({"workspace": self.ws_name, "auth": self.__class__.token})
        # Reaching this point without an exception means deletion succeeded.
    def testListWorkspaces(self):
        """
        Test Workspace List
        """
        impl = self.impl
        ws_name = self.ws_name
        ws_name2 = "testWS_%s" % datetime.utcnow().strftime('%s')
        conf2 = {"workspace": ws_name2, "default_permission": "a", "auth": self.__class__.token}
        self.impl.create_workspace(conf2)
        ws_list = impl.list_workspaces({"auth": self.__class__.token})
        ws_names = [w[0] for w in ws_list]
        self.assertIn(ws_name, ws_names)
        self.assertIn(ws_name2, ws_names)
        impl.delete_workspace({"workspace": ws_name, "auth": self.__class__.token})
        impl.delete_workspace({"workspace": ws_name2, "auth": self.__class__.token})
    def testListWorkspaceObjects(self):
        """Saved objects appear in the workspace object listing."""
        impl = self.impl
        ws_name = self.ws_name
        test_object1 = {
            "id": "test_object_id1",
            "type": "Genome",
            "data": {"name": "testgenome1", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        test_object2 = {
            "id": "test_object_id2",
            "type": "Genome",
            "data": {"name": "testgenome2", "string": "ACAAAAGGATTACA"},
            "workspace": ws_name,
            "command": "noop",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        obj_meta1 = impl.save_object(test_object1)
        obj_meta2 = impl.save_object(test_object2)
        self.assertEqual(obj_meta1[0], "test_object_id1")
        self.assertEqual(obj_meta2[0], "test_object_id2")
        ws_objects = impl.list_workspace_objects({"workspace": ws_name, "auth": self.__class__.token})
        self.assertEqual(len(ws_objects), 2)
        # get names of objects
        obj_list = [o[0] for o in ws_objects]
        self.assertIn("test_object_id1", obj_list)
        self.assertIn("test_object_id2", obj_list)
        impl.delete_workspace({"workspace": ws_name, "auth": self.__class__.token})
    def testSaveObject(self):
        """
        Make sure object gets saved
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object1 = {
            "id": "test_object_id1",
            "type": "Genome",
            "data": {"name": "testgenome1", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        obj_meta1 = impl.save_object(test_object1)
        self.assertEqual(obj_meta1[0], "test_object_id1")
        self.assertEqual(obj_meta1[1], "Genome")
        # Modification date should look like an ISO-8601 timestamp.
        # (assertRegexpMatches is the Python 2.7 spelling of assertRegex.)
        self.assertRegexpMatches(obj_meta1[2], r'\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d')
        self.assertEqual(obj_meta1[3], 0)
        self.assertEqual(obj_meta1[4], 'something')
        self.assertEqual(obj_meta1[5], 'kbasetest')
        self.assertEqual(obj_meta1[6], 'kbasetest')
    def testGetObject(self):
        """
        Test Retrieve Object
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object3 = {
            "id": "test_object_id3",
            "type": "Genome",
            "data": {"name": "testgenome3", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        impl.save_object(test_object3)
        obj = impl.get_object({"workspace": ws_name, "id": "test_object_id3", "type": "Genome", "auth": self.__class__.token})
        self.assertEqual(obj['data']['name'], "testgenome3")
        self.assertEqual(obj['data']['string'], "ACACGATTACA")
        self.assertIn("test_object_id3", obj['metadata'])
    def testGetObjectMetadata(self):
        """
        Test that we can retrieve object metadata
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object4 = {
            "id": "test_object_id4",
            "type": "Genome",
            "data": {"name": "testgenome4", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        impl.save_object(test_object4)
        obj = impl.get_objectmeta({"workspace": ws_name, "id": "test_object_id4", "type": "Genome", "auth": self.__class__.token})
        self.assertIn({"origin": "shreyas"}, obj)
    def testCopy(self):
        """
        Test that we can copy object
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object5 = {
            "id": "test_object_id5",
            "type": "Genome",
            "data": {"name": "testgenome5", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        impl.save_object(test_object5)
        ws_name2 = "testWS_%s" % datetime.utcnow().strftime('%s')
        conf2 = {"workspace": ws_name2, "default_permission": "a", "auth": self.__class__.token}
        self.impl.create_workspace(conf2)
        impl.copy_object({
            "new_id": "new_object_id5",
            "new_workspace": ws_name2,
            "source_id": "test_object_id5",
            "source_workspace": ws_name,
            "type": "Genome",
            "auth": self.__class__.token
        })
        has_object = impl.has_object({
            "id": "new_object_id5",
            "workspace": ws_name2,
            "type": "Genome",
            "auth": self.__class__.token
        })
        self.assertTrue(has_object)
    def testMove(self):
        """
        Test that we can move an object between workspaces
        """
        impl = self.impl
        ws_name = self.ws_name
        test_object5 = {
            "id": "test_object_id5",
            "type": "Genome",
            "data": {"name": "testgenome5", "string": "ACACGATTACA"},
            "workspace": ws_name,
            "command": "something",
            "metadata": {"origin": "shreyas"},
            "auth": self.__class__.token
        }
        impl.save_object(test_object5)
        ws_name2 = "testWS_%s" % datetime.utcnow().strftime('%s')
        conf2 = {"workspace": ws_name2, "default_permission": "a", "auth": self.__class__.token}
        self.impl.create_workspace(conf2)
        impl.move_object({
            "new_id": "new_object_id5",
            "new_workspace": ws_name2,
            "source_id": "test_object_id5",
            "source_workspace": ws_name,
            "type": "Genome",
            "auth": self.__class__.token
        })
        # The object must exist at the destination...
        has_object = impl.has_object({
            "id": "new_object_id5",
            "workspace": ws_name2,
            "type": "Genome",
            "auth": self.__class__.token
        })
        self.assertEqual(has_object, 1)
        # ...and be gone from the source workspace.
        has_orig_object = impl.has_object({
            "id": "test_object_id5",
            "workspace": ws_name,
            "type": "Genome",
            "auth": self.__class__.token
        })
        self.assertEqual(has_orig_object, 0)
    @classmethod
    def tearDownClass(cls):
        """Run the Perl cleanup script once after all tests finish."""
        test_dir = os.path.dirname(os.path.abspath(__file__))
        cleanup_file = os.path.join(test_dir, 'cleanup.pl')
        subprocess.call(['perl', cleanup_file])
# Allow running these integration tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6541800 | import numpy as np
def norm_Frobenius(A):
    """Return the Frobenius norm of ``A``: sqrt of the sum of squared entries."""
    squared_total = np.sum(np.square(A))
    return np.sqrt(squared_total)
def divergence_KullbackLeible(A, B):
    """Return the generalized Kullback-Leibler divergence D(A || B).

    Computes ``sum(A * log(A / B) - A + B)``.  Zeros in ``B`` and in the
    ratio ``A / B`` are clamped to 1e-6 to keep the logarithm finite.

    Bug fix: the original wrote ``B[B == 0] = 1e-6``, silently mutating the
    caller's array in place; this version leaves both inputs untouched.
    """
    B_safe = np.where(B == 0, 1e-6, B)
    ratio = A / B_safe
    ratio = np.where(ratio == 0, 1e-6, ratio)
    return np.sum(A * np.log(ratio) - A + B_safe)
| StarcoderdataPython |
3380787 |
def handle_data(data_files):
    """Load the CSV files, extract the demand column, and min-max scale it.

    Concatenates the rows of every file, takes column 4 of all but the last
    48 rows as the demand series, and scales it to [0, 1].
    Returns ``(scaled_column_vector, fitted_scaler)`` so predictions can be
    inverse-transformed later.
    """
    from sklearn.preprocessing import MinMaxScaler
    rows = []
    for path in data_files:
        frame = pd.read_csv(path)
        rows.extend(frame.values)
    rows = np.asarray(rows)
    # Column 4 holds the demand values; the final 48 rows are excluded.
    demand = rows[:-48, 4]
    scaler = MinMaxScaler(feature_range=(0, 1))
    scaled = scaler.fit_transform(demand.reshape(-1, 1))
    return scaled, scaler
def slide_window(data_norm, window=90):
    """Build supervised (X, y) pairs from a series with a sliding window.

    Generalized: the previously hard-coded window length of 90 is now the
    ``window`` parameter (default 90, so existing callers are unaffected).

    :param data_norm: 2-D array whose column 0 is the series (shape (n, 1)).
    :param window: number of past steps used as features for each target.
    :returns: ``(X, y)`` where ``X[i]`` is the ``window`` values preceding
              position ``i`` and ``y[i]`` is the value at that position.
    """
    X = []
    y = []
    for i in range(window, data_norm.shape[0]):
        X.append(data_norm[i - window:i, 0])
        y.append(data_norm[i, 0])
    return np.array(X), np.array(y)
def get_model_xgboost(model_name):
    """Load a pre-trained XGBoost regressor from the file ``model_name``.

    The estimator is constructed with the same hyper-parameters used at
    training time (1000 trees, squared-error objective) and then the saved
    booster is loaded into it.

    Cleanup: the original defined an unused ``gpu_dict`` local; to run on
    GPU instead, add ``'tree_method': 'gpu_hist'`` to ``params``.
    """
    import xgboost as xgb
    params = {
        'objective': 'reg:squarederror'
    }
    reg = xgb.XGBRegressor(n_estimators=1000, **params)
    reg.load_model(model_name)
    return reg
def get_model_lstm(model_name):
    """Load a pre-trained LSTM model — not implemented.

    Bug fix: the original body was a copy-paste of the XGBoost loader that
    referenced names undefined in this scope (``xgb``, ``cpu_dict``) and so
    always crashed with NameError while returning nothing.  Fail fast with
    an explicit error instead.

    :param model_name: path to the saved model file.
    :raises NotImplementedError: always, until a real LSTM loader is written.
    """
    raise NotImplementedError(
        'LSTM model loading is not implemented yet (model: %s)' % model_name)
def get_model_random_forest(model_name):
    """Load a pre-trained random-forest model — not implemented.

    Bug fix: the original body was a copy-paste of the XGBoost loader that
    referenced names undefined in this scope (``xgb``, ``cpu_dict``) and so
    always crashed with NameError while returning nothing.  Fail fast with
    an explicit error instead.

    :param model_name: path to the saved model file.
    :raises NotImplementedError: always, until a real loader is written.
    """
    raise NotImplementedError(
        'Random-forest model loading is not implemented yet (model: %s)'
        % model_name)
def plot_data(plot_values):
    """Plot every series in ``plot_values`` on one figure and display it."""
    for series in plot_values:
        plt.plot(series)
    plt.legend()
    plt.show()
if __name__ == '__main__':
    # Script entry point: evaluate every saved model of the selected family
    # against the sliding-window test set, save per-model predictions, and
    # collect error metrics into one spreadsheet.
    import glob
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_absolute_percentage_error, r2_score
    files_path = glob.glob('./data/*.csv')
    data_norm, normalizer = handle_data(files_path)
    X_test, y_test = slide_window(data_norm)
    # Undo the min-max scaling so errors are reported in original units.
    y_test = normalizer.inverse_transform(y_test.reshape(-1, 1))
    rmse = []
    mae = []
    mape = []
    r2 = []
    model_name = 'xgboost' # lstm || rf || xgboost
    #model_number = 0
    model_path = glob.glob('./models/%s/*'%(model_name))
    out_path = './outputs/%s/'%model_name
    for model_number in range(len(model_path)):
        # NOTE(review): always uses the XGBoost loader regardless of
        # model_name — confirm before selecting 'lstm' or 'rf' above.
        reg = get_model_xgboost(model_path[model_number])
        predictions = reg.predict(X_test)
        predictions = normalizer.inverse_transform(predictions.reshape(-1, 1))
        # squared=False makes mean_squared_error return the RMSE directly.
        rmse.append(mean_squared_error(y_true=y_test, y_pred=predictions, squared=False))
        mae.append(mean_absolute_error(y_true=y_test, y_pred=predictions))
        mape.append(mean_absolute_percentage_error(y_true=y_test, y_pred=predictions))
        r2.append(r2_score(y_true=y_test, y_pred=predictions))
        np.savetxt(out_path + 'prediction_%s_%i.txt'%(model_name, model_number), predictions)
        plt.plot(y_test)
        plt.plot(predictions)
        plt.legend(["Expected", "Predicted"])
        plt.show()
    # One row of metrics per evaluated model file.
    d = {
        "RMSE": rmse,
        "MAE": mae,
        "MAPE": mape,
        "R2": r2,
    }
    df = pd.DataFrame(d)
    df.to_excel(out_path + 'all_metrics_%s.xlsx'%(model_name))
| StarcoderdataPython |
1652675 | from rest_framework_mongoengine import serializers
from .models import restaurants
class restaurantsSerializer(serializers.DocumentSerializer):
    """Full serializer for the ``restaurants`` MongoEngine document,
    exposing the complete set of restaurant fields."""
    class Meta:
        model = restaurants
        fields = ('restaurant_id', 'name', 'cuisine', 'borough', 'address', 'image', 'city')
class restaurantListSerializer(serializers.DocumentSerializer):
    """Slim serializer for the ``restaurants`` document exposing only the
    id and name — presumably for list views; confirm against the consuming
    view before extending."""
    class Meta:
        model = restaurants
        fields = ('restaurant_id', 'name')
6698347 | from airflow.models import DAG
from airflow_ext.gfw.models import DagFactory
from airflow_ext.gfw.operators.python_operator import ExecutionDateBranchOperator
from datetime import datetime, timedelta
PIPELINE = 'pipe_vms_belize'
#
# PIPE_VMS_BELIZE
#
class PipelineDagFactory(DagFactory):
"""Concrete class to handle the DAG for pipe_vms_belize"""
def __init__(self, pipeline=PIPELINE, **kwargs):
"""
Constructs the DAG.
:@param pipeline: The pipeline name. Default value the PIPELINE.
:@type pipeline: str.
:@param kwargs: A dict of optional parameters.
:@param kwargs: dict.
"""
super(PipelineDagFactory, self).__init__(pipeline=pipeline, **kwargs)
def format_date_sharded_table(self, table, date=None):
"""
Override of format_date_sharded_table method.
:@param table: The BigQuery table.
:@type table: str.
:@param date: The date sharded. Default None.
:@type date: str.
"""
return "{}${}".format(table, date) if date is not None else table
def build(self, dag_id):
"""
Override of build method.
:@param dag_id: The id of the DAG.
:@type table: str.
"""
config = self.config
config['source_paths'] = ','.join(self.source_table_paths())
config['source_dates'] = ','.join(self.source_date_range())
config['check_tables'] = self.config.get('source_table') or self.config.get('source_tables')
with DAG(dag_id, schedule_interval=self.schedule_interval, default_args=self.default_args) as dag:
source_exists = self.tables_checker(
retries=2*24*2, # Retries 2 days with 30 minutes.
execution_timeout=timedelta(days=2) # TimeOut of 2 days.
)
fetch_normalized_daily = self.build_docker_task({
'task_id':'fetch_normalized_daily',
'pool':'k8operators_limit',
'docker_run':'{docker_run}'.format(**config),
'image':'{docker_image}'.format(**config),
'name':'fetch-normalized-daily',
'dag':dag,
'arguments':['fetch_normalized_belize_vms',
'{source_dates}'.format(**config),
'daily',
'{source_paths}'.format(**config),
'{project_id}:{pipeline_dataset}.{normalized}'.format(**config)]
})
for sensor in source_exists:
dag >> sensor >> fetch_normalized_daily
return dag
# Register one DAG per cadence at module level so airflow discovers them.
for mode in ('daily', 'monthly', 'yearly'):
    dag_id = '%s_%s' % (PIPELINE, mode)
    schedule = '@%s' % mode
    globals()[dag_id] = PipelineDagFactory(schedule_interval=schedule).build(dag_id)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.