id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11324982 | #!/usr/bin/env python3
'''
# Autor: <NAME>
# Datum: 21.04.2018
# Soubor: isj_proj07_xsnase06.py
'''
''' DEKLARACE ZÁSTUPNÝCH PROMNĚNÝCH '''
NULA=0
JEDNA=1
DVA=2
TRI=3
CTYRI=4
''' Konec deklarace '''
''' import knihoven '''
import math
''' konec importu '''
class TooManyCallsError(Exception):
    """Raised when a call-limited function exceeds its allowed call count."""

    def __init__(self, zprava):
        """Store the error message.

        Also forwards the message to Exception.__init__ so that str(exc)
        and exception chaining behave conventionally.  The ``zprava``
        attribute is kept for backward compatibility with existing callers.
        (Original also had a stray trailing semicolon -- removed.)
        """
        super().__init__(zprava)
        self.zprava = zprava
def limit_calls(max_calls=2, error_message_tail='called too often'):
    """Decorator factory that limits how many times a function may be called.

    Args:
        max_calls: maximum number of allowed calls (default 2, as before).
        error_message_tail: text appended to the error message once the
            limit is exceeded.

    The wrapped function raises TooManyCallsError on every call after the
    first ``max_calls`` calls.  Generalized from the original, which only
    accepted exactly two positional arguments ``(a, b)``; the wrapper now
    forwards arbitrary ``*args``/``**kwargs`` (backward compatible).
    """
    def _limit_calls(func):
        def wrapper(*args, **kwargs):
            # The counter lives as a function attribute so each decorated
            # function tracks its own count (attribute name 'calls' kept
            # for compatibility with existing code that inspects it).
            if wrapper.calls < max_calls:
                wrapper.calls += 1
                return func(*args, **kwargs)
            raise TooManyCallsError(
                'function "' + func.__name__ + '" - ' + str(error_message_tail))
        wrapper.calls = 0
        return wrapper
    return _limit_calls
@limit_calls(1, 'that is too much')
def pyth(a, b):
    """Return the hypotenuse length for legs *a* and *b* (Pythagorean theorem).

    Call-limited to a single invocation by the decorator, per the assignment.
    """
    return math.sqrt(a ** 2 + b ** 2)
def ordered_merge(*args, selector=None):
    """Lazily yield elements drawn from several iterables in a given order.

    Args:
        *args: the input iterables.
        selector: sequence of input indices; the i-th yielded element is the
            next unconsumed element of ``args[selector[i]]``.  When None or
            empty, the generator yields nothing (matching the original's
            ``return []`` inside a generator, which simply stops iteration).

    Fixes over the original:
    - ``selector.__len__() is not NULA`` compared an int by *identity*
      (works only by CPython small-int caching); replaced by a length check.
    - a list comprehension used purely for its side effect and an inner
      ``try/except: raise StopIteration`` (a no-op around ``yield``, and a
      RuntimeError under PEP 479) are removed.
    - ``selector`` may now be any iterable of indices, not only a sized
      sequence (backward compatible).
    """
    if selector is None or len(selector) == 0:
        return
    sources = [iter(arg) for arg in args]
    for source_idx in selector:
        yield next(sources[source_idx])
class Log:
    """Context manager that writes a simple 'Begin'/'End'-delimited log file.

    Usage::

        with Log('out.txt') as log:
            log.logging('message')

    Fixes over the original: dead commented-out code removed, the file is
    opened with an explicit UTF-8 encoding instead of the platform default.
    """

    def __init__(self, soubor):
        """Open *soubor* for writing (truncating any existing content)."""
        self.f = open(soubor, 'w', encoding='utf-8')

    def __enter__(self):
        """Write the opening marker and return the log object."""
        self.f.write('Begin\n')
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Write the closing marker and close the underlying file."""
        self.f.write('End\n')
        self.f.close()

    def logging(self, parametry):
        """Append one line containing *parametry* to the log."""
        self.f.write(parametry + "\n")
| StarcoderdataPython |
9755803 | <reponame>sutasu/tortuga<gh_stars>10-100
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tortuga.resourceAdapterConfiguration import settings
def test_boolean():
    """Exercise settings.BooleanSetting: validate(), values whitelist, dump()."""
    bs = settings.BooleanSetting()
    #
    # Assert that a non boolean value raises a validation error
    #
    with pytest.raises(settings.SettingValidationError):
        bs.validate('1')
    #
    # Assert that a boolean value does not raise a validation error
    #
    bs.validate('True')
    bs.validate('False')
    #
    # Assert that list validation works too
    #
    bs.list = True
    bs.validate('True, False')
    bs.list = False
    #
    # Assert value is in values list
    #
    bs.values = ['True']
    with pytest.raises(settings.SettingValidationError):
        bs.validate('False')
    bs.validate('True')
    #
    # Assert dump returns valid values (scalar bool, or list of bools in list mode)
    #
    assert bs.dump('True') is True
    bs.list = True
    assert bs.dump('True, False') == [True, False]
def test_string():
    """Exercise settings.StringSetting: validate(), values whitelist, dump()."""
    ss = settings.StringSetting()
    #
    # Assert that a non-string value raises a validation error
    #
    with pytest.raises(settings.SettingValidationError):
        ss.validate(1)
    #
    # Assert that a string value does not raise a validation error
    #
    ss.validate('abc')
    #
    # Assert value is in values list
    #
    ss.values = ['abc', 'def']
    with pytest.raises(settings.SettingValidationError):
        ss.validate('ghi')
    ss.validate('def')
    #
    # Assert dump returns valid values (scalar, or list when list mode is on)
    #
    assert ss.dump('abc') == 'abc'
    ss.list = True
    assert ss.dump('abc, def') == ['abc', 'def']
def test_integer():
    """Exercise settings.IntegerSetting: validate(), values whitelist, dump()."""
    # 'is_' avoids shadowing anything; 'is' itself is a keyword.
    is_ = settings.IntegerSetting()
    #
    # Assert that a non-integer value raises a validation error
    #
    with pytest.raises(settings.SettingValidationError):
        is_.validate('abc')
    #
    # Assert that an integer value does not raise a validation error
    #
    is_.validate('3')
    #
    # Assert that list validation works too
    #
    is_.list = True
    is_.validate('3, 4, 5')
    is_.list = False
    #
    # Assert value is in values list
    #
    is_.values = ['1', '2']
    with pytest.raises(settings.SettingValidationError):
        is_.validate('3')
    is_.validate('1')
    #
    # Assert dump returns valid values (parsed ints, or list of ints)
    #
    assert is_.dump('1') == 1
    is_.list = True
    assert is_.dump('1, 2') == [1, 2]
def test_file():
    """Exercise settings.FileSetting: type check, must_exist flag, values list."""
    fs = settings.FileSetting(must_exist=False)
    #
    # Assert that a non-string value raises a validation error
    #
    with pytest.raises(settings.SettingValidationError):
        fs.validate(1)
    #
    # Assert that a string value does not raise a validation error
    #
    fs.validate('abc')
    #
    # Assert that if a file doesn't exist, a validation error is raised
    #
    fs.must_exist = True
    with pytest.raises(settings.SettingValidationError):
        fs.validate('abc')
    #
    # Assert that if a file does exist, nothing happens
    # (__file__ is this test module itself, guaranteed to exist)
    #
    fs.must_exist = True
    fs.validate(__file__)
    #
    # Assert value is in values list
    #
    fs.must_exist = False
    fs.values = ['abc', 'def']
    with pytest.raises(settings.SettingValidationError):
        fs.validate('ghi')
    fs.validate('def')
| StarcoderdataPython |
3546820 | <filename>gym-BuildingControls/gym_BuildingControls/envs/bldg_models.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""@author: <NAME>
Variable Names
Uin: Conductance matrix input by user, upper triangle only, (nN x nN) (W/K)
U: Conductance matrix (symmetrical) with added capacitance for diagonal term, (nN x nN) (W/K)
U_inv: inverse of U; U^-1
C: Capacitance vector, (nN x 1) (J/K)
F: Conductance matrix of nodes connected to a known temperature source, (nN x nM) (W/K)
nN: Number of nodes
nM: Number of nodes with known temperatures / boundaries
These models are made for convenience. The method is generalized to any number
of interior and boundary nodes as in 'mUxFxCx' model definition below.
"""
def mF1C1(F_in, C_in, dt):
    """Single-node room model.

    One effective room node (node 0) carrying a capacitance, coupled to the
    ambient-air boundary (boundary 0) through one effective conductance that
    lumps wall, windows and infiltration.

    Args:
        F_in: room-to-ambient conductance (W/K)
        C_in: room capacitance (J/K)
        dt:   timestep (s)

    Returns:
        (Uinv, F, C, nN, nM) -- inverse conductance matrix, boundary
        conductances, capacitances, node count and boundary count.
    """
    # Dependencies are imported locally, matching this module's style.
    from numpy import zeros
    from numpy import sum as npsum
    from numpy.linalg import inv

    nN, nM = 1, 1  # interior nodes / boundary nodes with known temperature

    # Allocate the system matrices.
    Uin = zeros((nN, nN))  # internode conductances, W/K (none for one node)
    F = zeros((nN, nM))    # boundary conductances, W/K
    C = zeros((nN, 1))     # nodal capacitances, J/K

    F[0, 0] = F_in  # room node <-> ambient air
    C[0] = C_in     # room thermal mass

    # Complete the symmetric conductance matrix: off-diagonals are negative
    # conductances, the diagonal collects row sum + boundary terms + C/dt.
    U = -Uin - Uin.T
    row_sums = -npsum(U, 1)
    for node in range(nN):
        U[node, node] = row_sums[node] + npsum(F[node, ]) + C[node] / dt
    Uinv = inv(U)

    return (Uinv, F, C, nN, nM)
def mU1F1C2(U_in, F_in, C_in, C_slab, dt):
    """Two-node room model with heating/cooling applied away from the air node
    (e.g. a radiant slab system).

    Node 0: room air (capacitance C_in), coupled to ambient (boundary 0).
    Node 1: under-slab node (capacitance C_slab), coupled to node 0.

    Args:
        U_in:   conductance under slab to slab surface (W/K)
        F_in:   conductance room air to ambient boundary (W/K)
        C_in:   air capacitance (J/K)
        C_slab: slab capacitance (J/K)
        dt:     timestep (s)

    Returns:
        (Uinv, F, C, nN, nM)
    """
    # Dependencies are imported locally, matching this module's style.
    from numpy import zeros
    from numpy import sum as npsum
    from numpy.linalg import inv

    nN, nM = 2, 1  # interior nodes / boundary nodes with known temperature

    # Allocate the system matrices.
    Uin = zeros((nN, nN))  # internode conductances (upper triangle), W/K
    F = zeros((nN, nM))    # boundary conductances, W/K
    C = zeros((nN, 1))     # nodal capacitances, J/K

    Uin[0, 1] = U_in  # air node <-> under-slab node
    F[0, 0] = F_in    # air node <-> ambient
    C[0] = C_in       # air thermal mass
    C[1] = C_slab     # slab thermal mass

    # Complete the symmetric conductance matrix: off-diagonals are negative
    # conductances, the diagonal collects row sum + boundary terms + C/dt.
    U = -Uin - Uin.T
    row_sums = -npsum(U, 1)
    for node in range(nN):
        U[node, node] = row_sums[node] + npsum(F[node, ]) + C[node] / dt
    Uinv = inv(U)

    return (Uinv, F, C, nN, nM)
def mUxFxCx(Uin, F, C, dt):
    """Generic RC-network model for arbitrary node and boundary counts.

    Args:
        Uin: internode conductance matrix, upper triangle only, (nN x nN) (W/K)
        F:   conductances to known-temperature boundary nodes, (nN x nM) (W/K)
        C:   nodal capacitance vector, (nN x 1) (J/K)
        dt:  timestep (s)

    Returns:
        (Uinv, F, C, nN, nM)
    """
    # Dependencies are imported locally, matching this module's style.
    # (Unused 'zeros' import from the original removed.)
    from numpy import sum as npsum
    from numpy.linalg import inv
    nN = len(Uin)    # number of nodes
    # BUG FIX: the original never assigned nM, so the return statement
    # raised NameError.  The boundary count is the number of columns of F.
    nM = F.shape[1]  # number of nodes with known temperatures
    # U-matrix completion, and its inverse
    U = -Uin - Uin.T  # U is symmetrical, non-diagonals are -ve
    s = -npsum(U, 1)
    for i in range(0, nN):
        U[i, i] = s[i] + npsum(F[i, ]) + C[i] / dt
    Uinv = inv(U)
    return (Uinv, F, C, nN, nM)
| StarcoderdataPython |
11394613 | import sys
sys.path.append('../src')
import unittest
from posture import PostureWatcher
class TestPostureWatcherClass(unittest.TestCase):
    """Smoke tests for PostureWatcher: attribute presence and default values."""

    def setUp(self):
        # Fresh watcher per test; tearDown stops it (presumably releasing
        # the capture device / worker thread -- confirm against PostureWatcher).
        self.pw = PostureWatcher()

    def test_class_has_correct_attributes(self):
        """Every public attribute of the watcher exists after construction."""
        self.assertTrue(hasattr(self.pw, 'detector'))
        self.assertTrue(hasattr(self.pw, 'deviation'))
        self.assertTrue(hasattr(self.pw, 'cap'))
        self.assertTrue(hasattr(self.pw, 'last_fps_calc_timestamp'))
        self.assertTrue(hasattr(self.pw, 'base_posture'))
        self.assertTrue(hasattr(self.pw, 'deviation_algorithm'))
        self.assertTrue(hasattr(self.pw, 'deviation_interval'))
        self.assertTrue(hasattr(self.pw, 'deviation_adjustment'))
        self.assertTrue(hasattr(self.pw, 'thread'))
        self.assertTrue(hasattr(self.pw, 'debug'))
        self.assertTrue(hasattr(self.pw, 'logger'))

    def test_class_has_default_parameters(self):
        """Constructor defaults match the documented configuration."""
        self.assertEqual(self.pw.deviation_algorithm, 2)
        self.assertEqual(self.pw.deviation_interval, 5)
        self.assertEqual(self.pw.deviation_adjustment, 5)
        self.assertEqual(self.pw.deviation.deviation_threshold, 25)
        self.assertEqual(self.pw.deviation.max_buffer, 3)

    def tearDown(self):
        # Stop the watcher so resources don't leak between tests.
        self.pw.stop()
def suite():
    """Build a suite containing only the default-parameter test.

    BUG FIX: the original added 'test_default_parameters', a method that does
    not exist on TestPostureWatcherClass (the actual name is
    'test_class_has_default_parameters'), so running the suite produced a
    failing placeholder test instead of executing the intended test.
    """
    test_suite = unittest.TestSuite()
    test_suite.addTest(TestPostureWatcherClass('test_class_has_default_parameters'))
    return test_suite
if __name__ == '__main__':
    # unittest.main() runs the tests and then calls sys.exit(), so the
    # TextTestRunner lines that followed it in the original were unreachable
    # dead code; they have been removed (runtime behavior is unchanged).
    unittest.main()
| StarcoderdataPython |
4991137 | <filename>metaprogramming/descriptor_demo.py<gh_stars>0
'Simple script showing how to use descriptors for type checking'
class descriptor:
    """Data descriptor that logs every access and type-checks assignment.

    The value itself is stored in the owning instance's __dict__ under
    ``self.name``; because this is a *data* descriptor (it defines __set__),
    attribute access still routes through it rather than the instance dict.
    """

    def __init__(self, name, inst_typ):
        """Remember the attribute name and the type assignments must match."""
        self.name = name
        self.inst_type = inst_typ

    def __get__(self, instance, cls):
        # BUG FIX: accessing the attribute on the class itself (instance is
        # None) crashed with AttributeError in the original; the descriptor
        # protocol convention is to return the descriptor object instead.
        if instance is None:
            return self
        print("getting inst var", self.name)
        return instance.__dict__[self.name]

    def __delete__(self, instance):
        print("deleting inst var", self.name)
        del instance.__dict__[self.name]

    def __set__(self, instance, val):
        print("setting inst var {} to val {}".format(self.name, val))
        if not isinstance(val, self.inst_type):
            # Error payload kept identical to the original demo output.
            raise TypeError(val, 'is not of type ', self.inst_type)
        instance.__dict__[self.name] = val
class Stock:
    """Demo class whose 'shares' attribute is guarded by the descriptor above."""

    # Attribute names assigned positionally by __init__, in this order.
    _fields = ['name', 'shares', 'price']

    def __init__(self, *args, **kwargs):
        # Pair each positional argument with its field name; extra args are
        # silently ignored by zip, and kwargs are accepted but unused.
        for k, v in zip(self.__class__._fields, args):
            setattr(self, k, v)

    # Type-checked attribute: assigning anything but an int raises TypeError.
    shares = descriptor('shares', int)
# Demo: constructing a Stock routes the 'shares' assignment through the
# descriptor's __set__ (printing a log line and type-checking the int).
s1 = Stock('GOOG', 100, 490.1)
print('trying to set shares to a string and catching a TypeError')
try:
    s1.shares = 'foo'
except TypeError as e:
    print('found exception ', e)
| StarcoderdataPython |
5061639 | #@title Apache 2.0 License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import wx.adv
from edza_web import *
from edza_req import *
class MyPanel(wx.Panel):
    """Main UI panel of the 'Edza' voice assistant.

    Builds the widget layout (animated face, conversation transcript, hint
    line, action buttons), initialises the pyttsx3 text-to-speech engine and
    the AIML chat kernel as module-level globals, and wires button handlers
    that each run a listen -> Google speech recognition -> respond cycle.

    NOTE(review): relies on names star-imported from edza_web / edza_req
    (wx, sr, winsound, pyttsx3, aiml, os, wikipedia, wolframalpha,
    MyBrowser) -- confirm against those modules.  Source indentation was
    mangled in the dump; block structure reconstructed -- verify.
    """

    def __init__(self, parent, id):
        wx.Panel.__init__(self, parent, id)
        self.SetBackgroundColour("white")
        # Animated assistant face (GIF), also kept in a module-level global.
        gif_fname = wx.adv.Animation("imgs/edza_face.gif")
        global gif
        gif = wx.adv.AnimationCtrl(self, id, gif_fname, pos=(1, 1))
        gif.GetAnimation()
        self.gif = gif
        self.Show()
        # Read-only conversation transcript.
        self.conwin = wx.TextCtrl(self, id=wx.ID_ANY, value="Hi, How May I Help You.", pos=(500, 130),
                                  size=(400, 200),
                                  style=wx.TE_MULTILINE | wx.TE_READONLY | wx.TE_CENTER | wx.BORDER_NONE)
        f1 = wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Lucida Sans')
        self.conwin.SetFont(f1)
        # Single-line status/hint area ("Listening...", error hints, ...).
        self.hinwin = wx.TextCtrl(self, id=wx.ID_ANY, value=" ", pos=(500, 330),
                                  size=(400, 20), style=wx.TE_READONLY | wx.BORDER_NONE | wx.TE_CENTER)
        self.hinwin.SetFont(f1)
        # Chat (microphone) button.
        bmp = wx.Bitmap("imgs/microphone (1).png", wx.BITMAP_TYPE_ANY)
        self.button = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp,
                                      size=(bmp.GetWidth() + 10, bmp.GetHeight() + 10), pos=(500, 400))
        self.button.Bind(wx.EVT_BUTTON, self.on_button)
        # Footer social icons (all bound to the chat handler in the original).
        bmp2 = wx.Bitmap("imgs/git.png", wx.BITMAP_TYPE_ANY)
        self.button2 = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp2,
                                       size=(32, 32), pos=(950, 527))
        self.button2.Bind(wx.EVT_BUTTON, self.on_button)
        self.lbl = wx.StaticText(self, label=" Chat", pos=(500, 445),
                                 size=(40, 32))
        # Wolfram|Alpha fact-search button.
        bmp3 = wx.Bitmap("imgs/search.png", wx.BITMAP_TYPE_ANY)
        self.but_fact = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp3,
                                        size=(42, 42), pos=(625, 400))
        self.but_fact.Bind(wx.EVT_BUTTON, self.on_fact)
        self.lbl2 = wx.StaticText(self, label="Fact Search", pos=(615, 445),
                                  size=(70, 32))
        # Google-search button.
        bmp6 = wx.Bitmap("imgs/google.png", wx.BITMAP_TYPE_ANY)
        self.button6 = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp6,
                                       size=(42, 42), pos=(745, 400))
        self.button6.Bind(wx.EVT_BUTTON, self.on_google)
        self.lbl3 = wx.StaticText(self, label="Google Search", pos=(725, 445),
                                  size=(80, 32))
        # Wikipedia-summary button.
        bmp7 = wx.Bitmap("imgs/wikipedia.png", wx.BITMAP_TYPE_ANY)
        self.button7 = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp7,
                                       size=(42, 42), pos=(855, 400))
        self.button7.Bind(wx.EVT_BUTTON, self.on_wiki)
        self.lbl4 = wx.StaticText(self, label="Wikipedia", pos=(850, 445),
                                  size=(70, 32))
        bmp4 = wx.Bitmap("imgs/twitter.png", wx.BITMAP_TYPE_ANY)
        self.button4 = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp4,
                                       size=(32, 32), pos=(918, 527))
        self.button4.Bind(wx.EVT_BUTTON, self.on_button)
        bmp5 = wx.Bitmap("imgs/facebook.png", wx.BITMAP_TYPE_ANY)
        self.button5 = wx.BitmapButton(self, id=wx.ID_ANY, bitmap=bmp5,
                                       size=(32, 32), pos=(886, 527))
        self.button5.Bind(wx.EVT_BUTTON, self.on_button)
        # Static logo images (top-right corner).
        bmp8 = wx.Bitmap("imgs/icons8-mind-map-96.png", wx.BITMAP_TYPE_ANY)
        wx.StaticBitmap(self, -1, bmp8, (880, 0), (bmp8.GetWidth(), bmp8.GetHeight()))
        bmp9 = wx.Bitmap("imgs/logotext.png", wx.BITMAP_TYPE_ANY)
        wx.StaticBitmap(self, -1, bmp9, (790, 30), (bmp9.GetWidth(), bmp9.GetHeight()))
        # Text-to-speech engine, shared as a module-level global.
        global engine
        engine = pyttsx3.init()
        voices = engine.getProperty('voices')
        engine.setProperty('voice', voices[0].id)
        volume = engine.getProperty('volume')
        engine.setProperty('volume', 10.0)
        # AIML chat kernel; a cached compiled brain is loaded when present,
        # otherwise the AIML sources are learned and the brain is saved.
        global kernel
        kernel = aiml.Kernel()
        if os.path.isfile("bot_brain.brn"):
            kernel.bootstrap(brainFile="bot_brain.brn")
        else:
            kernel.bootstrap(learnFiles="brnld.xml", commands="load edza brain")
            kernel.saveBrain("bot_brain.brn")

    def on_button(self, event):
        """Chat handler: endless listen -> recognise -> AIML reply -> speak loop.

        NOTE(review): the inner function recurses into itself from the
        except branch while also looping forever -- unbounded recursion on
        repeated recognition failures; flagged, behavior preserved.
        """
        def chat_class():
            while True:
                r = sr.Recognizer()
                with sr.Microphone() as source:
                    winsound.Beep(500, 500)  # start-of-listening beep
                    r.adjust_for_ambient_noise(source)
                    self.hinwin.SetValue(" ")
                    self.hinwin.AppendText("Listening...")
                    audio = r.listen(source)
                winsound.Beep(500, 250)  # end-of-listening double beep
                winsound.Beep(700, 250)
                try:
                    self.hinwin.SetValue(" ")
                    self.conwin.AppendText("\n\nYou: " + r.recognize_google(audio))
                except sr.UnknownValueError:
                    self.hinwin.AppendText("Sorry, I didn't get you.")
                    chat_class()
                rep = kernel.respond(r.recognize_google(audio))
                self.conwin.AppendText("\n\n" + rep)
                self.gif.Play()  # animate the face while speaking
                engine.say(rep)
                engine.runAndWait()
                self.gif.Play()
        chat_class()

    def on_wiki(self, event):
        """Wikipedia handler: recognise a phrase, read its summary aloud."""
        r = sr.Recognizer()
        with sr.Microphone() as source:
            winsound.Beep(500, 500)
            r.adjust_for_ambient_noise(source)
            self.hinwin.SetValue(" ")
            self.hinwin.AppendText("Listening...")
            audio = r.listen(source)
        winsound.Beep(500, 250)
        winsound.Beep(700, 250)
        try:
            self.hinwin.SetValue(" ")
            self.conwin.AppendText("\n\nYou: " + r.recognize_google(audio))
        except sr.UnknownValueError:
            self.hinwin.AppendText("Sorry, I didn't get you. Please try again")
        wiki_dat = wikipedia.summary(r.recognize_google(audio))
        self.conwin.AppendText("\n\n" + wiki_dat)
        self.gif.Play()
        engine.say(wiki_dat)
        engine.runAndWait()
        self.gif.Stop()

    def on_fact(self, event):
        """Fact-search handler: recognise a query, ask Wolfram|Alpha, speak it."""
        r = sr.Recognizer()
        with sr.Microphone() as source:
            winsound.Beep(500, 500)
            r.adjust_for_ambient_noise(source)
            self.hinwin.SetValue(" ")
            self.hinwin.AppendText("Listening...")
            audio = r.listen(source)
        winsound.Beep(500, 250)
        winsound.Beep(700, 250)
        try:
            self.hinwin.SetValue(" ")
            self.conwin.AppendText("\n\nYou: " + r.recognize_google(audio))
        except sr.UnknownValueError:
            self.hinwin.AppendText("Sorry, I didn't get you. Please repeat that again")
            pass
        # NOTE(review): hard-coded API credential committed to source.
        app_id = "GJRQVK-GY36XW7K8H"
        client = wolframalpha.Client(app_id)
        res = client.query(r.recognize_google(audio))
        answer = next(res.results).text
        self.conwin.AppendText("\n\n" + answer)
        self.gif.Play()
        engine.say(answer)
        engine.runAndWait()
        self.gif.Stop()

    def on_google(self, event):
        """Google handler: recognise a query and open a browser with results."""
        r = sr.Recognizer()
        with sr.Microphone() as source:
            winsound.Beep(500, 500)
            r.adjust_for_ambient_noise(source)
            self.hinwin.SetValue(" ")
            self.hinwin.AppendText("Listening...")
            audio = r.listen(source)
        winsound.Beep(500, 250)
        winsound.Beep(700, 250)
        try:
            self.hinwin.SetValue(" ")
            self.conwin.AppendText("\n\nYou: " + r.recognize_google(audio))
        except sr.UnknownValueError:
            self.hinwin.AppendText("Sorry, I didn't get you. Please repeat that again")
            pass
        # NOTE(review): a __main__ guard inside an event handler is unusual --
        # it only opens the browser when the module was run as a script, and
        # it creates a second wx.App; behavior preserved as written.
        if __name__ == '__main__':
            app = wx.App()
            dialog = MyBrowser(None, -1)
            req = "https://google.co.in/search?q=" + r.recognize_google(audio)
            dialog.browser.LoadURL(req)
            dialog.Show()
            app.MainLoop()
def splash_scr():
    """Display the startup splash screen and return it so the caller can
    later Destroy() it once the main frame is up."""
    artwork = wx.Bitmap('imgs/splash_screen.png')
    screen = wx.adv.SplashScreen(
        artwork,
        wx.adv.SPLASH_CENTER_ON_SCREEN | wx.adv.SPLASH_NO_TIMEOUT,
        0,
        None,
        -1,
    )
    screen.Show()
    return screen
if __name__ == "__main__":
    # Application entry point: show the splash screen, build the fixed-size
    # main frame hosting MyPanel, then hand control to the wx event loop.
    app = wx.App()
    splashsc = splash_scr()
    frame = wx.Frame(None,
                     pos=wx.DefaultPosition, size=wx.Size(1000, 600),
                     style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.HELP |
                     wx.CLOSE_BOX | wx.CLIP_CHILDREN,
                     title="Edza 3.0")
    frame.SetIcon(wx.Icon("imgs/icons8-mind-map-48.ico"))
    MyPanel(frame, -1)
    frame.Show(True)
    # Tear down the splash only after the main window is visible.
    splashsc.Destroy()
    app.MainLoop()
| StarcoderdataPython |
1676000 | <filename>file/read-lines-between-two-values/main-find.py
#!/usr/bin/env python3
# Demo: print the lines sitting between the 'values' and 'total' markers.
text = '''DATA_out file
values
DATA_LINE 1
DATA_LINE 2
DATA_LINE 3
DATA_LINE 4
total
'''

marker = 'values'
start = text.find(marker)
end = text.find('total', start)
if min(start, end) > -1:
    # Skip past the opening marker itself before slicing.
    start += len(marker)
    print(text[start:end])
| StarcoderdataPython |
3499798 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods to get information about system DNS servers."""
__author__ = '<EMAIL> (<NAME>)'
import glob
import os
import subprocess
import sys
import time
if __name__ == '__main__':
sys.path.append('../../third_party')
# 3rd party libraries
import dns.resolver
# local libs
from . import addr_util
MAX_LEASE_AGE = 24 * 3600
MIN_LEASE_FILE_SIZE = 1024
def GetAllSystemNameServers():
    """Return the deduplicated union of current and DHCP-assigned DNS servers.

    NOTE(review): also prints the list unconditionally -- looks like leftover
    debug output; confirm before removing.
    """
    servers = list(set(GetCurrentNameServers() + GetAssignedNameServers()))
    print(servers)
    return servers
def GetCurrentNameServers():
    """Return the list of DNS server IPs used by this host (via dnspython).

    Falls back to parsing `ipconfig /all` on Windows, where dnspython is
    known to misread right-to-left-language system configurations.
    """
    try:
        servers = dns.resolver.Resolver().nameservers
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        print("Unable to get list of internal DNS servers.")
        servers = []
    # dnspython does not always get things right on Windows, particularly in
    # versions with right-to-left languages. Fall back to ipconfig /all.
    if not servers and sys.platform[:3] == 'win':
        return _GetNameServersFromWinIpConfig()
    return servers
def GetAssignedNameServers():
    """Return the DNS servers assigned by DHCP, per-platform."""
    if sys.platform == 'darwin':
        return _GetNameServersFromMacIpConfig()
    return _GetNameServersFromDhclient()
def _GetNameServersFromMacIpConfig():
    """Return DHCP-assigned DNS servers on macOS by querying `ipconfig`."""
    servers = []
    # BUG FIX: Popen().stdout.read() returns *bytes* on Python 3; the
    # original passed them to int()/str operations built for str -- decode.
    ifcount = subprocess.Popen(['ipconfig', 'ifcount'],
                               stdout=subprocess.PIPE).stdout.read().decode()
    # BUG FIX: the original computed `int(x) - 1` over range(n), generating a
    # bogus "en-1" interface and skipping the last one; enumerate en0..en{n-1}.
    interfaces = ["en%d" % i for i in range(int(ifcount))]
    for iface in interfaces:
        output = subprocess.Popen(['ipconfig', 'getpacket', iface],
                                  stdout=subprocess.PIPE).stdout.read().decode()
        for line in output.split('\n'):
            if 'domain_name_server' in line:
                servers.extend(addr_util.ExtractIPsFromString(line))
    return servers
def _GetNameServersFromWinIpConfig():
    """Return a list of DNS servers via `ipconfig /all` (Windows only)."""
    servers = []
    # BUG FIX: Popen().stdout.read() returns *bytes* on Python 3, and
    # bytes.split('\r\n') raises TypeError; decode before splitting.
    output = subprocess.Popen(['ipconfig', '/all'],
                              stdout=subprocess.PIPE).stdout.read().decode()
    for line in output.split('\r\n'):
        if 'DNS Servers' in line:
            print(("ipconfig: %s" % line))
            servers.extend(addr_util.ExtractIPsFromString(line))
    return servers
def _GetNameServersFromDhclient():
    """Parse the newest dhclient lease file for assigned DNS servers.

    Returns an empty list when no lease file exists or no
    'option domain-name-servers' line is present.
    """
    path = _FindNewestDhclientLeaseFile()
    if not path:
        return []
    # BUG FIX: ns_string was unbound when no line matched, raising
    # NameError; initialize it.  Also close the file deterministically.
    ns_string = None
    with open(path) as lease_file:
        for line in lease_file:
            if 'option domain-name-servers' in line:
                # Keep the LAST matching line (most recent lease entry).
                ns_string = line
    if ns_string:
        return addr_util.ExtractIPsFromString(ns_string)
    return []
def _FindNewestDhclientLeaseFile():
    """Return the path of the newest plausible dhclient lease file, or None.

    A candidate must be at least MIN_LEASE_FILE_SIZE bytes, modified within
    MAX_LEASE_AGE seconds, and readable by this process.
    """
    paths = [
        '/var/lib/dhcp3/dhclient.*leases'
    ]
    found = []
    for path in paths:
        for filename in glob.glob(path):
            if os.path.getsize(filename) < MIN_LEASE_FILE_SIZE:
                continue
            if time.time() - os.path.getmtime(filename) > MAX_LEASE_AGE:
                continue
            # Readability probe.  BUG FIX: was a bare `except:` around a
            # manual open/close; narrowed to OSError and closed via `with`.
            try:
                with open(filename, 'rb'):
                    pass
            except OSError:
                continue
            found.append(filename)
    if found:
        return sorted(found, key=os.path.getmtime)[-1]
    return None
if __name__ == '__main__':
    # Ad-hoc manual check: print what each lookup strategy reports.
    print(("Current: %s" % GetCurrentNameServers()))
    print(("Assigned: %s" % GetAssignedNameServers()))
    print(("System: %s" % GetAllSystemNameServers()))
| StarcoderdataPython |
6487241 | <gh_stars>1-10
import os
import numpy as np
from scipy.sparse import csr_matrix
from perform.constants import REAL_TYPE, RES_NORM_PRIM_DEFAULT
from perform.solution.solution_phys import SolutionPhys
class SolutionInterior(SolutionPhys):
"""Physical solution of interior cells.
This SolutionPhys represents the interior finite volume cells of a SolutionDomain,
i.e. all cells except the boundary ghost cells. There is only one SolutionInterior per SolutionDomain.
A few constructs, such as the source term, residual, residual Jacobian, etc., are only meaningful for the interior
cells and so are represented here specifically.
Additionally, this class provides member methods which handle the output of snapshot matrices and residual norms,
as there are also only meaningful for the interior cells.
Args:
gas: GasModel associated with the SolutionDomain with which this SolutionPhys is associated.
sol_prim_in: NumPy array of the primitive state profiles that this SolutionPhys represents.
solver: SystemSolver containing global simulation parameters.
num_cells: Number of finite volume cells represented by this SolutionPhys.
num_reactions: Number of reactions to be modeled.
time_int: TimeIntegrator associated with the SolutionDomain with which this SolutionInterior is associated.
Attributes:
wf:
NumPy array of the rate-of-progress profiles for the num_reactions reactions,
if modeling reactions by a finite-rate reaction model.
reaction_source:
NumPy array of the reaction source term profiles for the num_species species transport equations.
heat_release: NumPy array of the unsteady heat release profile.
rhs: NumPy array of the evaluation of the right-hand side function of the semi-discrete governing ODE.
sol_hist_cons: List of NumPy arrays of the prior time_int.time_order conservative state profiles.
sol_hist_prim: List of NumPy arrays of the prior time_int.time_order primitive state profiles.
rhs_hist: List of NumPy arrays of the prior time_int.time_order RHS function profiles.
prim_snap: NumPy array of the primitive state snapshot array to be written to disk.
cons_snap: NumPy array of the conservative state snapshot array to be written to disk.
reaction_source_snap: NumPy array of the source term snapshot array to be written to disk.
rhs_snap: NumPy array of the RHS function snapshot array to be written to disk.
res: NumPy array of the full-discrete residual function profile.
res_norm_l2: L2 norm of the Newton iteration linear solve residual, normalized.
res_norm_l1: L1 norm of the Newton iteration linear solve residual, normalized.
res_norm_hist: NumPy array of the time history of the L2 and L1 linear solver residual norm.
jacob_dim_first: Leading dimension of the residual Jacobian.
jacob_dim_second: Trailing dimension of the residual Jacobian.
jacob_row_idxs:
NumPy array of row indices within the 2D residual Jacobian at which the
flattened 3D residual Jacobian will be emplaced.
jacob_col_idxs:
NumPy array of column indices within the 2D residual Jacobian at which the
flattened 3D residual Jacobian will be emplaced.
d_sol_norm_l2: L2 norm of the primitive solution change, normalized.
d_sol_norm_l1: L1 norm of the primitive solution change, normalized.
d_sol_norm_hist: NumPy array of the time history of the L2 and L1 primitive solution change norm.
"""
def __init__(self, gas, sol_prim_in, solver, num_cells, num_reactions, time_int):
super().__init__(gas, num_cells, sol_prim_in=sol_prim_in, time_order=time_int.time_order)
gas = self.gas_model
if num_reactions > 0:
self.wf = np.zeros((num_reactions, num_cells), dtype=REAL_TYPE)
self.reaction_source = np.zeros((gas.num_species_full, num_cells), dtype=REAL_TYPE)
self.heat_release = np.zeros(num_cells, dtype=REAL_TYPE)
self.rhs = np.zeros((gas.num_eqs, num_cells), dtype=REAL_TYPE)
# Add bulk velocity and update state if requested
if solver.vel_add != 0.0:
self.sol_prim[1, :] += solver.vel_add
self.update_state(from_prim=True)
# indicate whether time integrator needs a cold or hot start
if sol_prim_in.ndim == 2:
time_int.cold_start_iter = 1
else:
time_int.cold_start_iter = sol_prim_in.shape[-1]
# RHS storage for multi-stage schemes
self.rhs_hist = [self.rhs.copy()] * (time_int.time_order + 1)
# Snapshot storage matrices, store initial condition
if solver.prim_out:
self.prim_snap = np.zeros((gas.num_eqs, num_cells, solver.num_snaps + 1), dtype=REAL_TYPE)
self.prim_snap[:, :, 0] = self.sol_prim.copy()
if solver.cons_out:
self.cons_snap = np.zeros((gas.num_eqs, num_cells, solver.num_snaps + 1), dtype=REAL_TYPE)
self.cons_snap[:, :, 0] = self.sol_cons.copy()
# These don't include the profile associated with the final solution
if solver.source_out:
self.reaction_source_snap = np.zeros((gas.num_species_full, num_cells, solver.num_snaps), dtype=REAL_TYPE)
if solver.hr_out:
self.heat_release_snap = np.zeros((num_cells, solver.num_snaps), dtype=REAL_TYPE)
if solver.rhs_out:
self.rhs_snap = np.zeros((gas.num_eqs, num_cells, solver.num_snaps), dtype=REAL_TYPE)
if (time_int.time_type == "implicit") or (solver.run_steady):
# Norm normalization constants
if (len(solver.res_norm_prim) == 1) and (solver.res_norm_prim[0] is None):
solver.res_norm_prim = [None] * gas.num_eqs
else:
assert len(solver.res_norm_prim) == gas.num_eqs
for var_idx in range(gas.num_eqs):
if solver.res_norm_prim[var_idx] is None:
# 0: pressure, 1: velocity, 2: temperature, >=3: species
solver.res_norm_prim[var_idx] = RES_NORM_PRIM_DEFAULT[min(var_idx, 3)]
# Residual norm storage
if time_int.time_type == "implicit":
self.res = np.zeros((gas.num_eqs, num_cells), dtype=REAL_TYPE)
self.res_norm_l2 = 0.0
self.res_norm_l1 = 0.0
self.res_norm_hist = np.zeros((solver.num_steps, 2), dtype=REAL_TYPE)
if (time_int.dual_time) and (time_int.adapt_dtau):
self.srf = np.zeros(num_cells, dtype=REAL_TYPE)
# CSR matrix indices
num_elements = gas.num_eqs ** 2 * num_cells
self.jacob_dim_first = gas.num_eqs * num_cells
self.jacob_dim_second = self.jacob_dim_first
row_idxs_center = np.zeros(num_elements, dtype=np.int32)
col_idxs_center = np.zeros(num_elements, dtype=np.int32)
row_idxs_upper = np.zeros(num_elements - gas.num_eqs ** 2, dtype=np.int32)
col_idxs_upper = np.zeros(num_elements - gas.num_eqs ** 2, dtype=np.int32)
row_idxs_lower = np.zeros(num_elements - gas.num_eqs ** 2, dtype=np.int32)
col_idxs_lower = np.zeros(num_elements - gas.num_eqs ** 2, dtype=np.int32)
# TODO: definitely a faster way to do this
lin_idx_A = 0
lin_idx_B = 0
lin_idx_C = 0
for i in range(gas.num_eqs):
for j in range(gas.num_eqs):
for k in range(num_cells):
row_idxs_center[lin_idx_A] = i * num_cells + k
col_idxs_center[lin_idx_A] = j * num_cells + k
lin_idx_A += 1
if k < (num_cells - 1):
row_idxs_upper[lin_idx_B] = i * num_cells + k
col_idxs_upper[lin_idx_B] = j * num_cells + k + 1
lin_idx_B += 1
if k > 0:
row_idxs_lower[lin_idx_C] = i * num_cells + k
col_idxs_lower[lin_idx_C] = j * num_cells + k - 1
lin_idx_C += 1
self.jacob_row_idxs = np.concatenate((row_idxs_center, row_idxs_lower, row_idxs_upper))
self.jacob_col_idxs = np.concatenate((col_idxs_center, col_idxs_lower, col_idxs_upper))
# "Steady" convergence measures
if solver.run_steady:
self.d_sol_norm_l2 = 0.0
self.d_sol_norm_l1 = 0.0
self.d_sol_norm_hist = np.zeros((solver.num_steps, 2), dtype=REAL_TYPE)
def calc_sol_jacob(self, inverse, samp_idxs=np.s_[:]):
"""Compute Jacobian of conservative solution w/r/t primitive solution, or vice versa.
Utility function for computing Gamma or Gamma^-1 for residual Jacobian calculations.
Updates density and enthalpy derivatives, as this is required for Gamma or Gamma^-1
Args:
inverse: Boolean flag. If True, calculate Gamma^-1. If False, calculate Gamma.
calc_enthalpies: Boolean flag indicating whether to calculate species and stagnation enthalpy.
calc_derivs: Boolean flag indicating whether to calculate density and stagnation enthalpy derivatives.
Returns:
3D NumPy array of the solution Jacobian.
"""
self.update_density_enthalpy_derivs()
if inverse:
sol_jacob = self.calc_d_sol_prim_d_sol_cons(samp_idxs=samp_idxs)
else:
sol_jacob = self.calc_d_sol_cons_d_sol_prim(samp_idxs=samp_idxs)
return sol_jacob
def calc_d_sol_prim_d_sol_cons(self, samp_idxs=np.s_[:]):
    """Compute the Jacobian of the primitive state w/r/t/ the conservative state.

    This Jacobian is calculated when computing the approximate residual Jacobian w/r/t the conservative
    state when dual_time == False. It is also implicitly used when calculating the Roe dissipation term,
    but is not explicitly calculated there.
    Assumes that the stagnation enthalpy, derivatives of density,
    and derivatives of stagnation enthalpy have already been computed.
    This appears as Gamma^{-1} in the solver theory documentation, please refer to the solver theory documentation
    for a detailed derivation of this matrix.

    Args:
        samp_idxs:
            Either a NumPy slice or NumPy array for selecting sampled cells to compute the Jacobian at.
            Used for hyper-reduction of projection-based reduced-order models.

    Returns:
        3D NumPy array of the Jacobian of the primitive state w/r/t the conservative state,
        shaped (num_eqs, num_eqs, num_cells).
    """
    # TODO: some repeated calculations
    # TODO: add option for preconditioning d_rho_d_press
    gas = self.gas_model
    # Initialize Jacobian
    # A slice selects all cells; an index array selects the sampled subset
    if type(samp_idxs) is slice:
        num_cells = self.num_cells
    else:
        num_cells = samp_idxs.shape[0]
    gamma_matrix_inv = np.zeros((gas.num_eqs, gas.num_eqs, num_cells))
    # For clarity
    rho = self.sol_cons[0, samp_idxs]
    vel = self.sol_prim[1, samp_idxs]
    mass_fracs = self.sol_prim[3:, samp_idxs]
    d_rho_d_press = self.d_rho_d_press[samp_idxs]
    d_rho_d_temp = self.d_rho_d_temp[samp_idxs]
    d_rho_d_mass_frac = self.d_rho_d_mass_frac[:, samp_idxs]
    d_enth_d_press = self.d_enth_d_press[samp_idxs]
    d_enth_d_temp = self.d_enth_d_temp[samp_idxs]
    d_enth_d_mass_frac = self.d_enth_d_mass_frac[:, samp_idxs]
    h0 = self.h0[samp_idxs]
    # Some reused terms
    # "d" is the common denominator shared by the density and energy rows
    d = rho * d_rho_d_press * d_enth_d_temp - d_rho_d_temp * (rho * d_enth_d_press - 1.0)
    vel_sq = np.square(vel)
    # Density row
    gamma_matrix_inv[0, 0, :] = (
        rho * d_enth_d_temp
        + d_rho_d_temp * (h0 - vel_sq)
        + np.sum(
            mass_fracs * (d_rho_d_mass_frac * d_enth_d_temp[None, :] - d_rho_d_temp[None, :] * d_enth_d_mass_frac),
            axis=0,
        )
    ) / d
    gamma_matrix_inv[0, 1, :] = vel * d_rho_d_temp / d
    gamma_matrix_inv[0, 2, :] = -d_rho_d_temp / d
    gamma_matrix_inv[0, 3:, :] = (
        d_rho_d_temp[None, :] * d_enth_d_mass_frac - d_rho_d_mass_frac * d_enth_d_temp[None, :]
    ) / d[None, :]
    # Momentum row
    gamma_matrix_inv[1, 0, :] = -vel / rho
    gamma_matrix_inv[1, 1, :] = 1.0 / rho
    # Energy row
    gamma_matrix_inv[2, 0, :] = (
        -d_rho_d_press * (h0 - vel_sq)
        - (rho * d_enth_d_press - 1.0)
        + np.sum(
            mass_fracs
            * (
                (rho * d_rho_d_press)[None, :] * d_enth_d_mass_frac
                + d_rho_d_mass_frac * (rho * d_enth_d_press - 1.0)[None, :]
            ),
            axis=0,
        )
        / rho
    ) / d
    gamma_matrix_inv[2, 1, :] = -vel * d_rho_d_press / d
    gamma_matrix_inv[2, 2, :] = d_rho_d_press / d
    gamma_matrix_inv[2, 3:, :] = (
        -(
            (rho * d_rho_d_press)[None, :] * d_enth_d_mass_frac
            + d_rho_d_mass_frac * (rho * d_enth_d_press - 1.0)[None, :]
        )
        / (rho * d)[None, :]
    )
    # Species row(s)
    gamma_matrix_inv[3:, 0, :] = -mass_fracs / rho[None, :]
    # Species block is diagonal: each mass fraction depends only on its own
    # conservative species density (scaled by 1/rho)
    for i in range(3, gas.num_eqs):
        gamma_matrix_inv[i, i, :] = 1.0 / rho
    return gamma_matrix_inv
def calc_d_sol_cons_d_sol_prim(self, samp_idxs=np.s_[:]):
    """Compute the Jacobian of the conservative state w/r/t/ the primitive state.

    This Jacobian is calculated when computing the residual Jacobian w/r/t the primitive
    state when dual_time == True.
    Assumes that the stagnation enthalpy, derivatives of density,
    and derivatives of stagnation enthalpy have already been computed.
    This appears as Gamma in the solver theory documentation, please refer to
    the solver theory documentation for a detailed derivation of this matrix.

    Args:
        samp_idxs:
            Either a NumPy slice or NumPy array for selecting sampled cells to compute the Jacobian at.
            Used for hyper-reduction of projection-based reduced-order models.

    Returns:
        3D NumPy array of the Jacobian of the conservative state w/r/t the primitive state,
        shaped (num_eqs, num_eqs, num_cells).
    """
    # TODO: add option for preconditioning d_rho_d_press
    gas = self.gas_model
    # Initialize Jacobian
    # A slice selects all cells; an index array selects the sampled subset
    if type(samp_idxs) is slice:
        num_cells = self.num_cells
    else:
        num_cells = samp_idxs.shape[0]
    gamma_matrix = np.zeros((gas.num_eqs, gas.num_eqs, num_cells))
    # For clarity
    rho = self.sol_cons[0, samp_idxs]
    vel = self.sol_prim[1, samp_idxs]
    mass_fracs = self.sol_prim[3:, samp_idxs]
    d_rho_d_press = self.d_rho_d_press[samp_idxs]
    d_rho_d_temp = self.d_rho_d_temp[samp_idxs]
    d_rho_d_mass_frac = self.d_rho_d_mass_frac[:, samp_idxs]
    d_enth_d_press = self.d_enth_d_press[samp_idxs]
    d_enth_d_temp = self.d_enth_d_temp[samp_idxs]
    d_enth_d_mass_frac = self.d_enth_d_mass_frac[:, samp_idxs]
    h0 = self.h0[samp_idxs]
    # Density row
    gamma_matrix[0, 0, :] = d_rho_d_press
    gamma_matrix[0, 2, :] = d_rho_d_temp
    gamma_matrix[0, 3:, :] = d_rho_d_mass_frac
    # Momentum row
    gamma_matrix[1, 0, :] = vel * d_rho_d_press
    gamma_matrix[1, 1, :] = rho
    gamma_matrix[1, 2, :] = vel * d_rho_d_temp
    gamma_matrix[1, 3:, :] = vel[None, :] * d_rho_d_mass_frac
    # Total energy row
    gamma_matrix[2, 0, :] = d_rho_d_press * h0 + rho * d_enth_d_press - 1.0
    gamma_matrix[2, 1, :] = rho * vel
    gamma_matrix[2, 2, :] = d_rho_d_temp * h0 + rho * d_enth_d_temp
    gamma_matrix[2, 3:, :] = h0[None, :] * d_rho_d_mass_frac + rho[None, :] * d_enth_d_mass_frac
    # Species row
    gamma_matrix[3:, 0, :] = mass_fracs[gas.mass_frac_slice, :] * d_rho_d_press[None, :]
    gamma_matrix[3:, 2, :] = mass_fracs[gas.mass_frac_slice, :] * d_rho_d_temp[None, :]
    # Species block: Kronecker delta (i == j) contributes rho on the diagonal,
    # plus the density sensitivity to each mass fraction everywhere
    for i in range(3, gas.num_eqs):
        for j in range(3, gas.num_eqs):
            gamma_matrix[i, j, :] = (i == j) * rho + mass_fracs[i - 3, :] * d_rho_d_mass_frac[j - 3, :]
    return gamma_matrix
def res_jacob_assemble(self, center_block, lower_block, upper_block):
    """Assembles residual Jacobian into a sparse 2D matrix for Newton's method linear solve.

    The components of the residual Jacobian are provided as 3D arrays representing the center, lower,
    and upper block diagonal of the residual Jacobian if the residual were flattened in column-major
    order (i.e. variables first, then cells). This makes populating the components easier, but makes
    calculating the linear solve more tedious. Thus, the residual Jacobian is constructed for a residual
    flattened in row-major order (i.e. cells first, then variables).

    Note that the returned Jacobian is a scipy.sparse matrix; care must be taken when performing math operations
    with this as the resulting object may devolve to a NumPy matrix, which is deprecated. If this problem
    cannot be avoided, use .toarray() to convert the sparse matrix to a NumPy array. This is not ideal as it
    generates a large dense matrix.

    Args:
        center_block: 3D NumPy array of the center block diagonal of column-major residual Jacobian.
        lower_block: 3D NumPy array of the lower block diagonal of column-major residual Jacobian.
        upper_block: 3D NumPy array of the upper block diagonal of column-major residual Jacobian.

    Returns:
        scipy.sparse.csr_matrix of row-major residual Jacobian.
    """
    # TODO: my God, this is still the single most expensive operation
    # How can this be any simpler/faster???
    # Preallocating "jacob_diags" is *slower*
    # Value ordering must match the precomputed (row, col) index arrays:
    # center, then lower, then upper block diagonal, each flattened C-order.
    jacob_diags = np.concatenate((center_block.ravel("C"), lower_block.ravel("C"), upper_block.ravel("C")))
    res_jacob = csr_matrix(
        (jacob_diags, (self.jacob_row_idxs, self.jacob_col_idxs)),
        shape=(self.jacob_dim_first, self.jacob_dim_second),
        dtype=REAL_TYPE,
    )
    return res_jacob
def update_sol_hist(self):
    """Update time history of solution and RHS function for multi-stage time integrators.

    After each physical time iteration the primitive solution, conservative
    solution, and RHS histories are pushed back by one slot and the newest
    entry (index 0) is replaced by a copy of the current profile.
    """
    # Each (history, current profile) pair is shifted and refreshed the same way
    for history, current in (
        (self.sol_hist_cons, self.sol_cons),
        (self.sol_hist_prim, self.sol_prim),
        (self.rhs_hist, self.rhs),
    ):
        history[1:] = history[:-1]
        history[0] = current.copy()
def update_snapshots(self, solver):
    """Update snapshot arrays.

    Adds current solution, source, RHS, etc. profiles to snapshot arrays.
    At the end of a simulation (completed or failed), these will be written to disk.

    Args:
        solver: SystemSolver containing global simulation parameters.
    """
    # Solution snapshots are stored one column ahead of source/RHS snapshots
    snap_col = 1 + int((solver.iter - 1) / solver.out_interval)
    prof_col = snap_col - 1
    if solver.prim_out:
        self.prim_snap[:, :, snap_col] = self.sol_prim
    if solver.cons_out:
        self.cons_snap[:, :, snap_col] = self.sol_cons
    # TODO: need to subsample these profiles
    if solver.source_out:
        self.reaction_source_snap[:, :, prof_col] = self.reaction_source
    if solver.hr_out:
        self.heat_release_snap[:, prof_col] = self.heat_release
    if solver.rhs_out:
        self.rhs_snap[:, :, prof_col] = self.rhs
def write_snapshots(self, solver, intermediate=False, failed=False):
    """Save snapshot matrices to disk after completed/failed simulation.

    Args:
        solver: SystemSolver containing global simulation parameters.
        intermediate: Boolean flag indicating whether these results are intermediate
            (mid-run) results rather than final ones.
        failed: Boolean flag indicating whether a simulation has failed before completion.
    """
    assert not (
        intermediate and failed
    ), "Something went wrong, tried to write intermediate and failed snapshots at same time"
    unsteady_output_dir = solver.unsteady_output_dir
    # Account for failed simulation dump
    # TODO: need to account for non-unity out_interval
    # NOTE(review): a failed run appears to have one fewer stored snapshot
    # column than a completed run, hence the smaller offset -- confirm.
    if failed:
        offset = 1
    else:
        offset = 2
    suffix = solver.sim_type
    if intermediate:
        suffix += "_ITMDT"
        # NOTE(review): out_itmdt_match presumably flags whether the intermediate
        # output interval aligns with out_interval -- confirm semantics.
        if not solver.out_itmdt_match:
            offset -= 1
    elif failed:
        suffix += "_FAILED"
    final_idx = int((solver.iter - 1) / solver.out_interval) + offset
    if solver.prim_out:
        sol_prim_file = os.path.join(unsteady_output_dir, "sol_prim_" + suffix + ".npy")
        np.save(sol_prim_file, self.prim_snap[:, :, :final_idx])
    if solver.cons_out:
        sol_cons_file = os.path.join(unsteady_output_dir, "sol_cons_" + suffix + ".npy")
        np.save(sol_cons_file, self.cons_snap[:, :, :final_idx])
    # Source/RHS snapshots hold one fewer column than the solution snapshots
    if solver.source_out:
        source_file = os.path.join(unsteady_output_dir, "source_" + suffix + ".npy")
        np.save(source_file, self.reaction_source_snap[:, :, : final_idx - 1])
    if solver.hr_out:
        hr_file = os.path.join(unsteady_output_dir, "heat_release_" + suffix + ".npy")
        np.save(hr_file, self.heat_release_snap[:, : final_idx - 1])
    if solver.rhs_out:
        sol_rhs_file = os.path.join(unsteady_output_dir, "rhs_" + suffix + ".npy")
        np.save(sol_rhs_file, self.rhs_snap[:, :, : final_idx - 1])
def delete_itmdt_snapshots(self, solver):
    """Delete intermediate snapshot data files from disk.

    Args:
        solver: SystemSolver containing global simulation parameters.
    """
    out_dir = solver.unsteady_output_dir
    suffix = solver.sim_type + "_ITMDT"
    # (output enabled?, file name prefix) for every intermediate snapshot type
    outputs = (
        (solver.prim_out, "sol_prim_"),
        (solver.cons_out, "sol_cons_"),
        (solver.source_out, "source_"),
        (solver.hr_out, "heat_release_"),
        (solver.rhs_out, "rhs_"),
    )
    for enabled, prefix in outputs:
        if enabled:
            os.remove(os.path.join(out_dir, prefix + suffix + ".npy"))
def write_restart_file(self, solver):
    """Write restart files to disk.

    Restart files contain the primitive and conservative solution histories
    and the currently-associated physical solution time.

    Args:
        solver: SystemSolver containing global simulation parameters.
    """
    # Write restart file to zipped file
    restart_file = os.path.join(solver.restart_output_dir, "restart_file_" + str(solver.restart_iter) + ".npz")
    # NOTE(review): history stacked along the last axis from oldest entry to
    # index 1, excluding the newest (index 0) -- confirm against restart reader
    sol_prim_out = np.stack(self.sol_hist_prim[-1:0:-1], axis=-1)
    sol_cons_out = np.stack(self.sol_hist_cons[-1:0:-1], axis=-1)
    np.savez(restart_file, sol_time=solver.sol_time, sol_prim=sol_prim_out, sol_cons=sol_cons_out)
    # Write iteration number files
    # Fix: renamed local from camelCase "restartIterFile" for consistency
    # with this file's snake_case naming convention
    restart_iter_file = os.path.join(solver.restart_output_dir, "restart_iter.dat")
    with open(restart_iter_file, "w") as f:
        f.write(str(solver.restart_iter) + "\n")
    restart_phys_iter_file = os.path.join(
        solver.restart_output_dir, "restart_iter_" + str(solver.restart_iter) + ".dat"
    )
    with open(restart_phys_iter_file, "w") as f:
        f.write(str(solver.iter) + "\n")
    # Iterate file count, cycling back to 1 when the restart cap is reached
    if solver.restart_iter < solver.num_restarts:
        solver.restart_iter += 1
    else:
        solver.restart_iter = 1
def write_steady_data(self, solver):
    """Write "steady" solve "convergence" norms and current solution profiles to disk.

    The "convergence" norms file provides a history of the steady solve convergence at each iteration,
    while the primitive and conservative solution files are continuously overwritten with the most
    recent solution.

    Args:
        solver: SystemSolver containing global simulation parameters.
    """
    # Write norm data to ASCII file: truncate on the first iteration,
    # append afterwards
    steady_file = os.path.join(solver.unsteady_output_dir, "steady_convergence.dat")
    mode = "w" if solver.iter == 1 else "a"
    # Fix: use a context manager so the handle is closed even if the write fails
    with open(steady_file, mode) as f:
        out_string = ("%8i %18.14f %18.14f\n") % (solver.time_iter - 1, self.d_sol_norm_l2, self.d_sol_norm_l1)
        f.write(out_string)
    # Write field data (overwritten every call with the latest profiles)
    sol_prim_file = os.path.join(solver.unsteady_output_dir, "sol_prim_steady.npy")
    np.save(sol_prim_file, self.sol_prim)
    sol_cons_file = os.path.join(solver.unsteady_output_dir, "sol_cons_steady.npy")
    np.save(sol_cons_file, self.sol_cons)
def calc_norms(self, arr_in, norm_facs):
    """Compute average, normalized L1 and L2 norms of an array.

    In reality this computes RMS-style measures: each per-variable norm is
    divided by the number of cells before averaging across variables.
    arr_in is assumed to be in [num_vars, num_cells] order, as is the case
    for, e.g., sol_prim and sol_cons.

    Args:
        arr_in: NumPy array for which norms are to be calculated.
        norm_facs: NumPy array of per-variable factors by which to normalize
            each profile in arr_in before computing norms.

    Returns:
        Average, normalized L2 and L1 norms of arr_in.
    """
    num_cells = arr_in.shape[1]
    # L2 (RMS) measure: mean squared value per variable, normalized, rooted,
    # then averaged across variables
    sq_norms = np.sum(np.square(arr_in), axis=1) / (num_cells * np.square(norm_facs))
    arr_norm_l2 = np.mean(np.sqrt(sq_norms))
    # L1 measure: mean absolute value per variable, normalized, then averaged
    abs_norms = np.sum(np.abs(arr_in), axis=1) / (num_cells * norm_facs)
    arr_norm_l1 = np.mean(abs_norms)
    return arr_norm_l2, arr_norm_l1
def calc_d_sol_norms(self, solver, time_type):
    """Calculate and print solution change norms for "steady" solve "convergence".

    Computes L2 and L1 norms of the change in the primitive solution between
    time steps; this measure is used to decide whether the "steady" solve has
    "converged". Printed values are orders of magnitude (log10 of the norm).

    Args:
        solver: SystemSolver containing global simulation parameters.
        time_type: Either "explicit" or "implicit"; determines which stored
            profiles the solution change is computed from.
    """
    # Solution change over the most recent step
    if time_type == "implicit":
        d_sol = self.sol_hist_prim[0] - self.sol_hist_prim[1]
    else:
        d_sol = self.sol_prim - self.sol_hist_prim[0]
    norm_l2, norm_l1 = self.calc_norms(d_sol, solver.res_norm_prim)
    # Terminal report as orders of magnitude
    order_l2 = np.log10(norm_l2)
    order_l1 = np.log10(norm_l1)
    if solver.stdout:
        print(("%8i: L2: %18.14f, L1: %18.14f") % (solver.time_iter, order_l2, order_l1))
    self.d_sol_norm_l2 = norm_l2
    self.d_sol_norm_l1 = norm_l1
    self.d_sol_norm_hist[solver.iter - 1, :] = [norm_l2, norm_l1]
def calc_res_norms(self, solver, subiter):
    """Calculate and print implicit time integration linear solve residual norms.

    Computes L2 and L1 norms of the Newton's method iterative solve residual;
    this measure is used to decide whether Newton's method has converged.
    Printed values are orders of magnitude (log10 of the norm).

    Args:
        solver: SystemSolver containing global simulation parameters.
        subiter: Time step subiteration number, for terminal output.
    """
    norm_l2, norm_l1 = self.calc_norms(self.res, solver.res_norm_prim)
    # "Steady" solves report solution-change norms instead, so skip printing
    if not solver.run_steady:
        order_l2 = np.log10(norm_l2)
        order_l1 = np.log10(norm_l1)
        if solver.stdout:
            print((str(subiter + 1) + ":\tL2: %18.14f, \tL1: %18.14f") % (order_l2, order_l1))
    self.res_norm_l2 = norm_l2
    self.res_norm_l1 = norm_l1
    self.res_norm_hist[solver.iter - 1, :] = [norm_l2, norm_l1]
| StarcoderdataPython |
3200660 | <reponame>adiitya-dey/datastructures
import logging
import numpy as np
class BasicSort:
    """Sort a sequence in place using an algorithm selected by name.

    Supported (case-insensitive) values for ``sortfn``: "insertsort",
    "bubblesort", "mergesort", "quicksort", "pythonsort", "numpysort".
    The selected sort runs immediately from the constructor.
    """

    def __init__(self, arr, sortfn):
        """Store the array and dispatch to the requested sorting routine.

        Args:
            arr: mutable sequence of mutually comparable items; sorted in place.
            sortfn: name of the sorting algorithm, matched case-insensitively.
        """
        logging.debug("Sorting class is initialized.")
        self.__arr = arr
        # Fix: replaced a stray debug print(sortfn) with a logging call,
        # consistent with the rest of this module
        logging.debug("Requested sort function: %s", sortfn)
        self.__sortfn = sortfn.lower()
        last = len(self.__arr) - 1
        if self.__sortfn == "insertsort":
            self.insertsort()
        elif self.__sortfn == "bubblesort":
            self.bubblesort()
        elif self.__sortfn == "mergesort":
            self.mergesort(0, last)
        elif self.__sortfn == "quicksort":
            self.quicksort(0, last)
        elif self.__sortfn == "pythonsort":
            # NOTE(review): pythonsort()/numpysort() are dispatched here but are
            # not defined in this class -- confirm they exist before selecting them
            self.pythonsort()
        elif self.__sortfn == "numpysort":
            self.numpysort()

    def insertsort(self):
        """Sort the stored array in place using insertion sort (O(n^2) worst case)."""
        logging.debug("Initializing InsertSort Function.")
        arr = self.__arr
        for i in range(1, len(arr)):
            key = arr[i]
            j = i - 1
            # Shift larger elements right until the insertion point is found
            while j >= 0 and arr[j] > key:
                arr[j + 1] = arr[j]
                j = j - 1
            arr[j + 1] = key
        logging.debug("Sorted Array after InsertSort : {}".format(self.__arr))
        logging.debug("InsertSort Function completed successfully.")

    def bubblesort(self):
        """Sort the stored array in place using bubble sort (O(n^2))."""
        logging.debug("Initializing BubbleSort Function.")
        arr = self.__arr
        n = len(arr)
        for _ in range(1, n):
            for i in range(0, n - 1):
                if arr[i] > arr[i + 1]:
                    # Tuple swap replaces the previous temp-variable dance
                    arr[i], arr[i + 1] = arr[i + 1], arr[i]
        logging.debug("Sorted Array after BubbleSort : {}".format(self.__arr))
        logging.debug("BubbleSort Function completed successfully.")

    def mergesort(self, p, r):
        """Recursively merge-sort the stored array between indices p and r (inclusive)."""
        logging.debug("Initializing Merge Sort Function.")
        if p < r:
            q = (p + r) // 2  # midpoint via floor division
            self.mergesort(p, q)
            self.mergesort(q + 1, r)
            self.merge(p, q, r)
        logging.debug("Sorted Array after MergeSort : {}".format(self.__arr))
        logging.debug("MergeSort Function completed successfully.")

    def merge(self, p, q, r):
        """Merge the sorted runs arr[p..q] and arr[q+1..r] in place."""
        arr = self.__arr
        # Copy each run, appending an infinite sentinel so neither run needs
        # an explicit exhaustion check during the merge
        left = arr[p:q + 1] + [float("inf")]
        right = arr[q + 1:r + 1] + [float("inf")]
        i = 0
        j = 0
        for k in range(p, r + 1):
            if left[i] <= right[j]:
                arr[k] = left[i]
                i = i + 1
            else:
                arr[k] = right[j]
                j = j + 1

    def quicksort(self, p, r):
        """Recursively quicksort the stored array between indices p and r (inclusive)."""
        if p < r:
            q = self.partition(p, r)
            self.quicksort(p, q - 1)
            self.quicksort(q + 1, r)
        logging.debug("Sorted Array after Quick Sort : {}".format(self.__arr))
        logging.debug("QuickSort Function completed successfully.")

    def partition(self, p, r):
        """Lomuto partition around pivot arr[r]; returns the pivot's final index."""
        arr = self.__arr
        pivot = arr[r]
        i = p - 1
        for j in range(p, r):
            if arr[j] <= pivot:
                i = i + 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[r] = arr[r], arr[i + 1]
        return i + 1
11263753 | <reponame>rimmartin/cctbx_project<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import
from cctbx.xray import ext
from cctbx.xray import structure_factors
from cctbx import miller
from cctbx import crystal
from cctbx import sgtbx
import cctbx.eltbx.xray_scattering
from cctbx import adptbx
from cctbx import eltbx
from cctbx.array_family import flex
import scitbx.math
from scitbx import matrix
import math
from itertools import count
import types
import sys
import random
from libtbx.utils import count_max, Sorry, Keep
from libtbx.test_utils import approx_equal
from libtbx import group_args
from libtbx.assert_utils import is_string
from cctbx.eltbx.neutron import neutron_news_1992_table
from cctbx import eltbx
from libtbx.utils import format_float_with_standard_uncertainty \
as format_float_with_su
from cctbx import covariance
class scattering_type_registry_params(object):
  """Plain parameter holder for building a scattering type registry.

  Attributes mirror the constructor arguments and are stored unmodified.
  """

  def __init__(self,
        custom_dict=None,
        d_min=None,
        table=None,
        types_without_a_scattering_contribution=None):
    """Record the supplied registry parameters on the instance."""
    self.custom_dict = custom_dict
    self.d_min = d_min
    self.table = table
    self.types_without_a_scattering_contribution = (
      types_without_a_scattering_contribution)
class structure(crystal.special_position_settings):
"""A class to describe and handle information related to a crystal structure.
It offers various methods to modify the crystallographic information contained.
Important members are:
- .special_position_settings (base class)
- .scatterers
- .site_symmetry
- .crystal_symmetry
"""
def __init__(self,
      special_position_settings=None,
      scatterers=None,
      site_symmetry_table=None,
      non_unit_occupancy_implies_min_distance_sym_equiv_zero=False,
      scattering_type_registry=None,
      crystal_symmetry=None):
  """Initialize the structure from symmetry information and scatterers.

  Exactly one of special_position_settings / crystal_symmetry must be
  supplied; a site_symmetry_table may only be given together with
  scatterers.
  """
  # Exactly one source of symmetry information is allowed
  assert [special_position_settings, crystal_symmetry].count(None) == 1
  assert scatterers is not None or site_symmetry_table is None
  if (special_position_settings is None):
    special_position_settings = crystal.special_position_settings(
      crystal_symmetry=crystal_symmetry)
  # Copy the base-class (special position settings) state into self
  crystal.special_position_settings._copy_constructor(
    self, special_position_settings)
  self.erase_scatterers()
  self._non_unit_occupancy_implies_min_distance_sym_equiv_zero \
    = non_unit_occupancy_implies_min_distance_sym_equiv_zero
  self._scattering_type_registry = scattering_type_registry
  if (scatterers is not None):
    self.add_scatterers(
      scatterers=scatterers,
      site_symmetry_table=site_symmetry_table,
      non_unit_occupancy_implies_min_distance_sym_equiv_zero=
        self._non_unit_occupancy_implies_min_distance_sym_equiv_zero)
  self.scattering_type_registry_params = None
  self.inelastic_form_factors_source = None
def _copy_constructor(self, other):
  """Copy all state from another structure instance into self.

  :param other: the structure to copy from
  """
  # Fix: previously referenced the undefined name "special_position_settings"
  # (copy-paste from __init__), which raised a NameError whenever this method
  # was called; the base-class state must be copied from "other".
  crystal.special_position_settings._copy_constructor(self, other)
  self._scatterers = other._scatterers
  self._site_symmetry_table = other._site_symmetry_table
  self._non_unit_occupancy_implies_min_distance_sym_equiv_zero \
    = other._non_unit_occupancy_implies_min_distance_sym_equiv_zero
  self._scattering_type_registry = other._scattering_type_registry
  self._scattering_type_registry_is_out_of_date \
    = other._scattering_type_registry_is_out_of_date
  self.inelastic_form_factors_source = other.inelastic_form_factors_source
def crystal_symmetry(self):
  """Get the crystal symmetry (unit cell plus space group) of the structure.

  :returns: a new crystal symmetry object
  :rtype: cctbx.crystal.symmetry
  """
  return crystal.symmetry(
    unit_cell=self.unit_cell(),
    space_group_info=self.space_group_info())
def set_non_unit_occupancy_implies_min_distance_sym_equiv_zero(self, value):
  """Set the flag controlling special-position treatment of partially
  occupied sites.

  :param value: new boolean value for the flag
  """
  self._non_unit_occupancy_implies_min_distance_sym_equiv_zero = value
def erase_scatterers(self):
  """Remove all scatterers from the structure.

  :returns: none
  """
  self._scatterers = flex.xray_scatterer()
  self._site_symmetry_table = sgtbx.site_symmetry_table()
  # The registry caches per-type information; mark it stale so it is
  # rebuilt when scatterers are added again
  self._scattering_type_registry_is_out_of_date = True
  self.inelastic_form_factors_source = None
def deep_copy_scatterers(self):
  """Create a deep copy of the structure with all scatterers.

  The scatterer array and site symmetry table are deep-copied; the
  special-position settings and scattering type registry are shared
  with this structure.

  :returns: a new cctbx.xray.structure object
  :rtype: cctbx.xray.structure
  """
  cp = structure(self,
    scattering_type_registry=self._scattering_type_registry,
    non_unit_occupancy_implies_min_distance_sym_equiv_zero
      =self._non_unit_occupancy_implies_min_distance_sym_equiv_zero)
  cp._scatterers = self._scatterers.deep_copy()
  cp._site_symmetry_table = self._site_symmetry_table.deep_copy()
  return cp
def customized_copy(self,
      crystal_symmetry=Keep,
      unit_cell=Keep,
      space_group_info=Keep,
      non_unit_occupancy_implies_min_distance_sym_equiv_zero=Keep):
  """Create a copy of the structure with selected attributes replaced.

  Any argument left as the Keep sentinel retains this structure's
  current value.

  :returns: a new structure sharing this structure's scatterer array
  :rtype: cctbx.xray.structure
  """
  if (crystal_symmetry is Keep):
    crystal_symmetry = self
  crystal_symmetry = crystal.symmetry.customized_copy(
    crystal_symmetry,
    unit_cell=unit_cell,
    space_group_info=space_group_info)
  if (non_unit_occupancy_implies_min_distance_sym_equiv_zero is Keep):
    non_unit_occupancy_implies_min_distance_sym_equiv_zero \
      = self._non_unit_occupancy_implies_min_distance_sym_equiv_zero
  # Fix: local renamed from "str", which shadowed the builtin
  result = structure(
    special_position_settings=crystal.special_position_settings(
      crystal_symmetry=crystal_symmetry,
      min_distance_sym_equiv=self._min_distance_sym_equiv,
      u_star_tolerance=self._u_star_tolerance,
      assert_min_distance_sym_equiv=self._assert_min_distance_sym_equiv),
    scatterers=self._scatterers,
    non_unit_occupancy_implies_min_distance_sym_equiv_zero
      =non_unit_occupancy_implies_min_distance_sym_equiv_zero,
    scattering_type_registry=self._scattering_type_registry)
  result.inelastic_form_factors_source = self.inelastic_form_factors_source
  return result
def scatterers(self):
  """Get all scatterers of the structure.

  Note that a reference is returned, not a copy: modifying the returned
  array modifies this structure's scatterers.

  :returns: a reference to an array of cctbx.xray.scatterer
  :rtype: cctbx.xray.scatterer[]
  """
  return self._scatterers
def non_unit_occupancy_implies_min_distance_sym_equiv_zero(self):
  """Get the flag controlling special-position treatment of partially
  occupied sites.

  :returns: the current boolean value of the flag
  """
  return self._non_unit_occupancy_implies_min_distance_sym_equiv_zero
def set_u_iso(self, value = None, values = None, selection = None):
  """Set isotropic mean thermal displacements of scatterers.

  Exactly one of value / values must be given.

  :param value: a single double value to set all u_iso of selected \
  scatterers to
  :type value: double
  :param values: an array of double values to set all u_iso of selected \
  scatterers to
  :type values: double[]
  :param selection: an array of bools to select scatterers to be updated \
  with new u_iso values
  :type selection: boolean[]
  :returns: the modified base object
  :rtype: cctbx.xray.structure
  """
  assert [value, values].count(None) == 1
  s = self._scatterers
  # Default to selecting every scatterer
  if(selection is None): selection = flex.bool(s.size(), True)
  else: assert selection.size() == s.size()
  if(value is not None):
    # Broadcast the single value across all scatterers
    s.set_u_iso(flex.double(s.size(), value), selection, self.unit_cell())
  else:
    assert values.size() == s.size()
    s.set_u_iso(values, selection, self.unit_cell())
  return self
def set_b_iso(self, value = None, values = None, selection = None):
  """Set isotropic Debye-Waller/temperature/B factors with automatic conversion
  to u_iso.

  Exactly one of value / values must be given.

  :param value: a single double value to set all b_iso of selected \
  scatterers to
  :type value: double
  :param values: an array of double values to set all b_iso of selected \
  scatterers to
  :type values: double[]
  :param selection: an array of bools to select scatterers to be updated with \
  new b_iso values
  :type selection: boolean[]
  :returns: the modified base object
  :rtype: cctbx.xray.structure
  """
  assert [value, values].count(None) == 1
  s = self._scatterers
  if(value is not None):
    self.set_u_iso(value = adptbx.b_as_u(value), selection = selection)
  else:
    assert values.size() == s.size()
    b_iso = values
    # Scale whole array by the B->U conversion factor (adptbx.b_as_u(1))
    u_iso_values = b_iso*adptbx.b_as_u(1)
    self.set_u_iso(values = u_iso_values, selection = selection)
  return self
def random_remove_sites_selection(self, fraction):
  """Build a random selection marking sites for removal.

  Returns a flex.bool array in which False flags a site chosen for
  removal; the realized fraction of False entries approximates the
  requested fraction.

  :param fraction: target fraction of sites to remove; must be ~0 or \
  within [0.01, 0.99]
  :raises RuntimeError: if fraction is outside the accepted range
  """
  scatterers_size = self._scatterers.size()
  # A fraction of (approximately) zero keeps every site
  if(abs(fraction-0.0) < 1.e-3):
    return flex.bool(scatterers_size, True)
  if(fraction < 0.01 or fraction > 0.99):
    raise RuntimeError("fraction must be between 0.01 and 0.99.")
  tol = 999.
  selection = None
  # Scan thresholds around the requested fraction and keep the random
  # selection whose realized removal fraction is closest to the target
  l = max(fraction - 0.05, 0.0)
  r = min(fraction + 0.05, 1.0)
  # NOTE(review): the outer xrange(5) loop re-runs only after l has already
  # passed r, so iterations after the first do no work -- confirm intent
  for i in xrange(5):
    while l <= r:
      arr = flex.random_double(scatterers_size)-l
      sel = arr > 0.0
      deleted = float((scatterers_size - sel.count(True))) / scatterers_size
      if abs(fraction - deleted) < tol:
        tol = abs(fraction - deleted)
        selection = sel
      l += 0.0001
  return selection
def replace_sites_frac(self, new_sites, selection=None):
  """Create a copy of the structure with fractional coordinates replaced.

  :param new_sites: new fractional coordinates; when a selection is \
  given, only the selected scatterers receive new coordinates
  :param selection: optional boolean array choosing which sites to replace
  :returns: a new structure with updated sites
  :rtype: cctbx.xray.structure
  """
  if(selection is not None):
    # Merge: keep the existing sites wherever the selection is False
    new_sites = self.sites_frac().set_selected(selection, new_sites)
  cp = structure(self,
    non_unit_occupancy_implies_min_distance_sym_equiv_zero
      =self._non_unit_occupancy_implies_min_distance_sym_equiv_zero,
    scattering_type_registry=self._scattering_type_registry)
  new_scatterers = self._scatterers.deep_copy()
  new_scatterers.set_sites(new_sites)
  cp._scatterers = new_scatterers
  cp._site_symmetry_table = self._site_symmetry_table.deep_copy()
  return cp
def replace_sites_cart(self, new_sites, selection=None):
  """Create a copy of the structure with Cartesian coordinates replaced.

  The coordinates are fractionalized and the work is delegated to
  replace_sites_frac.

  :param new_sites: new Cartesian coordinates
  :param selection: optional boolean array choosing which sites to replace
  :rtype: cctbx.xray.structure
  """
  return self.replace_sites_frac(
    new_sites=self.unit_cell().fractionalize(sites_cart=new_sites),
    selection=selection)
def adjust_u_iso(self):
  """Adjust the isotropic displacement parameters of all scatterers in
  place (delegates to the underlying scatterer array implementation).
  """
  self._scatterers.adjust_u_iso()
def adjust_occupancy(self, occ_max, occ_min, selection = None):
  """Adjust site occupancy factor for selected sites to be between occ_min and
  occ_max.

  :param occ_max: maximal site occupancy factor
  :type occ_max: float
  :param occ_min: minimal site occupancy factor
  :type occ_min: float
  :param selection: an array of bools (or size_t indices) to select \
  scatterers to be adjusted
  :type selection: boolean[]
  :returns: none
  """
  if(selection is not None):
    # Accept a size_t index selection by converting it to a boolean mask;
    # the class is identified via its string representation
    if(("%s"%selection.__class__).count("array_family_flex_ext.size_t") > 0):
      selection = flex.bool(self._scatterers.size(), selection)
  occ = self._scatterers.extract_occupancies()
  # Clamp occupancies into [occ_min, occ_max]
  sel = (occ >= occ_max)
  occ = occ.set_selected(sel, occ_max)
  sel = (occ <= occ_min)
  occ = occ.set_selected(sel, occ_min)
  if(selection is None):
    self._scatterers.set_occupancies(occ)
  else:
    # Only selected scatterers receive the clamped values
    self._scatterers.set_occupancies(occ, selection)
def all_selection(self):
  """Get a selector array covering all scatterers of the structure.

  :returns: an array of True values, one per scatterer
  :rtype: boolean[]
  """
  return flex.bool(self._scatterers.size(), True)
def translate(self, x=0, y=0, z=0):
  """Translates all scatterers of this structure by x,y,z.

  The translation is applied in Cartesian space and the resulting sites
  are re-fractionalized against the unit cell.

  :param x: x component of the translation vector
  :type x: float
  :param y: y component of the translation vector
  :type y: float
  :param z: z component of the translation vector
  :type z: float
  :returns: a new translated copy of the structure
  :rtype: cctbx.xray.structure
  """
  sites_cart = self.sites_cart()
  cp = structure(self,
    non_unit_occupancy_implies_min_distance_sym_equiv_zero
      =self._non_unit_occupancy_implies_min_distance_sym_equiv_zero,
    scattering_type_registry=self._scattering_type_registry)
  new_scatterers = self._scatterers.deep_copy()
  # Shift every Cartesian site by the same vector, then fractionalize
  new_scatterers.set_sites(
    self.unit_cell().fractionalize(
      sites_cart=sites_cart+flex.vec3_double(sites_cart.size(),[x,y,z])))
  cp._scatterers = new_scatterers
  cp._site_symmetry_table = self._site_symmetry_table.deep_copy()
  return cp
def distances(self, other, selection = None):
  """Calculates pairwise distances between the atoms of this structure and
  another structure with the same number of scatterers.

  Distances are computed between Cartesian coordinates.

  :param other: the other structure
  :type other: cctbx.xray.structure
  :param selection: an array of bools to select scatterers to be taken into \
  calculation
  :type selection: boolean[]
  :returns: an array of distances for the selected scatterers
  :rtype: float[]
  :raises RuntimeError: if the selected site counts differ
  """
  # Default: include every scatterer
  if(selection is None): selection = flex.bool(self._scatterers.size(), True)
  s1 = self.sites_cart().select(selection)
  s2 = other.sites_cart().select(selection)
  if(s1.size() != s2.size()):
    raise RuntimeError("Models must be of equal size.")
  return flex.sqrt((s1 - s2).dot())
def max_distance(self, other, selection = None):
  """Return the largest pairwise distance between corresponding scatterers
  of this structure and another structure of equal size.

  :param other: the other structure
  :type other: cctbx.xray.structure
  :param selection: optional boolean array restricting the comparison
  :type selection: boolean[]
  :rtype: float
  """
  pairwise = self.distances(other=other, selection=selection)
  return flex.max(pairwise)
def min_distance(self, other, selection = None):
  """Return the smallest pairwise distance between corresponding scatterers
  of this structure and another structure of equal size.

  :param other: the other structure
  :type other: cctbx.xray.structure
  :param selection: optional boolean array restricting the comparison
  :type selection: boolean[]
  :rtype: float
  """
  pairwise = self.distances(other=other, selection=selection)
  return flex.min(pairwise)
def mean_distance(self, other, selection = None):
  """Return the arithmetic mean of the pairwise distances between
  corresponding scatterers of this structure and another structure of
  equal size.

  :param other: the other structure
  :type other: cctbx.xray.structure
  :param selection: optional boolean array restricting the comparison
  :type selection: boolean[]
  :rtype: float
  """
  pairwise = self.distances(other=other, selection=selection)
  return flex.mean(pairwise)
def scale_adp(self, factor, selection=None):
"""Scale the atomic displacement parameters of the selected scatterers
of the structure with the specified factor.
If no selection is given, all scatterers will be handled as if selected.
:param factor: scale factor to apply to the adps of the selected scatterers
:type factor: float
:param selection: an array of bools to select the scatterers to have their \
adps scaled
:type selection: boolean[]
:returns: none
"""
if(selection is not None):
assert selection.size() == self._scatterers.size()
else:
selection = flex.bool(self._scatterers.size(), True)
for sc,sel in zip(self._scatterers, selection):
if(sel and sc.flags.use()):
if(sc.flags.use_u_iso()):
sc.u_iso = sc.u_iso * factor
if(sc.flags.use_u_aniso()):
result = []
for i in xrange(6): result.append(sc.u_star[i] * factor)
sc.u_star = result
  def shake_adp(self, b_max=None, b_min=None, spread=10.0, aniso_spread=0.1,
                keep_anisotropic=False, random_u_cart_scale=1.0, selection=None):
    """Randomize the ADPs of the selected scatterers.

    Either both b_max and b_min are given explicitly (then spread must be 0),
    or both are derived from the mean isotropic B of the structure +/- spread.
    Isotropic u values are replaced by a random B in [b_min, b_max);
    anisotropic u_star components are perturbed by +/- aniso_spread (relative),
    unless keep_anisotropic is True.

    NOTE(review): random_u_cart_scale is accepted but never used in this body.

    :param selection: an array of bools to select scatterers to shake
    :type selection: boolean[]
    :returns: none
    """
    # b_max/b_min must be supplied together or not at all
    assert [b_max, b_min].count(None) in [0,2]
    if([b_max, b_min].count(None) == 0): assert spread == 0.0
    if([b_max, b_min].count(None) == 2):
      # derive the B range from the mean isotropic B +/- spread
      u_isos = self.extract_u_iso_or_u_equiv().select(self.use_u_iso())
      if(u_isos.size() > 0):
        b_mean = adptbx.u_as_b(flex.mean(u_isos))
        b_max = int(b_mean + spread)
        b_min = int(max(0.0, b_mean - spread))
        assert b_min <= b_max, [b_min,b_max,spread,b_mean]
    if(selection is not None):
      assert selection.size() == self._scatterers.size()
    else:
      selection = flex.bool(self._scatterers.size(), True)
    is_special_position = self.site_symmetry_table().is_special_position
    for i_seq,sc,sel in zip(count(), self._scatterers, selection):
      if(sel and sc.flags.use()):
        if(sc.flags.use_u_iso() and b_min != b_max):
          # random B in [b_min, b_max) plus a fractional jitter
          r = max(0, random.randrange(b_min, b_max, 1) + random.random())
          sc.u_iso=adptbx.b_as_u(r)
        if(sc.flags.use_u_aniso() and not keep_anisotropic):
          result = []
          for i in xrange(6):
            result.append(sc.u_star[i]+sc.u_star[i]*random.choice(
              (-aniso_spread,aniso_spread)))
          # special positions require a symmetry-averaged u_star
          if(is_special_position(i_seq=i_seq)):
            result = self.space_group().average_u_star(result)
          sc.u_star = result
def shake_adp_if_all_equal(self, b_iso_tolerance = 0.1):
performed = False
if(self.use_u_aniso().count(True) == 0):
u_isos = self.extract_u_iso_or_u_equiv()
b_max = adptbx.u_as_b(flex.max(u_isos))
b_min = adptbx.u_as_b(flex.min(u_isos))
b_mean = adptbx.u_as_b(flex.mean(u_isos))
if(abs(b_max - b_mean) <= b_iso_tolerance and
abs(b_min - b_mean) <= b_iso_tolerance):
self.shake_adp()
performed = True
return performed
def min_u_cart_eigenvalue(self):
u_carts = self._scatterers.extract_u_cart_plus_u_iso(
unit_cell=self.unit_cell())
result = flex.double()
for i_seq, sc in enumerate(self._scatterers):
if(sc.flags.use_u_iso() or sc.flags.use_u_aniso()):
result.append(min(adptbx.eigenvalues(u_carts[i_seq])))
return flex.min(result)
def shake_occupancies(self, selection = None):
s = self._scatterers
q_new = flex.random_double(s.size())*2.
if(selection is None):
s.set_occupancies(q_new)
else:
assert selection.size() == s.size()
s.set_occupancies(q_new, selection)
def set_occupancies(self, value, selection = None):
if(selection is not None and isinstance(selection, flex.size_t)):
selection = flex.bool(self._scatterers.size(), selection)
s = self._scatterers
if(hasattr(value, 'size')):
values = value
if(selection is not None):
assert values.size() == selection.size()
else:
values = flex.double(s.size(), value)
if(selection is None):
s.set_occupancies(values)
else:
assert selection.size() == s.size()
s.set_occupancies(values, selection)
return self
def coordinate_degrees_of_freedom_counts(self, selection=None):
assert selection is None or selection.size() == self._scatterers.size()
site_symmetry_table = self._site_symmetry_table
assert site_symmetry_table.indices().size() == self._scatterers.size()
result = {
0: 0,
1: 0,
2: 0,
3: -site_symmetry_table.special_position_indices().size()}
if (selection is None):
result[3] += self._scatterers.size()
else:
result[3] += selection.count(True)
for i in site_symmetry_table.special_position_indices():
if (selection is None or selection[i]):
result[site_symmetry_table
.get(i).site_constraints()
.n_independent_params()] += 1
return result
def guess_scattering_type_neutron(self):
ac,bc,cc = 0,0,0
result = False
for ugl in self.scattering_type_registry().unique_gaussians_as_list():
ac += len(ugl.array_of_a())
bc += len(ugl.array_of_b())
cc += ugl.c()
if(ac+bc == 0 and cc != 0): result = True
return result
def guess_scattering_type_is_a_mixture_of_xray_and_neutron(self):
has_xray = False
has_neutron = False
for ugl in self._scattering_type_registry.unique_gaussians_as_list():
if ugl is None:
continue
if len(ugl.array_of_a())!=0 or len(ugl.array_of_b())!=0:
has_xray = True
elif ugl.c() != 0:
has_neutron = True
return has_neutron and has_xray
def scattering_dictionary_as_string(self):
result = ""
if self._scattering_type_registry is None:
return result
for tg in self._scattering_type_registry.as_type_gaussian_dict().items():
stype = tg[0]
gaussian = tg[1]
aa = "None"
ab = "None"
c = "None"
if gaussian is not None:
aa = gaussian.array_of_a()
ab = gaussian.array_of_b()
c = gaussian.c()
result += "\n Element: "+stype +" a: "+ str(aa)+ " b: "+ str(ab)+ " c: "+str(c)
return result
def scattering_types_counts_and_occupancy_sums(self):
result = []
reg = self.scattering_type_registry()
unique_counts = reg.unique_counts
if (flex.sum(unique_counts) != self._scatterers.size()):
raise RuntimeError("scattering_type_registry out of date.")
occupancy_sums = reg.occupancy_sums(self._scatterers)
unit_cell_occupancy_sums = reg.unit_cell_occupancy_sums(self._scatterers)
for scattering_type,unique_index in reg.type_index_pairs_as_dict().items():
result.append(group_args(
scattering_type=scattering_type,
count=unique_counts[unique_index],
occupancy_sum=occupancy_sums[unique_index],
unit_cell_occupancy_sum=unit_cell_occupancy_sums[unique_index]))
return result
def crystal_density(self):
"""Get the value of the diffraction-determined density for the crystal,
suitable for the CIF item _exptl_crystal_density_diffrn
Density values are calculated from the crystal cell and contents. The
units are megagrams per cubic metre (=grams per cubic centimetre).
Equivalent to:
1.66042 * _chemical_formula_weight * _cell_formula_units_Z / _cell_volume
:returns: chemical density in megagrams per cubic metre (=grams per cm^3)
:rtype: float
"""
from cctbx.eltbx import tiny_pse
numerator = sum([
tiny_pse.table(elt.scattering_type).weight() * elt.unit_cell_occupancy_sum
for elt in self.scattering_types_counts_and_occupancy_sums()])
denominator = self.unit_cell().volume()
return 1.66042 * numerator/denominator
  def f_000(self, include_inelastic_part=False):
    """Get the effective number of electrons in the crystal unit cell
    contributing to F(000), suitable for the CIF item _exptl_crystal_F_000.
    According to the CIF definition, this item **may** contain dispersion
    contributions.

    :param include_inelastic_part: If 'True' contributions due to dispersion \
are included in F(000).
    :type include_inelastic_part: boolean
    :returns: F(000)
    :rtype: float
    :raises RuntimeError: if the scattering type registry no longer matches \
the scatterers
    """
    elastic_part = 0
    reg = self.scattering_type_registry()
    unique_counts = reg.unique_counts
    if (flex.sum(unique_counts) != self._scatterers.size()):
      raise RuntimeError("scattering_type_registry out of date.")
    unit_cell_occupancy_sums = reg.unit_cell_occupancy_sums(self._scatterers)
    # form factors at d*^2 = 0, i.e. at the origin of reciprocal space
    unique_form_factors_at_origin = reg.unique_form_factors_at_d_star_sq(0)
    for scattering_type,unique_index in reg.type_index_pairs_as_dict().items():
      elastic_part += unit_cell_occupancy_sums[unique_index] \
                    * unique_form_factors_at_origin[unique_index]
    if not include_inelastic_part:
      return elastic_part
    # dispersion (fp/fdp) contributions, occupancy- and multiplicity-weighted
    inelastic_part_real = 0
    inelastic_part_imag = 0
    for sc in self.scatterers():
      if sc.fp:
        inelastic_part_real += sc.fp * sc.occupancy * sc.multiplicity()
      if sc.fdp:
        inelastic_part_imag += sc.fdp * sc.occupancy * sc.multiplicity()
    return abs(complex(elastic_part+inelastic_part_real, inelastic_part_imag))
  def shake_sites_in_place(self,
        rms_difference=None,
        mean_distance=None,
        selection=None,
        allow_all_fixed=False,
        random_double=None):
    """Shake the coordinates of the selected scatterers in this structure.

    :param rms_difference: radial mean square displacement (>=0) to apply to \
selected scatterers
    :type rms_difference: float
    :param mean_distance: a mean distance shift (>=0) to apply to selected \
scatterers
    :type mean_distance: float
    :param selection: an array of bools to select scatterers to be shaken
    :type selection: boolean[]
    :param allow_all_fixed: if set to 'True' shaking a structure with all \
scatterers on fixed special positions will not cause an error
    :type allow_all_fixed: boolean
    :param random_double: "random" numbers to use for displacements
    :type random_double: float[]
    :returns: 'True' if at least one scatterer was moved, 'False' otherwise. \
NOTE(review): when the target difference is 0 the method returns None \
(falsy) rather than an explicit False.
    :rtype: boolean
    """
    # exactly one of rms_difference / mean_distance must be given
    assert [rms_difference, mean_distance].count(None) == 1
    if (rms_difference is not None):
      assert rms_difference >= 0
      target_difference = rms_difference
    else:
      assert mean_distance >= 0
      target_difference = mean_distance
    if (target_difference == 0): return
    assert self._scatterers.size() > 0
    site_symmetry_table = self._site_symmetry_table
    assert site_symmetry_table.indices().size() == self._scatterers.size()
    if (selection is not None):
      assert selection.size() == self._scatterers.size()
      n_variable = selection.count(True)
      if (n_variable == 0):
        raise RuntimeError("No scatterers selected.")
      all = " selected"
    else:
      n_variable = self._scatterers.size()
      all = ""
    # collect scatterers that cannot move at all (0 free site parameters)
    selection_fixed = flex.size_t()
    for i in site_symmetry_table.special_position_indices():
      if (site_symmetry_table.get(i)
            .site_constraints()
            .n_independent_params() == 0):
        if (selection is None or selection[i]):
          selection_fixed.append(i)
    n_variable -= selection_fixed.size()
    if (n_variable == 0):
      if (allow_all_fixed):
        return False
      raise RuntimeError(
        "All%s scatterers are fixed on special positions." % all)
    if (n_variable == self._scatterers.size()):
      selection = None
    scatterers = self._scatterers
    frac = self.unit_cell().fractionalize
    orth = self.unit_cell().orthogonalize
    if (random_double is None):
      random_double = flex.random_double
    # draw random cartesian shifts in [-1,1); retry (up to 10x) if the net
    # displacement is numerically negligible
    for i in count_max(assert_less_than=10):
      shifts_cart = flex.vec3_double(random_double(
        size=self._scatterers.size()*3, factor=2) - 1)
      if (selection is not None):
        shifts_cart.set_selected(~selection, (0,0,0))
      shifts_cart.set_selected(selection_fixed, (0,0,0))
      # project shifts of special-position scatterers back onto their
      # symmetry-constrained subspace
      for i in site_symmetry_table.special_position_indices():
        site_frac_orig = matrix.col(scatterers[i].site)
        site_frac = site_symmetry_table.get(i).special_op() \
                  * (site_frac_orig + matrix.col(frac(shifts_cart[i])))
        shifts_cart[i] = orth(matrix.col(site_frac) - site_frac_orig)
      if (rms_difference is not None):
        difference = (flex.sum(shifts_cart.dot()) / n_variable) ** 0.5
      else:
        difference = flex.sum(flex.sqrt(shifts_cart.dot())) / n_variable
      if (difference > 1.e-6): break # to avoid numerical problems
    # rescale so the realized difference matches the requested target
    shifts_cart *= (target_difference / difference)
    self.set_sites_frac(
      self.sites_frac() + self.unit_cell().fractionalize(shifts_cart))
    return True
  def shift_sites_in_place(self, shift_length, mersenne_twister=None):
    """Shifts the coordinates of all scatterers in this structure by a random
    direction of length shift_length, respecting each scatterer's site
    symmetry (fixed sites are left alone; 1- and 2-dof sites are shifted
    within their constrained line/plane).  If a shifted site would acquire a
    different site symmetry, the shift is retried with half the length.

    :param shift_length: the distance to shift each scatterer with
    :type shift_length: float
    :param mersenne_twister: a mersenne twister to use as entropy source
    :type mersenne_twister: flex.mersenne_twister
    :returns: none
    """
    if (shift_length == 0): return
    sst = self._site_symmetry_table
    assert sst.indices().size() == self._scatterers.size()
    frac = self.unit_cell().fractionalize
    orth = self.unit_cell().orthogonalize
    if (mersenne_twister is None):
      mersenne_twister = flex.mersenne_twister(seed=0)
    col = matrix.col
    for i_sc,sc in enumerate(self._scatterers):
      site_frac = col(sc.site)
      ss = sst.get(i_sc)
      constr = ss.site_constraints()
      np = constr.n_independent_params()
      if (np == 0):
        # fully fixed special position: nothing to shift
        continue
      if (np == 3):
        # general position: random direction on a sphere of radius sl
        def find_3():
          sl = shift_length
          while (sl != 0):
            for i_trial in xrange(10):
              shift_frac = col(frac(
                col(mersenne_twister.random_double_point_on_sphere()) * sl))
              site_mod = site_frac + shift_frac
              ss_mod = self.site_symmetry(site=site_mod)
              if (ss_mod.is_point_group_1()):
                sc.site = site_mod
                return
            sl *= 0.5
        find_3()
      elif (np == 2):
        # 2 dof: build in-plane basis vectors by perturbing the two
        # independent parameters, then pick the plane normal (largest cross
        # product) as rotation axis
        plane_vectors = []
        for s0 in [-1,0,1]:
          for s1 in [-1,0,1]:
            indep = list(constr.independent_params(site_frac))
            indep[0] += s0
            indep[1] += s1
            plane_vectors.append(col(orth(
              col(constr.all_params(indep)) - site_frac)))
        assert len(plane_vectors) == 9
        axis = None
        axis_length = None
        for i in xrange(8):
          vi = plane_vectors[i]
          for j in xrange(i+1,9):
            vj = plane_vectors[j]
            cross = vi.cross(vj)
            length = cross.length()
            if (axis is None or length > axis_length):
              axis = cross
              axis_length = length
        assert axis is not None
        assert axis_length != 0
        v_max = None
        l_max = None
        for v in plane_vectors:
          l = v.length()
          if (l_max is None or l > l_max):
            v_max = v
            l_max = l
        assert v_max is not None
        def find_2():
          sl = shift_length
          while (sl != 0):
            for i_trial in count_max(assert_less_than=10):
              r = axis.axis_and_angle_as_r3_rotation_matrix(
                angle = mersenne_twister.random_double() * 2 * math.pi)
              # NOTE(review): uses `v` (last plane vector from the loop
              # above) rather than the computed `v_max` — looks like v_max
              # was intended; confirm before changing
              shift_frac = col(frac((r * v).normalize() * sl))
              site_mod = site_frac + shift_frac
              ss_mod = self.site_symmetry(site=site_mod)
              if (ss_mod.special_op() == ss.special_op()):
                sc.site = site_mod
                return
            sl *= 0.5
        find_2()
      else:
        # 1 dof: move along the constrained line, random sign first
        def find_1():
          sl = shift_length
          while (sl != 0):
            if (mersenne_twister.random_double() < 0.5):
              us = [1, -1]
            else:
              us = [-1, 1]
            for u in us:
              indep = list(constr.independent_params(site_frac))
              indep[0] += u
              v = col(orth(col(constr.all_params(indep)) - site_frac))
              assert v.length() != 0
              # NOTE(review): scales by shift_length, not the halved `sl`,
              # so the retry loop never actually shortens the shift —
              # possibly should be `* sl`; confirm before changing
              shift_frac = col(frac(v.normalize() * shift_length))
              site_mod = site_frac + shift_frac
              ss_mod = self.site_symmetry(site=site_mod)
              if (ss_mod.special_op() == ss.special_op()):
                sc.site = site_mod
                return
            sl *= 0.5
        find_1()
def b_iso_min_max_mean(self):
"""Get the minimal, maximal and mean isotropic Debye-Waller/temperature/B \
factors of all scatterers in this structure.
:returns: minimal b_iso, maximal b_iso, mean b_iso
:rtype: float, float, float
"""
b_isos = self._scatterers.extract_u_iso()/adptbx.b_as_u(1)
b_min = flex.min(b_isos)
b_max = flex.max(b_isos)
b_mean = flex.mean(b_isos)
return b_min, b_max, b_mean
  def discard_scattering_type_registry(self):
    """Mark the cached scattering type registry as stale so it is rebuilt on
    the next call to scattering_type_registry()."""
    self._scattering_type_registry_is_out_of_date = True
  def n_undefined_multiplicities(self):
    """Return the number of scatterers with an undefined multiplicity."""
    return ext.n_undefined_multiplicities(self._scatterers)
  def sites_frac(self):
    """Return the fractional coordinates of all scatterers.

    :rtype: scitbx.array_family.flex.vec3_double
    """
    return self._scatterers.extract_sites()
  def use_u_iso(self):
    """Return a bool array flagging scatterers that use an isotropic ADP."""
    return self._scatterers.extract_use_u_iso()
  def use_u_aniso(self):
    """Return a bool array flagging scatterers that use an anisotropic ADP."""
    return self._scatterers.extract_use_u_aniso()
  def set_sites_frac(self, sites_frac):
    """Set the fractional coordinates of all sites of the structure to \
'sites_frac'.

    :param sites_frac: a list of the fractional coordinates for all scatterers
    :type sites_frac: scitbx.array_family.flex.vec3_double
    :returns: none
    """
    assert sites_frac.size() == self._scatterers.size()
    self._scatterers.set_sites(sites_frac)
  def sites_cart(self):
    """Get the cartesian coordinates of all sites of the structure.

    :returns: a list of the sites of the structure in cartesian coordinates
    :rtype: scitbx.array_family.flex.vec3_double
    """
    return self.unit_cell().orthogonalize(sites_frac=self.sites_frac())
  def set_sites_cart(self, sites_cart):
    """Set the cartesian coordinates of all sites of the structure to \
'sites_cart'.

    :param sites_cart: a list of the cartesian coordinates for all scatterers
    :type sites_cart: scitbx.array_family.flex.vec3_double
    :returns: none
    """
    self.set_sites_frac(self.unit_cell().fractionalize(sites_cart=sites_cart))
def scattering_types(self):
result = flex.std_string()
for sct in self._scatterers.extract_scattering_types():
result.append(sct.strip().upper())
return result
  def extract_u_cart_plus_u_iso(self):
    """Return the cartesian ADP tensors with any isotropic contribution
    folded in, for all scatterers."""
    return self._scatterers.extract_u_cart_plus_u_iso(
      unit_cell=self.unit_cell())
  def extract_u_iso_or_u_equiv(self):
    """Return u_iso for isotropic scatterers and the equivalent isotropic u
    for anisotropic ones."""
    return self._scatterers.extract_u_iso_or_u_equiv(
      unit_cell=self.unit_cell())
  def scale_adps(self, scale_factor):
    """Scale the ADPs of all scatterers by scale_factor (delegates to the
    C++ scatterer array)."""
    return self._scatterers.scale_adps(scale_factor)
  def switch_to_neutron_scattering_dictionary(self):
    """Rebuild the scattering type registry from the neutron table and return
    it as a type -> gaussian dict."""
    # XXX First step. In future: better to do bookkeeping and be able to switch
    # XXX back and forth between original scat_dict and neutron.
    # XXX Add regression test.
    return self.scattering_type_registry(table="neutron").as_type_gaussian_dict()
def hd_selection(self):
"""Get a selector array for all hydrogen and deuterium scatterers of the structure.
:returns: an array to select all H and D scatterers of the structure
:rtype: boolean[]
"""
scattering_types = self._scatterers.extract_scattering_types()
result = flex.bool()
for sct in scattering_types:
if(sct.strip() in ['H','D']): result.append(True)
else: result.append(False)
return result
  def heavy_selection(self,
        ignore_atoms_with_alternative_conformations=False,
        include_only_atoms_with_alternative_conformations=False,
        only_protein=False,
        ):
    """Get a selector array for heavy (= non H or D) scatterers of the structure.

    :param ignore_atoms_with_alternative_conformations: select normal atoms/conformations only
    :type ignore_atoms_with_alternative_conformations: boolean
    :param include_only_atoms_with_alternative_conformations: select only scatterers with alternative conformations
    :type include_only_atoms_with_alternative_conformations: boolean
    :param only_protein: select only protein scatterers
    :type only_protein: boolean
    :returns: an array to select the desired scatterers of the structure
    :rtype: boolean[]
    """
    if ( ignore_atoms_with_alternative_conformations and
         include_only_atoms_with_alternative_conformations
         ):
      raise Sorry("Mutually exclusive alternative location options")
    scattering_types = self._scatterers.extract_scattering_types()
    scatterers = self.scatterers()
    result = flex.bool()
    for i, sct in enumerate(scattering_types):
      # label[9:10] is treated as the altloc indicator (blank = no altloc)
      # — assumes PDB-style fixed-width scatterer labels; TODO confirm
      if ignore_atoms_with_alternative_conformations:
        if scatterers[i].label[9:10]!=" ":
          result.append(False)
          continue
      if include_only_atoms_with_alternative_conformations:
        if scatterers[i].label[9:10]==" ":
          result.append(False)
          continue
      if only_protein:
        # label[10:13] is treated as the residue name; reject anything that
        # is not one of the 20 standard amino acids
        for resname in [
          "ALA",
          "CYS",
          "ASP",
          "GLU",
          "PHE",
          "GLY",
          "HIS",
          "ILE",
          "LYS",
          "LEU",
          "MET",
          "ASN",
          "PRO",
          "GLN",
          "ARG",
          "SER",
          "THR",
          "VAL",
          "TRP",
          "TYR",
          ]:
          if scatterers[i].label[10:13]==resname:
            break
        else:
          result.append(False)
          continue
      # finally: heavy means not hydrogen/deuterium
      if(sct.strip() in ['H','D']): result.append(False)
      else: result.append(True)
    return result
  def atom_names_selection(self,
        atom_names=["C","O","CA","N"],
        ignore_atoms_with_alternative_conformations=False,
        include_only_atoms_with_alternative_conformations=False,
        ):
    """Get a selector array for scatterers of the structure by "atom" names.

    :param atom_names: list of labels of scatterers to select
    :type atom_names: list(string)
    :param ignore_atoms_with_alternative_conformations: select normal atoms/conformations only
    :type ignore_atoms_with_alternative_conformations: boolean
    :param include_only_atoms_with_alternative_conformations: select only scatterers with alternative conformations
    :type include_only_atoms_with_alternative_conformations: boolean
    :returns: an array to select the desired scatterers of the structure
    :rtype: boolean[]
    """
    #XXX may need a better function
    if ( ignore_atoms_with_alternative_conformations and
         include_only_atoms_with_alternative_conformations
         ):
      raise Sorry("Mutually exclusive alternative location options")
    scattering_types = self._scatterers.extract_scattering_types()
    result = flex.bool()
    for sc in self.scatterers():
      # waters are always excluded
      if(sc.label.find("HOH")>-1):
        result.append(False)
        continue
      # label[9:10] is treated as the altloc indicator (blank = no altloc)
      # — assumes PDB-style fixed-width scatterer labels; TODO confirm
      if ignore_atoms_with_alternative_conformations:
        if sc.label[9:10]!=" ":
          result.append(False)
          continue
      if include_only_atoms_with_alternative_conformations:
        if sc.label[9:10]==" ":
          result.append(False)
          continue
      # label[5:9] is treated as the atom-name field; the trailing space
      # avoids e.g. "C" matching "CA"
      for name in atom_names:
        if(sc.label[5:9].find("%s " % name)>-1 and
           sc.scattering_type==name[0]
           ):
          result.append(True)
          break
      else: result.append(False)
    return result
def backbone_selection(self,
atom_names=["C","O","CA","N"],
ignore_atoms_with_alternative_conformations=False,
include_only_atoms_with_alternative_conformations=False,
):
"""Get a selector array for scatterers of the backbone of the structure.
(This is just an alias for atom_names_selection.)
:param atom_names: list of labels of scatterers to select
:type atom_names: list(string)
:param ignore_atoms_with_alternative_conformations: select normal atoms/conformations only
:type ignore_atoms_with_alternative_conformations: boolean
:param include_only_atoms_with_alternative_conformations: select only scatterers with alternative conformations
:type include_only_atoms_with_alternative_conformations: boolean
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
return self.atom_names_selection(
atom_names=atom_names,
ignore_atoms_with_alternative_conformations=ignore_atoms_with_alternative_conformations,
include_only_atoms_with_alternative_conformations=include_only_atoms_with_alternative_conformations,
)
def main_chain_selection(self,
atom_names=["C","O","CA","N","CB"],
ignore_atoms_with_alternative_conformations=False,
include_only_atoms_with_alternative_conformations=False,
):
"""Get a selector array for scatterers of the main chain of the structure.
(This is just an alias for atom_names_selection with a different default selection.)
:param atom_names: list of labels of scatterers to select
:type atom_names: list(string)
:param ignore_atoms_with_alternative_conformations: select normal atoms/conformations only
:type ignore_atoms_with_alternative_conformations: boolean
:param include_only_atoms_with_alternative_conformations: select only scatterers with alternative conformations
:type include_only_atoms_with_alternative_conformations: boolean
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
return self.atom_names_selection(
atom_names=atom_names,
ignore_atoms_with_alternative_conformations=ignore_atoms_with_alternative_conformations,
include_only_atoms_with_alternative_conformations=include_only_atoms_with_alternative_conformations,
)
def peptide_dihedral_selection(self,
atom_names=["C","CA","N"],
ignore_atoms_with_alternative_conformations=False,
include_only_atoms_with_alternative_conformations=False,
):
"""Get a selector array for peptide dihedral scatterers of the structure.
(This is just an alias for atom_names_selection with a different default selection.)
:param atom_names: list of labels of scatterers to select
:type atom_names: list(string)
:param ignore_atoms_with_alternative_conformations: select normal atoms/conformations only
:type ignore_atoms_with_alternative_conformations: boolean
:param include_only_atoms_with_alternative_conformations: select only scatterers with alternative conformations
:type include_only_atoms_with_alternative_conformations: boolean
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
return self.atom_names_selection(
atom_names=atom_names,
ignore_atoms_with_alternative_conformations=ignore_atoms_with_alternative_conformations,
include_only_atoms_with_alternative_conformations=include_only_atoms_with_alternative_conformations,
)
def element_selection(self, *elements):
"""Get a selector array for scatterers of specified element type(s) of the structure.
:param elements: tuple of element symbols to select
:type elements: list(string) or set(string) or tuple(string)
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
return flex.bool([ sc.element_symbol().strip() in elements
for sc in self.scatterers() ])
def label_selection(self, *labels):
"""Get a selector array for scatterers of specified labels from the structure.
:param labels: tuple of scatterer labels to select
:type labels: list(string) or set(string) or tuple(string)
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
return flex.bool([ sc.label in labels for sc in self.scatterers() ])
def label_regex_selection(self, label_regex):
"""Get a selector array for scatterers of specified labels (via regular
expression) from the structure.
:param label_regex: a regular expression matching scatterer labels to select
:type label_regex: string or re
:returns: an array to select the desired scatterers of the structure
:rtype: boolean[]
"""
if is_string(label_regex):
import re
label_regex = re.compile(label_regex)
return flex.bool([ label_regex.search(sc.label) is not None
for sc in self.scatterers() ])
def by_index_selection(self, selected_scatterers):
"""Get a selector array for scatterers with specified index from the
structure. For example you can select scatterers 1,4 and 5 by passing
(1,4,5) as argument.
:param selected_scatterers: list of scatterers to select
:type selected_scatterers: list(int)
:returns: an array to select the desired scatterers of the structure
:rtype: flex.bool[]
"""
result = flex.bool(self._scatterers.size(), False)
try:
result.set_selected(flex.size_t(selected_scatterers), True)
except(RuntimeError):
raise(IndexError("Tried to select a scatterer by index with index => "+\
"of scatterers for this structuture."))
return result
  def apply_rigid_body_shift(self, rot, trans, selection = None,
                             recompute_site_symmetries=True):
    """Apply a rigid-body rotation/translation to the (optionally selected)
    scatterers, updating the fractional sites in place.

    :param rot: rotation to apply
    :param trans: translation to apply
    :param selection: index selection of scatterers to move (all if None)
    :param recompute_site_symmetries: if True the scatterers are removed and
      re-added so their site symmetries are re-derived for the new sites
    :returns: none
    """
    if(selection is None):
      selection = flex.bool(self._scatterers.size(), True).iselection()
    rbs_obj = self.apply_rigid_body_shift_obj(
      sites_cart     = self.sites_cart(),
      sites_frac     = self.sites_frac(),
      rot            = rot,
      trans          = trans,
      selection      = selection,
      unit_cell      = self.unit_cell(),
      atomic_weights = self.atomic_weights())
    self.set_sites_frac(sites_frac = rbs_obj.sites_frac)
    if(recompute_site_symmetries):
      # erase + re-add so site symmetries are recomputed for the moved sites
      scatterers = self.scatterers()
      self.erase_scatterers()
      self.add_scatterers(scatterers = scatterers)
def apply_rigid_body_shift_obj(self,
sites_cart,
sites_frac,
rot,
trans,
selection,
unit_cell,
atomic_weights):
return ext.apply_rigid_body_shift(sites_cart = sites_cart,
sites_frac = sites_frac,
rot = rot,
trans = trans,
atomic_weights = atomic_weights,
unit_cell = unit_cell,
selection = selection)
def convert_to_isotropic(self, selection=None):
# FIXME selection must be size_t!
if(selection is None):
self._scatterers.convert_to_isotropic(unit_cell=self.unit_cell())
else:
self._scatterers.convert_to_isotropic(unit_cell=self.unit_cell(),
selection=selection)
def convert_to_anisotropic(self, selection=None):
if(selection is None):
self._scatterers.convert_to_anisotropic(unit_cell=self.unit_cell())
else:
self._scatterers.convert_to_anisotropic(unit_cell=self.unit_cell(),
selection=selection)
def show_u_statistics(self, text="", out=None, use_hydrogens=False):
if(out is None): out = sys.stdout
size = self._scatterers.size()
if(use_hydrogens):
hd_selection = flex.bool(size, True)
else:
hd_selection = self.hd_selection()
epis = 8*math.pi**2
use_u_aniso = self.use_u_aniso().select(~hd_selection)
use_u_iso = self.use_u_iso().select(~hd_selection)
sel_used = use_u_aniso | use_u_iso
n_anisotropic = use_u_aniso.count(True)
n_isotropic = use_u_iso.count(True)
ipd = self.is_positive_definite_u().select(~hd_selection)
npd = ipd.count(True)
nnpd = ipd.count(False)
beq = (self.extract_u_iso_or_u_equiv() * epis).select(~hd_selection).select(sel_used)
bisos = (self.scatterers().extract_u_iso() * epis).select(~hd_selection).select(use_u_iso)
if(bisos.size() == 0): bisos = beq
part1 = "|-"+text
part2 = "-|"
n = 79 - len(part1+part2)
print >> out, part1 + "-"*n + part2
n = 79 - len(part1 + "|")
print >> out, "| iso = %-5d aniso = %-5d pos. def. = %-5d "\
"non-pos. def. = %-5d |"%(n_isotropic,n_anisotropic,npd,nnpd)
print >> out, "| Total B(isotropic equivalent): min = %-6.2f "\
"max = %-6.2f mean = %-6.2f"%(flex.min(beq),flex.max(beq),
flex.mean(beq))+" "*2+"|"
print >> out, "| Isotropic B only: min = %-6.2f "\
"max = %-6.2f mean = %-6.2f"%(flex.min(bisos),
flex.max(bisos),flex.mean(bisos))+" "*2+"|"
print >> out, "| "+"- "*38+"|"
print >> out, "| Distribution of isotropic B-factors:"\
" |"
print >> out, "| Isotropic | Total "\
" |"
histogram_1 = flex.histogram(data = bisos, n_slots = 10)
low_cutoff_1 = histogram_1.data_min()
histogram_2 = flex.histogram(data = beq, n_slots = 10)
low_cutoff_2 = histogram_2.data_min()
for (i_1,n_1),(i_2,n_2) in zip(enumerate(histogram_1.slots()),
enumerate(histogram_2.slots())):
high_cutoff_1 = histogram_1.data_min() + histogram_1.slot_width()*(i_1+1)
high_cutoff_2 = histogram_2.data_min() + histogram_2.slot_width()*(i_2+1)
print >> out, "| %9.3f -%9.3f:%8d | %9.3f -%9.3f:%8d |" % \
(low_cutoff_1,high_cutoff_1,n_1,low_cutoff_2,high_cutoff_2,n_2)
low_cutoff_1 = high_cutoff_1
low_cutoff_2 = high_cutoff_2
print >> out, "|" +"-"*77+"|"
  def site_symmetry_table(self):
    """Return the site symmetry table of this structure."""
    return self._site_symmetry_table
  def special_position_indices(self):
    """Return the indices of scatterers sitting on special positions."""
    return self._site_symmetry_table.special_position_indices()
  def scattering_type_registry(self,
        custom_dict=None,
        d_min=None,
        table=None,
        types_without_a_scattering_contribution=None,
        explicitly_allow_mixing=False):
    """Return the (possibly rebuilt) scattering type registry.

    The cached registry is rebuilt whenever it is out of date or any of
    custom_dict / d_min / table / types_without_a_scattering_contribution is
    supplied.  Gaussians for unassigned types are looked up in the requested
    table (or the last used table).  Unless explicitly_allow_mixing is True,
    a mix of xray and neutron gaussians raises RuntimeError.

    :param custom_dict: type -> gaussian overrides
    :param d_min: resolution limit used by the n_gaussian table lookup
    :param table: one of None, "n_gaussian", "it1992", "wk1995", "xray",
      "electron", "neutron"
    :param types_without_a_scattering_contribution: types mapped to a zero
      gaussian
    :param explicitly_allow_mixing: skip the xray/neutron mixing check
    :returns: ext.scattering_type_registry
    """
    assert table in [None, "n_gaussian", "it1992", "wk1995", "xray", "electron",
                     "neutron"]
    # validity limits of the tabulated fits
    if (table == "it1992"): assert d_min in [0,None] or d_min >= 1/4.
    if (table == "wk1995"): assert d_min in [0,None] or d_min >= 1/12.
    if (table == "electron"): assert d_min in [0,None] or d_min >= 1/4.
    if ( self._scattering_type_registry_is_out_of_date
         or custom_dict is not None
         or d_min is not None
         or table is not None
         or types_without_a_scattering_contribution is not None):
      # remember the parameters used for this (re)build
      self.scattering_type_registry_params = \
        scattering_type_registry_params(
          custom_dict = custom_dict,
          d_min = d_min,
          table = table,
          types_without_a_scattering_contribution = \
            types_without_a_scattering_contribution)
      new_dict = {"const": eltbx.xray_scattering.gaussian(1)}
      old_dict = {}
      last_used_scattering_table = None
      if (self._scattering_type_registry is not None):
        # carry over assignments from the previous registry (only when
        # neither d_min nor table forces a fresh lookup)
        last_used_scattering_table = self._scattering_type_registry.last_table()
        ugs = self._scattering_type_registry.unique_gaussians_as_list()
        tip = self._scattering_type_registry.type_index_pairs_as_dict()
        for t,i in tip.items():
          if (ugs[i] is not None): old_dict[t] = ugs[i]
        if (d_min is None and table is None):
          new_dict.update(old_dict)
      if (types_without_a_scattering_contribution is not None):
        for t in types_without_a_scattering_contribution:
          new_dict[t] = eltbx.xray_scattering.gaussian(0)
      if (custom_dict is not None):
        new_dict.update(custom_dict)
      if (d_min is None): d_min = 0
      self._scattering_type_registry = ext.scattering_type_registry()
      self._scattering_type_registry.process(self._scatterers)
      # resolve every type the registry could not assign on its own
      for t_undef in self._scattering_type_registry.unassigned_types():
        val = new_dict.get(t_undef, None)
        if (val is None):
          std_lbl = eltbx.xray_scattering.get_standard_label(
            label=t_undef, exact=True, optional=True)
          if (std_lbl is not None):
            if table is None:
              table = last_used_scattering_table
            else:
              last_used_scattering_table = table
            if (table == "it1992"):
              val = eltbx.xray_scattering.it1992(std_lbl, True).fetch()
            elif (table == "wk1995"):
              val = eltbx.xray_scattering.wk1995(std_lbl, True).fetch()
            elif (table == "electron"):
              from cctbx.eltbx.e_scattering import \
                ito_vol_c_2011_table_4_3_2_2_entry_as_gaussian as _
              val = _(label=std_lbl, exact=True)
            elif (table == "neutron"):
              scattering_info = neutron_news_1992_table(std_lbl, True)
              b = scattering_info.bound_coh_scatt_length()
              # TODO:
              # b.imag is ignored here. It depends on wavelength, so values
              # from neutron_news_1992 table are not very useful.
              # Warning is printed by scattering_registry::show.
              val = eltbx.xray_scattering.gaussian(b.real)
            else:
              # TODO mrt: this may lead to a mix of xray/neutron dictionary
              val = eltbx.xray_scattering.n_gaussian_table_entry(
                std_lbl, d_min, 0).gaussian()
              last_used_scattering_table = "n_gaussian"
        if (val is not None):
          self._scattering_type_registry.assign(t_undef, val)
      if last_used_scattering_table is not None:
        self._scattering_type_registry.set_last_table(last_used_scattering_table)
      self._scattering_type_registry_is_out_of_date = False
    if not explicitly_allow_mixing:
      if self.guess_scattering_type_is_a_mixture_of_xray_and_neutron():
        errmsg = "Mixed xray and neutron scattering table! "
        errmsg += self.scattering_dictionary_as_string()
        raise RuntimeError(errmsg)
    return self._scattering_type_registry
def set_inelastic_form_factors(self, photon, table, set_use_fp_fdp=True):
if table == "sasaki":
set_inelastic_ff = ext.set_inelastic_form_factors_from_sasaki
elif table == "henke":
set_inelastic_ff = ext.set_inelastic_form_factors_from_henke
else:
raise RuntimeError("Unknown inelastic form factors table: %s" % table)
self.inelastic_form_factors_source = table
set_inelastic_ff(self.scatterers(), photon, set_use_fp_fdp)
def set_custom_inelastic_form_factors(self, table, set_use_fp_fdp=True,
source="custom"):
""" Expects a dictionary of tuples like 'C' : (fp, fdp). If an element is
not in the dictionary, the fp and fdp are reset to 0 and the use_fp_fdp is
set to false.
"""
for sc in self.scatterers():
fp_fdp = table.get(sc.scattering_type, None)
if fp_fdp is None:
sc.fp = 0
sc.fdp = 0
sc.use_fp_fdp = False
else:
sc.fp = fp_fdp[0]
sc.fdp = fp_fdp[1]
if set_use_fp_fdp:
sc.flags.set_use_fp_fdp(True)
self.inelastic_form_factors_source = source
def mean_scattering_density(self):
r = self.scattering_type_registry()
return r.sum_of_scattering_factors_at_diffraction_angle_0() \
/ self.unit_cell().volume()
def __getitem__(self, slice_object):
assert type(slice_object) == types.SliceType
assert self.scatterers() is not None
sel = flex.slice_indices(
array_size=self._scatterers.size(),
python_slice=slice_object)
return structure(
special_position_settings=self,
scatterers=self._scatterers.select(sel),
site_symmetry_table=self._site_symmetry_table.select(sel),
scattering_type_registry=self._scattering_type_registry)
def select(self, selection, negate=False):
assert self.scatterers() is not None
if (negate): selection = ~selection
return structure(
special_position_settings=self,
scatterers=self._scatterers.select(selection),
site_symmetry_table=self._site_symmetry_table.select(selection),
scattering_type_registry=self._scattering_type_registry)
  def select_inplace(self, selection):
    """Keep only the selected scatterers, modifying this structure in place.

    Marks the cached scattering type registry stale so it is rebuilt on
    next use.
    """
    assert self.scatterers() is not None
    self._scatterers = self._scatterers.select(selection)
    self._site_symmetry_table = self._site_symmetry_table.select(selection)
    self._scattering_type_registry_is_out_of_date = True
  def add_scatterer(self,
        scatterer,
        site_symmetry_ops=None,
        insert_at_index=None):
    """Insert one scatterer (appended at the end by default).

    If site_symmetry_ops is None, the site symmetry is computed from the
    crystal symmetry via apply_symmetry(); otherwise the given operations
    are applied to the new scatterer as-is.
    """
    if (insert_at_index is None):
      insert_at_index = self._scatterers.size()
    self._scatterers.insert(insert_at_index, scatterer)
    if (site_symmetry_ops is None):
      # apply_symmetry() returns the determined site symmetry operations.
      site_symmetry_ops = self._scatterers[insert_at_index].apply_symmetry(
        unit_cell=self.unit_cell(),
        space_group=self.space_group(),
        min_distance_sym_equiv=self.min_distance_sym_equiv(),
        u_star_tolerance=self.u_star_tolerance(),
        assert_min_distance_sym_equiv=self.assert_min_distance_sym_equiv())
    else:
      self._scatterers[insert_at_index].apply_symmetry(
        site_symmetry_ops=site_symmetry_ops,
        u_star_tolerance=self.u_star_tolerance())
    self._site_symmetry_table.process(
      insert_at_index=insert_at_index, site_symmetry_ops=site_symmetry_ops)
    self._scattering_type_registry_is_out_of_date = True
  def add_scatterers(self,
        scatterers,
        site_symmetry_table=None,
        non_unit_occupancy_implies_min_distance_sym_equiv_zero=False):
    """Append an array of scatterers, processing site symmetries in C++.

    :param site_symmetry_table: pre-computed site symmetries for the new
      scatterers (must match their count); if None an empty table is used
      and symmetries are determined by the extension routine
    """
    if (site_symmetry_table is None):
      site_symmetry_table = sgtbx.site_symmetry_table()
    else:
      assert site_symmetry_table.indices().size() == scatterers.size()
      # The special min_distance_sym_equiv handling is only supported
      # when site symmetries are determined here, not when supplied.
      assert not non_unit_occupancy_implies_min_distance_sym_equiv_zero
    self._scatterers.extend(scatterers)
    ext.add_scatterers_ext(
      unit_cell=self.unit_cell(),
      space_group=self.space_group(),
      scatterers=self._scatterers,
      site_symmetry_table=self._site_symmetry_table,
      site_symmetry_table_for_new=site_symmetry_table,
      min_distance_sym_equiv=self.min_distance_sym_equiv(),
      u_star_tolerance=self.u_star_tolerance(),
      assert_min_distance_sym_equiv=self.assert_min_distance_sym_equiv(),
      non_unit_occupancy_implies_min_distance_sym_equiv_zero=
        non_unit_occupancy_implies_min_distance_sym_equiv_zero)
    self._scattering_type_registry_is_out_of_date = True
def concatenate(self, other):
result = self.deep_copy_scatterers()
result.add_scatterers(
scatterers=other._scatterers,
site_symmetry_table=other._site_symmetry_table)
return result
def concatenate_inplace(self, other):
d1 = self.scattering_type_registry().as_type_gaussian_dict()
d2 = other.scattering_type_registry().as_type_gaussian_dict()
for key1, item1 in zip(d1.keys(), d1.items()):
for key2, item2 in zip(d2.keys(), d2.items()):
if(key1 == key2):
i1 = item1[1]
i2 = item2[1]
problem_flag = False
for a1, a2 in zip(i1.array_of_a(), i2.array_of_a()):
if(not approx_equal(a1, a2)): problem_flag = True
for b1, b2 in zip(i1.array_of_b(), i2.array_of_b()):
if(not approx_equal(b1, b2)): problem_flag = True
if(not approx_equal(i1.c(), i2.c())): problem_flag = True
if(problem_flag):
raise RuntimeError("Cannot concatenate: conflicting scatterers")
self.add_scatterers(scatterers = other._scatterers,
site_symmetry_table = other._site_symmetry_table)
strp1 = self.scattering_type_registry_params
strp2 = other.scattering_type_registry_params
self.scattering_type_registry(
custom_dict = strp1.custom_dict,
d_min = strp1.d_min,
table = strp1.table,
types_without_a_scattering_contribution =
strp1.types_without_a_scattering_contribution)
self.scattering_type_registry(
custom_dict = strp2.custom_dict,
d_min = strp2.d_min,
table = strp2.table,
types_without_a_scattering_contribution =
strp2.types_without_a_scattering_contribution)
  def selection_within(self, radius, selection):
    """Extend a selection to all scatterers within radius of it.

    :param radius: cutoff distance (must be positive); also used as the
      asu mapping buffer thickness so symmetry mates are included
    :param selection: primary selection of scatterers
    :returns: selection including neighbors of the primary selection
    """
    assert radius > 0
    assert self.special_position_settings() is not None
    return crystal.neighbors_fast_pair_generator(
      asu_mappings=self.special_position_settings().asu_mappings(
        buffer_thickness=radius,
        sites_cart=self.sites_cart()),
      distance_cutoff=radius).neighbors_of(
        primary_selection=selection)
  def replace_scatterers(self, scatterers, site_symmetry_table="existing"):
    """Replace all scatterers, reusing or re-deriving site symmetries.

    :param site_symmetry_table: "existing" reuses the current table
      (sizes must match); None re-derives symmetries in add_scatterers;
      otherwise a table for the new scatterers
    """
    if (site_symmetry_table == "existing"):
      site_symmetry_table = self._site_symmetry_table
    if (site_symmetry_table is not None):
      assert site_symmetry_table.indices().size() == self.scatterers().size()
    self.erase_scatterers()
    self.add_scatterers(
      scatterers=scatterers,
      site_symmetry_table=site_symmetry_table)
  def structure_factors(self, anomalous_flag=None, d_min=None,
                        algorithm=None,
                        cos_sin_table=False,
                        quality_factor=None,
                        u_base=None,
                        b_base=None,
                        wing_cutoff=None):
    """
    Calculate structure factors for the current scatterers using either direct
    summation or FFT method; by default the appropriate method will be guessed
    automatically.

    :param anomalous_flag: toggles whether the returned structure factors are
      anomalous or not
    :type anomalous_flag: bool
    :param d_min: resolution cutoff (required)
    :type d_min: float
    :param algorithm: specifies 'direct' or 'fft', or if None (default), the
      algorithm will be chosen automatically
    :type algorithm: str or None
    :param cos_sin_table: if True, uses interpolation of values from a
      pre-calculated lookup table for trigonometric function calls in the
      structure factor calculations in preference to calling the system
      libraries
    :type cos_sin_table: bool
    :param quality_factor: determines accuracy of sampled density in fft method
    :type quality_factor: float
    :param u_base: additional smearing B-factor for scatterers which have too
      small Bs so they fall between the grid nodes
    :type u_base: float
    :param b_base: same as u_base (but 8*pi^2 larger)
    :type b_base: float
    :param wing_cutoff: is how far away from atomic center you sample density
      around atom
    :type wing_cutoff: float
    :returns: a custom Python object (exact type depends on method used), from
      which f_calc() may be called
    :rtype: derived from cctbx.xray.structure_factors.manager.managed_calculation_base
    """
    # Infer anomalous_flag from the scatterers when not given; reject an
    # explicit non-anomalous request when anomalous scatterers exist.
    if (anomalous_flag is None):
      if (self.scatterers().count_anomalous() != 0):
        anomalous_flag = True
      else:
        anomalous_flag = False
    elif (not anomalous_flag):
      if (self.scatterers().count_anomalous() != 0):
        raise RuntimeError(
          "xray.structure with anomalous scatterers"
          + " but miller.array is non-anomalous.")
    # Build the full set of reflections to d_min, then evaluate.
    miller_set = miller.build_set(self, anomalous_flag, d_min)
    return structure_factors.from_scatterers(
      crystal_symmetry=self,
      d_min=d_min,
      cos_sin_table=cos_sin_table,
      quality_factor=quality_factor,
      u_base=u_base,
      b_base=b_base,
      wing_cutoff=wing_cutoff)(
        xray_structure=self,
        miller_set=miller_set,
        algorithm=algorithm)
  def as_py_code(self, indent=""):
    """eval(self.as_py_code()) is similar (but sometimes not identical)
    to self and meant to enable quick formatting of self as Python code
    for unit tests.

    :param indent: string prepended to every generated line
    """
    # Header: constructor call with crystal symmetry, opening the
    # scatterer list literal.
    r0 = (
      "%sxray.structure(\n"
      "%s  crystal_symmetry=%s,\n"
      "%s  scatterers=flex.xray_scatterer([\n"
      % (indent,
         indent, self.crystal_symmetry().as_py_code(indent=indent+"  "),
         indent))
    # One formatted scatterer per line, annotated with its index.
    r1 = []
    for i,sc in enumerate(self.scatterers()):
      r1.append(
        indent+"    "
        + sc.as_py_code(indent=indent+"    ", comment=" #%d" % i))
    return r0 + ",\n".join(r1) + "]))"
  def show_summary(self, f=None, prefix=""):
    """Print scatterer counts and the crystal symmetry summary.

    :param f: output stream (default sys.stdout)
    :param prefix: string prepended to each line
    :returns: self (allows call chaining)
    """
    if (f is None): f = sys.stdout
    print >> f, prefix + "Number of scatterers:", \
      self.scatterers().size()
    print >> f, prefix + "At special positions:", \
      self.special_position_indices().size()
    crystal.symmetry.show_summary(self, f=f, prefix=prefix)
    return self
  def show_scatterers(self, f=None, special_positions_only=False):
    """Print a one-line description of each scatterer.

    :param f: output stream (default sys.stdout)
    :param special_positions_only: restrict the listing to scatterers on
      special positions
    :returns: self (allows call chaining)
    """
    if (f is None): f = sys.stdout
    print >> f, ("Label, Scattering, Multiplicity, Coordinates, Occupancy, "
                 "Uiso, Ustar as Uiso")
    scatterers = self.scatterers()
    if (special_positions_only):
      scatterers = scatterers.select(self.special_position_indices())
    for sc in scatterers:
      sc.show(f=f, unit_cell=self.unit_cell())
    return self
  def show_special_position_shifts(self,
        sites_frac_original=None,
        sites_cart_original=None,
        out=None,
        prefix=""):
    """Report how sites moved when snapped onto their special positions.

    Delegates to the site symmetry table, comparing the given original
    sites against the current (exact) scatterer sites.
    """
    self._site_symmetry_table.show_special_position_shifts(
      special_position_settings=self,
      site_labels=self.scatterers().extract_labels(),
      sites_frac_original=sites_frac_original,
      sites_cart_original=sites_cart_original,
      sites_frac_exact=self.scatterers().extract_sites(),
      out=out,
      prefix=prefix)
def is_positive_definite_u(self, u_cart_tolerance=None):
if (u_cart_tolerance is None):
return ext.is_positive_definite_u(
scatterers=self._scatterers,
unit_cell=self.unit_cell())
else:
return ext.is_positive_definite_u(
scatterers=self._scatterers,
unit_cell=self.unit_cell(),
u_cart_tolerance=u_cart_tolerance)
  def tidy_us(self, u_min = 1.e-6, u_max = adptbx.b_as_u(999.99),
              anisotropy_min=0.25):
    """
    Clean up atomic displacements so they fall within a sensible range (this
    is especially important when writing out PDB format).

    :param u_min: lower bound for displacement parameters
    :param u_max: upper bound (default corresponds to B=999.99, the
      largest value expressible in PDB format)
    :param anisotropy_min: forwarded to the extension routine
    """
    assert u_min < u_max
    ext.tidy_us(
      scatterers=self._scatterers,
      unit_cell=self.unit_cell(),
      site_symmetry_table=self._site_symmetry_table,
      u_min=u_min,
      u_max=u_max,
      anisotropy_min=anisotropy_min)
def shift_us(self, u_shift=None, b_shift=None, selection=None):
assert [u_shift, b_shift].count(None) == 1
if (u_shift is None):
u_shift = adptbx.b_as_u(b_shift)
if(selection is None):
ext.shift_us(
scatterers=self._scatterers,
unit_cell=self.unit_cell(),
u_shift=u_shift)
else:
ext.shift_us(scatterers = self._scatterers,
unit_cell = self.unit_cell(),
u_shift = u_shift,
selection = selection)
def shift_occupancies(self, q_shift, selection=None):
if(selection is not None):
ext.shift_occupancies(scatterers = self._scatterers,
q_shift = q_shift,
selection = selection)
else:
ext.shift_occupancies(scatterers = self._scatterers,
q_shift = q_shift)
  def apply_symmetry_sites(self):
    """Snap all scatterer sites onto their recorded special positions."""
    ext.apply_symmetry_sites(
      site_symmetry_table=self._site_symmetry_table,
      scatterers=self._scatterers)
  def apply_symmetry_u_stars(self):
    """Symmetrize all anisotropic displacement tensors according to the
    recorded site symmetries, within the configured u_star tolerance."""
    ext.apply_symmetry_u_stars(
      site_symmetry_table=self._site_symmetry_table,
      scatterers=self._scatterers,
      u_star_tolerance=self.u_star_tolerance())
  def re_apply_symmetry(self, i_scatterer):
    """Re-apply the recorded site symmetry to one scatterer by index."""
    self._scatterers[i_scatterer].apply_symmetry(
      site_symmetry_ops=self._site_symmetry_table.get(i_scatterer),
      u_star_tolerance=self.u_star_tolerance())
  def apply_special_position_ops_d_target_d_site(self, d_target_d_site):
    """Project site gradients onto the special-position constraints.

    Modifies d_target_d_site in place: for each scatterer on a special
    position, the gradient row is multiplied by the rotation part of the
    site's special operation.
    """
    for i in self.special_position_indices():
      special_op = self._site_symmetry_table.get(i).special_op()
      r = matrix.sqr(special_op.r().as_double())
      d_target_d_site[i] = (matrix.row(d_target_d_site[i]) * r).elems
  def asymmetric_unit_in_p1(self):
    """Return a new structure with the same scatterers (asymmetric unit
    only, not expanded) but with a P1 cell-equivalent symmetry."""
    new_structure = structure(
      crystal.special_position_settings(
        crystal.symmetry.cell_equivalent_p1(self)),
      scattering_type_registry=self._scattering_type_registry)
    # Copy scatterers and site symmetries directly; no symmetry expansion.
    new_structure._scatterers = self.scatterers().deep_copy()
    new_structure._site_symmetry_table = self.site_symmetry_table().deep_copy()
    return new_structure
  def change_basis(self, cb_op):
    """Return this structure transformed by a change-of-basis operation.

    :param cb_op: sgtbx.change_of_basis_op, or its string representation
    """
    if (isinstance(cb_op, str)):
      cb_op = sgtbx.change_of_basis_op(cb_op)
    return structure(
      special_position_settings
        =crystal.special_position_settings.change_basis(self, cb_op),
      scatterers=ext.change_basis(scatterers=self._scatterers, cb_op=cb_op),
      site_symmetry_table=self._site_symmetry_table.change_basis(cb_op=cb_op),
      scattering_type_registry=self._scattering_type_registry)
def change_hand(self):
ch_op = self.space_group_info().type().change_of_hand_op()
return self.change_basis(ch_op)
  def expand_to_p1(self,
        append_number_to_labels=False,
        sites_mod_positive=False):
    """Get the current structure expanded into spacegroup P1.

    This turns all symmetry induced scatterers into independent
    individual scatterers. The expanded structure may have sites
    with negative or > 1.0 coordinates. Use '.sites_mod_positive()'
    or '.sites_mod_short()' on the result, or alternatively
    set sites_mod_positive to 'True' in case you want to change this behaviour.

    :param append_number_to_labels: If set to 'True' scatterers generated from \
    symmetry will be labelled with a numerical suffix
    :type append_number_to_labels: boolean

    :param sites_mod_positive: If set to 'True' xyz coordinates of the \
    scatterers will be kept inside [0,1[
    :type sites_mod_positive: boolean

    :returns: a new instance of the structure expanded into P1
    :rtype: cctbx.xray.structure
    """
    result = structure(
      special_position_settings
        =crystal.special_position_settings(
          crystal.symmetry.cell_equivalent_p1(self)),
      scatterers=ext.expand_to_p1(
        unit_cell=self.unit_cell(),
        space_group=self.space_group(),
        scatterers=self._scatterers,
        site_symmetry_table=self._site_symmetry_table,
        append_number_to_labels=append_number_to_labels),
      scattering_type_registry=self._scattering_type_registry)
    if (sites_mod_positive):
      result = result.sites_mod_positive()
    return result
  def sites_mod_positive(self):
    """Get the current structure converted into a structure with x,y,z of all
    scatterers in the interval [0,1[

    :returns: a new structure with only positive fractional coordinates \
    of its scatterers
    :rtype: cctbx.xray.structure
    """
    return structure(
      special_position_settings=self,
      scatterers=self.scatterers().sites_mod_positive(),
      scattering_type_registry=self._scattering_type_registry)
  def sites_mod_short(self):
    """Get the current structure converted into a structure with short
    coordinates vectors of all scatterers

    :returns: a new structure with only short coordinate \
    vectors of its scatterers
    :rtype: cctbx.xray.structure
    """
    return structure(
      special_position_settings=self,
      scatterers=self.scatterers().sites_mod_short(),
      scattering_type_registry=self._scattering_type_registry)
def apply_shift(self, shift, recompute_site_symmetries=False):
shifted_scatterers = self.scatterers().deep_copy()
shifted_scatterers.set_sites(shifted_scatterers.extract_sites() + shift)
if (recompute_site_symmetries):
site_symmetry_table = None
else:
site_symmetry_table = self._site_symmetry_table
return structure(
special_position_settings=self,
scatterers=shifted_scatterers,
site_symmetry_table=site_symmetry_table,
scattering_type_registry=self._scattering_type_registry)
  def random_shift_sites(self, max_shift_cart=0.2):
    """Return a new structure with each site moved by a random cartesian
    shift with components uniform in [-max_shift_cart, max_shift_cart]."""
    shifts = flex.vec3_double(
      (flex.random_double(self.scatterers().size()*3)*2-1) * max_shift_cart)
    return self.apply_shift(self.unit_cell().fractionalize(sites_cart=shifts))
  def sort(self, by_value="occupancy", reverse=False):
    """Return a new structure with scatterers sorted by occupancy.

    :param by_value: only "occupancy" is supported
    :param reverse: sort order flag forwarded to flex.sort_permutation
    """
    assert by_value in ("occupancy",)
    assert reverse in (False, True)
    p = flex.sort_permutation(
      data=self.scatterers().extract_occupancies(),
      reverse=reverse)
    return structure(
      special_position_settings=self,
      scatterers=self._scatterers.select(p),
      site_symmetry_table=self._site_symmetry_table.select(p),
      scattering_type_registry=self._scattering_type_registry)
def as_emma_model(self):
from cctbx import euclidean_model_matching as emma
positions = []
for scatterer in self.scatterers():
positions.append(emma.position(scatterer.label, scatterer.site))
return emma.model(self, positions)
  def atomic_weights(self):
    """Atomic weight per scatterer, looked up from the tiny_pse table.

    :returns: flex.double of weights, one per scatterer
    :raises RuntimeError: if a scattering type cannot be mapped to a
      standard element label
    """
    from cctbx.eltbx import tiny_pse
    result = flex.double()
    for scatterer in self.scatterers():
      std_lbl = eltbx.xray_scattering.get_standard_label(
        label=scatterer.scattering_type, exact=True, optional=True)
      if (std_lbl is None):
        raise RuntimeError(
          "Unknown atomic weight: " + scatterer.scattering_type)
      result.append(tiny_pse.table(std_lbl).weight())
    return result
def center_of_mass(self, atomic_weights=None):
if (atomic_weights is None):
atomic_weights = self.atomic_weights()
return self.sites_cart().mean_weighted(weights=atomic_weights)
  def principal_axes_of_inertia(self, atomic_weights=None):
    """Principal axes of inertia of the cartesian sites (weights default
    to the atomic weights of the scatterers)."""
    if (atomic_weights is None):
      atomic_weights = self.atomic_weights()
    return scitbx.math.principal_axes_of_inertia(
      points=self.sites_cart(),
      weights=atomic_weights)
def set_u_cart(self, u_cart, selection = None):
assert self._scatterers.size() == u_cart.size()
if(selection is not None):
self._scatterers.set_u_cart(unit_cell = self.unit_cell(),
u_cart = u_cart,
selection = selection)
else:
self._scatterers.set_u_cart(unit_cell = self.unit_cell(),
u_cart = u_cart)
  def show_scatterer_flags_summary(self, out=None):
    """Print counts of use/grad flag combinations over all scatterers.

    :param out: output stream (default sys.stdout)
    """
    # XXX move to C++
    if (out is None): out = sys.stdout
    n_use = 0
    n_use_u_both = 0
    n_use_u_iso = 0
    n_use_u_aniso = 0
    n_use_u_none = 0
    n_grad_site = 0
    n_grad_u_iso = 0
    n_grad_u_aniso = 0
    n_grad_occupancy = 0
    n_grad_fp = 0
    n_grad_fdp = 0
    for scatterer in self.scatterers():
      flags = scatterer.flags
      if (flags.use()): n_use += 1
      # Classify the u_iso/u_aniso usage combination.
      i, a = flags.use_u_iso(), flags.use_u_aniso()
      if (i and a): n_use_u_both += 1
      elif (i): n_use_u_iso += 1
      elif (a): n_use_u_aniso += 1
      else: n_use_u_none += 1
      if (flags.grad_site()): n_grad_site += 1
      if (flags.grad_u_iso()): n_grad_u_iso += 1
      if (flags.grad_u_aniso()): n_grad_u_aniso += 1
      if (flags.grad_occupancy()): n_grad_occupancy += 1
      if (flags.grad_fp()): n_grad_fp += 1
      if (flags.grad_fdp()): n_grad_fdp += 1
    print >> out, "n_use            = ", n_use
    if (n_use_u_none != 0):
      print >> out, "n_use_u_none     = ", n_use_u_none
    if (n_use_u_both != 0):
      print >> out, "n_use_u_both     = ", n_use_u_both
    print >> out, "n_use_u_iso      = ", n_use_u_iso
    print >> out, "n_use_u_aniso    = ", n_use_u_aniso
    print >> out, "n_grad_site      = ", n_grad_site
    print >> out, "n_grad_u_iso     = ", n_grad_u_iso
    print >> out, "n_grad_u_aniso   = ", n_grad_u_aniso
    print >> out, "n_grad_occupancy = ", n_grad_occupancy
    print >> out, "n_grad_fp        = ", n_grad_fp
    print >> out, "n_grad_fdp       = ", n_grad_fdp
    print >> out, "total number of scatterers = ", self.scatterers().size()
  def scatterer_flags(self):
    """Return a shared array of the scatterers' flags."""
    return ext.shared_scatterer_flags(self.scatterers())
def set_scatterer_flags(self, scatterer_flags):
scatterer_flags.assign_to(self.scatterers())
def n_parameters(self, considering_site_symmetry_constraints=False):
# XXX move to C++
result = 0
if (considering_site_symmetry_constraints):
sstab = self.site_symmetry_table()
else:
sstab = None
for i_sc,sc in enumerate(self.scatterers()):
flags = sc.flags
if (sstab is None):
site_symmetry = None
else:
site_symmetry = sstab.get(i_sc)
if (site_symmetry.is_point_group_1()):
site_symmetry = None
if (flags.grad_site()):
if (site_symmetry is None):
result += 3
else:
result += site_symmetry.site_constraints().n_independent_params()
if ( flags.grad_u_iso()
and flags.use_u_iso()): result += 1
if ( flags.grad_u_aniso()
and flags.use_u_aniso()):
if (site_symmetry is None):
result += 6
else:
result += site_symmetry.adp_constraints().n_independent_params()
if (flags.grad_occupancy()): result += 1
if (flags.grad_fp()): result += 1
if (flags.grad_fdp()): result += 1
return result
def n_grad_u_iso(self):
return self.scatterers().n_grad_u_iso()
def n_grad_u_aniso(self):
return self.scatterers().n_grad_u_aniso()
  def parameter_map(self):
    """Return a cctbx.xray.parameter_map over this structure's scatterers."""
    return cctbx.xray.parameter_map(self.scatterers())
  def truncate_at_pdb_format_precision(self):
    """Round sites, u_iso, occupancy and u_star in place to the precision
    representable in PDB format.

    Rounding is done by formatting with the PDB field widths and parsing
    back, so the stored values match what a PDB round-trip would give.
    """
    uc = self.unit_cell()
    fra = uc.fractionalize
    ort = uc.orthogonalize
    for sc in self.scatterers():
      # Cartesian coordinates: %8.3f columns.
      sc.site = fra([float("%8.3f"%i) for i in ort(sc.site)])
      # -1.0 is the "unset" marker for u_iso; leave it untouched.
      if(sc.u_iso != -1.0):
        sc.u_iso = adptbx.b_as_u(float("%6.2f"%adptbx.u_as_b(sc.u_iso)))
      sc.occupancy = float("%5.2f"%sc.occupancy)
      # (-1,...,-1) is the "unset" marker for u_star.
      if(sc.u_star != (-1,-1,-1,-1,-1,-1)):
        # ANISOU records store u_cart * 10^4 as integers.
        u_pdb = [int(i*10000) for i in adptbx.u_star_as_u_cart(uc, sc.u_star)]
        sc.u_star = adptbx.u_cart_as_u_star(uc, [float(i)/10000 for i in u_pdb])
  def grads_and_curvs_target_simple(self, miller_indices, da_db, daa_dbb_dab):
    """Gradients and curvatures of a target, computed by the simple
    (direct summation) extension routine.

    :param miller_indices: reflections to evaluate
    :param da_db: first derivatives of the target w.r.t. A and B parts
    :param daa_dbb_dab: second derivatives of the target
    """
    return ext.structure_factors_curvatures_simple_grads_and_curvs_target(
      unit_cell=self.unit_cell(),
      space_group=self.space_group(),
      scatterers=self.scatterers(),
      scattering_type_registry=self.scattering_type_registry(),
      site_symmetry_table=self.site_symmetry_table(),
      miller_indices=miller_indices,
      da_db=da_db,
      daa_dbb_dab=daa_dbb_dab)
def pair_sym_table_show(self,
pair_sym_table,
is_full_connectivity=False,
out=None):
if (is_full_connectivity):
site_symmetry_table = None
else:
site_symmetry_table = self.site_symmetry_table()
return pair_sym_table.show(
f=out,
site_labels=self.scatterers().extract_labels(),
site_symmetry_table=site_symmetry_table,
sites_frac=self.sites_frac(),
unit_cell=self.unit_cell())
  def pair_sym_table_show_distances(self,
        pair_sym_table,
        show_cartesian=False,
        skip_j_seq_less_than_i_seq=False,
        skip_sym_equiv=False,
        out=None):
    """Show pairwise distances from a pair symmetry table, using this
    structure's unit cell, labels and fractional sites.
    """
    return pair_sym_table.show_distances(
      unit_cell=self.unit_cell(),
      site_symmetry_table=self.site_symmetry_table(),
      site_labels=self.scatterers().extract_labels(),
      sites_frac=self.sites_frac(),
      show_cartesian=show_cartesian,
      skip_j_seq_less_than_i_seq=skip_j_seq_less_than_i_seq,
      skip_sym_equiv=skip_sym_equiv,
      out=out)
  def asu_mappings(self, buffer_thickness, asu_is_inside_epsilon=None):
    """Asymmetric unit mappings with this structure's scatterers
    processed in (via the extension routine).

    :param buffer_thickness: thickness of the buffer region around the asu
    """
    result = crystal.symmetry.asu_mappings(self,
      buffer_thickness=buffer_thickness,
      asu_is_inside_epsilon=asu_is_inside_epsilon)
    ext.asu_mappings_process(
      asu_mappings=result,
      scatterers=self._scatterers,
      site_symmetry_table=self._site_symmetry_table)
    return result
  def pair_asu_table(self,
        distance_cutoff=None,
        asu_mappings_buffer_thickness=None,
        asu_is_inside_epsilon=None,
        min_cubicle_edge=5):
    """Build a pair asu table; pairs within distance_cutoff are added
    when a cutoff is given.

    At least one of distance_cutoff / asu_mappings_buffer_thickness is
    required; the buffer thickness defaults to the distance cutoff.
    """
    assert (distance_cutoff is not None
            or asu_mappings_buffer_thickness is not None)
    if (asu_mappings_buffer_thickness is None):
      asu_mappings_buffer_thickness = distance_cutoff
    asu_mappings = self.asu_mappings(
      buffer_thickness=asu_mappings_buffer_thickness,
      asu_is_inside_epsilon=asu_is_inside_epsilon)
    pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
    if (distance_cutoff is not None):
      pair_asu_table.add_all_pairs(
        distance_cutoff=distance_cutoff,
        min_cubicle_edge=min_cubicle_edge)
    return pair_asu_table
  def show_distances(self,
        distance_cutoff=None,
        asu_mappings_buffer_thickness=None,
        asu_is_inside_epsilon=None,
        min_cubicle_edge=5,
        pair_asu_table=None,
        show_cartesian=False,
        keep_pair_asu_table=False,
        out=None):
    """Show interatomic distances, building a pair asu table on the fly
    unless one is supplied.

    Exactly one of distance_cutoff / pair_asu_table must be given.
    """
    assert [distance_cutoff, pair_asu_table].count(None) == 1
    if (pair_asu_table is None):
      pair_asu_table = self.pair_asu_table(
        distance_cutoff=distance_cutoff,
        asu_mappings_buffer_thickness=asu_mappings_buffer_thickness,
        asu_is_inside_epsilon=asu_is_inside_epsilon,
        min_cubicle_edge=min_cubicle_edge)
    return pair_asu_table.show_distances(
      site_labels=self.scatterers().extract_labels(),
      sites_frac=self.sites_frac(),
      show_cartesian=show_cartesian,
      keep_pair_asu_table=keep_pair_asu_table,
      out=out)
  def show_angles(self,
        distance_cutoff=None,
        asu_mappings_buffer_thickness=None,
        asu_is_inside_epsilon=None,
        pair_asu_table=None,
        keep_pair_asu_table=False,
        out=None):
    """Show bond angles, building a pair asu table on the fly unless one
    is supplied.

    Exactly one of distance_cutoff / pair_asu_table must be given.
    """
    assert [distance_cutoff, pair_asu_table].count(None) == 1
    if (pair_asu_table is None):
      pair_asu_table = self.pair_asu_table(
        distance_cutoff=distance_cutoff,
        asu_mappings_buffer_thickness=asu_mappings_buffer_thickness,
        asu_is_inside_epsilon=asu_is_inside_epsilon)
    return pair_asu_table.show_angles(
      site_labels=self.scatterers().extract_labels(),
      sites_frac=self.sites_frac(),
      keep_pair_asu_table=keep_pair_asu_table,
      out=out)
  def conservative_pair_proxies(self, bond_sym_table, conserve_angles):
    """Build conservative pair proxies for this structure (delegates to
    the module-level conservative_pair_proxies factory)."""
    return conservative_pair_proxies(
      structure=self,
      bond_sym_table=bond_sym_table,
      conserve_angles=conserve_angles)
def difference_vectors_cart(self, other):
return other.sites_cart() - self.sites_cart()
def rms_difference(self, other):
return self.sites_cart().rms_difference(other.sites_cart())
  def closest_distances(self, sites_frac, distance_cutoff, use_selection=None):
    """For each external site, find the closest structure scatterer
    within distance_cutoff (considering symmetry mates).

    :param sites_frac: fractional coordinates of the external sites
    :param distance_cutoff: search radius
    :param use_selection: boolean selection of scatterers eligible as
      neighbors (defaults to all)
    :returns: object with attributes sites_frac (external sites moved
      next to their closest scatterer), smallest_distances,
      smallest_distances_sq (-1 where nothing was found within the
      cutoff), i_seqs (closest scatterer index or -1), and
      remove_selection (sites with no neighbor within the cutoff)
    """
    class map_next_to_model_and_find_closest_distances(object):
      def __init__(self, xray_structure, sites_frac, use_selection):
        asu_mappings = xray_structure.asu_mappings(buffer_thickness =
          distance_cutoff)
        asu_mappings.process_sites_frac(sites_frac, min_distance_sym_equiv =
          xray_structure.min_distance_sym_equiv())
        pair_generator = crystal.neighbors_fast_pair_generator(asu_mappings =
          asu_mappings, distance_cutoff = distance_cutoff)
        # Indices < n_xray are structure scatterers; indices >= n_xray
        # are the external sites appended by process_sites_frac.
        n_xray = xray_structure.scatterers().size()
        new_sites_frac = sites_frac.deep_copy()
        # Initialized beyond the cutoff so any real pair is smaller.
        smallest_distances_sq = flex.double(sites_frac.size(),
          distance_cutoff**2+1)
        i_seqs = flex.int(sites_frac.size(), -1)
        for pair in pair_generator:
          if(pair.i_seq < n_xray):
            if (pair.j_seq < n_xray): continue
            # i_seq = molecule
            # j_seq = site
            rt_mx_i = asu_mappings.get_rt_mx_i(pair)
            rt_mx_j = asu_mappings.get_rt_mx_j(pair)
            rt_mx_ji = rt_mx_i.inverse().multiply(rt_mx_j)
            i_seq_new_site_frac = pair.j_seq - n_xray
            new_site_frac = rt_mx_ji * sites_frac[i_seq_new_site_frac]
            jn = pair.i_seq
          else:
            if(pair.j_seq >= n_xray): continue
            # i_seq = site
            # j_seq = molecule
            rt_mx_i = asu_mappings.get_rt_mx_i(pair)
            rt_mx_j = asu_mappings.get_rt_mx_j(pair)
            rt_mx_ij = rt_mx_j.inverse().multiply(rt_mx_i)
            i_seq_new_site_frac = pair.i_seq - n_xray
            new_site_frac = rt_mx_ij * sites_frac[i_seq_new_site_frac]
            jn = pair.j_seq
          # Keep the closest eligible scatterer seen so far.
          if(use_selection[jn]):
            if(smallest_distances_sq[i_seq_new_site_frac] >= pair.dist_sq):
              smallest_distances_sq[i_seq_new_site_frac] = pair.dist_sq
              new_sites_frac[i_seq_new_site_frac] = new_site_frac
              i_seqs[i_seq_new_site_frac] = jn
        self.remove_selection = smallest_distances_sq > distance_cutoff**2
        self.sites_frac = new_sites_frac
        self.smallest_distances = flex.sqrt(
          smallest_distances_sq).set_selected(self.remove_selection, -1)
        self.smallest_distances_sq = smallest_distances_sq.set_selected(
          self.remove_selection, -1)
        self.i_seqs = i_seqs
    if(use_selection is not None):
      assert use_selection.size() == self._scatterers.size()
    else:
      use_selection = flex.bool(self._scatterers.size(), True)
    result = map_next_to_model_and_find_closest_distances(
      xray_structure = self, sites_frac = sites_frac, use_selection =
        use_selection)
    return result
  def orthorhombic_unit_cell_around_centered_scatterers(self, buffer_size):
    """Return a P1 structure with an orthorhombic box enclosing the
    scatterers, padded by buffer_size on every side, scatterers centered.
    """
    sites_cart = self.sites_cart()
    sites_cart_min = sites_cart.min()
    # Box edge = extent along each axis plus a buffer on both sides.
    abc = [2*buffer_size+a-i for i,a in zip(sites_cart_min,sites_cart.max())]
    # Translate so the minimum corner sits at (buffer, buffer, buffer).
    sites_cart += [buffer_size-i for i in sites_cart_min]
    result = structure(
      crystal_symmetry=crystal.symmetry(
        unit_cell=abc,
        space_group_symbol="P1"),
      scatterers=self.scatterers())
    result.set_sites_cart(sites_cart)
    return result
  def cubic_unit_cell_around_centered_scatterers(self, buffer_size):
    """Return a P1 structure with a cubic box enclosing the scatterers,
    padded by at least buffer_size on every side, scatterers centered.
    """
    sites_cart = self.sites_cart()
    sites_cart_min = sites_cart.min()
    span = [a-i for i,a in zip(sites_cart_min, sites_cart.max())]
    # Cube edge: largest extent plus buffer on both sides.
    a = max(span) + 2 * buffer_size
    # Center the scatterers within the cube along each axis.
    sites_cart += [(a-s)/2-i for i,s in zip(sites_cart_min, span)]
    result = structure(
      crystal_symmetry=crystal.symmetry(
        unit_cell=[a,a,a],
        space_group_symbol="P1"),
      scatterers=self.scatterers())
    result.set_sites_cart(sites_cart)
    return result
  def as_cif_simple(self, out=None, data_name="global"):
    """Write this structure as CIF with a single data block.

    :param out: output stream (default sys.stdout)
    :param data_name: name of the CIF data block
    """
    if out is None: out = sys.stdout
    import iotbx.cif
    cif = iotbx.cif.model.cif()
    cif[data_name] = self.as_cif_block()
    print >> out, cif
  def as_cif_block(self, covariance_matrix=None,
      cell_covariance_matrix=None, format="mmCIF"):
    """Build a CIF block with crystal symmetry, _atom_site_* and (for
    anisotropic scatterers) _atom_site_aniso_* loops.

    :param covariance_matrix: packed-U covariance matrix of the refined
      parameters; when given, sites, occupancies and U values are
      formatted with standard uncertainties
    :param cell_covariance_matrix: forwarded to the crystal symmetry block
    :param format: CIF dialect passed to the symmetry/atom-type writers
    """
    from iotbx.cif import atom_type_cif_loop, model
    cs_cif_block = self.crystal_symmetry().as_cif_block(
      format=format,
      cell_covariance_matrix=cell_covariance_matrix)
    # crystal_symmetry_as_cif_block.__init__(
    #   self, xray_structure.crystal_symmetry(),
    #   cell_covariance_matrix=cell_covariance_matrix)
    scatterers = self.scatterers()
    uc = self.unit_cell()
    if covariance_matrix is not None:
      param_map = self.parameter_map()
      covariance_diagonal = covariance_matrix.matrix_packed_u_diagonal()
      # Precomputed transforms for propagating u_star variances to
      # u_cif / u_iso(equivalent) uncertainties.
      u_star_to_u_cif_linear_map_pow2 = flex.pow2(flex.double(
        uc.u_star_to_u_cif_linear_map()))
      u_star_to_u_iso_linear_form = matrix.row(
        uc.u_star_to_u_iso_linear_form())
    fmt = "%.6f"

    # _atom_site_* loop
    atom_site_loop = model.loop(header=(
      '_atom_site_label', '_atom_site_type_symbol',
      '_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z',
      '_atom_site_U_iso_or_equiv', '_atom_site_adp_type',
      '_atom_site_occupancy'))
    for i_seq, sc in enumerate(scatterers):
      # None means "no su available"; filled with plain values below.
      site = occu = u_iso_or_equiv = None
      # site
      if covariance_matrix is not None:
        params = param_map[i_seq]
        if sc.flags.grad_site() and params.site >= 0:
          site = []
          for i in range(3):
            site.append(format_float_with_su(sc.site[i],
              math.sqrt(covariance_diagonal[params.site+i])))
        #occupancy
        if sc.flags.grad_occupancy() and params.occupancy >= 0:
          occu = format_float_with_su(sc.occupancy,
            math.sqrt(covariance_diagonal[params.occupancy]))
        #Uiso/eq
        if sc.flags.grad_u_iso() or sc.flags.grad_u_aniso():
          if sc.flags.grad_u_iso():
            u_iso_or_equiv = format_float_with_su(
              sc.u_iso, math.sqrt(covariance.variance_for_u_iso(
                i_seq, covariance_matrix, param_map)))
          else:
            # Propagate the u_star covariance to the equivalent U_iso.
            cov = covariance.extract_covariance_matrix_for_u_aniso(
              i_seq, covariance_matrix, param_map).matrix_packed_u_as_symmetric()
            var = (u_star_to_u_iso_linear_form * matrix.sqr(cov)
                   ).dot(u_star_to_u_iso_linear_form)
            u_iso_or_equiv = format_float_with_su(
              sc.u_iso_or_equiv(uc), math.sqrt(var))
      if site is None:
        site = [fmt % sc.site[i] for i in range(3)]
      if occu is None:
        occu = fmt % sc.occupancy
      if u_iso_or_equiv is None:
        u_iso_or_equiv = fmt % sc.u_iso_or_equiv(uc)
      if sc.flags.use_u_aniso():
        adp_type = 'Uani'
      else:
        adp_type = 'Uiso'
      atom_site_loop.add_row((
        sc.label, sc.scattering_type, site[0], site[1], site[2], u_iso_or_equiv,
        adp_type, occu))
    cs_cif_block.add_loop(atom_site_loop)

    # _atom_site_aniso_* loop
    aniso_scatterers = scatterers.select(scatterers.extract_use_u_aniso())
    if aniso_scatterers.size():
      labels = list(scatterers.extract_labels())
      aniso_loop = model.loop(header=('_atom_site_aniso_label',
                                      '_atom_site_aniso_U_11',
                                      '_atom_site_aniso_U_22',
                                      '_atom_site_aniso_U_33',
                                      '_atom_site_aniso_U_12',
                                      '_atom_site_aniso_U_13',
                                      '_atom_site_aniso_U_23'))
      for sc in aniso_scatterers:
        u_cif = adptbx.u_star_as_u_cif(uc, sc.u_star)
        if covariance_matrix is not None:
          row = [sc.label]
          idx = param_map[labels.index(sc.label)].u_aniso
          if idx > -1:
            # Variance of each u_cif component from the u_star diagonal.
            var = covariance_diagonal[idx:idx+6] * u_star_to_u_cif_linear_map_pow2
            for i in range(6):
              if var[i] > 0:
                row.append(
                  format_float_with_su(u_cif[i], math.sqrt(var[i])))
              else:
                row.append(fmt%u_cif[i])
          else:
            row = [sc.label] + [fmt%u_cif[i] for i in range(6)]
        else:
          row = [sc.label] + [fmt%u_cif[i] for i in range(6)]
        aniso_loop.add_row(row)
      cs_cif_block.add_loop(aniso_loop)
    cs_cif_block.add_loop(atom_type_cif_loop(self, format=format))
    return cs_cif_block
def as_pdb_file(self,
remark=None,
remarks=[],
fractional_coordinates=False,
resname=None,
connect=None):
import iotbx.pdb.xray_structure
return iotbx.pdb.xray_structure.as_pdb_file(
self=self,
remark=remark,
remarks=remarks,
fractional_coordinates=fractional_coordinates,
resname=resname,
connect=connect)
def from_shelx(cls, *args, **kwds):
import iotbx.shelx
return iotbx.shelx.cctbx_xray_structure_from(cls, *args, **kwds)
from_shelx = classmethod(from_shelx)
def from_cif(cls, file_object=None, file_path=None, data_block_name=None):
import iotbx.cif
from iotbx.cif import builders
result = iotbx.cif.cctbx_data_structures_from_cif(
file_object=file_object, file_path=file_path,
data_block_name=data_block_name,
data_structure_builder=builders.crystal_structure_builder)
if len(result.xray_structures):
if data_block_name is not None:
return result.xray_structures[data_block_name]
else:
return result.xray_structures
else:
raise Sorry("Could not extract an xray.structure from the given input")
from_cif = classmethod(from_cif)
def unit_cell_content(self, omit=None):
""" The content of the unit cell as a chemical formula """
return dict([ (r.scattering_type, r.unit_cell_occupancy_sum)
for r in self.scattering_types_counts_and_occupancy_sums()
if not omit or r.scattering_type not in omit ])
  def make_scatterer_labels_shelx_compatible_in_place(self):
    """Rename scatterer labels in place so that each one is a valid, unique
    SHELX label: 1-4 characters, leading uppercase letter, remaining
    characters uppercase letters or digits.

    Returns a list of (old_label, new_label) pairs for the labels that
    were actually changed.
    """
    result = []
    upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    digits = "0123456789"
    def is_useful_label(lbl):
      # True if lbl already satisfies the SHELX label rules above.
      if (len(lbl) == 0): return False
      if (len(lbl) > 4): return False
      if (lbl[0] not in upper): return False
      for c in lbl[1:]:
        if (    c not in upper
            and c not in digits):
          return False
      return True
    lbl_set = set()
    # NOTE: reset() and the find_*() helpers below intentionally read the
    # loop variable sc from the enclosing scope.
    def reset(label):
      # Record the rename (only if it is an actual change) and reserve it.
      if (sc.label != label):
        result.append((sc.label, label))
        sc.label = label
      lbl_set.add(label)
    for sc in self.scatterers():
      lbl = sc.label.strip().replace(" ", "").upper()
      lbl_is_useful = is_useful_label(lbl)
      if (lbl not in lbl_set and lbl_is_useful):
        reset(label=lbl)
      else:
        def find_tail_replacement(fmt, n):
          # Keep a prefix of lbl and append a numeric tail (e.g. AB12 -> AB1).
          for i in xrange(1,n):
            s = fmt % i
            trial = lbl[:4-len(s)]+s
            if (trial not in lbl_set):
              reset(label=trial)
              return True
          return False
        def find_replacement_using_scattering_type():
          # Fall back to element-symbol + counter labels (e.g. C001, FE01).
          from cctbx.eltbx.xray_scattering import get_element_and_charge_symbols
          e, _ = get_element_and_charge_symbols(
            scattering_type=sc.scattering_type,
            exact=False)
          if (len(e) == 0): return False
          assert len(e) <= 2
          if (len(e) == 1): fmt, n = "%03d", 1000
          else: fmt, n = "%02d", 100
          e = e.upper()
          for i in xrange(1,n):
            trial = e + fmt % i
            if (trial not in lbl_set):
              reset(label=trial)
              return True
          return False
        def find_complete_replacement():
          # Last resort: any letter followed by a three-digit counter.
          for c in upper:
            for i in xrange(1,1000):
              trial = c + "%03d" % i
              if (trial not in lbl_set):
                reset(label=trial)
                return True
          return False
        if (lbl_is_useful):
          if (find_tail_replacement("%d", 10)): continue
          if (find_tail_replacement("%02d", 100)): continue
          if (find_tail_replacement("%03d", 1000)): continue
        if (find_replacement_using_scattering_type()): continue
        if (find_complete_replacement()): continue
        raise RuntimeError(
          "Unable to find unused SHELX-compatible scatterer label.")
    return result
def intersection_of_scatterers(self, i_seq, j_seq):
"""
For a pair of scatterers, calculates their overlap given the coordinates
and displacement parameters (using adptbx.intersection).
"""
sc1 = self.scatterers()[i_seq]
sc2 = self.scatterers()[j_seq]
if (sc1.flags.use_u_aniso()):
u1 = sc1.u_star
else :
u1 = sc1.u_iso
if (sc2.flags.use_u_aniso()):
u2 = sc2.u_star
else :
u2 = sc2.u_iso
return adptbx.intersection(
u_1=u1,
u_2=u2,
site_1=sc1.site,
site_2=sc2.site,
unit_cell=self.unit_cell())
class conservative_pair_proxies(object):
  """Rebuild bond (and optionally angle) restraint proxies for a structure
  from an existing bond symmetry table.

  Attributes: .bond (always set) and .angle (None unless conserve_angles).
  """
  def __init__(self, structure, bond_sym_table, conserve_angles):
    from cctbx import geometry_restraints
    # Buffer must cover the longest bonded contact; doubled when angles are
    # kept — presumably because 1-3 distances can reach twice a bond length
    # (NOTE(review): confirm this rationale).
    buffer_thickness = flex.max_default(
      values=crystal.get_distances(
        pair_sym_table=bond_sym_table,
        orthogonalization_matrix
          =structure.unit_cell().orthogonalization_matrix(),
        sites_frac=structure.sites_frac()),
      default=1)
    if (conserve_angles): buffer_thickness *= 2
    asu_mappings = structure.asu_mappings(buffer_thickness=buffer_thickness)
    bond_pair_asu_table = crystal.pair_asu_table(asu_mappings=asu_mappings)
    bond_pair_asu_table.add_pair_sym_table(sym_table=bond_sym_table)
    self.bond = geometry_restraints.bond_sorted_asu_proxies(
      pair_asu_table=bond_pair_asu_table)
    if (not conserve_angles):
      self.angle = None
    else:
      # Derive 1-3 (angle) pairs from the bond topology.
      angle_pair_asu_table = bond_pair_asu_table.angle_pair_asu_table()
      self.angle = geometry_restraints.bond_sorted_asu_proxies(
        pair_asu_table=angle_pair_asu_table)
class meaningful_site_cart_differences(object):
  """ Differences between the Cartesian coordinates of corresponding sites
  in two structures, cancelling continuous origin shifts if any.
  This is especially useful to compare a refined
  structure to a reference structure as the former may have drifted along
  a continuous shift direction during refinement, therefore spoiling
  a naive comparison of corresponding sites.
  """
  def __init__(self, xs1, xs2):
    self.labels = [ sc.label for sc in xs1.scatterers() ]
    self.delta = canonical_delta = xs1.sites_cart() - xs2.sites_cart()
    if xs1.space_group() == xs2.space_group():
      # Continuous (non-discrete) structure seminvariants = directions along
      # which the origin may float freely.
      ssi = sgtbx.structure_seminvariants(xs1.space_group())\
        .select(discrete=False)
      if ssi.size():
        # Orthogonalized continuous-shift directions (1 to 3 of them).
        # NOTE(review): more than 3 would leave e0..e2/means_correction
        # unassigned below — assumed impossible crystallographically.
        shifts = [ matrix.col(xs1.unit_cell().orthogonalize(vm.v))
                   for vm in ssi.vectors_and_moduli() ]
        if len(shifts) == 1:
          e0 = shifts[0].normalize()
          e1 = e0.ortho()
          e2 = e0.cross(e1)
        elif len(shifts) == 2:
          e0 = shifts[0].normalize()
          v = shifts[1]
          # e1 is orthogonal to e0: e0.dot(e0 - v/e0.dot(v)) = 1 - 1 = 0
          e1 = (e0 - 1/e0.dot(v)*v).normalize()
          e2 = e0.cross(e1)
        elif len(shifts) == 3:
          e0, e1, e2 = [ (1,0,0), (0,1,0), (0,0,1) ]
        # Express the deltas in the (e0, e1, e2) basis ...
        deltas = [ canonical_delta.dot(e) for e in (e0, e1, e2) ]
        means = [ flex.mean(d) for d in deltas ]
        # ... and subtract the mean only along the continuous-shift
        # directions (the first len(shifts) basis vectors).
        if len(shifts) == 1:
          means_correction = (means[0], 0, 0)
        elif len(shifts) == 2:
          means_correction = (means[0], means[1], 0)
        elif len(shifts) == 3:
          means_correction = tuple(means)
        self.delta = flex.vec3_double(deltas[0], deltas[1], deltas[2])
        self.delta -= means_correction
  def max_absolute(self):
    # Largest absolute component over all shift-corrected differences.
    return flex.max_absolute(self.delta.as_double())
  def show(self):
    import itertools
    # Python 2 only (izip and print statement).
    for lbl, diff in itertools.izip(self.labels, self.delta):
      print "%6s: (%.6f, %.6f, %.6f)" % ((lbl,) + diff)
| StarcoderdataPython |
258561 | """Utility functions."""
from genologics.entities import Workflow
import config
def char_to_bool(letter):
    """Transform character (J/N) to Bool."""
    normalized = letter.upper()
    if normalized == 'J':
        return True
    if normalized == 'N':
        return False
    raise ValueError('Invalid character, only J or N allowed.')
def transform_sex(value):
    """Transform helix sex/geslacht value to lims sex/geslacht value."""
    if not value.strip():
        # Empty or whitespace-only input is passed through untouched.
        return value
    lims_values = {'M': 'Man', 'V': 'Vrouw', 'O': 'Onbekend'}
    sex = value.upper()
    if sex not in lims_values:
        raise ValueError('Invalid sex character, only M, V or O allowed.')
    return lims_values[sex]
def transform_sample_name(value):
    """Transform legacy name to new sample name."""
    # str.replace is a no-op when '/' is absent, so no membership check
    # is needed; the result is identical either way.
    return value.replace('/', '')
def stoftestcode_to_workflow(lims, stoftestcode):
    """Return workflow based on helix stoftestcode."""
    try:
        workflow_id = config.stoftestcode_workflow[stoftestcode]
    except KeyError:
        # Unknown stoftestcode: no workflow is associated.
        return None
    return Workflow(lims, id=workflow_id)
| StarcoderdataPython |
6559330 | <gh_stars>10-100
# Copyright 2021 FIRST
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from google.cloud import datastore
DS_ANNOUNCEMENT = 'Announcements'
#
# Returns a list of announcement entities
#
def get_announcements():
    """Return all announcement entities from the datastore as a list."""
    client = datastore.Client()
    return list(client.query(kind=DS_ANNOUNCEMENT).fetch())
#
# Could this be done with an indexed query? Perhaps, but despite all the documentation
# from Google, built in indexes don't seem to be supported when using Firestore in datastore mode.
# And it seems silly to create a single property index when all the Google documentation says
# single property indexes already exist. So, this is just easier than fighting the datastore api.
#
def get_unexpired_announcements():
    """Return only the announcements whose 'expires' timestamp is in the future."""
    return [
        entity for entity in get_announcements()
        if entity['expires'].replace(tzinfo=None) > datetime.now()
    ]
| StarcoderdataPython |
5039946 | from .twoRoundDeterministicRewardEnv import TwoRoundDeterministicRewardEnv
| StarcoderdataPython |
8046752 | <reponame>Fangyh09/pysteps<filename>pysteps/io/importers.py<gh_stars>1-10
"""
pysteps.io.importers
====================
Methods for importing files containing 2d precipitation fields.
The methods in this module implement the following interface::
import_xxx(filename, optional arguments)
where **xxx** is the name (or abbreviation) of the file format and filename
is the name of the input file.
The output of each method is a three-element tuple containing a two-dimensional
precipitation field, the corresponding quality field and a metadata dictionary.
If the file contains no quality information, the quality field is set to None.
Pixels containing missing data are set to nan.
The metadata dictionary contains the following mandatory key-value pairs:
.. tabularcolumns:: |p{2cm}|L|
+------------------+----------------------------------------------------------+
| Key | Value |
+==================+==========================================================+
| projection | PROJ.4-compatible projection definition |
+------------------+----------------------------------------------------------+
| x1 | x-coordinate of the lower-left corner of the data raster |
| | (meters) |
+------------------+----------------------------------------------------------+
| y1 | y-coordinate of the lower-left corner of the data raster |
| | (meters) |
+------------------+----------------------------------------------------------+
| x2 | x-coordinate of the upper-right corner of the data raster|
| | (meters) |
+------------------+----------------------------------------------------------+
| y2 | y-coordinate of the upper-right corner of the data raster|
| | (meters) |
+------------------+----------------------------------------------------------+
| xpixelsize | grid resolution in x-direction (meters) |
+------------------+----------------------------------------------------------+
| ypixelsize | grid resolution in y-direction (meters) |
+------------------+----------------------------------------------------------+
| yorigin | a string specifying the location of the first element in |
| | the data raster w.r.t. y-axis: |
| | 'upper' = upper border |
| | 'lower' = lower border |
+------------------+----------------------------------------------------------+
| institution | name of the institution who provides the data |
+------------------+----------------------------------------------------------+
| timestep | time step of the input data (minutes) |
+------------------+----------------------------------------------------------+
| unit | the physical unit of the data: 'mm/h', 'mm' or 'dBZ' |
+------------------+----------------------------------------------------------+
| transform | the transformation of the data: None, 'dB', 'Box-Cox' or |
| | others |
+------------------+----------------------------------------------------------+
| accutime | the accumulation time in minutes of the data, float |
+------------------+----------------------------------------------------------+
| threshold | the rain/no rain threshold with the same unit, |
| | transformation and accutime of the data. |
+------------------+----------------------------------------------------------+
| zerovalue | the value assigned to the no rain pixels with the same |
| | unit, transformation and accutime of the data. |
+------------------+----------------------------------------------------------+
Available Importers
-------------------
.. autosummary::
:toctree: ../generated/
import_bom_rf3
import_fmi_pgm
import_mch_gif
import_mch_hdf5
import_mch_metranet
import_odim_hdf5
import_knmi_hdf5
"""
import gzip
from matplotlib.pyplot import imread
import numpy as np
import os
from pysteps.exceptions import DataModelError
from pysteps.exceptions import MissingOptionalDependency
try:
import h5py
h5py_imported = True
except ImportError:
h5py_imported = False
try:
import metranet
metranet_imported = True
except ImportError:
metranet_imported = False
try:
import netCDF4
netcdf4_imported = True
except ImportError:
netcdf4_imported = False
try:
import PIL
pil_imported = True
except ImportError:
pil_imported = False
try:
import pyproj
pyproj_imported = True
except ImportError:
pyproj_imported = False
def import_bom_rf3(filename, **kwargs):
    """Import a NetCDF radar rainfall product from the BoM Rainfields3.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Returns
    -------
    out : tuple
        A three-element tuple containing the rainfall field in mm/h imported
        from the Bureau RF3 netcdf, the quality field and the metadata. The
        quality field is currently set to None.
    """
    if not netcdf4_imported:
        raise MissingOptionalDependency(
            "netCDF4 package is required to import BoM Rainfields3 products "
            "but it is not installed"
        )
    precip = _import_bom_rf3_data(filename)
    metadata = _import_bom_rf3_geodata(filename)
    # TODO(import_bom_rf3): Add missing georeferencing data.
    metadata["transform"] = None
    zerovalue = np.nanmin(precip)
    metadata["zerovalue"] = zerovalue
    # Smallest value strictly above the minimum, or nan for an all-nan field.
    threshold = np.nan
    if np.any(np.isfinite(precip)):
        threshold = np.nanmin(precip[precip > zerovalue])
    metadata["threshold"] = threshold
    return precip, None, metadata
def _import_bom_rf3_data(filename):
    """Read the 'precipitation' variable from a Rainfields3 NetCDF file.

    Returns the variable's array, or None when the file has no
    'precipitation' variable.
    """
    ds_rainfall = netCDF4.Dataset(filename)
    try:
        if "precipitation" in ds_rainfall.variables.keys():
            precipitation = ds_rainfall.variables["precipitation"][:]
        else:
            precipitation = None
    finally:
        # Close the dataset even if reading the variable raises, so the
        # file handle is not leaked.
        ds_rainfall.close()
    return precipitation
def _import_bom_rf3_geodata(filename):
    """Build the geodata/metadata dictionary for a Rainfields3 NetCDF file.

    Reads projection, grid extent, pixel size, accumulation time and unit
    from the file. NOTE(review): the "projection" key is only set when the
    file has a "proj" variable — callers relying on the mandatory metadata
    keys should confirm their inputs always carry one.
    """
    geodata = {}
    ds_rainfall = netCDF4.Dataset(filename)
    if "proj" in ds_rainfall.variables.keys():
        projection = ds_rainfall.variables["proj"]
        # Only the Albers equal-area mapping is translated to PROJ.4;
        # anything else yields projection=None.
        if getattr(projection, "grid_mapping_name") == "albers_conical_equal_area":
            projdef = "+proj=aea "
            lon_0 = getattr(projection, "longitude_of_central_meridian")
            projdef += " +lon_0=" + f"{lon_0:.3f}"
            lat_0 = getattr(projection, "latitude_of_projection_origin")
            projdef += " +lat_0=" + f"{lat_0:.3f}"
            standard_parallels = getattr(projection, "standard_parallel")
            projdef += " +lat_1=" + f"{standard_parallels[0]:.3f}"
            projdef += " +lat_2=" + f"{standard_parallels[1]:.3f}"
        else:
            projdef = None
        geodata["projection"] = projdef
    # Grid extent: prefer the declared valid range, fall back to the
    # coordinate values themselves.
    if "valid_min" in ds_rainfall.variables["x"].ncattrs():
        xmin = getattr(ds_rainfall.variables["x"], "valid_min")
        xmax = getattr(ds_rainfall.variables["x"], "valid_max")
        ymin = getattr(ds_rainfall.variables["y"], "valid_min")
        ymax = getattr(ds_rainfall.variables["y"], "valid_max")
    else:
        xmin = min(ds_rainfall.variables["x"])
        xmax = max(ds_rainfall.variables["x"])
        ymin = min(ds_rainfall.variables["y"])
        ymax = max(ds_rainfall.variables["y"])
    xpixelsize = (
        abs(ds_rainfall.variables["x"][1] - ds_rainfall.variables["x"][0])
    )
    ypixelsize = (
        abs(ds_rainfall.variables["y"][1] - ds_rainfall.variables["y"][0])
    )
    # Coordinates given in km are converted to meters.
    factor_scale = 1.0
    if "units" in ds_rainfall.variables["x"].ncattrs():
        if getattr(ds_rainfall.variables["x"], "units") == "km":
            factor_scale = 1000.
    geodata["x1"] = xmin * factor_scale
    geodata["y1"] = ymin * factor_scale
    geodata["x2"] = xmax * factor_scale
    geodata["y2"] = ymax * factor_scale
    geodata["xpixelsize"] = xpixelsize * factor_scale
    geodata["ypixelsize"] = ypixelsize * factor_scale
    geodata["yorigin"] = "upper"  # TODO(_import_bom_rf3_geodata): check this
    # get the accumulation period
    valid_time = None
    if "valid_time" in ds_rainfall.variables.keys():
        times = ds_rainfall.variables["valid_time"]
        calendar = 'standard'
        if 'calendar' in times.ncattrs():
            calendar = times.calendar
        valid_time = netCDF4.num2date(times[:],
                                      units=times.units,
                                      calendar=calendar,
                                      )
    start_time = None
    if "start_time" in ds_rainfall.variables.keys():
        times = ds_rainfall.variables["start_time"]
        calendar = 'standard'
        if 'calendar' in times.ncattrs():
            calendar = times.calendar
        start_time = netCDF4.num2date(times[:],
                                      units=times.units,
                                      calendar=calendar,
                                      )
    # Accumulation time in minutes (None when either timestamp is missing).
    time_step = None
    if start_time is not None:
        if valid_time is not None:
            time_step = (valid_time - start_time).seconds // 60
    geodata["accutime"] = time_step
    # get the unit of precipitation
    if "units" in ds_rainfall.variables["precipitation"].ncattrs():
        units = getattr(ds_rainfall.variables["precipitation"], "units")
        if units in ("kg m-2", "mm"):
            geodata["unit"] = "mm"
    geodata["institution"] = "Commonwealth of Australia, Bureau of Meteorology"
    ds_rainfall.close()
    return geodata
def import_fmi_pgm(filename, **kwargs):
    """Import a 8-bit PGM radar reflectivity composite from the FMI archive.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Other Parameters
    ----------------
    gzipped : bool
        If True, the input file is treated as a compressed gzip file.

    Returns
    -------
    out : tuple
        A three-element tuple containing the reflectivity composite in dBZ
        and the associated quality field and metadata. The quality field is
        currently set to None.
    """
    if not pyproj_imported:
        raise MissingOptionalDependency(
            "pyproj package is required to import "
            "FMI's radar reflectivity composite "
            "but it is not installed"
        )
    gzipped = kwargs.get("gzipped", False)
    pgm_metadata = _import_fmi_pgm_metadata(filename, gzipped=gzipped)
    # Truth-test the flag instead of the previous "gzipped is False" identity
    # check, so any falsy value (e.g. 0) selects the uncompressed branch as
    # intended rather than being treated as gzipped.
    if gzipped:
        R = imread(gzip.open(filename, "r"))
    else:
        R = imread(filename)
    geodata = _import_fmi_pgm_geodata(pgm_metadata)
    MASK = R == pgm_metadata["missingval"]
    R = R.astype(float)
    R[MASK] = np.nan
    # Convert 8-bit digital numbers to dBZ: dBZ = (DN - 64) / 2.
    R = (R - 64.0) / 2.0
    metadata = geodata
    metadata["institution"] = "Finnish Meteorological Institute"
    metadata["accutime"] = 5.0
    metadata["unit"] = "dBZ"
    metadata["transform"] = "dB"
    metadata["zerovalue"] = np.nanmin(R)
    if np.any(np.isfinite(R)):
        metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    else:
        metadata["threshold"] = np.nan
    return R, None, metadata
def _import_fmi_pgm_geodata(metadata):
    """Build the geodata dictionary from parsed FMI PGM header metadata."""
    if metadata["type"][0] != "stereographic":
        raise ValueError("unknown projection %s" % metadata["type"][0])
    # Assemble the PROJ.4 definition. The ellipsoid radius and the false
    # easting/northing are hard-coded because the projection definition is
    # missing from the PGM files.
    projdef = "".join(
        (
            "+proj=stere ",
            " +lon_0=" + metadata["centrallongitude"][0] + "E",
            " +lat_0=" + metadata["centrallatitude"][0] + "N",
            " +lat_ts=" + metadata["truelatitude"][0],
            " +a=6371288",
            " +x_0=380886.310",
            " +y_0=3395677.920",
            " +no_defs",
        )
    )
    geodata = {"projection": projdef}
    ll_lon, ll_lat = (float(v) for v in metadata["bottomleft"])
    ur_lon, ur_lat = (float(v) for v in metadata["topright"])
    pr = pyproj.Proj(projdef)
    geodata["x1"], geodata["y1"] = pr(ll_lon, ll_lat)
    geodata["x2"], geodata["y2"] = pr(ur_lon, ur_lat)
    geodata["xpixelsize"] = float(metadata["metersperpixel_x"][0])
    geodata["ypixelsize"] = float(metadata["metersperpixel_y"][0])
    geodata["yorigin"] = "upper"
    return geodata
def _import_fmi_pgm_metadata(filename, gzipped=False):
metadata = {}
if not gzipped:
f = open(filename, "rb")
else:
f = gzip.open(filename, "rb")
l = f.readline()
while not l.startswith(b"#"):
l = f.readline()
while l.startswith(b"#"):
x = l.decode()
x = x[1:].strip().split(" ")
if len(x) >= 2:
k = x[0]
v = x[1:]
metadata[k] = v
else:
l = f.readline()
continue
l = f.readline()
l = f.readline().decode()
metadata["missingval"] = int(l)
f.close()
return metadata
def import_mch_gif(filename, product, unit, accutime):
    """Import a 8-bit gif radar reflectivity composite from the MeteoSwiss
    archive.

    Parameters
    ----------
    filename : str
        Name of the file to import.
    product : {"AQC", "CPC", "RZC", "AZC"}
        The name of the MeteoSwiss QPE product.\n
        Currently supported prducts:

        +------+----------------------------+
        | Name | Product                    |
        +======+============================+
        | AQC  | Acquire                    |
        +------+----------------------------+
        | CPC  | CombiPrecip                |
        +------+----------------------------+
        | RZC  | Precip                     |
        +------+----------------------------+
        | AZC  | RZC accumulation           |
        +------+----------------------------+

    unit : {"mm/h", "mm", "dBZ"}
        the physical unit of the data
    accutime : float
        the accumulation time in minutes of the data

    Returns
    -------
    out : tuple
        A three-element tuple containing the precipitation field in mm/h imported
        from a MeteoSwiss gif file and the associated quality field and metadata.
        The quality field is currently set to None.
    """
    if not pil_imported:
        raise MissingOptionalDependency(
            "PIL package is required to import "
            "radar reflectivity composite from MeteoSwiss "
            "but it is not installed"
        )
    geodata = _import_mch_geodata()
    metadata = geodata
    # import gif file
    B = PIL.Image.open(filename)
    if product.lower() in ["azc", "rzc", "precip"]:
        # convert 8-bit GIF colortable to RGB values
        Brgb = B.convert("RGB")
        # load lookup table mapping RGB triplets to physical values
        if product.lower() == "azc":
            lut_filename = os.path.join(
                os.path.dirname(__file__), "mch_lut_8bit_Metranet_AZC_V104.txt"
            )
        else:
            lut_filename = os.path.join(
                os.path.dirname(__file__), "mch_lut_8bit_Metranet_v103.txt"
            )
        lut = np.genfromtxt(lut_filename, skip_header=1)
        lut = dict(zip(zip(lut[:, 1], lut[:, 2], lut[:, 3]), lut[:, -1]))
        # apply lookup table conversion; unknown colors become NaN
        R = np.zeros(len(Brgb.getdata()))
        for i, dn in enumerate(Brgb.getdata()):
            R[i] = lut.get(dn, np.nan)
        # convert to original shape
        width, height = B.size
        R = R.reshape(height, width)
        # set values outside observational range to NaN,
        # and values in non-precipitating areas to zero.
        R[R < 0] = 0
        R[R > 9999] = np.nan
    # BUGFIX: the list previously contained "acquire " (trailing space), so
    # product="Acquire" incorrectly fell through to the ValueError below.
    elif product.lower() in ["aqc", "cpc", "acquire", "combiprecip"]:
        # convert digital numbers to physical values
        B = np.array(B, dtype=int)
        # build lookup table [mm/5min] from the Z-R relation Z = A * R**b
        lut = np.zeros(256)
        A = 316.0
        b = 1.5
        for i in range(256):
            if (i < 2) or (i > 250 and i < 255):
                lut[i] = 0.0
            elif i == 255:
                lut[i] = np.nan
            else:
                lut[i] = (10.0 ** ((i - 71.5) / 20.0) / A) ** (1.0 / b)
        # apply lookup table
        R = lut[B]
    else:
        raise ValueError("unknown product %s" % product)
    metadata["accutime"] = accutime
    metadata["unit"] = unit
    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(R)
    if np.any(R > np.nanmin(R)):
        metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    else:
        metadata["threshold"] = np.nan
    metadata["institution"] = "MeteoSwiss"
    metadata["product"] = product
    return R, None, metadata
def import_mch_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a HDF5
    file conforming to the ODIM specification.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Other Parameters
    ----------------
    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identitiers
        are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
        accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
        is 'RATE'.

    Returns
    -------
    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed"
        )
    qty = kwargs.get("qty", "RATE")
    if qty not in ["ACRR", "DBZH", "RATE"]:
        # BUGFIX: the format string previously had no "% qty" argument, so
        # the message contained a literal "%s" instead of the quantity.
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
            % qty
        )
    f = h5py.File(filename, "r")
    R = None
    Q = None
    for dsg in f.items():
        if dsg[0][0:7] == "dataset":
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_mch_hdf5_what_group(
                    dsg[1]["what"]
                )
                what_grp_found = True
            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # check if the "what" group is in the "data" group
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_mch_hdf5_what_group(
                            dg[1]["what"]
                        )
                    elif not what_grp_found:
                        raise DataModelError(
                            "Non ODIM compilant file: "
                            "no what group found from {} "
                            "or its subgroups".format(dg[0])
                        )
                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
                        # decode raw values: nodata -> NaN, undetect -> NaN,
                        # everything else scaled by gain/offset
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)
                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = np.nan
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan
    if R is None:
        raise IOError("requested quantity %s not found" % qty)
    where = f["where"]
    # NOTE: unused — the projdef attribute is empty in MCH files, so the
    # CCS4 georeferencing is hard-coded via _import_mch_geodata() instead.
    proj4str = where.attrs["projdef"].decode()
    geodata = _import_mch_geodata()
    metadata = geodata
    # TODO: use those from the hdf5 file instead
    # xpixelsize = where.attrs["xscale"] * 1000.0
    # ypixelsize = where.attrs["yscale"] * 1000.0
    # xsize = where.attrs["xsize"]
    # ysize = where.attrs["ysize"]
    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None
    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan
    metadata.update(
        {
            "yorigin": "upper",
            "institution": "MeteoSwiss",
            "accutime": 5.0,
            "unit": unit,
            "transform": transform,
            "zerovalue": np.nanmin(R),
            "threshold": thr,
        }
    )
    f.close()
    return R, Q, metadata
def import_mch_metranet(filename, product, unit, accutime):
    """Import a 8-bit bin radar reflectivity composite from the MeteoSwiss
    archive.

    Parameters
    ----------
    filename : str
        Name of the file to import.
    product : {"AQC", "CPC", "RZC", "AZC"}
        The name of the MeteoSwiss QPE product.\n
        Currently supported prducts:

        +------+----------------------------+
        | Name | Product                    |
        +======+============================+
        | AQC  | Acquire                    |
        +------+----------------------------+
        | CPC  | CombiPrecip                |
        +------+----------------------------+
        | RZC  | Precip                     |
        +------+----------------------------+
        | AZC  | RZC accumulation           |
        +------+----------------------------+

    unit : {"mm/h", "mm", "dBZ"}
        the physical unit of the data
    accutime : float
        the accumulation time in minutes of the data

    Returns
    -------
    out : tuple
        A three-element tuple containing the precipitation field in mm/h imported
        from a MeteoSwiss gif file and the associated quality field and metadata.
        The quality field is currently set to None.
    """
    if not metranet_imported:
        raise MissingOptionalDependency(
            "metranet package needed for importing MeteoSwiss "
            "radar composites but it is not installed"
        )
    ret = metranet.read_file(filename, physic_value=True, verbose=False)
    R = ret.data
    geodata = _import_mch_geodata()
    # read metranet
    metadata = geodata
    metadata["institution"] = "MeteoSwiss"
    metadata["accutime"] = accutime
    metadata["unit"] = unit
    metadata["transform"] = None
    metadata["zerovalue"] = np.nanmin(R)
    if np.isnan(metadata["zerovalue"]):
        metadata["threshold"] = np.nan
    else:
        # Guard against an all-constant field: np.nanmin over an empty
        # selection would raise, so fall back to nan (consistent with the
        # other importers in this module).
        above_zero = R[R > metadata["zerovalue"]]
        metadata["threshold"] = np.nanmin(above_zero) if above_zero.size else np.nan
    return R, None, metadata
def _import_mch_geodata():
"""Swiss radar domain CCS4
These are all hard-coded because the georeferencing is missing from the gif files.
"""
geodata = {}
# LV03 Swiss projection definition in Proj4
projdef = ""
projdef += "+proj=somerc "
projdef += " +lon_0=7.43958333333333"
projdef += " +lat_0=46.9524055555556"
projdef += " +k_0=1"
projdef += " +x_0=600000"
projdef += " +y_0=200000"
projdef += " +ellps=bessel"
projdef += " +towgs84=674.374,15.056,405.346,0,0,0,0"
projdef += " +units=m"
projdef += " +no_defs"
geodata["projection"] = projdef
geodata["x1"] = 255000.0
geodata["y1"] = -160000.0
geodata["x2"] = 965000.0
geodata["y2"] = 480000.0
geodata["xpixelsize"] = 1000.0
geodata["ypixelsize"] = 1000.0
geodata["yorigin"] = "upper"
return geodata
def import_odim_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a
    HDF5 file conforming to the ODIM specification.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Other Parameters
    ----------------
    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identitiers
        are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
        accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
        is 'RATE'.

    Returns
    -------
    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed"
        )
    # BUGFIX: this function uses pyproj below but previously did not guard
    # against it being missing (unlike the other importers), which produced
    # a bare NameError instead of a helpful message.
    if not pyproj_imported:
        raise MissingOptionalDependency(
            "pyproj package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed"
        )
    qty = kwargs.get("qty", "RATE")
    if qty not in ["ACRR", "DBZH", "RATE"]:
        # BUGFIX: the format string previously had no "% qty" argument, so
        # the message contained a literal "%s" instead of the quantity.
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
            % qty
        )
    f = h5py.File(filename, "r")
    R = None
    Q = None
    for dsg in f.items():
        if dsg[0][0:7] == "dataset":
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                    dsg[1]["what"]
                )
                what_grp_found = True
            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # check if the "what" group is in the "data" group
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                            dg[1]["what"]
                        )
                    elif not what_grp_found:
                        raise DataModelError(
                            "Non ODIM compilant file: "
                            "no what group found from {} "
                            "or its subgroups".format(dg[0])
                        )
                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
                        # decode raw values: nodata -> NaN, undetect -> 0,
                        # everything else scaled by gain/offset
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)
                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = 0.0
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan
    if R is None:
        raise IOError("requested quantity %s not found" % qty)
    where = f["where"]
    proj4str = where.attrs["projdef"].decode()
    pr = pyproj.Proj(proj4str)
    LL_lat = where.attrs["LL_lat"]
    LL_lon = where.attrs["LL_lon"]
    UR_lat = where.attrs["UR_lat"]
    UR_lon = where.attrs["UR_lon"]
    if (
        "LR_lat" in where.attrs.keys()
        and "LR_lon" in where.attrs.keys()
        and "UL_lat" in where.attrs.keys()
        and "UL_lon" in where.attrs.keys()
    ):
        LR_lat = float(where.attrs["LR_lat"])
        LR_lon = float(where.attrs["LR_lon"])
        UL_lat = float(where.attrs["UL_lat"])
        UL_lon = float(where.attrs["UL_lon"])
        full_cornerpts = True
    else:
        full_cornerpts = False
    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    if full_cornerpts:
        # take the bounding box over all four projected corner points
        LR_x, LR_y = pr(LR_lon, LR_lat)
        UL_x, UL_y = pr(UL_lon, UL_lat)
        x1 = min(LL_x, UL_x)
        y1 = min(LL_y, LR_y)
        x2 = max(LR_x, UR_x)
        y2 = max(UL_y, UR_y)
    else:
        x1 = LL_x
        y1 = LL_y
        x2 = UR_x
        y2 = UR_y
    if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
        xpixelsize = where.attrs["xscale"]
        ypixelsize = where.attrs["yscale"]
    else:
        xpixelsize = None
        ypixelsize = None
    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None
    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan
    metadata = {
        "projection": proj4str,
        "ll_lon": LL_lon,
        "ll_lat": LL_lat,
        "ur_lon": UR_lon,
        "ur_lat": UR_lat,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
        "xpixelsize": xpixelsize,
        "ypixelsize": ypixelsize,
        "yorigin": "upper",
        "institution": "Odyssey datacentre",
        "accutime": 15.0,
        "unit": unit,
        "transform": transform,
        "zerovalue": np.nanmin(R),
        "threshold": thr,
    }
    f.close()
    return R, Q, metadata
def _read_mch_hdf5_what_group(whatgrp):
qty = whatgrp.attrs["quantity"] if "quantity" in whatgrp.attrs.keys() else "RATE"
gain = whatgrp.attrs["gain"] if "gain" in whatgrp.attrs.keys() else 1.0
offset = whatgrp.attrs["offset"] if "offset" in whatgrp.attrs.keys() else 0.0
nodata = whatgrp.attrs["nodata"] if "nodata" in whatgrp.attrs.keys() else 0
undetect = whatgrp.attrs["undetect"] if "undetect" in whatgrp.attrs.keys() else -1.0
return qty, gain, offset, nodata, undetect
def _read_odim_hdf5_what_group(whatgrp):
qty = whatgrp.attrs["quantity"]
gain = whatgrp.attrs["gain"] if "gain" in whatgrp.attrs.keys() else 1.0
offset = whatgrp.attrs["offset"] if "offset" in whatgrp.attrs.keys() else 0.0
nodata = whatgrp.attrs["nodata"] if "nodata" in whatgrp.attrs.keys() else np.nan
undetect = whatgrp.attrs["undetect"] if "undetect" in whatgrp.attrs.keys() else 0.0
return qty, gain, offset, nodata, undetect
def import_knmi_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a
    HDF5 file conforming to the KNMI Data Centre specification.
    Parameters
    ----------
    filename : str
        Name of the file to import.
    Other Parameters
    ----------------
    accutime : float
        The accumulation time of the dataset in minutes.
    pixelsize: float
        The pixelsize of a raster cell in meters.
    Returns
    -------
    out : tuple
        A three-element tuple containing precipitation accumulation of the KNMI
        product, the associated quality field and metadata. The quality
        field is currently set to None.
    """
    # TODO: Add quality field.
    if not h5py_imported:
        raise Exception("h5py not imported")
    # Generally, the 5 min. data is used, but also hourly, daily and monthly
    # accumulations are present.
    accutime = kwargs.get("accutime", 5.0)
    pixelsize = kwargs.get(
        "pixelsize", 1000.0
    )  # 1.0 or 2.4 km datasets are available - give pixelsize in meters
    ####
    # Precipitation fields
    ####
    f = h5py.File(filename, "r")
    dset = f["image1"]["image_data"]
    R_intermediate = np.copy(dset)  # copy the content
    # R is divided by 100.0, because the data is saved as hundreds of mm (so, as integers)
    # 65535 is the no data value
    # Precision of the data is two decimals (0.01 mm).
    # BUGFIX: use np.nan instead of the np.NaN alias (removed in NumPy 2.0).
    # The former dead check `if R is None` was dropped: np.where always
    # returns an array, so it could never trigger.
    R = np.where(R_intermediate == 65535, np.nan, R_intermediate / 100.0)
    ####
    # Meta data
    ####
    metadata = {}
    # The 'where' group of mch- and Opera-data, is called 'geographic' in the
    # KNMI data.
    geographic = f["geographic"]
    proj4str = geographic["map_projection"].attrs["projection_proj4_params"].decode()
    pr = pyproj.Proj(proj4str)
    metadata["projection"] = proj4str
    # Get coordinates of the four product corners
    latlon_corners = geographic.attrs["geo_product_corners"]
    LL_lat = latlon_corners[1]
    LL_lon = latlon_corners[0]
    UR_lat = latlon_corners[5]
    UR_lon = latlon_corners[4]
    LR_lat = latlon_corners[7]
    LR_lon = latlon_corners[6]
    UL_lat = latlon_corners[3]
    UL_lon = latlon_corners[2]
    # Project the corner points and take the bounding envelope
    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    LR_x, LR_y = pr(LR_lon, LR_lat)
    UL_x, UL_y = pr(UL_lon, UL_lat)
    x1 = min(LL_x, UL_x)
    y2 = min(LL_y, LR_y)
    x2 = max(LR_x, UR_x)
    y1 = max(UL_y, UR_y)
    # NOTE(review): y1 is the maximum and y2 the minimum y coordinate here,
    # which matches yorigin="upper" below - confirm against downstream
    # geometry handling.
    # Fill in the metadata (extent is converted from km to meters)
    metadata["x1"] = x1 * 1000.
    metadata["y1"] = y1 * 1000.
    metadata["x2"] = x2 * 1000.
    metadata["y2"] = y2 * 1000.
    metadata["xpixelsize"] = pixelsize
    metadata["ypixelsize"] = pixelsize
    metadata["yorigin"] = "upper"
    metadata["institution"] = "KNMI - Royal Netherlands Meteorological Institute"
    metadata["accutime"] = accutime
    metadata["unit"] = "mm"
    metadata["transform"] = None
    metadata["zerovalue"] = 0.0
    # Consistent with the other HDF5 importers: guard against an all-NaN field
    # before computing the smallest positive value as the rain/no-rain threshold.
    if np.any(np.isfinite(R)):
        metadata["threshold"] = np.nanmin(R[R > np.nanmin(R)])
    else:
        metadata["threshold"] = np.nan
    f.close()
    return R, None, metadata
| StarcoderdataPython |
3490054 | """
Asyncio client for Zyte Data API
""" | StarcoderdataPython |
9673146 | import asyncio
import logging
import re
from datetime import date, datetime
from itertools import cycle
from urllib.parse import urlencode
import aiohttp
from django.conf import settings
from django.utils.translation import ugettext as _
from ..constants import colors, matomo_periods
logger = logging.getLogger(__name__)
class MatomoException(Exception):
    """Raised when an interaction with the Matomo analytics API fails."""
class MatomoApiManager:
    """
    This class helps to interact with Matomo API.
    There are three functions which can be used publicly:
    * :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_matomo_id`: Retrieve the Matomo ID belonging to the given Matomo access token
    * :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_total_visits`: Retrieve the total visits for the current region
    * :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_visits_per_language`: Retrieve the visits for the current region by language
    """
    #: Matomo API-key (authentication token used for all API requests)
    matomo_token = None
    #: Matomo ID (site id of the region in the Matomo instance)
    matomo_id = None
    #: The active languages of the region
    languages = None
    def __init__(self, region):
        """
        Constructor initializes the class variables
        :param region: The region this Matomo API Manager connects to
        :type region: ~cms.models.regions.region.Region
        """
        self.matomo_token = region.matomo_token
        self.matomo_id = region.matomo_id
        # queryset of the region's languages; evaluated later in the sync
        # wrapper methods (see get_visits_per_language)
        self.languages = region.languages
    async def fetch(self, session, **kwargs):
        """
        Uses :meth:`aiohttp.ClientSession.get` to perform an asynchronous GET request to the Matomo API.
        :param session: The session object which is used for the request
        :type session: aiohttp.ClientSession
        :param kwargs: The parameters which are passed to the Matomo API
        :type kwargs: dict
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The parsed :mod:`json` result
        :rtype: dict
        """
        # The default get parameters for all requests
        query_params = {
            "format": "JSON",
            "module": "API",
            "token_auth": self.matomo_token,
        }
        # Update with the custom params for this request
        query_params.update(kwargs)
        url = f"{settings.MATOMO_URL}/?{urlencode(query_params)}"
        logger.debug(
            "Requesting %r: %s",
            query_params.get("method"),
            # Mask auth token in log
            re.sub(r"&token_auth=[^&]+", "&token_auth=********", url),
        )
        try:
            async with session.get(url) as response:
                response_data = await response.json()
                # Matomo signals errors inside the JSON payload as
                # {"result": "error", "message": "..."} rather than via HTTP status
                if (
                    isinstance(response_data, dict)
                    and response_data.get("result") == "error"
                ):
                    raise MatomoException(response_data["message"])
                return response_data
        except aiohttp.ClientError as e:
            # Re-raise network/client errors as MatomoException, preserving the cause
            raise MatomoException(str(e)) from e
    async def get_matomo_id_async(self, **query_params):
        """
        Async wrapper to fetch the Matomo ID with :mod:`aiohttp`.
        Opens a :class:`~aiohttp.ClientSession` and calls :func:`~cms.utils.matomo_api_manager.MatomoApiManager.fetch`.
        Called from :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_matomo_id`.
        :param query_params: The parameters which are passed to the Matomo API
        :type query_params: dict
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The parsed :mod:`json` result
        :rtype: list
        """
        # The session is closed automatically when leaving the context manager
        async with aiohttp.ClientSession() as session:
            return await self.fetch(session, **query_params)
    def get_matomo_id(self, token_auth):
        """
        Returns the matomo website id based on the provided authentication key.
        :param token_auth: The Matomo authentication token which should be used
        :type token_auth: str
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request or the access token is not correct
        :return: ID of the connected Matomo instance
        :rtype: int
        """
        # Initialize async event loop
        # (a fresh loop is created because this synchronous method may be
        # invoked outside of any running event loop)
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Execute async request to Matomo API
        response = loop.run_until_complete(
            self.get_matomo_id_async(
                token_auth=token_auth, method="SitesManager.getSitesIdWithAdminAccess"
            )
        )
        try:
            # The API returns the list of site ids the token has admin access to
            return response[0]
        except IndexError as e:
            # If no id is returned, there is no user with the given access token
            raise MatomoException("The access token is not correct.") from e
    async def get_total_visits_async(self, query_params):
        """
        Async wrapper to fetch the total visits with :mod:`aiohttp`.
        Opens a :class:`~aiohttp.ClientSession` and calls :func:`~cms.utils.matomo_api_manager.MatomoApiManager.fetch`.
        Called from :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_total_visits`.
        :param query_params: The parameters which are passed to the Matomo API
        :type query_params: dict
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The parsed :mod:`json` result
        :rtype: dict
        """
        async with aiohttp.ClientSession() as session:
            return await self.fetch(
                session,
                **query_params,
            )
    def get_total_visits(self, start_date, end_date, period=matomo_periods.DAY):
        """
        Returns the total calls within a time range for all languages.
        :param start_date: Start date
        :type start_date: ~datetime.date
        :param end_date: End date
        :type end_date: ~datetime.date
        :param period: The period (one of :attr:`cms.constants.matomo_periods.CHOICES` -
                       defaults to :attr:`~cms.constants.matomo_periods.DAY`)
        :type period: str
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The total visits in the ChartData format expected by ChartJs
        :rtype: dict
        """
        query_params = {
            "date": f"{start_date},{end_date}",
            "idSite": self.matomo_id,
            "method": "VisitsSummary.getVisits",
            "period": period,
        }
        # Initialize async event loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Execute async request to Matomo API
        # (dataset maps date label -> visit count)
        dataset = loop.run_until_complete(self.get_total_visits_async(query_params))
        return {
            # Send original labels for usage in the CSV export (convert to list because type dict_keys is not JSON-serializable)
            "exportLabels": list(dataset.keys()),
            # Return the data in the ChartData format expected by ChartJs
            "chartData": {
                # Make labels more readable
                "labels": self.simplify_date_labels(dataset.keys(), period),
                "datasets": [
                    {
                        "label": _("All languages"),
                        "borderColor": colors.DEFAULT,
                        "data": list(dataset.values()),
                    }
                ],
            },
        }
    async def get_visits_per_language_async(self, loop, query_params, languages):
        """
        Async wrapper to fetch the total visits with :mod:`aiohttp`.
        Opens a :class:`~aiohttp.ClientSession`, creates a :class:`~asyncio.Task` for each language to call
        :func:`~cms.utils.matomo_api_manager.MatomoApiManager.fetch` and waits for all tasks to finish with
        :func:`~asyncio.gather`.
        The returned list of gathered results has the correct order in which the tasks were created (at first the
        ordered list of languages and the last element is the task for the total visits).
        Called from :func:`~cms.utils.matomo_api_manager.MatomoApiManager.get_visits_per_language`.
        :param loop: The asyncio event loop
        :type loop: asyncio.AbstractEventLoop
        :param query_params: The parameters which are passed to the Matomo API
        :type query_params: dict
        :param languages: The list of languages which should be retrieved
        :type languages: list [ ~cms.models.languages.language.Language ]
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The list of gathered results
        :rtype: list
        """
        async with aiohttp.ClientSession() as session:
            # Create tasks for visits by language
            # (each language is isolated via a Matomo segment on the page URL)
            tasks = [
                loop.create_task(
                    self.fetch(
                        session,
                        **query_params,
                        segment=f"pageUrl=@/{language.slug}/wp-json/",
                    )
                )
                for language in languages
            ]
            # Create task for total visits in all languages
            tasks.append(
                loop.create_task(
                    self.fetch(
                        session,
                        **query_params,
                    )
                )
            )
            # Wait for all tasks to finish and collect the results
            # (the results are sorted in the order the tasks were created)
            return await asyncio.gather(*tasks)
    def get_visits_per_language(self, start_date, end_date, period):
        """
        Returns the total unique visitors in a timerange as defined in period
        :param start_date: Start date
        :type start_date: ~datetime.date
        :param end_date: End date
        :type end_date: ~datetime.date
        :param period: The period (one of :attr:`cms.constants.matomo_periods.CHOICES`)
        :type period: str
        :raises ~cms.utils.matomo_api_manager.MatomoException: When a :class:`~aiohttp.ClientError` was raised during a
                                                               Matomo API request
        :return: The visits per language in the ChartData format expected by ChartJs
        :rtype: dict
        """
        query_params = {
            "date": f"{start_date},{end_date}",
            "expanded": "1",
            "filter_limit": "-1",
            "format_metrics": "1",
            "idSite": self.matomo_id,
            "method": "VisitsSummary.getVisits",
            "period": period,
        }
        logger.debug(
            "Query params: %r",
            query_params,
        )
        # Convert languages to a list to force an evaluation in the sync function
        # (in Django, database queries cannot be executed in async functions without more ado)
        languages = list(self.languages)
        # Convert colors to cycle to make sure it doesn't run out of elements if there are more languages than colors
        color_cycle = cycle(colors.CHOICES)
        # Initialize async event loop
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        # Execute async request to Matomo API
        logger.debug("Fetching visits for languages %r asynchronously.", languages)
        datasets = loop.run_until_complete(
            self.get_visits_per_language_async(loop, query_params, languages)
        )
        logger.debug("All asynchronous fetching tasks have finished.")
        # The last dataset contains the total visits
        total_visits = datasets.pop()
        return {
            # Send original labels for usage in the CSV export (convert to list because type dict_keys is not JSON-serializable)
            "exportLabels": list(total_visits.keys()),
            # Return the data in the ChartData format expected by ChartJs
            "chartData": {
                # Make labels more readable
                "labels": self.simplify_date_labels(total_visits.keys(), period),
                "datasets":
                # The datasets for the visits by language
                [
                    {
                        "label": language.translated_name,
                        "borderColor": next(color_cycle),
                        "data": list(dataset.values()),
                    }
                    # zip aggregates two lists into tuples, e.g. zip([1,2,3], [4,5,6])=[(1,4), (2,5), (3,6)]
                    # In this case, it matches the languages to their respective dataset (because the datasets are ordered)
                    for language, dataset in zip(languages, datasets)
                ]
                # The dataset for total visits
                + [
                    {
                        "label": _("All languages"),
                        "borderColor": colors.DEFAULT,
                        "data": list(total_visits.values()),
                    }
                ],
            },
        }
    @staticmethod
    def simplify_date_labels(date_labels, period):
        """
        Convert the dates returned by Matomo to more readable labels
        :param date_labels: The date labels returned by Matomo
        :type date_labels: list [ str ]
        :param period: The period of the labels (determines the format)
        :type period: str
        :return: The readable labels
        :rtype: list [ str ]
        """
        simplified_date_labels = []
        if period == matomo_periods.DAY:
            # Convert string labels to date objects (the format for daily period is the iso format YYYY-MM-DD)
            date_objects = [
                date.fromisoformat(date_label) for date_label in date_labels
            ]
            # Convert date objects to more readable labels
            if date.today().year == date_objects[0].year:
                # If the first label is in the current year, omit the year for all dates
                simplified_date_labels = [
                    date_obj.strftime("%d.%m.") for date_obj in date_objects
                ]
            else:
                # Else, include the year
                simplified_date_labels = [
                    date_obj.strftime("%d.%m.%Y") for date_obj in date_objects
                ]
        elif period == matomo_periods.WEEK:
            # Convert string labels to date objects (the format for weekly period is YYYY-MM-DD,YYYY-MM-DD)
            date_objects = [
                datetime.strptime(date_label.split(",")[0], "%Y-%m-%d").date()
                for date_label in date_labels
            ]
            # Convert date objects to more readable labels ("CW" = calendar week)
            if date.today().year == date_objects[0].year:
                # If the first label is in the current year, omit the year for all dates
                simplified_date_labels = [
                    _("CW") + date_obj.strftime(" %W") for date_obj in date_objects
                ]
            else:
                # Else, include the year
                simplified_date_labels = [
                    date_obj.strftime("%Y ") + _("CW") + date_obj.strftime(" %W")
                    for date_obj in date_objects
                ]
        elif period == matomo_periods.MONTH:
            # Convert string labels to date objects (the format for monthly period is YYYY-MM)
            date_objects = [
                datetime.strptime(date_label, "%Y-%m").date()
                for date_label in date_labels
            ]
            # Convert date objects to more readable labels
            if date.today().year == date_objects[0].year:
                # If the first label is in the current year, omit the year for all dates
                simplified_date_labels = [
                    _(date_obj.strftime("%B")) for date_obj in date_objects
                ]
            else:
                # Else, include the year
                simplified_date_labels = [
                    _(date_obj.strftime("%B")) + date_obj.strftime(" %Y")
                    for date_obj in date_objects
                ]
        else:
            # This means the period is "year" (convert to list because type dict_keys is not JSON-serializable)
            simplified_date_labels = list(date_labels)
        return simplified_date_labels
| StarcoderdataPython |
11221746 | <reponame>mpimp-comas/npfc
"""
Module filter
==============
This modules contains the class Filter, which is used to filter molecules using
molecular descriptors.
"""
# data handling
import logging
import re
# chemoinformatics
from rdkit.Chem import Mol
from rdkit.Chem import Crippen
from rdkit.Chem import Descriptors
from rdkit.Chem import rdMolDescriptors
# docs
from typing import List
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def count_violations_lipinski(molecular_weight, slogp, num_hbd, num_hba):
    """Count violations of the Lipinski drug-likeness criteria.

    Lipinski, J Pharmacol Toxicol Methods. 2000 Jul-Aug;44(1):235-49.

    :param molecular_weight: exact molecular weight
    :param slogp: computed SlogP
    :param num_hbd: number of hydrogen bond donors
    :param num_hba: number of hydrogen bond acceptors
    :return: the number of violated criteria (0-4)
    """
    violations = (
        molecular_weight < 150 or molecular_weight > 500,
        slogp > 5,
        num_hbd > 5,
        num_hba > 10,
    )
    return sum(violations)
def count_violations_veber(num_rotatable_bonds, tpsa):
    """Count violations of the Veber oral-bioavailability criteria.

    Veber et al. (June 2002).
    "Molecular properties that influence the oral bioavailability of drug candidates".
    J. Med. Chem. 45 (12): 2615-23.

    :param num_rotatable_bonds: number of rotatable bonds
    :param tpsa: topological polar surface area
    :return: the number of violated criteria (0-2)
    """
    return sum((num_rotatable_bonds > 10, tpsa > 140))
def count_violations_lead_like(molecular_weight, slogp, num_rotatable_bonds):
    """Count violations of the lead-likeness criteria.

    http://zinc.docking.org/browse/subsets/
    Teague, Davis, Leeson, Oprea, Angew Chem Int Ed Engl. 1999 Dec 16;38(24):3743-3748.

    :param molecular_weight: exact molecular weight
    :param slogp: computed SlogP
    :param num_rotatable_bonds: number of rotatable bonds
    :return: the number of violated criteria (0-3)
    """
    return sum((
        molecular_weight < 250 or molecular_weight > 350,
        slogp > 3.5,
        num_rotatable_bonds > 7,
    ))
def count_violations_ppi_like(molecular_weight, slogp, num_hba, num_rings):
    """Count violations of the PPI-likeness "rule of 4".

    Protein-protein interaction (PPI) modulators tend to be larger and more
    lipophilic than classical drugs: MW > 400, SlogP > 4, more than 4
    hydrogen bond acceptors and more than 4 rings.

    Hamon V. et al., 2013.
    2P2IHUNTER: a tool for filtering orthosteric protein-protein interaction modulators via a dedicated support vector machine.
    Journal of The Royal Society Interface 11. doi:10.1098/rsif.2013.0860

    :param molecular_weight: exact molecular weight
    :param slogp: computed SlogP
    :param num_hba: number of hydrogen bond acceptors
    :param num_rings: number of rings
    :return: the number of violated criteria (0-4)
    """
    n = 0
    # BUGFIX: the molecular weight criterion was inverted (> 400 counted as a
    # violation); PPI-like compounds are expected to be heavier than 400 Da,
    # consistent with the other three "rule of 4" criteria below.
    if molecular_weight < 400:
        n += 1
    if slogp < 4:
        n += 1
    if num_hba < 4:
        n += 1
    if num_rings < 4:
        n += 1
    return n
def count_violations_fragment_like(molecular_weight, slogp, num_hba, num_hbd):
    """Count violations of the fragment-likeness "rule of three".

    Congreve M. et al., 2003. A "Rule of Three" for fragment-based lead discovery?
    Drug Discovery Today 8, 876-877. doi:10.1016/S1359-6446(03)02831-9

    :param molecular_weight: exact molecular weight
    :param slogp: computed SlogP
    :param num_hba: number of hydrogen bond acceptors
    :param num_hbd: number of hydrogen bond donors
    :return: the number of violated criteria (0-4)
    """
    return sum((
        molecular_weight >= 300,
        slogp > 3,
        num_hba > 3,
        num_hbd > 3,
    ))
def count_violations_fragment_like_ext(num_fragment_like_violations, tpsa, num_rotatable_bonds):
    """Count violations of the extended fragment-likeness "rule of three".

    Extends an existing rule-of-three violation count with the two additional
    criteria of the extended rule: TPSA <= 60 and at most 3 rotatable bonds.

    Congreve M. et al., 2003.
    A "Rule of Three" for fragment-based lead discovery? Drug Discovery Today 8, 876-877.
    doi:10.1016/S1359-6446(03)02831-9

    :param num_fragment_like_violations: violations already counted by count_violations_fragment_like
    :param tpsa: topological polar surface area
    :param num_rotatable_bonds: number of rotatable bonds
    :return: the total number of violated criteria
    """
    n = num_fragment_like_violations
    if tpsa > 60:
        n += 1
    # BUGFIX: the original tested the truthiness of num_rotatable_bonds, so any
    # molecule with at least one rotatable bond was flagged. The extended rule
    # of three allows up to 3 rotatable bonds.
    if num_rotatable_bonds > 3:
        n += 1
    return n
def get_min_max_ring_sizes(mol):
    """Return a (minimum, maximum) ring size tuple for the input molecule.

    Acyclic (linear) molecules yield (0, 0).

    :param mol: the input molecule
    :return: tuple with the smallest and the largest ring size
    """
    ring_sizes = [len(ring) for ring in mol.GetRingInfo().AtomRings()]
    if not ring_sizes:
        return (0, 0)
    return (min(ring_sizes), max(ring_sizes))
# Mapping of descriptor name -> callable computing that descriptor from a Mol.
DESCRIPTORS = {
    # classical molecular descriptors
    'num_heavy_atoms': lambda x: x.GetNumAtoms(),
    'molecular_weight': lambda x: round(Descriptors.ExactMolWt(x), 4),
    'num_rings': lambda x: rdMolDescriptors.CalcNumRings(x),
    'num_rings_arom': lambda x: rdMolDescriptors.CalcNumAromaticRings(x),
    'elements': lambda x: set([a.GetSymbol() for a in x.GetAtoms()]),
    'molecular_formula': lambda x: rdMolDescriptors.CalcMolFormula(x),
    'num_hbd': lambda x: rdMolDescriptors.CalcNumLipinskiHBD(x),
    'num_hba': lambda x: rdMolDescriptors.CalcNumLipinskiHBA(x),
    'slogp': lambda x: round(Crippen.MolLogP(x), 4),
    'tpsa': lambda x: round(rdMolDescriptors.CalcTPSA(x), 4),
    'num_rotatable_bonds': lambda x: rdMolDescriptors.CalcNumRotatableBonds(x),
    'num_atoms_oxygen': lambda x: len([a for a in x.GetAtoms() if a.GetAtomicNum() == 8]),
    'num_atoms_nitrogen': lambda x: len([a for a in x.GetAtoms() if a.GetAtomicNum() == 7]),
    # custom molecular descriptors
    # ring_sizes:
    # it would have been faster to access only once RingInfo for both min and max,
    # but this is tricky because I would have to start making exceptions in the way
    # the functions are accessed or more complicated downstream process.
    # Indeed, I do not think it is possible to set two dict keys at once
    # within a dict comprehension and it is not that bad for
    # performance to call it twice anyway.
    # NOTE(review): unlike get_min_max_ring_sizes, these two lambdas raise
    # ValueError for acyclic molecules (min()/max() of an empty list) - confirm intended.
    'ring_size_min': lambda x: min([len(y) for y in x.GetRingInfo().AtomRings()]),
    'ring_size_max': lambda x: max([len(y) for y in x.GetRingInfo().AtomRings()]),
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CLASSES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
class Filter:
    """A class for filtering molecules based on molecular descriptors."""
    def __init__(self, descriptors: dict = DESCRIPTORS):
        """Create a Filter object.

        :param descriptors: mapping of descriptor name to a callable computing it from a Mol
        """
        self.descriptors = descriptors
def compute_descriptors(self, mol: Mol, descriptors: List = None) -> dict:
"""Compute descriptors. A subset of descriptors can be computed if a list
of descriptor names is provided. To get an idea of what descriptors can be computed,
the method get_possible_descriptors can be used.
:param mol: the input molecule
:param descriptors: the list of descriptors to compute. If none is provided, all possible descriptors are computed.
:return: a dictionary with all descriptors
"""
# if no descriptor is specified, compute them all
if descriptors is None:
descriptors = list(self.descriptors.keys())
if len(descriptors) == 0:
raise ValueError('Error! No descriptor is specified for computation!')
return {descriptors[i]: self.descriptors[descriptors[i]](mol) for i in range(len(descriptors))}
def get_possible_descriptors(self) -> List:
"""Return a list of all descriptors that can be computed using this module.
:return: the list of descriptors that can be computed
"""
return sorted(list((self.descriptors.keys())))
    def filter_mol(self, mol: Mol, expr: str) -> bool:
        """Filter a molecule based on an expression.
        Two types of expressions are currently supported:
        - inclusion/exclusion
            - 'elements not in C, N, O'
            - 'elements in C, N, O'
        - numeric
            - 'num_heavy_atoms > 3'
            - '100.0 < molecular_weight <= 1000.0'
            - 'num_rings != 0'
            - 'num_rings == 0'
        :param mol: the input molecule
        :param expr: the filter to apply
        :return: True if the molecule passes the filter, False otherwise
        """
        # init
        # (the lowercased, tokenized copy is only used to detect the filter type)
        split_expr = [s.lower() for s in expr.lower().split()]
        # filters of type: 'elements in C, N, O'
        if 'in' in split_expr: # 'in' or 'not in'
            return self._eval_set_expr(mol, expr)
        # filters of type: 'num_heavy_atoms > 3'
        return self._eval_numeric_expr(mol, expr.lower())
    def _eval_numeric_expr(self, mol, expr):
        """
        Evaluate if the statements stored in the expression are True or False.
        For now a statement is composed of either 3 elements (['molecular_weight', '<=', '1000'])
        or 5 elements: (['0', '<=', 'molecular_weight', '<=', '1000']).
        ### No check has been added on this number because there might be an expanded functionality
        later on (combining statements with ';'?).
        Descriptors used for the comparisons need to be provided as a dictionary (name: value).

        :param mol: the input molecule
        :param expr: the numeric filter expression (lowercased), e.g. 'molecular_weight <= 1000'
        :return: True if every comparison of the (possibly chained) expression holds
        """
        mol = Mol(mol)
        expr = expr.replace(" ", "")
        split_expr = self._split_expr(expr) # something like 'molecular_weight', '<=', '1000'
        # replace descriptor names by their values
        split_expr = [self.descriptors[k](mol) if k in self.descriptors.keys() else k for k in split_expr] # now it is '250.0', '<=', '1000'
        logging.debug("Applying numeric filter: %s", ' '.join(str(v) for v in split_expr))
        # convert all values extracted as string into their type
        split_expr = [float(x) if x not in split_expr[1::2] else x for x in split_expr] # and now it is 250.0, '<=', 1000.0
        # operators are always at odd positions, whereas values are at even positions
        # and there is always a value on the left and on the right of an operator
        # (chained comparisons like 0 <= x <= 1000 are therefore checked pairwise)
        for i in range(1, len(split_expr), 2):
            operator = split_expr[i]
            left = split_expr[i-1]
            right = float(split_expr[i+1])
            if operator == "<=":
                if not left <= right:
                    return False
            elif operator == "<":
                if not left < right:
                    return False
            elif operator == "==":
                if not left == right:
                    return False
            elif operator == "!=":
                if not left != right:
                    return False
            elif operator == ">=":
                if not left >= right:
                    return False
            elif operator == ">":
                if not left > right:
                    return False
        # every comparison held
        return True
def _eval_set_expr(self, mol, expr):
"""Helper function for _eval_expr.
Look for keywords ' in ' and ' not in ' in expression and check the condition
by defining left as the descriptor and right as the values, i.e.:
descriptor in values ('elements in H, C, N, O')
"""
for op in [' not in ', ' in ']:
pattern = re.compile(op) # raw string
hits = [(m.start(0), m.end(0)) for m in re.finditer(pattern, expr)]
if len(hits) > 0:
break # leave asap with op still set to the correct operator
# in case we did not find anything, just stop
if len(hits) == 0:
raise ValueError(f"expected ' not in ' or ' in ' in expr ({expr})")
expr_split = [e.replace(" ", "") for e in expr.split(op)]
descriptor = self.descriptors[expr_split[0]](mol) # left
values = set(expr_split[1].split(",")) # right
logging.debug("Applying inclusion/exclusion filter: %s", ''.join(str(v) for v in [descriptor, op, values]))
if (op == ' in ' and descriptor.issubset(values)) or (op == ' not in ' and not descriptor.issubset(values)):
return True
else:
return False
    def _split_expr(self, expr):
        """Helper function for _eval_expr.
        From a string containing an expression (i.e. 'molecular_weight < 1000'), return
        a list of values and operators (['molecular_weight', '<', '1000']).

        :param expr: the expression with all whitespace already removed
        :return: alternating list [value, operator, value, ...]
        """
        opidx_eq = self._find_opidx("==", expr)
        opidx_diff = self._find_opidx("!=", expr)
        opidx_supeq = self._find_opidx(">=", expr)
        opidx_infeq = self._find_opidx("<=", expr)
        opidx_sup = self._find_opidx(">", expr)
        opidx_inf = self._find_opidx("<", expr)
        # filter sup and inf with supeq and infeq
        # (a '<' found at the start of a '<=' is not a real '<' operator)
        opidx_sup = self._filter_wrong_matches(opidx_supeq, opidx_sup)
        opidx_inf = self._filter_wrong_matches(opidx_infeq, opidx_inf)
        # split expr into values and operators
        # sorted operators so we can iterate over the expr from left to right
        opidx_all = sorted(opidx_eq + opidx_diff + opidx_supeq + opidx_infeq + opidx_sup + opidx_inf, key=lambda x: x[0])
        split_expr = []
        split_expr.append(expr[:opidx_all[0][0]])
        for i in range(len(opidx_all) - 1):
            # always take on the value on the right side of the op, so init the first part outside of the loop
            opidx_curr = opidx_all[i]
            opidx_next = opidx_all[i+1]
            operator = expr[opidx_curr[0]:opidx_curr[1]]
            split_expr.append(operator)
            value = expr[opidx_curr[1]:opidx_next[0]]
            split_expr.append(value)
        # append the last operator and the trailing value
        split_expr.append(expr[opidx_all[-1][0]:opidx_all[-1][1]])
        split_expr.append(expr[opidx_all[-1][1]:])
        return split_expr
def _find_opidx(self, op, expr):
""" Helper function for _split_expr.
Return all occurrences indices of a comparison operator (op) within an expr.
"""
# init possible operator symbols
pattern = re.compile(op) # raw string
return [(m.start(0), m.end(0)) for m in re.finditer(pattern, expr)]
def _filter_wrong_matches(self, opidx_larger, opidx_smaller):
"""Helper function for __split_expr.
Filter out false positives of comparison operators. For instance,
'<' beginning at the same position as '<=' should be discarded.
"""
invalid = []
for smaller in opidx_smaller:
for larger in opidx_larger:
if smaller[0] == larger[0]:
invalid.append(smaller)
return [smaller for smaller in opidx_smaller if smaller not in invalid]
| StarcoderdataPython |
# package org.apache.helix.manager.zk
import itertools
import logging
import queue
import threading

#from org.apache.helix.manager.zk import *
#from java.util.concurrent import BlockingQueue
#from java.util.concurrent import LinkedBlockingQueue
#from java.util.concurrent.atomic import AtomicInteger
#from org.I0Itec.zkclient.exception import ZkInterruptedException
#from org.apache.log4j import Logger
class ZkCacheEventThread(threading.Thread):
    """Daemon thread that delivers ZkCacheEvents one at a time.

    Python port of ``org.apache.helix.manager.zk.ZkCacheEventThread``. The
    original machine translation was not valid Python (Python-2 ``except``
    syntax, stray ``Java modifiers:`` lines, undefined ``_events`` queue);
    this rewrite implements the same behaviour with the standard library:
    events are queued with :meth:`send` and executed sequentially by
    :meth:`run` until :meth:`interrupt` is called.
    """

    LOG = logging.getLogger("ZkCacheEventThread")

    #: class-wide monotonically increasing event id (AtomicInteger in Java)
    _eventId = itertools.count(1)

    class ZkCacheEvent:
        """A unit of work processed by the event thread."""

        def __init__(self, description):
            # human-readable description, used only for logging
            self._description = description

        def run(self):
            """Execute the event; must be overridden by subclasses."""
            raise NotImplementedError

        def __str__(self):
            return "ZkCacheEvent[" + self._description + "]"

    def __init__(self, name):
        """Create the (not yet started) daemon event thread.

        :param name: suffix appended to the thread name
        """
        super().__init__(daemon=True, name="ZkCache-EventThread-" + name)
        # unbounded FIFO replacing Java's LinkedBlockingQueue
        self._events = queue.Queue()
        # set once the thread has been asked to stop consuming events
        self._interrupted = threading.Event()

    def interrupt(self):
        """Ask the event thread to terminate after the current event."""
        self._interrupted.set()
        # push a sentinel to unblock a potentially waiting Queue.get()
        self._events.put(None)

    def run(self):
        """Consume and execute queued events until interrupted."""
        self.LOG.info("Starting ZkCache event thread.")
        while not self._interrupted.is_set():
            zk_event = self._events.get()
            if zk_event is None:
                # sentinel pushed by interrupt()
                break
            event_id = next(ZkCacheEventThread._eventId)
            self.LOG.debug("Delivering event #%s %s", event_id, zk_event)
            try:
                zk_event.run()
            except Exception:  # mirrors the Java catch(Throwable)
                self.LOG.exception("Error handling event %s", zk_event)
            self.LOG.debug("Delivering event #%s done", event_id)
        self.LOG.info("Terminate ZkClient event thread.")

    def send(self, event):
        """Queue an event for execution unless the thread was interrupted.

        :param event: the ZkCacheEvent to deliver
        """
        if not self._interrupted.is_set():
            self.LOG.debug("New event: %s", event)
            self._events.put(event)
1849636 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the Action class.
This file defines the `Action` class, which is returned by the policy and given to the environment.
"""
import copy
import collections
# from abc import ABCMeta, abstractmethod
import numpy as np
import torch
import gym
from pyrobolearn.utils.data_structures.orderedset import OrderedSet
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class Action(object):
r"""Action class.
The `Action` is produced by the policy in response to a certain state/observation. From a programming point of
view, compared to the `State` class, the action is a setter object. Thus, they have a very close relationship
and share many functionalities. Some actions are mutually exclusive and cannot be executed at the same time.
An action is defined as something that affects the environment; that forces the environment to go to the next
state. For instance, an action could be the desired joint positions, but also an abstract action such as
'open a door' which would then open a door in the simulator and load the next part of the world.
In our framework, the `Action` class is decoupled from the policy and environment rendering it more modular [1].
Nevertheless, the `Action` class still acts as a bridge between the policy and environment. In addition to be
the output of a policy/controller, it can also be the input to some value approximators, dynamic models, reward
functions, and so on.
This class also describes the `action_space` which has initially been defined in `gym.Env` [2].
References:
[1] "Wikipedia: Composition over Inheritance", https://en.wikipedia.org/wiki/Composition_over_inheritance
[2] "OpenAI gym": https://gym.openai.com/ and https://github.com/openai/gym
"""
    def __init__(self, actions=(), data=None, space=None, name=None, ticks=1):
        """
        Initialize the action. The action either carries some data itself, or is a combination of other actions;
        exactly one of `actions` / `data` may be provided.
        Args:
            actions (list/tuple/set of Action): list of actions to be combined together (mutually exclusive
                with `data`)
            data (np.ndarray, list/tuple of int/float, int, float): data associated to this action; scalars
                and lists/tuples are converted to a numpy array
            space (gym.space): space associated with the given data
            name (str, None): name of the action; None falls back to the class name (see the `name` setter)
            ticks (int): number of ticks to sleep before setting the next action data (see `write`).
        Raises:
            TypeError: if `actions` is not a list/tuple/set, or `data` has an unsupported type.
            ValueError: if both `actions` and `data` are given.
        """
        # Check arguments
        if actions is None:
            actions = tuple()
        if not isinstance(actions, (list, tuple, set, OrderedSet)):
            raise TypeError("Expecting a list, tuple, or (ordered) set of actions.")
        if len(actions) > 0 and data is not None:
            raise ValueError("Please specify only one of the argument `actions` xor `data`, but not both.")
        # Check if data is given; normalize it to a numpy array
        if data is not None:
            if not isinstance(data, np.ndarray):
                if isinstance(data, (list, tuple)):
                    data = np.array(data)
                elif isinstance(data, (int, float)):
                    data = np.array([data])
                else:
                    raise TypeError("Expecting a numpy array, a list/tuple of int/float, or an int/float for 'data'")
        # The following attributes should normally be set in the child classes
        self._data = data
        # keep a float32 torch mirror of the numpy data (None when no data)
        self._torch_data = data if data is None else torch.from_numpy(data).float()
        self._space = space
        self._distribution = None  # for sampling
        self._normalizer = None
        self._noiser = None  # for noise
        self.name = name  # goes through the `name` setter (validates / defaults to class name)
        # create ordered set which is useful if this action is a combination of multiple actions
        self._actions = OrderedSet()
        if self._data is None:
            self.add(actions)
        # set ticks and counter (used by `write` to throttle how often data is written)
        self.cnt = 0
        self.ticks = int(ticks)
        # reset action
        # self.reset()
##############################
# Properties (Getter/Setter) #
##############################
@property
def actions(self):
"""
Get the list of actions.
"""
return self._actions
@actions.setter
def actions(self, actions):
"""
Set the list of actions.
"""
if self.has_data():
raise AttributeError("Trying to add internal actions to the current action while it already has some data. "
"A action should be a combination of actions or should contain some kind of data, "
"but not both.")
if isinstance(actions, collections.Iterable):
for action in actions:
if not isinstance(action, Action):
raise TypeError("One of the given actions is not an instance of Action.")
self.add(action)
else:
raise TypeError("Expecting an iterator (e.g. list, tuple, OrderedSet, set,...) over actions")
@property
def data(self):
"""
Get the data associated to this particular action, or the combined data associated to each action.
Returns:
list of np.ndarray: list of data associated to the action
"""
if self.has_data():
return [self._data]
return [action._data for action in self._actions]
@data.setter
def data(self, data):
"""
Set the data associated to this particular action, or the combined data associated to each action.
Each data will be clipped if outside the range/bounds of the corresponding action.
Args:
data: the data to set
"""
if self.has_actions(): # combined actions
if not isinstance(data, collections.Iterable):
raise TypeError("data is not an iterator")
if len(self._actions) != len(data):
raise ValueError("The number of actions is different from the number of data segments")
for action, d in zip(self._actions, data):
action.data = d
# one action: change the data
# if self.has_data():
else:
if self.is_discrete(): # discrete action
if isinstance(data, np.ndarray): # data action is a numpy array
# check if given logits or not
if data.shape[-1] != 1: # logits
data = np.array([np.argmax(data)])
elif isinstance(data, (float, np.integer)):
data = int(data)
else:
raise TypeError("Expecting the `data` action to be an int, numpy array, instead got: "
"{}".format(type(data)))
if not isinstance(data, np.ndarray):
if isinstance(data, (list, tuple)):
data = np.array(data)
if len(data) == 1 and self._data.shape != data.shape: # TODO: check this line
data = data[0]
elif isinstance(data, (int, float, np.integer)): # np.integer is for Py3.5
data = data * np.ones(self._data.shape)
else:
raise TypeError("Expecting a numpy array, a list/tuple of int/float, or an int/float for 'data'")
if self._data is not None and self._data.shape != data.shape:
raise ValueError("The given data does not have the same shape as previously.")
# clip the value using the space
if self.has_space():
if self.is_continuous(): # continuous case
low, high = self._space.low, self._space.high
data = np.clip(data, low, high)
else: # discrete case
n = self._space.n
if data.size == 1:
data = np.clip(data, 0, n)
self._data = data
self._torch_data = torch.from_numpy(data).float()
@property
def merged_data(self):
"""
Return the merged data.
"""
# fuse the data
fused_action = self.fuse()
# return the data
return fused_action.data
@property
def torch_data(self):
"""
Return the data as a list of torch tensors.
"""
if self.has_data():
return [self._torch_data]
return [action._torch_data for action in self._actions]
@torch_data.setter
def torch_data(self, data):
"""
Set the torch data and update the numpy version of the data.
Args:
data (torch.Tensor, list of torch.Tensors): data to set.
"""
if self.has_actions(): # combined actions
if not isinstance(data, collections.Iterable):
raise TypeError("data is not an iterator")
if len(self._actions) != len(data):
raise ValueError("The number of actions is different from the number of data segments")
for action, d in zip(self._actions, data):
action.torch_data = d
# one action: change the data
# if self.has_data():
else:
if isinstance(data, torch.Tensor):
data = data.float()
elif isinstance(data, np.ndarray):
data = torch.from_numpy(data).float()
elif isinstance(data, (list, tuple)):
data = torch.from_numpy(np.array(data)).float()
elif isinstance(data, (int, float)):
data = data * torch.ones(self._data.shape)
else:
raise TypeError("Expecting a Torch tensor, numpy array, a list/tuple of int/float, or an int/float for"
" 'data'")
if self._torch_data.shape != data.shape:
raise ValueError("The given data does not have the same shape as previously.")
# clip the value using the space
if self.has_space():
if self.is_continuous(): # continuous case
low, high = torch.from_numpy(self._space.low), torch.from_numpy(self._space.high)
data = torch.min(torch.max(data, low), high)
else: # discrete case
n = self._space.n
if data.size == 1:
data = torch.clamp(data, min=0, max=n)
self._torch_data = data
if data.requires_grad:
data = data.detach().numpy()
else:
data = data.numpy()
self._data = data
@property
def merged_torch_data(self):
"""
Return the merged torch data.
Returns:
list of torch.Tensor: list of data torch tensors.
"""
# fuse the data
fused_action = self.fuse()
# return the data
return fused_action.torch_data
@property
def vec_data(self):
"""
Return a vectorized form of the data.
Returns:
np.array[N]: all the data.
"""
return np.concatenate([data.reshape(-1) for data in self.merged_data])
@property
def vec_torch_data(self):
"""
Return a vectorized form of all the torch tensors.
Returns:
torch.Tensor([N]): all the torch tensors reshaped such that they are unidimensional.
"""
return torch.cat([data.reshape(-1) for data in self.merged_torch_data])
@property
def spaces(self):
"""
Get the corresponding spaces as a list of spaces.
"""
if self.has_space():
return [self._space]
return [action._space for action in self._actions]
@property
def space(self):
"""
Get the corresponding space.
"""
if self.has_space():
# return gym.spaces.Tuple([self._space])
return self._space
# return [action._space for action in self._actions]
return gym.spaces.Tuple([action._space for action in self._actions])
@space.setter
def space(self, space):
"""
Set the corresponding space. This can only be used one time!
"""
if self.has_data() and not self.has_space() and \
isinstance(space, (gym.spaces.Box, gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
self._space = space
@property
def merged_space(self):
"""
Get the corresponding merged space. Note that all the spaces have to be of the same type.
"""
if self.has_space():
return self._space
spaces = self.spaces
result = []
dtype, prev_dtype = None, None
for space in spaces:
if isinstance(space, gym.spaces.Box):
dtype = 'box'
result.append([space.low, space.high])
elif isinstance(space, gym.spaces.Discrete):
dtype = 'discrete'
result.append(space.n)
else:
raise NotImplementedError
if prev_dtype is not None and dtype != prev_dtype:
return self.space
prev_dtype = dtype
if dtype == 'box':
low = np.concatenate([res[0] for res in result])
high = np.concatenate([res[1] for res in result])
return gym.spaces.Box(low=low, high=high, dtype=np.float32)
elif dtype == 'discrete':
return gym.spaces.Discrete(n=np.sum(result))
return self.space
@property
def name(self):
"""
Return the name of the action.
"""
if self._name is None:
return self.__class__.__name__
return self._name
@name.setter
def name(self, name):
"""
Set the name of the action.
"""
if name is None:
name = self.__class__.__name__
if not isinstance(name, str):
raise TypeError("Expecting the name to be a string.")
self._name = name
@property
def shape(self):
"""
Return the shape of each action. Some actions, such as camera actions have more than 1 dimension.
"""
# if self.has_actions():
return [data.shape for data in self.data]
# return [self.data.shape]
@property
def merged_shape(self):
"""
Return the shape of each merged action.
"""
return [data.shape for data in self.merged_data]
@property
def size(self):
"""
Return the size of each action.
"""
# if self.has_actions():
return [data.size for data in self.data]
# return [len(self.data)]
@property
def merged_size(self):
"""
Return the size of each merged action.
"""
return [data.size for data in self.merged_data]
@property
def dimension(self):
"""
Return the dimension (length of shape) of each action.
"""
return [len(data.shape) for data in self.data]
@property
def merged_dimension(self):
"""
Return the dimension (length of shape) of each merged state.
"""
return [len(data.shape) for data in self.merged_data]
@property
def num_dimensions(self):
"""
Return the number of different dimensions (length of shape).
"""
return len(np.unique(self.dimension))
# @property
# def distribution(self):
# """
# Get the current distribution used when sampling the action
# """
# return None
#
# @distribution.setter
# def distribution(self, distribution):
# """
# Set the distribution to the action.
# """
# # check if distribution is discrete/continuous
# pass
###########
# Methods #
###########
    def is_combined_actions(self):
        """
        Return a boolean value depending if the action is a combination of actions.
        Returns:
            bool: True if the action is a combination of actions, False otherwise.
        """
        return len(self._actions) > 0
    # alias (readability at call sites: `self.has_actions()`)
    has_actions = is_combined_actions
    def has_data(self):
        """Return True if this action carries its own data array (i.e. it is a leaf action)."""
        return self._data is not None
    def has_space(self):
        """Return True if a space has been associated with this action."""
        return self._space is not None
    def add(self, action):
        """
        Add a action or a list of actions to the list of internal actions. Useful when combining different actions
        together. This shouldn't be called if this action has some data set to it.
        Args:
            action (Action, list/tuple of Action): action(s) to add to the internal list of actions
        Raises:
            AttributeError: if this action already carries data (an action is either a leaf or a combination).
            TypeError: if `action` (or one of its items) is not an Action instance.
        """
        if self.has_data():
            raise AttributeError("Undefined behavior: a action should be a combination of actions or should contain "
                                 "some kind of data, but not both.")
        if isinstance(action, Action):
            self._actions.add(action)
        # NOTE(review): `collections.Iterable` was removed in Python 3.10; use
        # `collections.abc.Iterable` for forward compatibility.
        elif isinstance(action, collections.Iterable):
            for i, s in enumerate(action):
                if not isinstance(s, Action):
                    raise TypeError("The item {} in the given list is not an instance of Action".format(i))
                self._actions.add(s)
        else:
            raise TypeError("The 'other' argument should be an instance of Action, or an iterator over actions.")
    # aliases: keep a list-like API (`append`/`extend` behave exactly like `add`)
    append = add
    extend = add
    def _write(self, data):
        """Write the given data to the simulator; to be overridden by child classes (no-op by default)."""
        pass
    def write(self, data=None):
        """
        Write the action values to the simulator for each action.
        The actual writing is delegated to `_write`, which has to be overwritten by the child class.
        Writing only happens every `self.ticks` calls (throttled via the internal counter `self.cnt`).
        Args:
            data: data to write; if None, each action's current `_data` is used. For a combined action,
                an iterable with one entry per inner action (None entries fall back to that action's data).
        """
        # if time to write (throttle: only write every `ticks` calls)
        if self.cnt % self.ticks == 0:
            if self.has_data():  # leaf action: write the current action
                if data is None:
                    data = self._data
                self._write(data)
            else:  # combined action: write each inner action
                if self.actions:
                    if data is None:
                        data = [None] * len(self.actions)
                    for action, d in zip(self.actions, data):
                        if d is None:
                            d = action._data
                        action._write(d)
        self.cnt += 1
        # return the data
        # return self.data
# def _reset(self):
# pass
#
# def reset(self):
# """
# Some actions need to be reset. It returns the initial action.
# This needs to be overwritten by the child class.
#
# Returns:
# initial action
# """
# if self.has_data(): # reset the current action
# self._reset()
# else: # reset each action
# for action in self.actions:
# action._reset()
#
# # return the first action data
# return self.write()
# def shape(self):
# """
# Return the shape of each action. Some actions, such as camera actions have more than 1 dimension.
# """
# return [d.shape for d in self.data]
#
# def dimension(self):
# """
# Return the dimension (length of shape) of each action.
# """
# return [len(d.shape) for d in self.data]
def max_dimension(self):
"""
Return the maximum dimension.
"""
return max(self.dimension)
# def size(self):
# """
# Return the size of each action.
# """
# return [d.size for d in self.data]
def total_size(self):
"""
Return the total size of the combined action.
"""
return sum(self.size)
def has_discrete_values(self):
"""
Does the action have discrete values?
"""
if self._data is None:
return [isinstance(action._space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete))
for action in self._actions]
if isinstance(self._space, (gym.spaces.Discrete, gym.spaces.MultiDiscrete)):
return [True]
return [False]
def is_discrete(self):
"""
If all the actions are discrete, then it is discrete.
"""
values = self.has_discrete_values()
if len(values) == 0:
return False
return all(values)
def has_continuous_values(self):
"""
Does the action have continuous values?
"""
if self._data is None:
return [isinstance(action._space, gym.spaces.Box) for action in self._actions]
if isinstance(self._space, gym.spaces.Box):
return [True]
return [False]
def is_continuous(self):
"""
If one of the action is continuous, then the action is considered to be continuous.
"""
return any(self.has_continuous_values())
def bounds(self):
"""
If the action is continuous, it returns the lower and higher bounds of the action.
If the action is discrete, it returns the maximum number of discrete values that the action can take.
If the action is multi-discrete, it returns the maximum number of discrete values that each subaction can take.
Returns:
list/tuple: list of bounds if multiple actions, or bounds of this action
"""
if self._data is None:
return [action.bounds() for action in self._actions]
if isinstance(self._space, gym.spaces.Box):
return (self._space.low, self._space.high)
elif isinstance(self._space, gym.spaces.Discrete):
return (self._space.n,)
elif isinstance(self._space, gym.spaces.MultiDiscrete):
return (self._space.nvec,)
raise NotImplementedError
def apply(self, fct):
"""
Apply the given fct to the data of the action, and set it to the action.
"""
self.data = fct(self.data)
def contains(self, x): # parameter dependent of the action
"""
Check if the argument is within the range/bound of the action.
"""
return self._space.contains(x)
def sample(self, distribution=None): # parameter dependent of the action (discrete and continuous distributions)
"""
Sample some values from the action based on the given distribution.
If no distribution is specified, it samples from a uniform distribution (default value).
"""
if self.is_combined_actions():
return [action.sample() for action in self._actions]
if self._distribution is None:
return
else:
pass
raise NotImplementedError
def add_noise(self, noise=None, replace=True): # parameter dependent of the action
"""
Add some noise to the action, and returns it.
Args:
noise (np.ndarray, fct): array to be added or function to be applied on the data
"""
if self._data is None:
# apply noise
for action in self._actions:
action.add_noise(noise=noise)
else:
# add noise to the data
noisy_data = self.data + noise
# clip such that the data is within the bounds
self.data = noisy_data
def normalize(self, normalizer=None, replace=True): # parameter dependent of the action
"""
Normalize using the action data using the provided normalizer.
Args:
normalizer (sklearn.preprocessing.Normalizer): the normalizer to apply to the data.
replace (bool): if True, it will replace the `data` attribute by the normalized data.
Returns:
the normalized data
"""
pass
def fuse(self, other=None, axis=0):
"""
Fuse the actions that have the same shape together. The axis specified along which axis we concatenate the data.
If multiple actions with different shapes are present, the axis will be the one specified if possible,
otherwise it will be min(dimension, axis).
Examples:
a0 = JointPositionAction(robot)
a1 = JointVelocityAction(robot)
a = a0 & a1
print(a)
print(a.shape)
a = a0 + a1
a.fuse()
print(a)
print(a.shape)
"""
# check argument
if not (other is None or isinstance(other, Action)):
raise TypeError("The 'other' argument should be None or another action.")
# build list of all the actions
actions = [self] if self.has_data() else self._actions
if other is not None:
if other.has_data():
actions.append(other)
else:
actions.extend(other._actions)
# check if only one action
if len(actions) < 2:
return self # do nothing
# build the dictionary with key=dimension of shape, value=list of actions
dic = {}
for action in actions:
dic.setdefault(len(action._data.shape), []).append(action)
# traverse the dictionary and fuse corresponding shapes
actions = []
for key, value in dic.items():
if len(value) > 1:
# fuse
data = [action._data for action in value]
names = [action.name for action in value]
a = Action(data=np.concatenate(data, axis=min(axis, key)), name='+'.join(names))
actions.append(a)
else:
# only one action
actions.append(value[0])
# return the fused action
if len(actions) == 1:
return actions[0]
return Action(actions)
def lookfor(self, class_type):
"""
Look for the specified class type/name in the list of internal actions, and returns it.
Args:
class_type (type, str): class type or name
Returns:
Action: the corresponding instance of the Action class
"""
# if string, lowercase it
if isinstance(class_type, str):
class_type = class_type.lower()
# if there is one action
if self.has_data():
if self.__class__ == class_type or self.__class__.__name__.lower() == class_type:
return self
# the action has multiple actions, thus we go through each action
for action in self.actions:
if action.__class__ == class_type or action.__class__.__name__.lower() == class_type:
return action
########################
# Operator Overloading #
########################
def __str__(self):
"""Return a string describing the action."""
if self._data is None:
lst = [self.__class__.__name__ + '(']
for action in self.actions:
lst.append('\t' + action.__str__() + ',')
lst.append(')')
return '\n'.join(lst)
else:
return '%s(%s)' % (self.name, self._data)
def __call__(self, data=None):
"""
Compute/read the action and return it. It is an alias to the `self.write()` method.
"""
return self.write(data)
def __len__(self):
"""
Return the total number of actions contained in this class.
Example::
s1 = JntPositionAction(robot)
s2 = s1 + JntVelocityAction(robot)
print(len(s1)) # returns 1
print(len(s2)) # returns 2
"""
if self._data is None:
return len(self._actions)
return 1
def __iter__(self):
"""
Iterator over the actions.
"""
if self.is_combined_actions():
for action in self._actions:
yield action
else:
yield self
def __contains__(self, item):
"""
Check if the action item(s) is(are) in the combined action. If the item is the data associated with the action,
it checks that it is within the bounds.
Args:
item (Action, list/tuple of action, type): check if given action(s) is(are) in the combined action
Example:
s1 = JntPositionAction(robot)
s2 = JntVelocityAction(robot)
s = s1 + s2
print(s1 in s) # output True
print(s2 in s1) # output False
print((s1, s2) in s) # output True
"""
# check type of item
if not isinstance(item, (Action, np.ndarray, type)):
raise TypeError("Expecting an Action, a np.array, or a class type, instead got: {}".format(type(item)))
# if class type
if isinstance(item, type):
# if there is one action
if self.has_data():
return self.__class__ == item
# the action has multiple actions, thus we go through each action
for action in self.actions:
if action.__class__ == item:
return True
return False
# check if action item is in the combined action
if self._data is None and isinstance(item, Action):
return item in self._actions
# check if action/data is within the bounds
if isinstance(item, Action):
item = item.data
# check if continuous
# if self.is_continuous():
# low, high = self.bounds()
# return np.all(low <= item) and np.all(item <= high)
# else: # discrete case
# num = self.bounds()[0]
# # check the size of data
# if item.size > 1: # array
# return (item.size < num)
# else: # one number
# return (item[0] < num)
return self.contains(item)
def __getitem__(self, key):
"""
Get the corresponding item from the action(s)
"""
# if one action, slice the corresponding action data
if len(self._actions) == 0:
return self._data[key]
# if multiple actions
if isinstance(key, int):
# get one action
return self._actions[key]
elif isinstance(key, slice):
# get multiple actions
return Action(self._actions[key])
else:
raise TypeError("Expecting an int or slice for the key, but got instead {}".format(type(key)))
def __setitem__(self, key, value):
"""
Set the corresponding item/value to the corresponding key.
Args:
key (int, slice): index of the internal action, or index/indices for the action data
value (Action, int/float, array): value to be set
"""
if self.is_combined_actions():
# set/move the action to the specified key
if isinstance(value, Action) and isinstance(key, int):
self._actions[key] = value
else:
raise TypeError("Expecting key to be an int, and value to be a action.")
else:
# set the value on the data directly
self._data[key] = value
def __add__(self, other):
"""
Combine two different actions together. In this special case, the operation is not commutable.
This is the same as taking the union of the actions.
Args:
other (Action): another action
Returns:
Action: the combined action
Examples:
s1 = JntPositionAction(robot)
s2 = JntVelocityAction(robot)
s = s1 + s2 # = Action([JntPositionAction(robot), JntVelocityAction(robot)])
s1 = Action([JntPositionAction(robot), JntVelocityAction(robot)])
s2 = Action([JntPositionAction(robot), LinkPositionAction(robot)])
s = s1 + s2 # = Action([JntPositionAction(robot), JntVelocityAction(robot), LinkPositionAction(robot)])
"""
if not isinstance(other, Action):
raise TypeError("Expecting another action, instead got {}".format(type(other)))
s1 = self._actions if self._data is None else OrderedSet([self])
s2 = other._actions if other._data is None else OrderedSet([other])
s = s1 + s2
return Action(s)
def __iadd__(self, other):
"""
Add a action to the current one.
Args:
other (Action, list/tuple of Action): other action
Examples:
s = Action()
s += JntPositionAction(robot)
s += JntVelocityAction(robot)
"""
if self._data is not None:
raise AttributeError("The current class already has some data attached to it. This operation can not be "
"applied in this case.")
self.append(other)
def __sub__(self, other):
"""
Remove the other action(s) from the current action.
Args:
other (Action): action to be removed.
"""
if not isinstance(other, Action):
raise TypeError("Expecting another action, instead got {}".format(type(other)))
s1 = self._actions if self._data is None else OrderedSet([self])
s2 = other._actions if other._data is None else OrderedSet([other])
s = s1 - s2
if len(s) == 1: # just one element
return s[0]
return Action(s)
def __isub__(self, other):
"""
Remove one or several actions from the combined action.
Args:
other (Action): action to be removed.
"""
if not isinstance(other, Action):
raise TypeError("Expecting another action, instead got {}".format(type(other)))
if self._data is not None:
raise RuntimeError("This operation is only available for a combined action")
s = other._actions if other._data is None else OrderedSet([other])
self._actions -= s
def __copy__(self):
"""Return a shallow copy of the action. This can be overridden in the child class."""
return self.__class__(actions=self.actions, data=self._data, space=self._space, name=self.name,
ticks=self.ticks)
def __deepcopy__(self, memo={}):
"""Return a deep copy of the action. This can be overridden in the child class.
Args:
memo (dict): memo dictionary of objects already copied during the current copying pass
"""
if self in memo:
return memo[self]
actions = [copy.deepcopy(action, memo) for action in self.actions]
data = copy.deepcopy(self._data)
space = copy.deepcopy(self._space)
action = self.__class__(actions=actions, data=data, space=space, name=self.name, ticks=self.ticks)
memo[self] = action
return action
| StarcoderdataPython |
9608979 | import sys
# Parse the two required CLI arguments: a name and an integer repeat count.
try:
    name = sys.argv[1]
except IndexError:
    print(f'Error: please provide a name and a repeat count to the script')
    sys.exit(1)
try:
    repeat_count = int(sys.argv[2])
except IndexError:
    print(f'Error: please provide a name and a repeat count to the script')
    sys.exit(1)
except ValueError:
    print(f'The repeat count needs to be an integer')
    sys.exit(1)
# Write the name `repeat_count` times to the output file; fall back to printing
# on stdout when the file cannot be opened (missing directory, permissions, ...).
try:
    f = open('root_files/name_repeated.txt', 'w')
except OSError as err:  # IOError is an alias of OSError in Python 3, listing both was redundant
    print(f"Error: unable to write file ({err})")
    for i in range(1, repeat_count + 1):
        print(f'{i} - {name}')
else:
    # bug fix: context manager guarantees the file is closed even if writelines() raises;
    # the original manual f.close() leaked the handle on failure
    with f:
        f.writelines(name + '\n' for _ in range(repeat_count))
| StarcoderdataPython |
136227 | <reponame>Guillem96/ssd-pytorch<gh_stars>1-10
import click
import yaml
import torch
import ssd
import ssd.transforms as T
from ssd.coco.coco_utils import get_coco_api_from_dataset
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@click.command()
@click.option('--dataset', required=True,
              type=click.Choice(['labelme', 'VOC', 'COCO']))
@click.option('--dataset-root', required=True,
              type=click.Path(exists=True, file_okay=False))
@click.option('--config', required=True,
              type=click.Path(exists=True, dir_okay=False))
@click.option('--checkpoint', required=True,
              type=click.Path(exists=True, dir_okay=False))
def evaluate(dataset, dataset_root, config, checkpoint):
    """Evaluate a trained SSD300 checkpoint on the chosen detection dataset using COCO metrics."""
    # bug fix: close the config file handle (the original `open(config)` was never closed)
    with open(config) as f:
        cfg = yaml.safe_load(f)['config']
    # evaluation-time transforms only (no training augmentation)
    transform = T.get_transforms(cfg['image-size'], training=False)
    # build the dataset wrapper matching the requested annotation format
    if dataset == 'COCO':
        dataset = ssd.data.COCODetection(root=dataset_root,
                                         classes=cfg['classes'],
                                         transform=transform)
    elif dataset == 'VOC':
        dataset = ssd.data.VOCDetection(root=dataset_root,
                                        classes=cfg['classes'],
                                        transform=transform)
    else:
        dataset = ssd.data.LabelmeDataset(root=dataset_root,
                                          classes=cfg['classes'],
                                          transform=transform)
    print('Generating COCO dataset...', end=' ')
    coco_dataset = get_coco_api_from_dataset(dataset)
    print('done')
    # restore the trained weights and run the evaluation loop on the detected device
    model = ssd.SSD300(cfg)
    model.eval()
    model.load_state_dict(torch.load(checkpoint, map_location=device))
    model.to(device)
    ssd.engine.evaluate(model, dataset, coco_dataset, device)
if __name__ == "__main__":
    evaluate()
3215978 | <gh_stars>10-100
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__))) | StarcoderdataPython |
1647596 | <gh_stars>1-10
'''
given this sequence:
1, 2, 3, 4, 3, 4, 5, 6, 5, 6, 7, 8 ...
deduce and between 1 and 9223372036854775807
return the corresponding numer of the sequence
'''
import math
def sequence(n):
    '''
    Return the n-th term for 1 <= n <= 9223372036854775807 (2**63 - 1), or -1 outside that range.
    NOTE(review): sequence(1) and sequence(2) return 3 and 4 — the module docstring's first
    terms (1, 2) are never produced; confirm the intended indexing offset.
    '''
    if 1 <= n <= 9223372036854775807:
        return a(n)
    else:
        return -1
def a(n):
    '''Combine the helper: even n -> 2*b(n); odd n -> 1 + 2*b(n).'''
    if n % 2 == 0:
        return 2*b(n)
    else:
        return 1 + 2*b(n)
def b(n):
    '''Helper term: floor(n/2) - floor((n+1)/4) + 1, computed in exact integer arithmetic.'''
    # bug fix: math.floor(n/2) goes through float division, which is inexact for n near
    # 2**63 and returned wrong terms for large inputs; floor division (//) is exact
    n = n // 2 - (n + 1) // 4
    return n + 1
| StarcoderdataPython |
1859570 | <filename>vedastr/models/bodies/rectificators/__init__.py
from .tps_stn import TPS_STN
from .spin import SPIN
from .builder import build_rectificator
| StarcoderdataPython |
6494818 | from __future__ import division
import numpy as np
from resample.utils import eqf
from scipy.stats import (norm, laplace,
gamma, f as F,
t, beta, lognorm,
pareto, logistic,
invgauss, poisson)
def jackknife(a, f=None):
    """
    Compute jackknife estimates for a sample and estimator; when no estimator
    is given, return the leave-one-out samples themselves.
    Parameters
    ----------
    a : array-like
        Sample
    f : callable
        Estimator
    Returns
    -------
    y | X : np.array
        Jackknife estimates (or leave-one-out samples if f is None)
    """
    a = np.asarray(a)
    # one leave-one-out replicate per observation (deletion along the first axis)
    loo = np.asarray([np.delete(a, i, 0) for i in range(len(a))])
    if f is None:
        return loo
    return np.asarray([f(sample) for sample in loo])
def jackknife_bias(a, f):
    """
    Jackknife estimate of the bias of estimator ``f`` on sample ``a``.
    Parameters
    ----------
    a : array-like
        Sample
    f : callable
        Estimator
    Returns
    -------
    y : float
        Jackknife estimate of bias
    """
    deviations = jackknife(a, f) - f(a)
    return (len(a) - 1) * np.mean(deviations)
def jackknife_variance(a, f):
    """
    Jackknife estimate of the variance of estimator ``f`` on sample ``a``.
    Parameters
    ----------
    a : array-like
        Sample
    f : callable
        Estimator
    Returns
    -------
    y : float
        Jackknife estimate of variance
    """
    estimates = jackknife(a, f)
    centered = estimates - np.mean(estimates)
    return (len(a) - 1) * np.mean(centered ** 2)
def empirical_influence(a, f):
    """
    Empirical influence function of estimator ``f`` on sample ``a``,
    approximated via the jackknife method.
    Parameters
    ----------
    a : array-like
        Sample
    f : callable
        Estimator
    Returns
    -------
    y : np.array
        Empirical influence values
    """
    theta = f(a)
    return (len(a) - 1) * (theta - jackknife(a, f))
def bootstrap(a, f=None, b=100, method="balanced", family=None,
              strata=None, smooth=False, random_state=None):
    """
    Calculate function values from bootstrap samples or
    optionally return bootstrap samples themselves
    Parameters
    ----------
    a : array-like
        Original sample
    f : callable or None
        Function to be bootstrapped
    b : int
        Number of bootstrap samples
    method : string
        * 'ordinary'
        * 'balanced'
        * 'parametric'
    family : string or None
        * 'gaussian'
        * 't'
        * 'laplace'
        * 'logistic'
        * 'F'
        * 'gamma'
        * 'log-normal'
        * 'inverse-gaussian'
        * 'pareto'
        * 'beta'
        * 'poisson'
    strata : array-like or None
        Stratification labels, ignored when method
        is parametric
    smooth : boolean
        Whether or not to add noise to bootstrap
        samples, ignored when method is parametric
    random_state : int or None
        Random number seed
    Returns
    -------
    y | X : np.array
        Function applied to each bootstrap sample
        or bootstrap samples if f is None
    """
    # NOTE(review): this seeds numpy's *global* RNG; the scipy `rvs` calls below additionally
    # receive the same `random_state`, and the recursive per-stratum calls all reuse it, so
    # with an integer seed every stratum draws the same permutation — confirm intended.
    np.random.seed(random_state)
    a = np.asarray(a)
    n = len(a)
    # stratification not meaningful for parametric sampling
    if strata is not None and (method != "parametric"):
        strata = np.asarray(strata)
        if len(strata) != len(a):
            raise ValueError("a and strata must have"
                             " the same length")
        # recursively call bootstrap without stratification
        # on the different strata
        masks = [strata == x for x in np.unique(strata)]
        boot_strata = [bootstrap(a=a[m],
                                 f=None,
                                 b=b,
                                 method=method,
                                 strata=None,
                                 random_state=random_state) for m in masks]
        # concatenate resampled strata along first column axis
        X = np.concatenate(boot_strata, axis=1)
    else:
        if method == "ordinary":
            # i.i.d. sampling from ecdf of a
            X = np.reshape(a[np.random.choice(range(a.shape[0]),
                                              a.shape[0] * b)],
                           newshape=(b,) + a.shape)
        elif method == "balanced":
            # permute b concatenated copies of a, so every observation appears
            # exactly b times across the whole collection of bootstrap samples
            r = np.reshape([a] * b,
                           newshape=(b * a.shape[0],) + a.shape[1:])
            X = np.reshape(r[np.random.permutation(range(r.shape[0]))],
                           newshape=(b,) + a.shape)
        elif method == "parametric":
            if len(a.shape) > 1:
                raise ValueError("a must be one-dimensional")
            # fit parameters by maximum likelihood and sample
            if family == "gaussian":
                theta = norm.fit(a)
                arr = norm.rvs(size=n*b,
                               loc=theta[0],
                               scale=theta[1],
                               random_state=random_state)
            elif family == "t":
                # scale is fixed at 1 during fitting
                theta = t.fit(a, fscale=1)
                arr = t.rvs(size=n*b,
                            df=theta[0],
                            loc=theta[1],
                            scale=theta[2],
                            random_state=random_state)
            elif family == "laplace":
                theta = laplace.fit(a)
                arr = laplace.rvs(size=n*b,
                                  loc=theta[0],
                                  scale=theta[1],
                                  random_state=random_state)
            elif family == "logistic":
                theta = logistic.fit(a)
                arr = logistic.rvs(size=n*b,
                                   loc=theta[0],
                                   scale=theta[1],
                                   random_state=random_state)
            elif family == "F":
                # location/scale fixed at 0/1 during fitting
                theta = F.fit(a, floc=0, fscale=1)
                arr = F.rvs(size=n*b,
                            dfn=theta[0],
                            dfd=theta[1],
                            loc=theta[2],
                            scale=theta[3],
                            random_state=random_state)
            elif family == "gamma":
                theta = gamma.fit(a, floc=0)
                arr = gamma.rvs(size=n*b,
                                a=theta[0],
                                loc=theta[1],
                                scale=theta[2],
                                random_state=random_state)
            elif family == "log-normal":
                theta = lognorm.fit(a, floc=0)
                arr = lognorm.rvs(size=n*b,
                                  s=theta[0],
                                  loc=theta[1],
                                  scale=theta[2],
                                  random_state=random_state)
            elif family == "inverse-gaussian":
                theta = invgauss.fit(a, floc=0)
                arr = invgauss.rvs(size=n*b,
                                   mu=theta[0],
                                   loc=theta[1],
                                   scale=theta[2],
                                   random_state=random_state)
            elif family == "pareto":
                theta = pareto.fit(a, floc=0)
                arr = pareto.rvs(size=n*b,
                                 b=theta[0],
                                 loc=theta[1],
                                 scale=theta[2],
                                 random_state=random_state)
            elif family == "beta":
                theta = beta.fit(a)
                arr = beta.rvs(size=n*b,
                               a=theta[0],
                               b=theta[1],
                               loc=theta[2],
                               scale=theta[3],
                               random_state=random_state)
            elif family == "poisson":
                # ML estimate of the Poisson rate is the sample mean
                theta = np.mean(a)
                arr = poisson.rvs(size=n*b,
                                  mu=theta,
                                  random_state=random_state)
            else:
                raise ValueError("Invalid family")
            X = np.reshape(arr, newshape=(b, n))
        else:
            raise ValueError("method must be either 'ordinary'"
                             " , 'balanced', or 'parametric',"
                             " '{method}' was supplied".
                             format(method=method))
    # samples are already smooth in the parametric case
    if smooth and (method != "parametric"):
        # kernel-smoothed bootstrap: gaussian noise with bandwidth 1/sqrt(n)
        X += np.random.normal(size=X.shape,
                              scale=1 / np.sqrt(n))
    if f is None:
        return X
    else:
        return np.asarray([f(x) for x in X])
def bootstrap_ci(a, f, p=0.95, b=100, ci_method="percentile",
                 boot_method="balanced", family=None,
                 strata=None, smooth=False, random_state=None):
    """
    Calculate bootstrap confidence intervals
    Parameters
    ----------
    a : array-like
        Original sample
    f : callable
        Function to be bootstrapped
    p : float
        Confidence level (two-sided coverage of the returned interval)
    b : int
        Number of bootstrap samples
    ci_method : string
        * 'percentile'
        * 'bca'
        * 't'
    boot_method : string
        * 'ordinary'
        * 'balanced'
        * 'parametric'
    family : string (only used when boot_method is parametric)
        * 'gaussian'
        * 't'
        * 'laplace'
        * 'logistic'
        * 'F'
        * 'gamma'
        * 'log-normal'
        * 'inverse-gaussian'
        * 'pareto'
        * 'beta'
        * 'poisson'
    strata : array-like or None (only used when boot_method
             is parametric)
        Stratification labels
    smooth : boolean (not used when boot_method is parametric)
        Whether or not to add noise to bootstrap
        samples
    random_state : int or None
        Random number seed
    Returns
    -------
    (l, u) : tuple
        Lower and upper confidence limits
    """
    if not (0 < p < 1):
        raise ValueError("p must be between zero and one")
    if boot_method not in ["ordinary", "balanced", "parametric"]:
        raise ValueError(("boot_method must be 'ordinary'"
                          " 'balanced', or 'parametric', '{method}' was"
                          " supplied".
                          format(method=boot_method)))
    boot_est = bootstrap(a=a, f=f, b=b, method=boot_method,
                         family=family, strata=strata,
                         smooth=smooth, random_state=random_state)
    # empirical quantile function of the bootstrap replicates
    q = eqf(boot_est)
    alpha = 1 - p
    if ci_method == "percentile":
        return (q(alpha/2), q(1 - alpha/2))
    elif ci_method == "bca":
        theta = f(a)
        # bias correction: normal quantile of the fraction of bootstrap
        # estimates at or below the observed statistic
        z_naught = norm.ppf(np.mean(boot_est <= theta))
        # FIX: use alpha/2 in each tail so 'bca' targets the same
        # two-sided coverage p as 'percentile'. Previously norm.ppf(alpha)
        # and norm.ppf(1 - alpha) were used, which yields an interval with
        # 1 - 2*alpha coverage, inconsistent with the other ci_methods.
        z_low = norm.ppf(alpha/2)
        z_high = norm.ppf(1 - alpha/2)
        # acceleration, estimated from the jackknife skewness
        jack_est = jackknife(a, f)
        jack_mean = np.mean(jack_est)
        acc = (np.sum((jack_mean - jack_est)**3) /
               (6 * np.sum((jack_mean - jack_est)**2)**(3/2)))
        # adjusted quantile levels (Efron's BCa formula)
        p1 = (norm.cdf(z_naught + (z_naught + z_low) /
                       (1 - acc * (z_naught + z_low))))
        p2 = (norm.cdf(z_naught + (z_naught + z_high) /
                       (1 - acc * (z_naught + z_high))))
        return (q(p1), q(p2))
    elif ci_method == "t":
        theta = f(a)
        theta_std = np.std(boot_est)
        # quantile function of studentized bootstrap estimates
        tq = eqf((boot_est - theta) / theta_std)
        # FIX: alpha/2 per tail (was alpha), for consistency with the
        # 'percentile' method's two-sided coverage p
        t1 = tq(1 - alpha/2)
        t2 = tq(alpha/2)
        return (theta - theta_std * t1, theta - theta_std * t2)
    else:
        raise ValueError(("ci_method must be 'percentile'"
                          " 'bca', or 't', '{method}'"
                          " was supplied".
                          format(method=ci_method)))
| StarcoderdataPython |
6533295 | <gh_stars>0
import usys
import unittest
class MockConfig:
    """Stand-in for the ferris wheel configuration object.
    Exposes the same three numeric settings the control code reads.
    """
    def __init__(self):
        # Fixed values for the control tests.
        self.max_degrees = 10
        self.speed_limit = 10
        self.speed_increment = 1
class MockMotor:
    """Minimal motor double that reports a preset value as its position."""
    def __init__(self, speed):
        # Value echoed back by get_position().
        self.speed = speed
    def get_position(self):
        """Return the preset value."""
        return self.speed
# Make the production sources and the mocks importable before pulling in
# the module under test (usys is MicroPython's sys module).
usys.path.insert(0, 'src')
usys.path.insert(0, 'mock')
from ferris_wheel.control import FerrisWheelControl
class TestFerrisWheelControl(unittest.TestCase):
    """Unit tests for FerrisWheelControl, driven by the mock motor/config."""
    def test_get_speed(self):
        """get_speed() should report the value the motor currently holds."""
        config = MockConfig()
        mock_motor = MockMotor(5)
        control = FerrisWheelControl(mock_motor, config)
        self.assertEqual(control.get_speed(), 5,
                         "ferris wheel control get speed returns correct result")
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
147573 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import logging
from collections import OrderedDict
# access, , get components, internal
from yotta.lib import access
from yotta.lib import access_common
# pool, , shared thread pool, internal
#from pool import pool
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
# Pack, , common parts of Components/Targets, internal
from yotta.lib import pack
# !!! FIXME: should components lock their description file while they exist?
# If not there are race conditions where the description file is modified by
# another process (or in the worst case replaced by a symlink) after it has
# been opened and before it is re-written
# Constants
# Directory names created inside a module for its installed dependencies
# and installed build targets:
Modules_Folder = 'yotta_modules'
Targets_Folder = 'yotta_targets'
# Current and legacy names of the module description file:
Component_Description_File = 'module.json'
Component_Description_File_Fallback = 'package.json'
# Application-only preprocessor definitions file:
Component_Definitions_File = 'defines.json'
# Registry namespace under which modules are published:
Registry_Namespace = 'modules'
# JSON schema used to validate module descriptions (ships next to this file):
Schema_File = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema', 'module.json')
logger = logging.getLogger('components')
# Custom log level below logging.DEBUG for very verbose tracing:
VVVERBOSE_DEBUG = logging.DEBUG - 8
def _truthyConfValue(v):
''' Determine yotta-config truthiness. In yotta config land truthiness is
different to python or json truthiness (in order to map nicely only
preprocessor and CMake definediness):
json -> python -> truthy/falsey
false -> False -> Falsey
null -> None -> Falsey
undefined -> None -> Falsey
0 -> 0 -> Falsey
"" -> "" -> Truthy (different from python)
"0" -> "0" -> Truthy
{} -> {} -> Truthy (different from python)
[] -> [] -> Truthy (different from python)
everything else is truthy
'''
if v is False:
return False
elif v is None:
return False
elif v == 0:
return False
else:
# everything else is truthy!
return True
# API
class Component(pack.Pack):
    ''' Represent a single yotta module: a directory containing a
        module.json (or legacy package.json) description file. Extends
        pack.Pack with dependency resolution, target satisfaction, and
        accessors for the information in the module description.
    '''
    def __init__(
            self,
            path,
            installed_linked = False,
            latest_suitable_version = None,
            test_dependency = False,
            inherit_shrinkwrap = None
        ):
        ''' How to use a Component:
            Initialise it with the directory into which the component has been
            downloaded, (or with a symlink that points to a directory
            containing the component)
            Check that 'if component:' is true, which indicates that the
            download is indeed a valid component.
            Check that component.getVersion() returns the version you think
            you've downloaded.
            Use component.getDependencySpecs() to get the names of the
            dependencies of the component, or component.getDependencies() to
            get Component objects (which may not be valid unless the
            dependencies have been installed) for each of the dependencies.
        '''
        self.description = OrderedDict()
        logger.log(VVVERBOSE_DEBUG, "Component: " + path +  ' installed_linked=' + str(installed_linked))
        warn_deprecated_filename = False
        # prefer module.json, but fall back to the deprecated package.json
        # name if only that exists:
        if (not os.path.exists(os.path.join(path, Component_Description_File))) and \
           os.path.exists(os.path.join(path, Component_Description_File_Fallback)):
            warn_deprecated_filename = True
            description_filename = Component_Description_File_Fallback
        else:
            description_filename = Component_Description_File
        super(Component, self).__init__(
            path,
            description_filename = description_filename,
            installed_linked = installed_linked,
            schema_filename = Schema_File,
            latest_suitable_version = latest_suitable_version,
            inherit_shrinkwrap = inherit_shrinkwrap
        )
        if self.description and inherit_shrinkwrap is not None:
            # when inheriting a shrinkwrap, check that this module is
            # listed in the shrinkwrap, otherwise emit a warning:
            if next((x for x in inherit_shrinkwrap.get('modules', []) if x['name'] == self.getName()), None) is None:
                logger.warning("%s missing from shrinkwrap", self.getName())
        if warn_deprecated_filename:
            logger.warning(
                "Component %s uses deprecated %s file, use %s instead." % (
                    self.getName(),
                    Component_Description_File_Fallback,
                    Component_Description_File
                )
            )
        # a module may be built either into a library or an executable,
        # never both; an invalid combination voids the description:
        if 'bin' in self.description and 'lib' in self.description:
            self.error = 'Both "lib" and "bin" are specified in module.json: '+\
                         'only one is allowed. If this is an executable module, then '+\
                         'it should not specify a "lib" subdirectory, and if this is '+\
                         'a re-usable library module, it should not specify a "bin" '+\
                         'subdirectory'
            self.description = OrderedDict()
        # specified in the description
        self.installed_dependencies = False
        self.dependencies_failed = False
        self.is_test_dependency = test_dependency
        # read definitions for applications
        self.defines = {}
        defines_path = os.path.join(path, Component_Definitions_File)
        if os.path.isfile(defines_path):
            if not self.isApplication():
                # only applications can have definitions
                logger.warning("%s ignored in library module '%s'" % (Component_Definitions_File, self.getName()))
            else:
                # TODO: define a schema for this file
                self.defines = pack.tryReadJSON(defines_path, None)
    def getDependencySpecs(self, target=None):
        ''' Returns [DependencySpec]
            These are returned in the order that they are listed in the
            component description file: this is so that dependency resolution
            proceeds in a predictable way.
        '''
        deps = []
        def specForDependency(name, version_spec, istest):
            # build a DependencySpec, honouring any version pinned by an
            # inherited shrinkwrap for this dependency name:
            shrinkwrap = self.getShrinkwrapMapping()
            shrinkwrap_version_req = None
            if name in shrinkwrap:
                # exact version, and pull from registry:
                shrinkwrap_version_req = shrinkwrap[name]
                logger.debug(
                    'respecting %s shrinkwrap version %s for %s', self.getName(), shrinkwrap_version_req, name
                )
            return pack.DependencySpec(
                name,
                version_spec,
                istest,
                shrinkwrap_version_req = shrinkwrap_version_req,
                specifying_module = self.getName()
            )
        deps += [specForDependency(x[0], x[1], False) for x in self.description.get('dependencies', {}).items()]
        target_deps = self.description.get('targetDependencies', {})
        if target is not None:
            for conf_key, target_conf_deps in target_deps.items():
                # a targetDependencies group applies if its config key is
                # truthy for the target (or matches the deprecated
                # similar-to list):
                if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():
                    logger.debug(
                        'Adding target-dependent dependency specs for target config %s to component %s' %
                        (conf_key, self.getName())
                    )
                    deps += [specForDependency(x[0], x[1], False) for x in target_conf_deps.items()]
        deps += [specForDependency(x[0], x[1], True) for x in self.description.get('testDependencies', {}).items()]
        target_deps = self.description.get('testTargetDependencies', {})
        if target is not None:
            for conf_key, target_conf_deps in target_deps.items():
                if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():
                    logger.debug(
                        'Adding test-target-dependent dependency specs for target config %s to component %s' %
                        (conf_key, self.getName())
                    )
                    deps += [specForDependency(x[0], x[1], True) for x in target_conf_deps.items()]
        # remove duplicates (use the first occurrence)
        seen = set()
        r = []
        for dep in deps:
            if not dep.name in seen:
                r.append(dep)
                seen.add(dep.name)
        return r
    def hasDependency(self, name, target=None, test_dependencies=False):
        ''' Check if this module has any dependencies with the specified name
            in its dependencies list, or in target dependencies for the
            specified target
        '''
        if name in self.description.get('dependencies', {}).keys():
            return True
        target_deps = self.description.get('targetDependencies', {})
        if target is not None:
            for conf_key, target_conf_deps in target_deps.items():
                if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():
                    if name in target_conf_deps:
                        return True
        if test_dependencies:
            if name in self.description.get('testDependencies', {}).keys():
                return True
            if target is not None:
                test_target_deps = self.description.get('testTargetDependencies', {})
                for conf_key, target_conf_deps in test_target_deps.items():
                    if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated():
                        if name in target_conf_deps:
                            return True
        return False
    def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
        ''' Check if this module, or any of its dependencies, have a
            dependencies with the specified name in their dependencies, or in
            their targetDependencies corresponding to the specified target.
            Note that if recursive dependencies are not installed, this test
            may return a false-negative.
        '''
        # checking dependencies recursively isn't entirely straightforward, so
        # use the existing method to resolve them all before checking:
        dependencies = self.getDependenciesRecursive(
            target = target,
            test = test_dependencies
        )
        return (name in dependencies)
    def getDependencies(self,
            available_components = None,
            search_dirs = None,
            target = None,
            available_only = False,
            test = False,
            warnings = True
        ):
        ''' Returns {component_name:component}
        '''
        if search_dirs is None:
            search_dirs = [self.modulesPath()]
        available_components = self.ensureOrderedDict(available_components)
        components, errors = self.__getDependenciesWithProvider(
            available_components = available_components,
            search_dirs = search_dirs,
            target = target,
            update_installed = False,
            provider = self.provideInstalled,
            test = test
        )
        if warnings:
            for error in errors:
                logger.warning(error)
        if available_only:
            # drop entries whose component could not be provided:
            components = OrderedDict((k, v) for k, v in components.items() if v)
        return components
    def __getDependenciesWithProvider(self,
            available_components = None,
            search_dirs = None,
            target = None,
            update_installed = False,
            provider = None,
            test = False
        ):
        ''' Get installed components using "provider" to find (and possibly
            install) components.
            See documentation for __getDependenciesRecursiveWithProvider
            returns (components, errors)
        '''
        # sourceparse, , parse version source urls, internal
        from yotta.lib import sourceparse
        errors = []
        modules_path = self.modulesPath()
        def satisfyDep(dspec):
            # returns the component satisfying dspec, or None if it could
            # not be provided (the error is recorded in `errors`)
            try:
                r = provider(
                    dspec,
                    available_components,
                    search_dirs,
                    modules_path,
                    update_installed,
                    self
                )
                # mark (but still return) components whose version does not
                # satisfy the requested specification:
                if r and not sourceparse.parseSourceURL(dspec.versionReq()).semanticSpecMatches(r.getVersion()):
                    shrinkwrap_msg = ''
                    if dspec.isShrinkwrapped():
                        shrinkwrap_msg = 'shrinkwrap on '
                    msg = 'does not meet specification %s required by %s%s' % (
                        dspec.versionReq(), shrinkwrap_msg, self.getName()
                    )
                    logger.debug('%s %s', r.getName(), msg)
                    r.setError(msg)
                return r
            except access_common.Unavailable as e:
                errors.append(e)
                self.dependencies_failed = True
            except vcs.VCSError as e:
                errors.append(e)
                self.dependencies_failed = True
        specs = self.getDependencySpecs(target=target)
        if not test:
            # filter out things that aren't test dependencies if necessary:
            specs = [x for x in specs if not x.is_test_dependency]
        #dependencies = pool.map(
        dependencies = map(
            satisfyDep, specs
        )
        self.installed_dependencies = True
        # stable order is important!
        return (OrderedDict([((d and d.getName()) or specs[i].name, d) for i, d in enumerate(dependencies)]), errors)
    def __getDependenciesRecursiveWithProvider(self,
            available_components = None,
            search_dirs = None,
            target = None,
            traverse_links = False,
            update_installed = False,
            provider = None,
            test = False,
            _processed = None
        ):
        ''' Get installed components using "provider" to find (and possibly
            install) components.
            This function is called with different provider functions in order
            to retrieve a list of all of the dependencies, or install all
            dependencies.
            Returns
            =======
            (components, errors)
            components: dictionary of name:Component
            errors: sequence of errors
            Parameters
            ==========
            available_components:
                None (default) or a dictionary of name:component. This is
                searched before searching directories or fetching remote
                components
            search_dirs:
                None (default), or sequence of directories to search for
                already installed, (but not yet loaded) components. Used so
                that manually installed or linked components higher up the
                dependency tree are found by their users lower down.
                These directories are searched in order, and finally the
                current directory is checked.
            target:
                None (default), or a Target object. If specified the target
                name and it's similarTo list will be used in resolving
                dependencies. If None, then only target-independent
                dependencies will be installed
            traverse_links:
                False (default) or True: whether to recurse into linked
                dependencies. You normally want to set this to "True" when
                getting a list of dependencies, and False when installing
                them (unless the user has explicitly asked dependencies to
                be installed in linked components).
            provider: None (default) or function:
                provider(
                    dependency_spec,
                    available_components,
                    search_dirs,
                    working_directory,
                    update_if_installed
                )
            test:
                True, False, 'toplevel': should test-only dependencies be
                included (yes, no, or only at this level, not recursively)
        '''
        def recursionFilter(c):
            # decide whether the resolved component c should itself be
            # recursed into:
            if not c:
                logger.debug('do not recurse into failed component')
                # don't recurse into failed components
                return False
            if c.getName() in _processed:
                logger.debug('do not recurse into already processed component: %s' % c)
                return False
            if c.installedLinked() and not traverse_links:
                return False
            return True
        available_components = self.ensureOrderedDict(available_components)
        if search_dirs is None:
            search_dirs = []
        if _processed is None:
            _processed = set()
        assert(test in [True, False, 'toplevel'])
        search_dirs.append(self.modulesPath())
        logger.debug('process %s\nsearch dirs:%s' % (self.getName(), search_dirs))
        if self.isTestDependency():
            logger.debug("won't provide test dependencies recursively for test dependency %s", self.getName())
            test = False
        components, errors = self.__getDependenciesWithProvider(
            available_components = available_components,
            search_dirs = search_dirs,
            update_installed = update_installed,
            target = target,
            provider = provider,
            test = test
        )
        _processed.add(self.getName())
        if errors:
            errors = ['Failed to satisfy dependencies of %s:' % self.path] + errors
        need_recursion = [x for x in filter(recursionFilter, components.values())]
        available_components.update(components)
        logger.debug('processed %s\nneed recursion: %s\navailable:%s\nsearch dirs:%s' % (self.getName(), need_recursion, available_components, search_dirs))
        if test == 'toplevel':
            test = False
        # NB: can't perform this step in parallel, since the available
        # components list must be updated in order
        for c in need_recursion:
            dep_components, dep_errors = c.__getDependenciesRecursiveWithProvider(
                available_components = available_components,
                search_dirs = search_dirs,
                target = target,
                traverse_links = traverse_links,
                update_installed = update_installed,
                provider = provider,
                test = test,
                _processed = _processed
            )
            available_components.update(dep_components)
            components.update(dep_components)
            errors += dep_errors
        return (components, errors)
    def provideInstalled(self,
            dspec,
            available_components,
            search_dirs,
            working_directory,
            update_installed,
            dep_of
        ):
        ''' Provider function used when listing (never installing)
            dependencies: satisfy dspec from the already-available components
            or from the on-disk search paths. If neither succeeds, return a
            Component initialised to the default install path (which may be
            invalid) so that failed dependencies can still be enumerated.
        '''
        #logger.info('%s provideInstalled: %s', dep_of.getName(), dspec.name)
        r = access.satisfyFromAvailable(dspec.name, available_components)
        if r:
            if r.isTestDependency() and not dspec.is_test_dependency:
                logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
                r.setTestDependency(False)
            return r
        update_if_installed = False
        if update_installed is True:
            update_if_installed = True
        elif update_installed:
            # update_installed may also be a set of names to update:
            update_if_installed = dspec.name in update_installed
        r = access.satisfyVersionFromSearchPaths(
            dspec.name,
            dspec.versionReq(),
            search_dirs,
            update_if_installed,
            inherit_shrinkwrap = dep_of.getShrinkwrap()
        )
        if r:
            r.setTestDependency(dspec.is_test_dependency)
            return r
        # return a module initialised to the path where we would have
        # installed this module, so that it's possible to use
        # getDependenciesRecursive to find a list of failed dependencies,
        # as well as just available ones
        # note that this Component object may still be valid (usable to
        # attempt a build), if a different version was previously installed
        # on disk at this location (which means we need to check if the
        # existing version is linked)
        default_path = os.path.join(self.modulesPath(), dspec.name)
        r = Component(
            default_path,
            test_dependency = dspec.is_test_dependency,
            installed_linked = fsutils.isLink(default_path),
            inherit_shrinkwrap = dep_of.getShrinkwrap()
        )
        return r
    def getDependenciesRecursive(self,
            available_components = None,
            processed = None,
            search_dirs = None,
            target = None,
            available_only = False,
            test = False
        ):
        ''' Get available and already installed components, don't check for
            remotely available components. See also
            satisfyDependenciesRecursive()
            Returns {component_name:component}
        '''
        # NOTE(review): the 'processed' parameter is accepted but not used;
        # recursion state is tracked internally via _processed.
        components, errors = self.__getDependenciesRecursiveWithProvider(
            available_components = available_components,
            search_dirs = search_dirs,
            target = target,
            traverse_links = True,
            update_installed = False,
            provider = self.provideInstalled,
            test = test
        )
        for error in errors:
            logger.error(error)
        if available_only:
            components = OrderedDict((k, v) for k, v in components.items() if v)
        return components
    def modulesPath(self):
        ''' Return the directory (<module>/yotta_modules) into which this
            module's dependencies are installed. '''
        return os.path.join(self.path, Modules_Folder)
    def targetsPath(self):
        ''' Return the directory (<module>/yotta_targets) into which build
            targets are installed. '''
        return os.path.join(self.path, Targets_Folder)
    def satisfyDependenciesRecursive(
                            self,
            available_components = None,
                     search_dirs = None,
                update_installed = False,
                  traverse_links = False,
                          target = None,
                            test = False
        ):
        ''' Retrieve and install all the dependencies of this component and its
            dependencies, recursively, or satisfy them from a collection of
            available_components or from disk.
            Returns
            =======
            (components, errors)
            components: dictionary of name:Component
            errors: sequence of errors
            Parameters
            ==========
            available_components:
                None (default) or a dictionary of name:component. This is
                searched before searching directories or fetching remote
                components
            search_dirs:
                None (default), or sequence of directories to search for
                already installed, (but not yet loaded) components. Used so
                that manually installed or linked components higher up the
                dependency tree are found by their users lower down.
                These directories are searched in order, and finally the
                current directory is checked.
            update_installed:
                False (default), True, or set(): whether to check the
                available versions of installed components, and update if a
                newer version is available. If this is a set(), only update
                things in the specified set.
            traverse_links:
                False (default) or True: whether to recurse into linked
                dependencies when updating/installing.
            target:
                None (default), or a Target object. If specified the target
                name and it's similarTo list will be used in resolving
                dependencies. If None, then only target-independent
                dependencies will be installed
            test:
                True, False, or 'toplevel': should test-only dependencies be
                installed? (yes, no, or only for this module, not its
                dependencies).
        '''
        def provider(
            dspec,
            available_components,
            search_dirs,
            working_directory,
            update_installed,
            dep_of=None
        ):
            # like provideInstalled, but will download+install a missing
            # dependency as a last resort:
            r = access.satisfyFromAvailable(dspec.name, available_components)
            if r:
                if r.isTestDependency() and not dspec.is_test_dependency:
                    logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
                    r.setTestDependency(False)
                return r
            update_if_installed = False
            if update_installed is True:
                update_if_installed = True
            elif update_installed:
                update_if_installed = dspec.name in update_installed
            r = access.satisfyVersionFromSearchPaths(
                dspec.name,
                dspec.versionReq(),
                search_dirs,
                update_if_installed,
                inherit_shrinkwrap = dep_of.getShrinkwrap()
            )
            if r:
                r.setTestDependency(dspec.is_test_dependency)
                return r
            # before resorting to install this module, check if we have an
            # existing linked module (which wasn't picked up because it didn't
            # match the version specification) - if we do, then we shouldn't
            # try to install, but should return that anyway:
            default_path = os.path.join(self.modulesPath(), dspec.name)
            if fsutils.isLink(default_path):
                r = Component(
                    default_path,
                    test_dependency = dspec.is_test_dependency,
                    installed_linked = fsutils.isLink(default_path),
                    inherit_shrinkwrap = dep_of.getShrinkwrap()
                )
                if r:
                    assert(r.installedLinked())
                    return r
                else:
                    logger.error('linked module %s is invalid: %s', dspec.name, r.getError())
                    return r
            r = access.satisfyVersionByInstalling(
                dspec.name,
                dspec.versionReq(),
                self.modulesPath(),
                inherit_shrinkwrap = dep_of.getShrinkwrap()
            )
            if not r:
                logger.error('could not install %s' % dspec.name)
            if r is not None:
                r.setTestDependency(dspec.is_test_dependency)
            return r
        return self.__getDependenciesRecursiveWithProvider(
            available_components = available_components,
            search_dirs = search_dirs,
            target = target,
            traverse_links = traverse_links,
            update_installed = update_installed,
            provider = provider,
            test = test
        )
    def satisfyTarget(self, target_name_and_version, update_installed=False, additional_config=None, install_missing=True):
        ''' Ensure that the specified target name (and optionally version,
            github ref or URL) is installed in the targets directory of the
            current component
            returns (derived_target, errors)
        '''
        # Target, , represent an installed target, internal
        from yotta.lib import target
        application_dir = None
        if self.isApplication():
            application_dir = self.path
        return target.getDerivedTarget(
            target_name_and_version,
            self.targetsPath(),
            install_missing = install_missing,
            application_dir = application_dir,
            update_installed = update_installed,
            additional_config = additional_config,
            shrinkwrap = self.getShrinkwrap()
        )
    def getTarget(self, target_name_and_version, additional_config=None):
        ''' Return a derived target object representing the selected target: if
            the target is not installed, or is invalid then the returned object
            will test false in a boolean context.
            Returns derived_target
            Errors are not displayed.
        '''
        derived_target, errors = self.satisfyTarget(
            target_name_and_version,
            additional_config = additional_config,
            install_missing = False
        )
        if len(errors):
            return None
        else:
            return derived_target
    def installedDependencies(self):
        ''' Return true if satisfyDependencies has been called.
            Note that this is slightly different to when all of the
            dependencies are actually satisfied, but can be used as if it means
            that.
        '''
        return self.installed_dependencies
    def isApplication(self):
        ''' Return true if this module is an application instead of a reusable
            library '''
        # a module that defines any binary is an application:
        return bool(len(self.getBinaries()))
    def getBinaries(self):
        ''' Return a dictionary of binaries to compile: {"dirname":"exename"},
            this is used when automatically generating CMakeLists
            Note that currently modules may define only a single executable
            binary or library to be built by the automatic build system, by
            specifying `"bin": "dir-to-be-built-into-binary"`, or `"lib":
            "dir-to-be-built-into-library"`, and the bin/lib will always have
            the same name as the module. The default behaviour if nothing is
            specified is for the 'source' directory to be built into a library.
            The module.json syntax may allow for other combinations in the
            future (and callers of this function should not rely on it
            returning only a single item). For example, a "bin": {"dirname":
            "exename"} syntax might be supported, however currently more
            complex builds must be controlled by custom CMakeLists.
        '''
        # the module.json syntax is a subset of the package.json syntax: a
        # single string that defines the source directory to use to build an
        # executable with the same name as the component. This may be extended
        # to include the rest of the npm syntax in future (map of source-dir to
        # exe name).
        if 'bin' in self.description:
            return {os.path.normpath(self.description['bin']): self.getName()}
        else:
            return {}
    def getLibs(self, explicit_only=False):
        ''' Return a dictionary of libraries to compile: {"dirname":"libname"},
            this is used when automatically generating CMakeLists.
            If explicit_only is not set, then in the absence of both 'lib' and
            'bin' sections in the module.json file, the "source" directory
            will be returned.
            Note that currently modules may define only a single executable
            binary or library to be built by the automatic build system, by
            specifying `"bin": "dir-to-be-built-into-binary"`, or `"lib":
            "dir-to-be-built-into-library"`, and the bin/lib will always have
            the same name as the module. The default behaviour if nothing is
            specified is for the 'source' directory to be built into a library.
            The module.json syntax may allow for other combinations in the
            future (and callers of this function should not rely on it
            returning only a single item). For example, a "bin": {"dirname":
            "exename"} syntax might be supported, however currently more
            complex builds must be controlled by custom CMakeLists.
        '''
        if 'lib' in self.description:
            return {os.path.normpath(self.description['lib']): self.getName()}
        elif 'bin' not in self.description and not explicit_only:
            return {'source': self.getName()}
        else:
            return {}
    def licenses(self):
        ''' Return a list of licenses that apply to this module. (Strings,
            which may be SPDX identifiers)
        '''
        if 'license' in self.description:
            return [self.description['license']]
        else:
            # legacy form: a list of {'type': ..., ...} objects
            return [x['type'] for x in self.description['licenses']]
    def getExtraIncludes(self):
        ''' Some components must export whole directories full of headers into
            the search path. This is really really bad, and they shouldn't do
            it, but support is provided as a concession to compatibility.
        '''
        if 'extraIncludes' in self.description:
            return [os.path.normpath(x) for x in self.description['extraIncludes']]
        else:
            return []
    def getExtraSysIncludes(self):
        ''' Some components (e.g. libc) must export directories of header files
            into the system include search path. They do this by adding a
            'extraSysIncludes' : [ array of directories ] field in their
            package description. This function returns the list of directories
            (or an empty list), if it doesn't exist.
        '''
        if 'extraSysIncludes' in self.description:
            return [os.path.normpath(x) for x in self.description['extraSysIncludes']]
        else:
            return []
    def getRegistryNamespace(self):
        ''' Return the registry namespace ('modules') under which components
            are published. '''
        return Registry_Namespace
    def setTestDependency(self, status):
        ''' Mark (or unmark) this component as being present only as a test
            dependency. '''
        self.is_test_dependency = status
    def isTestDependency(self):
        ''' Return True if this component is present only as a test
            dependency. '''
        return self.is_test_dependency
    def __saveSpecForComponent(self, component):
        ''' Derive the version specification string to save for a dependency:
            '*' for tip versions, '~x.y.z' for 0.x versions (pegging the
            minor version), and '^x.y.z' otherwise. '''
        version = component.getVersion()
        if version.isTip():
            spec = '*'
        elif version.major() == 0:
            # for 0.x.x versions, when we save a dependency we don't use ^0.x.x
            # a that would peg to the exact version - instead we use ~ to peg
            # to the same minor version
            spec = '~' + str(version)
        else:
            spec = '^' + str(version)
        return spec
    def saveDependency(self, component, spec=None):
        ''' Record a dependency on `component` in this module's in-memory
            description, deriving a version spec if one is not supplied.
            Returns the spec that was saved. '''
        if not 'dependencies' in self.description:
            self.description['dependencies'] = OrderedDict()
        if spec is None:
            spec = self.__saveSpecForComponent(component)
        self.description['dependencies'][component.getName()] = spec
        return spec
    def removeDependency(self, component):
        ''' Remove the named dependency from this module's in-memory
            description. Returns True on success, False (with an error
            logged) if it was not listed. '''
        if not component in self.description.get('dependencies', {}):
            logger.error('%s is not listed as a dependency', component)
            return False
        del self.description['dependencies'][component]
        return True
    def getDefines(self):
        ''' Return the definitions read from defines.json (non-empty only for
            application modules). '''
        return self.defines
| StarcoderdataPython |
def _load_primes(path):
    """Read a single-line, comma-separated list of primes from *path*."""
    with open(path, "r") as fh:
        return [int(token) for token in fh.readline().split(",")]

# Pre-load both prime tables at import time.
primes_1000 = _load_primes("primes/primes_1000.txt")
primes_1000000 = _load_primes("primes/primes_1000000.txt")
5028063 | <filename>flopy/modflow/mfhfb.py
"""
mfhfb module. Contains the ModflowHfb class. Note that the user can access
the ModflowHfb class as `flopy.modflow.ModflowHfb`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?hfb6.htm>`_.
"""
import numpy as np
from flopy.mbase import Package
from numpy import atleast_2d
from flopy.modflow.mfparbc import ModflowParBc as mfparbc
from numpy.lib.recfunctions import stack_arrays
class ModflowHfb(Package):
"""
MODFLOW HFB6 - Horizontal Flow Barrier Package
Parameters
----------
model : model object
The model object (of type: class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nphfb : int
Number of horizontal-flow barrier parameters. Note that for an HFB
parameter to have an effect in the simulation, it must be defined
and made active using NACTHFB to have an effect in the simulation
(default is 0).
mxfb : int
Maximum number of horizontal-flow barrier barriers that will be
defined using parameters (default is 0).
nhfbnp: int
Number of horizontal-flow barriers not defined by parameters. This
is calculated automatically by FloPy based on the information in
layer_row_column_data (default is 0).
hfb_data : list of records
In its most general form, this is a list of horizontal-flow
barrier records. A barrier is conceptualized as being located on
the boundary between two adjacent finite difference cells in the
same layer. The innermost list is the layer, row1, column1, row2,
column2, and hydrologic characteristics for a single hfb between
the cells. The hydraulic characteristic is the barrier hydraulic
conductivity divided by the width of the horizontal-flow barrier.
This gives the form of
hfb_data = [
[lay, row1, col1, row2, col2, hydchr],
[lay, row1, col1, row2, col2, hydchr],
[lay, row1, col1, row2, col2, hydchr],
].
(default is None).
nacthfb : int
The number of active horizontal-flow barrier parameters
(default is 0).
no_print : boolean
When True or 1, a list of horizontal flow barriers will not be
written to the Listing File (default is False)
options : list of strings
Package options (default is None).
extension : string
Filename extension (default is 'hfb').
unitnumber : int
File unit number (default is 17).
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Parameters are supported in Flopy only when reading in existing models.
Parameter values are converted to native values in Flopy and the
connection to "parameters" is thus nonexistent.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> hfb_data = [[0, 10, 4, 10, 5, 0.01],[1, 10, 4, 10, 5, 0.01]]
>>> hfb = flopy.modflow.ModflowHfb(m, hfb_data=hfb_data)
"""
def __init__(self, model, nphfb=0, mxfb=0, nhfbnp=0,
hfb_data=None, nacthfb=0, no_print=False,
options=None, extension='hfb', unitnumber=17):
Package.__init__(self, model, extension, 'HFB6',
unitnumber) # Call ancestor's init to set self.parent, extension, name and unit number
self.heading = '# HFB for MODFLOW, generated by Flopy.'
self.url = 'hfb6.htm'
self.nphfb = nphfb
self.mxfb = mxfb
self.nacthfb = nacthfb
self.no_print = no_print
self.np = 0
if options is None:
options = []
if self.no_print:
options.append('NOPRINT')
self.options = options
aux_names = []
it = 0
while it < len(options):
print it, options[it]
if 'aux' in options[it].lower():
aux_names.append(options[it + 1].lower())
it += 1
it += 1
if hfb_data is None:
raise Exception('Failed to specify hfb_data.')
self.nhfbnp = len(hfb_data)
self.hfb_data = ModflowHfb.get_empty(self.nhfbnp)
for ibnd, t in enumerate(hfb_data):
self.hfb_data[ibnd] = tuple(t)
self.parent.add_package(self)
    def __repr__(self):
        """Return a short, fixed textual identification of the package."""
        return 'HFB package class'
    def ncells(self):
        """
        Returns the maximum number of cell pairs that have horizontal
        flow barriers (developed for MT3DMS SSM package).
        """
        # One record in hfb_data corresponds to one barrier between a cell pair.
        return self.nhfbnp
def write_file(self):
"""
Write the package input file.
"""
f_hfb = open(self.fn_path, 'w')
f_hfb.write('{}\n'.format(self.heading))
f_hfb.write('{:10d}{:10d}{:10d}'.format(self.nphfb, self.mxfb, self.nhfbnp))
for option in self.options:
f_hfb.write(' {}'.format(option))
f_hfb.write('\n')
for a in self.hfb_data:
f_hfb.write('{:10d}{:10d}{:10d}{:10d}{:10d}{:13.6g}\n'.format(a[0] + 1, a[1] + 1, a[2] + 1, a[3] + 1, a[4] + 1, a[5]))
f_hfb.write('{:10d}'.format(self.nacthfb))
f_hfb.close()
    @staticmethod
    def get_empty(ncells=0, aux_names=None):
        """
        Get an empty recarray that corresponds to the hfb dtype and has
        been extended to include aux variables and associated
        aux names.
        """
        dtype = ModflowHfb.get_default_dtype()
        if aux_names is not None:
            # Each aux name adds one float32 field to the record dtype.
            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
        # NOTE(review): a 2-D structured array is allocated and every field of
        # every record is flagged with a large negative fill value so unset
        # records are recognizable; fromarrays() then builds the recarray.
        # Confirm this fill idiom still works on current NumPy.
        d = np.zeros((ncells, len(dtype)), dtype=dtype)
        d[:, :] = -1.0E+10
        return np.core.records.fromarrays(d.transpose(), dtype=dtype)
@staticmethod
def get_default_dtype():
"""
Get the default dtype for hfb data
"""
dtype = np.dtype([("k", np.int),
("irow1", np.int), ("icol1", np.int),
("irow2", np.int), ("icol2", np.int),
("hydchr", np.float32)])
return dtype
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type: class:`flopy.modflow.mf.Modflow`)
to which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
hfb : ModflowHfb object
ModflowHfb object (of type :class:`flopy.modflow.mfbas.ModflowHfb`)
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> hfb = flopy.modflow.ModflowHfb.load('test.hfb', m)
"""
print 'loading hfb6 package file...'
if type(f) is not file:
filename = f
f = open(filename, 'r')
#dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# dataset 1
t = line.strip().split()
nphfb = int(t[0])
mxfb = int(t[1])
nhfbnp = int(t[2])
#--check for no-print suppressor
options = []
aux_names = []
if len(t) > 2:
it = 2
while it < len(t):
toption = t[it]
#print it, t[it]
if toption.lower() is 'noprint':
options.append(toption)
elif 'aux' in toption.lower():
options.append(' '.join(t[it:it + 2]))
aux_names.append(t[it + 1].lower())
it += 1
it += 1
#--data set 2 and 3
if nphfb > 0:
dt = ModflowHfb.get_empty(1).dtype
pak_parms = mfparbc.load(f, nphfb, dt)
#--data set 4
bnd_output = None
if nhfbnp > 0:
specified = ModflowHfb.get_empty(nhfbnp)
for ibnd in xrange(nhfbnp):
line = f.readline()
if "open/close" in line.lower():
raise NotImplementedError("load() method does not support \'open/close\'")
t = line.strip().split()
specified[ibnd] = tuple(t[:len(specified.dtype.names)])
#--convert indices to zero-based
specified['k'] -= 1
specified['irow1'] -= 1
specified['icol1'] -= 1
specified['irow2'] -= 1
specified['icol2'] -= 1
bnd_output = np.recarray.copy(specified)
if nphfb > 0:
partype = ['hydchr']
line = f.readline()
t = line.strip().split()
nacthfb = int(t[0])
for iparm in xrange(nacthfb):
line = f.readline()
t = line.strip().split()
pname = t[0].lower()
iname = 'static'
par_dict, current_dict = pak_parms.get(pname)
data_dict = current_dict[iname]
#print par_dict
#print data_dict
par_current = ModflowHfb.get_empty(par_dict['nlst'])
#--
if model.mfpar.pval is None:
parval = np.float(par_dict['parval'])
else:
try:
parval = np.float(model.mfpar.pval.pval_dict[pname])
except:
parval = np.float(par_dict['parval'])
#--fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
par_current[ibnd] = tuple(t[:len(par_current.dtype.names)])
#--convert indices to zero-based
par_current['k'] -= 1
par_current['irow1'] -= 1
par_current['icol1'] -= 1
par_current['irow2'] -= 1
par_current['icol2'] -= 1
for ptype in partype:
par_current[ptype] *= parval
if bnd_output is None:
bnd_output = np.recarray.copy(par_current)
else:
bnd_output = stack_arrays((bnd_output, par_current),
asrecarray=True, usemask=False)
hfb = ModflowHfb(model, nphfb=0, mxfb=0, nhfbnp=len(bnd_output),
hfb_data=bnd_output,
nacthfb=0, options=options)
return hfb
| StarcoderdataPython |
11351301 | import abc
from .renderer import Renderer
from hail.utils.java import Env
class BaseIR(object):
    """Abstract base for all Hail IR nodes (value, table, matrix, blockmatrix)."""

    def __init__(self):
        super().__init__()
        # Cached result of type computation; filled lazily by subclasses' typ.
        self._type = None

    def __str__(self):
        r = Renderer(stop_at_jir=False)
        return r(self)

    @abc.abstractmethod
    def parse(self, code, ref_map, ir_map):
        return

    # BUGFIX: abc.abstractproperty is deprecated since Python 3.3; the
    # equivalent modern spelling is @property stacked on @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def typ(self):
        return
class IR(BaseIR):
    """A value-level IR node; children may be IR nodes or plain literals."""

    def __init__(self, *children):
        super().__init__()
        self._aggregations = None  # lazily-computed cache, see aggregations
        self.children = children

    @property
    def aggregations(self):
        if self._aggregations is None:
            self._aggregations = [agg for child in self.children for agg in child.aggregations]
        return self._aggregations

    @property
    def is_nested_field(self):
        return False

    def search(self, criteria):
        """Return all IR nodes in this subtree (self included) matching criteria."""
        others = [node for child in self.children if isinstance(child, IR) for node in child.search(criteria)]
        if criteria(self):
            return others + [self]
        return others

    def copy(self, *args):
        raise NotImplementedError("IR has no copy method defined.")

    def map_ir(self, f):
        """Return a copy of this node with f applied to each IR child."""
        new_children = []
        for child in self.children:
            if isinstance(child, IR):
                new_children.append(f(child))
            else:
                new_children.append(child)
        return self.copy(*new_children)

    @property
    def bound_variables(self):
        return {v for child in self.children for v in child.bound_variables}

    @property
    def typ(self):
        if self._type is None:
            self._compute_type({}, None)
        assert self._type is not None, self
        return self._type

    @abc.abstractmethod
    def _compute_type(self, env, agg_env):
        raise NotImplementedError(self)

    def parse(self, code, ref_map=None, ir_map=None):
        # BUGFIX: mutable dicts must not be default argument values (shared
        # across calls); create fresh ones per call instead.
        if ref_map is None:
            ref_map = {}
        if ir_map is None:
            ir_map = {}
        return Env.hail().expr.ir.IRParser.parse_value_ir(code, ref_map, ir_map)
class TableIR(BaseIR):
    """Base class for table-level IR nodes."""

    def __init__(self):
        super().__init__()

    @abc.abstractmethod
    def _compute_type(self):
        raise NotImplementedError(self)

    @property
    def typ(self):
        if self._type is None:
            self._compute_type()
        assert self._type is not None, self
        return self._type

    def parse(self, code, ref_map=None, ir_map=None):
        # BUGFIX: avoid mutable default arguments; build fresh dicts per call.
        if ref_map is None:
            ref_map = {}
        if ir_map is None:
            ir_map = {}
        return Env.hail().expr.ir.IRParser.parse_table_ir(code, ref_map, ir_map)
class MatrixIR(BaseIR):
    """Base class for matrix-table-level IR nodes."""

    def __init__(self):
        super().__init__()

    @abc.abstractmethod
    def _compute_type(self):
        raise NotImplementedError(self)

    @property
    def typ(self):
        if self._type is None:
            self._compute_type()
        assert self._type is not None, self
        return self._type

    def parse(self, code, ref_map=None, ir_map=None):
        # BUGFIX: avoid mutable default arguments; build fresh dicts per call.
        if ref_map is None:
            ref_map = {}
        if ir_map is None:
            ir_map = {}
        return Env.hail().expr.ir.IRParser.parse_matrix_ir(code, ref_map, ir_map)
class BlockMatrixIR(BaseIR):
    """Base class for block-matrix-level IR nodes."""

    def __init__(self):
        super().__init__()

    @abc.abstractmethod
    def _compute_type(self):
        raise NotImplementedError(self)

    @property
    def typ(self):
        if self._type is None:
            self._compute_type()
        assert self._type is not None, self
        return self._type

    def parse(self, code, ref_map=None, ir_map=None):
        # BUGFIX: avoid mutable default arguments; build fresh dicts per call.
        if ref_map is None:
            ref_map = {}
        if ir_map is None:
            ir_map = {}
        return Env.hail().expr.ir.IRParser.parse_blockmatrix_ir(code, ref_map, ir_map)
| StarcoderdataPython |
1614397 | import operator
import math
import BeatFrequentPick
import PatternPredictor
import justRock
import rps
import random
from pprint import pprint
# Debug tracing toggle; the second assignment wins, so tracing is off by
# default — flip by deleting/commenting the second line.
Debug = True
Debug = False
# opcode
scoreBufferWin = 0   # marker appended to a predictor's rolling buffer on a win/tie
scoreBufferLost = 1  # marker appended on a loss
# implementations
# Rolling-window scoring toggle; last assignment wins (disabled).
useScoreBuffer = True
useScoreBuffer = False
scoreReset = False # perform badly
# Last assignment wins: predictor scores ARE reset between opponents.
scoreReset = True
class Predictor:
    """Wraps one prediction module and tracks its running win/loss record."""

    def __init__(self, module, variant, name=""):
        # *module* is a class; it is instantiated here with its *variant*.
        if name == "":
            name = "%s [%s]" % (module.__name__, str(variant))
        self.name = name
        self.module = module(variant)
        # NOTE(review): this instance attribute shadows the play() method
        # defined below, so that method definition is effectively dead code.
        self.play = self.module.play
        self.variant = variant
        self.scoreWins = 0
        self.scoreLosts = 0
        self.totalTurns = 0
        self.scoreBuffer = []        # rolling window of recent win/loss markers
        self.scoreBufferSize = 300   # window length kept by addWins/addLosts
        self.reset()

    def addWins(self, points):
        """Credit *points* wins and push win markers into the rolling buffer."""
        self.scoreWins += points
        self.scoreBuffer.extend([scoreBufferWin] * points)
        self.scoreBuffer = self.scoreBuffer[-self.scoreBufferSize:]

    def addLosts(self, points):
        """Credit *points* losses and push loss markers into the rolling buffer."""
        self.scoreLosts += points
        self.scoreBuffer.extend([scoreBufferLost] * points)
        self.scoreBuffer = self.scoreBuffer[-self.scoreBufferSize:]

    def reset(self):
        """Clear per-match state; full score history only when scoreReset is on."""
        if scoreReset:
            self.scoreBuffer = []
            self.scoreWins = 0
            self.scoreLosts = 0
            self.totalTurns = 0
        self.moveLastTurn = 0
        self.confidence = 0
        self.rankingConfidence = 0

    def update(self):
        """Advance the turn counter and let the wrapped module observe history."""
        self.totalTurns += 1
        self.module.update()

    def play(self):
        # Unreachable in practice: shadowed by the instance attribute set in
        # __init__; kept for interface documentation.
        return self.module.play()
class PredictorSelector:
"""PredictorSelector contains all predictors to be used"""
    def __init__(self, dna):
        """Instantiate the predictors named in dna.predictors.

        Each entry is either 'name' or 'name <int variant>'; unknown names
        and the literal 'none' are skipped.
        """
        Predictors = []
        for predictor in dna.predictors:
            if predictor == "none":
                continue
            if predictor.find(" ") > 0:
                # Predictor has variant
                name, value = predictor.split(" ")
                value = int(value)
            else:
                # Predictor has no variant
                name, value = predictor, None
            p = None
            if name == "pp":
                # Pattern predictor: variant is the comma-joined list "1,..,N".
                variant = ",".join([str(s + 1) for s in range(value)])
                name = "Pattern Predictor [%i]" % (value)
                p = Predictor(module=PatternPredictor.PatternPredictor, variant=variant, name=name)
            elif name == "mbfp":
                p = Predictor(module=BeatFrequentPick.MBFP, variant=value)
            elif name == "rock":
                p = Predictor(module=justRock.Rock, variant=None)
            if p != None:
                Predictors.append(p)
        self.Predictors = Predictors
        self.reset()
def reset(self):
self.LastPredictor = None
self.LastYomiLayer = 0
# note: resetting against each AI seems to give a better rank. study this further
for predictor in self.Predictors:
predictor.reset()
    def update(self):
        """Refresh every predictor's score from the outcome of the last turn."""
        self._updateScore()
    def _updateScore(self):
        """Score last turn's outcome for the chosen predictor and, hypothetically,
        for every predictor that was not chosen (as if its move had been played
        through the same yomi layer)."""
        currentTurn = rps.getTurn()
        if currentTurn == 0: return
        myMoveLastTurn = rps.myHistory(currentTurn)
        enemyMoveLastTurn = rps.enemyHistory(currentTurn)
        # update predictor used last turn
        if self.LastPredictor:
            predictor = self.LastPredictor
            # Moves are 0/1/2; (enemy + 1) % 3 is the move that beats the enemy.
            victory = (myMoveLastTurn == (enemyMoveLastTurn + 1) % 3)
            tie = (myMoveLastTurn == enemyMoveLastTurn)
            lost = (myMoveLastTurn == (enemyMoveLastTurn - 1) % 3)
            if victory:
                predictor.addWins(1)
            elif tie:
                # A tie is counted as a win here (deliberate scoring choice).
                predictor.addWins(1)
                predictor.addLosts(0)
            elif lost:
                predictor.addLosts(1)
        # update the rest of the predictors that they should have gained if they were chosen
        for predictor in self.Predictors:
            if self.LastPredictor == predictor:
                continue
            #predictor.score *= 0.9
            # Reconstruct the move this predictor would have played, shifted
            # by the yomi layer used last turn (-1 means no shift beyond +1).
            if self.LastYomiLayer == -1:
                myMoveLastTurn = (predictor.moveLastTurn + 1) % 3
            else:
                myMoveLastTurn = (predictor.moveLastTurn + self.LastYomiLayer + 1) % 3
            victory = (myMoveLastTurn == (enemyMoveLastTurn + 1) % 3)
            tie = (myMoveLastTurn == enemyMoveLastTurn)
            lost = (myMoveLastTurn == (enemyMoveLastTurn - 1) % 3)
            if victory:
                predictor.addWins(1)
            elif tie:
                predictor.addWins(1)
                predictor.addLosts(0)
            elif lost:
                predictor.addLosts(1)
        if Debug:
            # Interactive debug dump of every predictor's hypothetical result.
            print("Turn:", currentTurn - 1)
            print("Enemy Move last turn:", enemyMoveLastTurn)
            print(" " * 25 + "move layeredMove score confidence ranking")
            #for predictor in reversed(sorted(self.Predictors, key=operator.attrgetter('rankingConfidence'))):
            for predictor in sorted(self.Predictors, key=operator.attrgetter('rankingConfidence')):
                if self.LastYomiLayer == -1:
                    myMoveLastTurn = (predictor.moveLastTurn + 1) % 3
                else:
                    myMoveLastTurn = (predictor.moveLastTurn + self.LastYomiLayer + 1) % 3
                victory = (myMoveLastTurn == (enemyMoveLastTurn + 1) % 3)
                tie = (myMoveLastTurn == enemyMoveLastTurn)
                lost = (myMoveLastTurn == (enemyMoveLastTurn - 1) % 3)
                print("%s: %i %i (+%i/-%i) %.2f %f" %
                      (predictor.name.ljust(24), predictor.moveLastTurn, myMoveLastTurn, predictor.scoreWins, predictor.scoreLosts, predictor.confidence, predictor.rankingConfidence), end="")
                if victory:
                    print(" win", end="")
                elif tie:
                    print(" tie", end="")
                elif lost:
                    print(" lost", end="")
                if predictor == self.LastPredictor:
                    print (" **", end="")
                print("")
            input()
    def getPrediction(self, dna):
        """
        1. run each predictor.
        2. select the predictors with the highest confidence and score
        3. return the highest ranking

        Returns (move, confidence) for this turn.
        """
        # 1. run each predictor.
        #scoreSorted = sorted(self.Predictors, key=operator.attrgetter('score'))
        scoreSorted = self.Predictors
        chosenPredictor = None
        self.LastPredictor = None
        for i, predictor in enumerate(scoreSorted):
            predictor.update()
            move, confidence = predictor.play()
            # Remember each predictor's proposal so ranking/scoring can use it.
            predictor.moveLastTurn = move
            #confidence = round(confidence, 2) # round to the nearest 2 decimals
            #if confidence > 0.9: confidence = 0.9
            predictor.confidence = confidence
        #2. select the predictors with the highest confidence and score
        move, confidence = self.getHighestRank(dna)
        # predictor = self.LastPredictor
        # print("%s: %i (+%i/-%i) %.2f %f" % (predictor.name.ljust(24), predictor.moveLastTurn,predictor.scoreWins, predictor.scoreLosts, predictor.confidence, predictor.rankingConfidence))
        #3. return the highest ranking
        return move, confidence
def getHighestRank(self, dna):
if dna.predictor_ranking == "wilson-high":
chosenPredictor, rankRating = self.getHighestRank_LowerWilson(higherBound = True)
elif dna.predictor_ranking == "wilson-low":
chosenPredictor, rankRating = self.getHighestRank_LowerWilson(higherBound = False)
# elif dna.predictor_ranking == "toilet":
# chosenPredictor, rankRating = self.getHighestRank_Toilet()
elif dna.predictor_ranking == "naive":
chosenPredictor, rankRating = self.getHighestRank_Naive()
else:
chosenPredictor, rankRating = None, 0
return 0, 0
self.LastPredictor = chosenPredictor
move = chosenPredictor.moveLastTurn
predictorConfidence = chosenPredictor.confidence
confidence = rankRating
return move, confidence
    def getHighestRank_LowerWilson(self, higherBound = True):
        """
        Get the highest rank using "lower bound of Wilson score confidence interval for a Bernoulli parameter"
        http://www.evanmiller.org
        How Not To Sort By Average Rating.htm
        https://news.ycombinator.com/item?id=3792627

        Returns (chosen_predictor, rating).  When higherBound is True the
        upper Wilson bound is used as the rating, otherwise the lower bound.
        """
        if len(self.Predictors) == 1:
            # there is only one predictor. choose that immediately
            predictor = self.Predictors[0]
            return (predictor, predictor.confidence)
        # grab the top 3 wins, top 3 wins-lost, top 3 confidences
        # maxWins = sorted(self.Predictors, key=lambda i: i.scoreWins)
        # maxDiff = sorted(self.Predictors, key=lambda i: i.scoreWins - i.scoreLosts)
        # maxConfidence = sorted(self.Predictors, key=lambda i: i.confidence)
        # grab the top predictors by wins, diffs and confidence.
        # on test, this has worse effect on ranking. (need more testing for confirmation)
        filteredPredictors = self.Predictors # no union
        # warning: set is non-deterministic
        #filteredPredictors = set(maxWins[:3]) | set(maxDiff[:3]) | set(maxConfidence[:3]) # union
        #filteredPredictors = set(maxWins[:5]) | set(maxDiff[:5]) | set(maxConfidence[:5]) # union
        #filteredPredictors = list(filteredPredictors)
        ##############
        ##todo: add treshold instead?
        #########
        # Compute a Wilson-score rating for every predictor with at least one
        # positive observation.
        predictorScores = []
        for i, predictor in enumerate(filteredPredictors):
            if useScoreBuffer == False:
                positiveRatings = predictor.scoreWins
                negativeRatings = predictor.scoreLosts
                totalRatings = predictor.totalTurns
                totalRatings = positiveRatings + negativeRatings
            else:
                positiveRatings = predictor.scoreBuffer.count(scoreBufferWin)
                negativeRatings = predictor.scoreBuffer.count(scoreBufferLost)
                totalRatings = len(predictor.scoreBuffer)
                totalRatings = positiveRatings + negativeRatings
            confidence = predictor.confidence
            # experiment: what happens if we use our score as confidence in self?
            # if confidence >= 1: # possible DNA
            #     predictorScores.append((1.0, predictor))
            #     continue
            if positiveRatings <= 0 or totalRatings <= 0:
                continue
            # First branch (always taken): use the C-side rps.binconf.  The
            # dead else branch keeps the equivalent pure-Python computation.
            if 1:
                #confidence = 1 - confidence
                maxPredictionRating = 0.99 # possible DNA
                #maxPredictionRating = 1 # possible DNA
                if confidence > maxPredictionRating: confidence = maxPredictionRating
                if confidence < 0.0: confidence = 0.0
                ratings = rps.binconf(positiveRatings, negativeRatings, confidence)
                #ratings = binconf(positiveRatings, negativeRatings, confidence)
                if higherBound:
                    rating = float(ratings[1])
                else:
                    rating = float(ratings[0])
                #rating += (ratings[1] - ratings[0]) / 2
                if math.isnan(rating): rating = 0
                rating = round(rating,3) # fix for conversion from C float to Python float
            else:
                maxPredictionRating = 0.99 # possible DNA
                #maxPredictionRating = 1 # possible DNA
                if confidence > maxPredictionRating: confidence = maxPredictionRating
                if confidence < 0.0: confidence = 0.0
                #z = 1.96 # hardcorded for confidence=95%
                #z = 1.0 # 1.44=85% 1.96=95%
                p = 1 - 0.5 * (1 - confidence)
                z = cached_normcdfi(p)
                #z = rps.normcdfi(p)
                phat = float(positiveRatings) / totalRatings
                n = totalRatings
                rating = (phat + z*z/(2*n) - z * math.sqrt((phat*(1-phat)+z*z/(4*n))/n))/(1+z*z/n)
            #rating = round(rating, 3) # round to the nearest 3 decimals. experiment
            predictor.rankingConfidence = rating
            predictorScores.append((rating, predictor))
        if len(predictorScores) > 1:
            # filter out predictors that does not tie with the maximum rating, for optimization purposes
            maxRating = max(predictorScores, key=lambda i: i[0])[0]
            p = [p for p in predictorScores if p[0] == maxRating]
            # NOTE(review): this sanity check is inert — it compares a
            # (rating, predictor) tuple to a float (always unequal) and then
            # asserts a non-empty string literal (always true, never fires).
            if predictorScores[0] != maxRating:
                assert("Something is wrong. We filtered out predictions that is not the maximum but we got some here")
            predictorScores = p
        elif len(predictorScores) == 1:
            rating, chosenPredictor = predictorScores[0]
            return chosenPredictor, rating
        else:
            # Nobody has a positive record yet: pick uniformly at random.
            random = rps.random() % len(filteredPredictors)
            chosenPredictor = filteredPredictors[random]
            rating = 0
            return chosenPredictor, rating
        # there are multiple predictors with the same rating.
        # let's choose the one with the biggest score (positive - negative)
        if useScoreBuffer == False:
            highestScorers = max(predictorScores, key=lambda i: i[1].scoreWins)
        else:
            highestScorers = max(predictorScores, key=lambda i: i[1].scoreBuffer.count(scoreBufferWin))
        predictorScores = [p for p in predictorScores if p[0] == highestScorers[0]]
        # tally the moves and choose the move with the most tally
        tally = [0, 0, 0]
        for p in predictorScores:
            # tally[p[1].moveLastTurn] += 1
            if p[1].moveLastTurn == 0: tally[0] += 1
            if p[1].moveLastTurn == 1: tally[1] += 1
            if p[1].moveLastTurn == 2: tally[2] += 1
        # let's choose a move at random between them
        # Filter predictorScores to only include the predictors with the maximum tally.
        maxTally = max(tally)
        talliedScorers = []
        if tally[0] == maxTally:
            rocks = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 0]
        if tally[1] == maxTally:
            papers = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 1]
        if tally[2] == maxTally:
            scissors = [talliedScorers.append(p) for p in predictorScores if p[1].moveLastTurn == 2]
        if len(talliedScorers) == 1:
            # in practice, this doesn't happen, but we put in this option to try to minimize bugs
            rating, chosenPredictor = talliedScorers[0]
        else:
            # play the move with the highest score
            finalChoice = None
            if tally[0] and tally[0] > tally[1] and tally[0] > tally[2]:
                Rmoves = [p for p in talliedScorers if p[1].moveLastTurn == 0]
                finalChoice = Rmoves[0]
            elif tally[1] and tally[1] > tally[0] and tally[1] > tally[2]:
                Pmoves = [p for p in talliedScorers if p[1].moveLastTurn == 1]
                finalChoice = Pmoves[0]
            elif tally[2] and tally[2] > tally[0] and tally[2] > tally[1]:
                Smoves = [p for p in talliedScorers if p[1].moveLastTurn == 2]
                finalChoice = Smoves[0]
            else:
                # there are still ties so we choose at random
                random = rps.random() % len(talliedScorers)
                finalChoice = talliedScorers[random]
            chosenPredictor = finalChoice[1]
            rating = finalChoice[0]
        if Debug:
            currentTurn = rps.getTurn()
            print("currentTurn", currentTurn)
            for p in talliedScorers:
                print ("%s (%i) Wilson Rating: %.3f. Confidence: %.3f Score +%i/-%i" % (p[1].name, p[1].moveLastTurn, p[0], p[1].confidence, p[1].scoreWins, p[1].scoreLosts))
            input()
        return chosenPredictor, rating
    def getHighestRank_Toilet(self):
        """Get the highest rank using a TOILET algo (secretary-problem style:
        skip the first ~37% after a shuffle, then take the first candidate
        beating the best seen so far)."""
        # filter out low confidences
        #maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))
        #p = [p for p in self.Predictors if p.confidence == maxConfidence]
        p = self.Predictors
        # NOTE(review): if p is empty, chosenPredictor is never bound and the
        # final line raises UnboundLocalError; if len(p) is small, drop can be
        # <= 0, making initial empty and max() raise — confirm callers
        # guarantee at least two predictors.
        if len(p) == 1:
            # only one predictor has high confidence
            chosenPredictor = p[0]
        elif len(p) > 1:
            random.shuffle(p, random = rps.randomRange)
            # drop the first 37% and grab the best
            drop = round(len(p) * 0.37) - 1
            initial = p[:drop]
            maxConfidence = max(initial, key=operator.attrgetter('confidence'))
            maxConfidence = maxConfidence.confidence
            toCheck = p[drop:]
            for p in toCheck:
                if p.confidence >= maxConfidence:
                    chosenPredictor = p
                    break
            else:
                # No later candidate beat the benchmark: settle for the last.
                chosenPredictor = toCheck[-1]
        rankConfidence = chosenPredictor.confidence
        return chosenPredictor, rankConfidence
def getHighestRank_Naive(self):
"""Get the highest rank using a naive algo"""
# filter out low confidences
maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))
p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]
if len(p) == 1:
# only one predictor has high confidence
chosenPredictor = p[0]
elif len(p) > 1:
# many predictors has high confidence. look for highest wins
maxScore = max(p, key=operator.attrgetter('scoreWins'))
# maxScore = 0
# for pred in p:
# maxScore = max(maxScore, pred.scoreWins - pred.scoreLosts)
predictors = p
p = [p for p in predictors if p.scoreWins >= maxScore.scoreWins]
if len(p) == 1:
chosenPredictor = p[0]
elif len(p) > 1:
# there are ties. look for lowest losts
maxScore = min(p, key=operator.attrgetter('scoreLosts'))
predictors = p
p = [p for p in predictors if p.scoreLosts == maxScore]
if len(p) == 1:
chosenPredictor = p[-1]
elif len(p) > 1:
# choose at random
random = rps.random() % len(p)
chosenPredictor = p[random]
if len(p) == 0:
maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))
p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]
random = rps.random() % len(p)
chosenPredictor = p[random]
else:
# confidences are low. look for highest wins
maxScore = max(self.Predictors, key=operator.attrgetter('scoreWins'))
p = [p for p in self.Predictors if p.scoreWins == maxScore]
if len(p) == 1:
chosenPredictor = p[0]
elif len(p) > 1:
# choose at random
random = rps.random() % len(p)
chosenPredictor = p[random]
else:
# choose at random
random = rps.random() % len(self.Predictors)
chosenPredictor = self.Predictors[random]
if Debug:
maxScore = max([p.scoreWins for p in self.Predictors])
print("max score: %f " % (maxScore), end="")
maxScore = max([p.confidence for p in self.Predictors])
print("max confidence: %f " % (maxScore), end="")
print("chosen predictor: %s" % (chosenPredictor.name))
#input()
rankConfidence = chosenPredictor.confidence
return chosenPredictor, rankConfidence
# http://stackoverflow.com/questions/10029588/python-implementation-of-the-wilson-score-interval
def binconf(p, n, c=0.95):
    '''
    Calculate a binomial confidence interval from counts of positive and
    negative events, using the Wilson score and an approximate inverse of
    the normal cumulative density function.

    Parameters
    ----------
    p : int
        number of positive events observed
    n : int
        number of negative events observed
    c : optional, [0,1]
        confidence percentage. e.g. 0.95 means 95% confident the probability
        of success lies between the 2 returned values

    Returns
    -------
    (theta_low, theta_high) : tuple of float
        lower and upper bounds of the confidence interval
    '''
    positives, negatives = float(p), float(n)
    total = positives + negatives
    if total == 0.0:
        # No observations at all: the success rate is completely unknown.
        return (0.0, 1.0)
    phat = positives / total
    z = normcdfi(1 - 0.5 * (1 - c))
    shrink = 1.0 / (1.0 + z * z / total)
    center = phat + z * z / (2 * total)
    margin = z * math.sqrt(phat * (1 - phat) / total + z * z / (4 * total * total))
    return (shrink * (center - margin), shrink * (center + margin))
def erfi(x):
    """Approximate the inverse error function (Winitzki-style formula)."""
    a = 0.147  # MAGIC!!!
    log_term = math.log(1 - x * x)
    outer = 2.0 / (math.pi * a) + log_term / 2.0
    inner = math.sqrt(outer * outer - log_term / a) - outer
    return sign(x) * math.sqrt(inner)
def sign(x):
    """Signum: 1 for positive, -1 for negative, 0 for zero.

    Falls through (returning None) for NaN, matching the original behavior.
    """
    if x > 0:
        return 1
    if x < 0:
        return -1
    if x == 0:
        return 0
def normcdfi(p, mu=0.0, sigma2=1.0):
    """Inverse CDF (quantile function) of a normal distribution."""
    if mu != 0.0 or sigma2 != 1.0:
        # Shift and scale the standard-normal quantile.
        return mu + math.sqrt(sigma2) * normcdfi(p)
    return math.sqrt(2) * erfi(2 * p - 1)
# Cache of previously computed quantiles, keyed by (p, mu, sigma2).
# (Previously a list of result tuples scanned linearly on every call.)
CacheForNormCDFISet = {}

def cached_normcdfi(p, mu=0.0, sigma2=1.0):
    """Memoized wrapper around normcdfi.

    Replaces the former O(n) linear scan over a growing result list with an
    O(1) dict lookup; results for repeated arguments are identical.
    """
    key = (p, mu, sigma2)
    result = CacheForNormCDFISet.get(key)
    if result is None:
        # Key not cached yet: compute once and remember it.
        result = normcdfi(p, mu, sigma2)
        CacheForNormCDFISet[key] = result
    return result
from .nbplot import main

if __name__ == "__main__":
    # Only launch when executed as the package entry point, not on import.
    main()
| StarcoderdataPython |
try:
    from setuptools import setup, find_packages
except ImportError:
    # BUGFIX: the fallback imported only setup, but find_packages is used
    # unconditionally below, so the distutils path crashed with NameError.
    from distutils.core import setup

    def find_packages():
        """Minimal stand-in for setuptools.find_packages.

        NOTE(review): 'weather' matches the console-script entry point
        below; confirm it is the actual package directory name.
        """
        return ['weather']

# (removed a leftover debug print of find_packages())
setup(
    name="Weather",
    version='1.0.2',
    description='A wrapper for the Weather Underground Rest API.',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    url='https://github.com/paris3200/wunderapi',
    packages=find_packages(),
    install_requires=['Click', 'Requests', 'terminaltables'],
    test_suite='nose.collector',
    entry_points={
        'console_scripts': ['weather = weather.cli:cli'],
    }
)
| StarcoderdataPython |
6469550 | from __future__ import division, absolute_import, print_function
import os
import json
import pickle
from flask import Flask, request, jsonify, send_from_directory, send_file, \
render_template, redirect, url_for, abort
from tinyrecord import transaction
from functools import wraps
from . import stats, casting, utils
from io import BytesIO
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
app = Flask(__name__)
def only_admin_allowlist(f):
    """Decorator: serve the view only to remote addresses on the admin allowlist,
    answering 403 Forbidden to everyone else."""
    @wraps(f)
    def wrapped(*args, **kwargs):
        if request.remote_addr not in app.config['admin_allowlist']:
            return abort(403)
        return f(*args, **kwargs)
    return wrapped
@app.route('/')
@app.route('/<path:url>')
def home(url='index.html'):
    """Serve the static webMUSHRA front-end files (index.html by default)."""
    return send_from_directory(app.config['webmushra_dir'], url)
@app.route('/service/write.php', methods=['POST'])
@app.route('/<testid>/collect', methods=['POST'])
@app.route('/collect', methods=['POST'])
def collect(testid=''):
    """Persist a submitted webMUSHRA session into the collection named by
    the payload's testId; respond with a JSON success/error envelope."""
    if request.headers['Content-Type'].startswith(
        'application/x-www-form-urlencoded'
    ):
        try:
            db = app.config['db']
            payload = json.loads(request.form['sessionJSON'])
            payload = casting.cast_recursively(payload)
            insert = casting.json_to_dict(payload)
            collection = db.table(payload['trials'][0]['testId'])
            with transaction(collection):
                inserted_ids = collection.insert_multiple(insert)
            # BUGFIX: removed a leftover debug print of the inserted ids.
            return jsonify({
                'error': False,
                'message': "Saved as ids %s" % ','.join(map(str, inserted_ids))
            })
        except Exception as e:
            # Broad catch is deliberate: the endpoint must always answer JSON.
            return jsonify({
                'error': True,
                'message': "An error occurred: %s" % str(e)
            })
    else:
        return "415 Unsupported Media Type", 415
@app.route('/admin/')
@app.route('/admin/list')
@only_admin_allowlist
def admin_list():
    """Render the admin overview: one row per non-empty result collection,
    with participant count and timestamp of the latest submission."""
    db = app.config['db']
    collection_names = db.tables()
    collection_dfs = [
        casting.collection_to_df(db.table(name)) for name in collection_names
    ]
    # BUGFIX: removed a leftover debug print of the full dataframe list.
    collections = [
        {
            'id': name,
            'participants': len(df['questionaire', 'uuid'].unique()),
            'last_submission': df['wm', 'date'].max(),
        } for name, df in zip(collection_names, collection_dfs)
        if len(df) > 0
    ]
    configs = utils.get_configs(
        os.path.join(app.config['webmushra_dir'], "configs")
    )
    return render_template(
        "admin/list.html",
        collections=collections,
        configs=configs
    )
@app.route('/admin/delete/<testid>')
@only_admin_allowlist
def admin_delete(testid):
    """Drop an entire result collection, then return to the admin list."""
    collection = app.config['db'].table(testid)
    collection.drop()
    return redirect(url_for('admin_list'))
@app.route('/admin/info/<testid>/')
@only_admin_allowlist
def admin_info(testid):
    """Show which config file basenames produced submissions for one test."""
    collection = app.config['db'].table(testid)
    df = casting.collection_to_df(collection)
    try:
        configs = df['wm']['config'].unique().tolist()
    except KeyError:
        # Collection may be empty or lack the ('wm', 'config') column.
        configs = []
    configs = map(os.path.basename, configs)
    return render_template(
        "admin/info.html",
        testId=testid,
        configs=configs
    )
@app.route('/admin/latest/<testid>/')
@only_admin_allowlist
def admin_latest(testid):
    """Return the most recent submission of a collection as JSON.

    Responds 404 when the collection is empty (previously an unhandled
    IndexError / 500), and uses a single O(n) max() instead of a full sort.
    """
    collection = app.config['db'].table(testid)
    records = collection.all()
    if not records:
        return abort(404)
    latest = max(records, key=lambda x: x['date'])
    return json.dumps(latest)
@app.route('/admin/stats/<testid>/<stats_type>')
@only_admin_allowlist
def admin_stats(testid, stats_type='mushra'):
    """Render statistics for one test; currently only 'mushra' is supported,
    any other stats_type yields a 404 page."""
    collection = app.config['db'].table(testid)
    df = casting.collection_to_df(collection)
    # Flatten the hierarchical (group, field) columns into single-level names.
    df.columns = utils.flatten_columns(df.columns)
    # analyse mushra experiment
    try:
        if stats_type == "mushra":
            return stats.render_mushra(testid, df)
    except ValueError as e:
        # Bad/insufficient data for the analysis: show the error page.
        return render_template(
            'error/error.html', type="Value", message=str(e)
        )
    return render_template('error/404.html'), 404
@app.route(
    '/admin/download/<testid>.<filetype>',
    defaults={'show_as': 'download'})
@app.route(
    '/admin/download/<testid>/<statstype>.<filetype>',
    defaults={'show_as': 'download'})
@app.route(
    '/download/<testid>/<statstype>.<filetype>',
    defaults={'show_as': 'download'})
@app.route(
    '/download/<testid>.<filetype>',
    defaults={'show_as': 'download'})
@app.route(
    '/admin/show/<testid>.<filetype>',
    defaults={'show_as': 'text'})
@app.route(
    '/admin/show/<testid>/<statstype>.<filetype>',
    defaults={'show_as': 'text'})
@only_admin_allowlist
def download(testid, show_as, statstype=None, filetype='csv'):
    """Export one result collection as csv, json, html or pickle, either
    inline ('show' routes) or as a file attachment ('download' routes)."""
    allowed_types = ('csv', 'pickle', 'json', 'html')
    if show_as == 'download':
        as_attachment = True
    else:
        as_attachment = False
    if filetype not in allowed_types:
        return render_template(
            'error/error.html',
            type="Value",
            message="File type must be in %s" % ','.join(allowed_types)
        )
    if filetype == "pickle" and not as_attachment:
        # Binary pickle makes no sense rendered inline.
        return render_template(
            'error/error.html',
            type="Value",
            message="Pickle data cannot be viewed"
        )
    collection = app.config['db'].table(testid)
    df = casting.collection_to_df(collection)
    if statstype is not None:
        # subset by statstype
        df = df[df[('wm', 'type')] == statstype]
    # Merge hierarchical columns
    if filetype not in ("pickle", "html"):
        df.columns = utils.flatten_columns(df.columns.values)
    if len(df) == 0:
        return render_template(
            'error/error.html',
            type="Value",
            message="Data Frame was empty"
        )
    # Serialize into an in-memory buffer (text or bytes per format).
    if filetype == "csv":
        # We need to escape certain objects in the DF to prevent Segfaults
        mem = StringIO()
        casting.escape_objects(df).to_csv(
            mem,
            sep=";",
            index=False,
            encoding='utf-8'
        )
    elif filetype == "html":
        mem = StringIO()
        df.sort_index(axis=1).to_html(mem, classes="table table-striped")
    elif filetype == "pickle":
        mem = BytesIO()
        pickle.dump(df, mem)
    elif filetype == "json":
        mem = StringIO()
        # We need to escape certain objects in the DF to prevent Segfaults
        casting.escape_objects(df).to_json(mem, orient='records')
    mem.seek(0)
    # send_file needs a byte stream; re-encode text buffers except for
    # inline HTML, which is rendered through a template below.
    if (as_attachment or filetype != "html") and not isinstance(mem, BytesIO):
        mem2 = BytesIO()
        mem2.write(mem.getvalue().encode('utf-8'))
        mem2.seek(0)
        mem = mem2
    if as_attachment:
        return send_file(
            mem,
            attachment_filename="%s.%s" % (testid, filetype),
            as_attachment=True,
            cache_timeout=-1
        )
    else:
        if filetype == "html":
            return render_template('admin/table.html', table=mem.getvalue())
        else:
            return send_file(
                mem,
                mimetype="text/plain",
                cache_timeout=-1
            )
@app.context_processor
def utility_processor():
    """Expose helper callables to all Jinja templates."""
    def significance_stars(p, alpha=0.05):
        # One glyphicon star per significance level reported by
        # stats.significance_class (zero stars when not significant).
        return ''.join(
            [
                '<span class="glyphicon glyphicon-star small"'
                'aria-hidden="true"></span>'
            ] * stats.significance_class(p, alpha)
        )
    return dict(significance_stars=significance_stars)
@app.template_filter('datetime')
def datetime_filter(value, format='%x %X'):
    """Render a datetime for templates; default format is the locale's date and time."""
    return value.strftime(format)
| StarcoderdataPython |
6414040 | # Copyright (c) 2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from pynq.lib import MicroblazeLibrary
from pynq.lib.pynqmicroblaze.compile import preprocess, checkmodule
from pynq.lib.pynqmicroblaze.bsp import add_module_path
class GroveAdapter:
    """This abstract class controls multiple Grove modules connected to a given adapter.

    Subclasses map an adapter's physical ports to keyword arguments and
    delegate here with the matching ``port_prefix``.
    """
    # Device spec syntax: "<module>" optionally followed by "@<hex I2C address>".
    _module_re = re.compile('([^@]*)(?:@([a-fA-F0-9]+))?')
    def __init__(self, iop, port_prefix, **kwargs):
        """Create a new GroveAdapter object.
        Parameters
        ----------
        iop : IOP the adapter is attached to (e.g. base.PMODA)
        port_prefix : string
            One of "PMOD", "ARDUINO_SEEED", "ARDUINO_DIGILENT"
        kwargs : port name -> device spec string, or a list of specs for one port
        """
        # Collect every Microblaze module named anywhere in the specs.
        modules = set()
        for _, v in kwargs.items():
            if v:
                if type(v) is str:
                    modules.update(self._get_modules(v))
                else:
                    # A list of specs for a single port (e.g. several I2C devices).
                    for spec in v:
                        modules.update(self._get_modules(spec))
        # Fail early if any requested module is unavailable for this IOP.
        for mod in modules:
            if not checkmodule(mod, iop):
                raise RuntimeError(f"Module {mod} not found")
        # Raw interface modules need the shared grove_interfaces helpers.
        if 'i2c' in modules or 'gpio' in modules or 'analog' in modules:
            modules.add('grove_interfaces')
        self._lib = MicroblazeLibrary(iop, modules)
        # Instantiate each requested device and expose it as an attribute
        # named after its port (a list of specs yields a list of devices).
        for k, v in kwargs.items():
            if v is None:
                continue
            port = getattr(self._lib, f'{port_prefix}_{k}')
            try:
                if type(v) is str:
                    setattr(self, k, self._instantiate_device(v, port))
                else:
                    setattr(self, k,
                            [self._instantiate_device(s, port) for s in v])
            except Exception as exc:
                raise RuntimeError(f"Failed to initialise port {k}") from exc
    def _module_basename(self, name):
        # Strip an optional "@<address>" suffix from a device spec.
        return self._module_re.match(name)[1]
    def _get_modules(self, spec):
        # A chained spec ("grove_adc.grove_x") names one module per link.
        return [self._module_basename(p) for p in spec.split('.')]
    def _open_device(self, device, port):
        """Open a single (non-chained) device on *port* and return its handle."""
        details = self._module_re.match(device)
        name = details[1]
        if details[2]:
            # Spec carried an explicit I2C address, e.g. "grove_adc@48".
            if not hasattr(self._lib, f'{name}_open_at_address'):
                raise RuntimeError(
                    f"Module {name} does not support opening at an address")
            return getattr(self._lib, f'{name}_open_at_address')(
                port, int(details[2], 16))
        else:
            # Raw interfaces use the grove-specific open helpers.
            if name == 'i2c':
                return self._lib.i2c_open_grove(port)
            elif name == 'gpio':
                return self._lib.gpio_open_grove(port)
            elif name == 'analog':
                return self._lib.analog_open_grove(port)
            else:
                return getattr(self._lib, f'{name}_open')(port)
    def _instantiate_device(self, spec, port):
        """Open *spec* on *port*; supports one level of chaining through grove_adc."""
        chain = spec.split('.')
        if len(chain) == 1:
            return self._open_device(chain[0], port)
        else:
            if self._module_basename(chain[0]) == 'grove_adc':
                adc = self._open_device(chain[0], port)
                if not hasattr(self._lib, f'{chain[1]}_open_adc'):
                    raise RuntimeError(
                        f'{chain[1]} should not be connected to an ADC')
                return getattr(self._lib, f'{chain[1]}_open_adc')(adc)
            else:
                raise RuntimeError("Only grove_adc can be used in a chain")
class PmodGroveAdapter(GroveAdapter):
    """Controls multiple Grove modules attached to a PMOD-to-Grove adapter."""
    def __init__(self, iop, G1=None, G2=None, G3=None, G4=None):
        """Create a new PmodGroveAdapter object.
        Parameters
        ----------
        iop : name of IOP for adapter, e.g. base.PMODA or base.PMODB
        G1..G4 : device spec for each Grove port (None leaves a port unused)
        Available names of Grove modules for port args:
            'grove_adc', 'grove_buzzer', 'grove_gesture', 'grove_imu',
            'grove_joystick', 'grove_led_stick', 'grove_ledbar',
            'grove_lgcp', 'grove_light', 'grove_line_finder',
            'grove_oled', 'grove_pir', 'grove_potentiometer',
            'grove_relay', 'grove_servo', 'grove_temperature',
            'grove_usranger', 'grove_water_sensor'
        """
        ports = {'G1': G1, 'G2': G2, 'G3': G3, 'G4': G4}
        super().__init__(iop, 'PMOD', **ports)
class ArduinoSEEEDGroveAdapter(GroveAdapter):
    """Controls multiple Grove modules attached to an Arduino SEEED shield."""
    def __init__(self, iop, UART=None, D2=None, D3=None,
                 D4=None, D5=None, D6=None, D7=None,
                 D8=None, A0=None, A1=None, A2=None,
                 A3=None, I2C=None):
        """Create a new ArduinoSEEEDGroveAdapter object.
        Parameters
        ----------
        iop : name of IOP for adapter, e.g. base.ARDUINO
        UART, D2-D8, A0-A3, I2C : device spec per port (each may be a list)
        Available names of Grove modules for port args:
            'grove_adc', 'grove_buzzer', 'grove_gesture', 'grove_imu',
            'grove_joystick', 'grove_led_stick', 'grove_ledbar',
            'grove_lgcp', 'grove_light', 'grove_line_finder',
            'grove_oled', 'grove_pir', 'grove_potentiometer',
            'grove_relay', 'grove_servo', 'grove_temperature',
            'grove_usranger', 'grove_water_sensor'
        """
        ports = {'UART': UART, 'D2': D2, 'D3': D3, 'D4': D4, 'D5': D5,
                 'D6': D6, 'D7': D7, 'D8': D8, 'A0': A0, 'A1': A1,
                 'A2': A2, 'A3': A3, 'I2C': I2C}
        super().__init__(iop, 'ARDUINO_SEEED', **ports)
class ArduinoDIGILENTGroveAdapter(GroveAdapter):
    """Controls multiple Grove modules attached to an Arduino DIGILENT shield."""
    def __init__(self, iop, UART=None, G1=None, G2=None,
                 G3=None, G4=None, G5=None, G6=None,
                 G7=None, A1=None, A2=None, A3=None,
                 A4=None, I2C=None):
        """Create a new ArduinoDIGILENTGroveAdapter object.
        Parameters
        ----------
        iop : name of IOP for adapter, e.g. base.ARDUINO
        UART, G1-G7, A1-A4, I2C : device spec per port (each may be a list)
        Available names of Grove modules for port args:
            'grove_adc', 'grove_buzzer', 'grove_gesture', 'grove_imu',
            'grove_joystick', 'grove_led_stick', 'grove_ledbar',
            'grove_lgcp', 'grove_light', 'grove_line_finder',
            'grove_oled', 'grove_pir', 'grove_potentiometer',
            'grove_relay', 'grove_servo', 'grove_temperature',
            'grove_usranger', 'grove_water_sensor'
        """
        ports = {'UART': UART, 'G1': G1, 'G2': G2, 'G3': G3, 'G4': G4,
                 'G5': G5, 'G6': G6, 'G7': G7, 'A1': A1, 'A2': A2,
                 'A3': A3, 'A4': A4, 'I2C': I2C}
        super().__init__(iop, 'ARDUINO_DIGILENT', **ports)
class GroveG0Adapter(GroveAdapter):
    """Controls a Grove module attached to the G0 (ZU) adapter connector."""
    def __init__(self, iop, G0=None):
        """Create a new GroveG0Adapter object.
        Parameters
        ----------
        iop : name of IOP for adapter, e.g. base.ARDUINO
        G0 : device spec for the single Grove port
        """
        ports = {'G0': G0}
        super().__init__(iop, 'GROVE0', **ports)
class GroveG1Adapter(GroveAdapter):
    """Controls a Grove module attached to the G1 (ZU) adapter connector."""
    def __init__(self, iop, G1=None):
        """Create a new GroveG1Adapter object.
        Parameters
        ----------
        iop : name of IOP for adapter, e.g. base.ARDUINO
        G1 : device spec for the single Grove port
        """
        ports = {'G1': G1}
        super().__init__(iop, 'GROVE1', **ports)
# Register this package's bundled Microblaze C modules ("modules/" next to
# this file) with the BSP module search path.
moddir = os.path.dirname(os.path.realpath(__file__))
add_module_path(os.path.join(moddir, 'modules'))
| StarcoderdataPython |
4816665 | <reponame>ankur-gupta/rain
from setuptools import setup

PACKAGE_NAME = 'rain'

# Read-in the version without importing the package.
# See 3 in https://packaging.python.org/guides/single-sourcing-package-version/
version_file = './{}/version.py'.format(PACKAGE_NAME)
version = {}
try:
    # Python 2
    execfile(version_file, version)
except NameError:
    # Python 3 -- use a context manager so the file handle is closed
    # (the previous `exec(open(version_file).read(), version)` leaked it)
    with open(version_file) as f:
        exec(f.read(), version)

# Read-in the README.md as the long description.
# (read() replaces the old readlines() + ''.join() round-trip)
with open('README.md', 'r') as f:
    readme = f.read()

setup(name=PACKAGE_NAME,
      version=version['__version__'],
      url='https://github.com/ankur-gupta/rain',
      license='MIT',
      author='<NAME>',
      author_email='<EMAIL>',
      description='Template python package',
      long_description=readme,
      long_description_content_type="text/markdown",
      keywords='template, python, package',
      packages=[PACKAGE_NAME,
                '{}.scripts'.format(PACKAGE_NAME),
                '{}.module_three'.format(PACKAGE_NAME)],
      include_package_data=True,  # => if True, you must provide MANIFEST.in
      entry_points='''
        [console_scripts]
        rain_maker=rain.scripts.rain_maker:make_it_rain
    ''',
      classifiers=[
          "License :: OSI Approved :: MIT License",
      ],
      install_requires=['click', 'six', 'numpy', 'future'],
      setup_requires=['pytest-runner'],
      # pytest-cov needed for coverage only
      tests_require=['pytest', 'pytest-cov'],
      zip_safe=True)
# Notes:
# (1) Script installs in user-site as a newly made binary file which
#     points to the script actually in the package.
# (2) Run tests from the repo root:
#     cd $REPO_ROOT
#     python3 -m pytest rain/tests
# (3) Install using pip in user-site/bin:
#     cd $REPO_ROOT
#     pip3 install --user ./
# (4) Designed to work with both python2 and python3 but only tested
#     on python3.
# (5) Run `rain_maker` after install from anywhere (assuming user-site/bin
#     is in the PATH):
#     rain_maker --help
#     rain_maker --times 10
#     rain_maker
| StarcoderdataPython |
4817770 | import setuptools
import shutil
import os
path = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): copying dmm.py into the package happens as an import-time
# side effect of running setup.py, so the distribution always ships the
# latest top-level copy.
shutil.copyfile(f"{path}/dmm.py", f"{path}/dmm/dmm.py")
#shutil.copyfile(f"{path}/dmm.py", f"{path}/dmm/gcn.py")
setuptools.setup(
    name="DMM",
    version="1.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="deep markov model library",
    long_description="deep markov model library",
    long_description_content_type="text/markdown",
    url="https://github.com/clinfo/DeepKF",
    packages=setuptools.find_packages(),
    # Console-script entry points: each maps a command name to module:function.
    entry_points={
        'console_scripts': [
            'dmm = dmm.dmm:main',
            'dmm-plot = dmm.dmm_plot:main',
            'dmm-field-plot = dmm.dmm_field_plot:main',
            'dmm-map = dmm.mapping:main',
        ],
    },
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
| StarcoderdataPython |
45070 | """
A toy example of playing against defined set of bots on Mocsár
Using env "mocsar"-cfg Using 'human_mode'
"""
import rlcard3
# Make environment and enable human mode
env = rlcard3.make('mocsar-cfg', config={'human_mode': True})
# Register agents
agents = {"mocsar_random": 2, "mocsar_min": 2}
env.model.create_agents(agents)
# Reset environment
state = env.reset()
while not env.is_over():
legal_actions = state['legal_actions']
legal_actions.insert(0, 0)
action = input('>> You choose action (integer): ')
if action == '-1':
print('Break the game...')
break
while not action.isdigit() \
or int(action) not in legal_actions:
print('Action illegal...')
action = input('>> Re-choose action (integer): ')
state, reward, done = env.step(int(action))
| StarcoderdataPython |
1742800 | <reponame>nak/pytest_mproc<gh_stars>1-10
import os
import socket
import sys
import time
from multiprocessing import Semaphore, JoinableQueue, Queue, Process
from typing import Any, Dict, Iterator, Union
import pytest
from _pytest.config import _prepareconfig
import pytest_cov.embed
import resource
from pytest_mproc import resource_utilization
from pytest_mproc.data import TestBatch, ResourceUtilization, ResultException, ResultTestStatus, ResultExit
from pytest_mproc.fixtures import Node
from pytest_mproc.utils import BasicReporter
if sys.version_info[0] < 3:
# noinspection PyUnresolvedReferences
from Queue import Empty
else:
# noinspection PyUnresolvedReferences
from queue import Empty
try:
    my_cov = None  # overridden later, after fork
    try:
        # BUG FIX: the attribute is `cleanup`; the previous `cleanupgit`
        # lookup always raised, so the original hook was silently dropped.
        original_cleanup = pytest_cov.embed.cleanup
    except AttributeError:
        original_cleanup = None

    def _cleanup(cov=None):
        # pytest_cov will do special things when tests use multiprocessing, however,
        # here we don't want that processing to take place. (It will otherwise
        # spit out many lines of messaging all of which has no bearing on success
        # of code coverage collection)
        if cov != my_cov and original_cleanup:
            original_cleanup(cov)

    pytest_cov.embed.cleanup = _cleanup
    pycov_present = True
except Exception:
    pycov_present = False
"""maximum time between reporting status back to coordinator"""
MAX_REPORTING_INTERVAL = 1.0 # seconds
class WorkerSession:
    """
    Handles reporting of test status and the like for a single worker process.
    Tests are pulled from a shared JoinableQueue and results are pushed
    (buffered) onto a shared result Queue.
    """
    def __init__(self, index, host: str, port: int, test_q: JoinableQueue, result_q: Queue):
        # Remote workers signal per-test progress on stderr (see _put).
        self._is_remote = host != "127.0.0.1"
        self._host = host
        self._port = port
        self._index = index
        self._name = "worker-%d" % (index + 1)
        self._count = 0  # number of tests executed by this worker
        self._session_start_time = time.time()
        # Results are buffered and flushed in batches for queue efficiency.
        self._buffered_results = []
        self._buffer_size = 20
        self._timestamp = time.time()  # time of last flush
        self._last_execution_time = time.time()
        self._resource_utilization = ResourceUtilization(-1.0, -1.0, -1.0, -1)
        self._reporter = BasicReporter()
        self._global_fixtures: Dict[str, Any] = {}
        self._node_fixtures: Dict[str, Any] = {}
        self._test_q = test_q
        self._result_q = result_q
        self._node_fixture_manager = None
    def _put(self, result: Union[ResultException, ResultExit, ResourceUtilization], timeout=None):
        """
        Append test result data to queue, flushing buffered results to queue at watermark level for efficiency
        :param result: test result data to publish to queue
        :param timeout: max seconds to wait when flushing to the queue (None blocks)
        """
        if self._is_remote and isinstance(result, ResultTestStatus):
            os.write(sys.stderr.fileno(), b'.')
        self._buffered_results.append(result)
        if len(self._buffered_results) >= self._buffer_size or (time.time() - self._timestamp) > MAX_REPORTING_INTERVAL:
            self._flush(timeout)
    def _flush(self, timeout=None):
        """
        flush buffered results out to the queue.
        """
        if self._buffered_results:
            if timeout is not None:
                self._result_q.put(self._buffered_results, timeout)
            else:
                self._result_q.put(self._buffered_results)
            self._buffered_results = []
            self._timestamp = time.time()
    def test_loop(self, session):
        """
        This is where the action takes place.  We override the usual implementation since
        that doesn't support a dynamic generation of tests (whose source is the test Queue
        that it draws from to pick out the next test
        :param session: Where the tests generator is kept
        """
        start_time = time.time()
        rusage = resource.getrusage(resource.RUSAGE_SELF)
        try:
            if pycov_present:
                global my_cov
                try:
                    my_cov = pytest_cov.embed.active_cov
                except:
                    my_cov = None
            if session.testsfailed and not session.config.option.continue_on_collection_errors:
                raise session.Failed("%d errors during collection" % session.testsfailed)
            if session.config.option.collectonly:
                return  # should never really get here, but for consistency
            for test_batch in session.items_generator:
                # test item comes through as a unique string nodeid of the test
                # We use the pytest-collected mapping we squirrelled away to look up the
                # actual _pytest.python.Function test callable
                # NOTE: session.items is an iterator, as set upon construction of WorkerSession
                for test_id in test_batch.test_ids:
                    item = session._named_items[test_id]
                    item.config.hook.pytest_runtest_protocol(item=item, nextitem=None)
                    # very much like that in _pytest.main:
                    try:
                        if session.shouldfail:
                            raise session.Failed(session.shouldfail)
                    except AttributeError:
                        pass  # some version of pytest do not have this attribute
                    if session.shouldstop:
                        raise session.Interrupted(session.shouldstop)
                    # count tests that have been run
                    self._count += 1
                    self._last_execution_time = time.time()
            self._flush()
        finally:
            # Always record this worker's resource usage, even on failure.
            end_usage = resource.getrusage(resource.RUSAGE_SELF)
            time_span = self._last_execution_time - start_time
            self._resource_utilization = resource_utilization(time_span=time_span,
                                                              start_rusage=rusage,
                                                              end_rusage=end_usage)
    @pytest.mark.tryfirst
    def pytest_collection_finish(self, session):
        """
        Invoked once pytest has collected all information about which tests to run.
        Those items are squirrelled away in a different attributes, and a generator
        is put in its place to draw from.
        :param session: the pytest session
        :return: the generator of tests to run
        """
        assert self._test_q is not None
        def generator(test_q: JoinableQueue) -> Iterator[TestBatch]:
            # Draw batches until a falsy sentinel is received; each get is
            # acknowledged with task_done so the queue can be joined.
            test = test_q.get()
            while test:
                test_q.task_done()
                yield test
                test = test_q.get()
            test_q.task_done()
        session.items_generator = generator(self._test_q)
        session._named_items = {item.nodeid: item for item in session.items}
        return session.items
    @classmethod
    def start(cls, index: int, host: str, port: int, start_sem: Semaphore, fixture_sem: Semaphore,
              test_q: JoinableQueue, result_q: Queue, node_port: int) -> Process:
        """
        :param index: index of worker being created
        :param host: host name of main multiprocessing manager
        :param port: port of main mp manqager
        :param start_sem: gating semaphore used to control # of simultaneous connections (prevents deadlock)
        :param fixture_sem: gating semaphore when querying for test fixtures
        :param test_q: Queue to pull from to get next test to run
        :param result_q: Queue to place results
        :param node_port: port node of localhost manager for node-level fixtures, etc
        :return: Process created for new worker
        """
        proc = Process(target=cls.run, args=(index, host, port, start_sem, fixture_sem, test_q, result_q, node_port, ))
        proc.start()
        return proc
    @staticmethod
    def run(index: int, host: str, port: int, start_sem: Semaphore, fixture_sem: Semaphore,
            test_q: JoinableQueue, result_q: Queue, node_port: int) -> None:
        """Worker-process entry point: configure pytest and run the test session."""
        start_sem.acquire()
        worker = WorkerSession(index, host, port, test_q, result_q)
        worker._node_fixture_manager = Node.Manager(as_main=False, port=node_port, name=f"Worker-{index}")
        worker._fixture_sem = fixture_sem
        args = sys.argv[1:]
        config = _prepareconfig(args, plugins=[])
        # unregister terminal (don't want to output to stdout from worker)
        # as well as xdist (don't want to invoke any plugin hooks from another distributed testing plugin if present)
        config.pluginmanager.unregister(name="terminal")
        # register our listener, and configure to let pycov-test know that we are a slave (aka worker) thread
        config.pluginmanager.register(worker, "mproc_worker")
        config.option.mproc_worker = worker
        from pytest_mproc.main import Orchestrator
        worker._client = Orchestrator.Manager(addr=(worker._host, worker._port))
        # pytest-cov expects these slave/worker identifiers to route coverage data.
        workerinput = {'slaveid': "worker-%d" % worker._index,
                       'workerid': "worker-%d" % worker._index,
                       'cov_master_host': socket.gethostname(),
                       'cov_slave_output': os.path.join(os.getcwd(),
                                                        "worker-%d" % worker._index),
                       'cov_master_topdir': os.getcwd()
                       }
        config.slaveinput = workerinput
        config.slaveoutput = workerinput
        try:
            # and away we go....
            config.hook.pytest_cmdline_main(config=config)
        finally:
            config._ensure_unconfigure()
        worker._reporter.write(f"\nWorker-{index} finished\n")
    def pytest_internalerror(self, excrepr):
        """Forward internal pytest errors to the coordinator's result queue."""
        self._put(ResultException(excrepr))
    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report):
        """
        report only status of calls (not setup or teardown), unless there was an error in those
        stash report for later end-game parsing of failures
        :param report: report to draw info from
        """
        self._put(ResultTestStatus(report))
        return True
    @pytest.hookimpl(tryfirst=True)
    def pytest_sessionfinish(self, exitstatus):
        """
        output failure information and final exit status back to coordinator
        :param exitstatus: exit status of the test suite run
        """
        # timeouts on queues sometimes can still have process hang if there is a bottleneck, so use alarm
        self._put(ResultExit(self._index,
                             self._count,
                             exitstatus,
                             self._last_execution_time - self._session_start_time,
                             self._resource_utilization))
        self._flush()
        return True
    @pytest.hookimpl(tryfirst=True)
    def pytest_fixture_setup(self, fixturedef, request):
        """Resolve 'node'- and 'global'-scoped fixtures through the managers,
        caching each value locally so the manager is queried only once."""
        my_cache_key = fixturedef.cache_key(request)
        if not getattr(request.config.option, "mproc_numcores") or fixturedef.scope not in ['node', 'global']:
            return
        if fixturedef.scope == 'node':
            if fixturedef.argname not in self._node_fixtures:
                # Semaphore limits concurrent fixture queries across workers.
                self._fixture_sem.acquire()
                self._node_fixtures[fixturedef.argname] = self._node_fixture_manager.get_fixture(fixturedef.argname)
                self._fixture_sem.release()
            fixturedef.cached_result = (self._node_fixtures.get(fixturedef.argname),
                                        my_cache_key,
                                        None)
            return self._node_fixtures.get(fixturedef.argname)
        if fixturedef.scope == 'global':
            if fixturedef.argname not in self._global_fixtures:
                self._fixture_sem.acquire()
                self._global_fixtures[fixturedef.argname] = self._client.get_fixture(fixturedef.argname)
                self._fixture_sem.release()
            fixturedef.cached_result = (self._global_fixtures.get(fixturedef.argname),
                                        my_cache_key,
                                        None)
            return self._global_fixtures.get(fixturedef.argname)
9709040 | names = [""]
emails = [""] | StarcoderdataPython |
8165252 | """Detects outliers based on Chi-square test
Description:
------------
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
from midgard.gnss.solution_validation import sol_validation
# Where imports
from where.lib import config
# Name of section in configuration
_SECTION = "_".join(__name__.split(".")[-1:])
@plugins.register
def chi2(dset):
    """Detect outliers with a per-epoch chi-square test.
    Args:
        dset: A Dataset containing model data.
    Returns:
        Boolean array; False marks observations to throw away.
    """
    field = config.tech[_SECTION].field.str  # read from config; value unused here
    alpha = config.tech[_SECTION].alpha.float
    num_params = 4  # TODO
    keep_idx = np.ones(dset.num_obs, dtype=bool)
    for epoch in dset.unique("time"):
        idx = dset.filter(time=epoch)
        residuals = dset.residual[idx]
        normalized = (residuals - np.mean(residuals)) / np.std(residuals)
        keep_idx[idx] = sol_validation(normalized, alpha, num_params)
    return keep_idx
| StarcoderdataPython |
5131442 | <reponame>spara/examples
"""Google Cloud Function source code for an ETA messaging app.
Defines a single Cloud Function endpoint, get_demo, which will compute the
estimated travel time to a location. If configured, will also send the result
via SMS.
"""
import os
from datetime import datetime
import googlemaps
import twilio.rest
def get_travel_time(origin, destination, offset):
    """Returns the estimated travel time using the Google Maps API.
    Returns: A string, such as '3 minutes'"""
    api_key = os.getenv("GOOGLE_MAPS_API_KEY", "")
    if api_key == "":
        # No API key configured: return a placeholder instead of calling out.
        return "[ENABLE GOOGLE MAPS TO DETERMINE TRAVEL TIME]"
    client = googlemaps.Client(key=api_key)
    directions = client.directions(
        origin=origin,
        destination=destination,
        mode="driving",
        departure_time=datetime.now())
    seconds = directions[0]["legs"][0]["duration"]["value"]
    minutes = seconds / 60 + offset  # offset is an extra static delay in minutes
    return "%d minutes" % minutes
def send_text(message_body):
    """Sends an SMS using the Twilio API."""
    to_number = os.getenv("TO_PHONE_NUMBER", "")
    from_number = os.getenv("FROM_PHONE_NUMBER", "")
    account_sid = os.getenv("TWILLIO_ACCOUNT_SID", "")
    auth_token = os.getenv("TWILLIO_ACCESS_TOKEN", "")
    # Without full Twilio configuration, return the message as a placeholder.
    if not all((account_sid, auth_token, to_number, from_number)):
        return "[ENABLE TWILIO TO SEND A TEXT]: \n%s" % (message_body)
    client = twilio.rest.Client(account_sid, auth_token)
    client.messages.create(
        to=to_number,
        from_=from_number,
        body=message_body)
    return "Sent text message to %s\n%s" % (to_number, message_body)
def get_demo(request):
    """The Google Cloud Function computing estimated travel time."""
    # Origin comes from the URL-query parameters when both are present.
    lat = request.args.get("lat")
    long = request.args.get("long")
    origin = "%s, %s" % (lat, long) if lat and long else "Pulumi HQ, Seattle, WA"
    destination = os.getenv(
        "DESTINATION",
        "Space Needle, Seattle, WA")
    # Optional static travel-time offset (minutes), e.g. add a fixed 5m delay.
    travel_offset = int(os.getenv("TRAVEL_OFFSET", "0"))
    travel_time_str = get_travel_time(
        origin=origin, destination=destination, offset=travel_offset)
    # Send the message. Returns a summary in the Cloud Function's response.
    message = "Hey! I'm leaving now, I'll be at '%s' to pick you up in about %s." % (
        destination, travel_time_str)
    return send_text(message)
| StarcoderdataPython |
8019207 | <reponame>moran991231/LeetCode-Solutions
class Solution:
    def findAndReplacePattern(self, words, pattern):
        """Return the words matching *pattern* under a bijective letter mapping.

        Two strings match when their characters correspond one-to-one,
        e.g. "mee" matches "abb" while "ccc" does not (mapping must be
        injective in both directions).
        """
        def signature(s):
            # Encode each character by the index of its first occurrence,
            # e.g. "mee" -> [0, 1, 1]; equal signatures <=> isomorphic strings.
            first_seen = {}
            return [first_seen.setdefault(ch, len(first_seen)) for ch in s]
        target = signature(pattern)
        # Length guard: the original indexed words by pattern length and
        # would have raised IndexError on shorter words.
        return [w for w in words
                if len(w) == len(pattern) and signature(w) == target]
| StarcoderdataPython |
152244 | from django.core.exceptions import ValidationError
from django.db import models
from django.urls import reverse
from django.db.models.deletion import CASCADE
from storages.backends.sftpstorage import SFTPStorage
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.models import User
# Create your models here.
SFS = SFTPStorage()
LFS = FileSystemStorage()
def file_size(value):  # add this to some file where you can import it from
    """Validator: reject uploaded files larger than 5 MB."""
    limit = 5242880  # 5 MB in bytes
    if value.size <= limit:
        return
    raise ValidationError('File too large. Size should not exceed 5 MB.')
class UserFile(models.Model):
    """A user-uploaded file stored on the local filesystem backend."""
    CATEGORIES = [('videos', 'videos'), ('audios', 'audios'),
                  ('documents', 'documents'), ('fotos', 'fotos')]
    user = models.ForeignKey(User, on_delete=CASCADE)
    # NOTE(review): upload_to uses a backslash ('files\\') while __str__
    # strips 'files/' -- confirm the intended path separator per platform.
    file = models.FileField(
        upload_to='files\\', storage=LFS, validators=[file_size])
    category = models.CharField(max_length=25, choices=CATEGORIES)
    def get_absolute_url(self):
        """Return the stored file's name (used as its URL path)."""
        return f'{self.file.name}'
    def delete(self, using=None, keep_parents=False):
        """Remove the underlying file from storage, then delete the row."""
        self.file.storage.delete(self.file.name)
        super().delete()
    def __str__(self) -> str:
        # BUG FIX: previously computed the display name but returned the
        # dead literal '(unknown)' -- return the stripped filename instead.
        filename = self.file.name.replace("files/", "")
        return filename
| StarcoderdataPython |
5039353 | import argparse
import pathlib
import xml.etree.ElementTree as ET
import numpy as np
def update_const(xml_input_filepath, bin_input_filepath, bin_output_filepath, node_id, new_value):
    """Print a Const layer's current values and, if *new_value* is given,
    write a copy of the weights file with that layer's payload replaced."""
    dtype_by_name = {'f32': np.float32, 'i32': np.int32, 'i64': np.int64}
    tree = ET.parse(xml_input_filepath)
    for layer in tree.getroot().find('layers').findall('layer'):
        if layer.get('id') != node_id:
            continue
        data_node = layer.find('data')
        offset = int(data_node.get('offset'))
        size = int(data_node.get('size'))
        dtype = dtype_by_name[data_node.get('element_type')]
        # A missing/empty shape attribute means a scalar (one element).
        shape_str = data_node.get('shape') or "1"
        count = np.prod([int(dim) for dim in shape_str.split(',')])
        with open(bin_input_filepath, 'rb') as f:
            weights = f.read()
        old_np_array = np.frombuffer(weights, dtype, count, offset)
        print(f"Old values are: {old_np_array}")
        if new_value:
            new_np_array = np.array([float(v) for v in new_value], dtype=dtype)  # TODO
            # Replacement must occupy exactly the original byte span.
            assert new_np_array.nbytes == size
            print(f"New values are: {new_np_array}")
            with open(bin_output_filepath, 'wb') as f:
                f.write(weights[:offset])
                f.write(new_np_array.tobytes())
                f.write(weights[offset + size:])
def main():
    """CLI entry point: parse arguments and update the chosen Const node."""
    arg_parser = argparse.ArgumentParser(description="Add a new const node to OpenVino model")
    arg_parser.add_argument('xml_input_filepath', type=pathlib.Path)
    arg_parser.add_argument('bin_input_filepath', type=pathlib.Path)
    arg_parser.add_argument('node_id')
    arg_parser.add_argument('values', nargs='*')
    arg_parser.add_argument('--bin_output_filepath', '-o', type=pathlib.Path)
    args = arg_parser.parse_args()
    update_const(args.xml_input_filepath, args.bin_input_filepath,
                 args.bin_output_filepath, args.node_id, args.values)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
54712 | from django import template
register = template.Library()
@register.filter
def next(some_list, current_index):
    """
    Returns the next element of the list using the current index if it exists.
    Otherwise returns an empty string.
    https://docs.djangoproject.com/en/3.0/howto/custom-template-tags/#writing-custom-template-filters
    """
    try:
        return some_list[int(current_index) + 1]
    except (IndexError, ValueError, TypeError):
        # BUG FIX: the bare `except: pass` implicitly returned None, which
        # contradicted the docstring; return '' and only swallow lookup /
        # index-conversion errors.
        return ''
| StarcoderdataPython |
189361 | """Useful fixtures for testing the convention document."""
import pytest
from tests.test_convention_doc import doctypes
@pytest.fixture
def sections():
    """Fixture with each convention section."""
    # Function-scoped: hands each test the module-level SECTIONS list.
    return doctypes.SECTIONS
@pytest.fixture
def subsections():
    """Fixture with each convention subsection."""
    # Function-scoped: hands each test the module-level SUBSECTIONS list.
    return doctypes.SUBSECTIONS
@pytest.fixture
def rules():
    """Fixture with each convention rule."""
    # Function-scoped: hands each test the module-level RULES list.
    return doctypes.RULES
@pytest.fixture(
    scope="module",
    # One parametrized invocation per section; the identifier makes test ids readable.
    params=[pytest.param(section, id=section.identifier) for section in doctypes.SECTIONS],
)
def section(request):
    """Parametrized fixture of each convention section."""
    return request.param
@pytest.fixture(
    scope="module",
    # One parametrized invocation per subsection, id'd by its identifier.
    params=[
        pytest.param(subsection, id=subsection.identifier) for subsection in doctypes.SUBSECTIONS
    ],
)
def subsection(request):
    """Parametrized fixture of each convention subsection."""
    return request.param
@pytest.fixture(
    scope="module",
    # One parametrized invocation per rule, id'd by its identifier.
    params=[pytest.param(rule, id=rule.identifier) for rule in doctypes.RULES],
)
def rule(request):
    """Parametrized fixture of each convention rule."""
    return request.param
@pytest.fixture(
    scope="module",
    # Flattens all rules' codeblocks; ids combine rule id and block descriptor.
    params=[
        pytest.param(codeblock, id=f"{rule.identifier}-{codeblock.descriptor}")
        for rule in doctypes.RULES
        for codeblock in rule.codeblocks
    ],
)
def codeblock(request):
    """Parametrized fixture of each convention codeblock."""
    return request.param
@pytest.fixture(
    scope="module",
    # Same flattening as `codeblock`, restricted to blocks marked "Bad".
    params=[
        pytest.param(codeblock, id=f"{rule.identifier}-{codeblock.descriptor}")
        for rule in doctypes.RULES
        for codeblock in rule.codeblocks
        if codeblock.descriptor == "Bad"
    ],
)
def bad_codeblock(request):
    """Parametrized fixture of each convention codeblock marked "Bad"."""
    return request.param
@pytest.fixture(
    scope="module",
    # Same flattening as `codeblock`, restricted to blocks marked "Good".
    params=[
        pytest.param(codeblock, id=f"{rule.identifier}-{codeblock.descriptor}")
        for rule in doctypes.RULES
        for codeblock in rule.codeblocks
        if codeblock.descriptor == "Good"
    ],
)
def good_codeblock(request):
    """Parametrized fixture of each convention codeblock marked "Good"."""
    return request.param
@pytest.fixture(
    scope="module",
    # Same flattening as `codeblock`, restricted to blocks marked "Best".
    params=[
        pytest.param(codeblock, id=f"{rule.identifier}-{codeblock.descriptor}")
        for rule in doctypes.RULES
        for codeblock in rule.codeblocks
        if codeblock.descriptor == "Best"
    ],
)
def best_codeblock(request):
    """Parametrized fixture of each convention codeblock marked "Best"."""
    return request.param
| StarcoderdataPython |
285346 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
import subprocess
class FileWrapper(object):
    """
    Proxy around a file object so extra attributes can be injected onto it
    after creation (a plain `file` object cannot be extended that way).
    """
    def __init__(self, file_object):
        self._wrapped = file_object
    def __getattr__(self, name):
        # Delegate 'write', 'close' and anything else not defined here.
        return getattr(self._wrapped, name)
    def __enter__(self, *args, **kwargs):
        # Deliberately return self rather than the wrapped object's
        # __enter__ result so injected attributes remain reachable inside a
        # `with` block. May misbehave for exotic file objects, but works
        # fine with standard python file objects (the only default use).
        return self
    def __exit__(self, *args, **kwargs):
        return self._wrapped.__exit__(*args, **kwargs)
    def __iter__(self):
        return iter(self._wrapped)
class InputPipeProcessWrapper(object):
    """Readable file-like object that feeds ``input_pipe`` through a
    subprocess and exposes the subprocess's stdout."""
    def __init__(self, command, input_pipe=None):
        """
        Initializes a InputPipeProcessWrapper instance.
        :param command: a subprocess.Popen instance with stdin=input_pipe and
                        stdout=subprocess.PIPE.
                        Alternatively, just its args argument as a convenience.
        """
        self._command = command
        self._input_pipe = input_pipe
        self._process = command if isinstance(command, subprocess.Popen) else self.create_subprocess(command)
        # we want to keep a circular reference to avoid garbage collection
        # when the object is used in, e.g., pipe.read()
        self._process._selfref = self
    def create_subprocess(self, command):
        """
        Spawn *command* with SIGPIPE restored to its default disposition.
        http://www.chiark.greenend.org.uk/ucgi/~cjwatson/blosxom/2009-07-02-python-sigpipe.html
        """
        def subprocess_setup():
            # Python installs a SIGPIPE handler by default. This is usually not what
            # non-Python subprocesses expect.
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        return subprocess.Popen(command,
                                stdin=self._input_pipe,
                                stdout=subprocess.PIPE,
                                preexec_fn=subprocess_setup,
                                close_fds=True)
    def _finish(self):
        # Need to close this before input_pipe to get all SIGPIPE messages correctly
        self._process.stdout.close()
        if self._input_pipe is not None:
            self._input_pipe.close()
        self._process.wait()  # deadlock?
        # SIGPIPE-terminated children are treated as success (see below).
        if self._process.returncode not in (0, 141, 128 - 141):
            # 141 == 128 + 13 == 128 + SIGPIPE - normally processes exit with 128 + {reiceived SIG}
            # 128 - 141 == -13 == -SIGPIPE, sometimes python receives -13 for some subprocesses
            raise RuntimeError('Error reading from pipe. Subcommand exited with non-zero exit status %s.' % self._process.returncode)
    def close(self):
        # Explicit close runs the same validation as normal end-of-stream.
        self._finish()
    def __del__(self):
        # Finalizer safety net; NOTE(review): this can raise from a destructor
        # if the subprocess failed -- confirm that is acceptable to callers.
        self._finish()
    def __enter__(self):
        return self
    def _abort(self):
        """
        Call _finish, but eat the exception (if any).
        """
        try:
            self._finish()
        except KeyboardInterrupt:
            raise
        except BaseException:
            pass
    def __exit__(self, type, value, traceback):
        # On error, suppress cleanup failures so the original exception wins.
        if type:
            self._abort()
        else:
            self._finish()
    def __getattr__(self, name):
        # Forward attribute access to the subprocess stdout; guard '_process'
        # itself to avoid infinite recursion before __init__ has set it.
        if name == '_process':
            raise AttributeError(name)
        return getattr(self._process.stdout, name)
    def __iter__(self):
        for line in self._process.stdout:
            yield line
        self._finish()
class OutputPipeProcessWrapper(object):
    """Writable wrapper that feeds written data into a subprocess' stdin."""
    # Flush the child's stdin after this many write() calls to bound buffering.
    WRITES_BEFORE_FLUSH = 10000
    def __init__(self, command, output_pipe=None):
        self.closed = False
        self._command = command
        self._output_pipe = output_pipe
        # Child consumes what we write; its stdout goes to output_pipe
        # (or is inherited when output_pipe is None).
        self._process = subprocess.Popen(command,
                                         stdin=subprocess.PIPE,
                                         stdout=output_pipe,
                                         close_fds=True)
        self._flushcount = 0
    def write(self, *args, **kwargs):
        self._process.stdin.write(*args, **kwargs)
        self._flushcount += 1
        if self._flushcount == self.WRITES_BEFORE_FLUSH:
            self._process.stdin.flush()
            self._flushcount = 0
    def writeLine(self, line):
        # Caller passes a single line without the terminating newline.
        assert '\n' not in line
        self.write(line + '\n')
    def _finish(self):
        """
        Closes and waits for subprocess to exit.
        """
        if self._process.returncode is None:
            self._process.stdin.flush()
            self._process.stdin.close()
            self._process.wait()
            self.closed = True
    def __del__(self):
        if not self.closed:
            self.abort()
    def __exit__(self, type, value, traceback):
        # Clean close on success; silent abort when an exception is in flight.
        if type is None:
            self.close()
        else:
            self.abort()
    def __enter__(self):
        return self
    def close(self):
        self._finish()
        if self._process.returncode == 0:
            if self._output_pipe is not None:
                self._output_pipe.close()
        else:
            raise RuntimeError('Error when executing command %s' % self._command)
    def abort(self):
        self._finish()
    def __getattr__(self, name):
        # Avoid infinite recursion while _process is not yet set.
        if name == '_process':
            raise AttributeError(name)
        # Everything else is delegated to the child's stdin.
        return getattr(self._process.stdin, name)
class Format(object):
    """
    Interface for format specifications.

    Subclasses supply reader/writer factories that wrap a raw pipe or an
    HDFS path (e.g. adding compression/decompression).
    """
    # TODO Move this to somewhere else?
    @classmethod
    def hdfs_reader(cls, path):
        raise NotImplementedError()
    @classmethod
    def pipe_reader(cls, input_pipe):
        raise NotImplementedError()
    # TODO Move this to somewhere else?
    @classmethod
    def hdfs_writer(cls, path):
        raise NotImplementedError()
    @classmethod
    def pipe_writer(cls, output_pipe):
        raise NotImplementedError()
class Gzip(Format):
    """Gzip format implemented by piping through the system gzip tools."""
    @classmethod
    def pipe_reader(cls, input_pipe):
        # Stream-decompress input_pipe through an external 'gunzip' process.
        return InputPipeProcessWrapper(['gunzip'], input_pipe)
    @classmethod
    def pipe_writer(cls, output_pipe):
        # Stream-compress written data through an external 'gzip' process.
        return OutputPipeProcessWrapper(['gzip'], output_pipe)
class Bzip2(Format):
    """Bzip2 format implemented by piping through the system bzip2 tools."""
    @classmethod
    def pipe_reader(cls, input_pipe):
        # Stream-decompress input_pipe through an external 'bzcat' process.
        return InputPipeProcessWrapper(['bzcat'], input_pipe)
    @classmethod
    def pipe_writer(cls, output_pipe):
        # Stream-compress written data through an external 'bzip2' process.
        return OutputPipeProcessWrapper(['bzip2'], output_pipe)
| StarcoderdataPython |
3370654 | <gh_stars>0
import mysql.connector
from mysql.connector import Error
# Open database connection with a dictionary of connection settings.
conDict = {'host': 'localhost',
           'database': 'game_records',
           'user': 'root',
           'password': ''}
db = mysql.connector.connect(**conDict)
# preparing cursor
cursor = db.cursor()
# Execute the INSERT.
# FIX: the original statement was "...(12341,'Me,'Easy',10,50)" — the
# closing quote after 'Me' was missing, producing malformed SQL.
myInsertText = "INSERT INTO tbl_game_score VALUES (12341,'Me','Easy',10,50)"
cursor.execute(myInsertText)
# Commit so the row is persisted.
db.commit()
print(cursor.rowcount, "Record Added")
# Disconnect
db.close()
| StarcoderdataPython |
def gen_detector_intro(filename_out, seconds, fps=30):
    ''' Generate a video of a central dot
        running for the defined number of
        seconds, and a corner rectangle
        with changing brightness, used as
        introduction step in experiment to
        calibrate the stimulus-detector
        synchronization apparatus.

        Parameters
        ----------
        filename_out: str,
            Path and name of the MP4 video file to be created
        seconds: int,
            Length in seconds of the video file to be created
        fps: int, optional
            Frames per second of the generated video. (default=30)

        Returns
        -------
        (none) (output video is saved in the specified filename_out)

        See Also
        --------
        preprocess: reads a CSV file from VICON Motion Capture and
            creates a new CSV file only with the trajectories,
            changing to another reference frame, if convenient
        make_video: uses pre-processed VICON data to generate
            video of the movement at specified viewing angle
        scrambled_video: uses pre-processed VICON data to produce
            video of scrambled points (non-biological motion)
        central_dot: generate video of a central dot (resting interval)

        Example
        -------
        vicon.gen_detector_intro('C:\\Users\\MyUser\\Documents\\Vicon\\intro_video.mp4',15)
    '''
    # NOTE: the original source had dataset junk ("391605 | ") fused into the
    # def line; removed here. Imports stay function-local so the backend can
    # be selected before pyplot is loaded.
    import numpy as np
    import pandas as pd
    import matplotlib
    matplotlib.use('TkAgg')  # Needed to run on mac
    from matplotlib import pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib.colors import cnames
    from matplotlib import animation
    from matplotlib import patches as patches
    numframes = seconds * fps
    # generate figure (13.66 x 7.68 in — presumably targets a 1366x768 frame
    # at 100 dpi; TODO confirm)
    fig = plt.figure()
    plt.style.use('dark_background')
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
    fig.set_size_inches(13.66, 7.68, forward=True)
    ax = plt.axes()
    def animate(i):
        # plot the central fixation dot
        ax.clear()
        ax.scatter(0.5, 0.5, c='w', alpha=0.7)
        # set axis limits, removing grid, setting background etc
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        # setting varying rectangle brightness levels for detector calibration:
        # ramp 0.0..0.9 over the first 10 s, then full white, black, and 0.3.
        if i > 12 * fps:
            detector = 0.3
        elif i > 11 * fps:
            detector = 0
        elif i > 10 * fps:
            detector = 1
        else:
            detector = int(i / fps) / 10
        ax.add_patch(patches.Rectangle((0.95, 0.85), 0.1, 0.3, fill=True, fc=(detector, detector, detector), zorder=2, clip_on=False))
        # black background
        ax.patch.set_facecolor('black')
        fig.set_facecolor('black')
        plt.axis('off')
        plt.grid(b=None)
    # make animation
    ani = animation.FuncAnimation(fig, animate, frames=numframes)
    # setting up animation file; the writers[] lookup also validates that
    # ffmpeg is available before rendering starts
    Writer = animation.writers['ffmpeg']
    writer = animation.FFMpegWriter(fps=fps, metadata=dict(artist='NeuroMat'), bitrate=1800, extra_args=['-vcodec', 'libx264'])
    # save animation
    ani.save(filename_out, writer=writer)
    plt.close()
| StarcoderdataPython |
8001124 | <reponame>MS17-010/python-misc
# alternative for windows
# download notify-send from here: http://vaskovsky.net/notify-send/notify-send.zip
# unzip the file and add notify-send.exe to environment variables OR
# keep notify-send.exe in same folder where this python script is kept.
from subprocess import call

# Fire a desktop notification through the notify-send executable
# (argument list, not a shell string).
notification_cmd = ["notify-send", "This is title!", "This is message body!"]
call(notification_cmd)
| StarcoderdataPython |
274486 | from django.test.client import RequestFactory
from mock import patch
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.groups import forms
from mozillians.groups.models import Group
from mozillians.groups.tests import GroupAliasFactory, GroupFactory
from mozillians.users.tests import UserFactory
class GroupCreateFormTests(TestCase):
    """Tests for GroupCreateForm / GroupCuratorsForm validation rules."""
    def test_group_creation(self):
        # A valid name plus membership policy creates the Group row.
        form_data = {'name': 'test group', 'accepting_new_members': 'by_request'}
        form = forms.GroupCreateForm(data=form_data)
        ok_(form.is_valid())
        form.save()
        ok_(Group.objects.filter(name='test group').exists())
    def test_name_unique(self):
        # A name colliding with an existing group *alias* is rejected too.
        group = GroupFactory.create()
        GroupAliasFactory.create(alias=group, name='bar')
        form = forms.GroupCreateForm({'name': 'bar', 'accepting_new_members': 'by_request'})
        ok_(not form.is_valid())
        ok_('name' in form.errors)
        msg = u'This name already exists.'
        ok_(msg in form.errors['name'])
    def test_creation_without_group_type(self):
        # 'accepting_new_members' is a required field.
        form_data = {'name': 'test group'}
        form = forms.GroupCreateForm(data=form_data)
        ok_(not form.is_valid())
        msg = u'This field is required.'
        ok_(msg in form.errors['accepting_new_members'])
    def test_legacy_group_curators_validation(self):
        # Groups that never had curators may still be saved without any.
        group = GroupFactory.create()
        # Update form without adding curators
        form_data = {'name': 'test_group'}
        form = forms.GroupCuratorsForm(instance=group, data=form_data)
        ok_(form.is_valid())
        # Ensure that groups has no curators
        group = Group.objects.get(id=group.id)
        ok_(not group.curators.exists())
    def test_group_curators_validation(self):
        # Once a group has a curator it cannot drop back to zero curators.
        group = GroupFactory.create()
        curator = UserFactory.create()
        group.curators.add(curator.userprofile)
        # Update form without adding curators
        form_data = {'name': 'test_group',
                     'curators': []}
        form = forms.GroupCuratorsForm(instance=group, data=form_data)
        ok_(not form.is_valid())
        eq_(form.errors, {'curators': [u'The group must have at least one curator.']})
class BaseGroupEditTestCase(TestCase):
    """Shared helper for the group-edit form test cases below."""
    def validate_group_edit_forms(self, form_class, instance, data, request=None, valid=True):
        # Bind the form, assert its validity matches *valid* and, when valid,
        # save and verify each submitted field was actually persisted.
        form = form_class(instance=instance, data=data, request=request)
        if valid:
            ok_(form.is_valid())
            form.save()
            # Get the object from the db
            obj = instance._meta.model.objects.get(pk=instance.pk)
            # compare the value of each field in the object with the ones in the data dict
            for field in [f for f in obj._meta.fields if f.name in data.keys()]:
                eq_(field.value_from_object(obj), data[field.name])
        else:
            ok_(not form.is_valid())
        # Returned so callers can inspect form.errors / form.instance.
        return form
class GroupEditFormTests(BaseGroupEditTestCase):
    """Tests for the per-section group edit forms (basic, curators, terms,
    expiration, invites, admin, custom email)."""
    def test_edit_basic_form_with_data(self):
        group = GroupFactory.create()
        data = {'name': 'test group',
                'description': 'sample description',
                'irc_channel': 'foobar',
                'website': 'https://example.com',
                'wiki': 'https://example-wiki.com'}
        self.validate_group_edit_forms(forms.GroupBasicForm, group, data)
    def test_edit_basic_form_without_data(self):
        # 'name' is required on the basic form.
        group = GroupFactory.create()
        data = {}
        form = self.validate_group_edit_forms(forms.GroupBasicForm, group, data, None, False)
        eq_(form.errors, {'name': [u'This field is required.']})
    def test_edit_curators(self):
        curator = UserFactory.create()
        group = GroupFactory.create()
        data = {'curators': [curator.id]}
        self.validate_group_edit_forms(forms.GroupCuratorsForm, group, data)
    def test_edit_terms(self):
        group = GroupFactory.create()
        data = {'terms': 'foobar'}
        self.validate_group_edit_forms(forms.GroupTermsExpirationForm, group, data)
    def test_edit_terms_without_data(self):
        # Terms are optional; an empty submission is still valid.
        group = GroupFactory.create()
        data = {}
        self.validate_group_edit_forms(forms.GroupTermsExpirationForm, group, data)
    def test_edit_invalidation(self):
        group = GroupFactory.create()
        data = {'invalidation_days': 5}
        self.validate_group_edit_forms(forms.GroupTermsExpirationForm, group, data)
    def test_edit_invalidation_invalid_data(self):
        # Expiration is capped at two years (1000 days rejected).
        group = GroupFactory.create()
        data = {'invalidation_days': 1000}
        form = self.validate_group_edit_forms(forms.GroupTermsExpirationForm, group,
                                              data, None, False)
        eq_(form.errors, {'invalidation_days': [u'The maximum expiration date for a group '
                                                'cannot exceed two years.']})
    def test_edit_terms_and_invalidation(self):
        group = GroupFactory.create()
        data = {'terms': 'foobar',
                'invalidation_days': 40}
        self.validate_group_edit_forms(forms.GroupTermsExpirationForm, group, data)
    def test_edit_invitation(self):
        # A curator can invite; the notification task gets the custom text.
        invitee = UserFactory.create()
        curator = UserFactory.create()
        group = GroupFactory.create(invite_email_text='foobar')
        group.curators.add(curator.userprofile)
        request = RequestFactory().request()
        request.user = curator
        data = {'invites': [invitee.userprofile.id]}
        with patch('mozillians.groups.forms.notify_redeemer_invitation.delay') as mocked_task:
            form = self.validate_group_edit_forms(forms.GroupInviteForm, group, data, request)
        eq_(list(form.instance.invites.all().values_list('id', flat=True)), [invitee.id])
        ok_(mocked_task.called)
        eq_(mocked_task.call_args[0][1], u'foobar')
    def test_edit_invitation_without_curator(self):
        # Non-curators must not be able to invite members.
        invitee = UserFactory.create()
        group = GroupFactory.create()
        request = RequestFactory().request()
        request.user = UserFactory.create()
        data = {'invites': [invitee.userprofile.id]}
        form = self.validate_group_edit_forms(forms.GroupInviteForm, group, data, request, False)
        eq_(form.errors, {'invites': [u'You need to be the curator of this group before '
                                      'inviting someone to join.']})
    def test_edit_admin_without_permissions(self):
        # The admin section is restricted to administrators.
        group = GroupFactory.create()
        data = {}
        request = RequestFactory().request()
        request.user = UserFactory.create()
        form = self.validate_group_edit_forms(forms.GroupAdminForm, group, data, request, False)
        eq_(form.errors, {'__all__': [u'You need to be the administrator of this group '
                                      'in order to edit this section.']})
    def test_edit_admin(self):
        # Superusers can edit the admin-only flags.
        group = GroupFactory.create()
        request = RequestFactory().request()
        request.user = UserFactory.create(is_superuser=True)
        data = {'functional_area': True,
                'visible': True,
                'members_can_leave': True}
        self.validate_group_edit_forms(forms.GroupAdminForm, group, data, request)
    def test_email_invite(self):
        curator = UserFactory.create()
        group = GroupFactory.create()
        group.curators.add(curator.userprofile)
        request = RequestFactory().request()
        request.user = curator
        data = {'invite_email_text': u'Custom message in the email.'}
        self.validate_group_edit_forms(forms.GroupCustomEmailForm, group, data, request)
| StarcoderdataPython |
6490673 | from .capture import Capture | StarcoderdataPython |
# BundleWrap bundle: install HAProxy via dnf, run it under systemd, and
# deploy a per-node configuration file.
# FIX: removed dataset junk ("3585780 | ") fused into the first line.
pkg_dnf = {
    'haproxy': {},
}
svc_systemd = {
    'haproxy': {
        # Do not manage the service before the package is installed.
        'needs': ['pkg_dnf:haproxy']
    },
}
files = {
    '/etc/haproxy/haproxy.cfg': {
        'mode': '0664',
        # 'node' is injected by BundleWrap when rendering the bundle;
        # each node gets its own "<nodename>.haproxy.cfg" template.
        'source': '{}.haproxy.cfg'.format(node.name),
        # Restart HAProxy whenever its configuration changes.
        'triggers': ['svc_systemd:haproxy:restart'],
    },
}
| StarcoderdataPython |
1703783 | # -*- coding: utf-8 -*-
from __future__ import print_function
import ssl, hmac, base64, hashlib,json
from datetime import datetime as pydatetime
# from eventNotice.models import EventNoticeSmsSendStatistic
try:
from urllib import urlencode
from urllib2 import Request, urlopen
except ImportError:
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from logging import getLogger
logger = getLogger("default")
def smsSend(mobile, smsCode):
    """Send an SMS verification code via a Tencent Cloud API-market gateway.

    :param mobile: recipient phone number
    :param smsCode: verification code substituted into template TP18031513
    :return: True when the gateway returned a parsable, truthy JSON body,
             False on any error (logged via logger.exception)
    """
    # Secret Id assigned by the cloud marketplace
    secretId = "<KEY>"
    # Secret Key assigned by the cloud marketplace
    secretKey = "<KEY>"
    source = "market"
    # Build the HMAC-SHA1 request signature over the date and source headers.
    datetime = pydatetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    signStr = "x-date: %s\nx-source: %s" % (datetime, source)
    sign = base64.b64encode(hmac.new(secretKey.encode('utf-8'), signStr.encode('utf-8'), hashlib.sha1).digest())
    auth = 'hmac id="%s", algorithm="hmac-sha1", headers="x-date x-source", signature="%s"' % (secretId, sign.decode('utf-8'))
    # Request method
    method = 'GET'
    # Request headers
    headers = {
        'X-Source': source,
        'X-Date': datetime,
        'Authorization': auth,
    }
    # Query parameters
    queryParams = {
        'mobile': mobile,
        'param': "code:%s" % smsCode,
        'tpl_id': 'TP18031513'}
    # Body parameters (only used for POST-style methods)
    bodyParams = {
    }
    # Assemble the URL with the encoded query string.
    url = 'http://service-g9x2885n-1255399658.ap-beijing.apigateway.myqcloud.com/release/smsNotify'
    if len(queryParams.keys()) > 0:
        url = url + '?' + urlencode(queryParams)
    request = Request(url, headers=headers)
    request.get_method = lambda: method
    print(request.get_full_url())
    if method in ('POST', 'PUT', 'PATCH'):
        request.data = urlencode(bodyParams).encode('utf-8')
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
    # NOTE(review): certificate verification is disabled here, but ctx is
    # never passed to urlopen(), so it currently has no effect.
    ctx = ssl.create_default_context()
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    # statisticObj = EventNoticeSmsSendStatistic()
    # statisticObj.phone = mobile
    try:
        response = urlopen(request, timeout=2)
        content = response.read()
        dictContent = json.loads(content.decode("utf-8"))
        logger.info(dictContent)
        if dictContent:
            # statisticObj.return_code = dictContent.get("return_code")
            # statisticObj.order_id = dictContent.get("order_id")
            # statisticObj.save()
            return True
        else:
            raise Exception
    except:
        # Log message text is Chinese: "failed to send SMS verification code".
        logger.exception("发送短信验证码失败")
        # statisticObj.return_code = "999"
        # statisticObj.save()
        return False
if __name__ == '__main__':
    # Manual smoke test: sends a real SMS to a hard-coded number.
    # FIX: removed dataset junk ("| StarcoderdataPython |") fused onto the
    # final print line.
    a = smsSend("18649715651", "1234")
    print(a)
4806400 | import operator as op
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from functools import reduce
from typing import Type
# Puzzle input: one line containing a long hexadecimal string.
file_name = 'data/day16.txt'
with open(file_name) as file:
    data = file.readline()
    # NOTE(review): readline() keeps a trailing newline if present;
    # int('\n', 16) below would raise — assumes the file has none.
bits = ''
end = 0
for _hex in data:
    # Expand each hex digit into its 4-bit binary representation.
    for bit in f'{int(_hex, 16):04b}':
        bits += bit
        if bit == '1':
            # 'end' tracks the position just past the last 1-bit, so
            # parsing can stop before the trailing zero padding.
            end = len(bits)
@dataclass
class Packet(ABC):
    """Base class for BITS packets; every packet carries a version header."""
    version: int
    @property
    @abstractmethod
    def value(self) -> int:
        """Evaluated value of this packet (puzzle part 2)."""
        raise NotImplementedError
    @property
    @abstractmethod
    def version_sum(self) -> int:
        """This packet's version plus all nested subpacket versions (part 1)."""
        raise NotImplementedError
@dataclass
class Literal(Packet):
    """Type-4 packet wrapping a single literal integer."""
    # Parsed literal number (underscored to avoid clashing with the property).
    _value: int
    @property
    def value(self) -> int:
        return self._value
    @property
    def version_sum(self) -> int:
        # A literal has no subpackets, so only its own version counts.
        return self.version
@dataclass
class Operator(Packet, ABC):
    """Packet that combines the values of its subpackets."""
    # True when the length header counted bits; False when it counted
    # subpackets (informational after parsing).
    bit_mode: bool
    subpackets: list[Packet] = field(default_factory=lambda: [])
    @property
    def version_sum(self) -> int:
        # Own version plus the recursive sum over all children.
        return self.version + sum(sub.version_sum for sub in self.subpackets)
class SumOperator(Operator):
    """Type 0: value is the sum of all subpacket values."""
    @property
    def value(self) -> int:
        total = 0
        for child in self.subpackets:
            total += child.value
        return total
class ProductOperator(Operator):
    """Type 1: value is the product of all subpacket values."""
    @property
    def value(self) -> int:
        child_values = (child.value for child in self.subpackets)
        return reduce(op.mul, child_values)
class MinOperator(Operator):
    """Type 2: value is the smallest subpacket value."""
    @property
    def value(self) -> int:
        child_values = [child.value for child in self.subpackets]
        return min(child_values)
class MaxOperator(Operator):
    """Type 3: value is the largest subpacket value."""
    @property
    def value(self) -> int:
        child_values = [child.value for child in self.subpackets]
        return max(child_values)
class GreaterThanOperator(Operator):
    """Type 5: 1 if the first subpacket's value exceeds the second's, else 0."""
    @property
    def value(self) -> int:
        first, second = self.subpackets[0], self.subpackets[1]
        return int(first.value > second.value)
class LessThanOperator(Operator):
    """Type 6: 1 if the first subpacket's value is below the second's, else 0."""
    @property
    def value(self) -> int:
        first, second = self.subpackets[0], self.subpackets[1]
        return int(first.value < second.value)
class EqualToOperator(Operator):
    """Type 7: 1 if the first two subpacket values are equal, else 0."""
    @property
    def value(self) -> int:
        first, second = self.subpackets[0], self.subpackets[1]
        return int(first.value == second.value)
# BITS type-id -> packet class dispatch table (AoC 2021 day 16 spec).
type_id_to_class: dict[int, Type[Literal | Operator]] = {
    0: SumOperator,
    1: ProductOperator,
    2: MinOperator,
    3: MaxOperator,
    4: Literal,  # the only non-operator packet type
    5: GreaterThanOperator,
    6: LessThanOperator,
    7: EqualToOperator
}
def parse_version(index: int) -> tuple[int, int]:
    """Read the 3-bit version field at *index*; return (new_index, version)."""
    field_end = index + 3
    return field_end, int(bits[index:field_end], 2)
def parse_type_id(index: int) -> tuple[int, int]:
    """Read the 3-bit type-id field at *index*; return (new_index, type_id)."""
    field_end = index + 3
    return field_end, int(bits[index:field_end], 2)
def parse_literal(index: int) -> tuple[int, int]:
    """Decode a type-4 literal: 5-bit groups whose leading bit means 'more'."""
    groups = []
    while True:
        chunk = bits[index:index + 5]
        groups.append(chunk[1:])   # low 4 bits carry the payload
        index += 5
        if chunk[0] == '0':        # continuation bit 0 -> last group
            break
    return index, int(''.join(groups), 2)
def parse_operator_type(index: int) -> tuple[int, bool, int]:
    """Read an operator's length header.

    Returns (new_index, bit_mode, length): bit_mode=True means *length*
    counts bits (15-bit field), otherwise it counts subpackets (11-bit field).
    """
    if bits[index] == '0':
        size, bit_mode = 16, True
    else:
        size, bit_mode = 12, False
    return index + size, bit_mode, int(bits[index + 1:index + size], 2)
def parse_subpackets(index: int, bit_mode: bool, length: int) -> tuple[int, list[Packet]]:
    """Parse an operator's subpackets.

    When *bit_mode* is True, *length* is the total number of bits the
    subpackets occupy; otherwise it is the number of subpackets to read.
    Returns (index_after_subpackets, subpackets).
    """
    subpackets = []
    if bit_mode:
        # Consume packets until exactly *length* bits have been read.
        new_index = index
        while new_index - index < length:
            new_index, subpacket = parse_packet(new_index)
            subpackets.append(subpacket)
        index = new_index
    else:
        # Consume exactly *length* packets.
        while len(subpackets) < length:
            index, subpacket = parse_packet(index)
            subpackets.append(subpacket)
    return index, subpackets
def parse_packet(index: int) -> tuple[int, Packet]:
    """Parse one packet starting at *index*; return (index_after, packet)."""
    index, version = parse_version(index)
    index, type_id = parse_type_id(index)
    packet_class = type_id_to_class[type_id]
    if packet_class is Literal:
        index, literal = parse_literal(index)
        return index, Literal(version, literal)
    # Operator packet: read the length header, then recurse into subpackets.
    index, bit_mode, length = parse_operator_type(index)
    operator = packet_class(version, bit_mode)
    index, subpackets = parse_subpackets(index, bit_mode, length)
    operator.subpackets += subpackets
    return index, operator
# Parse every top-level packet, stopping at the last 1-bit so trailing
# zero padding is never treated as another packet.
packets = []
parse_index = 0
while parse_index < len(bits) and parse_index < end:
    parse_index, packet = parse_packet(parse_index)
    packets.append(packet)
print(f'Part 1: {sum(packet.version_sum for packet in packets)}')
print(f'Part 2: {sum(packet.value for packet in packets)}')
| StarcoderdataPython |
1686538 |
import pyaudio
import speech_recognition as sr

# Name of the microphone to use; the enumeration loop below also prints all
# available device names so the right one can be chosen.
mic_name = "Microphone (Realtek High Defini"
# Sample rate is how often values are recorded
sample_rate = 48000
# Buffer size for each audio chunk; 2048 could be increased.
chunk_size = 2048
# Initialize the recognizer
r = sr.Recognizer()
# generate a list of all audio cards/microphones
mic_list = sr.Microphone.list_microphone_names()
# Resolve the device ID of the requested microphone to avoid ambiguity.
# NOTE(review): if no device matches mic_name, device_id stays undefined and
# the sr.Microphone(...) call below raises NameError (as the original
# comments already warned).
for i, microphone_name in enumerate(mic_list):
    print(i, " ", microphone_name)
    if microphone_name == mic_name:
        device_id = i
# Use the selected microphone as the audio source.
with sr.Microphone(device_index = device_id, sample_rate = sample_rate, chunk_size = chunk_size) as source:
    # Compensate for surrounding noise level before listening.
    r.adjust_for_ambient_noise(source)
    print("Say Something")
    # listening
    audio = r.listen(source)
# FIX: manage the result file with a context manager so it is always closed
# (the original opened/closed it manually, and the final line carried fused
# dataset junk).
with open('../command.cd', 'w') as out_file:
    try:
        text = r.recognize_google(audio)
        out_file.write(text)
    # error when not recognised
    except sr.UnknownValueError:
        out_file.write("failed")
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
5139308 | #!python
#-*- coding:utf-8 -*-
import os,sys,base64,hashlib,time
from Crypto import Random
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
def KeysNew(bit = 2048):
    """Generate an RSA key pair of *bit* size and export both halves as
    PEM files under ssl/ (ssl/secret.pem and ssl/public.pem)."""
    rnd_generator = Random.new().read
    rsa = RSA.generate(bit,rnd_generator)
    secret_pem = rsa.exportKey()
    public_pem = rsa.publickey().exportKey()
    # Create the output directory on first use.
    if not os.path.exists('ssl'):
        os.mkdir('ssl')
    with open('ssl/secret.pem','wb') as file:
        file.write(secret_pem)
    with open('ssl/public.pem','wb') as file:
        file.write(public_pem)
    print('finish export pem files')
def encrypt(strtxt):
    """RSA-encrypt *strtxt* (a text string) with ssl/public.pem using
    PKCS#1 v1.5 padding; returns the ciphertext base64-encoded."""
    with open('ssl/public.pem','r') as file:
        rsakey = RSA.importKey(file.read())
        cipher = PKCS1_v1_5.new(rsakey)
        enctxt = base64.b64encode(cipher.encrypt(strtxt.encode(encoding = 'utf-8')))
    return enctxt
def decrypt(strtxt):
    """Decrypt base64-encoded PKCS#1 v1.5 ciphertext with ssl/secret.pem;
    returns the sentinel 'ERROR' when padding verification fails."""
    with open('ssl/secret.pem','r') as file:
        rsakey = RSA.importKey(file.read())
        cipher = PKCS1_v1_5.new(rsakey)
        dectxt = cipher.decrypt(base64.b64decode(strtxt),'ERROR')
    return dectxt
def calmd5(strtxt):
    """Return the hex MD5 digest of *strtxt* (bytes on Python 3)."""
    return hashlib.md5(strtxt).hexdigest()
def test(root = '.'):
    """Fingerprint the file tree under *root* and round-trip the combined
    digest through RSA encrypt/decrypt as a sanity check.

    NOTE(review): the 'print u...' statements below are Python 2 syntax;
    this module will not run under Python 3 as-is.
    """
    def recursion(mpath):
        # Depth-first listing: '<relative path>-<size>' for every file.
        if os.path.isfile(mpath):
            return ['{0}-{1}'.format(os.path.relpath(mpath,root),os.path.getsize(mpath))]
        elif os.path.isdir(mpath):
            lst = os.listdir(mpath)
            tmp = []
            for x in lst:
                fpath = os.path.join(mpath,x)
                tmp.extend(recursion(fpath))
            return tmp
    filelist = recursion(root)
    # "total {0} files"
    print u'\n总共{0}个文件'.format(len(filelist))
    # Hash each entry, sort for a stable order, then hash the joined list.
    md5list = [calmd5(x) for x in filelist]
    md5list.sort()
    strtxt = ','.join(md5list)
    md5str = calmd5(strtxt)
    enctxt = encrypt(md5str)
    # "original string" / "encrypted string" / "decrypted string"
    print u'\n原始字符串:',md5str
    print u'\n加密字符串:',enctxt
    print u'\n解密字符串:',decrypt(enctxt)
if __name__ == '__main__':
    start = time.time()
    # Generate a key pair only on first run (ssl/ absent).
    if not os.path.exists('ssl'):
        KeysNew()
    test()
    # Message text is Chinese: "elapsed {0}s".
    print(u'\n用时{0}s'.format(time.time()-start))
| StarcoderdataPython |
172621 | import csv
from pathlib import Path
def get_student_dict() -> list:
    """Read assets/students.csv and return one dict per row, keyed by the
    first two header columns."""
    csv_path = f"{Path(__file__).parent.absolute()}/../assets/students.csv"
    records = []
    with open(csv_path, mode="rt+") as handle:
        rows = csv.reader(handle)
        header = next(rows)
        for row in rows:
            pairs = list(zip(header, row))
            records.append({pairs[0][0]: pairs[0][1], pairs[1][0]: pairs[1][1]})
    return records
def get_topics_dict() -> list:
    """Read assets/topics.csv and return one dict per row, keyed by the
    first two header columns."""
    # TODO: REMOVE CODE DUPLICATE (same logic as get_student_dict)
    csv_path = f"{Path(__file__).parent.absolute()}/../assets/topics.csv"
    records = []
    with open(csv_path, mode="rt+") as handle:
        rows = csv.reader(handle)
        header = next(rows)
        for row in rows:
            pairs = list(zip(header, row))
            records.append({pairs[0][0]: pairs[0][1], pairs[1][0]: pairs[1][1]})
    return records
| StarcoderdataPython |
1928753 | from numpy import equal
from ui.Menus import Menus
from service.PlatformService import PlatformService
from service.dtos.PlatformDto import PlatformDto
from service.CategoryService import CategoryService
from service.dtos.CategoryDto import CategoryDto, PasswordRetentionPeriod
from utils.UiUtils import UiUtils
import sys
import os
sys.path.append(os.path.abspath(os.path.join('..', 'utils')))
sys.path.append(os.path.abspath(os.path.join('..', 'persistence')))
class PlatformMenu:
    """Console menu driving CRUD operations on 'Platform' records."""
    # Shared service instance used by every menu instance.
    platform_service = PlatformService()
    def __init__(self, user_id) -> None:
        # Currently logged-in user; kept for menu context.
        self.user_id = user_id
    def exec_platform_menu(self):
        """Main loop: redisplay the platform menu until the user exits."""
        platform_menu_level = True
        while platform_menu_level:
            UiUtils.clear()
            menu_choice = UiUtils.disp_and_select_from_menu(
                Menus.platform_menu)
            platform_menu_level = self.exec_platform_menu_choice(
                int(menu_choice))
        return platform_menu_level
    def exec_platform_menu_choice(self, choice):
        """Dispatch one selection; returns False only for 'exit' (choice 5)."""
        if choice == 1:
            self.add_platform_menu()
        elif choice == 2:
            self.remove_platform_menu()
        elif choice == 3:
            self.update_platform_menu()
        elif choice == 4:
            # NOTE(review): trailing comma makes this a 1-tuple expression;
            # harmless but likely unintended.
            self.show_platforms_menu_on_pause(True),
        elif choice == 5:
            return False
        else:
            input("This menu choice is not available!")
        return True
    def add_platform_menu(self):
        """Prompt for name/description and create a new platform."""
        try:
            UiUtils.clear()
            self.show_platforms_menu_on_pause(False)
            print("\n")
            print("ADD PLATFORM")
            print("------------")
            name = input("Platform Name: ")
            description = input("Description: ")
            self.platform_service.add_platform(
                PlatformDto(None, name, description))
            return True
        except Exception as e:
            # NOTE(review): input() accepts one argument; this two-argument
            # call raises TypeError — print + input was presumably intended.
            input("ERR: Add Platform failed!", e)
            return False
    def remove_platform_menu(self):
        """Prompt for a platform id and delete it."""
        try:
            UiUtils.clear()
            self.show_platforms_menu_on_pause(False)
            print("REMOVE PLATFORM")
            print("---------------")
            id = input("Platform id: ")
            self.platform_service.remove_platform(id)
            return True
        except Exception as e:
            # NOTE(review): same two-argument input() issue as above.
            input("ERR: Remove Platform failed!", e)
            return False
    def update_platform_menu(self):
        """Prompt for a platform id and update its name/description."""
        try:
            UiUtils.clear()
            platform_dtos = self.show_platforms_menu_on_pause(False)
            print("UPDATE PLATFORM")
            print("---------------")
            id = input("Platform Id: ")
            # Locate the DTO matching the chosen id.
            update_platform = None
            for platform in platform_dtos:
                if platform.id() == int(id):
                    update_platform = platform
                    break
            # Show current values as prompts; empty-string answers still
            # overwrite (only identical values are skipped).
            name = input(f"Platform Name ({update_platform.name()}): ")
            description = input(
                f"Platform Description ({update_platform.description()}): ")
            if (name is not None) and (update_platform.name() != name):
                update_platform.name(name)
            if (description is not None) and (update_platform.description() != description):
                update_platform.description(description)
            input(f'setupMenu: {str(update_platform)}')
            self.platform_service.update_platform(update_platform)
            return True
        except Exception as e:
            # NOTE(review): message says "Remove" in the update handler.
            input("ERR: Remove Platform failed!", e)
            return False
    def show_platforms_menu_on_pause(self, pause):
        """Render the platforms table; pause for <enter> when *pause* is True.

        Returns the list of PlatformDto on success, False on error."""
        try:
            if pause:
                UiUtils.clear()
            field_names = ["ID", "NAME", "DESCRIPTION"]
            platform_dtos = self.platform_service.get_all()
            print("\t\tPLATFORMS TABLE")
            print("\t\t===============")
            UiUtils.disp_table(field_names, platform_dtos)
            if pause:
                input("Press <enter> to return to menu...")
            return platform_dtos
        except Exception as e:
            input(f"ERR: exception occured in Table 'Platform' : {str(e)}")
            return False
    def show_platforms_id_name_columns_on_pause(self, pause):
        """Render only the ID/NAME columns; optional pause afterwards."""
        try:
            UiUtils.clear()
            field_names = ["ID", "NAME"]
            platform_maps = [dict(id=platform.id(), name=platform.name())
                             for platform in self.platform_service.get_all()]
            UiUtils.disp_as_columns(field_names, platform_maps)
            if pause:
                input("Press <enter> to continue...")
        except Exception as e:
            input(
                f"ERR: exception occured while displaying 'Platforms' as columns: {str(e)}")
            return False
3561122 | <gh_stars>0
import logging
import datetime
from flask_sqlalchemy import sqlalchemy
from init import ProductCostEstimation, config
from utilities.scm_enums import ErrorCodes, BoxStatus
from utilities.scm_exceptions import ScmException
from utilities.scm_logger import ScmLogger
class ProductCostEstimationRepository:
    """Data-access layer for the product <-> cost-estimation link table.

    FIX: removed dataset junk ("| StarcoderdataPython |") fused onto the
    final line of the original.
    """
    logger = ScmLogger(__name__)

    def __init__(self, db):
        # Flask-SQLAlchemy db handle; sessions are flushed, not committed,
        # so the caller controls transaction boundaries.
        self.db = db

    def add_product_cost_estimation(self,
                                    product_id,
                                    cost_estimation_id):
        """Insert a link row and return its id.

        Raises ScmException(ERROR_ADD_PRODUCT_COST_ESTIMATION_FAILED) on any
        SQLAlchemy error (the error is logged first)."""
        try:
            product_cost_estimation_rec = ProductCostEstimation(product_id=product_id,
                                                                cost_estimation_id=cost_estimation_id)
            self.db.session.add(product_cost_estimation_rec)
            self.db.session.flush()
            return product_cost_estimation_rec.id
        except sqlalchemy.exc.SQLAlchemyError as ex:
            message = 'Error: failed to add product_cost_estimation record. Details: %s' % (str(ex))
            ProductCostEstimationRepository.logger.error(message)
            raise ScmException(ErrorCodes.ERROR_ADD_PRODUCT_COST_ESTIMATION_FAILED, message)

    def delete_cost_estimation_of_product(self, product_id):
        """Delete all cost-estimation links for *product_id* (flushed only)."""
        ProductCostEstimation.query.filter(ProductCostEstimation.product_id == product_id).delete()
        self.db.session.flush()
5115162 | <filename>api/app/controllers/api/domain.py<gh_stars>10-100
import os
from flask import current_app, request
from flask_restful import Resource, reqparse
from app.helpers import command, helpers, validator
from app.middlewares import auth
from app.models import domain as domain_model
from app.models import model
from app.models import record as record_model
from app.models import zone as zone_model
from app.vendors.rest import response
def insert_zone(zone, user_id):
    """Create a zone row owned by *user_id*; return the new zone id."""
    row = {"zone": zone, "user_id": user_id}
    return model.insert(table="zone", data=row)
def insert_soa_record(zone_id):
    """Insert the SOA record row (type_id 1) for *zone_id*; return its id."""
    row = {"owner": "@", "zone_id": zone_id, "type_id": "1", "ttl_id": "6"}
    return model.insert(table="record", data=row)
def insert_soa_rdata(record_id):
    """Insert default SOA record.
    Notes:
        <MNAME> <RNAME> <serial> <refresh> <retry> <expire> <minimum>
    See: https://tools.ietf.org/html/rfc1035 (3.3.13. SOA RDATA format)
    """
    current_time = helpers.soa_time_set()
    # Serial = soa_time_set() output + two-digit revision "01"
    # (presumably a date-based serial; depends on helpers.soa_time_set).
    serial = f"{str(current_time)}01"
    # DEFAULT_SOA_RDATA holds MNAME RNAME refresh retry expire minimum —
    # everything except the serial, which is spliced in below.
    default_soa_content = os.environ.get("DEFAULT_SOA_RDATA")
    rdatas = default_soa_content.split(" ")
    # rdata doesn't contains serial
    mname_and_rname = " ".join(rdatas[0:2])
    ttls = " ".join(rdatas[2:])
    rdata = f"{mname_and_rname} {serial} {ttls}"
    content_data = {"rdata": rdata, "record_id": record_id}
    model.insert(table="rdata", data=content_data)
def insert_soa_default(zone_id):
    """Create the default SOA record (row + rdata); return the record id."""
    new_record_id = insert_soa_record(zone_id)
    insert_soa_rdata(new_record_id)
    return new_record_id
def insert_ns_record(zone_id):
    """Insert one NS record row (type_id 4) for *zone_id*; return its id."""
    row = {"owner": "@", "zone_id": zone_id, "type_id": "4", "ttl_id": "6"}
    return model.insert(table="record", data=row)
def insert_ns_rdata(name, record_id):
    """Attach nameserver *name* as the rdata of record *record_id*."""
    model.insert(table="rdata", data={"rdata": name, "record_id": record_id})
def insert_ns_default(zone_id):
    """Create one NS record per name in DEFAULT_NS; return their record ids."""
    nameservers = os.environ.get("DEFAULT_NS").split(" ")
    created_ids = []
    for ns_name in nameservers:
        new_record_id = insert_ns_record(zone_id)
        insert_ns_rdata(ns_name, new_record_id)
        created_ids.append(new_record_id)
    return created_ids
def insert_cname_record(zone_id):
    """Insert the 'www' CNAME record row (type_id 5); return its id."""
    row = {"owner": "www", "zone_id": zone_id, "type_id": "5", "ttl_id": "6"}
    return model.insert(table="record", data=row)
def insert_cname_rdata(zone, record_id):
    """Point the CNAME at the zone apex (fully qualified, trailing dot)."""
    model.insert(table="rdata", data={"rdata": f"{zone}.", "record_id": record_id})
def insert_cname_default(zone_id, zone):
    """Create the default 'www' CNAME record; return the record id."""
    new_record_id = insert_cname_record(zone_id)
    insert_cname_rdata(zone, new_record_id)
    return new_record_id
class GetDomainData(Resource):
    """GET endpoint: every zone with its aggregated detail data."""
    @auth.auth_required
    def get(self):
        try:
            zones = model.get_all("zone")
            if not zones:
                return response(404)
            # Enrich each bare zone row with related data (records, owner...).
            domains_detail = []
            for zone in zones:
                detail = domain_model.get_other_data(zone)
                domains_detail.append(detail)
            return response(200, data=domains_detail)
        except Exception as e:
            current_app.logger.error(f"{e}")
            return response(500)
class GetDomainDataId(Resource):
    """Fetch a single zone's detail data, selected by id or by name."""

    @auth.auth_required
    def get(self):
        zone_id = request.args.get("id")
        zone_name = request.args.get("name")
        if not zone_id and not zone_name:
            return response(422, "Problems parsing parameters")
        try:
            if zone_id:
                zone = model.get_one(table="zone", field="id", value=zone_id)
            if zone_name:
                # When both parameters are given, the name lookup wins.
                zone = model.get_one(table="zone", field="zone", value=zone_name)
            if not zone:
                return response(404)
            return response(200, data=domain_model.get_other_data(zone))
        except Exception as e:
            current_app.logger.error(f"{e}")
            return response(500)
class GetDomainByUser(Resource):
    """List the zones owned by a given user, with detail data."""

    @auth.auth_required
    def get(self, user_id):
        try:
            zones = zone_model.get_zones_by_user(user_id)
            if not zones:
                return response(404)
            details = [domain_model.get_other_data(zone) for zone in zones]
            return response(200, data=details)
        except Exception as e:
            current_app.logger.error(f"{e}")
            return response(500)
class AddDomain(Resource):
    @helpers.check_producer
    @auth.auth_required
    def post(self):
        """Add new domain (zone) with additional default record.

        note:
        SOA, NS, and CNAME records are added automatically when adding new domain
        """
        parser = reqparse.RequestParser()
        parser.add_argument("zone", type=str, required=True)
        parser.add_argument("user_id", type=int, required=True)
        args = parser.parse_args()
        zone = args["zone"]
        user_id = args["user_id"]
        # Validation
        # Reject duplicate zone names before touching the DNS backend.
        if not model.is_unique(table="zone", field="zone", value=f"{zone}"):
            return response(409, message="Duplicate Zone")
        user = model.get_one(table="user", field="id", value=user_id)
        if not user:
            return response(404, message="User Not Found")
        try:
            validator.validate("ZONE", zone)
        except Exception as e:
            # Zone name failed syntactic validation.
            return response(422, message=f"{e}")
        try:
            zone_id = insert_zone(zone, user_id)
            # create zone config
            command.set_config(zone, zone_id, "conf-set")
            # create default records (SOA first, then NS list, then CNAME)
            soa_record_id = insert_soa_default(zone_id)
            ns_record_ids = insert_ns_default(zone_id)
            cname_record_id = insert_cname_default(zone_id, zone)
            record_ids = [soa_record_id, *ns_record_ids, cname_record_id]
            command.set_default_zone(record_ids)
            # Delegate the zone on both the master and the slave nameserver.
            command.delegate(zone, zone_id, "conf-set", "master")
            command.delegate(zone, zone_id, "conf-set", "slave")
            data_ = {"id": zone_id, "zone": zone}
            return response(201, data=data_)
        except Exception as e:
            current_app.logger.error(f"{e}")
            return response(500)
class DeleteDomain(Resource):
    @helpers.check_producer
    @auth.auth_required
    def delete(self):
        """Remove domain (zone) and all its related records."""
        parser = reqparse.RequestParser()
        parser.add_argument("zone", type=str, required=True)
        args = parser.parse_args()
        zone = args["zone"]
        try:
            zone_id = zone_model.get_zone_id(zone)
        except Exception:
            return response(404, message="Zone Not Found")
        try:
            records = record_model.get_records_by_zone(zone)
            for record in records:
                # zone-purge didn't work, so all the records must be unset
                # one-by-one; otherwise old records will reappear if the same
                # zone name is created again.
                command.set_zone(record["id"], "zone-unset")
            command.set_config(zone, zone_id, "conf-unset")
            # other data (e.g. record rows) are deleted automatically by
            # cockroach once the referenced zone PK no longer exists
            model.delete(table="zone", field="id", value=zone_id)
            return response(204, data=zone)
        except Exception as e:
            current_app.logger.error(f"{e}")
            return response(500)
| StarcoderdataPython |
3376275 | # Generated by Django 3.2.3 on 2021-09-13 11:32
from functools import partial
from django.db import migrations
import easydmp.dmpt.models.questions.mixins
from easydmp.dmpt.utils import register_question_type
# Forward-migration callable: registers the 'email' question type.
register_email_type = partial(register_question_type, 'email')
class Migration(migrations.Migration):
    # Adds EmailQuestion as an unmanaged proxy model over dmpt.Question and
    # runs register_email_type on apply (reverse is a no-op).

    dependencies = [
        ('dmpt', '0002_add_QuestionType_model'),
    ]

    operations = [
        migrations.CreateModel(
            name='EmailQuestion',
            fields=[
            ],
            options={
                'managed': False,
                'proxy': True,
            },
            bases=(easydmp.dmpt.models.questions.mixins.PrimitiveTypeMixin, easydmp.dmpt.models.questions.mixins.SaveMixin, 'dmpt.question'),
        ),
        migrations.RunPython(register_email_type, migrations.RunPython.noop),
    ]
| StarcoderdataPython |
1989527 | #
# -*- coding: utf-8 -*-
# Wireshark tests
# By <NAME> <<EMAIL>>
#
# Ported from a set of Bash scripts which were copyright 2005 <NAME>
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
'''Capture tests'''
import fixtures
import glob
import hashlib
import os
import socket
import subprocess
import subprocesstest
import sys
import threading
import time
import uuid
# Maximum capture time (seconds), passed to the tools via '-a duration:...'.
capture_duration = 5
# Base names for capture output files (per-test prefixed via filename_from_id).
testout_pcap = 'testout.pcap'
testout_pcapng = 'testout.pcapng'
# Snapshot length (bytes) used by the truncated-capture ('-s') tests.
snapshot_len = 96
class UdpTrafficGenerator(threading.Thread):
    """Daemon thread that sends a small UDP datagram to 127.0.0.1:9
    every 50 ms until stop() is called."""

    def __init__(self):
        super().__init__(daemon=True)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.stopped = False

    def run(self):
        payload = b'Wireshark test\n'
        target = ('127.0.0.1', 9)
        while not self.stopped:
            time.sleep(.05)
            self.sock.sendto(payload, target)

    def stop(self):
        # Idempotent: joining twice would be harmless, but skip the work.
        if self.stopped:
            return
        self.stopped = True
        self.join()
@fixtures.fixture
def traffic_generator():
    '''
    Traffic generator factory. Invoking it returns a tuple (start_func, cfilter)
    where cfilter is a capture filter to match the generated traffic.
    start_func can be invoked to start generating traffic and returns a function
    which can be used to stop traffic generation early.

    Currently generates a bunch of UDP traffic to localhost.
    '''
    started = []

    def start_processes():
        generator = UdpTrafficGenerator()
        generator.start()
        started.append(generator)
        return generator.stop

    try:
        yield start_processes, 'udp port 9'
    finally:
        # Make sure no generator thread outlives the fixture.
        for generator in started:
            generator.stop()
@fixtures.fixture(scope='session')
def wireshark_k(wireshark_command):
    """The Wireshark command with '-k' (start capturing immediately) appended."""
    return (*wireshark_command, '-k')
def capture_command(*cmd_args, shell=False):
    """Assemble a capture command line.

    The first positional argument may be the command string itself or a
    sequence such as ('wireshark', '-k'), which is flattened into the
    argument list.  With shell=True the arguments are joined into a single
    space-separated string; otherwise the argument sequence is returned.
    """
    if type(cmd_args[0]) is not str:
        # Flatten e.g. ('wireshark', '-k') into the argument list.
        cmd_args = list(cmd_args[0]) + list(cmd_args)[1:]
    return ' '.join(cmd_args) if shell else cmd_args
@fixtures.fixture
def check_capture_10_packets(capture_interface, cmd_dumpcap, traffic_generator):
    # Fixture factory: returns a checker that captures 10 packets matching the
    # traffic generator's filter and verifies the resulting capture file.
    start_traffic, cfilter = traffic_generator
    def check_capture_10_packets_real(self, cmd=None, to_stdout=False):
        self.assertIsNotNone(cmd)
        testout_file = self.filename_from_id(testout_pcap)
        stop_traffic = start_traffic()
        if to_stdout:
            # Write the capture to stdout ('-w -') and redirect it into the
            # output file through the shell.
            capture_proc = self.runProcess(capture_command(cmd,
                '-i', '"{}"'.format(capture_interface),
                '-p',
                '-w', '-',
                '-c', '10',
                '-a', 'duration:{}'.format(capture_duration),
                '-f', '"{}"'.format(cfilter),
                '>', testout_file,
                shell=True
            ),
            shell=True
            )
        else:
            capture_proc = self.runProcess(capture_command(cmd,
                '-i', capture_interface,
                '-p',
                '-w', testout_file,
                '-c', '10',
                '-a', 'duration:{}'.format(capture_duration),
                '-f', cfilter,
            ))
        stop_traffic()
        capture_returncode = capture_proc.returncode
        if capture_returncode != 0:
            # On failure, log the tool's interface list ('-D') to help
            # diagnose capture-interface problems.
            self.log_fd.write('{} -D output:\n'.format(cmd))
            self.runProcess((cmd, '-D'))
        self.assertEqual(capture_returncode, 0)
        self.checkPacketCount(10)
    return check_capture_10_packets_real
@fixtures.fixture
def check_capture_fifo(cmd_dumpcap):
    # Fixture factory: returns a checker that replays canned DHCP traffic
    # through a named pipe (fifo) and captures from it.
    if sys.platform == 'win32':
        fixtures.skip('Test requires OS fifo support.')
    def check_capture_fifo_real(self, cmd=None):
        self.assertIsNotNone(cmd)
        testout_file = self.filename_from_id(testout_pcap)
        fifo_file = self.filename_from_id('testout.fifo')
        try:
            # If a previous test left its fifo laying around, e.g. from a failure, remove it.
            os.unlink(fifo_file)
        except:
            pass
        os.mkfifo(fifo_file)
        slow_dhcp_cmd = subprocesstest.cat_dhcp_command('slow')
        # Feed the fifo in the background while the capture tool reads it.
        fifo_proc = self.startProcess(
            ('{0} > {1}'.format(slow_dhcp_cmd, fifo_file)),
            shell=True)
        capture_proc = self.assertRun(capture_command(cmd,
            '-i', fifo_file,
            '-p',
            '-w', testout_file,
            '-a', 'duration:{}'.format(capture_duration),
        ))
        fifo_proc.kill()
        self.assertTrue(os.path.isfile(testout_file))
        self.checkPacketCount(8)
    return check_capture_fifo_real
@fixtures.fixture
def check_capture_stdin(cmd_dumpcap):
    # Capturing always requires dumpcap, hence the dependency on it.
    def check_capture_stdin_real(self, cmd=None):
        # Similar to suite_io.check_io_4_packets.
        self.assertIsNotNone(cmd)
        testout_file = self.filename_from_id(testout_pcap)
        slow_dhcp_cmd = subprocesstest.cat_dhcp_command('slow')
        capture_cmd = capture_command(cmd,
            '-i', '-',
            '-w', testout_file,
            '-a', 'duration:{}'.format(capture_duration),
            shell=True
        )
        # A non-string cmd whose first element contains '-k' is the GUI
        # (Wireshark) invocation.
        is_gui = type(cmd) != str and '-k' in cmd[0]
        if is_gui:
            # Enable console logging so the startup/capture messages can be
            # grepped below.
            capture_cmd += ' -o console.log.level:127'
        pipe_proc = self.assertRun(slow_dhcp_cmd + ' | ' + capture_cmd, shell=True)
        if is_gui:
            self.assertTrue(self.grepOutput('Wireshark is up and ready to go'), 'No startup message.')
            self.assertTrue(self.grepOutput('Capture started'), 'No capture start message.')
            self.assertTrue(self.grepOutput('Capture stopped'), 'No capture stop message.')
        self.assertTrue(os.path.isfile(testout_file))
        self.checkPacketCount(8)
    return check_capture_stdin_real
@fixtures.fixture
def check_capture_read_filter(capture_interface, traffic_generator):
    # Fixture factory: capture with a two-pass ('-2') read filter ('-R') that
    # should match nothing, and verify that no packets end up in the file.
    start_traffic, cfilter = traffic_generator
    def check_capture_read_filter_real(self, cmd=None):
        self.assertIsNotNone(cmd)
        testout_file = self.filename_from_id(testout_pcap)
        stop_traffic = start_traffic()
        capture_proc = self.assertRun(capture_command(cmd,
            '-i', capture_interface,
            '-p',
            '-w', testout_file,
            '-2',
            '-R', 'dcerpc.cn_call_id==123456', # Something unlikely.
            '-c', '10',
            '-a', 'duration:{}'.format(capture_duration),
            '-f', cfilter,
        ))
        stop_traffic()
        self.checkPacketCount(0)
    return check_capture_read_filter_real
@fixtures.fixture
def check_capture_snapshot_len(capture_interface, cmd_tshark, traffic_generator):
    # Fixture factory: capture with a snapshot length ('-s') and verify, via
    # tshark, that no captured frame exceeds that length.
    start_traffic, cfilter = traffic_generator
    def check_capture_snapshot_len_real(self, cmd=None):
        self.assertIsNotNone(cmd)
        stop_traffic = start_traffic()
        testout_file = self.filename_from_id(testout_pcap)
        capture_proc = self.assertRun(capture_command(cmd,
            '-i', capture_interface,
            '-p',
            '-w', testout_file,
            '-s', str(snapshot_len),
            '-a', 'duration:{}'.format(capture_duration),
            '-f', cfilter,
        ))
        stop_traffic()
        self.assertTrue(os.path.isfile(testout_file))

        # Use tshark to filter out all packets larger than snapshot_len bytes;
        # the result must be empty.
        testout2_file = self.filename_from_id('testout2.pcap')
        filter_proc = self.assertRun((cmd_tshark,
            '-r', testout_file,
            '-w', testout2_file,
            '-Y', 'frame.cap_len>{}'.format(snapshot_len),
        ))
        self.checkPacketCount(0, cap_file=testout2_file)
    return check_capture_snapshot_len_real
@fixtures.fixture
def check_dumpcap_autostop_stdin(cmd_dumpcap):
    # Fixture factory: pipe canned DHCP traffic into dumpcap and check that
    # the '-a' autostop condition (packets OR filesize, exactly one) holds.
    def check_dumpcap_autostop_stdin_real(self, packets=None, filesize=None):
        # Similar to check_capture_stdin.
        testout_file = self.filename_from_id(testout_pcap)
        cat100_dhcp_cmd = subprocesstest.cat_dhcp_command('cat100')
        condition='oops:invalid'
        self.assertTrue(packets is not None or filesize is not None, 'Need one of packets or filesize')
        self.assertFalse(packets is not None and filesize is not None, 'Need one of packets or filesize')
        if packets is not None:
            condition = 'packets:{}'.format(packets)
        elif filesize is not None:
            # filesize is expressed in kB on the dumpcap command line.
            condition = 'filesize:{}'.format(filesize)
        capture_cmd = ' '.join((cmd_dumpcap,
            '-i', '-',
            '-w', testout_file,
            '-a', condition,
        ))
        pipe_proc = self.assertRun(cat100_dhcp_cmd + ' | ' + capture_cmd, shell=True)
        self.assertTrue(os.path.isfile(testout_file))

        if packets is not None:
            self.checkPacketCount(packets)
        elif filesize is not None:
            capturekb = os.path.getsize(testout_file) / 1000
            self.assertGreaterEqual(capturekb, filesize)
    return check_dumpcap_autostop_stdin_real
@fixtures.fixture
def check_dumpcap_ringbuffer_stdin(cmd_dumpcap):
    # Fixture factory: pipe canned DHCP traffic into dumpcap in two-file
    # ringbuffer mode ('-b') and validate both output files.
    def check_dumpcap_ringbuffer_stdin_real(self, packets=None, filesize=None):
        # Similar to check_capture_stdin.
        rb_unique = 'dhcp_rb_' + uuid.uuid4().hex[:6] # Random ID
        testout_file = '{}.{}.pcapng'.format(self.id(), rb_unique)
        testout_glob = '{}.{}_*.pcapng'.format(self.id(), rb_unique)
        cat100_dhcp_cmd = subprocesstest.cat_dhcp_command('cat100')
        condition='oops:invalid'
        self.assertTrue(packets is not None or filesize is not None, 'Need one of packets or filesize')
        self.assertFalse(packets is not None and filesize is not None, 'Need one of packets or filesize')
        if packets is not None:
            condition = 'packets:{}'.format(packets)
        elif filesize is not None:
            # filesize is expressed in kB on the dumpcap command line.
            condition = 'filesize:{}'.format(filesize)
        capture_cmd = ' '.join((cmd_dumpcap,
            '-i', '-',
            '-w', testout_file,
            '-a', 'files:2',
            '-b', condition,
        ))
        pipe_proc = self.assertRun(cat100_dhcp_cmd + ' | ' + capture_cmd, shell=True)
        # Ringbuffer output names carry a numbered suffix; find them by glob.
        rb_files = glob.glob(testout_glob)
        for rbf in rb_files:
            self.cleanup_files.append(rbf)
        self.assertEqual(len(rb_files), 2)
        for rbf in rb_files:
            self.assertTrue(os.path.isfile(rbf))
            if packets is not None:
                self.checkPacketCount(packets, cap_file=rbf)
            elif filesize is not None:
                capturekb = os.path.getsize(rbf) / 1000
                self.assertGreaterEqual(capturekb, filesize)
    return check_dumpcap_ringbuffer_stdin_real
@fixtures.fixture
def check_dumpcap_pcapng_sections(cmd_dumpcap, cmd_tshark, capture_file):
    # Fixture factory: feed one or two pcapng streams (each containing
    # multiple Section Header Blocks) into dumpcap via fifos and validate the
    # resulting packet, IDB, and "User Application" option counts.
    if sys.platform == 'win32':
        fixtures.skip('Test requires OS fifo support.')
    def check_dumpcap_pcapng_sections_real(self, multi_input=False, multi_output=False):
        # Make sure we always test multiple SHBs in an input.
        in_files_l = [ [
            capture_file('many_interfaces.pcapng.1'),
            capture_file('many_interfaces.pcapng.2')
        ] ]
        if multi_input:
            in_files_l.append([ capture_file('many_interfaces.pcapng.3') ])
        fifo_files = []
        fifo_procs = []
        # Default values for our validity tests
        check_val_d = {
            'filename': None,
            'packet_count': 0,
            'idb_count': 0,
            'ua_pt1_count': 0,
            'ua_pt2_count': 0,
            'ua_pt3_count': 0,
            'ua_dc_count': 0,
        }
        check_vals = [ check_val_d ]

        # One fifo per input stream, fed in the background.
        for in_files in in_files_l:
            fifo_file = self.filename_from_id('dumpcap_pcapng_sections_{}.fifo'.format(len(fifo_files) + 1))
            fifo_files.append(fifo_file)
            # If a previous test left its fifo laying around, e.g. from a failure, remove it.
            try:
                os.unlink(fifo_file)
            except: pass
            os.mkfifo(fifo_file)
            cat_cmd = subprocesstest.cat_cap_file_command(in_files)
            fifo_procs.append(self.startProcess(('{0} > {1}'.format(cat_cmd, fifo_file)), shell=True))

        if multi_output:
            rb_unique = 'sections_rb_' + uuid.uuid4().hex[:6] # Random ID
            testout_glob = '{}.{}_*.pcapng'.format(self.id(), rb_unique)
            testout_file = '{}.{}.pcapng'.format(self.id(), rb_unique)
            check_vals.append(check_val_d.copy())
            # check_vals[]['filename'] will be filled in below
        else:
            testout_file = self.filename_from_id(testout_pcapng)
            check_vals[0]['filename'] = testout_file

        # Capture commands
        if not multi_input and not multi_output:
            # Passthrough SHBs, single output file
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-w', testout_file
            )
            check_vals[0]['packet_count'] = 79
            check_vals[0]['idb_count'] = 22
            check_vals[0]['ua_pt1_count'] = 1
            check_vals[0]['ua_pt2_count'] = 1
        elif not multi_input and multi_output:
            # Passthrough SHBs, multiple output files
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-w', testout_file,
                '-a', 'files:2',
                '-b', 'packets:53'
            )
            check_vals[0]['packet_count'] = 53
            check_vals[0]['idb_count'] = 11
            check_vals[0]['ua_pt1_count'] = 1
            check_vals[1]['packet_count'] = 26
            check_vals[1]['idb_count'] = 22
            check_vals[1]['ua_pt1_count'] = 1
            check_vals[1]['ua_pt2_count'] = 1
        elif multi_input and not multi_output:
            # Dumpcap SHBs, single output file
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-i', fifo_files[1],
                '-w', testout_file
            )
            check_vals[0]['packet_count'] = 88
            check_vals[0]['idb_count'] = 35
            check_vals[0]['ua_dc_count'] = 1
        else:
            # Dumpcap SHBs, multiple output files
            capture_cmd_args = (
                '-i', fifo_files[0],
                '-i', fifo_files[1],
                '-w', testout_file,
                '-a', 'files:2',
                '-b', 'packets:53'
            )
            check_vals[0]['packet_count'] = 53
            check_vals[0]['idb_count'] = 13
            check_vals[0]['ua_dc_count'] = 1
            check_vals[1]['packet_count'] = 35
            check_vals[1]['idb_count'] = 35
            check_vals[1]['ua_dc_count'] = 1
        capture_cmd = capture_command(cmd_dumpcap, *capture_cmd_args)
        capture_proc = self.assertRun(capture_cmd)
        for fifo_proc in fifo_procs: fifo_proc.kill()
        rb_files = []
        if multi_output:
            rb_files = sorted(glob.glob(testout_glob))
            self.assertEqual(len(rb_files), 2)
            check_vals[0]['filename'] = rb_files[0]
            check_vals[1]['filename'] = rb_files[1]
        for rbf in rb_files:
            self.cleanup_files.append(rbf)
            self.assertTrue(os.path.isfile(rbf))

        # Output tests
        if not multi_input and not multi_output:
            # Check strict bit-for-bit passthrough.
            in_hash = hashlib.sha256()
            out_hash = hashlib.sha256()
            for in_file in in_files_l[0]:
                in_cap_file = capture_file(in_file)
                with open(in_cap_file, 'rb') as f:
                    in_hash.update(f.read())
            with open(testout_file, 'rb') as f:
                out_hash.update(f.read())
            self.assertEqual(in_hash.hexdigest(), out_hash.hexdigest())

        # many_interfaces.pcapng.1 : 64 packets written by "Passthrough test #1"
        # many_interfaces.pcapng.2 : 15 packets written by "Passthrough test #2"
        # many_interfaces.pcapng.3 : 9 packets written by "Passthrough test #3"
        # Each has 11 interfaces.
        idb_compare_eq = True
        if multi_input and multi_output:
            # Having multiple inputs forces the use of threads. In our
            # case this means that non-packet block counts in the first
            # file in is nondeterministic.
            idb_compare_eq = False
        for check_val in check_vals:
            self.checkPacketCount(check_val['packet_count'], cap_file=check_val['filename'])

            # Dump the capture's block structure via the MIME Files reader
            # and count the blocks/options of interest in the output.
            tshark_proc = self.assertRun(capture_command(cmd_tshark,
                '-r', check_val['filename'],
                '-V',
                '-X', 'read_format:MIME Files Format'
            ))
            # XXX Are there any other sanity checks we should run?
            if idb_compare_eq:
                self.assertEqual(self.countOutput(r'Block: Interface Description Block',
                    proc=tshark_proc), check_val['idb_count'])
            else:
                self.assertGreaterEqual(self.countOutput(r'Block: Interface Description Block',
                    proc=tshark_proc), check_val['idb_count'])
                idb_compare_eq = True
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #1',
                proc=tshark_proc), check_val['ua_pt1_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #2',
                proc=tshark_proc), check_val['ua_pt2_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Passthrough test #3',
                proc=tshark_proc), check_val['ua_pt3_count'])
            self.assertEqual(self.countOutput(r'Option: User Application = Dumpcap \(Wireshark\)',
                proc=tshark_proc), check_val['ua_dc_count'])
    return check_dumpcap_pcapng_sections_real
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_wireshark_capture(subprocesstest.SubprocessTestCase):
    """Capture tests driven through the Wireshark GUI ('-k')."""
    def test_wireshark_capture_10_packets_to_file(self, wireshark_k, check_capture_10_packets, make_screenshot_on_error):
        '''Capture 10 packets from the network to a file using Wireshark'''
        with make_screenshot_on_error():
            check_capture_10_packets(self, cmd=wireshark_k)

    # Wireshark doesn't currently support writing to stdout while capturing.
    # def test_wireshark_capture_10_packets_to_stdout(self, wireshark_k, check_capture_10_packets):
    #     '''Capture 10 packets from the network to stdout using Wireshark'''
    #     check_capture_10_packets(self, cmd=wireshark_k, to_stdout=True)

    def test_wireshark_capture_from_fifo(self, wireshark_k, check_capture_fifo, make_screenshot_on_error):
        '''Capture from a fifo using Wireshark'''
        with make_screenshot_on_error():
            check_capture_fifo(self, cmd=wireshark_k)

    def test_wireshark_capture_from_stdin(self, wireshark_k, check_capture_stdin, make_screenshot_on_error):
        '''Capture from stdin using Wireshark'''
        with make_screenshot_on_error():
            check_capture_stdin(self, cmd=wireshark_k)

    def test_wireshark_capture_snapshot_len(self, wireshark_k, check_capture_snapshot_len, make_screenshot_on_error):
        '''Capture truncated packets using Wireshark'''
        with make_screenshot_on_error():
            check_capture_snapshot_len(self, cmd=wireshark_k)
@fixtures.mark_usefixtures('test_env')
@fixtures.uses_fixtures
class case_tshark_capture(subprocesstest.SubprocessTestCase):
    """Capture tests driven through TShark."""
    def test_tshark_capture_10_packets_to_file(self, cmd_tshark, check_capture_10_packets):
        '''Capture 10 packets from the network to a file using TShark'''
        check_capture_10_packets(self, cmd=cmd_tshark)

    def test_tshark_capture_10_packets_to_stdout(self, cmd_tshark, check_capture_10_packets):
        '''Capture 10 packets from the network to stdout using TShark'''
        check_capture_10_packets(self, cmd=cmd_tshark, to_stdout=True)

    def test_tshark_capture_from_fifo(self, cmd_tshark, check_capture_fifo):
        '''Capture from a fifo using TShark'''
        check_capture_fifo(self, cmd=cmd_tshark)

    def test_tshark_capture_from_stdin(self, cmd_tshark, check_capture_stdin):
        '''Capture from stdin using TShark'''
        check_capture_stdin(self, cmd=cmd_tshark)

    def test_tshark_capture_snapshot_len(self, cmd_tshark, check_capture_snapshot_len):
        '''Capture truncated packets using TShark'''
        check_capture_snapshot_len(self, cmd=cmd_tshark)
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_capture(subprocesstest.SubprocessTestCase):
    """Capture tests driven through Dumpcap."""
    def test_dumpcap_capture_10_packets_to_file(self, cmd_dumpcap, check_capture_10_packets):
        '''Capture 10 packets from the network to a file using Dumpcap'''
        check_capture_10_packets(self, cmd=cmd_dumpcap)

    def test_dumpcap_capture_10_packets_to_stdout(self, cmd_dumpcap, check_capture_10_packets):
        '''Capture 10 packets from the network to stdout using Dumpcap'''
        check_capture_10_packets(self, cmd=cmd_dumpcap, to_stdout=True)

    def test_dumpcap_capture_from_fifo(self, cmd_dumpcap, check_capture_fifo):
        '''Capture from a fifo using Dumpcap'''
        check_capture_fifo(self, cmd=cmd_dumpcap)

    def test_dumpcap_capture_from_stdin(self, cmd_dumpcap, check_capture_stdin):
        '''Capture from stdin using Dumpcap'''
        check_capture_stdin(self, cmd=cmd_dumpcap)

    def test_dumpcap_capture_snapshot_len(self, check_capture_snapshot_len, cmd_dumpcap):
        '''Capture truncated packets using Dumpcap'''
        check_capture_snapshot_len(self, cmd=cmd_dumpcap)
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_autostop(subprocesstest.SubprocessTestCase):
    """Autostop ('-a') condition tests for Dumpcap."""
    # duration, filesize, packets, files
    def test_dumpcap_autostop_filesize(self, check_dumpcap_autostop_stdin):
        '''Capture from stdin using Dumpcap until we reach a file size limit'''
        check_dumpcap_autostop_stdin(self, filesize=15)

    def test_dumpcap_autostop_packets(self, check_dumpcap_autostop_stdin):
        '''Capture from stdin using Dumpcap until we reach a packet limit'''
        check_dumpcap_autostop_stdin(self, packets=97) # Last prime before 100. Arbitrary.
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_ringbuffer(subprocesstest.SubprocessTestCase):
    """Ringbuffer ('-b') mode tests for Dumpcap."""
    # duration, interval, filesize, packets, files
    def test_dumpcap_ringbuffer_filesize(self, check_dumpcap_ringbuffer_stdin):
        '''Capture from stdin using Dumpcap and write multiple files until we reach a file size limit'''
        check_dumpcap_ringbuffer_stdin(self, filesize=15)

    def test_dumpcap_ringbuffer_packets(self, check_dumpcap_ringbuffer_stdin):
        '''Capture from stdin using Dumpcap and write multiple files until we reach a packet limit'''
        check_dumpcap_ringbuffer_stdin(self, packets=47) # Last prime before 50. Arbitrary.
@fixtures.mark_usefixtures('base_env')
@fixtures.uses_fixtures
class case_dumpcap_pcapng_sections(subprocesstest.SubprocessTestCase):
    """Pcapng Section Header Block handling tests for Dumpcap."""
    def test_dumpcap_pcapng_single_in_single_out(self, check_dumpcap_pcapng_sections):
        '''Capture from a single pcapng source using Dumpcap and write a single file'''
        check_dumpcap_pcapng_sections(self)

    def test_dumpcap_pcapng_single_in_multi_out(self, check_dumpcap_pcapng_sections):
        '''Capture from a single pcapng source using Dumpcap and write two files'''
        check_dumpcap_pcapng_sections(self, multi_output=True)

    def test_dumpcap_pcapng_multi_in_single_out(self, check_dumpcap_pcapng_sections):
        '''Capture from two pcapng sources using Dumpcap and write a single file'''
        check_dumpcap_pcapng_sections(self, multi_input=True)

    def test_dumpcap_pcapng_multi_in_multi_out(self, check_dumpcap_pcapng_sections):
        '''Capture from two pcapng sources using Dumpcap and write two files'''
        check_dumpcap_pcapng_sections(self, multi_input=True, multi_output=True)
| StarcoderdataPython |
4832475 | from flask import current_app
from flask_classful import FlaskView, route
from flask_json import as_json
from flexmeasures.data import db
def _check_sql_database():
    """Return True if a trivial query succeeds against the SQL database,
    False otherwise (logging the failure)."""
    try:
        db.session.execute("SELECT 1").first()
    except Exception:  # noqa: B902
        current_app.logger.exception("Database down or undetected")
        return False
    return True
class HealthAPI(FlaskView):
    """Health-check endpoints, served under /health."""

    route_base = "/health"
    trailing_slash = False

    @route("/ready", methods=["GET"])
    @as_json
    def is_ready(self):
        """
        Get readiness status
        """
        status = {"database_sql": _check_sql_database()}  # TODO: check redis
        # 200 only when every dependency check passed; 503 otherwise.
        http_code = 200 if all(status.values()) else 503
        return status, http_code
| StarcoderdataPython |
1664692 | import argparse
import logging
import logging.config
from bunsan.broker import connection_pb2
from bunsan.broker.service import consumer
from bunsan.broker.worker import worker
from google.protobuf import text_format
def main():
    """Parse command-line options, then run a broker consumer that feeds
    incoming tasks to a worker pool until the connection ends."""
    parser = argparse.ArgumentParser(description='Server')
    parser.add_argument('--logging', required=True,
                        help='Logging configuration file')
    parser.add_argument('--connection', required=True,
                        help='Proto-encoded ConnectionParameters')
    parser.add_argument('--constraints', required=True,
                        help='Proto-encoded Constraints')
    parser.add_argument('--jobs', type=int, default=1,
                        help='Number of jobs to run in parallel')
    parser.add_argument('--repository-config', required=True,
                        help='Configuration for bunsan::pm::repository')
    parser.add_argument('--tmpdir', default='/tmp',
                        help='Temporary directory for task execution')
    args = parser.parse_args()
    # Configure logging from file before creating the module logger.
    logging.config.fileConfig(args.logging)
    _logger = logging.getLogger(__name__)
    # --connection and --constraints are text-format protobuf messages.
    connection_parameters = connection_pb2.ConnectionParameters()
    constraints = connection_pb2.Constraints()
    text_format.Parse(args.connection, connection_parameters)
    text_format.Parse(args.constraints, constraints)
    _logger.info('Creating consumer')
    with consumer.Consumer(connection_parameters=connection_parameters,
                           constraints=constraints) as cns:
        _logger.info('Creating worker')
        with worker.Worker(jobs=args.jobs,
                           tmpdir=args.tmpdir,
                           repository_config=args.repository_config) as wrk:
            # Block here, dispatching incoming tasks to the worker callback.
            cns.listen_and_wait(wrk.callback)


# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
6615495 | """This module provides file I/O for the Quake network protocol.
References:
Quake Source
- id Software
- https://github.com/id-Software/Quake
The Unofficial DEM Format Description
- <NAME>, et al.
- https://www.quakewiki.net/archives/demospecs/dem/dem.html
"""
import io
import struct
# Public names exported via `from <module> import *`: one class per
# protocol message type, plus MessageBlock.
__all__ = ['Bad', 'Nop', 'Disconnect', 'UpdateStat', 'Version', 'SetView',
           'Sound', 'Time', 'Print', 'StuffText', 'SetAngle', 'ServerInfo',
           'LightStyle', 'UpdateName', 'UpdateFrags', 'ClientData',
           'StopSound', 'UpdateColors', 'Particle', 'Damage', 'SpawnStatic',
           'SpawnBinary', 'SpawnBaseline', 'TempEntity', 'SetPause',
           'SignOnNum', 'CenterPrint', 'KilledMonster', 'FoundSecret',
           'SpawnStaticSound', 'Intermission', 'Finale', 'CdTrack',
           'SellScreen', 'CutScene', 'UpdateEntity', 'MessageBlock']
class _IO:
    """Simple namespace for protocol IO

    All values are little-endian.  Coordinates and angles use the Quake
    network encodings: a coordinate is a signed short with 3 fractional
    bits (units of 1/8), an angle is one signed byte covering 360 degrees
    in 256 steps.
    """

    @staticmethod
    def _read(fmt, file):
        # Read exactly struct.calcsize(fmt) bytes and unpack a single value.
        return struct.unpack(fmt, file.read(struct.calcsize(fmt)))[0]

    class read:
        """Read IO namespace"""

        @staticmethod
        def char(file):
            # Signed 8-bit integer.
            return int(_IO._read('<b', file))

        @staticmethod
        def byte(file):
            # Unsigned 8-bit integer.
            return int(_IO._read('<B', file))

        @staticmethod
        def short(file):
            # Signed 16-bit integer.
            return int(_IO._read('<h', file))

        @staticmethod
        def long(file):
            # Signed 32-bit integer.
            return int(_IO._read('<l', file))

        @staticmethod
        def float(file):
            # 32-bit IEEE float.
            return float(_IO._read('<f', file))

        @staticmethod
        def coord(file):
            # Fixed-point coordinate: short scaled by 1/8.
            return _IO.read.short(file) * 0.125

        @staticmethod
        def position(file):
            # Triple of coords (x, y, z).
            return _IO.read.coord(file), _IO.read.coord(file), _IO.read.coord(file)

        @staticmethod
        def angle(file):
            # One byte covering 360 degrees in 256 steps.
            return _IO.read.char(file) * 360 / 256

        @staticmethod
        def angles(file):
            # Triple of angles; ordering presumed pitch/yaw/roll — TODO confirm.
            return _IO.read.angle(file), _IO.read.angle(file), _IO.read.angle(file)

        @staticmethod
        def string(file, terminal_byte=b'\x00'):
            # Read single bytes until the terminator, then decode as ASCII.
            # The terminator itself is consumed but not returned.
            string = b''
            char = _IO._read('<s', file)
            while char != terminal_byte:
                string += char
                char = _IO._read('<s', file)
            return string.decode('ascii')

    @staticmethod
    def _write(fmt, file, value):
        # Pack a single value and write it to the file object.
        data = struct.pack(fmt, value)
        file.write(data)

    class write:
        """Write IO namespace"""

        @staticmethod
        def char(file, value):
            # Signed 8-bit integer.
            _IO._write('<b', file, int(value))

        @staticmethod
        def byte(file, value):
            # Unsigned 8-bit integer.
            _IO._write('<B', file, int(value))

        @staticmethod
        def short(file, value):
            # Signed 16-bit integer.
            _IO._write('<h', file, int(value))

        @staticmethod
        def long(file, value):
            # Signed 32-bit integer.
            _IO._write('<l', file, int(value))

        @staticmethod
        def float(file, value):
            # 32-bit IEEE float.
            _IO._write('<f', file, float(value))

        @staticmethod
        def coord(file, value):
            # Inverse of read.coord: scale by 8 and store as a short.
            _IO.write.short(file, value / 0.125)

        @staticmethod
        def position(file, values):
            # Triple of coords (x, y, z).
            _IO.write.coord(file, values[0]), _IO.write.coord(file, values[1]), _IO.write.coord(file, values[2])

        @staticmethod
        def angle(file, value):
            # Inverse of read.angle: 360 degrees -> 256 steps in one byte.
            _IO.write.char(file, int(value * 256 / 360))

        @staticmethod
        def angles(file, values):
            # Triple of angles.
            _IO.write.angle(file, values[0]), _IO.write.angle(file, values[1]), _IO.write.angle(file, values[2])

        @staticmethod
        def string(file, value, terminal_byte=b'\x00'):
            # Strings are capped at 2048 characters, ASCII-encoded, and
            # written with a single terminator byte appended.
            value = value[:2048]
            size = len(value)
            format = '<%is' % (size + 1)
            v = value.encode('ascii') + terminal_byte
            data = struct.pack(format, v)
            file.write(data)
class BadMessage(Exception):
    """Exception type for malformed protocol messages.

    Note: not raised anywhere in this chunk; presumably used by consumers
    of this module.
    """
    pass
# Server-to-client ("SVC") message type opcodes, in wire-value order.
SVC_BAD = 0
SVC_NOP = 1
SVC_DISCONNECT = 2
SVC_UPDATESTAT = 3
SVC_VERSION = 4
SVC_SETVIEW = 5
SVC_SOUND = 6
SVC_TIME = 7
SVC_PRINT = 8
SVC_STUFFTEXT = 9
SVC_SETANGLE = 10
SVC_SERVERINFO = 11
SVC_LIGHTSTYLE = 12
SVC_UPDATENAME = 13
SVC_UPDATEFRAGS = 14
SVC_CLIENTDATA = 15
SVC_STOPSOUND = 16
SVC_UPDATECOLORS = 17
SVC_PARTICLE = 18
SVC_DAMAGE = 19
SVC_SPAWNSTATIC = 20
SVC_SPAWNBINARY = 21
SVC_SPAWNBASELINE = 22
SVC_TEMP_ENTITY = 23
SVC_SETPAUSE = 24
SVC_SIGNONNUM = 25
SVC_CENTERPRINT = 26
SVC_KILLEDMONSTER = 27
SVC_FOUNDSECRET = 28
SVC_SPAWNSTATICSOUND = 29
SVC_INTERMISSION = 30
SVC_FINALE = 31
SVC_CDTRACK = 32
SVC_SELLSCREEN = 33
SVC_CUTSCENE = 34
class Bad:
    """Class for representing a Bad message

    This is an error message and should not appear.
    """

    # No payload: the message is just its opcode byte.
    __slots__ = ()

    @staticmethod
    def write(file, bad=None):
        _IO.write.byte(file, SVC_BAD)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_BAD
        return Bad()
class Nop:
    """Class for representing a Nop message"""

    # No payload: the message is just its opcode byte.
    __slots__ = ()

    @staticmethod
    def write(file, nop=None):
        # The unused 'nop' parameter keeps the signature uniform with the
        # other message classes.
        _IO.write.byte(file, SVC_NOP)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_NOP
        return Nop()
class Disconnect:
    """Class for representing a Disconnect message

    Disconnect from the server and end the game. Typically this the last
    message of a demo.
    """

    # No payload: the message is just its opcode byte.
    __slots__ = ()

    @staticmethod
    def write(file, disconnect=None):
        _IO.write.byte(file, SVC_DISCONNECT)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_DISCONNECT
        return Disconnect()
class UpdateStat:
    """Class for representing UpdateStat messages

    Updates a player state value.

    Attributes:
        index: The index to update in the player state array.
        value: The new value to set.
    """

    __slots__ = (
        'index',
        'value'
    )

    def __init__(self):
        self.index = None
        self.value = None

    @staticmethod
    def write(file, update_stat):
        # Wire format: opcode, one index byte, one 32-bit value.
        _IO.write.byte(file, SVC_UPDATESTAT)
        _IO.write.byte(file, update_stat.index)
        _IO.write.long(file, update_stat.value)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_UPDATESTAT
        update_stat = UpdateStat()
        update_stat.index = _IO.read.byte(file)
        update_stat.value = _IO.read.long(file)
        return update_stat
class Version:
    """Class for representing Version messages

    Attributes:
        protocol_version: Protocol version of the server. Quake uses 15.
    """

    # The trailing comma is required: without it __slots__ was merely the
    # string 'protocol_version', which works only by accident and is
    # inconsistent with the tuple form used by the sibling message classes.
    __slots__ = (
        'protocol_version',
    )

    def __init__(self):
        self.protocol_version = None

    @staticmethod
    def write(file, version):
        # Wire format: opcode followed by a 32-bit protocol version.
        _IO.write.byte(file, SVC_VERSION)
        _IO.write.long(file, version.protocol_version)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_VERSION
        version = Version()
        version.protocol_version = _IO.read.long(file)
        return version
class SetView:
    """Class for representing SetView messages

    Sets the camera position to the given entity.

    Attributes:
        entity: The entity number
    """

    # The trailing comma is required: without it __slots__ was merely the
    # string 'entity', which works only by accident and is inconsistent
    # with the tuple form used by the sibling message classes.
    __slots__ = (
        'entity',
    )

    def __init__(self):
        self.entity = None

    @staticmethod
    def write(file, set_view):
        # Wire format: opcode followed by a 16-bit entity number.
        _IO.write.byte(file, SVC_SETVIEW)
        _IO.write.short(file, set_view.entity)

    @staticmethod
    def read(file):
        assert _IO.read.byte(file) == SVC_SETVIEW
        set_view = SetView()
        set_view.entity = _IO.read.short(file)
        return set_view
SND_VOLUME = 0b0001
SND_ATTENUATION = 0b0010
SND_LOOPING = 0b0100
class Sound:
"""Class for representing Sound messages
Plays a sound on a channel at a position.
Attributes:
entity: The entity that caused the sound.
bit_mask: A bit field indicating what data is sent.
volume: Optional. The sound volume or None.
attenuation: Optional. The sound attenuation or None.
channel: The sound channel, maximum of eight.
sound_number: The number of the sound in the sound table.
origin: The position of the sound.
"""
__slots__ = (
'entity',
'bit_mask',
'volume',
'attenuation',
'channel',
'sound_number',
'origin'
)
def __init__(self):
self.entity = None
self.bit_mask = 0b0000
self.volume = 255
self.attenuation = 1.0
self.channel = None
self.sound_number = None
self.origin = None, None, None
@staticmethod
def write(file, sound):
_IO.write.byte(file, SVC_SOUND)
_IO.write.byte(file, sound.bit_mask)
if sound.bit_mask & SND_VOLUME:
_IO.write.byte(file, sound.volume)
if sound.bit_mask & SND_ATTENUATION:
_IO.write.byte(file, sound.attenuation * 64)
channel = sound.entity << 3
channel |= sound.channel
_IO.write.short(file, channel)
_IO.write.byte(file, sound.sound_number)
_IO.write.position(file, sound.origin)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SOUND
sound = Sound()
sound.bit_mask = _IO.read.byte(file)
if sound.bit_mask & SND_VOLUME:
sound.volume = _IO.read.byte(file)
if sound.bit_mask & SND_ATTENUATION:
sound.attenuation = _IO.read.byte(file) / 64
sound.channel = _IO.read.short(file)
sound.entity = sound.channel >> 3
sound.channel &= 7
sound.sound_number = _IO.read.byte(file)
sound.origin = _IO.read.position(file)
return sound
class Time:
"""Class for representing Time messages
A time stamp that should appear in each block of messages.
Attributes:
time: The amount of elapsed time(in seconds) since the start of the
game.
"""
__slots__ = (
'time'
)
def __init__(self):
self.time = None
@staticmethod
def write(file, time):
_IO.write.byte(file, SVC_TIME)
_IO.write.float(file, time.time)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_TIME
time = Time()
time.time = _IO.read.float(file)
return time
class Print:
"""Class for representing Print messages
Prints text in the top left corner of the screen and console.
Attributes:
text: The text to be shown.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, _print):
_IO.write.byte(file, SVC_PRINT)
_IO.write.string(file, _print.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_PRINT
_print = Print()
_print.text = _IO.read.string(file)
return _print
class StuffText:
"""Class for representing StuffText messages
Text sent to the client console and ran.
Attributes:
text: The text to send to the client console.
Note: This string is terminated with the newline character.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, stuff_text):
_IO.write.byte(file, SVC_STUFFTEXT)
_IO.write.string(file, stuff_text.text, b'\n')
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_STUFFTEXT
stuff_text = StuffText()
stuff_text.text = _IO.read.string(file, b'\n')
return stuff_text
class SetAngle:
"""Class for representing SetAngle messages
Sets the camera's orientation.
Attributes:
angles: The new angles for the camera.
"""
__slots__ = (
'angles'
)
def __init__(self):
self.angles = None
@staticmethod
def write(file, set_angle):
_IO.write.byte(file, SVC_SETANGLE)
_IO.write.angles(file, set_angle.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SETANGLE
set_angle = SetAngle()
set_angle.angles = _IO.read.angles(file)
return set_angle
class ServerInfo:
"""Class for representing ServerInfo messages
Handles the loading of assets. Usually first message sent after a level
change.
Attributes:
protocol_version: Protocol version of the server. Quake uses 15.
max_clients: Number of clients.
multi: Multiplayer flag. Set to 0 for single-player and 1 for
multiplayer.
map_name: The name of the level.
models: The model table as as sequence of strings.
sounds: The sound table as a sequence of strings.
"""
__slots__ = (
'protocol_version',
'max_clients',
'multi',
'map_name',
'models',
'sounds'
)
def __init__(self):
self.protocol_version = 15
self.max_clients = 0
self.multi = 0
self.map_name = ''
self.models = []
self.sounds = []
@staticmethod
def write(file, server_data):
_IO.write.byte(file, SVC_SERVERINFO)
_IO.write.long(file, server_data.protocol_version)
_IO.write.byte(file, server_data.max_clients)
_IO.write.byte(file, server_data.multi)
_IO.write.string(file, server_data.map_name)
for model in server_data.models:
_IO.write.string(file, model)
_IO.write.byte(file, 0)
for sound in server_data.sounds:
_IO.write.string(file, sound)
_IO.write.byte(file, 0)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SERVERINFO
server_data = ServerInfo()
server_data.protocol_version = _IO.read.long(file)
server_data.max_clients = _IO.read.byte(file)
server_data.multi = _IO.read.byte(file)
server_data.map_name = _IO.read.string(file)
model = _IO.read.string(file)
while model:
server_data.models.append(model)
model = _IO.read.string(file)
server_data.models = tuple(server_data.models)
sound = _IO.read.string(file)
while sound:
server_data.sounds.append(sound)
sound = _IO.read.string(file)
server_data.sounds = tuple(server_data.sounds)
return server_data
class LightStyle:
"""Class for representing a LightStyle message
Defines the style of a light. Usually happens shortly after level change.
Attributes:
style: The light style number.
string: A string of arbitrary length representing the brightness of
the light. The brightness is mapped to the characters 'a' to 'z',
with 'a' being black and 'z' being pure white.
Example:
# Flickering light
light_style_message = LightStyle()
light_style.style = 0
light_style.string = 'aaazaazaaaaaz'
"""
__slots__ = (
'style',
'string'
)
def __init__(self):
self.style = None
self.string = None
@staticmethod
def write(file, light_style):
_IO.write.byte(file, SVC_LIGHTSTYLE)
_IO.write.byte(file, light_style.style)
_IO.write.string(file, light_style.string)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_LIGHTSTYLE
light_style = LightStyle()
light_style.style = _IO.read.byte(file)
light_style.string = _IO.read.string(file)
return light_style
class UpdateName:
"""Class for representing UpdateName messages
Sets the player's name.
Attributes:
player: The player number to update.
name: The new name as a string.
"""
__slots__ = (
'player',
'name'
)
def __init__(self):
self.player = None
self.name = None
@staticmethod
def write(file, update_name):
_IO.write.byte(file, SVC_UPDATENAME)
_IO.write.byte(file, update_name.player)
_IO.write.string(file, update_name.name)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATENAME
update_name = UpdateName()
update_name.player = _IO.read.byte(file)
update_name.name = _IO.read.string(file)
return update_name
class UpdateFrags:
"""Class for representing UpdateFrags messages
Sets the player's frag count.
Attributes:
player: The player to update.
frags: The new frag count.
"""
__slots__ = (
'player',
'frags'
)
def __init__(self):
self.player = None
self.frags = None
@staticmethod
def write(file, update_frags):
_IO.write.byte(file, SVC_UPDATEFRAGS)
_IO.write.byte(file, update_frags.player)
_IO.write.short(file, update_frags.frags)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATEFRAGS
update_frags = UpdateFrags()
update_frags.player = _IO.read.byte(file)
update_frags.frags = _IO.read.short(file)
return update_frags
# Client Data bit mask
SU_VIEWHEIGHT = 0b0000000000000001
SU_IDEALPITCH = 0b0000000000000010
SU_PUNCH1 = 0b0000000000000100
SU_PUNCH2 = 0b0000000000001000
SU_PUNCH3 = 0b0000000000010000
SU_VELOCITY1 = 0b0000000000100000
SU_VELOCITY2 = 0b0000000001000000
SU_VELOCITY3 = 0b0000000010000000
SU_ITEMS = 0b0000001000000000
SU_ONGROUND = 0b0000010000000000
SU_INWATER = 0b0000100000000000
SU_WEAPONFRAME = 0b0001000000000000
SU_ARMOR = 0b0010000000000000
SU_WEAPON = 0b0100000000000000
class ClientData:
"""Class for representing ClientData messages
Server information about this client.
Attributes:
bit_mask: A bit field indicating what data is sent.
view_height: Optional. The view offset from the origin along the z-axis.
ideal_pitch: Optional. The calculated angle for looking up/down slopes.
punch_angle: Optional. A triple representing camera shake.
velocity: Optional. Player velocity.
item_bit_mask: A bit field for player inventory.
on_ground: Flag indicating if player is on the ground.
in_water: Flag indicating if player is in a water volume.
weapon_frame: Optional. The animation frame of the weapon.
armor: Optional. The current armor value.
weapon: Optional. The model number in the model table.
health: The current health value.
active_ammo: The amount count for the active weapon.
ammo: The current ammo counts as a quadruple.
active_weapon: The actively held weapon.
"""
__slots__ = (
'bit_mask',
'view_height',
'ideal_pitch',
'punch_angle',
'velocity',
'item_bit_mask',
'on_ground',
'in_water',
'weapon_frame',
'armor',
'weapon',
'health',
'active_ammo',
'ammo',
'active_weapon'
)
def __init__(self):
self.bit_mask = 0b0000000000000000
self.view_height = 22
self.ideal_pitch = 0
self.punch_angle = 0, 0, 0
self.velocity = 0, 0, 0
self.item_bit_mask = 0b0000
self.on_ground = False
self.in_water = False
self.weapon_frame = 0
self.armor = 0
self.weapon = None
self.health = None
self.active_ammo = None
self.ammo = None
self.active_weapon = None
@staticmethod
def write(file, client_data):
_IO.write.byte(file, SVC_CLIENTDATA)
if client_data.on_ground:
client_data.bit_mask |= SU_ONGROUND
if client_data.in_water:
client_data.bit_mask |= SU_INWATER
_IO.write.short(file, client_data.bit_mask)
if client_data.bit_mask & SU_VIEWHEIGHT:
_IO.write.char(file, client_data.view_height)
if client_data.bit_mask & SU_IDEALPITCH:
_IO.write.char(file, client_data.ideal_pitch)
if client_data.bit_mask & SU_PUNCH1:
pa = client_data.punch_angle
_IO.write.angle(file, pa[0])
if client_data.bit_mask & SU_VELOCITY1:
ve = client_data.velocity
_IO.write.char(file, ve[0] // 16)
if client_data.bit_mask & SU_PUNCH2:
pa = client_data.punch_angle
_IO.write.angle(file, pa[1])
if client_data.bit_mask & SU_VELOCITY2:
ve = client_data.velocity
_IO.write.char(file, ve[1] // 16)
if client_data.bit_mask & SU_PUNCH3:
pa = client_data.punch_angle
_IO.write.angle(file, pa[2])
if client_data.bit_mask & SU_VELOCITY3:
ve = client_data.velocity
_IO.write.char(file, ve[2] // 16)
_IO.write.long(file, client_data.item_bit_mask)
if client_data.bit_mask & SU_WEAPONFRAME:
_IO.write.byte(file, client_data.weapon_frame)
if client_data.bit_mask & SU_ARMOR:
_IO.write.byte(file, client_data.armor)
if client_data.bit_mask & SU_WEAPON:
_IO.write.byte(file, client_data.weapon)
_IO.write.short(file, client_data.health)
_IO.write.byte(file, client_data.active_ammo)
_IO.write.byte(file, client_data.ammo[0])
_IO.write.byte(file, client_data.ammo[1])
_IO.write.byte(file, client_data.ammo[2])
_IO.write.byte(file, client_data.ammo[3])
_IO.write.byte(file, client_data.active_weapon)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CLIENTDATA
client_data = ClientData()
client_data.bit_mask = _IO.read.short(file)
client_data.on_ground = client_data.bit_mask & SU_ONGROUND != 0
client_data.in_water = client_data.bit_mask & SU_INWATER != 0
if client_data.bit_mask & SU_VIEWHEIGHT:
client_data.view_height = _IO.read.char(file)
if client_data.bit_mask & SU_IDEALPITCH:
client_data.ideal_pitch = _IO.read.char(file)
if client_data.bit_mask & SU_PUNCH1:
pa = client_data.punch_angle
client_data.punch_angle = _IO.read.angle(file), pa[1], pa[2]
if client_data.bit_mask & SU_VELOCITY1:
ve = client_data.velocity
client_data.velocity = _IO.read.char(file) * 16, ve[1], ve[2]
if client_data.bit_mask & SU_PUNCH2:
pa = client_data.punch_angle
client_data.punch_angle = pa[0], _IO.read.angle(file), pa[2]
if client_data.bit_mask & SU_VELOCITY2:
ve = client_data.velocity
client_data.velocity = ve[0], _IO.read.char(file) * 16, ve[2]
if client_data.bit_mask & SU_PUNCH3:
pa = client_data.punch_angle
client_data.punch_angle = pa[0], pa[1], _IO.read.angle(file)
if client_data.bit_mask & SU_VELOCITY3:
ve = client_data.velocity
client_data.velocity = ve[0], ve[1], _IO.read.char(file) * 16
client_data.item_bit_mask = _IO.read.long(file)
if client_data.bit_mask & SU_WEAPONFRAME:
client_data.weapon_frame = _IO.read.byte(file)
if client_data.bit_mask & SU_ARMOR:
client_data.armor = _IO.read.byte(file)
if client_data.bit_mask & SU_WEAPON:
client_data.weapon = _IO.read.byte(file)
client_data.health = _IO.read.short(file)
client_data.active_ammo = _IO.read.byte(file)
client_data.ammo = _IO.read.byte(file), _IO.read.byte(file), _IO.read.byte(file), _IO.read.byte(file)
client_data.active_weapon = _IO.read.byte(file)
return client_data
class StopSound:
"""Class for representing StopSound messages
Stops a playing sound.
Attributes:
channel: The channel on which the sound is playing.
entity: The entity that caused the sound.
"""
__slots__ = (
'channel',
'entity'
)
def __init__(self):
self.channel = None
@staticmethod
def write(file, stop_sound):
_IO.write.byte(file, SVC_STOPSOUND)
data = stop_sound.entity << 3 | (stop_sound.channel & 0x07)
_IO.write.short(file, data)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_STOPSOUND
stop_sound = StopSound()
data = _IO.read.short(file)
stop_sound.channel = data & 0x07
stop_sound.entity = data >> 3
return stop_sound
class UpdateColors:
"""Class for representing UpdateColors messages
Sets the player's colors.
Attributes:
player: The player to update.
colors: The combined shirt/pant color.
"""
__slots__ = (
'player',
'colors'
)
def __init__(self):
self.player = None
self.colors = None
@staticmethod
def write(file, update_colors):
_IO.write.byte(file, SVC_UPDATECOLORS)
_IO.write.byte(file, update_colors.player)
_IO.write.byte(file, update_colors.colors)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_UPDATECOLORS
update_colors = UpdateColors()
update_colors.player = _IO.read.byte(file)
update_colors.colors = _IO.read.byte(file)
return update_colors
class Particle:
"""Class for representing Particle messages
Creates particle effects
Attributes:
origin: The origin position of the particles.
direction: The velocity of the particles represented as a triple.
count: The number of particles.
color: The color index of the particle.
"""
__slots__ = (
'origin',
'direction',
'count',
'color'
)
def __init__(self):
self.origin = None
self.direction = None
self.count = None
self.color = None
@staticmethod
def write(file, particle):
_IO.write.byte(file, SVC_PARTICLE)
_IO.write.position(file, particle.origin)
_IO.write.char(file, particle.direction[0] * 16)
_IO.write.char(file, particle.direction[1] * 16)
_IO.write.char(file, particle.direction[2] * 16)
_IO.write.byte(file, particle.count)
_IO.write.byte(file, particle.color)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_PARTICLE
particle = Particle()
particle.origin = _IO.read.position(file)
particle.direction = _IO.read.char(file) / 16, _IO.read.char(file) / 16, _IO.read.char(file) / 16,
particle.count = _IO.read.byte(file)
particle.color = _IO.read.byte(file)
return particle
class Damage:
"""Class for representing Damage messages
Damage information
Attributes:
armor: The damage amount to be deducted from player armor.
blood: The damage amount to be deducted from player health.
origin: The position of the entity that inflicted the damage.
"""
__slots__ = (
'armor',
'blood',
'origin'
)
def __init__(self):
self.armor = None
self.blood = None
self.origin = None
@staticmethod
def write(file, damage):
_IO.write.byte(file, SVC_DAMAGE)
_IO.write.byte(file, damage.armor)
_IO.write.byte(file, damage.blood)
_IO.write.position(file, damage.origin)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_DAMAGE
damage = Damage()
damage.armor = _IO.read.byte(file)
damage.blood = _IO.read.byte(file)
damage.origin = _IO.read.position(file)
return damage
class SpawnStatic:
"""Class for representing SpawnStatic messages
Creates a static entity
Attributes:
model_index: The model number in the model table.
frame: The frame number of the model.
color_map: The color map used to display the model.
skin: The skin number of the model.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'model_index',
'frame',
'color_map',
'skin',
'origin',
'angles'
)
def __init__(self):
self.model_index = None
self.frame = None
self.color_map = None
self.skin = None
self.origin = None
self.angles = None
@staticmethod
def write(file, spawn_static):
_IO.write.byte(file, SVC_SPAWNSTATIC)
_IO.write.byte(file, spawn_static.model_index)
_IO.write.byte(file, spawn_static.frame)
_IO.write.byte(file, spawn_static.color_map)
_IO.write.byte(file, spawn_static.skin)
_IO.write.position(file, spawn_static.origin)
_IO.write.angles(file, spawn_static.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNSTATIC
spawn_static = SpawnStatic()
spawn_static.model_index = _IO.read.byte(file)
spawn_static.frame = _IO.read.byte(file)
spawn_static.color_map = _IO.read.byte(file)
spawn_static.skin = _IO.read.byte(file)
spawn_static.origin = _IO.read.position(file)
spawn_static.angles = _IO.read.angles(file)
return spawn_static
class SpawnBinary:
"""Class for representing SpawnBinary messages
This is a deprecated message.
"""
__slots__ = ()
@staticmethod
def write(file):
raise BadMessage('SpawnBinary message obsolete')
@staticmethod
def read(file):
raise BadMessage('SpawnBinary message obsolete')
class SpawnBaseline:
"""Class for representing SpawnBaseline messages
Creates a dynamic entity
Attributes:
entity: The number of the entity.
model_index: The number of the model in the model table.
frame: The frame number of the model.
color_map: The color map used to display the model.
skin: The skin number of the model.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'entity',
'model_index',
'frame',
'color_map',
'skin',
'origin',
'angles'
)
def __init__(self):
self.entity = None
self.model_index = None
self.frame = None
self.color_map = None
self.skin = None
self.origin = None
self.angles = None
@staticmethod
def write(file, spawn_baseline):
_IO.write.byte(file, SVC_SPAWNBASELINE)
_IO.write.short(file, spawn_baseline.entity)
_IO.write.byte(file, spawn_baseline.model_index)
_IO.write.byte(file, spawn_baseline.frame)
_IO.write.byte(file, spawn_baseline.color_map)
_IO.write.byte(file, spawn_baseline.skin)
_IO.write.position(file, spawn_baseline.origin)
_IO.write.angles(file, spawn_baseline.angles)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNBASELINE
spawn_baseline = SpawnBaseline()
spawn_baseline.entity = _IO.read.short(file)
spawn_baseline.model_index = _IO.read.byte(file)
spawn_baseline.frame = _IO.read.byte(file)
spawn_baseline.color_map = _IO.read.byte(file)
spawn_baseline.skin = _IO.read.byte(file)
spawn_baseline.origin = _IO.read.position(file)
spawn_baseline.angles = _IO.read.angles(file)
return spawn_baseline
TE_SPIKE = 0
TE_SUPERSPIKE = 1
TE_GUNSHOT = 2
TE_EXPLOSION = 3
TE_TAREXPLOSION = 4
TE_LIGHTNING1 = 5
TE_LIGHTNING2 = 6
TE_WIZSPIKE = 7
TE_KNIGHTSPIKE = 8
TE_LIGHTNING3 = 9
TE_LAVASPLASH = 10
TE_TELEPORT = 11
TE_EXPLOSION2 = 12
TE_BEAM = 13
class TempEntity:
"""Class for representing TempEntity messages
Creates a temporary entity. The attributes of the message depend on the
type of entity being created.
Attributes:
type: The type of the temporary entity.
"""
def __init__(self):
self.type = None
@staticmethod
def write(file, temp_entity):
_IO.write.byte(file, SVC_TEMP_ENTITY)
_IO.write.byte(file, temp_entity.type)
if temp_entity.type == TE_WIZSPIKE or \
temp_entity.type == TE_KNIGHTSPIKE or \
temp_entity.type == TE_SPIKE or \
temp_entity.type == TE_SUPERSPIKE or \
temp_entity.type == TE_GUNSHOT or \
temp_entity.type == TE_EXPLOSION or \
temp_entity.type == TE_TAREXPLOSION or \
temp_entity.type == TE_LAVASPLASH or \
temp_entity.type == TE_TELEPORT:
_IO.write.position(file, temp_entity.origin)
elif temp_entity.type == TE_LIGHTNING1 or \
temp_entity.type == TE_LIGHTNING2 or \
temp_entity.type == TE_LIGHTNING3 or \
temp_entity.type == TE_BEAM:
_IO.write.short(file, temp_entity.entity)
_IO.write.position(file, temp_entity.start)
_IO.write.position(file, temp_entity.end)
elif temp_entity.type == TE_EXPLOSION2:
_IO.write.position(file, temp_entity.origin)
_IO.write.byte(file, temp_entity.color_start)
_IO.write.byte(file, temp_entity.color_length)
else:
raise BadMessage('Invalid Temporary Entity type: %r' % temp_entity.type)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_TEMP_ENTITY
temp_entity = TempEntity()
temp_entity.type = _IO.read.byte(file)
if temp_entity.type == TE_WIZSPIKE or \
temp_entity.type == TE_KNIGHTSPIKE or \
temp_entity.type == TE_SPIKE or \
temp_entity.type == TE_SUPERSPIKE or \
temp_entity.type == TE_GUNSHOT or \
temp_entity.type == TE_EXPLOSION or \
temp_entity.type == TE_TAREXPLOSION or \
temp_entity.type == TE_LAVASPLASH or \
temp_entity.type == TE_TELEPORT:
temp_entity.origin = _IO.read.position(file)
elif temp_entity.type == TE_LIGHTNING1 or \
temp_entity.type == TE_LIGHTNING2 or \
temp_entity.type == TE_LIGHTNING3 or \
temp_entity.type == TE_BEAM:
temp_entity.entity = _IO.read.short(file)
temp_entity.start = _IO.read.position(file)
temp_entity.end = _IO.read.position(file)
elif temp_entity.type == TE_EXPLOSION2:
temp_entity.origin = _IO.read.position(file)
temp_entity.color_start = _IO.read.byte(file)
temp_entity.color_length = _IO.read.byte(file)
else:
raise BadMessage(f'Invalid Temporary Entity type: {temp_entity.type}')
return temp_entity
class SetPause:
"""Class for representing SetPause messages
Sets the pause state
Attributes:
paused: The pause state. 1 for paused, 0 otherwise.
"""
__slots__ = (
'paused'
)
def __init__(self):
self.paused = None
@staticmethod
def write(file, set_pause):
_IO.write.byte(file, SVC_SETPAUSE)
_IO.write.byte(file, set_pause.paused)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SETPAUSE
set_pause = SetPause()
set_pause.paused = _IO.read.byte(file)
return set_pause
class SignOnNum:
"""Class for representing SignOnNum messages
This message represents the client state.
Attributes:
sign_on: The client state.
"""
__slots__ = (
'sign_on'
)
def __init__(self):
self.sign_on = None
@staticmethod
def write(file, sign_on_num):
_IO.write.byte(file, SVC_SIGNONNUM)
_IO.write.byte(file, sign_on_num.sign_on)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SIGNONNUM
sign_on_num = SignOnNum()
sign_on_num.sign_on = _IO.read.byte(file)
return sign_on_num
class CenterPrint:
"""Class for representing CenterPrint messages
Prints text in the center of the screen.
Attributes:
text: The text to be shown.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, center_print):
_IO.write.byte(file, SVC_CENTERPRINT)
_IO.write.string(file, center_print.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CENTERPRINT
center_print = CenterPrint()
center_print.text = _IO.read.string(file)
return center_print
class KilledMonster:
"""Class for representing KilledMonster messages
Indicates the death of a monster.
"""
__slots__ = ()
@staticmethod
def write(file, killed_monster=None):
_IO.write.byte(file, SVC_KILLEDMONSTER)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_KILLEDMONSTER
return KilledMonster()
class FoundSecret:
"""Class for representing FoundSecret messages
Indicates a secret has been found.
"""
__slots__ = ()
@staticmethod
def write(file, found_secret=None):
_IO.write.byte(file, SVC_FOUNDSECRET)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_FOUNDSECRET
return FoundSecret()
class SpawnStaticSound:
"""Class for representing SpawnStaticSound messages
Creates a static sound
Attributes:
origin: The position of the sound.
sound_number: The sound number in the sound table.
volume: The sound volume.
attenuation: The sound attenuation.
"""
__slots__ = (
'origin',
'sound_number',
'volume',
'attenuation'
)
def __init__(self):
self.origin = None
self.sound_number = None
self.volume = None
self.attenuation = None
@staticmethod
def write(file, spawn_static_sound):
_IO.write.byte(file, SVC_SPAWNSTATICSOUND)
_IO.write.position(file, spawn_static_sound.origin)
_IO.write.byte(file, spawn_static_sound.sound_number)
_IO.write.byte(file, spawn_static_sound.volume * 256)
_IO.write.byte(file, spawn_static_sound.attenuation * 64)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SPAWNSTATICSOUND
spawn_static_sound = SpawnStaticSound()
spawn_static_sound.origin = _IO.read.position(file)
spawn_static_sound.sound_number = _IO.read.byte(file)
spawn_static_sound.volume = _IO.read.byte(file) / 256
spawn_static_sound.attenuation = _IO.read.byte(file) / 64
return spawn_static_sound
class Intermission:
"""Class for representing Intermission messages
Displays the level end screen.
"""
__slots__ = ()
@staticmethod
def write(file, intermission=None):
_IO.write.byte(file, SVC_INTERMISSION)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_INTERMISSION
return Intermission()
class Finale:
"""Class for representing Finale messages
Displays the episode end screen.
Attributes:
text: The text to show.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, finale):
_IO.write.byte(file, SVC_FINALE)
_IO.write.string(file, finale.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_FINALE
finale = Finale()
finale.text = _IO.read.string(file)
return finale
class CdTrack:
"""Class for representing CdTrack messages
Selects the cd track
Attributes:
from_track: The start track.
to_track: The end track.
"""
__slots__ = (
'from_track',
'to_track'
)
def __init__(self):
self.from_track = None
self.to_track = None
@staticmethod
def write(file, cd_track):
_IO.write.byte(file, SVC_CDTRACK)
_IO.write.byte(file, cd_track.from_track)
_IO.write.byte(file, cd_track.to_track)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CDTRACK
cd_track = CdTrack()
cd_track.from_track = _IO.read.byte(file)
cd_track.to_track = _IO.read.byte(file)
return cd_track
class SellScreen:
"""Class for representing SellScreen messages
Displays the help and sell screen.
"""
__slots__ = ()
@staticmethod
def write(file, sell_screen=None):
_IO.write.byte(file, SVC_SELLSCREEN)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_SELLSCREEN
return SellScreen()
class CutScene:
"""Class for representing CutScene messages
Displays end screen and text.
Attributes:
text: The text to be shown.
"""
__slots__ = (
'text'
)
def __init__(self):
self.text = None
@staticmethod
def write(file, cut_scene):
_IO.write.byte(file, SVC_CUTSCENE)
_IO.write.string(file, cut_scene.text)
@staticmethod
def read(file):
assert _IO.read.byte(file) == SVC_CUTSCENE
cut_scene = CutScene()
cut_scene.text = _IO.read.string(file)
return cut_scene
_messages = [Bad, Nop, Disconnect, UpdateStat, Version, SetView, Sound,
Time, Print, StuffText, SetAngle, ServerInfo, LightStyle,
UpdateName, UpdateFrags, ClientData, StopSound, UpdateColors,
Particle, Damage, SpawnStatic, SpawnBinary, SpawnBaseline,
TempEntity, SetPause, SignOnNum, CenterPrint, KilledMonster,
FoundSecret, SpawnStaticSound, Intermission, Finale, CdTrack,
SellScreen, CutScene]
U_MOREBITS = 0b0000000000000001
U_ORIGIN1 = 0b0000000000000010
U_ORIGIN2 = 0b0000000000000100
U_ORIGIN3 = 0b0000000000001000
U_ANGLE2 = 0b0000000000010000
U_NOLERP = 0b0000000000100000
U_FRAME = 0b0000000001000000
U_SIGNAL = 0b0000000010000000
U_ANGLE1 = 0b0000000100000000
U_ANGLE3 = 0b0000001000000000
U_MODEL = 0b0000010000000000
U_COLORMAP = 0b0000100000000000
U_SKIN = 0b0001000000000000
U_EFFECTS = 0b0010000000000000
U_LONGENTITY = 0b0100000000000000
class UpdateEntity:
"""Class for representing UpdateEntity messages
Updates an entity.
Attributes:
bit_mask: A bit field indicating what data is sent.
entity: The number of the entity.
model_index: The number of the model in the model table.
frame: The frame number of the model.
color_map: The color map used to display the model.
skin: The skin number of the model.
effects: A bit field indicating special effects.
origin: The position of the entity.
angles: The orientation of the entity.
"""
__slots__ = (
'bit_mask',
'entity',
'model_index',
'frame',
'colormap',
'skin',
'effects',
'origin',
'angles'
)
def __init__(self):
self.bit_mask = 0b0000000000000000
self.entity = None
self.model_index = None
self.frame = None
self.colormap = None
self.skin = None
self.effects = None
self.origin = None, None, None
self.angles = None, None, None
@staticmethod
def write(file, update_entity):
_IO.write.byte(file, update_entity.bit_mask & 0xFF | 0x80)
if update_entity.bit_mask & U_MOREBITS:
_IO.write.byte(file, update_entity.bit_mask >> 8 & 0xFF)
if update_entity.bit_mask & U_LONGENTITY:
_IO.write.short(file, update_entity.entity)
else:
_IO.write.byte(file, update_entity.entity)
if update_entity.bit_mask & U_MODEL:
_IO.write.byte(file, update_entity.model_index)
if update_entity.bit_mask & U_FRAME:
_IO.write.byte(file, update_entity.frame)
if update_entity.bit_mask & U_COLORMAP:
_IO.write.byte(file, update_entity.colormap)
if update_entity.bit_mask & U_SKIN:
_IO.write.byte(file, update_entity.skin)
if update_entity.bit_mask & U_EFFECTS:
_IO.write.byte(file, update_entity.effects)
if update_entity.bit_mask & U_ORIGIN1:
_IO.write.coord(file, update_entity.origin[0])
if update_entity.bit_mask & U_ANGLE1:
_IO.write.angle(file, update_entity.angles[0])
if update_entity.bit_mask & U_ORIGIN2:
_IO.write.coord(file, update_entity.origin[1])
if update_entity.bit_mask & U_ANGLE2:
_IO.write.angle(file, update_entity.angles[1])
if update_entity.bit_mask & U_ORIGIN3:
_IO.write.coord(file, update_entity.origin[2])
if update_entity.bit_mask & U_ANGLE3:
_IO.write.angle(file, update_entity.angles[2])
@staticmethod
def read(file):
update_entity = UpdateEntity()
b = _IO.read.byte(file)
update_entity.bit_mask = b & 0x7F
if update_entity.bit_mask & U_MOREBITS:
update_entity.bit_mask |= _IO.read.byte(file) << 8
if update_entity.bit_mask & U_LONGENTITY:
update_entity.entity = _IO.read.short(file)
else:
update_entity.entity = _IO.read.byte(file)
if update_entity.bit_mask & U_MODEL:
update_entity.model_index = _IO.read.byte(file)
if update_entity.bit_mask & U_FRAME:
update_entity.frame = _IO.read.byte(file)
if update_entity.bit_mask & U_COLORMAP:
update_entity.colormap = _IO.read.byte(file)
if update_entity.bit_mask & U_SKIN:
update_entity.skin = _IO.read.byte(file)
if update_entity.bit_mask & U_EFFECTS:
update_entity.effects = _IO.read.byte(file)
if update_entity.bit_mask & U_ORIGIN1:
update_entity.origin = _IO.read.coord(file), update_entity.origin[1], update_entity.origin[2]
if update_entity.bit_mask & U_ANGLE1:
update_entity.angles = _IO.read.angle(file), update_entity.angles[1], update_entity.angles[2]
if update_entity.bit_mask & U_ORIGIN2:
update_entity.origin = update_entity.origin[0], _IO.read.coord(file), update_entity.origin[2]
if update_entity.bit_mask & U_ANGLE2:
update_entity.angles = update_entity.angles[0], _IO.read.angle(file), update_entity.angles[2]
if update_entity.bit_mask & U_ORIGIN3:
update_entity.origin = update_entity.origin[0], update_entity.origin[1], _IO.read.coord(file)
if update_entity.bit_mask & U_ANGLE3:
update_entity.angles = update_entity.angles[0], update_entity.angles[1], _IO.read.angle(file)
return update_entity
class MessageBlock:
    """Class for representing a message block

    Attributes:
        view_angles: The client view angles.

        messages: A sequence of messages.
    """

    __slots__ = (
        'view_angles',
        'messages'
    )

    def __init__(self):
        self.view_angles = None
        self.messages = []

    @staticmethod
    def write(file, message_block):
        """Serialize *message_block* to *file*.

        Layout: a 4-byte size of the serialized messages (back-patched
        once they have been written), three float view angles, then the
        messages themselves.
        """
        start_of_block = file.tell()
        # Placeholder for the block size; patched below once it is known.
        _IO.write.long(file, 0)
        _IO.write.float(file, message_block.view_angles[0])
        _IO.write.float(file, message_block.view_angles[1])
        _IO.write.float(file, message_block.view_angles[2])
        start_of_messages = file.tell()

        for message in message_block.messages:
            message.__class__.write(file, message)

        end_of_messages = file.tell()
        block_size = end_of_messages - start_of_messages
        # Back-patch the real size, then restore the stream position.
        file.seek(start_of_block)
        _IO.write.long(file, block_size)
        file.seek(end_of_messages )

    @staticmethod
    def read(file):
        """Deserialize a MessageBlock from *file* and return it."""
        message_block = MessageBlock()
        blocksize = _IO.read.long(file)
        message_block.view_angles = _IO.read.float(file), _IO.read.float(file), _IO.read.float(file)
        message_block_data = file.read(blocksize)
        buff = io.BufferedReader(io.BytesIO(message_block_data))
        # Peek one id byte at a time until the buffer is exhausted:
        # ids < 128 dispatch via the _messages table, ids with the high
        # bit set are UpdateEntity messages.
        message_id = buff.peek(1)[:1]

        while message_id != b'':
            message_id = struct.unpack('<B', message_id)[0]

            if message_id < 128:
                message = _messages[message_id].read(buff)
            else:
                message = UpdateEntity.read(buff)

            if message:
                message_block.messages.append(message)

            message_id = buff.peek(1)[:1]

        buff.close()

        return message_block
| StarcoderdataPython |
9713917 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from time import time, sleep
from .util import (
initialize_chain,
start_node,
start_nodes,
connect_nodes_bi,
connect_nodes,
sync_blocks,
sync_mempools,
sync_masternodes,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
set_cache_mocktime,
set_genesis_mocktime,
get_mocktime,
set_mocktime,
set_node_times,
p2p_port,
satoshi_round
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
    """Base class for RPC functional tests.

    Subclasses override :meth:`run_test` (and optionally
    :meth:`setup_chain`, :meth:`setup_network`, :meth:`add_options`),
    then instantiate themselves and call :meth:`main`.
    """

    def __init__(self):
        # Defaults; subclasses typically override these in their __init__.
        self.num_nodes = 4
        self.setup_clean_chain = False
        self.nodes = None

    def run_test(self):
        """The test body; must be implemented by every subclass."""
        raise NotImplementedError

    def add_options(self, parser):
        """Hook for subclasses to register extra command-line options."""
        pass

    def setup_chain(self):
        """Initialize the per-node datadirs, either empty or from the cache."""
        print("Initializing test directory "+self.options.tmpdir)
        if self.setup_clean_chain:
            initialize_chain_clean(self.options.tmpdir, self.num_nodes)
            set_genesis_mocktime()
        else:
            initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
            set_cache_mocktime()

    def stop_node(self, num_node):
        """Stop the node with index *num_node*."""
        stop_node(self.nodes[num_node], num_node)

    def setup_nodes(self):
        """Start and return all test nodes."""
        return start_nodes(self.num_nodes, self.options.tmpdir)

    def setup_network(self, split = False):
        """Start the nodes and connect them; omit the 1-2 link when *split*."""
        self.nodes = self.setup_nodes()

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.

        # If we joined network halves, connect the nodes from the joint
        # on outward. This ensures that chains are properly reorganised.
        if not split:
            connect_nodes_bi(self.nodes, 1, 2)
            sync_blocks(self.nodes[1:3])
            sync_mempools(self.nodes[1:3])

        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = split
        self.sync_all()

    def split_network(self):
        """
        Split the network of four nodes into nodes 0/1 and 2/3.
        """
        assert not self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(True)

    def sync_all(self):
        """Sync blocks and mempools, per-half when the network is split."""
        if self.is_network_split:
            sync_blocks(self.nodes[:2])
            sync_blocks(self.nodes[2:])
            sync_mempools(self.nodes[:2])
            sync_mempools(self.nodes[2:])
        else:
            sync_blocks(self.nodes)
            sync_mempools(self.nodes)

    def join_network(self):
        """
        Join the (previously split) network halves together.
        """
        assert self.is_network_split
        stop_nodes(self.nodes)
        self.setup_network(False)

    def main(self):
        """Parse options, run the chain/network setup and the test, then
        shut nodes down, clean up and exit with an appropriate status."""
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave polisds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop polisds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing polisd/polis-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                          help="Root directory for datadirs")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        # backup dir variable for removal at cleanup
        self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)

        if self.options.trace_rpc:
            logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)

        if self.options.coveragedir:
            enable_coverage(self.options.coveragedir)

        PortSeed.n = self.options.port_seed

        # Make the node binaries (and their Qt variants) resolvable by name.
        os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']

        check_json_precision()

        success = False
        try:
            os.makedirs(self.options.tmpdir, exist_ok=False)
            self.setup_chain()
            self.setup_network()
            self.run_test()
            success = True
        except JSONRPCException as e:
            print("JSONRPC error: "+e.error['message'])
            traceback.print_tb(sys.exc_info()[2])
        except AssertionError as e:
            print("Assertion failed: " + str(e))
            traceback.print_tb(sys.exc_info()[2])
        except KeyError as e:
            print("key not found: "+ str(e))
            traceback.print_tb(sys.exc_info()[2])
        except Exception as e:
            print("Unexpected exception caught during testing: " + repr(e))
            traceback.print_tb(sys.exc_info()[2])
        except KeyboardInterrupt as e:
            print("Exiting after " + repr(e))

        if not self.options.noshutdown:
            print("Stopping nodes")
            stop_nodes(self.nodes)
        else:
            print("Note: polisds were not stopped and may still be running")

        if not self.options.nocleanup and not self.options.noshutdown and success:
            print("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
            if not os.listdir(self.options.root):
                os.rmdir(self.options.root)
        else:
            print("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for f in filenames:
                    print("From" , f, ":")
                    from collections import deque
                    print("".join(deque(open(f), MAX_LINES_TO_PRINT)))

        if success:
            print("Tests successful")
            sys.exit(0)
        else:
            print("Failed")
            sys.exit(1)
# Coin amount that must be locked as collateral for each masternode.
MASTERNODE_COLLATERAL = 1000
class MasternodeInfo:
    """Value object holding the credentials and collateral of one masternode.

    Attributes:
        key: masternode private key (output of ``masternode genkey``).
        blsKey: BLS secret key (output of ``bls generate``).
        collateral_id: txid of the transaction funding the collateral.
        collateral_out: output index of the collateral within that tx.
    """

    def __init__(self, key, blsKey, collateral_id, collateral_out):
        self.key = key
        self.blsKey = blsKey
        self.collateral_id = collateral_id
        self.collateral_out = collateral_out

    def __repr__(self):
        # Helps debugging masternode setup failures in test logs.
        return ("MasternodeInfo(key=%r, blsKey=%r, collateral_id=%r, "
                "collateral_out=%r)" % (self.key, self.blsKey,
                                        self.collateral_id,
                                        self.collateral_out))
class DashTestFramework(BitcoinTestFramework):
    """Test framework that additionally sets up a network of masternodes."""

    def __init__(self, num_nodes, masterodes_count, extra_args):
        super().__init__()
        self.mn_count = masterodes_count
        self.num_nodes = num_nodes
        self.mninfo = []
        self.setup_clean_chain = True
        self.is_network_split = False
        # additional args
        self.extra_args = extra_args

    def create_simple_node(self):
        """Start one regular node and connect it to all existing nodes."""
        idx = len(self.nodes)
        args = self.extra_args
        self.nodes.append(start_node(idx, self.options.tmpdir,
                                     args))
        for i in range(0, idx):
            connect_nodes(self.nodes[i], idx)

    def get_mnconf_file(self):
        """Return the path of node0's masternode.conf."""
        return os.path.join(self.options.tmpdir, "node0/regtest/masternode.conf")

    def prepare_masternodes(self):
        """Generate keys and lock one collateral output per masternode."""
        for idx in range(0, self.mn_count):
            key = self.nodes[0].masternode("genkey")
            blsKey = self.nodes[0].bls('generate')['secret']
            address = self.nodes[0].getnewaddress()
            txid = self.nodes[0].sendtoaddress(address, MASTERNODE_COLLATERAL)
            txrow = self.nodes[0].getrawtransaction(txid, True)
            # Find the output that carries exactly the collateral amount.
            collateral_vout = 0
            for vout_idx in range(0, len(txrow["vout"])):
                vout = txrow["vout"][vout_idx]
                if vout["value"] == MASTERNODE_COLLATERAL:
                    collateral_vout = vout_idx
            # Prevent the collateral from being spent by regular sends.
            self.nodes[0].lockunspent(False,
                                      [{"txid": txid, "vout": collateral_vout}])
            self.mninfo.append(MasternodeInfo(key, blsKey, txid, collateral_vout))

    def write_mn_config(self):
        """Append one masternode.conf line per prepared masternode."""
        conf = self.get_mnconf_file()
        f = open(conf, 'a')
        for idx in range(0, self.mn_count):
            f.write("mn%d 127.0.0.1:%d %s %s %d\n" % (idx + 1, p2p_port(idx + 1),
                                                      self.mninfo[idx].key,
                                                      self.mninfo[idx].collateral_id,
                                                      self.mninfo[idx].collateral_out))
        f.close()

    def create_masternodes(self):
        """Start a node process per masternode and fully mesh the network."""
        for idx in range(0, self.mn_count):
            args = ['-externalip=127.0.0.1', '-masternode=1',
                    '-masternodeprivkey=%s' % self.mninfo[idx].key,
                    '-masternodeblsprivkey=%s' % self.mninfo[idx].blsKey] + self.extra_args
            self.nodes.append(start_node(idx + 1, self.options.tmpdir, args))
            for i in range(0, idx + 1):
                connect_nodes(self.nodes[i], idx + 1)

    def setup_network(self):
        """Bring up the faucet node, fund and start all masternodes, start
        the remaining simple nodes, then wait until every masternode shows
        up as ENABLED in the masternode list."""
        self.nodes = []
        # create faucet node for collateral and transactions
        args = self.extra_args
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        required_balance = MASTERNODE_COLLATERAL * self.mn_count + 1
        # Mine (advancing mocktime each block) until the faucet can fund
        # all masternode collaterals.
        while self.nodes[0].getbalance() < required_balance:
            set_mocktime(get_mocktime() + 1)
            set_node_times(self.nodes, get_mocktime())
            self.nodes[0].generate(1)
        # create masternodes
        self.prepare_masternodes()
        self.write_mn_config()
        # Restart the faucet node with the spork key so it can set sporks.
        stop_node(self.nodes[0], 0)
        args = ["-sporkkey=<KEY>"] + \
            self.extra_args
        self.nodes[0] = start_node(0, self.options.tmpdir,
                                   args)
        self.create_masternodes()
        # create connected simple nodes
        for i in range(0, self.num_nodes - self.mn_count - 1):
            self.create_simple_node()
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        self.nodes[0].generate(1)
        # sync nodes
        self.sync_all()
        set_mocktime(get_mocktime() + 1)
        set_node_times(self.nodes, get_mocktime())
        sync_masternodes(self.nodes, True)
        for i in range(1, self.mn_count + 1):
            res = self.nodes[0].masternode("start-alias", "mn%d" % i)
            assert (res["result"] == 'successful')
        mn_info = self.nodes[0].masternodelist("status")
        assert (len(mn_info) == self.mn_count)
        for status in mn_info.values():
            assert (status == 'ENABLED')

    def enforce_masternode_payments(self):
        """Activate the masternode payment enforcement spork."""
        self.nodes[0].spork('SPORK_8_MASTERNODE_PAYMENT_ENFORCEMENT', 0)

    def create_raw_trx(self, node_from, node_to, amount, min_inputs, max_inputs):
        """Build and sign a raw tx sending *amount* from *node_from* to
        *node_to*, using between min_inputs and max_inputs UTXOs."""
        assert (min_inputs <= max_inputs)
        # fill inputs
        inputs = []
        balances = node_from.listunspent()
        in_amount = 0.0
        last_amount = 0.0
        for tx in balances:
            if len(inputs) < min_inputs:
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
            elif in_amount > amount:
                break
            elif len(inputs) < max_inputs:
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount += float(tx['amount'])
                inputs.append(input)
            else:
                # Input limit reached but amount still not covered: replace
                # the most recently added input with the current UTXO.
                input = {}
                input["txid"] = tx['txid']
                input['vout'] = tx['vout']
                in_amount -= last_amount
                in_amount += float(tx['amount'])
                inputs[-1] = input
            last_amount = float(tx['amount'])

        assert (len(inputs) > 0)
        assert (in_amount > amount)
        # fill outputs
        receiver_address = node_to.getnewaddress()
        change_address = node_from.getnewaddress()
        fee = 0.001
        outputs = {}
        outputs[receiver_address] = satoshi_round(amount)
        outputs[change_address] = satoshi_round(in_amount - amount - fee)
        rawtx = node_from.createrawtransaction(inputs, outputs)
        return node_from.signrawtransaction(rawtx)

    def wait_for_instantlock(self, txid, node):
        """Poll for up to ~10 s until *txid* is InstantSend-locked on *node*."""
        # wait for instantsend locks
        start = time()
        locked = False
        while True:
            is_trx = node.gettransaction(txid)
            if is_trx['instantlock']:
                locked = True
                break
            if time() > start + 10:
                break
            sleep(0.1)
        return locked
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
    """Framework that runs one test binary against reference binaries."""

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        """Register options selecting the test and reference binaries."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("POLISD", "polisd"),
                          help="bitcoind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("POLISD", "polisd"),
                          help="bitcoind binary to use for reference nodes (if any)")

    def setup_network(self):
        """Start the nodes: node 0 runs the test binary, the rest the
        reference binary."""
        self.nodes = start_nodes(
            self.num_nodes, self.options.tmpdir,
            extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
            binary=[self.options.testbinary] +
                   [self.options.refbinary]*(self.num_nodes-1))
| StarcoderdataPython |
1746138 | import logging
from typing import Any, Dict, List
from pydantic import BaseModel # pylint: disable=no-name-in-module
from tgcf.plugins import FileType, TgcfMessage, TgcfPlugin
from tgcf.utils import match
class FilterList(BaseModel):
    """Blacklist/whitelist pair of plain strings (e.g. user ids)."""

    blacklist: List[str] = []
    whitelist: List[str] = []
class FilesFilterList(BaseModel):
    """Blacklist/whitelist pair of file types."""

    blacklist: List[FileType] = []
    whitelist: List[FileType] = []
class TextFilter(FilterList):
    """Text filter lists plus matching options (case sensitivity, regex)."""

    case_sensitive: bool = False
    regex: bool = False
class Filters(BaseModel):
    """Full filter configuration: user, file-type and text filters."""

    users: FilterList = FilterList()
    files: FilesFilterList = FilesFilterList()
    text: TextFilter = TextFilter()
class TgcfFilter(TgcfPlugin):
    """Plugin that drops messages failing user/file/text filter rules."""

    id_ = "filter"

    def __init__(self, data: Dict[str, Any]) -> None:
        """Parse the raw plugin config into a ``Filters`` model."""
        # Was a stray ``print``; use logging like the rest of the plugin.
        logging.info("tgcf filter data loaded")
        self.filters = Filters(**data)
        self.case_correct()
        logging.info(self.filters)

    def case_correct(self) -> None:
        """Lower-case the text lists when matching is case-insensitive."""
        textf: TextFilter = self.filters.text

        if not textf.case_sensitive:
            textf.blacklist = [item.lower() for item in textf.blacklist]
            textf.whitelist = [item.lower() for item in textf.whitelist]

    def modify(self, tm: TgcfMessage) -> TgcfMessage:
        """Return *tm* when it passes all filters, else None (dropped)."""
        if self.users_safe(tm) and self.files_safe(tm) and self.text_safe(tm):
            return tm
        return None

    def text_safe(self, tm: TgcfMessage) -> bool:
        """Check the message text against black- and whitelists."""
        flist = self.filters.text
        text = tm.text
        if not flist.case_sensitive:
            text = text.lower()
        # Empty text with no whitelist configured is always allowed.
        if not text and flist.whitelist == []:
            return True

        # Any blacklisted pattern rejects the message outright.
        for forbidden in flist.blacklist:
            if match(forbidden, text, flist.regex):
                return False

        if not flist.whitelist:
            return True  # no whitelist: everything not blacklisted passes

        for allowed in flist.whitelist:
            if match(allowed, text, flist.regex):
                return True
        # Explicit result (the original fell through returning None).
        return False

    def users_safe(self, tm: TgcfMessage) -> bool:
        """Check the sender id against black- and whitelists."""
        flist = self.filters.users
        sender = str(tm.sender_id)
        # Lazy %-style args instead of an eager f-string; fixed typo.
        logging.info("message from sender id %s", sender)
        if sender in flist.blacklist:
            return False
        if not flist.whitelist:
            return True
        return sender in flist.whitelist

    def files_safe(self, tm: TgcfMessage) -> bool:
        """Check the message file type against black- and whitelists."""
        flist = self.filters.files
        fl_type = tm.file_type
        # Was a stray debug ``print``.
        logging.debug("file type %s", fl_type)
        if fl_type in flist.blacklist:
            return False
        if not flist.whitelist:
            return True
        return fl_type in flist.whitelist
| StarcoderdataPython |
120550 | ########################################################################
#
# Copyright (c) 2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
########################################################################
import jsonpatch
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from oslo_log import log
from sysinv._i18n import _
from sysinv.api.controllers.v1 import base
from sysinv.api.controllers.v1 import collection
from sysinv.api.controllers.v1 import ptp_parameter
from sysinv.api.controllers.v1 import types
from sysinv.api.controllers.v1 import utils
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils as cutils
from sysinv import objects
LOG = log.getLogger(__name__)
class PtpInterfacePatchType(types.JsonPatchType):
    """JSON-patch type for PTP interface updates."""

    @staticmethod
    def mandatory_attrs():
        # No attributes are required to be present in a patch.
        return []
class PtpInterface(base.APIBase):
    """API representation of a PTP interface.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a PTP
    interface.
    """

    created_at = wtypes.datetime.datetime
    "Timestamp of creation of this PTP interface"

    updated_at = wtypes.datetime.datetime
    "Timestamp of update of this PTP interface"

    # Inherited from PtpParameterOwner

    id = int
    "ID (primary key) of this PTP interface"

    uuid = types.uuid
    "Unique UUID for this PTP interface"

    type = wtypes.Enum(str,
                       constants.PTP_PARAMETER_OWNER_INSTANCE,
                       constants.PTP_PARAMETER_OWNER_INTERFACE)
    "Type of parameter owner (PTP_PARAMETER_OWNER_INTERFACE)"

    capabilities = {wtypes.text: utils.ValidTypes(wtypes.text,
                                                  six.integer_types)}
    "Capabilities (metadata) of this PTP interface"

    # Fields of PtpInterface

    name = wtypes.text
    "Name given to the PTP interface"

    ptp_instance_id = int
    "ID for the PTP instance this interface is associated with"

    ptp_instance_uuid = types.uuid
    "The UUID of the host this PTP interface belongs to"

    ptp_instance_name = wtypes.text
    "The name of the associated PTP instance"

    hostnames = types.MultiType([list])
    "Name(s) of host(s) associated to this PTP interface"

    interface_names = types.MultiType([list])
    "Interface(s) associated to this PTP interface"

    parameters = types.MultiType([list])
    "List of parameters referred by this PTP interface"

    def __init__(self, **kwargs):
        """Populate every declared field from kwargs (default wtypes.Unset)."""
        self.fields = list(objects.ptp_interface.fields.keys())
        for k in self.fields:
            if not hasattr(self, k):
                # Skip DB fields that have no matching API attribute.
                continue
            setattr(self, k, kwargs.get(k, wtypes.Unset))

    @classmethod
    def convert_with_links(cls, rpc_ptp_interface, expand=True):
        """Convert an RPC object to its API form; trim fields unless *expand*."""
        ptp_interface = PtpInterface(**rpc_ptp_interface.as_dict())
        if not expand:
            ptp_interface.unset_fields_except(['id',
                                               'uuid',
                                               'type',
                                               'capabilities',
                                               'name',
                                               'ptp_instance_id',
                                               'ptp_instance_uuid',
                                               'ptp_instance_name',
                                               'hostnames',
                                               'interface_names',
                                               'parameters',
                                               'created_at',
                                               'updated_at'])

        LOG.debug("PtpInterface.convert_with_links: converted %s" %
                  ptp_interface.as_dict())
        return ptp_interface
class PtpInterfaceCollection(collection.Collection):
    """API representation of a collection of PTP interfaces."""

    ptp_interfaces = [PtpInterface]
    "A list containing PtpInterface objects"

    def __init__(self, **kwargs):
        self._type = 'ptp_interfaces'

    @classmethod
    def convert_with_links(cls, rpc_ptp_interfaces, limit, url=None,
                           expand=False, **kwargs):
        """Build a collection object from a list of RPC PTP interfaces."""
        # Local renamed so it does not shadow the imported ``collection``
        # module inside this method.
        ptp_collection = PtpInterfaceCollection()
        ptp_collection.ptp_interfaces = [
            PtpInterface.convert_with_links(entry, expand)
            for entry in rpc_ptp_interfaces
        ]
        ptp_collection.next = ptp_collection.get_next(limit, url=url, **kwargs)
        return ptp_collection
# Lock name used by @cutils.synchronized on the controller's mutating methods.
LOCK_NAME = 'PtpInterfaceController'
class PtpInterfaceController(rest.RestController):
    """REST controller for ptp interfaces."""

    ptp_parameters = ptp_parameter.PtpParameterController(
        parent="ptp_interface")
    "Expose PTP parameters as a sub-element of PTP interfaces"

    def __init__(self, parent=None):
        # When set, this controller is mounted under another resource
        # (e.g. 'ihosts' or 'iinterface') and list queries are scoped to it.
        self._parent = parent

    @wsme_pecan.wsexpose(PtpInterfaceCollection, types.uuid, types.uuid, int,
                         wtypes.text, wtypes.text)
    def get_all(self, parent_uuid=None, marker=None, limit=None, sort_key='id',
                sort_dir='asc'):
        """Retrieve a list of PTP interfaces."""
        LOG.debug("PtpInterfaceController.get_all: parent %s uuid %s type %s" %
                  (self._parent, parent_uuid, type))
        if self._parent and not parent_uuid:
            raise exception.InvalidParameterValue(_(
                "Parent id not specified."))

        limit = utils.validate_limit(limit)
        sort_dir = utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.ptp_interface.get_by_uuid(
                pecan.request.context, marker)

        # Scope the DB query by the mounting parent, if any.
        if self._parent == 'iinterface':
            ptp_interfaces = \
                pecan.request.dbapi.ptp_interfaces_get_list(
                    interface=parent_uuid, limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        elif self._parent == 'ihosts':
            ptp_interfaces = \
                pecan.request.dbapi.ptp_interfaces_get_list(
                    host=parent_uuid, limit=limit, marker=marker_obj,
                    sort_key=sort_key, sort_dir=sort_dir)
        else:
            ptp_interfaces = \
                pecan.request.dbapi.ptp_interfaces_get_list(
                    limit=limit, marker=marker_obj, sort_key=sort_key,
                    sort_dir=sort_dir)

        return PtpInterfaceCollection.convert_with_links(
            ptp_interfaces, limit, sort_key=sort_key, sort_dir=sort_dir)

    @wsme_pecan.wsexpose(PtpInterface, types.uuid)
    def get_one(self, ptp_interface_uuid):
        """Retrieve information about the given PTP interface"""
        LOG.debug("PtpInterfaceController.get_one: uuid=%s"
                  % ptp_interface_uuid)
        ptp_interface = objects.ptp_interface.get_by_uuid(
            pecan.request.context, ptp_interface_uuid)
        return PtpInterface.convert_with_links(ptp_interface)

    @cutils.synchronized(LOCK_NAME)
    @wsme_pecan.wsexpose(PtpInterface, body=PtpInterface)
    def post(self, ptp_interface):
        """Create a new PTP interface"""
        ptp_interface_dict = ptp_interface.as_dict()
        LOG.debug("PtpInterfaceController.post: %s" % ptp_interface_dict)

        """
        TODO: enforce "name" as required field here
        """

        # Translate the instance UUID from the request into the internal id
        # expected by the DB layer.
        ptp_instance_uuid = ptp_interface_dict.pop('ptp_instance_uuid', None)
        ptp_instance = objects.ptp_instance.get_by_uuid(pecan.request.context,
                                                        ptp_instance_uuid)
        ptp_interface_dict['ptp_instance_id'] = ptp_instance['id']

        return PtpInterface.convert_with_links(
            pecan.request.dbapi.ptp_interface_create(ptp_interface_dict))

    @cutils.synchronized(LOCK_NAME)
    @wsme.validate(types.uuid, [PtpInterfacePatchType])
    @wsme_pecan.wsexpose(PtpInterface, types.uuid,
                         body=[PtpInterfacePatchType])
    def patch(self, uuid, patch):
        """Update the association between PTP interface and PTP parameters."""
        if self._parent:
            raise exception.OperationNotPermitted

        LOG.debug("PtpInterfaceController.patch: uuid %s params %s" %
                  (uuid, patch))

        utils.validate_patch(patch)

        try:
            # Check PTP interface exists
            objects.ptp_interface.get_by_uuid(pecan.request.context, uuid)
        except exception.InvalidParameterValue:
            raise wsme.exc.ClientSideError(
                _("No PTP interface found for %s" % uuid))

        # Currently patch is used to add/remove PTP parameters
        # (but not having both operations in same patch)
        patch_list = list(jsonpatch.JsonPatch(patch))
        for p in patch_list:
            param_uuid = p['value']
            try:
                # Check PTP parameter exists
                pecan.request.dbapi.ptp_parameter_get(param_uuid)
            except exception.PtpParameterNotFound:
                raise wsme.exc.ClientSideError(
                    _("No PTP parameter object found for %s" % param_uuid))

            if p['op'] == 'add':
                pecan.request.dbapi.ptp_interface_parameter_add(uuid,
                                                                param_uuid)
            else:
                pecan.request.dbapi.ptp_interface_parameter_remove(uuid,
                                                                   param_uuid)

        return PtpInterface.convert_with_links(
            objects.ptp_interface.get_by_uuid(pecan.request.context, uuid))

    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, ptp_interface_uuid):
        """Delete a PTP interface."""
        LOG.debug("PtpInterfaceController.delete: %s" % ptp_interface_uuid)
        if self._parent:
            raise exception.OperationNotPermitted

        try:
            ptp_interface_obj = objects.ptp_interface.get_by_uuid(
                pecan.request.context, ptp_interface_uuid)
        except exception.PtpInterfaceNotFound:
            raise

        # Only allow delete if there are no associated hosts/interfaces and
        # parameters
        parameters = pecan.request.dbapi.ptp_parameters_get_list(
            ptp_interface=ptp_interface_uuid)
        if parameters:
            raise wsme.exc.ClientSideError(
                "PTP interface %s is still associated with PTP parameter(s)"
                % ptp_interface_uuid)

        interfaces = pecan.request.dbapi.ptp_interface_get_assignees(
            ptp_interface_obj.id)
        if interfaces:
            raise wsme.exc.ClientSideError(
                "PTP interface %s is still associated with host interface(s)"
                % ptp_interface_uuid)

        LOG.debug("PtpInterfaceController.delete: all clear for %s" %
                  ptp_interface_uuid)
        pecan.request.dbapi.ptp_interface_destroy(ptp_interface_uuid)
6543709 | #Imports
import os
import fnmatch
import matplotlib.pyplot as plt
from tensorflow import keras
def load_images_from_folder(folder, filter):
# Get files in the folder
files = fnmatch.filter(os.listdir(folder), filter)
# Load all files
result = []
for file in files:
# Get full file path
file_path = os.path.join(folder, file)
# Load image and convert to unit8 array
img = keras.preprocessing.image.load_img(file_path)
img = keras.preprocessing.image.img_to_array(img)
# Get expected label
label = file.split('_')[1]
# Append image with the label
result.append((img, label))
return result
def plot_results(images_with_labels, predicted_labels, row_count, col_count, font_size):
    """Show a row_count x col_count grid of images, each captioned with its
    predicted label and the actual label in parentheses."""
    plt.figure()

    for cell in range(row_count * col_count):
        plt.subplot(row_count, col_count, cell + 1)
        # Hide axis decorations; only the image and caption matter.
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        image, actual = images_with_labels[cell]
        plt.imshow(image / 255)
        caption = f"{predicted_labels[cell]} (actual: {actual})"
        plt.xlabel(caption, fontsize=font_size)

    # Add padding between the subplots.
    plt.tight_layout(pad=1.0)
    plt.show()
def say_text(text):
    """Speak *text* aloud through the festival TTS engine.

    The text is shell-quoted so that characters like ';', '$' or backticks
    in *text* cannot inject extra shell commands.
    """
    import shlex  # local import keeps the module's import section unchanged
    os.system('echo ' + shlex.quote(text) + ' | festival --tts')
| StarcoderdataPython |
8171838 | <gh_stars>0
""" report test results in JUnit-XML format, for use with Hudson and build integration servers.
Based on initial code from <NAME>.
"""
import py
import os
import re
import sys
import time
# Python 2.X and 3.X compatibility
try:
    unichr(65)
except NameError:
    unichr = chr  # Python 3: chr already returns a unicode string
try:
    unicode('A')
except NameError:
    unicode = str  # Python 3: str is the unicode type
try:
    long(1)
except NameError:
    long = int  # Python 3: int has arbitrary precision
class Junit(py.xml.Namespace):
    # XML tag namespace: attribute access below yields tag builders
    # (e.g. Junit.testcase(...), Junit.failure(...)).
    pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
#                    | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0d)
_legal_ranges = (
    (0x20, 0x7E),
    (0x80, 0xD7FF),
    (0xE000, 0xFFFD),
    (0x10000, 0x10FFFF),
)
# Only include ranges this interpreter can represent (narrow vs wide build).
_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
                 for (low, high) in _legal_ranges
                 if low < sys.maxunicode]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
# Matches any single character that is NOT legal in XML 1.0.
illegal_xml_re = re.compile(unicode('[^%s]') %
                            unicode('').join(_legal_xml_re))
# Temporaries are no longer needed once the regex is compiled.
del _legal_chars
del _legal_ranges
del _legal_xml_re
def bin_xml_escape(arg):
    """XML-escape *arg*, replacing characters that are illegal in XML 1.0
    with '#xNN' / '#xNNNN' character references."""
    def _replace(match_obj):
        code = ord(match_obj.group())
        template = unicode('#x%02X') if code <= 0xFF else unicode('#x%04X')
        return template % code

    escaped = py.xml.escape(arg)
    return py.xml.raw(illegal_xml_re.sub(_replace, escaped))
def pytest_addoption(parser):
    """Register the --junitxml and --junitprefix command-line options."""
    group = parser.getgroup("terminal reporting")
    group.addoption('--junitxml', '--junit-xml', action="store",
                    dest="xmlpath", metavar="path", default=None,
                    help="create junit-xml style report file at given path.")
    group.addoption('--junitprefix', '--junit-prefix', action="store",
                    metavar="str", default=None,
                    help="prepend prefix to classnames in junit-xml output")
def pytest_configure(config):
    """Create and register the LogXML plugin when an xml path was given."""
    xmlpath = config.option.xmlpath
    # prevent opening xmllog on slave nodes (xdist)
    if xmlpath and not hasattr(config, 'slaveinput'):
        config._xml = LogXML(xmlpath, config.option.junitprefix)
        config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
    """Unregister the LogXML plugin created in pytest_configure, if any."""
    xml = getattr(config, '_xml', None)
    if xml:
        del config._xml
        config.pluginmanager.unregister(xml)
def mangle_testnames(names):
    """Normalize pytest node-id parts into JUnit classname/testname pieces.

    Drops the synthetic '()' instance entries, strips '.py' from each part
    and converts the path separators of the first (module) part to dots.
    Returns [] when nothing remains, instead of raising IndexError.
    """
    names = [x.replace(".py", "") for x in names if x != '()']
    if names:
        names[0] = names[0].replace("/", '.')
    return names
class LogXML(object):
    """Collects test reports and writes them out as a JUnit-XML file."""

    def __init__(self, logfile, prefix):
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(logfile))
        self.prefix = prefix
        self.tests = []
        # Counters that become the <testsuite> summary attributes.
        self.passed = self.skipped = 0
        self.failed = self.errors = 0

    def _opentestcase(self, report):
        """Append a fresh <testcase> element derived from the report's nodeid."""
        names = mangle_testnames(report.nodeid.split("::"))
        classnames = names[:-1]
        if self.prefix:
            classnames.insert(0, self.prefix)
        self.tests.append(Junit.testcase(
            classname=".".join(classnames),
            name=bin_xml_escape(names[-1]),
            time=getattr(report, 'duration', 0)
        ))

    def _write_captured_output(self, report):
        """Attach captured stdout/stderr as <system-out>/<system-err>."""
        for capname in ('out', 'err'):
            allcontent = ""
            for name, content in report.get_sections("Captured std%s" %
                                                     capname):
                allcontent += content
            if allcontent:
                tag = getattr(Junit, 'system-'+capname)
                self.append(tag(bin_xml_escape(allcontent)))

    def append(self, obj):
        # Attach obj to the most recently opened testcase element.
        self.tests[-1].append(obj)

    def append_pass(self, report):
        self.passed += 1
        self._write_captured_output(report)

    def append_failure(self, report):
        #msg = str(report.longrepr.reprtraceback.extraline)
        if hasattr(report, "wasxfail"):
            # An xfail-marked test that unexpectedly passed counts as skipped.
            self.append(
                Junit.skipped(message="xfail-marked test passes unexpectedly"))
            self.skipped += 1
        else:
            fail = Junit.failure(message="test failure")
            fail.append(bin_xml_escape(report.longrepr))
            self.append(fail)
            self.failed += 1
        self._write_captured_output(report)

    def append_collect_failure(self, report):
        #msg = str(report.longrepr.reprtraceback.extraline)
        self.append(Junit.failure(bin_xml_escape(report.longrepr),
                                  message="collection failure"))
        self.errors += 1

    def append_collect_skipped(self, report):
        #msg = str(report.longrepr.reprtraceback.extraline)
        self.append(Junit.skipped(bin_xml_escape(report.longrepr),
                                  message="collection skipped"))
        self.skipped += 1

    def append_error(self, report):
        self.append(Junit.error(bin_xml_escape(report.longrepr),
                                message="test setup failure"))
        self.errors += 1

    def append_skipped(self, report):
        if hasattr(report, "wasxfail"):
            self.append(Junit.skipped(bin_xml_escape(report.wasxfail),
                                      message="expected test failure"))
        else:
            filename, lineno, skipreason = report.longrepr
            if skipreason.startswith("Skipped: "):
                skipreason = bin_xml_escape(skipreason[9:])
            self.append(
                Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
                              type="pytest.skip",
                              message=skipreason
                              ))
        self.skipped += 1
        self._write_captured_output(report)

    def pytest_runtest_logreport(self, report):
        """Route each phase report to the matching append_* handler."""
        if report.passed:
            if report.when == "call":  # ignore setup/teardown
                self._opentestcase(report)
                self.append_pass(report)
        elif report.failed:
            self._opentestcase(report)
            # Failures outside the call phase are setup/teardown errors.
            if report.when != "call":
                self.append_error(report)
            else:
                self.append_failure(report)
        elif report.skipped:
            self._opentestcase(report)
            self.append_skipped(report)

    def pytest_collectreport(self, report):
        """Record collection failures and collection-time skips."""
        if not report.passed:
            self._opentestcase(report)
            if report.failed:
                self.append_collect_failure(report)
            else:
                self.append_collect_skipped(report)

    def pytest_internalerror(self, excrepr):
        """Record a pytest internal error as a synthetic testcase."""
        self.errors += 1
        data = bin_xml_escape(excrepr)
        self.tests.append(
            Junit.testcase(
                Junit.error(data, message="internal error"),
                classname="pytest",
                name="internal"))

    def pytest_sessionstart(self):
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self):
        """Write the accumulated <testsuite> element to self.logfile."""
        if py.std.sys.version_info[0] < 3:
            logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
        else:
            logfile = open(self.logfile, 'w', encoding='utf-8')

        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time
        numtests = self.passed + self.failed

        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
        logfile.write(Junit.testsuite(
            self.tests,
            name="pytest",
            errors=self.errors,
            failures=self.failed,
            skips=self.skipped,
            tests=numtests,
            time="%.3f" % suite_time_delta,
        ).unicode(indent=0))
        logfile.close()

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
15193 | <filename>wwt_api_client/communities.py
# Copyright 2019-2020 the .NET Foundation
# Distributed under the terms of the revised (3-clause) BSD license.
"""Interacting with the WWT Communities APIs."""
import json
import os.path
import requests
import sys
from urllib.parse import parse_qs, urlparse
from . import APIRequest, Client, enums
# Public API of this module.
__all__ = '''
CommunitiesAPIRequest
CommunitiesClient
CreateCommunityRequest
DeleteCommunityRequest
GetCommunityInfoRequest
GetLatestCommunityRequest
GetMyProfileRequest
GetProfileEntitiesRequest
IsUserRegisteredRequest
interactive_communities_login
'''.split()

# Microsoft Live OAuth endpoints used by the Communities login flow.
LIVE_OAUTH_AUTH_SERVICE = "https://login.live.com/oauth20_authorize.srf"
LIVE_OAUTH_TOKEN_SERVICE = "https://login.live.com/oauth20_token.srf"
LIVE_OAUTH_DESKTOP_ENDPOINT = "https://login.live.com/oauth20_desktop.srf"
LIVE_AUTH_SCOPES = ['wl.emails', 'wl.signin']
# OAuth client id registered for WWT.
WWT_CLIENT_ID = '000000004015657B'
# Basenames of the files kept in the per-user state directory.
OAUTH_STATE_BASENAME = 'communities-oauth.json'
CLIENT_SECRET_BASENAME = 'communities-client-secret.txt'
class CommunitiesClient(object):
    """A client for WWT Communities API requests.
    Instantiating such a client will make at least one web request, to refresh
    the Microsoft Live OAuth login token.
    In addition, an interactive user login may be necessary. This must be
    explicitly allowed by the caller to prevent random programs from hanging
    waiting for user input. If interactive login is successful, the
    authentication data from such a login are saved in the current user's
    state directory (~/.local/state/wwt_api_client/ on Linux machines) for
    subsequent use.
    """
    # The parent wwt_api_client.Client used for non-Communities requests.
    _parent = None
    # Directory where OAuth state and the client secret are persisted.
    _state_dir = None
    # Parsed JSON from a previous OAuth flow, or None if none is saved.
    _state = None
    # Tokens extracted from the (possibly refreshed) OAuth response.
    _access_token = None
    _refresh_token = None
    def __init__(self, parent_client, oauth_client_secret=None, interactive_login_if_needed=False, state_dir=None):
        self._parent = parent_client
        if state_dir is None:
            # Imported lazily so appdirs is only required on this path.
            import appdirs
            state_dir = appdirs.user_state_dir('wwt_api_client', 'AAS_WWT')
        self._state_dir = state_dir
        # Do we have the client secret? This is saved to disk upon the first
        # login, but it can also be passed in.
        if oauth_client_secret is None:
            try:
                with open(os.path.join(self._state_dir, CLIENT_SECRET_BASENAME), 'rt') as f:
                    oauth_client_secret = f.readline().strip()
            except FileNotFoundError:
                pass
        if oauth_client_secret is None:
            raise Exception('cannot create CommunitiesClient: the \"oauth client secret\" '
                            'is not available to the program')
        # Try to get state from a previous OAuth flow and decide what to do
        # based on where we're at.
        try:
            with open(os.path.join(self._state_dir, OAUTH_STATE_BASENAME), 'rt') as f:
                self._state = json.load(f)
        except FileNotFoundError:
            pass
        # For the record, `http://worldwidetelescope.org/webclient` and
        # `http://www.worldwidetelesope.org/webclient` are valid
        # redirect_uri's.
        token_service_params = {
            'client_id': WWT_CLIENT_ID,
            'client_secret': oauth_client_secret,
            'redirect_uri': LIVE_OAUTH_DESKTOP_ENDPOINT,
        }
        # Once set, the structure of oauth_data is : {
        #   'token_type': 'bearer',
        #   'expires_in': <seconds>,
        #   'scope': <scopes>,
        #   'access_token': <long hex>,
        #   'refresh_token': <long hex>,
        #   'authentication_token': <long hex>,
        #   'user_id': <...>
        # }
        oauth_data = None
        if self._state is not None:
            # We have previous state -- hopefully, we only need a refresh, which
            # can proceed non-interactively.
            token_service_params['grant_type'] = 'refresh_token'
            token_service_params['refresh_token'] = self._state['refresh_token']
            oauth_data = requests.post(
                LIVE_OAUTH_TOKEN_SERVICE,
                data = token_service_params,
            ).json()
            if 'error' in oauth_data:
                if oauth_data['error'] == 'invalid_grant':
                    # This indicates that our grant has expired. We need to
                    # rerun the auth flow.
                    self._state = None
                else:
                    # Some other kind of error. Bail.
                    raise Exception(repr(oauth_data))
        if self._state is None:
            # We need to do the interactive authentication flow. This has to
            # be explicitly allowed by the caller because we don't want random
            # programs pausing for user input on the terminal.
            if not interactive_login_if_needed:
                raise Exception('cannot create CommunitiesClient: an interactive login is '
                                'required but unavailable right now')
            params = {
                'client_id': WWT_CLIENT_ID,
                'scope': ' '.join(LIVE_AUTH_SCOPES),
                'redirect_uri': LIVE_OAUTH_DESKTOP_ENDPOINT,
                'response_type': 'code'
            }
            # Build (but do not send) the request just to obtain the login URL.
            preq = requests.Request(url=LIVE_OAUTH_AUTH_SERVICE, params=params).prepare()
            print()
            print('To use the WWT Communities APIs, interactive authentication to Microsoft')
            print('Live is required. Open this URL in a browser and log in:')
            print()
            print(preq.url)
            print()
            print('When done, copy the URL *that you are redirected to* and paste it here:')
            print('>> ', end='')
            redir_url = input()
            # should look like:
            # 'https://login.live.com/oauth20_desktop.srf?code=MHEXHEXHE-XHEX-HEXH-EXHE-XHEXHEXHEXHE&lc=NNNN'
            parsed = urlparse(redir_url)
            params = parse_qs(parsed.query)
            # NOTE(review): parse_qs returns a *list* of values, so `code`
            # here is e.g. ['M...'], not a string; requests encodes the list
            # correctly below, but code[0] would be more explicit -- verify.
            code = params.get('code')
            if not code:
                raise Exception('didn\'t get "code" parameter from response URL')
            token_service_params['grant_type'] = 'authorization_code'
            token_service_params['code'] = code
            oauth_data = requests.post(
                LIVE_OAUTH_TOKEN_SERVICE,
                data = token_service_params,
            ).json()
            if 'error' in oauth_data:
                raise Exception(repr(oauth_data))
            # Looks like it worked! Save the results for next time.
            os.makedirs(self._state_dir, exist_ok=True)
            # Sigh, Python not making it easy to be secure ...
            # os.open lets us create the files with 0o600 permissions since
            # they hold credentials.
            fd = os.open(os.path.join(self._state_dir, OAUTH_STATE_BASENAME), os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
            f = open(fd, 'wt')
            with f:
                json.dump(oauth_data, f)
            fd = os.open(os.path.join(self._state_dir, CLIENT_SECRET_BASENAME), os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
            f = open(fd, 'wt')
            with f:
                print(oauth_client_secret, file=f)
        # And for this time:
        self._access_token = oauth_data['access_token']
        self._refresh_token = oauth_data['refresh_token']
    def create_community(self, payload=None):
        """Create a new community owned by the current user.
        Parameters
        ----------
        See the definition of the :class:`CreateCommunityRequest` class.
        Returns
        -------
        request : an initialized :class:`CreateCommunityRequest` object
            The request.
        """
        req = CreateCommunityRequest(self)
        req.payload = payload
        return req
    def delete_community(self, id=None):
        """Delete a community.
        Parameters
        ----------
        See the definition of the :class:`DeleteCommunityRequest` class.
        Returns
        -------
        request : an initialized :class:`DeleteCommunityRequest` object
            The request.
        """
        req = DeleteCommunityRequest(self)
        req.id = id
        return req
    def get_community_info(self, id=None):
        """Get information about the specified community.
        Parameters
        ----------
        See the definition of the :class:`GetCommunityInfoRequest` class.
        Returns
        -------
        request : an initialized :class:`GetCommunityInfoRequest` object
            The request.
        """
        req = GetCommunityInfoRequest(self)
        req.id = id
        return req
    def get_latest_community(self):
        """Get information about the most recently created WWT Communities.
        .. testsetup:: [*]
            >>> comm_client = getfixture('communities_client_cached')
        Examples
        --------
        There are no arguments::
            >>> req = comm_client.get_latest_community()
            >>> folder = req.send()  # returns wwt_data_formats.folder.Folder
        Returns
        -------
        request : an initialized :class:`GetLatestCommunityRequest` object
            The request.
        """
        return GetLatestCommunityRequest(self)
    def get_my_profile(self):
        """Get the logged-in user's profile information.
        .. testsetup:: [*]
            >>> comm_client = getfixture('communities_client_cached')
        Examples
        --------
        There are no arguments::
            >>> req = comm_client.get_my_profile()
            >>> json = req.send()  # returns JSON data structure
            >>> print(json['ProfileId'])
            123456
        Returns
        -------
        request : an initialized :class:`GetMyProfileRequest` object
            The request.
        """
        return GetMyProfileRequest(self)
    def get_profile_entities(
            self,
            entity_type = enums.EntityType.CONTENT,
            current_page = 1,
            page_size = 99999,
    ):
        """Get "entities" associated with the logged-in user's profile.
        .. testsetup:: [*]
            >>> comm_client = getfixture('communities_client_cached')
        Parameters
        ----------
        See the definition of the :class:`GetProfileEntitiesRequest` class.
        Examples
        --------
            >>> from wwt_api_client.enums import EntityType
            >>> req = comm_client.get_profile_entities(
            ...     entity_type = EntityType.CONTENT,
            ...     current_page = 1,  # one-based
            ...     page_size = 99999,
            ... )
            >>> json = req.send()  # returns json
            >>> print(json['entities'][0]['Id'])
            82077
        Returns
        -------
        request : an initialized :class:`GetProfileEntitiesRequest` object
            The request.
        """
        req = GetProfileEntitiesRequest(self)
        req.entity_type = entity_type
        req.current_page = current_page
        req.page_size = page_size
        return req
    def is_user_registered(self):
        """Query whether the logged-in Microsoft Live user is registered with
        the WWT Communities system.
        .. testsetup:: [*]
            >>> comm_client = getfixture('communities_client_cached')
        Examples
        --------
        There are no arguments::
            >>> req = comm_client.is_user_registered()
            >>> print(req.send())
            True
        Returns
        -------
        request : an initialized :class:`IsUserRegisteredRequest` object
            The request.
        """
        return IsUserRegisteredRequest(self)
class CommunitiesAPIRequest(APIRequest):
    """Base class for requests against the WWT Communities endpoints.

    These endpoints only work for a user who is signed in to a Microsoft
    Live account, so every such request is issued through a
    :class:`CommunitiesClient`, which supplies the OAuth tokens.
    """
    # The CommunitiesClient that owns this request.
    _comm_client = None

    def __init__(self, communities_client):
        super().__init__(communities_client._parent)
        self._comm_client = communities_client
class CreateCommunityRequest(CommunitiesAPIRequest):
    """Create a new community owned by the logged-in user.

    The processed response is the integer ID of the new community.
    """
    # The JSON payload to POST. Expected shape (not validated field-by-field):
    #
    #   {"communityJson": {"CategoryID": 20, "ParentID": "610131",
    #    "AccessTypeID": 2, "IsOffensive": false, "IsLink": false,
    #    "CommunityType": "Community", "Name": "...",
    #    "Description": "...", "Tags": "tag1,tag2"}}
    #
    # (It doesn't feel worthwhile to model this as a full data structure.)
    payload = None

    def invalidity_reason(self):
        """Return a human-readable problem description, or None if valid."""
        if self.payload is None:
            return '"payload" must be a JSON dictionary'
        return None

    def make_request(self):
        """Build the POST request carrying the payload and OAuth cookies."""
        auth_cookies = {
            'access_token': self._comm_client._access_token,
            'refresh_token': self._comm_client._refresh_token,
        }
        return requests.Request(
            method='POST',
            url=self._client._api_base + '/Community/Create/New',
            json=self.payload,
            cookies=auth_cookies,
        )

    def _process_response(self, resp):
        """Extract the newly created community's ID from the JSON response."""
        return json.loads(resp.text)['ID']
class DeleteCommunityRequest(CommunitiesAPIRequest):
    """Delete a community.

    The server replies with the literal text ``True`` if the community was
    successfully deleted, ``False`` otherwise; :meth:`_process_response`
    converts that to a Python bool.
    """
    # The ID number of the community to delete.
    id = None

    def invalidity_reason(self):
        """Return a human-readable problem description, or None if valid."""
        if not isinstance(self.id, int):
            return '"id" must be an integer'
        return None

    def make_request(self):
        """Build the POST request with the OAuth cookies.

        The API includes a {parentId} after the community ID, but it is
        unused, so we always pass 0.
        """
        return requests.Request(
            method = 'POST',
            url = f'{self._client._api_base}/Community/Delete/{self.id}/0',
            cookies = {
                'access_token': self._comm_client._access_token,
                'refresh_token': self._comm_client._refresh_token,
            },
        )

    def _process_response(self, resp):
        """Map the server's "True"/"False" body text to a bool."""
        t = resp.text
        if t == 'True':
            return True
        elif t == 'False':
            return False
        # Bug fix: this message previously named the IsUserRegistered API
        # (copy-paste error), which made failures here confusing to debug.
        raise Exception(f'unexpected response from Delete API: {t!r}')
# TODO: we're not implementing the "isEdit" mode where you can update
# community info.
class GetCommunityInfoRequest(CommunitiesAPIRequest):
    """Get information about the specified community.

    The processed response is decoded JSON with two top-level keys:

    ``"community"``
        Scalar metadata about the community itself: ``Id``, ``Name``,
        ``Description``, ``Category``, ``ParentId`` / ``ParentName`` /
        ``ParentType``, ``Tags``, ``MemberCount``, ``ViewCount``,
        ``Rating`` / ``RatedPeople``, ``ThumbnailID``, ``AccessType``,
        ``UserPermission``, ``Producer`` / ``ProducerId``, and similar
        fields.

    ``"permission"``
        A task-style wrapper whose ``Result`` member holds the current
        user's permission level (``CurrentUserPermission``) plus a
        paginated ``PermissionItemList`` of per-user roles for the
        community.
    """
    # The ID number of the community to probe.
    id = None

    def invalidity_reason(self):
        """Return a human-readable problem description, or None if valid."""
        if not isinstance(self.id, int):
            return '"id" must be an integer'
        return None

    def make_request(self):
        """Build the GET request, authenticated both via the OAuth cookies
        and the LiveUserToken header."""
        auth_cookies = {
            'access_token': self._comm_client._access_token,
            'refresh_token': self._comm_client._refresh_token,
        }
        return requests.Request(
            method='GET',
            url=f'{self._client._api_base}/Community/Detail/{self.id}',
            cookies=auth_cookies,
            headers={'LiveUserToken': self._comm_client._access_token},
        )

    def _process_response(self, resp):
        """Decode the response body as JSON."""
        return json.loads(resp.text)
class GetLatestCommunityRequest(CommunitiesAPIRequest):
    """Get information about the most recently created WWT Communities.

    The processed response is a ``wwt_data_formats.folder.Folder`` whose
    sub-Folders correspond to the communities.
    """

    def invalidity_reason(self):
        """This request takes no parameters, so it is always valid."""
        return None

    def make_request(self):
        """Build the GET request; only the LiveUserToken header is needed."""
        return requests.Request(
            method='GET',
            url=self._client._api_base + '/Resource/Service/Browse/LatestCommunity',
            headers={'LiveUserToken': self._comm_client._access_token},
        )

    def _process_response(self, resp):
        """Parse the WTML (XML) response body into a Folder object."""
        from xml.etree import ElementTree as etree
        from wwt_data_formats.folder import Folder
        return Folder.from_xml(etree.fromstring(resp.text))
class GetMyProfileRequest(CommunitiesAPIRequest):
    """Get the currently logged-in user's profile information.

    The processed response is decoded JSON with fields such as
    ``ProfileId``, ``ProfileName``, ``AboutProfile``, ``Affiliation``,
    ``ProfilePhotoLink``, the storage figures (``TotalStorage``,
    ``UsedStorage``, ``AvailableStorage``, ``PercentageUsedStorage``),
    ``IsCurrentUser``, and ``IsSubscribed``.
    """

    def invalidity_reason(self):
        """This request takes no parameters, so it is always valid."""
        return None

    def make_request(self):
        """Build the GET request, authenticated via the OAuth cookies."""
        auth_cookies = {
            'access_token': self._comm_client._access_token,
            'refresh_token': self._comm_client._refresh_token,
        }
        return requests.Request(
            method='GET',
            url=self._client._api_base + '/Profile/MyProfile/Get',
            headers={'Accept': 'application/json, text/plain, */*'},
            cookies=auth_cookies,
        )

    def _process_response(self, resp):
        """Decode the response body as JSON."""
        return json.loads(resp.text)
class GetProfileEntitiesRequest(CommunitiesAPIRequest):
    """Get "entities" associated with the logged-in user.

    Entities include communities, folders, and content files. The processed
    response is decoded JSON; the items are found under its ``"entities"``
    key.
    """
    # What kind of entity to query. Only COMMUNITY and CONTENT are allowed.
    entity_type = enums.EntityType.CONTENT
    # What page of search results to return -- starting at 1.
    current_page = 1
    # How many items to return per page of search results.
    page_size = 99999

    def invalidity_reason(self):
        """Return a human-readable problem description, or None if valid."""
        if not isinstance(self.entity_type, enums.EntityType):
            return '"entity_type" must be a wwt_api_client.enums.EntityType'
        if not isinstance(self.current_page, int):
            return '"current_page" must be an int'
        if not isinstance(self.page_size, int):
            # Bug fix: this message previously complained about
            # "current_page" (copy-paste error).
            return '"page_size" must be an int'
        return None

    def make_request(self):
        """Build the GET request; the parameters are path components."""
        return requests.Request(
            method = 'GET',
            url = f'{self._client._api_base}/Profile/Entities/{self.entity_type.value}/{self.current_page}/{self.page_size}',
            cookies = {
                'access_token': self._comm_client._access_token,
                'refresh_token': self._comm_client._refresh_token,
            },
        )

    def _process_response(self, resp):
        """Decode the response body as JSON."""
        return json.loads(resp.text)
class IsUserRegisteredRequest(CommunitiesAPIRequest):
    """Ask whether the logged-in Microsoft Live user is registered with
    the WWT Communities system.

    The processed response is a plain bool.
    """

    def invalidity_reason(self):
        """This request takes no parameters, so it is always valid."""
        return None

    def make_request(self):
        """Build the GET request; only the LiveUserToken header is needed."""
        return requests.Request(
            method='GET',
            url=self._client._api_base + '/Resource/Service/User',
            headers={'LiveUserToken': self._comm_client._access_token},
        )

    def _process_response(self, resp):
        """Map the server's "True"/"False" body text to a bool."""
        body = resp.text
        if body == 'True':
            return True
        if body == 'False':
            return False
        raise Exception(f'unexpected response from IsUserRegistered API: {body!r}')
# Command-line utility for initializing the OAuth state.
def interactive_communities_login(args):
    """Run the interactive Microsoft Live OAuth flow from the command line.

    Parameters
    ----------
    args : list of str
        Command-line arguments (excluding the program name). One of
        ``--secret-file PATH`` or ``--secret-env ENV-VAR-NAME`` must be
        given to supply the WWT OAuth "client secret".

    Exits the process with status 1 if no usable client secret is found.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--secret-file',
        metavar = 'PATH',
        help = 'Path to a file from which to read the WWT client secret',
    )
    parser.add_argument(
        '--secret-env',
        metavar = 'ENV-VAR-NAME',
        help = 'Name of an environment variable containing the WWT client secret',
    )
    settings = parser.parse_args(args)

    # Locate the client secret, preferring the file over the environment.
    client_secret = None
    if settings.secret_file is not None:
        with open(settings.secret_file) as f:
            client_secret = f.readline().strip()
    elif settings.secret_env is not None:
        client_secret = os.environ.get(settings.secret_env)
    else:
        print('error: the WWT "client secret" must be provided; '
              'use --secret-file or --secret-env', file=sys.stderr)
        sys.exit(1)

    if not client_secret:
        print('error: the WWT "client secret" is empty or unset', file=sys.stderr)
        sys.exit(1)

    # Ready to go ...
    CommunitiesClient(
        Client(),
        oauth_client_secret = client_secret,
        interactive_login_if_needed = True,
    )
    print('OAuth flow successfully completed.')
| StarcoderdataPython |
3366933 | <filename>third_party/gtest/workspace.bzl
"""Google Test project."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def deps():
    # Fetch Google Test, pinned to a specific upstream commit; the sha256
    # guards against the downloaded archive changing underneath us.
    http_archive(
        name = "com_google_googletest",
        urls = ["https://github.com/google/googletest/archive/011959aafddcd30611003de96cfd8d7a7685c700.zip"],
        strip_prefix = "googletest-011959aafddcd30611003de96cfd8d7a7685c700",
        sha256 = "6a5d7d63cd6e0ad2a7130471105a3b83799a7a2b14ef7ec8d742b54f01a4833c",
    )
| StarcoderdataPython |
1810521 | <filename>sample.py
from random import random, sample

# Demonstrate the basic random-number helpers.  Output text and the order
# of RNG consumption are kept identical to the original script.
num = random()
print('Random Float 0.0-1.0 : ', num)

# Scale the float into a one-digit integer.
num = int(num * 10)
print('Random Integer 0 - 9 : ', num)

# Six independent draws in 1..10.
nums = [int(random() * 10) + 1 for _ in range(6)]
print('Random Multiple Integers 1-10: ', nums)

# Six *distinct* values, lottery-style.
nums = sample(range(1, 59), 6)
print('Random Integer Sample 1 - 59 : ', nums)
| StarcoderdataPython |
9790734 | <filename>replaying/xor_nxor_pdf.py
#%%
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
import seaborn as sns
import numpy as np
import pickle
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from math import log2, ceil
#%%
def pdf(x):
    """Posterior probability of class 0 at point ``x`` for the XOR mixture.

    Class 0 is a two-component Gaussian mixture centered at (-0.5, 0.5) and
    (0.5, -0.5); class 1 mirrors it at (0.5, 0.5) and (-0.5, -0.5).  All
    components share the isotropic covariance 0.1 * I.  Returns
    p0 / (p0 + p1), i.e. P(class 0 | x) under equal class priors.
    """
    class0_centers = (np.array([-0.5, 0.5]), np.array([0.5, -0.5]))
    class1_centers = (np.array([0.5, 0.5]), np.array([-0.5, -0.5]))
    cov = 0.1 * np.eye(2)
    inv_cov = np.linalg.inv(cov)
    # Shared normalization constant of each (unscaled) Gaussian component.
    norm = 2 * np.pi * np.sqrt(np.linalg.det(cov))

    def class_density(centers):
        return sum(np.exp(-(x - mu) @ inv_cov @ (x - mu).T) for mu in centers) / norm

    p0 = class_density(class0_centers)
    p1 = class_density(class1_centers)
    return p0 / (p0 + p1)
# %%
# Evaluate pdf() on a regular grid over [-1, 1)^2 and render the true
# posterior as a heatmap (xor-nxor case).
delta = 0.01
x = np.arange(-1,1,step=delta)
y = np.arange(-1,1,step=delta)
x,y = np.meshgrid(x,y)
# Flatten the grid into an (N, 2) array of sample points.
sample = np.concatenate(
    (
        x.reshape(-1,1),
        y.reshape(-1,1)
    ),
    axis=1
)
z = np.zeros(len(sample),dtype=float)
for ii,x in enumerate(sample):
    z[ii] = pdf(x)
# Pivot into a 2-D table indexed by coordinate for seaborn's heatmap.
data = pd.DataFrame(data={'x':sample[:,0], 'y':sample[:,1], 'z':z})
data = data.pivot(index='x', columns='y', values='z')
sns.set_context("talk")
fig, ax = plt.subplots(1,1, figsize=(8,8))
cmap= sns.diverging_palette(240, 10, n=9)
ax1 = sns.heatmap(data, ax=ax, vmin=0, vmax=1,cmap=cmap)
# Sparse tick labels: only mark -1, 0, and 1 on each axis.
ax1.set_xticklabels(['-1','' , '', '', '', '', '','','','','0','','','','','','','','','1'])
ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','1'])
#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])
ax.set_title('True PDF of xor-nxor simulation data',fontsize=24)
ax.invert_yaxis()
plt.savefig('result/figs/true_pdf.pdf')
# %%
def generate_2d_rotation(theta=0, acorn=None):
    """Return the 2x2 rotation matrix for angle ``theta`` (radians).

    If ``acorn`` is given, it additionally seeds numpy's global RNG
    (a side effect kept for reproducibility of downstream sampling).
    """
    if acorn is not None:
        np.random.seed(acorn)
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, s], [-s, c]])
# %%
# Same heatmap as above, but with every grid point rotated by 45 degrees
# before evaluating pdf() -- the xor-rxor case.
delta = 0.01
x = np.arange(-1,1,step=delta)
y = np.arange(-1,1,step=delta)
x,y = np.meshgrid(x,y)
sample = np.concatenate(
    (
        x.reshape(-1,1),
        y.reshape(-1,1)
    ),
    axis=1
)
z = np.zeros(len(sample),dtype=float)
# 45-degree rotation applied to each sample point.
R = generate_2d_rotation(theta=np.pi*45/180)
for ii,x in enumerate(sample):
    z[ii] = pdf(R@x)
data = pd.DataFrame(data={'x':sample[:,0], 'y':sample[:,1], 'z':z})
data = data.pivot(index='x', columns='y', values='z')
sns.set_context("talk")
fig, ax = plt.subplots(1,1, figsize=(8,8))
cmap= sns.diverging_palette(240, 10, n=9)
ax1 = sns.heatmap(data, ax=ax, vmin=0, vmax=1,cmap=cmap)
# Sparse tick labels: only mark -1, 0, and 1 on each axis.
ax1.set_xticklabels(['-1','' , '', '', '', '', '','','','','0','','','','','','','','','1'])
ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','','','','0','','','','','','','','','','','','1'])
#ax1.set_yticklabels(['-1','' , '', '', '', '', '','','','' , '', '', '', '', '', '','','','','', '0','','' , '', '', '', '', '','','','','','','','','','','','','1'])
ax.set_title('True PDF of xor-rxor simulation data',fontsize=24)
ax.invert_yaxis()
plt.savefig('result/figs/true_pdf_xor_rxor.pdf')
# %%
| StarcoderdataPython |
5199625 | <reponame>nelliesnoodles/Whispering-Wall<filename>wiwa_class.py
#!usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import re
#import enchant #Remove enchant for windows, will not install easily
from nltk.corpus import wordnet
import nltk as nltk
import random
import os
"""
Requires:
*Running setup1.py in linux (&&) and in your python3 virtual env will set all this up for you*
Manual setup:
install - nltk
install - python3
install - PyEnchant (Windows 10 fails to load pyEnchant)
* if you can't use pyenchant, there is a nltk function to check words *
Areas where enchant is removed are marked with ## !!!!!!!! lines
In your python3 shell type these to download needed data sets:
>>>import nltk
>>>nltk.download('wordnet')
>>>nltk.download('punkt')
>>>nltk.download('averaged_perceptron_tagger')
Scripts for responses should be in same directory as wiwa_class.py
"""
class Wiwa(object):
    def __init__(self):
        """Resolve every response-script path and precompute line orders.

        Scripts script1.txt..script9.txt are expected to live next to this
        module.  create_script_line_order() (defined elsewhere in this file)
        produces, per script, the sequence of line indices to serve.
        """
        self.fileDirectory = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
        self.nounscript = os.path.join(self.fileDirectory, 'script1.txt')
        self.verbscript = os.path.join(self.fileDirectory, 'script2.txt')
        self.simplescript = os.path.join(self.fileDirectory, 'script3.txt')
        self.questionable = os.path.join(self.fileDirectory, 'script4.txt')
        self.adjectives = os.path.join(self.fileDirectory, 'script5.txt')
        self.errorscript = os.path.join(self.fileDirectory, 'script6.txt')
        self.adverbs = os.path.join(self.fileDirectory, 'script7.txt')
        ## NEW--
        # Sequential "about the creator" script plus its cursor and cache.
        self.aboutNellie = os.path.join(self.fileDirectory, 'script8.txt')
        self.about_index = 0
        self.about_list = []
        # Sequential "about Wiwa" script plus its cursor and cache.
        self.aboutWiwa = os.path.join(self.fileDirectory, 'script9.txt')
        self.about_W_index = 0
        self.about_W_list = []
        ## -----
        ## !!!!!!!!! ----
        #self.dictionary = enchant.Dict("en_US") #<-- Removed for windows
        ## !!!!!!! ----
        self.noun_script_order = create_script_line_order(self.nounscript)
        self.verb_script_order = create_script_line_order(self.verbscript)
        self.simple_script_order = create_script_line_order(self.simplescript)
        self.question_script_order = create_script_line_order(self.questionable)
        self.adj_script_order = create_script_line_order(self.adjectives)
        self.err_script_order = create_script_line_order(self.errorscript)
        self.adv_script_order = create_script_line_order(self.adverbs)
        self.scripts_list = [self.nounscript, self.verbscript, self.simplescript, self.questionable, self.adjectives, self.errorscript, self.adverbs]
        # Cursor into the per-script order lists used by get_script_line().
        self.line_get = 0
def test_filePath(self):
#files_list = [self.nounscript, self.verbscript, self.simplescript, self.questionable, self.adjectives, self.adverbs]
for script in self.scripts_list:
try:
with open(script) as f:
printable = str(script)
print(printable)
print("SUCCESS")
except Exception as e:
print(e)
def test_variables(self):
print("collection of script orders:")
print("--------- nouns ---------")
print(self.noun_script_order)
print("--------- verbs ---------")
print(self.verb_script_order)
print("---------simple----------")
print(self.simple_script_order)
print("--------questions--------")
print(self.question_script_order)
print("-------adjectives--------")
print(self.adj_script_order)
print("------ errors ---------")
print(self.err_script_order)
print("--------adverbs----------")
print(self.adv_script_order)
print("line_get, integer:")
print(self.line_get)
def test_responses(self):
for script in self.scripts_list:
result = self.get_script_line(script)
string_result = str(result)
print(result)
    def run_wiwa(self):
        """Main interactive loop: read user input and print scripted replies.

        Loops until the user types EXIT or QUIT.  Each turn the input is
        lowercased, stripped of non-letters, then dispatched through a
        priority chain: greeting -> creator mention -> question -> yes/no ->
        part-of-speech-based response -> Wiwa mention -> silence.
        """
        intro = """ Welcome to the whispering wall, Wiwa is here to respond
        and question your perspective. She is just a program, and
        will respond according to her script.
        If you wish to stop program, type EXIT or QUIT.
        Have fun! """
        # Load the sequential "about" scripts once, up front.
        self.about_list = self.create_about_list()
        self.about_W_list = self.create_about_W_list()
        print(intro)
        make = input("..>>")
        while make not in ['EXIT', 'QUIT']:
            stripped = make.lower()
            # Keep only letters, spaces and '|' (regex character class).
            make = re.sub("[^a-zA-Z| |]+", "", stripped)
            # Tag-based (word-type, word) choice for the scripted replies.
            choice = self.pick_response(make)
            #print(choice)
            question = self.check_question(make)
            about_me = self.check_for_name(make)
            about_wiwa = self.check_for_name_wiwa(make)
            greet = self.is_greeting(make)
            if greet:
                print("Wiwa: Greetings human!")
            elif about_me != False:
                print("Wiwa:")
                response = self.get_about_line()
                print(response)
            elif question:
                # Maybe use simple script for these too?
                print("Wiwa:")
                discusanswer = self.get_script_line(self.questionable)
                print(discusanswer)
            elif make in ['yes', 'no', 'maybe']:
                response = self.get_script_line(self.simplescript)
                print("Wiwa:")
                print(response)
            else:
                # Scripted reply keyed on the tagged word type; lines with a
                # '%' placeholder get the user's own word substituted in.
                if choice[0] == 'noun':
                    response = self.get_script_line(self.nounscript)
                    print("Wiwa:")
                    if '%' in response:
                        print(response % choice[1])
                    else:
                        print(response)
                elif choice[0] =='verb':
                    response = self.get_script_line(self.verbscript)
                    print("Wiwa:")
                    if '%' in response:
                        print(response % choice[1])
                    else:
                        print(response)
                elif choice[0] == 'adv':
                    response = self.get_script_line(self.adverbs)
                    print("Wiwa:")
                    if '%' in response:
                        print(response % choice[1])
                    else:
                        print(response)
                elif choice[0] == 'adj':
                    response = self.get_script_line(self.adjectives)
                    print("Wiwa:")
                    if '%' in response:
                        print(response % choice[1])
                    else:
                        print(response)
                elif choice[0] == 'err':
                    response = self.get_script_line(self.errorscript)
                    print("Wiwa:")
                    if '%' in response:
                        print(response % choice[1])
                    else:
                        print(response)
                # NOTE(review): pick_response's fallback tag is 'error', not
                # 'err', so this branch is reached only when no word-type
                # matched above -- confirm that ordering is intentional.
                elif about_wiwa != False:
                    print("Wiwa:")
                    response = self.get_about_W_line()
                    print(response)
                else:
                    print("Wiwa: ... ... ")
            make = input("...>>")
## NEW Greetings---
def is_greeting(self, user_input):
greetings = [
'hiya', 'howdy', 'hello', 'greetings',
'aloha', 'yo', 'salutations', 'hi'
]
if len(user_input) == 0:
return True
else:
if type(user_input) == str:
alist = user_input.split()
for word in alist:
if word in greetings:
return True
else:
pass
return False
def create_about_list(self):
"""
Get the lines in the text file to respond when input includes creator's name
"""
about_list = []
try:
with open(self.aboutNellie) as f:
lines = f.readlines()
for line in lines:
#print(line)
self.about_list.append(line)
except:
print(f"file at : {self.aboutNellie} could not be opened.")
finally:
return self.about_list
def create_about_W_list(self):
"""
Get lines from script9 for input that includes reference to Wiwa.
"""
about_list = []
try:
with open(self.aboutWiwa) as f:
lines = f.readlines()
for line in lines:
#print(line)
self.about_W_list.append(line)
except:
print(f"file at : {self.aboutWiwa} could not be opened.")
finally:
return self.about_W_list
def get_about_line(self):
"""
The about Nellie script is supposed to go in order.
It is not randomized and responses are only dependant on the
user refering to the words 'nellie', 'tobey' or 'creator' in an input.
script lines are to be put in a list, and incremented through.
Once the end of the list is reached, we start over.
"""
max = len(self.about_list) - 1
if self.about_index > max:
self.about_index = 0
line = self.about_list[self.about_index]
self.about_index += 1
return line
def get_about_W_line(self):
"""
The about Wiwa script is supposed to go in order.
It is not randomized and responses are only dependant on the
user refering to the words 'wiwa', 'you' in an input.
script lines are to be put in a list, and incremented through.
Once the end of the list is reached, we start over.
"""
max = len(self.about_W_list) - 1
if self.about_W_index > max:
self.about_W_index = 0
line = self.about_W_list[self.about_W_index]
self.about_W_index += 1
return line
def check_for_name(self, arg):
"""
check the user input for key words that trigger the about
me script. script8.txt.
"""
if len(arg) == 0:
return "I'm just a wall, you can talk to me."
else:
arg = arg.lower()
me = ['nellie', 'creator', 'tobey']
found = False
for item in me:
if item in arg:
#self.get_about_line()
found = True
else:
pass
return found
def check_for_name_wiwa(self, arg):
"""
check the user input for key words that trigger the about
Wiwa script. script9.txt.
"""
if len(arg) == 0:
return "I'm just a wall, you can talk to me."
else:
arg = arg.lower()
me = ['wiwa', 'you', 'your']
found = False
for item in me:
if item in arg:
#self.get_about_line()
found = True
else:
pass
return found
## END NEW -----
    def get_script_line(self, arg):
        """ Chooses a random script line to give back to user """
        # is often not random *sad face*
        #print(self.line_get)
        # Map the requested script path to its precomputed line order.
        # NOTE(review): this mapping disagrees with __init__ -- there,
        # script2 is the *verb* script and script4 the *question* script,
        # but here script2 -> question order and script3 -> verb order.
        # If the scripts have different line counts this can index out of
        # range; confirm which mapping is intended.
        order = None
        for script in self.scripts_list:
            if script == arg:
                if arg.endswith('script1.txt'):
                    order = self.noun_script_order
                    break
                elif arg.endswith('script2.txt'):
                    order = self.question_script_order
                    break
                elif arg.endswith('script3.txt'):
                    order = self.verb_script_order
                    break
                elif arg.endswith('script4.txt'):
                    order = self.simple_script_order
                    break
                elif arg.endswith('script5.txt'):
                    order = self.adj_script_order
                    break
                elif arg.endswith('script7.txt'):
                    order = self.adv_script_order
                    break
                elif arg.endswith('script6.txt'):
                    order = self.err_script_order
                    break
                else:
                    pass
            else:
                order = None
        if order != None:
            # Wrap the shared cursor and serve the next line in the order.
            if self.line_get >= len(order):
                self.line_get = 0
            get_line = order[self.line_get]
            with open(arg) as f:
                lines = f.readlines()
                x = int(get_line)
                #print(lines[x])
                self.line_get += 1
                return lines[x]
        else:
            message = """
            script file could not be located:
            Original text file names should be one of the following:
            script1.txt, script2.txt, script3.txt, script4.txt, script5.txt, script6.txt
            or script7.txt
            """
            print(message)
            return None
    def pick_response(self, raw_input):
        """ Create lists of possible valid words for response mechanism,
        Then uses random to choose one to send back to run_wiwa() """
        make = raw_input.lower()
        # Part-of-speech buckets produced by make_tag_lists (nouns, verbs,
        # adjectives, adverbs, plus words it could not classify).
        nouns, verbs, adj, adv, errors = self.make_tag_lists(make)
        # Debug output left in on purpose by the author.
        print("nouns=", nouns)
        print("verbs=", verbs)
        print("adj=", adj)
        print("adv=", adv)
        print("errors=", errors)
        n = len(nouns)
        v = len(verbs)
        aj = len(adj)
        av = len(adv)
        er = len(errors)
        words_found = False
        options = {'noun': [], 'verb': [], 'adj': [], 'adv': [], 'err': []}
        if n > 0:
            words_found = True
            for item in nouns:
                options['noun'].append(item)
        if v > 0:
            words_found = True
            for item in verbs:
                options['verb'].append(item)
        if aj > 0:
            words_found = True
            for item in adj:
                options['adj'].append(item)
        if av > 0:
            words_found = True
            for item in adv:
                options['adv'].append(item)
        if er > 0:
            words_found = True
            for item in errors:
                options['err'].append(item)
        done = False
        if words_found == True:
            # Keep drawing a random bucket until a non-empty one is hit,
            # then answer with that bucket's first word.
            while not done:
                word_type = random.choice(list(options.keys()))
                word_list = options[word_type]
                #print("word_type=", word_type)
                #print("word_list[0]=", options[word_type])
                if len(word_list) > 0:
                    choice_tup = (word_type, word_list[0])
                    done = True
            return choice_tup
        else:
            # NOTE(review): this fallback tag is 'error', but run_wiwa
            # matches 'err', so it always falls through to the later
            # branches there -- confirm that is intended.
            return ('error', 'not identified')
def strip_stop_words(self, arg):
"""
arg is passed in as a list, and as a string.
Once while checking for errors as a string, and once again
as a list when removing stop words from the list.
"""
stops = [' ', 'i','the', 'of', 'he', 'she', 'it', 'some', 'all', 'a', 'lot',
'have', 'about', 'been', 'to', 'too', 'from', 'an', 'at', 'do', 'go'
'above', 'are', 'before', 'across', 'against', 'almost', 'along', 'aslo',
'although', 'always', 'am', 'among', 'amongst', 'amount', 'and',
'another', 'any', 'anyhow', 'anyone', 'anything', 'around', 'as',
'be', 'maybe', 'being', 'beside', 'besides', 'between', 'beyond', 'both',
'but', 'by', 'can', 'could', 'done', 'during', 'each', 'either',
'else', 'even', 'every', 'everyone', 'everything', 'everywhere',
'except', 'few', 'for', 'had', 'has', 'hence', 'here', 'in', 'into', 'is',
'it', 'its', 'keep', 'last', 'latter', 'many', 'may', 'more', 'most',
'much', 'name', 'next', 'none', 'not', 'nothing', 'now', 'nowhere',
'often', 'other', 'others', 'over', 'rather', 'perhaps', 'seems', 'then',
'there', 'these', 'they', 'though', 'thru', 'too', 'under', 'until',
'upon', 'very', 'was', 'were' 'which', 'while', 'will', 'with', 'ill', 'lets']
new_arg = []
if type(arg) == str:
arg = arg.split()
for item in arg:
if item in stops:
pass
else:
new_arg.append(item)
elif type(arg) == list:
i = 0
for item in arg:
word = arg[i]
if word in stops:
pass
else:
new_arg.append(word)
i += 1
else:
new_arg = ['ERRORinSTOPwordSTRIP']
return new_arg
def make_tag_lists(self, arg):
""" Use nltk to tag words for Wiwa to recycle and or respond too """
## !!!!!!!!!!!!!
errors = self.check_errors_W(arg)
## !!!!!!!!!!!!
tokens = nltk.word_tokenize(arg)
tags = nltk.pos_tag(tokens)
print("tags=", tags)
clean_tags = self.remove_bad_tags(tags)
print("cleaned tags=", clean_tags)
nouns = []
verbs = []
adj = []
adv = []
#print('clean_tags =', clean_tags)
#!!!! if someone enters unfindable text, clean_tags will be empty !!!!
if len(clean_tags) > 0:
for item in clean_tags:
x = item[1]
#print(item)
if x.startswith("V"):
verbs.append(item[0])
elif x.startswith("NN"):
nouns.append(item[0])
elif x.startswith("JJ"):
adj.append(item[0])
elif x.startswith("RB"):
adv.append(item[0])
else:
pass
nouns = self.strip_stop_words(nouns)
verbs = self.strip_stop_words(verbs)
adj = self.strip_stop_words(adj)
adv = self.strip_stop_words(adv)
return nouns, verbs, adj, adv, errors
else:
nouns = []
verbs = []
adj = []
adv = []
return nouns, verbs, adj, adv, errors
def check_errors(self, arg):
"""
Make a list of words that are not found with pyEnchant,
Currently using NLTK in wiwa online. This is an optional way.
"""
errors = []
#print("arg =", arg)
for item in arg:
if self.enchant_check(arg):
pass
else:
errors.append(item)
return errors
## NEW --- for windows users:
def check_errors_W(self, arg):
"""
arg is the user's input. Must be split into words.
"""
errors = []
print('arg type in check_errors_W=', type(arg))
stripped = self.strip_stop_words(arg)
user_words = stripped
print("user_words = ", user_words)
for item in user_words:
if wordnet.synsets(item):
pass
else:
errors.append(item)
return errors
### end validating word with wordnet
def remove_bad_tags(self, tags_list):
""" Use pyEnchant to remove unidentifiable words from tags list"""
new_tags = []
for item in tags_list:
word = item[0]
## !!!!!!!!!! windows vs non-windows change !!!!!#
# !!! Use enchant_check_W for windows !!!
# !!! enchant_check for non-windows !!!
if self.enchant_check_W(word):
new_tags.append(item)
else:
pass
#print("word is not found:", word)
return new_tags
    def enchant_check(self, arg):
        """ using the PyEnchant English dictionary to check validity of a word."""
        # NOTE(review): self.dictionary is presumably an enchant.Dict created
        # elsewhere in the class -- confirm where it is initialized.
        x = self.dictionary.check(arg)
        return x
### NEW check validity check for Windows:
def enchant_check_W(self, arg):
if wordnet.synsets(arg):
return True
else:
return False
#### END NEW validity check for Windows
def check_question(self, arg):
questions = ['why', '?']
if questions[0] in arg or questions[1] in arg:
return True
else:
return False
def create_script_line_order(somescript):
""" make a list with randomized order of line numbers from script
not sure if this is worth all the work yet. Must be a better way."""
# get count:
count = None
#print(somescript)
if somescript.endswith('.txt'):
try:
with open(somescript) as f:
for i, l in enumerate(f):
pass
count = i
except:
print("file is Empty.")
raise ValueError
else:
print("***file is not a txt file***")
print("\t file=", somescript)
raise ValueError
if count != None:
first_list = []
# create a list with all line numbers in it
for x in range(1, i):
first_list.append(x)
# shuffle those items:
random.shuffle(first_list)
return first_list
# Module entry point: build the bot and start its interactive loop
# immediately (runs at import time -- there is no __main__ guard).
new_wiwa = Wiwa()
new_wiwa.run_wiwa()
######### tests #########
#### check if filePaths are being created properly
#new_wiwa.test_filePath()
#### check that all self.attributes are being created successfully
#new_wiwa.test_variables()
#### check that responses are being generated from the files:
#new_wiwa.test_responses()
| StarcoderdataPython |
6675476 | from django.db.models.fields import DecimalField
from django.shortcuts import render
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponse
from django.core.exceptions import ObjectDoesNotExist
from store.models import Product
from store.models import Collection
from store.models import Order
from store.models import OrderItem
from store.models import Customer
from store.models import Address
from store.models import Cart, CartItem
from django.db.models import F, Q, Value, Func, ExpressionWrapper
from django.db.models.aggregates import Count, Max, Min, Avg, Sum
import json
from django.db.models.functions import Concat
from django.core.exceptions import ObjectDoesNotExist
from tags.models import TaggedItem
from django.db import transaction
from django.db import connection
# Create your views here.
def say_hello(request):
    """Smoke-test view: return a plain 'hello world' response."""
    # removed commented-out query loop left over from earlier experiments
    return HttpResponse('hello world')
def orm(request):
    """Render the five best-selling products (by revenue) into orm.html."""
    top_products = (
        Product.objects
        .annotate(total_sales=Sum(F('orderitem__quantity') *
                                  F('orderitem__unit_price')))
        .order_by('-total_sales')[:5]
    )
    return render(request, 'orm.html', {'result': list(top_products)})
def product_list(request):
    """Render every product into template.html."""
    products = Product.objects.all()
    return render(request, 'template.html', {'result': list(products)})
def say_hello2(request):
    """Render the five most recent orders (highest ids) with each
    customer's first name."""
    # order_by('-id')[:5] is equivalent to order_by('id').reverse()[:5]
    orders = (
        Order.objects
        .select_related('customer')
        .values('id', 'customer__first_name')
        .order_by('-id')[:5]
    )
    return render(request, 'hello.html', {'orders': list(orders)})
def love_sabrina(request):
    """Easter-egg view returning a fixed plain-text message."""
    return HttpResponse('I Love You, Sabrina')
| StarcoderdataPython |
341770 | <filename>main.py
from flask import Flask, render_template, url_for, request, redirect
from flask import send_from_directory
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/calendar/<year>', methods=['POST'])
def calendar(year):
    """Return the JSON calendar for *year* from the calendars/ directory.

    `year` comes from the URL (untrusted input); send_from_directory rejects
    path-traversal attempts, so a malicious value yields 404 rather than
    arbitrary file access.  Note: the view name shadows the stdlib
    `calendar` module within this file.
    """
    return send_from_directory("calendars", "calendar" + year + ".json")
if __name__ == '__main__':
    # debug=True plus host 0.0.0.0 exposes the interactive debugger to the
    # whole network -- acceptable for local development only, never production.
    app.run(debug=True, host="0.0.0.0")
1759208 | <reponame>scieloorg/scielo-opds
# coding: utf-8
"""
.. module: scieloopds.renderers
:synopsis: Renderer to build OPDS Atom catalog from dict input
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from datetime import datetime
from lxml import etree
from lxml.builder import ElementMaker
from opds import ContentType, Namespace, LinkRel
def make_entry(values):
    """Create atom:entry element from dict which follow structure:
    {
        'title': str, # REQUIRED
        '_id': str, # REQUIRED
        'updated': datetime,
        'language': str,
        'year': str,
        'publisher': str,
        'eisbn': str,
        'links': list, -- use :func:`opds.make_link` to create each item
        'pdf_file': str,
        'epub_file': str,
        'cover_thumbnail': str,
        'cover': str,
        'content': str,
        'synopsis': str,
        'creators': dict
    }
    :param values: Catalog entry fields.
    :type values: dict.
    :returns: lxml.etree._Element.
    """
    atom = ElementMaker(namespace=Namespace.ATOM,
                        nsmap={'atom': Namespace.ATOM})
    dc = ElementMaker(namespace=Namespace.OPDS,
                      nsmap={'dc': Namespace.OPDS})
    entry = atom.entry(
        atom.title(values['title']),
        atom.id(values['_id']))
    # atom:updated is mandatory; fall back to "now" for records without one
    updated = values.get('updated', datetime.now())
    entry.append(atom.updated(updated.strftime('%Y-%m-%dT%H:%M:%SZ')))
    # optional Dublin Core metadata
    if 'language' in values:
        entry.append(dc.language(values['language']))
    if 'year' in values:
        entry.append(dc.issued(values['year']))
    if 'publisher' in values:
        entry.append(dc.publisher(values['publisher']))
    if 'eisbn' in values:
        # FIX: dropped a redundant format() call around the eisbn value
        entry.append(dc.identifier('urn:isbn:%s' % values['eisbn']))
    for link in values.get('links', []):
        entry.append(atom.link(type=link['type'],
                               href=link['href'], rel=link['rel']))

    def _append_file_link(key, default_type, rel):
        """Append an acquisition/image link for *key* if present in values."""
        if key in values:
            link = values[key]
            entry.append(atom.link(type=link.get('type', default_type),
                                   href=link['uri'], rel=rel))

    # element order matters to OPDS clients: pdf, epub, thumbnail, full cover
    _append_file_link('pdf_file', 'application/pdf', LinkRel.ACQUISITION)
    _append_file_link('epub_file', 'application/epub+zip', LinkRel.ACQUISITION)
    _append_file_link('cover_thumbnail', 'image/jpeg', LinkRel.THUMBNAIL)
    _append_file_link('cover', 'image/jpeg', LinkRel.IMAGE)
    if 'content' in values:
        entry.append(atom.content(values['content']['value'],
                                  type=values['content'].get('type', 'text')))
    if 'synopsis' in values:
        entry.append(atom.summary(values['synopsis']))
    # creators: authors first, then contributors; each entry is a
    # (name, uri) pair where the uri may be empty
    creators = values.get('creators', {})
    for author_key in ('individual_author', 'corporate_author', 'organizer'):
        for author in creators.get(author_key, []):
            new_author = atom.author(atom.name(author[0]))
            if author[1]:
                new_author.append(atom.uri(author[1]))
            entry.append(new_author)
    for contributor_key in ('editor', 'translator', 'collaborator', 'other',
                            'coordinator'):
        for contributor in creators.get(contributor_key, []):
            new_contrib = atom.contributor(atom.name(contributor[0]))
            if contributor[1]:
                new_contrib.append(atom.uri(contributor[1]))
            entry.append(new_contrib)
    return entry
def make_feed(values):
    """Create atom:feed element from dict which follow structure:
    {
        'title': str,
        '_id': str,
        'updated': datetime,
        'language': str,
        'links': list, -- use :func:`opds.make_link` to create each item
        'entry': list -- see :func:`make_entry` doc for structure of each item,
    }
    :param values: Catalog feed fields.
    :type values: dict.
    :returns: lxml.etree._Element.
    """
    atom = ElementMaker(namespace=Namespace.ATOM,
        nsmap={'atom': Namespace.ATOM})
    # NOTE(review): datetime.now() is naive local time but the format string
    # appends a literal 'Z' (UTC) -- timestamps are skewed on non-UTC hosts.
    updated = values.get('updated', datetime.now())
    # Fixed feed skeleton: id/title/updated, the SciELO author block, and a
    # 'start' navigation link required by OPDS catalogs.
    feed = atom.feed(
        atom.id(values.get('_id', u'http://books.scielo.org/opds/')),
        atom.title(values.get('title', u'SciELO Books')),
        atom.updated(updated.strftime('%Y-%m-%dT%H:%M:%SZ')),
        atom.author(
            atom.name(u'<NAME>'),
            atom.uri(u'http://books.scielo.org'),
            atom.email(u'<EMAIL>')
        ),
        atom.link(type=ContentType.NAVIGATION,
            href=u'/opds/', rel=u'start')
    )
    # additional navigation/pagination links supplied by the caller
    links = values.get('links', [])
    for link in links:
        feed.append(atom.link(type=link['type'],
            href=link['href'], rel=link['rel']))
    # one atom:entry per catalog item
    entries = values.get('entry', [])
    for entry_values in entries:
        feed.append(make_entry(entry_values))
    return feed
def opds_factory(info):
    """Factory which create OPDS render
    :param info: An object having the attributes name (render name) and
                 package (the active package when render was registered)
    :type info: object.
    :returns: function.
    """
    def _render(value, system):
        """Call the render implementation
        :param value: View return parameters.
        :type value: dict.
        :param system: System values (view, context, request)
        :type system: dict.
        :returns: str.
        """
        request = system.get('request')
        if request is not None:
            # stamp the response with the OPDS catalog content type
            response = request.response
            response.charset = 'utf-8'
            response.content_type = ContentType.CATALOG
        # serialize the whole feed tree built by make_feed
        return etree.tostring(make_feed(value), pretty_print=True)
    return _render
| StarcoderdataPython |
6674619 | import jax.numpy as jnp
import objax
import bayesnewton
from bayesnewton.utils import softplus_inv
from .models import MGPR
from numpy.random import gamma
def squash_sin(m, s, max_action=None):
    """
    Squashing function, passing the controls mean and variance
    through a sinus, as in gSin.m. The output is in [-max_action, max_action].
    IN: mean (m) and variance(s) of the control input, max_action
    OUT: mean (M) variance (S) and input-output (C) covariance of the squashed
    control input
    """
    # k = control dimension; m is assumed to be shaped (1, k) -- TODO confirm
    k = jnp.shape(m)[1]
    if max_action is None:
        max_action = jnp.ones((1, k))  # squashes in [-1,1] by default
    else:
        max_action = max_action * jnp.ones((1, k))
    # moment-matched mean of max_action * sin(x) for x ~ N(m, s)
    M = max_action * jnp.exp(-0.5 * jnp.diag(s)) * jnp.sin(m)
    # pairwise log-variance terms used by the closed-form covariance
    lq = -0.5 * (jnp.diag(s)[:, None] + jnp.diag(s)[None, :])
    q = jnp.exp(lq)
    mT = jnp.transpose(m, (1, 0))
    S = (jnp.exp(lq + s) - q) * jnp.cos(mT - m) - (jnp.exp(lq - s) - q) * jnp.cos(
        mT + m
    )
    S = 0.5 * max_action * jnp.transpose(max_action, (1, 0)) * S
    # input-output covariance: diagonal of the moment-matched derivative
    C = max_action * objax.Vectorize(lambda x: jnp.diag(x, k=0), objax.VarCollection())(
        jnp.exp(-0.5 * jnp.diag(s)) * jnp.cos(m)
    )
    return M, S, C.reshape((k, k))
class LinearController(objax.Module):
    """Affine state-feedback controller u = W m + b, optionally sin-squashed."""
    def __init__(self, state_dim, control_dim, max_action=1.0):
        # seed objax's global RNG so weight initialization is reproducible
        objax.random.Generator(0)
        self.W = objax.TrainVar(objax.random.uniform((control_dim, state_dim)))
        self.b = objax.TrainVar(objax.random.uniform((1, control_dim)))
        self.max_action = max_action
    def compute_action(self, m, s, squash=True):
        """
        Simple affine action: M <- W(m-t) - b
        IN: mean (m) and variance (s) of the state
        OUT: mean (M) and variance (S) of the action
        """
        WT = jnp.transpose(self.W.value, (1, 0))
        M = m @ WT + self.b.value  # mean output
        S = self.W.value @ s @ WT  # output variance
        V = WT  # input output covariance
        if squash:
            # bound the action to [-max_action, max_action] via moment-matched sin
            M, S, V2 = squash_sin(M, S, self.max_action)
            V = V @ V2
        return M, S, V
    def randomize(self):
        """Re-draw W and b from a standard normal distribution."""
        mean = 0
        sigma = 1
        self.W.assign(mean + sigma * objax.random.normal(self.W.shape))
        self.b.assign(mean + sigma * objax.random.normal(self.b.shape))
class RbfController(MGPR):
    """
    An RBF Controller implemented as a deterministic GP
    See Deisenroth et al 2015: Gaussian Processes for Data-Efficient Learning in Robotics and Control
    Section 5.3.2.
    """
    def __init__(
        self,
        state_dim,
        control_dim,
        num_basis_functions,
        max_action=1.0,
        fixed_parameters=False,
    ):
        # MGPR is initialized with random basis-function centres (inputs) and
        # small random targets (outputs); see MGPR for the data layout.
        MGPR.__init__(
            self,
            [
                objax.random.normal((num_basis_functions, state_dim)),
                0.1 * objax.random.normal((num_basis_functions, control_dim)),
            ],
            fixed_parameters,
        )
        self.fixed_parameters = fixed_parameters
        self.max_action = max_action
    def create_models(self, data):
        """Build one variational GP per control output dimension."""
        self.models = []
        for i in range(self.num_outputs):
            kern = bayesnewton.kernels.Matern72(
                variance=1.0,
                lengthscale=jnp.ones((data[0].shape[1],)),
                fix_variance=self.fixed_parameters,
                fix_lengthscale=self.fixed_parameters,
            )
            # tiny likelihood noise: the controller GP is meant to be deterministic
            lik = bayesnewton.likelihoods.Gaussian(
                variance=1e-4, fix_variance=self.fixed_parameters
            )
            self.models.append(
                bayesnewton.models.VariationalGP(
                    kernel=kern, likelihood=lik, X=data[0], Y=data[1][:, i : i + 1]
                )
            )
    def compute_action(self, m, s, squash=True):
        """
        RBF Controller. See Deisenroth's Thesis Section
        IN: mean (m) and variance (s) of the state
        OUT: mean (M) and variance (S) of the action
        """
        iK, beta = self.calculate_factorizations()
        M, S, V = self.predict_given_factorizations(m, s, 0.0 * iK, beta)
        # subtract the (near-deterministic) model noise from the output variance
        S = S - jnp.diag(self.variance - 1e-6)
        if squash:
            M, S, V2 = squash_sin(M, S, self.max_action)
            V = V @ V2
        return M, S, V
    def randomize(self):
        """Re-draw basis-function centres, targets, and kernel lengthscales."""
        print("Randomizing controller")
        for m in self.models:
            m.X = jnp.array(objax.random.normal(m.X.shape))
            m.Y = jnp.array(
                0.1 * self.max_action * objax.random.normal(m.Y.shape)
            )
            mean = 1.0
            sigma = 0.1
            # lengthscales live in softplus space, hence the inverse transform
            m.kernel.transformed_lengthscale.assign(
                softplus_inv(
                    mean +
                    sigma * objax.random.normal(m.kernel.lengthscale.shape)
                )
            )
| StarcoderdataPython |
6697607 | #
# Copyright 2022 European Centre for Medium-Range Weather Forecasts (ECMWF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
import datetime
import enum
import logging
import uuid
from .user import User
class Status(enum.Enum):
    """Lifecycle states of a Request, in rough chronological order."""
    WAITING = "waiting"
    UPLOADING = "uploading"
    QUEUED = "queued"
    PROCESSING = "processing"
    PROCESSED = "processed"
    FAILED = "failed"
class Verb(enum.Enum):
    """Operation requested by the client: data retrieval or archival."""
    RETRIEVE = "retrieve"
    ARCHIVE = "archive"
class Request:
    """A sealed class representing a request

    Carries the full lifecycle state of an archive/retrieve job and can
    round-trip through plain dicts via serialize()/deserialize().
    """
    # __slots__ doubles as the serialization schema: serialize() walks it.
    __slots__ = [
        "id",
        "timestamp",
        "last_modified",
        "user",
        "verb",
        "url",
        "md5",
        "collection",
        "status",
        "user_message",
        "user_request",
        "content_length",
        "content_type",
    ]
    def __init__(self, from_dict=None, **kwargs):
        """Build a request with fresh defaults, then overlay *from_dict* and **kwargs."""
        self.id = str(uuid.uuid4())
        # NOTE(review): datetime.utcnow() is naive, so .timestamp() interprets
        # it in *local* time -- stamps are skewed on non-UTC hosts; confirm.
        self.timestamp = datetime.datetime.utcnow().timestamp()
        self.last_modified = datetime.datetime.utcnow().timestamp()
        self.user = None
        self.verb = Verb.RETRIEVE
        self.url = ""
        self.collection = ""
        self.status = Status.WAITING
        self.md5 = None
        self.user_message = ""
        self.user_request = ""
        self.content_length = None
        self.content_type = "application/octet-stream"
        if from_dict:
            self.deserialize(from_dict)
        # keyword arguments win over both defaults and from_dict values
        for k, v in kwargs.items():
            self.__setattr__(k, v)
    def set_status(self, value):
        """Set the request Status and log the transition."""
        self.status = value
        logging.info("Request ID {} status set to {}.".format(self.id, value.value))
    @classmethod
    def serialize_slot(cls, key, value):
        """Convert one slot value to a plain data type (enums/User flattened)."""
        if value is None:
            return None
        if key == "verb":
            return value.value
        if key == "status":
            return value.value
        if key == "user":
            return value.serialize()
        return value
    @classmethod
    def deserialize_slot(cls, key, value):
        """Inverse of serialize_slot: rebuild enums and the User object."""
        if value is None:
            return None
        if key == "verb":
            return Verb(value)
        if key == "status":
            return Status(value)
        if key == "user":
            return User(from_dict=value)
        return value
    def serialize(self):
        """Serialize the request object to a dictionary with plain data types"""
        result = {}
        for k in self.__slots__:
            v = self.__getattribute__(k)
            result[k] = self.serialize_slot(k, v)
        return result
    def deserialize(self, dict):
        """Modify the request by deserializing a dictionary into it"""
        # NOTE(review): the parameter name shadows the builtin `dict`
        for k, v in dict.items():
            self.__setattr__(k, self.deserialize_slot(k, v))
    def __eq__(self, other):
        # requests are identified purely by their UUID
        if isinstance(other, Request):
            return other.id == self.id
        return False
    def __hash__(self):
        # hash consistently with __eq__: derived from the UUID only
        return uuid.UUID(self.id).int
| StarcoderdataPython |
from random import shuffle


def read_highscore(path):
    """Return the stored high score, or 0.0 when the file is missing or empty.

    BUG FIX: the original opened the file in 'w+' mode (truncating it) and
    then called float('') on the empty read, which always crashed.
    """
    try:
        with open(path) as score_file:
            text = score_file.read().strip()
            return float(text) if text else 0.0
    except (OSError, ValueError):
        return 0.0


def guess_func():
    """Prompt until the player enters 0, 1 or 2; return the guess as an int."""
    guess = ''
    while guess not in ['0', '1', '2']:
        guess = input("pick a number : 0,1, or 2 \n")
    return int(guess)


def cuproll(cup, index):
    """Shuffle the cups and report whether the ball ('o') is at *index*.

    Returns 1 on a win, 0 on a loss.
    """
    shuffle(cup)
    print(f"\n correct order is {cup}")
    if cup[index] == 'o':
        print("YOU WON\n ")
        return 1
    print('YOU LOST \n')
    return 0


def main():
    """Run the interactive cup game and persist a beaten high score."""
    highscore_path = 'test.txt'
    curr_highscore = read_highscore(highscore_path)
    cup = ['', 'o', '']
    tot_chance = 0
    tot_points = 0
    keep_playing = 'y'
    while keep_playing == 'y':
        tot_chance += 1
        tot_points += cuproll(cup, guess_func())
        # BUG FIX: the original accepted ['y', 'y', 'n', 'N'] -- 'Y' was
        # rejected and 'y' listed twice.
        answer = ''
        while answer not in ['y', 'Y', 'n', 'N']:
            answer = input("do you want to continue : y or n \n")
        keep_playing = answer.lower()
    score = (tot_points / tot_chance) * 100
    if score > curr_highscore:
        # BUG FIX: the original wrote to a file handle that was already
        # closed; reopen for writing only when there is a new record.
        with open(highscore_path, 'w') as score_file:
            score_file.write(str(score))
    print(f'TOTAL POINT {tot_points}\n TOTAL CHANCE {tot_chance}\n SCORE {score:1.3f}')


if __name__ == '__main__':
    main()
8054080 | <reponame>Amplo-GmbH/AutoML
import numpy as np
import pandas as pd
from Amplo.AutoML import Sequencer
class TestSequence:
    """Unit tests for Amplo's Sequencer (sliding-window sequence builder).

    NOTE(review): window sizes (`back`, and `forward` in the multi-out test)
    are drawn with np.random.randint, so these tests are not deterministic
    across runs -- consider seeding.
    """
    def test_init(self):
        assert Sequencer(), 'Class initiation failed'
    def test_numpy_none(self):
        """Plain sequencing (no differencing) from numpy arrays."""
        # Parameters
        features = 5
        length = 500
        back = np.random.randint(1, 50)
        x, y = np.random.randint(0, 50, (length, features)), np.random.randint(0, 50, length)
        # Iterate scenarios
        for forward in [[1], [5]]:
            # Sequence
            sequence = Sequencer(back=back, forward=forward)
            seq_x, seq_y = sequence.convert(x, y, flat=False)
            # Test
            assert len(seq_x.shape) == 3, 'seq_x is no tensor'
            assert seq_x.shape[1] == back, 'seq_x step dimension incorrect'
            assert seq_x.shape[2] == features, 'seq_x feature dimension incorrect'
            assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y have inconsistent samples'
            for i in range(seq_x.shape[0]):
                assert np.allclose(seq_x[i], x[i:i + back]), 'seq_x samples are not correct'
                assert np.allclose(seq_y[i], y[i + back + forward[0] - 1]), 'seq_y samples are not correct'
    def test_numpy_diff(self):
        """First-difference sequencing of a linear ramp yields constant steps."""
        # Parameters
        features = 5
        length = 500
        back = np.random.randint(2, 50)
        x = np.outer(np.linspace(1, length, length), np.ones(features))
        y = np.linspace(0, length - 1, length)
        # Iterate Scenarios
        for forward in [[1], [5]]:
            # Sequence
            sequence = Sequencer(back=back, forward=forward, diff='diff')
            seq_x, seq_y = sequence.convert(x, y, flat=False)
            # Tests
            assert len(seq_x.shape) == 3, 'seq_x is not a tensor'
            assert seq_x.shape[1] == back - 1, 'seq_x step dimensions incorrect'
            assert seq_x.shape[2] == features, 'seq_x feature dimensions incorrect'
            assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y inconsistent samples'
            assert np.allclose(seq_x, np.ones_like(seq_x)), 'samples seq_x incorrect'
            assert np.allclose(seq_y, np.ones_like(seq_y) * forward[0]), 'seq_y samples incorrect'
    def test_numpy_log(self):
        """Log-difference sequencing matches hand-computed log ratios."""
        # Parameters
        features = 5
        length = 500
        back = np.random.randint(2, 50)
        x = np.outer(np.linspace(1, length, length), np.ones(features))
        y = np.linspace(1, length, length)
        # Iterate Scenarios
        for forward in [[1], [5]]:
            # Sequence
            sequence = Sequencer(back=back, forward=forward, diff='log_diff')
            seq_x, seq_y = sequence.convert(x, y, flat=False)
            # Tests
            assert len(seq_x.shape) == 3, 'seq_x is not a tensor'
            assert seq_x.shape[1] == back - 1, 'seq_x step dimensions incorrect'
            assert seq_x.shape[2] == features, 'seq_x feature dimensions incorrect'
            assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y inconsistent samples'
            for i in range(seq_x.shape[0]):
                assert np.allclose(seq_x[i], np.log(x[1+i:i+back]) - np.log(x[i:i+back-1])), 'samples seq_x incorrect'
                assert np.allclose(seq_y[i], np.log(y[i+back+forward[0]-1]) - np.log(y[i+back-1])), \
                    'seq_y samples incorrect'
    def test_numpy_multi_out(self):
        """Two forward horizons: seq_y gets one column per horizon."""
        # Parameters
        features = 5
        length = 500
        forward = [np.random.randint(2, 10)]
        forward += [forward[0] + np.random.randint(1, 10)]
        back = np.random.randint(2, 50)
        x = np.outer(np.linspace(1, length, length), np.ones(features))
        y = np.linspace(1, length, length)
        # Without differencing
        sequence = Sequencer(back=back, forward=forward, diff='none')
        seq_x, seq_y = sequence.convert(x, y, flat=False)
        # Test
        assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y have inconsistent samples'
        assert seq_y.shape[1] == 2, 'seq_y has incorrect steps'
        for i in range(seq_x.shape[0]):
            assert np.allclose(seq_y[i].tolist(),
                               [y[i + back + forward[0] - 1], y[i + back + forward[1] - 1]]
                               ), 'seq_y samples are not correct'
        # With differencing
        sequence = Sequencer(back=back, forward=forward, diff='diff')
        seq_x, seq_y = sequence.convert(x, y, flat=False)
        revert = sequence.revert(seq_y, y[back - 1:back - 1 + max(forward)])
        # Tests
        assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y have inconsistent samples'
        assert seq_y.shape[1] == 2, 'seq_y has incorrect steps'
        assert np.allclose(revert[0, :forward[0] - forward[-1]], y[back - 1: forward[0] - forward[1]])
        assert np.allclose(revert[1], y[back - 1:])
    def test_pandas_tensor(self):
        """Same as test_numpy_none but with pandas DataFrame/Series input."""
        # Parameters
        features = 5
        length = 500
        back = np.random.randint(1, 50)
        x, y = np.random.randint(0, 50, (length, features)), np.random.randint(0, 50, length)
        x, y = pd.DataFrame(x), pd.Series(y)
        # Iterate scenarios
        for forward in [[1], [5]]:
            # Sequence
            sequence = Sequencer(back=back, forward=forward)
            seq_x, seq_y = sequence.convert(x, y, flat=False)
            # Tests
            assert len(seq_x.shape) == 3, 'seq_x is no tensor'
            assert seq_x.shape[1] == back, 'seq_x step dimension incorrect'
            assert seq_x.shape[2] == features, 'seq_x feature dimension incorrect'
            assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y have inconsistent samples'
            for i in range(seq_x.shape[0]):
                assert np.allclose(seq_x[i], x[i:i + back]), 'seq_x samples incorrect'
                assert np.allclose(seq_y[i], y[i + back + forward[0] - 1]), 'seq_y samples incorrect'
    def test_reconstruction(self):
        """convert() followed by revert() reproduces the original target series."""
        # Parameters
        length = 100
        features = 5
        back = np.random.randint(2, 10)
        forward = [np.random.randint(2, 10)]
        # x, y = np.random.randint(0, 50, (length, features)), np.random.randint(0, 50, length)
        x, y = np.outer(np.linspace(1, length, length), np.ones(features)), np.linspace(1, length, length)
        # Iterate scenarios
        for diff in ['diff', 'log_diff']:
            # Sequence
            seq = Sequencer(back=back, forward=forward, diff=diff)
            seq_x, seq_y = seq.convert(x, y, flat=False)
            # Tests
            assert len(seq_x.shape) == 3, 'seq_x is not a tensor'
            assert seq_x.shape[1] == back - 1, 'seq_x step dimensions incorrect'
            assert seq_x.shape[2] == features, 'seq_x feature dimensions incorrect'
            assert seq_x.shape[0] == seq_y.shape[0], 'seq_x and seq_y inconsistent samples'
            revert = seq.revert(seq_y, y[back - 1:back - 1 + forward[0]])
            assert np.allclose(revert, y[back - 1:]), 'reverted seq_y incorrect'
8169691 | import logging
import os
import pkg_resources
import requests
# Prefer youtube_dl; fall back to the youtube_dlc fork under the same name.
try:
    import youtube_dl
    youtube_dl_bin_name = 'youtube-dl'
except ImportError:  # FIX: was a bare except that hid unrelated errors
    import youtube_dlc as youtube_dl
    youtube_dl_bin_name = 'youtube-dlc'
from boltons.urlutils import URL
from plumbum import local, ProcessExecutionError, ProcessTimedOut
from scdlbot.exceptions import *
# from requests.exceptions import Timeout, RequestException, SSLError
bin_path = os.getenv('BIN_PATH', '')
scdl_bin = local[os.path.join(bin_path, 'scdl')]
bandcamp_dl_bin = local[os.path.join(bin_path, 'bandcamp-dl')]
youtube_dl_bin = local[os.path.join(bin_path, youtube_dl_bin_name)]
BOTAN_TRACK_URL = 'https://api.botan.io/track'
logger = logging.getLogger(__name__)
def get_response_text(file_name):
    """Read a packaged text resource (texts/<file_name>) as a UTF-8 string."""
    # resource paths always use forward slashes, regardless of OS
    # (see https://stackoverflow.com/a/20885799/2490759)
    resource_path = 'texts/' + file_name
    return pkg_resources.resource_string(__name__, resource_path).decode("UTF-8")
def get_direct_urls(url, cookies_file=None, cookies_download_file=None, source_ip=None, proxy=None):
    """Resolve *url* into direct media URLs with `youtube-dl --get-url`.

    Returns youtube-dl's stdout (one direct URL per line).
    Raises URLTimeoutError, URLDirectError, URLCountryError or URLLiveError
    for the corresponding failure modes.
    """
    logger.debug("Entered get_direct_urls")
    youtube_dl_args = []
    # https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl
    if cookies_file:
        if "http" in cookies_file:
            # cookies_file is itself a URL: download it to a local file first
            try:
                r = requests.get(cookies_file, allow_redirects=True, timeout=5)
                # FIX: close the handle (was a bare open().write() leak)
                with open(cookies_download_file, 'wb') as f:
                    f.write(r.content)
                youtube_dl_args.extend(["--cookies", cookies_download_file])
            except Exception:
                # best effort: continue without cookies, but leave a trace
                # (FIX: was a silent bare except/pass)
                logger.warning("Could not fetch cookies file %s", cookies_file, exc_info=True)
        else:
            youtube_dl_args.extend(["--cookies", cookies_file])
    if source_ip:
        youtube_dl_args.extend(["--source-address", source_ip])
    if proxy:
        youtube_dl_args.extend(["--proxy", proxy])
    youtube_dl_args.extend(["--get-url", url])
    try:
        ret_code, std_out, std_err = youtube_dl_bin[youtube_dl_args].run(timeout=60)
    except ProcessTimedOut as exc:
        raise URLTimeoutError from exc
    except ProcessExecutionError as exc:
        # TODO: look at case: one page has multiple videos, some available, some not
        if "returning it as such" in exc.stderr:
            raise URLDirectError from exc
        if "proxy server" in exc.stderr:
            raise URLCountryError from exc
        raise
    if "yt_live_broadcast" in std_out:
        raise URLLiveError
    return std_out
def get_italic(text):
    """Wrap *text* in underscores (Telegram Markdown italics)."""
    return f"_{text}_"
def youtube_dl_func(url, ydl_opts, queue=None):
    """Download *url* with youtube-dl and report a (code, message) status.

    When *queue* is given (multiprocessing use) the status tuple is put on
    it; otherwise the tuple is returned directly.
    """
    downloader = youtube_dl.YoutubeDL(ydl_opts)
    try:
        downloader.download([url])
    except Exception as exc:
        status = (1, str(exc))
        # TODO: pass and re-raise original Exception
    else:
        status = (0, "OK")
    if queue:
        queue.put(status)
        return None
    return status
# def botan_track(token, message, event_name):
# try:
# # uid = message.chat_id
# uid = message.from_user.id
# except AttributeError:
# logger.warning('Botan no chat_id in message')
# return False
# num_retries = 2
# ssl_verify = True
# for i in range(num_retries):
# try:
# r = requests.post(
# BOTAN_TRACK_URL,
# params={"token": token, "uid": uid, "name": event_name},
# data=message.to_json(),
# verify=ssl_verify,
# timeout=2,
# )
# return r.json()
# except Timeout:
# logger.exception("Botan timeout on event: %s", event_name)
# except SSLError:
# ssl_verify = False
# except (Exception, RequestException, ValueError):
# # catastrophic error
# logger.exception("Botan 🙀astrophic error on event: %s", event_name)
# return False
def shorten_url(url):
    """Shorten *url* via clck.ru; fall back to the original URL on any failure."""
    try:
        # FIX: added a timeout so a stalled shortener cannot hang the bot
        return requests.get('https://clck.ru/--?url=' + url, timeout=5).text.replace("https://", "")
    except Exception:
        # FIX: was a bare except; best-effort fallback is intentional
        return url
def log_and_track(event_name, message=None):
    """Log *event_name*; analytics (botan) tracking is currently disabled."""
    logger.info("Event: %s", event_name)
    if message:
        pass
        # if self.botan_token:
        #     return botan_track(self.botan_token, message, event_name)
def get_link_text(urls):
    """Build Markdown text listing each source link and its direct URLs.

    *urls* maps a source page URL to a newline-separated string of direct
    download URLs extracted from it.
    """
    parts = []
    for idx, url in enumerate(urls, start=1):
        parts.append("[Source Link #{}]({}) | `{}`\n".format(idx, url, URL(url).host))
        for direct_url in urls[url].splitlines():
            if "http" not in direct_url:
                continue
            content_type = ""
            if "googlevideo" in direct_url:
                content_type = "Audio" if "audio" in direct_url else "Video"
            # direct_url = shorten_url(direct_url)
            parts.append("• {} [Direct Link]({})\n".format(content_type, direct_url))
    parts.append("\n*Note:* Final download URLs are only guaranteed to work on the same machine/IP where extracted")
    return "".join(parts)
| StarcoderdataPython |
5110514 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-04-23 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (altering billinglog.log_type choices);
    # applied migrations should not be edited by hand.
    dependencies = [("crm", "0024_auto_20190423_1113")]
    operations = [
        migrations.AlterField(
            model_name="billinglog",
            name="log_type",
            field=models.CharField(
                choices=[
                    (b"create_invoice", b"Invoice"),
                    (b"send_invoice", b"Send invoice via email"),
                    (b"create_paid_invoice", b"Create paid invoice"),
                    (b"send_paid_invoice", b"Send paid invoice via email"),
                    (b"create_note", b"Accounting note"),
                    (b"create_general_note", b"General note"),
                    (b"create_payment", b"Payment"),
                ],
                max_length=30,
            ),
        )
    ]
| StarcoderdataPython |
class AdversarialArm():
    """Bandit arm that pays 1.0 only while inside a fixed activity window."""

    def __init__(self, t, active_start, active_end):
        self.t = t
        self.active_start = active_start
        self.active_end = active_end

    def draw(self):
        """Advance the internal clock and return the reward for this pull."""
        self.t += 1
        in_window = self.active_start <= self.t <= self.active_end
        return 1.0 if in_window else 0.0
| StarcoderdataPython |
9668072 |
from PyQt5 import QtWidgets, QtCore
from . selectable_line import Template,Source
from . draggable_points import TemplateLandmarks,SourceLandmarks
from . stack import SelectionStack
class _LinePointCollection(QtCore.QObject):
    """Couples a selectable line with its draggable landmark points."""
    # subclasses override these to pick the concrete line/landmark types
    LandmarksClass = TemplateLandmarks
    LineClass = Template
    point_deleted = QtCore.pyqtSignal(object, int)
    selected = QtCore.pyqtSignal(object, int)
    def __init__(self, ax, y):
        super().__init__()
        self.ax = ax
        self.line = self.LineClass(ax, y, collection=self)
        self.h = self.line.h
        # landmarks/stack are created lazily by set_landmarks()
        self.landmarks = None
        self.stack = None
    def add_landmark(self, x):
        self.landmarks.add_point(x)
    def get_line_x(self):
        return self.line.get_xdata()
    def get_line_y(self):
        return self.line.get_ydata()
    @property
    def isvisible(self):
        return self.line.isvisible
    def on_point_deleted(self, ind):
        # re-emit the landmark deletion on this collection's own signal
        self.point_deleted.emit(self.landmarks, ind)
    def set_landmarks(self, x):
        """Create landmark points constrained to the line and wire up selection."""
        self.landmarks = self.LandmarksClass(self.ax, x, y_constraint=self.get_line_y(), collection=self)
        self.stack = SelectionStack(self.ax, [self.landmarks, self.line])
        # notifications flow only through the stack once it exists
        self.line.set_notify(False)
        self.landmarks.set_notify(False)
        self.stack.set_notify(True)
        # NOTE(review): this rebinds the instance attribute, shadowing the
        # class-level `selected` pyqtSignal with the stack's signal -- confirm
        # that is intended.
        self.selected = self.stack.selected
        self.landmarks.point_deleted.connect( self.on_point_deleted )
    def set_active(self, active):
        self.line.set_active(active)
        if self.landmarks is not None:
            self.landmarks.set_active(active)
    def set_notify(self, notify):
        self.stack.set_notify(notify)
    def set_visible(self, visible):
        self.landmarks.set_visible(visible)
        self.line.set_visible(visible)
class SourceWithLandmarks(_LinePointCollection):
    """Line/landmark collection for the source curve."""

    LandmarksClass = SourceLandmarks
    LineClass = Source

    def calculate_mse(self, template):
        """Return the mean squared error between this curve and *template*."""
        reference = template.line.h.get_ydata()
        current = self.h.get_ydata()
        residual = current - reference
        return (residual ** 2).mean()
class TemplateWithLandmarks(_LinePointCollection):
    """Line/landmark collection for the template curve.

    Restates the base-class defaults explicitly, for symmetry with
    ``SourceWithLandmarks``.
    """
    LandmarksClass = TemplateLandmarks
    LineClass = Template
| StarcoderdataPython |
1787584 | #!/usr/bin/env python
import rospy
from std_msgs.msg import UInt16
import random
def publisher():
    """Publish a random launcher pitch angle (5-160 degrees) at 1 Hz.

    NOTE(review): Python 2 print syntax; targets ROS1 (rospy). Runs until
    the node is shut down.
    """
    pub = rospy.Publisher('launcher/pitch', UInt16, queue_size=10)
    rospy.init_node('pitch_control', anonymous=False)
    rate = rospy.Rate(1) # 1 hz
    angle = 90  # initial value; overwritten before first publish
    while not rospy.is_shutdown():
        angle = random.randint(5,160)  # new random angle each cycle
        print "Angle: ", angle
        pub.publish(angle)
        rate.sleep()
if __name__ == '__main__':
    # Run the publisher until ROS shuts down; swallow the interrupt
    # rospy raises on shutdown so the script exits cleanly.
    try:
        publisher()
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
4874728 | # -*- coding: utf-8 -*-
import queue
import threading
from mantarray_desktop_app import MantarrayProcessesMonitor
from mantarray_desktop_app import SERVER_INITIALIZING_STATE
import pytest
@pytest.fixture(scope="function", name="test_monitor")
def fixture_test_monitor():
    """Yield a factory that builds a MantarrayProcessesMonitor for tests.

    The factory returns the monitor together with the shared-values dict,
    the error queue and the lock it was constructed with.
    """
    def _foo(process_manager):
        the_dict = process_manager.get_values_to_share_to_server()
        the_dict["system_status"] = SERVER_INITIALIZING_STATE
        # Fixed: the original had a duplicated assignment
        # (``error_queue = error_queue = queue.Queue()``).
        error_queue = queue.Queue()
        the_lock = threading.Lock()
        monitor = MantarrayProcessesMonitor(the_dict, process_manager, error_queue, the_lock)
        return monitor, the_dict, error_queue, the_lock

    yield _foo
| StarcoderdataPython |
189014 | <filename>0x15-api/1-export_to_CSV.py
#!/usr/bin/python3
""" Python CVS """
import csv
import requests
from sys import argv
if __name__ == '__main__':
    # Export all TODO tasks of the given user to <user_id>.csv, one row
    # per task: "USER_ID","USERNAME","TASK_COMPLETED_STATUS","TASK_TITLE".
    user_id = argv[1]
    url_todos = "https://jsonplaceholder.typicode.com/todos?userId={}"
    # Fixed: TLS certificate verification was disabled (verify=False);
    # use the secure default for this public HTTPS API.
    api_todos = requests.get(url_todos.format(user_id)).json()
    url_names = "https://jsonplaceholder.typicode.com/users/{}"
    name_users = requests.get(url_names.format(user_id)).json()
    with open("{}.csv".format(user_id), 'w', newline='') as csvfile:
        writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
        for task in api_todos:
            writer.writerow([int(user_id), name_users.get('username'),
                             task.get('completed'), task.get('title')])
12836058 | <reponame>lynnUg/vumi-go
from uuid import uuid4
import urllib
from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
from go.channel.views import get_channel_view_definition
class TestChannelViews(GoDjangoTestCase):
    """View tests for channel acquisition, display and release.

    Each test runs against a fresh Django user with access to a
    'longcode' tagpool holding four tags.
    """
    def setUp(self):
        """Create the vumi/django helpers, tagpool and HTTP client."""
        self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
        self.user_helper = self.vumi_helper.make_django_user()
        self.vumi_helper.setup_tagpool(
            u'longcode', [u'default1000%s' % i for i in [1, 2, 3, 4]])
        self.user_helper.add_tagpool_permission(u'longcode')
        self.client = self.vumi_helper.get_client()
    def assert_active_channel_tags(self, expected):
        """Assert the user's active channel keys equal *expected* tags."""
        self.assertEqual(
            set(':'.join(tag) for tag in expected),
            set(ch.key for ch in self.user_helper.user_api.active_channels()))
    def add_tagpool_permission(self, tagpool, max_keys=None):
        """Grant this user's account a permission on *tagpool*.

        NOTE(review): references ``self.api`` which is never assigned in
        setUp -- this helper appears unused by the tests below and would
        raise AttributeError if called; confirm.
        """
        permission = self.api.account_store.tag_permissions(
            uuid4().hex, tagpool=tagpool, max_keys=max_keys)
        permission.save()
        account = self.user_helper.user_api.get_user_account()
        account.tagpools.add(permission)
        account.save()
    def get_view_url(self, view, channel_key):
        """Build the URL for channel *view* (e.g. 'show', 'release')."""
        view_def = get_channel_view_definition(None)
        return view_def.get_view_url(view, channel_key=channel_key)
    def test_index(self):
        """Index lists a channel only after its tag is acquired."""
        tag = (u'longcode', u'default10001')
        channel_key = u'%s:%s' % tag
        response = self.client.get(reverse('channels:index'))
        self.assertNotContains(response, urllib.quote(channel_key))
        self.user_helper.user_api.acquire_specific_tag(tag)
        response = self.client.get(reverse('channels:index'))
        self.assertContains(response, urllib.quote(channel_key))
    def test_get_new_channel(self):
        """New-channel form offers the longcode pool."""
        self.assert_active_channel_tags([])
        response = self.client.get(reverse('channels:new_channel'))
        self.assertContains(response, 'International')
        self.assertContains(response, 'longcode:')
    def test_get_new_channel_empty_or_exhausted_tagpool(self):
        """Pools with no tags (or none free) are omitted from the form."""
        self.vumi_helper.setup_tagpool(u'empty', [])
        self.vumi_helper.setup_tagpool(u'exhausted', [u'tag1'])
        self.user_helper.add_tagpool_permission(u'empty')
        self.user_helper.add_tagpool_permission(u'exhausted')
        tag = self.user_helper.user_api.acquire_tag(u'exhausted')
        self.assert_active_channel_tags([tag])
        response = self.client.get(reverse('channels:new_channel'))
        self.assertContains(response, 'International')
        self.assertContains(response, 'longcode:')
        self.assertNotContains(response, 'empty:')
        self.assertNotContains(response, 'exhausted:')
    def test_post_new_channel(self):
        """Posting the form acquires a tag and redirects to its page."""
        self.assert_active_channel_tags([])
        response = self.client.post(reverse('channels:new_channel'), {
            'country': 'International', 'channel': 'longcode:'})
        tag = (u'longcode', u'default10001')
        channel_key = u'%s:%s' % tag
        self.assertRedirects(response, self.get_view_url('show', channel_key))
        self.assert_active_channel_tags([tag])
    def test_post_new_channel_no_country(self):
        """Missing country field fails validation; nothing is acquired."""
        self.assert_active_channel_tags([])
        response = self.client.post(reverse('channels:new_channel'), {
            'channel': 'longcode:'})
        self.assertContains(response, '<li>country<ul class="errorlist">'
                                      '<li>This field is required.</li></ul></li>')
        self.assert_active_channel_tags([])
    def test_post_new_channel_no_channel(self):
        """Missing channel field fails validation; nothing is acquired."""
        self.assert_active_channel_tags([])
        response = self.client.post(reverse('channels:new_channel'), {
            'country': 'International'})
        self.assertContains(response, '<li>channel<ul class="errorlist">'
                                      '<li>This field is required.</li></ul></li>')
        self.assert_active_channel_tags([])
    def test_show_channel_missing(self):
        """Showing an unknown channel returns 404."""
        response = self.client.get(self.get_view_url('show', u'foo:bar'))
        self.assertEqual(response.status_code, 404)
    def test_show_channel(self):
        """Show page renders the pool and tag of an acquired channel."""
        tag = (u'longcode', u'default10002')
        channel_key = u'%s:%s' % tag
        self.user_helper.user_api.acquire_specific_tag(tag)
        response = self.client.get(self.get_view_url('show', channel_key))
        self.assertContains(response, tag[0])
        self.assertContains(response, tag[1])
    def test_release_channel(self):
        """Releasing a channel frees its tag and redirects to conversations."""
        tag = (u'longcode', u'default10002')
        channel_key = u'%s:%s' % tag
        self.user_helper.user_api.acquire_specific_tag(tag)
        self.assert_active_channel_tags([tag])
        response = self.client.post(self.get_view_url('release', channel_key))
        self.assertRedirects(response, reverse('conversations:index'))
        self.assert_active_channel_tags([])
| StarcoderdataPython |
1899208 | import miniserver
# NOTE(review): runs at import time -- consider guarding with
# ``if __name__ == '__main__':`` if this module is ever imported.
miniserver.start_server()
| StarcoderdataPython |
1860181 | from textwrap import dedent
from dnslib import RR, QTYPE, RCODE
from boucanpy.core import logger
from boucanpy.dns.record import Record
from boucanpy.dns.zone_template import ZONE_TEMPLATE
class RecordParser:
    """Build DNS resource records (dnslib RRs wrapped in Record) from zones."""
    def __init__(self, records):
        # records: list of Record instances already parsed.
        self.records = records
    # TODO: make this better
    @classmethod
    def from_zones(cls, zones):
        """Parse every zone and return one parser over all their records."""
        records = []
        for zone in zones:
            records = records + cls.from_zone(zone).records
        logger.debug("%d zone resource records generated", len(records))
        return cls(records)
    @classmethod
    def from_zone(cls, zone):
        """Parse a single zone's dns_records (or defaults) into Records."""
        records = []
        logger.info(
            f"<EMAIL> - Loading zone: {zone.domain}/{zone.ip} ({zone.id})"
        )
        dns_records = zone.dns_records or []
        # if the zone has no records, create some default ones
        if not dns_records:
            logger.warning(
                f"<EMAIL> - Zone has no dns_records. loading defaults: {zone.domain}/{zone.ip} ({zone.id})"
            )
            # Render the default zone template for this domain/ip and
            # parse it with dnslib.
            rrs = RR.fromZone(
                ZONE_TEMPLATE.format(domain_name=zone.domain, domain_ip=zone.ip)
            )
            zone_records = [Record.make(zone, rr) for rr in rrs]
            for zr in zone_records:
                # TODO: make this clean on output
                rrstr = str(dedent(str(zr.rr)))
                logger.debug(f"<EMAIL> - Loading record entry: {rrstr}")
                logger.debug(
                    "<EMAIL> - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                        str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                    )
                )
        else:
            # loop over each dns_record of the zone and convert it to RR record
            dns_records = sorted(dns_records, key=lambda x: x.sort)
            zone_records = []
            for dns_record in dns_records:
                try:
                    rrs = RR.fromZone(dns_record.record)
                    _zone_records = [Record.make(zone, rr) for rr in rrs]
                    for zr in _zone_records:
                        rrstr = str(dedent(str(zr.rr)))
                        logger.debug(
                            f"<EMAIL> - Loading record: {str(dns_record.record)}"
                        )
                        logger.debug(
                            f"from_<EMAIL> - Loading record entry: {rrstr}"
                        )
                        logger.debug(
                            "from_<EMAIL> - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                                str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                            )
                        )
                        # NOTE(review): this accumulation runs once per
                        # parsed rr, so multi-rr entries are appended
                        # multiple times -- confirm intended.
                        zone_records = zone_records + _zone_records
                except Exception as e:
                    logger.critical(
                        f'from_<EMAIL> - Error processing line ({e.__class__.__name__}: {e}) "{dns_record.id}:{dns_record.record}" '
                    )
                    raise e
        # add the records for the zone to the rest of the records
        records = records + zone_records
        return cls(records)
    def get_rrs(self):
        """Return the bare dnslib RR objects of all parsed records."""
        return [record.rr for record in self.records]
| StarcoderdataPython |
285684 | from __future__ import absolute_import, division, print_function
import itertools
import inspect
from functools import wraps, partial
import numpy as np
import scipy.interpolate
import scipy.linalg
from future.builtins import zip, range
from future.backports import OrderedDict
import torch
from matplotlib.colors import ListedColormap
__all__ = ['PT_loose_thresh', 'PT_indicator', 'combinations', 'linearly_spaced_combinations', 'lqr', 'dlqr',
'ellipse_bounds', 'concatenate_inputs', 'make_tf_fun',
'with_scope', 'use_parent_scope', 'add_weight_constraint',
'batchify', 'get_storage', 'set_storage', 'unique_rows',
'gradient_clipping', 'binary_cmap', 'make_dataset_from_trajectories', ]
def unique_rows(array):
    """Return the unique rows of a 2D array.

    Each row is reinterpreted as a single flexible-void record so that
    ``np.unique`` can deduplicate whole rows in one pass; the surviving
    rows come back in record-sorted order.

    Parameters
    ----------
    array : ndarray
        A 2D numpy array.

    Returns
    -------
    ndarray
        2D array holding every distinct row of *array*.
    """
    contiguous = np.ascontiguousarray(array)
    # View each row as one opaque element so rows compare atomically.
    row_bytes = contiguous.dtype.itemsize * contiguous.shape[1]
    as_records = contiguous.view(dtype=np.dtype((np.void, row_bytes)))
    _, keep = np.unique(as_records, return_index=True)
    return contiguous[keep]
def get_storage(dictionary, index=None):
    """Get a unique storage point within a class method.

    Parameters
    ----------
    dictionary : dict
        A dictionary used for storage.
    index : hashable
        An index under which to load the element. Needs to be hashable.
        This is useful for functions which might be accessed with multiple
        different arguments.

    Returns
    -------
    storage : OrderedDict
        The storage object. Is None if no storage exists. Otherwise it
        returns the OrderedDict that was previously put in the storage.
    """
    # Use function name as storage name
    # NOTE: the key is the *caller's* function name (one frame up), so
    # this must be called directly from the function that owns the
    # storage -- wrapping it in a helper would change the key.
    frame = inspect.currentframe()
    storage_name = inspect.getframeinfo(frame.f_back).function
    storage = dictionary.get(storage_name)
    if index is None:
        return storage
    elif storage is not None:
        # Return directly the indexed object
        try:
            return storage[index]
        except KeyError:
            pass
def set_storage(dictionary, name_value, index=None):
    """Set the storage point within a class method.

    Parameters
    ----------
    dictionary : dict
    name_value : tuple
        A list of tuples, where each tuple contains a string with the name
        of the storage object and the corresponding value that is to be put
        in storage. These are stored as OrderedDicts.
    index : hashable
        An index under which to store the element. Needs to be hashable.
        This is useful for functions which might be accessed with multiple
        different arguements.
    """
    # Use function name as storage name
    # NOTE: keyed on the *caller's* function name (one frame up) --
    # must be paired with get_storage() called from the same function.
    frame = inspect.currentframe()
    storage_name = inspect.getframeinfo(frame.f_back).function
    storage = OrderedDict(name_value)
    if index is None:
        dictionary[storage_name] = storage
    else:
        # Make sure the storage is initialized
        if storage_name not in dictionary:
            dictionary[storage_name] = {}
        # Set the indexed storage
        dictionary[storage_name][index] = storage
def batchify(arrays, batch_size):
    """Yield (offset, batch_list) pairs slicing *arrays* in order.

    The final batch may be smaller than *batch_size*; iteration stops as
    soon as a batch of the first array comes back empty.

    Parameters
    ----------
    arrays : ndarray or list/tuple of ndarray
        The arrays to split into batches (sliced in lockstep).
    batch_size : int
        Number of rows per batch.
    """
    if not isinstance(arrays, (list, tuple)):
        arrays = (arrays,)
    offset = 0
    while True:
        chunk = [array[offset:offset + batch_size] for array in arrays]
        # Stop once the leading array has been exhausted.
        if not chunk[0].size:
            break
        yield offset, chunk
        offset += batch_size
def combinations(arrays):
    """Return a single 2D array holding all combinations of the inputs.

    Parameters
    ----------
    arrays : list of np.array

    Returns
    -------
    np.array
        One row per combination of one element from each input array.
        The exact row ordering of the historical meshgrid/transpose
        construction is preserved.
    """
    grids = np.meshgrid(*arrays)
    return np.array(grids).T.reshape(-1, len(arrays))
def linearly_spaced_combinations(bounds, num_samples):
    """Return a 2D array of all linearly spaced combinations within *bounds*.

    Parameters
    ----------
    bounds : sequence of tuples
        Per-dimension (min, max) pairs.
    num_samples : int or array_like
        Samples per dimension; a scalar is broadcast to every dimension.
        The total number of rows is prod(num_samples).

    Returns
    -------
    ndarray
        prod(num_samples) x len(bounds) array; each row is one input
        combination.
    """
    bounds = np.atleast_2d(bounds)
    samples_per_dim = np.broadcast_to(num_samples, len(bounds))
    # One linearly spaced axis per dimension, then their cross product.
    axes = [np.linspace(lo, hi, count)
            for (lo, hi), count in zip(bounds, samples_per_dim)]
    return combinations(axes)
def lqr(a, b, q, r):
    """Compute the continuous-time LQR controller.

    The optimal control input is ``u = -k.dot(x)``.

    Parameters
    ----------
    a, b, q, r : np.array
        System matrix, input matrix, state cost and input cost.

    Returns
    -------
    k : np.array
        Controller (feedback gain) matrix.
    p : np.array
        Cost-to-go (Riccati) matrix.
    """
    a, b, q, r = (np.atleast_2d(m) for m in (a, b, q, r))
    p = scipy.linalg.solve_continuous_are(a, b, q, r)
    # k = r^-1 b' p, computed with a linear solve instead of an inverse.
    k = np.linalg.solve(r, b.T.dot(p))
    return k, p
def dlqr(a, b, q, r):
    """Compute the discrete-time LQR controller.

    The optimal control input is ``u = -k.dot(x)``.

    Parameters
    ----------
    a, b, q, r : np.array
        System matrix, input matrix, state cost and input cost.

    Returns
    -------
    k : np.array
        Controller (feedback gain) matrix.
    p : np.array
        Cost-to-go (Riccati) matrix.
    """
    a, b, q, r = (np.atleast_2d(m) for m in (a, b, q, r))
    p = scipy.linalg.solve_discrete_are(a, b, q, r)
    # k = (b' p b + r)^-1 (b' p a), again via a linear solve.
    btp = b.T.dot(p)
    k = np.linalg.solve(btp.dot(b) + r, btp.dot(a))
    return k, p
def ellipse_bounds(P, level, n=100):
    """Compute the bounds of a 2D ellipse.

    The levelset of the ellipsoid is given by ``level = x' P x``. Given
    the x-coordinates of the first half of the boundary, this returns the
    corresponding upper and lower values of the second dimension.

    Parameters
    ----------
    P : np.array
        The matrix of the ellipsoid.
    level : float
        The value of the levelset.
    n : int
        Number of data points.

    Returns
    -------
    x : np.array
        1D array of x positions of the ellipse.
    yu : np.array
        The upper bound of the ellipse.
    yl : np.array
        The lower bound of the ellipse.

    Notes
    -----
    This can be used as ``plt.fill_between(*ellipse_bounds(P, level))``.
    """
    # Round up to multiple of 2 so the boundary splits into two halves.
    n += n % 2
    # Principal axes of ellipsoid, scaled to the requested level set.
    eigval, eigvec = np.linalg.eig(P)
    eigvec *= np.sqrt(level / eigval)
    # Set zero angle at maximum x.
    angle = np.linspace(0, 2 * np.pi, n)[:, None]
    angle += np.arctan(eigvec[0, 1] / eigvec[0, 0])
    # Compute positions on the boundary.
    pos = np.cos(angle) * eigvec[:, 0] + np.sin(angle) * eigvec[:, 1]
    # Fixed: floor division -- ``n /= 2`` yields a float under Python 3
    # and float indices raise TypeError in the slicing below.
    n //= 2
    # Return x-position (symmetric) and upper/lower bounds.
    return pos[:n, 0], pos[:n, 1], pos[:n - 1:-1, 1]
def dict2func(d):
    """Wrap mapping *d* as a lookup function: ``dict2func(d)(k) == d[k]``."""
    def lookup(key):
        return d[key]
    return lookup
def concatenate_inputs(start=0):
    """Decorator concatenating the positional array inputs from *start* on.

    torch tensors are joined with ``torch.cat`` along dim 1 (a single
    tensor is passed through untouched to keep the graph small); numpy
    inputs are promoted to 2D with ``np.atleast_2d`` and stacked
    horizontally.

    Parameters
    ----------
    start : int, optional
        Index of the first positional argument to concatenate.
    """
    def wrap(function):
        @wraps(function)
        def wrapped_function(*args, **kwargs):
            """Concatenate the trailing positional arguments."""
            head, tail = args[:start], args[start:]
            if any(isinstance(value, torch.Tensor) for value in tail):
                # Reduce the number of ops in the graph when possible.
                if len(tail) > 1:
                    tail = (torch.cat(tail, dim=1),)
                return function(*(head + tail), **kwargs)
            arrays = [np.atleast_2d(value) for value in tail]
            if len(arrays) > 1:
                arrays = [np.hstack(arrays)]
            return function(*(head + tuple(arrays)), **kwargs)
        return wrapped_function
    return wrap
def binary_cmap(color='red', alpha=1.):
    """Construct a two-entry colormap: transparent for 0, *color* for 1.

    *color* may be 'red', 'green' or 'blue', or any RGBA tuple, which is
    then used verbatim.
    """
    channels = ('red', 'green', 'blue')
    if color in channels:
        rgb = tuple(float(color == name) for name in channels)
        color_code = rgb + (alpha,)
    else:
        color_code = color
    fully_transparent = (1., 1., 1., 0.)
    return ListedColormap([fully_transparent, color_code])
def get_batch_grad(f, inputs):
    """Compute the gradient of scalar-valued *f* at each input row.

    Parameters
    ----------
    f : callable
        Maps a (1, d) torch tensor to a scalar torch value.
    inputs : torch.Tensor, shape (nsamples, d)

    Returns
    -------
    torch.Tensor, shape (nsamples, d)
        Row i holds df/dx evaluated at ``inputs[i]``.
    """
    grads = torch.zeros_like(inputs)
    values = inputs.detach().numpy()
    for row in range(values.shape[0]):
        # Evaluate each sample independently so gradients do not mix.
        x = torch.tensor(values[[row]], requires_grad=True)
        f(x).backward()
        grads[row] = x.grad
    return grads
def get_number_of_rows_and_columns(m):
    """Split *m* items into (rows, columns) near the square root of m.

    Parameters
    ----------
    m : int
        Total number of things.

    Returns
    -------
    (int, int)
        rows = floor(sqrt(m)); columns is just enough so that
        rows * columns >= m.
    """
    rows = int(np.sqrt(m))
    cols, remainder = divmod(m, rows)
    if remainder:
        cols += 1
    return rows, cols
def save_lyapunov_nn(lyapunov_nn, full_path):
    """Persist the weights of a Lyapunov object's neural network.

    Only the ``state_dict`` of ``lyapunov_nn.lyapunov_function.net`` is
    written to *full_path* (the architecture itself is not saved).
    """
    state = lyapunov_nn.lyapunov_function.net.state_dict()
    torch.save(state, full_path)
def load_lyapunov_nn(lyapunov_nn, full_path):
    """Load stored network weights into *lyapunov_nn* and return it.

    The file at *full_path* must contain a state dict compatible with
    ``lyapunov_nn.lyapunov_function.net``.
    """
    state = torch.load(full_path)
    lyapunov_nn.lyapunov_function.net.load_state_dict(state)
    return lyapunov_nn
def print_no_newline(string):
    """Print *string* without a trailing newline and flush immediately.

    Useful for in-place progress updates on the same console line.
    """
    # Fixed: ``sys`` was used here but never imported in this module;
    # import locally to keep the fix self-contained.
    import sys
    sys.stdout.write(string)
    sys.stdout.flush()
def compute_nrows_ncolumns(nplots):
    """Return (n_rows, n_columns) of a grid holding *nplots* subplots.

    Rows are ceil(sqrt(nplots)); columns are then however many are
    needed to fit the remainder.
    """
    n_rows = int(np.ceil(np.sqrt(nplots)))
    n_columns = int(np.ceil(nplots / n_rows))
    return n_rows, n_columns
def str2bool(v):
    """Parse a textual boolean (argparse-friendly ``type=`` callable).

    Accepts yes/no, true/false, t/f, y/n, 1/0 in any letter case.

    Raises
    ------
    argparse.ArgumentTypeError
        If *v* matches neither set of values.
    """
    # Fixed: ``argparse`` was referenced here but never imported in this
    # module; import locally to keep the fix self-contained.
    import argparse
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
import numpy as np
import torch
def choose_initial_points(all_points, n_points):
    """Sample *n_points* rows (with replacement) from the candidate grid.

    Parameters
    ----------
    all_points : ndarray
        All available points in the grid to choose from, one per row.
    n_points : int
        Number of points to choose.

    Returns
    -------
    ndarray
        2D array of the chosen initial points.
    """
    candidate_inds = np.arange(len(all_points))
    picked = np.random.choice(candidate_inds, n_points)
    return np.atleast_2d(all_points[picked])
def generate_trajectories_from_inits(closed_loop_dynamics, initial_states, dt, n_reps, length, noise_std):
    """Simulate *n_reps* noisy rollouts from each initial state.

    Parameters
    ----------
    closed_loop_dynamics : callable
        Torch function mapping a batch of states to the next states.
    initial_states : iterable
        One starting state (environment) per entry.
    dt : float
        Sampling interval, forwarded to the simulator.
    n_reps : int
        Number of trajectories per initial state; repetitions differ only
        through the observation noise and can be averaged to denoise.
    length : int
        Trajectory length in samples.
    noise_std : float
        Std of the additive observation noise.

    Returns
    -------
    ndarray, shape (n_envs, state_dim, length, n_reps)
    """
    n_envs = len(initial_states)
    state_dim = len(initial_states[0])
    rollouts = np.zeros((n_envs, state_dim, length, n_reps))
    for rep in range(n_reps):
        rollouts[..., rep] = trajectory_simulator(
            closed_loop_dynamics, initial_states, dt, length, noise_std)
    return rollouts
def trajectory_simulator(close_loop_dynamics, initial_states, dt, length, noise_std):
    """Roll out the closed-loop dynamics and add Gaussian observation noise.

    Parameters
    ----------
    close_loop_dynamics : callable
        Torch function representing the discrete closed-loop update.
    initial_states : array_like, shape (n_envs, state_dim)
        Initial states of the intended trajectories.
    dt : float
        Sampling interval (recorded by callers; not used in the rollout).
    length : int
        Length of the intended trajectory.
    noise_std : float
        Standard deviation of the zero-mean observation noise.

    Returns
    -------
    ndarray, shape (n_envs, state_dim, length)
    """
    starts = np.atleast_2d(initial_states)
    n_envs = len(starts)
    state_dim = len(starts[0])
    trajectory = np.zeros((n_envs, state_dim, length))
    trajectory[:, :, 0] = starts
    for step in range(1, length):
        previous = trajectory[:, :, step - 1]
        trajectory[:, :, step] = close_loop_dynamics(previous).detach().numpy()
    # Zero-mean Gaussian observation noise on every sample.
    return trajectory + np.random.normal(0, noise_std, trajectory.shape)
def make_dataset_from_trajectories(closed_loop_dynamics, initial_states, dt, state_norm, length, n_reps, noise_std):
    """Build an ode-package-compatible dataset of simulated trajectories.

    Parameters
    ----------
    closed_loop_dynamics : callable
        Torch function for the closed-loop dynamics.
    initial_states : array_like
        Normalized initial states of the intended trajectories.
    dt : float
        Sampling interval.
    state_norm : array_like or None
        Per-dimension scale used to denormalize states from [-1, 1] back
        to physical values; None keeps the normalized values.
    length : int
        Trajectory length in samples.
    n_reps : int
        Repetitions per initial state.
    noise_std : float
        Std of the observation noise.

    Returns
    -------
    dict
        {"envs": initial states, "trajectories": ndarray of shape
        (n_envs, state_dim, length, n_reps), "meta_info": dict with the
        trajectory length, duration and noise std}.
    """
    trajectories = generate_trajectories_from_inits(
        closed_loop_dynamics, initial_states, dt, n_reps, length, noise_std)
    if state_norm is None:
        scale = np.eye(len(initial_states[0]))
    else:
        scale = np.diag(state_norm)
    # Apply the (possibly identity) denormalization along the state axis.
    moved = np.moveaxis(trajectories, 1, 3)
    trajectories = np.moveaxis(np.dot(moved, scale), 3, 1)
    return {
        "envs": initial_states,
        "trajectories": trajectories,
        "meta_info": {"L": length, "T": length * dt, "obs_noise_std": noise_std},
    }
def PT_indicator(x, a, b):
    """Element-wise indicator of the interval [a, b], in x's dtype.

    Either bound may be None, which drops that side of the constraint.
    """
    if a is None:
        return torch.le(x, b).type(x.dtype)
    if b is None:
        return torch.ge(x, a).type(x.dtype)
    return torch.le(x, b).type(x.dtype) * torch.ge(x, a).type(x.dtype)
def PT_loose_thresh(x, a, b, ma, mb):
    """Piecewise-linear "loose threshold" of x (all torch tensors, x's dtype).

    Identity on the middle segment [a, b]; linear with slope *ma* to the
    left of *a* and slope *mb* to the right of *b*, continuous at the
    knots.

    Parameters
    ----------
    a, b : left and right edges of the identity segment
    ma : slope of the negative (left) section
    mb : slope of the positive (right) section
    """
    middle = PT_indicator(x, a, b) * x
    upper = PT_indicator(x, b, None) * (b + mb * (x - b))
    lower = PT_indicator(x, None, a) * (a + ma * (x - a))
    return middle + upper + lower
9651111 | <filename>Tabuada1078.py
'''
Leia 1 valor inteiro N (2 < N < 1000). A seguir, mostre a tabuada de N:
1 x N = N 2 x N = 2N ... 10 x N = 10N
Entrada = A entrada contém um valor inteiro N (2 < N < 1000).
Saída = Imprima a tabuada de N, conforme o exemplo fornecido.
Exemplo de Entrada Exemplo de Saída
140 1 x 140 = 140
2 x 140 = 280
3 x 140 = 420
4 x 140 = 560
5 x 140 = 700
6 x 140 = 840
7 x 140 = 980
8 x 140 = 1120
9 x 140 = 1260
10 x 140 = 1400
'''
# Read N from stdin and print its multiplication table, 1 x N .. 10 x N.
num = int(input())
for i in range(1, 11):
    print(f'{i} x {num} = {i * num}')
| StarcoderdataPython |
1987710 | <reponame>edz-o/unreal-stereo-evaluation
#!/usr/bin/env python
import numpy as np
from skimage.color import rgb2gray
import time, datetime
import cv2
import json, re
import os, sys
import os.path as osp
import argparse
import metrics
from utils import read_disp
from config import cfg
# Read an image with OpenCV and convert BGR -> RGB channel order.
imread = lambda x: cv2.imread(x)[:,:,(2,1,0)]
# Load a grayscale mask and binarize it (any nonzero pixel -> True).
read_msk = lambda file_path: cv2.imread(file_path, cv2.IMREAD_GRAYSCALE).astype(bool)
# Write an error visualization, converting RGB back to OpenCV's BGR.
write_err_img = lambda file_path, err_map: cv2.imwrite(file_path, err_map[:,:,::-1])
# Dataset root
# DATA_ROOT = '/home/yzhang/data/unrealstereo'
def _evaluate_on_masked_regions(disp_est, disp_gth_masked):
    """Score an estimated disparity map against a (masked) ground truth.

    Returns
    -------
    error : dict
        Metric name -> value for EPE, D1, bad-pixel thresholds and
        error-quantile metrics.
    err_map : ndarray (uint8)
        Vertically stacked color visualization: estimate, ground truth
        and the D1 error image.
    """
    error = {
        'EPE': metrics.end_point_error(disp_gth_masked, disp_est),
        'D1': metrics.D1_error(disp_gth_masked, disp_est, (3, 0.05)),
        'Bad 0.5': metrics.N_pixel_error(disp_gth_masked, disp_est, 0.5),
        'Bad 1': metrics.N_pixel_error(disp_gth_masked, disp_est, 1),
        'Bad 2': metrics.N_pixel_error(disp_gth_masked, disp_est, 2),
        'Bad 4': metrics.N_pixel_error(disp_gth_masked, disp_est, 4),
        'A50': metrics.A_percent_error_quantile(disp_gth_masked, disp_est, 50),
        'A90': metrics.A_percent_error_quantile(disp_gth_masked, disp_est, 90),
        'A95': metrics.A_percent_error_quantile(disp_gth_masked, disp_est, 95),
        'A99': metrics.A_percent_error_quantile(disp_gth_masked, disp_est, 99),
        'Bad 3': metrics.N_pixel_error(disp_gth_masked, disp_est, 3),
    }
    d1_image = metrics.disp_error_image(disp_gth_masked, disp_est, (3, 0.05))
    # Color encoding with max disparity 228 (chosen for UrbanCity).
    colored = metrics.disp_to_color(np.vstack((disp_est, disp_gth_masked)), 228)
    err_map = np.uint8(np.vstack((colored, d1_image)) * 255)
    return error, err_map
def _evaluate_on_masked_regions_D1(disp_est, disp_gth_masked):
    """Like ``_evaluate_on_masked_regions`` but computes only the D1 score.

    Returns the ``{'D1': value}`` dict and the same stacked visualization.
    """
    error = {'D1': metrics.D1_error(disp_gth_masked, disp_est, (3, 0.05))}
    d1_image = metrics.disp_error_image(disp_gth_masked, disp_est, (3, 0.05))
    # Color encoding with max disparity 228 (chosen for UrbanCity).
    colored = metrics.disp_to_color(np.vstack((disp_est, disp_gth_masked)), 228)
    err_map = np.uint8(np.vstack((colored, d1_image)) * 255)
    return error, err_map
def evaluate_on_masked_regions(disp_est, dispL_gth, ERRMAP_OUT_PATH, store=False):
    """Evaluate a disparity estimate; optionally store the error image.

    When *store* is true the visualization is written to
    *ERRMAP_OUT_PATH* (its parent directory is created on demand).
    Returns the metric dict from ``_evaluate_on_masked_regions``.
    """
    error, err_map = _evaluate_on_masked_regions(disp_est, dispL_gth)
    if store:
        check_exist(ERRMAP_OUT_PATH)  # ensures the output directory exists
        write_err_img(ERRMAP_OUT_PATH, err_map)
    return error
def check_exist(filename):
    """Return True if *filename* exists; otherwise ensure its directory does.

    Returns
    -------
    bool
        True when the file is already present. False when it is not; in
        that case the parent directory is created (recursively) so the
        file can be written afterwards.
    """
    if osp.isfile(filename):
        return True
    # Fixed: replaced the shell call ``os.system('mkdir "..."')`` (not
    # portable, quoting-fragile, non-recursive) with os.makedirs;
    # exist_ok also makes concurrent creation safe.
    file_path = osp.split(filename)[0]
    if file_path and not osp.isdir(file_path):
        os.makedirs(file_path, exist_ok=True)
    return False
def assert_exist(filename):
    """Abort with a clear message if *filename* does not exist.

    Note: ``check_exist`` also creates the parent directory as a side
    effect when the file is missing.
    """
    # Fixed: raise explicitly instead of using ``assert`` so the check
    # still runs under ``python -O`` (asserts are stripped there).
    if not check_exist(filename):
        raise AssertionError('File {} does not exist'.format(filename))
def parse_args():
    """Parse command-line arguments for the evaluation script.

    Prints the usage help and exits with status 1 when invoked without
    any arguments.
    """
    parser = argparse.ArgumentParser(description='Evaluate outputs')
    parser.add_argument('--src', dest='eval_src', default='/', type=str,
                        help='scene_alg.json file for evaluating')
    parser.add_argument('--conf', dest='eval_conf', default='/', type=str,
                        help='Evaluation configuration')
    parser.add_argument('--ds', dest='dataset', help='Dataset file')
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    # Inputs: mapping image-name -> estimated-disparity path, the
    # evaluation configuration, and the dataset index ($ROOT expanded
    # to the configured data root).
    eval_src = json.load(open(args.eval_src))
    eval_conf = json.load(open(args.eval_conf))
    dataset = json.loads(open(args.dataset).read().replace('$ROOT', cfg.DATA_ROOT))
    print('Called with args:')
    print(args)
    evaluation_result_path = osp.join(os.getcwd(),'Evaluation/results')
    if not osp.isdir(evaluation_result_path):
        os.makedirs(evaluation_result_path)
    # Either append to an existing result file or start a new
    # timestamped one.
    if eval_conf['append']:
        print(eval_conf['append'])
        result_path = eval_conf['append_file']
        outputs = json.load(open(result_path))
    else:
        timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H:%M:%S')
        result_path = osp.join(evaluation_result_path, eval_conf['name']+timestamp+'.json')
        outputs = {}
    #alg_name = eval_conf['name']
    Total_img = len(eval_src.keys())
    for i, im in enumerate(eval_src.keys()):
        if not im in outputs:
            outputs[im] = {}
        output_path = eval_src[im]
        OUT_PATH = osp.split(output_path)[0]
        assert_exist(output_path)
        disp_est = read_disp(output_path)
        try:
            dispL_gth = read_disp(dataset[im]['dispL_occ'])
        except:
            # NOTE(review): bare except only prints the path and leaves
            # dispL_gth unbound on failure -- the uses below would then
            # raise NameError; confirm this is intended.
            print(dataset[im]['dispL_occ'])
        if check_exist(dataset[im]['occlusion_msk']):
            # Non-occluded pixels are the inverse of the occlusion mask.
            noc_mask = ~read_msk(dataset[im]['occlusion_msk'])
        # Evaluate on every region enabled in the configuration; the
        # special regions are intersected with the non-occluded mask.
        if eval_conf["region"]["full"]:
            TYPE = 'full'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                    dispL_gth, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["noc"]:
            TYPE = 'noc'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            mask = noc_mask
            outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                    dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["occ"]:
            TYPE = 'occ'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            mask = ~noc_mask
            outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                    dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["tl"]:
            TYPE = 'textureless'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            if osp.isfile(dataset[im]['textureless_msk']):
                #assert_exist(dataset[im]['textureless_msk'])
                mask = read_msk(dataset[im]['textureless_msk']) * noc_mask
                outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                        dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["sp"]:
            TYPE = 'reflective'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            if osp.isfile(dataset[im]['specular_msk']):
                #assert_exist(dataset[im]['specular_msk'])
                mask = read_msk(dataset[im]['specular_msk']) * noc_mask
                outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                        dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["tr"]:
            TYPE = 'transparent'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            if osp.isfile(dataset[im]['transparent_msk']):
                #assert_exist(dataset[im]['transparent_msk'])
                mask = read_msk(dataset[im]['transparent_msk']) * noc_mask
                outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                        dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        if eval_conf["region"]["bd"]:
            TYPE = 'boundary'
            ERRMAP_OUT_PATH = osp.join(OUT_PATH, TYPE, im + '_err' + '.png')
            if osp.isfile(dataset[im]['boundary_msk']):
                #assert_exist(dataset[im]['boundary_msk'])
                mask = read_msk(dataset[im]['boundary_msk']) * noc_mask
                outputs[im]['{}_result'.format(TYPE)] = evaluate_on_masked_regions(disp_est,
                        dispL_gth*mask, ERRMAP_OUT_PATH, eval_conf["store_img"])
        # Progress bar
        sys.stdout.write('\r')
        sys.stdout.write('%d/%d' % (i+1, Total_img))
        sys.stdout.flush()
    if not osp.isdir(evaluation_result_path):
        os.system('mkdir {}'.format(evaluation_result_path))
    with open(result_path, 'w') as f:
        json.dump(outputs, f, indent = 4)
    print('Save result to {}'.format(result_path))
| StarcoderdataPython |
3380398 | from django.contrib import admin
from .models import Obliged, Payment
# Expose the models in the Django admin with the default ModelAdmin.
admin.site.register(Obliged)
admin.site.register(Payment)
| StarcoderdataPython |
3436260 | <filename>HelloWorld-OOP/Clases.py
# Este archivo es una coleccion de clases que se llamaran desde otros archivos
import math
class Calculadora:
    """Two-operand console calculator (Python 2: uses print statements)."""
    def __init__(self, num1, num2):
        # Operands are coerced to float so division and powers behave
        # consistently under Python 2 semantics.
        self.num1 = float(num1)
        self.num2 = float(num2)
        print "Los numeros a operar son: ", self.num1, "y", self.num2, "\n"
    def suma(self):
        """Print num1 + num2."""
        print self.num1, "+", self.num2, "=", self.num1 + self.num2
    def resta(self):
        """Print num1 - num2."""
        print self.num1, "-", self.num2, "=", self.num1 - self.num2
    def multiplica(self):
        """Print num1 * num2."""
        print self.num1, "*", self.num2, "=", self.num1 * self.num2
    def divide(self):
        """Print num1 / num2 (raises ZeroDivisionError when num2 is 0)."""
        print self.num1, "/", self.num2, "=", self.num1 / self.num2
    def potencia(self):
        """Print num1 raised to the power num2."""
        print self.num1, "^", self.num2, "=", math.pow(self.num1, self.num2)
    def raiz(self):
        """Print the square root of each operand."""
        print "sqrt(", self.num1, ")= ", math.sqrt(self.num1), "\n", "sqrt(",self.num2,")= ", math.sqrt(self.num2)
class Constantes:
    """Expose the mathematical constants pi and e as instance attributes."""
    def __init__(self):
        self.pi = math.pi
        self.e = math.e
class Calculadora2(Calculadora):
    """Calculator with a name and a color on top of Calculadora.

    NOTE(review): does not call ``Calculadora.__init__``, so the greeting
    print from the base constructor is skipped -- presumably intended.
    """
    def __init__(self, nombre, numer1, numer2, color):
        self.name = nombre
        self.color = color
        self.num1 = float(numer1)
        self.num2 = float(numer2)
    def presentarcolor(self):
        """Print the calculator's name and its color."""
        print "El color de ", self.name, " es: ", self.color
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.