code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra
*/
"use strict";
const makeSerializable = require("../util/makeSerializable");
const ModuleDependency = require("./ModuleDependency");
const ModuleDependencyTemplateAsId = require("./ModuleDependencyTemplateAsId");
/** @typedef {import("../javascript/JavascriptParser").Range} Range */
/**
 * Dependency created for a `module.hot.decline(request)` call.
 */
class ModuleHotDeclineDependency extends ModuleDependency {
	/**
	 * @param {string} request the request string
	 * @param {Range} range location in source code
	 */
	constructor(request, range) {
		super(request);
		this.range = range;
		// weak: this dependency alone must not force the module to be included
		this.weak = true;
	}

	// dependency type label shown in stats and error messages
	get type() {
		return "module.hot.decline";
	}

	// resolved with CommonJS semantics
	get category() {
		return "commonjs";
	}
}

// register (de)serialization for webpack's persistent cache
makeSerializable(
	ModuleHotDeclineDependency,
	"webpack/lib/dependencies/ModuleHotDeclineDependency"
);

// rendered into the bundle as the resolved module id
ModuleHotDeclineDependency.Template = ModuleDependencyTemplateAsId;

module.exports = ModuleHotDeclineDependency;
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: generator
plugin_type: inventory
version_added: "2.6"
short_description: Uses Jinja2 to construct hosts and groups from patterns
description:
- Uses a YAML configuration file with a valid YAML or C(.config) extension to define var expressions and group conditionals
- Create a template pattern that describes each host, and then use independent configuration layers
- Every element of every layer is combined to create a host for every layer combination
- Parent groups can be defined with reference to hosts and other groups using the same template variables
options:
plugin:
description: token that ensures this is a source file for the 'generator' plugin.
required: True
choices: ['generator']
hosts:
description:
- The C(name) key is a template used to generate
hostnames based on the C(layers) option. Each variable in the name is expanded to create a
cartesian product of all possible layer combinations.
- The C(parents) are a list of parent groups that the host belongs to. Each C(parent) item
contains a C(name) key, again expanded from the template, and an optional C(parents) key
that lists its parents.
- Parents can also contain C(vars), which is a dictionary of vars that
is then always set for that variable. This can provide easy access to the group name. E.g
set an C(application) variable that is set to the value of the C(application) layer name.
layers:
description:
- A dictionary of layers, with the key being the layer name, used as a variable name in the C(host)
C(name) and C(parents) keys. Each layer value is a list of possible values for that layer.
'''
EXAMPLES = '''
# inventory.config file in YAML format
plugin: generator
strict: False
hosts:
name: "{{ operation }}-{{ application }}-{{ environment }}-runner"
parents:
- name: "{{ operation }}-{{ application }}-{{ environment }}"
parents:
- name: "{{ operation }}-{{ application }}"
parents:
- name: "{{ operation }}"
- name: "{{ application }}"
- name: "{{ application }}-{{ environment }}"
parents:
- name: "{{ application }}"
vars:
application: "{{ application }}"
- name: "{{ environment }}"
vars:
environment: "{{ environment }}"
- name: runner
layers:
operation:
- build
- launch
environment:
- dev
- test
- prod
application:
- web
- api
'''
import os
from itertools import product
from ansible import constants as C
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
    """ constructs groups and vars using Jinja2 template expressions """

    NAME = 'generator'

    def __init__(self):
        super(InventoryModule, self).__init__()

    def verify_file(self, path):
        """Accept files with no extension, a .config extension, or any of the
        configured YAML extensions."""
        valid = False
        if super(InventoryModule, self).verify_file(path):
            file_name, ext = os.path.splitext(path)
            if not ext or ext in ['.config'] + C.YAML_FILENAME_EXTENSIONS:
                valid = True
        return valid

    def template(self, pattern, variables):
        """Render the Jinja2 ``pattern`` with ``variables`` in scope."""
        t = self.templar
        t.set_available_variables(variables)
        return t.do_template(pattern)

    def add_parents(self, inventory, child, parents, template_vars):
        """Recursively create the parent groups of ``child`` (a host or group
        name string) and attach ``child`` and any group vars to them."""
        for parent in parents:
            try:
                groupname = self.template(parent['name'], template_vars)
            except (AttributeError, KeyError, ValueError):
                # KeyError covers a parent entry missing its 'name' key.
                # `child` is always a plain string here (host name on the
                # first call, group name on recursive calls), so report it
                # directly rather than indexing into it.
                raise AnsibleParserError("Element %s has a parent with no name element" % child)
            if groupname not in inventory.groups:
                inventory.add_group(groupname)
            group = inventory.groups[groupname]
            for (k, v) in parent.get('vars', {}).items():
                group.set_variable(k, self.template(v, template_vars))
            inventory.add_child(groupname, child)
            self.add_parents(inventory, groupname, parent.get('parents', []), template_vars)

    def parse(self, inventory, loader, path, cache=False):
        ''' parses the inventory file '''
        super(InventoryModule, self).parse(inventory, loader, path, cache=cache)
        config = self._read_config_data(path)

        # Cartesian product of every layer's value list; each combination
        # becomes one host.  keys() and values() of the same dict always
        # iterate in matching order, so index i of each product tuple maps
        # back to the i-th layer name.
        template_inputs = product(*config['layers'].values())
        for item in template_inputs:
            template_vars = dict()
            for i, key in enumerate(config['layers'].keys()):
                template_vars[key] = item[i]
            host = self.template(config['hosts']['name'], template_vars)
            inventory.add_host(host)
            self.add_parents(inventory, host, config['hosts'].get('parents', []), template_vars)
trigger:
branches:
include:
- release-*
resources:
repositories:
- repository: 1esPipelines
type: git
name: 1ESPipelineTemplates/1ESPipelineTemplates
ref: refs/tags/release
variables:
Codeql.InitParameters: '--codescanning-config=$(Build.SourcesDirectory)/.github/codeql/codeql-configuration.yml'
extends:
template: v1/1ES.Official.PipelineTemplate.yml@1esPipelines
parameters:
pool:
name: TypeScript-AzurePipelines-EO
image: 1ESPT-AzureLinux3
os: linux
sdl:
sourceAnalysisPool:
name: TypeScript-AzurePipelines-EO
image: 1ESPT-Windows2022
os: windows
stages:
- stage: buildStage
displayName: Build Stage
jobs:
- job: test
displayName: Test
steps:
- checkout: self
clean: true
fetchDepth: 1
fetchTags: false
- task: NodeTool@0
inputs:
versionSpec: 20.x
displayName: 'Install Node'
- script: |
npm install -g `node -e 'console.log(JSON.parse(fs.readFileSync("package.json", "utf8")).packageManager)'`
npm --version
displayName: 'Install packageManager from package.json'
- script: npm ci
displayName: 'npm ci'
- script: 'npm test'
displayName: 'npm test'
- job: build
displayName: Build
dependsOn: test
steps:
- checkout: self
clean: true
fetchDepth: 1
fetchTags: false
- task: NodeTool@0
inputs:
versionSpec: 20.x
displayName: 'Install Node'
- script: |
npm install -g `node -e 'console.log(JSON.parse(fs.readFileSync("package.json", "utf8")).packageManager)'`
npm --version
displayName: 'Install packageManager from package.json'
- script: npm ci
displayName: 'npm ci'
- script: |
npx hereby LKG
npx hereby clean
node ./scripts/addPackageJsonGitHead.mjs package.json
npm pack
displayName: 'LKG, clean, pack'
- task: CopyFiles@2
displayName: 'Copy Files to: $(Build.ArtifactStagingDirectory)'
inputs:
SourceFolder: ./
Contents: 'typescript-*.tgz'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
templateContext:
outputs:
- output: pipelineArtifact
targetPath: '$(Build.ArtifactStagingDirectory)'
artifactName: tgz | unknown | github | https://github.com/microsoft/TypeScript | azure-pipelines.release.yml |
#!/usr/bin/env python
# ******************************************************************************
# Copyright (c) 2015 UT-Battelle, LLC.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Andrew Bennett - Initial API and implementation and/or initial documentation
#
# ******************************************************************************
# The Python ICE Installer
# ******************************************************************************
from __future__ import print_function
import os
import sys
import glob
import time
import stat
import errno
import shutil
import tarfile
import zipfile
import fnmatch
import platform
import datetime
import argparse
import itertools
import subprocess
if sys.version_info >= (3,0):
import urllib.request as urllib2
else:
import urllib2
def parse_args(args):
    """Parse command line arguments and return them.

    Args:
        args: list of argument strings, typically ``sys.argv[1:]``.
    Returns:
        argparse.Namespace; ``opts.update`` is always expanded to a concrete
        package list ('all' / an empty -u becomes every package).
    """
    parser = argparse.ArgumentParser(description="ICE Installer script.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
                                     fromfile_prefix_chars='@')
    parser.add_argument('-u', '--update', nargs='*', default=['all'],
                        choices=("all", "none", "VisIt", "HDFJava", "ICE"),
                        help='The packages to update. Leave blank to update all available packages.')
    parser.add_argument('-p', '--prefix', default=os.path.abspath(os.path.join(".", "ICE")),
                        help="The location to download and install ICE.")
    parser.add_argument("--unstable", action='store_true', help='Install the unstable nightly version of ICE.')
    parser.add_argument("--with-hdfjava", help="The path to an installation of HDFJava")
    parser.add_argument("--with-visit", help="The path to an installation of VisIt")
    parser.add_argument("--skip-download", action='store_true',
                        help='Do not download new packages, use previously downloaded ones.')
    parser.add_argument('--cleanup', action='store_true',
                        help='Remove downloaded packages after installation.')
    opts = parser.parse_args(args)

    # If update option was given blank set it to update everything
    if opts.update == [] or 'all' in opts.update:
        opts.update = ['ICE', 'VisIt', 'HDFJava']

    # Warn when the user both asks to install a package and points at an
    # existing installation of it.
    if opts.with_hdfjava is not None and 'HDFJava' in opts.update:
        print("")
        print("--------------------------- WARNING -----------------------------")
        print("Options used to install HDFJava and use an existing installation.")
        print("We will try to use the existing installation. If this does not work")
        print("try running again without the --with-hdfjava option.")
        print("--------------------------- WARNING -----------------------------")
        print("")
    if opts.with_visit is not None and 'VisIt' in opts.update:
        # BUGFIX: this warning previously repeated the HDFJava text verbatim;
        # it now refers to VisIt and --with-visit.
        print("")
        print("--------------------------- WARNING -----------------------------")
        print("Options used to install VisIt and use an existing installation.")
        print("We will try to use the existing installation. If this does not work")
        print("try running again without the --with-visit option.")
        print("--------------------------- WARNING -----------------------------")
        print("")
    return opts
def mkdir_p(path):
    """Create ``path`` and any missing parents, like ``mkdir -p``.

    An existing directory is accepted silently; anything else blocking the
    path is reported and aborts the installer.
    """
    try:
        os.makedirs(path)
    except OSError:
        # isdir() is False both for a non-directory entry and for a missing
        # path, so a single check covers the original exists-and-isdir test.
        if os.path.isdir(path):
            return
        print("")
        print("--------------------------- ERROR -----------------------------")
        print("Cannot create directory " + path + ". File already exists.")
        print("Either delete this file, or specify a different installation")
        print("location by using the --prefix option.")
        print("--------------------------- ERROR -----------------------------")
        print("")
        exit()
def get_os_and_arch():
    """Return (os_type, arch_type), normalized and validated.

    Aborts the installer when run on an unsupported operating system or
    machine architecture.
    """
    supported_systems = ['Windows', 'Darwin', 'Linux']
    supported_machines = ['x86_64', 'x86']

    os_type = platform.system()
    arch_type = platform.machine()
    # Windows reports 64-bit machines as AMD64; normalize to x86_64.
    if arch_type == "AMD64":
        arch_type = "x86_64"
    # TODO: Add more processing for different things
    if os_type not in supported_systems or arch_type not in supported_machines:
        print("ERROR: Incorrect architecture or operating system.")
        exit()
    return os_type, arch_type
def print_header(opts, os_type, arch_type):
    """Announce which packages are about to be downloaded and/or installed."""
    print("Preparing to install ICE...")
    print("")
    banner = ("Installing the following packages:" if opts.skip_download
              else "Downloading and installing:")
    print(banner)
    for pkg in opts.update:
        print(" " + pkg)
def get_package_file(pkg, os_type, arch_type):
    """Look up the distribution archive name for ``pkg`` on a platform.

    Raises KeyError for an unsupported package/OS/architecture combination.
    """
    ice_files = {
        "Windows": {"x86_64": "ice.product-win32.win32.x86_64.zip",
                    "x86": "ice.product-win32.win32.x86.zip"},
        "Darwin": {"x86_64": "ice.product-macosx.cocoa.x86_64.zip",
                   "x86": "ice.product-macosx.cocoa.x86.zip"},
        "Linux": {"x86_64": "ice.product-linux.gtk.x86_64.zip",
                  "x86": "ice.product-linux.gtk.x86.zip"},
    }
    visit_files = {
        "Windows": {"x86_64": "visit2.10.0_x64.exe",
                    "x86": "visit2.10.0.exe"},
        "Darwin": {"x86_64": "VisIt-2.10.0.dmg"},
        "Linux": {"x86_64": "visit2_10_0.linux-x86_64-rhel6.tar.gz"},
    }
    hdfjava_files = {
        "Windows": {"x86_64": "HDFView-2.11-win64-vs2012.zip",
                    "x86": "HDFView-2.11-win32-vs2012.zip"},
        "Darwin": {"x86_64": "HDFView-2.11.0-Darwin.dmg"},
        "Linux": {"x86_64": "HDFView-2.11-centos6-x64.tar.gz"},
    }
    package_files = {"ICE": ice_files, "VisIt": visit_files, "HDFJava": hdfjava_files}
    return package_files[pkg][os_type][arch_type]
def download_packages(opts, os_type, arch_type):
    """
    Pull down the appropriate packages for the given run, OS type, and machine architecture

    Args:
        opts: the parsed command-line options
        os_type: the operating system to download for
        arch_type: the architecture of the system
    Returns:
        dict mapping package name -> local archive file name, or None when
        there is nothing to do.
    """
    packages = opts.update
    if not packages or os_type is None or arch_type is None:
        return

    # Nightly builds are published under yesterday's date as YYYYMMDD.
    date = (datetime.date.today() - datetime.timedelta(1)).isoformat().replace('-', '')
    package_urls = {"ICE": "http://eclipseice.ornl.gov/downloads/ice/",
                    "VisIt": "http://eclipseice.ornl.gov/downloads/visit/",
                    "HDFJava": "http://www.hdfgroup.org/ftp/HDF5/hdf-java/current/bin/"}
    # TODO: If a site from package_urls is down we can try to download from one of these
    backup_urls = {"ICE": "http://sourceforge.net/projects/niceproject/files/nightly/nice/",
                   "VisIt": "http://portal.nersc.gov/project/visit/releases/2.9.1/",
                   "HDFJava": "http://www.hdfgroup.org/ftp/HDF5/hdf-java/current/bin/"}
    if opts.unstable:
        package_urls['ICE'] = "http://eclipseice.ornl.gov/downloads/ice/unstable-nightly/" + date + '/'
    else:
        package_urls['ICE'] = "http://eclipseice.ornl.gov/downloads/ice/stable-nightly/" + date + '/'

    files = dict()
    for pkg in packages:
        fname = get_package_file(pkg, os_type, arch_type)
        files[pkg] = fname
        if not opts.skip_download:
            print("Downloading " + pkg + ":")
            url = package_urls[pkg] + fname
            u = urllib2.urlopen(url)
            # BUGFIX: the output file was never closed; use a context manager.
            with open(fname, 'wb') as f:
                # HTTP header names are case-insensitive; normalize before
                # reading Content-Length for the progress display.
                info = {k.lower(): v for k, v in dict(u.info()).items()}
                fsize = int(info['content-length'])
                dl_size = 0
                block = 8192
                while True:
                    buffer = u.read(block)
                    if not buffer:
                        break
                    dl_size += len(buffer)
                    f.write(buffer)
                    # \b characters rewind the cursor so the percentage
                    # overwrites itself in place.
                    status = r" %5.2f%%" % (dl_size * 100. / fsize)
                    status = status + chr(8) * (len(status) + 1)
                    print(status, end='')
            print("")
    return files
def unzip_package(pkg, file_path, out_path):
    """Extract the zip archive at ``file_path`` into ``out_path``.

    On OS X the system ``unzip`` tool is used instead of the zipfile module
    (presumably to better handle .app bundle contents — TODO confirm).
    Returns ``out_path``.
    """
    print("Unpacking " + file_path + "....")
    mkdir_p(out_path)
    if 'Darwin' in get_os_and_arch():
        subprocess.call(['unzip', '-q', '-o', file_path, '-d', out_path])
    else:
        archive = zipfile.ZipFile(file_path)
        archive.extractall(out_path)
    return out_path
def untar_package(pkg, file_path, out_path):
    """Extract the tarball at ``file_path`` into ``out_path``.

    Any previous extraction under the archive's common top-level directory
    is removed first.  Returns that top-level directory name (relative to
    ``out_path``).
    """
    print("Unpacking " + file_path + "....")
    mkdir_p(out_path)
    archive = tarfile.open(file_path)
    top_dir = os.path.commonprefix(archive.getnames())
    stale = os.path.join(out_path, top_dir)
    if os.path.isdir(stale):
        shutil.rmtree(stale)
    archive.extractall(out_path)
    archive.close()
    return top_dir
def undmg_package(pkg, file_path, out_path):
    """ Extracts contents of file_path to out_path """
    print("Unpacking " + file_path + " to " + out_path + "....")
    mnt_point = os.path.join(out_path, 'mnt')
    mkdir_p(mnt_point)
    mount_cmd = ['hdiutil', 'attach', '-mountpoint', mnt_point, file_path, '-quiet']
    unmount_cmd = ['hdiutil', 'detach', mnt_point, '-force', '-quiet']
    # Mount / unmount / mount dance.  hdiutil is launched asynchronously
    # (Popen) and never waited on, so the fixed sleeps are the only
    # synchronization.  NOTE(review): fragile — consider subprocess.call or
    # Popen().wait(); also confirm why the double attach is necessary.
    subprocess.Popen(mount_cmd)
    time.sleep(3)
    subprocess.Popen(unmount_cmd)
    time.sleep(3)
    subprocess.Popen(mount_cmd)
    time.sleep(3)
    # Locate the "Resources" directory inside the mounted image.
    content = find_dir(mnt_point, "Resources")
    if content is None:
        # Nothing to copy.  NOTE(review): the image is left mounted on this
        # early return — confirm whether a detach is needed here.
        return
    print(" Copying " + content + " to " + os.path.join(out_path, pkg) + "....")
    # Replace any previous extraction wholesale.
    if os.path.exists(os.path.join(out_path, pkg)):
        shutil.rmtree(os.path.join(out_path, pkg))
    shutil.copytree(content, os.path.join(out_path, pkg))
    time.sleep(3)
    subprocess.Popen(unmount_cmd)
def unpack_packages(opts, pkg_files):
    """Dispatch each downloaded archive to the matching extractor.

    Returns a dict of package name -> extraction directory (or, for Windows
    .exe installers, the installer file name unchanged — they are executed
    later rather than unpacked).
    """
    dirs = dict()
    for pkg, archive in pkg_files.items():
        if archive.endswith((".tar.gz", ".tgz", ".tar")):
            dirs[pkg] = untar_package(pkg, archive, opts.prefix)
        elif archive.endswith(".zip"):
            dirs[pkg] = unzip_package(pkg, archive, opts.prefix)
        elif archive.endswith(".dmg"):
            dirs[pkg] = undmg_package(pkg, archive, opts.prefix)
        elif archive.endswith(".exe"):
            dirs[pkg] = archive
    return dirs
def find_file(dir, fname):
    """Return the first path under ``dir`` whose basename matches ``fname``.

    ``fname`` may be a glob pattern.  Warning: only the first match is
    returned; None when nothing matches.
    """
    # Special case: ".app" bundles are directories, so os.walk's file lists
    # would never report them — glob the top level instead (presumably the
    # intent; TODO confirm nested bundles are not needed).
    if fname == "*.app":
        bundle_matches = glob.glob(os.path.join(dir, fname))
        if bundle_matches:
            return bundle_matches[0]
    for root, _subdirs, files in os.walk(dir):
        for candidate in files:
            if fnmatch.fnmatch(candidate, fname):
                return os.path.join(root, candidate)
    return None
def find_dir(dir, dirname):
    """Return the first directory under ``dir`` matching glob ``dirname``.

    Warning: only the first match (in os.walk order) is returned; None when
    nothing matches.
    """
    for root, subdirs, _files in os.walk(dir):
        for candidate in subdirs:
            if fnmatch.fnmatch(candidate, dirname):
                return os.path.join(root, candidate)
    return None
def nix_install(opts, pkg_dirs):
    """ Install packages for *nix """
    # HDFJava ships as a self-extracting shell script; run it into the prefix.
    if "HDFJava" in pkg_dirs.keys():
        print("Installing HDFJava...")
        install_script = find_file(opts.prefix, "HDFView*.sh")
        if install_script is not None:
            install_cmd = [install_script, "--exclude-subdir", "--prefix="+os.path.join(opts.prefix,pkg_dirs['HDFJava'])]
            subprocess.call(install_cmd)
    # Locate the HDFJava native library; a user-supplied path takes priority.
    hdf_path = opts.with_hdfjava if opts.with_hdfjava else opts.prefix
    hdf_libdir = find_file(hdf_path, "libhdf.a")
    if hdf_libdir is None:
        print("")
        print("--------------------------- ERROR -----------------------------")
        print("Could not find a usable HDFJava library. Try downloading")
        print("a fresh copy using this installer by providing the --update")
        # NOTE(review): these lines look out of order — "option without any
        # arguments" belongs right after the line above (compare the VisIt
        # and ICE error messages below).
        print("")
        print("Alternatively you may specify the location of an existing")
        print("HDFJava installation using the --with-hdfjava option.")
        print("option without any arguments")
        print("--------------------------- ERROR -----------------------------")
        print("")
        exit()
    hdf_libdir = os.path.abspath(os.path.dirname(hdf_libdir))
    # Locate the VisIt launcher the same way.
    visit_path = opts.with_visit if opts.with_visit is not None else opts.prefix
    visit_bin_dir = find_file(visit_path, "visit")
    if visit_bin_dir is None:
        print("")
        print("--------------------------- ERROR -----------------------------")
        print("Could not find a usable VisIt executable. Try downloading")
        print("a fresh copy using this installer by providing the --update")
        print("option without any arguments")
        print("")
        print("Alternatively you may specify the location of an existing")
        print("VisIt installation using the --with-visit option.")
        print("--------------------------- ERROR -----------------------------")
        print("")
        exit()
    visit_bin_dir = os.path.abspath(os.path.dirname(visit_bin_dir))
    # Point ICE's launcher configuration (ICE.ini) at the located tools.
    ice_preferences = find_file(opts.prefix, "ICE.ini")
    if ice_preferences == None:
        print("")
        print("--------------------------- ERROR -----------------------------")
        print("Could not find a usable ICE preferences file. Try downloading")
        print("a fresh copy using this installer by providing the --update")
        print("option without any arguments")
        print("--------------------------- ERROR -----------------------------")
        print("")
        exit()
    ice_preferences = os.path.abspath(ice_preferences)
    # Keep a backup of the original ini, then rewrite it with the real
    # VisIt path substituted for the placeholder.
    shutil.move(ice_preferences, ice_preferences + ".bak")
    with open(ice_preferences + ".bak") as infile:
        filedata = infile.read()
    if visit_bin_dir is not None:
        filedata = filedata.replace("-Dvisit.binpath=@user.home/visit/bin", "-Dvisit.binpath=" +
                                    visit_bin_dir)
    with open(ice_preferences, 'w') as outfile:
        outfile.write(filedata)
    if hdf_libdir is not None:
        # NOTE(review): appended without a leading newline — confirm the ini
        # always ends in one, otherwise this fuses with the previous option.
        with open(ice_preferences, 'a') as outfile:
            outfile.write("-Djava.library.path=" + hdf_libdir)
def windows_install(opts, pkg_dirs):
    """ Install packages for Windows """
    if "HDFJava" in pkg_dirs.keys():
        print("Installing HDFJava...")
        install_script = find_file(opts.prefix, "HDFView*.exe")
        install_cmd = [install_script]
        subprocess.call(install_cmd, shell=True)
        # NOTE(review): if find_file returns None, os.path.dirname(None)
        # raises TypeError before the None check below ever runs.
        hdf_libdir = os.path.dirname(find_file("C:\\", "libhdf.lib"))
        if hdf_libdir is None:
            print("ERROR: Could not find HDF Java libraries.")
            exit()
    if "VisIt" in pkg_dirs.keys():
        print("Installing VisIt...")
        install_script = find_file(os.getcwd(), "visit*.exe")
        install_cmd = [install_script]
        subprocess.call(install_cmd)
        # NOTE(review): same dirname(None) hazard as above.
        visit_bin_dir = os.path.dirname(find_file("C:\\", "visit*.exe"))
        if visit_bin_dir is None:
            print("ERROR: Could not find VisIt executable.")
            exit()
    # NOTE(review): hdf_libdir / visit_bin_dir are only assigned inside the
    # branches above, so skipping either package raises NameError below —
    # confirm intended usage always installs both.
    ice_preferences = find_file(opts.prefix, "ICE.ini")
    if ice_preferences is None:
        print("ERROR: Could not find ICE preferences directory.")
        exit()
    # Back up ICE.ini, then rewrite it with the real VisIt path substituted
    # for the placeholder.
    shutil.move(ice_preferences, ice_preferences + ".bak")
    with open(ice_preferences + ".bak") as infile:
        filedata = infile.read()
    filedata = filedata.replace("-Dvisit.binpath=@user.home/visit/bin", "-Dvisit.binpath=" +
                                os.path.join(os.path.abspath(opts.prefix),visit_bin_dir))
    with open(ice_preferences, 'w') as outfile:
        outfile.write(filedata)
    with open(ice_preferences, 'a') as outfile:
        outfile.write("-Djava.library.path=" + hdf_libdir)
def linux_post(opts, pkgs):
    """ Post installation for Linux """
    print("Generating desktop file for ICE...")
    # Fetch the splash image to serve as the launcher icon.
    with open(os.path.join(opts.prefix,"splash.bmp"),'wb') as f:
        f.write(urllib2.urlopen('https://raw.githubusercontent.com/eclipse/ice/master/org.eclipse.ice.client.rcp/splash.bmp').read())
    # When run under sudo, install the .desktop file for the invoking user,
    # not root.
    if 'SUDO_USER' in os.environ:
        user = os.environ['SUDO_USER']
    else:
        user = os.environ['USER']
    # Make the ICE launcher executable by its owner and read/writable by all.
    os.chmod(os.path.join(opts.prefix, "ICE"), stat.S_IXUSR | \
             stat.S_IRUSR | \
             stat.S_IWUSR | \
             stat.S_IRGRP | \
             stat.S_IWGRP | \
             stat.S_IROTH | \
             stat.S_IWOTH)
    # NOTE(review): the home directory is assumed to be /home/<user>, which
    # breaks for non-standard homes — consider os.path.expanduser.
    mkdir_p(os.path.join('/home', user, ".local", "share", "applications"))
    with open(os.path.join('/home', user, ".local", "share", "applications","ICE.desktop"),'w') as f:
        f.write("[Desktop Entry]")
        f.write("\nType=Application")
        f.write("\nName=ICE")
        f.write("\nExec=" + os.path.join(opts.prefix, "ICE"))
        f.write("\nComment=Eclipse Integrated Computational Environment")
        f.write("\nIcon=" + os.path.join(opts.prefix, "splash.bmp"))
        f.write("\nTerminal=true")
        f.write("\nCategories=Programming")
        f.write("\n")
def osx_post(opts, pkgs):
    """ Post installation for OS X """
    mkdir_p(os.path.join(opts.prefix, "ICE.app", "Contents", "MacOS"))
    script_path = os.path.join(opts.prefix, "ICE.app", "Contents", "Info.plist")
    # NOTE(review): dirname(None) raises TypeError if no libvisit* is found.
    visit_libdir = os.path.dirname(find_file(opts.prefix, "libvisit*"))
    # Rewrite the bundle to launch through a wrapper script, register the app
    # with LaunchServices, and symlink it into ~/Applications.
    plutil_cmd = ['plutil', '-replace', 'CFBundleExecutable', '-string', 'ice.sh', script_path]
    lsregister_cmd = ['/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister',
                      '-v', '-f', os.path.join(opts.prefix, 'ICE.app')]
    ln_cmd = ['ln', '-sf', os.path.abspath(os.path.join(opts.prefix, "ICE.app")), os.path.join(os.path.expanduser("~"),'Applications','ICE.app')]
    # The wrapper exports the VisIt library path before exec'ing ICE.
    with open(os.path.join(opts.prefix, "ICE.app", "Contents", "MacOS", "ice.sh"), 'w') as f:
        f.write('#!/bin/bash')
        f.write('\nsource ~/.bash_profile')
        f.write('\nexport DYLD_LIBRARY_PATH=' + visit_libdir + ':$DYLD_LIBRARY_PATH')
        f.write('\nexec `dirname $0`/ICE $0')
        f.write('\n')
    # NOTE(review): sets the mode to exactly S_IXUSR, clearing read/write
    # bits — confirm this is intended rather than OR-ing with the old mode.
    os.chmod(os.path.join(opts.prefix, "ICE.app", "Contents", "MacOS", "ice.sh"), stat.S_IXUSR)
    ice_preferences = find_file(opts.prefix, 'ICE.ini')
    with open(ice_preferences, 'a') as f:
        f.write("\n-Xdock:name=Eclipse ICE")
    # Launched asynchronously and never waited on.
    subprocess.Popen(plutil_cmd)
    subprocess.Popen(lsregister_cmd)
    subprocess.Popen(ln_cmd)
def windows_post(opts, pkgs):
    """Post installation for Windows (intentionally a no-op)."""
    return None
def git(*args):
    """Execute Git with the given arguments.

    Returns 0 on success; raises subprocess.CalledProcessError on failure.
    """
    # BUGFIX: the description strings previously sat *before* each def as
    # no-op module-level expressions; they are now proper docstrings.
    return subprocess.check_call(['git'] + list(args))


def executeShellScript(*args):
    """Execute the sh command with the given arguments.

    Returns 0 on success; raises subprocess.CalledProcessError on failure.
    """
    return subprocess.check_call(['sh'] + list(args))


def executeMake(*args):
    """Execute make with the given arguments.

    Returns 0 on success; raises subprocess.CalledProcessError on failure.
    """
    return subprocess.check_call(['make'] + list(args))
def main():
    """ Run the full installer. """
    opts = parse_args(sys.argv[1:])
    os_type, arch_type = get_os_and_arch()
    # Per-OS install and post-install hooks.
    install_funct = {"Windows" : windows_install,
                     "Darwin" : nix_install,
                     "Linux" : nix_install}
    post_funct = {"Windows" : windows_post,
                  "Darwin" : osx_post,
                  "Linux" : linux_post}
    print_header(opts, os_type, arch_type)
    pkg_files = download_packages(opts, os_type, arch_type)
    pkg_dirs = unpack_packages(opts, pkg_files)
    install_funct[os_type](opts, pkg_dirs)
    post_funct[os_type](opts, pkg_dirs)
    # NOTE(review): everything below references a MOOSE checkout
    # (framework/contrib, libmesh, the MOOSE_JOBS env var) and appears to be
    # spliced in from a different project's build script — confirm it
    # belongs in the ICE installer at all.  It also assumes Unix shell
    # tools (rm, mv) despite the Windows branches above.
    # Get the Moose directory
    script_path = sys.path[0]
    moose_dir = os.path.dirname(script_path)
    # Remove bad files from moose repo
    os.chdir(moose_dir)
    subprocess.check_call("rm -rf *.dmg *.zip ICE/mnt", shell=True)
    # Clone ASIO and kick off a Moose build
    os.chdir(moose_dir+"/framework/contrib/")
    if (not os.path.exists("asio")):
        git("clone", "https://github.com/chriskohlhoff/asio")
    os.chdir("asio")
    subprocess.check_call("mv asio/include .", shell=True)
    subprocess.check_call("rm -rf asio", shell=True)
    os.chdir(moose_dir)
    executeShellScript('scripts/update_and_rebuild_libmesh.sh', '--enable-cxx11')
    executeMake("-j"+os.environ['MOOSE_JOBS'], "-C", "framework")
    executeMake("-j"+os.environ['MOOSE_JOBS'], "-C", "test")


if __name__ == '__main__':
    main()
"""
Base class for Scrapy commands
"""
import os
from optparse import OptionGroup
from twisted.python import failure
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class ScrapyCommand(object):
    # True when the command may only run inside a Scrapy project directory.
    requires_project = False
    # Populated externally (in scrapy.cmdline) before run() is invoked.
    crawler_process = None

    # default settings to be used for this command instead of global defaults
    default_settings = {}

    # Process exit code handed back to the shell by scrapy.cmdline.
    exitcode = 0

    def __init__(self):
        self.settings = None  # set in scrapy.cmdline

    def set_crawler(self, crawler):
        """Bind this command to its crawler; may only be done once."""
        assert not hasattr(self, '_crawler'), "crawler already set"
        self._crawler = crawler

    def syntax(self):
        """
        Command syntax (preferably one-line). Do not include command name.
        """
        return ""

    def short_desc(self):
        """
        A short description of the command
        """
        return ""

    def long_desc(self):
        """A long description of the command. Return short description when not
        available. It cannot contain newlines, since contents will be formatted
        by optparser which removes newlines and wraps text.
        """
        return self.short_desc()

    def help(self):
        """An extensive help for the command. It will be shown when using the
        "help" command. It can contain newlines, since not post-formatting will
        be applied to its contents.
        """
        return self.long_desc()

    def add_options(self, parser):
        """
        Populate option parse with options available for this command
        """
        # Options common to every Scrapy command; subclasses extend this.
        group = OptionGroup(parser, "Global Options")
        group.add_option("--logfile", metavar="FILE",
            help="log file. if omitted stderr will be used")
        group.add_option("-L", "--loglevel", metavar="LEVEL", default=None,
            help="log level (default: %s)" % self.settings['LOG_LEVEL'])
        group.add_option("--nolog", action="store_true",
            help="disable logging completely")
        group.add_option("--profile", metavar="FILE", default=None,
            help="write python cProfile stats to FILE")
        group.add_option("--pidfile", metavar="FILE",
            help="write process ID to FILE")
        group.add_option("-s", "--set", action="append", default=[], metavar="NAME=VALUE",
            help="set/override setting (may be repeated)")
        group.add_option("--pdb", action="store_true", help="enable pdb on failure")
        parser.add_option_group(group)

    def process_options(self, args, opts):
        """Apply the parsed global options to self.settings.

        Command-line values are stored with 'cmdline' priority so they
        override project and default settings.
        """
        try:
            self.settings.setdict(arglist_to_dict(opts.set),
                                  priority='cmdline')
        except ValueError:
            raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)

        # --logfile and -L both implicitly re-enable logging.
        if opts.logfile:
            self.settings.set('LOG_ENABLED', True, priority='cmdline')
            self.settings.set('LOG_FILE', opts.logfile, priority='cmdline')

        if opts.loglevel:
            self.settings.set('LOG_ENABLED', True, priority='cmdline')
            self.settings.set('LOG_LEVEL', opts.loglevel, priority='cmdline')

        # --nolog wins over the options above when both are given.
        if opts.nolog:
            self.settings.set('LOG_ENABLED', False, priority='cmdline')

        if opts.pidfile:
            with open(opts.pidfile, "w") as f:
                f.write(str(os.getpid()) + os.linesep)

        if opts.pdb:
            failure.startDebugMode()

    def run(self, args, opts):
        """
        Entry point for running commands
        """
        raise NotImplementedError
/*
* Copyright (C) 2016 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.graph;
import static com.google.common.truth.Truth.assertThat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.RandomAccess;
import org.jspecify.annotations.NullUnmarked;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests for repeated node and edge addition and removal in a {@link Graph}. */
@RunWith(JUnit4.class)
@NullUnmarked
public final class GraphMutationTest {
  private static final int NUM_TRIALS = 50;
  private static final int NUM_NODES = 100;
  private static final int NUM_EDGES = 1000;
  private static final int NODE_POOL_SIZE = 1000; // must be >> NUM_NODES

  @Test
  public void directedGraph() {
    testGraphMutation(GraphBuilder.directed());
  }

  @Test
  public void undirectedGraph() {
    testGraphMutation(GraphBuilder.undirected());
  }

  /**
   * Repeatedly fills a graph with random nodes/edges, removes random subsets, empties it, and
   * rebuilds it, validating graph invariants after every phase.
   */
  private static void testGraphMutation(GraphBuilder<? super Integer> graphBuilder) {
    Random gen = new Random(42); // Fixed seed so test results are deterministic.

    for (int trial = 0; trial < NUM_TRIALS; ++trial) {
      MutableGraph<Integer> graph = graphBuilder.allowsSelfLoops(true).build();

      assertThat(graph.nodes()).isEmpty();
      assertThat(graph.edges()).isEmpty();
      AbstractGraphTest.validateGraph(graph);

      // Grow to exactly NUM_NODES distinct nodes (duplicates from the pool are no-ops).
      while (graph.nodes().size() < NUM_NODES) {
        graph.addNode(gen.nextInt(NODE_POOL_SIZE));
      }
      ArrayList<Integer> nodeList = new ArrayList<>(graph.nodes());
      // Grow to exactly NUM_EDGES distinct edges; self-loops are allowed.
      while (graph.edges().size() < NUM_EDGES) {
        graph.putEdge(getRandomElement(nodeList, gen), getRandomElement(nodeList, gen));
      }
      ArrayList<EndpointPair<Integer>> edgeList = new ArrayList<>(graph.edges());

      assertThat(graph.nodes()).hasSize(NUM_NODES);
      assertThat(graph.edges()).hasSize(NUM_EDGES);
      AbstractGraphTest.validateGraph(graph);

      // Remove a random prefix of the shuffled edge list.
      Collections.shuffle(edgeList, gen);
      int numEdgesToRemove = gen.nextInt(NUM_EDGES);
      for (int i = 0; i < numEdgesToRemove; ++i) {
        EndpointPair<Integer> edge = edgeList.get(i);
        assertThat(graph.removeEdge(edge.nodeU(), edge.nodeV())).isTrue();
      }

      assertThat(graph.nodes()).hasSize(NUM_NODES);
      assertThat(graph.edges()).hasSize(NUM_EDGES - numEdgesToRemove);
      AbstractGraphTest.validateGraph(graph);

      // Remove a random prefix of the shuffled node list.
      Collections.shuffle(nodeList, gen);
      int numNodesToRemove = gen.nextInt(NUM_NODES);
      for (int i = 0; i < numNodesToRemove; ++i) {
        assertThat(graph.removeNode(nodeList.get(i))).isTrue();
      }

      assertThat(graph.nodes()).hasSize(NUM_NODES - numNodesToRemove);
      // Number of edges remaining is unknown (node's incident edges have been removed).
      AbstractGraphTest.validateGraph(graph);

      // Remove the remaining nodes, emptying the graph.
      for (int i = numNodesToRemove; i < NUM_NODES; ++i) {
        assertThat(graph.removeNode(nodeList.get(i))).isTrue();
      }

      assertThat(graph.nodes()).isEmpty();
      assertThat(graph.edges()).isEmpty(); // no edges can remain if there's no nodes
      AbstractGraphTest.validateGraph(graph);

      // Rebuild the same graph from the recorded node and edge lists.
      Collections.shuffle(nodeList, gen);
      for (Integer node : nodeList) {
        assertThat(graph.addNode(node)).isTrue();
      }
      Collections.shuffle(edgeList, gen);
      for (EndpointPair<Integer> edge : edgeList) {
        assertThat(graph.putEdge(edge.nodeU(), edge.nodeV())).isTrue();
      }

      assertThat(graph.nodes()).hasSize(NUM_NODES);
      assertThat(graph.edges()).hasSize(NUM_EDGES);
      AbstractGraphTest.validateGraph(graph);
    }
  }

  // RandomAccess bound keeps get(int) O(1) for the random picks above.
  private static <L extends List<T> & RandomAccess, T> T getRandomElement(L list, Random gen) {
    return list.get(gen.nextInt(list.size()));
  }
}
import {useIdentity, Stringify, identity} from 'shared-runtime';
/**
 * Compiler fixture: a value captured by a hook callback becomes frozen, so
 * later uses of it must be treated as read-only by the compiler.
 */
function Foo({val1}) {
  // `x={inner: val1}` should be able to be memoized
  const x = {inner: val1};
  // Any references to `x` after this hook call should be read-only
  const cb = useIdentity(() => x.inner);
  // With enableTransitivelyFreezeFunctionExpressions, it's invalid
  // to write to `x` after it's been frozen.
  // TODO: runtime validation for DX
  const copy = identity(x);
  return <Stringify copy={copy} cb={cb} shouldInvokeFns={true} />;
}
// Render twice with identical props so the test harness can verify that the
// memoized value is reused across sequential renders.
export const FIXTURE_ENTRYPOINT = {
  fn: Foo,
  params: [{val1: 1}],
  sequentialRenders: [{val1: 1}, {val1: 1}],
};
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
# Module maturity/support metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_resourcemanager_project_facts
description:
- Gather facts for GCP Project
short_description: Gather facts for GCP Project
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options: {}
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a project facts"
gcp_resourcemanager_project_facts:
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
number:
description:
- Number uniquely identifying the project.
returned: success
type: int
lifecycleState:
description:
- The Project lifecycle state.
returned: success
type: str
name:
description:
- 'The user-assigned display name of the Project. It must be 4 to 30 characters.
Allowed characters are: lowercase and uppercase letters, numbers, hyphen,
single-quote, double-quote, space, and exclamation point.'
returned: success
type: str
createTime:
description:
- Time of creation.
returned: success
type: str
labels:
description:
- The labels associated with this Project.
- 'Label keys must be between 1 and 63 characters long and must conform to the
following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.'
- Label values must be between 0 and 63 characters long and must conform to
the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- No more than 256 labels can be associated with a given resource.
- Clients should store labels in a representation such as JSON that does not
depend on specific characters being disallowed .
returned: success
type: dict
parent:
description:
- A parent organization.
returned: success
type: complex
contains:
type:
description:
- Must be organization.
returned: success
type: str
id:
description:
- Id of the organization.
returned: success
type: str
id:
description:
- The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase
letters, digits, or hyphens. It must start with a letter.
- Trailing hyphens are prohibited.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Module entrypoint: list all GCP projects visible to the credentials
    and exit with ``{'items': [...]}``."""
    module = GcpModule(argument_spec=dict())

    # Default to the broadest cloud-platform scope when none was supplied.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']

    items = fetch_list(module, collection(module))
    # fetch_list returns None on HTTP 404/204; treat that like an empty
    # listing instead of raising AttributeError on .get().
    if items and items.get('projects'):
        items = items.get('projects')
    else:
        items = []

    return_value = {'items': items}
    module.exit_json(**return_value)
def collection(module):
    """Return the URL of the global project-list endpoint.

    The ``format()`` call is a no-op here (the URL has no placeholders) but is
    kept for parity with other generated GCP modules.
    """
    url = "https://cloudresourcemanager.googleapis.com/v1/projects"
    return url.format(**module.params)
def fetch_list(module, link):
    """GET ``link`` with the module's credentials and return the decoded
    JSON body (or None for 404/204 responses)."""
    session = GcpSession(module, 'resourcemanager')
    return return_if_object(module, session.get(link))
def return_if_object(module, response):
    """Decode ``response`` into a dict.

    Returns None for 404 (not found) and 204 (no content).  Fails the module
    on HTTP errors, invalid JSON, or an error payload embedded in the body.
    """
    # "Not found" and "no content" both mean there is nothing to return.
    if response.status_code in (404, 204):
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
# Run the module only when executed directly (this is how Ansible invokes it).
if __name__ == "__main__":
    main()
#pragma once
#include <ATen/core/function.h>
#include <ATen/core/ivalue.h>
#include <c10/util/Exception.h>
#include <c10/util/intrusive_ptr.h>
#include <functional>
#include <utility>
namespace torch::jit {
// Adapter exposing a builtin operator (a plain callable operating on an
// IValue stack) through the torch::jit::Function interface.
struct BuiltinOpFunction : public Function {
  BuiltinOpFunction(
      c10::QualifiedName qualname,
      c10::FunctionSchema schema,
      std::function<void(Stack&)> callable,
      std::string doc_string = "")
      : name_(std::move(qualname)),
        callable_(std::move(callable)),
        schema_(std::move(schema)),
        doc_string_(std::move(doc_string)) {
    // Builtins wrapped here must declare exactly one return value.
    TORCH_INTERNAL_ASSERT(schema_.returns().size() == 1);
  }
  std::string_view doc_string() const override {
    return doc_string_;
  }
  // Executes the wrapped callable directly on the given stack.
  void run(Stack& stack) override {
    callable_(stack);
  }
  // Builtins execute synchronously: run immediately, then return an
  // already-completed future holding the single result left on the stack.
  c10::intrusive_ptr<c10::ivalue::Future> runAsync(
      Stack& stack,
      TaskLauncher /* not used */) override {
    run(stack);
    auto res = c10::make_intrusive<c10::ivalue::Future>(stack.front().type());
    res->markCompleted(std::move(stack.front()));
    return res;
  }
  const c10::QualifiedName& qualname() const override {
    return name_;
  }
  // if this isn't yet defined, run its method_creator function
  void ensure_defined() override {
    // nop
  }
  const c10::FunctionSchema& getSchema() const override {
    return schema_;
  }
  size_t num_inputs() const override {
    return schema_.arguments().size();
  }
  Function& setSchema(c10::FunctionSchema schema) override {
    schema_ = std::move(schema);
    return *this;
  }
  // NOTE(review): both call() overloads run eagerly and return false --
  // appears to signal that no interpreter Code object exists for a builtin;
  // confirm against the Function::call contract.
  bool call(
      Stack& stack,
      std::optional<size_t> /*unused*/,
      c10::function_ref<void(const Code&)> /*unused*/) override {
    run(stack);
    return false;
  }
  bool call(Stack& stack, c10::function_ref<void(const mobile::Code&)> /*unused*/)
      override {
    run(stack);
    return false;
  }
  ~BuiltinOpFunction() override = default;
 private:
  c10::QualifiedName name_;
  std::function<void(Stack&)> callable_;
  c10::FunctionSchema schema_;
  std::string doc_string_;
};
} // namespace torch::jit | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/core/builtin_function.h |
import numpy as np
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import KNeighborsTransformer, RadiusNeighborsTransformer
from sklearn.neighbors._base import _is_sorted_by_data
from sklearn.utils._testing import assert_array_equal
def test_transformer_result():
    # Test the number of neighbors returned
    n_neighbors = 5
    n_samples_fit = 20
    n_queries = 18
    n_features = 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples_fit, n_features)
    X2 = rng.randn(n_queries, n_features)
    radius = np.percentile(euclidean_distances(X), 10)
    # with n_neighbors
    for mode in ["distance", "connectivity"]:
        # "distance" mode also stores each query point itself, giving one
        # extra stored entry per row; "connectivity" does not.
        add_one = mode == "distance"
        nnt = KNeighborsTransformer(n_neighbors=n_neighbors, mode=mode)
        Xt = nnt.fit_transform(X)
        assert Xt.shape == (n_samples_fit, n_samples_fit)
        assert Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),)
        assert Xt.format == "csr"
        assert _is_sorted_by_data(Xt)
        X2t = nnt.transform(X2)
        assert X2t.shape == (n_queries, n_samples_fit)
        assert X2t.data.shape == (n_queries * (n_neighbors + add_one),)
        assert X2t.format == "csr"
        assert _is_sorted_by_data(X2t)
    # with radius
    for mode in ["distance", "connectivity"]:
        add_one = mode == "distance"
        nnt = RadiusNeighborsTransformer(radius=radius, mode=mode)
        Xt = nnt.fit_transform(X)
        assert Xt.shape == (n_samples_fit, n_samples_fit)
        # Radius-based neighbor counts are data dependent, so the stored size
        # should not equal the fixed k-neighbors size checked above.
        assert not Xt.data.shape == (n_samples_fit * (n_neighbors + add_one),)
        assert Xt.format == "csr"
        assert _is_sorted_by_data(Xt)
        X2t = nnt.transform(X2)
        assert X2t.shape == (n_queries, n_samples_fit)
        assert not X2t.data.shape == (n_queries * (n_neighbors + add_one),)
        assert X2t.format == "csr"
        assert _is_sorted_by_data(X2t)
def _has_explicit_diagonal(X):
"""Return True if the diagonal is explicitly stored"""
X = X.tocoo()
explicit = X.row[X.row == X.col]
return len(explicit) == X.shape[0]
def test_explicit_diagonal():
    # Test that the diagonal is explicitly stored in the sparse graph
    n_neighbors = 5
    n_samples_fit, n_samples_transform, n_features = 20, 18, 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples_fit, n_features)
    X2 = rng.randn(n_samples_transform, n_features)
    nnt = KNeighborsTransformer(n_neighbors=n_neighbors)
    Xt = nnt.fit_transform(X)
    assert _has_explicit_diagonal(Xt)
    # Each row stores itself first, with an explicit zero distance.
    assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
    Xt = nnt.transform(X)
    assert _has_explicit_diagonal(Xt)
    assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
    # Using transform on new data should not always have zero diagonal
    X2t = nnt.transform(X2)
    assert not _has_explicit_diagonal(X2t)
@pytest.mark.parametrize("Klass", [KNeighborsTransformer, RadiusNeighborsTransformer])
def test_graph_feature_names_out(Klass):
    """Check `get_feature_names_out` for transformers defined in `_graph.py`."""
    rng = np.random.RandomState(42)
    X = rng.randn(20, 10)
    fitted = Klass().fit(X)
    actual = fitted.get_feature_names_out()
    # Output names are "<lowercased class name><sample index>".
    prefix = Klass.__name__.lower()
    expected = np.array(
        [f"{prefix}{i}" for i in range(fitted.n_samples_fit_)],
        dtype=object,
    )
    assert_array_equal(actual, expected)
from django.conf import settings
from django import forms
from oscar.core.loading import get_model
from oscar.views.generic import PhoneNumberMixin
UserAddress = get_model('address', 'useraddress')
class AbstractAddressForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        """
        Mark every form field listed in OSCAR_REQUIRED_ADDRESS_FIELDS as
        required (only fields actually present on this form are touched).
        """
        super(AbstractAddressForm, self).__init__(*args, **kwargs)
        required = set(settings.OSCAR_REQUIRED_ADDRESS_FIELDS)
        for name in required.intersection(self.fields):
            self.fields[name].required = True
class UserAddressForm(PhoneNumberMixin, AbstractAddressForm):
    # Form for creating/editing a user's saved address.  The owning user is
    # injected via __init__ rather than exposed as an editable field.
    class Meta:
        model = UserAddress
        fields = [
            'title', 'first_name', 'last_name',
            'line1', 'line2', 'line3', 'line4',
            'state', 'postcode', 'country',
            'phone_number', 'notes',
        ]
    def __init__(self, user, *args, **kwargs):
        # Bind the address instance to the given user before validation/save.
        super(UserAddressForm, self).__init__(*args, **kwargs)
        self.instance.user = user
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provider related utilities
"""
from libcloud.compute.types import Provider
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver
from libcloud.compute.types import OLD_CONSTANT_TO_NEW_MAPPING
from libcloud.compute.deprecated import DEPRECATED_DRIVERS
# Public API of this module.  ``set_driver`` is defined below and documented
# as public, but was missing from __all__, so ``from ... import *`` did not
# expose it.
__all__ = [
    "Provider",
    "DRIVERS",
    "get_driver",
    "set_driver"]
# Maps each Provider constant to a (module path, driver class name) pair.
# Driver modules are imported lazily by get_driver(), so listing a provider
# here does not pull in its dependencies at import time.
DRIVERS = {
    Provider.AZURE:
    ('libcloud.compute.drivers.azure', 'AzureNodeDriver'),
    Provider.DUMMY:
    ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'),
    Provider.EC2:
    ('libcloud.compute.drivers.ec2', 'EC2NodeDriver'),
    Provider.ECP:
    ('libcloud.compute.drivers.ecp', 'ECPNodeDriver'),
    Provider.ELASTICHOSTS:
    ('libcloud.compute.drivers.elastichosts', 'ElasticHostsNodeDriver'),
    Provider.SKALICLOUD:
    ('libcloud.compute.drivers.skalicloud', 'SkaliCloudNodeDriver'),
    Provider.SERVERLOVE:
    ('libcloud.compute.drivers.serverlove', 'ServerLoveNodeDriver'),
    Provider.CLOUDSIGMA:
    ('libcloud.compute.drivers.cloudsigma', 'CloudSigmaNodeDriver'),
    Provider.GCE:
    ('libcloud.compute.drivers.gce', 'GCENodeDriver'),
    Provider.GOGRID:
    ('libcloud.compute.drivers.gogrid', 'GoGridNodeDriver'),
    Provider.RACKSPACE:
    ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'),
    Provider.RACKSPACE_FIRST_GEN:
    ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'),
    Provider.KILI:
    ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'),
    Provider.VPSNET:
    ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'),
    Provider.LINODE:
    ('libcloud.compute.drivers.linode', 'LinodeNodeDriver'),
    Provider.RIMUHOSTING:
    ('libcloud.compute.drivers.rimuhosting', 'RimuHostingNodeDriver'),
    Provider.VOXEL:
    ('libcloud.compute.drivers.voxel', 'VoxelNodeDriver'),
    Provider.SOFTLAYER:
    ('libcloud.compute.drivers.softlayer', 'SoftLayerNodeDriver'),
    Provider.EUCALYPTUS:
    ('libcloud.compute.drivers.ec2', 'EucNodeDriver'),
    Provider.OPENNEBULA:
    ('libcloud.compute.drivers.opennebula', 'OpenNebulaNodeDriver'),
    Provider.BRIGHTBOX:
    ('libcloud.compute.drivers.brightbox', 'BrightboxNodeDriver'),
    Provider.NIMBUS:
    ('libcloud.compute.drivers.ec2', 'NimbusNodeDriver'),
    Provider.BLUEBOX:
    ('libcloud.compute.drivers.bluebox', 'BlueboxNodeDriver'),
    Provider.GANDI:
    ('libcloud.compute.drivers.gandi', 'GandiNodeDriver'),
    Provider.DIMENSIONDATA:
    ('libcloud.compute.drivers.dimensiondata', 'DimensionDataNodeDriver'),
    Provider.OPENSTACK:
    ('libcloud.compute.drivers.openstack', 'OpenStackNodeDriver'),
    Provider.VCLOUD:
    ('libcloud.compute.drivers.vcloud', 'VCloudNodeDriver'),
    Provider.TERREMARK:
    ('libcloud.compute.drivers.vcloud', 'TerremarkDriver'),
    Provider.CLOUDSTACK:
    ('libcloud.compute.drivers.cloudstack', 'CloudStackNodeDriver'),
    Provider.LIBVIRT:
    ('libcloud.compute.drivers.libvirt_driver', 'LibvirtNodeDriver'),
    Provider.JOYENT:
    ('libcloud.compute.drivers.joyent', 'JoyentNodeDriver'),
    Provider.VCL:
    ('libcloud.compute.drivers.vcl', 'VCLNodeDriver'),
    Provider.KTUCLOUD:
    ('libcloud.compute.drivers.ktucloud', 'KTUCloudNodeDriver'),
    Provider.HOSTVIRTUAL:
    ('libcloud.compute.drivers.hostvirtual', 'HostVirtualNodeDriver'),
    Provider.ABIQUO:
    ('libcloud.compute.drivers.abiquo', 'AbiquoNodeDriver'),
    Provider.DIGITAL_OCEAN:
    ('libcloud.compute.drivers.digitalocean', 'DigitalOceanNodeDriver'),
    Provider.NEPHOSCALE:
    ('libcloud.compute.drivers.nephoscale', 'NephoscaleNodeDriver'),
    Provider.EXOSCALE:
    ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'),
    Provider.IKOULA:
    ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'),
    Provider.OUTSCALE_SAS:
    ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'),
    Provider.OUTSCALE_INC:
    ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'),
    Provider.VSPHERE:
    ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'),
    Provider.PROFIT_BRICKS:
    ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'),
    Provider.VULTR:
    ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'),
    Provider.AURORACOMPUTE:
    ('libcloud.compute.drivers.auroracompute', 'AuroraComputeNodeDriver'),
    Provider.CLOUDWATT:
    ('libcloud.compute.drivers.cloudwatt', 'CloudwattNodeDriver'),
    Provider.PACKET:
    ('libcloud.compute.drivers.packet', 'PacketNodeDriver'),
    Provider.ONAPP:
    ('libcloud.compute.drivers.onapp', 'OnAppNodeDriver'),
    Provider.RUNABOVE:
    ('libcloud.compute.drivers.runabove', 'RunAboveNodeDriver'),
    Provider.INTERNETSOLUTIONS:
    ('libcloud.compute.drivers.internetsolutions',
     'InternetSolutionsNodeDriver'),
    Provider.INDOSAT:
    ('libcloud.compute.drivers.indosat', 'IndosatNodeDriver'),
    Provider.MEDONE:
    ('libcloud.compute.drivers.medone', 'MedOneNodeDriver'),
    Provider.BSNL:
    ('libcloud.compute.drivers.bsnl', 'BSNLNodeDriver'),
    Provider.CISCOCCS:
    ('libcloud.compute.drivers.ciscoccs', 'CiscoCCSNodeDriver'),
    Provider.NTTA:
    ('libcloud.compute.drivers.ntta', 'NTTAmericaNodeDriver'),
    Provider.ALIYUN_ECS:
    ('libcloud.compute.drivers.ecs', 'ECSDriver'),
}
def get_driver(provider):
    """Return the compute driver class registered for ``provider``,
    resolving deprecated provider constants and relocated drivers."""
    return _get_provider_driver(
        drivers=DRIVERS,
        provider=provider,
        deprecated_providers=DEPRECATED_DRIVERS,
        deprecated_constants=OLD_CONSTANT_TO_NEW_MAPPING)
def set_driver(provider, module, klass):
    """Register ``klass`` (importable from ``module``) as the compute driver
    for ``provider`` in the DRIVERS registry."""
    return _set_provider_driver(drivers=DRIVERS, provider=provider,
                                module=module, klass=klass)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id: dellswitch.py 1034085 2010-11-11 19:56:30Z rgass $
#
# switch configuration manager for force10 s50
import os
import sys
import pexpect
import datetime
import time
import thread
import string
import getpass
import socket
import tempfile
import logging
#import zoni
from zoni.data.resourcequerysql import *
from zoni.hardware.hwswitchinterface import HwSwitchInterface
from zoni.data.resourcequerysql import ResourceQuerySql
from zoni.agents.dhcpdns import DhcpDns
''' Using pexpect to control switches because couldn't get snmp to work
'''
class HwF10S50Switch(HwSwitchInterface):
def __init__(self, config, host=None):
self.config = config
self.host = host
self.verbose = False
self.log = logging.getLogger(os.path.basename(__file__))
def setVerbose(self, verbose):
self.verbose = verbose
    def __login(self):
        """Open an ssh session to the switch, authenticate and enter enable
        mode.  Returns the live pexpect child; callers must terminate() it."""
        switchIp = "ssh " + self.host['hw_userid'] + "@" + self.host['hw_name']
        child = pexpect.spawn(switchIp)
        # Be Verbose and print everything
        if self.verbose:
            child.logfile = sys.stdout
        # 'assword:' matches both 'Password:' and 'password:' prompts.
        opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
        #XXX Doesn't seem to do what I want:(
        child.setecho(False)
        # Send a yes to register authenticity of host for ssh
        if opt == 2:
            child.sendline("yes")
            opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
        if opt == 0:
            # Switch prompted for a username first.
            child.sendline(self.host['hw_userid'])
            i = child.expect(['assword:', 'Connection', pexpect.EOF, pexpect.TIMEOUT])
            child.sendline(self.host['hw_password'])
            i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT])
            if i == 2:
                mesg = "ERROR: Login to %s failed\n" % (self.host['hw_name'])
                self.log.error(mesg)
                exit(1)
        if opt == 1:
            # the S50 doesn't prompt for username
            child.sendline(self.host['hw_password'])
            i=child.expect(['console','>', 'Name:', pexpect.EOF, pexpect.TIMEOUT])
        # on the S50, need to send enable, just send to all
        child.sendline('enable')
        i=child.expect(['assword:', pexpect.EOF, pexpect.TIMEOUT])
        child.sendline(self.host['hw_password'])
        i=child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
        return child
def __getPrsLabel(self):
dadate = datetime.datetime.now().strftime("%Y%m%d-%H%M-%S")
return "PRS_" + dadate
    def enableHostPort(self):
        """Bring up ("no shutdown") this host's switch port."""
        child = self.__login()
        child.sendline('config')
        cmd = "interface g 0/" + str(self.host['hw_port'])
        child.sendline(cmd)
        cmd = "no shutdown"
        child.sendline(cmd)
        child.sendline('exit')
        child.terminate()
    def disableHostPort(self):
        """Administratively shut down this host's switch port."""
        child = self.__login()
        child.sendline('config')
        cmd = "interface g 0/" + str(self.host['hw_port'])
        child.sendline(cmd)
        cmd = "shutdown"
        child.sendline(cmd)
        child.sendline('exit')
        child.terminate()
    def removeVlan(self, num):
        """Delete vlan ``num`` (a string) from the switch."""
        # Check for important vlans
        cmd = "no interface vlan " + num
        child = self.__login()
        child.sendline('config')
        child.sendline(cmd)
        child.sendline('exit')
        child.terminate()
def addVlanToTrunk(self, vlan):
mesg = "Adding Vlan %s to trunk on switch" % (vlan)
self.log.info(mesg)
child = self.__login()
child.sendline('config')
cmd = "interface port-channel 1"
child.sendline(cmd)
child.expect(["conf-if", pexpect.EOF])
child.sendline("switchport")
child.sendline("exit")
child.sendline("interface vlan " + vlan")
child.expect(["conf-if", pexpect.EOF])
cmd = "tagged port-channel 1"
child.sendline(cmd)
child.sendline('exit')
def createVlansThread(self, vlan, switch,host):
mesg = "Creating vlan %s on switch %s" % (str(vlan),str(switch))
print "host is ", host
self.log(mesg)
print "create"
self.createVlan(vlan)
print "cend"
self.addVlanToTrunk(vlan);
thread.exit()
    def createVlans(self, vlan, switchlist, query):
        """Create ``vlan`` on every switch in ``switchlist`` and tag it onto
        each switch's trunk; ``query`` supplies per-switch login records."""
        for switch in switchlist:
            #print "working on switch ", switch
            #self.host = query.getSwitchInfo(switch)
            #thread.start_new_thread(self.createVlansThread, (vlan, switch, self.host))
            mesg = "Creating vlan %s on switch %s" % (str(vlan), str(switch))
            self.log.info(mesg)
            self.host = query.getSwitchInfo(switch)
            self.createVlan(vlan)
            self.addVlanToTrunk(vlan);
    def removeVlans(self, vlan, switchlist, query):
        """Delete ``vlan`` from every switch in ``switchlist``."""
        for switch in switchlist:
            mesg = "Deleting vlan %s on switch %s" % (str(vlan),str(switch))
            self.log.info(mesg)
            self.host = query.getSwitchInfo(switch)
            self.removeVlan(vlan)
    def createVlan(self, val):
        """Create a vlan.  ``val`` is either "NUM" or "NUM:NAME"; when no name
        is supplied a timestamped PRS_* label is generated."""
        vlanname = False
        if ":" in val:
            num = int(val.split(":")[0])
            vlanname = val.split(":")[1]
        else:
            vlanname = self.__getPrsLabel()
            num = int(val)
        #if type(num) != int:
            #mesg = "ERROR: Vlan must be a number (0-4095)\n"
            #sys.stderr.write(mesg)
            #exit(1)
        # NOTE(review): the bound is hard-coded to 4095 but the error message
        # reports self.config['vlan_max'] -- confirm these two agree.
        if num > 4095 or num < 0:
            mesg = "Vlan out of range. Must be < %s" % (self.config['vlan_max'])
            self.log.error(mesg)
            exit(1)
        child = self.__login()
        child.sendline('config')
        child.expect(["config",pexpect.EOF, pexpect.TIMEOUT])
        child.sendline('interface vlan %d' % num)
        child.expect(["conf-if",pexpect.EOF, pexpect.TIMEOUT])
        child.sendline("shutdown")
        child.sendline("no ip address")
        if vlanname:
            child.sendline("name " + vlanname)
        child.sendline('exit')
        child.expect(["config",pexpect.EOF, pexpect.TIMEOUT])
# Raw Switch commands. DEBUG ONLY!, Doesn't work!
def sendSwitchCommand(self, cmds):
if len(cmds) > 0:
child = self.__login()
child.logfile = sys.stdout
for cmd in cmds.split(";"):
child.sendline(cmd)
try:
i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
i=child.expect(['console','#', 'Name:', pexpect.EOF, pexpect.TIMEOUT], timeout=2)
except EOF:
print "EOF", i
#child.sendline()
except TIMEOUT:
print "TIMEOUT", i
#child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
child.terminate()
#print "before", child.before
#print "after", child.after
    def addNodeToVlan(self, vlan):
        """NOOP on the S50: logs the request only, no switch interaction."""
        mesg = "NOOP Adding Node to vlan %s" % (str(vlan))
        self.log.info(mesg)
    def removeNodeFromVlan(self, vlan):
        """NOOP on the S50: logs the request only, no switch interaction."""
        mesg = "NOOP Removing Node from vlan %s" % (str(vlan))
        self.log.info(mesg)
def setNativeVlan(self, vlan):
child = self.__login()
child.logfile = sys.stdout
child.sendline('config')
cmd = "interface vlan " + vlan)
child.sendline(cmd)
i=child.expect(['conf-if', pexpect.EOF, pexpect.TIMEOUT])
if i > 0:
self.log.error("setNativeVlan %s failed" % (cmd))
cmd = "untagged g 0/" + str(self.host['hw_port'])
child.sendline(cmd)
child.expect(['config-if', pexpect.EOF, pexpect.TIMEOUT])
child.sendline('exit')
child.terminate()
    # Restore Native Vlan. In Dell's case, this is vlan 1
    def restoreNativeVlan(self):
        """Stub: not implemented for the Force10 S50."""
        pass
    # Setup the switch for node allocation
    def allocateNode(self):
        """Stub: not implemented for the Force10 S50."""
        pass
    # Remove all vlans from the interface
    def removeAllVlans(self):
        """Stub: not implemented for the Force10 S50."""
        pass
    def showInterfaceConfig(self):
        """Print this host's switch-port configuration to stdout."""
        child = self.__login()
        print "\n------------------------------------"
        print "SWITCH - " + self.host['hw_name'] + "/" + str(self.host['hw_port'])
        print "NODE - " + self.host['location']
        print "------------------------------------\n"
        child.logfile = sys.stdout
        cmd = "show interfaces g 0/" + str(self.host['hw_port'])
        child.sendline(cmd)
        i = child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
        child.terminate()
    def interactiveSwitchConfig(self):
        """Hand the operator an interactive ssh session on the switch
        (escape with Ctrl-], i.e. '\\x1d')."""
        switchIp = "ssh " + self.host['hw_name']
        child = pexpect.spawn(switchIp)
        child.setecho(False)
        #child.expect('Name:')
        child.sendline(self.host['hw_userid'])
        #i=child.expect(['test','password:','Password:', pexpect.EOF, pexpect.TIMEOUT])
        #child.logfile = sys.stdout
        child.sendline(self.host['hw_password'])
        child.interact(escape_character='\x1d', input_filter=None, output_filter=None)
    def registerToZoni(self, user, password, host):
        """Probe a switch over ssh/snmp and register it in DNS/DHCP and the
        Zoni database.  ``host`` may be an IP address or a resolvable name."""
        host = string.strip(str(host))
        # Get hostname of the switch
        if len(host.split(".")) == 4:
            ip = host
            try:
                host = string.strip(socket.gethostbyaddr(ip)[0].split(".")[0])
            except Exception, e:
                mesg = "Host (%s) not registered in DNS, %s" % (host,str(e))
                self.log.warning(mesg)
        else:
            # Maybe a hostname was entered...
            try:
                ip = socket.gethostbyname(host)
            except Exception, e:
                mesg = "Host (%s) not registered in DNS, %s" % (host, str(e))
                self.log.error(mesg)
                mesg = "Unable to resolve hostname"
                self.log.critical(mesg)
                exit()
        switchIp = "ssh " + user + "@" + ip
        child = pexpect.spawn(switchIp)
        opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
        #XXX Doesn't seem to do what I want:(
        child.setecho(False)
        # Send a yes to register authenticity of host for ssh
        if opt == 2:
            child.sendline("yes")
            opt = child.expect(['Name:', 'assword:', 'Are you sure.*', pexpect.EOF, pexpect.TIMEOUT])
        if opt == 0:
            child.sendline(user)
            i = child.expect(['assword:', 'Connection', pexpect.EOF, pexpect.TIMEOUT])
            child.sendline(password)
            i=child.expect(['console',host, 'Name:', pexpect.EOF, pexpect.TIMEOUT])
            if i == 2:
                mesg = "Login to switch %s failed" % (host)
                self.log.error(mesg)
                exit(1)
        if opt == 1:
            child.sendline(password)
            i=child.expect(['console',host, 'Name:', pexpect.EOF, pexpect.TIMEOUT])
        # on the 6448 dell, need to send enable, just send to all
        child.sendline('enable')
        i=child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
        # Capture "show system"/"show version" output to a temp file so the
        # hardware attributes can be parsed out below.
        fout = tempfile.TemporaryFile()
        child.logfile = fout
        cmd = "show system"
        child.sendline(cmd)
        val = host + "#"
        i = child.expect([val, '\n\r\n\r', pexpect.EOF, pexpect.TIMEOUT])
        cmd = "show version"
        child.sendline(cmd)
        i = child.expect([val, '\n\r\n\r', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        # ``a`` accumulates the hw_* attribute dict stored in the Zoni DB.
        a={}
        for i in fout.readlines():
            if "System Location:" in i:
                datime = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.localtime())
                val = "Registered by Zoni on : " + datime
                a['hw_notes'] = val + "; " + string.strip(i.split(':', 1)[1])
            if "System MAC" in i:
                a['hw_mac'] = string.strip(i.split(':', 1)[1])
            if "SW version" in i:
                a['hw_version_sw'] = string.strip(i.split(' ')[1].split()[0])
            if "HW version" in i:
                a['hw_version_fw'] = string.strip(i.split(' ')[1].split()[0])
        a['hw_type'] = "switch"
        a['hw_make'] = "F10S50"
        a['hw_name'] = host
        a['hw_ipaddr'] = ip
        a['hw_userid'] = user
        a['hw_password'] = password
        child.sendline('exit')
        child.sendline('exit')
        child.terminate()
        # Try to get more info via snmp
        from pysnmp.entity.rfc3413.oneliner import cmdgen
        from pysnmp.proto import rfc1902
        user = "public"
        oid = eval("1,3,6,1,4,1,674,10895,3000,1,2,100,1,0")
        errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd( \
            cmdgen.CommunityData('my-agent', user, 0), \
            cmdgen.UdpTransportTarget((host, 161)), oid)
        a['hw_model'] = str(varBinds[0][1])
        oid = eval("1,3,6,1,4,1,674,10895,3000,1,2,100,3,0")
        errorIndication, errorStatus, errorIndex, varBinds = cmdgen.CommandGenerator().getCmd( \
            cmdgen.CommunityData('my-agent', user, 0), \
            cmdgen.UdpTransportTarget((host, 161)), oid)
        a['hw_make'] = str(varBinds[0][1])
        # Register in dns
        if self.config['dnsEnabled']:
            try:
                mesg = "Adding %s(%s) to dns" % (host, ip)
                self.log.info(mesg)
                DhcpDns(self.config, verbose=self.verbose).addDns(host, ip)
                mesg = "Adding %s(%s) to dhcp" % (host, ip)
                self.log.info(mesg)
                DhcpDns(self.config, verbose=self.verbose).addDhcp(host, ip, a['hw_mac'])
            except:
                mesg = "Adding %s(%s) %s to dhcp/dns failed" % (host, ip, a['hw_mac'])
                self.log.error(mesg)
        # Add to db
        # Register to DB
        query = ResourceQuerySql(self.config, self.verbose)
        query.registerHardware(a)
import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
import os
import urlparse
import urllib
# Vendor names recognized at the start of a product title; a match is
# stripped from the product name and recorded as the manufacturer.
MANUFACTURERS = ["Cobra", "DTF-UHF", "EMAX", "FatShark", "Foxeer", "FrSky", "Gemfan", "ImmersionRC", "XHover"]
# Maps scraped vendor spellings to canonical manufacturer names.
CORRECT = {"Cobra Motor": "Cobra", "Emax": "EMAX", "HQ Prop": "HQProp", "HQ Prop Propellers": "HQProp", "HQ Direct Drive Propellers": "HQProp", "SunnySky": "Sunnysky", "TBS": "Team BlackSheep", "VAS": "Video Aerial Systems"}
MANUFACTURERS.extend(CORRECT.keys())
# Longest-first so e.g. "HQ Prop Propellers" matches before "HQ Prop".
MANUFACTURERS.sort(key=len, reverse=True)
# Name prefixes / quantity markers per manufacturer (currently empty).
NEW_PREFIX = {}
QUANTITY = {}
# Maps the site's stock CSS classes to normalized stock states.
STOCK_STATE_MAP = {"out-of-stock": "out_of_stock",
                   "in-stock": "in_stock"}
class StoneBlueAirlinesSpider(CrawlSpider):
  """Crawls stoneblueairlines.com and yields one Part per product page."""
  name = "stoneblueairlines"
  allowed_domains = ["www.stoneblueairlines.com"]
  start_urls = ["http://www.stoneblueairlines.com/"]
  rules = (
      Rule(LinkExtractor(restrict_css=["#nav", ".pages ol"])),
      Rule(LinkExtractor(restrict_css=[".product-name"]), callback='parse_item'),
  )

  def parse_item(self, response):
    """Extract manufacturer, name, price and stock info from a product page."""
    part = Part()
    part["site"] = self.name
    title = response.css(".product-name h1::text")
    if not title:
      return
    part["name"] = title.extract_first().strip()
    # Strip a recognized manufacturer prefix from the product title.
    for maker in MANUFACTURERS:
      if part["name"].startswith(maker):
        part["name"] = part["name"][len(maker):].strip("- ")
        part["manufacturer"] = maker
        break
    if "manufacturer" in part:
      maker = part["manufacturer"]
      if maker in NEW_PREFIX:
        part["name"] = NEW_PREFIX[maker] + " " + part["name"]
      if maker in CORRECT:
        part["manufacturer"] = CORRECT[maker]
    offer = {}
    part["variants"] = [offer]
    # Canonicalize the URL: keep only the basename of the path component.
    pieces = urlparse.urlparse(response.url)
    basename = "/" + os.path.basename(pieces[2])
    offer["url"] = urlparse.urlunparse((pieces[0], pieces[1], basename,
                                        pieces[3], pieces[4], pieces[5]))
    price_box = response.css(".product-essential .price-box")
    if price_box:
      price = price_box.css(".special-price .price::text")
      if not price:
        price = price_box.css(".regular-price .price::text")
      if price:
        offer["price"] = price.extract_first().strip()
    for marker in QUANTITY:
      if marker in part["name"]:
        offer["quantity"] = QUANTITY[marker]
        part["name"] = part["name"].replace(marker, "")
    stock = response.css(".availability")
    if stock:
      state_class = stock.css("::attr(class)").extract_first().strip().split()[-1]
      text = stock.css("span::text").extract()[-1].strip()
      offer["stock_text"] = text
      if state_class in STOCK_STATE_MAP:
        offer["stock_state"] = STOCK_STATE_MAP[state_class]
      else:
        print(state_class)
    return part
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import ipaddr
from st2common.log import logging
# Module-level logger for this networking utility module.
LOG = logging.getLogger(__name__)
# Public names exported via ``from ... import *``.
__all__ = [
    'is_ipv4',
    'is_ipv6',
    'split_host_port'
]
# Raw strings avoid invalid-escape warnings for "\[" / "\]" under Python 3;
# the compiled patterns are otherwise unchanged.
BRACKET_PATTERN = r"^\[.*\]"  # IPv6 bracket pattern to specify port
COMPILED_BRACKET_PATTERN = re.compile(BRACKET_PATTERN)
HOST_ONLY_IN_BRACKET = r"^\[.*\]$"  # bracketed address with no trailing port
COMPILED_HOST_ONLY_IN_BRACKET_PATTERN = re.compile(HOST_ONLY_IN_BRACKET)
def is_ipv6(ip_str):
    """
    Validate whether the given string is an IPv6 address.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddr.IPAddress(ip_str)
        return addr.version == 6
    except ValueError:
        # ipaddr raises ValueError for strings that are not IP addresses.
        # A bare ``except`` would also swallow KeyboardInterrupt/SystemExit,
        # so catch only the expected parse failure.
        return False
def is_ipv4(ip_str):
    """
    Validate whether the given string is an IPv4 address.

    :param ip_str: String to validate.
    :type ip_str: ``str``

    :rtype: ``bool``
    """
    try:
        addr = ipaddr.IPAddress(ip_str)
        return addr.version == 4
    except ValueError:
        # ipaddr raises ValueError for strings that are not IP addresses;
        # catch only that instead of a bare ``except``.
        return False
def split_host_port(host_str):
    """
    Split host_str into host and port.

    Can handle IPv4, IPv6 and hostnames, inside or outside square brackets.

    Note: If you want to specify a port with IPv6, you definitely
    should enclose the IP address within [].

    :param host_str: Host port string.
    :type host_str: ``str``

    :return: Hostname (string), port (int) tuple. Raises exception on invalid port.
    :rtype: ``tuple`` of ``str`` and ``int``
    """
    hostname = host_str
    port = None
    # If it's a plain IPv6 or IPv4 address (no port attached), return here.
    if is_ipv6(host_str) or is_ipv4(host_str):
        return (hostname, port)
    # Check if it's square bracket style ("[addr]" or "[addr]:port").
    match = COMPILED_BRACKET_PATTERN.match(host_str)
    if match:
        LOG.debug('Square bracket style.')
        # Check if square bracket style no port.
        match = COMPILED_HOST_ONLY_IN_BRACKET_PATTERN.match(host_str)
        if match:
            hostname = match.group().strip('[]')
            return (hostname, port)
        hostname, separator, port = hostname.rpartition(':')
        try:
            LOG.debug('host_str: %s, hostname: %s port: %s' % (host_str, hostname, port))
            port = int(port)
            hostname = hostname.strip('[]')
            return (hostname, port)
        except ValueError:
            # int() failed: text after the last ':' is not a number.
            # ValueError is a subclass of Exception, so existing callers
            # catching Exception keep working.
            raise ValueError('Invalid port %s specified.' % port)
    else:
        LOG.debug('Non-bracket address. host_str: %s' % host_str)
        if ':' in host_str:
            LOG.debug('Non-bracket with port.')
            hostname, separator, port = hostname.rpartition(':')
            try:
                port = int(port)
                return (hostname, port)
            except ValueError:
                raise ValueError('Invalid port %s specified.' % port)
    return (hostname, port)
""" Hangman Game (v1.0)
Name: Joe Young
Date: 24/09/2016
"""
#Joe Young
#06/09/2016
import sys
import platform
# Make the bundled "Extra" directory importable (it provides hangmanp),
# using the platform-appropriate path separator.
if "windows" == platform.system():
    sys.path.append(sys.path[0]+'\\Extra')
else:
    sys.path.append(sys.path[0]+'//Extra')
from random import *
from time import *
import hangmanp
def load_file(filename):
    """Open ``filename`` and return the full list of lines (one word each).

    Uses a ``with`` block so the file handle is closed even if reading
    fails (the original left the file open on error and shadowed the
    ``file`` builtin).
    """
    with open(filename) as word_file:
        return word_file.readlines()
def select_word():
    """Pick one random line from the bundled word-list file."""
    words = load_file("wordlists.txt")
    random_index = randint(0, len(words) - 1)
    return words[random_index]
def again():
    """Ask the player whether to restart; launch a new game on "yes"."""
    while True:
        answer = str(input("Would you like to play again?\n-")).lower()
        if answer == "yes":
            print("You have choosen to play again!\n")
            sleep(1)
            return main()
        if answer == "no":
            print("You have choosen to not play again!")
            sleep(3)
            break
        print("Please input a vaild method!\n")
def main():
    """Run one full interactive game of hangman with a random word."""
    ran_word = select_word()
    # NOTE(review): the word read from file still ends with '\n'; len-1
    # drops that trailing character from the number of blanks — confirm.
    li = (len(ran_word)-1)
    trys = 0
    guess = 0
    # NOTE(review): prints the secret word — looks like debug output left in.
    print(ran_word)
    left = ["_" for i in range (li)]
    while 0 != 1:
        print (left)
        letter = str(input("What is your guess?\n-")).lower()
        if ran_word.find(letter) == -1:
            # Wrong guess: draw the next hangman stage (up to 11 stages).
            print ("Your guess was incorrect\n")
            if guess != 11:
                print (hangmanp.hangman(guess))
                guess = guess + 1
        else:
            if not(len(letter) == 1):
                print("Please only entre one letter at a time!\n")
            else:
                if not((letter) in left):
                    print("Your guess was correct!\n")
                else:
                    print("You have already correctly guessed", letter, "!\n")
                # Reveal every position where the guessed letter occurs.
                for i in range(li):
                    if ran_word[i] == letter:
                        left[i] = letter
                if not(("_") in left): #checks to see if user has won the game
                    print(left)
                    print("You have won the game!, it took you ", trys, " guesses!\n")
                    sleep(3)
                    return again()
                    break
        trys = trys + 1
        # guess reaches 11 once the hangman drawing is complete: game over.
        if not(guess != 11):
            print("You have exceed your guesses!\n")
            print("You have lost the game!, it took you ", trys, " to fail :(\n")
            sleep(5)
            return again()
            break
# Start a game only when run as a script, not when imported.
if __name__ == "__main__":
    main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.docs.core.aop.aopatconfigurable;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.aspectj.EnableSpringConfigured;
// tag::snippet[]
/**
 * Sample configuration that applies {@link EnableSpringConfigured}. The class
 * body is intentionally empty: the annotations carry all of the configuration.
 */
@Configuration
@EnableSpringConfigured
public class ApplicationConfiguration {
}
// end::snippet[]
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from HTMLParser import HTMLParser
from goose.text import innerTrim
class OutputFormatter(object):
    """Cleans the extracted top node of an article and renders it as text."""

    def __init__(self, config):
        # Filled in by get_formatted_text(); holds the article's top node.
        self.top_node = None
        self.config = config
        # parser abstraction supplied by the configuration (lxml-backed)
        self.parser = self.config.get_parser()
        self.stopwords_class = config.stopwords_class
    def get_language(self, article):
        """\
        Returns the language detected for the article, or the
        configured target language as a fallback.
        """
        # we don't want to force the target laguage
        # so we use the article.meta_lang
        if self.config.use_meta_language == True:
            if article.meta_lang:
                return article.meta_lang[:2]
        return self.config.target_language
    def get_top_node(self):
        # Accessor for the node set by get_formatted_text().
        return self.top_node
    def get_formatted_text(self, article):
        """Run the full cleanup pipeline and return the article body text."""
        self.top_node = article.top_node
        self.remove_negativescores_nodes()
        self.links_to_text()
        self.add_newline_to_br()
        self.replace_with_text()
        self.remove_fewwords_paragraphs(article)
        return self.convert_to_text()
    def convert_to_text(self):
        """Join the text of the top node's children into paragraphs."""
        txts = []
        for node in list(self.get_top_node()):
            txt = self.parser.getText(node)
            if txt:
                txt = HTMLParser().unescape(txt)
                # Split on the literal marker inserted by add_newline_to_br().
                txt_lis = innerTrim(txt).split(r'\n')
                txts.extend(txt_lis)
        return '\n\n'.join(txts)
    def add_newline_to_br(self):
        # Replace <br> elements with a literal "\n" marker that
        # convert_to_text() later splits on.
        for e in self.parser.getElementsByTag(self.top_node, tag='br'):
            e.text = r'\n'
    def links_to_text(self):
        """\
        cleans up and converts any nodes that
        should be considered text into text
        """
        self.parser.stripTags(self.get_top_node(), 'a')
    def remove_negativescores_nodes(self):
        """\
        if there are elements inside our top node
        that have a negative gravity score,
        let's give em the boot
        """
        gravity_items = self.parser.css_select(self.top_node, "*[gravityScore]")
        for item in gravity_items:
            score = self.parser.getAttribute(item, 'gravityScore')
            # base 0: int() honours "0x"/"0o" style prefixes in the attribute
            score = int(score, 0)
            if score < 1:
                item.getparent().remove(item)
    def replace_with_text(self):
        """\
        replace common tags with just
        text so we don't have any crazy formatting issues
        so replace <br>, <i>, <strong>, etc....
        with whatever text is inside them
        code : http://lxml.de/api/lxml.etree-module.html#strip_tags
        """
        self.parser.stripTags(self.get_top_node(), 'b', 'strong', 'i', 'br', 'sup')
    def remove_fewwords_paragraphs(self, article):
        """\
        remove paragraphs that have less than x number of words,
        would indicate that it's some sort of link
        """
        all_nodes = self.parser.getElementsByTags(self.get_top_node(), ['*'])
        all_nodes.reverse()
        for el in all_nodes:
            tag = self.parser.getTag(el)
            text = self.parser.getText(el)
            # stop_words here is a WordStats-like object returned by
            # get_stopword_count(text); its own get_stopword_count() yields
            # the numeric count used below.
            stop_words = self.stopwords_class(language=self.get_language(article)).get_stopword_count(text)
            if (tag != 'br' or text != '\\r') and stop_words.get_stopword_count() < 3 \
                and len(self.parser.getElementsByTag(el, tag='object')) == 0 \
                and len(self.parser.getElementsByTag(el, tag='embed')) == 0:
                self.parser.remove(el)
            # TODO
            # check if it is in the right place
            else:
                # Drop parenthesised leftovers such as "(photo: ...)".
                trimmed = self.parser.getText(el)
                if trimmed.startswith("(") and trimmed.endswith(")"):
                    self.parser.remove(el)
class StandardOutputFormatter(OutputFormatter):
    # Currently identical to OutputFormatter; no behaviour is added here.
    pass
# -*- coding: cp1252 -*-
import csv
import sys
from xml.dom import minidom
def get_attributes(file_xml):
    """Parse ``file_xml`` and return a list with one dict per <attribute>
    element, each carrying its name/atype/format/skip XML attributes."""
    dom1 = minidom.parse(file_xml)
    return [
        {
            'name': node.getAttribute('name'),
            'atype': node.getAttribute('atype'),
            'format': node.getAttribute('format'),
            'skip': node.getAttribute('skip'),
        }
        for node in dom1.getElementsByTagName('attribute')
    ]
def get_relation(file_xml):
    """
    Return ``(relation_name, delimiter)`` read from the <csv> element of
    ``file_xml``.  When no delimiter is specified, ';' is used.
    """
    dom1 = minidom.parse(file_xml)
    out = ''
    delimiter = ''
    for node in dom1.getElementsByTagName('csv'):
        # If several <csv> elements exist, the last one wins (unchanged).
        out = node.getAttribute('name')
        delimiter = node.getAttribute('delimiter')
    if not delimiter:
        delimiter = ';'
    # Debug output kept for behavioural parity, but written as a function
    # call so the module also parses under Python 3.
    print(delimiter)
    return out, delimiter
class csv_arff_converter:
    """Converts a delimited CSV file into Weka ARFF format, driven by an
    XML attribute-description file (see get_attributes/get_relation)."""

    def __init__(self,csv_file, attribute_file, file_out):
        self.csv_file = csv_file
        self.attribute_file = attribute_file
        self.file_out = file_out
    def run(self):
        # One accumulator slot per declared attribute; class-typed columns
        # collect their observed values here for later substitution.
        classes = []
        #read attribute
        # NOTE(review): uses the module-level ``attribute_file`` instead of
        # self.attribute_file — looks like a bug; confirm against callers.
        self.relation_name, self.delimiter = get_relation(attribute_file)
        attributes_list = get_attributes(attribute_file)
        arff_data = '@RELATION ' + self.relation_name + '\n\n'
        for i in attributes_list:
            if (i['skip'] != 'yes'):
                arff_data += '@ATTRIBUTE '+i['name']+' ' + i['atype']
                if (i['atype']=='date'):
                    arff_data += ' '+i['format']
                if (i['atype']=='class'):
                    # Placeholder replaced with the collected values below.
                    arff_data += ' (#@#'+i['name'] + '#@#)'
                arff_data +='\n'
                classes.append('')
        arff_data += '\n@DATA\n'
        print classes
        #open csv
        reader = csv.reader(open(self.csv_file), delimiter=self.delimiter, quoting=csv.QUOTE_NONE)
        rnum = 0
        for row in reader:
            #print row
            buff = ''
            pos = 0
            #print len(row)
            # careful with the row length
            # NOTE(review): iterates len(row)-1 columns, so the last CSV
            # column is never emitted — confirm this is intentional.
            for j in range(0, len(row)-1):
                field = row[j]
                if(attributes_list[pos]['skip'] != 'yes'):
                    if (pos > 0):
                        buff += ','
                    if(attributes_list[pos]['atype'] == 'string'):
                        field = "'" + field + "'"
                    buff += field
                # if it is a class attribute, collect its values
                if(attributes_list[pos]['atype'] == 'class'):
                    if (rnum > 0):
                        classes[pos]+= ','+ field
                    else:
                        classes[pos]+= field
                pos += 1
            buff += '\n'
            arff_data += buff
            rnum += 1
        pos = 0
        # Substitute each class placeholder with its unique observed values.
        for a in classes:
            j = a.split(',')
            un = list(set(j))
            #print un
            if (len(un) > 0):
                this_replacement = ",".join(un)
                #print this_replacement
                old_text = '#@#'+ attributes_list[pos]['name'] + '#@#'
                #print old_text
                arff_data = arff_data.replace(old_text, this_replacement)
                pos += 1
        #print arff_data
        a = open(self.file_out, 'w')
        a.write(arff_data)
        a.close()
if __name__ == "__main__":
    # Command-line arguments are commented out; the script currently runs
    # against the hard-coded test fixtures below.
    #csv_file = sys.argv[1]
    #attribute_file = sys.argv[2]
    csv_file = './test_csv2arff/test_dataset_1.csv'
    attribute_file = './test_csv2arff/test_dataset_1.att'
    instance = csv_arff_converter(csv_file, attribute_file, './test_csv2arff/output.arff')
    instance.run()
# This file is marked as binary in the CVS, to prevent MacCVS from recoding it.
import unittest
class PEP3120Test(unittest.TestCase):
def test_pep3120(self):
self.assertEqual(
"Питон".encode("utf-8"),
b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
)
self.assertEqual(
"\П".encode("utf-8"),
b'\\\xd0\x9f'
)
def test_badsyntax(self):
try:
import test.badsyntax_pep3120
except SyntaxError as msg:
msg = str(msg).lower()
self.assertTrue('utf-8' in msg)
else:
self.fail("expected exception didn't occur")
class BuiltinCompileTests(unittest.TestCase):
    # Issue 3574.
    def test_latin1(self):
        # compile() must honour a non-UTF-8 coding cookie in byte source.
        latin1_source = '# coding: Latin-1\nu = "Ç"\n'.encode("Latin-1")
        try:
            compiled = compile(latin1_source, '<dummy>', 'exec')
        except SyntaxError:
            self.fail("compile() cannot handle Latin-1 source")
        namespace = {}
        exec(compiled, namespace)
        self.assertEqual('Ç', namespace['u'])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.test.guessittest import *
class TestAutoDetect(TestGuessit):
    def testEmpty(self):
        """Degenerate filenames yield empty or minimal guesses."""
        result = guessit.guess_file_info('')
        assert result == {}
        result = guessit.guess_file_info('___-__')
        assert result == {}
        # Only the extension can be recognised here; the type stays unknown.
        result = guessit.guess_file_info('__-.avc')
        assert result == {'type': 'unknown', 'extension': 'avc'}
    def testAutoDetect(self):
        """Run the shared fixture file through the auto-detection checks."""
        self.checkMinimumFieldsCorrect(filename='autodetect.yaml',
                                       remove_type=False)
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package command
import (
"io"
"strings"
"testing"
"github.com/hashicorp/cli"
"github.com/hashicorp/vault/api"
)
// testWriteCommand returns a WriteCommand wired to a fresh MockUi so tests
// can assert on everything the command writes to stdout/stderr.
func testWriteCommand(tb testing.TB) (*cli.MockUi, *WriteCommand) {
	tb.Helper()
	ui := cli.NewMockUi()
	return ui, &WriteCommand{
		BaseCommand: &BaseCommand{
			UI: ui,
		},
	}
}
func TestWriteCommand_Run(t *testing.T) {
	t.Parallel()
	// Table-driven cases: CLI args in, expected output substring and exit
	// code out.
	cases := []struct {
		name string
		args []string
		out  string
		code int
	}{
		{
			"not_enough_args",
			[]string{},
			"Not enough arguments",
			1,
		},
		{
			"empty_kvs",
			[]string{"secret/write/foo"},
			"Must supply data or use -force",
			1,
		},
		{
			"force_kvs",
			[]string{"-force", "auth/token/create"},
			"token",
			0,
		},
		{
			"force_f_kvs",
			[]string{"-f", "auth/token/create"},
			"token",
			0,
		},
		{
			"kvs_no_value",
			[]string{"secret/write/foo", "foo"},
			"Failed to parse K=V data",
			1,
		},
		{
			"single_value",
			[]string{"secret/write/foo", "foo=bar"},
			"Success!",
			0,
		},
		{
			"multi_value",
			[]string{"secret/write/foo", "foo=bar", "zip=zap"},
			"Success!",
			0,
		},
		{
			"field",
			[]string{
				"-field", "token_renewable",
				"auth/token/create", "display_name=foo",
			},
			"false",
			0,
		},
		{
			"field_not_found",
			[]string{
				"-field", "not-a-real-field",
				"auth/token/create", "display_name=foo",
			},
			"not present in secret",
			1,
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			client, closer := testVaultServer(t)
			defer closer()
			ui, cmd := testWriteCommand(t)
			cmd.client = client
			code := cmd.Run(tc.args)
			if code != tc.code {
				t.Errorf("expected %d to be %d", code, tc.code)
			}
			combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
			if !strings.Contains(combined, tc.out) {
				t.Errorf("expected %q to contain %q", combined, tc.out)
			}
		})
	}
	// If we ask for a field and get an empty result, do not output "Success!" or anything else
	t.Run("field_from_nothing", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()
		ui, cmd := testWriteCommand(t)
		cmd.client = client
		code := cmd.Run([]string{
			"-field", "somefield",
			"secret/write/foo", "foo=bar",
		})
		if exp := 0; code != exp {
			t.Fatalf("expected %d to be %d: %q", code, exp, ui.ErrorWriter.String())
		}
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if combined != "" {
			t.Errorf("expected %q to be empty", combined)
		}
	})
	// -force lets a write succeed with no data payload at all.
	t.Run("force", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()
		if err := client.Sys().Mount("transit/", &api.MountInput{
			Type: "transit",
		}); err != nil {
			t.Fatal(err)
		}
		ui, cmd := testWriteCommand(t)
		cmd.client = client
		code := cmd.Run([]string{
			"-force",
			"transit/keys/my-key",
		})
		if exp := 0; code != exp {
			t.Fatalf("expected %d to be %d: %q", code, exp, ui.ErrorWriter.String())
		}
		secret, err := client.Logical().Read("transit/keys/my-key")
		if err != nil {
			t.Fatal(err)
		}
		if secret == nil || secret.Data == nil {
			t.Fatal("expected secret to have data")
		}
	})
	// "-" as the sole data argument reads a full JSON payload from stdin.
	t.Run("stdin_full", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()
		stdinR, stdinW := io.Pipe()
		go func() {
			stdinW.Write([]byte(`{"foo":"bar"}`))
			stdinW.Close()
		}()
		_, cmd := testWriteCommand(t)
		cmd.client = client
		cmd.testStdin = stdinR
		code := cmd.Run([]string{
			"secret/write/stdin_full", "-",
		})
		if code != 0 {
			t.Fatalf("expected 0 to be %d", code)
		}
		secret, err := client.Logical().Read("secret/write/stdin_full")
		if err != nil {
			t.Fatal(err)
		}
		if secret == nil || secret.Data == nil {
			t.Fatal("expected secret to have data")
		}
		if exp, act := "bar", secret.Data["foo"].(string); exp != act {
			t.Errorf("expected %q to be %q", act, exp)
		}
	})
	// "key=-" reads just that key's value from stdin.
	t.Run("stdin_value", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()
		stdinR, stdinW := io.Pipe()
		go func() {
			stdinW.Write([]byte("bar"))
			stdinW.Close()
		}()
		_, cmd := testWriteCommand(t)
		cmd.client = client
		cmd.testStdin = stdinR
		code := cmd.Run([]string{
			"secret/write/stdin_value", "foo=-",
		})
		if code != 0 {
			t.Fatalf("expected 0 to be %d", code)
		}
		secret, err := client.Logical().Read("secret/write/stdin_value")
		if err != nil {
			t.Fatal(err)
		}
		if secret == nil || secret.Data == nil {
			t.Fatal("expected secret to have data")
		}
		if exp, act := "bar", secret.Data["foo"].(string); exp != act {
			t.Errorf("expected %q to be %q", act, exp)
		}
	})
	// End-to-end: written values round-trip through a read.
	t.Run("integration", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()
		_, cmd := testWriteCommand(t)
		cmd.client = client
		code := cmd.Run([]string{
			"secret/write/integration", "foo=bar", "zip=zap",
		})
		if code != 0 {
			t.Fatalf("expected 0 to be %d", code)
		}
		secret, err := client.Logical().Read("secret/write/integration")
		if err != nil {
			t.Fatal(err)
		}
		if secret == nil || secret.Data == nil {
			t.Fatal("expected secret to have data")
		}
		if exp, act := "bar", secret.Data["foo"].(string); exp != act {
			t.Errorf("expected %q to be %q", act, exp)
		}
		if exp, act := "zap", secret.Data["zip"].(string); exp != act {
			t.Errorf("expected %q to be %q", act, exp)
		}
	})
	// A dead server must surface a write error and exit code 2.
	t.Run("communication_failure", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServerBad(t)
		defer closer()
		ui, cmd := testWriteCommand(t)
		cmd.client = client
		code := cmd.Run([]string{
			"foo/bar", "a=b",
		})
		if exp := 2; code != exp {
			t.Errorf("expected %d to be %d", code, exp)
		}
		expected := "Error writing data to foo/bar: "
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if !strings.Contains(combined, expected) {
			t.Errorf("expected %q to contain %q", combined, expected)
		}
	})
	t.Run("no_tabs", func(t *testing.T) {
		t.Parallel()
		_, cmd := testWriteCommand(t)
		assertNoTabs(t, cmd)
	})
}
#!/usr/bin/env python
# This script aims to help developers locate forms and view code that needs to
# use the new CSRF protection in Django 1.2. It tries to find all the code that
# may need the steps described in the CSRF documentation. It does not modify
# any code directly, it merely attempts to locate it. Developers should be
# aware of its limitations, described below.
#
# For each template that contains at least one POST form, the following info is printed:
#
# <Absolute path to template>
# AKA: <Aliases (relative to template directory/directories that contain it)>
# POST forms: <Number of POST forms>
# With token: <Number of POST forms with the CSRF token already added>
# Without token:
# <File name and line number of form without token>
#
# Searching for:
# <Template names that need to be searched for in view code
# (includes templates that 'include' current template)>
#
# Found:
# <File name and line number of any view code found>
#
# The format used allows this script to be used in Emacs grep mode:
# M-x grep
# Run grep (like this): /path/to/my/virtualenv/python /path/to/django/src/extras/csrf_migration_helper.py --settings=mysettings /path/to/my/srcs
# Limitations
# ===========
#
# - All templates must be stored on disk in '.html' or '.htm' files.
# (extensions configurable below)
#
# - All Python code must be stored on disk in '.py' files. (extensions
# configurable below)
#
# - All templates must be accessible from TEMPLATE_DIRS or from the 'templates/'
# directory in apps specified in INSTALLED_APPS. Non-file based template
# loaders are out of the picture, because there is no way to ask them to
# return all templates.
#
# - It's impossible to programmatically determine which forms should and should
# not have the token added. The developer must decide when to do this,
# ensuring that the token is only added to internally targetted forms.
#
# - It's impossible to programmatically work out when a template is used. The
# attempts to trace back to view functions are guesses, and could easily fail
# in the following ways:
#
# * If the 'include' template tag is used with a variable
# i.e. {% include tname %} where tname is a variable containing the actual
# template name, rather than {% include "my_template.html" %}.
#
# * If the template name has been built up by view code instead of as a simple
# string. For example, generic views and the admin both do this. (These
# apps are both contrib and both use RequestContext already, as it happens).
#
# * If the 'ssl' tag (or any template tag other than 'include') is used to
# include the template in another template.
#
# - All templates belonging to apps referenced in INSTALLED_APPS will be
# searched, which may include third party apps or Django contrib. In some
# cases, this will be a good thing, because even if the templates of these
# apps have been fixed by someone else, your own view code may reference the
# same template and may need to be updated.
#
# You may, however, wish to comment out some entries in INSTALLED_APPS or
# TEMPLATE_DIRS before running this script.
# Improvements to this script are welcome!
# Configuration
# =============
# File extensions treated as templates when scanning template directories.
TEMPLATE_EXTENSIONS = [
    ".html",
    ".htm",
]
# File extensions treated as Python source when scanning the supplied paths.
PYTHON_SOURCE_EXTENSIONS = [
    ".py",
]
# Encodings used when reading template / Python files from disk.
TEMPLATE_ENCODING = "UTF-8"
PYTHON_ENCODING = "UTF-8"
# Method
# ======
# Find templates:
# - template dirs
# - installed apps
#
# Search for POST forms
# - Work out what the name of the template is, as it would appear in an
# 'include' or get_template() call. This can be done by comparing template
# filename to all template dirs. Some templates can have more than one
# 'name' e.g. if a directory and one of its child directories are both in
# TEMPLATE_DIRS. This is actually a common hack used for
# overriding-and-extending admin templates.
#
# For each POST form,
# - see if it already contains '{% csrf_token %}' immediately after <form>
# - work back to the view function(s):
# - First, see if the form is included in any other templates, then
# recursively compile a list of affected templates.
# - Find any code function that references that template. This is just a
# brute force text search that can easily return false positives
# and fail to find real instances.
import os
import sys
import re
from optparse import OptionParser
USAGE = """
This tool helps to locate forms that need CSRF tokens added and the
corresponding view code. This processing is NOT fool proof, and you should read
the help contained in the script itself. Also, this script may need configuring
(by editing the script) before use.
Usage:
python csrf_migration_helper.py [--settings=path.to.your.settings] /path/to/python/code [more paths...]
Paths can be specified as relative paths.
With no arguments, this help is printed.
"""
_POST_FORM_RE = \
re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
_FORM_CLOSE_RE = re.compile(r'</form\s*>')
_TOKEN_RE = re.compile('\{% csrf_token')
def get_template_dirs():
    """
    Returns a set of all directories that contain project templates.
    """
    from django.conf import settings
    dirs = set()
    # Both the old (function-based) and new (class-based) loader names are
    # checked for compatibility across Django versions.
    if ('django.template.loaders.filesystem.load_template_source' in settings.TEMPLATE_LOADERS
        or 'django.template.loaders.filesystem.Loader' in settings.TEMPLATE_LOADERS):
        dirs.update(map(unicode, settings.TEMPLATE_DIRS))
    if ('django.template.loaders.app_directories.load_template_source' in settings.TEMPLATE_LOADERS
        or 'django.template.loaders.app_directories.Loader' in settings.TEMPLATE_LOADERS):
        from django.template.loaders.app_directories import app_template_dirs
        dirs.update(app_template_dirs)
    return dirs
def make_template_info(filename, root_dirs):
    """
    Creates a Template object for a filename, calculating the possible
    relative_filenames from the supplied filename and root template directories
    """
    relative_names = []
    for root in root_dirs:
        if filename.startswith(root):
            # Drop the root prefix plus its trailing path separator.
            relative_names.append(filename[len(root) + 1:])
    return Template(filename, relative_names)
class Template(object):
    """One on-disk template, known by an absolute path plus every relative
    name it can be referenced by from the configured template roots."""

    def __init__(self, absolute_filename, relative_filenames):
        self.absolute_filename, self.relative_filenames = absolute_filename, relative_filenames
    def content(self):
        # Lazily read and cache the decoded template content.
        try:
            return self._content
        except AttributeError:
            fd = open(self.absolute_filename)
            try:
                content = fd.read().decode(TEMPLATE_ENCODING)
            except UnicodeDecodeError, e:
                # Re-raise with the offending filename added to the message.
                message = '%s in %s' % (
                    e[4], self.absolute_filename.encode('UTF-8', 'ignore'))
                raise UnicodeDecodeError(*(e.args[:4] + (message,)))
            fd.close()
            self._content = content
            return content
    content = property(content)
    def post_form_info(self):
        """
        Get information about any POST forms in the template.
        Returns [(linenumber, csrf_token added)]
        """
        forms = {}
        form_line = 0
        for ln, line in enumerate(self.content.split("\n")):
            if not form_line and _POST_FORM_RE.search(line):
                # record the form with no CSRF token yet
                form_line = ln + 1
                forms[form_line] = False
            if form_line and _TOKEN_RE.search(line):
                # found the CSRF token
                forms[form_line] = True
                form_line = 0
            if form_line and _FORM_CLOSE_RE.search(line):
                # no token found by form closing tag
                form_line = 0
        return forms.items()
    def includes_template(self, t):
        """
        Returns true if this template includes template 't' (via {% include %})
        """
        for r in t.relative_filenames:
            if re.search(r'\{%\s*include\s+(\'|")' + re.escape(r) + r'(\1)\s*%\}', self.content):
                return True
        return False
    def related_templates(self):
        """
        Returns all templates that include this one, recursively. (starting
        with this one)
        """
        # Cached after the first computation.
        try:
            return self._related_templates
        except AttributeError:
            pass
        retval = set([self])
        for t in self.all_templates:
            if t.includes_template(self):
                # If two templates mutually include each other, directly or
                # indirectly, we have a problem here...
                retval = retval.union(t.related_templates())
        self._related_templates = retval
        return retval
    def __repr__(self):
        return repr(self.absolute_filename)
    def __eq__(self, other):
        # Identity is the absolute path; __hash__ below must stay consistent.
        return self.absolute_filename == other.absolute_filename
    def __hash__(self):
        return hash(self.absolute_filename)
def get_templates(dirs):
    """
    Returns all files in dirs that have template extensions, as Template
    objects.
    """
    templates = set()
    for root in dirs:
        for (dirpath, dirnames, filenames) in os.walk(root):
            for f in filenames:
                # Keep only files whose name ends in a template extension.
                if len([True for e in TEMPLATE_EXTENSIONS if f.endswith(e)]) > 0:
                    t = make_template_info(os.path.join(dirpath, f), dirs)
                    # templates need to be able to search others:
                    t.all_templates = templates
                    templates.add(t)
    return templates
def get_python_code(paths):
    """
    Returns all Python code, as a list of tuples, each one being:
      (filename, list of lines)
    """
    retval = []
    for p in paths:
        if not os.path.isdir(p):
            raise Exception("'%s' is not a directory." % p)
        for (dirpath, dirnames, filenames) in os.walk(p):
            for f in filenames:
                # Keep only Python source files.
                if len([True for e in PYTHON_SOURCE_EXTENSIONS if f.endswith(e)]) > 0:
                    fn = os.path.join(dirpath, f)
                    fd = open(fn)
                    # Decode each raw line using the configured encoding.
                    content = [l.decode(PYTHON_ENCODING) for l in fd.readlines()]
                    fd.close()
                    retval.append((fn, content))
    return retval
def search_python_list(python_code, template_names):
    """
    Searches python code for a list of template names.
    Returns a sorted, de-duplicated list of tuples, each one being:
      (filename, line number)
    """
    matches = set()
    for name in template_names:
        matches.update(search_python(python_code, name))
    return sorted(matches)
def search_python(python_code, template_name):
    """
    Searches Python code for a template name quoted with either ' or ".
    Returns a list of tuples, each one being:
      (filename, line number)
    """
    hits = []
    double_quoted = u'"%s"' % template_name
    single_quoted = u"'%s'" % template_name
    for fn, content in python_code:
        for ln, line in enumerate(content):
            if double_quoted in line or single_quoted in line:
                hits.append((fn, ln + 1))
    return hits
def main(pythonpaths):
    """Scan templates for POST forms missing {% csrf_token %} and report
    the view code that appears to render each affected template."""
    template_dirs = get_template_dirs()
    templates = get_templates(template_dirs)
    python_code = get_python_code(pythonpaths)
    for t in templates:
        # Logic
        form_matches = t.post_form_info()
        num_post_forms = len(form_matches)
        form_lines_without_token = [ln for (ln, has_token) in form_matches if not has_token]
        if num_post_forms == 0:
            continue
        # Search for every relative name of this template and of all
        # templates that include it.
        to_search = [rf for rt in t.related_templates() for rf in rt.relative_filenames]
        found = search_python_list(python_code, to_search)
        # Display:
        print t.absolute_filename
        for r in t.relative_filenames:
            print u"  AKA %s" % r
        print u"  POST forms: %s" % num_post_forms
        print u"  With token: %s" % (num_post_forms - len(form_lines_without_token))
        if form_lines_without_token:
            print u"  Without token:"
            # "file:line:" format keeps the output usable in grep-mode tools.
            for ln in form_lines_without_token:
                print "%s:%d:" % (t.absolute_filename, ln)
        print
        print u"  Searching for:"
        for r in to_search:
            print u"    " + r
        print
        print u"  Found:"
        if len(found) == 0:
            print "    Nothing"
        else:
            for fn, ln in found:
                print "%s:%d:" % (fn, ln)
        print
        print "----"
# Command-line interface: paths to scan, plus an optional settings module.
parser = OptionParser(usage=USAGE)
parser.add_option("", "--settings", action="store", dest="settings", help="Dotted path to settings file")
if __name__ == '__main__':
    options, args = parser.parse_args()
    if len(args) == 0:
        parser.print_help()
        sys.exit(1)
    settings = getattr(options, 'settings', None)
    if settings is None:
        # Fall back to the environment; bail out if neither is provided.
        if os.environ.get("DJANGO_SETTINGS_MODULE", None) is None:
            print "You need to set DJANGO_SETTINGS_MODULE or use the '--settings' parameter"
            sys.exit(1)
    else:
        os.environ["DJANGO_SETTINGS_MODULE"] = settings
    main(args)
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.test.system;
import org.junit.Rule;
import org.junit.Test;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Tests for {@link OutputCaptureRule}.
*
* @author Roland Weisleder
*/
@SuppressWarnings("removal")
public class OutputCaptureRuleTests {
	@Rule
	public OutputCaptureRule output = new OutputCaptureRule();
	@Test
	public void toStringShouldReturnAllCapturedOutput() {
		System.out.println("Hello World");
		assertThat(this.output.toString()).contains("Hello World");
	}
	@Test
	public void getAllShouldReturnAllCapturedOutput() {
		// getAll() merges both stdout and stderr.
		System.out.println("Hello World");
		System.err.println("Hello Error");
		assertThat(this.output.getAll()).contains("Hello World", "Hello Error");
	}
	@Test
	public void getOutShouldOnlyReturnOutputCapturedFromSystemOut() {
		System.out.println("Hello World");
		System.err.println("Hello Error");
		assertThat(this.output.getOut()).contains("Hello World");
		assertThat(this.output.getOut()).doesNotContain("Hello Error");
	}
	@Test
	public void getErrShouldOnlyReturnOutputCapturedFromSystemErr() {
		System.out.println("Hello World");
		System.err.println("Hello Error");
		assertThat(this.output.getErr()).contains("Hello Error");
		assertThat(this.output.getErr()).doesNotContain("Hello World");
	}
	@Test
	public void captureShouldBeAssertable() {
		// The rule itself can be passed straight to assertThat.
		System.out.println("Hello World");
		assertThat(this.output).contains("Hello World");
	}
}
def strip_headers(post):
    """Drop everything up to the first blank line and lower-case the body.

    If the post has no blank-line header/body separator, the whole post is
    kept (conservative fallback), lower-cased.
    """
    headers, sep, body = post.partition('\n\n')
    if sep:
        return body.lower()
    # Unexpected post inner-structure: keep everything.
    return headers.lower()
# Demonstrate the preprocessor on the first post: show the raw message
# (headers included) next to the stripped, lower-cased body.
original_text = all_twenty_train.data[0]
print("Oringinal text:")  # NOTE(review): typo for "Original" - runtime string left untouched
print(original_text + "\n")

text_body = strip_headers(original_text)
print("Stripped text:")
print(text_body + "\n")

# Retrain the classifier with header stripping applied at vectorization
# time, so the model cannot key on metadata such as sender or newsgroup.
strip_vectorizer = TfidfVectorizer(preprocessor=strip_headers, min_df=2)
X_train_small_stripped = strip_vectorizer.fit_transform(
    twenty_train_small.data)
y_train_small_stripped = twenty_train_small.target

classifier = MultinomialNB(alpha=0.01).fit(
    X_train_small_stripped, y_train_small_stripped)
print("Training score: {0:.1f}%".format(
    classifier.score(X_train_small_stripped, y_train_small_stripped) * 100))

# Evaluate on the held-out split, stripped the same way via the fitted
# vectorizer (transform, not fit_transform).
X_test_small_stripped = strip_vectorizer.transform(twenty_test_small.data)
y_test_small_stripped = twenty_test_small.target
print("Testing score: {0:.1f}%".format(
    classifier.score(X_test_small_stripped, y_test_small_stripped) * 100))
"""The tests for the pushbullet notification platform."""
import json
import unittest
from unittest.mock import patch
from pushbullet import PushBullet
import requests_mock
from homeassistant.setup import setup_component
import homeassistant.components.notify as notify
from tests.common import (
assert_setup_component, get_test_home_assistant, load_fixture)
class TestPushBullet(unittest.TestCase):
    """Tests the Pushbullet Component."""

    # Endpoint every push hits; mocked with a canned 200 in the tests below.
    _PUSH_URL = 'https://api.pushbullet.com/v2/pushes'

    def setUp(self):
        """Initialize values for this test case class."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that we started."""
        self.hass.stop()

    def _setup_notify_service(self):
        """Set up the pushbullet notify platform and assert it loaded."""
        config = {notify.DOMAIN: {'name': 'test',
                                  'platform': 'pushbullet',
                                  'api_key': 'MYFAKEKEY'}}
        with assert_setup_component(1) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert handle_config[notify.DOMAIN]

    def _mock_push_endpoint(self, mock):
        """Register a successful canned response for the pushes endpoint."""
        mock.register_uri(
            requests_mock.POST,
            self._PUSH_URL,
            status_code=200,
            json={'mock_response': 'Ok'}
        )

    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_config(self, mock__get_data):
        """Test setup."""
        self._setup_notify_service()

    def test_pushbullet_config_bad(self):
        """Test set up the platform with bad/missing configuration."""
        config = {
            notify.DOMAIN: {
                'platform': 'pushbullet',
            }
        }
        with assert_setup_component(0) as handle_config:
            assert setup_component(self.hass, notify.DOMAIN, config)
        assert not handle_config[notify.DOMAIN]

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_default(self, mock, mock__get_data):
        """Test pushbullet push to default target."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message'}
        self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
        assert mock.called
        assert mock.call_count == 1
        expected_body = {'body': 'Test Message',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.last_request.json() == expected_body

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_device(self, mock, mock__get_data):
        """Test pushbullet push to a single named device target."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message',
                'target': ['device/DESKTOP']}
        self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
        assert mock.called
        assert mock.call_count == 1
        expected_body = {'body': 'Test Message',
                         'device_iden': 'identity1',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.last_request.json() == expected_body

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_devices(self, mock, mock__get_data):
        """Test pushbullet push to multiple device targets (one request each)."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message',
                'target': ['device/DESKTOP', 'device/My iPhone']}
        self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
        assert mock.called
        assert mock.call_count == 2
        assert len(mock.request_history) == 2
        expected_body = {'body': 'Test Message',
                         'device_iden': 'identity1',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.request_history[0].json() == expected_body
        expected_body = {'body': 'Test Message',
                         'device_iden': 'identity2',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.request_history[1].json() == expected_body

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_email(self, mock, mock__get_data):
        """Test pushbullet push to an email target."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message',
                'target': ['email/user@host.net']}
        self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
        assert mock.called
        assert mock.call_count == 1
        assert len(mock.request_history) == 1
        expected_body = {'body': 'Test Message',
                         'email': 'user@host.net',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.request_history[0].json() == expected_body

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_mixed(self, mock, mock__get_data):
        """Test pushbullet push to mixed device and email targets."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message',
                'target': ['device/DESKTOP', 'email/user@host.net']}
        self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
        assert mock.called
        assert mock.call_count == 2
        assert len(mock.request_history) == 2
        expected_body = {'body': 'Test Message',
                         'device_iden': 'identity1',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.request_history[0].json() == expected_body
        expected_body = {'body': 'Test Message',
                         'email': 'user@host.net',
                         'title': 'Test Title',
                         'type': 'note'}
        assert mock.request_history[1].json() == expected_body

    @requests_mock.Mocker()
    @patch.object(PushBullet, '_get_data',
                  return_value=json.loads(load_fixture(
                      'pushbullet_devices.json')))
    def test_pushbullet_push_no_file(self, mock, mock__get_data):
        """Test that pushing a non-existent file fails the service call."""
        self._setup_notify_service()
        self._mock_push_endpoint(mock)
        data = {'title': 'Test Title',
                'message': 'Test Message',
                'target': ['device/DESKTOP', 'device/My iPhone'],
                'data': {'file': 'not_a_file'}}
        assert not self.hass.services.call(notify.DOMAIN, 'test', data)
        self.hass.block_till_done()
"""
Aliases for functions which may be accelerated by Scipy.
Scipy_ can be built to use accelerated or otherwise improved libraries
for FFTs, linear algebra, and special functions. This module allows
developers to transparently support these accelerated functions when
scipy is available but still support users who have only installed
Numpy.
.. _Scipy : http://www.scipy.org
"""
# This module should be used for functions both in numpy and scipy if
# you want to use the numpy version if available but the scipy version
# otherwise.
# Usage --- from numpy.dual import fft, inv
__all__ = ['fft','ifft','fftn','ifftn','fft2','ifft2',
'norm','inv','svd','solve','det','eig','eigvals',
'eigh','eigvalsh','lstsq', 'pinv','cholesky','i0']
import numpy.linalg as linpkg
import numpy.fft as fftpkg
from numpy.lib import i0
import sys
fft = fftpkg.fft
ifft = fftpkg.ifft
fftn = fftpkg.fftn
ifftn = fftpkg.ifftn
fft2 = fftpkg.fft2
ifft2 = fftpkg.ifft2
norm = linpkg.norm
inv = linpkg.inv
svd = linpkg.svd
solve = linpkg.solve
det = linpkg.det
eig = linpkg.eig
eigvals = linpkg.eigvals
eigh = linpkg.eigh
eigvalsh = linpkg.eigvalsh
lstsq = linpkg.lstsq
pinv = linpkg.pinv
cholesky = linpkg.cholesky
_restore_dict = {}
def register_func(name, func):
if name not in __all__:
raise ValueError, "%s not a dual function." % name
f = sys._getframe(0).f_globals
_restore_dict[name] = f[name]
f[name] = func
def restore_func(name):
if name not in __all__:
raise ValueError, "%s not a dual function." % name
try:
val = _restore_dict[name]
except KeyError:
return
else:
sys._getframe(0).f_globals[name] = val
def restore_all():
for name in _restore_dict.keys():
restore_func(name) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.webrtc
import kotlinx.serialization.Serializable
/**
* An object containing WebRTC protocol entities and abstractions.
* Provides the core types and interfaces needed for WebRTC peer-to-peer communication.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc)
*
* @see [MDN WebRTC API](https://developer.mozilla.org/en-US/docs/Web/API/WebRTC_API)
*/
public object WebRtc {
/**
* Represents the state of the ICE connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceConnectionState)
*
* @see [MDN iceConnectionState](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/iceConnectionState)
*/
public enum class IceConnectionState {
NEW,
CHECKING,
CONNECTED,
COMPLETED,
FAILED,
DISCONNECTED,
CLOSED;
public fun isSuccessful(): Boolean = this == COMPLETED || this == CONNECTED
}
/**
* Represents the state of the ICE gathering process.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceGatheringState)
*
* @see [MDN iceGatheringState](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/iceGatheringState)
*/
public enum class IceGatheringState {
NEW,
GATHERING,
COMPLETE
}
/**
* Represents the state of the peer connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.ConnectionState)
*
* @see [MDN connectionState](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/connectionState)
*/
public enum class ConnectionState {
NEW,
CONNECTING,
CONNECTED,
DISCONNECTED,
FAILED,
CLOSED
}
/**
* Represents the signaling state of the peer connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.SignalingState)
*
* @see [MDN signalingState](https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/signalingState)
*/
public enum class SignalingState {
STABLE,
CLOSED,
HAVE_LOCAL_OFFER,
HAVE_LOCAL_PROVISIONAL_ANSWER,
HAVE_REMOTE_OFFER,
HAVE_REMOTE_PROVISIONAL_ANSWER,
}
/**
* Represents statistics about the WebRtc connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.Stats)
*
* @property id The unique identifier for this statistics object.
* @property type The type of the statistics object.
* @property timestamp The timestamp when these statistics were collected.
* @property props Additional properties specific to the statistics type.
*
* @see [MDN RTCStats](https://developer.mozilla.org/en-US/docs/Web/API/RTCStats)
*/
public data class Stats(
val id: String,
val type: String,
val timestamp: Long,
val props: Map<String, Any?>,
)
/**
* Represents an ICE server configuration for WebRtc connections.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceServer)
*
* @property urls The URLs of the ICE server.
* @property username Optional username for the ICE server.
* @property credential Optional credential for the ICE server.
*
* @see [MDN RTCIceServer](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceServer)
*/
public data class IceServer(
val urls: List<String>,
val username: String? = null,
val credential: String? = null
) {
public constructor(url: String, username: String? = null, credential: String? = null) : this(
urls = listOf(url),
username,
credential
)
}
/**
* Represents the bundle policy for media negotiation.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.BundlePolicy)
*
* @see [MDN bundlePolicy](https://developer.mozilla.org/en-US/docs/Web/API/RTCConfiguration/bundlePolicy)
*/
public enum class BundlePolicy {
MAX_BUNDLE,
BALANCED,
MAX_COMPAT
}
/**
* Represents the ICE candidate policy for the connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceTransportPolicy)
*
* @see [MDN iceTransportPolicy](https://developer.mozilla.org/en-US/docs/Web/API/RTCConfiguration/iceTransportPolicy)
*/
public enum class IceTransportPolicy {
ALL,
RELAY
}
/**
* Represents the RTCP mux policy for the connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.RtcpMuxPolicy)
*
* @see [MDN rtcpMuxPolicy](https://developer.mozilla.org/en-US/docs/Web/API/RTCConfiguration/rtcpMuxPolicy)
*/
public enum class RtcpMuxPolicy {
NEGOTIATE,
REQUIRE
}
/**
* Represents an ICE candidate in the WebRtc connection process.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceCandidate)
*
* @property candidate The ICE candidate string in SDP format.
* @property sdpMid The media stream identifier for the candidate.
* @property sdpMLineIndex The index of the media description in the SDP.
*
* @see [MDN RTCIceCandidate](https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidate)
*/
@Serializable
public data class IceCandidate(
public val candidate: String,
public val sdpMid: String,
public val sdpMLineIndex: Int
)
/**
* Represents a session description in the WebRtc connection process.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.SessionDescription)
*
* @property type The type of the session description.
* @property sdp The SDP (Session Description Protocol) string.
*
* @see [MDN RTCSessionDescription](https://developer.mozilla.org/en-US/docs/Web/API/RTCSessionDescription)
*/
@Serializable
public data class SessionDescription(
val type: SessionDescriptionType,
val sdp: String
)
/**
* Represents the type of session description in the WebRtc connection process.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.SessionDescriptionType)
*
* @see [MDN RTCSessionDescription](https://developer.mozilla.org/en-US/docs/Web/API/RTCSessionDescription)
*/
@Serializable
public enum class SessionDescriptionType {
OFFER,
ANSWER,
PROVISIONAL_ANSWER,
ROLLBACK
}
/**
* Interface for sending DTMF (Dual-Tone Multi-Frequency) tones.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DtmfSender)
*
* @property toneBuffer The tone buffer containing the tones to be played.
* @property canInsertDtmf Whether DTMF tones can be inserted.
*
* @see [MDN RTCDTMFSender](https://developer.mozilla.org/en-US/docs/Web/API/RTCDTMFSender)
*/
public interface DtmfSender {
public val toneBuffer: String
public val canInsertDtmf: Boolean
public fun insertDtmf(tones: String, duration: Int, interToneGap: Int)
}
/**
* Represents parameters for RTP header extensions.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.RtpHeaderExtensionParameters)
*
* @property id The ID of the header extension.
* @property uri The URI of the header extension.
* @property encrypted Whether the header extension is encrypted.
*
* @see [MDN RTCRtpHeaderExtension](https://developer.mozilla.org/en-US/docs/Web/API/RTCRtpHeaderExtension)
*/
public data class RtpHeaderExtensionParameters(
val id: Int,
val uri: String,
val encrypted: Boolean
)
/**
* Interface representing parameters for RTP transmission.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.RtpParameters)
*
* @property transactionId The transaction ID for these parameters.
* @property codecs The codecs used for transmission.
* @property rtcp The RTCP parameters.
* @property headerExtensions The header extensions for the RTP packets.
* @property degradationPreference The degradation preference for the media quality.
* @property encodings The encoding parameters for the media.
*
* @see [MDN RTCRtpParameters](https://developer.mozilla.org/en-US/docs/Web/API/RTCRtpParameters)
*/
public interface RtpParameters {
public val transactionId: String
public val codecs: Iterable<Any>
public val rtcp: Any
public val headerExtensions: Iterable<RtpHeaderExtensionParameters>
public val degradationPreference: DegradationPreference
public val encodings: Iterable<Any>
}
/**
* Represents the degradation preference for media quality when bandwidth is constrained.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DegradationPreference)
*
* @see [MDN degradationPreference](https://developer.mozilla.org/en-US/docs/Web/API/RTCRtpParameters/degradationPreference)
*/
public enum class DegradationPreference {
DISABLED,
MAINTAIN_FRAMERATE,
MAINTAIN_RESOLUTION,
BALANCED
}
/**
* Interface for sending RTP media.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.RtpSender)
*
* @property dtmf The DTMF sender associated with this RTP sender.
* @property track The media track being sent.
*
* @see [MDN RTCRtpSender](https://developer.mozilla.org/en-US/docs/Web/API/RTCRtpSender)
*/
public interface RtpSender {
public val dtmf: DtmfSender?
public val track: WebRtcMedia.Track?
public suspend fun replaceTrack(withTrack: WebRtcMedia.Track?)
public suspend fun getParameters(): RtpParameters
public suspend fun setParameters(parameters: RtpParameters)
}
/**
* Abstract class representing a network channel which can be used for bidirectional peer-to-peer transfers
* of arbitrary data. Every data channel is associated with an [WebRtcPeerConnection].
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel)
*
* @see [MDN RTCDataChannel](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel)
*/
public interface DataChannel : AutoCloseable {
/**
* Represents a message that can be received through a WebRTC data channel.
* The message can contain either string data or binary data.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.Message)
*
* @see [MDN RTCDataChannel.send()](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/send)
* @see [MDN message event](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/message_event)
*/
public sealed interface Message {
public class Text(public val data: String) : Message
public class Binary(public val data: ByteArray) : Message
/**
* Returns the text content of the message if it's a text message, otherwise returns null.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.Message.textOrNull)
*/
public fun textOrNull(): String? = (this as? Text)?.data
/**
* Returns the binary content of the message if it's a binary message, otherwise returns null.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.Message.binaryOrNull)
*/
public fun binaryOrNull(): ByteArray? = (this as? Binary)?.data
/**
* Returns the text content of the message if it's a text message, otherwise throws an exception.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.Message.textOrThrow)
*/
public fun textOrThrow(): String =
(this as? Text ?: error("Received a binary instead of string data.")).data
/**
* Returns the binary content of the message if it's a binary message, otherwise throws an exception.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.Message.binaryOrThrow)
*/
public fun binaryOrThrow(): ByteArray =
(this as? Binary ?: error("Received a string instead of binary data.")).data
}
/**
* Represents the current state of a WebRTC data channel.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.State)
*
* @see [MDN RTCDataChannel.readyState](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/readyState)
*/
public enum class State {
CONNECTING,
OPEN,
CLOSING,
CLOSED;
public fun canSend(): Boolean = this == OPEN
}
/**
* An ID number (between 0 and 65,534) which uniquely identifies the data channel.
* It can be null when the data channel is created but not yet assigned an ID.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.id)
*
* @see [MDN RTCDataChannel.id](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/id)
*/
public val id: Int?
/**
* A string containing a name describing the data channel.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.label)
*
* @see [MDN RTCDataChannel.label](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/label)
*/
public val label: String
/**
* A state of the data channel's underlying data connection.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.state)
*
* @see [MDN RTCDataChannel.readyState](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/readyState)
*/
public val state: State
/**
* A number of bytes of data currently queued to be sent over the data channel.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.bufferedAmount)
*
* @see [MDN RTCDataChannel.bufferedAmount](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/bufferedAmount)
*/
public val bufferedAmount: Long
/**
* A number of queued outgoing data bytes below which the buffer is considered to be "low."
* When the number of buffered outgoing bytes, as indicated by the bufferedAmount property,
* falls to or below this value, a [DataChannelEvent.BufferedAmountLow] event is fired.
* The default value is 0.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.bufferedAmountLowThreshold)
*
* @see [MDN RTCDataChannel.bufferedAmountLowThreshold](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/bufferedAmountLowThreshold)
*/
public val bufferedAmountLowThreshold: Long
/**
* The maximum number of milliseconds that attempts to transfer a message may take in unreliable mode.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.maxPacketLifeTime)
*
* @see [MDN RTCDataChannel.maxPacketLifeTime](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/maxPacketLifeTime)
*/
public val maxPacketLifeTime: Int?
/**
* The maximum number of times the user agent should attempt to retransmit a message
* which fails the first time in unreliable mode.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.maxRetransmits)
*
* @see [MDN RTCDataChannel.maxRetransmits](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/maxRetransmits)
*/
public val maxRetransmits: Int?
/**
* Indicates whether the data channel was negotiated by the application or the WebRTC layer.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.negotiated)
*
* @see [MDN RTCDataChannel.negotiated](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/negotiated)
*/
public val negotiated: Boolean
/**
* Indicates whether messages sent on the data channel are required to arrive at their destination
* in the same order in which they were sent, or if they're allowed to arrive out-of-order.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.ordered)
*
* @see [MDN RTCDataChannel.ordered](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/ordered)
*/
public val ordered: Boolean
/**
* The name of the sub-protocol being used on the data channel, if any; otherwise, the empty string.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.protocol)
*
* @see [MDN RTCDataChannel.protocol](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/protocol)
*/
public val protocol: String
/**
* Sets the threshold for the buffered amount of data below which the buffer is considered to be "low."
* When the buffered amount falls to or below this value, a [DataChannelEvent.BufferedAmountLow] event is fired.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.setBufferedAmountLowThreshold)
*/
public fun setBufferedAmountLowThreshold(threshold: Long)
/**
* Sends a text message through the data channel.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.send)
*
* @param text The text message to send.
* @see [MDN RTCDataChannel.send()](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/send)
*/
public suspend fun send(text: String)
/**
* Sends binary data through the data channel.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.send)
*
* @param bytes The binary data to send.
* @see [MDN RTCDataChannel.send()](https://developer.mozilla.org/en-US/docs/Web/API/RTCDataChannel/send)
*/
public suspend fun send(bytes: ByteArray)
/**
* Suspends until a message is available in the data channel and returns it.
*
* This method will suspend the current coroutine until a message is received.
* The message can be either text or binary data.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.receive)
*/
public suspend fun receive(): Message
/**
* Receives a binary message from the data channel.
*
* This method suspends until a binary message is available. If the next message
* in the channel is a text message instead of binary data, this method will throw an error.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.receiveBinary)
*/
public suspend fun receiveBinary(): ByteArray
/**
* Receives a text message from the data channel.
*
* This method suspends until a text message is available. If the next message
* in the channel is binary data instead of text, this method will throw an error.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.receiveText)
*/
public suspend fun receiveText(): String
/**
* Immediately returns a message from the data channel or null if no message is available.
*
* This method does not suspend and returns immediately. If a message is available,
* it is returned; otherwise, null is returned.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.tryReceive)
*/
public fun tryReceive(): Message?
/**
* Immediately returns binary data from the data channel or null if no binary message is available.
*
* This method does not suspend and returns immediately. If a binary message is available,
* its data is returned; otherwise, null is returned. If the next message is a text message,
* null is returned.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.tryReceiveBinary)
*/
public fun tryReceiveBinary(): ByteArray?
/**
* Immediately returns text data from the data channel or null if no text message is available.
*
* This method does not suspend and returns immediately. If a text message is available,
* its content is returned; otherwise, null is returned. If the next message is a binary message,
* null is returned.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.tryReceiveText)
*/
public fun tryReceiveText(): String?
/**
* Closes the data channel transport. The underlying message receiving channel will be closed.
*
* After calling a channel will start a closing process:
* - The channel state will transition to [WebRtc.DataChannel.State.CLOSED]
* - No more messages can be sent through this channel
* - The underlying message receiving channel will be closed
* - Any pending send operations may fail
* - A [DataChannelEvent.Closed] event will be emitted
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.closeTransport)
*/
public fun closeTransport()
/**
* Closes the data channel and releases all associated resources.
* Automatically invokes `closeTransport`.
* Accessing the channel after this operation could throw an exception.
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.DataChannel.close)
*/
override fun close() {
closeTransport()
}
}
/**
 * Signals a failure while creating, parsing, or validating SDP descriptions
 * as the WebRTC connection is being established.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.SdpException)
 */
public class SdpException(
    message: String?,
    cause: Throwable? = null,
) : RuntimeException(message, cause)
/**
 * Signals a failure in ICE candidate gathering, processing, or connectivity
 * while the WebRTC peer connection is being established.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.client.webrtc.WebRtc.IceException)
 */
public class IceException(
    message: String?,
    cause: Throwable? = null,
) : RuntimeException(message, cause)
} | kotlin | github | https://github.com/ktorio/ktor | ktor-client/ktor-client-webrtc/common/src/io/ktor/client/webrtc/WebRtc.kt |
"""
Tests common to list and UserList.UserList
"""
import sys
import os
from functools import cmp_to_key
from test import support, seq_tests
class CommonTest(seq_tests.CommonTest):
    """Tests shared by the built-in list type and UserList.

    Concrete subclasses supply the type under test via the ``type2test``
    attribute; every test constructs instances through ``self.type2test``.
    """

    def test_init(self):
        # Iterable arg is optional
        self.assertEqual(self.type2test([]), self.type2test())
        # Init clears previous values
        a = self.type2test([1, 2, 3])
        a.__init__()
        self.assertEqual(a, self.type2test([]))
        # Init overwrites previous values
        a = self.type2test([1, 2, 3])
        a.__init__([4, 5, 6])
        self.assertEqual(a, self.type2test([4, 5, 6]))
        # Mutables always return a new object
        b = self.type2test(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)

    def test_repr(self):
        l0 = []
        l2 = [0, 1, 2]
        a0 = self.type2test(l0)
        a2 = self.type2test(l2)
        # For lists str() and repr() must agree.
        self.assertEqual(str(a0), str(l0))
        self.assertEqual(repr(a0), repr(l0))
        self.assertEqual(repr(a2), repr(l2))
        self.assertEqual(str(a2), "[0, 1, 2]")
        self.assertEqual(repr(a2), "[0, 1, 2]")
        # A self-referential list shows the recursion marker '[...]'.
        a2.append(a2)
        a2.append(3)
        self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
        self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
        # Deeply nested (non-cyclic) lists blow the recursion limit in repr().
        l0 = []
        for i in range(sys.getrecursionlimit() + 100):
            l0 = [l0]
        self.assertRaises(RuntimeError, repr, l0)

    def test_print(self):
        # Printing a self-referential list must produce the same text as repr().
        d = self.type2test(range(200))
        d.append(d)
        d.extend(range(200,400))
        d.append(d)
        d.append(400)
        try:
            with open(support.TESTFN, "w") as fo:
                fo.write(str(d))
            with open(support.TESTFN, "r") as fo:
                self.assertEqual(fo.read(), repr(d))
        finally:
            os.remove(support.TESTFN)

    def test_set_subscript(self):
        a = self.type2test(range(20))
        # Extended-slice assignment: zero step, non-iterable RHS, and a RHS of
        # the wrong length must all raise.
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
        self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
        self.assertRaises(TypeError, a.__getitem__, 'x', 1)
        a[slice(2,10,3)] = [1,2,3]
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
                                            9, 10, 11, 12, 13, 14, 15,
                                            16, 17, 18, 19]))

    def test_reversed(self):
        a = self.type2test(range(20))
        r = reversed(a)
        self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
        self.assertRaises(StopIteration, next, r)
        self.assertEqual(list(reversed(self.type2test())),
                         self.type2test())
        # Bug 3689: make sure list-reversed-iterator doesn't have __len__
        self.assertRaises(TypeError, len, reversed([1,2,3]))

    def test_setitem(self):
        a = self.type2test([0, 1])
        a[0] = 0
        a[1] = 100
        self.assertEqual(a, self.type2test([0, 100]))
        # Negative indices count from the end.
        a[-1] = 200
        self.assertEqual(a, self.type2test([0, 200]))
        a[-2] = 100
        self.assertEqual(a, self.type2test([100, 200]))
        # Out-of-range indices raise IndexError in both directions.
        self.assertRaises(IndexError, a.__setitem__, -3, 200)
        self.assertRaises(IndexError, a.__setitem__, 2, 200)
        a = self.type2test([])
        self.assertRaises(IndexError, a.__setitem__, 0, 200)
        self.assertRaises(IndexError, a.__setitem__, -1, 200)
        self.assertRaises(TypeError, a.__setitem__)
        a = self.type2test([0,1,2,3,4])
        a[0] = 1
        a[1] = 2
        a[2] = 3
        self.assertEqual(a, self.type2test([1,2,3,3,4]))
        a[0] = 5
        a[1] = 6
        a[2] = 7
        self.assertEqual(a, self.type2test([5,6,7,3,4]))
        a[-2] = 88
        a[-1] = 99
        self.assertEqual(a, self.type2test([5,6,7,88,99]))
        a[-2] = 8
        a[-1] = 9
        self.assertEqual(a, self.type2test([5,6,7,8,9]))

    def test_delitem(self):
        a = self.type2test([0, 1])
        del a[1]
        self.assertEqual(a, [0])
        del a[0]
        self.assertEqual(a, [])
        a = self.type2test([0, 1])
        del a[-2]
        self.assertEqual(a, [1])
        del a[-1]
        self.assertEqual(a, [])
        a = self.type2test([0, 1])
        self.assertRaises(IndexError, a.__delitem__, -3)
        self.assertRaises(IndexError, a.__delitem__, 2)
        a = self.type2test([])
        self.assertRaises(IndexError, a.__delitem__, 0)
        self.assertRaises(TypeError, a.__delitem__)

    def test_setslice(self):
        l = [0, 1]
        a = self.type2test(l)
        # Exhaustively replace every [i:] / [:i] / [i:j] slice with itself;
        # the list must be unchanged each time.
        for i in range(-3, 4):
            a[:i] = l[:i]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[:i] = a[:i]
            self.assertEqual(a2, a)
            a[i:] = l[i:]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[i:] = a[i:]
            self.assertEqual(a2, a)
            for j in range(-3, 4):
                a[i:j] = l[i:j]
                self.assertEqual(a, l)
                a2 = a[:]
                a2[i:j] = a[i:j]
                self.assertEqual(a2, a)
        aa2 = a2[:]
        aa2[:0] = [-2, -1]
        self.assertEqual(aa2, [-2, -1, 0, 1])
        aa2[0:] = []
        self.assertEqual(aa2, [])
        # Overlapping assignment from the list into a slice of itself.
        a = self.type2test([1, 2, 3, 4, 5])
        a[:-1] = a
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:-1] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
        a = self.type2test([])
        a[:] = tuple(range(10))
        self.assertEqual(a, self.type2test(range(10)))
        self.assertRaises(TypeError, a.__setitem__, slice(0, 1, 5))
        self.assertRaises(TypeError, a.__setitem__)

    def test_delslice(self):
        a = self.type2test([0, 1])
        del a[1:2]
        del a[0:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[1:2]
        del a[0:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[-2:-1]
        self.assertEqual(a, self.type2test([1]))
        a = self.type2test([0, 1])
        del a[-2:-1]
        self.assertEqual(a, self.type2test([1]))
        a = self.type2test([0, 1])
        del a[1:]
        del a[:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[1:]
        del a[:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[-1:]
        self.assertEqual(a, self.type2test([0]))
        a = self.type2test([0, 1])
        del a[-1:]
        self.assertEqual(a, self.type2test([0]))
        a = self.type2test([0, 1])
        del a[:]
        self.assertEqual(a, self.type2test([]))

    def test_append(self):
        a = self.type2test([])
        a.append(0)
        a.append(1)
        a.append(2)
        self.assertEqual(a, self.type2test([0, 1, 2]))
        self.assertRaises(TypeError, a.append)

    def test_extend(self):
        a1 = self.type2test([0])
        a2 = self.type2test((0, 1))
        a = a1[:]
        a.extend(a2)
        self.assertEqual(a, a1 + a2)
        a.extend(self.type2test([]))
        self.assertEqual(a, a1 + a2)
        # Extending with self doubles the contents exactly once (a snapshot of
        # the original items, not an infinite loop).
        a.extend(a)
        self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
        a = self.type2test("spam")
        a.extend("eggs")
        self.assertEqual(a, list("spameggs"))
        self.assertRaises(TypeError, a.extend, None)
        self.assertRaises(TypeError, a.extend)

    def test_insert(self):
        a = self.type2test([0, 1, 2])
        a.insert(0, -2)
        a.insert(1, -1)
        a.insert(2, 0)
        self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
        b = a[:]
        # Out-of-range insert positions clamp to the ends, like slicing.
        b.insert(-2, "foo")
        b.insert(-200, "left")
        b.insert(200, "right")
        self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
        self.assertRaises(TypeError, a.insert)

    def test_pop(self):
        a = self.type2test([-1, 0, 1])
        a.pop()
        self.assertEqual(a, [-1, 0])
        a.pop(0)
        self.assertEqual(a, [0])
        self.assertRaises(IndexError, a.pop, 5)
        a.pop(0)
        self.assertEqual(a, [])
        self.assertRaises(IndexError, a.pop)
        self.assertRaises(TypeError, a.pop, 42, 42)
        # NOTE(review): this trailing assignment is never used by any
        # assertion — it appears to be a leftover fixture.
        a = self.type2test([0, 10, 20, 30, 40])

    def test_remove(self):
        a = self.type2test([0, 0, 1])
        a.remove(1)
        self.assertEqual(a, [0, 0])
        a.remove(0)
        self.assertEqual(a, [0])
        a.remove(0)
        self.assertEqual(a, [])
        self.assertRaises(ValueError, a.remove, 0)
        self.assertRaises(TypeError, a.remove)

        class BadExc(Exception):
            pass

        # An __eq__ that raises must propagate out of remove().
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.remove, BadCmp())

        class BadCmp2:
            def __eq__(self, other):
                raise BadExc()
        d = self.type2test('abcdefghcij')
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghcij'))
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghij'))
        self.assertRaises(ValueError, d.remove, 'c')
        self.assertEqual(d, self.type2test('abdefghij'))
        # Handle comparison errors
        d = self.type2test(['a', 'b', BadCmp2(), 'c'])
        e = self.type2test(d)
        self.assertRaises(BadExc, d.remove, 'c')
        for x, y in zip(d, e):
            # verify that original order and values are retained.
            self.assertIs(x, y)

    def test_count(self):
        a = self.type2test([0, 1, 2])*3
        self.assertEqual(a.count(0), 3)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(3), 0)
        self.assertRaises(TypeError, a.count)

        class BadExc(Exception):
            pass

        # count() must propagate exceptions raised by __eq__.
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        self.assertRaises(BadExc, a.count, BadCmp())

    def test_index(self):
        u = self.type2test([0, 1])
        self.assertEqual(u.index(0), 0)
        self.assertEqual(u.index(1), 1)
        self.assertRaises(ValueError, u.index, 2)
        u = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(u.count(0), 2)
        self.assertEqual(u.index(0), 2)
        self.assertEqual(u.index(0, 2), 2)
        self.assertEqual(u.index(-2, -10), 0)
        self.assertEqual(u.index(0, 3), 3)
        self.assertEqual(u.index(0, 3, 4), 3)
        self.assertRaises(ValueError, u.index, 2, 0, -10)
        self.assertRaises(TypeError, u.index)

        class BadExc(Exception):
            pass

        # index() must propagate exceptions raised by __eq__.
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.index, BadCmp())
        a = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(a.index(0), 2)
        self.assertEqual(a.index(0, 2), 2)
        self.assertEqual(a.index(0, -4), 2)
        self.assertEqual(a.index(-2, -10), 0)
        self.assertEqual(a.index(0, 3), 3)
        self.assertEqual(a.index(0, -3), 3)
        self.assertEqual(a.index(0, 3, 4), 3)
        self.assertEqual(a.index(0, -3, -2), 3)
        # Huge start/stop values are clamped, not rejected.
        self.assertEqual(a.index(0, -4*sys.maxsize, 4*sys.maxsize), 2)
        self.assertRaises(ValueError, a.index, 0, 4*sys.maxsize,-4*sys.maxsize)
        self.assertRaises(ValueError, a.index, 2, 0, -10)
        a.remove(0)
        self.assertRaises(ValueError, a.index, 2, 0, 4)
        self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
        # Test modifying the list during index's iteration
        class EvilCmp:
            def __init__(self, victim):
                self.victim = victim
            def __eq__(self, other):
                del self.victim[:]
                return False
        a = self.type2test()
        a[:] = [EvilCmp(a) for _ in range(100)]
        # This used to seg fault before patch #1005778
        self.assertRaises(ValueError, a.index, None)

    def test_reverse(self):
        u = self.type2test([-2, -1, 0, 1, 2])
        u2 = u[:]
        u.reverse()
        self.assertEqual(u, [2, 1, 0, -1, -2])
        u.reverse()
        self.assertEqual(u, u2)
        self.assertRaises(TypeError, u.reverse, 42)

    def test_sort(self):
        u = self.type2test([1, 0])
        u.sort()
        self.assertEqual(u, [0, 1])
        u = self.type2test([2,1,0,-1,-2])
        u.sort()
        self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
        self.assertRaises(TypeError, u.sort, 42, 42)

        # Descending comparison wrapped with cmp_to_key.
        def revcmp(a, b):
            if a == b:
                return 0
            elif a < b:
                return 1
            else: # a > b
                return -1
        u.sort(key=cmp_to_key(revcmp))
        self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
        # The following dumps core in unpatched Python 1.5:
        def myComparison(x,y):
            xmod, ymod = x%3, y%7
            if xmod == ymod:
                return 0
            elif xmod < ymod:
                return -1
            else: # xmod > ymod
                return 1
        z = self.type2test(range(12))
        z.sort(key=cmp_to_key(myComparison))
        self.assertRaises(TypeError, z.sort, 2)

        # Mutating the list from inside the comparison must raise ValueError.
        def selfmodifyingComparison(x,y):
            z.append(1)
            if x == y:
                return 0
            elif x < y:
                return -1
            else: # x > y
                return 1
        self.assertRaises(ValueError, z.sort,
                          key=cmp_to_key(selfmodifyingComparison))
        self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)

    def test_slice(self):
        u = self.type2test("spam")
        u[:2] = "h"
        self.assertEqual(u, list("ham"))

    def test_iadd(self):
        super().test_iadd()
        u = self.type2test([0, 1])
        u2 = u
        # += must mutate in place, keeping object identity.
        u += [2, 3]
        self.assertIs(u, u2)
        u = self.type2test("spam")
        u += "eggs"
        self.assertEqual(u, self.type2test("spameggs"))
        self.assertRaises(TypeError, u.__iadd__, None)

    def test_imul(self):
        u = self.type2test([0, 1])
        u *= 3
        self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
        u *= 0
        self.assertEqual(u, self.type2test([]))
        # In-place repeat of an empty sequence keeps object identity.
        s = self.type2test([])
        oldid = id(s)
        s *= 10
        self.assertEqual(id(s), oldid)

    def test_extendedslicing(self):
        # subscript
        a = self.type2test([0,1,2,3,4])
        # deletion
        del a[::2]
        self.assertEqual(a, self.type2test([1,3]))
        a = self.type2test(range(5))
        del a[1::2]
        self.assertEqual(a, self.type2test([0,2,4]))
        a = self.type2test(range(5))
        del a[1::-2]
        self.assertEqual(a, self.type2test([0,2,3,4]))
        a = self.type2test(range(10))
        del a[::1000]
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
        # assignment
        a = self.type2test(range(10))
        a[::2] = [-1]*5
        self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
        a = self.type2test(range(10))
        a[::-4] = [10]*3
        self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = self.type2test(range(4))
        a[::-1] = a
        self.assertEqual(a, self.type2test([3, 2, 1, 0]))
        # Slice objects, explicit slices, and trailing-colon syntax must all
        # behave identically.
        a = self.type2test(range(10))
        b = a[:]
        c = a[:]
        a[2:3] = self.type2test(["two", "elements"])
        b[slice(2,3)] = self.type2test(["two", "elements"])
        c[2:3:] = self.type2test(["two", "elements"])
        self.assertEqual(a, b)
        self.assertEqual(a, c)
        a = self.type2test(range(10))
        a[::2] = tuple(range(5))
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
        # test issue7788
        a = self.type2test(range(10))
        del a[9::1<<333]

    def test_constructor_exception_handling(self):
        # Bug #1242657
        class F(object):
            def __iter__(self):
                raise KeyboardInterrupt
        self.assertRaises(KeyboardInterrupt, list, F())
/* Copyright 2010 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Entropy encoding (Huffman) utilities. */
#ifndef BROTLI_ENC_ENTROPY_ENCODE_H_
#define BROTLI_ENC_ENTROPY_ENCODE_H_
#include "../common/platform.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* A node of a Huffman tree. */
typedef struct HuffmanTree {
  /* Total population count covered by this (sub)tree. */
  uint32_t total_count_;
  /* Index of the left child within the node pool (inner nodes only). */
  int16_t index_left_;
  /* Index of the right child for inner nodes, or the symbol value for
     leaves — as the field name suggests. */
  int16_t index_right_or_value_;
} HuffmanTree;
/* Initializes a single tree node with the given population count and
   left/right (or leaf-value) indices. */
static BROTLI_INLINE void InitHuffmanTree(HuffmanTree* self, uint32_t count,
    int16_t left, int16_t right) {
  self->total_count_ = count;
  self->index_left_ = left;
  self->index_right_or_value_ = right;
}
/* Returns 1 is assignment of depths succeeded, otherwise 0. */
BROTLI_INTERNAL BROTLI_BOOL BrotliSetDepth(
int p, HuffmanTree* pool, uint8_t* depth, int max_depth);
/* This function will create a Huffman tree.
The (data,length) contains the population counts.
The tree_limit is the maximum bit depth of the Huffman codes.
The depth contains the tree, i.e., how many bits are used for
the symbol.
The actual Huffman tree is constructed in the tree[] array, which has to
be at least 2 * length + 1 long.
See http://en.wikipedia.org/wiki/Huffman_coding */
BROTLI_INTERNAL void BrotliCreateHuffmanTree(const uint32_t* data,
const size_t length,
const int tree_limit,
HuffmanTree* tree,
uint8_t* depth);
/* Change the population counts in a way that the consequent
Huffman tree compression, especially its RLE-part will be more
likely to compress this data more efficiently.
length contains the size of the histogram.
counts contains the population counts.
good_for_rle is a buffer of at least length size */
BROTLI_INTERNAL void BrotliOptimizeHuffmanCountsForRle(
size_t length, uint32_t* counts, uint8_t* good_for_rle);
/* Write a Huffman tree from bit depths into the bit-stream representation
of a Huffman tree. The generated Huffman tree is to be compressed once
more using a Huffman tree */
BROTLI_INTERNAL void BrotliWriteHuffmanTree(const uint8_t* depth,
size_t length,
size_t* tree_size,
uint8_t* tree,
uint8_t* extra_bits_data);
/* Get the actual bit values for a tree of bit depths. */
BROTLI_INTERNAL void BrotliConvertBitDepthsToSymbols(const uint8_t* depth,
size_t len,
uint16_t* bits);
BROTLI_INTERNAL extern BROTLI_MODEL("small") const size_t kBrotliShellGaps[6];
/* Input size optimized Shell sort. */
/* Ordering predicate for HuffmanTree nodes; returns true when the first
   argument must sort before the second. */
typedef BROTLI_BOOL (*HuffmanTreeComparator)(
    const HuffmanTree*, const HuffmanTree*);
/* Sorts |items| (length |n|) in place according to |comparator|.
   Small inputs use insertion sort; larger ones use Shell sort with gaps
   taken from kBrotliShellGaps. */
static BROTLI_INLINE void SortHuffmanTreeItems(HuffmanTree* items,
    const size_t n, HuffmanTreeComparator comparator) {
  if (n < 13) {
    /* Insertion sort. */
    size_t i;
    for (i = 1; i < n; ++i) {
      HuffmanTree tmp = items[i];
      size_t k = i;
      size_t j = i - 1;
      while (comparator(&tmp, &items[j])) {
        items[k] = items[j];
        k = j;
        /* j is unsigned: stop before decrementing it past zero. */
        if (!j--) break;
      }
      items[k] = tmp;
    }
    return;
  } else {
    /* Shell sort. */
    /* For n < 57 the first gap entries are skipped — presumably because the
       larger gaps in kBrotliShellGaps cannot help short arrays (assumes the
       table is in descending order; confirm against its definition). */
    int g = n < 57 ? 2 : 0;
    for (; g < 6; ++g) {
      size_t gap = kBrotliShellGaps[g];
      size_t i;
      for (i = gap; i < n; ++i) {
        size_t j = i;
        HuffmanTree tmp = items[i];
        /* Standard gapped insertion: shift larger elements up by |gap|. */
        for (; j >= gap && comparator(&tmp, &items[j - gap]); j -= gap) {
          items[j] = items[j - gap];
        }
        items[j] = tmp;
      }
    }
  }
}
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_ENTROPY_ENCODE_H_ */ | c | github | https://github.com/nodejs/node | deps/brotli/c/enc/entropy_encode.h |
#!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_nat
short_description: add remove nat rules in a gateway in a vca
description:
- Adds or removes nat rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
purge_rules:
description:
- If set to true, it will delete all rules in the gateway that are not given as paramter to this module.
required: false
default: false
nat_rules:
description:
- A list of rules to be added to the gateway, Please see examples on valid entries
required: True
default: false
extends_documentation_fragment: vca.documentation
'''
EXAMPLES = '''
#An example for a source nat
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: SNAT
original_ip: 192.168.2.10
translated_ip: 107.189.95.208
#example for a DNAT
- hosts: localhost
connection: local
tasks:
- vca_nat:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'present'
nat_rules:
- rule_type: DNAT
original_ip: 107.189.95.208
original_port: 22
translated_ip: 192.168.2.10
translated_port: 22
'''
import time
import xmltodict
VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
                   'translated_ip', 'translated_port', 'protocol']


def validate_nat_rules(nat_rules):
    """Normalize a list of NAT rule dicts in place and return the same list.

    Each rule must be a dict whose keys come from VALID_RULE_KEYS.  Missing
    fields default to 'any' (rule_type defaults to DNAT), ports are passed
    through str(), and every value is lower-cased.  Raises VcaError when a
    rule is not a dict or carries an unknown key.
    """
    for entry in nat_rules:
        if not isinstance(entry, dict):
            raise VcaError("nat rules must be a list of dictionaries, "
                           "Please check", valid_keys=VALID_RULE_KEYS)
        for key in entry.keys():
            if key not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in nat rules, please "
                               "check above.." % key, valid_keys=VALID_RULE_KEYS)
        # Ports may arrive as integers, so force them through str() first.
        for port_key in ('original_port', 'translated_port'):
            entry[port_key] = str(entry.get(port_key, 'any')).lower()
        for field, default in (('original_ip', 'any'),
                               ('translated_ip', 'any'),
                               ('protocol', 'any'),
                               ('rule_type', 'DNAT')):
            entry[field] = entry.get(field, default).lower()
    return nat_rules
def nat_rules_to_dict(nat_rules):
    """Convert NAT rule objects (as returned by gateway.get_nat_rules())
    into plain dicts comparable with the output of validate_nat_rules().

    Every field is lower-cased; empty ports/protocols collapse to 'any'.
    """
    def _as_dict(entry):
        # Each rule object exposes its fields via a nested GatewayNatRule.
        gw = entry.get_GatewayNatRule()
        return dict(
            rule_type=entry.get_RuleType().lower(),
            original_ip=gw.get_OriginalIp().lower(),
            original_port=(gw.get_OriginalPort().lower() or 'any'),
            translated_ip=gw.get_TranslatedIp().lower(),
            translated_port=(gw.get_TranslatedPort().lower() or 'any'),
            protocol=(gw.get_Protocol().lower() or 'any')
        )
    return [_as_dict(entry) for entry in nat_rules]
def rule_to_string(rule):
    """Render a NAT rule dict as a human-readable 'key=value, ...' string.

    Bug fix: the original returned ', '.join(string) — `string` is undefined
    here (the accumulator is named `strings`), so any call raised NameError.
    """
    strings = list()
    for key, value in rule.items():
        strings.append('%s=%s' % (key, value))
    return ', '.join(strings)
def main():
    """Ansible entry point: reconcile the gateway's NAT rules with nat_rules.

    Supports check mode.  When purge_rules is set, all existing rules are
    deleted first; then any desired rule not present is added and any present
    rule not desired is removed.
    """
    # Base vCA connection options plus this module's own parameters.
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            nat_rules = dict(type='list', default=[]),
            gateway_name = dict(default='gateway'),
            purge_rules = dict(default=False, type='bool'),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )
    module = AnsibleModule(argument_spec, supports_check_mode=True)
    vdc_name = module.params.get('vdc_name')
    state = module.params['state']
    nat_rules = module.params['nat_rules']
    gateway_name = module.params['gateway_name']
    purge_rules = module.params['purge_rules']
    if not purge_rules and not nat_rules:
        module.fail_json(msg='Must define purge_rules or nat_rules')
    vca = vca_login(module)
    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)
    try:
        # Normalizes the rule dicts in place (defaults, lower-casing).
        desired_rules = validate_nat_rules(nat_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)
    rules = gateway.get_nat_rules()
    result = dict(changed=False, rules_purged=0)
    deletions = 0
    additions = 0
    # Optional purge phase: wipe every existing rule so the desired set is
    # rebuilt from scratch below.
    if purge_rules is True and len(rules) > 0:
        result['rules_purged'] = len(rules)
        deletions = result['rules_purged']
        rules = list()
        if not module.check_mode:
            gateway.del_all_nat_rules()
            task = gateway.save_services_configuration()
            vca.block_until_completed(task)
            rules = gateway.get_nat_rules()
        result['changed'] = True
    current_rules = nat_rules_to_dict(rules)
    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules
    # Add every desired rule that is not already configured.
    for rule in desired_rules:
        if rule not in current_rules:
            additions += 1
            if not module.check_mode:
                gateway.add_nat_rule(**rule)
            result['changed'] = True
    result['rules_added'] = additions
    result['delete_rule'] = list()
    result['delete_rule_rc'] = list()
    # Remove every configured rule that is no longer desired.
    for rule in current_rules:
        if rule not in desired_rules:
            deletions += 1
            if not module.check_mode:
                result['delete_rule'].append(rule)
                rc = gateway.del_nat_rule(**rule)
                result['delete_rule_rc'].append(rc)
            result['changed'] = True
    result['rules_deleted'] = deletions
    # Persist the new configuration only if something actually changed.
    if not module.check_mode and (additions > 0 or deletions > 0):
        task = gateway.save_services_configuration()
        vca.block_until_completed(task)
    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
# orm/evaluator.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import operator
from ..sql import operators
class UnevaluatableError(Exception):
    """Raised when a SQL clause cannot be evaluated against Python objects."""
# Operators that can be evaluated directly by calling the operator object on
# plain Python values (arithmetic and comparisons).
_straight_ops = set(getattr(operators, op)
                    for op in ('add', 'mul', 'sub',
                               'div',
                               'mod', 'truediv',
                               'lt', 'le', 'ne', 'gt', 'ge', 'eq'))

# SQL string/containment operators that have no direct Python equivalent and
# are therefore not supported by the evaluator.
_notimplemented_ops = set(getattr(operators, op)
                          for op in ('like_op', 'notlike_op', 'ilike_op',
                                     'notilike_op', 'between_op', 'in_op',
                                     'notin_op', 'endswith_op', 'concat_op'))
class EvaluatorCompiler(object):
    """Compile a SQLAlchemy clause tree into a Python callable.

    The callable accepts a mapped object and evaluates the clause against it
    in memory.  SQL three-valued logic is emulated: ``None`` plays the role
    of SQL NULL and is propagated through comparisons and boolean
    combinations instead of being coerced to True/False.
    """

    def __init__(self, target_cls=None):
        # Optional mapped class; when set, columns belonging to any other
        # mapper are rejected in visit_column().
        self.target_cls = target_cls

    def process(self, clause):
        """Dispatch on clause.__visit_name__ (e.g. "column" -> visit_column).

        Raises UnevaluatableError for node types without a visit_* method.
        """
        meth = getattr(self, "visit_%s" % clause.__visit_name__, None)
        if not meth:
            raise UnevaluatableError(
                "Cannot evaluate %s" % type(clause).__name__)
        return meth(clause)

    def visit_grouping(self, clause):
        # Parentheses have no effect on evaluation; unwrap.
        return self.process(clause.element)

    def visit_null(self, clause):
        # SQL NULL -> Python None.
        return lambda obj: None

    def visit_false(self, clause):
        return lambda obj: False

    def visit_true(self, clause):
        return lambda obj: True

    def visit_column(self, clause):
        # Resolve the column to the mapped attribute name, preferring the
        # mapper's column->property mapping when annotation data is present.
        if 'parentmapper' in clause._annotations:
            parentmapper = clause._annotations['parentmapper']
            if self.target_cls and not issubclass(
                    self.target_cls, parentmapper.class_):
                raise UnevaluatableError(
                    "Can't evaluate criteria against alternate class %s" %
                    parentmapper.class_
                )
            key = parentmapper._columntoproperty[clause].key
        else:
            key = clause.key

        get_corresponding_attr = operator.attrgetter(key)
        return lambda obj: get_corresponding_attr(obj)

    def visit_clauselist(self, clause):
        evaluators = list(map(self.process, clause.clauses))
        if clause.operator is operators.or_:
            # SQL OR: any true operand wins; otherwise the result is NULL
            # (None) if any operand was NULL, else false.
            def evaluate(obj):
                has_null = False
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if value:
                        return True
                    has_null = has_null or value is None
                if has_null:
                    return None
                return False
        elif clause.operator is operators.and_:
            # SQL AND: short-circuits on the first falsy operand, yielding
            # NULL (None) when that operand was NULL, else false.
            def evaluate(obj):
                for sub_evaluate in evaluators:
                    value = sub_evaluate(obj)
                    if not value:
                        if value is None:
                            return None
                        return False
                return True
        else:
            raise UnevaluatableError(
                "Cannot evaluate clauselist with operator %s" %
                clause.operator)

        return evaluate

    def visit_binary(self, clause):
        eval_left, eval_right = list(map(self.process,
                                         [clause.left, clause.right]))
        operator = clause.operator
        if operator is operators.is_:
            # IS / IS NOT compare by equality on Python values (None-aware).
            def evaluate(obj):
                return eval_left(obj) == eval_right(obj)
        elif operator is operators.isnot:
            def evaluate(obj):
                return eval_left(obj) != eval_right(obj)
        elif operator in _straight_ops:
            # Any NULL operand makes the whole comparison NULL.
            def evaluate(obj):
                left_val = eval_left(obj)
                right_val = eval_right(obj)
                if left_val is None or right_val is None:
                    return None
                # NOTE(review): the operands are evaluated a second time here
                # — left_val/right_val are unused in the call; harmless only
                # if attribute access is side-effect free.
                return operator(eval_left(obj), eval_right(obj))
        else:
            raise UnevaluatableError(
                "Cannot evaluate %s with operator %s" %
                (type(clause).__name__, clause.operator))

        return evaluate

    def visit_unary(self, clause):
        eval_inner = self.process(clause.element)
        if clause.operator is operators.inv:
            # SQL NOT: NOT NULL is still NULL.
            def evaluate(obj):
                value = eval_inner(obj)
                if value is None:
                    return None
                return not value
            return evaluate
        raise UnevaluatableError(
            "Cannot evaluate %s with operator %s" %
            (type(clause).__name__, clause.operator))

    def visit_bindparam(self, clause):
        # Capture the bound literal value at compile time.
        val = clause.value
        return lambda obj: val
from base64 import urlsafe_b64encode, urlsafe_b64decode
from hashlib import sha256
import json
from Crypto.Cipher import AES
from Crypto import Random
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from django.views.decorators.http import require_GET, require_POST
from edxmako.shortcuts import render_to_response
from notification_prefs import NOTIFICATION_PREF_KEY
from user_api.models import UserPreference
class UsernameDecryptionException(Exception):
    """Raised when an unsubscribe token cannot be decrypted to a username."""
class UsernameCipher(object):
    """
    A transformation of a username to/from an opaque token

    The purpose of the token is to make one-click unsubscribe links that don't
    require the user to log in. To prevent users from unsubscribing other users,
    we must ensure the token cannot be computed by anyone who has this
    source code. The token must also be embeddable in a URL.

    Thus, we take the following steps to encode (and do the inverse to decode):
    1. Pad the UTF-8 encoding of the username with PKCS#7 padding to match the
       AES block length
    2. Generate a random AES block length initialization vector
    3. Use AES-256 (with a hash of settings.SECRET_KEY as the encryption key)
       in CBC mode to encrypt the username
    4. Prepend the IV to the encrypted value to allow for initialization of the
       decryption cipher
    5. base64url encode the result
    """
    @staticmethod
    def _get_aes_cipher(initialization_vector):
        # Derive a 256-bit key from SECRET_KEY via SHA-256, then build a
        # CBC-mode AES cipher seeded with the given IV.
        hash_ = sha256()
        hash_.update(settings.SECRET_KEY)
        return AES.new(hash_.digest(), AES.MODE_CBC, initialization_vector)

    @staticmethod
    def _add_padding(input_str):
        """Return `input_str` with PKCS#7 padding added to match AES block length"""
        # PKCS#7: append N bytes each of value N (1 <= N <= block size).
        padding_len = AES.block_size - len(input_str) % AES.block_size
        return input_str + padding_len * chr(padding_len)

    @staticmethod
    def _remove_padding(input_str):
        """Return `input_str` with PKCS#7 padding trimmed to match AES block length"""
        # The last byte encodes the pad length; reject impossible values.
        num_pad_bytes = ord(input_str[-1])
        if num_pad_bytes < 1 or num_pad_bytes > AES.block_size or num_pad_bytes >= len(input_str):
            raise UsernameDecryptionException("padding")
        return input_str[:-num_pad_bytes]

    @staticmethod
    def encrypt(username):
        """Encode `username` into an opaque URL-safe unsubscribe token."""
        # Fresh random IV per token; prepended so decrypt() can recover it.
        initialization_vector = Random.new().read(AES.block_size)
        aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)
        return urlsafe_b64encode(
            initialization_vector +
            aes_cipher.encrypt(UsernameCipher._add_padding(username.encode("utf-8")))
        )

    @staticmethod
    def decrypt(token):
        """Decode a token back to a username.

        Raises UsernameDecryptionException (with a short stage name as the
        message) when any decoding step fails.
        """
        try:
            base64_decoded = urlsafe_b64decode(token)
        except TypeError:
            raise UsernameDecryptionException("base64url")

        # The payload must at least contain the prepended IV.
        if len(base64_decoded) < AES.block_size:
            raise UsernameDecryptionException("initialization_vector")

        initialization_vector = base64_decoded[:AES.block_size]
        aes_encrypted = base64_decoded[AES.block_size:]
        aes_cipher = UsernameCipher._get_aes_cipher(initialization_vector)

        try:
            decrypted = aes_cipher.decrypt(aes_encrypted)
        except ValueError:
            raise UsernameDecryptionException("aes")

        return UsernameCipher._remove_padding(decrypted)
@require_POST
def ajax_enable(request):
    """
    A view that enables notifications for the authenticated user

    This view should be invoked by an AJAX POST call. It returns status 204
    (no content) or an error. If notifications were already enabled for this
    user, this has no effect. Otherwise, a preference is created with the
    unsubscribe token (an encryption of the username) as the value.
    """
    if not request.user.is_authenticated():
        raise PermissionDenied

    # Idempotent: only create the preference if it does not exist yet.
    UserPreference.objects.get_or_create(
        user=request.user,
        key=NOTIFICATION_PREF_KEY,
        defaults={
            "value": UsernameCipher.encrypt(request.user.username)
        }
    )

    return HttpResponse(status=204)
@require_POST
def ajax_disable(request):
    """
    A view that disables notifications for the authenticated user

    This view should be invoked by an AJAX POST call. It returns status 204
    (no content) or an error.
    """
    if not request.user.is_authenticated():
        raise PermissionDenied

    # Removing the preference row is what turns notifications off.
    UserPreference.objects.filter(
        user=request.user,
        key=NOTIFICATION_PREF_KEY
    ).delete()

    return HttpResponse(status=204)
@require_GET
def ajax_status(request):
    """
    A view that retrieves notifications status for the authenticated user.

    This view should be invoked by an AJAX GET call. It returns status 200,
    with a JSON-formatted payload, or an error.
    """
    if not request.user.is_authenticated():
        raise PermissionDenied

    qs = UserPreference.objects.filter(
        user=request.user,
        key=NOTIFICATION_PREF_KEY
    )

    # "status" is the number of matching preference rows — presumably 0 or 1
    # given one row per (user, key); confirm against the model's constraints.
    return HttpResponse(json.dumps({"status":len(qs)}), content_type="application/json")
@require_GET
def set_subscription(request, token, subscribe):  # pylint: disable=unused-argument
    """
    A view that disables or re-enables notifications for a user who may not be authenticated

    This view is meant to be the target of an unsubscribe link. The request
    must be a GET, and the `token` parameter must decrypt to a valid username.
    The subscribe flag feature controls whether the view subscribes or unsubscribes the user, with subscribe=True
    used to "undo" accidentally clicking on the unsubscribe link

    A 405 will be returned if the request method is not GET. A 404 will be
    returned if the token parameter does not decrypt to a valid username. On
    success, the response will contain a page indicating success.
    """
    try:
        # The token carries the username; no session/auth is required.
        username = UsernameCipher().decrypt(token.encode())
        user = User.objects.get(username=username)
    except UnicodeDecodeError:
        raise Http404("base64url")
    except UsernameDecryptionException as exn:
        # exn.message names the decoding stage that failed.
        raise Http404(exn.message)
    except User.DoesNotExist:
        raise Http404("username")

    if subscribe:
        # Re-enable: recreate the preference (idempotent) and confirm.
        UserPreference.objects.get_or_create(user=user,
                                             key=NOTIFICATION_PREF_KEY,
                                             defaults={
                                                 "value": UsernameCipher.encrypt(user.username)
                                             })
        return render_to_response("resubscribe.html", {'token': token})
    else:
        # Unsubscribe: drop the preference row and confirm.
        UserPreference.objects.filter(user=user, key=NOTIFICATION_PREF_KEY).delete()
        return render_to_response("unsubscribe.html", {'token': token})
from string import *
import urllib, re
'''
RaiClick for XBMC 1.1.1
Copyright (C) 2005-2011 Angelo Conforti <angeloxx@angeloxx.it>
http://www.angeloxx.it
Lo script e' un semplice browser del sito rai.tv, tutti i diritti
sono di proprieta' della RAI
'''
# Format templates for the rai.tv endpoints; the '%s' slots are filled in
# per request by the list* functions below.
urlBase = "http://www.rai.tv/%s"
# Theme search page; the query string selects the theme.
urlBaseThemes = "http://www.rai.tv/dl/RaiTV/cerca_tematiche.html?%s"
# Paged video list: first slot is the ContentSet id, second the page number.
urlListItems = "http://www.rai.tv/dl/RaiTV/programmi/liste/%s-V-%s.html"
'''
There are three level of contents:
Themes (Musica,Spettacoli) -> listThemes\urlBaseThemes
ThemeItems (Sanremo,XFactor) -> listThemeItems\urlBaseThemes?Theme
ListItems (Extra,Sintesi Giornalierei) -> listSubItems\urlBase + ThemeItem
VideoList (Videos) -> listItems\urlListItems + SubItem + ? + ?
'''
''' Return a list with <description> (and GET argument) of each theme '''
def listThemes():
    """Fetch the themes index page and return the GET argument of every theme link."""
    page = urllib.urlopen(urlBaseThemes % ("Nope")).read()
    # Each theme anchor carries its identifier in the href query string.
    matches = re.findall("<a title=\".*?\" href=\"\?(.*?)\">.*</a>", page)
    return list(matches)
''' Return a list with <url> and <description> of each section '''
def listThemeItems(theme):
    """Return (url, description) pairs for every section of ``theme``."""
    print("listThemeItems: Theme %s" % (theme))
    html = urllib.urlopen(urlBaseThemes % theme).read()
    # Collapse the markup onto one line so a single regex can span tags.
    html = html.replace("\n", "")
    return list(re.findall("<a href=\"(\/dl.*?)\">.*?<div class=\"internal\">(.*?)</div>", html))
''' Return a list with <list-url> and <description> of each sub-section '''
def listSubItems(startURL):
    """Return (content-set id, description) pairs for each sub-section."""
    # The "-page" variant of the URL serves the contents fragment.
    pageURL = startURL.replace(".html", "-page.html?LOAD_CONTENTS")
    print("listSubItems: startURL %s" % (pageURL))
    html = urllib.urlopen(urlBase % pageURL).read()
    return list(re.findall("<a target=\"_top\" href=\"#\" id=\"(ContentSet.*?)\">(.*?)</a>", html))
''' Return a list with <url> and <description> of each video '''
def listItems(startURL):
    """Collect (media url, title) pairs for every video of a sub-section.

    Pages are fetched one by one until the server answers with a body
    containing "404 Not Found", which marks the end of the listing.
    """
    items = []
    print("listItems: startURL %s" % (startURL))
    page = 0
    while True:
        data = urllib.urlopen(urlListItems % (startURL, page)).read()
        # BUG FIX: the original tested ``find(...) > 0`` which missed a
        # marker appearing at index 0; ``!= -1`` matches at any position.
        if data.find("404 Not Found") != -1:
            print("listItems: startURL %s stopped at page %s" % (startURL, page))
            return items
        # Collapse newlines so one regex can span multi-line markup.
        data = data.replace("\n", "")
        items.extend(re.findall("<a .*?href=\"(/dl/RaiTV/programmi/media/ContentItem.*?)\".*?><h2>(.*?)</h2>", data))
        page += 1
def openMovie(url):
    """Return the direct video URL embedded in the media page at ``url``."""
    print("openMovie: URL " + url)
    html = urllib.urlopen(url).read()
    # The page embeds:  videoURL = "<direct stream url>"
    matches = re.findall("videoURL = \"(.*?)\"", html, re.DOTALL)
    return matches[0]
import numpy as np
import sys
import time
sys.path.append("./src")
from dataset import get_moive1m
from similarities import PhiMatrix, DeltaMatrix
from models import Model
from pylab import *
def get_model():
    """Load the MovieLens-1M ratings and wrap them in a Model instance."""
    return Model(get_moive1m())
def get_matrix_delta(model, phi_mat):
    """Build the rating-deviation (delta) matrix from the model and phi matrix."""
    builder = DeltaMatrix(model, phi_mat)
    return builder.get_similarities_matrix()  # history issue--- bad name :(
def get_matrix_phi(model):
    """Build the co-rating (phi) similarity matrix for ``model``."""
    return PhiMatrix(model).get_similarities_matrix()
def get_predict(u_id, i_id, model, mat_delta, mat_phi):
    """Predict user ``u_id``'s rating for item ``i_id`` (Slope-One style).

    Combines the deviation between ``i_id`` and every item the user has
    already rated, weighted by the phi co-rating count.  Returns 0 when
    the accumulated weight is below 1 (no usable neighbour).
    """
    deltas = mat_delta[i_id]
    phis = mat_phi[i_id]
    ratings = model.raw_data[u_id]
    numerator = 0
    weight_sum = 0
    for a_id in list(model.get_items(u_id)):
        if a_id == i_id:
            continue
        delta_xa = deltas[a_id]
        phi_xa = phis[a_id]
        # -1000 is the "no data" sentinel; a zero phi carries no weight.
        if delta_xa == -1000 or phi_xa == 0:
            continue
        numerator += (delta_xa + ratings[a_id]) * phi_xa
        weight_sum += phi_xa
    if weight_sum < 1:
        return 0
    return numerator / weight_sum
def get_matrix_predict(model, mat_delta, mat_phi):
    """Predict a rating for every (user, item) pair the user has not rated.

    Returns a nested dict ``{user_id: {item_id: predicted_rating}}``;
    every user gets an entry, even if all items are already rated.
    """
    user_ids = model.get_user_ids()
    item_ids = model.get_item_ids()
    predictions = {}
    for u_id in user_ids:
        rated = model.raw_data[u_id]
        row = {}
        for i_id in item_ids:
            if i_id not in rated:
                row[i_id] = get_predict(u_id, i_id, model, mat_delta, mat_phi)
        predictions[u_id] = row
    return predictions
if __name__ == '__main__':
    # NOTE(review): Python 2 only (print statements).  Builds the model,
    # then times the three expensive phases: phi matrix, delta matrix,
    # and the full prediction matrix.
    print "start to load data..."
    model = get_model()
    print "model is created..."
    ist = time.time()
    phi = get_matrix_phi(model)
    iend = time.time()
    print "phi cost time: ", iend-ist
    delta = get_matrix_delta(model,phi)
    ist = time.time()
    print "delta cost time: ", ist-iend
    pred = get_matrix_predict(model, delta, phi)
    iend = time.time()
    print "prediction cost time: ", iend-ist
#
# The Python Imaging Library.
# $Id$
#
# XV Thumbnail file handler by Charles E. "Gene" Cash
# (gcash@magicnet.net)
#
# see xvcolor.c and xvbrowse.c in the sources to John Bradley's XV,
# available from ftp://ftp.cis.upenn.edu/pub/xv/
#
# history:
# 98-08-15 cec created (b/w only)
# 98-12-09 cec added color palette
# 98-12-28 fl added to PIL (with only a few very minor modifications)
#
# To do:
# FIXME: make save work (this requires quantization support)
#
__version__ = "0.1"
from PIL import Image, ImageFile, ImagePalette, _binary
o8 = _binary.o8
# standard color palette for thumbnails (RGB332)
PALETTE = b""
for r in range(8):
    for g in range(8):
        for b in range(4):
            # 3 bits red, 3 bits green, 2 bits blue, each scaled to 0-255.
            PALETTE = PALETTE + (o8((r*255)//7)+o8((g*255)//7)+o8((b*255)//3))
##
# Image plugin for XV thumbnail images.

class XVThumbImageFile(ImageFile.ImageFile):

    format = "XVThumb"
    format_description = "XV thumbnail image"

    def _open(self):
        """Parse the "P7 332" header and set up the raw decoder tile."""

        # check magic
        s = self.fp.read(6)
        if s != b"P7 332":
            raise SyntaxError("not an XV thumbnail file")

        # Skip to beginning of next line
        self.fp.readline()

        # skip info comments
        while True:
            s = self.fp.readline()
            if not s:
                raise SyntaxError("Unexpected EOF reading XV thumbnail file")
            # BUG FIX: on Python 3 ``s[0]`` is an int, so comparing it with
            # the bytes literal b'#' was always unequal and comment lines
            # were never skipped.  Slicing keeps it a bytes-to-bytes test.
            if s[:1] != b'#':
                break

        # parse header line (already read): "<width> <height> <maxval>"
        s = s.strip().split()

        self.mode = "P"
        # BUG FIX: after split() ``s`` is a list, so ``int(s[0:1])`` passed
        # a *list* to int() and raised TypeError.  Index the elements.
        self.size = int(s[0]), int(s[1])

        self.palette = ImagePalette.raw("RGB", PALETTE)

        self.tile = [
            ("raw", (0, 0)+self.size,
             self.fp.tell(), (self.mode, 0, 1)
             )]
# --------------------------------------------------------------------
# Register the plugin so Image.open() can recognise XV thumbnail files.
Image.register_open("XVThumb", XVThumbImageFile)
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
import sickbeard
from sickbeard import logger
from sickrage.helper.exceptions import ex
try:
import json
except ImportError:
import simplejson as json
class EMBYNotifier:
    """Sends notifications and library-update requests to an Emby server
    via its HTTP API, using the host/API key from the SickRage config."""

    def _notify_emby(self, message, host=None, emby_apikey=None):
        """Handles notifying Emby host via HTTP API

        :param message: text shown in the Emby admin notification
        :param host: Emby host; defaults to the configured EMBY_HOST
        :param emby_apikey: API key; defaults to the configured EMBY_APIKEY
        Returns:
            Returns True for no issue or False if there was an error
        """
        # fill in omitted parameters
        if not host:
            host = sickbeard.EMBY_HOST
        if not emby_apikey:
            emby_apikey = sickbeard.EMBY_APIKEY
        url = 'http://%s/emby/Notifications/Admin' % (host)
        values = {'Name': 'SickRage', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/SiCKRAGETV/SickRage/master/gui/slick/images/sickrage-shark-mascot.png'}
        data = json.dumps(values)
        try:
            req = urllib2.Request(url, data)
            # Emby authenticates requests via this token header.
            req.add_header('X-MediaBrowser-Token', emby_apikey)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req)
            result = response.read()
            response.close()
            logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
            return True
        except (urllib2.URLError, IOError) as e:
            logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
            return False

##############################################################################
# Public functions
##############################################################################

    def test_notify(self, host, emby_apikey):
        """Send a test notification to verify connectivity and credentials."""
        return self._notify_emby('This is a test notification from SickRage', host, emby_apikey)

    def update_library(self, show=None):
        """Handles updating the Emby Media Server host via HTTP API

        :param show: optional show object; when given, only that series is
            refreshed (TVDB indexer only).  When None, the whole library
            series list is refreshed.
        Returns:
            Returns True for no issue or False if there was an error
        """
        # NOTE(review): falls through (returns None) when USE_EMBY is off.
        if sickbeard.USE_EMBY:
            if not sickbeard.EMBY_HOST:
                logger.log(u'EMBY: No host specified, check your settings', logger.DEBUG)
                return False
            if show:
                if show.indexer == 1:
                    provider = 'tvdb'
                elif show.indexer == 2:
                    logger.log(u'EMBY: TVRage Provider no longer valid', logger.WARNING)
                    return False
                else:
                    logger.log(u'EMBY: Provider unknown', logger.WARNING)
                    return False
                query = '?%sid=%s' % (provider, show.indexerid)
            else:
                query = ''
            url = 'http://%s/emby/Library/Series/Updated%s' % (sickbeard.EMBY_HOST, query)
            values = {}
            data = urllib.urlencode(values)
            try:
                req = urllib2.Request(url, data)
                req.add_header('X-MediaBrowser-Token', sickbeard.EMBY_APIKEY)
                response = urllib2.urlopen(req)
                result = response.read()
                response.close()
                logger.log(u'EMBY: HTTP response: ' + result.replace('\n', ''), logger.DEBUG)
                return True
            except (urllib2.URLError, IOError) as e:
                logger.log(u'EMBY: Warning: Couldn\'t contact Emby at ' + url + ' ' + ex(e), logger.WARNING)
                return False
notifier = EMBYNotifier | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"slices"
"sort"
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/volume/selinuxwarning/translator"
)
const (
	// Log level at which the volume cache will be dumped after each change.
	// dump() is a no-op unless the logger verbosity is at least this level.
	dumpLogLevel = 10
)
// VolumeCache is the interface of the cache used by the SELinux warning
// controller to track volumes used by Pods and detect conflicting SELinux
// labels / change policies among the Pods that share a volume.
type VolumeCache interface {
	// Add a single volume to the cache. Returns list of conflicts it caused.
	AddVolume(logger klog.Logger, volumeName v1.UniqueVolumeName, podKey cache.ObjectName, seLinuxLabel string, changePolicy v1.PodSELinuxChangePolicy, csiDriver string) []Conflict

	// Remove a pod from the cache. Prunes all empty structures.
	DeletePod(logger klog.Logger, podKey cache.ObjectName)

	// GetPodsForCSIDriver returns all pods that use volumes with the given CSI driver.
	// This is useful when a CSIDrive changes its spec.seLinuxMount and the controller
	// needs to reevaluate all pods that use volumes with this driver.
	// The controller doesn't need to track in-tree volume plugins, because they don't
	// change their SELinux support dynamically.
	GetPodsForCSIDriver(driverName string) []cache.ObjectName

	// SendConflicts sends all current conflicts to the given channel.
	SendConflicts(logger klog.Logger, ch chan<- Conflict)
}
// VolumeCache stores all volumes used by Pods and their properties that the controller needs to track,
// like SELinux labels and SELinuxChangePolicies.
type volumeCache struct {
	// mutex guards volumes (and its nested maps); all methods take it.
	mutex sync.RWMutex

	seLinuxTranslator *translator.ControllerSELinuxTranslator

	// All volumes of all existing Pods.
	volumes map[v1.UniqueVolumeName]usedVolume
}
// Compile-time assertion that volumeCache implements VolumeCache.
var _ VolumeCache = &volumeCache{}

// NewVolumeLabelCache creates a new VolumeCache.
func NewVolumeLabelCache(seLinuxTranslator *translator.ControllerSELinuxTranslator) VolumeCache {
	c := &volumeCache{
		seLinuxTranslator: seLinuxTranslator,
		volumes:           map[v1.UniqueVolumeName]usedVolume{},
	}
	return c
}
// usedVolume is a volume that is used by one or more existing pods.
// It stores information about these pods to detect conflicts and generate events.
type usedVolume struct {
	// Name of the CSI driver that provides the volume; used by
	// GetPodsForCSIDriver to find affected pods on driver changes.
	csiDriver string
	// List of pods that use this volume. Indexed by pod key for faster deletion.
	pods map[cache.ObjectName]podInfo
}
// Information about a Pod that uses a volume.
type podInfo struct {
	// SELinux seLinuxLabel to be applied to the volume in the Pod.
	// Either as mount option or recursively by the container runtime.
	seLinuxLabel string
	// SELinuxChangePolicy of the Pod.
	changePolicy v1.PodSELinuxChangePolicy
}
// newPodInfoListForPod returns a pod-info map holding a single entry for
// the given pod and its SELinux parameters.
func newPodInfoListForPod(podKey cache.ObjectName, seLinuxLabel string, changePolicy v1.PodSELinuxChangePolicy) map[cache.ObjectName]podInfo {
	pods := make(map[cache.ObjectName]podInfo, 1)
	pods[podKey] = podInfo{
		seLinuxLabel: seLinuxLabel,
		changePolicy: changePolicy,
	}
	return pods
}
// Add a single volume to the cache. Returns list of conflicts it caused.
func (c *volumeCache) AddVolume(logger klog.Logger, volumeName v1.UniqueVolumeName, podKey cache.ObjectName, label string, changePolicy v1.PodSELinuxChangePolicy, csiDriver string) []Conflict {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	defer c.dump(logger)

	conflicts := make([]Conflict, 0)

	volume, found := c.volumes[volumeName]
	if !found {
		// This is a new volume: no other pod uses it yet, so there is
		// nothing to conflict with.
		volume = usedVolume{
			csiDriver: csiDriver,
			pods:      newPodInfoListForPod(podKey, label, changePolicy),
		}
		c.volumes[volumeName] = volume
		return conflicts
	}

	// The volume is already known
	podInfo := podInfo{
		seLinuxLabel: label,
		changePolicy: changePolicy,
	}

	oldPodInfo, found := volume.pods[podKey]
	if found && oldPodInfo == podInfo {
		// The Pod is already known too and nothing changed since the last update.
		// All conflicts were already reported when the Pod was added / updated in the cache last time.
		return conflicts
	}

	// Add the updated pod info to the cache
	volume.pods[podKey] = podInfo

	// Emit conflicts for the pod. Each conflicting pair yields *two*
	// Conflict entries (one addressed to each pod) so both get an event.
	// NOTE(review): the range includes the just-added pod itself; the
	// self-comparison is harmless assuming Conflicts(x, x) is false --
	// verify against ControllerSELinuxTranslator.
	for otherPodKey, otherPodInfo := range volume.pods {
		if otherPodInfo.changePolicy != changePolicy {
			// Send conflict to both pods
			conflicts = append(conflicts, Conflict{
				PropertyName:       "SELinuxChangePolicy",
				EventReason:        "SELinuxChangePolicyConflict",
				Pod:                podKey,
				PropertyValue:      string(changePolicy),
				OtherPod:           otherPodKey,
				OtherPropertyValue: string(otherPodInfo.changePolicy),
			}, Conflict{
				PropertyName:       "SELinuxChangePolicy",
				EventReason:        "SELinuxChangePolicyConflict",
				Pod:                otherPodKey,
				PropertyValue:      string(otherPodInfo.changePolicy),
				OtherPod:           podKey,
				OtherPropertyValue: string(changePolicy),
			})
		}
		if c.seLinuxTranslator.Conflicts(otherPodInfo.seLinuxLabel, label) {
			// Send conflict to both pods
			conflicts = append(conflicts, Conflict{
				PropertyName:       "SELinuxLabel",
				EventReason:        "SELinuxLabelConflict",
				Pod:                podKey,
				PropertyValue:      label,
				OtherPod:           otherPodKey,
				OtherPropertyValue: otherPodInfo.seLinuxLabel,
			}, Conflict{
				PropertyName:       "SELinuxLabel",
				EventReason:        "SELinuxLabelConflict",
				Pod:                otherPodKey,
				PropertyValue:      otherPodInfo.seLinuxLabel,
				OtherPod:           podKey,
				OtherPropertyValue: label,
			})
		}
	}
	return conflicts
}
// Remove a pod from the cache. Prunes all empty structures.
func (c *volumeCache) DeletePod(logger klog.Logger, podKey cache.ObjectName) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	defer c.dump(logger)

	// Drop the pod from every volume; a volume with no remaining pods
	// is removed entirely (deleting during range is safe in Go).
	for name, vol := range c.volumes {
		delete(vol.pods, podKey)
		if len(vol.pods) > 0 {
			continue
		}
		delete(c.volumes, name)
	}
}
// dump logs the whole cache content at dumpLogLevel verbosity.
// Callers must hold c.mutex (all callers already do).
func (c *volumeCache) dump(logger klog.Logger) {
	if !logger.V(dumpLogLevel).Enabled() {
		return
	}

	logger.Info("VolumeCache dump:")
	// sort the volume to have consistent output
	volumeIDs := make([]v1.UniqueVolumeName, 0, len(c.volumes))
	for volumeID := range c.volumes {
		volumeIDs = append(volumeIDs, volumeID)
	}
	slices.Sort(volumeIDs)
	for _, volumeID := range volumeIDs {
		volume := c.volumes[volumeID]
		logger.Info("Cached volume", "volume", volumeID, "csiDriver", volume.csiDriver)
		// Sort the pods to have consistent output
		podKeys := make([]cache.ObjectName, 0, len(volume.pods))
		for podKey := range volume.pods {
			podKeys = append(podKeys, podKey)
		}
		sort.Slice(podKeys, func(i, j int) bool {
			return podKeys[i].String() < podKeys[j].String()
		})
		for _, podKey := range podKeys {
			podInfo := volume.pods[podKey]
			logger.Info("  pod", "pod", podKey, "seLinuxLabel", podInfo.seLinuxLabel, "changePolicy", podInfo.changePolicy)
		}
	}
}
// GetPodsForCSIDriver returns all pods that use volumes with the given CSI driver.
// Pods may appear multiple times if they use several matching volumes.
func (c *volumeCache) GetPodsForCSIDriver(driverName string) []cache.ObjectName {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	var matching []cache.ObjectName
	for _, vol := range c.volumes {
		if vol.csiDriver == driverName {
			for key := range vol.pods {
				matching = append(matching, key)
			}
		}
	}
	return matching
}
// SendConflicts sends all current conflicts to the given channel.
// Each conflicting pair is visited twice (once per ordering), so every
// pod receives a Conflict addressed to it.
func (c *volumeCache) SendConflicts(logger klog.Logger, ch chan<- Conflict) {
	c.mutex.RLock()
	defer c.mutex.RUnlock()

	logger.V(4).Info("Scraping conflicts")
	c.dump(logger)

	for _, volume := range c.volumes {
		// compare pods that use the same volume with each other
		for podKey, podInfo := range volume.pods {
			for otherPodKey, otherPodInfo := range volume.pods {
				if podKey == otherPodKey {
					continue
				}
				// create conflict only for the first pod. The other pod will get the same conflict in its own iteration of `volume.pods` loop.
				if podInfo.changePolicy != otherPodInfo.changePolicy {
					ch <- Conflict{
						PropertyName:       "SELinuxChangePolicy",
						EventReason:        "SELinuxChangePolicyConflict",
						Pod:                podKey,
						PropertyValue:      string(podInfo.changePolicy),
						OtherPod:           otherPodKey,
						OtherPropertyValue: string(otherPodInfo.changePolicy),
					}
				}
				if c.seLinuxTranslator.Conflicts(podInfo.seLinuxLabel, otherPodInfo.seLinuxLabel) {
					ch <- Conflict{
						PropertyName:       "SELinuxLabel",
						EventReason:        "SELinuxLabelConflict",
						Pod:                podKey,
						PropertyValue:      podInfo.seLinuxLabel,
						OtherPod:           otherPodKey,
						OtherPropertyValue: otherPodInfo.seLinuxLabel,
					}
				}
			}
		}
	}
}
#![cfg(feature = "experimental-introspection")]
use actix_web::{guard, test, web, App, HttpResponse};
/// Serves the full introspection tree as a JSON response.
async fn introspection_handler(
    tree: web::Data<actix_web::introspection::IntrospectionTree>,
) -> HttpResponse {
    let report = tree.report_as_json();
    HttpResponse::Ok()
        .content_type("application/json")
        .body(report)
}
/// Serves only the external-resources part of the introspection report as JSON.
async fn externals_handler(
    tree: web::Data<actix_web::introspection::IntrospectionTree>,
) -> HttpResponse {
    let report = tree.report_externals_as_json();
    HttpResponse::Ok()
        .content_type("application/json")
        .body(report)
}
/// Returns the report entry whose `full_path` equals `path`; panics if absent.
fn find_item<'a>(items: &'a [serde_json::Value], path: &str) -> &'a serde_json::Value {
    for item in items {
        if item.get("full_path").and_then(|v| v.as_str()) == Some(path) {
            return item;
        }
    }
    panic!("missing route for {path}")
}
/// Returns the external-resource entry whose `name` matches; panics if absent.
fn find_external<'a>(items: &'a [serde_json::Value], name: &str) -> &'a serde_json::Value {
    for item in items {
        if item.get("name").and_then(|v| v.as_str()) == Some(name) {
            return item;
        }
    }
    panic!("missing external resource for {name}")
}
// End-to-end check of the introspection report: builds an app with multi-
// pattern, guarded, scoped and external resources, then asserts the JSON
// report exposes patterns, names, types, guards and external origins.
#[actix_rt::test]
async fn introspection_report_includes_details_and_metadata() {
    let app = test::init_service(
        App::new()
            .external_resource("app-external", "https://example.com/{id}")
            .service(
                web::resource(["/alpha", "/beta"])
                    .name("multi")
                    .route(web::get().to(HttpResponse::Ok)),
            )
            .service(
                web::resource("/guarded")
                    .guard(guard::Header("accept", "text/plain"))
                    .route(web::get().to(HttpResponse::Ok)),
            )
            .service(
                web::scope("/scoped")
                    .guard(guard::Header("x-scope", "1"))
                    .configure(|cfg| {
                        cfg.external_resource("scope-external", "https://scope.example/{id}");
                    })
                    .service(web::resource("/item").route(web::get().to(HttpResponse::Ok))),
            )
            .service(web::resource("/introspection").route(web::get().to(introspection_handler)))
            .service(
                web::resource("/introspection/externals").route(web::get().to(externals_handler)),
            ),
    )
    .await;

    // Fetch the route report.
    let req = test::TestRequest::get().uri("/introspection").to_request();
    let resp = test::call_service(&app, req).await;
    assert!(resp.status().is_success());
    let body = test::read_body(resp).await;
    let items: Vec<serde_json::Value> =
        serde_json::from_slice(&body).expect("invalid introspection json");

    // A multi-pattern resource lists both patterns plus its name and type.
    let alpha = find_item(&items, "/alpha");
    let patterns = alpha
        .get("patterns")
        .and_then(|v| v.as_array())
        .expect("patterns missing");
    let patterns = patterns
        .iter()
        .filter_map(|v| v.as_str())
        .collect::<Vec<_>>();
    assert!(patterns.contains(&"/alpha"));
    assert!(patterns.contains(&"/beta"));
    assert_eq!(
        alpha.get("resource_name").and_then(|v| v.as_str()),
        Some("multi")
    );
    assert_eq!(
        alpha.get("resource_type").and_then(|v| v.as_str()),
        Some("resource")
    );

    // Guards appear both as display strings and as detail entries.
    let guarded = find_item(&items, "/guarded");
    let guards = guarded
        .get("guards")
        .and_then(|v| v.as_array())
        .expect("guards missing");
    assert!(guards
        .iter()
        .any(|v| v.as_str() == Some("Header(accept, text/plain)")));
    let guard_details = guarded
        .get("guards_detail")
        .and_then(|v| v.as_array())
        .expect("guards_detail missing");
    assert!(!guard_details.is_empty());

    // An unguarded resource still carries (non-empty) guard details
    // (e.g. the implicit method guard) while `guards` stays empty.
    let alpha_guards = alpha
        .get("guards")
        .and_then(|v| v.as_array())
        .expect("alpha guards missing");
    let alpha_guard_details = alpha
        .get("guards_detail")
        .and_then(|v| v.as_array())
        .expect("alpha guards_detail missing");
    assert!(alpha_guards.is_empty());
    assert!(!alpha_guard_details.is_empty());

    // Scopes report their own type and guards.
    let scoped = find_item(&items, "/scoped");
    assert_eq!(
        scoped.get("resource_type").and_then(|v| v.as_str()),
        Some("scope")
    );
    let scoped_guards = scoped
        .get("guards")
        .and_then(|v| v.as_array())
        .expect("scoped guards missing");
    assert!(scoped_guards
        .iter()
        .any(|v| v.as_str() == Some("Header(x-scope, 1)")));

    // Fetch the externals report and verify pattern + origin scope for
    // both app-level and scope-level external resources.
    let req = test::TestRequest::get()
        .uri("/introspection/externals")
        .to_request();
    let resp = test::call_service(&app, req).await;
    assert!(resp.status().is_success());
    let body = test::read_body(resp).await;
    let externals: Vec<serde_json::Value> =
        serde_json::from_slice(&body).expect("invalid externals json");

    let app_external = find_external(&externals, "app-external");
    let app_patterns = app_external
        .get("patterns")
        .and_then(|v| v.as_array())
        .expect("app external patterns missing");
    assert!(app_patterns
        .iter()
        .any(|v| v.as_str() == Some("https://example.com/{id}")));
    assert_eq!(
        app_external.get("origin_scope").and_then(|v| v.as_str()),
        Some("/")
    );

    let scope_external = find_external(&externals, "scope-external");
    let scope_patterns = scope_external
        .get("patterns")
        .and_then(|v| v.as_array())
        .expect("scope external patterns missing");
    assert!(scope_patterns
        .iter()
        .any(|v| v.as_str() == Some("https://scope.example/{id}")));
    assert_eq!(
        scope_external.get("origin_scope").and_then(|v| v.as_str()),
        Some("/scoped")
    );
}
import numpy as np
import mxnet as mx
import json
import utils
import math
import sys
def calc_complexity(ishape, node):
    """Return (per-rank cost, full FLOP cost) estimates for a conv layer.

    ishape: (channels, height, width) of the layer input.
    node:   mxnet graph node dict; ``param.kernel`` is a tuple literal
            string such as "(3, 3)" and ``param.num_filter`` the output
            channel count (as a string).
    """
    import ast
    # SECURITY: the kernel string comes from parsed model JSON; parse it
    # with ast.literal_eval instead of eval() so only literals are accepted.
    y, x = map(int, ast.literal_eval(node['param']['kernel']))
    N = int(node['param']['num_filter'])
    C, Y, X = ishape
    return x*(N+C)*X*Y, x*y*N*C*X*Y
def calc_eigenvalue(model, node):
    """Return the singular values of a conv layer's weight tensor.

    The (N, C, y, x) weight is re-laid-out as a (C*y, N*x) matrix before
    the SVD, matching the low-rank factorisation used for rank selection.
    """
    weight = model.arg_params[node['name'] + '_weight'].asnumpy()
    N, C, y, x = weight.shape
    mat = weight.transpose((1, 2, 0, 3)).reshape((C * y, -1))
    _, singular_values, _ = np.linalg.svd(mat, full_matrices=False)
    return singular_values
def get_ranksel(model, ratio):
    """Pick an SVD rank per conv layer so total cost drops by ``ratio``.

    A knapsack-style DP maximises the sum of log singular values kept,
    subject to the reduced-complexity budget EC.  Returns a dict mapping
    conv layer name -> selected rank.

    NOTE(review): Python 2 only (``xrange``, ``dict.has_key``); input
    shape is hard-coded to (1, 3, 224, 224).
    """
    conf = json.loads(model.symbol.tojson())
    _, output_shapes, _ = model.symbol.get_internals().infer_shape(data=(1,3,224,224))
    out_names = model.symbol.get_internals().list_outputs()
    out_shape_dic = dict(zip(out_names, output_shapes))
    nodes = conf['nodes']
    nodes = utils.topsort(nodes)
    # Per conv layer: C = (rank cost, full cost), D = #filters,
    # S = singular values, EC = total full cost of the network.
    C = []
    D = []
    S = []
    conv_names = []
    EC = 0
    for node in nodes:
        if node['op'] == 'Convolution':
            input_nodes = [nodes[int(j[0])] for j in node['inputs']]
            # The data input is the one not owned by this layer (weights/bias
            # node names start with the layer name).
            data = [input_node for input_node in input_nodes\
                    if not input_node['name'].startswith(node['name'])][0]
            if utils.is_input(data):
                ishape = (3, 224, 224)
            else:
                ishape = out_shape_dic[data['name'] + '_output'][1:]
            C.append(calc_complexity(ishape, node))
            D.append(int(node['param']['num_filter']))
            S.append(calc_eigenvalue(model, node))
            conv_names.append(node['name'])
            EC += C[-1][1]
    # Turn each singular-value array into its cumulative sums.
    for s in S:
        ss = sum(s)  # NOTE(review): unused -- leftover from normalisation?
        for i in xrange(1, len(s)):
            s[i] += s[i-1]
    n = len(C)
    EC /= ratio  # target complexity budget after reduction
    # dp: rolling pair of {cost: best objective}; dpc: backtracking info
    # per layer, {cost: (chosen rank index, previous cost)}.
    dp = [{}, {}]
    dpc = [{} for _ in xrange(n)]
    now, nxt = 0, 1
    dp[now][0] = 0
    for i in xrange(n):
        dp[nxt] = {}
        sys.stdout.flush()
        for now_c, now_v in dp[now].items():
            for d in xrange(min(len(S[i]), D[i])):
                nxt_c = now_c + (d+1)*C[i][0]
                if nxt_c > EC:
                    continue
                # Objective: cumulative (log) energy of the d+1 kept values.
                nxt_v = dp[now][now_c] + math.log(S[i][d])
                if dp[nxt].has_key(nxt_c):
                    if nxt_v > dp[nxt][nxt_c]:
                        dp[nxt][nxt_c] = nxt_v
                        dpc[i][nxt_c] = (d,now_c)
                else:
                    dp[nxt][nxt_c] = nxt_v
                    dpc[i][nxt_c] = (d,now_c)
        now, nxt = nxt, now
    # Pick the best objective within budget, then backtrack the ranks.
    maxv = -1e9
    target_c = 0
    for c,v in dp[now].items():
        assert c <= EC, 'False'
        if v > maxv:
            maxv = v
            target_c = c
    res = [0]*n
    nowc = target_c
    for i in xrange(n-1,-1,-1):
        res[i] = dpc[i][nowc][0] + 1
        nowc = dpc[i][nowc][1]
    return dict(zip(conv_names, res))
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from six import moves
from tempest_lib.common.utils import data_utils
from tempest_lib import decorators
from tempest.api.image import base
from tempest import test
class BasicOperationsImagesTest(base.BaseV2ImageTest):
    """
    Here we test the basic operations of images
    """

    @classmethod
    def skip_checks(cls):
        # NOTE(review): super() is given the *base* class, which skips
        # BaseV2ImageTest.skip_checks in the MRO -- confirm this is intended
        # (moot in practice: skipException is raised unconditionally below).
        super(base.BaseV2ImageTest, cls).skip_checks()
        skip_msg = ("%s skipped as bring your own image is not supported" %
                    cls.__name__)
        raise cls.skipException(skip_msg)

    @test.attr(type='smoke')
    @test.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
    def test_register_upload_get_image_file(self):
        """
        Here we test these functionalities - Register image,
        upload the image file, get image and get image file api's
        """
        uuid = '00000000-1111-2222-3333-444455556666'
        image_name = data_utils.rand_name('image')
        body = self.create_image(name=image_name,
                                 container_format='bare',
                                 disk_format='raw',
                                 visibility='private',
                                 ramdisk_id=uuid)
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertIn('name', body)
        self.assertEqual(image_name, body['name'])
        self.assertIn('visibility', body)
        self.assertEqual('private', body['visibility'])
        self.assertIn('status', body)
        self.assertEqual('queued', body['status'])

        # Now try uploading an image file
        file_content = data_utils.random_bytes()
        image_file = moves.cStringIO(file_content)
        self.client.store_image_file(image_id, image_file)

        # Now try to get image details
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(image_name, body['name'])
        self.assertEqual(uuid, body['ramdisk_id'])
        self.assertIn('size', body)
        # NOTE(review): relies on data_utils.random_bytes() defaulting to
        # 1024 bytes -- verify against tempest_lib.
        self.assertEqual(1024, body.get('size'))

        # Now try get image file
        body = self.client.load_image_file(image_id)
        self.assertEqual(file_content, body.data)

    @test.attr(type='smoke')
    @test.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
    def test_delete_image(self):
        # Deletes an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='raw',
                                        visibility='private')
        image_id = body['id']
        # Delete Image
        self.client.delete_image(image_id)
        self.client.wait_for_resource_deletion(image_id)

        # Verifying deletion
        images = self.client.list_images()
        images_id = [item['id'] for item in images]
        self.assertNotIn(image_id, images_id)

    @test.attr(type='smoke')
    @test.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
    def test_update_image(self):
        # Updates an image by image_id
        # Create image
        image_name = data_utils.rand_name('image')
        body = self.client.create_image(name=image_name,
                                        container_format='bare',
                                        disk_format='iso',
                                        visibility='private')
        self.addCleanup(self.client.delete_image, body['id'])
        self.assertEqual('queued', body['status'])
        image_id = body['id']

        # Now try uploading an image file
        image_file = moves.cStringIO(data_utils.random_bytes())
        self.client.store_image_file(image_id, image_file)

        # Update Image
        new_image_name = data_utils.rand_name('new-image')
        body = self.client.update_image(image_id, [
            dict(replace='/name', value=new_image_name)])

        # Verifying updating
        body = self.client.show_image(image_id)
        self.assertEqual(image_id, body['id'])
        self.assertEqual(new_image_name, body['name'])
class ListImagesTest(base.BaseV2ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def skip_checks(cls):
super(base.BaseV2ImageTest, cls).skip_checks()
skip_msg = ("%s skipped as bring your own image is not supported" %
cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
cls._create_standard_image('bare', 'raw')
cls._create_standard_image('bare', 'raw')
cls._create_standard_image('ami', 'raw')
# Add some more for listing
cls._create_standard_image('ami', 'ami')
cls._create_standard_image('ari', 'ari')
cls._create_standard_image('aki', 'aki')
@classmethod
def _create_standard_image(cls, container_format, disk_format):
"""
Create a new standard image and return the ID of the newly-registered
image. Note that the size of the new image is a random number between
1024 and 4096
"""
size = random.randint(1024, 4096)
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = data_utils.rand_name('image')
body = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
image_id = body['id']
cls.client.store_image_file(image_id, data=image_file)
return image_id
def _list_by_param_value_and_assert(self, params):
"""
Perform list action with given params and validates result.
"""
images_list = self.client.list_images(params=params)
# Validating params of fetched images
for image in images_list:
for key in params:
msg = "Failed to list images by %s" % key
self.assertEqual(params[key], image[key], msg)
@test.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_index_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.list_images()
image_list = map(lambda x: x['id'], images_list)
for image in self.created_images:
self.assertIn(image, image_list)
@test.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
# Test to get all images with container_format='bare'
params = {"container_format": "bare"}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
# Test to get all images with disk_format = raw
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
# Test to get all images with visibility = private
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
# Test to get all images by size
image_id = self.created_images[1]
# Get image metadata
image = self.client.show_image(image_id)
params = {"size": image['size']}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
# Test to get all images with size between 2000 to 3000
image_id = self.created_images[1]
# Get image metadata
image = self.client.show_image(image_id)
size = image['size']
params = {"size_min": size - 500, "size_max": size + 500}
images_list = self.client.list_images(params=params)
image_size_list = map(lambda x: x['size'], images_list)
for image_size in image_size_list:
self.assertTrue(image_size >= params['size_min'] and
image_size <= params['size_max'],
"Failed to get images by size_min and size_max")
@test.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
# Test to get all active images
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@test.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
# Test to get images by limit
params = {"limit": 2}
images_list = self.client.list_images(params=params)
self.assertEqual(len(images_list), params['limit'],
"Failed to get images by limit")
@test.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
    """Fetching the "image" schema returns a body named "image"."""
    body = self.client.show_schema("image")
    self.assertEqual("image", body['name'])
@test.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
    """Fetching the "images" schema returns a body named "images"."""
    body = self.client.show_schema("images")
    self.assertEqual("images", body['name'])
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Flex Messaging implementation.
This module contains the message classes used with Flex Data Services.
@see: U{RemoteObject on OSFlash (external)
<http://osflash.org/documentation/amf3#remoteobject>}
@since: 0.1
"""
import uuid
import pyamf.util
from pyamf import amf3
__all__ = [
'RemotingMessage',
'CommandMessage',
'AcknowledgeMessage',
'ErrorMessage',
'AbstractMessage',
'AsyncMessage'
]
#: ActionScript package that all message classes in this module are
#: registered under (see the register_package call at the bottom).
NAMESPACE = 'flex.messaging.messages'
#: High bit of a small-message flag byte; when set, another flag byte
#: follows the current one on the wire.
SMALL_FLAG_MORE = 0x80
class AbstractMessage(object):
    """
    Abstract base class for all Flex messages.

    Messages have two customizable sections; headers and data. The headers
    property provides access to specialized meta information for a specific
    message instance. The data property contains the instance specific data
    that needs to be delivered and processed by the decoder.

    @see: U{AbstractMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AbstractMessage.html>}

    @ivar body: Specific data that needs to be delivered to the remote
        destination.
    @type body: C{mixed}
    @ivar clientId: Indicates which client sent the message.
    @type clientId: C{str}
    @ivar destination: Message destination.
    @type destination: C{str}
    @ivar headers: Message headers. Core header names start with DS.
    @type headers: C{dict}
    @ivar messageId: Unique Message ID.
    @type messageId: C{str}
    @ivar timeToLive: How long the message should be considered valid and
        deliverable.
    @type timeToLive: C{int}
    @ivar timestamp: Timestamp when the message was generated.
    @type timestamp: C{int}
    """

    class __amf__:
        # Always encode with AMF3; these attributes are serialised in this
        # exact order for the non-small (plain object) encoding.
        amf3 = True
        static = ('body', 'clientId', 'destination', 'headers', 'messageId',
            'timestamp', 'timeToLive')

    #: Each message pushed from the server will contain this header identifying
    #: the client that will receive the message.
    DESTINATION_CLIENT_ID_HEADER = "DSDstClientId"
    #: Messages are tagged with the endpoint id for the channel they are sent
    #: over.
    ENDPOINT_HEADER = "DSEndpoint"
    #: Messages that need to set remote credentials for a destination carry the
    #: C{Base64} encoded credentials in this header.
    REMOTE_CREDENTIALS_HEADER = "DSRemoteCredentials"
    #: The request timeout value is set on outbound messages by services or
    #: channels and the value controls how long the responder will wait for an
    #: acknowledgement, result or fault response for the message before timing
    #: out the request.
    REQUEST_TIMEOUT_HEADER = "DSRequestTimeout"

    # Small-message wire format: each bit in the first flag byte marks one
    # of the seven static attributes (same order as __amf__.static).
    SMALL_ATTRIBUTE_FLAGS = [0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40]
    SMALL_ATTRIBUTES = dict(zip(
        SMALL_ATTRIBUTE_FLAGS,
        __amf__.static
    ))

    # Bits in the optional second flag byte mark attributes transported as
    # ByteArray-encoded UUIDs rather than plain objects.
    SMALL_UUID_FLAGS = [0x01, 0x02]
    SMALL_UUIDS = dict(zip(
        SMALL_UUID_FLAGS,
        ['clientId', 'messageId']
    ))

    def __new__(cls, *args, **kwargs):
        # NOTE(review): __init__ is invoked explicitly here, presumably so
        # instances built through __new__ alone (e.g. by a decoder) still get
        # their default attributes; on normal construction Python calls
        # __init__ again afterwards -- confirm that double-init is harmless.
        obj = object.__new__(cls)
        obj.__init__(*args, **kwargs)

        return obj

    def __init__(self, *args, **kwargs):
        # Every attribute defaults to None (headers to {}) when not supplied.
        self.body = kwargs.get('body', None)
        self.clientId = kwargs.get('clientId', None)
        self.destination = kwargs.get('destination', None)
        self.headers = kwargs.get('headers', {})
        self.messageId = kwargs.get('messageId', None)
        self.timestamp = kwargs.get('timestamp', None)
        self.timeToLive = kwargs.get('timeToLive', None)

    def __repr__(self):
        # Debug representation listing every instance attribute.
        m = '<%s ' % self.__class__.__name__

        for k in self.__dict__:
            m += ' %s=%r' % (k, getattr(self, k))

        return m + " />"

    def decodeSmallAttribute(self, attr, input):
        """
        Read one small-message attribute value from C{input}.

        Timestamps travel on the wire as milliseconds since the epoch and
        are converted to datetime objects here.

        @since: 0.5
        """
        obj = input.readObject()

        if attr in ['timestamp', 'timeToLive']:
            return pyamf.util.get_datetime(obj / 1000.0)

        return obj

    def encodeSmallAttribute(self, attr):
        """
        Return the wire value for C{attr}, or a falsy value when the
        attribute should not be written in the first flag section.

        @since: 0.5
        """
        obj = getattr(self, attr)

        if not obj:
            return obj

        if attr in ['timestamp', 'timeToLive']:
            # datetime -> milliseconds since the epoch.
            return pyamf.util.get_timestamp(obj) * 1000.0
        elif attr in ['clientId', 'messageId']:
            # UUIDs are excluded here; __writeamf__ emits them in the
            # dedicated UUID flag section instead.
            if isinstance(obj, uuid.UUID):
                return None

        return obj

    def __readamf__(self, input):
        # Decode the ISmallMessage form: up to two flag bytes, then one
        # object per set bit, in flag order.
        flags = read_flags(input)

        if len(flags) > 2:
            raise pyamf.DecodeError('Expected <=2 (got %d) flags for the '
                'AbstractMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        for index, byte in enumerate(flags):
            if index == 0:
                # First byte: plain attribute values.
                for flag in self.SMALL_ATTRIBUTE_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_ATTRIBUTES[flag]
                        setattr(self, attr, self.decodeSmallAttribute(attr, input))
            elif index == 1:
                # Second byte: ByteArray-encoded UUID values.
                for flag in self.SMALL_UUID_FLAGS:
                    if flag & byte:
                        attr = self.SMALL_UUIDS[flag]
                        setattr(self, attr, decode_uuid(input.readObject()))

    def __writeamf__(self, output):
        flag_attrs = []
        uuid_attrs = []
        byte = 0

        # Build the first flag byte from the non-empty plain attributes.
        for flag in self.SMALL_ATTRIBUTE_FLAGS:
            value = self.encodeSmallAttribute(self.SMALL_ATTRIBUTES[flag])

            if value:
                byte |= flag
                flag_attrs.append(value)

        flags = byte
        byte = 0

        # Build the second flag byte from UUID-valued attributes.
        for flag in self.SMALL_UUID_FLAGS:
            attr = self.SMALL_UUIDS[flag]
            value = getattr(self, attr)

            if not value:
                continue

            byte |= flag
            uuid_attrs.append(amf3.ByteArray(value.bytes))

        if not byte:
            # No UUID section: single flag byte without the "more" bit.
            output.writeUnsignedByte(flags)
        else:
            # UUID section follows: mark the first byte with SMALL_FLAG_MORE.
            output.writeUnsignedByte(flags | SMALL_FLAG_MORE)
            output.writeUnsignedByte(byte)

        [output.writeObject(attr) for attr in flag_attrs]
        [output.writeObject(attr) for attr in uuid_attrs]

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this object. If one is not
        available, L{NotImplementedError} will be raised.

        @since: 0.5
        """
        raise NotImplementedError
class AsyncMessage(AbstractMessage):
    """
    I am the base class for all asynchronous Flex messages.

    @see: U{AsyncMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AsyncMessage.html>}

    @ivar correlationId: Correlation id of the message.
    @type correlationId: C{str}
    """

    #: Messages that were sent with a defined subtopic property indicate their
    #: target subtopic in this header.
    SUBTOPIC_HEADER = "DSSubtopic"

    class __amf__:
        static = ('correlationId',)

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        self.correlationId = kwargs.get('correlationId', None)

    def __readamf__(self, input):
        # Decode the AsyncMessage portion of a small message: one flag byte
        # after the AbstractMessage section.
        AbstractMessage.__readamf__(self, input)

        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AsyncMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        byte = flags[0]

        # 0x01: correlationId sent as a plain object (typically a string).
        if byte & 0x01:
            self.correlationId = input.readObject()

        # 0x02: correlationId sent as a ByteArray-encoded UUID; when both
        # bits are set this value wins.
        if byte & 0x02:
            self.correlationId = decode_uuid(input.readObject())

    def __writeamf__(self, output):
        AbstractMessage.__writeamf__(self, output)

        if not isinstance(self.correlationId, uuid.UUID):
            output.writeUnsignedByte(0x01)
            output.writeObject(self.correlationId)
        else:
            output.writeUnsignedByte(0x02)
            output.writeObject(pyamf.amf3.ByteArray(self.correlationId.bytes))

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this async message.

        @since: 0.5
        """
        return AsyncMessageExt(**self.__dict__)
class AcknowledgeMessage(AsyncMessage):
    """
    I acknowledge the receipt of a message that was sent previously.

    Every message sent within the messaging system must receive an
    acknowledgement.

    @see: U{AcknowledgeMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/AcknowledgeMessage.html>}
    """

    #: Used to indicate that the acknowledgement is for a message that
    #: generated an error.
    ERROR_HINT_HEADER = "DSErrorHint"

    def __readamf__(self, input):
        # AcknowledgeMessage adds no attributes of its own; the flag byte is
        # consumed for wire-format symmetry and only validated.
        AsyncMessage.__readamf__(self, input)

        flags = read_flags(input)

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'AcknowledgeMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

    def __writeamf__(self, output):
        AsyncMessage.__writeamf__(self, output)

        # No AcknowledgeMessage-specific attributes: emit one empty flag byte.
        output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this acknowledge message.

        @since: 0.5
        """
        return AcknowledgeMessageExt(**self.__dict__)
class CommandMessage(AsyncMessage):
    """
    Provides a mechanism for sending commands related to publish/subscribe
    messaging, ping, and cluster operations.

    @see: U{CommandMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/CommandMessage.html>}

    @ivar operation: The command
    @type operation: C{int}
    @ivar messageRefType: hmm, not sure about this one.
    @type messageRefType: C{str}
    """

    #: The server message type for authentication commands.
    AUTHENTICATION_MESSAGE_REF_TYPE = "flex.messaging.messages.AuthenticationMessage"
    #: This is used to test connectivity over the current channel to the remote
    #: endpoint.
    PING_OPERATION = 5
    #: This is used by a remote destination to sync missed or cached messages
    #: back to a client as a result of a client issued poll command.
    SYNC_OPERATION = 4
    #: This is used to request a list of failover endpoint URIs for the remote
    #: destination based on cluster membership.
    CLUSTER_REQUEST_OPERATION = 7
    #: This is used to send credentials to the endpoint so that the user can be
    #: logged in over the current channel. The credentials need to be C{Base64}
    #: encoded and stored in the body of the message.
    LOGIN_OPERATION = 8
    #: This is used to log the user out of the current channel, and will
    #: invalidate the server session if the channel is HTTP based.
    LOGOUT_OPERATION = 9
    #: This is used to poll a remote destination for pending, undelivered
    #: messages.
    POLL_OPERATION = 2
    #: Subscribe commands issued by a consumer pass the consumer's C{selector}
    #: expression in this header.
    SELECTOR_HEADER = "DSSelector"
    #: This is used to indicate that the client's session with a remote
    #: destination has timed out.
    SESSION_INVALIDATE_OPERATION = 10
    #: This is used to subscribe to a remote destination.
    SUBSCRIBE_OPERATION = 0
    #: This is the default operation for new L{CommandMessage} instances.
    UNKNOWN_OPERATION = 1000
    #: This is used to unsubscribe from a remote destination.
    UNSUBSCRIBE_OPERATION = 1
    #: This operation is used to indicate that a channel has disconnected.
    DISCONNECT_OPERATION = 12

    class __amf__:
        static = ('operation',)

    def __init__(self, *args, **kwargs):
        AsyncMessage.__init__(self, *args, **kwargs)

        self.operation = kwargs.get('operation', None)

    def __readamf__(self, input):
        AsyncMessage.__readamf__(self, input)

        flags = read_flags(input)

        # Defensive: nothing further to decode when no flag byte was read.
        if not flags:
            return

        if len(flags) > 1:
            raise pyamf.DecodeError('Expected <=1 (got %d) flags for the '
                'CommandMessage portion of the small message for %r' % (
                    len(flags), self.__class__))

        byte = flags[0]

        # 0x01: the operation code follows as a plain object.
        if byte & 0x01:
            self.operation = input.readObject()

    def __writeamf__(self, output):
        AsyncMessage.__writeamf__(self, output)

        if self.operation:
            output.writeUnsignedByte(0x01)
            output.writeObject(self.operation)
        else:
            output.writeUnsignedByte(0)

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this command message.

        @since: 0.5
        """
        return CommandMessageExt(**self.__dict__)
class ErrorMessage(AcknowledgeMessage):
    """
    I am the Flex error message to be returned to the client.

    This class is used to report errors within the messaging system.

    @see: U{ErrorMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/ErrorMessage.html>}
    """

    #: If a message may not have been delivered, the faultCode will contain
    #: this constant.
    MESSAGE_DELIVERY_IN_DOUBT = "Client.Error.DeliveryInDoubt"
    #: Header name for the retryable hint header.
    #:
    #: This is used to indicate that the operation that generated the error may
    #: be retryable rather than fatal.
    RETRYABLE_HINT_HEADER = "DSRetryableErrorHint"

    class __amf__:
        static = ('extendedData', 'faultCode', 'faultDetail', 'faultString',
            'rootCause')

    def __init__(self, *args, **kwargs):
        AcknowledgeMessage.__init__(self, *args, **kwargs)

        #: Extended data that the remote destination has chosen to associate
        #: with this error to facilitate custom error processing on the client.
        self.extendedData = kwargs.get('extendedData', {})
        #: Fault code for the error.
        self.faultCode = kwargs.get('faultCode', None)
        #: Detailed description of what caused the error.
        self.faultDetail = kwargs.get('faultDetail', None)
        #: A simple description of the error.
        self.faultString = kwargs.get('faultString', None)
        #: Should a traceback exist for the error, this property contains the
        #: message.
        self.rootCause = kwargs.get('rootCause', {})

    def getSmallMessage(self):
        """
        Return a ISmallMessage representation of this error message.

        There is no Ext variant for error messages, so this always raises.

        @since: 0.5
        """
        raise NotImplementedError
class RemotingMessage(AbstractMessage):
    """
    I am used to send RPC requests to a remote endpoint.

    @see: U{RemotingMessage on Livedocs<http://
        livedocs.adobe.com/flex/201/langref/mx/messaging/messages/RemotingMessage.html>}
    """

    class __amf__:
        static = ('operation', 'source')

    def __init__(self, *args, **kwargs):
        AbstractMessage.__init__(self, *args, **kwargs)

        #: Name of the remote method/operation that should be called.
        self.operation = kwargs.get('operation', None)
        #: Name of the service to be called including package name.
        #: This property is provided for backwards compatibility.
        self.source = kwargs.get('source', None)
class AcknowledgeMessageExt(AcknowledgeMessage):
    """
    An L{AcknowledgeMessage}, but implementing C{ISmallMessage}.

    @since: 0.5
    """

    class __amf__:
        # Serialised via __readamf__/__writeamf__ (IExternalizable).
        external = True
class CommandMessageExt(CommandMessage):
    """
    A L{CommandMessage}, but implementing C{ISmallMessage}.

    @since: 0.5
    """

    class __amf__:
        # Serialised via __readamf__/__writeamf__ (IExternalizable).
        external = True
class AsyncMessageExt(AsyncMessage):
    """
    A L{AsyncMessage}, but implementing C{ISmallMessage}.

    @since: 0.5
    """

    class __amf__:
        # Serialised via __readamf__/__writeamf__ (IExternalizable).
        external = True
def read_flags(input):
    """
    Read the run of small-message flag bytes from C{input}.

    Bytes are consumed until one without the continuation bit
    (C{SMALL_FLAG_MORE}) is seen; each byte is returned with that bit
    stripped. At least one byte is always read.

    @since: 0.5
    """
    collected = []

    while True:
        raw = input.readUnsignedByte()

        if raw & SMALL_FLAG_MORE:
            collected.append(raw ^ SMALL_FLAG_MORE)
        else:
            collected.append(raw)
            break

    return collected
def decode_uuid(obj):
    """
    Decode a L{ByteArray} contents to a C{uuid.UUID} instance.

    @since: 0.5
    """
    # NOTE(review): str(obj) yields the raw 16 bytes on Python 2 only; on
    # Python 3 it would not produce a valid bytes value for uuid.UUID --
    # confirm the supported interpreter versions before porting.
    return uuid.UUID(bytes=str(obj))
# Register every message class in this module under the Flex namespace so
# they (de)serialise with their fully-qualified ActionScript aliases.
pyamf.register_package(globals(), package=NAMESPACE)

# The Ext variants additionally use the compact DSK/DSC/DSA aliases that
# identify ISmallMessage payloads on the wire.
pyamf.register_class(AcknowledgeMessageExt, 'DSK')
pyamf.register_class(CommandMessageExt, 'DSC')
pyamf.register_class(AsyncMessageExt, 'DSA')
{
"components": {
"schemas": {
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": {
"description": "APIGroup contains the name, the supported versions, and the preferred version of a group.",
"properties": {
"apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
"type": "string"
},
"kind": {
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"type": "string"
},
"name": {
"default": "",
"description": "name is the name of the group.",
"type": "string"
},
"preferredVersion": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
}
],
"default": {},
"description": "preferredVersion is the version preferred by the API server, which probably is the storage version."
},
"serverAddressByClientCIDRs": {
"description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "atomic"
},
"versions": {
"description": "versions are the versions supported in this group.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "atomic"
}
},
"required": [
"name",
"versions"
],
"type": "object",
"x-kubernetes-group-version-kind": [
{
"group": "",
"kind": "APIGroup",
"version": "v1"
}
]
},
"io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": {
"description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
"properties": {
"groupVersion": {
"default": "",
"description": "groupVersion specifies the API group and version in the form \"group/version\"",
"type": "string"
},
"version": {
"default": "",
"description": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
"type": "string"
}
},
"required": [
"groupVersion",
"version"
],
"type": "object"
},
"io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": {
"description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
"properties": {
"clientCIDR": {
"default": "",
"description": "The CIDR with which clients can match their IP to figure out the server address that they should use.",
"type": "string"
},
"serverAddress": {
"default": "",
"description": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
"type": "string"
}
},
"required": [
"clientCIDR",
"serverAddress"
],
"type": "object"
}
},
"securitySchemes": {
"BearerToken": {
"description": "Bearer Token authentication",
"in": "header",
"name": "authorization",
"type": "apiKey"
}
}
},
"info": {
"title": "Kubernetes",
"version": "unversioned"
},
"openapi": "3.0.0",
"paths": {
"/apis/resource.k8s.io/": {
"get": {
"description": "get information of a group",
"operationId": "getResourceAPIGroup",
"responses": {
"200": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
},
"application/vnd.kubernetes.protobuf": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
},
"application/yaml": {
"schema": {
"$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup"
}
}
},
"description": "OK"
},
"401": {
"description": "Unauthorized"
}
},
"tags": [
"resource"
]
}
}
}
} | json | github | https://github.com/kubernetes/kubernetes | api/openapi-spec/v3/apis__resource.k8s.io_openapi.json |
goldalign = [ ([0], [0]),
([1], [1]),
([2], [2]),
([3], [3]),
([4], [4]),
([5], [5]),
([6], [6, 7, 8]),
([7], [9]),
([8, 9], [10, 11, 12]),
([10, 11], [13]),
([12], [14]),
([13], [15]),
([], [16]),
([], [17]),
([], [18]),
([], [19]),
([], [20]),
([], [21]),
([], [22]),
([], [23]),
([], [24]),
([], [25]),
([], [26]),
([], [27]),
([], [28]),
([], [29]),
([], [30]),
([], [31]),
([], [32]),
([], [33]),
([], [34]),
([], [35]),
([], [36]),
([], [37]),
([], [38]),
([], [39]),
([], [40]),
([], [41]),
([], [42]),
([], [43]),
([], [44]),
([], [45]),
([], [46]),
([], [47]),
([], [48]),
([], [49]),
([], [50]),
([], [51]),
([14], [52]),
([15], [53]),
([16], [54]),
([17, 18], [55]),
([19, 20], [56]),
([21], [57, 58]),
([22], [59]),
([23], [60]),
([24], [61]),
([25], [62]),
([26, 27], [63, 64]),
([28], [65]),
([29], [66]),
([30], [67]),
([31, 32, 33], [68]),
([34], [69]),
([35], [70]),
([36], [71]),
([37], [72]),
([38], [73]),
([39], [74]),
([40], [75]),
([41], [76]),
([42], [77, 78]),
([43], [79]),
([44], [80]),
([45], [81]),
([46], [82]),
([47, 48], [83]),
([49], [84]),
([50], [85, 86]),
([51], [87]),
([52], []),
([53], [88]),
([54], [89]),
([55, 56], [90]),
([57, 58], [91]),
([59], [92]),
([60], [93]),
([61], [95, 96, 97, 98, 99]),
([62], [100]),
([63], [101, 102]),
([64], [103]),
([65], [104]),
([66, 67, 68], [105, 106]),
([69, 70], [107]),
([71], [108]),
([72], [109]),
([73], [110]),
([74], [111]),
([75, 76], [112]),
([77], [113]),
([78, 79], [114, 115, 116, 117, 118]),
([80, 81], [119]),
([82], [120, 121]),
([83], [122, 123]),
([84], [124, 125, 126, 127]),
([85], [128]),
([86], [129]),
([87], [130, 131]),
([88], [132, 133]),
([89], [134]),
([90], [135]),
([91], [136, 137]),
([92], [138, 139, 140, 141]),
([93], [142, 143]),
([94, 95, 96], [144, 145, 146]),
([97], [147]),
([98, 99, 100], [148, 149]),
([101], [150]),
([102], [151]),
([103], [152]),
([104], [153]),
([105, 106, 107], [154]),
([108, 109], [155]),
([110, 111, 112], [156]),
([113], [157]),
([114], [158]),
([115], [159]),
([116], [160]),
([117], [161]),
([118], [162]),
([119, 120], [163]),
([121], [164]),
([122], [165]),
([123], [166]),
([124, 125], [167]),
([126], [168]),
([127], [169]),
([128], [170]),
([129, 130], [171, 172]),
([131], [173]),
([132], [174]),
([133], [175]),
([134], [176]),
([135], [177]),
([136], [178]),
([137], [179]),
([138], [180]),
([139], [181]),
([140, 141], [182, 183]),
([142], [184]),
([143, 144], [185]),
([145], [186]),
([146], [187]),
([147], [188]),
([148], [189]),
([149], [190]),
([150], [191]),
([151], [192]),
([152], [193]),
([153], [194]),
([154], [195]),
([155], [196, 197]),
([156, 157], [198]),
([158], [199]),
([159], [200]),
([160], [201]),
([161, 162], [202]),
([163], [203]),
([164, 165], [204, 205]),
([166, 167, 168], [206]),
([169], [207]),
([170], [208]),
([171, 172], [209]),
([173, 174, 175], [210, 211]),
([176], [212]),
([177], [213]),
([178], [214]),
([179], [215]),
([180], [216]),
([181], [217]),
([182, 183], [218]),
([184], [219]),
([185], [220]),
([186], [221]),
([187, 188], [222]),
([189], [223]),
([190], [224]),
([191], [225]),
([192], [226]),
([193], [227]),
([194, 195], [228]),
([196, 197], [229, 230, 231]),
([198], [232]),
([199], [233]),
([200, 201], [234]),
([202, 203], [235, 236, 237]),
([204], [238, 239, 240]),
([205], [241, 242]),
([206], [243, 244]),
([207], [245]),
([208], [246]),
([209], [247, 248]),
([210], [249]),
([211], [250, 251]),
([212], [252]),
([213], [253]),
([214], [254]),
([215], [255]),
([216, 217], [256]),
([218], [257]),
([219], [258]),
([220], [259, 260]),
([221], [261]),
([222], [262, 263]),
([223], [264, 265]),
([224], [266]),
([225, 226], [267, 268, 269]),
([227], [270]),
([228, 229], [271]),
([230], [272]),
([231, 232], [273]),
([233, 234], [274]),
([235], [275]),
([236], [276]),
([237], [277]),
([238], [278]),
([239], [279, 280]),
([240], [281]),
([241], [282]),
([242, 243], [283, 284]),
([244], [285]),
([245], [286]),
([246, 247], [287, 288]),
([248], [289]),
([249], [290]),
([250], [291, 293, 294]),
([], [292]),
([251, 252], [295]),
([253, 254], [296, 297]),
([255], [298, 299]),
([256], [300, 301]),
([257], [302, 303, 304, 305]),
([258], [306, 307]),
([259], [308, 309]),
([260], [310]),
([261, 262], [311]),
([263], [312]),
([264, 265], [313]),
([266, 267], [314, 315]),
([268], [316]),
([269, 270, 271, 272], [317]),
([273, 274], [318]),
([275], [319]),
([276, 277], [320]),
([278], [321]),
([279, 280], [322, 323, 324]),
([281], [325]),
([282, 283], [326, 327]),
([284], [328]),
([], [329]),
([285], [330, 331]),
([286], [332]),
([287], [333, 334]),
([288], [335, 336]),
([289], [337]),
([290], [338]),
([291], [339]),
([292], [340]),
([293], [341]),
([294], [342]),
([295], [343]),
([296], [344, 345]),
([297], [346]),
([298], [347]),
([299], [348, 349]),
([300], [350]),
([301], [351]),
([302, 303], [352, 353]),
([304, 305, 306], [354]),
([307], [355, 356]),
([308], [357]),
([309], [358]),
([310], [359]),
([311], [360]),
([312], [361]),
([313], [362]),
([314], [363]),
([315], [364, 365, 366]),
([316], [367]),
([317], [368]),
([318], [369]),
([319, 320, 321], [370]),
([322], [372]),
([323], [373, 374]),
([324], [375, 376]),
([325], [377]),
([326, 327], [378]),
([328], [379]),
([329], [380]),
([330], [381]),
([331], [382, 383]),
([332], [384]),
([333], [385]),
([334], [386]),
([335], [387]),
([336], [388]),
([337], [389]),
([338], [390, 391]),
([339], [392]),
([340], [393]),
([341], [394]),
([342], [395]),
([343], [396]),
([344], [397]),
([345], [398, 399]),
([346, 347], [400, 401]),
([348], [402]),
([349], [403]),
([350], [404]),
([351], [405]),
([352, 353], [406, 407]),
([354], [408, 409]),
([355], [410, 411]),
([356], [412]),
([357], [413]),
([358, 359], [414]),
([360], [415]),
([361], [416, 417]),
([362], [418]),
([363], [419]),
([364], [420, 421, 422, 423, 424]),
([365], [425]),
([366], [426]),
([367], [427]),
([368], [428]),
([369], [429]),
([370], [430, 431]),
([371], [432]),
([372], [433]),
([373, 374, 375], [434, 435]),
([376, 377], [436, 437]),
([378, 379], [438]),
([380], [439, 440, 441]),
([381], [442]),
([382], [443]),
([383], [444, 445]),
([384], [446]),
([385], [447, 448]),
([386], [449, 450]),
([387], [451, 452]),
([388], [453]),
([389], [454]),
([390], [455, 456]),
([391], [457, 458, 459, 460]),
([392], [461]),
([393], [462, 463]),
([394], [464]),
([395, 396], [465, 466]),
([397], [467, 468, 469]),
([398], [470]),
([399], [471]),
([400], [472]),
([401], [473]),
([402, 403, 404, 405], [474, 475, 476]),
([406], [477]),
([407], [478]),
([408], [479]),
([409], [480, 481]),
([410], [482]),
([411], [483, 484]),
([412], [485, 486]),
([413], [487]),
([414], [488, 489]),
([415], [490, 491, 492]),
([416], [493]),
([417, 418], [494, 495]),
([419, 420], [496, 497]),
([421], [498]),
([422], [499]),
([423], [500]),
([424], [501, 502]),
([425, 426, 427], [503, 504, 505]),
([428], [506, 507]),
([429], [508, 509, 510]),
([430], [511]),
([431], [512, 513, 514]),
([432], [515]),
([433, 434, 435], [516]),
([436], [517]),
([437], [518]),
([438], [519]),
([439], [520]),
([440], [521]),
([441], [522]),
([442], [523]),
([443], [524]),
([444], [525]),
([445], [526]),
([446], [527]),
([447], [528]),
([448], [529]),
([449], [530]),
([450], [531, 533, 534, 535]),
([], [532]),
([451], [536]),
([452], [537]),
([453], [538]),
([454], [539]),
([455], [541]),
([456], [540]),
([457], [542]),
([458], [543]),
([459], [544]),
([460], [545]),
([461], [546]),
([462], [547]),
([463], [548]),
([464], [549]),
([465], [550]),
([466], [551]),
([467], [552]),
([], [553])] | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings: surface them to users instead of Python's default
# "ignore" filter for this warning category.
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)

# Public interface of this module.
__all__ = [
    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
    'main', 'get_exe_prefixes',
]
def is_64bit():
    """Return True when the running interpreter uses 64-bit pointers."""
    pointer_size = struct.calcsize("P")
    return pointer_size == 8
def samefile(p1, p2):
    """
    Determine if two paths reference the same file.

    Augments os.path.samefile to work on Windows and
    suppresses errors if the path doesn't exist.
    """
    if hasattr(os.path, 'samefile') and os.path.exists(p1) and os.path.exists(p2):
        return os.path.samefile(p1, p2)
    # Fall back to a normalized lexical comparison when either path is
    # missing or the platform lacks os.path.samefile.
    return (
        os.path.normpath(os.path.normcase(p1)) ==
        os.path.normpath(os.path.normcase(p2))
    )
# Python 2/3 compatible ASCII helpers, selected once at import time.
if six.PY2:
    def _to_ascii(s):
        # On Python 2, str is already a byte string; pass through unchanged.
        return s

    def isascii(s):
        """Return True if the byte string ``s`` decodes as ASCII."""
        try:
            six.text_type(s, 'ascii')
            return True
        except UnicodeError:
            return False
else:
    def _to_ascii(s):
        # On Python 3, encode the text string down to ASCII bytes.
        return s.encode('ascii')

    def isascii(s):
        """Return True if the text string ``s`` contains only ASCII."""
        try:
            s.encode('ascii')
            return True
        except UnicodeError:
            return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
class easy_install(Command):
    """Manage a download/build/install process"""

    description = "Find/get/install Python packages"
    # Positional arguments (requirement specs) are consumed by this command
    # itself rather than being parsed as further distutils commands.
    command_consumes_arguments = True

    user_options = [
        ('prefix=', None, "installation prefix"),
        ("zip-ok", "z", "install package as a zipfile"),
        ("multi-version", "m", "make apps have to require() a version"),
        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
        ("install-dir=", "d", "install package to DIR"),
        ("script-dir=", "s", "install scripts to DIR"),
        ("exclude-scripts", "x", "Don't install scripts"),
        ("always-copy", "a", "Copy all needed packages to install dir"),
        ("index-url=", "i", "base URL of Python Package Index"),
        ("find-links=", "f", "additional URL(s) to search for packages"),
        ("build-directory=", "b",
            "download/extract/build in DIR; keep the results"),
        ('optimize=', 'O',
            "also compile with optimization: -O1 for \"python -O\", "
            "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
        ('record=', None,
            "filename in which to record list of installed files"),
        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
        ('site-dirs=', 'S', "list of directories where .pth files work"),
        ('editable', 'e', "Install specified packages in editable form"),
        ('no-deps', 'N', "don't install dependencies"),
        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
        ('local-snapshots-ok', 'l',
            "allow building eggs from local checkouts"),
        ('version', None, "print version information and exit"),
        ('no-find-links', None,
            "Don't load find-links defined in packages being installed")
    ]
    boolean_options = [
        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
        'editable',
        'no-deps', 'local-snapshots-ok', 'version'
    ]

    # The --user flag is only offered when the interpreter supports
    # per-user site-packages installs.
    if site.ENABLE_USER_SITE:
        help_msg = "install in user site-package '%s'" % site.USER_SITE
        user_options.append(('user', None, help_msg))
        boolean_options.append('user')

    # --always-unzip is the negation of --zip-ok.
    negative_opt = {'always-unzip': 'zip-ok'}

    # Factory used to build the package index; subclasses may override.
    create_index = PackageIndex
    def initialize_options(self):
        """Set every supported option to its pre-`finalize_options` default
        (None means "not yet specified")."""
        # the --user option seems to be an opt-in one,
        # so the default should be False.
        self.user = 0
        self.zip_ok = self.local_snapshots_ok = None
        self.install_dir = self.script_dir = self.exclude_scripts = None
        self.index_url = None
        self.find_links = None
        self.build_directory = None
        self.args = None
        self.optimize = self.record = None
        self.upgrade = self.always_copy = self.multi_version = None
        self.editable = self.no_deps = self.allow_hosts = None
        self.root = self.prefix = self.no_report = None
        self.version = None
        self.install_purelib = None  # for pure module distributions
        self.install_platlib = None  # non-pure (dists w/ extensions)
        self.install_headers = None  # for C/C++ headers
        self.install_lib = None  # set to either purelib or platlib
        self.install_scripts = None
        self.install_data = None
        self.install_base = None
        self.install_platbase = None
        if site.ENABLE_USER_SITE:
            self.install_userbase = site.USER_BASE
            self.install_usersite = site.USER_SITE
        else:
            self.install_userbase = None
            self.install_usersite = None
        self.no_find_links = None
        # Options not specifiable via command line
        self.package_index = None
        self.pth_file = self.always_copy_from = None
        self.site_dirs = None
        self.installed_projects = {}
        self.sitepy_installed = False
        # Always read easy_install options, even if we are subclassed, or have
        # an independent instance created. This ensures that defaults will
        # always come from the standard configuration file(s)' "easy_install"
        # section, even if this is a "develop" or "install" command, or some
        # other embedding.
        self._dry_run = None
        self.verbose = self.distribution.verbose
        self.distribution._set_command_options(
            self, self.distribution.get_option_dict('easy_install')
        )
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
    def run(self):
        """Install each requested spec; honor --record, and restore the
        distribution's log verbosity even on failure."""
        if self.verbose != self.distribution.verbose:
            log.set_verbosity(self.verbose)
        try:
            for spec in self.args:
                self.easy_install(spec, not self.no_deps)
            if self.record:
                # --record: dump every installed path to a file, with any
                # --root prefix stripped so paths are install-relative.
                outputs = self.outputs
                if self.root:  # strip any package prefix
                    root_len = len(self.root)
                    for counter in range(len(outputs)):
                        outputs[counter] = outputs[counter][root_len:]
                from distutils import file_util
                self.execute(
                    file_util.write_file, (self.record, outputs),
                    "writing list of installed files to '%s'" %
                    self.record
                )
            self.warn_deprecated_options()
        finally:
            log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
    def warn_deprecated_options(self):
        # Hook called at the end of run(); no options are currently
        # deprecated, so there is nothing to warn about. Subclasses may
        # override.
        pass
    def check_site_dir(self):
        """Verify that self.install_dir is .pth-capable dir, if needed"""
        instdir = normalize_path(self.install_dir)
        pth_file = os.path.join(instdir, 'easy-install.pth')
        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
        is_site_dir = instdir in self.all_site_dirs
        if not is_site_dir and not self.multi_version:
            # No? Then directly test whether it does .pth file processing
            is_site_dir = self.check_pth_processing()
        else:
            # make sure we can write to target dir
            testfile = self.pseudo_tempname() + '.write-test'
            test_exists = os.path.exists(testfile)
            try:
                if test_exists:
                    os.unlink(testfile)
                open(testfile, 'w').close()
                os.unlink(testfile)
            except (OSError, IOError):
                self.cant_write_to_target()
        if not is_site_dir and not self.multi_version:
            # Can't install non-multi to non-site dir
            raise DistutilsError(self.no_default_version_msg())
        if is_site_dir:
            # Site dir: manage entries through easy-install.pth.
            if self.pth_file is None:
                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
        else:
            self.pth_file = None
        if instdir not in map(normalize_path, _pythonpath()):
            # only PYTHONPATH dirs need a site.py, so pretend it's there
            self.sitepy_installed = True
        elif self.multi_version and not os.path.exists(pth_file):
            self.sitepy_installed = True  # don't need site.py in this case
            self.pth_file = None  # and don't create a .pth file
        self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
    def check_pth_processing(self):
        """Empirically verify whether .pth files are supported in inst. dir"""
        instdir = self.install_dir
        log.info("Checking .pth file support in %s", instdir)
        pth_file = self.pseudo_tempname() + ".pth"
        ok_file = pth_file + '.ok'
        ok_exists = os.path.exists(ok_file)
        # The snippet below is written into a temporary .pth file; if the
        # interpreter processes .pth files in instdir it will run it and
        # create ok_file. Filled via format(**locals()), so the local name
        # ``ok_file`` must not be renamed.
        tmpl = _one_liner("""
            import os
            f = open({ok_file!r}, 'w')
            f.write('OK')
            f.close()
            """) + '\n'
        try:
            if ok_exists:
                os.unlink(ok_file)
            dirname = os.path.dirname(ok_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            f = open(pth_file, 'w')
        except (OSError, IOError):
            self.cant_write_to_target()
        else:
            try:
                f.write(tmpl.format(**locals()))
                f.close()
                f = None
                executable = sys.executable
                if os.name == 'nt':
                    dirname, basename = os.path.split(executable)
                    alt = os.path.join(dirname, 'pythonw.exe')
                    use_alt = (
                        basename.lower() == 'python.exe' and
                        os.path.exists(alt)
                    )
                    if use_alt:
                        # use pythonw.exe to avoid opening a console window
                        executable = alt
                from distutils.spawn import spawn
                # Spawn a fresh interpreter; -E ignores env vars so only
                # the .pth machinery can create ok_file.
                spawn([executable, '-E', '-c', 'pass'], 0)
                if os.path.exists(ok_file):
                    log.info(
                        "TEST PASSED: %s appears to support .pth files",
                        instdir
                    )
                    return True
            finally:
                # Always clean up the probe files (and the handle, if the
                # write itself failed).
                if f:
                    f.close()
                if os.path.exists(ok_file):
                    os.unlink(ok_file)
                if os.path.exists(pth_file):
                    os.unlink(pth_file)
        if not self.multi_version:
            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
        return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
    @contextlib.contextmanager
    def _tmpdir(self):
        """Context manager yielding a fresh temporary directory which is
        removed on exit (if something else hasn't already moved it)."""
        tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
        try:
            # cast to str as workaround for #709 and #710 and #712
            yield str(tmpdir)
        finally:
            # The directory may already be gone (e.g. moved by maybe_move).
            os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
    def easy_install(self, spec, deps=False):
        """Install ``spec`` -- a requirement string, local path, or URL --
        optionally with its dependencies; returns the installed
        Distribution (or None when nothing matched ``spec``)."""
        if not self.editable:
            self.install_site_py()
        with self._tmpdir() as tmpdir:
            if not isinstance(spec, Requirement):
                if URL_SCHEME(spec):
                    # It's a url, download it to tmpdir and process
                    self.not_editable(spec)
                    dl = self.package_index.download(spec, tmpdir)
                    return self.install_item(None, dl, tmpdir, deps, True)
                elif os.path.exists(spec):
                    # Existing file or directory, just process it directly
                    self.not_editable(spec)
                    return self.install_item(None, spec, tmpdir, deps, True)
                else:
                    spec = parse_requirement_arg(spec)
            self.check_editable(spec)
            # Find the best matching distribution on the index/local env.
            dist = self.package_index.fetch_distribution(
                spec, tmpdir, self.upgrade, self.editable,
                not self.always_copy, self.local_index
            )
            if dist is None:
                msg = "Could not find suitable distribution for %r" % spec
                if self.always_copy:
                    msg += " (--always-copy skips system and development eggs)"
                raise DistutilsError(msg)
            elif dist.precedence == DEVELOP_DIST:
                # .egg-info dists don't need installing, just process deps
                self.process_distribution(spec, dist, deps, "Using")
                return dist
            else:
                return self.install_item(spec, dist.location, tmpdir, deps)
    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        """Install (or merely activate) the artifact at ``download``;
        returns the matching Distribution when ``spec`` is a Requirement,
        else None."""
        # Installation is also needed if file in tmpdir or is not an egg
        install_needed = install_needed or self.always_copy
        install_needed = install_needed or os.path.dirname(download) == tmpdir
        install_needed = install_needed or not download.endswith('.egg')
        install_needed = install_needed or (
            self.always_copy_from is not None and
            os.path.dirname(normalize_path(download)) ==
            normalize_path(self.always_copy_from)
        )
        if spec and not install_needed:
            # at this point, we know it's a local .egg, we just don't know if
            # it's already installed.
            for dist in self.local_index[spec.project_name]:
                if dist.location == download:
                    break
            else:
                install_needed = True  # it's not in the local index
        log.info("Processing %s", os.path.basename(download))
        if install_needed:
            dists = self.install_eggs(spec, download, tmpdir)
            for dist in dists:
                self.process_distribution(spec, dist, deps)
        else:
            # Already installed: just (re)activate it.
            dists = [self.egg_distribution(download)]
            self.process_distribution(spec, dists[0], deps, "Using")
        if spec is not None:
            for dist in dists:
                if dist in spec:
                    return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
    def process_distribution(self, requirement, dist, deps=True, *info):
        """Activate ``dist`` (pth entry, index registration, scripts) and,
        unless suppressed, resolve and install its dependencies."""
        self.update_pth(dist)
        self.package_index.add(dist)
        if dist in self.local_index[dist.key]:
            self.local_index.remove(dist)
        self.local_index.add(dist)
        self.install_egg_scripts(dist)
        self.installed_projects[dist.key] = dist
        log.info(self.installation_report(requirement, dist, *info))
        if (dist.has_metadata('dependency_links.txt') and
                not self.no_find_links):
            self.package_index.add_find_links(
                dist.get_metadata_lines('dependency_links.txt')
            )
        if not deps and not self.always_copy:
            return
        elif requirement is not None and dist.key != requirement.key:
            log.warn("Skipping dependencies for %s", dist)
            return  # XXX this is not the distribution we were looking for
        elif requirement is None or dist not in requirement:
            # if we wound up with a different version, resolve what we've got
            distreq = dist.as_requirement()
            requirement = Requirement(str(distreq))
        log.info("Processing dependencies for %s", requirement)
        try:
            # Resolve deps against the local index, installing missing ones
            # via self.easy_install as the installer callback.
            distros = WorkingSet([]).resolve(
                [requirement], self.local_index, self.easy_install
            )
        except DistributionNotFound as e:
            raise DistutilsError(str(e))
        except VersionConflict as e:
            raise DistutilsError(e.report())
        if self.always_copy or self.always_copy_from:
            # Force all the relevant distros to be copied or activated
            for dist in distros:
                if dist.key not in self.installed_projects:
                    self.easy_install(dist.as_requirement())
        log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
    def maybe_move(self, spec, dist_filename, setup_base):
        """Move a freshly downloaded/extracted source tree into the user's
        --build-directory; returns the directory to build from."""
        dst = os.path.join(self.build_directory, spec.key)
        if os.path.exists(dst):
            # Never clobber an existing checkout; build in place instead.
            msg = (
                "%r already exists in %s; build directory %s will not be kept"
            )
            log.warn(msg, spec.key, self.build_directory, setup_base)
            return setup_base
        if os.path.isdir(dist_filename):
            setup_base = dist_filename
        else:
            if os.path.dirname(dist_filename) == setup_base:
                os.unlink(dist_filename)  # get it out of the tmp dir
            contents = os.listdir(setup_base)
            if len(contents) == 1:
                dist_filename = os.path.join(setup_base, contents[0])
                if os.path.isdir(dist_filename):
                    # if the only thing there is a directory, move it instead
                    setup_base = dist_filename
        ensure_directory(dst)
        shutil.move(setup_base, dst)
        return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
    def install_script(self, dist, script_name, script_text, dev_path=None):
        """Generate a legacy script wrapper and install it"""
        spec = str(dist.as_requirement())
        is_script = is_python_script(script_text, script_name)
        if is_script:
            # NOTE: the template is filled from locals() -- the names
            # ``spec`` and ``dev_path`` are referenced by the .tmpl files
            # and must not be renamed.
            body = self._load_template(dev_path) % locals()
            script_text = ScriptWriter.get_header(script_text) + body
        self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
    def install_eggs(self, spec, dist_filename, tmpdir):
        """Turn ``dist_filename`` (egg, wininst .exe, archive, or source
        directory) into installed egg(s); returns a list of
        Distributions (empty for --editable)."""
        # .egg dirs or files are already built, so just return them
        if dist_filename.lower().endswith('.egg'):
            return [self.install_egg(dist_filename, tmpdir)]
        elif dist_filename.lower().endswith('.exe'):
            return [self.install_exe(dist_filename, tmpdir)]
        # Anything else, try to extract and build
        setup_base = tmpdir
        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
        elif os.path.isdir(dist_filename):
            setup_base = os.path.abspath(dist_filename)
        if (setup_base.startswith(tmpdir)  # something we downloaded
                and self.build_directory and spec is not None):
            setup_base = self.maybe_move(spec, dist_filename, setup_base)
        # Find the setup.py file
        setup_script = os.path.join(setup_base, 'setup.py')
        if not os.path.exists(setup_script):
            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
            if not setups:
                raise DistutilsError(
                    "Couldn't find a setup script in %s" %
                    os.path.abspath(dist_filename)
                )
            if len(setups) > 1:
                raise DistutilsError(
                    "Multiple setup scripts in %s" %
                    os.path.abspath(dist_filename)
                )
            setup_script = setups[0]
        # Now run it, and return the result
        if self.editable:
            log.info(self.report_editable(spec, setup_script))
            return []
        else:
            return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
    def install_egg(self, egg_path, tmpdir):
        """Move/copy/extract ``egg_path`` into the install directory,
        refresh importer caches, and return the installed Distribution."""
        destination = os.path.join(
            self.install_dir,
            os.path.basename(egg_path),
        )
        destination = os.path.abspath(destination)
        if not self.dry_run:
            ensure_directory(destination)
        dist = self.egg_distribution(egg_path)
        if not samefile(egg_path, destination):
            # Clear out whatever currently occupies the destination.
            if os.path.isdir(destination) and not os.path.islink(destination):
                dir_util.remove_tree(destination, dry_run=self.dry_run)
            elif os.path.exists(destination):
                self.execute(
                    os.unlink,
                    (destination,),
                    "Removing " + destination,
                )
            try:
                new_dist_is_zipped = False
                # Pick the transfer strategy: move when the source is our
                # own temp dir, copy otherwise; extract when zip-unsafe.
                if os.path.isdir(egg_path):
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copytree, "Copying"
                elif self.should_unzip(dist):
                    self.mkpath(destination)
                    f, m = self.unpack_and_compile, "Extracting"
                else:
                    new_dist_is_zipped = True
                    if egg_path.startswith(tmpdir):
                        f, m = shutil.move, "Moving"
                    else:
                        f, m = shutil.copy2, "Copying"
                self.execute(
                    f,
                    (egg_path, destination),
                    (m + " %s to %s") % (
                        os.path.basename(egg_path),
                        os.path.dirname(destination)
                    ),
                )
                update_dist_caches(
                    destination,
                    fix_zipimporter_caches=new_dist_is_zipped,
                )
            except Exception:
                # Still invalidate caches on failure, then re-raise.
                update_dist_caches(destination, fix_zipimporter_caches=False)
                raise
        self.add_output(destination)
        return self.egg_distribution(destination)
    def install_exe(self, dist_filename, tmpdir):
        """Convert a bdist_wininst .exe installer into an egg and install
        it; returns the installed Distribution."""
        # See if it's valid, get data
        cfg = extract_wininst_cfg(dist_filename)
        if cfg is None:
            raise DistutilsError(
                "%s is not a valid distutils Windows .exe" % dist_filename
            )
        # Create a dummy distribution object until we build the real distro
        dist = Distribution(
            None,
            project_name=cfg.get('metadata', 'name'),
            version=cfg.get('metadata', 'version'), platform=get_platform(),
        )
        # Convert the .exe to an unpacked egg
        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
        dist.location = egg_path
        egg_tmp = egg_path + '.tmp'
        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
        self.exe_to_egg(dist_filename, egg_tmp)
        # Write EGG-INFO/PKG-INFO
        if not os.path.exists(pkg_inf):
            f = open(pkg_inf, 'w')
            f.write('Metadata-Version: 1.0\n')
            for k, v in cfg.items('metadata'):
                if k != 'target_version':
                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
            f.close()
        script_dir = os.path.join(_egg_info, 'scripts')
        # delete entry-point scripts to avoid duping
        self.delete_blockers([
            os.path.join(script_dir, args[0])
            for args in ScriptWriter.get_args(dist)
        ])
        # Build .egg file from tmpdir
        bdist_egg.make_zipfile(
            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
        )
        # install the .egg
        return self.install_egg(egg_path, tmpdir)
    def exe_to_egg(self, dist_filename, egg_tmp):
        """Extract a bdist_wininst to the directories an egg would use"""
        # Check for .pth file and set up prefix translations
        prefixes = get_exe_prefixes(dist_filename)
        to_compile = []
        native_libs = []
        top_level = {}

        def process(src, dst):
            # Per-member filter for unpack_archive: remap wininst paths to
            # egg layout and record modules, native libs, and .py files.
            s = src.lower()
            for old, new in prefixes:
                if s.startswith(old):
                    src = new + src[len(old):]
                    parts = src.split('/')
                    dst = os.path.join(egg_tmp, *parts)
                    dl = dst.lower()
                    if dl.endswith('.pyd') or dl.endswith('.dll'):
                        parts[-1] = bdist_egg.strip_module(parts[-1])
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        native_libs.append(src)
                    elif dl.endswith('.py') and old != 'SCRIPTS/':
                        top_level[os.path.splitext(parts[0])[0]] = 1
                        to_compile.append(dst)
                    return dst
            if not src.endswith('.pth'):
                log.warn("WARNING: can't process %s", src)
            return None

        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
        unpack_archive(dist_filename, egg_tmp, process)
        stubs = []
        for res in native_libs:
            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
                parts = res.split('/')
                resource = parts[-1]
                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
                pyfile = os.path.join(egg_tmp, *parts)
                to_compile.append(pyfile)
                stubs.append(pyfile)
                bdist_egg.write_stub(resource, pyfile)
        self.byte_compile(to_compile)  # compile .py's
        bdist_egg.write_safety_flag(
            os.path.join(egg_tmp, 'EGG-INFO'),
            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
        # NOTE: the metadata lists are looked up via locals() below, so
        # ``top_level`` and ``native_libs`` must keep these exact names.
        for name in 'top_level', 'native_libs':
            if locals()[name]:
                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
                if not os.path.exists(txt):
                    f = open(txt, 'w')
                    f.write('\n'.join(locals()[name]) + '\n')
                    f.close()
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
    def installation_report(self, req, dist, what="Installed"):
        """Helpful installation message for display to package users"""
        # NOTE: the message is rendered from locals() -- the names
        # ``what``, ``eggloc``, ``extras``, ``name`` and ``version`` are
        # referenced by the templates and must not be renamed.
        msg = "\n%(what)s %(eggloc)s%(extras)s"
        if self.multi_version and not self.no_report:
            msg += '\n' + self.__mv_warning
            if self.install_dir not in map(normalize_path, sys.path):
                msg += '\n' + self.__id_warning
        eggloc = dist.location
        name = dist.project_name
        version = dist.version
        extras = ''  # TODO: self.report_extras(req, dist)
        return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
    def report_editable(self, spec, setup_script):
        """Render the --editable extraction report for ``spec``."""
        # NOTE: rendered from locals(); ``spec``, ``dirname`` and
        # ``python`` are referenced by the template above.
        dirname = os.path.dirname(setup_script)
        python = sys.executable
        return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
    def build_and_install(self, setup_script, setup_base):
        """Run bdist_egg for the given setup script and install each egg
        it produced; returns the list of installed Distributions."""
        args = ['bdist_egg', '--dist-dir']
        # Build into a private temp dir next to the setup script.
        dist_dir = tempfile.mkdtemp(
            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
        )
        try:
            self._set_fetcher_options(os.path.dirname(setup_script))
            args.append(dist_dir)
            self.run_setup(setup_script, setup_base, args)
            all_eggs = Environment([dist_dir])
            eggs = []
            for key in all_eggs:
                for dist in all_eggs[key]:
                    eggs.append(self.install_egg(dist.location, setup_base))
            if not eggs and not self.dry_run:
                log.warn("No eggs found in %s (setup script problem?)",
                         dist_dir)
            return eggs
        finally:
            rmtree(dist_dir)
            log.set_verbosity(self.verbose)  # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
    def unpack_and_compile(self, egg_path, destination):
        """Extract an egg into ``destination``, byte-compiling the .py
        files and fixing execute bits on native libraries."""
        to_compile = []
        to_chmod = []

        def pf(src, dst):
            # Progress filter: record what needs compiling / chmodding as
            # the archive is unpacked; returning None skips extraction
            # (used for dry runs).
            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
                to_compile.append(dst)
            elif dst.endswith('.dll') or dst.endswith('.so'):
                to_chmod.append(dst)
            self.unpack_progress(src, dst)
            return not self.dry_run and dst or None

        unpack_archive(egg_path, destination, pf)
        self.byte_compile(to_compile)
        if not self.dry_run:
            for f in to_chmod:
                # Add read/execute bits, clear setuid/setgid-unsafe bits.
                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
                chmod(f, mode)
    def byte_compile(self, to_compile):
        """Byte-compile the given .py files (plus an optimized pass when
        --optimize is set), unless bytecode writing is disabled."""
        if sys.dont_write_bytecode:
            self.warn('byte-compiling is disabled, skipping.')
            return
        from distutils.util import byte_compile
        try:
            # try to make the byte compile messages quieter
            log.set_verbosity(self.verbose - 1)
            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
            if self.optimize:
                byte_compile(
                    to_compile, optimize=self.optimize, force=1,
                    dry_run=self.dry_run,
                )
        finally:
            log.set_verbosity(self.verbose)  # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
    """Render the "bad install directory" error with the current
    install_dir and PYTHONPATH filled in."""
    template = self.__no_default_msg
    return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
    """Make sure there's a site.py in the target dir, if needed"""
    if self.sitepy_installed:
        return  # already did it, or don't need to

    sitepy = os.path.join(self.install_dir, "site.py")
    # Bundled patched site.py shipped inside setuptools.
    source = resource_string("setuptools", "site-patch.py")
    source = source.decode('utf-8')
    current = ""

    if os.path.exists(sitepy):
        log.debug("Checking existing site.py in %s", self.install_dir)
        with io.open(sitepy) as strm:
            current = strm.read()

        if not current.startswith('def __boot():'):
            # Refuse to overwrite a site.py we didn't generate ourselves.
            raise DistutilsError(
                "%s is not a setuptools-generated site.py; please"
                " remove it." % sitepy
            )

    if current != source:
        log.info("Creating %s", sitepy)
        if not self.dry_run:
            ensure_directory(sitepy)
            with io.open(sitepy, 'w', encoding='utf-8') as strm:
                strm.write(source)
        self.byte_compile([sitepy])

    self.sitepy_installed = True
def create_home_path(self):
    """Create directories under ~."""
    # Only relevant for --user installs.
    if not self.user:
        return
    home = convert_path(os.path.expanduser("~"))
    for name, path in six.iteritems(self.config_vars):
        # Only create paths rooted in the user's home directory.
        if path.startswith(home) and not os.path.isdir(path):
            self.debug_print("os.makedirs('%s', 0o700)" % path)
            os.makedirs(path, 0o700)
# Per-OS install layout templates consumed by _expand() when --prefix is
# given; "$base"/"$py_version_short" are substituted via
# distutils.util.subst_vars.
INSTALL_SCHEMES = dict(
    posix=dict(
        install_dir='$base/lib/python$py_version_short/site-packages',
        script_dir='$base/bin',
    ),
)

# Fallback (Windows-style) layout for OSes not listed in INSTALL_SCHEMES.
DEFAULT_SCHEME = dict(
    install_dir='$base/Lib/site-packages',
    script_dir='$base/Scripts',
)
def _expand(self, *attrs):
    """Expand distutils "$var" placeholders in the named attributes,
    seeding defaults from INSTALL_SCHEMES when --prefix was given."""
    config_vars = self.get_finalized_command('install').config_vars

    if self.prefix:
        # Set default install_dir/scripts from --prefix
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix
        scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
        for attr, val in scheme.items():
            # Only fill in attributes the user did not set explicitly.
            if getattr(self, attr, None) is None:
                setattr(self, attr, val)

    from distutils.util import subst_vars

    for attr in attrs:
        val = getattr(self, attr)
        if val is not None:
            val = subst_vars(val, config_vars)
            if os.name == 'posix':
                val = os.path.expanduser(val)
            setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
    """
    Return a list of 'site' dirs: PYTHONPATH entries, per-prefix
    site-packages locations, the interpreter's purelib/platlib paths,
    the user site dir, and anything site.getsitepackages() reports.
    All results are passed through normalize_path().
    """
    sitedirs = []

    # start with PYTHONPATH
    sitedirs.extend(_pythonpath())

    prefixes = [sys.prefix]
    if sys.exec_prefix != sys.prefix:
        prefixes.append(sys.exec_prefix)

    # Build "pythonX.Y" from version_info rather than sys.version[:3],
    # which wrongly yields "3.1" on Python 3.10 and later.
    py_xy = 'python{}.{}'.format(*sys.version_info[:2])

    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos'):
                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
            elif os.sep == '/':
                sitedirs.extend([
                    os.path.join(
                        prefix,
                        "lib",
                        py_xy,
                        "site-packages",
                    ),
                    os.path.join(prefix, "lib", "site-python"),
                ])
            else:
                sitedirs.extend([
                    prefix,
                    os.path.join(prefix, "lib", "site-packages"),
                ])
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        home_sp = os.path.join(
                            home,
                            'Library',
                            'Python',
                            '{}.{}'.format(*sys.version_info[:2]),
                            'site-packages',
                        )
                        sitedirs.append(home_sp)

    lib_paths = get_path('purelib'), get_path('platlib')
    for site_lib in lib_paths:
        if site_lib not in sitedirs:
            sitedirs.append(site_lib)

    if site.ENABLE_USER_SITE:
        sitedirs.append(site.USER_SITE)

    # getsitepackages() is absent on some interpreters (e.g. older
    # virtualenv environments), hence the AttributeError guard.
    try:
        sitedirs.extend(site.getsitepackages())
    except AttributeError:
        pass

    sitedirs = list(map(normalize_path, sitedirs))

    return sitedirs
def expand_paths(inputs):
    """Yield sys.path directories that might contain "old-style" packages.

    Yields ``(dirname, filenames)`` pairs for each unique existing input
    directory, plus for each directory listed by any foreign .pth file
    found along the way (easy-install.pth / setuptools.pth are skipped,
    as are 'import ...' lines, duplicates, and non-existent paths).
    """
    seen = set()

    for dirname in inputs:
        dirname = normalize_path(dirname)
        if dirname in seen:
            continue

        seen.add(dirname)
        if not os.path.isdir(dirname):
            continue

        files = os.listdir(dirname)
        yield dirname, files

        for name in files:
            if not name.endswith('.pth'):
                # We only care about the .pth files
                continue
            if name in ('easy-install.pth', 'setuptools.pth'):
                # Ignore .pth files that we control
                continue

            # Read the .pth file; use a context manager so the handle is
            # closed even if yield_lines raises.
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))

            # Yield existing non-dupe, non-import directory lines from it
            for line in lines:
                if not line.startswith("import"):
                    line = normalize_path(line.rstrip())
                    if line not in seen:
                        seen.add(line)

                        if not os.path.isdir(line):
                            continue

                        yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
    """Extract configuration data from a bdist_wininst .exe

    Returns a configparser.RawConfigParser, or None if the file does not
    look like a valid bdist_wininst installer.
    """
    f = open(dist_filename, 'rb')
    try:
        # NOTE: zipfile._EndRecData is a private stdlib helper; it locates
        # the zip "end of central directory" record.
        endrec = zipfile._EndRecData(f)
        if endrec is None:
            return None

        # Size of the data prepended before the embedded zip archive
        # (installer stub + config). Assumes the _EndRecData tuple layout
        # (offsets at indices 5, 6, 9) -- stdlib-internal, verify on
        # interpreter upgrades.
        prepended = (endrec[9] - endrec[5]) - endrec[6]
        if prepended < 12:  # no wininst data here
            return None
        f.seek(prepended - 12)

        # Trailer: magic tag, config length, bitmap length.
        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
        if tag not in (0x1234567A, 0x1234567B):
            return None  # not a valid tag

        f.seek(prepended - (12 + cfglen))
        # Pre-seed keys so lookups never KeyError on old installers.
        init = {'version': '', 'target_version': ''}
        cfg = configparser.RawConfigParser(init)
        try:
            part = f.read(cfglen)
            # Read up to the first null byte.
            config = part.split(b'\0', 1)[0]
            # Now the config is in bytes, but for RawConfigParser, it should
            #  be text, so decode it.
            config = config.decode(sys.getfilesystemencoding())
            cfg.readfp(six.StringIO(config))
        except configparser.Error:
            return None
        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
            return None
        return cfg
    finally:
        f.close()
def get_exe_prefixes(exe_filename):
    """Get exe->egg path translations for a given .exe file

    Returns (archive-prefix, egg-prefix) pairs, lowercased and sorted
    longest-first, used to remap bdist_wininst archive paths into egg
    layout.
    """
    prefixes = [
        ('PURELIB/', ''),
        ('PLATLIB/pywin32_system32', ''),
        ('PLATLIB/', ''),
        ('SCRIPTS/', 'EGG-INFO/scripts/'),
        ('DATA/lib/site-packages', ''),
    ]
    z = zipfile.ZipFile(exe_filename)
    try:
        for info in z.infolist():
            name = info.filename
            parts = name.split('/')
            # A '<root>/<pkg>.egg-info/PKG-INFO' entry maps that
            # .egg-info directory onto EGG-INFO/.
            if len(parts) == 3 and parts[2] == 'PKG-INFO':
                if parts[1].endswith('.egg-info'):
                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
                    break
            if len(parts) != 2 or not name.endswith('.pth'):
                continue
            if name.endswith('-nspkg.pth'):
                # Namespace-package stubs are regenerated, not copied.
                continue
            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
                contents = z.read(name)
                if six.PY3:
                    contents = contents.decode()
                # Each non-import .pth line names an extra directory to
                # treat as a top-level prefix.
                for pth in yield_lines(contents):
                    pth = pth.strip().replace('\\', '/')
                    if not pth.startswith('import'):
                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
    finally:
        z.close()
    prefixes = [(x.lower(), y) for x, y in prefixes]
    prefixes.sort()
    prefixes.reverse()
    return prefixes
class PthDistributions(Environment):
    """A .pth file with Distribution paths in it"""

    # True when in-memory state differs from what is on disk.
    dirty = False

    def __init__(self, filename, sitedirs=()):
        self.filename = filename
        self.sitedirs = list(map(normalize_path, sitedirs))
        self.basedir = normalize_path(os.path.dirname(self.filename))
        self._load()
        Environment.__init__(self, [], None, None)
        # Register every distribution found under the loaded paths.
        for path in yield_lines(self.paths):
            list(map(self.add, find_distributions(path, True)))

    def _load(self):
        """Parse the .pth file into self.paths, pruning dead/duplicate
        entries (which marks the object dirty so they get rewritten)."""
        self.paths = []
        saw_import = False
        seen = dict.fromkeys(self.sitedirs)
        if os.path.isfile(self.filename):
            f = open(self.filename, 'rt')
            for line in f:
                if line.startswith('import'):
                    saw_import = True
                    continue
                path = line.rstrip()
                self.paths.append(path)
                # Keep blank/comment lines verbatim but don't resolve them.
                if not path.strip() or path.strip().startswith('#'):
                    continue
                # skip non-existent paths, in case somebody deleted a package
                # manually, and duplicate paths as well
                path = self.paths[-1] = normalize_path(
                    os.path.join(self.basedir, path)
                )
                if not os.path.exists(path) or path in seen:
                    self.paths.pop()  # skip it
                    self.dirty = True  # we cleaned up, so we're dirty now :)
                    continue
                seen[path] = 1
            f.close()

        if self.paths and not saw_import:
            self.dirty = True  # ensure anything we touch has import wrappers
        # delete trailing blank lines
        while self.paths and not self.paths[-1].strip():
            self.paths.pop()

    def save(self):
        """Write changed .pth file back to disk"""
        if not self.dirty:
            return

        rel_paths = list(map(self.make_relative, self.paths))
        if rel_paths:
            log.debug("Saving %s", self.filename)
            lines = self._wrap_lines(rel_paths)
            data = '\n'.join(lines) + '\n'

            if os.path.islink(self.filename):
                # Replace the link target, not the link itself.
                os.unlink(self.filename)
            with open(self.filename, 'wt') as f:
                f.write(data)

        elif os.path.exists(self.filename):
            log.debug("Deleting empty %s", self.filename)
            os.unlink(self.filename)

        self.dirty = False

    @staticmethod
    def _wrap_lines(lines):
        # Hook point: subclasses may wrap the path lines (see
        # RewritePthDistributions).
        return lines

    def add(self, dist):
        """Add `dist` to the distribution map"""
        new_path = (
            dist.location not in self.paths and (
                dist.location not in self.sitedirs or
                # account for '.' being in PYTHONPATH
                dist.location == os.getcwd()
            )
        )
        if new_path:
            self.paths.append(dist.location)
            self.dirty = True
        Environment.add(self, dist)

    def remove(self, dist):
        """Remove `dist` from the distribution map"""
        while dist.location in self.paths:
            self.paths.remove(dist.location)
            self.dirty = True
        Environment.remove(self, dist)

    def make_relative(self, path):
        """Return `path` relative to this .pth file's directory when it
        lies underneath it; otherwise return `path` unchanged."""
        npath, last = os.path.split(normalize_path(path))
        baselen = len(self.basedir)
        parts = [last]
        sep = os.altsep == '/' and '/' or os.sep
        while len(npath) >= baselen:
            if npath == self.basedir:
                parts.append(os.curdir)
                parts.reverse()
                return sep.join(parts)
            npath, last = os.path.split(npath)
            parts.append(last)
        else:
            return path
class RewritePthDistributions(PthDistributions):
    """PthDistributions variant that wraps the path list in code which
    moves the added entries to a fixed position in sys.path instead of
    simply appending them."""

    @classmethod
    def _wrap_lines(cls, lines):
        yield cls.prelude
        for line in lines:
            yield line
        yield cls.postlude

    # Executed before the paths: remember the current sys.path length.
    prelude = _one_liner("""
        import sys
        sys.__plen = len(sys.path)
        """)
    # Executed after: splice the newly appended entries in at the saved
    # insertion point (sys.__egginsert).
    postlude = _one_liner("""
        import sys
        new = sys.path[sys.__plen:]
        del sys.path[sys.__plen:]
        p = getattr(sys, '__egginsert', 0)
        sys.path[p:p] = new
        sys.__egginsert = p + len(new)
        """)
# Opt-in switch: setting SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite makes all
# .pth files written through PthDistributions use the sys.path-rewriting
# variant above.
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
    PthDistributions = RewritePthDistributions
def _first_line_re():
    """
    Return a str-pattern version of distutils' first_line_re, recompiling
    it when the interpreter ships it as a bytes pattern.
    """
    pattern = first_line_re.pattern
    if isinstance(pattern, str):
        return first_line_re

    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
    return re.compile(pattern.decode())
def auto_chmod(func, arg, exc):
    """shutil.rmtree onerror handler: on Windows, clear the read-only bit
    and retry the failed unlink; otherwise re-raise with the failing
    function and argument appended to the message."""
    if func in [os.unlink, os.remove] and os.name == 'nt':
        chmod(arg, stat.S_IWRITE)
        return func(arg)
    et, ev, _ = sys.exc_info()
    # NOTE(review): ev is an exception *instance*, so ev[0]/ev[1] indexing
    # relies on Python 2 EnvironmentError tuple behavior -- confirm this
    # path still works (or is unreachable) on Python 3.
    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution, are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter instance
    is used to load data from a new zipped egg archive, it may cause the
    operation to attempt to locate the requested data in the wrong location -
    one indicated by the original distribution's zip archive directory
    information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or even worse, it may fail silently &
    return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the new
    distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we can not catch every conceivable
    usage from here, and we clear only those that we know of and have found to
    cause problems if left alive. Any remaining caches should be updated by
    whomever is in charge of maintaining them, i.e. they should be ready to
    handle us replacing their zip archives with new distributions at runtime.

    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    # set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
    normalized_path = normalize_path(dist_path)
    _uncache(normalized_path, sys.path_importer_cache)
    if fix_zipimporter_caches:
        _replace_zip_directory_cache_data(normalized_path)
    else:
        # Here, even though we do not want to fix existing and now stale
        # zipimporter cache information, we still want to remove it. Related to
        # Python's zip archive directory information cache, we clear each of
        # its stale entries in two phases:
        #   1. Clear the entry so attempting to access zip archive information
        #      via any existing stale zipimport.zipimporter instances fails.
        #   2. Remove the entry from the cache so any newly constructed
        #      zipimport.zipimporter instances do not end up using old stale
        #      zip archive directory information.
        # This whole stale data removal step does not seem strictly necessary,
        # but has been left in because it was done before we started replacing
        # the zip archive directory information cache content if possible, and
        # there are no relevant unit tests that we can depend on to tell us if
        # this is really needed.
        _remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
    """
    Return the keys in `cache` that refer to `normalized_path` or to a
    path nested under it (e.g. zip archives embedded inside other zip
    archives), regardless of spelling differences such as character case
    or alternative path separators.
    """
    prefix_len = len(normalized_path)
    matches = []
    for key in cache:
        candidate = normalize_path(key)
        if not candidate.startswith(normalized_path):
            continue
        # Accept exact matches and genuine sub-paths only -- the character
        # right after the prefix must be a separator or nothing at all.
        if candidate[prefix_len:prefix_len + 1] in (os.sep, ''):
            matches.append(key)
    return matches
def _update_zipimporter_cache(normalized_path, cache, updater=None):
    """
    Update zipimporter cache data for a given normalized path.

    Any sub-path entries are processed as well, i.e. those corresponding to zip
    archives embedded in other zip archives.

    Given updater is a callable taking a cache entry key and the original entry
    (after already removing the entry from the cache), and expected to update
    the entry and possibly return a new one to be inserted in its place.
    Returning None indicates that the entry should not be replaced with a new
    one. If no updater is given, the cache entries are simply removed without
    any additional processing, the same as if the updater simply returned None.

    """
    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
        # not support the complete dict interface:
        # * Does not support item assignment, thus not allowing this function
        #   to be used only for removing existing cache entries.
        #  * Does not support the dict.pop() method, forcing us to use the
        #   get/del patterns instead. For more detailed information see the
        #   following links:
        # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
        # https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
        old_entry = cache[p]
        del cache[p]
        new_entry = updater and updater(p, old_entry)
        if new_entry is not None:
            cache[p] = new_entry
def _uncache(normalized_path, cache):
    # Remove all cache entries for normalized_path (and sub-paths) without
    # replacement -- no updater means plain deletion.
    _update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
    """Clear (in place) and then drop all zip-directory cache entries for
    normalized_path, so both existing and future zipimporter instances
    stop seeing the stale archive layout."""
    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
        # Mutate the shared dict so live zipimporter instances lose their
        # stale directory info; returning None drops the cache entry.
        old_entry.clear()

    _update_zipimporter_cache(
        normalized_path, zipimport._zip_directory_cache,
        updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
    _replace_zip_directory_cache_data = \
        _remove_and_clear_zip_directory_cache_data
else:

    def _replace_zip_directory_cache_data(normalized_path):
        """Refresh (rather than drop) the cached zip directory data for
        normalized_path so existing zipimporter instances keep working."""
        def replace_cached_zip_archive_directory_data(path, old_entry):
            # N.B. In theory, we could load the zip directory information just
            # once for all updated path spellings, and then copy it locally and
            # update its contained path strings to contain the correct
            # spelling, but that seems like a way too invasive move (this cache
            # structure is not officially documented anywhere and could in
            # theory change with new Python releases) for no significant
            # benefit.
            old_entry.clear()
            # Constructing a fresh zipimporter repopulates the shared
            # _zip_directory_cache entry for `path`.
            zipimport.zipimporter(path)
            old_entry.update(zipimport._zip_directory_cache[path])
            return old_entry

        _update_zipimporter_cache(
            normalized_path, zipimport._zip_directory_cache,
            updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
    """Return True when `text` compiles cleanly as a Python script."""
    try:
        compile(text, filename, 'exec')
    except (SyntaxError, TypeError):
        return False
    return True
def is_sh(executable):
    """Determine if the specified executable is a .sh (contains a #! line).

    If the file cannot be opened or read, the path itself is returned (a
    truthy value), preserving the historical behavior.
    """
    try:
        fp = io.open(executable, encoding='latin-1')
        try:
            magic = fp.read(2)
        finally:
            fp.close()
    except (OSError, IOError):
        return executable
    return magic == '#!'
def nt_quote_arg(arg):
    """Quote a command line argument according to Windows parsing rules"""
    quoted = subprocess.list2cmdline([arg])
    return quoted
def is_python_script(script_text, filename):
    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
    """
    if filename.endswith(('.py', '.pyw')):
        # The extension alone marks it as Python.
        return True

    if is_python(script_text, filename):
        # Syntactically valid Python counts too.
        return True

    if script_text.startswith('#!'):
        # It begins with a '#!' line, so check if 'python' is in it somewhere
        shebang = script_text.splitlines()[0]
        return 'python' in shebang.lower()

    return False  # Not any Python I can recognize
try:
    from os import chmod as _chmod
except ImportError:
    # Jython compatibility: os.chmod may be missing there, so fall back
    # to a no-op.
    def _chmod(*args):
        pass
def chmod(path, mode):
    """Best-effort chmod: log the change and swallow (but log) any
    os.error, e.g. on filesystems that don't support mode bits."""
    log.debug("changing mode of %s to %o", path, mode)
    try:
        _chmod(path, mode)
    except os.error as e:
        log.debug("chmod failed: %s", e)
class CommandSpec(list):
    """
    A command spec for a #! header, specified as a list of arguments akin to
    those passed to Popen.
    """

    options = []
    split_args = dict()

    @classmethod
    def best(cls):
        """
        Choose the best CommandSpec class based on environmental conditions.
        """
        return cls

    @classmethod
    def _sys_executable(cls):
        # Prefer the venv launcher override when present.
        fallback = os.path.normpath(sys.executable)
        return os.environ.get('__PYVENV_LAUNCHER__', fallback)

    @classmethod
    def from_param(cls, param):
        """
        Construct a CommandSpec from a parameter to build_scripts, which may
        be None, a list, a string, or an existing CommandSpec.
        """
        if isinstance(param, cls):
            return param
        if isinstance(param, list):
            return cls(param)
        if param is None:
            return cls.from_environment()
        # otherwise, assume it's a string.
        return cls.from_string(param)

    @classmethod
    def from_environment(cls):
        return cls([cls._sys_executable()])

    @classmethod
    def from_string(cls, string):
        """
        Construct a command spec from a simple string representing a command
        line parseable by shlex.split.
        """
        return cls(shlex.split(string, **cls.split_args))

    def install_options(self, script_text):
        # Remember any shebang options found in the script; force -x when
        # the resulting command line is not pure ASCII.
        self.options = shlex.split(self._extract_options(script_text))
        cmdline = subprocess.list2cmdline(self)
        if not isascii(cmdline):
            self.options[:0] = ['-x']

    @staticmethod
    def _extract_options(orig_script):
        """
        Extract any options from the first line of the script.
        """
        first = (orig_script + '\n').splitlines()[0]
        match = _first_line_re().match(first)
        options = match.group(1) or '' if match else ''
        return options.strip()

    def as_header(self):
        return self._render(self + list(self.options))

    @staticmethod
    def _strip_quotes(item):
        for quote in ('"', "'"):
            if item.startswith(quote) and item.endswith(quote):
                return item[1:-1]
        return item

    @staticmethod
    def _render(items):
        cleaned = (
            CommandSpec._strip_quotes(item.strip()) for item in items
        )
        return '#!' + subprocess.list2cmdline(cleaned) + '\n'
# For pbr compat; will be removed in a future version.
# (Snapshot of the interpreter path taken at import time.)
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
    # Non-POSIX shlex mode so Windows backslashes survive splitting.
    split_args = {'posix': False}
class ScriptWriter(object):
    """
    Encapsulates behavior around writing entry point scripts for console and
    gui apps.
    """

    # Stub script generated for each entry point; filled via %-formatting
    # with spec/group/name in get_args().
    template = textwrap.dedent(r"""
        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
        __requires__ = %(spec)r
        import re
        import sys
        from pkg_resources import load_entry_point

        if __name__ == '__main__':
            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
            sys.exit(
                load_entry_point(%(spec)r, %(group)r, %(name)r)()
            )
    """).lstrip()

    command_spec_class = CommandSpec

    @classmethod
    def get_script_args(cls, dist, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_args", DeprecationWarning)
        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
        header = cls.get_script_header("", executable, wininst)
        return writer.get_args(dist, header)

    @classmethod
    def get_script_header(cls, script_text, executable=None, wininst=False):
        # for backward compatibility
        warnings.warn("Use get_header", DeprecationWarning)
        if wininst:
            executable = "python.exe"
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()

    @classmethod
    def get_args(cls, dist, header=None):
        """
        Yield write_script() argument tuples for a distribution's
        console_scripts and gui_scripts entry points.
        """
        if header is None:
            header = cls.get_header()
        spec = str(dist.as_requirement())
        for type_ in 'console', 'gui':
            group = type_ + '_scripts'
            for name, ep in dist.get_entry_map(group).items():
                cls._ensure_safe_name(name)
                # template interpolates spec/group/name from locals().
                script_text = cls.template % locals()
                args = cls._get_script_args(type_, name, header, script_text)
                for res in args:
                    yield res

    @staticmethod
    def _ensure_safe_name(name):
        """
        Prevent paths in *_scripts entry point names.
        """
        has_path_sep = re.search(r'[\\/]', name)
        if has_path_sep:
            raise ValueError("Path separators not allowed in script names")

    @classmethod
    def get_writer(cls, force_windows):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return WindowsScriptWriter.best() if force_windows else cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter for this environment.
        """
        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
            return WindowsScriptWriter.best()
        else:
            return cls

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        # Simply write the stub with no extension.
        yield (name, header + script_text)

    @classmethod
    def get_header(cls, script_text="", executable=None):
        """Create a #! line, getting options (if any) from script_text"""
        cmd = cls.command_spec_class.best().from_param(executable)
        cmd.install_options(script_text)
        return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
    """ScriptWriter variant that emits Windows-friendly script files."""

    command_spec_class = WindowsCommandSpec

    @classmethod
    def get_writer(cls):
        # for backward compatibility
        warnings.warn("Use best", DeprecationWarning)
        return cls.best()

    @classmethod
    def best(cls):
        """
        Select the best ScriptWriter suitable for Windows
        """
        writer_lookup = dict(
            executable=WindowsExecutableLauncherWriter,
            natural=cls,
        )
        # for compatibility, use the executable launcher by default
        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
        return writer_lookup[launcher]

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        "For Windows, add a .py extension"
        ext = dict(console='.pya', gui='.pyw')[type_]
        if ext not in os.environ['PATHEXT'].lower().split(';'):
            msg = (
                "{ext} not listed in PATHEXT; scripts will not be "
                "recognized as executables."
            ).format(**locals())
            warnings.warn(msg, UserWarning)
        # Blocker names: alternate spellings of this script that must be
        # removed so they can't shadow the one we write.
        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
        old.remove(ext)
        header = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield name + ext, header + script_text, 't', blockers

    @classmethod
    def _adjust_header(cls, type_, orig_header):
        """
        Make sure 'pythonw' is used for gui and 'python' is used for
        console (regardless of what sys.executable is).
        """
        pattern = 'pythonw.exe'
        repl = 'python.exe'
        if type_ == 'gui':
            pattern, repl = repl, pattern
        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
        new_header = pattern_ob.sub(string=orig_header, repl=repl)
        return new_header if cls._use_header(new_header) else orig_header

    @staticmethod
    def _use_header(new_header):
        """
        Should _adjust_header use the replaced header?

        On non-windows systems, always use. On
        Windows systems, only use the replaced header if it resolves
        to an executable on the system.
        """
        # Strip the '#!' prefix, trailing newline, and any quotes.
        clean_header = new_header[2:-1].strip('"')
        return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
    """Writes a -script.py/.pyw stub plus a native .exe launcher (and,
    on 32-bit, a manifest) for each entry point."""

    @classmethod
    def _get_script_args(cls, type_, name, header, script_text):
        """
        For Windows, add a .py extension and an .exe launcher
        """
        if type_ == 'gui':
            launcher_type = 'gui'
            ext = '-script.pyw'
            old = ['.pyw']
        else:
            launcher_type = 'cli'
            ext = '-script.py'
            old = ['.py', '.pyc', '.pyo']
        hdr = cls._adjust_header(type_, header)
        blockers = [name + x for x in old]
        yield (name + ext, hdr + script_text, 't', blockers)
        yield (
            name + '.exe', get_win_launcher(launcher_type),
            'b'  # write in binary mode
        )
        if not is_64bit():
            # install a manifest for the launcher to prevent Windows
            # from detecting it as an installer (which it will for
            #  launchers like easy_install.exe). Consider only
            #  adding a manifest for launchers detected as installers.
            #  See Distribute #143 for details.
            m_name = name + '.exe.manifest'
            yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility: module-level aliases of the deprecated
# classmethods, kept so old callers keep working.
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
    """
    Load the Windows launcher (executable) suitable for launching a script.

    `type` should be either 'cli' or 'gui'

    Returns the executable as a byte string.
    """
    # Select the 32- or 64-bit stub bundled with setuptools, e.g.
    # 'cli.exe' -> 'cli-64.exe'.
    bits = "-64." if is_64bit() else "-32."
    launcher_fn = ('%s.exe' % type).replace(".", bits)
    return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
    """Return the launcher manifest XML with %(name)s interpolated from
    the local variables (i.e. the script name)."""
    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
    if six.PY2:
        return manifest % vars()
    else:
        # Bytes resource on Python 3: decode before %-formatting.
        return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
    """shutil.rmtree wrapper whose default error handler clears the
    read-only bit and retries on Windows (see auto_chmod)."""
    return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
    """Return the process umask without leaving it modified."""
    probe = os.umask(0o022)  # setting is the only way to read it...
    os.umask(probe)          # ...so restore the original right away
    return probe
def bootstrap():
    # This function is called when setuptools*.egg is run using /bin/sh
    import setuptools

    # Install the egg the script was launched from: make its directory
    # both argv[0] and the requirement argument, then run main().
    argv0 = os.path.dirname(setuptools.__path__[0])
    sys.argv[0] = argv0
    sys.argv.append(argv0)
    main()
def main(argv=None, **kw):
    """Run the easy_install command with `argv` (defaults to sys.argv[1:]),
    patching the distutils usage text for the duration."""
    from setuptools import setup
    from setuptools.dist import Distribution

    class DistributionWithoutHelpCommands(Distribution):
        # Suppress the generic distutils usage banner; help output goes
        # through the patched gen_usage instead.
        common_usage = ""

        def _show_help(self, *args, **kw):
            with _patch_usage():
                Distribution._show_help(self, *args, **kw)

    if argv is None:
        argv = sys.argv[1:]

    with _patch_usage():
        setup(
            script_args=['-q', 'easy_install', '-v'] + argv,
            script_name=sys.argv[0] or 'easy_install',
            distclass=DistributionWithoutHelpCommands,
            **kw
        )
@contextlib.contextmanager
def _patch_usage():
    """Temporarily replace distutils.core.gen_usage with an easy_install
    specific usage message; restored on exit."""
    import distutils.core
    USAGE = textwrap.dedent("""
        usage: %(script)s [options] requirement_or_url ...
           or: %(script)s --help
        """).lstrip()

    def gen_usage(script_name):
        return USAGE % dict(
            script=os.path.basename(script_name),
        )

    saved = distutils.core.gen_usage
    distutils.core.gen_usage = gen_usage
    try:
        yield
    finally:
        # Always restore the original, even if setup() raised.
        distutils.core.gen_usage = saved
// Test bolt instrumentation won't generate a binary with any segment that
// is writable and executable. Basically we want to put `.bolt.instr.counters`
// section into its own segment, separated from its surrounding RX sections.

// REQUIRES: system-linux

// Minimal code so the shared object has something to instrument; foo also
// serves as the --init/--fini entry point in the link line below.
void foo() {}
void bar() { foo(); }

// RUN: %clang %cflags -c %s -o %t.o
// RUN: ld.lld -q -o %t.so %t.o -shared --init=foo --fini=foo
// RUN: llvm-bolt --instrument %t.so -o %tt.so
// RUN: llvm-readelf -l %tt.so | FileCheck %s

// CHECK-NOT: RWE
// CHECK: {{[0-9]*}} .bolt.instr.counters {{$}}
/**
* \file psa/crypto_struct.h
*
* \brief PSA cryptography module: Mbed TLS structured type implementations
*
* \note This file may not be included directly. Applications must
* include psa/crypto.h.
*
* This file contains the definitions of some data structures with
* implementation-specific definitions.
*
* In implementations with isolation between the application and the
* cryptography module, it is expected that the front-end and the back-end
* would have different versions of this file.
*
* <h3>Design notes about multipart operation structures</h3>
*
* For multipart operations without driver delegation support, each multipart
* operation structure contains a `psa_algorithm_t alg` field which indicates
* which specific algorithm the structure is for. When the structure is not in
* use, `alg` is 0. Most of the structure consists of a union which is
* discriminated by `alg`.
*
* For multipart operations with driver delegation support, each multipart
* operation structure contains an `unsigned int id` field indicating which
* driver got assigned to do the operation. When the structure is not in use,
* 'id' is 0. The structure contains also a driver context which is the union
* of the contexts of all drivers able to handle the type of multipart
* operation.
*
* Note that when `alg` or `id` is 0, the content of other fields is undefined.
* In particular, it is not guaranteed that a freshly-initialized structure
* is all-zero: we initialize structures to something like `{0, 0}`, which
* is only guaranteed to initializes the first member of the union;
* GCC and Clang initialize the whole structure to 0 (at the time of writing),
* but MSVC and CompCert don't.
*
* In Mbed TLS, multipart operation structures live independently from
* the key. This allows Mbed TLS to free the key objects when destroying
* a key slot. If a multipart operation needs to remember the key after
* the setup function returns, the operation structure needs to contain a
* copy of the key.
*/
/*
* Copyright The Mbed TLS Contributors
* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
*/
#ifndef PSA_CRYPTO_STRUCT_H
#define PSA_CRYPTO_STRUCT_H
#include "mbedtls/private_access.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
* Include the build-time configuration information header. Here, we do not
* include `"mbedtls/build_info.h"` directly but `"psa/build_info.h"`, which
* is basically just an alias to it. This is to ease the maintenance of the
* TF-PSA-Crypto repository which has a different build system and
* configuration.
*/
#include "psa/build_info.h"
/* Include the context definition for the compiled-in drivers for the primitive
* algorithms. */
#include "psa/crypto_driver_contexts_primitives.h"
struct psa_hash_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: the operation state lives in the crypto service;
     * the client keeps just an opaque handle to it. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_driver_wrappers.h.
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. the driver context is not active, in use). */
    unsigned int MBEDTLS_PRIVATE(id);
    psa_driver_hash_context_t MBEDTLS_PRIVATE(ctx);
#endif
};

/* Initializer layout must match the active branch of the struct above;
 * only the first member of the driver-context union is guaranteed zeroed
 * (see the design notes at the top of this file). */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_HASH_OPERATION_INIT { 0 }
#else
#define PSA_HASH_OPERATION_INIT { 0, { 0 } }
#endif

/* Function-style alternative to the PSA_HASH_OPERATION_INIT macro. */
static inline struct psa_hash_operation_s psa_hash_operation_init(void)
{
    const struct psa_hash_operation_s v = PSA_HASH_OPERATION_INIT;
    return v;
}
struct psa_cipher_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_crypto_driver_wrappers.h
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. none of the driver contexts are active). */
    unsigned int MBEDTLS_PRIVATE(id);
    /* iv_required/iv_set track whether the algorithm needs an IV and
     * whether one has been supplied yet. */
    unsigned int MBEDTLS_PRIVATE(iv_required) : 1;
    unsigned int MBEDTLS_PRIVATE(iv_set) : 1;
    uint8_t MBEDTLS_PRIVATE(default_iv_length);
    psa_driver_cipher_context_t MBEDTLS_PRIVATE(ctx);
#endif
};

/* Initializer layout must match the active branch of the struct above. */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_CIPHER_OPERATION_INIT { 0 }
#else
#define PSA_CIPHER_OPERATION_INIT { 0, 0, 0, 0, { 0 } }
#endif

/* Function-style alternative to the PSA_CIPHER_OPERATION_INIT macro. */
static inline struct psa_cipher_operation_s psa_cipher_operation_init(void)
{
    const struct psa_cipher_operation_s v = PSA_CIPHER_OPERATION_INIT;
    return v;
}
/* Include the context definition for the compiled-in drivers for the composite
* algorithms. */
#include "psa/crypto_driver_contexts_composites.h"
struct psa_mac_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_driver_wrappers.h
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. none of the driver contexts are active). */
    unsigned int MBEDTLS_PRIVATE(id);
    uint8_t MBEDTLS_PRIVATE(mac_size);
    /* is_sign distinguishes sign-setup from verify-setup operations. */
    unsigned int MBEDTLS_PRIVATE(is_sign) : 1;
    psa_driver_mac_context_t MBEDTLS_PRIVATE(ctx);
#endif
};

/* Initializer layout must match the active branch of the struct above. */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_MAC_OPERATION_INIT { 0 }
#else
#define PSA_MAC_OPERATION_INIT { 0, 0, 0, { 0 } }
#endif

/* Function-style alternative to the PSA_MAC_OPERATION_INIT macro. */
static inline struct psa_mac_operation_s psa_mac_operation_init(void)
{
    const struct psa_mac_operation_s v = PSA_MAC_OPERATION_INIT;
    return v;
}
struct psa_aead_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_crypto_driver_wrappers.h
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. none of the driver contexts are active). */
    unsigned int MBEDTLS_PRIVATE(id);
    psa_algorithm_t MBEDTLS_PRIVATE(alg);
    psa_key_type_t MBEDTLS_PRIVATE(key_type);
    /* Byte counts still expected for additional data and the message
     * body, used to check declared lengths against actual input. */
    size_t MBEDTLS_PRIVATE(ad_remaining);
    size_t MBEDTLS_PRIVATE(body_remaining);
    /* State bits tracking the multipart protocol's progress. */
    unsigned int MBEDTLS_PRIVATE(nonce_set) : 1;
    unsigned int MBEDTLS_PRIVATE(lengths_set) : 1;
    unsigned int MBEDTLS_PRIVATE(ad_started) : 1;
    unsigned int MBEDTLS_PRIVATE(body_started) : 1;
    unsigned int MBEDTLS_PRIVATE(is_encrypt) : 1;
    psa_driver_aead_context_t MBEDTLS_PRIVATE(ctx);
#endif
};

/* Initializer layout must match the active branch of the struct above. */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_AEAD_OPERATION_INIT { 0 }
#else
#define PSA_AEAD_OPERATION_INIT { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, { 0 } }
#endif

/* Function-style alternative to the PSA_AEAD_OPERATION_INIT macro. */
static inline struct psa_aead_operation_s psa_aead_operation_init(void)
{
    const struct psa_aead_operation_s v = PSA_AEAD_OPERATION_INIT;
    return v;
}
/* Include the context definition for the compiled-in drivers for the key
* derivation algorithms. */
#include "psa/crypto_driver_contexts_key_derivation.h"
struct psa_key_derivation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    psa_algorithm_t MBEDTLS_PRIVATE(alg);
    /* Set when the supplied inputs permit deriving key material
     * (as opposed to only raw output). */
    unsigned int MBEDTLS_PRIVATE(can_output_key) : 1;
    /* Maximum number of bytes this operation may still output. */
    size_t MBEDTLS_PRIVATE(capacity);
    psa_driver_key_derivation_context_t MBEDTLS_PRIVATE(ctx);
#endif
};

#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_KEY_DERIVATION_OPERATION_INIT { 0 }
#else
/* This only zeroes out the first byte in the union, the rest is unspecified. */
#define PSA_KEY_DERIVATION_OPERATION_INIT { 0, 0, 0, { 0 } }
#endif

/* Function-style alternative to PSA_KEY_DERIVATION_OPERATION_INIT. */
static inline struct psa_key_derivation_s psa_key_derivation_operation_init(
    void)
{
    const struct psa_key_derivation_s v = PSA_KEY_DERIVATION_OPERATION_INIT;
    return v;
}
/* Production parameters for key generation/derivation. */
struct psa_custom_key_parameters_s {
    /* Future versions may add other fields in this structure. */
    uint32_t flags;
};

/** The default production parameters for key generation or key derivation.
 *
 * Calling psa_generate_key_custom() or psa_key_derivation_output_key_custom()
 * with `custom=PSA_CUSTOM_KEY_PARAMETERS_INIT` and `custom_data_length=0` is
 * equivalent to calling psa_generate_key() or psa_key_derivation_output_key()
 * respectively.
 */
#define PSA_CUSTOM_KEY_PARAMETERS_INIT { 0 }

#ifndef __cplusplus
/* Omitted when compiling in C++, because one of the parameters is a
 * pointer to a struct with a flexible array member, and that is not
 * standard C++.
 * https://github.com/Mbed-TLS/mbedtls/issues/9020
 */
/* This is a deprecated variant of `struct psa_custom_key_parameters_s`.
 * It has exactly the same layout, plus an extra field which is a flexible
 * array member. Thus a `const struct psa_key_production_parameters_s *`
 * can be passed to any function that reads a
 * `const struct psa_custom_key_parameters_s *`.
 */
struct psa_key_production_parameters_s {
    uint32_t flags;
    uint8_t data[];
};

/** The default production parameters for key generation or key derivation.
 *
 * Calling psa_generate_key_ext() or psa_key_derivation_output_key_ext()
 * with `params=PSA_KEY_PRODUCTION_PARAMETERS_INIT` and
 * `params_data_length == 0` is equivalent to
 * calling psa_generate_key() or psa_key_derivation_output_key()
 * respectively.
 */
#define PSA_KEY_PRODUCTION_PARAMETERS_INIT { 0 }
#endif /* !__cplusplus */
/* Usage policy attached to a key: permitted usage flags plus up to two
 * permitted algorithms. NOTE(review): alg2's exact role (presumably the
 * enrollment/secondary algorithm) is defined elsewhere — confirm. */
struct psa_key_policy_s {
    psa_key_usage_t MBEDTLS_PRIVATE(usage);
    psa_algorithm_t MBEDTLS_PRIVATE(alg);
    psa_algorithm_t MBEDTLS_PRIVATE(alg2);
};
typedef struct psa_key_policy_s psa_key_policy_t;

#define PSA_KEY_POLICY_INIT { 0, 0, 0 }

/* Function-style alternative to the PSA_KEY_POLICY_INIT macro. */
static inline struct psa_key_policy_s psa_key_policy_init(void)
{
    const struct psa_key_policy_s v = PSA_KEY_POLICY_INIT;
    return v;
}
/* The type used internally for key sizes.
 * Public interfaces use size_t, but internally we use a smaller type. */
typedef uint16_t psa_key_bits_t;
/* The maximum value of the type used to represent bit-sizes.
 * This is used to mark an invalid key size. */
#define PSA_KEY_BITS_TOO_LARGE ((psa_key_bits_t) -1)
/* The maximum size of a key in bits.
 * Currently defined as the maximum that can be represented, rounded down
 * to a whole number of bytes.
 * This is an uncast value so that it can be used in preprocessor
 * conditionals.
 * Note: 0xfff8 is a multiple of 8 and strictly less than
 * PSA_KEY_BITS_TOO_LARGE (0xffff), so the two values never collide. */
#define PSA_MAX_KEY_BITS 0xfff8
/* Aggregate of all metadata describing a key: type, size, lifetime,
 * policy and identifier (plus the secure-element slot when enabled). */
struct psa_key_attributes_s {
#if defined(MBEDTLS_PSA_CRYPTO_SE_C)
    psa_key_slot_number_t MBEDTLS_PRIVATE(slot_number);
    int MBEDTLS_PRIVATE(has_slot_number);
#endif /* MBEDTLS_PSA_CRYPTO_SE_C */
    psa_key_type_t MBEDTLS_PRIVATE(type);
    psa_key_bits_t MBEDTLS_PRIVATE(bits);
    psa_key_lifetime_t MBEDTLS_PRIVATE(lifetime);
    psa_key_policy_t MBEDTLS_PRIVATE(policy);
    /* This type has a different layout in the client view wrt the
     * service view of the key id, i.e. in service view usually is
     * expected to have MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER defined
     * thus adding an owner field to the standard psa_key_id_t. For
     * implementations with client/service separation, this means the
     * object will be marshalled through a transport channel and
     * interpreted differently at each side of the transport. Placing
     * it at the end of structures allows to interpret the structure
     * at the client without reorganizing the memory layout of the
     * struct
     */
    mbedtls_svc_key_id_t MBEDTLS_PRIVATE(id);
};

/* Helper that expands to the two extra initializers (slot_number,
 * has_slot_number) only when secure-element support is compiled in;
 * note the trailing comma lives inside the macro. */
#if defined(MBEDTLS_PSA_CRYPTO_SE_C)
#define PSA_KEY_ATTRIBUTES_MAYBE_SLOT_NUMBER 0, 0,
#else
#define PSA_KEY_ATTRIBUTES_MAYBE_SLOT_NUMBER
#endif

#define PSA_KEY_ATTRIBUTES_INIT { PSA_KEY_ATTRIBUTES_MAYBE_SLOT_NUMBER \
                                  PSA_KEY_TYPE_NONE, 0,                \
                                  PSA_KEY_LIFETIME_VOLATILE,           \
                                  PSA_KEY_POLICY_INIT,                 \
                                  MBEDTLS_SVC_KEY_ID_INIT }

/* Function-style alternative to the PSA_KEY_ATTRIBUTES_INIT macro. */
static inline struct psa_key_attributes_s psa_key_attributes_init(void)
{
    const struct psa_key_attributes_s v = PSA_KEY_ATTRIBUTES_INIT;
    return v;
}
/** Declare a key as persistent and set its identifier.
 *
 * Setting an identifier only makes sense for a persistent key, so if the
 * attributes currently carry a volatile lifetime, it is upgraded to
 * PSA_KEY_LIFETIME_PERSISTENT while preserving the storage location.
 */
static inline void psa_set_key_id(psa_key_attributes_t *attributes,
                                  mbedtls_svc_key_id_t key)
{
    const psa_key_lifetime_t old_lifetime = attributes->MBEDTLS_PRIVATE(lifetime);

    attributes->MBEDTLS_PRIVATE(id) = key;

    /* A non-volatile lifetime is left untouched. */
    if (!PSA_KEY_LIFETIME_IS_VOLATILE(old_lifetime)) {
        return;
    }
    attributes->MBEDTLS_PRIVATE(lifetime) =
        PSA_KEY_LIFETIME_FROM_PERSISTENCE_AND_LOCATION(
            PSA_KEY_LIFETIME_PERSISTENT,
            PSA_KEY_LIFETIME_GET_LOCATION(old_lifetime));
}
/** Retrieve the key identifier stored in the attributes. */
static inline mbedtls_svc_key_id_t psa_get_key_id(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(id);
}

#ifdef MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
/** Set the owner part of the key identifier (service view only:
 * available when key ids encode their owner). */
static inline void mbedtls_set_key_owner_id(psa_key_attributes_t *attributes,
                                            mbedtls_key_owner_id_t owner)
{
    attributes->MBEDTLS_PRIVATE(id).MBEDTLS_PRIVATE(owner) = owner;
}
#endif
/** Set the key lifetime.
 *
 * Switching to a volatile lifetime clears any previously set key
 * identifier, since volatile keys have no persistent id.
 */
static inline void psa_set_key_lifetime(psa_key_attributes_t *attributes,
                                        psa_key_lifetime_t lifetime)
{
    attributes->MBEDTLS_PRIVATE(lifetime) = lifetime;
    if (!PSA_KEY_LIFETIME_IS_VOLATILE(lifetime)) {
        return;
    }
#ifdef MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER
    attributes->MBEDTLS_PRIVATE(id).MBEDTLS_PRIVATE(key_id) = 0;
#else
    attributes->MBEDTLS_PRIVATE(id) = 0;
#endif
}
/** Retrieve the lifetime stored in the attributes. */
static inline psa_key_lifetime_t psa_get_key_lifetime(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(lifetime);
}
/** Normalize usage flags in place: permission to sign/verify a hash
 * implies permission to sign/verify a message, so the *_MESSAGE flags
 * are added whenever the corresponding *_HASH flag is present. */
static inline void psa_extend_key_usage_flags(psa_key_usage_t *usage_flags)
{
    psa_key_usage_t extended = *usage_flags;

    if (extended & PSA_KEY_USAGE_SIGN_HASH) {
        extended |= PSA_KEY_USAGE_SIGN_MESSAGE;
    }
    if (extended & PSA_KEY_USAGE_VERIFY_HASH) {
        extended |= PSA_KEY_USAGE_VERIFY_MESSAGE;
    }
    *usage_flags = extended;
}
/** Store the usage flags in the key policy, after extending them
 * (sign/verify-hash implies sign/verify-message). */
static inline void psa_set_key_usage_flags(psa_key_attributes_t *attributes,
                                           psa_key_usage_t usage_flags)
{
    psa_extend_key_usage_flags(&usage_flags);
    attributes->MBEDTLS_PRIVATE(policy).MBEDTLS_PRIVATE(usage) = usage_flags;
}

/** Retrieve the usage flags from the key policy. */
static inline psa_key_usage_t psa_get_key_usage_flags(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(policy).MBEDTLS_PRIVATE(usage);
}

/** Store the permitted algorithm in the key policy. */
static inline void psa_set_key_algorithm(psa_key_attributes_t *attributes,
                                         psa_algorithm_t alg)
{
    attributes->MBEDTLS_PRIVATE(policy).MBEDTLS_PRIVATE(alg) = alg;
}

/** Retrieve the permitted algorithm from the key policy. */
static inline psa_algorithm_t psa_get_key_algorithm(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(policy).MBEDTLS_PRIVATE(alg);
}

/** Store the key type in the attributes. */
static inline void psa_set_key_type(psa_key_attributes_t *attributes,
                                    psa_key_type_t type)
{
    attributes->MBEDTLS_PRIVATE(type) = type;
}

/** Retrieve the key type from the attributes. */
static inline psa_key_type_t psa_get_key_type(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(type);
}
/** Record the key size in bits.
 *
 * Values above #PSA_MAX_KEY_BITS are stored as #PSA_KEY_BITS_TOO_LARGE,
 * which marks an invalid key size (see the definitions above).
 */
static inline void psa_set_key_bits(psa_key_attributes_t *attributes,
                                    size_t bits)
{
    attributes->MBEDTLS_PRIVATE(bits) =
        (bits > PSA_MAX_KEY_BITS) ? PSA_KEY_BITS_TOO_LARGE
                                  : (psa_key_bits_t) bits;
}
/** Retrieve the key size in bits (widened back to size_t). */
static inline size_t psa_get_key_bits(
    const psa_key_attributes_t *attributes)
{
    return attributes->MBEDTLS_PRIVATE(bits);
}
/**
 * \brief The context for PSA interruptible hash signing.
 */
struct psa_sign_hash_interruptible_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_crypto_driver_wrappers.h
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. none of the driver contexts are active). */
    unsigned int MBEDTLS_PRIVATE(id);
    psa_driver_sign_hash_interruptible_context_t MBEDTLS_PRIVATE(ctx);
    /* Latched when a step fails, so later calls can detect an
     * already-failed operation. */
    unsigned int MBEDTLS_PRIVATE(error_occurred) : 1;
    /* Running count of work done so far (NOTE(review): presumably the
     * value reported by psa_sign_hash_get_num_ops() — confirm). */
    uint32_t MBEDTLS_PRIVATE(num_ops);
#endif
};

/* Initializer layout must match the active branch of the struct above. */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_SIGN_HASH_INTERRUPTIBLE_OPERATION_INIT { 0 }
#else
#define PSA_SIGN_HASH_INTERRUPTIBLE_OPERATION_INIT { 0, { 0 }, 0, 0 }
#endif

/* Function-style alternative to the INIT macro above. */
static inline struct psa_sign_hash_interruptible_operation_s
psa_sign_hash_interruptible_operation_init(void)
{
    const struct psa_sign_hash_interruptible_operation_s v =
        PSA_SIGN_HASH_INTERRUPTIBLE_OPERATION_INIT;

    return v;
}
/**
 * \brief The context for PSA interruptible hash verification.
 */
struct psa_verify_hash_interruptible_operation_s {
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
    /* Client-only build: opaque handle to service-side state. */
    mbedtls_psa_client_handle_t handle;
#else
    /** Unique ID indicating which driver got assigned to do the
     * operation. Since driver contexts are driver-specific, swapping
     * drivers halfway through the operation is not supported.
     * ID values are auto-generated in psa_crypto_driver_wrappers.h
     * ID value zero means the context is not valid or not assigned to
     * any driver (i.e. none of the driver contexts are active). */
    unsigned int MBEDTLS_PRIVATE(id);
    psa_driver_verify_hash_interruptible_context_t MBEDTLS_PRIVATE(ctx);
    /* Latched when a step fails, so later calls can detect an
     * already-failed operation. */
    unsigned int MBEDTLS_PRIVATE(error_occurred) : 1;
    /* Running count of work done so far (NOTE(review): presumably the
     * value reported by psa_verify_hash_get_num_ops() — confirm). */
    uint32_t MBEDTLS_PRIVATE(num_ops);
#endif
};

/* Initializer layout must match the active branch of the struct above. */
#if defined(MBEDTLS_PSA_CRYPTO_CLIENT) && !defined(MBEDTLS_PSA_CRYPTO_C)
#define PSA_VERIFY_HASH_INTERRUPTIBLE_OPERATION_INIT { 0 }
#else
#define PSA_VERIFY_HASH_INTERRUPTIBLE_OPERATION_INIT { 0, { 0 }, 0, 0 }
#endif

/* Function-style alternative to the INIT macro above. */
static inline struct psa_verify_hash_interruptible_operation_s
psa_verify_hash_interruptible_operation_init(void)
{
    const struct psa_verify_hash_interruptible_operation_s v =
        PSA_VERIFY_HASH_INTERRUPTIBLE_OPERATION_INIT;

    return v;
}
#ifdef __cplusplus
}
#endif
#endif /* PSA_CRYPTO_STRUCT_H */ | c | github | https://github.com/nodejs/node | deps/LIEF/third-party/mbedtls/include/psa/crypto_struct.h |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
import json
import uuid
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import RegexValidator
from django.forms import (
BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField,
DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput,
ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput,
MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select,
SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput,
TimeField, ValidationError, forms,
)
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html
from django.utils.safestring import SafeData, mark_safe
class Person(Form):
    # Minimal three-field form used throughout these tests.
    first_name = CharField()
    last_name = CharField()
    birthday = DateField()


class PersonNew(Form):
    # Variant of Person whose first_name widget carries an explicit id
    # attribute; used by the auto_id precedence tests below.
    first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'}))
    last_name = CharField()
    birthday = DateField()
class FormsTestCase(SimpleTestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
    def test_form(self):
        """A form bound to valid data: no errors, coerced cleaned_data,
        and default rendering with auto-generated id_* attributes."""
        # Pass a dictionary to a Form's __init__().
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
        self.assertTrue(p.is_bound)
        self.assertEqual(p.errors, {})
        self.assertTrue(p.is_valid())
        self.assertHTMLEqual(p.errors.as_ul(), '')
        self.assertEqual(p.errors.as_text(), '')
        self.assertEqual(p.cleaned_data["first_name"], 'John')
        self.assertEqual(p.cleaned_data["last_name"], 'Lennon')
        # DateField coerces the string input into a datetime.date.
        self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
        self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />')
        self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />')
        self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />')
        # The regex's "u?" matches both Python 2 and Python 3 key reprs.
        nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'"
        with six.assertRaisesRegex(self, KeyError, nonexistenterror):
            p['nonexistentfield']
            self.fail('Attempts to access non-existent fields should fail.')
        # Iterating a form yields its BoundFields in declaration order.
        form_output = []
        for boundfield in p:
            form_output.append(str(boundfield))
        self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" />
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""")
        form_output = []
        for boundfield in p:
            form_output.append([boundfield.label, boundfield.data])
        self.assertEqual(form_output, [
            ['First name', 'John'],
            ['Last name', 'Lennon'],
            ['Birthday', '1940-10-9']
        ])
        # str(form) uses the table rendering (matches as_table elsewhere
        # in this suite).
        self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""")
    def test_empty_dict(self):
        """Binding an empty dict is valid input but fails validation:
        every required field reports an error and the error lists are
        rendered inline by all four output methods."""
        # Empty dictionaries are valid, too.
        p = Person({})
        self.assertTrue(p.is_bound)
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        # Validation ran (the form is bound), so cleaned_data exists but
        # is empty.
        self.assertEqual(p.cleaned_data, {})
        self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
        self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
try:
p.cleaned_data
self.fail('Attempts to access cleaned_data when validation fails should fail.')
except AttributeError:
pass
self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""")
self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""")
self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""")
    def test_unicode_values(self):
        """Non-ASCII field values round-trip through rendering; partially
        bound data produces per-field errors while cleaned_data keeps the
        valid fields."""
        # Unicode values are handled properly.
        p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'})
        self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>')
        self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>')
        self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>')
        # Partial data: missing required fields show up in errors, while
        # cleaned_data still carries the fields that did validate.
        p = Person({'last_name': 'Lennon'})
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']})
        self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'})
        # Per-field error access and its two rendering formats.
        self.assertEqual(p['first_name'].errors, ['This field is required.'])
        self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>')
        self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.')
        # An unbound form renders empty widgets (no value attribute).
        p = Person()
        self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />')
        self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />')
        self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />')
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
# Form, even if you pass extra data when you define the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data['first_name'], 'John')
self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
    def test_optional_data(self):
        """Optional fields missing from the data still appear in
        cleaned_data: '' for CharField, None for DateField."""
        # cleaned_data will include a key and value for *all* fields defined in the Form,
        # even if the Form's data didn't include a value for fields that are not
        # required. In this example, the data dictionary doesn't include a value for the
        # "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the
        # empty string.
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            nick_name = CharField(required=False)
        data = {'first_name': 'John', 'last_name': 'Lennon'}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['nick_name'], '')
        self.assertEqual(f.cleaned_data['first_name'], 'John')
        self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
        # For DateFields, it's set to None.
        # (Deliberately re-uses the OptionalPersonForm name with a
        # different optional field type.)
        class OptionalPersonForm(Form):
            first_name = CharField()
            last_name = CharField()
            birth_date = DateField(required=False)
        data = {'first_name': 'John', 'last_name': 'Lennon'}
        f = OptionalPersonForm(data)
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['birth_date'], None)
        self.assertEqual(f.cleaned_data['first_name'], 'John')
        self.assertEqual(f.cleaned_data['last_name'], 'Lennon')
    def test_auto_id(self):
        """auto_id as a '%s' format string: each widget id is the field
        name formatted through it, and labels point at those ids."""
        # "auto_id" tells the Form to add an "id" attribute to each form element.
        # If it's a string that contains '%s', Django will use that as a format string
        # into which the field's name will be inserted. It will also put a <label> around
        # the human-readable labels for a field.
        p = Person(auto_id='%s_id')
        self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li>
<li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li>
<li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p>
<p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p>
<p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""")
    def test_auto_id_true(self):
        """auto_id=True (truthy, no '%s'): widget id equals the bare
        field name."""
        # If auto_id is any True value whose str() does not contain '%s', the "id"
        # attribute will be the name of the field.
        p = Person(auto_id=True)
        self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
    def test_auto_id_false(self):
        """auto_id=False: no id attributes and no <label> tags."""
        # If auto_id is any False value, an "id" attribute won't be output unless it
        # was manually entered.
        p = Person(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
    def test_id_on_field(self):
        """A widget-level id attribute is honored even with auto_id=False,
        and only that field gets a <label>."""
        # In this example, auto_id is False, but the "id" attribute for the "first_name"
        # field is given. Also note that field gets a <label>, while the others don't.
        p = PersonNew(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
    def test_auto_id_on_form_and_field(self):
        """A widget-level id wins over the auto_id mechanism for that
        field; other fields fall back to auto_id."""
        # If the "id" attribute is specified in the Form and auto_id is True, the "id"
        # attribute in the Form gets precedence.
        p = PersonNew(auto_id=True)
        self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li>
<li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li>
<li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""")
    def test_various_boolean_values(self):
        """Checkbox rendering and cleaning of BooleanField for assorted bound values."""
        class SignupForm(Form):
            email = EmailField()
            get_spam = BooleanField()
        f = SignupForm(auto_id=False)
        self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />')
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
        f = SignupForm({'email': 'test@example.com', 'get_spam': True}, auto_id=False)
        self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="test@example.com" />')
        self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        # 'True' or 'true' should be rendered without a value attribute
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'True'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'true'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />')
        # A value of 'False' or 'false' should be rendered unchecked
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'False'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
        f = SignupForm({'email': 'test@example.com', 'get_spam': 'false'}, auto_id=False)
        self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />')
        # A value of '0' should be interpreted as a True value (#16820)
        f = SignupForm({'email': 'test@example.com', 'get_spam': '0'})
        self.assertTrue(f.is_valid())
        self.assertTrue(f.cleaned_data.get('get_spam'))
    def test_widget_output(self):
        """Widget selection via the Field constructor, and the BoundField as_textarea()/as_text()/as_hidden() shortcuts."""
        # Any Field can have a Widget class passed to its constructor:
        class ContactForm(Form):
            subject = CharField()
            message = CharField(widget=Textarea)
        f = ContactForm(auto_id=False)
        self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />')
        self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>')
        # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
        # widget type:
        self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>')
        self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
        self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />')
        # The 'widget' parameter to a Field can also be an instance:
        class ContactForm(Form):
            subject = CharField()
            message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20}))
        f = ContactForm(auto_id=False)
        self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>')
        # Instance-level attrs are *not* carried over to as_textarea(), as_text() and
        # as_hidden():
        self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />')
        f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False)
        self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>')
        self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />')
        self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />')
    def test_forms_with_choices(self):
        """ChoiceField rendering as a <select>: bound selection, empty-string choice, widget attrs, and late choice assignment."""
        # For a form with a <select>, use ChoiceField:
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')])
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
        # A subtlety: If one of the choices' value is the empty string and the form is
        # unbound, then the <option> for the empty-string choice will get selected="selected".
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')])
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="" selected="selected">------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        # You can specify widget attributes in the Widget constructor.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'}))
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
        # When passing a custom widget instance to ChoiceField, note that setting
        # 'choices' on the widget is meaningless. The widget will use the choices
        # defined on the Field, not the ones defined on the Widget.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'}))
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
        f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language">
<option value="P" selected="selected">Python</option>
<option value="J">Java</option>
</select>""")
        # You can set a ChoiceField's choices after the fact.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField()
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<select name="language">
</select>""")
        f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')]
        self.assertHTMLEqual(str(f['language']), """<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""")
    def test_forms_with_radio(self):
        """RadioSelect rendering in all output styles, per-button auto_id behavior, and template iteration over radios."""
        # Add widget=RadioSelect to use that widget with a ChoiceField.
        class FrameworkForm(Form):
            name = CharField()
            language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect)
        f = FrameworkForm(auto_id=False)
        self.assertHTMLEqual(str(f['language']), """<ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul>""")
        self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr>
<tr><th>Language:</th><td><ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></td></tr>""")
        self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li>
<li>Language: <ul>
<li><label><input type="radio" name="language" value="P" /> Python</label></li>
<li><label><input type="radio" name="language" value="J" /> Java</label></li>
</ul></li>""")
        # Regarding auto_id and <label>, RadioSelect is a special case. Each radio button
        # gets a distinct ID, formed by appending an underscore plus the button's
        # zero-based index.
        f = FrameworkForm(auto_id='id_%s')
        self.assertHTMLEqual(str(f['language']), """<ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul>""")
        # When RadioSelect is used with auto_id, and the whole form is printed using
        # either as_table() or as_ul(), the label for the RadioSelect will point to the
        # ID of the *first* radio button.
        self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr>
<tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></td></tr>""")
        self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></li>""")
        self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p><label for="id_language_0">Language:</label> <ul id="id_language">
<li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li>
<li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li>
</ul></p>""")
        # Test iterating on individual radios in a template
        t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}')
        self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="myradio"><label for="id_language_0">
<input id="id_language_0" name="language" type="radio" value="P" /> Python</label></div>
<div class="myradio"><label for="id_language_1">
<input id="id_language_1" name="language" type="radio" value="J" /> Java</label></div>""")
    def test_form_with_iterable_boundfield(self):
        """Iterating a RadioSelect BoundField yields one renderable subwidget per choice."""
        class BeatleForm(Form):
            name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect)
        f = BeatleForm(auto_id=False)
        # Each item str()s to a complete <label>/<input> pair...
        self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" /> John</label>
<label><input type="radio" name="name" value="paul" /> Paul</label>
<label><input type="radio" name="name" value="george" /> George</label>
<label><input type="radio" name="name" value="ringo" /> Ringo</label>""")
        # ...and can be interpolated into arbitrary surrounding markup.
        self.assertHTMLEqual('\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" /> John</label></div>
<div><label><input type="radio" name="name" value="paul" /> Paul</label></div>
<div><label><input type="radio" name="name" value="george" /> George</label></div>
<div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""")
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />')
    def test_forms_with_multiple_choice(self):
        """MultipleChoiceField renders a multiple <select>; bound data is a list and selected options are marked."""
        # MultipleChoiceField is a special case, as its data is required to be a list:
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField()
        f = SongForm(auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
</select>""")
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
        f = SongForm(auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""")
        f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
        self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />')
        self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers">
<option value="J">John Lennon</option>
<option value="P" selected="selected">Paul McCartney</option>
</select>""")
    def test_form_with_disabled_fields(self):
        """disabled=True fields are cleaned from initial data (form-level or field-level), never from submitted data."""
        class PersonForm(Form):
            name = CharField()
            birthday = DateField(disabled=True)
        class PersonFormFieldInitial(Form):
            name = CharField()
            birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16))
        # Disabled fields are generally not transmitted by user agents.
        # The value from the form's initial data is used.
        f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)})
        f2 = PersonFormFieldInitial({'name': 'John Doe'})
        for form in (f1, f2):
            self.assertTrue(form.is_valid())
            self.assertEqual(
                form.cleaned_data,
                {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
            )
        # Values provided in the form's data are ignored.
        data = {'name': 'John Doe', 'birthday': '1984-11-10'}
        f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)})
        f2 = PersonFormFieldInitial(data)
        for form in (f1, f2):
            self.assertTrue(form.is_valid())
            self.assertEqual(
                form.cleaned_data,
                {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'}
            )
    def test_hidden_data(self):
        """as_hidden() special cases: MultipleChoiceField emits one hidden input per value; SplitDateTimeField emits one per subwidget."""
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')])
        # MultipleChoiceField rendered as_hidden() is a special case. Because it can
        # have multiple values, its as_hidden() renders multiple <input type="hidden">
        # tags.
        f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False)
        self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />')
        f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False)
        self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" />
<input type="hidden" name="composers" value="J" />""")
        # DateTimeField rendered as_hidden() is special too
        class MessageForm(Form):
            when = SplitDateTimeField()
        f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'})
        self.assertTrue(f.is_valid())
        self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />')
        self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />')
    def test_mulitple_choice_checkbox(self):
        """MultipleChoiceField with CheckboxSelectMultiple: checked state tracks bound values; checkboxes iterate in templates."""
        # NOTE(review): method name has a typo ("mulitple" -> "multiple"); left
        # as-is since the test runner discovers it by the "test_" prefix.
        # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
        f = SongForm(auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        f = SongForm({'composers': ['J']}, auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        f = SongForm({'composers': ['J', 'P']}, auto_id=False)
        self.assertHTMLEqual(str(f['composers']), """<ul>
<li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li>
<li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li>
</ul>""")
        # Test iterating on individual checkboxes in a template
        t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}')
        self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="J" /> John Lennon</label></div>
<div class="mycheckbox"><label>
<input checked="checked" name="composers" type="checkbox" value="P" /> Paul McCartney</label></div>""")
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
f = SongForm(auto_id='%s_id')
self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id">
<li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li>
<li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li>
</ul>""")
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict and
# MultiValueDict conveniently work with this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
data = {'name': 'Yesterday', 'composers': ['J', 'P']}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict('name=Yesterday&composers=J&composers=P')
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P']))
f = SongForm(data)
self.assertEqual(f.errors, {})
    def test_multiple_hidden(self):
        """MultipleHiddenInput renders one hidden input per value; cleaning returns the list of submitted values."""
        class SongForm(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple)
        # The MultipleHiddenInput widget renders multiple values as hidden fields.
        class SongFormHidden(Form):
            name = CharField()
            composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput)
        f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False)
        self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" />
<input type="hidden" name="composers" value="P" /></li>""")
        # When using CheckboxSelectMultiple, the framework expects a list of input and
        # returns a list of input.
        f = SongForm({'name': 'Yesterday'}, auto_id=False)
        self.assertEqual(f.errors['composers'], ['This field is required.'])
        f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['composers'], ['J'])
        self.assertEqual(f.cleaned_data['name'], 'Yesterday')
        f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['composers'], ['J', 'P'])
        self.assertEqual(f.cleaned_data['name'], 'Yesterday')
    def test_escaping(self):
        """Validation error messages are HTML-escaped on output unless explicitly marked safe with mark_safe()."""
        # Validation errors are HTML-escaped when output as HTML.
        class EscapingForm(Form):
            special_name = CharField(label="<em>Special</em> Field")
            special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
            def clean_special_name(self):
                raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name'])
            def clean_special_safe_name(self):
                raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name']))
        f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something&#39;s wrong with &#39;Nothing to escape&#39;</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""")
        f = EscapingForm({
            'special_name': "Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;",
            'special_safe_name': "<i>Do not escape</i>"
        }, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>Something&#39;s wrong with &#39;Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;&#39;</li></ul><input type="text" name="special_name" value="Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;" /></td></tr>
<tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="<i>Do not escape</i>" /></td></tr>""")
    def test_validating_multiple_fields(self):
        """Cross-field validation via clean_XXX(), Form.clean() (single error, dict of errors), and Form.add_error()."""
        # There are a couple of ways to do multiple-field validation. If you want the
        # validation message to be associated with a particular field, implement the
        # clean_XXX() method on the Form, where XXX is the field name. As in
        # Field.clean(), the clean_XXX() method should return the cleaned value. In the
        # clean_XXX() method, you have access to self.cleaned_data, which is a dictionary
        # of all the data that has been cleaned *so far*, in order by the fields,
        # including the current field (e.g., the field XXX if you're in clean_XXX()).
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean_password2(self):
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data['password2']
        f = UserRegistration(auto_id=False)
        self.assertEqual(f.errors, {})
        f = UserRegistration({}, auto_id=False)
        self.assertEqual(f.errors['username'], ['This field is required.'])
        self.assertEqual(f.errors['password1'], ['This field is required.'])
        self.assertEqual(f.errors['password2'], ['This field is required.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['username'], 'adrian')
        self.assertEqual(f.cleaned_data['password1'], 'foo')
        self.assertEqual(f.cleaned_data['password2'], 'foo')
        # Another way of doing multiple-field validation is by implementing the
        # Form's clean() method. Usually ValidationError raised by that method
        # will not be associated with a particular field and will have a
        # special-case association with the field named '__all__'. It's
        # possible to associate the errors to particular field with the
        # Form.add_error() method or by passing a dictionary that maps each
        # field to one or more errors.
        #
        # Note that in Form.clean(), you have access to self.cleaned_data, a
        # dictionary of all the fields/values that have *not* raised a
        # ValidationError. Also note Form.clean() is required to return a
        # dictionary of all clean data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                # Test raising a ValidationError as NON_FIELD_ERRORS.
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                # Test raising ValidationError that targets multiple fields.
                errors = {}
                if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
                    errors['password1'] = 'Forbidden value.'
                if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE':
                    errors['password2'] = ['Forbidden value.']
                if errors:
                    raise ValidationError(errors)
                # Test Form.add_error()
                if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2':
                    self.add_error(None, 'Non-field error 1.')
                    self.add_error('password1', 'Forbidden value 2.')
                if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2':
                    self.add_error('password2', 'Forbidden value 2.')
                    raise ValidationError('Non-field error 2.')
                return self.cleaned_data
        f = UserRegistration(auto_id=False)
        self.assertEqual(f.errors, {})
        f = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""")
        self.assertEqual(f.errors['username'], ['This field is required.'])
        self.assertEqual(f.errors['password1'], ['This field is required.'])
        self.assertEqual(f.errors['password2'], ['This field is required.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.'])
        self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""")
        self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></li>
<li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Password2: <input type="password" name="password2" /></li>""")
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['username'], 'adrian')
        self.assertEqual(f.cleaned_data['password1'], 'foo')
        self.assertEqual(f.cleaned_data['password2'], 'foo')
        f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE'}, auto_id=False)
        self.assertEqual(f.errors['password1'], ['Forbidden value.'])
        self.assertEqual(f.errors['password2'], ['Forbidden value.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2'}, auto_id=False)
        self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.'])
        self.assertEqual(f.errors['password1'], ['Forbidden value 2.'])
        self.assertEqual(f.errors['password2'], ['Forbidden value 2.'])
        # add_error() on an unknown field name is a programming error.
        with six.assertRaisesRegex(self, ValueError, "has no field named"):
            f.add_error('missing_field', 'Some error.')
    def test_update_error_dict(self):
        """ValidationError.update_error_dict() accumulates field and non-field errors without losing the ErrorDict type."""
        class CodeForm(Form):
            code = CharField(max_length=10)
            def clean(self):
                # Repeatedly merge errors of various shapes (dict of
                # ValidationErrors, dict of ErrorList, bare message, list)
                # into self._errors via update_error_dict().
                try:
                    raise ValidationError({'code': [ValidationError('Code error 1.')]})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError({'code': [ValidationError('Code error 2.')]})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError({'code': forms.ErrorList(['Code error 3.'])})
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError('Non-field error 1.')
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                try:
                    raise ValidationError([ValidationError('Non-field error 2.')])
                except ValidationError as e:
                    self._errors = e.update_error_dict(self._errors)
                # Ensure that the newly added list of errors is an instance of ErrorList.
                for field, error_list in self._errors.items():
                    if not isinstance(error_list, self.error_class):
                        self._errors[field] = self.error_class(error_list)
        form = CodeForm({'code': 'hello'})
        # Trigger validation.
        self.assertFalse(form.is_valid())
        # Check that update_error_dict didn't lose track of the ErrorDict type.
        self.assertIsInstance(form._errors, forms.ErrorDict)
        self.assertEqual(dict(form.errors), {
            'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'],
            NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'],
        })
    def test_has_error(self):
        """Form.has_error() matches by field name and optionally by error code, including NON_FIELD_ERRORS."""
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput, min_length=5)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2')
                        and self.cleaned_data['password1'] != self.cleaned_data['password2']):
                    raise ValidationError(
                        'Please make sure your passwords match.',
                        code='password_mismatch',
                    )
        # Empty submission: 'required' errors on every field.
        f = UserRegistration(data={})
        self.assertTrue(f.has_error('password1'))
        self.assertTrue(f.has_error('password1', 'required'))
        self.assertFalse(f.has_error('password1', 'anything'))
        # Matching but too-short passwords: only 'min_length' on password1.
        f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'})
        self.assertTrue(f.has_error('password1'))
        self.assertTrue(f.has_error('password1', 'min_length'))
        self.assertFalse(f.has_error('password1', 'anything'))
        self.assertFalse(f.has_error('password2'))
        self.assertFalse(f.has_error('password2', 'anything'))
        # Mismatched passwords: error lands on NON_FIELD_ERRORS with its code.
        f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'})
        self.assertFalse(f.has_error('password1'))
        self.assertFalse(f.has_error('password1', 'required'))
        self.assertTrue(f.has_error(NON_FIELD_ERRORS))
        self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch'))
        self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything'))
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
self.fields['birthday'] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""")
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [('field1', CharField()), ('field2', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""")
field_list = [('field3', CharField()), ('field4', CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr>
<tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""")
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if names_required:
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs['class'] = 'required'
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs['class'] = 'required'
f = Person(names_required=False)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))
f = Person(names_required=True)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (True, True))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({'class': 'required'}, {'class': 'required'}))
f = Person(names_required=False)
self.assertEqual(f['first_name'].field.required, f['last_name'].field.required, (False, False))
self.assertEqual(f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs, ({}, {}))
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if name_max_length:
self.fields['first_name'].max_length = name_max_length
self.fields['last_name'].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))
f = Person(name_max_length=20)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (20, 20))
f = Person(name_max_length=None)
self.assertEqual(f['first_name'].field.max_length, f['last_name'].field.max_length, (30, 30))
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super(Person, self).__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields['gender'].choices += (('u', 'Unspecified'),)
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
f = Person(allow_unspec_gender=True)
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
f = Person()
self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
def test_validators_independence(self):
""" Test that we are able to modify a form field validators list without polluting
other forms """
from django.core.validators import MaxValueValidator
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields['myfield'].validators[0] = MaxValueValidator(12)
self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])
    def test_hidden_widget(self):
        """
        Fields using HiddenInput get no label and no row of their own: their
        <input> tags are appended to the form's last visible row, and their
        errors appear first, prefixed with "(Hidden field <name>)". A form of
        only hidden fields renders as bare inputs.
        """
        # HiddenInput widgets are displayed differently in the as_table(), as_ul()
        # and as_p() output of a Form -- their verbose names are not displayed, and a
        # separate row is not displayed. They're displayed in the last row of the
        # form, directly after that row's form element.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            hidden_text = CharField(widget=HiddenInput)
            birthday = DateField()
        p = Person(auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p>
<p>Last name: <input type="text" name="last_name" /></p>
<p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""")
        # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
        p = Person(auto_id='id_%s')
        self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li>
<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li>
<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p>
<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p>
<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""")
        # If a field with a HiddenInput has errors, the as_table() and as_ul() output
        # will include the error message(s) with the text "(Hidden field [fieldname]) "
        # prepended. This message is displayed at the top of the output, regardless of
        # its field's order in the form.
        p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr>
<tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""")
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" /></li>
<li>Last name: <input type="text" name="last_name" value="Lennon" /></li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" /></p>
<p>Last name: <input type="text" name="last_name" value="Lennon" /></p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""")
        # A corner case: It's possible for a form to have only HiddenInputs.
        class TestForm(Form):
            foo = CharField(widget=HiddenInput)
            bar = CharField(widget=HiddenInput)
        p = TestForm(auto_id=False)
        self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
        self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
        self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />')
    def test_field_order(self):
        """
        Fields render in declaration order — including double-digit suffixes
        (field10 after field9, not lexicographically after field1).
        """
        # A Form's fields are displayed in the same order in which they were defined.
        class TestForm(Form):
            field1 = CharField()
            field2 = CharField()
            field3 = CharField()
            field4 = CharField()
            field5 = CharField()
            field6 = CharField()
            field7 = CharField()
            field8 = CharField()
            field9 = CharField()
            field10 = CharField()
            field11 = CharField()
            field12 = CharField()
            field13 = CharField()
            field14 = CharField()
        p = TestForm(auto_id=False)
        self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>
<tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr>
<tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr>
<tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr>
<tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr>
<tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr>
<tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr>
<tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr>
<tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr>
<tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr>
<tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""")
    def test_explicit_field_order(self):
        """
        `field_order` reorders fields explicitly: it is inherited and can be
        overridden by subclasses, skips names removed via `field = None` or not
        present on the form, and can be applied at runtime via order_fields().
        Fields missing from `field_order` are appended after the ordered ones.
        """
        class TestFormParent(Form):
            field1 = CharField()
            field2 = CharField()
            field4 = CharField()
            field5 = CharField()
            field6 = CharField()
            field_order = ['field6', 'field5', 'field4', 'field2', 'field1']
        class TestForm(TestFormParent):
            # Adds field3 and overrides the parent's ordering.
            field3 = CharField()
            field_order = ['field2', 'field4', 'field3', 'field5', 'field6']
        class TestFormRemove(TestForm):
            # Setting a field to None removes it from the form.
            field1 = None
        class TestFormMissing(TestForm):
            # 'field1' is listed in field_order but removed from the form.
            field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1']
            field1 = None
        class TestFormInit(TestFormParent):
            # No class-level ordering; order_fields() is called at runtime.
            field3 = CharField()
            field_order = None
            def __init__(self, **kwargs):
                super(TestFormInit, self).__init__(**kwargs)
                self.order_fields(field_order=TestForm.field_order)
        p = TestFormParent()
        self.assertEqual(list(p.fields.keys()), TestFormParent.field_order)
        p = TestFormRemove()
        self.assertEqual(list(p.fields.keys()), TestForm.field_order)
        p = TestFormMissing()
        self.assertEqual(list(p.fields.keys()), TestForm.field_order)
        p = TestForm()
        self.assertEqual(list(p.fields.keys()), TestFormMissing.field_order)
        p = TestFormInit()
        order = list(TestForm.field_order) + ['field1']
        self.assertEqual(list(p.fields.keys()), order)
        # Unknown names in field_order are ignored; remaining fields keep
        # declaration order.
        TestForm.field_order = ['unknown']
        p = TestForm()
        self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3'])
    def test_form_html_attributes(self):
        """
        CharField.max_length is rendered as the widget's "maxlength" HTML
        attribute on TextInput/PasswordInput, and overrides any "maxlength"
        given in the widget's attrs.
        """
        # Some Field classes have an effect on the HTML attributes of their associated
        # Widget. If you set max_length in a CharField and its associated widget is
        # either a TextInput or PasswordInput, then the widget's rendered HTML will
        # include the "maxlength" attribute.
        class UserRegistration(Form):
            username = CharField(max_length=10)  # uses TextInput by default
            password = CharField(max_length=10, widget=PasswordInput)
            realname = CharField(max_length=10, widget=TextInput)  # redundantly define widget, just to test
            address = CharField()  # no max_length defined here
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>
<li>Realname: <input type="text" name="realname" maxlength="10" /></li>
<li>Address: <input type="text" name="address" /></li>""")
        # If you specify a custom "attrs" that includes the "maxlength" attribute,
        # the Field's max_length attribute will override whatever "maxlength" you specify
        # in "attrs".
        class UserRegistration(Form):
            username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20}))
            password = CharField(max_length=10, widget=PasswordInput)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" maxlength="10" /></li>""")
    def test_specifying_labels(self):
        """
        The `label` argument controls a field's rendered label: explicit
        labels are used verbatim, '' suppresses the label, None falls back to
        the capitalized field name, and a ':' suffix is only appended when the
        label doesn't already end in punctuation.
        """
        # You can specify the label for a field by using the 'label' argument to a Field
        # class. If you don't specify 'label', Django will use the field name with
        # underscores converted to spaces, and the initial letter capitalized.
        class UserRegistration(Form):
            username = CharField(max_length=10, label='Your username')
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li>
<li>Password1: <input type="password" name="password1" /></li>
<li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""")
        # Labels for as_* methods will only end in a colon if they don't end in other
        # punctuation already.
        class Questions(Form):
            q1 = CharField(label='The first question')
            q2 = CharField(label='What is your name?')
            q3 = CharField(label='The answer to life is:')
            q4 = CharField(label='Answer this question!')
            q5 = CharField(label='The last question. Period.')
        self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p>
<p>What is your name? <input type="text" name="q2" /></p>
<p>The answer to life is: <input type="text" name="q3" /></p>
<p>Answer this question! <input type="text" name="q4" /></p>
<p>The last question. Period. <input type="text" name="q5" /></p>""")
        self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p>
<p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p>
<p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p>
<p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p>
<p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""")
        # If a label is set to the empty string for a field, that field won't get a label.
        class UserRegistration(Form):
            username = CharField(max_length=10, label='')
            password = CharField(widget=PasswordInput)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(auto_id='id_%s')
        self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
        # If label is None, Django will auto-create the label from the field name. This
        # is default behavior.
        class UserRegistration(Form):
            username = CharField(max_length=10, label=None)
            password = CharField(widget=PasswordInput)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(auto_id='id_%s')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li>
<li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""")
    def test_label_suffix(self):
        """
        `label_suffix` (per field or per form) replaces the default ':' label
        terminator; an explicit suffix is always appended, while the default
        one is skipped after existing punctuation.
        """
        # You can specify the 'label_suffix' argument to a Form class to modify the
        # punctuation symbol used at the end of a label. By default, the colon (:) is
        # used, and is only appended to the label if the label doesn't already end with a
        # punctuation symbol: ., !, ? or :. If you specify a different suffix, it will
        # be appended regardless of the last character of the label.
        class FavoriteForm(Form):
            color = CharField(label='Favorite color?')
            animal = CharField(label='Favorite animal')
            answer = CharField(label='Secret answer', label_suffix=' =')
        f = FavoriteForm(auto_id=False)
        self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal: <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
        f = FavoriteForm(auto_id=False, label_suffix='?')
        self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal? <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
        f = FavoriteForm(auto_id=False, label_suffix='')
        self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li>
<li>Favorite animal <input type="text" name="animal" /></li>
<li>Secret answer = <input type="text" name="answer" /></li>""")
        # A non-ASCII suffix works too.
        f = FavoriteForm(auto_id=False, label_suffix='\u2192')
        self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n<li>Secret answer = <input type="text" name="answer" /></li>')
    def test_initial_data(self):
        """
        A field's `initial` value is rendered only on unbound forms; it is
        never used as a fallback for missing submitted data.
        """
        # You can specify initial data for a field by using the 'initial' argument to a
        # Field class. This initial data is displayed when a Form is rendered with *no*
        # data. It is not displayed when a Form is rendered with any data (including an
        # empty dictionary). Also, the initial value is *not* used if data for a
        # particular required field isn't provided.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        # Here, we're submitting data, so the initial value will *not* be displayed.
        p = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        # An 'initial' value is *not* used as a fallback if data is not provided. In this
        # example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
    def test_dynamic_initial_data(self):
        """
        The `initial` dict passed to the Form constructor supplies runtime
        initial values; it takes precedence over field-level `initial`, is
        ignored once data is bound, and is never a fallback for missing data.
        """
        # The previous technique dealt with "hard-coded" initial data, but it's also
        # possible to specify initial data after you've already created the Form class
        # (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
        # should be a dictionary containing initial values for one or more fields in the
        # form, keyed by field name.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        # The 'initial' parameter is meaningless if you pass data.
        p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        # A dynamic 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)
        p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
    def test_callable_initial_data(self):
        """
        Initial values may be callables (both field-level `initial` and the
        constructor's `initial` dict); they're invoked at render time, the
        constructor dict wins over field-level values, and callables are never
        a fallback for missing bound data.
        """
        # The previous technique dealt with raw values as initial data, but it's also
        # possible to specify callable data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])
        # We need to define functions that get called later.
        def initial_django():
            return 'django'
        def initial_stephane():
            return 'stephane'
        def initial_options():
            return ['f', 'b']
        def initial_other_options():
            return ['b', 'w']
        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
        # The 'initial' parameter is meaningless if you pass data.
        p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
        p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""")
        p = UserRegistration({'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
        # A callable 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())
        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial=initial_django)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options)
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
        p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial='Hans')
last_name = CharField(initial='Greatel')
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16'})
self.assertTrue(p.is_valid())
self.assertNotIn('first_name', p.changed_data)
self.assertIn('last_name', p.changed_data)
self.assertNotIn('birthday', p.changed_data)
# Test that field raising ValidationError is always in changed_data
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError('Whatever')
class Person2(Person):
pedantic = PedanticField(initial='whatever', show_hidden_initial=True)
p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl',
'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
self.assertFalse(p.is_valid())
self.assertIn('pedantic', p.changed_data)
def test_boundfield_values(self):
# It's possible to get to the value which would be used for rendering
# the widget for a field by using the BoundField's value method.
class UserRegistration(Form):
username = CharField(max_length=10, initial='djangonaut')
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({'password': 'foo'})
self.assertEqual(bound['username'].value(), None)
self.assertEqual(unbound['username'].value(), 'djangonaut')
self.assertEqual(bound['password'].value(), 'foo')
self.assertEqual(unbound['password'].value(), None)
def test_boundfield_initial_called_once(self):
"""
Multiple calls to BoundField().value() in an unbound form should return
the same result each time (#24391).
"""
class MyForm(Form):
name = CharField(max_length=10, initial=uuid.uuid4)
form = MyForm()
name = form['name']
self.assertEqual(name.value(), name.value())
# BoundField is also cached
self.assertIs(form['name'], name)
    def test_boundfield_rendering(self):
        """
        Python 2 issue: Test that rendering a BoundField with bytestring content
        doesn't lose its safe string status (#22950).
        """
        class CustomWidget(TextInput):
            def render(self, name, value, attrs=None):
                # str() yields a native bytestring on Python 2; format_html must
                # still return a safe string.
                return format_html(str('<input{} />'), ' id=custom')
        class SampleForm(Form):
            name = CharField(widget=CustomWidget)
        f = SampleForm(data={'name': 'bar'})
        # The rendered BoundField must remain marked as safe HTML.
        self.assertIsInstance(force_text(f['name']), SafeData)
    def test_initial_datetime_values(self):
        """
        Microseconds in a callable initial datetime are preserved only when the
        field's widget supports them (#22502). Default visible widgets strip
        them; HiddenInput keeps them unless supports_microseconds is False.
        """
        now = datetime.datetime.now()
        # Nix microseconds (since they should be ignored). #22502
        now_no_ms = now.replace(microsecond=0)
        if now == now_no_ms:
            # Force a non-zero microsecond so stripping is observable.
            now = now.replace(microsecond=1)
        def delayed_now():
            return now
        def delayed_now_time():
            return now.time()
        class HiddenInputWithoutMicrosec(HiddenInput):
            supports_microseconds = False
        class TextInputWithoutMicrosec(TextInput):
            supports_microseconds = False
        class DateTimeForm(Form):
            auto_timestamp = DateTimeField(initial=delayed_now)
            auto_time_only = TimeField(initial=delayed_now_time)
            supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
            hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput)
            hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec)
            ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec)
        unbound = DateTimeForm()
        # Default widgets: microseconds are dropped.
        self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms)
        self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time())
        # Widgets that support microseconds keep the full value.
        self.assertEqual(unbound['supports_microseconds'].value(), now)
        self.assertEqual(unbound['hi_default_microsec'].value(), now)
        # Opting out via supports_microseconds = False strips them again.
        self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms)
        self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms)
    def test_help_text(self):
        """help_text is rendered in a helptext <span> for visible fields only."""
        # You can specify descriptive text for a field by using the 'help_text' argument.
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., user@example.com')
            password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
        self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></p>
<p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""")
        self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., user@example.com</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""")
        # The help text is displayed whether or not data is provided for the form.
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""")
        # help_text is not displayed for hidden fields. It can be used for documentation
        # purposes, though.
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text='e.g., user@example.com')
            password = CharField(widget=PasswordInput)
            next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination')
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., user@example.com</span></li>
<li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""")
    def test_subclassing_forms(self):
        """Subclasses inherit fields; multiple inheritance orders fields by MRO."""
        # You can subclass a Form to add fields. The resulting form subclass will have
        # all of the fields of the parent Form, plus whichever fields you define in the
        # subclass.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        class Musician(Person):
            instrument = CharField()
        p = Person(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>""")
        m = Musician(auto_id=False)
        self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Instrument: <input type="text" name="instrument" /></li>""")
        # Yes, you can subclass multiple forms. The fields are added in the order in
        # which the parent classes are listed.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        class Instrument(Form):
            instrument = CharField()
        class Beatle(Person, Instrument):
            haircut_type = CharField()
        b = Beatle(auto_id=False)
        # NOTE(review): the expected order below shows Instrument's field first —
        # fields appear in MRO resolution order as exercised here.
        self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li>
<li>First name: <input type="text" name="first_name" /></li>
<li>Last name: <input type="text" name="last_name" /></li>
<li>Birthday: <input type="text" name="birthday" /></li>
<li>Haircut type: <input type="text" name="haircut_type" /></li>""")
    def test_forms_with_prefixes(self):
        """A form prefix namespaces field names in both rendering and data lookup."""
        # Sometimes it's necessary to have multiple forms display on the same HTML page,
        # or multiple copies of the same form. We can accomplish this with form prefixes.
        # Pass the keyword argument 'prefix' to the Form constructor to use this feature.
        # This value will be prepended to each HTML form field name. One way to think
        # about this is "namespaces for HTML forms". Notice that in the data argument,
        # each field's key has the prefix, in this case 'person1', prepended to the
        # actual field name.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
        data = {
            'person1-first_name': 'John',
            'person1-last_name': 'Lennon',
            'person1-birthday': '1940-10-9'
        }
        p = Person(data, prefix='person1')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li>
<li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li>
<li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""")
        self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />')
        self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />')
        self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />')
        self.assertEqual(p.errors, {})
        self.assertTrue(p.is_valid())
        # cleaned_data keys are the plain field names, without the prefix.
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
        # Let's try submitting some bad data to make sure form.errors and field.errors
        # work as expected.
        data = {
            'person1-first_name': '',
            'person1-last_name': '',
            'person1-birthday': ''
        }
        p = Person(data, prefix='person1')
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        self.assertEqual(p['first_name'].errors, ['This field is required.'])
        # Field lookup is by plain field name; the prefixed key must not work.
        try:
            p['person1-first_name'].errors
            self.fail('Attempts to access non-existent fields should fail.')
        except KeyError:
            pass
        # In this example, the data doesn't have a prefix, but the form requires it, so
        # the form doesn't "see" the fields.
        data = {
            'first_name': 'John',
            'last_name': 'Lennon',
            'birthday': '1940-10-9'
        }
        p = Person(data, prefix='person1')
        self.assertEqual(p.errors['first_name'], ['This field is required.'])
        self.assertEqual(p.errors['last_name'], ['This field is required.'])
        self.assertEqual(p.errors['birthday'], ['This field is required.'])
        # With prefixes, a single data dictionary can hold data for multiple instances
        # of the same form.
        data = {
            'person1-first_name': 'John',
            'person1-last_name': 'Lennon',
            'person1-birthday': '1940-10-9',
            'person2-first_name': 'Jim',
            'person2-last_name': 'Morrison',
            'person2-birthday': '1943-12-8'
        }
        p1 = Person(data, prefix='person1')
        self.assertTrue(p1.is_valid())
        self.assertEqual(p1.cleaned_data['first_name'], 'John')
        self.assertEqual(p1.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9))
        p2 = Person(data, prefix='person2')
        self.assertTrue(p2.is_valid())
        self.assertEqual(p2.cleaned_data['first_name'], 'Jim')
        self.assertEqual(p2.cleaned_data['last_name'], 'Morrison')
        self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8))
        # By default, forms append a hyphen between the prefix and the field name, but a
        # form can alter that behavior by implementing the add_prefix() method. This
        # method takes a field name and returns the prefixed field, according to
        # self.prefix.
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()
            def add_prefix(self, field_name):
                # Custom separator: '<prefix>-prefix-<field>' instead of '<prefix>-<field>'.
                return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name
        p = Person(prefix='foo')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
        data = {
            'foo-prefix-first_name': 'John',
            'foo-prefix-last_name': 'Lennon',
            'foo-prefix-birthday': '1940-10-9'
        }
        p = Person(data, prefix='foo')
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))
def test_class_prefix(self):
# Prefix can be also specified at the class level.
class Person(Form):
first_name = CharField()
prefix = 'foo'
p = Person()
self.assertEqual(p.prefix, 'foo')
p = Person(prefix='bar')
self.assertEqual(p.prefix, 'bar')
    def test_forms_with_null_boolean(self):
        """NullBooleanField maps data values to its Unknown/Yes/No select widget."""
        # NullBooleanField is a bit of a special case because its presentation (widget)
        # is different than its data. This is handled transparently, though.
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()
        # No value submitted: 'Unknown' is selected.
        p = Person({'name': 'Joe'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        # String '1' also means Unknown.
        p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        # String '2' selects Yes.
        p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        # String '3' selects No.
        p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
        # Python booleans work too: True selects Yes ...
        p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        # ... and False selects No.
        p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
    def test_forms_with_file_fields(self):
        """FileField validates the files dict: missing, empty, and bogus uploads."""
        # FileFields are a special case because they take their data from the request.FILES,
        # not request.POST.
        class FileForm(Form):
            file1 = FileField()
        f = FileForm(auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
        # Bound with no file: the field is required.
        f = FileForm(data={}, files={}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')
        # A zero-byte upload is rejected as empty.
        f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')
        # A non-file value (e.g. the form enctype was wrong) is rejected.
        f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')
        # A real, non-empty upload validates.
        f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
        self.assertTrue(f.is_valid())
        # Non-ASCII filenames and content are handled as well.
        f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
    def test_basic_processing_in_view(self):
        """Simulates the canonical GET/POST view pattern around a form."""
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                # Cross-field validation: the two passwords must match.
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data
        def my_function(method, post_data):
            # Minimal stand-in for a Django view: bind on POST, render otherwise.
            if method == 'POST':
                form = UserRegistration(post_data, auto_id=False)
            else:
                form = UserRegistration(auto_id=False)
            if form.is_valid():
                return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
            t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
            return t.render(Context({'form': form}))
        # Case 1: GET (an empty form, with no errors).
        self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 2: POST with erroneous data (a redisplayed form, with errors).
        self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 3: POST with valid data (the success message).
        self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}),
            str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]"))
    def test_templates_with_forms(self):
        """Rendering individual form fields, labels, help text and errors in templates."""
        class UserRegistration(Form):
            username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.")
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)
            def clean(self):
                # Cross-field validation: the two passwords must match.
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data
        # You have full flexibility in displaying form fields in a template. Just pass a
        # Form instance to the template, and use "dot" access to refer to individual
        # fields. Note, however, that this flexibility comes with the responsibility of
        # displaying all the errors, including any that might not be associated with a
        # particular field.
        t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p>
<ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label to output a field's label. You can specify the label for
        # a field by using the 'label' argument to a Field class. If you don't specify
        # 'label', Django will use the field name with underscores converted to spaces,
        # and the initial letter capitalized.
        t = Template('''<form action="">
<p><label>{{ form.username.label }}: {{ form.username }}</label></p>
<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>
<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p><label>Username: <input type="text" name="username" maxlength="10" /></label></p>
<p><label>Password1: <input type="password" name="password1" /></label></p>
<p><label>Password2: <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        # Use form.[field].label_tag to output a field's label with a <label> tag
        # wrapped around it, but *only* if the given field has an "id" attribute.
        # Recall from above that passing the "auto_id" argument to a Form gives each
        # field an "id" attribute.
        t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /></p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action="">
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
<p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p>
<p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p>
<input type="submit" />
</form>""")
        # Use form.[field].help_text to output a field's help text. If the given field
        # does not have help text, nothing will be output.
        t = Template('''<form action="">
<p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p>
<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>
<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action="">
<p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn't already exist.</p>
<p>Password1: <input type="password" name="password1" /></p>
<p>Password2: <input type="password" name="password2" /></p>
<input type="submit" />
</form>""")
        self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '')
        # To display the errors that aren't associated with a particular field -- e.g.,
        # the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the
        # template. If used on its own, it is displayed as a <ul> (or an empty string, if
        # the list of errors is empty). You can also use it in {% if %} statements.
        t = Template('''<form action="">
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
        t = Template('''<form action="">
{{ form.non_field_errors }}
{{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p>
{{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p>
{{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p>
<input type="submit" />
</form>''')
        self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action="">
<ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul>
<p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p>
<p><label>Password: <input type="password" name="password1" /></label></p>
<p><label>Password (again): <input type="password" name="password2" /></label></p>
<input type="submit" />
</form>""")
    def test_empty_permitted(self):
        """empty_permitted=True lets a completely empty form validate with no data."""
        # Sometimes (pretty much in formsets) we want to allow a form to pass validation
        # if it is completely empty. We can accomplish this by using the empty_permitted
        # argument to a form constructor.
        class SongForm(Form):
            artist = CharField()
            name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
        data = {'artist': '', 'song': ''}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']})
        self.assertEqual(form.cleaned_data, {})
        # Now let's show what happens when empty_permitted=True and the form is empty.
        form = SongForm(data, empty_permitted=True)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.errors, {})
        self.assertEqual(form.cleaned_data, {})
        # But if we fill in data for one of the fields, the form is no longer empty and
        # the whole thing must pass validation.
        data = {'artist': 'The Doors', 'song': ''}
        form = SongForm(data, empty_permitted=False)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['This field is required.']})
        self.assertEqual(form.cleaned_data, {'artist': 'The Doors'})
        # If a field is not given in the data then None is returned for its data. Let's
        # make sure that when checking for empty_permitted that None is treated
        # accordingly.
        data = {'artist': None, 'song': ''}
        form = SongForm(data, empty_permitted=True)
        self.assertTrue(form.is_valid())
        # However, we *really* need to be sure we are checking for None as any data in
        # initial that returns False on a boolean call needs to be treated literally.
        class PriceForm(Form):
            amount = FloatField()
            qty = IntegerField()
        data = {'amount': '0.0', 'qty': ''}
        form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True)
        self.assertTrue(form.is_valid())
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ['token'])
self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name'])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>')
    def test_error_html_required_html_classes(self):
        """error_css_class/required_css_class are added to rows and label tags."""
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()
            email = EmailField(required=False)
            age = IntegerField()
        p = Person({})
        # Classes can be set on the instance after construction.
        p.error_css_class = 'error'
        p.required_css_class = 'required'
        self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li>
<li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></li>
<li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li>
<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""")
        self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p>
<p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></p>
<p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""")
        self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label class="required" for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""")
    def test_label_has_required_css_class(self):
        """
        #17922 - required_css_class is added to the label_tag() of required fields.
        """
        class SomeForm(Form):
            required_css_class = 'required'
            field = CharField(max_length=10)
            field2 = IntegerField(required=False)
        f = SomeForm({'field': 'test'})
        self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>')
        # Extra attrs passed to label_tag() are merged with the required class.
        self.assertHTMLEqual(f['field'].label_tag(attrs={'class': 'foo'}),
            '<label for="id_field" class="foo required">Field:</label>')
        # Optional fields do not get the class.
        self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>')
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />')
    def test_multivalue_field_validation(self):
        """Validators on a MultiValueField run against the compressed value;
        subfield errors are collected per subfield."""
        def bad_names(value):
            # Validator applied to the compressed (joined) value.
            if value == 'bad value':
                raise ValidationError('bad value not allowed')
        class NameField(MultiValueField):
            def __init__(self, fields=(), *args, **kwargs):
                fields = (CharField(label='First name', max_length=10),
                          CharField(label='Last name', max_length=10))
                super(NameField, self).__init__(fields=fields, *args, **kwargs)
            def compress(self, data_list):
                return ' '.join(data_list)
        class NameForm(Form):
            name = NameField(validators=[bad_names])
        # ['bad', 'value'] compresses to 'bad value', tripping the validator.
        form = NameForm(data={'name': ['bad', 'value']})
        form.full_clean()
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['bad value not allowed']})
        # Each over-long subfield contributes its own max_length error.
        form = NameForm(data={'name': ['should be overly', 'long for the field names']})
        self.assertFalse(form.is_valid())
        self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).',
                                                'Ensure this value has at most 10 characters (it has 24).']})
        form = NameForm(data={'name': ['fname', 'lname']})
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {'name': 'fname lname'})
def test_multivalue_deep_copy(self):
"""
#19298 -- MultiValueField needs to override the default as it needs
to deep-copy subfields:
"""
class ChoicesField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (ChoiceField(label='Rank',
choices=((1, 1), (2, 2))),
CharField(label='Name', max_length=10))
super(ChoicesField, self).__init__(fields=fields, *args, **kwargs)
field = ChoicesField()
field2 = copy.deepcopy(field)
self.assertIsInstance(field2, ChoicesField)
self.assertIsNot(field2.fields, field.fields)
self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)
def test_multivalue_initial_data(self):
"""
#23674 -- invalid initial data should not break form.changed_data()
"""
class DateAgeField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (DateField(label="Date"), IntegerField(label="Age"))
super(DateAgeField, self).__init__(fields=fields, *args, **kwargs)
class DateAgeForm(Form):
date_age = DateAgeField()
data = {"date_age": ["1998-12-06", 16]}
form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
self.assertTrue(form.has_changed())
def test_multivalue_optional_subfields(self):
    """
    Exercise `required` / `require_all_fields` combinations on a
    MultiValueField whose subfields are individually required/optional.
    """
    class PhoneField(MultiValueField):
        def __init__(self, *args, **kwargs):
            fields = (
                CharField(label='Country Code', validators=[
                    RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]),
                CharField(label='Phone Number'),
                CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}),
                CharField(label='Label', required=False, help_text='E.g. home, work.'),
            )
            super(PhoneField, self).__init__(fields, *args, **kwargs)

        def compress(self, data_list):
            if data_list:
                return '%s.%s ext. %s (label: %s)' % tuple(data_list)
            return None

    # An empty value for any field will raise a `required` error on a
    # required `MultiValueField`.
    f = PhoneField()
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61'])
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123'])
    self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])

    # Empty values for fields will NOT raise a `required` error on an
    # optional `MultiValueField`
    f = PhoneField(required=False)
    self.assertIsNone(f.clean(''))
    self.assertIsNone(f.clean(None))
    self.assertIsNone(f.clean([]))
    self.assertEqual('+61. ext.  (label: )', f.clean(['+61']))
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home']))
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])

    # For a required `MultiValueField` with `require_all_fields=False`, a
    # `required` error will only be raised if all fields are empty. Fields
    # can individually be required or optional. An empty value for any
    # required field will raise an `incomplete` error.
    f = PhoneField(require_all_fields=False)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '')
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None)
    self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, [])
    self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    # Raw strings: "\." in a normal string literal is an invalid escape
    # sequence (DeprecationWarning since Python 3.6).
    six.assertRaisesRegex(self, ValidationError,
        r"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])

    # For an optional `MultiValueField` with `require_all_fields=False`, we
    # don't get any `required` error but we still get `incomplete` errors.
    f = PhoneField(required=False, require_all_fields=False)
    self.assertIsNone(f.clean(''))
    self.assertIsNone(f.clean(None))
    self.assertIsNone(f.clean([]))
    self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61'])
    self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123']))
    six.assertRaisesRegex(self, ValidationError,
        r"'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home'])
    self.assertRaisesMessage(ValidationError,
        "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home'])
def test_custom_empty_values(self):
    """
    Test that form fields can customize what is considered as an empty value
    for themselves (#19997).
    """
    class CustomJSONField(CharField):
        # `{}` is falsy but must NOT be treated as empty for this field,
        # so it is excluded from `empty_values`.
        empty_values = [None, '']

        def to_python(self, value):
            # Fake json.loads
            if value == '{}':
                return {}
            return super(CustomJSONField, self).to_python(value)

    class JSONForm(forms.Form):
        json = CustomJSONField()

    form = JSONForm(data={'json': '{}'})
    form.full_clean()
    self.assertEqual(form.cleaned_data, {'json': {}})
def test_boundfield_label_tag(self):
    """label_tag() renders the <label>, honoring custom text, escaping
    and extra attributes."""
    class SomeForm(Form):
        field = CharField()

    boundfield = SomeForm()['field']

    testcases = [  # (args, kwargs, expected)
        # without anything: just print the <label>
        ((), {}, '<label for="id_field">Field:</label>'),
        # passing just one argument: overrides the field's label
        (('custom',), {}, '<label for="id_field">custom:</label>'),
        # the overridden label is escaped (mark_safe() opts out of escaping)
        (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'),
        ((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'),
        # Passing attrs to add extra attributes on the <label>
        ((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>')
    ]

    for args, kwargs, expected in testcases:
        self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected)
def test_boundfield_label_tag_no_id(self):
    """
    If a widget has no id, label_tag just returns the text with no
    surrounding <label>.
    """
    class SomeForm(Form):
        field = CharField()

    boundfield = SomeForm(auto_id='')['field']

    self.assertHTMLEqual(boundfield.label_tag(), 'Field:')
    # A custom label passed in is still HTML-escaped.
    self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:')
def test_boundfield_label_tag_custom_widget_id_for_label(self):
    # A widget's id_for_label() controls the "for" attribute of the
    # rendered <label>; returning None drops the attribute entirely.
    class CustomIdForLabelTextInput(TextInput):
        def id_for_label(self, id):
            return 'custom_' + id

    class EmptyIdForLabelTextInput(TextInput):
        def id_for_label(self, id):
            return None

    class SomeForm(Form):
        custom = CharField(widget=CustomIdForLabelTextInput)
        empty = CharField(widget=EmptyIdForLabelTextInput)

    form = SomeForm()
    self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>')
    self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>')
def test_boundfield_empty_label(self):
    # An explicitly empty label still renders the <label> element.
    class SomeForm(Form):
        field = CharField(label='')

    boundfield = SomeForm()['field']

    self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
def test_boundfield_id_for_label(self):
    # id_for_label is derived from the auto-generated widget id.
    class SomeForm(Form):
        field = CharField(label='')

    self.assertEqual(SomeForm()['field'].id_for_label, 'id_field')
def test_boundfield_id_for_label_override_by_attrs(self):
    """
    If an id is provided in `Widget.attrs`, it overrides the generated ID,
    unless it is `None`.
    """
    class SomeForm(Form):
        field = CharField(widget=forms.TextInput(attrs={'id': 'myCustomID'}))
        field_none = CharField(widget=forms.TextInput(attrs={'id': None}))

    form = SomeForm()
    self.assertEqual(form['field'].id_for_label, 'myCustomID')
    # attrs={'id': None} falls back to the auto-generated id.
    self.assertEqual(form['field_none'].id_for_label, 'id_field_none')
def test_label_tag_override(self):
    """
    BoundField label_suffix (if provided) overrides Form label_suffix
    """
    class SomeForm(Form):
        field = CharField()

    boundfield = SomeForm(label_suffix='!')['field']

    # The per-call suffix '$' wins over the form-level '!'.
    self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>')
def test_field_name(self):
    """#5749 - `field_name` may be used as a key in _html_output()."""
    class SomeForm(Form):
        some_field = CharField()

        def as_p(self):
            # Render only the field name to show the placeholder works.
            return self._html_output(
                normal_row='<p id="p_%(field_name)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )

    form = SomeForm()
    self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_field_without_css_classes(self):
    """
    `css_classes` may be used as a key in _html_output() (empty classes).
    """
    class SomeForm(Form):
        some_field = CharField()

        def as_p(self):
            return self._html_output(
                normal_row='<p class="%(css_classes)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )

    form = SomeForm()
    # No required_css_class/error_css_class defined, so the class is empty.
    self.assertHTMLEqual(form.as_p(), '<p class=""></p>')
def test_field_with_css_class(self):
    """
    `css_classes` may be used as a key in _html_output() (class comes
    from required_css_class in this case).
    """
    class SomeForm(Form):
        some_field = CharField()
        required_css_class = 'foo'

        def as_p(self):
            return self._html_output(
                normal_row='<p class="%(css_classes)s"></p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )

    form = SomeForm()
    # The field is required, so required_css_class is applied to the row.
    self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>')
def test_field_name_with_hidden_input(self):
    """
    BaseForm._html_output() should merge all the hidden input fields and
    put them in the last row.
    """
    class SomeForm(Form):
        hidden1 = CharField(widget=HiddenInput)
        custom = CharField()
        hidden2 = CharField(widget=HiddenInput)

        def as_p(self):
            return self._html_output(
                normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
                error_row='%s',
                row_ender='</p>',
                help_text_html=' %s',
                errors_on_separate_row=True,
            )

    form = SomeForm()
    # Both hidden inputs end up appended to the last visible row.
    self.assertHTMLEqual(
        form.as_p(),
        '<p><input id="id_custom" name="custom" type="text" /> custom'
        '<input id="id_hidden1" name="hidden1" type="hidden" />'
        '<input id="id_hidden2" name="hidden2" type="hidden" /></p>'
    )
def test_field_name_with_hidden_input_and_non_matching_row_ender(self):
    """
    BaseForm._html_output() should merge all the hidden input fields and
    put them in the last row ended with the specific row ender.
    """
    class SomeForm(Form):
        hidden1 = CharField(widget=HiddenInput)
        custom = CharField()
        hidden2 = CharField(widget=HiddenInput)

        def as_p(self):
            # row_ender deliberately does not appear in normal_row, so the
            # hidden inputs are emitted in their own trailing row.
            return self._html_output(
                normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>',
                error_row='%s',
                row_ender='<hr/><hr/>',
                help_text_html=' %s',
                errors_on_separate_row=True
            )

    form = SomeForm()
    self.assertHTMLEqual(
        form.as_p(),
        '<p><input id="id_custom" name="custom" type="text" /> custom</p>\n'
        '<input id="id_hidden1" name="hidden1" type="hidden" />'
        '<input id="id_hidden2" name="hidden2" type="hidden" /><hr/><hr/>'
    )
def test_error_dict(self):
    # ErrorDict renders consistently via as_text(), as_ul() and as_json();
    # form-level errors are stored under the '__all__' key.
    class MyForm(Form):
        foo = CharField()
        bar = CharField()

        def clean(self):
            raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2})

    form = MyForm({})
    self.assertEqual(form.is_valid(), False)

    errors = form.errors.as_text()
    control = [
        '* foo\n  * This field is required.',
        '* bar\n  * This field is required.',
        '* __all__\n  * Non-field error.',
    ]
    for error in control:
        self.assertIn(error, errors)

    errors = form.errors.as_ul()
    control = [
        '<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
        '<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
        '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>',
    ]
    for error in control:
        self.assertInHTML(error, errors)

    errors = json.loads(form.errors.as_json())
    control = {
        'foo': [{'code': 'required', 'message': 'This field is required.'}],
        'bar': [{'code': 'required', 'message': 'This field is required.'}],
        '__all__': [{'code': 'secret', 'message': 'Non-field error.'}]
    }
    self.assertEqual(errors, control)
def test_error_dict_as_json_escape_html(self):
    """#21962 - adding html escape flag to ErrorDict"""
    class MyForm(Form):
        foo = CharField()
        bar = CharField()

        def clean(self):
            raise ValidationError('<p>Non-field error.</p>',
                                  code='secret',
                                  params={'a': 1, 'b': 2})

    control = {
        'foo': [{'code': 'required', 'message': 'This field is required.'}],
        'bar': [{'code': 'required', 'message': 'This field is required.'}],
        '__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}]
    }

    form = MyForm({})
    self.assertFalse(form.is_valid())

    # Without the flag the raw message is emitted unescaped.
    errors = json.loads(form.errors.as_json())
    self.assertEqual(errors, control)

    # With escape_html=True the HTML in the message must be escaped.
    errors = json.loads(form.errors.as_json(escape_html=True))
    control['__all__'][0]['message'] = '&lt;p&gt;Non-field error.&lt;/p&gt;'
    self.assertEqual(errors, control)
def test_error_list(self):
    # ErrorList behaves like a plain list and renders via as_text(),
    # as_ul() and as_json(); ValidationError params are interpolated.
    e = ErrorList()
    e.append('Foo')
    e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))

    self.assertIsInstance(e, list)
    self.assertIn('Foo', e)
    self.assertIn('Foo', forms.ValidationError(e))

    self.assertEqual(
        e.as_text(),
        '* Foo\n* Foobar'
    )

    self.assertEqual(
        e.as_ul(),
        '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
    )

    self.assertEqual(
        json.loads(e.as_json()),
        [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}]
    )
def test_error_list_class_not_specified(self):
    # Default CSS class is plain "errorlist".
    e = ErrorList()
    e.append('Foo')
    e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))

    self.assertEqual(
        e.as_ul(),
        '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
    )
def test_error_list_class_has_one_class_specified(self):
    # A custom error_class is appended after the default "errorlist".
    e = ErrorList(error_class='foobar-error-class')
    e.append('Foo')
    e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'}))

    self.assertEqual(
        e.as_ul(),
        '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>'
    )
def test_error_list_with_hidden_field_errors_has_correct_class(self):
    # Errors on hidden fields are surfaced as top-of-form errors, carry the
    # "nonfield" CSS class and name the field in a "(Hidden field ...)" prefix.
    class Person(Form):
        first_name = CharField()
        last_name = CharField(widget=HiddenInput)

    p = Person({'first_name': 'John'})
    self.assertHTMLEqual(
        p.as_ul(),
        """<li><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></li>"""
    )
    self.assertHTMLEqual(
        p.as_p(),
        """<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></p>"""
    )
    self.assertHTMLEqual(
        p.as_table(),
        """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></td></tr>"""
    )
def test_error_list_with_non_field_errors_has_correct_class(self):
    # Errors raised from Form.clean() are non-field errors: they render at
    # the top of the form with the "nonfield" CSS class.
    class Person(Form):
        first_name = CharField()
        last_name = CharField()

        def clean(self):
            raise ValidationError('Generic validation error')

    p = Person({'first_name': 'John', 'last_name': 'Lennon'})
    self.assertHTMLEqual(
        str(p.non_field_errors()),
        '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
    )
    self.assertHTMLEqual(
        p.as_ul(),
        """<li><ul class="errorlist nonfield"><li>Generic validation error</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></li>
<li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>"""
    )
    self.assertHTMLEqual(
        p.non_field_errors().as_text(),
        '* Generic validation error'
    )
    self.assertHTMLEqual(
        p.as_p(),
        """<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></p>
<p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>"""
    )
    self.assertHTMLEqual(
        p.as_table(),
        """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td><input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>"""
    )
def test_errorlist_override(self):
    # A custom ErrorList subclass passed via error_class controls how
    # field errors are rendered (here: <div>s instead of <ul>).
    @python_2_unicode_compatible
    class DivErrorList(ErrorList):
        def __str__(self):
            return self.as_divs()

        def as_divs(self):
            if not self:
                return ''
            return '<div class="errorlist">%s</div>' % ''.join(
                '<div class="error">%s</div>' % force_text(e) for e in self)

    class CommentForm(Form):
        name = CharField(max_length=50, required=False)
        email = EmailField()
        comment = CharField()

    data = dict(email='invalid')
    f = CommentForm(data, auto_id=False, error_class=DivErrorList)
    self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p>
<div class="errorlist"><div class="error">Enter a valid email address.</div></div>
<p>Email: <input type="email" name="email" value="invalid" /></p>
<div class="errorlist"><div class="error">This field is required.</div></div>
<p>Comment: <input type="text" name="comment" /></p>""")
def test_baseform_repr(self):
    """
    BaseForm.__repr__() should contain some basic information about the
    form.
    """
    # Validity is "Unknown" until is_valid()/full_clean() has run.
    p = Person()
    self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>")
    p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'})
    self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>")
    p.is_valid()
    self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>")
    p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
    p.is_valid()
    self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>")
def test_baseform_repr_dont_trigger_validation(self):
    """
    BaseForm.__repr__() shouldn't trigger the form validation.
    """
    p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'})
    repr(p)
    # cleaned_data must not exist yet, proving repr() did not validate.
    self.assertRaises(AttributeError, lambda: p.cleaned_data)
    self.assertFalse(p.is_valid())
    self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'})
def test_accessing_clean(self):
    # Form.clean() may read and return a modified cleaned_data dict.
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            data = self.cleaned_data

            if not self.errors:
                data['username'] = data['username'].lower()

            return data

    f = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_nothing_returned(self):
    # If clean() mutates self.cleaned_data in place and returns None,
    # the mutated dict is still used.
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            self.cleaned_data['username'] = self.cleaned_data['username'].lower()
            # don't return anything

    f = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_changing_cleaned_data_in_clean(self):
    # clean() may return a brand-new dict, which replaces cleaned_data.
    class UserForm(Form):
        username = CharField(max_length=10)
        password = CharField(widget=PasswordInput)

        def clean(self):
            data = self.cleaned_data

            # Return a different dict. We have not changed self.cleaned_data.
            return {
                'username': data['username'].lower(),
                'password': 'this_is_not_a_secret',
            }

    f = UserForm({'username': 'SirRobin', 'password': 'blue'})
    self.assertTrue(f.is_valid())
    self.assertEqual(f.cleaned_data['username'], 'sirrobin')
def test_multipart_encoded_form(self):
    # is_multipart() is True exactly when a form has a file-upload field.
    class FormWithoutFile(Form):
        username = CharField()

    class FormWithFile(Form):
        username = CharField()
        file = FileField()

    class FormWithImage(Form):
        image = ImageField()

    self.assertFalse(FormWithoutFile().is_multipart())
    self.assertTrue(FormWithFile().is_multipart())
    self.assertTrue(FormWithImage().is_multipart())
def test_html_safe(self):
    # Forms and BoundFields expose __html__ so template engines treat
    # their string output as already-safe HTML.
    class SimpleForm(Form):
        username = CharField()

    form = SimpleForm()
    self.assertTrue(hasattr(SimpleForm, '__html__'))
    self.assertEqual(force_text(form), form.__html__())
    self.assertTrue(hasattr(form['username'], '__html__'))
    self.assertEqual(force_text(form['username']), form['username'].__html__())
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Package metadata: version string, the public API exported via
# ``from multiprocessing import *``, and original author.
__version__ = '0.70a1'

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]

__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base class for all exceptions raised by this package."""
    pass
class BufferTooShort(ProcessError):
    """Raised by Connection.recv_bytes_into() when the supplied buffer
    is too small to hold the received message."""
    pass
class TimeoutError(ProcessError):
    """Raised when a timed operation (e.g. a pool result wait) expires."""
    pass
class AuthenticationError(ProcessError):
    """Raised when connection authentication (digest handshake) fails."""
    pass
# This is down here because _multiprocessing uses BufferTooShort
# (the C extension imports it from this module at load time).
try:
    # IronPython does not provide _multiprocessing
    import _multiprocessing
except ImportError:
    pass
#
# Definitions not depending on native semaphores
#
def Manager():
    '''
    Return a manager connected to a freshly started server process.

    The manager's methods (such as ``Lock()``, ``Condition()`` and
    ``Queue()``) can be used to create objects shared between processes.
    '''
    from multiprocessing.managers import SyncManager
    manager = SyncManager()
    manager.start()
    return manager
def Pipe(duplex=True):
    '''
    Return a pair of connection objects joined by an OS-level pipe.

    If ``duplex`` is true (the default) both ends can send and receive;
    otherwise the first end is read-only and the second write-only.
    '''
    from multiprocessing import connection
    return connection.Pipe(duplex)
def cpu_count():
    '''
    Return the number of CPUs in the system.

    Raises NotImplementedError when the count cannot be determined.
    '''
    count = 0
    if sys.platform == 'win32':
        # Windows publishes the count through an environment variable.
        try:
            count = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            count = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD-style systems: ask sysctl (which lives under /usr on macOS).
        command = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            command = '/usr' + command
        try:
            with os.popen(command) as pipe:
                count = int(pipe.read())
        except ValueError:
            count = 0
    else:
        # POSIX: number of processors currently online.
        try:
            count = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            count = 0
    if count >= 1:
        return count
    raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.

    No-op everywhere except a frozen Windows executable.
    '''
    frozen = getattr(sys, 'frozen', False)
    if frozen and sys.platform == 'win32':
        from multiprocessing.forking import freeze_support
        freeze_support()
def get_logger():
    '''
    Return the package-level logger, creating it on first use.
    '''
    from multiprocessing import util
    return util.get_logger()
def log_to_stderr(level=None):
    '''
    Turn on logging and attach a handler that prints to stderr.

    Returns the package logger; ``level`` optionally sets its level.
    '''
    from multiprocessing import util
    return util.log_to_stderr(level)
def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes.
    '''
    # Importing the reduction module registers the picklers as a side
    # effect; the import itself is the whole job.
    from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
def Lock():
    '''
    Return a non-recursive, process-shared lock object.
    '''
    from multiprocessing import synchronize
    return synchronize.Lock()
def RLock():
    '''
    Return a recursive (re-entrant), process-shared lock object.
    '''
    from multiprocessing import synchronize
    return synchronize.RLock()
def Condition(lock=None):
    '''
    Return a condition variable, optionally built on an existing lock.
    '''
    from multiprocessing import synchronize
    return synchronize.Condition(lock)
def Semaphore(value=1):
    '''
    Return a semaphore object with initial counter ``value``.
    '''
    from multiprocessing import synchronize
    return synchronize.Semaphore(value)
def BoundedSemaphore(value=1):
    '''
    Return a bounded semaphore: releasing above ``value`` is an error.
    '''
    from multiprocessing import synchronize
    return synchronize.BoundedSemaphore(value)
def Event():
    '''
    Return a process-shared event object.
    '''
    from multiprocessing import synchronize
    return synchronize.Event()
def Queue(maxsize=0):
    '''
    Return a process-shared FIFO queue (unbounded when ``maxsize`` <= 0).
    '''
    from multiprocessing import queues
    return queues.Queue(maxsize)
def JoinableQueue(maxsize=0):
    '''
    Return a queue that additionally supports task_done()/join().
    '''
    from multiprocessing import queues
    return queues.JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Return a pool of worker processes.

    ``processes`` defaults to the machine's CPU count; ``initializer``
    (with ``initargs``) runs once in each worker; ``maxtasksperchild``
    recycles workers after that many tasks.
    '''
    from multiprocessing import pool
    return pool.Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
    '''
    Return a ctypes object allocated in shared memory (no lock).
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Return a ctypes array allocated in shared memory (no lock).
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
    '''
    Return a synchronized (lock-wrapped) shared ctypes object.
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Return a synchronized (lock-wrapped) shared ctypes array.
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
# set_executable() only exists (and only makes sense) on Windows, where
# child processes are launched by spawning a fresh interpreter.
if sys.platform == 'win32':

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    __all__ += ['set_executable']
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow;
import java.util.Iterator;
/**
* A data flow graph representing a TensorFlow computation.
*
* <p>Instances of a Graph are thread-safe.
*
* <p><b>WARNING:</b> Resources consumed by the Graph object must be explicitly freed by invoking
 * the {@link #close()} method when the Graph object is no longer needed.
*/
public final class Graph implements ExecutionEnvironment, AutoCloseable {
/** Create an empty Graph. */
public Graph() {
  // Allocates the underlying native graph and stores its address.
  nativeHandle = allocate();
}
/**
 * Create a Graph from an existing handle (takes ownership).
 *
 * <p>The caller must not release the handle; this Graph becomes
 * responsible for freeing it via {@link #close()}.
 */
Graph(long nativeHandle) {
  this.nativeHandle = nativeHandle;
}
/**
 * Release resources associated with the Graph.
 *
 * <p>Blocks until there are no active {@link Session} instances referring to this Graph. A Graph
 * is not usable after close returns.
 */
@Override
public void close() {
  synchronized (nativeHandleLock) {
    if (nativeHandle == 0) {
      // Already closed; close() is idempotent.
      return;
    }
    // Wait for all outstanding references (sessions/iterators) to be
    // released before deleting the native handle.
    while (refcount > 0) {
      try {
        nativeHandleLock.wait();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        // Possible leak of the graph in this case?
        return;
      }
    }
    delete(nativeHandle);
    nativeHandle = 0;
  }
}
/**
 * Returns the operation (node in the Graph) with the provided name.
 *
 * <p>Or {@code null} if no such operation exists in the Graph.
 */
public GraphOperation operation(String name) {
  synchronized (nativeHandleLock) {
    long opHandle = operation(nativeHandle, name);
    return (opHandle == 0) ? null : new GraphOperation(this, opHandle);
  }
}
/**
 * Iterator over all the {@link Operation}s in the graph.
 *
 * <p>The order of iteration is unspecified. Consumers of the iterator will receive no
 * notification should the underlying graph change during iteration.
 */
public Iterator<Operation> operations() {
  return new OperationIterator(this);
}
/**
 * Returns a builder to add {@link Operation}s to the Graph.
 *
 * @param type of the Operation (i.e., identifies the computation to be performed)
 * @param name to refer to the created Operation in the graph.
 * @return an {@link OperationBuilder}, which will add the Operation to the graph when {@link
 *     OperationBuilder#build()} is invoked. If {@link OperationBuilder#build()} is not invoked,
 *     then some resources may leak.
 */
@Override
public GraphOperationBuilder opBuilder(String type, String name) {
  return new GraphOperationBuilder(this, type, name);
}
/**
 * Import a serialized representation of a TensorFlow graph.
 *
 * <p>The serialized representation of the graph, often referred to as a <i>GraphDef</i>, can be
 * generated by {@link #toGraphDef()} and equivalents in other language APIs.
 *
 * @throws IllegalArgumentException if graphDef is not a recognized serialization of a graph.
 * @see #importGraphDef(byte[], String)
 */
public void importGraphDef(byte[] graphDef) throws IllegalArgumentException {
  // Delegate with an empty name prefix.
  importGraphDef(graphDef, "");
}
/**
 * Import a serialized representation of a TensorFlow graph.
 *
 * @param graphDef the serialized representation of a TensorFlow graph.
 * @param prefix a prefix that will be prepended to names in graphDef
 * @throws IllegalArgumentException if graphDef is not a recognized serialization of a graph.
 * @see #importGraphDef(byte[])
 */
public void importGraphDef(byte[] graphDef, String prefix) throws IllegalArgumentException {
  if (graphDef == null || prefix == null) {
    throw new IllegalArgumentException("graphDef and prefix cannot be null");
  }
  // The native call must be serialized with other operations on the handle.
  synchronized (nativeHandleLock) {
    importGraphDef(nativeHandle, graphDef, prefix);
  }
}
/**
 * Generate a serialized representation of the Graph.
 *
 * @see #importGraphDef(byte[])
 * @see #importGraphDef(byte[], String)
 */
public byte[] toGraphDef() {
  synchronized (nativeHandleLock) {
    return toGraphDef(nativeHandle);
  }
}
/**
* Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s, i.e.,
* {@code d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...}
*
* <p>{@code dx} are used as initial gradients (which represent the symbolic partial derivatives
* of some loss function {@code L} w.r.t. {@code y}). {@code dx} must be null or have size of
* {@code y}.
*
* <p>If {@code dx} is null, the implementation will use dx of {@link
* org.tensorflow.op.core.OnesLike OnesLike} for all shapes in {@code y}.
*
* <p>{@code prefix} is used as the name prefix applied to all nodes added to the graph to compute
* gradients. It must be unique within the provided graph or the operation will fail.
*
* <p>If {@code prefix} is null, then one will be chosen automatically.
*
* @param prefix unique string prefix applied before the names of nodes added to the graph to
* compute gradients. If null, a default one will be chosen.
* @param y output of the function to derive
* @param x inputs of the function for which partial derivatives are computed
* @param dx if not null, the partial derivatives of some loss function {@code L} w.r.t. {@code y}
* @return the partial derivatives {@code dy} with the size of {@code x}
*/
  public Output<?>[] addGradients(String prefix, Output<?>[] y, Output<?>[] x, Output<?>[] dx) {
    Output<?>[] dy = new Output<?>[x.length];
    final long[] yHandles = new long[y.length];
    final int[] yIndices = new int[y.length];
    final long[] xHandles = new long[x.length];
    final int[] xIndices = new int[x.length];
    // dx is optional; the handle arrays stay null when it is absent so the
    // native side can apply its OnesLike default (see the javadoc above).
    long[] dxHandles = null;
    int[] dxIndices = null;
    try (Reference ref = ref()) {
      // Flatten every Output into its (operation handle, output index) pair
      // for the native call.
      for (int i = 0; i < y.length; ++i) {
        yHandles[i] = y[i].getUnsafeNativeHandle();
        yIndices[i] = y[i].index();
      }
      for (int i = 0; i < x.length; ++i) {
        xHandles[i] = x[i].getUnsafeNativeHandle();
        xIndices[i] = x[i].index();
      }
      if (dx != null && dx.length > 0) {
        dxHandles = new long[dx.length];
        dxIndices = new int[dx.length];
        for (int i = 0; i < dx.length; ++i) {
          dxHandles[i] = dx[i].getUnsafeNativeHandle();
          dxIndices[i] = dx[i].index();
        }
      }
      // Gradient outputs are returned in two continuous arrays concatenated into one. The first
      // holds the native handles of the gradient operations while the second holds the index of
      // their output e.g. given
      // xHandles = [x0Handle, x1Handle, ...] and xIndices = [x0Index, x1Index, ..], we obtain
      // dy = [dy0Handle, dy1Handle, ..., dy0Index, dy1Index, ...]
      long[] dyHandlesAndIndices =
          addGradients(
              ref.nativeHandle(),
              prefix,
              yHandles,
              yIndices,
              xHandles,
              xIndices,
              dxHandles,
              dxIndices);
      int ndy = dyHandlesAndIndices.length >> 1;
      if (ndy != dy.length) {
        throw new IllegalStateException(String.valueOf(ndy) + " gradients were added to the graph when " + dy.length
            + " were expected");
      }
      // Re-wrap the (handle, index) pairs as Output objects bound to this graph.
      for (int i = 0, j = ndy; i < ndy; ++i, ++j) {
        GraphOperation op = new GraphOperation(this, dyHandlesAndIndices[i]);
        dy[i] = new Output<>(op, (int) dyHandlesAndIndices[j]);
      }
    }
    return dy;
  }
/**
* Adds operations to compute the partial derivatives of sum of {@code y}s w.r.t {@code x}s,
* i.e., {@code dy/dx_1, dy/dx_2...}
* <p>
* This is a simplified version of {@link #addGradients(String, Output[], Output[], Output[])}
* where {@code y} is a single output, {@code dx} is null and {@code prefix} is null.
*
* @param y output of the function to derive
* @param x inputs of the function for which partial derivatives are computed
* @return the partial derivatives {@code dy} with the size of {@code x}
*/
  public Output<?>[] addGradients(Output<?> y, Output<?>[] x) {
    // Delegate to the general form with a single y and default (null) prefix/dx.
    return addGradients(null, new Output<?>[] {y}, x, null);
  }
/**
* Used to instantiate an abstract class which overrides the buildSubgraph method to build a
* conditional or body subgraph for a while loop. After Java 8, this can alternatively be used to
* create a lambda for the same purpose.
*
* <p>To be used when calling {@link #whileLoop(Output[],
* org.tensorflow.Graph.WhileSubgraphBuilder, org.tensorflow.Graph.WhileSubgraphBuilder, String)}
*
* <p>Example usage (prior to Java 8):
*
* <p>{@code WhileSubgraphBuilder bodyGraphBuilder = new WhileSubgraphBuilder() { @Override public
* void buildSubgraph(Graph bodyGraph, Output<?>[] bodyInputs, Output<?>[] bodyOutputs) { // build
* body subgraph } }; }
*
* <p>Example usage (after Java 8):
*
* <p>{@code WhileSubgraphBuilder bodyGraphBuilder = (bodyGraph, bodyInputs, bodyOutputs) -> { //
* build body subgraph };}
*/
  public interface WhileSubgraphBuilder {
    // Single abstract method: on Java 8+ this interface may be implemented
    // with a lambda, as shown in the javadoc above.
    /**
     * To be overridden by user with code to build conditional or body subgraph for a while loop
     *
     * @param g the subgraph
     * @param inputs subgraph inputs
     * @param outputs subgraph outputs
     */
    public void buildSubgraph(Graph g, Output<?>[] inputs, Output<?>[] outputs);
  }
// called by while loop code in graph_jni.cc to construct conditional/body subgraphs
  private static long[] buildSubgraph(
      WhileSubgraphBuilder subgraphBuilder,
      long subgraphHandle,
      long[] inputHandles,
      int[] inputIndices,
      long[] outputHandles,
      int[] outputIndices) {
    // Wrap the native subgraph handle so the user-supplied builder can work
    // with the Java API.
    Graph subgraph = new Graph(subgraphHandle);
    int ninputs = inputHandles.length;
    int noutputs = outputHandles.length;
    Output<?>[] inputs = new Output<?>[ninputs];
    Output<?>[] outputs = new Output<?>[noutputs];
    // Packed as [handle_0..handle_n-1, index_0..index_n-1] for the JNI caller.
    long[] outputHandlesAndIndices = new long[noutputs * 2];
    synchronized (subgraph.nativeHandleLock) {
      try (Reference ref = subgraph.ref()) {
        // Materialize the native (handle, index) pairs as Output objects.
        for (int i = 0; i < ninputs; i++) {
          Operation op = new GraphOperation(subgraph, inputHandles[i]);
          inputs[i] = op.output(inputIndices[i]);
        }
        for (int i = 0; i < noutputs; i++) {
          Operation op = new GraphOperation(subgraph, outputHandles[i]);
          outputs[i] = op.output(outputIndices[i]);
        }
        // The builder may replace entries of outputs; flatten whatever it
        // left there back into the packed array.
        subgraphBuilder.buildSubgraph(subgraph, inputs, outputs);
        for (int i = 0, j = noutputs; i < noutputs; i++, j++) {
          outputHandlesAndIndices[i] = outputs[i].getUnsafeNativeHandle();
          outputHandlesAndIndices[j] = (long) outputs[i].index();
        }
      }
      return outputHandlesAndIndices;
    }
  }
/**
* Builds a while loop.
*
* @param inputs the loop inputs
* @param cgBuilder WhileSubgraphBuilder to build the conditional subgraph
* @param bgBuilder WhileSubgraphBuilder to build the body subgraph
* @param name name for the loop
* @return list of loop outputs, of the same length as {@code inputs}
*/
  public Output<?>[] whileLoop(
      Output<?>[] inputs,
      WhileSubgraphBuilder cgBuilder,
      WhileSubgraphBuilder bgBuilder,
      String name) {
    int ninputs = inputs.length;
    long[] inputHandles = new long[ninputs];
    int[] inputIndices = new int[ninputs];
    Output<?>[] outputs = new Output<?>[ninputs];
    synchronized (nativeHandleLock) {
      try (Reference ref = ref()) {
        // Flatten the inputs into (operation handle, output index) pairs for JNI.
        for (int i = 0; i < ninputs; i++) {
          inputHandles[i] = inputs[i].getUnsafeNativeHandle();
          inputIndices[i] = inputs[i].index();
        }
        // Returned packed as [handle_0..handle_n-1, index_0..index_n-1],
        // the same convention used by addGradients above.
        long[] outputHandlesAndIndices =
            whileLoop(nativeHandle, inputHandles, inputIndices, name, cgBuilder, bgBuilder);
        for (int i = 0, j = ninputs; i < ninputs; ++i, ++j) {
          Operation op = new GraphOperation(this, outputHandlesAndIndices[i]);
          outputs[i] = op.output((int) outputHandlesAndIndices[j]);
        }
      }
      return outputs;
    }
  }
  // Guards nativeHandle and refcount; also used as the wait/notify monitor
  // for threads blocking in close() until all References are released.
  private final Object nativeHandleLock = new Object();
  private long nativeHandle; // 0 once the graph has been closed
  private int refcount = 0; // number of outstanding Reference pins
// Related native objects (such as the TF_Operation object backing an Operation instance)
// have a validity tied to that of the Graph. The handles to those native objects are not
// valid after Graph.close() has been invoked.
//
// Instances of the Reference class should be used to ensure the Graph has not been closed
// while dependent handles are in use.
class Reference implements AutoCloseable {
private Reference() {
synchronized (Graph.this.nativeHandleLock) {
active = Graph.this.nativeHandle != 0;
if (!active) {
throw new IllegalStateException("close() has been called on the Graph");
}
active = true;
Graph.this.refcount++;
}
}
@Override
public void close() {
synchronized (Graph.this.nativeHandleLock) {
if (!active) {
return;
}
active = false;
if (--Graph.this.refcount == 0) {
Graph.this.nativeHandleLock.notifyAll();
}
}
}
public long nativeHandle() {
synchronized (Graph.this.nativeHandleLock) {
return active ? Graph.this.nativeHandle : 0;
}
}
private boolean active;
}
  // Pin the graph against closure for the duration of a native call; intended
  // for use in try-with-resources.
  Reference ref() {
    return new Reference();
  }
private static final class OperationIterator implements Iterator<Operation> {
OperationIterator(Graph g) {
this.graph = g;
this.operation = null;
this.position = 0;
this.advance();
}
private final void advance() {
Graph.Reference reference = this.graph.ref();
this.operation = null;
try {
long[] nativeReturn = nextOperation(reference.nativeHandle(), this.position);
if ((nativeReturn != null) && (nativeReturn[0] != 0)) {
this.operation = new GraphOperation(this.graph, nativeReturn[0]);
this.position = (int) nativeReturn[1];
}
} finally {
reference.close();
}
}
@Override
public boolean hasNext() {
return (this.operation != null);
}
@Override
public Operation next() {
Operation rhett = this.operation;
this.advance();
return rhett;
}
@Override
public void remove() {
throw new UnsupportedOperationException("remove() is unsupported.");
}
private final Graph graph;
private Operation operation;
private int position;
}
  // JNI bindings implemented in graph_jni.cc; every `handle` argument is the
  // raw TF_Graph pointer held in nativeHandle.
  private static native long allocate();
  private static native void delete(long handle);
  private static native long operation(long handle, String name);
  // This method returns the Operation native handle at index 0 and the new value for pos at index 1
  // (see TF_GraphNextOperation)
  private static native long[] nextOperation(long handle, int position);
  private static native void importGraphDef(long handle, byte[] graphDef, String prefix)
      throws IllegalArgumentException;
  private static native byte[] toGraphDef(long handle);
  // Returns gradient outputs packed as [handle_0..handle_n-1, index_0..index_n-1].
  private static native long[] addGradients(
      long handle,
      String prefix,
      long[] inputHandles,
      int[] inputIndices,
      long[] outputHandles,
      int[] outputIndices,
      long[] gradInputHandles,
      int[] gradInputIndices);
  private static native long[] whileLoop(
      long handle,
      long[] inputHandles,
      int[] inputIndices,
      String name,
      WhileSubgraphBuilder condGraphBuilder,
      WhileSubgraphBuilder bodyGraphBuilder);
  static {
    // Load and initialize the TensorFlow native library before any native
    // method above can be invoked.
    TensorFlow.init();
  }
} | java | github | https://github.com/tensorflow/tensorflow | tensorflow/java/src/main/java/org/tensorflow/Graph.java |
#!/usr/bin/env python3
import unittest
from test import support
import contextlib
import socket
import urllib.request
import sys
import os
import email.message
import time
class URLTimeoutTest(unittest.TestCase):
    # XXX this test doesn't seem to test anything useful.
    # Process-wide default socket timeout, in seconds.
    TIMEOUT = 30.0
    def setUp(self):
        socket.setdefaulttimeout(self.TIMEOUT)
    def tearDown(self):
        # Restore "no default timeout" so later tests are unaffected.
        socket.setdefaulttimeout(None)
    def testURLread(self):
        # Just check that a full read over the network completes.
        with support.transient_internet("www.python.org"):
            f = urllib.request.urlopen("http://www.python.org/")
            x = f.read()
class urlopenNetworkTests(unittest.TestCase):
    """Tests urllib.request.urlopen using the network.
    These tests are not exhaustive.  Assuming that testing using files does a
    good job overall of some of the basic interface features.  There are no
    tests exercising the optional 'data' and 'proxies' arguments.  No tests
    for transparent redirection have been written.
    setUp is not used for always constructing a connection to
    http://www.python.org/ since there a few tests that don't use that address
    and making a connection is expensive enough to warrant minimizing unneeded
    connections.
    """
    @contextlib.contextmanager
    def urlopen(self, *args, **kwargs):
        # Wrapper that skips on transient network failures and always closes
        # the response object.
        resource = args[0]
        with support.transient_internet(resource):
            r = urllib.request.urlopen(*args, **kwargs)
            try:
                yield r
            finally:
                r.close()
    def test_basic(self):
        # Simple test expected to pass.
        with self.urlopen("http://www.python.org/") as open_url:
            for attr in ("read", "readline", "readlines", "fileno", "close",
                         "info", "geturl"):
                self.assertTrue(hasattr(open_url, attr), "object returned from "
                                "urlopen lacks the %s attribute" % attr)
            self.assertTrue(open_url.read(), "calling 'read' failed")
    def test_readlines(self):
        # Test both readline and readlines.
        with self.urlopen("http://www.python.org/") as open_url:
            self.assertIsInstance(open_url.readline(), bytes,
                                  "readline did not return a string")
            self.assertIsInstance(open_url.readlines(), list,
                                  "readlines did not return a list")
    def test_info(self):
        # Test 'info'.
        with self.urlopen("http://www.python.org/") as open_url:
            info_obj = open_url.info()
            self.assertIsInstance(info_obj, email.message.Message,
                                  "object returned by 'info' is not an "
                                  "instance of email.message.Message")
            self.assertEqual(info_obj.get_content_subtype(), "html")
    def test_geturl(self):
        # Make sure same URL as opened is returned by geturl.
        URL = "http://www.python.org/"
        with self.urlopen(URL) as open_url:
            gotten_url = open_url.geturl()
            self.assertEqual(gotten_url, URL)
    def test_getcode(self):
        # test getcode() with the fancy opener to get 404 error codes
        URL = "http://www.python.org/XXXinvalidXXX"
        with support.transient_internet(URL):
            open_url = urllib.request.FancyURLopener().open(URL)
            try:
                code = open_url.getcode()
            finally:
                open_url.close()
            self.assertEqual(code, 404)
    def test_fileno(self):
        if sys.platform in ('win32',):
            # On Windows, socket handles are not file descriptors; this
            # test can't pass on Windows.
            return
        # Make sure fd returned by fileno is valid.
        with self.urlopen("http://www.python.org/", timeout=None) as open_url:
            fd = open_url.fileno()
            with os.fdopen(fd, encoding='utf-8') as f:
                self.assertTrue(f.read(), "reading from file created using fd "
                                "returned by fileno failed")
    def test_bad_address(self):
        # Make sure proper exception is raised when connecting to a bogus
        # address.
        bogus_domain = "sadflkjsasf.i.nvali.d"
        try:
            socket.gethostbyname(bogus_domain)
        except socket.gaierror:
            pass
        else:
            # This happens with some overzealous DNS providers such as OpenDNS
            self.skipTest("%r should not resolve for test to work" % bogus_domain)
        self.assertRaises(IOError,
                          # SF patch 809915:  In Sep 2003, VeriSign started
                          # highjacking invalid .com and .net addresses to
                          # boost traffic to their own site.  This test
                          # started failing then.  One hopes the .invalid
                          # domain will be spared to serve its defined
                          # purpose.
                          # urllib.urlopen, "http://www.sadflkjsasadf.com/")
                          urllib.request.urlopen,
                          "http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
    """Tests urllib.request.urlretrieve using the network."""
    @contextlib.contextmanager
    def urlretrieve(self, *args):
        # Wrapper that skips on transient network failures and always removes
        # the downloaded temporary file.
        resource = args[0]
        with support.transient_internet(resource):
            file_location, info = urllib.request.urlretrieve(*args)
            try:
                yield file_location, info
            finally:
                support.unlink(file_location)
    def test_basic(self):
        # Test basic functionality.
        with self.urlretrieve("http://www.python.org/") as (file_location, info):
            self.assertTrue(os.path.exists(file_location), "file location returned by"
                            " urlretrieve is not a valid path")
            with open(file_location, encoding='utf-8') as f:
                self.assertTrue(f.read(), "reading from the file location returned"
                                " by urlretrieve failed")
    def test_specified_path(self):
        # Make sure that specifying the location of the file to write to works.
        with self.urlretrieve("http://www.python.org/",
                              support.TESTFN) as (file_location, info):
            self.assertEqual(file_location, support.TESTFN)
            self.assertTrue(os.path.exists(file_location))
            with open(file_location, encoding='utf-8') as f:
                self.assertTrue(f.read(), "reading from temporary file failed")
    def test_header(self):
        # Make sure header returned as 2nd value from urlretrieve is good.
        with self.urlretrieve("http://www.python.org/") as (file_location, info):
            self.assertIsInstance(info, email.message.Message,
                                  "info is not an instance of email.message.Message")
    def test_data_header(self):
        logo = "http://www.python.org/community/logos/python-logo-master-v3-TM.png"
        with self.urlretrieve(logo) as (file_location, fileheaders):
            datevalue = fileheaders.get('Date')
            dateformat = '%a, %d %b %Y %H:%M:%S GMT'
            try:
                time.strptime(datevalue, dateformat)
            except ValueError:
                # TestCase.fail() accepts a single message argument; the old
                # two-argument call raised TypeError instead of reporting the
                # actual failure. Interpolate the format string ourselves.
                self.fail('Date value not in %r format' % dateformat)
def test_main():
    # All of these suites hit the live network, so require the 'network'
    # resource to be explicitly enabled.
    support.requires('network')
    support.run_unittest(URLTimeoutTest,
                         urlopenNetworkTests,
                         urlretrieveNetworkTests)
if __name__ == "__main__":
    test_main()
from django.utils.translation import ugettext, ugettext_lazy as _
from simple_accounting.exceptions import MalformedTransaction
from simple_accounting.models import AccountingProxy, Transaction, LedgerEntry, account_type
from simple_accounting.utils import register_transaction
from consts import (
INCOME, EXPENSE, ASSET, LIABILITY, EQUITY,
GASMEMBER_GAS, RECYCLE, ADJUST
)
from datetime import datetime
class PersonAccountingProxy(AccountingProxy):
    """
    This class is meant to be the place where implementing the accounting API
    for ``Person``-like economic subjects.
    Since it's a subclass of ``AccountingProxy``, it inherits from its parent
    all the methods and attributes comprising the *generic* accounting API;
    here, you can add whatever logic is needed to augment that generic API,
    tailoring it to the specific needs of the ``Person``' model.
    """
    def last_entry(self, base_path):
        """last entry for one subject"""
        # Most recent LedgerEntry (by transaction date) under ``base_path``;
        # None when the account has no entries yet.
        try:
            latest = self.system[base_path].ledger_entries.latest('transaction__date')
        except LedgerEntry.DoesNotExist:
            latest = None
        return latest
    #FIXME: create last_entry or one method for each base_path? Encapsulation and refactoring
    #FIXME: self <gasistafelice.base.accounting.PersonAccountingProxy object at 0xabaf86c>
    # base_path '/expenses/gas/gas-1/recharges'
    def do_recharge(self, gas, amount, note="", date=None):
        """
        Do a recharge of amount ``amount`` to the corresponding member account
        in the GAS ``gas``.
        If this person is not a member of GAS ``gas``, or if ``amount`` is a negative number
        a ``MalformedTransaction`` exception is raised.
        """
        person = self.subject.instance
        if amount < 0:
            raise MalformedTransaction(ugettext("Amount of a recharge must be non-negative"))
        elif not person.has_been_member(gas):
            raise MalformedTransaction(ugettext("A person can't make an account recharge for a GAS that (s)he is not member of"))
        else:
            # Money flows: person's wallet -> person's per-GAS recharge account
            # -> GAS recharge income -> the person's member account in the GAS.
            source_account = self.system['/wallet']
            exit_point = self.system['/expenses/gas/' + gas.uid + '/recharges']
            entry_point = gas.accounting.system['/incomes/recharges']
            target_account = gas.accounting.system['/members/' + person.uid]
            description = unicode(person.report_name)
            issuer = self.subject
            if not date:
                date = datetime.now() #_date.today
            transaction = register_transaction(source_account, exit_point,
                entry_point, target_account, amount, description, issuer,
                date, 'RECHARGE'
            )
            # Link the transaction back to both the person and the GAS.
            transaction.add_references([person, gas])
    #Transaction
    # date = models.DateTimeField(default=datetime.now)
    # description = models.CharField(max_length=512, help_text=ugettext("Reason of the transaction"))
    # issuer = models.ForeignKey(Subject, related_name='issued_transactions_set')
    # source = models.ForeignKey(CashFlow)
    # split_set = models.ManyToManyField(Split)
    # kind = models.CharField(max_length=128, choices=settings.TRANSACTION_TYPES)
    # is_confirmed = models.BooleanField(default=False)
    # def splits(self):
    # def is_split(self):
    # def is_internal(self):
    # def is_simple(self):
    #LedgerEntry
    # account = models.ForeignKey(Account, related_name='entry_set')
    # transaction = models.ForeignKey(Transaction, related_name='entry_set')
    # entry_id = models.PositiveIntegerField(null=True, blank=True, editable=False)
    # amount = CurrencyField()
    # def date(self):
    # def description(self):
    # def issuer(self):
    def entries_gasmember(self, gasmember):
        """
        List all LedgerEntries (account, transaction, amount)
        Show transactions for gasmembers link to GAS kind='GAS_WITHDRAWAL' + another kind?
        """
        member_account = gasmember.person.uid
        gas_account = gasmember.gas.uid
        #accounts = self.system.accounts.filter(name="wallet") | \
        # Union of the member-related accounts from both the person's system
        # and the GAS's system.
        # NOTE(review): ``name__in=member_account`` passes a *string*, which
        # Django treats as an iterable of single characters — verify this
        # shouldn't be ``name=member_account`` as in the last filter.
        accounts = \
            self.system.accounts.filter(parent__name="members", name__in=member_account) | \
            self.system.accounts.filter(parent__name="expenses/gas/" + gas_account + "/fees", name__in=member_account) | \
            self.system.accounts.filter(parent__name="expenses/gas/" + gas_account + "/recharges", name__in=member_account) | \
            gasmember.gas.accounting.system.accounts.filter(parent__name="members", name=member_account)
        #gasmember.gas.accounting.system.accounts.filter(name="members/%s" % member_account) ko?
        return LedgerEntry.objects.filter(account__in=accounts).order_by('-id', '-transaction__date')
    def extra_operation(self, gas, amount, target, causal, date):
        """
        Another account operation for this subject
        For a GASMEMBER the target operation can be income or expense operation
        The operation can implicate a GAS economic change
        """
        if amount < 0:
            raise MalformedTransaction(ugettext("Payment amounts must be non-negative"))
        person = self.subject.instance
        if not person.has_been_member(gas):
            raise MalformedTransaction(ugettext("A person can't pay membership fees to a GAS that (s)he is not member of"))
        gas_acc = gas.accounting
        gas_system = gas.accounting.system
        kind = GASMEMBER_GAS
        #UGLY: remove me when done and executed one command that regenerate all missing accounts
        self.missing_accounts(gas)
        # Each target selects a different source/exit/entry/target account
        # chain; see the account-tree diagram below for the paths involved.
        if target == INCOME: #Correction for gasmember: +gasmember -GAS
            source_account = gas_system['/cash']
            exit_point = gas_system['/expenses/member']
            entry_point = gas_system['/incomes/recharges']
            target_account = gas_system['/members/' + person.uid]
        elif target == EXPENSE: #Correction for GAS: +GAS -gasmember
            source_account = gas_system['/members/' + person.uid]
            exit_point = gas_system['/expenses/gas']
            entry_point = gas_system['/incomes/member']
            target_account = gas_system['/cash']
        elif target == ASSET: #Detraction for Gasmember: -gasmember
            source_account = gas_system['/members/' + person.uid]
            exit_point = gas_system['/expenses/member']
            entry_point = self.system['/incomes/other']
            target_account = self.system['/wallet']
            kind = ADJUST
        elif target == LIABILITY: #Addition for Gasmember: +gasmember
            source_account = self.system['/wallet']
            exit_point = self.system['/expenses/other']
            entry_point = gas_system['/incomes/recharges']
            target_account = gas_system['/members/' + person.uid]
            kind = ADJUST
        elif target == EQUITY: #Restitution for gasmember: empty container +gasmember -GAS
            source_account = gas_system['/cash']
            exit_point = gas_system['/expenses/member']
            entry_point = gas_system['/incomes/recharges']
            target_account = gas_system['/members/' + person.uid]
            kind = RECYCLE
        else:
            raise MalformedTransaction(ugettext("Payment target %s not identified") % target)
        description = "%(gas)s %(target)s %(causal)s" % {
            'gas': gas.id_in_des,
            'target': target,
            'causal': causal
        }
        issuer = self.subject
        if not date:
            date = datetime.now() #_date.today
        transaction = register_transaction(source_account, exit_point, entry_point, target_account, amount, description, issuer, date, kind)
    # . gasmember ROOT (/)
    # |----------- wallet [A]
    # +----------- incomes [P,I] +
    # | +--- TODO: Other (Private order, correction, Deposit)
    # +----------- expenses [P,E] + UNUSED because we use the gas_system[/incomes/recharges]
    # +--- TODO: Other (Correction, Donation, )
    # . GAS ROOT (/)
    # |----------- cash [A]
    # +----------- members [P,A]+
    # | +--- <UID member #1> [A]
    # | | ..
    # | +--- <UID member #n> [A]
    # +----------- expenses [P,E]+
    # | +--- TODO: member (correction or other)
    # | +--- TODO: gas (correction or other)
    # +----------- incomes [P,I]+
    # | +--- recharges [I]
    # | +--- TODO: member (correction or other)
    #UGLY: remove me when done and executed one command that regenerate all missing accounts
    def missing_accounts(self, gas):
        # Lazily create the accounts that extra_operation() relies on, in case
        # older data predates them.
        gas_acc = gas.accounting
        gas_system = gas.accounting.system
        xsys = gas_acc.get_account(gas_system, '/expenses', 'member', account_type.expense)
        xsys = gas_acc.get_account(gas_system, '/expenses', 'gas', account_type.expense)
        xsys = gas_acc.get_account(gas_system, '/incomes', 'member', account_type.income)
        xsys = gas_acc.get_account(self.system, '/expenses', 'other', account_type.expense)
        xsys = gas_acc.get_account(self.system, '/incomes', 'other', account_type.income)
# dict.py v1.0
import sys
import itertools
def lworld0x00():
    # Print the ASCII-art startup banner (Python 2 print statements).
    print "***************************"
    print "*******Code For Fun********"
    print " /\____________/\ "
    print " \ / "
    print " \ hello / "
    print " \ world / "
    print " \________/ "
    print " "
    print "*******by 1world0x00*******"
    print "***************************"
def setLowerWord():
    """Write every num-length permutation drawn from sliding windows of the
    lowercase alphabet to the wordlist file named by sys.argv[1].

    sys.argv[2] gives the permutation length; 0 just truncates the file.
    """
    words = "abcdefghijklmnopqrstuvwxyz"
    num = int(sys.argv[2])
    fp = open(sys.argv[1], "wb+")
    # sys.argv values are strings, so the original `sys.argv[2] == 0`
    # comparison was always False; compare the parsed integer instead.
    if num == 0:
        fp.close()
    else:
        i = 0
        while(i <= 25):
            # Window shrinks from the front each pass; permutations() yields
            # nothing once the window is shorter than num.
            tmp = words[0:num]
            i += 1
            for j in itertools.permutations(tmp, num):
                fp.writelines("".join(j))
                fp.writelines("\n")
            words = words[1:]
        fp.close()
def setUpperWord():
    """Append every num-length permutation drawn from sliding windows of the
    uppercase alphabet to the wordlist file named by sys.argv[1].

    sys.argv[3] gives the permutation length; 0 appends nothing.
    """
    words = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    num = int(sys.argv[3])
    fp = open(sys.argv[1], "a+")
    # sys.argv values are strings, so the original `sys.argv[3] == 0`
    # comparison was always False; compare the parsed integer instead.
    if num == 0:
        fp.close()
    else:
        i = 0
        while(i <= 25):
            tmp = words[0:num]
            i += 1
            for j in itertools.permutations(tmp, num):
                fp.writelines("".join(j))
                fp.writelines("\n")
            words = words[1:]
        fp.close()
def setNum():
    """Append every num-length permutation drawn from sliding windows of the
    digits "1234567890" to the wordlist file named by sys.argv[1].

    sys.argv[4] gives the permutation length; 0 appends nothing.
    """
    words = "1234567890"
    num = int(sys.argv[4])
    fp = open(sys.argv[1], "a+")
    # sys.argv values are strings, so the original `sys.argv[4] == 0`
    # comparison was always False; compare the parsed integer instead.
    if num == 0:
        fp.close()
    else:
        i = 0
        while(i <= 10):
            tmp = words[0:num]
            i += 1
            for j in itertools.permutations(tmp, num):
                fp.writelines("".join(j))
                fp.writelines("\n")
            words = words[1:]
        fp.close()
# def dict():
# print "name:",sys.argv[0]
# print "first:",sys.argv[1]
# print "second:",sys.argv[2]
if __name__ == "__main__":
    lworld0x00()
    # dict();
    # fp=open("2.txt","wb+")
    # Build the wordlist file named by sys.argv[1]: lowercase, then uppercase,
    # then digit permutations (lengths from sys.argv[2..4]).
    setLowerWord()
    setUpperWord()
    setNum()
import re
import sys
import traceback
from fuzzywuzzy import fuzz
import requests
import TweetPoster
from TweetPoster import rehost
from raven.processors import SanitizePasswordsProcessor
class SanitizeCredentialsProcessor(SanitizePasswordsProcessor):
    # Extends raven's password sanitizer with additional credential-like field
    # names; the matching/masking semantics are inherited from
    # SanitizePasswordsProcessor.
    FIELDS = frozenset([
        'authorization',
        'password',
        'secret',
        'passwd',
        'token',
        'key',
        'dsn',
    ])
def tweet_in_title(tweet, submission):
    """Return True when the tweet's text appears in the submission title,
    either via a fuzzy ratio of at least 85 or a plain case-insensitive
    substring match."""
    if fuzz.ratio(tweet.text, submission.title) >= 85:
        return True
    return tweet.text.lower() in submission.title.lower()
def canonical_url(url):
    """Reduce a URL to a bare host name: lowercase it, drop the scheme and a
    leading "www.", trim one trailing "/" and one trailing ".", and keep only
    the part before the first remaining slash."""
    host = url.lower()
    for prefix in ('http://', 'https://', 'www.'):
        if host.startswith(prefix):
            host = host[len(prefix):]
    for suffix in ('/', '.'):
        if host.endswith(suffix):
            host = host[:-1]
    return host.split('/', 1)[0]
def replace_entities(tweet):
    """
    Rehosts images, expands urls and links
    hashtags and @mentions
    """
    # Link hashtags
    for tag in tweet.entities['hashtags']:
        replacement = u'[#{tag}](https://twitter.com/search?q=%23{tag})'.format(tag=tag['text'])
        # The tweet text was already markdown-escaped, so escape the needle too.
        source = sanitize_markdown('#' + tag['text'])
        tweet.text = tweet.text.replace(source, replacement)
    # Link mentions
    for mention in tweet.entities['user_mentions']:
        replacement = u'[@{name}](https://twitter.com/{name})'.format(name=mention['screen_name'])
        tweet.text = re.sub('(?i)\@{0}'.format(mention['screen_name']), replacement, tweet.text)
    # Rehost pic.twitter.com images
    if 'media' in tweet.entities:
        # Photos using Twitter's own image sharing
        # will be in here. We need to match an re
        # against urls to grab the rest of them
        for media in tweet.entities['media']:
            if media['type'] != 'photo':
                continue
            imgur = rehost.PicTwitterCom.extract(media['media_url'])
            if not imgur:
                # We still want to unshorten the t.co link
                replacement = '[*pic.twitter.com*]({0})'.format(media['url'])
            else:
                replacement = u'[*pic.twitter.com*]({url}) [^[Imgur]]({imgur})'
                replacement = replacement.format(url=media['media_url'], imgur=imgur)
            source = sanitize_markdown(media['url'])
            tweet.text = tweet.text.replace(source, replacement)
    # Replace t.co with actual urls, unshorten any
    # other urls shorteners and rehost other images
    for url in tweet.entities['urls']:
        # check for redirects
        try:
            # requests will follow any redirects
            # and allow us to check for them
            r = requests.head(url['expanded_url'], allow_redirects=True)
        except requests.exceptions.RequestException:
            # Best-effort: keep the original expanded_url if the HEAD fails.
            sys.stderr.write('Exception when checking url: {0}\n'.format(
                url['expanded_url']
            ))
            traceback.print_exc()
        else:
            if r.history != []:
                url['expanded_url'] = r.url
        replacement = u'[*{canonical}*]({url})'.format(
            canonical=canonical_url(url['expanded_url']),
            url=url['expanded_url']
        )
        # Check if this link is to an image we can rehost
        for host in rehost.ImageHost.__subclasses__():
            if re.match(host.url_re, url['expanded_url']):
                imgur = host().extract(url['expanded_url'])
                if imgur:
                    replacement = replacement + ' [^[Imgur]]({0})'.format(
                        imgur
                    )
        source = sanitize_markdown(url['url'])
        tweet.text = tweet.text.replace(source, replacement)
    return tweet
def sanitize_markdown(unescaped):
    """Escape markdown's special characters and prefix continuation lines
    with '>' so multi-line text cannot break out of a blockquote."""
    quoted = '\n>'.join(unescaped.splitlines())
    return re.sub(r'([\\`*_{}[\]()#+-])', r'\\\1', quoted)
def tweet_to_markdown(tweet):
    # Render a tweet as markdown using the on-disk template.
    with open(TweetPoster.template_path + 'tweet.txt') as f:
        tweet_template = f.read().decode('utf8')
    # Sanitize markdown before processing twitter entities
    tweet.text = sanitize_markdown(tweet.text)
    # Link hashtags, expand urls, rehost images etc
    tweet = replace_entities(tweet)
    # Template placeholders are filled from the tweet's attributes.
    return tweet_template.format(**tweet.__dict__)
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Display an image.
Usage::
display.py <filename>
A checkerboard background is visible behind any transparent areas of the
image.
'''
import sys
import pyglet
from pyglet.gl import *
# Window starts hidden; it is resized to the image and shown in __main__.
window = pyglet.window.Window(visible=False, resizable=True)
@window.event
def on_draw():
    # Checkerboard background first, then the image centred in the window.
    background.blit_tiled(0, 0, 0, window.width, window.height)
    img.blit(window.width // 2, window.height // 2, 0)
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print __doc__
        sys.exit(1)
    filename = sys.argv[1]
    img = pyglet.image.load(filename).get_texture(rectangle=True)
    # Anchor at the centre so blit(x, y) positions the image's midpoint.
    img.anchor_x = img.width // 2
    img.anchor_y = img.height // 2
    # Checkerboard pattern shown behind any transparent areas of the image.
    checks = pyglet.image.create(32, 32, pyglet.image.CheckerImagePattern())
    background = pyglet.image.TileableTexture.create_for_image(checks)
    # Enable alpha blending, required for image.blit.
    glEnable(GL_BLEND)
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    # Size the window to the image before making it visible.
    window.width = img.width
    window.height = img.height
    window.set_visible()
    pyglet.app.run()
#!/usr/bin/env python
#-------------------------------------------------------------------------------------------------
#Get useful information from the Mapped candidates Info.
#Like: What is the frequence of archives.How many mapped files are per archive.
#The archive with most mapping files.
#Which domains are more promising to Crawl Offline.
#-------------------------------------------------------------------------------------------------
import codecs
import sys
import re
from operator import itemgetter
def normalize(testURL):
    """Strip the scheme and any trailing slash, then split the URL into its
    path components; components[0] is the domain."""
    stripped = re.sub("^http(s)?://", "", testURL)
    stripped = re.sub("/$", "", stripped)
    return stripped.split('/')
def getDictDomains(infoFile):
    """Compute the frequencies of domains to find the most profitable domains."""
    domainDict = {}
    fi = codecs.open(infoFile, "r", "utf-8")
    for line in fi:
        url, info = line.rstrip().split("\t")
        # First path component of the normalized URL is the domain.
        domain = normalize(url)[0]
        domainDict[domain] = domainDict.get(domain, 0) + 1
    fi.close()
    return domainDict
def cleanOfSpaces(myString):
    """Remove runs of ordinary space characters (not tabs) from both ends
    of myString."""
    myString = re.sub("^( )+", "", myString)
    return re.sub("( )+$", "", myString)
def getWarcLocation(info):
    """Extract the warc location from the last key/value pair of a
    JSON-like info string."""
    body = re.sub("^{", "", info)
    body = re.sub("}$", "", body)
    # Last "key": "value" pair holds the warc location.
    warcComponent = body.split("\",")[-1].replace("\"", "")
    fileName, warcValue = warcComponent.split(":", 1)
    return cleanOfSpaces(warcValue)
def getWarcDistribution(infoFile):
    """Count how many mapped candidates live in each warc archive."""
    counts = {}
    fi = codecs.open(infoFile, "r", "utf-8")
    for raw in fi:
        url, info = raw.rstrip().split("\t")
        loc = getWarcLocation(info)
        counts[loc] = counts.get(loc, 0) + 1
    fi.close()
    return counts
def printDistribution(fDict,fOutput):
    """Write (key, count) pairs to fOutput, one "key\\tcount" per line,
    sorted by count in descending order.

    Bug fix: dict.iteritems() was removed in Python 3; .items() behaves the
    same on both Python 2 and 3 for this use.
    """
    sDict=sorted(fDict.items(), key=itemgetter(1), reverse=True)
    fo=codecs.open(fOutput, "w", "utf-8")
    for tup in sDict :
        fo.write(tup[0]+"\t"+str(tup[1])+"\n")
    fo.close()
def computeDomainDistribution(sLang,dLang,fl):
    """Compute and dump the per-domain frequency table for a language pair,
    logging the output path to the already-open log handle fl."""
    domainDict = getDictDomains(getInputFile(sLang, dLang))
    fDomain = getDomainFile(sLang, dLang)
    fl.write("=====>In " + fDomain + "\n")
    printDistribution(domainDict, fDomain)
def computeWarcDistribution(sLang,dLang,fl):
    """Compute and dump the per-warc frequency table for a language pair,
    logging the output path to the already-open log handle fl."""
    warcDict = getWarcDistribution(getInputFile(sLang, dLang))
    fWarc = getWarcFile(sLang, dLang)
    fl.write("=====>In " + fWarc + "\n")
    printDistribution(warcDict, fWarc)
def getInputFile (sLang,dLang) :
    """Path of the mapped-candidates info file for the language pair."""
    return "OutputFiles/candidates-Info-%s-%s.txt" % (sLang, dLang)
def getDomainFile (sLang,dLang) :
    """Path of the per-domain statistics file for the language pair."""
    return "OutputFiles/statistics-Domain-%s-%s.txt" % (sLang, dLang)
def getWarcFile (sLang,dLang) :
    """Path of the per-warc statistics file for the language pair."""
    return "OutputFiles/statistics-Warc-%s-%s.txt" % (sLang, dLang)
def getLogFile (sLang,dLang) :
    """Path of the run log file for the language pair."""
    return "OutputFiles/run-%s-%s.log" % (sLang, dLang)
def main():
    """Entry point: read the source/target languages from argv, then append
    the warc and domain distributions' locations to the run log."""
    sLang = sys.argv[1]
    dLang = sys.argv[2]
    fl = codecs.open(getLogFile(sLang, dLang), "a", "utf-8")
    fl.write("\nStatistics\n")
    fl.write("Warc Distribution\n")
    computeWarcDistribution(sLang, dLang, fl)
    fl.write("Domain distribution\n")
    computeDomainDistribution(sLang, dLang, fl)
    fl.close()

if __name__ == '__main__':
    main()
##########################################################################
#
# Portions of this file are under the following copyright and license:
#
#
# Copyright (c) 2003-2004 Danny Brewer
# d29583@groovegarden.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#
# and other portions are under the following copyright and license:
#
#
# OpenERP, Open Source Management Solution>..
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
##############################################################################
import uno
import unohelper
import pythonloader
# Bug fix: the `<>` operator was removed in Python 3; `!=` behaves
# identically on Python 2.
if __name__!="package":
    from actions import *
#------------------------------------------------------------
# Uno ServiceManager access
# A different version of this routine and global variable
# is needed for code running inside a component.
#------------------------------------------------------------
# The ServiceManager of the running OOo.
# It is cached in a global variable (False until first resolved).
goServiceManager = False
pythonloader.DEBUG = 0
def getServiceManager( cHost="localhost", cPort="2002" ):
    """Get the ServiceManager from the running OpenOffice.org.

    When run as a plain script this connects over a UNO socket bridge to
    cHost:cPort; when loaded as an OOo component ("package") it uses the
    local component context directly.  The result is cached in the
    module-level goServiceManager for future use.  This is similar to
    GetProcessServiceManager() in OOo Basic.
    """
    global goServiceManager
    global pythonloader
    if not goServiceManager:
        # Get the uno component context from the PyUNO runtime
        oLocalContext = uno.getComponentContext()
        # Bug fix: `<>` was removed in Python 3; `!=` is equivalent.
        if __name__!="package":
            # Create the UnoUrlResolver on the Python side.
            # Connect to the running OpenOffice.org and get its context.
            oLocalResolver = oLocalContext.ServiceManager.createInstanceWithContext(
                        "com.sun.star.bridge.UnoUrlResolver", oLocalContext )
            oContext = oLocalResolver.resolve( "uno:socket,host=" + cHost + ",port=" + cPort + ";urp;StarOffice.ComponentContext" )
            # Get the ServiceManager object
            goServiceManager = oContext.ServiceManager
        else:
            goServiceManager=oLocalContext.ServiceManager
    return goServiceManager
#------------------------------------------------------------
# Uno convenience functions
# The stuff in this section is just to make
# python progrmaming of OOo more like using OOo Basic.
#------------------------------------------------------------
# This is the same as ServiceManager.createInstance( ... )
def createUnoService( cClass ):
    """Instantiate a global UNO service by class name.
    Similar to the function of the same name in OOo Basic.
    """
    return getServiceManager().createInstance( cClass )
# The StarDesktop object. (global like in OOo Basic)
# It is cached in a global variable.
StarDesktop = None

def getDesktop():
    """Return (and cache in the global StarDesktop) the Desktop object of
    the running OOo, mirroring the StarDesktop global of OOo Basic."""
    global StarDesktop
    if StarDesktop is None:
        StarDesktop = createUnoService( "com.sun.star.frame.Desktop" )
    return StarDesktop
# preload the StarDesktop variable.
#getDesktop()
# The CoreReflection object.
# It is cached in a global variable.
goCoreReflection = False

def getCoreReflection():
    """Return (and cache) the com.sun.star.reflection.CoreReflection service."""
    global goCoreReflection
    if not goCoreReflection:
        goCoreReflection = createUnoService( "com.sun.star.reflection.CoreReflection" )
    return goCoreReflection
def createUnoStruct( cTypeName ):
    """Create a UNO struct by type name and return it.
    Similar to the function of the same name in OOo Basic.
    """
    # Look up the IDL class for the type name via CoreReflection.
    oXIdlClass = getCoreReflection().forName( cTypeName )
    # createObject returns (return-value, out-parameter); the struct is
    # the out-parameter.
    oStruct = oXIdlClass.createObject( None )[1]
    return oStruct
#------------------------------------------------------------
# API helpers
#------------------------------------------------------------
def hasUnoInterface( oObject, cInterfaceName ):
    """Return True if oObject implements the named UNO interface.
    Singular form of Basic's HasUnoInterfaces() function."""
    # Introspect the object to enumerate every method it exposes.
    oIntrospection = createUnoService( "com.sun.star.beans.Introspection" )
    oObjInfo = oIntrospection.inspect( oObject )
    oMethods = oObjInfo.getMethods( uno.getConstantByName( "com.sun.star.beans.MethodConcept.ALL" ) )
    # The object implements the interface iff any method declares it.
    for oMethod in oMethods:
        if oMethod.getDeclaringClass().getName() == cInterfaceName:
            return True
    return False
def hasUnoInterfaces( oObject, *cInterfaces ):
    """Return True only if oObject implements every listed UNO interface.
    Similar to the function of the same name in OOo Basic."""
    for cName in cInterfaces:
        if not hasUnoInterface( oObject, cName ):
            return False
    return True
#------------------------------------------------------------
# High level general purpose functions
#------------------------------------------------------------
def makePropertyValue( cName=None, uValue=None, nHandle=None, nState=None ):
    """Create a com.sun.star.beans.PropertyValue struct, filling in only
    the fields for which a non-None argument was given, and return it."""
    oPropertyValue = createUnoStruct( "com.sun.star.beans.PropertyValue" )
    for cField, uVal in ( ("Name", cName), ("Value", uValue),
                          ("Handle", nHandle), ("State", nState) ):
        if uVal != None:
            setattr( oPropertyValue, cField, uVal )
    return oPropertyValue
def makePoint( nX, nY ):
    """Create a com.sun.star.awt.Point struct with the given coordinates."""
    oPoint = createUnoStruct( "com.sun.star.awt.Point" )
    oPoint.X, oPoint.Y = nX, nY
    return oPoint
def makeSize( nWidth, nHeight ):
    """Create a com.sun.star.awt.Size struct with the given dimensions."""
    oSize = createUnoStruct( "com.sun.star.awt.Size" )
    oSize.Width, oSize.Height = nWidth, nHeight
    return oSize
def makeRectangle( nX, nY, nWidth, nHeight ):
    """Create a com.sun.star.awt.Rectangle struct with the given geometry."""
    oRect = createUnoStruct( "com.sun.star.awt.Rectangle" )
    oRect.X, oRect.Y = nX, nY
    oRect.Width, oRect.Height = nWidth, nHeight
    return oRect
def Array( *args ):
    """Sugar coating so OOoBasic code using Array() works unchanged in
    Python: simply returns its arguments as a tuple."""
    return tuple( args )
def loadComponentFromURL( cUrl, tProperties=() ):
    """Open or create a document from its URL and return it.

    New documents are created from URLs such as:
        private:factory/sdraw
        private:factory/swriter
        private:factory/scalc
        private:factory/simpress
    """
    oDesktop = getDesktop()
    return oDesktop.loadComponentFromURL( cUrl, "_blank", 0, tProperties )
#------------------------------------------------------------
# Styles
#------------------------------------------------------------
def defineStyle( oDrawDoc, cStyleFamily, cStyleName, cParentStyleName=None ):
    """Return the named style, registering a new one in the style catalog
    first if it is not already present, so callers can alter its properties."""
    oStyleFamily = oDrawDoc.getStyleFamilies().getByName( cStyleFamily )
    if oStyleFamily.hasByName( cStyleName ):
        # Already defined — just hand back the existing style object.
        return oStyleFamily.getByName( cStyleName )
    # Create a fresh style, optionally parent it, and register it.
    oStyle = oDrawDoc.createInstance( "com.sun.star.style.Style" )
    if cParentStyleName != None:
        oStyle.setParentStyle( cParentStyleName )
    oStyleFamily.insertByName( cStyleName, oStyle )
    return oStyle
def getStyle( oDrawDoc, cStyleFamily, cStyleName ):
    """Look up an existing style in the document and return it."""
    oFamily = oDrawDoc.getStyleFamilies().getByName( cStyleFamily )
    return oFamily.getByName( cStyleName )
#------------------------------------------------------------
# General Utility functions
#------------------------------------------------------------
def convertToURL( cPathname ):
    """Convert a Windows or Linux pathname into an OOo file:// URL.
    A Windows drive letter "C:" becomes "/C|" per the OOo URL convention."""
    if len( cPathname ) > 1 and cPathname[1:2] == ":":
        cPathname = "/" + cPathname[0] + "|" + cPathname[2:]
    cPathname = cPathname.replace( "\\", "/" )
    return "file://" + cPathname
# The global Awt Toolkit.
# This is initialized the first time it is needed.
#goAwtToolkit = createUnoService( "com.sun.star.awt.Toolkit" )
goAwtToolkit = None

def getAwtToolkit():
    """Return (and cache in a global) the com.sun.star.awt.Toolkit service."""
    global goAwtToolkit
    if goAwtToolkit is None:
        goAwtToolkit = createUnoService( "com.sun.star.awt.Toolkit" )
    return goAwtToolkit
# This class builds dialog boxes.
# This can be used in two different ways...
# 1. by subclassing it (elegant)
# 2. without subclassing it (less elegant)
class DBModalDialog:
    """Class to build a dialog box from the com.sun.star.awt.* services.
    This doesn't do anything you couldn't already do using OOo's UNO API,
    this just makes it much easier.
    You can change the dialog box size, position, title, etc.
    You can add controls, and listeners for those controls to the dialog box.
    This class can be used by subclassing it, or without subclassing it.

    Bug fix in this revision: getEchoChar() previously called
    setControlModelProperty() with a missing required argument, which
    raised a TypeError at runtime; it now reads the property.
    """
    def __init__( self, nPositionX=None, nPositionY=None, nWidth=None, nHeight=None, cTitle=None ):
        self.oDialogModel = createUnoService( "com.sun.star.awt.UnoControlDialogModel" )
        if nPositionX != None:  self.oDialogModel.PositionX = nPositionX
        if nPositionY != None:  self.oDialogModel.PositionY = nPositionY
        if nWidth     != None:  self.oDialogModel.Width = nWidth
        if nHeight    != None:  self.oDialogModel.Height = nHeight
        if cTitle     != None:  self.oDialogModel.Title = cTitle
        self.oDialogControl = createUnoService( "com.sun.star.awt.UnoControlDialog" )
        self.oDialogControl.setModel( self.oDialogModel )

    def release( self ):
        """Release resources.
        After calling this, you can no longer use this object.
        """
        self.oDialogControl.dispose()

    #--------------------------------------------------
    #   Dialog box adjustments
    #--------------------------------------------------

    def setDialogPosition( self, nX, nY ):
        self.oDialogModel.PositionX = nX
        self.oDialogModel.PositionY = nY

    def setDialogSize( self, nWidth, nHeight ):
        self.oDialogModel.Width = nWidth
        self.oDialogModel.Height = nHeight

    def setDialogTitle( self, cCaption ):
        self.oDialogModel.Title = cCaption

    def setVisible( self, bVisible ):
        self.oDialogControl.setVisible( bVisible )

    #--------------------------------------------------
    #   com.sun.star.awt.UnoControlButton
    #--------------------------------------------------

    # After you add a Button control, you can call self.setControlModelProperty()
    #  passing any of the properties for a...
    #       com.sun.star.awt.UnoControlButtonModel
    #       com.sun.star.awt.UnoControlDialogElement
    #       com.sun.star.awt.UnoControlModel

    def addButton( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    cLabel=None,
                    actionListenerProc=None,
                    nTabIndex=None ):
        self.addControl( "com.sun.star.awt.UnoControlButtonModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight, bDropdown=None, bMultiSelection=None,
            cLabel=cLabel,
            nTabIndex=nTabIndex )
        if actionListenerProc != None:
            self.addActionListenerProc( cCtrlName, actionListenerProc )

    def setButtonLabel( self, cCtrlName, cLabel ):
        """Set the label of the control."""
        oControl = self.getControl( cCtrlName )
        oControl.setLabel( cLabel )

    #--------------------------------------------------
    #   com.sun.star.awt.UnoControlEditModel
    #--------------------------------------------------

    def addEdit( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    cText=None,
                    textListenerProc=None ):
        """Add a Edit control to the window."""
        self.addControl( "com.sun.star.awt.UnoControlEditModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight, bDropdown=None)
        if cText != None:
            self.setEditText( cCtrlName, cText )
        if textListenerProc != None:
            self.addTextListenerProc( cCtrlName, textListenerProc )

    #--------------------------------------------------
    #   com.sun.star.awt.UnoControlCheckBox
    #--------------------------------------------------

    # After you add a CheckBox control, you can call self.setControlModelProperty()
    #  passing any of the properties for a...
    #       com.sun.star.awt.UnoControlCheckBoxModel
    #       com.sun.star.awt.UnoControlDialogElement
    #       com.sun.star.awt.UnoControlModel

    def addCheckBox( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    cLabel=None,
                    itemListenerProc=None,
                    nTabIndex=None ):
        self.addControl( "com.sun.star.awt.UnoControlCheckBoxModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight, bDropdown=None, bMultiSelection=None,
            cLabel=cLabel,
            nTabIndex=nTabIndex )
        if itemListenerProc != None:
            self.addItemListenerProc( cCtrlName, itemListenerProc )

    def setEditText( self, cCtrlName, cText ):
        """Set the text of the edit box."""
        oControl = self.getControl( cCtrlName )
        oControl.setText( cText )

    def getEditText( self, cCtrlName):
        """Get the text of the edit box."""
        oControl = self.getControl( cCtrlName )
        return oControl.getText()

    def setCheckBoxLabel( self, cCtrlName, cLabel ):
        """Set the label of the control."""
        oControl = self.getControl( cCtrlName )
        oControl.setLabel( cLabel )

    def getCheckBoxState( self, cCtrlName ):
        """Get the state of the control."""
        oControl = self.getControl( cCtrlName )
        return oControl.getState();

    def setCheckBoxState( self, cCtrlName, nState ):
        """Set the state of the control."""
        oControl = self.getControl( cCtrlName )
        oControl.setState( nState )

    def enableCheckBoxTriState( self, cCtrlName, bTriStateEnable ):
        """Enable or disable the tri state mode of the control."""
        oControl = self.getControl( cCtrlName )
        oControl.enableTriState( bTriStateEnable )

    #--------------------------------------------------
    #   com.sun.star.awt.UnoControlFixedText
    #--------------------------------------------------

    def addFixedText( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    cLabel=None ):
        self.addControl( "com.sun.star.awt.UnoControlFixedTextModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
            bDropdown=None, bMultiSelection=None,
            cLabel=cLabel )
        return self.getControl( cCtrlName )

    #--------------------------------------------------
    #   Add Controls to dialog
    #--------------------------------------------------

    def addControl( self, cCtrlServiceName,
                    cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    bDropdown=None,
                    bMultiSelection=None,
                    cLabel=None,
                    nTabIndex=None,
                    sImagePath=None,
                    ):
        oControlModel = self.oDialogModel.createInstance( cCtrlServiceName )
        self.oDialogModel.insertByName( cCtrlName, oControlModel )

        # if negative coordinates are given for X or Y position,
        #  then make that coordinate be relative to the right/bottom
        #  edge of the dialog box instead of to the left/top.
        if nPositionX < 0: nPositionX = self.oDialogModel.Width  + nPositionX - nWidth
        if nPositionY < 0: nPositionY = self.oDialogModel.Height + nPositionY - nHeight
        oControlModel.PositionX = nPositionX
        oControlModel.PositionY = nPositionY
        oControlModel.Width = nWidth
        oControlModel.Height = nHeight
        oControlModel.Name = cCtrlName

        if bDropdown != None:
            oControlModel.Dropdown = bDropdown

        if bMultiSelection!=None:
            oControlModel.MultiSelection=bMultiSelection

        if cLabel != None:
            oControlModel.Label = cLabel

        if nTabIndex != None:
            oControlModel.TabIndex = nTabIndex

        if sImagePath != None:
            oControlModel.ImageURL = sImagePath

    #--------------------------------------------------
    #   Access controls and control models
    #--------------------------------------------------

    #--------------------------------------------------
    #   com.sun.star.awt.UnoContorlListBoxModel
    #--------------------------------------------------

    def addComboListBox( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    bDropdown=True,
                    bMultiSelection=False,
                    itemListenerProc=None,
                    actionListenerProc=None,
                    ):
        mod = self.addControl( "com.sun.star.awt.UnoControlListBoxModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight,bDropdown,bMultiSelection )

        if itemListenerProc != None:
            self.addItemListenerProc( cCtrlName, itemListenerProc )

    def addListBoxItems( self, cCtrlName, tcItemTexts, nPosition=0 ):
        """Add a tupple of items to the ListBox at specified position."""
        oControl = self.getControl( cCtrlName )
        oControl.addItems( tcItemTexts, nPosition )

    def selectListBoxItem( self, cCtrlName, cItemText, bSelect=True ):
        """Selects/Deselects the ispecified item."""
        oControl = self.getControl( cCtrlName )
        return oControl.selectItem( cItemText, bSelect )

    def selectListBoxItemPos( self, cCtrlName, nItemPos, bSelect=True ):
        """Select/Deselect the item at the specified position."""
        oControl = self.getControl( cCtrlName )
        return oControl.selectItemPos( nItemPos, bSelect )

    def removeListBoxItems( self, cCtrlName, nPosition, nCount=1 ):
        """Remove items from a ListBox."""
        oControl = self.getControl( cCtrlName )
        oControl.removeItems( nPosition, nCount )

    def getListBoxItemCount( self, cCtrlName ):
        """Get the number of items in a ListBox."""
        oControl = self.getControl( cCtrlName )
        return oControl.getItemCount()

    def getListBoxSelectedItem( self, cCtrlName ):
        """Returns the currently selected item."""
        oControl = self.getControl( cCtrlName )
        return oControl.getSelectedItem()

    def getListBoxItem( self, cCtrlName, nPosition ):
        """Return the item at specified position within the ListBox."""
        oControl = self.getControl( cCtrlName )
        return oControl.getItem( nPosition )

    def getListBoxSelectedItemPos(self,cCtrlName):
        oControl = self.getControl( cCtrlName )
        return oControl.getSelectedItemPos()

    def getListBoxSelectedItems(self,cCtrlName):
        oControl = self.getControl( cCtrlName )
        return oControl.getSelectedItems()

    def getListBoxSelectedItemsPos(self,cCtrlName):
        oControl = self.getControl( cCtrlName )
        return oControl.getSelectedItemsPos()

    #--------------------------------------------------
    #   com.sun.star.awt.UnoControlComboBoxModel
    #--------------------------------------------------

    def addComboBox( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    bDropdown=True,
                    itemListenerProc=None,
                    actionListenerProc=None ):
        mod = self.addControl( "com.sun.star.awt.UnoControlComboBoxModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight,bDropdown)
        if itemListenerProc != None:
            self.addItemListenerProc( cCtrlName, itemListenerProc )
        if actionListenerProc != None:
            self.addActionListenerProc( cCtrlName, actionListenerProc )

    def setComboBoxText( self, cCtrlName, cText ):
        """Set the text of the ComboBox."""
        oControl = self.getControl( cCtrlName )
        oControl.setText( cText )

    def getComboBoxText( self, cCtrlName):
        """Get the text of the ComboBox."""
        oControl = self.getControl( cCtrlName )
        return oControl.getText()

    def getComboBoxSelectedText( self, cCtrlName ):
        """Get the selected text of the ComboBox."""
        oControl = self.getControl( cCtrlName )
        return oControl.getSelectedText();

    def getControl( self, cCtrlName ):
        """Get the control (not its model) for a particular control name.
        The control returned includes the service com.sun.star.awt.UnoControl,
        and another control-specific service which inherits from it.
        """
        oControl = self.oDialogControl.getControl( cCtrlName )
        return oControl

    def getControlModel( self, cCtrlName ):
        """Get the control model (not the control) for a particular control name.
        The model returned includes the service UnoControlModel,
        and another control-specific service which inherits from it.
        """
        oControl = self.getControl( cCtrlName )
        oControlModel = oControl.getModel()
        return oControlModel

    #---------------------------------------------------
    #   com.sun.star.awt.UnoControlImageControlModel
    #---------------------------------------------------

    def addImageControl( self, cCtrlName, nPositionX, nPositionY, nWidth, nHeight,
                    sImagePath="",
                    itemListenerProc=None,
                    actionListenerProc=None ):
        mod = self.addControl( "com.sun.star.awt.UnoControlImageControlModel",
            cCtrlName, nPositionX, nPositionY, nWidth, nHeight, sImagePath=sImagePath)
        if itemListenerProc != None:
            self.addItemListenerProc( cCtrlName, itemListenerProc )
        if actionListenerProc != None:
            self.addActionListenerProc( cCtrlName, actionListenerProc )

    #--------------------------------------------------
    #   Adjust properties of control models
    #--------------------------------------------------

    def setControlModelProperty( self, cCtrlName, cPropertyName, uValue ):
        """Set the value of a property of a control's model.
        This affects the control model, not the control.
        """
        oControlModel = self.getControlModel( cCtrlName )
        oControlModel.setPropertyValue( cPropertyName, uValue )

    def getControlModelProperty( self, cCtrlName, cPropertyName ):
        """Get the value of a property of a control's model.
        This affects the control model, not the control.
        """
        oControlModel = self.getControlModel( cCtrlName )
        return oControlModel.getPropertyValue( cPropertyName )

    #--------------------------------------------------
    #   Sugar coated property adjustments to control models.
    #--------------------------------------------------

    def setEnabled( self, cCtrlName, bEnabled=True ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        self.setControlModelProperty( cCtrlName, "Enabled", bEnabled )

    def getEnabled( self, cCtrlName ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        return self.getControlModelProperty( cCtrlName, "Enabled" )

    def setState( self, cCtrlName, nState ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        NOTE(review): this uses the property name "Status"; the UNO checkbox
        model property appears to be "State" — confirm against callers
        before changing, since fixing it would alter observable behavior.
        """
        self.setControlModelProperty( cCtrlName, "Status", nState )

    def getState( self, cCtrlName ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        NOTE(review): see setState about the "Status" property name.
        """
        return self.getControlModelProperty( cCtrlName, "Status" )

    def setLabel( self, cCtrlName, cLabel ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        self.setControlModelProperty( cCtrlName, "Label", cLabel )

    def getLabel( self, cCtrlName ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        return self.getControlModelProperty( cCtrlName, "Label" )

    def setHelpText( self, cCtrlName, cHelpText ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        self.setControlModelProperty( cCtrlName, "HelpText", cHelpText )

    def getHelpText( self, cCtrlName ):
        """Supported controls...
            UnoControlButtonModel
            UnoControlCheckBoxModel
        """
        return self.getControlModelProperty( cCtrlName, "HelpText" )

    #--------------------------------------------------
    #   Adjust controls (not models)
    #--------------------------------------------------

    # The following apply to all controls which are a
    #   com.sun.star.awt.UnoControl

    def setDesignMode( self, cCtrlName, bDesignMode=True ):
        oControl = self.getControl( cCtrlName )
        oControl.setDesignMode( bDesignMode )

    def isDesignMode( self, cCtrlName, bDesignMode=True ):
        oControl = self.getControl( cCtrlName )
        return oControl.isDesignMode()

    def isTransparent( self, cCtrlName, bDesignMode=True ):
        oControl = self.getControl( cCtrlName )
        return oControl.isTransparent()

    # The following apply to all controls which are a
    #   com.sun.star.awt.UnoControlDialogElement

    def setPosition( self, cCtrlName, nPositionX, nPositionY ):
        self.setControlModelProperty( cCtrlName, "PositionX", nPositionX )
        self.setControlModelProperty( cCtrlName, "PositionY", nPositionY )

    def setPositionX( self, cCtrlName, nPositionX ):
        self.setControlModelProperty( cCtrlName, "PositionX", nPositionX )

    def setPositionY( self, cCtrlName, nPositionY ):
        self.setControlModelProperty( cCtrlName, "PositionY", nPositionY )

    def getPositionX( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "PositionX" )

    def getPositionY( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "PositionY" )

    def setSize( self, cCtrlName, nWidth, nHeight ):
        self.setControlModelProperty( cCtrlName, "Width", nWidth )
        self.setControlModelProperty( cCtrlName, "Height", nHeight )

    def setWidth( self, cCtrlName, nWidth ):
        self.setControlModelProperty( cCtrlName, "Width", nWidth )

    def setHeight( self, cCtrlName, nHeight ):
        self.setControlModelProperty( cCtrlName, "Height", nHeight )

    def getWidth( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "Width" )

    def getHeight( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "Height" )

    # NOTE(review): nWidth is unused in the next three setters; it is kept
    # so existing callers that pass it positionally do not break.
    def setTabIndex( self, cCtrlName, nWidth, nTabIndex ):
        self.setControlModelProperty( cCtrlName, "TabIndex", nTabIndex )

    def getTabIndex( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "TabIndex" )

    def setStep( self, cCtrlName, nWidth, nStep ):
        self.setControlModelProperty( cCtrlName, "Step", nStep )

    def getStep( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "Step" )

    def setTag( self, cCtrlName, nWidth, cTag ):
        self.setControlModelProperty( cCtrlName, "Tag", cTag )

    def getTag( self, cCtrlName ):
        return self.getControlModelProperty( cCtrlName, "Tag" )

    def setEchoChar(self, cCtrlName , cVal):
        self.setControlModelProperty(cCtrlName, "EchoChar", cVal)

    def getEchoChar(self, cCtrlName):
        # Bug fix: previously called setControlModelProperty with a missing
        # `uValue` argument (TypeError); reading requires the getter.
        return self.getControlModelProperty(cCtrlName, "EchoChar")

    #--------------------------------------------------
    #   Add listeners to controls.
    #--------------------------------------------------

    # This applies to...
    #   UnoControlButton

    def addActionListenerProc( self, cCtrlName, actionListenerProc ):
        """Create an com.sun.star.awt.XActionListener object and add it to a control.
        A listener object is created which will call the python procedure actionListenerProc.
        The actionListenerProc can be either a method or a global procedure.
        The following controls support XActionListener:
            UnoControlButton
        """
        oControl = self.getControl( cCtrlName )
        oActionListener = ActionListenerProcAdapter( actionListenerProc )
        oControl.addActionListener( oActionListener )

    # This applies to...
    #   UnoControlCheckBox

    def addItemListenerProc( self, cCtrlName, itemListenerProc ):
        """Create an com.sun.star.awt.XItemListener object and add it to a control.
        A listener object is created which will call the python procedure itemListenerProc.
        The itemListenerProc can be either a method or a global procedure.
        The following controls support XActionListener:
            UnoControlCheckBox
        """
        oControl = self.getControl( cCtrlName )
        oActionListener = ItemListenerProcAdapter( itemListenerProc )
        oControl.addItemListener( oActionListener )

    #--------------------------------------------------
    #   Display the modal dialog.
    #--------------------------------------------------

    def doModalDialog( self, sObjName,sValue):
        """Display the dialog as a modal dialog."""
        self.oDialogControl.setVisible( True )
        if not sValue==None:
            self.selectListBoxItem( sObjName, sValue, True )
        self.oDialogControl.execute()

    def endExecute( self ):
        """Call this from within one of the listeners to end the modal dialog.
        For instance, the listener on your OK or Cancel button would call this to end the dialog.
        """
        self.oDialogControl.endExecute()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"sync"
)
// MemStore is an implementation of CheckpointStore interface which stores checkpoint in memory.
type MemStore struct {
mem map[string][]byte
sync.Mutex
}
// NewMemStore returns an instance of MemStore
func NewMemStore() *MemStore {
return &MemStore{mem: make(map[string][]byte)}
}
// Write writes the data to the store
func (mstore *MemStore) Write(key string, data []byte) error {
mstore.Lock()
defer mstore.Unlock()
mstore.mem[key] = data
return nil
}
// Read returns data read from store
func (mstore *MemStore) Read(key string) ([]byte, error) {
mstore.Lock()
defer mstore.Unlock()
data, ok := mstore.mem[key]
if !ok {
return nil, fmt.Errorf("checkpoint is not found")
}
return data, nil
}
// Delete deletes data from the store
func (mstore *MemStore) Delete(key string) error {
mstore.Lock()
defer mstore.Unlock()
delete(mstore.mem, key)
return nil
}
// List returns all the keys from the store
func (mstore *MemStore) List() ([]string, error) {
mstore.Lock()
defer mstore.Unlock()
keys := make([]string, 0)
for key := range mstore.mem {
keys = append(keys, key)
}
return keys, nil
} | go | github | https://github.com/kubernetes/kubernetes | pkg/kubelet/checkpointmanager/testing/util.go |
/*
MIT License http://www.opensource.org/licenses/mit-license.php
Author Tobias Koppers @sokra and Zackary Jackson @ScriptedAlchemy
*/
"use strict";
const { RawSource } = require("webpack-sources");
const Module = require("../Module");
const { REMOTE_AND_SHARE_INIT_TYPES } = require("../ModuleSourceTypeConstants");
const { WEBPACK_MODULE_TYPE_REMOTE } = require("../ModuleTypeConstants");
const RuntimeGlobals = require("../RuntimeGlobals");
const makeSerializable = require("../util/makeSerializable");
const FallbackDependency = require("./FallbackDependency");
const RemoteToExternalDependency = require("./RemoteToExternalDependency");
/** @typedef {import("../config/defaults").WebpackOptionsNormalizedWithDefaults} WebpackOptions */
/** @typedef {import("../Compilation")} Compilation */
/** @typedef {import("../Module").BuildCallback} BuildCallback */
/** @typedef {import("../Module").CodeGenerationContext} CodeGenerationContext */
/** @typedef {import("../Module").CodeGenerationResultData} CodeGenerationResultData */
/** @typedef {import("../Module").CodeGenerationResult} CodeGenerationResult */
/** @typedef {import("../Module").LibIdentOptions} LibIdentOptions */
/** @typedef {import("../Module").LibIdent} LibIdent */
/** @typedef {import("../Module").NameForCondition} NameForCondition */
/** @typedef {import("../Module").NeedBuildCallback} NeedBuildCallback */
/** @typedef {import("../Module").NeedBuildContext} NeedBuildContext */
/** @typedef {import("../Module").Sources} Sources */
/** @typedef {import("../Module").SourceTypes} SourceTypes */
/** @typedef {import("../ModuleGraph")} ModuleGraph */
/** @typedef {import("../Module").ExportsType} ExportsType */
/** @typedef {import("../RequestShortener")} RequestShortener */
/** @typedef {import("../ResolverFactory").ResolverWithOptions} ResolverWithOptions */
/** @typedef {import("../serialization/ObjectMiddleware").ObjectDeserializerContext} ObjectDeserializerContext */
/** @typedef {import("../serialization/ObjectMiddleware").ObjectSerializerContext} ObjectSerializerContext */
/** @typedef {import("../util/fs").InputFileSystem} InputFileSystem */
// The generated remote stub only ever touches `module` at runtime.
const RUNTIME_REQUIREMENTS = new Set([RuntimeGlobals.module]);
/** @typedef {string[]} ExternalRequests */
/**
 * Module representing a Module Federation "remote": a module whose real code
 * is provided at runtime by another build (a remote container). Building it
 * only wires up a dependency on the container external(s); code generation
 * emits an empty source plus share-init data.
 */
class RemoteModule extends Module {
	/**
	 * @param {string} request request string
	 * @param {ExternalRequests} externalRequests list of external requests to containers
	 * @param {string} internalRequest name of exposed module in container
	 * @param {string} shareScope the used share scope name
	 */
	constructor(request, externalRequests, internalRequest, shareScope) {
		super(WEBPACK_MODULE_TYPE_REMOTE);
		/** @type {string} */
		this.request = request;
		/** @type {ExternalRequests} */
		this.externalRequests = externalRequests;
		/** @type {string} */
		this.internalRequest = internalRequest;
		/** @type {string} */
		this.shareScope = shareScope;
		// Identifier encodes scope + all external requests + exposed name, so
		// two remotes differing in any of these are distinct modules.
		/** @type {string} */
		this._identifier = `remote (${shareScope}) ${this.externalRequests.join(
			" "
		)} ${this.internalRequest}`;
	}
	/**
	 * @returns {string} a unique identifier of the module
	 */
	identifier() {
		return this._identifier;
	}
	/**
	 * @param {RequestShortener} requestShortener the request shortener
	 * @returns {string} a user readable identifier of the module
	 */
	readableIdentifier(requestShortener) {
		return `remote ${this.request}`;
	}
	/**
	 * @param {LibIdentOptions} options options
	 * @returns {LibIdent | null} an identifier for library inclusion
	 */
	libIdent(options) {
		return `${this.layer ? `(${this.layer})/` : ""}webpack/container/remote/${
			this.request
		}`;
	}
	/**
	 * @param {NeedBuildContext} context context info
	 * @param {NeedBuildCallback} callback callback function, returns true, if the module needs a rebuild
	 * @returns {void}
	 */
	needBuild(context, callback) {
		// Only needs a build when it was never built; remotes have no
		// on-disk source that could become outdated.
		callback(null, !this.buildInfo);
	}
	/**
	 * @param {WebpackOptions} options webpack options
	 * @param {Compilation} compilation the compilation
	 * @param {ResolverWithOptions} resolver the resolver
	 * @param {InputFileSystem} fs the file system
	 * @param {BuildCallback} callback callback function
	 * @returns {void}
	 */
	build(options, compilation, resolver, fs, callback) {
		this.buildMeta = {};
		this.buildInfo = {
			strict: true
		};
		this.clearDependenciesAndBlocks();
		// Single container: depend on it directly. Multiple containers:
		// use a FallbackDependency that tries them in order.
		if (this.externalRequests.length === 1) {
			this.addDependency(
				new RemoteToExternalDependency(this.externalRequests[0])
			);
		} else {
			this.addDependency(new FallbackDependency(this.externalRequests));
		}
		callback();
	}
	/**
	 * @param {string=} type the source type for which the size should be estimated
	 * @returns {number} the estimated size of the module (must be non-zero)
	 */
	size(type) {
		// Nominal constant: the generated source is an empty stub.
		return 6;
	}
	/**
	 * @returns {SourceTypes} types available (do not mutate)
	 */
	getSourceTypes() {
		return REMOTE_AND_SHARE_INIT_TYPES;
	}
	/**
	 * @param {ModuleGraph} moduleGraph the module graph
	 * @param {boolean | undefined} strict the importing module is strict
	 * @returns {ExportsType} export type
	 * "namespace": Exports is already a namespace object. namespace = exports.
	 * "dynamic": Check at runtime if __esModule is set. When set: namespace = { ...exports, default: exports }. When not set: namespace = { default: exports }.
	 * "default-only": Provide a namespace object with only default export. namespace = { default: exports }
	 * "default-with-named": Provide a namespace object with named and default export. namespace = { ...exports, default: exports }
	 */
	getExportsType(moduleGraph, strict) {
		// The exposed module's shape is unknown at build time, so it must
		// be inspected at runtime.
		return "dynamic";
	}
	/**
	 * @returns {NameForCondition | null} absolute path which should be used for condition matching (usually the resource path)
	 */
	nameForCondition() {
		return this.request;
	}
	/**
	 * @param {CodeGenerationContext} context context for code generation
	 * @returns {CodeGenerationResult} result
	 */
	codeGeneration({ moduleGraph, chunkGraph }) {
		const module = moduleGraph.getModule(this.dependencies[0]);
		const id = module && chunkGraph.getModuleId(module);
		/** @type {Sources} */
		const sources = new Map();
		// The "remote" source is intentionally empty; the actual loading is
		// driven by the share-init data below (consumed by the runtime —
		// presumably the share-scope init code; confirm in runtime modules).
		sources.set("remote", new RawSource(""));
		/** @type {CodeGenerationResultData} */
		const data = new Map();
		data.set("share-init", [
			{
				shareScope: this.shareScope,
				initStage: 20,
				init: id === undefined ? "" : `initExternal(${JSON.stringify(id)});`
			}
		]);
		return { sources, data, runtimeRequirements: RUNTIME_REQUIREMENTS };
	}
	/**
	 * @param {ObjectSerializerContext} context context
	 */
	serialize(context) {
		// Write order must match the read order in deserialize().
		const { write } = context;
		write(this.request);
		write(this.externalRequests);
		write(this.internalRequest);
		write(this.shareScope);
		super.serialize(context);
	}
	/**
	 * @param {ObjectDeserializerContext} context context
	 * @returns {RemoteModule} deserialized module
	 */
	static deserialize(context) {
		const { read } = context;
		const obj = new RemoteModule(read(), read(), read(), read());
		obj.deserialize(context);
		return obj;
	}
}
// Register the class for webpack's persistent-cache (de)serialization.
makeSerializable(RemoteModule, "webpack/lib/container/RemoteModule");
module.exports = RemoteModule;
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('http://python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post("http://httpbin.org/post", data=payload)
>>> print r.text
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2013 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
# Package metadata.
__title__ = 'requests'
__version__ = '1.2.2'
__build__ = 0x010202
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible.
# Best-effort: pyOpenSSL (and friends) may simply not be installed.
try:
    from requests.packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
# Re-export the public API at package level.
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
    RequestException, Timeout, URLRequired,
    TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Python 2.6 has no logging.NullHandler; provide a no-op fallback.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
logging.getLogger(__name__).addHandler(NullHandler())
# util
import types
from zlib import compress as _compress, decompress
import threading
import warnings
import errno
# Prefer a prebuilt extension module; fall back to compiling the Cython
# source in place via pyximport.
try:
    from dpark.portable_hash import portable_hash as _hash
except ImportError:
    import pyximport
    pyximport.install(inplace=True)
    from dpark.portable_hash import portable_hash as _hash
# pwd is POSIX-only; fall back to getpass elsewhere. The bare except is
# deliberate best-effort — presumably to cover non-POSIX platforms.
try:
    import os
    import pwd
    def getuser():
        return pwd.getpwuid(os.getuid()).pw_name
except:
    import getpass
    def getuser():
        return getpass.getuser()
# Pick the fastest available compression codec: lz4 > snappy > zlib level 1.
# COMPRESS records which codec the module ended up with.
COMPRESS = 'zlib'
def compress(s):
    return _compress(s, 1)
try:
    from lz4 import compress, decompress
    COMPRESS = 'lz4'
except ImportError:
    try:
        from snappy import compress, decompress
        COMPRESS = 'snappy'
    except ImportError:
        pass
def spawn(target, *args, **kw):
    """Run `target(*args, **kw)` in a daemon thread and return the started thread."""
    thread = threading.Thread(
        target=target, name=target.__name__, args=args, kwargs=kw)
    thread.daemon = True
    thread.start()
    return thread
# hash(None) is id(None), which differs between machines/processes.
# http://effbot.org/zone/python-hash.htm
def portable_hash(value):
    # Delegate to the Cython implementation imported above so the hash is
    # stable across machines (needed for consistent partitioning).
    return _hash(value)
# similar to itertools.chain.from_iterable, but faster in PyPy
def chain(it):
    """Flatten one level: yield every item of every iterable in `it`."""
    for sub in it:
        for item in sub:
            yield item
def izip(*its):
    """Yield tuples of parallel items, stopping at the shortest iterable.

    Fixes over the previous version:
    - uses the builtin next() (Python 2.6+) instead of the Python-2-only
      it.next() method, so it also works on Python 3;
    - izip() with no arguments now terminates immediately instead of
      yielding empty tuples forever.
    """
    if not its:
        return
    its = [iter(it) for it in its]
    try:
        while True:
            yield tuple([next(it) for it in its])
    except StopIteration:
        pass
def mkdir_p(path):
    "like `mkdir -p`: create `path` and missing parents, ignore if it already exists"
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only an "already exists as a directory" error is benign.
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def memory_str_to_mb(str):
    """Convert a memory-size string ('512k', '2g', '1024', case-insensitive)
    to megabytes; a bare number is taken as MB."""
    scale_factors = {
        'k': 1. / 1024,
        'm': 1,
        'g': 1024,
        't': 1024 * 1024,
    }
    text = str.lower()
    unit = 'm'
    if text[-1].isalpha():
        text, unit = text[:-1], text[-1]
    return float(text) * scale_factors[unit]
#
# Copyright (C) 2003-2004 Gerard Vermeulen
#
# This file is part of PyQwt
#
# PyQwt is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# PyQwt is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyQwt; if not, write to the Free Software Foundation, Inc., 59 Temple Place,
# Suite 330, Boston, MA 02111-1307, USA.
#
# In addition, as a special exception, Gerard Vermeulen gives permission to
# link PyQwt dynamically with commercial, non-commercial or educational
# versions of Qt, PyQt and sip, and distribute PyQwt in this form, provided
# that equally powerful versions of Qt, PyQt and sip have been released under
# the terms of the GNU General Public License.
#
# If PyQwt is dynamically linked with commercial, non-commercial or educational
# versions of Qt, PyQt and sip, PyQwt becomes a free plug-in for a non-free
# program. | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright (c) 2011 anatanokeitai.com(sakurai_youhei)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import pyacd
# Point the client at the Japanese Amazon storefront before running tests.
pyacd.set_amazon_domain("www.amazon.co.jp")
import os, sys
# Make the sibling "test" module importable regardless of the cwd.
sys.path.insert(0,os.path.dirname(__file__))
import test
test.main()
"use strict";
// Feature-detects JavaScript explicit resource management (`using`
// declarations + Symbol.dispose). The probe lives inside eval() so that
// engines whose parser rejects the `using` syntax throw a catchable error
// instead of breaking this file at load time.
module.exports = function supportsUsing() {
	try {
		const f = eval(`(function f() {
			let disposed = false;
			{
				const getResource = () => {
					return {
						[Symbol.dispose]: () => {
							disposed = true;
						}
					}
				}
				using resource = getResource();
			}
			return disposed;
		})`);
		// true only when the resource's Symbol.dispose ran on block exit
		return f() === true;
	} catch (_err) {
		return false;
	}
};
import pytest
from conftest import assert_complete
class TestSudo:
    """Bash completion tests for `sudo`: option, command, filename, user and
    group completion, including escaping of special characters in names."""
    @pytest.mark.complete("sudo -", require_cmd=True)
    def test_1(self, completion):
        assert completion
    @pytest.mark.complete("sudo cd foo", cwd="shared/default")
    def test_2(self, completion):
        # cd after sudo should still complete directories only
        assert completion == ".d/"
        assert not completion.endswith(" ")
    @pytest.mark.complete("sudo sh share")
    def test_3(self, completion):
        assert completion == "d/"
        assert not completion.endswith(" ")
    @pytest.mark.complete("sudo mount /dev/sda1 def", cwd="shared")
    def test_4(self, completion):
        assert completion == "ault/"
        assert not completion.endswith(" ")
    @pytest.mark.complete("sudo -e -u root bar foo", cwd="shared/default")
    def test_5(self, completion):
        assert completion == "foo foo.d/".split()
    def test_6(self, bash, part_full_user):
        # user completion for chown's first argument
        part, full = part_full_user
        completion = assert_complete(bash, "sudo chown %s" % part)
        assert completion == full[len(part) :]
        assert completion.endswith(" ")
    def test_7(self, bash, part_full_user, part_full_group):
        # group completion after "user:"
        _, user = part_full_user
        partgroup, fullgroup = part_full_group
        completion = assert_complete(
            bash, "sudo chown %s:%s" % (user, partgroup)
        )
        assert completion == fullgroup[len(partgroup) :]
        assert completion.endswith(" ")
    def test_8(self, bash, part_full_group):
        # a dot in the user name must not break group completion
        part, full = part_full_group
        completion = assert_complete(bash, "sudo chown dot.user:%s" % part)
        assert completion == full[len(part) :]
        assert completion.endswith(" ")
    @pytest.mark.parametrize(
        "prefix",
        [
            r"funky\ user:",
            "funky.user:",
            r"funky\.user:",
            r"fu\ nky.user:",
            r"f\ o\ o\.\bar:",
            r"foo\_b\ a\.r\ :",
        ],
    )
    def test_9(self, bash, part_full_group, prefix):
        """Test preserving special chars in $prefix$partgroup<TAB>."""
        part, full = part_full_group
        completion = assert_complete(bash, "sudo chown %s%s" % (prefix, part))
        assert completion == full[len(part) :]
        assert completion.endswith(" ")
    def test_10(self, bash, part_full_user, part_full_group):
        """Test giving up on degenerate cases instead of spewing junk."""
        _, user = part_full_user
        partgroup, _ = part_full_group
        for x in range(2, 5):
            completion = assert_complete(
                bash, "sudo chown %s%s:%s" % (user, x * "\\", partgroup)
            )
            assert not completion
    def test_11(self, bash, part_full_group):
        """Test graceful fail on colon in user/group name."""
        part, _ = part_full_group
        completion = assert_complete(bash, "sudo chown foo:bar:%s" % part)
        assert not completion
#!/usr/bin/env python
# coding: UTF-8
'''This script builds the seafile command line client (With no gui).
Some notes:
'''
import sys
####################
### Requires Python 2.6+
####################
if sys.version_info[0] == 3:
    print 'Python 3 not supported yet. Quit now.'
    sys.exit(1)
if sys.version_info[1] < 6:
    print 'Python 2.6 or above is required. Quit now.'
    sys.exit(1)
import os
import commands
import tempfile
import shutil
import re
import subprocess
import optparse
import atexit
####################
### Global variables
####################
# command line configuration, populated by validate_args()
conf = {}
# key names in the conf dictionary.
CONF_VERSION = 'version'
CONF_SEAFILE_VERSION = 'seafile_version'
CONF_LIBSEARPC_VERSION = 'libsearpc_version'
CONF_CCNET_VERSION = 'ccnet_version'
CONF_SRCDIR = 'srcdir'
CONF_KEEP = 'keep'
CONF_BUILDDIR = 'builddir'
CONF_OUTPUTDIR = 'outputdir'
CONF_THIRDPARTDIR = 'thirdpartdir'
CONF_NO_STRIP = 'nostrip'
####################
### Common helper functions
####################
def highlight(content, is_error=False):
    '''Add ANSI color to content to get it highlighted on terminal'''
    if is_error:
        return '\x1b[1;31m%s\x1b[m' % content
    return '\x1b[1;32m%s\x1b[m' % content
def info(msg):
    '''Print msg prefixed with a green "[INFO]" marker.'''
    print highlight('[INFO] ') + msg
def exist_in_path(prog):
    '''Test whether prog exists in system path.

    Returns an explicit bool; the previous version fell off the end and
    returned None on a miss.
    '''
    dirs = os.environ['PATH'].split(':')
    for d in dirs:
        # PATH may contain empty entries (e.g. "a::b"); skip them.
        if d == '':
            continue
        path = os.path.join(d, prog)
        if os.path.exists(path):
            return True
    return False
def prepend_env_value(name, value, seperator=':'):
    '''Prepend `value` to the environment variable `name`, creating it if absent.'''
    current_value = os.environ.get(name, '')
    if current_value:
        os.environ[name] = value + seperator + current_value
    else:
        os.environ[name] = value
def error(msg=None, usage=None):
    '''Print an error message (and optionally the usage text), then exit(1).'''
    if msg:
        print highlight('[ERROR] ') + msg
    if usage:
        print usage
    sys.exit(1)
def run_argv(argv, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Run a program and wait it to finish, and return its exit code. The
    standard output of this program is suppressed.
    '''
    with open(os.devnull, 'w') as devnull:
        if suppress_stdout:
            stdout = devnull
        else:
            stdout = sys.stdout
        if suppress_stderr:
            stderr = devnull
        else:
            stderr = sys.stderr
        proc = subprocess.Popen(argv,
                                cwd=cwd,
                                stdout=stdout,
                                stderr=stderr,
                                env=env)
        return proc.wait()
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
    '''Like run_argv but specify a command line string instead of argv'''
    # shell=True: cmdline goes through /bin/sh, so quoting matters to callers.
    with open(os.devnull, 'w') as devnull:
        if suppress_stdout:
            stdout = devnull
        else:
            stdout = sys.stdout
        if suppress_stderr:
            stderr = devnull
        else:
            stderr = sys.stderr
        proc = subprocess.Popen(cmdline,
                                cwd=cwd,
                                stdout=stdout,
                                stderr=stderr,
                                env=env,
                                shell=True)
        return proc.wait()
def must_mkdir(path):
    '''Create a directory, exit on failure.

    Uses "except ... as e" (valid since Python 2.6, which this script
    already requires) instead of the Python-2-only comma form, keeping the
    helper forward-compatible.
    '''
    try:
        os.mkdir(path)
    except OSError as e:
        error('failed to create directory %s:%s' % (path, e))
def must_copy(src, dst):
    '''Copy src to dst, exit on failure.

    Uses "except ... as e" (valid since Python 2.6, which this script
    already requires) instead of the Python-2-only comma form.
    '''
    try:
        shutil.copy(src, dst)
    except Exception as e:
        error('failed to copy %s to %s: %s' % (src, dst, e))
class Project(object):
    '''Base class for a project'''
    # Project name, i.e. libsearpc/ccnet/seafile
    name = ''
    # A list of shell commands to configure/build the project
    build_commands = []
    def __init__(self):
        # the path to pass to --prefix=/<prefix>
        self.prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-cli')
        self.version = self.get_version()
        self.src_tarball = os.path.join(conf[CONF_SRCDIR],
                                        '%s-%s.tar.gz' % (self.name, self.version))
        # project dir, like <builddir>/seafile-1.2.2/
        self.projdir = os.path.join(conf[CONF_BUILDDIR], '%s-%s' % (self.name, self.version))
    def get_version(self):
        # libsearpc and ccnet can have different versions from seafile.
        raise NotImplementedError
    def get_source_commit_id(self):
        '''By convention, we record the commit id of the source code in the
        file "<projdir>/latest_commit"
        '''
        latest_commit_file = os.path.join(self.projdir, 'latest_commit')
        with open(latest_commit_file, 'r') as fp:
            commit_id = fp.read().strip('\n\r\t ')
        return commit_id
    def append_cflags(self, macros):
        '''Append a -D<key>=<value> define to CPPFLAGS for each macro.'''
        cflags = ' '.join([ '-D%s=%s' % (k, macros[k]) for k in macros ])
        prepend_env_value('CPPFLAGS',
                          cflags,
                          seperator=' ')
    def uncompress(self):
        '''Uncompress the source from the tarball'''
        info('Uncompressing %s' % self.name)
        if run('tar xf %s' % self.src_tarball) < 0:
            error('failed to uncompress source of %s' % self.name)
    def before_build(self):
        '''Hook method to do project-specific stuff before running build commands'''
        pass
    def build(self):
        '''Build the source'''
        self.before_build()
        info('Building %s' % self.name)
        for cmd in self.build_commands:
            if run(cmd, cwd=self.projdir) != 0:
                error('error when running command:\n\t%s\n' % cmd)
class Libsearpc(Project):
    '''Build recipe for libsearpc (the RPC framework).'''
    name = 'libsearpc'
    def __init__(self):
        Project.__init__(self)
        self.build_commands = [
            './configure --prefix=%s --disable-compile-demo' % self.prefix,
            'make',
            'make install'
        ]
    def get_version(self):
        return conf[CONF_LIBSEARPC_VERSION]
class Ccnet(Project):
    '''Build recipe for ccnet (the networking layer).'''
    name = 'ccnet'
    def __init__(self):
        Project.__init__(self)
        self.build_commands = [
            './configure --prefix=%s --disable-compile-demo' % self.prefix,
            'make',
            'make install'
        ]
    def get_version(self):
        return conf[CONF_CCNET_VERSION]
    def before_build(self):
        macros = {}
        # SET CCNET_SOURCE_COMMIT_ID, so it can be printed in the log
        macros['CCNET_SOURCE_COMMIT_ID'] = '\\"%s\\"' % self.get_source_commit_id()
        self.append_cflags(macros)
class Seafile(Project):
    '''Build recipe for seafile itself (gui disabled).'''
    name = 'seafile'
    def __init__(self):
        Project.__init__(self)
        self.build_commands = [
            './configure --prefix=%s --disable-gui' % self.prefix,
            'make',
            'make install'
        ]
    def get_version(self):
        return conf[CONF_SEAFILE_VERSION]
    def update_cli_version(self):
        '''Substitute the version number in seaf-cli'''
        cli_py = os.path.join(self.projdir, 'app', 'seaf-cli')
        with open(cli_py, 'r') as fp:
            lines = fp.readlines()
        ret = []
        for line in lines:
            old = '''SEAF_CLI_VERSION = ""'''
            new = '''SEAF_CLI_VERSION = "%s"''' % conf[CONF_VERSION]
            line = line.replace(old, new)
            ret.append(line)
        with open(cli_py, 'w') as fp:
            fp.writelines(ret)
    def before_build(self):
        self.update_cli_version()
        macros = {}
        # SET SEAFILE_SOURCE_COMMIT_ID, so it can be printed in the log
        macros['SEAFILE_SOURCE_COMMIT_ID'] = '\\"%s\\"' % self.get_source_commit_id()
        self.append_cflags(macros)
def check_targz_src(proj, version, srcdir):
    '''Ensure <srcdir>/<proj>-<version>.tar.gz exists, exit otherwise.'''
    src_tarball = os.path.join(srcdir, '%s-%s.tar.gz' % (proj, version))
    if not os.path.exists(src_tarball):
        error('%s not exists' % src_tarball)
def validate_args(usage, options):
    '''Validate the parsed options, populate the global conf dict, then
    prepare the build dir and show a confirmation summary.'''
    required_args = [
        CONF_VERSION,
        CONF_LIBSEARPC_VERSION,
        CONF_CCNET_VERSION,
        CONF_SEAFILE_VERSION,
        CONF_SRCDIR,
    ]
    # first check required args
    for optname in required_args:
        if getattr(options, optname, None) == None:
            error('%s must be specified' % optname, usage=usage)
    def get_option(optname):
        return getattr(options, optname)
    # [ version ]
    def check_project_version(version):
        '''A valid version must be like 1.2.2, 1.3'''
        if not re.match('^[0-9]+(\.([0-9])+)+$', version):
            error('%s is not a valid version' % version, usage=usage)
    version = get_option(CONF_VERSION)
    seafile_version = get_option(CONF_SEAFILE_VERSION)
    libsearpc_version = get_option(CONF_LIBSEARPC_VERSION)
    ccnet_version = get_option(CONF_CCNET_VERSION)
    check_project_version(version)
    check_project_version(libsearpc_version)
    check_project_version(ccnet_version)
    check_project_version(seafile_version)
    # [ srcdir ]
    srcdir = get_option(CONF_SRCDIR)
    check_targz_src('libsearpc', libsearpc_version, srcdir)
    check_targz_src('ccnet', ccnet_version, srcdir)
    check_targz_src('seafile', seafile_version, srcdir)
    # [ builddir ]
    builddir = get_option(CONF_BUILDDIR)
    if not os.path.exists(builddir):
        error('%s does not exist' % builddir, usage=usage)
    builddir = os.path.join(builddir, 'seafile-cli-build')
    # [ outputdir ]
    outputdir = get_option(CONF_OUTPUTDIR)
    if outputdir:
        if not os.path.exists(outputdir):
            error('outputdir %s does not exist' % outputdir, usage=usage)
    else:
        outputdir = os.getcwd()
    # [ keep ]
    keep = get_option(CONF_KEEP)
    # [ no strip]
    nostrip = get_option(CONF_NO_STRIP)
    conf[CONF_VERSION] = version
    conf[CONF_LIBSEARPC_VERSION] = libsearpc_version
    conf[CONF_SEAFILE_VERSION] = seafile_version
    conf[CONF_CCNET_VERSION] = ccnet_version
    conf[CONF_BUILDDIR] = builddir
    conf[CONF_SRCDIR] = srcdir
    conf[CONF_OUTPUTDIR] = outputdir
    conf[CONF_KEEP] = keep
    conf[CONF_NO_STRIP] = nostrip
    prepare_builddir(builddir)
    show_build_info()
def show_build_info():
    '''Print all conf information. Confirm before continue.'''
    info('------------------------------------------')
    info('Seafile command line client %s: BUILD INFO' % conf[CONF_VERSION])
    info('------------------------------------------')
    info('seafile: %s' % conf[CONF_SEAFILE_VERSION])
    info('ccnet: %s' % conf[CONF_CCNET_VERSION])
    info('libsearpc: %s' % conf[CONF_LIBSEARPC_VERSION])
    info('builddir: %s' % conf[CONF_BUILDDIR])
    info('outputdir: %s' % conf[CONF_OUTPUTDIR])
    info('source dir: %s' % conf[CONF_SRCDIR])
    info('strip symbols: %s' % (not conf[CONF_NO_STRIP]))
    info('clean on exit: %s' % (not conf[CONF_KEEP]))
    info('------------------------------------------')
    info('press any key to continue ')
    info('------------------------------------------')
    dummy = raw_input()
def prepare_builddir(builddir):
    '''Create the build dir, schedule its removal at exit (unless --keep),
    chdir into it and create the staging "seafile-cli" subdir.'''
    must_mkdir(builddir)
    if not conf[CONF_KEEP]:
        def remove_builddir():
            '''Remove the builddir when exit'''
            info('remove builddir before exit')
            shutil.rmtree(builddir, ignore_errors=True)
        atexit.register(remove_builddir)
    os.chdir(builddir)
    must_mkdir(os.path.join(builddir, 'seafile-cli'))
def parse_args():
    '''Define and parse the command line options, then validate them.'''
    parser = optparse.OptionParser()
    def long_opt(opt):
        return '--' + opt
    parser.add_option(long_opt(CONF_VERSION),
                      dest=CONF_VERSION,
                      nargs=1,
                      help='the version to build. Must be digits delimited by dots, like 1.3.0')
    parser.add_option(long_opt(CONF_SEAFILE_VERSION),
                      dest=CONF_SEAFILE_VERSION,
                      nargs=1,
                      help='the version of seafile as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
    parser.add_option(long_opt(CONF_LIBSEARPC_VERSION),
                      dest=CONF_LIBSEARPC_VERSION,
                      nargs=1,
                      help='the version of libsearpc as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
    parser.add_option(long_opt(CONF_CCNET_VERSION),
                      dest=CONF_CCNET_VERSION,
                      nargs=1,
                      help='the version of ccnet as specified in its "configure.ac". Must be digits delimited by dots, like 1.3.0')
    parser.add_option(long_opt(CONF_BUILDDIR),
                      dest=CONF_BUILDDIR,
                      nargs=1,
                      help='the directory to build the source. Defaults to /tmp',
                      default=tempfile.gettempdir())
    parser.add_option(long_opt(CONF_OUTPUTDIR),
                      dest=CONF_OUTPUTDIR,
                      nargs=1,
                      help='the output directory to put the generated tarball. Defaults to the current directory.',
                      default=os.getcwd())
    parser.add_option(long_opt(CONF_SRCDIR),
                      dest=CONF_SRCDIR,
                      nargs=1,
                      help='''Source tarballs must be placed in this directory.''')
    parser.add_option(long_opt(CONF_KEEP),
                      dest=CONF_KEEP,
                      action='store_true',
                      help='''keep the build directory after the script exits. By default, the script would delete the build directory at exit.''')
    parser.add_option(long_opt(CONF_NO_STRIP),
                      dest=CONF_NO_STRIP,
                      action='store_true',
                      help='''do not strip debug symbols''')
    usage = parser.format_help()
    options, remain = parser.parse_args()
    if remain:
        error(usage=usage)
    validate_args(usage, options)
def setup_build_env():
    '''Setup environment variables, such as export PATH=$BUILDDIR/bin:$PATH'''
    prefix = os.path.join(conf[CONF_BUILDDIR], 'seafile-cli')
    prepend_env_value('CPPFLAGS',
                      '-I%s' % os.path.join(prefix, 'include'),
                      seperator=' ')
    prepend_env_value('CPPFLAGS',
                      '-DSEAFILE_CLIENT_VERSION=\\"%s\\"' % conf[CONF_VERSION],
                      seperator=' ')
    # --nostrip implies a debug build: keep symbols, disable optimization.
    if conf[CONF_NO_STRIP]:
        prepend_env_value('CPPFLAGS',
                          '-g -O0',
                          seperator=' ')
    prepend_env_value('LDFLAGS',
                      '-L%s' % os.path.join(prefix, 'lib'),
                      seperator=' ')
    prepend_env_value('LDFLAGS',
                      '-L%s' % os.path.join(prefix, 'lib64'),
                      seperator=' ')
    prepend_env_value('PATH', os.path.join(prefix, 'bin'))
    prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib', 'pkgconfig'))
    prepend_env_value('PKG_CONFIG_PATH', os.path.join(prefix, 'lib64', 'pkgconfig'))
def copy_scripts_and_libs():
    '''Copy scripts and shared libs'''
    builddir = conf[CONF_BUILDDIR]
    seafile_dir = os.path.join(builddir, Seafile().projdir)
    scripts_srcdir = os.path.join(seafile_dir, 'scripts')
    doc_dir = os.path.join(seafile_dir, 'doc')
    cli_dir = os.path.join(builddir, 'seafile-cli')
    # copy the wrapper shell script for seaf-cli.py
    src = os.path.join(scripts_srcdir, 'seaf-cli-wrapper.sh')
    dst = os.path.join(cli_dir, 'seaf-cli')
    must_copy(src, dst)
    # copy Readme for cli client
    src = os.path.join(doc_dir, 'cli-readme.txt')
    dst = os.path.join(cli_dir, 'Readme.txt')
    must_copy(src, dst)
    # rename seaf-cli to seaf-cli.py to avoid confusing users
    src = os.path.join(cli_dir, 'bin', 'seaf-cli')
    dst = os.path.join(cli_dir, 'bin', 'seaf-cli.py')
    try:
        shutil.move(src, dst)
    except Exception, e:
        error('failed to move %s to %s: %s' % (src, dst, e))
    # copy shared c libs
    copy_shared_libs()
def get_dependent_libs(executable):
    '''Return paths of the non-system shared libraries that `executable`
    links against, as reported by ldd.'''
    syslibs = ['libsearpc', 'libccnet', 'libseafile', 'libpthread.so', 'libc.so', 'libm.so', 'librt.so', 'libdl.so', 'libselinux.so']
    def is_syslib(lib):
        for syslib in syslibs:
            if syslib in lib:
                return True
        return False
    ldd_output = commands.getoutput('ldd %s' % executable)
    ret = []
    for line in ldd_output.splitlines():
        # only "name => path (addr)" lines have exactly 4 tokens
        tokens = line.split()
        if len(tokens) != 4:
            continue
        if is_syslib(tokens[0]):
            continue
        ret.append(tokens[2])
    return ret
def copy_shared_libs():
    '''copy shared c libs, such as libevent, glib, libmysqlclient'''
    builddir = conf[CONF_BUILDDIR]
    dst_dir = os.path.join(builddir,
                           'seafile-cli',
                           'lib')
    ccnet_daemon_path = os.path.join(builddir,
                                     'seafile-cli',
                                     'bin',
                                     'ccnet')
    seaf_daemon_path = os.path.join(builddir,
                                    'seafile-cli',
                                    'bin',
                                    'seaf-daemon')
    ccnet_daemon_libs = get_dependent_libs(ccnet_daemon_path)
    seaf_daemon_libs = get_dependent_libs(seaf_daemon_path)
    # union of both daemons' dependencies, preserving order
    libs = ccnet_daemon_libs
    for lib in seaf_daemon_libs:
        if lib not in libs:
            libs.append(lib)
    for lib in libs:
        info('Copying %s' % lib)
        shutil.copy(lib, dst_dir)
def strip_symbols():
    '''Strip debug symbols from every unstripped binary under the staging
    dir and remove static (.a/.la) libraries.'''
    def do_strip(fn):
        run('chmod u+w %s' % fn)
        info('stripping: %s' % fn)
        run('strip "%s"' % fn)
    def remove_static_lib(fn):
        info('removing: %s' % fn)
        os.remove(fn)
    builddir = conf[CONF_BUILDDIR]
    topdir = os.path.join(builddir, 'seafile-cli')
    for parent, dnames, fnames in os.walk(topdir):
        dummy = dnames # avoid pylint 'unused' warning
        for fname in fnames:
            fn = os.path.join(parent, fname)
            if os.path.isdir(fn):
                continue
            if fn.endswith(".a") or fn.endswith(".la"):
                remove_static_lib(fn)
                continue
            if os.path.islink(fn):
                continue
            finfo = commands.getoutput('file "%s"' % fn)
            if 'not stripped' in finfo:
                do_strip(fn)
def create_tarball(tarball_name):
'''call tar command to generate a tarball'''
version = conf[CONF_VERSION]
cli_dir = 'seafile-cli'
versioned_cli_dir = 'seafile-cli-' + version
# move seafile-cli to seafile-cli-${version}
try:
shutil.move(cli_dir, versioned_cli_dir)
except Exception, e:
error('failed to move %s to %s: %s' % (cli_dir, versioned_cli_dir, e))
ignored_patterns = [
# common ignored files
'*.pyc',
'*~',
'*#',
# seafile
os.path.join(versioned_cli_dir, 'share*'),
os.path.join(versioned_cli_dir, 'include*'),
os.path.join(versioned_cli_dir, 'lib', 'pkgconfig*'),
os.path.join(versioned_cli_dir, 'lib64', 'pkgconfig*'),
os.path.join(versioned_cli_dir, 'bin', 'ccnet-demo*'),
os.path.join(versioned_cli_dir, 'bin', 'ccnet-tool'),
os.path.join(versioned_cli_dir, 'bin', 'ccnet-servtool'),
os.path.join(versioned_cli_dir, 'bin', 'searpc-codegen.py'),
os.path.join(versioned_cli_dir, 'bin', 'seafile-admin'),
os.path.join(versioned_cli_dir, 'bin', 'seafile'),
]
excludes_list = [ '--exclude=%s' % pattern for pattern in ignored_patterns ]
excludes = ' '.join(excludes_list)
tar_cmd = 'tar czvf %(tarball_name)s %(versioned_cli_dir)s %(excludes)s' \
% dict(tarball_name=tarball_name,
versioned_cli_dir=versioned_cli_dir,
excludes=excludes)
if run(tar_cmd) != 0:
error('failed to generate the tarball')
def gen_tarball():
# strip symbols of libraries to reduce size
if not conf[CONF_NO_STRIP]:
try:
strip_symbols()
except Exception, e:
error('failed to strip symbols: %s' % e)
# determine the output name
# 64-bit: seafile-cli_1.2.2_x86-64.tar.gz
# 32-bit: seafile-cli_1.2.2_i386.tar.gz
version = conf[CONF_VERSION]
arch = os.uname()[-1].replace('_', '-')
if arch != 'x86-64':
arch = 'i386'
dbg = ''
if conf[CONF_NO_STRIP]:
dbg = '.dbg'
tarball_name = 'seafile-cli_%(version)s_%(arch)s%(dbg)s.tar.gz' \
% dict(version=version, arch=arch, dbg=dbg)
dst_tarball = os.path.join(conf[CONF_OUTPUTDIR], tarball_name)
# generate the tarball
try:
create_tarball(tarball_name)
except Exception, e:
error('failed to generate tarball: %s' % e)
# move tarball to outputdir
try:
shutil.copy(tarball_name, dst_tarball)
except Exception, e:
error('failed to copy %s to %s: %s' % (tarball_name, dst_tarball, e))
print '---------------------------------------------'
print 'The build is successfully. Output is:\t%s' % dst_tarball
print '---------------------------------------------'
def main():
    """Build libsearpc, ccnet and seafile in order, then package the CLI."""
    parse_args()
    setup_build_env()

    # Instantiate all projects first (matching the original side-effect
    # order), then uncompress and build each in dependency order.
    projects = [Libsearpc(), Ccnet(), Seafile()]
    for project in projects:
        project.uncompress()
        project.build()

    copy_scripts_and_libs()
    gen_tarball()
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stackit
import (
"context"
"errors"
"fmt"
"log/slog"
"net/url"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
"github.com/prometheus/prometheus/discovery"
"github.com/prometheus/prometheus/discovery/refresh"
"github.com/prometheus/prometheus/discovery/targetgroup"
)
const (
	// stackitLabelPrefix is the common prefix for every STACKIT-specific
	// meta label attached to discovered targets.
	stackitLabelPrefix           = model.MetaLabelPrefix + "stackit_"
	stackitLabelProject          = stackitLabelPrefix + "project"
	stackitLabelID               = stackitLabelPrefix + "id"
	stackitLabelName             = stackitLabelPrefix + "name"
	stackitLabelStatus           = stackitLabelPrefix + "status"
	stackitLabelPowerStatus      = stackitLabelPrefix + "power_status"
	stackitLabelAvailabilityZone = stackitLabelPrefix + "availability_zone"
	stackitLabelPublicIPv4       = stackitLabelPrefix + "public_ipv4"
)
// userAgent identifies this Prometheus build in requests to the STACKIT API.
var userAgent = version.PrometheusUserAgent()

// DefaultSDConfig is the default STACKIT SD configuration.
var DefaultSDConfig = SDConfig{
	Region:           "eu01",
	Port:             80,
	RefreshInterval:  model.Duration(60 * time.Second),
	HTTPClientConfig: config.DefaultHTTPClientConfig,
}

// init registers this SD mechanism with the discovery framework so it can
// be referenced from scrape configurations.
func init() {
	discovery.RegisterConfig(&SDConfig{})
}
// SDConfig is the configuration for STACKIT based service discovery.
type SDConfig struct {
	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`

	// Project is the STACKIT project whose servers are discovered.
	Project string `yaml:"project"`
	// RefreshInterval controls how often targets are re-fetched
	// (used as the refresh.Options.Interval below).
	RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"`
	// Port is attached to discovered target addresses — presumably as the
	// scrape port; confirm against the server discovery implementation.
	Port   int    `yaml:"port,omitempty"`
	Region string `yaml:"region,omitempty"`
	// Endpoint overrides the region-derived API endpoint when non-empty;
	// UnmarshalYAML requires at least one of Endpoint/Region to be set.
	Endpoint string `yaml:"endpoint,omitempty"`
	// Service-account credentials; the *Path variants presumably read the
	// same material from files — verify against the auth implementation.
	ServiceAccountKey     string `yaml:"service_account_key,omitempty"`
	PrivateKey            string `yaml:"private_key,omitempty"`
	ServiceAccountKeyPath string `yaml:"service_account_key_path,omitempty"`
	PrivateKeyPath        string `yaml:"private_key_path,omitempty"`
	CredentialsFilePath   string `yaml:"credentials_file_path,omitempty"`

	// For testing only
	tokenURL string
}
// NewDiscovererMetrics implements discovery.Config.
func (*SDConfig) NewDiscovererMetrics(_ prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics {
	return &stackitMetrics{
		refreshMetrics: rmi,
	}
}

// Name returns the name of the Config.
func (*SDConfig) Name() string { return "stackit" }

// NewDiscoverer returns a Discoverer for the Config.
func (c *SDConfig) NewDiscoverer(opts discovery.DiscovererOptions) (discovery.Discoverer, error) {
	return NewDiscovery(c, opts)
}

// refresher abstracts the actual API-polling implementation plugged into
// the periodic refresh loop.
type refresher interface {
	refresh(context.Context) ([]*targetgroup.Group, error)
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
	// Start from the defaults so that omitted fields keep default values.
	*c = DefaultSDConfig

	// The alias type drops this method and prevents infinite recursion
	// while decoding into the same struct.
	type plain SDConfig
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}

	// At least one way to derive the API endpoint must be configured.
	if c.Endpoint == "" && c.Region == "" {
		return errors.New("stackit_sd: endpoint and region missing")
	}

	if _, err := url.Parse(c.Endpoint); err != nil {
		return fmt.Errorf("stackit_sd: invalid endpoint %q: %w", c.Endpoint, err)
	}

	return c.HTTPClientConfig.Validate()
}
// SetDirectory joins any relative file paths with dir.
func (c *SDConfig) SetDirectory(dir string) {
	c.HTTPClientConfig.SetDirectory(dir)
}

// Discovery periodically performs STACKIT API requests. It implements
// the Discoverer interface.
type Discovery struct {
	*refresh.Discovery
}
// NewDiscovery returns a new Discovery which periodically refreshes its targets.
func NewDiscovery(conf *SDConfig, opts discovery.DiscovererOptions) (*refresh.Discovery, error) {
	// The metrics value is created by SDConfig.NewDiscovererMetrics, so it
	// must be of our concrete type; anything else is a programming error.
	m, ok := opts.Metrics.(*stackitMetrics)
	if !ok {
		return nil, errors.New("invalid discovery metrics type")
	}

	r, err := newRefresher(conf, opts.Logger)
	if err != nil {
		return nil, err
	}

	// Wrap the refresher in the generic periodic-refresh discovery loop.
	return refresh.NewDiscovery(
		refresh.Options{
			Logger:              opts.Logger,
			Mech:                "stackit",
			SetName:             opts.SetName,
			Interval:            time.Duration(conf.RefreshInterval),
			RefreshF:            r.refresh,
			MetricsInstantiator: m.refreshMetrics,
		},
	), nil
}

// newRefresher builds the concrete refresher; server discovery is
// currently the only implementation.
func newRefresher(conf *SDConfig, l *slog.Logger) (refresher, error) {
	return newServerDiscovery(conf, l)
}
# Copyright 2012 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.api_schema.response.compute.v2\
import quota_classes as classes_schema
from tempest.api_schema.response.compute.v2 import quotas as schema
from tempest.common import rest_client
from tempest import config
CONF = config.CONF
class QuotasClientJSON(rest_client.RestClient):
    """JSON client for the Nova ``os-quota-sets`` API."""

    def __init__(self, auth_provider):
        super(QuotasClientJSON, self).__init__(auth_provider)
        # Resolve the compute endpoint from the service catalog.
        self.service = CONF.compute.catalog_type

    def get_quota_set(self, tenant_id, user_id=None):
        """List the quota set for a tenant (optionally scoped to a user)."""
        url = 'os-quota-sets/%s' % str(tenant_id)
        if user_id:
            url += '?user_id=%s' % str(user_id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.quota_set, resp, body)
        return resp, body['quota_set']

    def get_default_quota_set(self, tenant_id):
        """List the default quota set for a tenant."""
        url = 'os-quota-sets/%s/defaults' % str(tenant_id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(schema.quota_set, resp, body)
        return resp, body['quota_set']

    def update_quota_set(self, tenant_id, user_id=None,
                         force=None, injected_file_content_bytes=None,
                         metadata_items=None, ram=None, floating_ips=None,
                         fixed_ips=None, key_pairs=None, instances=None,
                         security_group_rules=None, injected_files=None,
                         cores=None, injected_file_path_bytes=None,
                         security_groups=None):
        """Updates the tenant's quota limits for one or more resources.

        Only arguments that are not None are sent in the request body,
        matching the previous per-argument ``if x is not None`` chain.
        """
        # Collect every candidate quota value once, then filter out the
        # unset (None) ones — replaces thirteen copy-pasted if statements.
        candidates = {
            'force': force,
            'injected_file_content_bytes': injected_file_content_bytes,
            'metadata_items': metadata_items,
            'ram': ram,
            'floating_ips': floating_ips,
            'fixed_ips': fixed_ips,
            'key_pairs': key_pairs,
            'instances': instances,
            'security_group_rules': security_group_rules,
            'injected_files': injected_files,
            'cores': cores,
            'injected_file_path_bytes': injected_file_path_bytes,
            'security_groups': security_groups,
        }
        post_body = {key: value for key, value in candidates.items()
                     if value is not None}
        post_body = json.dumps({'quota_set': post_body})
        if user_id:
            resp, body = self.put('os-quota-sets/%s?user_id=%s' %
                                  (str(tenant_id), str(user_id)), post_body)
        else:
            resp, body = self.put('os-quota-sets/%s' % str(tenant_id),
                                  post_body)
        body = json.loads(body)
        self.validate_response(schema.quota_set_update, resp, body)
        return resp, body['quota_set']

    def delete_quota_set(self, tenant_id):
        """Delete the tenant's quota set."""
        resp, body = self.delete('os-quota-sets/%s' % str(tenant_id))
        self.validate_response(schema.delete_quota, resp, body)
        return resp, body
class QuotaClassesClientJSON(rest_client.RestClient):
    """JSON client for the Nova ``os-quota-class-sets`` API."""

    def __init__(self, auth_provider):
        super(QuotaClassesClientJSON, self).__init__(auth_provider)
        # Resolve the compute endpoint from the service catalog.
        self.service = CONF.compute.catalog_type

    def get_quota_class_set(self, quota_class_id):
        """List the quota class set for a quota class."""
        url = 'os-quota-class-sets/%s' % str(quota_class_id)
        resp, body = self.get(url)
        body = json.loads(body)
        self.validate_response(classes_schema.quota_set, resp, body)
        return resp, body['quota_class_set']

    def update_quota_class_set(self, quota_class_id, **kwargs):
        """
        Updates the quota class's limits for one or more resources.

        kwargs are passed through verbatim as the quota_class_set body.
        """
        post_body = json.dumps({'quota_class_set': kwargs})
        resp, body = self.put('os-quota-class-sets/%s' % str(quota_class_id),
                              post_body)
        body = json.loads(body)
        self.validate_response(classes_schema.quota_set_update, resp, body)
        return resp, body['quota_class_set']
from typing import Any, Dict
from uuid import uuid4
JSON = Dict[str, Any]
# class GroupUser:
#
# def __init__(self, id: str, login: str, name: str, status: str) -> None:
# self.id = id
# self.login = login
# self.name = name
# self.status = status
#
# class GroupUsers:
#
# def __init__(self, id: str, users: List[GroupUser]) -> None:
# self.id = id
# self.users = users
class Group:
    """
    Group model.

    A simple value object with an id (generated when not supplied), a
    mandatory name, free-form text and an optional member count.
    """

    def __init__(self, name: str, text: str, **kwargs) -> None:
        if not name:
            raise ValueError('Missing mandatory value for name')
        # Fall back to a generated UUID both when no 'id' is supplied AND
        # when an explicit None is passed (as Group.parse does for JSON
        # documents without an 'id' key).  The previous
        # kwargs.get('id', str(uuid4())) returned None in the latter case,
        # leaving parsed groups without an id.
        group_id = kwargs.get('id')
        self.id = group_id if group_id is not None else str(uuid4())
        self.name = name
        self.text = text or ''
        self.count = kwargs.get('count')

    def __repr__(self) -> str:
        return 'Group(id={!r}, name={!r}, text={!r}, count={!r})'.format(
            self.id, self.name, self.text, self.count)

    @classmethod
    def parse(cls, json: 'JSON') -> 'Group':
        """Build a Group from a JSON-like dict; missing count defaults to 0."""
        # Use cls() rather than Group() so subclasses parse into themselves.
        return cls(
            id=json.get('id', None),
            name=json.get('name', None),
            text=json.get('text', None),
            count=json.get('count', 0)
        )

    def tabular(self):
        """Return a plain-dict view of the group for tabular rendering."""
        return {
            'id': self.id,
            'name': self.name,
            'text': self.text,
            'count': self.count
        }
/* origin: FreeBSD /usr/src/lib/msun/src/e_asin.c */
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunSoft, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
/* asin(x)
* Method :
* Since asin(x) = x + x^3/6 + x^5*3/40 + x^7*15/336 + ...
* we approximate asin(x) on [0,0.5] by
* asin(x) = x + x*x^2*R(x^2)
* where
* R(x^2) is a rational approximation of (asin(x)-x)/x^3
* and its remez error is bounded by
* |(asin(x)-x)/x^3 - R(x^2)| < 2^(-58.75)
*
* For x in [0.5,1]
* asin(x) = pi/2-2*asin(sqrt((1-x)/2))
* Let y = (1-x), z = y/2, s := sqrt(z), and pio2_hi+pio2_lo=pi/2;
* then for x>0.98
* asin(x) = pi/2 - 2*(s+s*z*R(z))
* = pio2_hi - (2*(s+s*z*R(z)) - pio2_lo)
* For x<=0.98, let pio4_hi = pio2_hi/2, then
* f = hi part of s;
* c = sqrt(z) - f = (z-f*f)/(s+f) ...f+c=sqrt(z)
* and
* asin(x) = pi/2 - 2*(s+s*z*R(z))
* = pio4_hi+(pio4-2s)-(2s*z*R(z)-pio2_lo)
* = pio4_hi+(pio4-2f)-(2s*z*R(z)-(pio2_lo+2c))
*
* Special cases:
* if x is NaN, return x itself;
* if |x|>1, return NaN with invalid signal.
*
*/
use super::{fabs, get_high_word, get_low_word, sqrt, with_set_low_word};
const PIO2_HI: f64 = 1.57079632679489655800e+00; /* 0x3FF921FB, 0x54442D18 */
const PIO2_LO: f64 = 6.12323399573676603587e-17; /* 0x3C91A626, 0x33145C07 */
/* coefficients for R(x^2) */
const P_S0: f64 = 1.66666666666666657415e-01; /* 0x3FC55555, 0x55555555 */
const P_S1: f64 = -3.25565818622400915405e-01; /* 0xBFD4D612, 0x03EB6F7D */
const P_S2: f64 = 2.01212532134862925881e-01; /* 0x3FC9C155, 0x0E884455 */
const P_S3: f64 = -4.00555345006794114027e-02; /* 0xBFA48228, 0xB5688F3B */
const P_S4: f64 = 7.91534994289814532176e-04; /* 0x3F49EFE0, 0x7501B288 */
const P_S5: f64 = 3.47933107596021167570e-05; /* 0x3F023DE1, 0x0DFDF709 */
const Q_S1: f64 = -2.40339491173441421878e+00; /* 0xC0033A27, 0x1C8A2D4B */
const Q_S2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
const Q_S3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
const Q_S4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
/* Evaluate the rational Remez approximation R(z) = P(z)/Q(z) used by asin.
 * Both polynomials are evaluated by Horner's scheme; the loop-free form
 * below performs exactly the same operations in the same order as the
 * original nested expression, so results are bit-identical. */
fn comp_r(z: f64) -> f64 {
    // Numerator: z * (P_S0 + z*(P_S1 + z*(P_S2 + z*(P_S3 + z*(P_S4 + z*P_S5)))))
    let mut num = P_S5;
    num = P_S4 + z * num;
    num = P_S3 + z * num;
    num = P_S2 + z * num;
    num = P_S1 + z * num;
    num = P_S0 + z * num;
    let p = z * num;

    // Denominator: 1 + z*(Q_S1 + z*(Q_S2 + z*(Q_S3 + z*Q_S4)))
    let mut den = Q_S4;
    den = Q_S3 + z * den;
    den = Q_S2 + z * den;
    den = Q_S1 + z * den;
    let q = 1.0 + z * den;

    p / q
}
/// Arcsine (f64)
///
/// Computes the inverse sine (arc sine) of the argument `x`.
/// Arguments to asin must be in the range -1 to 1.
/// Returns values in radians, in the range of -pi/2 to pi/2.
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn asin(mut x: f64) -> f64 {
    let z: f64;
    let r: f64;
    let s: f64;
    let hx: u32;
    let ix: u32;

    // hx: high 32 bits of x; ix: the same with the sign bit masked off,
    // i.e. the high word of |x|, used for magnitude classification.
    hx = get_high_word(x);
    ix = hx & 0x7fffffff;
    /* |x| >= 1 or nan */
    if ix >= 0x3ff00000 {
        let lx: u32;
        lx = get_low_word(x);
        if ((ix - 0x3ff00000) | lx) == 0 {
            /* asin(1) = +-pi/2 with inexact */
            // 0x3870000000000000 is a tiny double (2^-120); adding it
            // forces the inexact flag while leaving the result +-pi/2.
            return x * PIO2_HI + f64::from_bits(0x3870000000000000);
        } else {
            // |x| > 1 or NaN: 0/0 yields NaN and raises invalid for |x| > 1.
            return 0.0 / (x - x);
        }
    }
    /* |x| < 0.5 */
    if ix < 0x3fe00000 {
        /* if 0x1p-1022 <= |x| < 0x1p-26, avoid raising underflow */
        if (0x00100000..0x3e500000).contains(&ix) {
            // asin(x) ~= x to double precision for such small x.
            return x;
        } else {
            // Polynomial correction: asin(x) = x + x^3 * R(x^2).
            return x + x * comp_r(x * x);
        }
    }
    /* 1 > |x| >= 0.5 */
    // Reduce via asin(x) = pi/2 - 2*asin(sqrt((1-|x|)/2)).
    z = (1.0 - fabs(x)) * 0.5;
    s = sqrt(z);
    r = comp_r(z);
    if ix >= 0x3fef3333 {
        /* if |x| > 0.975 */
        x = PIO2_HI - (2. * (s + s * r) - PIO2_LO);
    } else {
        let f: f64;
        let c: f64;
        /* f+c = sqrt(z) */
        // f is sqrt(z) with the low word zeroed; c corrects the truncation
        // so that f + c == sqrt(z) to full precision.
        f = with_set_low_word(s, 0);
        c = (z - f * f) / (s + f);
        x = 0.5 * PIO2_HI - (2.0 * s * r - (PIO2_LO - 2.0 * c) - (0.5 * PIO2_HI - 2.0 * f));
    }
    // Restore the sign of the original argument.
    if hx >> 31 != 0 { -x } else { x }
}
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import inspect
if sys.platform.startswith('java'):
from java.lang import Class
from java.util import List, Map
from robot.errors import DataError
from robot.variables import is_dict_var, is_list_var, is_scalar_var
from .argumentspec import ArgumentSpec
class _ArgumentParser(object):
    """Base class for building an ArgumentSpec from a handler source.

    Subclasses implement _get_arg_spec to extract the positional args,
    defaults, varargs and kwargs information from their specific source
    type (Python handler, Java signatures, textual argspec, ...).
    """

    def __init__(self, type='Keyword'):
        # 'type' is shown in error messages / spec metadata (e.g. 'Keyword').
        self._type = type

    def parse(self, source, name=None):
        return ArgumentSpec(name, self._type, *self._get_arg_spec(source))

    def _get_arg_spec(self, source):
        raise NotImplementedError
class PythonArgumentParser(_ArgumentParser):
    """Creates argument specs for Python keyword handlers via introspection."""

    def _get_arg_spec(self, handler):
        # NOTE(review): inspect.getargspec is deprecated and was removed in
        # Python 3.11; this code targets the Python 2 era interpreters.
        args, varargs, kwargs, defaults = inspect.getargspec(handler)
        if inspect.ismethod(handler):
            args = args[1:]    # drop 'self'
        defaults = list(defaults) if defaults else []
        return args, defaults, varargs, kwargs
class JavaArgumentParser(_ArgumentParser):
    """Builds argument specs for Java keywords from reflection signatures.

    Runs only on Jython (uses java.lang.Class, java.util.List/Map).  A Java
    keyword may have several overloaded signatures, which cannot be
    represented exactly, so the spec is approximated (see
    _multi_signature_arg_spec).
    """

    def _get_arg_spec(self, signatures):
        if not signatures:
            return self._no_signatures_arg_spec()
        elif len(signatures) == 1:
            return self._single_signature_arg_spec(signatures[0])
        else:
            return self._multi_signature_arg_spec(signatures)

    def _no_signatures_arg_spec(self):
        # Happens when a class has no public constructors
        return self._format_arg_spec()

    def _single_signature_arg_spec(self, signature):
        varargs, kwargs = self._get_varargs_and_kwargs_support(signature.args)
        # Positional count excludes the trailing varargs/kwargs parameters.
        positional = len(signature.args) - int(varargs) - int(kwargs)
        return self._format_arg_spec(positional, varargs=varargs, kwargs=kwargs)

    def _get_varargs_and_kwargs_support(self, args):
        # Returns (varargs, kwargs) flags: a trailing List/array parameter
        # enables varargs, a trailing Map enables kwargs, and a Map preceded
        # by a List/array enables both.
        if not args:
            return False, False
        if self._is_varargs_type(args[-1]):
            return True, False
        if not self._is_kwargs_type(args[-1]):
            return False, False
        if len(args) > 1 and self._is_varargs_type(args[-2]):
            return True, True
        return False, True

    def _is_varargs_type(self, arg):
        return arg is List or isinstance(arg, Class) and arg.isArray()

    def _is_kwargs_type(self, arg):
        return arg is Map

    def _multi_signature_arg_spec(self, signatures):
        # With overloads, approximate the spec: 'maxa' positional arguments
        # of which the last (maxa - mina) are treated as having defaults.
        mina = maxa = len(signatures[0].args)
        for sig in signatures[1:]:
            argc = len(sig.args)
            mina = min(argc, mina)
            maxa = max(argc, maxa)
        return self._format_arg_spec(maxa, maxa-mina)

    def _format_arg_spec(self, positional=0, defaults=0, varargs=False, kwargs=False):
        # Java reflection does not expose parameter names, so synthesize
        # placeholder names arg1, arg2, ...
        positional = ['arg%d' % (i+1) for i in range(positional)]
        defaults = [''] * defaults
        varargs = '*varargs' if varargs else None
        kwargs = '**kwargs' if kwargs else None
        # Java keywords cannot receive named arguments.
        supports_named = False
        return positional, defaults, varargs, kwargs, supports_named
class _ArgumentSpecParser(_ArgumentParser):
    """Base for parsers that build an ArgumentSpec from a textual argspec.

    Subclasses define how varargs/kwargs entries are recognized
    (_is_varargs/_is_kwargs) and how their markers are stripped
    (_format_varargs/_format_kwargs/_format_arg).
    """

    def parse(self, argspec, name=None):
        result = ArgumentSpec(name, self._type)
        for arg in argspec:
            # Kwargs, once seen, must be the very last entry.
            if result.kwargs:
                self._raise_invalid_spec('Only last argument can be kwargs.')
            if self._is_kwargs(arg):
                self._add_kwargs(arg, result)
                continue
            # Plain positional/default arguments may not follow varargs.
            if result.varargs:
                self._raise_invalid_spec('Positional argument after varargs.')
            if self._is_varargs(arg):
                self._add_varargs(arg, result)
                continue
            # 'name=value' marks an argument with a default value.
            if '=' in arg:
                self._add_arg_with_default(arg, result)
                continue
            # Once defaults start, all subsequent args must have defaults too.
            if result.defaults:
                self._raise_invalid_spec('Non-default argument after default '
                                         'arguments.')
            self._add_arg(arg, result)
        return result

    def _raise_invalid_spec(self, error):
        raise DataError('Invalid argument specification: %s' % error)

    def _is_kwargs(self, arg):
        raise NotImplementedError

    def _add_kwargs(self, kwargs, result):
        result.kwargs = self._format_kwargs(kwargs)

    def _format_kwargs(self, kwargs):
        raise NotImplementedError

    def _is_varargs(self, arg):
        raise NotImplementedError

    def _add_varargs(self, varargs, result):
        result.varargs = self._format_varargs(varargs)

    def _format_varargs(self, varargs):
        raise NotImplementedError

    def _add_arg_with_default(self, arg, result):
        # Split only on the first '=' so default values may contain '='.
        arg, default = arg.split('=', 1)
        self._add_arg(arg, result)
        result.defaults.append(default)

    def _add_arg(self, arg, result):
        result.positional.append(self._format_arg(arg))

    def _format_arg(self, arg):
        return arg
class DynamicArgumentParser(_ArgumentSpecParser):
    """Argument-spec parser for dynamic libraries using the Python-style
    '*varargs' / '**kwargs' markers in their textual argspecs."""

    def _is_kwargs(self, arg):
        # Kwargs entries carry a double-star prefix.
        return arg[:2] == '**'

    def _format_kwargs(self, kwargs):
        # Drop the leading '**'.
        return kwargs[2:]

    def _is_varargs(self, arg):
        # A single leading star (but not two) marks varargs.
        return arg[:1] == '*' and not self._is_kwargs(arg)

    def _format_varargs(self, varargs):
        # Drop the leading '*'.
        return varargs[1:]
class UserKeywordArgumentParser(_ArgumentSpecParser):
    """Parses user keyword argspecs given as Robot Framework variables.

    Scalars, list variables and dict variables are recognized via the
    is_*_var helpers; the [2:-1] slicing below strips the surrounding
    decoration (presumably '${...}', '@{...}' and '&{...}' respectively —
    confirm against robot.variables).
    """

    def _is_kwargs(self, arg):
        return is_dict_var(arg)

    def _is_varargs(self, arg):
        return is_list_var(arg)

    def _format_kwargs(self, kwargs):
        # Strip the two-character prefix and closing brace.
        return kwargs[2:-1]

    def _format_varargs(self, varargs):
        # Strip the two-character prefix and closing brace.
        return varargs[2:-1]

    def _format_arg(self, arg):
        if not is_scalar_var(arg):
            self._raise_invalid_spec("Invalid argument syntax '%s'." % arg)
        # Strip the two-character prefix and closing brace.
        return arg[2:-1]
#This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 2001 Al Riddoch (See the file COPYING for details).
from cyphesis.Thing import Thing
from atlas import *
from Vector3D import Vector3D
# bbox = 8,8,2.5
# bmedian = 7.5,7.5,2.5
# offset = SW corner = -0.5,-0.5,0
class Farmhouse_deco_1(Thing):
    """Farmhouse decoration: spawns wall and fence entities around itself.

    Each WALL_SPECS entry is ((x, y, z) corner offset, (dx, dy, dz) bbox
    size).  This replaces thirteen copy-pasted create blocks with a single
    data-driven loop; the segments and their order are unchanged.
    """

    # (corner offset, bounding-box size) for every wall/fence segment.
    # Gaps between consecutive segments along a side form the doorways.
    WALL_SPECS = (
        # South wall
        ((-0.5, -0.5, 0), (8, 0.2, 5)),
        # West wall with door
        ((-0.5, -0.5, 0), (0.2, 2, 5)),
        ((-0.5, 3.5, 0), (0.2, 12, 5)),
        # North wall with door
        ((-0.5, 15.3, 0), (4, 0.2, 5)),
        ((3.5, 15.3, 0), (12, 0.2, 5)),
        # East wall
        ((15.3, 7.5, 0), (0.2, 8, 5)),
        # Interior wall
        ((7.3, -0.5, 0), (0.2, 14, 5)),
        # Interior wall with door
        ((7.3, 7.5, 0), (4.2, 0.2, 5)),
        ((11.5, 7.5, 0), (4, 0.2, 5)),
        # South fences
        ((7.5, -0.5, 0), (2, 0.1, 5)),
        ((11.5, -0.5, 0), (4, 0.1, 5)),
        # East fences
        ((15.4, -0.5, 0), (0.1, 3, 5)),
        ((15.4, 4.5, 0), (0.1, 3, 5)),
    )

    def setup_operation(self, op):
        """Return create operations for all wall segments of the farmhouse."""
        ret = Oplist()
        for corner, size in self.WALL_SPECS:
            loc = Location(self, Vector3D(*corner))
            loc.bbox = Vector3D(*size)
            ret.append(Operation("create",
                                 Entity(name='wall', parents=['wall'],
                                        location=loc),
                                 to=self))
        return ret
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_PARTIALLY_REVIVED_OBJECTS_H_
#define TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_PARTIALLY_REVIVED_OBJECTS_H_
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/asset.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/constant.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/restored_resource_revival_state.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/revived_objects.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/tf_concrete_function_revival_state.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/tf_signature_def_function_revival_state.h"
#include "tensorflow/c/experimental/saved_model/core/revived_types/variable.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
namespace tensorflow {
// Container for objects during the revival step in SavedModel's loading.
// Notably, resources and functions can be in a state where they reference
// other resources/functions that have not been constructed yet. We collect
// *all* objects in a partially valid state here, then properly initialize
// resources and functions. Implementation-wise, PartiallyRevivedObjects
// contains maps keyed by the node number of the SavedObjectGraph, and map to an
// object of the corresponding type. So, if node 2 in the object graph is a
// variable, PartiallyRevivedObjects.variables[2] exists, and corresponds to a
// tensorflow::Variable object. The only exception to this is the
// "signatures_map", which is keyed by the "signature" key
// (https://github.com/tensorflow/tensorflow/blob/372918decee7f558b3c194b04f77c20dcc679a31/tensorflow/core/protobuf/meta_graph.proto#L89),
// and maps to the SignatureDefFunction node in the SavedObjectGraph.
struct PartiallyRevivedObjects {
  // All maps below are keyed by the object's node id in the
  // SavedObjectGraph (see the class comment above).
  gtl::FlatMap<int, std::unique_ptr<Variable>> variables;
  gtl::FlatMap<int, std::unique_ptr<Asset>> assets;
  gtl::FlatMap<int, std::unique_ptr<Constant>> constants;
  gtl::FlatMap<int, TFConcreteFunctionRevivalState> concrete_functions;
  gtl::FlatMap<int, TFSignatureDefFunctionRevivalState> signature_def_functions;
  gtl::FlatMap<int, RestoredResourceRevivalState> restored_resources;
  // Exception: keyed by the "signature" key rather than a node id, and maps
  // to the SignatureDefFunction node in the SavedObjectGraph.
  gtl::FlatMap<std::string, int> signatures_map;

  // Finishes initialization of all partially-revived objects and moves the
  // fully-built results into `revived`.
  absl::Status Build(ImmediateExecutionContext* ctx,
                     const SavedObjectGraph& obj_graph,
                     RevivedObjects* revived);
};
} // namespace tensorflow
#endif // TENSORFLOW_C_EXPERIMENTAL_SAVED_MODEL_CORE_REVIVED_TYPES_PARTIALLY_REVIVED_OBJECTS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/c/experimental/saved_model/core/revived_types/partially_revived_objects.h |
##############################################################################
#
# PROJECT: ownCloud v1.0
# LICENSE: See LICENSE in the top level directory
#
##############################################################################
import collections
import os
import polib
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-o", "--output", dest="output",
help="Directory for localized output", default="../Shared/installer/nightly_localized.nsi")
parser.add_option("-p", "--podir", dest="podir",
help="Directory containing PO files", default="../Shared/installer/locale/")
parser.add_option("-l", "--lang", dest="lang",
help="Default language of the NSI", default="English" )
(options, args) = parser.parse_args()
# Define a dict to convert locale names to language names
localeToName = {
"af" : "Afrikaans",
"sq" : "Albanian",
"ar" : "Arabic",
"hy" : "Armenian",
"eu" : "Basque",
"be" : "Belarusian",
"bs" : "Bosnian",
"br" : "Breton",
"bg" : "Bulgarian",
"ca" : "Catalan",
"bem" : "Cibemba",
"hr" : "Croatian",
"cs" : "Czech",
"da" : "Danish",
"nl" : "Dutch",
"efi" : "Efik",
"en" : "English",
"eo" : "Esperanto",
"et" : "Estonian",
"fa" : "Farsi",
"fi" : "Finnish",
"fr" : "French",
"gl" : "Galician",
"ka" : "Georgian",
"de" : "German",
"el" : "Greek",
"he" : "Hebrew",
"hi" : "Hindi",
"hu" : "Hungarian",
"is" : "Icelandic",
"ig" : "Igbo",
"id" : "Indonesian",
"ga" : "Irish",
"it" : "Italian",
"ja" : "Japanese",
"km" : "Khmer",
"ko" : "Korean",
"ku" : "Kurdish",
"lv" : "Latvian",
"lt" : "Lithuanian",
"lb" : "Luxembourgish",
"mk" : "Macedonian",
"mg" : "Malagasy",
"ms" : "Malay",
"mn" : "Mongolian",
"nb" : "Norwegian",
"nn" : "NorwegianNynorsk",
"ps" : "Pashto",
"pl" : "Polish",
"pt" : "Portuguese",
"pt_BR" : "PortugueseBR",
"ro" : "Romanian",
"ru" : "Russian",
"sr" : "Serbian",
"sr_sp" : "SerbianLatin",
"st" : "Sesotho",
"sn" : "Shona",
"zh_CN" : "SimpChinese",
"sk" : "Slovak",
"sl" : "Slovenian",
"es" : "Spanish",
"es_AR" : "SpanishInternational",
"sw" : "Swahili",
"sv" : "Swedish",
"ta" : "Tamil",
"th" : "Thai",
"zh_HK" : "TradChinese",
"tr" : "Turkish",
"tw" : "Twi",
"uk" : "Ukrainian",
"ug" : "Uyghur",
"uz" : "Uzbek",
"ca@valencia" : "Valencian",
"vi" : "Vietnamese",
"cy" : "Welsh",
"yo" : "Yoruba",
"zu" : "Zulu",
}
def escapeNSIS(st):
    """Escape a translated string for embedding in an NSIS script.

    The replacements are applied strictly in order: backslashes first (so
    later escape sequences are not re-escaped), then tab/CR/LF/quote, and
    finally a cleanup pass collapsing '$$\\' back to '$\\'.
    """
    replacements = (
        ('\\', '$\\\\'),
        ('\t', '$\\t'),
        ('\r', '\\r'),
        ('\n', '\\n'),
        ('"', '$\\"'),
        ('$$\\', '$\\'),
    )
    for old, new in replacements:
        st = st.replace(old, new)
    return st
# Cache of all translations: { language name : { NSI label : escaped text } }
translationCache = {}
# The purpose of this loop is to go to the podir scanning for PO files for each locale name.
# Once we've found a PO file, we use polib to read every translated entry.
# Using this, for each language, we store a dict of entries - { nsilabel (comment) : translation (msgstr) }
# For untranslated entries, we use msgid instead of msgstr (i.e. default English string)
for root,dirs,files in os.walk(options.podir):
    for file in files:
        filename,ext = os.path.splitext(file)
        if ext == ".po":
            # Valid locale filename (fr.po, de.po etc)?
            if filename in localeToName:
                language = localeToName[filename]
                # OrderedDict keeps labels in PO-file order in the output.
                translationCache[language] = collections.OrderedDict()
                po = polib.pofile(os.path.join(root,file))
                for entry in po.translated_entries():
                    # Loop through all our labels and add translation (each translation may have multiple labels)
                    for label in entry.comment.split():
                        translationCache[language][label] = escapeNSIS(entry.msgstr)
                # For untranslated strings, let's add the English entry
                for entry in po.untranslated_entries():
                    for label in entry.comment.split():
                        print("Warning: Label '%s' for language '%s' remains untranslated"%(label,language))
                        translationCache[language][label] = escapeNSIS(entry.msgid)
def tostr(obj):
    """Encode unicode objects as UTF-8 byte strings; pass others through.

    Python 2 only: relies on the 'unicode' builtin.
    """
    if type(obj) == unicode:
        return obj.encode("utf-8")
    else:
        return obj
# Accumulators for the two shared output files (languages.nsh, declarations.nsh).
NSILanguages = []
NSIDeclarations = []
# file header
NSILanguages.append( tostr('; Auto-generated - do not modify\n') )
NSIDeclarations.append( tostr('; Auto-generated - do not modify\n') )
# Loop through the languages and generate one .nsh file for each language.
lineNo = 1  # NOTE(review): appears unused
for language,translations in translationCache.iteritems():
    NSINewLines = []
    NSINewLines.append( tostr('# Auto-generated - do not modify\n') )
    count = 0
    # if the language isn't the default, we add our MUI_LANGUAGE macro
    if language.upper() != options.lang.upper():
        NSILanguages.append( tostr('!insertmacro MUI_LANGUAGE "%s"\n'%language) )
    # For every translation we grabbed from the .po, let's add our StrCpy command
    for label,value in translations.iteritems():
        NSINewLines.append( tostr('StrCpy $%s "%s"\n' % (label,value)) )
        # Variable declarations are emitted once, from the default language.
        if language.upper() == options.lang.upper():
            NSIDeclarations.append( tostr('Var %s\n' % label) )
        count += 1
    NSIWorkingFile = open('%s/%s.nsh' % (options.output, language),"w")
    NSIWorkingFile.writelines(NSINewLines)
    NSIWorkingFile.close()
    print ( "%i translations merged for language '%s'"%(count,language) )
# Finally, let's write languages.nsh and declarations.nsh
NSIWorkingFile = open('%s/languages.nsh' % options.output,"w")
NSIWorkingFile.writelines(NSILanguages)
NSIWorkingFile.close()
NSIWorkingFile = open('%s/declarations.nsh' % options.output,"w")
NSIWorkingFile.writelines(NSIDeclarations)
NSIWorkingFile.close()
print ( "NSI Localization Operation Complete" )
/* To learn more about this file see: https://angular.io/config/tsconfig. */
{
"compileOnSave": false,
"compilerOptions": {
"outDir": "./dist/out-tsc",
"strict": true,
"noImplicitOverride": true,
"noPropertyAccessFromIndexSignature": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"skipLibCheck": true,
"esModuleInterop": true,
"sourceMap": true,
"declaration": false,
"experimentalDecorators": true,
"moduleResolution": "bundler",
"importHelpers": true,
"target": "ES2022",
"module": "ES2022",
"useDefineForClassFields": false,
"lib": ["ES2022", "dom"]
},
"angularCompilerOptions": {
"enableI18nLegacyMessageIdFormat": false,
"strictInjectionParameters": true,
"strictInputAccessModifiers": true,
"strictTemplates": true
}
} | json | github | https://github.com/angular/angular | integration/platform-server-hydration/tsconfig.json |
#! /usr/bin/env python
# This script translates a list of arguments into one value specified by a rule file. Usage:
# translate ruleFilename key1 key2 ...
# It prints out the value corresponds to [key1, key2, ...] from a dictionary read from ruleFilename.
# To see how the dictionary is generated, see readRules function.
from sys import exit, argv
def processOneLine(aLine, level_indicator="+", key_separator=":", commentSymbol="#"):
    """
    Parse one rule line into [level, keys_list, value].

    level is the number of leading level_indicator characters, keys_list holds
    every key_separator-delimited component except the last (stripped), and
    value is the last component (stripped).  Returns [] for blank lines and
    pure comments.  Exits the process when no key_separator is present.
    """
    # take care of comments:
    if commentSymbol in aLine:
        aLine = aLine[:aLine.index(commentSymbol)].strip()
    # if it's an empty line:
    aLine = aLine.strip()
    if aLine == "":
        return []
    # check if syntax is correct:
    if key_separator not in aLine:
        print("translate.processOneLine error: key-value separator "+key_separator+" not included in the line \n"+aLine)
        exit(-1)
    # count and drop the leading run of level indicators (idiomatic
    # replacement for the original manual index loop)
    remainder = aLine.lstrip(level_indicator)
    level = len(aLine) - len(remainder)
    aLine = remainder
    # separate key and value
    components = aLine.split(key_separator)
    keys_list = [x.strip() for x in components[:-1]]
    value = components[-1].strip()
    # finally...
    return [level, keys_list, value]
def readRules(buffer, level_indicator="+", key_separator=":", commentSymbol="#"):
    """
    Build the translation rule dictionary from the lines in buffer.

    Each line becomes one entry: all key_separator-delimited components but
    the last form the (tuple) key, the last one is the value.  Leading
    level_indicator characters let a line inherit that many keys from the
    accumulated keys of previous lines.  For example:
        z : 1
        + a : 2
        ++ b : 3
        + d: 4
    produces:
        ("z",): "1"
        ("z", "a"): "2"
        ("z", "a", "b"): "3"
        ("z", "d"): "4"
    A line requesting more inherited keys than are available is an error
    and exits the process.
    """
    D = {}
    accumulated_keys = []
    for aLine in buffer:
        # Bug fix: forward the custom separators instead of silently using
        # processOneLine's defaults.
        tmp_result = processOneLine(aLine, level_indicator, key_separator, commentSymbol)
        if not tmp_result:
            continue
        level, keys, value = tmp_result
        if level > len(accumulated_keys):
            print("translate.readRules error: too many "+level_indicator+" signs in the line\n"+aLine)
            exit(-1)
        else:
            accumulated_keys = accumulated_keys[:level]
        accumulated_keys.extend(keys)
        D[tuple(accumulated_keys)] = value
    return D
def translate(ruleFilename, keys_list):
    """
    Translate keys_list into the space-joined values of all its key prefixes,
    looked up in the rule dictionary built by readRules.

    NOTE: despite its name, ruleFilename must be an iterable of rule lines
    (e.g. the result of readlines()) — that is what readRules expects and
    what __main__ passes in.  The returned string keeps a leading space.
    """
    D = readRules(ruleFilename)
    # Look up every prefix of keys_list: (k1,), (k1, k2), ...
    return "".join(" " + D[tuple(keys_list[:ii + 1])] for ii in range(len(keys_list)))
if __name__ == '__main__':
    if len(argv) < 3:
        print("Usage: translate ruleFilename key1 key2 ...")
        exit(-1)
    else:
        # Bug fix: `file()` only exists in Python 2; use open() and close
        # the handle deterministically.
        with open(argv[1]) as rule_file:
            print(translate(rule_file.readlines(), argv[2:]))
from __future__ import absolute_import
import re
from .base import Filter
# not all of these agents are guaranteed to execute JavaScript, but to avoid
# overhead of identifying which ones do, and which ones will over time we simply
# target all of the major ones
# One alternation of crawler markers, compiled case-insensitively (re.I).
# It is applied with .search() (see WebCrawlersFilter.test), so each pattern
# may match anywhere inside the User-Agent string.
CRAWLERS = re.compile(r'|'.join((
    # various Google services
    r'AdsBot',
    # Google Adsense
    r'Mediapartners',
    # Google+ and Google web search
    r'Google',
    # Bing search
    r'BingBot',
    # Baidu search
    r'Baiduspider',
    # Yahoo
    r'Slurp',
    # Sogou
    r'Sogou',
    # facebook
    r'facebook',
    # Alexa
    r'ia_archiver',
    # Generic bot
    r'bot[\/\s\)\;]',
    # Generic spider
    r'spider[\/\s\)\;]',
)), re.I)
class WebCrawlersFilter(Filter):
    """Inbound filter that drops events whose User-Agent matches a known crawler."""
    id = 'web-crawlers'
    name = 'Filter out known web crawlers'
    description = 'Some crawlers may execute pages in incompatible ways which then cause errors that are unlikely to be seen by a normal user.'
    default = True
    def get_user_agent(self, data):
        """Return the request's User-Agent header value, or '' when unavailable.

        Assumes data['sentry.interfaces.Http']['headers'] is an iterable of
        (key, value) pairs — TODO confirm against the event schema.
        """
        try:
            for key, value in data['sentry.interfaces.Http']['headers']:
                if key.lower() == 'user-agent':
                    return value
        except LookupError:
            return ''
        # Bug fix: previously fell through and returned None implicitly when
        # no User-Agent header was present; return '' so callers always get
        # a string (both values are falsy, so test() behaves the same).
        return ''
    def test(self, data):
        """Return True when the event should be filtered out as crawler traffic."""
        # TODO(dcramer): we could also look at UA parser and use the 'Spider'
        # device type
        user_agent = self.get_user_agent(data)
        if not user_agent:
            return False
        return bool(CRAWLERS.search(user_agent))
## Copyright (C) 2017 Oscar Diaz Barriga
## This file is part of Comp-Process-STPatterns.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
def _normalize(value):
    """Normalize one cell into the pattern vocabulary: drop spaces, rewrite
    the unicode '≥' as '>=' and use ':' instead of '-' for ranges."""
    return str(value).replace(" ", "").replace("≥", ">=").replace("-", ":")
i = 0
#region, boya_temp, boya_salinidad, est_temp, est_pto_rocio, est_presion, caudal
cad = ""
for row in in_data:
    # Columns 0..9 in order: boya_temp, boya_salinidad, estac_temp,
    # estac_pto_rocio, est_presion, est_presion_est_m, est_veloc_viento_m,
    # est_temp_max, est_temp_min, caudal.  The duplicated per-column
    # replace() chains are now a single helper.
    fields = tuple(_normalize(row[idx]) for idx in range(10))
    tmp = "boya-temp_%s boya-salinidad_%s estac-temp_%s estac-pto-rocio_%s est_presion_%s est_presion_est_m_%s est_veloc_viento_m_%s est_temp_max_%s est_temp_min_%s caudal_%s" % fields
    # Group 14 rows per sequence ("-1" separates itemsets, "-1 -2" ends a
    # sequence).  NOTE(review): the final group never receives the "-1 -2"
    # terminator and cad starts with a leading space — preserved as-is,
    # confirm against the expected SPM input format.
    if i <= 13:
        cad = cad + " -1 " + tmp
        i = i + 1
    else:
        cad = cad + " -1 -2\n" + tmp
        i = 1
print(cad)
#array_data = row.split(",")[0]
out_data = in_data
# Copyright (C) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import logging
from webkitpy.common.memoized import memoized
_log = logging.getLogger(__name__)
# FIXME: Should this function be somewhere more general?
def _invert_dictionary(dictionary):
inverted_dictionary = {}
for key, value in dictionary.items():
if inverted_dictionary.get(value):
inverted_dictionary[value].append(key)
else:
inverted_dictionary[value] = [key]
return inverted_dictionary
class BaselineOptimizer(object):
    """Consolidates layout-test baseline files into the smallest set of
    directories that still gives every port the same effective baseline.
    SCM adds/deletes are executed immediately, or — when skip_scm_commands
    is set — accumulated in _files_to_add/_files_to_delete for the caller.
    """
    ROOT_LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
    def __init__(self, host, port, port_names, skip_scm_commands):
        # port is the default port (used for virtual-suite lookups);
        # port_names lists every port whose fallback path is considered.
        self._filesystem = host.filesystem
        self._skip_scm_commands = skip_scm_commands
        self._files_to_delete = []
        self._files_to_add = []
        self._scm = host.scm()
        self._default_port = port
        self._ports = {}
        for port_name in port_names:
            self._ports[port_name] = host.port_factory.get(port_name)
        self._webkit_base = port.webkit_base()
        self._layout_tests_dir = port.layout_tests_dir()
        # Only used by unittests.
        self.new_results_by_directory = []
    def _baseline_root(self, baseline_name):
        """Return the checkout-relative root baseline directory, including the
        virtual-suite subdirectory when baseline_name belongs to one."""
        virtual_suite = self._virtual_suite(baseline_name)
        if virtual_suite:
            return self._filesystem.join(self.ROOT_LAYOUT_TESTS_DIRECTORY, virtual_suite.name)
        return self.ROOT_LAYOUT_TESTS_DIRECTORY
    def _baseline_search_path(self, port, baseline_name):
        """Return port's baseline fallback directories (absolute), using the
        virtual search path for virtual-suite baselines."""
        virtual_suite = self._virtual_suite(baseline_name)
        if virtual_suite:
            return port.virtual_baseline_search_path(baseline_name)
        return port.baseline_search_path()
    def _virtual_suite(self, baseline_name):
        """Return the virtual test suite owning baseline_name, or None."""
        return self._default_port.lookup_virtual_suite(baseline_name)
    def _virtual_base(self, baseline_name):
        """Return the non-virtual baseline name backing a virtual one, or None."""
        return self._default_port.lookup_virtual_test_base(baseline_name)
    def _relative_baseline_search_paths(self, port, baseline_name):
        """Return port's fallback directories relative to the checkout root,
        with the root baseline directory appended as the final fallback."""
        baseline_search_path = self._baseline_search_path(port, baseline_name)
        baseline_root = self._baseline_root(baseline_name)
        relative_paths = [self._filesystem.relpath(path, self._webkit_base) for path in baseline_search_path]
        return relative_paths + [baseline_root]
    def _join_directory(self, directory, baseline_name):
        """Return the absolute path of baseline_name's file inside directory."""
        # This code is complicated because both the directory name and the baseline_name have the virtual
        # test suite in the name and the virtual baseline name is not a strict superset of the non-virtual name.
        # For example, virtual/gpu/fast/canvas/foo-expected.png corresponds to fast/canvas/foo-expected.png and
        # the baseline directories are like platform/mac/virtual/gpu/fast/canvas. So, to get the path
        # to the baseline in the platform directory, we need to append just foo-expected.png to the directory.
        virtual_suite = self._virtual_suite(baseline_name)
        if virtual_suite:
            baseline_name_without_virtual = baseline_name[len(virtual_suite.name) + 1:]
        else:
            baseline_name_without_virtual = baseline_name
        return self._filesystem.join(self._scm.checkout_root, directory, baseline_name_without_virtual)
    def read_results_by_directory(self, baseline_name):
        """Return {checkout-relative directory: SHA-1} for every copy of
        baseline_name found along any port's fallback path."""
        results_by_directory = {}
        # NOTE(review): relies on the Python 2 builtin `reduce` (no
        # functools import in this file) — confirm before porting to Py3.
        directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port, baseline_name) for port in self._ports.values()]))
        for directory in directories:
            path = self._join_directory(directory, baseline_name)
            if self._filesystem.exists(path):
                results_by_directory[directory] = self._filesystem.sha1(path)
        return results_by_directory
    def _results_by_port_name(self, results_by_directory, baseline_name):
        """Return {port name: SHA-1 of the baseline that port would pick up},
        i.e. the first hit along each port's fallback path."""
        results_by_port_name = {}
        for port_name, port in self._ports.items():
            for directory in self._relative_baseline_search_paths(port, baseline_name):
                if directory in results_by_directory:
                    results_by_port_name[port_name] = results_by_directory[directory]
                    break
        return results_by_port_name
    @memoized
    def _directories_immediately_preceding_root(self, baseline_name):
        """Return the set of directories that fall back directly to the root
        baseline directory (each port's last platform directory)."""
        directories = set()
        for port in self._ports.values():
            directory = self._filesystem.relpath(self._baseline_search_path(port, baseline_name)[-1], self._webkit_base)
            directories.add(directory)
        return directories
    def _optimize_result_for_root(self, new_results_by_directory, baseline_name):
        """Mutate new_results_by_directory: promote a baseline shared by all
        pre-root directories up to the root, or drop an unused root baseline."""
        # The root directory (i.e. LayoutTests) is the only one that doesn't correspond
        # to a specific platform. As such, it's the only one where the baseline in fallback directories
        # immediately before it can be promoted up, i.e. if win and mac
        # have the same baseline, then it can be promoted up to be the LayoutTests baseline.
        # All other baselines can only be removed if they're redundant with a baseline earlier
        # in the fallback order. They can never promoted up.
        directories_immediately_preceding_root = self._directories_immediately_preceding_root(baseline_name)
        shared_result = None
        root_baseline_unused = False
        for directory in directories_immediately_preceding_root:
            this_result = new_results_by_directory.get(directory)
            # If any of these directories don't have a baseline, there's no optimization we can do.
            if not this_result:
                return
            if not shared_result:
                shared_result = this_result
            elif shared_result != this_result:
                root_baseline_unused = True
        baseline_root = self._baseline_root(baseline_name)
        # The root baseline is unused if all the directories immediately preceding the root
        # have a baseline, but have different baselines, so the baselines can't be promoted up.
        if root_baseline_unused:
            if baseline_root in new_results_by_directory:
                del new_results_by_directory[baseline_root]
            return
        new_results_by_directory[baseline_root] = shared_result
        for directory in directories_immediately_preceding_root:
            del new_results_by_directory[directory]
    def _find_optimal_result_placement(self, baseline_name):
        """Return (current, optimized) directory->SHA-1 maps for baseline_name."""
        results_by_directory = self.read_results_by_directory(baseline_name)
        results_by_port_name = self._results_by_port_name(results_by_directory, baseline_name)
        port_names_by_result = _invert_dictionary(results_by_port_name)
        new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result, baseline_name)
        self._optimize_result_for_root(new_results_by_directory, baseline_name)
        return results_by_directory, new_results_by_directory
    def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result, baseline_name):
        """Return a copy of results_by_directory with baselines dropped when a
        directory earlier in some port's fallback order holds the same result."""
        new_results_by_directory = copy.copy(results_by_directory)
        for port_name, port in self._ports.items():
            current_result = results_by_port_name.get(port_name)
            # This happens if we're missing baselines for a port.
            if not current_result:
                continue;
            fallback_path = self._relative_baseline_search_paths(port, baseline_name)
            current_index, current_directory = self._find_in_fallbackpath(fallback_path, current_result, new_results_by_directory)
            for index in range(current_index + 1, len(fallback_path)):
                new_directory = fallback_path[index]
                if not new_directory in new_results_by_directory:
                    # No result for this baseline in this directory.
                    continue
                elif new_results_by_directory[new_directory] == current_result:
                    # Result for new_directory are redundant with the result earlier in the fallback order.
                    if current_directory in new_results_by_directory:
                        del new_results_by_directory[current_directory]
                else:
                    # The new_directory contains a different result, so stop trying to push results up.
                    break
        return new_results_by_directory
    def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
        """Return (index, directory) of the first fallback entry holding
        current_result; asserts if the result is not on the path."""
        for index, directory in enumerate(fallback_path):
            if directory in results_by_directory and (results_by_directory[directory] == current_result):
                return index, directory
        assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)
    def _platform(self, filename):
        """Return the platform directory name a baseline path belongs to,
        or '(generic)' when it is not under a platform directory."""
        platform_dir = self.ROOT_LAYOUT_TESTS_DIRECTORY + self._filesystem.sep + 'platform' + self._filesystem.sep
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        return '(generic)'
    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
        """Apply the optimized placement on disk: delete baselines no longer
        wanted and write the new copies, via SCM or the deferred lists."""
        data_for_result = {}
        for directory, result in results_by_directory.items():
            if not result in data_for_result:
                source = self._join_directory(directory, baseline_name)
                data_for_result[result] = self._filesystem.read_binary_file(source)
        scm_files = []
        fs_files = []
        for directory, result in results_by_directory.items():
            if new_results_by_directory.get(directory) != result:
                file_name = self._join_directory(directory, baseline_name)
                if self._scm.exists(file_name):
                    scm_files.append(file_name)
                else:
                    fs_files.append(file_name)
        if scm_files or fs_files:
            if scm_files:
                _log.debug("    Deleting (SCM):")
                for platform_dir in sorted(self._platform(filename) for filename in scm_files):
                    _log.debug("      " + platform_dir)
                if self._skip_scm_commands:
                    self._files_to_delete.extend(scm_files)
                else:
                    self._scm.delete_list(scm_files)
            if fs_files:
                _log.debug("    Deleting (file system):")
                for platform_dir in sorted(self._platform(filename) for filename in fs_files):
                    _log.debug("      " + platform_dir)
                for filename in fs_files:
                    self._filesystem.remove(filename)
        else:
            _log.debug("    (Nothing to delete)")
        file_names = []
        for directory, result in new_results_by_directory.items():
            if results_by_directory.get(directory) != result:
                destination = self._join_directory(directory, baseline_name)
                self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
                self._filesystem.write_binary_file(destination, data_for_result[result])
                file_names.append(destination)
        if file_names:
            _log.debug("    Adding:")
            for platform_dir in sorted(self._platform(filename) for filename in file_names):
                _log.debug("      " + platform_dir)
            if self._skip_scm_commands:
                # Have adds win over deletes.
                self._files_to_delete = list(set(self._files_to_delete) - set(file_names))
                self._files_to_add.extend(file_names)
            else:
                self._scm.add_list(file_names)
        else:
            _log.debug("    (Nothing to add)")
    def write_by_directory(self, results_by_directory, writer, indent):
        """Log each directory's platform name with the first six hex digits
        of its baseline hash, via the supplied writer callable."""
        for path in sorted(results_by_directory):
            writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))
    def _optimize_subtree(self, baseline_name):
        """Optimize one fallback graph for baseline_name; returns False only
        when the post-optimization sanity check fails."""
        basename = self._filesystem.basename(baseline_name)
        results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
        if new_results_by_directory == results_by_directory:
            if new_results_by_directory:
                _log.debug("  %s: (already optimal)" % basename)
                self.write_by_directory(results_by_directory, _log.debug, "    ")
            else:
                _log.debug("  %s: (no baselines found)" % basename)
            # This is just used for unittests. Intentionally set it to the old data if we don't modify anything.
            self.new_results_by_directory.append(results_by_directory)
            return True
        if self._results_by_port_name(results_by_directory, baseline_name) != self._results_by_port_name(new_results_by_directory, baseline_name):
            # This really should never happen. Just a sanity check to make sure the script fails in the case of bugs
            # instead of committing incorrect baselines.
            _log.error("  %s: optimization failed" % basename)
            self.write_by_directory(results_by_directory, _log.warning, "      ")
            return False
        _log.debug("  %s:" % basename)
        _log.debug("    Before: ")
        self.write_by_directory(results_by_directory, _log.debug, "      ")
        _log.debug("    After: ")
        self.write_by_directory(new_results_by_directory, _log.debug, "      ")
        self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
        return True
    def _optimize_virtual_root(self, baseline_name, non_virtual_baseline_name):
        """Delete the virtual-root baseline when it duplicates the result every
        port would already get from the non-virtual fallback path."""
        virtual_root_expected_baseline_path = self._filesystem.join(self._layout_tests_dir, baseline_name)
        if not self._filesystem.exists(virtual_root_expected_baseline_path):
            return
        root_sha1 = self._filesystem.sha1(virtual_root_expected_baseline_path)
        results_by_directory = self.read_results_by_directory(non_virtual_baseline_name)
        # See if all the immediate predecessors of the virtual root have the same expected result.
        for port in self._ports.values():
            directories = self._relative_baseline_search_paths(port, non_virtual_baseline_name)
            for directory in directories:
                if directory not in results_by_directory:
                    continue
                if results_by_directory[directory] != root_sha1:
                    return
                break
        _log.debug("Deleting redundant virtual root expected result.")
        if self._skip_scm_commands and virtual_root_expected_baseline_path in self._files_to_add:
            self._files_to_add.remove(virtual_root_expected_baseline_path)
        if self._scm.exists(virtual_root_expected_baseline_path):
            _log.debug("    Deleting (SCM): " + virtual_root_expected_baseline_path)
            if self._skip_scm_commands:
                self._files_to_delete.append(virtual_root_expected_baseline_path)
            else:
                self._scm.delete(virtual_root_expected_baseline_path)
        else:
            _log.debug("    Deleting (file system): " + virtual_root_expected_baseline_path)
            self._filesystem.remove(virtual_root_expected_baseline_path)
    def optimize(self, baseline_name):
        """Optimize every fallback graph for baseline_name.
        Returns (success, files_to_delete, files_to_add); the two lists are
        only populated when skip_scm_commands was requested."""
        # The virtual fallback path is the same as the non-virtual one tacked on to the bottom of the non-virtual path.
        # See https://docs.google.com/a/chromium.org/drawings/d/1eGdsIKzJ2dxDDBbUaIABrN4aMLD1bqJTfyxNGZsTdmg/edit for
        # a visual representation of this.
        #
        # So, we can optimize the virtual path, then the virtual root and then the regular path.
        self._files_to_delete = []
        self._files_to_add = []
        _log.debug("Optimizing regular fallback path.")
        result = self._optimize_subtree(baseline_name)
        non_virtual_baseline_name = self._virtual_base(baseline_name)
        if not non_virtual_baseline_name:
            return result, self._files_to_delete, self._files_to_add
        self._optimize_virtual_root(baseline_name, non_virtual_baseline_name)
        _log.debug("Optimizing non-virtual fallback path.")
        result |= self._optimize_subtree(non_virtual_baseline_name)
        return result, self._files_to_delete, self._files_to_add
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js
package runtime
// resetMemoryDataView signals the JS front-end that WebAssembly's memory.grow instruction has been used.
// This allows the front-end to replace the old DataView object with a new one.
//
// There is no Go body: the implementation is supplied by the JavaScript
// host environment through the go:wasmimport directive below.
//
//go:wasmimport gojs runtime.resetMemoryDataView
func resetMemoryDataView()
"""
Model to store a microsite in the database.
The object is stored as a json representation of the python dict
that would have been used in the settings.
"""
import collections
from django.contrib.sites.models import Site
from django.db import models
from django.db.models.base import ObjectDoesNotExist
from django.db.models.signals import pre_delete, pre_save
from django.dispatch import receiver
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
class Microsite(models.Model):
    """
    This is where the information about the microsite gets stored to the db.
    To achieve the maximum flexibility, most of the fields are stored inside
    a json field.
    Notes:
    - The key field was required for the dict definition at the settings, and it
    is used in some of the microsite_configuration methods.
    - The site field is django site.
    - The values field must be validated on save to prevent the platform from crashing
    badly in the case the string is not able to be loaded as json.
    """
    site = models.OneToOneField(Site, related_name='microsite')
    key = models.CharField(max_length=63, db_index=True, unique=True)
    values = JSONField(null=False, blank=True, load_kwargs={'object_pairs_hook': collections.OrderedDict})
    def __unicode__(self):
        """Return the microsite key."""
        return self.key
    def get_organizations(self):
        """
        Helper method to return the organizations associated with our particular
        Microsite (a flat values-list queryset of organization names, not a list)
        """
        return MicrositeOrganizationMapping.get_organizations_for_microsite_by_pk(self.id)  # pylint: disable=no-member
    @classmethod
    def get_microsite_for_domain(cls, domain):
        """
        Returns the microsite associated with this domain, matched
        case-insensitively via iexact and ignoring any port number,
        or None if no match
        """
        # remove any port number from the hostname
        domain = domain.split(':')[0]
        microsites = cls.objects.filter(site__domain__iexact=domain)
        return microsites[0] if microsites else None
class MicrositeHistory(TimeStampedModel):
    """
    This is an archive table for Microsites model, so that we can maintain a history of changes. Note that the
    key field is no longer unique
    """
    site = models.ForeignKey(Site, related_name='microsite_history')
    key = models.CharField(max_length=63, db_index=True)
    values = JSONField(null=False, blank=True, load_kwargs={'object_pairs_hook': collections.OrderedDict})
    def __unicode__(self):
        """Return the archived microsite key."""
        return self.key
    class Meta(object):
        """ Meta class for this Django model """
        verbose_name_plural = "Microsite histories"
def _make_archive_copy(instance):
    """
    Copy the given Microsite into the MicrositeHistory archive table.
    """
    MicrositeHistory(
        key=instance.key,
        site=instance.site,
        values=instance.values,
    ).save()
@receiver(pre_delete, sender=Microsite)
def on_microsite_deleted(sender, instance, **kwargs):  # pylint: disable=unused-argument
    """
    Archive the microsite when the record is about to be deleted
    Make a clone and populate in the History table
    """
    _make_archive_copy(instance)
@receiver(pre_save, sender=Microsite)
def on_microsite_updated(sender, instance, **kwargs):  # pylint: disable=unused-argument
    """
    Archive the previous state of the microsite on an update operation
    (no-op for brand new instances, which have no id yet)
    """
    if instance.id:
        # on an update case, get the original and archive it
        original = Microsite.objects.get(id=instance.id)
        _make_archive_copy(original)
class MicrositeOrganizationMapping(models.Model):
    """
    Mapping of Organization to which Microsite it belongs
    """
    organization = models.CharField(max_length=63, db_index=True, unique=True)
    microsite = models.ForeignKey(Microsite, db_index=True)
    def __unicode__(self):
        """String conversion"""
        return u'{microsite_key}: {organization}'.format(
            microsite_key=self.microsite.key,
            organization=self.organization
        )
    @classmethod
    def get_organizations_for_microsite_by_pk(cls, microsite_pk):
        """
        Returns the organization names associated with the microsite primary
        key, as a flat values-list queryset (not a set)
        """
        return cls.objects.filter(microsite_id=microsite_pk).values_list('organization', flat=True)
    @classmethod
    def get_microsite_for_organization(cls, org):
        """
        Returns the microsite object for a given organization based on the table mapping, None if
        no mapping exists
        """
        try:
            item = cls.objects.select_related('microsite').get(organization=org)
            return item.microsite
        except ObjectDoesNotExist:
            return None
class MicrositeTemplate(models.Model):
    """
    A HTML template that a microsite can use
    """
    microsite = models.ForeignKey(Microsite, db_index=True)
    template_uri = models.CharField(max_length=255, db_index=True)
    template = models.TextField()
    def __unicode__(self):
        """String conversion"""
        return u'{microsite_key}: {template_uri}'.format(
            microsite_key=self.microsite.key,
            template_uri=self.template_uri
        )
    class Meta(object):
        """ Meta class for this Django model """
        unique_together = (('microsite', 'template_uri'),)
    @classmethod
    def get_template_for_microsite(cls, domain, template_uri):
        """
        Returns the template object for the microsite matched by site domain
        and template URI, None if not found
        """
        try:
            return cls.objects.get(microsite__site__domain=domain, template_uri=template_uri)
        except ObjectDoesNotExist:
            return None
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, subprocess, sys
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), "..", "..", "..", "..", "..", "tools"))
import traci, sumolib
# Python 2 smoke test: launches SUMO, connects over TraCI on port 8813 and
# exercises the traci.lane API (getters, setters and a value subscription).
sumoBinary = sumolib.checkBinary('sumo')
sumoProcess = subprocess.Popen("%s -c sumo.sumocfg" % (sumoBinary), shell=True, stdout=sys.stdout)
traci.init(8813)
# advance the simulation a few steps before querying lane state
for step in range(3):
    print "step", step
    traci.simulationStep()
print "lanes", traci.lane.getIDList()
laneID = "2fi_0"
print "examining", laneID
# read every per-lane attribute and emission/traffic measure
print "length", traci.lane.getLength(laneID)
print "maxSpeed", traci.lane.getMaxSpeed(laneID)
print "width", traci.lane.getWidth(laneID)
print "allowed", traci.lane.getAllowed(laneID)
print "disallowed", traci.lane.getDisallowed(laneID)
print "linkNum", traci.lane.getLinkNumber(laneID)
print "links", traci.lane.getLinks(laneID)
print "shape", traci.lane.getShape(laneID)
print "edge", traci.lane.getEdgeID(laneID)
print "CO2", traci.lane.getCO2Emission(laneID)
print "CO", traci.lane.getCOEmission(laneID)
print "HC", traci.lane.getHCEmission(laneID)
print "PMx", traci.lane.getPMxEmission(laneID)
print "NOx", traci.lane.getNOxEmission(laneID)
print "Fuel", traci.lane.getFuelConsumption(laneID)
print "Noise", traci.lane.getNoiseEmission(laneID)
print "meanSpeed", traci.lane.getLastStepMeanSpeed(laneID)
print "occupancy", traci.lane.getLastStepOccupancy(laneID)
print "lastLength", traci.lane.getLastStepLength(laneID)
print "traveltime", traci.lane.getTraveltime(laneID)
print "numVeh", traci.lane.getLastStepVehicleNumber(laneID)
print "haltVeh", traci.lane.getLastStepHaltingNumber(laneID)
print "vehIds", traci.lane.getLastStepVehicleIDs(laneID)
# mutate the lane and echo each attribute to verify the setter took effect
traci.lane.setAllowed(laneID, ["taxi"])
print "after setAllowed", traci.lane.getAllowed(laneID), traci.lane.getDisallowed(laneID)
traci.lane.setDisallowed(laneID, ["bus"])
print "after setDisallowed", traci.lane.getAllowed(laneID), traci.lane.getDisallowed(laneID)
traci.lane.setMaxSpeed(laneID, 42.)
print "after setMaxSpeed", traci.lane.getMaxSpeed(laneID)
traci.lane.setLength(laneID, 123.)
print "after setLength", traci.lane.getLength(laneID)
# subscribe to the lane's default variables and poll for a few more steps
traci.lane.subscribe(laneID)
print traci.lane.getSubscriptionResults(laneID)
for step in range(3,6):
    print "step", step
    traci.simulationStep()
    print traci.lane.getSubscriptionResults(laneID)
traci.close()
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a windows documentation stub. Actual code lives in the .ps1
# file of the same name
# Module metadata consumed by Ansible's doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}
DOCUMENTATION = '''
---
module: win_path
version_added: "2.3"
short_description: Manage Windows path environment variables
description:
   - Allows element-based ordering, addition, and removal of Windows path environment variables.
options:
  name:
    description:
      - Target path environment variable name
    default: PATH
  elements:
    description:
      - A single path element, or a list of path elements (ie, directories) to add or remove.
      - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order
        in the resultant path value.
      - Variable expansions (eg, C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element.
      - Any existing path elements not mentioned in C(elements) are always preserved in their current order.
      - New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering.
      - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing
        backslashes in YAML require quotes.
    required: true
  state:
    description:
      - Whether the path elements specified in C(elements) should be present or absent.
    choices:
      - present
      - absent
  scope:
    description:
      - The level at which the environment variable specified by C(name) should be managed (either for the current user or global machine scope).
    choices:
      - machine
      - user
    default: machine
author: "Matt Davis (@nitzmahone)"
notes:
   - This module is for modifying individual elements of path-like
     environment variables. For general-purpose management of other
     environment vars, use the M(win_environment) module.
   - This module does not broadcast change events.
     This means that the minority of windows applications which can have
     their environment changed without restarting will not be notified and
     therefore will need restarting to pick up new environment settings.
     User level environment variables will require an interactive user to
     log out and in again before they become available.
'''
EXAMPLES = r'''
- name: Ensure that system32 and Powershell are present on the global system path, and in the specified order
  win_path:
    elements:
    - '%SystemRoot%\system32'
    - '%SystemRoot%\system32\WindowsPowerShell\v1.0'
- name: Ensure that C:\Program Files\MyJavaThing is not on the current user's CLASSPATH
  win_path:
    name: CLASSPATH
    elements: C:\Program Files\MyJavaThing
    scope: user
    state: absent
'''
/* SPDX-License-Identifier: MIT OR Apache-2.0 */
//! IEEE 754-2008 `minNum`. This has been superseded by IEEE 754-2019 `minimumNumber`.
//!
//! Per the spec, returns the canonicalized result of:
//! - `x` if `x < y`
//! - `y` if `y < x`
//! - The other number if one is NaN
//! - Otherwise, either `x` or `y`, canonicalized
//! - -0.0 and +0.0 may be disregarded (unlike newer operations)
//!
//! Excluded from our implementation is sNaN handling.
//!
//! More on the differences: [link].
//!
//! [link]: https://grouper.ieee.org/groups/msc/ANSI_IEEE-Std-754-2019/background/minNum_maxNum_Removal_Demotion_v3.pdf
use crate::support::Float;
#[inline]
pub fn fmin<F: Float>(x: F, y: F) -> F {
let res = if y.is_nan() || x < y { x } else { y };
// Canonicalize
res * F::ONE
} | rust | github | https://github.com/nodejs/node | deps/crates/vendor/libm/src/math/generic/fmin.rs |
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
# Skip this entire test module on interpreters older than the minimum
# version the F5 Ansible modules support.
if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_imish_config import (
ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
# Directory holding canned device responses; loaded fixtures are memoized
# in fixture_data keyed by absolute path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Read a fixture file, parse it as JSON when possible, and cache it.

    Returns the parsed object for valid JSON fixtures, otherwise the raw
    file contents as a string. Repeated calls for the same file are served
    from the module-level cache.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            contents = handle.read()
        try:
            contents = json.loads(contents)
        except Exception:
            # Not JSON; keep the raw text verbatim.
            pass
        fixture_data[path] = contents
    return fixture_data[path]
class TestManager(unittest.TestCase):
    """Unit tests for bigip_imish_config's ModuleManager.

    All device-facing calls (version probe, telemetry, config read/upload/
    load) are mocked, so no BIG-IP connection is required.
    """
    def setUp(self):
        self.spec = ArgumentSpec()
        # Patch out version probing and telemetry so exec_module() never
        # reaches the network.
        self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_imish_config.tmos_version')
        self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_imish_config.send_teem')
        self.m2 = self.p2.start()
        self.m2.return_value = '14.1.0'
        self.m3 = self.p3.start()
        self.m3.return_value = True
    def tearDown(self):
        # Stop the patches started in setUp().
        self.p2.stop()
        self.p3.stop()
    def test_create(self, *args):
        # Apply BGP config lines under 'router bgp 64664' with an exact
        # match check against the mocked "current" running config.
        set_module_args(dict(
            lines=[
                'bgp graceful-restart restart-time 120',
                'redistribute kernel route-map rhi',
                'neighbor 10.10.10.11 remote-as 65000',
                'neighbor 10.10.10.11 fall-over bfd',
                'neighbor 10.10.10.11 remote-as 65000',
                'neighbor 10.10.10.11 fall-over bfd'
            ],
            parents='router bgp 64664',
            before='bfd slow-timer 2000',
            match='exact',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        current = load_fixture('load_imish_output_1.json')
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            mutually_exclusive=self.spec.mutually_exclusive,
            required_if=self.spec.required_if,
            add_file_common_args=self.spec.add_file_common_args
        )
        # Override methods in the specific type of manager
        mm = ModuleManager(module=module)
        # Stub every device-facing call; the fixture supplies the current
        # on-device configuration the module diffs against.
        mm.read_current_from_device = Mock(return_value=current['commandResult'])
        mm.upload_file_to_device = Mock(return_value=True)
        mm.load_config_on_device = Mock(return_value=True)
        mm.remove_uploaded_file_from_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from sqlalchemy import select
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
class DagUnpausedDep(BaseTIDep):
    """Dependency that passes only while the task's DAG is not paused."""

    NAME = "Dag Not Paused"
    IGNORABLE = True

    @staticmethod
    def _is_dag_paused(dag_id: str, session) -> bool:
        """Check if a dag is paused. Extracted to simplify testing."""
        from airflow.models.dag import DagModel

        stmt = select(DagModel.is_paused).where(DagModel.dag_id == dag_id)
        return session.scalar(stmt)

    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        # Yielding a failing status marks the dependency as unmet;
        # yielding nothing means it passed.
        paused = self._is_dag_paused(ti.dag_id, session)
        if paused:
            yield self._failing_status(reason=f"Task's DAG '{ti.dag_id}' is paused.")
'use strict';
const common = require('../common.js');
const {
PerformanceObserver,
performance,
} = require('perf_hooks');
// Benchmark registration: report `n` operations, with the observer
// subscribed either to every entry type or only to 'measure'.
const bench = common.createBenchmark(main, {
  n: [1e6],
  observe: ['all', 'measure'],
});
// One unit of benchmarked work: two marks plus a measure spanning them.
function test() {
  performance.mark('a');
  performance.mark('b');
  performance.measure('a to b', 'a', 'b');
}
/**
 * Benchmark entry point. Subscribes a PerformanceObserver (to all entry
 * types, or only the one named by `observe`), then emits marks/measures
 * in a tight loop; the observer callback ends the benchmark run.
 */
function main({ n, observe }) {
  const entryTypes =
    observe === 'all' ? ['mark', 'measure'] : [observe];

  const observer = new PerformanceObserver(() => {
    bench.end(n);
  });
  observer.observe({ entryTypes, buffered: true });

  bench.start();
  performance.mark('start');
  let iteration = 0;
  while (iteration < 1e5) {
    test();
    iteration += 1;
  }
}
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.office365.base import O365BaseTool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
# Maps each legacy attribute name to its new home in langchain_community.
DEPRECATED_LOOKUP = {"O365BaseTool": "langchain_community.tools.office365.base"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): resolves names through
    ``_import_attribute``, which handles the deprecation warning and the
    lazy import of the relocated symbol.
    """
    return _import_attribute(name)
__all__ = [
    "O365BaseTool",
]
"use server";
// React Server Action module: exported functions run on the server when
// invoked from client code.
export async function log() {
  // Side effect only: write a marker line to the server console.
  console.log("hello from server");
}
"""Config Flow for PlayStation 4."""
from collections import OrderedDict
import logging
from pyps4_2ndscreen.errors import CredentialTimeout
from pyps4_2ndscreen.helpers import Helper
from pyps4_2ndscreen.media_art import COUNTRIES
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_CODE,
CONF_HOST,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
)
from homeassistant.util import location
from .const import CONFIG_ENTRY_VERSION, DEFAULT_ALIAS, DEFAULT_NAME, DOMAIN
_LOGGER = logging.getLogger(__name__)
# Selector labels used by the config flow's "mode" step.
CONF_MODE = "Config Mode"
CONF_AUTO = "Auto Discover"
CONF_MANUAL = "Manual Entry"
# Local ports the PS4 2nd-screen protocol must be able to bind.
UDP_PORT = 987
TCP_PORT = 997
# Maps a port that failed to bind to the abort-reason key shown to the user.
PORT_MSG = {UDP_PORT: "port_987_bind_error", TCP_PORT: "port_997_bind_error"}
# PS4 link PINs are 8 digits; shorter numeric input is zero-padded (zfill).
PIN_LENGTH = 8
@config_entries.HANDLERS.register(DOMAIN)
class PlayStation4FlowHandler(config_entries.ConfigFlow):
    """Handle a PlayStation 4 config flow.

    Step order: ``user`` (local port check) -> ``creds`` (fetch credentials
    via the 2nd Screen handshake) -> ``mode`` (auto discover vs. manual IP)
    -> ``link`` (pair with a console and create the config entry).
    """
    VERSION = CONFIG_ENTRY_VERSION
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
    def __init__(self):
        """Initialize the config flow."""
        self.helper = Helper()
        # State accumulated across the flow's steps.
        self.creds = None
        self.name = None
        self.host = None
        self.region = None
        self.pin = None
        self.m_device = None
        self.location = None
        self.device_list = []
    async def async_step_user(self, user_input=None):
        """Handle a user config flow."""
        # Check if able to bind to ports: UDP 987, TCP 997.
        ports = PORT_MSG.keys()
        failed = await self.hass.async_add_executor_job(self.helper.port_bind, ports)
        if failed in ports:
            # Abort with a message specific to the port that failed.
            reason = PORT_MSG[failed]
            return self.async_abort(reason=reason)
        return await self.async_step_creds()
    async def async_step_creds(self, user_input=None):
        """Return PS4 credentials from 2nd Screen App."""
        errors = {}
        if user_input is not None:
            try:
                self.creds = await self.hass.async_add_executor_job(
                    self.helper.get_creds, DEFAULT_ALIAS
                )
                if self.creds is not None:
                    return await self.async_step_mode()
                return self.async_abort(reason="credential_error")
            except CredentialTimeout:
                # Handshake was never confirmed in time; re-show the form.
                errors["base"] = "credential_timeout"
        return self.async_show_form(step_id="creds", errors=errors)
    async def async_step_mode(self, user_input=None):
        """Prompt for mode."""
        errors = {}
        mode = [CONF_AUTO, CONF_MANUAL]
        if user_input is not None:
            if user_input[CONF_MODE] == CONF_MANUAL:
                try:
                    # Manual mode requires an explicit IP address.
                    device = user_input[CONF_IP_ADDRESS]
                    if device:
                        self.m_device = device
                except KeyError:
                    errors[CONF_IP_ADDRESS] = "no_ipaddress"
            if not errors:
                return await self.async_step_link()
        mode_schema = OrderedDict()
        mode_schema[vol.Required(CONF_MODE, default=CONF_AUTO)] = vol.In(list(mode))
        mode_schema[vol.Optional(CONF_IP_ADDRESS)] = str
        return self.async_show_form(
            step_id="mode", data_schema=vol.Schema(mode_schema), errors=errors
        )
    async def async_step_link(self, user_input=None):
        """Prompt user input. Create or edit entry."""
        regions = sorted(COUNTRIES.keys())
        default_region = None
        errors = {}
        if user_input is None:
            # Search for device.
            devices = await self.hass.async_add_executor_job(
                self.helper.has_devices, self.m_device
            )
            # Abort if can't find device.
            if not devices:
                return self.async_abort(reason="no_devices_found")
            self.device_list = [device["host-ip"] for device in devices]
            # Check that devices found aren't configured per account.
            entries = self.hass.config_entries.async_entries(DOMAIN)
            if entries:
                # Retrieve device data from all entries if creds match.
                conf_devices = [
                    device
                    for entry in entries
                    if self.creds == entry.data[CONF_TOKEN]
                    for device in entry.data["devices"]
                ]
                # Remove configured device from search list.
                for c_device in conf_devices:
                    if c_device["host"] in self.device_list:
                        # Remove configured device from search list.
                        self.device_list.remove(c_device["host"])
                # If list is empty then all devices are configured.
                if not self.device_list:
                    return self.async_abort(reason="devices_configured")
        # Login to PS4 with user data.
        if user_input is not None:
            self.region = user_input[CONF_REGION]
            self.name = user_input[CONF_NAME]
            # Assume pin had leading zeros, before coercing to int.
            self.pin = str(user_input[CONF_CODE]).zfill(PIN_LENGTH)
            self.host = user_input[CONF_IP_ADDRESS]
            is_ready, is_login = await self.hass.async_add_executor_job(
                self.helper.link, self.host, self.creds, self.pin, DEFAULT_ALIAS
            )
            if is_ready is False:
                errors["base"] = "not_ready"
            elif is_login is False:
                errors["base"] = "login_failed"
            else:
                device = {
                    CONF_HOST: self.host,
                    CONF_NAME: self.name,
                    CONF_REGION: self.region,
                }
                # Create entry.
                return self.async_create_entry(
                    title="PlayStation 4",
                    data={CONF_TOKEN: self.creds, "devices": [device]},
                )
        # Try to find region automatically.
        if not self.location:
            self.location = await location.async_detect_location_info(
                self.hass.helpers.aiohttp_client.async_get_clientsession()
            )
        if self.location:
            country = self.location.country_name
            if country in COUNTRIES:
                default_region = country
        # Show User Input form.
        link_schema = OrderedDict()
        link_schema[vol.Required(CONF_IP_ADDRESS)] = vol.In(list(self.device_list))
        link_schema[vol.Required(CONF_REGION, default=default_region)] = vol.In(
            list(regions)
        )
        link_schema[vol.Required(CONF_CODE)] = vol.All(
            vol.Strip, vol.Length(max=PIN_LENGTH), vol.Coerce(int)
        )
        link_schema[vol.Required(CONF_NAME, default=DEFAULT_NAME)] = str
        return self.async_show_form(
            step_id="link", data_schema=vol.Schema(link_schema), errors=errors
        )
# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.project.networks.subnets \
import workflows as project_workflows
from openstack_dashboard.dashboards.project.networks import workflows \
as net_workflows
# Module-level logger (not referenced in this module's visible code).
LOG = logging.getLogger(__name__)
class CreateSubnetInfoAction(project_workflows.CreateSubnetInfoAction):
    # Admin panel: disable the subnet-range check performed by the
    # project-side action.
    check_subnet_range = False
    # NOTE(amotoki): As of Newton, workflows.Action does not support
    # an inheritance of django Meta class. It seems subclasses must
    # declare django meta class.
    class Meta(object):
        name = _("Subnet")
        help_text = _('Create a subnet associated with the network. '
                      'Advanced configuration is available by clicking on the '
                      '"Subnet Details" tab.')
class CreateSubnetInfo(project_workflows.CreateSubnetInfo):
    # Reuse the project workflow step, swapping in the admin action above.
    action_class = CreateSubnetInfoAction
class CreateSubnet(project_workflows.CreateSubnet):
    """Admin workflow for creating a subnet on an arbitrary network."""

    default_steps = (CreateSubnetInfo,
                     net_workflows.CreateSubnetDetail)

    def _network_detail_url(self):
        # Both success and failure return to the network's detail page.
        return reverse("horizon:admin:networks:detail",
                       args=(self.context['network'].id,))

    def get_success_url(self):
        return self._network_detail_url()

    def get_failure_url(self):
        return self._network_detail_url()

    def handle(self, request, data):
        """Create the subnet; return True on success."""
        network = self.context_seed['network']
        # The network argument is required to render error messages
        # correctly, and the network's tenant_id must be passed explicitly
        # in case the admin user does not belong to that tenant.
        subnet = self._create_subnet(
            request, data,
            network=network,
            tenant_id=network.tenant_id)
        return bool(subnet)
class UpdateSubnetInfoAction(project_workflows.UpdateSubnetInfoAction):
    # Admin panel: disable the subnet-range check performed by the
    # project-side action.
    check_subnet_range = False
    # NOTE(amotoki): As of Newton, workflows.Action does not support
    # an inheritance of django Meta class. It seems subclasses must
    # declare django meta class.
    class Meta(object):
        name = _("Subnet")
        help_text = _('Update a subnet associated with the network. '
                      'Advanced configuration are available at '
                      '"Subnet Details" tab.')
class UpdateSubnetInfo(project_workflows.UpdateSubnetInfo):
    # Reuse the project workflow step, swapping in the admin action above.
    action_class = UpdateSubnetInfoAction
class UpdateSubnet(project_workflows.UpdateSubnet):
    # Return to the admin network-detail page on both outcomes.
    success_url = "horizon:admin:networks:detail"
    failure_url = "horizon:admin:networks:detail"
    default_steps = (UpdateSubnetInfo,
                     project_workflows.UpdateSubnetDetail)
/* Descriptors -- a new, flexible way to describe attributes */
#include "Python.h"
#include "pycore_abstract.h" // _PyObject_RealIsSubclass()
#include "pycore_call.h" // _PyStack_AsDict()
#include "pycore_ceval.h" // _Py_EnterRecursiveCallTstate()
#include "pycore_emscripten_trampoline.h" // descr_set_trampoline_call(), descr_get_trampoline_call()
#include "pycore_descrobject.h" // _PyMethodWrapper_Type
#include "pycore_modsupport.h" // _PyArg_UnpackStack()
#include "pycore_object.h" // _PyObject_GC_UNTRACK()
#include "pycore_object_deferred.h" // _PyObject_SetDeferredRefcount()
#include "pycore_pystate.h" // _PyThreadState_GET()
#include "pycore_tuple.h" // _PyTuple_ITEMS()
/*[clinic input]
class mappingproxy "mappingproxyobject *" "&PyDictProxy_Type"
class property "propertyobject *" "&PyProperty_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=556352653fd4c02e]*/
/* tp_dealloc for descriptors: untrack from the GC first, then drop the
   owned references (owning type, name, cached qualname) and free. */
static void
descr_dealloc(PyObject *self)
{
    PyDescrObject *descr = (PyDescrObject *)self;
    _PyObject_GC_UNTRACK(descr);
    Py_XDECREF(descr->d_type);
    Py_XDECREF(descr->d_name);
    Py_XDECREF(descr->d_qualname);
    PyObject_GC_Del(descr);
}
/* Return the descriptor's name as a borrowed reference, or NULL when the
   name is missing or not a str (no exception is set). */
static PyObject *
descr_name(PyDescrObject *descr)
{
    if (descr->d_name != NULL && PyUnicode_Check(descr->d_name))
        return descr->d_name;
    return NULL;
}
/* Shared repr helper: "<{kind} '{name}'>" when the owning type is
   `object`, otherwise "<{kind} '{name}' of '{type}' objects>".
   A missing/non-str name renders as "?" via the %V format. */
static PyObject *
descr_repr(PyDescrObject *descr, const char *kind)
{
    PyObject *name = NULL;
    if (descr->d_name != NULL && PyUnicode_Check(descr->d_name))
        name = descr->d_name;
    if (descr->d_type == &PyBaseObject_Type) {
        return PyUnicode_FromFormat("<%s '%V'>", kind, name, "?");
    }
    return PyUnicode_FromFormat("<%s '%V' of '%s' objects>",
                                kind, name, "?", descr->d_type->tp_name);
}
/* tp_repr slots for each descriptor flavor: delegate to descr_repr with
   the appropriate kind label. */
static PyObject *
method_repr(PyObject *descr)
{
    return descr_repr((PyDescrObject *)descr, "method");
}
static PyObject *
member_repr(PyObject *descr)
{
    return descr_repr((PyDescrObject *)descr, "member");
}
static PyObject *
getset_repr(PyObject *descr)
{
    return descr_repr((PyDescrObject *)descr, "attribute");
}
static PyObject *
wrapperdescr_repr(PyObject *descr)
{
    return descr_repr((PyDescrObject *)descr, "slot wrapper");
}
/* Verify that `obj` is an instance of the descriptor's owning type.
   Returns 0 on success; on mismatch sets TypeError and returns -1. */
static int
descr_check(PyDescrObject *descr, PyObject *obj)
{
    if (!PyObject_TypeCheck(obj, descr->d_type)) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' for '%.100s' objects "
                     "doesn't apply to a '%.100s' object",
                     descr_name((PyDescrObject *)descr), "?",
                     descr->d_type->tp_name,
                     Py_TYPE(obj)->tp_name);
        return -1;
    }
    return 0;
}
/* __get__ for classmethod_descriptor: bind the C function to the *type*.
   `obj` is only consulted to derive the type when `type` is NULL; the
   type must be a subtype of the descriptor's owning type. */
static PyObject *
classmethod_get(PyObject *self, PyObject *obj, PyObject *type)
{
    PyMethodDescrObject *descr = (PyMethodDescrObject *)self;
    /* Ensure a valid type. Class methods ignore obj. */
    if (type == NULL) {
        if (obj != NULL)
            type = (PyObject *)Py_TYPE(obj);
        else {
            /* Wot - no type?! */
            PyErr_Format(PyExc_TypeError,
                         "descriptor '%V' for type '%.100s' "
                         "needs either an object or a type",
                         descr_name((PyDescrObject *)descr), "?",
                         PyDescr_TYPE(descr)->tp_name);
            return NULL;
        }
    }
    if (!PyType_Check(type)) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' for type '%.100s' "
                     "needs a type, not a '%.100s' as arg 2",
                     descr_name((PyDescrObject *)descr), "?",
                     PyDescr_TYPE(descr)->tp_name,
                     Py_TYPE(type)->tp_name);
        return NULL;
    }
    if (!PyType_IsSubtype((PyTypeObject *)type, PyDescr_TYPE(descr))) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' requires a subtype of '%.100s' "
                     "but received '%.100s'",
                     descr_name((PyDescrObject *)descr), "?",
                     PyDescr_TYPE(descr)->tp_name,
                     ((PyTypeObject *)type)->tp_name);
        return NULL;
    }
    PyTypeObject *cls = NULL;
    /* METH_METHOD functions also receive the defining class. */
    if (descr->d_method->ml_flags & METH_METHOD) {
        cls = descr->d_common.d_type;
    }
    return PyCMethod_New(descr->d_method, type, NULL, cls);
}
/* __get__ for method_descriptor: class access (obj == NULL) returns the
   descriptor itself; instance access returns a bound builtin method
   (PyCMethod for METH_METHOD, otherwise PyCFunction). */
static PyObject *
method_get(PyObject *self, PyObject *obj, PyObject *type)
{
    PyMethodDescrObject *descr = (PyMethodDescrObject *)self;
    if (obj == NULL) {
        return Py_NewRef(descr);
    }
    if (descr_check((PyDescrObject *)descr, obj) < 0) {
        return NULL;
    }
    if (descr->d_method->ml_flags & METH_METHOD) {
        if (type == NULL || PyType_Check(type)) {
            return PyCMethod_New(descr->d_method, obj, NULL, descr->d_common.d_type);
        } else {
            PyErr_Format(PyExc_TypeError,
                         "descriptor '%V' needs a type, not '%s', as arg 2",
                         descr_name((PyDescrObject *)descr),
                         Py_TYPE(type)->tp_name);
            return NULL;
        }
    } else {
        return PyCFunction_NewEx(descr->d_method, obj, NULL);
    }
}
/* __get__ for member_descriptor: read a C struct field from `obj` via
   PyMember_GetOne, firing the audit hook for Py_AUDIT_READ members. */
static PyObject *
member_get(PyObject *self, PyObject *obj, PyObject *type)
{
    PyMemberDescrObject *descr = (PyMemberDescrObject *)self;
    if (obj == NULL) {
        return Py_NewRef(descr);
    }
    if (descr_check((PyDescrObject *)descr, obj) < 0) {
        return NULL;
    }
    if (descr->d_member->flags & Py_AUDIT_READ) {
        /* obj is known non-NULL here; the ternary is defensive. */
        if (PySys_Audit("object.__getattr__", "Os",
                        obj ? obj : Py_None, descr->d_member->name) < 0) {
            return NULL;
        }
    }
    return PyMember_GetOne((char *)obj, descr->d_member);
}
/* __get__ for getset_descriptor: call the C getter, or raise
   AttributeError when no getter was provided. */
static PyObject *
getset_get(PyObject *self, PyObject *obj, PyObject *type)
{
    PyGetSetDescrObject *descr = (PyGetSetDescrObject *)self;
    if (obj == NULL) {
        return Py_NewRef(descr);
    }
    if (descr_check((PyDescrObject *)descr, obj) < 0) {
        return NULL;
    }
    if (descr->d_getset->get != NULL)
        return descr_get_trampoline_call(
            descr->d_getset->get, obj, descr->d_getset->closure);
    PyErr_Format(PyExc_AttributeError,
                 "attribute '%V' of '%.100s' objects is not readable",
                 descr_name((PyDescrObject *)descr), "?",
                 PyDescr_TYPE(descr)->tp_name);
    return NULL;
}
/* __get__ for wrapper_descriptor (slot wrapper): produce a bound
   method-wrapper object for `obj`. */
static PyObject *
wrapperdescr_get(PyObject *self, PyObject *obj, PyObject *type)
{
    PyWrapperDescrObject *descr = (PyWrapperDescrObject *)self;
    if (obj == NULL) {
        return Py_NewRef(descr);
    }
    if (descr_check((PyDescrObject *)descr, obj) < 0) {
        return NULL;
    }
    return PyWrapper_New((PyObject *)descr, obj);
}
/* Type-check `obj` before a descriptor __set__/__delete__; sets TypeError
   and returns -1 on mismatch, 0 otherwise. */
static int
descr_setcheck(PyDescrObject *descr, PyObject *obj, PyObject *value)
{
    assert(obj != NULL);
    if (!PyObject_TypeCheck(obj, descr->d_type)) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' for '%.100s' objects "
                     "doesn't apply to a '%.100s' object",
                     descr_name(descr), "?",
                     descr->d_type->tp_name,
                     Py_TYPE(obj)->tp_name);
        return -1;
    }
    return 0;
}
/* __set__ for member_descriptor: write a C struct field via
   PyMember_SetOne (value == NULL means delete). */
static int
member_set(PyObject *self, PyObject *obj, PyObject *value)
{
    PyMemberDescrObject *descr = (PyMemberDescrObject *)self;
    if (descr_setcheck((PyDescrObject *)descr, obj, value) < 0) {
        return -1;
    }
    return PyMember_SetOne((char *)obj, descr->d_member, value);
}
/* __set__ for getset_descriptor: call the C setter, or raise
   AttributeError when no setter was provided. */
static int
getset_set(PyObject *self, PyObject *obj, PyObject *value)
{
    PyGetSetDescrObject *descr = (PyGetSetDescrObject *)self;
    if (descr_setcheck((PyDescrObject *)descr, obj, value) < 0) {
        return -1;
    }
    if (descr->d_getset->set != NULL) {
        return descr_set_trampoline_call(
            descr->d_getset->set, obj, value,
            descr->d_getset->closure);
    }
    PyErr_Format(PyExc_AttributeError,
                 "attribute '%V' of '%.100s' objects is not writable",
                 descr_name((PyDescrObject *)descr), "?",
                 PyDescr_TYPE(descr)->tp_name);
    return -1;
}
/* Vectorcall functions for each of the PyMethodDescr calling conventions.
 *
 * First, common helpers
 */
/* Validate a vectorcall on an unbound method descriptor: require at least
   the `self` argument (args[0]), type-check it against the owning type,
   and — when `kwnames` is passed in — reject any keyword arguments.
   Returns 0 on success, -1 with an exception set otherwise. */
static inline int
method_check_args(PyObject *func, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames)
{
    assert(!PyErr_Occurred());
    if (nargs < 1) {
        PyObject *funcstr = _PyObject_FunctionStr(func);
        if (funcstr != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "unbound method %U needs an argument", funcstr);
            Py_DECREF(funcstr);
        }
        return -1;
    }
    PyObject *self = args[0];
    if (descr_check((PyDescrObject *)func, self) < 0) {
        return -1;
    }
    if (kwnames && PyTuple_GET_SIZE(kwnames)) {
        PyObject *funcstr = _PyObject_FunctionStr(func);
        if (funcstr != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "%U takes no keyword arguments", funcstr);
            Py_DECREF(funcstr);
        }
        return -1;
    }
    return 0;
}
typedef void (*funcptr)(void);
/* Bump the recursion depth and return the descriptor's ml_meth pointer
   (cast to the generic funcptr); NULL on recursion overflow. */
static inline funcptr
method_enter_call(PyThreadState *tstate, PyObject *func)
{
    if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) {
        return NULL;
    }
    return (funcptr)((PyMethodDescrObject *)func)->d_method->ml_meth;
}
/* Now the actual vectorcall functions */
/* METH_VARARGS: pack args[1:] into a tuple, call ml_meth(self, tuple). */
static PyObject *
method_vectorcall_VARARGS(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, kwnames)) {
        return NULL;
    }
    PyObject *argstuple = PyTuple_FromArray(args+1, nargs-1);
    if (argstuple == NULL) {
        return NULL;
    }
    PyCFunction meth = (PyCFunction)method_enter_call(tstate, func);
    if (meth == NULL) {
        Py_DECREF(argstuple);
        return NULL;
    }
    PyObject *result = _PyCFunction_TrampolineCall(
        meth, args[0], argstuple);
    Py_DECREF(argstuple);
    _Py_LeaveRecursiveCallTstate(tstate);
    return result;
}
/* METH_VARARGS | METH_KEYWORDS: same as above, but additionally
   materialize kwnames + the trailing args into a temporary dict. */
static PyObject *
method_vectorcall_VARARGS_KEYWORDS(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, NULL)) {
        return NULL;
    }
    PyObject *argstuple = PyTuple_FromArray(args+1, nargs-1);
    if (argstuple == NULL) {
        return NULL;
    }
    PyObject *result = NULL;
    /* Create a temporary dict for keyword arguments */
    PyObject *kwdict = NULL;
    if (kwnames != NULL && PyTuple_GET_SIZE(kwnames) > 0) {
        kwdict = _PyStack_AsDict(args + nargs, kwnames);
        if (kwdict == NULL) {
            goto exit;
        }
    }
    PyCFunctionWithKeywords meth = (PyCFunctionWithKeywords)
                                   method_enter_call(tstate, func);
    if (meth == NULL) {
        goto exit;
    }
    result = _PyCFunctionWithKeywords_TrampolineCall(
        meth, args[0], argstuple, kwdict);
    _Py_LeaveRecursiveCallTstate(tstate);
exit:
    Py_DECREF(argstuple);
    Py_XDECREF(kwdict);
    return result;
}
/* METH_FASTCALL | METH_KEYWORDS | METH_METHOD: pass args, kwnames and the
   defining class straight through to the PyCMethod. */
static PyObject *
method_vectorcall_FASTCALL_KEYWORDS_METHOD(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, NULL)) {
        return NULL;
    }
    PyCMethod meth = (PyCMethod) method_enter_call(tstate, func);
    if (meth == NULL) {
        return NULL;
    }
    PyObject *result = meth(args[0],
                            ((PyMethodDescrObject *)func)->d_common.d_type,
                            args+1, nargs-1, kwnames);
    _Py_LeaveRecursiveCall();
    return result;
}
/* METH_FASTCALL (positional only): forward args[1:] directly. */
static PyObject *
method_vectorcall_FASTCALL(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, kwnames)) {
        return NULL;
    }
    PyCFunctionFast meth = (PyCFunctionFast)
                           method_enter_call(tstate, func);
    if (meth == NULL) {
        return NULL;
    }
    PyObject *result = meth(args[0], args+1, nargs-1);
    _Py_LeaveRecursiveCallTstate(tstate);
    return result;
}
/* METH_FASTCALL | METH_KEYWORDS: forward args[1:] plus kwnames. */
static PyObject *
method_vectorcall_FASTCALL_KEYWORDS(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, NULL)) {
        return NULL;
    }
    PyCFunctionFastWithKeywords meth = (PyCFunctionFastWithKeywords)
                                       method_enter_call(tstate, func);
    if (meth == NULL) {
        return NULL;
    }
    PyObject *result = meth(args[0], args+1, nargs-1, kwnames);
    _Py_LeaveRecursiveCallTstate(tstate);
    return result;
}
/* METH_NOARGS: only `self` may be passed (nargs must be exactly 1). */
static PyObject *
method_vectorcall_NOARGS(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, kwnames)) {
        return NULL;
    }
    if (nargs != 1) {
        PyObject *funcstr = _PyObject_FunctionStr(func);
        if (funcstr != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "%U takes no arguments (%zd given)", funcstr, nargs-1);
            Py_DECREF(funcstr);
        }
        return NULL;
    }
    PyCFunction meth = (PyCFunction)method_enter_call(tstate, func);
    if (meth == NULL) {
        return NULL;
    }
    PyObject *result = _PyCFunction_TrampolineCall(meth, args[0], NULL);
    _Py_LeaveRecursiveCallTstate(tstate);
    return result;
}
/* METH_O: exactly one argument besides `self` (nargs must be 2). */
static PyObject *
method_vectorcall_O(
    PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames)
{
    PyThreadState *tstate = _PyThreadState_GET();
    Py_ssize_t nargs = PyVectorcall_NARGS(nargsf);
    if (method_check_args(func, args, nargs, kwnames)) {
        return NULL;
    }
    if (nargs != 2) {
        PyObject *funcstr = _PyObject_FunctionStr(func);
        if (funcstr != NULL) {
            PyErr_Format(PyExc_TypeError,
                         "%U takes exactly one argument (%zd given)",
                         funcstr, nargs-1);
            Py_DECREF(funcstr);
        }
        return NULL;
    }
    PyCFunction meth = (PyCFunction)method_enter_call(tstate, func);
    if (meth == NULL) {
        return NULL;
    }
    PyObject *result = _PyCFunction_TrampolineCall(meth, args[0], args[1]);
    _Py_LeaveRecursiveCallTstate(tstate);
    return result;
}
/* Instances of classmethod_descriptor are unlikely to be called directly.
   For one, the analogous class "classmethod" (for Python classes) is not
   callable. Second, users are not likely to access a classmethod_descriptor
   directly, since it means pulling it from the class __dict__.
   This is just an excuse to say that this doesn't need to be optimized:
   we implement this simply by calling __get__ and then calling the result.
*/
static PyObject *
classmethoddescr_call(PyObject *_descr, PyObject *args,
                      PyObject *kwds)
{
    PyMethodDescrObject *descr = (PyMethodDescrObject *)_descr;
    Py_ssize_t argc = PyTuple_GET_SIZE(args);
    if (argc < 1) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' of '%.100s' "
                     "object needs an argument",
                     descr_name((PyDescrObject *)descr), "?",
                     PyDescr_TYPE(descr)->tp_name);
        return NULL;
    }
    /* Bind to the class passed as the first argument, then call the bound
       method with the remaining positional args and the keywords. */
    PyObject *self = PyTuple_GET_ITEM(args, 0);
    PyObject *bound = classmethod_get((PyObject *)descr, NULL, self);
    if (bound == NULL) {
        return NULL;
    }
    PyObject *res = PyObject_VectorcallDict(bound, _PyTuple_ITEMS(args)+1,
                                            argc-1, kwds);
    Py_DECREF(bound);
    return res;
}
/* Invoke the underlying slot wrapper. PyWrapperFlag_KEYWORDS wrappers
   receive kwds; plain wrappers reject any non-empty kwds. */
Py_LOCAL_INLINE(PyObject *)
wrapperdescr_raw_call(PyWrapperDescrObject *descr, PyObject *self,
                      PyObject *args, PyObject *kwds)
{
    wrapperfunc wrapper = descr->d_base->wrapper;
    if (descr->d_base->flags & PyWrapperFlag_KEYWORDS) {
        wrapperfunc_kwds wk = _Py_FUNC_CAST(wrapperfunc_kwds, wrapper);
        return (*wk)(self, args, descr->d_wrapped, kwds);
    }
    if (kwds != NULL && (!PyDict_Check(kwds) || PyDict_GET_SIZE(kwds) != 0)) {
        PyErr_Format(PyExc_TypeError,
                     "wrapper %s() takes no keyword arguments",
                     descr->d_base->name);
        return NULL;
    }
    return (*wrapper)(self, args, descr->d_wrapped);
}
/* tp_call for wrapper_descriptor: args[0] is `self` and must be an
   instance of the owning type; the rest is forwarded to the wrapper. */
static PyObject *
wrapperdescr_call(PyObject *_descr, PyObject *args, PyObject *kwds)
{
    PyWrapperDescrObject *descr = (PyWrapperDescrObject *)_descr;
    Py_ssize_t argc;
    PyObject *self, *result;
    /* Make sure that the first argument is acceptable as 'self' */
    assert(PyTuple_Check(args));
    argc = PyTuple_GET_SIZE(args);
    if (argc < 1) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' of '%.100s' "
                     "object needs an argument",
                     descr_name((PyDescrObject *)descr), "?",
                     PyDescr_TYPE(descr)->tp_name);
        return NULL;
    }
    self = PyTuple_GET_ITEM(args, 0);
    if (!_PyObject_RealIsSubclass((PyObject *)Py_TYPE(self),
                                  (PyObject *)PyDescr_TYPE(descr))) {
        PyErr_Format(PyExc_TypeError,
                     "descriptor '%V' "
                     "requires a '%.100s' object "
                     "but received a '%.100s'",
                     descr_name((PyDescrObject *)descr), "?",
                     PyDescr_TYPE(descr)->tp_name,
                     Py_TYPE(self)->tp_name);
        return NULL;
    }
    /* Slice off `self`; the wrapper only sees the remaining args. */
    args = PyTuple_GetSlice(args, 1, argc);
    if (args == NULL) {
        return NULL;
    }
    result = wrapperdescr_raw_call(descr, self, args, kwds);
    Py_DECREF(args);
    return result;
}
/* __doc__ getter for method descriptors: parsed from the internal doc
   string attached to the C method table entry. */
static PyObject *
method_get_doc(PyObject *_descr, void *closure)
{
    PyMethodDescrObject *descr = (PyMethodDescrObject *)_descr;
    return _PyType_GetDocFromInternalDoc(descr->d_method->ml_name, descr->d_method->ml_doc);
}
/* __text_signature__ getter: extracted from the same internal doc. */
static PyObject *
method_get_text_signature(PyObject *_descr, void *closure)
{
    PyMethodDescrObject *descr = (PyMethodDescrObject *)_descr;
    return _PyType_GetTextSignatureFromInternalDoc(descr->d_method->ml_name,
                                                   descr->d_method->ml_doc,
                                                   descr->d_method->ml_flags);
}
/* Build "<owning type qualname>.<descriptor name>"; both components must
   be str objects or TypeError is raised. */
static PyObject *
calculate_qualname(PyDescrObject *descr)
{
    PyObject *type_qualname, *res;
    if (descr->d_name == NULL || !PyUnicode_Check(descr->d_name)) {
        PyErr_SetString(PyExc_TypeError,
                        "<descriptor>.__name__ is not a unicode object");
        return NULL;
    }
    type_qualname = PyObject_GetAttr(
            (PyObject *)descr->d_type, &_Py_ID(__qualname__));
    if (type_qualname == NULL)
        return NULL;
    if (!PyUnicode_Check(type_qualname)) {
        PyErr_SetString(PyExc_TypeError, "<descriptor>.__objclass__."
                        "__qualname__ is not a unicode object");
        Py_XDECREF(type_qualname);
        return NULL;
    }
    res = PyUnicode_FromFormat("%S.%S", type_qualname, descr->d_name);
    Py_DECREF(type_qualname);
    return res;
}
/* __qualname__ getter: computed lazily and cached on the descriptor. */
static PyObject *
descr_get_qualname(PyObject *self, void *Py_UNUSED(ignored))
{
    PyDescrObject *descr = (PyDescrObject *)self;
    if (descr->d_qualname == NULL)
        descr->d_qualname = calculate_qualname(descr);
    return Py_XNewRef(descr->d_qualname);
}
/* __reduce__: pickle a descriptor as getattr(objclass, name). */
static PyObject *
descr_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyDescrObject *descr = (PyDescrObject *)self;
    return Py_BuildValue("N(OO)", _PyEval_GetBuiltin(&_Py_ID(getattr)),
                         PyDescr_TYPE(descr), PyDescr_NAME(descr));
}
/* Methods shared by all descriptor flavors. */
static PyMethodDef descr_methods[] = {
    {"__reduce__", descr_reduce, METH_NOARGS, NULL},
    {NULL, NULL}
};
/* Read-only attributes shared by all descriptor flavors. */
static PyMemberDef descr_members[] = {
    {"__objclass__", _Py_T_OBJECT, offsetof(PyDescrObject, d_type), Py_READONLY},
    {"__name__", _Py_T_OBJECT, offsetof(PyDescrObject, d_name), Py_READONLY},
    {0}
};
/* Computed attributes specific to method descriptors. */
static PyGetSetDef method_getset[] = {
    {"__doc__", method_get_doc},
    {"__qualname__", descr_get_qualname},
    {"__text_signature__", method_get_text_signature},
    {0}
};
/* __doc__ getter for member descriptors: the PyMemberDef's doc string,
   or None when no doc was supplied. */
static PyObject *
member_get_doc(PyObject *_descr, void *closure)
{
    PyMemberDescrObject *descr = (PyMemberDescrObject *)_descr;
    if (descr->d_member->doc == NULL) {
        Py_RETURN_NONE;
    }
    return PyUnicode_FromString(descr->d_member->doc);
}

static PyGetSetDef member_getset[] = {
    {"__doc__", member_get_doc},
    {"__qualname__", descr_get_qualname},
    {0}
};

/* __doc__ getter for getset descriptors: the PyGetSetDef's doc string,
   or None when no doc was supplied. */
static PyObject *
getset_get_doc(PyObject *self, void *closure)
{
    PyGetSetDescrObject *descr = (PyGetSetDescrObject *)self;
    if (descr->d_getset->doc == NULL) {
        Py_RETURN_NONE;
    }
    return PyUnicode_FromString(descr->d_getset->doc);
}

static PyGetSetDef getset_getset[] = {
    {"__doc__", getset_get_doc},
    {"__qualname__", descr_get_qualname},
    {0}
};
/* __doc__ getter for slot-wrapper descriptors: built from the
   wrapperbase's internal doc string. */
static PyObject *
wrapperdescr_get_doc(PyObject *self, void *closure)
{
    PyWrapperDescrObject *descr = (PyWrapperDescrObject *)self;
    return _PyType_GetDocFromInternalDoc(descr->d_base->name, descr->d_base->doc);
}

/* __text_signature__ getter for slot-wrapper descriptors. */
static PyObject *
wrapperdescr_get_text_signature(PyObject *self, void *closure)
{
    PyWrapperDescrObject *descr = (PyWrapperDescrObject *)self;
    return _PyType_GetTextSignatureFromInternalDoc(descr->d_base->name,
                                                   descr->d_base->doc, 0);
}

static PyGetSetDef wrapperdescr_getset[] = {
    {"__doc__", wrapperdescr_get_doc},
    {"__qualname__", descr_get_qualname},
    {"__text_signature__", wrapperdescr_get_text_signature},
    {0}
};

/* GC traversal shared by all descriptor flavors: the only PyObject*
   reference traversed is the owning type. */
static int
descr_traverse(PyObject *self, visitproc visit, void *arg)
{
    PyDescrObject *descr = (PyDescrObject *)self;
    Py_VISIT(descr->d_type);
    return 0;
}
/* Type of descriptors wrapping a C-level PyMethodDef (built-in methods);
   supports vectorcall (tp_vectorcall_offset) and is marked
   Py_TPFLAGS_METHOD_DESCRIPTOR so type slots can call it unbound. */
PyTypeObject PyMethodDescr_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "method_descriptor",
    sizeof(PyMethodDescrObject),
    0,
    descr_dealloc,                              /* tp_dealloc */
    offsetof(PyMethodDescrObject, vectorcall),  /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    method_repr,                                /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    PyVectorcall_Call,                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
    Py_TPFLAGS_HAVE_VECTORCALL |
    Py_TPFLAGS_METHOD_DESCRIPTOR,               /* tp_flags */
    0,                                          /* tp_doc */
    descr_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    descr_methods,                              /* tp_methods */
    descr_members,                              /* tp_members */
    method_getset,                              /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    method_get,                                 /* tp_descr_get */
    0,                                          /* tp_descr_set */
};
/* This is for METH_CLASS in C, not for "f = classmethod(f)" in Python! */
/* Unlike PyMethodDescr_Type this has no vectorcall support; calls go
   through classmethoddescr_call and binding through classmethod_get. */
PyTypeObject PyClassMethodDescr_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "classmethod_descriptor",
    sizeof(PyMethodDescrObject),
    0,
    descr_dealloc,                              /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    method_repr,                                /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    classmethoddescr_call,                      /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    descr_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    0,                                          /* tp_methods */
    descr_members,                              /* tp_members */
    method_getset,                              /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    classmethod_get,                            /* tp_descr_get */
    0,                                          /* tp_descr_set */
};
/* Type of descriptors wrapping a PyMemberDef (C struct fields exposed as
   attributes). A data descriptor: both tp_descr_get and tp_descr_set. */
PyTypeObject PyMemberDescr_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "member_descriptor",
    sizeof(PyMemberDescrObject),
    0,
    descr_dealloc,                              /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    member_repr,                                /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    descr_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    descr_methods,                              /* tp_methods */
    descr_members,                              /* tp_members */
    member_getset,                              /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    member_get,                                 /* tp_descr_get */
    member_set,                                 /* tp_descr_set */
};
/* Type of descriptors wrapping a PyGetSetDef (C-level getter/setter
   pairs). A data descriptor: both tp_descr_get and tp_descr_set. */
PyTypeObject PyGetSetDescr_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "getset_descriptor",
    sizeof(PyGetSetDescrObject),
    0,
    descr_dealloc,                              /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    getset_repr,                                /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    descr_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    0,                                          /* tp_methods */
    descr_members,                              /* tp_members */
    getset_getset,                              /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    getset_get,                                 /* tp_descr_get */
    getset_set,                                 /* tp_descr_set */
};
/* Type of descriptors wrapping C type slots (e.g. __add__ of int).
   Binding one to an instance produces a method-wrapper object
   (_PyMethodWrapper_Type below). */
PyTypeObject PyWrapperDescr_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "wrapper_descriptor",
    sizeof(PyWrapperDescrObject),
    0,
    descr_dealloc,                              /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    wrapperdescr_repr,                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    wrapperdescr_call,                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
    Py_TPFLAGS_METHOD_DESCRIPTOR,               /* tp_flags */
    0,                                          /* tp_doc */
    descr_traverse,                             /* tp_traverse */
    0,                                          /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    descr_methods,                              /* tp_methods */
    descr_members,                              /* tp_members */
    wrapperdescr_getset,                        /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    wrapperdescr_get,                           /* tp_descr_get */
    0,                                          /* tp_descr_set */
};
/* Common constructor for all descriptor flavors: allocate an instance of
   `descrtype`, take a reference to the owning `type`, and intern `name`.
   On name-interning failure the half-built descriptor is released and
   NULL is returned. The new object uses deferred reference counting. */
static PyDescrObject *
descr_new(PyTypeObject *descrtype, PyTypeObject *type, const char *name)
{
    PyDescrObject *descr;

    descr = (PyDescrObject *)PyType_GenericAlloc(descrtype, 0);
    if (descr != NULL) {
        _PyObject_SetDeferredRefcount((PyObject *)descr);
        descr->d_type = (PyTypeObject*)Py_XNewRef(type);
        descr->d_name = PyUnicode_InternFromString(name);
        if (descr->d_name == NULL) {
            Py_SETREF(descr, NULL);
        }
        else {
            descr->d_qualname = NULL;   /* computed lazily by descr_get_qualname */
        }
    }
    return descr;
}
/* Create a method descriptor for `method` on `type`.
   Selects the vectorcall implementation matching the METH_* calling
   convention; an unsupported flag combination raises SystemError. */
PyObject *
PyDescr_NewMethod(PyTypeObject *type, PyMethodDef *method)
{
    /* Figure out correct vectorcall function to use */
    vectorcallfunc vectorcall;
    switch (method->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS |
                                METH_O | METH_KEYWORDS | METH_METHOD))
    {
        case METH_VARARGS:
            vectorcall = method_vectorcall_VARARGS;
            break;
        case METH_VARARGS | METH_KEYWORDS:
            vectorcall = method_vectorcall_VARARGS_KEYWORDS;
            break;
        case METH_FASTCALL:
            vectorcall = method_vectorcall_FASTCALL;
            break;
        case METH_FASTCALL | METH_KEYWORDS:
            vectorcall = method_vectorcall_FASTCALL_KEYWORDS;
            break;
        case METH_NOARGS:
            vectorcall = method_vectorcall_NOARGS;
            break;
        case METH_O:
            vectorcall = method_vectorcall_O;
            break;
        case METH_METHOD | METH_FASTCALL | METH_KEYWORDS:
            vectorcall = method_vectorcall_FASTCALL_KEYWORDS_METHOD;
            break;
        default:
            PyErr_Format(PyExc_SystemError,
                         "%s() method: bad call flags", method->ml_name);
            return NULL;
    }

    PyMethodDescrObject *descr;

    descr = (PyMethodDescrObject *)descr_new(&PyMethodDescr_Type,
                                             type, method->ml_name);
    if (descr != NULL) {
        descr->d_method = method;
        descr->vectorcall = vectorcall;
    }
    return (PyObject *)descr;
}
/* Create a classmethod descriptor (METH_CLASS) for `method` on `type`. */
PyObject *
PyDescr_NewClassMethod(PyTypeObject *type, PyMethodDef *method)
{
    PyMethodDescrObject *descr;

    descr = (PyMethodDescrObject *)descr_new(&PyClassMethodDescr_Type,
                                             type, method->ml_name);
    if (descr != NULL)
        descr->d_method = method;
    return (PyObject *)descr;
}

/* Create a member descriptor for `member` on `type`.
   Py_RELATIVE_OFFSET members are only valid with the heap-type machinery,
   so they are rejected here with SystemError. */
PyObject *
PyDescr_NewMember(PyTypeObject *type, PyMemberDef *member)
{
    PyMemberDescrObject *descr;

    if (member->flags & Py_RELATIVE_OFFSET) {
        PyErr_SetString(
            PyExc_SystemError,
            "PyDescr_NewMember used with Py_RELATIVE_OFFSET");
        return NULL;
    }
    descr = (PyMemberDescrObject *)descr_new(&PyMemberDescr_Type,
                                             type, member->name);
    if (descr != NULL)
        descr->d_member = member;
    return (PyObject *)descr;
}

/* Create a getset descriptor for `getset` on `type`. */
PyObject *
PyDescr_NewGetSet(PyTypeObject *type, PyGetSetDef *getset)
{
    PyGetSetDescrObject *descr;

    descr = (PyGetSetDescrObject *)descr_new(&PyGetSetDescr_Type,
                                             type, getset->name);
    if (descr != NULL)
        descr->d_getset = getset;
    return (PyObject *)descr;
}
/* Create a slot-wrapper descriptor binding `base` (slot metadata) to the
   `wrapped` C function pointer, owned by `type`. */
PyObject *
PyDescr_NewWrapper(PyTypeObject *type, struct wrapperbase *base, void *wrapped)
{
    PyWrapperDescrObject *descr;

    descr = (PyWrapperDescrObject *)descr_new(&PyWrapperDescr_Type,
                                             type, base->name);
    if (descr != NULL) {
        descr->d_base = base;
        descr->d_wrapped = wrapped;
    }
    return (PyObject *)descr;
}

/* A data descriptor is one whose type implements __set__ (tp_descr_set);
   data descriptors take precedence over instance dicts. */
int
PyDescr_IsData(PyObject *ob)
{
    return Py_TYPE(ob)->tp_descr_set != NULL;
}
/* --- mappingproxy: read-only proxy for mappings --- */

/* This has no reason to be in this file except that adding new files is a
   bit of a pain */

/* A mappingproxy holds a single strong reference to the mapping it
   proxies; every operation forwards to that mapping, and no mutating
   slots (mp_ass_subscript etc.) are provided. */
typedef struct {
    PyObject_HEAD
    PyObject *mapping;
} mappingproxyobject;

/* len(proxy) -> len(underlying mapping) */
static Py_ssize_t
mappingproxy_len(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyObject_Size(pp->mapping);
}

/* proxy[key] -> mapping[key] */
static PyObject *
mappingproxy_getitem(PyObject *self, PyObject *key)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyObject_GetItem(pp->mapping, key);
}

static PyMappingMethods mappingproxy_as_mapping = {
    mappingproxy_len,                           /* mp_length */
    mappingproxy_getitem,                       /* mp_subscript */
    0,                                          /* mp_ass_subscript */
};
/* proxy | other (or other | proxy): unwrap any mappingproxy operand and
   delegate to the underlying mappings' `|` implementation. */
static PyObject *
mappingproxy_or(PyObject *left, PyObject *right)
{
    if (PyObject_TypeCheck(left, &PyDictProxy_Type)) {
        left = ((mappingproxyobject*)left)->mapping;
    }
    if (PyObject_TypeCheck(right, &PyDictProxy_Type)) {
        right = ((mappingproxyobject*)right)->mapping;
    }
    return PyNumber_Or(left, right);
}

/* proxy |= other is always rejected: in-place update would defeat the
   read-only contract, so raise TypeError pointing at `|` instead. */
static PyObject *
mappingproxy_ior(PyObject *self, PyObject *Py_UNUSED(other))
{
    return PyErr_Format(PyExc_TypeError,
        "'|=' is not supported by %s; use '|' instead", Py_TYPE(self)->tp_name);
}

static PyNumberMethods mappingproxy_as_number = {
    .nb_or = mappingproxy_or,
    .nb_inplace_or = mappingproxy_ior,
};
/* key in proxy: use the fast dict path when the underlying mapping is
   exactly a dict, otherwise the generic containment protocol. */
static int
mappingproxy_contains(PyObject *self, PyObject *key)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    if (PyDict_CheckExact(pp->mapping))
        return PyDict_Contains(pp->mapping, key);
    else
        return PySequence_Contains(pp->mapping, key);
}

/* Only sq_contains is populated; all other sequence slots stay empty. */
static PySequenceMethods mappingproxy_as_sequence = {
    0,                                          /* sq_length */
    0,                                          /* sq_concat */
    0,                                          /* sq_repeat */
    0,                                          /* sq_item */
    0,                                          /* sq_slice */
    0,                                          /* sq_ass_item */
    0,                                          /* sq_ass_slice */
    mappingproxy_contains,                      /* sq_contains */
    0,                                          /* sq_inplace_concat */
    0,                                          /* sq_inplace_repeat */
};
/* proxy.get(key, default=None): forwarded as mapping.get(key, default)
   via a vectorcall method call. newargs[0] is the bound "self" slot for
   PY_VECTORCALL_ARGUMENTS_OFFSET. */
static PyObject *
mappingproxy_get(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    /* newargs: mapping, key, default=None */
    PyObject *newargs[3];
    newargs[0] = pp->mapping;
    newargs[2] = Py_None;

    if (!_PyArg_UnpackStack(args, nargs, "get", 1, 2,
                            &newargs[1], &newargs[2]))
    {
        return NULL;
    }
    return PyObject_VectorcallMethod(&_Py_ID(get), newargs,
                                     3 | PY_VECTORCALL_ARGUMENTS_OFFSET,
                                     NULL);
}
/* proxy.keys(): delegate to mapping.keys(). */
static PyObject *
mappingproxy_keys(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *mapping = ((mappingproxyobject *)self)->mapping;
    return PyObject_CallMethodNoArgs(mapping, &_Py_ID(keys));
}

/* proxy.values(): delegate to mapping.values(). */
static PyObject *
mappingproxy_values(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *mapping = ((mappingproxyobject *)self)->mapping;
    return PyObject_CallMethodNoArgs(mapping, &_Py_ID(values));
}

/* proxy.items(): delegate to mapping.items(). */
static PyObject *
mappingproxy_items(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *mapping = ((mappingproxyobject *)self)->mapping;
    return PyObject_CallMethodNoArgs(mapping, &_Py_ID(items));
}

/* proxy.copy(): delegate to mapping.copy(). */
static PyObject *
mappingproxy_copy(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *mapping = ((mappingproxyobject *)self)->mapping;
    return PyObject_CallMethodNoArgs(mapping, &_Py_ID(copy));
}

/* reversed(proxy): delegate to mapping.__reversed__(). */
static PyObject *
mappingproxy_reversed(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    PyObject *mapping = ((mappingproxyobject *)self)->mapping;
    return PyObject_CallMethodNoArgs(mapping, &_Py_ID(__reversed__));
}
/* WARNING: mappingproxy methods must not give access
   to the underlying mapping */

static PyMethodDef mappingproxy_methods[] = {
    {"get",       _PyCFunction_CAST(mappingproxy_get), METH_FASTCALL,
     PyDoc_STR("get($self, key, default=None, /)\n--\n\n"
               "Return the value for key if key is in the mapping, else default.")},
    {"keys",      mappingproxy_keys,     METH_NOARGS,
     PyDoc_STR("D.keys() -> a set-like object providing a view on D's keys")},
    {"values",    mappingproxy_values,   METH_NOARGS,
     PyDoc_STR("D.values() -> an object providing a view on D's values")},
    {"items",     mappingproxy_items,    METH_NOARGS,
     PyDoc_STR("D.items() -> a set-like object providing a view on D's items")},
    {"copy",      mappingproxy_copy,     METH_NOARGS,
     PyDoc_STR("D.copy() -> a shallow copy of D")},
    {"__class_getitem__", Py_GenericAlias, METH_O|METH_CLASS,
     PyDoc_STR("See PEP 585")},
    {"__reversed__", mappingproxy_reversed, METH_NOARGS,
     PyDoc_STR("D.__reversed__() -> reverse iterator")},
    {0}
};
/* Deallocator: untrack from GC first, then drop the mapping reference. */
static void
mappingproxy_dealloc(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    _PyObject_GC_UNTRACK(pp);
    Py_DECREF(pp->mapping);
    PyObject_GC_Del(pp);
}

/* iter(proxy) -> iter(mapping) */
static PyObject *
mappingproxy_getiter(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyObject_GetIter(pp->mapping);
}

/* hash(proxy) -> hash(mapping); fails for unhashable mappings. */
static Py_hash_t
mappingproxy_hash(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyObject_Hash(pp->mapping);
}

/* str(proxy) -> str(mapping) */
static PyObject *
mappingproxy_str(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyObject_Str(pp->mapping);
}

/* repr(proxy) -> "mappingproxy(<repr of mapping>)" */
static PyObject *
mappingproxy_repr(PyObject *self)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    return PyUnicode_FromFormat("mappingproxy(%R)", pp->mapping);
}

/* GC traversal: the proxy's only reference is the mapping. */
static int
mappingproxy_traverse(PyObject *self, visitproc visit, void *arg)
{
    mappingproxyobject *pp = (mappingproxyobject *)self;
    Py_VISIT(pp->mapping);
    return 0;
}
/* Equality/inequality compare the underlying mapping with the other
   operand; all ordering comparisons return NotImplemented. */
static PyObject *
mappingproxy_richcompare(PyObject *self, PyObject *w, int op)
{
    mappingproxyobject *v = (mappingproxyobject *)self;
    if (op == Py_EQ || op == Py_NE) {
        return PyObject_RichCompare(v->mapping, w, op);
    }
    Py_RETURN_NOTIMPLEMENTED;
}

/* Validate a candidate mapping. Lists and tuples support the mapping
   protocol at the C level but are explicitly rejected. Returns 0 on
   success, -1 with TypeError set otherwise. */
static int
mappingproxy_check_mapping(PyObject *mapping)
{
    if (!PyMapping_Check(mapping)
        || PyList_Check(mapping)
        || PyTuple_Check(mapping)) {
        PyErr_Format(PyExc_TypeError,
                    "mappingproxy() argument must be a mapping, not %s",
                    Py_TYPE(mapping)->tp_name);
        return -1;
    }
    return 0;
}
/*[clinic input]
@classmethod
mappingproxy.__new__ as mappingproxy_new
mapping: object
Read-only proxy of a mapping.
[clinic start generated code]*/
/* Python-level constructor (clinic-generated entry point calls this):
   validate the argument and wrap it. Note: the result is always a
   PyDictProxy_Type instance regardless of `type` (subclassing is not
   honored here). */
static PyObject *
mappingproxy_new_impl(PyTypeObject *type, PyObject *mapping)
/*[clinic end generated code: output=65f27f02d5b68fa7 input=c156df096ef7590c]*/
{
    mappingproxyobject *mappingproxy;

    if (mappingproxy_check_mapping(mapping) == -1)
        return NULL;

    mappingproxy = PyObject_GC_New(mappingproxyobject, &PyDictProxy_Type);
    if (mappingproxy == NULL)
        return NULL;
    mappingproxy->mapping = Py_NewRef(mapping);
    _PyObject_GC_TRACK(mappingproxy);
    return (PyObject *)mappingproxy;
}

/* C API constructor: same validation and wrapping as the Python-level
   constructor above. */
PyObject *
PyDictProxy_New(PyObject *mapping)
{
    mappingproxyobject *pp;

    if (mappingproxy_check_mapping(mapping) == -1)
        return NULL;

    pp = PyObject_GC_New(mappingproxyobject, &PyDictProxy_Type);
    if (pp != NULL) {
        pp->mapping = Py_NewRef(mapping);
        _PyObject_GC_TRACK(pp);
    }
    return (PyObject *)pp;
}
/* --- Wrapper object for "slot" methods --- */

/* This has no reason to be in this file except that adding new files is a
   bit of a pain */

/* A bound slot wrapper: pairs a wrapper_descriptor with the instance it
   was retrieved from (e.g. the object returned by `(1).__add__`). */
typedef struct {
    PyObject_HEAD
    PyWrapperDescrObject *descr;    /* the unbound wrapper_descriptor */
    PyObject *self;                 /* the bound instance */
} wrapperobject;

#define Wrapper_Check(v) Py_IS_TYPE(v, &_PyMethodWrapper_Type)

/* Deallocator: untrack, then release both references. */
static void
wrapper_dealloc(PyObject *self)
{
    wrapperobject *wp = (wrapperobject *)self;
    PyObject_GC_UnTrack(wp);
    Py_XDECREF(wp->descr);
    Py_XDECREF(wp->self);
    PyObject_GC_Del(wp);
}
/* Two method-wrappers are equal iff they wrap the same descriptor and
   the identical bound instance (pointer identity, not __eq__). Only
   ==/!= between two wrappers is supported. */
static PyObject *
wrapper_richcompare(PyObject *a, PyObject *b, int op)
{
    wrapperobject *wa, *wb;
    int eq;

    assert(a != NULL && b != NULL);

    /* both arguments should be wrapperobjects */
    if ((op != Py_EQ && op != Py_NE)
        || !Wrapper_Check(a) || !Wrapper_Check(b))
    {
        Py_RETURN_NOTIMPLEMENTED;
    }

    wa = (wrapperobject *)a;
    wb = (wrapperobject *)b;
    eq = (wa->descr == wb->descr && wa->self == wb->self);
    if (eq == (op == Py_EQ)) {
        Py_RETURN_TRUE;
    }
    else {
        Py_RETURN_FALSE;
    }
}

/* Hash combines the bound instance's hash with the descriptor's pointer
   hash; -1 is remapped to -2 since -1 signals an error to callers. */
static Py_hash_t
wrapper_hash(PyObject *self)
{
    wrapperobject *wp = (wrapperobject *)self;
    Py_hash_t x, y;
    x = PyObject_GenericHash(wp->self);
    y = Py_HashPointer(wp->descr);
    x = x ^ y;
    if (x == -1)
        x = -2;
    return x;
}
/* repr: "<method-wrapper '<slot name>' of <type> object at 0x...>" */
static PyObject *
wrapper_repr(PyObject *self)
{
    wrapperobject *wp = (wrapperobject *)self;
    return PyUnicode_FromFormat("<method-wrapper '%s' of %s object at %p>",
                               wp->descr->d_base->name,
                               Py_TYPE(wp->self)->tp_name,
                               wp->self);
}

/* __reduce__: pickle as getattr(bound instance, slot name). */
static PyObject *
wrapper_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
{
    wrapperobject *wp = (wrapperobject *)self;
    return Py_BuildValue("N(OO)", _PyEval_GetBuiltin(&_Py_ID(getattr)),
                         wp->self, PyDescr_NAME(wp->descr));
}

static PyMethodDef wrapper_methods[] = {
    {"__reduce__", wrapper_reduce, METH_NOARGS, NULL},
    {NULL, NULL}
};

static PyMemberDef wrapper_members[] = {
    {"__self__", _Py_T_OBJECT, offsetof(wrapperobject, self), Py_READONLY},
    {0}
};
/* __objclass__: the type that owns the wrapped slot descriptor. */
static PyObject *
wrapper_objclass(PyObject *wp, void *Py_UNUSED(ignored))
{
    PyObject *c = (PyObject *)PyDescr_TYPE(((wrapperobject *)wp)->descr);

    return Py_NewRef(c);
}

/* __name__: the slot's C-level name (e.g. "__add__"). */
static PyObject *
wrapper_name(PyObject *wp, void *Py_UNUSED(ignored))
{
    const char *s = ((wrapperobject *)wp)->descr->d_base->name;

    return PyUnicode_FromString(s);
}

/* __doc__: derived from the wrapped slot's internal doc string. */
static PyObject *
wrapper_doc(PyObject *self, void *Py_UNUSED(ignored))
{
    wrapperobject *wp = (wrapperobject *)self;
    return _PyType_GetDocFromInternalDoc(wp->descr->d_base->name, wp->descr->d_base->doc);
}

/* __text_signature__: derived from the same internal doc string. */
static PyObject *
wrapper_text_signature(PyObject *self, void *Py_UNUSED(ignored))
{
    wrapperobject *wp = (wrapperobject *)self;
    return _PyType_GetTextSignatureFromInternalDoc(wp->descr->d_base->name,
                                                   wp->descr->d_base->doc, 0);
}

/* __qualname__: reuse the underlying descriptor's qualname. */
static PyObject *
wrapper_qualname(PyObject *self, void *Py_UNUSED(ignored))
{
    wrapperobject *wp = (wrapperobject *)self;
    return descr_get_qualname((PyObject *)wp->descr, NULL);
}

static PyGetSetDef wrapper_getsets[] = {
    {"__objclass__", wrapper_objclass},
    {"__name__", wrapper_name},
    {"__qualname__", wrapper_qualname},
    {"__doc__", wrapper_doc},
    {"__text_signature__", wrapper_text_signature},
    {0}
};
/* Calling a method-wrapper invokes the wrapped slot on the stored bound
   instance with the given args/kwds. */
static PyObject *
wrapper_call(PyObject *self, PyObject *args, PyObject *kwds)
{
    wrapperobject *wp = (wrapperobject *)self;
    return wrapperdescr_raw_call(wp->descr, wp->self, args, kwds);
}

/* GC traversal: both references are visited. */
static int
wrapper_traverse(PyObject *self, visitproc visit, void *arg)
{
    wrapperobject *wp = (wrapperobject *)self;
    Py_VISIT(wp->descr);
    Py_VISIT(wp->self);
    return 0;
}
/* Type of bound slot wrappers (wrapperobject above); created only by
   PyWrapper_New below — it has no tp_new. */
PyTypeObject _PyMethodWrapper_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "method-wrapper",                           /* tp_name */
    sizeof(wrapperobject),                      /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    wrapper_dealloc,                            /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    wrapper_repr,                               /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    wrapper_hash,                               /* tp_hash */
    wrapper_call,                               /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,    /* tp_flags */
    0,                                          /* tp_doc */
    wrapper_traverse,                           /* tp_traverse */
    0,                                          /* tp_clear */
    wrapper_richcompare,                        /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    wrapper_methods,                            /* tp_methods */
    wrapper_members,                            /* tp_members */
    wrapper_getsets,                            /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
};
/* Bind wrapper_descriptor `d` to instance `self`, producing a
   method-wrapper. Asserts (debug builds only) that `self` is an instance
   of the descriptor's owning type. */
PyObject *
PyWrapper_New(PyObject *d, PyObject *self)
{
    wrapperobject *wp;
    PyWrapperDescrObject *descr;

    assert(PyObject_TypeCheck(d, &PyWrapperDescr_Type));
    descr = (PyWrapperDescrObject *)d;
    assert(_PyObject_RealIsSubclass((PyObject *)Py_TYPE(self),
                                    (PyObject *)PyDescr_TYPE(descr)));

    wp = PyObject_GC_New(wrapperobject, &_PyMethodWrapper_Type);
    if (wp != NULL) {
        wp->descr = (PyWrapperDescrObject*)Py_NewRef(descr);
        wp->self = Py_NewRef(self);
        _PyObject_GC_TRACK(wp);
    }
    return (PyObject *)wp;
}
/* A built-in 'property' type */

#define _propertyobject_CAST(op)    ((propertyobject *)(op))

/*
    class property(object):

        def __init__(self, fget=None, fset=None, fdel=None, doc=None):
            if doc is None and fget is not None and hasattr(fget, "__doc__"):
                doc = fget.__doc__
            self.__get = fget
            self.__set = fset
            self.__del = fdel
            try:
                self.__doc__ = doc
            except AttributeError:  # read-only or dict-less class
                pass
            self.__name = None

        def __set_name__(self, owner, name):
            self.__name = name

        @property
        def __name__(self):
            return self.__name if self.__name is not None else self.fget.__name__

        @__name__.setter
        def __name__(self, value):
            self.__name = value

        def __get__(self, inst, type=None):
            if inst is None:
                return self
            if self.__get is None:
                raise AttributeError("property has no getter")
            return self.__get(inst)

        def __set__(self, inst, value):
            if self.__set is None:
                raise AttributeError("property has no setter")
            return self.__set(inst, value)

        def __delete__(self, inst):
            if self.__del is None:
                raise AttributeError("property has no deleter")
            return self.__del(inst)

*/

/* Helper used by getter()/setter()/deleter() below; defined later. */
static PyObject * property_copy(PyObject *, PyObject *, PyObject *,
                                  PyObject *);

/* fget/fset/fdel are read-only; __doc__ is writable. */
static PyMemberDef property_members[] = {
    {"fget", _Py_T_OBJECT, offsetof(propertyobject, prop_get), Py_READONLY},
    {"fset", _Py_T_OBJECT, offsetof(propertyobject, prop_set), Py_READONLY},
    {"fdel", _Py_T_OBJECT, offsetof(propertyobject, prop_del), Py_READONLY},
    {"__doc__", _Py_T_OBJECT, offsetof(propertyobject, prop_doc), 0},
    {0}
};
PyDoc_STRVAR(getter_doc,
             "Descriptor to obtain a copy of the property with a different getter.");

/* prop.getter(f): new property with fget replaced by f. */
static PyObject *
property_getter(PyObject *self, PyObject *getter)
{
    return property_copy(self, getter, NULL, NULL);
}

PyDoc_STRVAR(setter_doc,
             "Descriptor to obtain a copy of the property with a different setter.");

/* prop.setter(f): new property with fset replaced by f. */
static PyObject *
property_setter(PyObject *self, PyObject *setter)
{
    return property_copy(self, NULL, setter, NULL);
}

PyDoc_STRVAR(deleter_doc,
             "Descriptor to obtain a copy of the property with a different deleter.");

/* prop.deleter(f): new property with fdel replaced by f. */
static PyObject *
property_deleter(PyObject *self, PyObject *deleter)
{
    return property_copy(self, NULL, NULL, deleter);
}
PyDoc_STRVAR(set_name_doc,
             "__set_name__($self, owner, name, /)\n"
             "--\n"
             "\n"
             "Method to set name of a property.");

/* __set_name__(owner, name): store `name` in prop_name so error messages
   and __name__ can use it. The `owner` argument (args[0]) is ignored. */
static PyObject *
property_set_name(PyObject *self, PyObject *args) {
    if (PyTuple_GET_SIZE(args) != 2) {
        /* PyTuple_GET_SIZE is Py_ssize_t, so use %zd — %d would read the
           vararg as int, which is undefined behavior on LP64 platforms. */
        PyErr_Format(
                PyExc_TypeError,
                "__set_name__() takes 2 positional arguments but %zd were given",
                PyTuple_GET_SIZE(args));
        return NULL;
    }

    propertyobject *prop = (propertyobject *)self;
    PyObject *name = PyTuple_GET_ITEM(args, 1);

    Py_XSETREF(prop->prop_name, Py_XNewRef(name));

    Py_RETURN_NONE;
}
/* Python-visible methods of property. */
static PyMethodDef property_methods[] = {
    {"getter", property_getter, METH_O, getter_doc},
    {"setter", property_setter, METH_O, setter_doc},
    {"deleter", property_deleter, METH_O, deleter_doc},
    {"__set_name__", property_set_name, METH_VARARGS, set_name_doc},
    {0}
};
/* Deallocator: release all five stored references, then free via
   tp_free so property subclasses deallocate correctly. */
static void
property_dealloc(PyObject *self)
{
    propertyobject *gs = (propertyobject *)self;

    _PyObject_GC_UNTRACK(self);
    Py_XDECREF(gs->prop_get);
    Py_XDECREF(gs->prop_set);
    Py_XDECREF(gs->prop_del);
    Py_XDECREF(gs->prop_doc);
    Py_XDECREF(gs->prop_name);
    Py_TYPE(self)->tp_free(self);
}

/* Resolve the property's display name into *name (new reference).
   Preference order: explicit prop_name, else the getter's __name__.
   Returns 1 if found, 0 if absent (*name set to NULL), -1 on error. */
static int
property_name(propertyobject *prop, PyObject **name)
{
    if (prop->prop_name != NULL) {
        *name = Py_NewRef(prop->prop_name);
        return 1;
    }
    if (prop->prop_get == NULL) {
        *name = NULL;
        return 0;
    }
    return PyObject_GetOptionalAttr(prop->prop_get, &_Py_ID(__name__), name);
}
/* __get__: class access (obj is NULL/None) returns the property itself;
   instance access calls fget(obj). With no getter, raise AttributeError,
   including the property name and owner qualname when available. */
static PyObject *
property_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
    if (obj == NULL || obj == Py_None) {
        return Py_NewRef(self);
    }

    propertyobject *gs = (propertyobject *)self;
    if (gs->prop_get == NULL) {
        PyObject *propname;
        if (property_name(gs, &propname) < 0) {
            return NULL;
        }
        PyObject *qualname = PyType_GetQualName(Py_TYPE(obj));
        if (propname != NULL && qualname != NULL) {
            PyErr_Format(PyExc_AttributeError,
                         "property %R of %R object has no getter",
                         propname,
                         qualname);
        }
        else if (qualname != NULL) {
            PyErr_Format(PyExc_AttributeError,
                         "property of %R object has no getter",
                         qualname);
        } else {
            PyErr_SetString(PyExc_AttributeError,
                            "property has no getter");
        }
        Py_XDECREF(propname);
        Py_XDECREF(qualname);
        return NULL;
    }

    return PyObject_CallOneArg(gs->prop_get, obj);
}
/* __set__/__delete__ combined (value == NULL means delete): dispatch to
   fset(obj, value) or fdel(obj). With no matching function, raise
   AttributeError with as much name/owner context as is available. */
static int
property_descr_set(PyObject *self, PyObject *obj, PyObject *value)
{
    propertyobject *gs = (propertyobject *)self;
    PyObject *func, *res;

    if (value == NULL) {
        func = gs->prop_del;
    }
    else {
        func = gs->prop_set;
    }

    if (func == NULL) {
        PyObject *propname;
        if (property_name(gs, &propname) < 0) {
            return -1;
        }
        PyObject *qualname = NULL;
        if (obj != NULL) {
            qualname = PyType_GetQualName(Py_TYPE(obj));
        }
        if (propname != NULL && qualname != NULL) {
            PyErr_Format(PyExc_AttributeError,
                        value == NULL ?
                        "property %R of %R object has no deleter" :
                        "property %R of %R object has no setter",
                        propname,
                        qualname);
        }
        else if (qualname != NULL) {
            PyErr_Format(PyExc_AttributeError,
                            value == NULL ?
                            "property of %R object has no deleter" :
                            "property of %R object has no setter",
                            qualname);
        }
        else {
            PyErr_SetString(PyExc_AttributeError,
                            value == NULL ?
                            "property has no deleter" :
                            "property has no setter");
        }
        Py_XDECREF(propname);
        Py_XDECREF(qualname);
        return -1;
    }

    if (value == NULL) {
        res = PyObject_CallOneArg(func, obj);
    }
    else {
        EVAL_CALL_STAT_INC_IF_FUNCTION(EVAL_CALL_API, func);
        PyObject *args[] = { obj, value };
        res = PyObject_Vectorcall(func, args, 2, NULL);
    }

    if (res == NULL) {
        return -1;
    }

    Py_DECREF(res);
    return 0;
}
/* Build a new property of the same (possibly subclass) type as `old`,
   replacing whichever of get/set/del is non-NULL and keeping the rest.
   Backs getter()/setter()/deleter(). prop_name is carried over when the
   result really is a property (a subclass __init__ could return
   something else via type(old)(...)). */
static PyObject *
property_copy(PyObject *old, PyObject *get, PyObject *set, PyObject *del)
{
    propertyobject *pold = (propertyobject *)old;
    PyObject *new, *type, *doc;

    type = PyObject_Type(old);
    if (type == NULL)
        return NULL;

    if (get == NULL || get == Py_None) {
        get = pold->prop_get ? pold->prop_get : Py_None;
    }
    if (set == NULL || set == Py_None) {
        set = pold->prop_set ? pold->prop_set : Py_None;
    }
    if (del == NULL || del == Py_None) {
        del = pold->prop_del ? pold->prop_del : Py_None;
    }
    if (pold->getter_doc && get != Py_None) {
        /* make _init use __doc__ from getter */
        doc = Py_None;
    }
    else {
        doc = pold->prop_doc ? pold->prop_doc : Py_None;
    }

    new =  PyObject_CallFunctionObjArgs(type, get, set, del, doc, NULL);
    Py_DECREF(type);
    if (new == NULL)
        return NULL;

    if (PyObject_TypeCheck((new), &PyProperty_Type)) {
        Py_XSETREF(((propertyobject *) new)->prop_name, Py_XNewRef(pold->prop_name));
    }
    return new;
}
/*[clinic input]
property.__init__ as property_init
fget: object(c_default="NULL") = None
function to be used for getting an attribute value
fset: object(c_default="NULL") = None
function to be used for setting an attribute value
fdel: object(c_default="NULL") = None
function to be used for del'ing an attribute
doc: object(c_default="NULL") = None
docstring
Property attribute.
Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
[clinic start generated code]*/
/* property.__init__ implementation (called from the clinic-generated
   wrapper). None arguments are normalized to NULL; when doc is omitted,
   the getter's __doc__ is used and getter_doc records that fact so
   getter()/setter()/deleter() copies re-derive it. For subclasses, the
   doc is stored via setattr on the instance rather than prop_doc, so it
   is not shadowed by the class's own __doc__. */
static int
property_init_impl(propertyobject *self, PyObject *fget, PyObject *fset,
                   PyObject *fdel, PyObject *doc)
/*[clinic end generated code: output=01a960742b692b57 input=dfb5dbbffc6932d5]*/
{
    if (fget == Py_None)
        fget = NULL;
    if (fset == Py_None)
        fset = NULL;
    if (fdel == Py_None)
        fdel = NULL;

    Py_XSETREF(self->prop_get, Py_XNewRef(fget));
    Py_XSETREF(self->prop_set, Py_XNewRef(fset));
    Py_XSETREF(self->prop_del, Py_XNewRef(fdel));
    Py_XSETREF(self->prop_doc, NULL);
    Py_XSETREF(self->prop_name, NULL);

    self->getter_doc = 0;
    PyObject *prop_doc = NULL;

    if (doc != NULL && doc != Py_None) {
        prop_doc = Py_XNewRef(doc);
    }
    /* if no docstring given and the getter has one, use that one */
    else if (fget != NULL) {
        int rc = PyObject_GetOptionalAttr(fget, &_Py_ID(__doc__), &prop_doc);
        if (rc < 0) {
            return rc;
        }
        if (prop_doc == Py_None) {
            prop_doc = NULL;
            Py_DECREF(Py_None);
        }
        if (prop_doc != NULL){
            self->getter_doc = 1;
        }
    }

    /* At this point `prop_doc` is either NULL or
       a non-None object with incremented ref counter */

    if (Py_IS_TYPE(self, &PyProperty_Type)) {
        Py_XSETREF(self->prop_doc, prop_doc);
    } else {
        /* If this is a property subclass, put __doc__ in the dict
           or designated slot of the subclass instance instead, otherwise
           it gets shadowed by __doc__ in the class's dict. */

        if (prop_doc == NULL) {
            prop_doc = Py_NewRef(Py_None);
        }
        int err = PyObject_SetAttr(
                    (PyObject *)self, &_Py_ID(__doc__), prop_doc);
        Py_DECREF(prop_doc);
        if (err < 0) {
            assert(PyErr_Occurred());
            if (!self->getter_doc &&
                PyErr_ExceptionMatches(PyExc_AttributeError))
            {
                PyErr_Clear();
                // https://github.com/python/cpython/issues/98963#issuecomment-1574413319
                // Python silently dropped this doc assignment through 3.11.
                // We preserve that behavior for backwards compatibility.
                //
                // If we ever want to deprecate this behavior, only raise a
                // warning or error when proc_doc is not None so that
                // property without a specific doc= still works.
                return 0;
            } else {
                return -1;
            }
        }
    }

    return 0;
}
/* __name__ getter: resolved via property_name(); raises AttributeError
   when neither prop_name nor a getter __name__ is available. */
static PyObject *
property_get__name__(PyObject *op, void *Py_UNUSED(ignored))
{
    propertyobject *prop = _propertyobject_CAST(op);
    PyObject *name;
    if (property_name(prop, &name) < 0) {
        return NULL;
    }
    if (name == NULL) {
        PyErr_SetString(PyExc_AttributeError,
                        "'property' object has no attribute '__name__'");
    }
    return name;
}

/* __name__ setter: stores any value unchecked (value may be NULL on
   `del prop.__name__`, which clears prop_name). */
static int
property_set__name__(PyObject *op, PyObject *value, void *Py_UNUSED(ignored))
{
    propertyobject *prop = _propertyobject_CAST(op);
    Py_XSETREF(prop->prop_name, Py_XNewRef(value));
    return 0;
}
/* __isabstractmethod__: True if any of fget/fset/fdel is abstract;
   each check can fail, so errors are propagated immediately. */
static PyObject *
property_get___isabstractmethod__(PyObject *op, void *closure)
{
    propertyobject *prop = _propertyobject_CAST(op);
    int res = _PyObject_IsAbstract(prop->prop_get);
    if (res == -1) {
        return NULL;
    }
    else if (res) {
        Py_RETURN_TRUE;
    }

    res = _PyObject_IsAbstract(prop->prop_set);
    if (res == -1) {
        return NULL;
    }
    else if (res) {
        Py_RETURN_TRUE;
    }

    res = _PyObject_IsAbstract(prop->prop_del);
    if (res == -1) {
        return NULL;
    }
    else if (res) {
        Py_RETURN_TRUE;
    }
    Py_RETURN_FALSE;
}

static PyGetSetDef property_getsetlist[] = {
    {"__name__", property_get__name__, property_set__name__, NULL, NULL},
    {"__isabstractmethod__", property_get___isabstractmethod__, NULL,
     NULL,
     NULL},
    {NULL} /* Sentinel */
};
/* GC traversal: visit all five stored references. */
static int
property_traverse(PyObject *self, visitproc visit, void *arg)
{
    propertyobject *pp = (propertyobject *)self;
    Py_VISIT(pp->prop_get);
    Py_VISIT(pp->prop_set);
    Py_VISIT(pp->prop_del);
    Py_VISIT(pp->prop_doc);
    Py_VISIT(pp->prop_name);
    return 0;
}

/* GC clear: only prop_doc is cleared here; the other references are
   released in property_dealloc. */
static int
property_clear(PyObject *self)
{
    propertyobject *pp = (propertyobject *)self;
    Py_CLEAR(pp->prop_doc);
    return 0;
}
#include "clinic/descrobject.c.h"
/* mappingproxy: a dynamic view over an underlying mapping. No tp_setattro
   is installed here, so attribute assignment on the proxy fails.
   NOTE(review): whether item assignment is also blocked depends on
   mappingproxy_as_mapping (defined elsewhere) — presumably it omits
   mp_ass_subscript; confirm. */
PyTypeObject PyDictProxy_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "mappingproxy",                             /* tp_name */
    sizeof(mappingproxyobject),                 /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    mappingproxy_dealloc,                       /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    mappingproxy_repr,                          /* tp_repr */
    &mappingproxy_as_number,                    /* tp_as_number */
    &mappingproxy_as_sequence,                  /* tp_as_sequence */
    &mappingproxy_as_mapping,                   /* tp_as_mapping */
    mappingproxy_hash,                          /* tp_hash */
    0,                                          /* tp_call */
    mappingproxy_str,                           /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_MAPPING,                     /* tp_flags */
    mappingproxy_new__doc__,                    /* tp_doc */
    mappingproxy_traverse,                      /* tp_traverse */
    0,                                          /* tp_clear */
    mappingproxy_richcompare,                   /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    mappingproxy_getiter,                       /* tp_iter */
    0,                                          /* tp_iternext */
    mappingproxy_methods,                       /* tp_methods */
    0,                                          /* tp_members */
    0,                                          /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    0,                                          /* tp_descr_get */
    0,                                          /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    0,                                          /* tp_init */
    0,                                          /* tp_alloc */
    mappingproxy_new,                           /* tp_new */
};
/* property: descriptor type implementing the built-in property().
   tp_descr_get/tp_descr_set dispatch to the stored accessors; the type
   is subclassable (Py_TPFLAGS_BASETYPE) and GC-tracked. */
PyTypeObject PyProperty_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "property",                                 /* tp_name */
    sizeof(propertyobject),                     /* tp_basicsize */
    0,                                          /* tp_itemsize */
    /* methods */
    property_dealloc,                           /* tp_dealloc */
    0,                                          /* tp_vectorcall_offset */
    0,                                          /* tp_getattr */
    0,                                          /* tp_setattr */
    0,                                          /* tp_as_async */
    0,                                          /* tp_repr */
    0,                                          /* tp_as_number */
    0,                                          /* tp_as_sequence */
    0,                                          /* tp_as_mapping */
    0,                                          /* tp_hash */
    0,                                          /* tp_call */
    0,                                          /* tp_str */
    PyObject_GenericGetAttr,                    /* tp_getattro */
    0,                                          /* tp_setattro */
    0,                                          /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_BASETYPE,                    /* tp_flags */
    property_init__doc__,                       /* tp_doc */
    property_traverse,                          /* tp_traverse */
    property_clear,                             /* tp_clear */
    0,                                          /* tp_richcompare */
    0,                                          /* tp_weaklistoffset */
    0,                                          /* tp_iter */
    0,                                          /* tp_iternext */
    property_methods,                           /* tp_methods */
    property_members,                           /* tp_members */
    property_getsetlist,                        /* tp_getset */
    0,                                          /* tp_base */
    0,                                          /* tp_dict */
    property_descr_get,                         /* tp_descr_get */
    property_descr_set,                         /* tp_descr_set */
    0,                                          /* tp_dictoffset */
    property_init,                              /* tp_init */
    PyType_GenericAlloc,                        /* tp_alloc */
    PyType_GenericNew,                          /* tp_new */
    PyObject_GC_Del,                            /* tp_free */
};
# Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <dev@dariogiovannetti.net>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
def add(cursor):
    """Hook run when this extension's schema is installed.

    Currently a no-op: no tables or rows are created here.
    """
    pass
def remove(cursor):
    """Hook run when this extension's schema is removed.

    NOTE(review): the comment below says the associated rows in the Rules
    table should be removed (bug #330), but nothing is implemented yet.
    """
    # Remove the associated rows in the Rules table (bug #330)
    pass
def upgrade_0_to_1(cursor):
    """Migrate this extension's schema from version 0 to version 1.

    Placeholder only: no migration queries exist yet.
    """
    # Placeholder/example
    # These queries must stay here because they must not be updated with the
    # normal queries
    pass
# Temporary suite to assist the development of primary-driven index builds.
# Run all tests in replica set jscore passthrough suite with
# PrimaryDrivenIndexBuilds feature flag enabled.
#
# TODO(SERVER-108818): remove this temporary suite and all its usages.
base_suite: replica_sets_jscore_passthrough_base
overrides:
- "primary_driven_index_builds.enable_primary_driven_index_builds_feature_flag_mongod"
excludes:
- "primary_driven_index_builds.exclude_primary_driven_index_builds_incompatible_tag" | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/matrix_suites/mappings/replica_sets_jscore_passthrough_primary_driven_index_builds.yml |
'''
Build your own deep network using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Code references:
https://github.com/shouvikmani/Tensorflow-Deep-Learning-Tutorial/blob/master/tutorial.ipynb
https://github.com/aymericdamien/TensorFlow-Examples/
The source code modified modified by S.W. Oh.
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
# import Dense (fully-connected) layer and Convolution layer
from util.layer import Dense, Conv2D, BatchNorm
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./data/", one_hot=True)
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 10
display_step = 1
###### Build graph ######################################################
# Place holders
x = tf.placeholder(tf.float32, [None,28,28,1]) # mnist data image of shape [28,28,1]
y = tf.placeholder(tf.float32, [None,10]) # 0-9 digits recognition => 10 classes
is_train = tf.placeholder(tf.bool, shape=[]) # Train flag
######################################################################
# your code here !!
# Layer Usages:
# h = Conv2D(h, [3,3,1,8], [1,1,1,1], 'SAME', 'conv1')
# h = BatchNorm(h, is_train, decay=0.9, name='bn1')
# h = tf.nn.relu(h)
# h = tf.nn.max_pool(h, [1,2,2,1], [1,2,2,1], 'SAME')
# h = Dense(h, [8,10], 'fc1')
#######################################################################
pred = tf.nn.softmax(logit) # Softmax
# Directly compute loss from logit (to ensure stability and avoid overflow)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=y))
# Define optimizer and train_op
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#########################################################################
###### Start Training ###################################################
# Open a Session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = np.reshape(batch_xs, [batch_size,28,28,1])
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([train_op, cost], feed_dict={x: batch_xs, y: batch_ys, is_train: True})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({x: np.reshape(mnist.test.images, [-1,28,28,1]), y: mnist.test.labels, is_train: False})) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
import json
import six
# Properties that make a client_secrets.json file valid.
# The two recognized top-level keys of a client_secrets file:
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'
# Per client type: 'required' lists properties that must be present,
# 'string' lists properties whose values must be filled in (a value
# starting with '[[' is treated as an unconfigured template placeholder).
VALID_CLIENT = {
    TYPE_WEB: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri',
        ],
        'string': [
            'client_id',
            'client_secret',
        ],
    },
    TYPE_INSTALLED: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri',
        ],
        'string': [
            'client_id',
            'client_secret',
        ],
    },
}
class Error(Exception):
    """Base error for this module; all module exceptions derive from it."""
class InvalidClientSecretsError(Error):
    """Raised when a client secrets file is malformed or cannot be read."""
def _validate_clientsecrets(clientsecrets_dict):
    """Check parsed client secrets and split them into type and info.

    Args:
        clientsecrets_dict: dict, parsed contents of a client secrets file.

    Returns:
        tuple, (client type string, client info dict).

    Raises:
        InvalidClientSecretsError: if the dict is not a single "web" or
            "installed" entry with every required property filled in.
    """
    _INVALID_FILE_FORMAT_MSG = (
        'Invalid file format. See '
        'https://developers.google.com/api-client-library/'
        'python/guide/aaa_client_secrets')

    if clientsecrets_dict is None:
        raise InvalidClientSecretsError(_INVALID_FILE_FORMAT_MSG)

    # Exactly one (type, info) pair is expected; anything else is malformed.
    try:
        (client_type, client_info), = clientsecrets_dict.items()
    except (ValueError, AttributeError):
        raise InvalidClientSecretsError(
            _INVALID_FILE_FORMAT_MSG + ' '
            'Expected a JSON object with a single property for a "web" or '
            '"installed" application')

    if client_type not in VALID_CLIENT:
        raise InvalidClientSecretsError(
            'Unknown client type: {0}.'.format(client_type))

    schema = VALID_CLIENT[client_type]
    # Report the first required property that is absent.
    missing = [p for p in schema['required'] if p not in client_info]
    if missing:
        raise InvalidClientSecretsError(
            'Missing property "{0}" in a client type of "{1}".'.format(
                missing[0], client_type))
    # '[[' marks an unconfigured template placeholder.
    for prop_name in schema['string']:
        if client_info[prop_name].startswith('[['):
            raise InvalidClientSecretsError(
                'Property "{0}" is not configured.'.format(prop_name))
    return client_type, client_info
def load(fp):
    """Parse client secrets from an open file object and validate them."""
    return _validate_clientsecrets(json.load(fp))
def loads(s):
    """Parse client secrets from a JSON string and validate them."""
    return _validate_clientsecrets(json.loads(s))
def _loadfile(filename):
    """Read, parse, and validate client secrets from the named file.

    Raises:
        InvalidClientSecretsError: if the file cannot be opened (wrapping
            the IOError details) or fails validation.
    """
    try:
        with open(filename, 'r') as fp:
            parsed = json.load(fp)
    except IOError as exc:
        raise InvalidClientSecretsError('Error opening file', exc.filename,
                                        exc.strerror, exc.errno)
    return _validate_clientsecrets(parsed)
def loadfile(filename, cache=None):
    """Load a client_secrets JSON file, optionally memoized through a cache.

    The cache client, when provided, must implement::

        get(key, namespace=ns)
        set(key, value, namespace=ns)

    (App Engine's memcache service is one such client.) Contents are
    validated only when the file is actually read from disk; cache hits
    are returned without re-validation.

    Usage::

        # without caching
        client_type, client_info = loadfile('secrets.json')
        # using App Engine memcache service
        from google.appengine.api import memcache
        client_type, client_info = loadfile('secrets.json', cache=memcache)

    Args:
        filename: string, Path to a client_secrets.json file on a filesystem.
        cache: An optional cache service client that implements get() and
            set() methods. If not specified, the file is always being
            loaded from a filesystem.

    Raises:
        InvalidClientSecretsError: In case of a validation error or some
            I/O failure. Can happen only on cache miss.

    Returns:
        (client_type, client_info) tuple, as _loadfile() normally would.
    """
    _SECRET_NAMESPACE = 'oauth2client:secrets#ns'

    if not cache:
        return _loadfile(filename)

    cached = cache.get(filename, namespace=_SECRET_NAMESPACE)
    if cached is None:
        client_type, client_info = _loadfile(filename)
        cached = {client_type: client_info}
        cache.set(filename, cached, namespace=_SECRET_NAMESPACE)
    return next(six.iteritems(cached))
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package stackruntime
import (
"context"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/terraform/internal/addrs"
"github.com/hashicorp/terraform/internal/providers"
"github.com/hashicorp/terraform/internal/stacks/stackaddrs"
"github.com/hashicorp/terraform/internal/stacks/stackconfig"
"github.com/hashicorp/terraform/internal/stacks/stackruntime/internal/stackeval"
"github.com/hashicorp/terraform/internal/stacks/stackstate"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
)
// EvalExpr evaluates the given expression in a specified evaluation
// environment and scope.
//
// This is intended for situations like the "terraform console" command which
// need to evaluate arbitrary expressions against a configuration and
// previously-established state snapshot.
func EvalExpr(ctx context.Context, expr hcl.Expression, req *EvalExprRequest) (cty.Value, tfdiags.Diagnostics) {
	// Build an inspection-only evaluator over the config/state pair,
	// carrying any caller-supplied variable values and provider factories.
	evaluator := stackeval.NewForInspecting(req.Config, req.State, stackeval.InspectOpts{
		InputVariableValues: req.InputValues,
		ProviderFactories:   req.ProviderFactories,
	})
	evaluator.AllowLanguageExperiments(req.ExperimentsAllowed)
	return evaluator.EvalExpr(ctx, expr, req.EvalStackInstance, stackeval.InspectPhase)
}
// EvalExprRequest represents the inputs to an [EvalExpr] call.
type EvalExprRequest struct {
	// Config and State together provide the global environment in which
	// the expression will be evaluated.
	Config *stackconfig.Config
	State  *stackstate.State

	// EvalStackInstance is the address of the stack instance where the
	// expression is to be evaluated. If unspecified, the default is
	// to evaluate in the root stack instance.
	EvalStackInstance stackaddrs.StackInstance

	// InputValues and ProviderFactories are both optional extras to
	// provide a more complete evaluation environment, although neither
	// needs to be provided if the expression to be evaluated doesn't
	// (directly or indirectly) make use of input variables or provider
	// configurations corresponding to these.
	InputValues       map[stackaddrs.InputVariable]ExternalInputValue
	ProviderFactories map[addrs.Provider]providers.Factory

	// ExperimentsAllowed, when true, permits evaluating configurations
	// that opt in to experimental language features.
	ExperimentsAllowed bool
}
//
// DispatchQueue+Alamofire.swift
//
// Copyright (c) 2014-2018 Alamofire Software Foundation (http://alamofire.org/)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
import Dispatch
import Foundation
extension DispatchQueue {
    /// Schedule the provided closure on this queue once `delay` seconds
    /// have elapsed.
    ///
    /// - Parameters:
    ///   - delay: `TimeInterval` to delay execution.
    ///   - closure: Closure to execute.
    func after(_ delay: TimeInterval, execute closure: @escaping @Sendable () -> Void) {
        let deadline: DispatchTime = .now() + delay
        asyncAfter(deadline: deadline, execute: closure)
    }
}
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal
import json
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
START_P2P_PORT=11000  # node i listens for p2p connections on START_P2P_PORT+i
START_RPC_PORT=11100  # node i serves JSON-RPC on START_RPC_PORT+i
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    # 8-decimal amount near the 21M coin cap; must survive a float round-trip.
    amount = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(amount))) * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Poll until every connection reports the same block count.
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        if heights.count(heights[0]) == len(heights):
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Poll until every connection reports the same set of mempool
    transactions.
    """
    while True:
        pools = [set(conn.getrawmempool()) for conn in rpc_connections]
        if all(pool == pools[0] for pool in pools):
            break
        time.sleep(1)
bitcoind_processes = []
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.

    The first run populates ./cache/node{0..3}; subsequent runs just copy
    those cached datadirs into test_dir.
    """
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir = os.path.join("cache", "node"+str(i))
            os.makedirs(datadir)
            # Minimal regtest config; all nodes share the rt/rt credentials.
            with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
                f.write("regtest=1\n");
                f.write("rpcuser=rt\n");
                f.write("rpcpassword=rt\n");
                f.write("port="+str(START_P2P_PORT+i)+"\n");
                f.write("rpcport="+str(START_RPC_PORT+i)+"\n");
            args = [ "bitcoind", "-keypool=1", "-datadir="+datadir ]
            if i > 0:
                # Nodes 1-3 dial node 0 so all four form one network.
                args.append("-connect=127.0.0.1:"+str(START_P2P_PORT))
            bitcoind_processes.append(subprocess.Popen(args))
            # -rpcwait blocks until the node's RPC interface answers.
            subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(START_RPC_PORT+i,)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
            sync_blocks(rpcs)
        for i in range(4):
            rpcs[i].setgenerate(True, 25)
            sync_blocks(rpcs)
        # Shut them down, and remove debug.logs:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(debug_log("cache", i))
    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_nodes(num_nodes, dir, extra_args=None, rpchost=None):
    """
    Launch num_nodes bitcoinds from the datadirs under dir and return one
    AuthServiceProxy JSON-RPC connection per node.

    extra_args, if given, is a per-node list of additional bitcoind flags.
    rpchost optionally overrides the RPC host/port (see _rpchost_to_args).
    """
    # Start bitcoinds, and wait for RPC interface to be up and running:
    devnull = open("/dev/null", "w+")
    for i in range(num_nodes):
        datadir = os.path.join(dir, "node"+str(i))
        args = [ "bitcoind", "-datadir="+datadir ]
        if extra_args is not None:
            args += extra_args[i]
        bitcoind_processes.append(subprocess.Popen(args))
        # -rpcwait makes bitcoin-cli block until the RPC server answers.
        subprocess.check_call([ "bitcoin-cli", "-datadir="+datadir] +
                              _rpchost_to_args(rpchost) +
                              ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    # Create&return JSON-RPC connections
    rpc_connections = []
    for i in range(num_nodes):
        url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', START_RPC_PORT+i,)
        rpc_connections.append(AuthServiceProxy(url))
    return rpc_connections
def debug_log(dir, n_node):
    """Return the path of node n_node's regtest debug.log under dir."""
    return os.path.join(dir, "node{0}".format(n_node), "regtest", "debug.log")
def stop_nodes(nodes):
    """Ask every node to stop, then empty the list in place.

    Emptying the caller's list (rather than rebinding it) also closes the
    connections as a side effect.
    """
    for node in nodes:
        node.stop()
    del nodes[:]
def wait_bitcoinds():
    """Block until every spawned bitcoind has exited, then drop the handles."""
    for proc in bitcoind_processes:
        proc.wait()
    del bitcoind_processes[:]
def connect_nodes(from_connection, node_num):
    """One-shot p2p connect from from_connection's node to local node node_num."""
    peer_address = "127.0.0.1:{0}".format(START_P2P_PORT + node_num)
    from_connection.addnode(peer_address, "onetry")
def assert_equal(thing1, thing2):
    """Raise AssertionError (with both values in the message) unless equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
/*
* Copyright (C) 2008 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing;
import com.google.common.annotations.GwtCompatible;
import java.util.Map;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
 * Creates maps, containing sample elements, to be tested.
 *
 * @author George van den Driessche
 */
@GwtCompatible
@NullMarked
public interface TestMapGenerator<K extends @Nullable Object, V extends @Nullable Object>
    extends TestContainerGenerator<Map<K, V>, Map.Entry<K, V>> {
  /** Returns a new array, of the given length, whose element type is the key type {@code K}. */
  K[] createKeyArray(int length);

  /** Returns a new array, of the given length, whose element type is the value type {@code V}. */
  V[] createValueArray(int length);
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.