hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a128c1e2a68b5dfe0ab2f2626be144908e50751
| 4,524
|
py
|
Python
|
nuremberg/settings/generic.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | null | null | null |
nuremberg/settings/generic.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | null | null | null |
nuremberg/settings/generic.py
|
emmalemma/nuremberg
|
10a5f789f5668aa4e7902e1765737c2c764ff2b2
|
[
"MIT"
] | null | null | null |
"""
Django settings for nuremberg project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'nuremberg',
'nuremberg.core',
'nuremberg.content',
'nuremberg.documents',
'nuremberg.transcripts',
'nuremberg.photographs',
'nuremberg.search',
'compressor',
'haystack',
'httpproxy',
'static_precompiler',
]
MIDDLEWARE_CLASSES = [
'nuremberg.core.middlewares.crawler.BlockCrawlerMiddleware',
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'nuremberg.core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'nuremberg.core.middlewares.context_processors.show_mockups',
'nuremberg.core.middlewares.context_processors.settings_variables',
],
},
},
]
WSGI_APPLICATION = 'nuremberg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# Configured in environment files
DATABASES = {
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# compressor settings
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.rCSSMinFilter',
]
# Compress supports precompilers, but static_precompiler has better watch features for dev
#
# COMPRESS_PRECOMPILERS = (
# ('text/less', 'lessc {infile} {outfile}'),
# )
COMPRESS_STORAGE = 'compressor.storage.GzipCompressorFileStorage'
# whitenoise settings
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedStaticFilesStorage'
| 27.585366
| 91
| 0.720601
|
4a128c8a771cd7638cb92fb7692d58d8df985846
| 195,853
|
py
|
Python
|
BaseTools/Source/Python/Workspace/DscBuildData.py
|
cgjertsen/edk2
|
1b461403ee723dab01d5828714cca0b9396a6b3c
|
[
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 2
|
2021-04-11T10:53:37.000Z
|
2021-05-20T03:42:31.000Z
|
BaseTools/Source/Python/Workspace/DscBuildData.py
|
cgjertsen/edk2
|
1b461403ee723dab01d5828714cca0b9396a6b3c
|
[
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 1
|
2021-06-04T20:24:43.000Z
|
2021-06-04T20:24:43.000Z
|
BaseTools/Source/Python/Workspace/DscBuildData.py
|
cgjertsen/edk2
|
1b461403ee723dab01d5828714cca0b9396a6b3c
|
[
"Python-2.0",
"Zlib",
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 3
|
2020-10-26T00:20:14.000Z
|
2021-06-04T09:41:16.000Z
|
## @file
# This file is used to create a database used by build tool
#
# Copyright (c) 2008 - 2020, Intel Corporation. All rights reserved.<BR>
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Platform build information from DSC file
#
# This class is used to retrieve information stored in database and convert them
# into PlatformBuildClassObject form for easier use for AutoGen.
#
from __future__ import print_function
from __future__ import absolute_import
from Common.StringUtils import *
from Common.DataType import *
from Common.Misc import *
from types import *
from Common.Expression import *
from CommonDataClass.CommonClass import SkuInfoClass
from Common.TargetTxtClassObject import TargetTxtDict
from Common.ToolDefClassObject import ToolDefDict
from .MetaDataTable import *
from .MetaFileTable import *
from .MetaFileParser import *
from .WorkspaceCommon import GetDeclaredPcd
from Common.Misc import AnalyzeDscPcd
from Common.Misc import ProcessDuplicatedInf,RemoveCComments,ArrayIndex
import re
from Common.Parsing import IsValidWord
from Common.VariableAttributes import VariableAttributes
import Common.GlobalData as GlobalData
import subprocess
from functools import reduce
from Common.Misc import SaveFileOnChange
from Workspace.BuildClassObject import PlatformBuildClassObject, StructurePcd, PcdClassObject, ModuleBuildClassObject
from collections import OrderedDict, defaultdict
def _IsFieldValueAnArray (Value):
    """Return True when a PCD field value string denotes an array-style value.

    Recognized array forms:
      * GUID(...) expressions,
      * L"..." / "..." strings and L'...' / '...' character sequences that
        contain more than one character (a single character is a scalar),
      * brace-enclosed byte lists ({...}).
    """
    Value = Value.strip()
    # Guard: an empty value is not an array, and would crash the
    # Value[0] / Value[-1] indexing below with an IndexError.
    if not Value:
        return False
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        return True
    if Value.startswith('L"') and Value.endswith('"') and len(Value[2:-1]) > 1:
        return True
    if Value[0] == '"' and Value[-1] == '"' and len(Value[1:-1]) > 1:
        return True
    if Value[0] == '{' and Value[-1] == '}':
        return True
    if Value.startswith("L'") and Value.endswith("'") and len(Value[2:-1]) > 1:
        return True
    if Value[0] == "'" and Value[-1] == "'" and len(Value[1:-1]) > 1:
        return True
    return False
# File-name stems of the generated PCD-value helper sources.
PcdValueInitName = 'PcdValueInit'
PcdValueCommonName = 'PcdValueCommon'

# Boilerplate C code emitted into the generated PcdValueInit helper program.
PcdMainCHeader = '''
/**
DO NOT EDIT
FILE auto-generated
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <PcdValueCommon.h>
'''

PcdMainCEntry = '''
int
main (
int argc,
char *argv[]
)
{
return PcdValueMain (argc, argv);
}
'''

# Header placed at the top of the generated makefiles.
PcdMakefileHeader = '''
#
# DO NOT EDIT
# This file is auto-generated by build utility
#
'''

# Compiler-flag lines appended to the generated makefiles.
WindowsCFLAGS = 'CFLAGS = $(CFLAGS) /wd4200 /wd4034 /wd4101 '
LinuxCFLAGS = 'BUILD_CFLAGS += -Wno-pointer-to-int-cast -Wno-unused-variable '

# Raw string: the backslashes in the NMAKE include paths are literal path
# separators, not escape sequences (avoids invalid-escape DeprecationWarning).
PcdMakefileEnd = r'''
!INCLUDE $(BASE_TOOLS_PATH)\Source\C\Makefiles\ms.common
!INCLUDE $(BASE_TOOLS_PATH)\Source\C\Makefiles\ms.app
'''

AppTarget = '''
all: $(APPFILE)
$(APPFILE): $(OBJECTS)
%s
'''

PcdGccMakefile = '''
MAKEROOT ?= $(EDK_TOOLS_PATH)/Source/C
LIBS = -lCommon
'''

variablePattern = re.compile(r'[\t\s]*0[xX][a-fA-F0-9]+$')
SkuIdPattern = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
## regular expressions for finding decimal and hex numbers
# (raw strings so \d is a regex class, not a Python escape sequence)
Pattern = re.compile(r'^[1-9]\d*|0$')
HexPattern = re.compile(r'0[xX][0-9a-fA-F]+$')
## Regular expression for finding header file inclusions
from AutoGen.GenMake import gIncludePattern
## Find dependencies for one source file
#
# By searching recursively "#include" directive in file, find out all the
# files needed by given source file. The dependecies will be only searched
# in given search path list.
#
# @param SearchPathList The list of search path
#
# @retval list The list of files the given source file depends on
#
def GetDependencyList(FileStack, SearchPathList):
    """Recursively resolve the #include dependencies of the files on FileStack.

    @param FileStack        Initial list of source files (consumed as a work stack)
    @param SearchPathList   Include search paths tried after the including
                            file's own directory

    @retval list            All files the initial files (transitively) depend on
    """
    DepDb = dict()
    DependencySet = set(FileStack)
    while len(FileStack) > 0:
        F = FileStack.pop()
        FullPathDependList = []
        CurrentFileDependencyList = []
        if F in DepDb:
            CurrentFileDependencyList = DepDb[F]
        else:
            # Read as bytes: the BOM sniff and the .decode() calls below need a
            # bytes object (the old text-mode read made .decode() raise, so every
            # file was silently skipped). 'with' also guarantees the handle is
            # closed — the previous 'if "Fd" in dir(locals())' check never fired,
            # because dir() of a dict lists dict methods, not its keys.
            try:
                with open(F, 'rb') as Fd:
                    FileContent = Fd.read()
            except BaseException as X:
                EdkLogger.error("build", FILE_OPEN_FAILURE, ExtraData=F + "\n\t" + str(X))
            if len(FileContent) == 0:
                continue
            try:
                # A leading 0xFF/0xFE byte marks a UTF-16 BOM.
                if FileContent[0] == 0xff or FileContent[0] == 0xfe:
                    FileContent = FileContent.decode('utf-16')
                else:
                    FileContent = FileContent.decode()
            except:
                # Not a text file (for example a .mcb file) — it cannot
                # contain #include directives, so skip it.
                continue
            IncludedFileList = gIncludePattern.findall(FileContent)
            for Inc in IncludedFileList:
                Inc = Inc.strip()
                Inc = os.path.normpath(Inc)
                CurrentFileDependencyList.append(Inc)
            DepDb[F] = CurrentFileDependencyList
        # Resolve each include name against the including file's directory
        # first, then the supplied search paths; newly found files are queued
        # for their own dependency scan.
        CurrentFilePath = os.path.dirname(F)
        PathList = [CurrentFilePath] + SearchPathList
        for Inc in CurrentFileDependencyList:
            for SearchPath in PathList:
                FilePath = os.path.join(SearchPath, Inc)
                if not os.path.exists(FilePath):
                    continue
                if FilePath not in DependencySet:
                    FileStack.append(FilePath)
                    FullPathDependList.append(FilePath)
                break
        DependencySet.update(FullPathDependList)
    DependencyList = list(DependencySet)    # remove duplicate ones
    return DependencyList
class DscBuildData(PlatformBuildClassObject):
# dict used to convert PCD type in database to string used by build tool
_PCD_TYPE_STRING_ = {
MODEL_PCD_FIXED_AT_BUILD : TAB_PCDS_FIXED_AT_BUILD,
MODEL_PCD_PATCHABLE_IN_MODULE : TAB_PCDS_PATCHABLE_IN_MODULE,
MODEL_PCD_FEATURE_FLAG : TAB_PCDS_FEATURE_FLAG,
MODEL_PCD_DYNAMIC : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_DEFAULT : TAB_PCDS_DYNAMIC,
MODEL_PCD_DYNAMIC_HII : TAB_PCDS_DYNAMIC_HII,
MODEL_PCD_DYNAMIC_VPD : TAB_PCDS_DYNAMIC_VPD,
MODEL_PCD_DYNAMIC_EX : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_DEFAULT : TAB_PCDS_DYNAMIC_EX,
MODEL_PCD_DYNAMIC_EX_HII : TAB_PCDS_DYNAMIC_EX_HII,
MODEL_PCD_DYNAMIC_EX_VPD : TAB_PCDS_DYNAMIC_EX_VPD,
}
# dict used to convert part of [Defines] to members of DscBuildData directly
_PROPERTY_ = {
#
# Required Fields
#
TAB_DSC_DEFINES_PLATFORM_NAME : "_PlatformName",
TAB_DSC_DEFINES_PLATFORM_GUID : "_Guid",
TAB_DSC_DEFINES_PLATFORM_VERSION : "_Version",
TAB_DSC_DEFINES_DSC_SPECIFICATION : "_DscSpecification",
# TAB_DSC_DEFINES_OUTPUT_DIRECTORY : "_OutputDirectory",
# TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES : "_SupArchList",
# TAB_DSC_DEFINES_BUILD_TARGETS : "_BuildTargets",
TAB_DSC_DEFINES_SKUID_IDENTIFIER : "_SkuName",
# TAB_DSC_DEFINES_FLASH_DEFINITION : "_FlashDefinition",
TAB_DSC_DEFINES_BUILD_NUMBER : "_BuildNumber",
TAB_DSC_DEFINES_MAKEFILE_NAME : "_MakefileName",
TAB_DSC_DEFINES_BS_BASE_ADDRESS : "_BsBaseAddress",
TAB_DSC_DEFINES_RT_BASE_ADDRESS : "_RtBaseAddress",
# TAB_DSC_DEFINES_RFC_LANGUAGES : "_RFCLanguages",
# TAB_DSC_DEFINES_ISO_LANGUAGES : "_ISOLanguages",
}
# used to compose dummy library class name for those forced library instances
_NullLibraryNumber = 0
    ## Constructor of DscBuildData
    #
    #  Initialize object of DscBuildData
    #
    #   @param      FilePath        The path of platform description file
    #   @param      RawData         The raw data of DSC file
    #   @param      BuildDataBase   Database used to retrieve module/package information
    #   @param      Arch            The target architecture
    #   @param      Target          The build target name
    #   @param      Toolchain       The toolchain tag
    #
    def __init__(self, FilePath, RawData, BuildDataBase, Arch=TAB_ARCH_COMMON, Target=None, Toolchain=None):
        self.MetaFile = FilePath
        self._RawData = RawData
        self._Bdb = BuildDataBase
        self._Arch = Arch
        self._Target = Target
        self._Toolchain = Toolchain
        self._ToolChainFamily = None
        # Initialize every lazily-computed member to None
        self._Clear()
        # WORKSPACE environment variable is the root for relative paths; "" when unset
        self.WorkspaceDir = os.getenv("WORKSPACE") if os.getenv("WORKSPACE") else ""
        self.DefaultStores = None
        # Build the SKU manager from SKUID_IDENTIFIER and the [SkuIds] section
        self.SkuIdMgr = SkuClass(self.SkuName, self.SkuIds)
@property
def OutputPath(self):
if os.getenv("WORKSPACE"):
return os.path.join(os.getenv("WORKSPACE"), self.OutputDirectory, self._Target + "_" + self._Toolchain, PcdValueInitName)
else:
return os.path.dirname(self.DscFile)
## XXX[key] = value
def __setitem__(self, key, value):
self.__dict__[self._PROPERTY_[key]] = value
## value = XXX[key]
def __getitem__(self, key):
return self.__dict__[self._PROPERTY_[key]]
## "in" test support
def __contains__(self, key):
return key in self._PROPERTY_
    ## Set all internal used members of DscBuildData to None
    def _Clear(self):
        """Reset every lazily-computed member so it is re-read on next access."""
        # [Defines] header values (None = not yet parsed)
        self._Header = None
        self._PlatformName = None
        self._Guid = None
        self._Version = None
        self._DscSpecification = None
        self._OutputDirectory = None
        self._SupArchList = None
        self._BuildTargets = None
        self._SkuName = None
        self._PcdInfoFlag = None
        self._VarCheckFlag = None
        self._FlashDefinition = None
        self._Prebuild = None
        self._Postbuild = None
        self._BuildNumber = None
        self._MakefileName = None
        self._BsBaseAddress = None
        self._RtBaseAddress = None
        # Caches derived from DSC sections ([SkuIds], [Components], PCDs, ...)
        self._SkuIds = None
        self._Modules = None
        self._LibraryInstances = None
        self._LibraryClasses = None
        self._Pcds = None
        self._DecPcds = None
        self._BuildOptions = None
        self._ModuleTypeOptions = None
        self._LoadFixAddress = None
        self._RFCLanguages = None
        self._ISOLanguages = None
        self._VpdToolGuid = None
        self._MacroDict = None
        self.DefaultStores = None
## Get current effective macros
@property
def _Macros(self):
if self._MacroDict is None:
self._MacroDict = {}
self._MacroDict.update(GlobalData.gPlatformDefines)
self._MacroDict.update(GlobalData.gGlobalDefines)
self._MacroDict.update(GlobalData.gCommandLineDefines)
return self._MacroDict
    ## Get architecture
    @property
    def Arch(self):
        # Target architecture this build data was instantiated for.
        return self._Arch
    @property
    def Dir(self):
        # Directory containing the DSC meta file.
        return self.MetaFile.Dir
    ## Retrieve all information in [Defines] section
    #
    #  (Retrieving all [Defines] information in one-shot is just to save time.)
    #
    def _GetHeaderInfo(self):
        RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch]
        for Record in RecordList:
            Name = Record[1]
            # items defined _PROPERTY_ don't need additional processing
            # some special items in [Defines] section need special treatment
            if Name == TAB_DSC_DEFINES_OUTPUT_DIRECTORY:
                self._OutputDirectory = NormPath(Record[2], self._Macros)
                if ' ' in self._OutputDirectory:
                    EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in OUTPUT_DIRECTORY",
                                    File=self.MetaFile, Line=Record[-1],
                                    ExtraData=self._OutputDirectory)
            elif Name == TAB_DSC_DEFINES_FLASH_DEFINITION:
                self._FlashDefinition = PathClass(NormPath(Record[2], self._Macros), GlobalData.gWorkspace)
                # FLASH_DEFINITION must point at an existing .fdf file
                ErrorCode, ErrorInfo = self._FlashDefinition.Validate('.fdf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=Record[-1],
                                    ExtraData=ErrorInfo)
            elif Name == TAB_DSC_PREBUILD:
                PrebuildValue = Record[2]
                # Strip surrounding double quotes; a lone opening quote is an error
                if Record[2][0] == '"':
                    if Record[2][-1] != '"':
                        EdkLogger.error('build', FORMAT_INVALID, 'Missing double quotes in the end of %s statement.' % TAB_DSC_PREBUILD,
                                        File=self.MetaFile, Line=Record[-1])
                    PrebuildValue = Record[2][1:-1]
                self._Prebuild = PrebuildValue
            elif Name == TAB_DSC_POSTBUILD:
                PostbuildValue = Record[2]
                if Record[2][0] == '"':
                    if Record[2][-1] != '"':
                        EdkLogger.error('build', FORMAT_INVALID, 'Missing double quotes in the end of %s statement.' % TAB_DSC_POSTBUILD,
                                        File=self.MetaFile, Line=Record[-1])
                    PostbuildValue = Record[2][1:-1]
                self._Postbuild = PostbuildValue
            elif Name == TAB_DSC_DEFINES_SUPPORTED_ARCHITECTURES:
                self._SupArchList = GetSplitValueList(Record[2], TAB_VALUE_SPLIT)
            elif Name == TAB_DSC_DEFINES_BUILD_TARGETS:
                self._BuildTargets = GetSplitValueList(Record[2])
            elif Name == TAB_DSC_DEFINES_SKUID_IDENTIFIER:
                if self._SkuName is None:
                    self._SkuName = Record[2]
                # a SKU given on the command line always wins over the DSC value
                if GlobalData.gSKUID_CMD:
                    self._SkuName = GlobalData.gSKUID_CMD
            elif Name == TAB_DSC_DEFINES_PCD_INFO_GENERATION:
                self._PcdInfoFlag = Record[2]
            elif Name == TAB_DSC_DEFINES_PCD_VAR_CHECK_GENERATION:
                self._VarCheckFlag = Record[2]
            elif Name == TAB_FIX_LOAD_TOP_MEMORY_ADDRESS:
                try:
                    # base 0: accept both decimal and 0x-prefixed hex
                    self._LoadFixAddress = int (Record[2], 0)
                except:
                    EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (Record[2]))
            elif Name == TAB_DSC_DEFINES_RFC_LANGUAGES:
                if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for RFC_LANGUAGES must have double quotes around it, for example: RFC_LANGUAGES = "en-us;zh-hans"',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageCodes = Record[2][1:-1]
                if not LanguageCodes:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more RFC4646 format language code must be provided for RFC_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageList = GetSplitValueList(LanguageCodes, TAB_SEMI_COLON_SPLIT)
                # check whether there is empty entries in the list
                if None in LanguageList:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more empty language code is in RFC_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                self._RFCLanguages = LanguageList
            elif Name == TAB_DSC_DEFINES_ISO_LANGUAGES:
                if not Record[2] or Record[2][0] != '"' or Record[2][-1] != '"' or len(Record[2]) == 1:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'language code for ISO_LANGUAGES must have double quotes around it, for example: ISO_LANGUAGES = "engchn"',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageCodes = Record[2][1:-1]
                if not LanguageCodes:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'one or more ISO639-2 format language code must be provided for ISO_LANGUAGES statement',
                                    File=self.MetaFile, Line=Record[-1])
                # ISO639-2 language codes are exactly three characters each
                if len(LanguageCodes) % 3:
                    EdkLogger.error('build', FORMAT_NOT_SUPPORTED, 'bad ISO639-2 format for ISO_LANGUAGES',
                                    File=self.MetaFile, Line=Record[-1])
                LanguageList = []
                for i in range(0, len(LanguageCodes), 3):
                    LanguageList.append(LanguageCodes[i:i + 3])
                self._ISOLanguages = LanguageList
            elif Name == TAB_DSC_DEFINES_VPD_TOOL_GUID:
                #
                # try to convert GUID to a real UUID value to see whether the GUID is format
                # for VPD_TOOL_GUID is correct.
                #
                try:
                    uuid.UUID(Record[2])
                except:
                    EdkLogger.error("build", FORMAT_INVALID, "Invalid GUID format for VPD_TOOL_GUID", File=self.MetaFile)
                self._VpdToolGuid = Record[2]
            elif Name in self:
                # plain [Defines] keys are stored through the _PROPERTY_ mapping
                self[Name] = Record[2]
        # set _Header to non-None in order to avoid database re-querying
        self._Header = 'DUMMY'
## Retrieve platform name
@property
def PlatformName(self):
if self._PlatformName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._PlatformName is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_NAME", File=self.MetaFile)
return self._PlatformName
    @property
    def Platform(self):
        # Alias of PlatformName kept for interface compatibility.
        return self.PlatformName
## Retrieve file guid
@property
def Guid(self):
if self._Guid is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Guid is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_GUID", File=self.MetaFile)
return self._Guid
## Retrieve platform version
@property
def Version(self):
if self._Version is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Version is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No PLATFORM_VERSION", File=self.MetaFile)
return self._Version
## Retrieve platform description file version
@property
def DscSpecification(self):
if self._DscSpecification is None:
if self._Header is None:
self._GetHeaderInfo()
if self._DscSpecification is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No DSC_SPECIFICATION", File=self.MetaFile)
return self._DscSpecification
## Retrieve OUTPUT_DIRECTORY
@property
def OutputDirectory(self):
if self._OutputDirectory is None:
if self._Header is None:
self._GetHeaderInfo()
if self._OutputDirectory is None:
self._OutputDirectory = os.path.join("Build", self._PlatformName)
return self._OutputDirectory
## Retrieve SUPPORTED_ARCHITECTURES
@property
def SupArchList(self):
if self._SupArchList is None:
if self._Header is None:
self._GetHeaderInfo()
if self._SupArchList is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No SUPPORTED_ARCHITECTURES", File=self.MetaFile)
return self._SupArchList
## Retrieve BUILD_TARGETS
@property
def BuildTargets(self):
if self._BuildTargets is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BuildTargets is None:
EdkLogger.error('build', ATTRIBUTE_NOT_AVAILABLE, "No BUILD_TARGETS", File=self.MetaFile)
return self._BuildTargets
@property
def PcdInfoFlag(self):
if self._PcdInfoFlag is None or self._PcdInfoFlag.upper() == 'FALSE':
return False
elif self._PcdInfoFlag.upper() == 'TRUE':
return True
else:
return False
@property
def VarCheckFlag(self):
if self._VarCheckFlag is None or self._VarCheckFlag.upper() == 'FALSE':
return False
elif self._VarCheckFlag.upper() == 'TRUE':
return True
else:
return False
# # Retrieve SKUID_IDENTIFIER
@property
def SkuName(self):
if self._SkuName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._SkuName is None:
self._SkuName = TAB_DEFAULT
return self._SkuName
    ## Override SKUID_IDENTIFIER
    # (used e.g. to force the SKU from outside the DSC file)
    @SkuName.setter
    def SkuName(self, Value):
        self._SkuName = Value
@property
def FlashDefinition(self):
if self._FlashDefinition is None:
if self._Header is None:
self._GetHeaderInfo()
if self._FlashDefinition is None:
self._FlashDefinition = ''
return self._FlashDefinition
@property
def Prebuild(self):
if self._Prebuild is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Prebuild is None:
self._Prebuild = ''
return self._Prebuild
@property
def Postbuild(self):
if self._Postbuild is None:
if self._Header is None:
self._GetHeaderInfo()
if self._Postbuild is None:
self._Postbuild = ''
return self._Postbuild
## Retrieve FLASH_DEFINITION
@property
def BuildNumber(self):
if self._BuildNumber is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BuildNumber is None:
self._BuildNumber = ''
return self._BuildNumber
## Retrieve MAKEFILE_NAME
@property
def MakefileName(self):
if self._MakefileName is None:
if self._Header is None:
self._GetHeaderInfo()
if self._MakefileName is None:
self._MakefileName = ''
return self._MakefileName
## Retrieve BsBaseAddress
@property
def BsBaseAddress(self):
if self._BsBaseAddress is None:
if self._Header is None:
self._GetHeaderInfo()
if self._BsBaseAddress is None:
self._BsBaseAddress = ''
return self._BsBaseAddress
## Retrieve RtBaseAddress
@property
def RtBaseAddress(self):
if self._RtBaseAddress is None:
if self._Header is None:
self._GetHeaderInfo()
if self._RtBaseAddress is None:
self._RtBaseAddress = ''
return self._RtBaseAddress
    ## Retrieve the top address for the load fix address
    @property
    def LoadFixAddress(self):
        # Resolution order: DSC [Defines] value, then macro default ('0'),
        # with a command-line define overriding both.
        if self._LoadFixAddress is None:
            if self._Header is None:
                self._GetHeaderInfo()
            if self._LoadFixAddress is None:
                self._LoadFixAddress = self._Macros.get(TAB_FIX_LOAD_TOP_MEMORY_ADDRESS, '0')
            try:
                # base 0: accept both decimal and 0x-prefixed hex
                self._LoadFixAddress = int (self._LoadFixAddress, 0)
            except:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (self._LoadFixAddress))
        #
        # If command line defined, should override the value in DSC file.
        #
        if 'FIX_LOAD_TOP_MEMORY_ADDRESS' in GlobalData.gCommandLineDefines:
            try:
                self._LoadFixAddress = int(GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS'], 0)
            except:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS %s is not valid dec or hex string" % (GlobalData.gCommandLineDefines['FIX_LOAD_TOP_MEMORY_ADDRESS']))
        if self._LoadFixAddress < 0:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid negative value 0x%x" % (self._LoadFixAddress))
        # 0xFFFFFFFFFFFFFFFF is exempt from the check; any other value must be 4K aligned
        if self._LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self._LoadFixAddress % 0x1000 != 0:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is set to the invalid unaligned 4K value 0x%x" % (self._LoadFixAddress))
        return self._LoadFixAddress
## Retrieve RFCLanguage filter
@property
def RFCLanguages(self):
if self._RFCLanguages is None:
if self._Header is None:
self._GetHeaderInfo()
if self._RFCLanguages is None:
self._RFCLanguages = []
return self._RFCLanguages
## Retrieve ISOLanguage filter
@property
def ISOLanguages(self):
if self._ISOLanguages is None:
if self._Header is None:
self._GetHeaderInfo()
if self._ISOLanguages is None:
self._ISOLanguages = []
return self._ISOLanguages
## Retrieve the GUID string for VPD tool
@property
def VpdToolGuid(self):
if self._VpdToolGuid is None:
if self._Header is None:
self._GetHeaderInfo()
if self._VpdToolGuid is None:
self._VpdToolGuid = ''
return self._VpdToolGuid
    ## Retrieve [SkuIds] section information
    @property
    def SkuIds(self):
        # Maps upper-cased SKU name -> (id-number-string, SKU name, Record[2] upper-cased;
        # Record[2] appears to be a parent SKU name — TODO confirm against MetaFileParser).
        if self._SkuIds is None:
            self._SkuIds = OrderedDict()
            RecordList = self._RawData[MODEL_EFI_SKU_ID, self._Arch]
            for Record in RecordList:
                # Record[0] = SKU id number, Record[1] = SKU name
                if not Record[0]:
                    EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID number',
                                    File=self.MetaFile, Line=Record[-1])
                if not Record[1]:
                    EdkLogger.error('build', FORMAT_INVALID, 'No Sku ID name',
                                    File=self.MetaFile, Line=Record[-1])
                if not Pattern.match(Record[0]) and not HexPattern.match(Record[0]):
                    EdkLogger.error('build', FORMAT_INVALID, "The format of the Sku ID number is invalid. It only support Integer and HexNumber",
                                    File=self.MetaFile, Line=Record[-1])
                if not SkuIdPattern.match(Record[1]) or (Record[2] and not SkuIdPattern.match(Record[2])):
                    EdkLogger.error('build', FORMAT_INVALID, "The format of the Sku ID name is invalid. The correct format is '(a-zA-Z_)(a-zA-Z0-9_)*'",
                                    File=self.MetaFile, Line=Record[-1])
                self._SkuIds[Record[1].upper()] = (str(DscBuildData.ToInt(Record[0])), Record[1].upper(), Record[2].upper())
            # DEFAULT and COMMON SKUs are always present, both with id "0"
            if TAB_DEFAULT not in self._SkuIds:
                self._SkuIds[TAB_DEFAULT] = ("0", TAB_DEFAULT, TAB_DEFAULT)
            if TAB_COMMON not in self._SkuIds:
                self._SkuIds[TAB_COMMON] = ("0", TAB_DEFAULT, TAB_DEFAULT)
        return self._SkuIds
@staticmethod
def ToInt(intstr):
return int(intstr, 16) if intstr.upper().startswith("0X") else int(intstr)
    def _GetDefaultStores(self):
        """Parse the [DefaultStores] section into an OrderedDict of
        upper-cased store name -> (id number, store name); also publishes the
        sorted store names in GlobalData.gDefaultStores."""
        if self.DefaultStores is None:
            self.DefaultStores = OrderedDict()
            RecordList = self._RawData[MODEL_EFI_DEFAULT_STORES, self._Arch]
            for Record in RecordList:
                # Record[0] = store id number, Record[1] = store name
                if not Record[0]:
                    EdkLogger.error('build', FORMAT_INVALID, 'No DefaultStores ID number',
                                    File=self.MetaFile, Line=Record[-1])
                if not Record[1]:
                    EdkLogger.error('build', FORMAT_INVALID, 'No DefaultStores ID name',
                                    File=self.MetaFile, Line=Record[-1])
                if not Pattern.match(Record[0]) and not HexPattern.match(Record[0]):
                    EdkLogger.error('build', FORMAT_INVALID, "The format of the DefaultStores ID number is invalid. It only support Integer and HexNumber",
                                    File=self.MetaFile, Line=Record[-1])
                if not IsValidWord(Record[1]):
                    EdkLogger.error('build', FORMAT_INVALID, "The format of the DefaultStores ID name is invalid. The correct format is '(a-zA-Z0-9_)(a-zA-Z0-9_-.)*'",
                                    File=self.MetaFile, Line=Record[-1])
                self.DefaultStores[Record[1].upper()] = (DscBuildData.ToInt(Record[0]), Record[1].upper())
            # The STANDARD default store (id 0) is always present
            if TAB_DEFAULT_STORES_DEFAULT not in self.DefaultStores:
                self.DefaultStores[TAB_DEFAULT_STORES_DEFAULT] = (0, TAB_DEFAULT_STORES_DEFAULT)
            GlobalData.gDefaultStores = sorted(self.DefaultStores.keys())
        return self.DefaultStores
    def OverrideDuplicateModule(self):
        """Scan [Components] for entries naming the same module (same INF path
        and same FILE_GUID override) and disable every occurrence except the
        last one, then force the raw data to be post-processed again."""
        RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
        Macros = self._Macros
        Components = {}
        for Record in RecordList:
            ModuleId = Record[6]
            # FILE_GUID override recorded for this component, "NULL" when absent
            file_guid = self._RawData[MODEL_META_DATA_HEADER, self._Arch, None, ModuleId]
            file_guid_str = file_guid[0][2] if file_guid else "NULL"
            ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            # A later record with the same (guid, path) key supersedes the earlier one
            if self._Arch != TAB_ARCH_COMMON and (file_guid_str,str(ModuleFile)) in Components:
                self._RawData.DisableOverrideComponent(Components[(file_guid_str,str(ModuleFile))])
            Components[(file_guid_str,str(ModuleFile))] = ModuleId
        self._RawData._PostProcessed = False
    ## Retrieve packages this Platform depends on
    @cached_property
    def Packages(self):
        # Set of parsed package build-data objects, one per [Packages] entry.
        RetVal = set()
        RecordList = self._RawData[MODEL_META_DATA_PACKAGE, self._Arch]
        Macros = self._Macros
        for Record in RecordList:
            File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            # check the file validation
            ErrorCode, ErrorInfo = File.Validate('.dec')
            if ErrorCode != 0:
                LineNo = Record[-1]
                EdkLogger.error('build', ErrorCode, ExtraData=ErrorInfo, File=self.MetaFile, Line=LineNo)
            # parse this package now. we need it to get protocol/ppi/guid value
            RetVal.add(self._Bdb[File, self._Arch, self._Target, self._Toolchain])
        return RetVal
    ## Retrieve [Components] section information
    @property
    def Modules(self):
        """OrderedDict mapping module INF PathClass -> ModuleBuildClassObject.

        Built lazily from the [Components] records of the current arch:
        validates each INF path, then attaches the component's private
        library-class overrides, private PCD settings, private build options
        and FILE_GUID override (which clones the INF via ProcessDuplicatedInf).
        """
        if self._Modules is not None:
            return self._Modules
        # Drop earlier duplicated component records before building the map.
        self.OverrideDuplicateModule()
        self._Modules = OrderedDict()
        RecordList = self._RawData[MODEL_META_DATA_COMPONENT, self._Arch]
        Macros = self._Macros
        for Record in RecordList:
            ModuleFile = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
            ModuleId = Record[6]
            LineNo = Record[7]
            # check the file validation
            ErrorCode, ErrorInfo = ModuleFile.Validate('.inf')
            if ErrorCode != 0:
                EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                ExtraData=ErrorInfo)
            Module = ModuleBuildClassObject()
            Module.MetaFile = ModuleFile
            # get module private library instance
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, ModuleId]
            for Record in RecordList:
                LibraryClass = Record[0]
                LibraryPath = PathClass(NormPath(Record[1], Macros), GlobalData.gWorkspace, Arch=self._Arch)
                # NOTE: LineNo is rebound here; after this loop it refers to
                # the last library record, not the component record.
                LineNo = Record[-1]
                # check the file validation
                ErrorCode, ErrorInfo = LibraryPath.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)
                if LibraryClass == '' or LibraryClass == 'NULL':
                    # Anonymous/NULL libraries get synthetic unique class names.
                    self._NullLibraryNumber += 1
                    LibraryClass = 'NULL%d' % self._NullLibraryNumber
                    EdkLogger.verbose("Found forced library for %s\n\t%s [%s]" % (ModuleFile, LibraryPath, LibraryClass))
                Module.LibraryClasses[LibraryClass] = LibraryPath
                if LibraryPath not in self.LibraryInstances:
                    self.LibraryInstances.append(LibraryPath)
            # get module private PCD setting
            for Type in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, \
                         MODEL_PCD_FEATURE_FLAG, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX]:
                RecordList = self._RawData[Type, self._Arch, None, ModuleId]
                for TokenSpaceGuid, PcdCName, Setting, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
                    TokenList = GetSplitValueList(Setting)
                    DefaultValue = TokenList[0]
                    # the format is PcdName| Value | VOID* | MaxDatumSize
                    if len(TokenList) > 2:
                        MaxDatumSize = TokenList[2]
                    else:
                        MaxDatumSize = ''
                    TypeString = self._PCD_TYPE_STRING_[Type]
                    Pcd = PcdClassObject(
                            PcdCName,
                            TokenSpaceGuid,
                            TypeString,
                            '',
                            DefaultValue,
                            '',
                            MaxDatumSize,
                            {},
                            False,
                            None
                            )
                    Module.Pcds[PcdCName, TokenSpaceGuid] = Pcd
            # get module private build options
            RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, None, ModuleId]
            for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
                if (ToolChainFamily, ToolChain) not in Module.BuildOptions:
                    Module.BuildOptions[ToolChainFamily, ToolChain] = Option
                else:
                    # Repeated options for the same key are appended.
                    OptionString = Module.BuildOptions[ToolChainFamily, ToolChain]
                    Module.BuildOptions[ToolChainFamily, ToolChain] = OptionString + " " + Option
            # A component-scope <Defines> section may only carry FILE_GUID;
            # it re-keys the module under a cloned INF with that GUID.
            RecordList = self._RawData[MODEL_META_DATA_HEADER, self._Arch, None, ModuleId]
            if RecordList:
                if len(RecordList) != 1:
                    EdkLogger.error('build', OPTION_UNKNOWN, 'Only FILE_GUID can be listed in <Defines> section.',
                                    File=self.MetaFile, ExtraData=str(ModuleFile), Line=LineNo)
                ModuleFile = ProcessDuplicatedInf(ModuleFile, RecordList[0][2], GlobalData.gWorkspace)
                ModuleFile.Arch = self._Arch
            self._Modules[ModuleFile] = Module
        return self._Modules
## Retrieve all possible library instances used in this platform
@property
def LibraryInstances(self):
if self._LibraryInstances is None:
self.LibraryClasses
return self._LibraryInstances
    ## Retrieve [LibraryClasses] information
    @property
    def LibraryClasses(self):
        """tdict resolving (LibraryClass, ModuleType) -> library instance path.

        Built lazily from the platform-scope [LibraryClasses] records plus
        Edk-style [Libraries] records; also populates _LibraryInstances with
        every instance path seen.
        """
        if self._LibraryClasses is None:
            self._LibraryInstances = []
            #
            # tdict is a special dict kind of type, used for selecting correct
            # library instance for given library class and module type
            #
            LibraryClassDict = tdict(True, 3)
            # track all library class names
            LibraryClassSet = set()
            RecordList = self._RawData[MODEL_EFI_LIBRARY_CLASS, self._Arch, None, -1]
            Macros = self._Macros
            for Record in RecordList:
                LibraryClass, LibraryInstance, Dummy, Arch, ModuleType, Dummy, Dummy, LineNo = Record
                if LibraryClass == '' or LibraryClass == 'NULL':
                    # NULL libraries get synthetic unique class names so each
                    # forced instance survives as a distinct entry.
                    self._NullLibraryNumber += 1
                    LibraryClass = 'NULL%d' % self._NullLibraryNumber
                    EdkLogger.verbose("Found forced library for arch=%s\n\t%s [%s]" % (Arch, LibraryInstance, LibraryClass))
                LibraryClassSet.add(LibraryClass)
                LibraryInstance = PathClass(NormPath(LibraryInstance, Macros), GlobalData.gWorkspace, Arch=self._Arch)
                # check the file validation
                ErrorCode, ErrorInfo = LibraryInstance.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)
                if ModuleType != TAB_COMMON and ModuleType not in SUP_MODULE_LIST:
                    EdkLogger.error('build', OPTION_UNKNOWN, "Unknown module type [%s]" % ModuleType,
                                    File=self.MetaFile, ExtraData=LibraryInstance, Line=LineNo)
                LibraryClassDict[Arch, ModuleType, LibraryClass] = LibraryInstance
                if LibraryInstance not in self._LibraryInstances:
                    self._LibraryInstances.append(LibraryInstance)
            # resolve the specific library instance for each class and each module type
            self._LibraryClasses = tdict(True)
            for LibraryClass in LibraryClassSet:
                # try all possible module types
                for ModuleType in SUP_MODULE_LIST:
                    LibraryInstance = LibraryClassDict[self._Arch, ModuleType, LibraryClass]
                    if LibraryInstance is None:
                        continue
                    self._LibraryClasses[LibraryClass, ModuleType] = LibraryInstance
            # Edk-style [Libraries] entries: keyed by module base name with a
            # placeholder module type.
            RecordList = self._RawData[MODEL_EFI_LIBRARY_INSTANCE, self._Arch]
            for Record in RecordList:
                File = PathClass(NormPath(Record[0], Macros), GlobalData.gWorkspace, Arch=self._Arch)
                LineNo = Record[-1]
                # check the file validation
                ErrorCode, ErrorInfo = File.Validate('.inf')
                if ErrorCode != 0:
                    EdkLogger.error('build', ErrorCode, File=self.MetaFile, Line=LineNo,
                                    ExtraData=ErrorInfo)
                if File not in self._LibraryInstances:
                    self._LibraryInstances.append(File)
                #
                # we need the module name as the library class name, so we have
                # to parse it here. (self._Bdb[] will trigger a file parse if it
                # hasn't been parsed)
                #
                Library = self._Bdb[File, self._Arch, self._Target, self._Toolchain]
                self._LibraryClasses[Library.BaseName, ':dummy:'] = Library
        return self._LibraryClasses
    def _ValidatePcd(self, PcdCName, TokenSpaceGuid, Setting, PcdType, LineNo):
        """Validate one DSC PCD setting against its DEC declaration.

        Lazily builds the declared-PCD cache (_DecPcds/_GuidDict) from this
        platform's packages plus packages pulled in by FDF-listed INFs, then
        checks the setting's format, evaluates its value expression, verifies
        the datum type, and mirrors the result into GlobalData.gPlatformPcds.
        Returns the analyzed ValueList; raises a build/parser error on any
        validation failure.
        """
        if not self._DecPcds:
            # Collect packages referenced by INFs listed in the FDF that are
            # not already platform components, so their PCD declarations are
            # visible too.
            FdfInfList = []
            if GlobalData.gFdfParser:
                FdfInfList = GlobalData.gFdfParser.Profile.InfList
            PkgSet = set()
            for Inf in FdfInfList:
                ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch)
                if ModuleFile in self._Modules:
                    continue
                ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
                PkgSet.update(ModuleData.Packages)
            if self.Packages:
                PkgSet.update(self.Packages)
            self._DecPcds, self._GuidDict = GetDeclaredPcd(self, self._Bdb, self._Arch, self._Target, self._Toolchain, PkgSet)
            self._GuidDict.update(GlobalData.gPlatformPcds)
        if (PcdCName, TokenSpaceGuid) not in self._DecPcds:
            EdkLogger.error('build', PARSER_ERROR,
                            "Pcd (%s.%s) defined in DSC is not declared in DEC files referenced in INF files in FDF. Arch: ['%s']" % (TokenSpaceGuid, PcdCName, self._Arch),
                            File=self.MetaFile, Line=LineNo)
        ValueList, IsValid, Index = AnalyzeDscPcd(Setting, PcdType, self._DecPcds[PcdCName, TokenSpaceGuid].DatumType)
        if not IsValid:
            # FeatureFlag/FixedAtBuild tolerate some forms; only the '-1'
            # marker in field 2 is fatal for them.
            if PcdType not in [MODEL_PCD_FEATURE_FLAG, MODEL_PCD_FIXED_AT_BUILD]:
                EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect.", File=self.MetaFile, Line=LineNo,
                                ExtraData="%s.%s|%s" % (TokenSpaceGuid, PcdCName, Setting))
            else:
                if ValueList[2] == '-1':
                    EdkLogger.error('build', FORMAT_INVALID, "Pcd format incorrect.", File=self.MetaFile, Line=LineNo,
                                    ExtraData="%s.%s|%s" % (TokenSpaceGuid, PcdCName, Setting))
        if ValueList[Index]:
            DatumType = self._DecPcds[PcdCName, TokenSpaceGuid].DatumType
            # CODE(...) payloads are emitted verbatim; only ordinary values
            # go through expression evaluation.
            if "{CODE(" not in ValueList[Index]:
                try:
                    ValueList[Index] = ValueExpressionEx(ValueList[Index], DatumType, self._GuidDict)(True)
                except BadExpression as Value:
                    EdkLogger.error('Parser', FORMAT_INVALID, Value, File=self.MetaFile, Line=LineNo,
                                    ExtraData="PCD [%s.%s] Value \"%s\" " % (
                                    TokenSpaceGuid, PcdCName, ValueList[Index]))
                except EvaluationException as Excpt:
                    if hasattr(Excpt, 'Pcd'):
                        if Excpt.Pcd in GlobalData.gPlatformOtherPcds:
                            EdkLogger.error('Parser', FORMAT_INVALID, "Cannot use this PCD (%s) in an expression as"
                                            " it must be defined in a [PcdsFixedAtBuild] or [PcdsFeatureFlag] section"
                                            " of the DSC file" % Excpt.Pcd,
                                            File=self.MetaFile, Line=LineNo)
                        else:
                            EdkLogger.error('Parser', FORMAT_INVALID, "PCD (%s) is not defined in DSC file" % Excpt.Pcd,
                                            File=self.MetaFile, Line=LineNo)
                    else:
                        EdkLogger.error('Parser', FORMAT_INVALID, "Invalid expression: %s" % str(Excpt),
                                        File=self.MetaFile, Line=LineNo)
            if ValueList[Index]:
                Valid, ErrStr = CheckPcdDatum(self._DecPcds[PcdCName, TokenSpaceGuid].DatumType, ValueList[Index])
                if not Valid:
                    EdkLogger.error('build', FORMAT_INVALID, ErrStr, File=self.MetaFile, Line=LineNo,
                                    ExtraData="%s.%s" % (TokenSpaceGuid, PcdCName))
            if PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT, MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE):
                # Datum type stated in the DSC must match the DEC declaration.
                if self._DecPcds[PcdCName, TokenSpaceGuid].DatumType.strip() != ValueList[1].strip():
                    DecPcd = self._DecPcds[PcdCName, TokenSpaceGuid]
                    EdkLogger.error('build', FORMAT_INVALID,
                                    "Pcd datumtype used in DSC file is not the same as its declaration. DatumType:%s"%DecPcd.DatumType,
                                    File=self.MetaFile, Line=LineNo,
                                    ExtraData="Dsc:%s.%s|%s\n    Dec:%s.%s|%s|%s|%s" % (TokenSpaceGuid, PcdCName, Setting, TokenSpaceGuid, \
                                    PcdCName, DecPcd.DefaultValue, DecPcd.DatumType, DecPcd.TokenValue))
        if (TokenSpaceGuid + '.' + PcdCName) in GlobalData.gPlatformPcds:
            if GlobalData.gPlatformPcds[TokenSpaceGuid + '.' + PcdCName] != ValueList[Index]:
                GlobalData.gPlatformPcds[TokenSpaceGuid + '.' + PcdCName] = ValueList[Index]
        return ValueList
def _FilterPcdBySkuUsage(self, Pcds):
available_sku = self.SkuIdMgr.AvailableSkuIdSet
sku_usage = self.SkuIdMgr.SkuUsageType
if sku_usage == SkuClass.SINGLE:
for pcdname in Pcds:
pcd = Pcds[pcdname]
Pcds[pcdname].SkuInfoList = {TAB_DEFAULT:pcd.SkuInfoList[skuid] for skuid in pcd.SkuInfoList if skuid in available_sku}
if isinstance(pcd, StructurePcd) and pcd.SkuOverrideValues:
Pcds[pcdname].SkuOverrideValues = {TAB_DEFAULT:pcd.SkuOverrideValues[skuid] for skuid in pcd.SkuOverrideValues if skuid in available_sku}
else:
for pcdname in Pcds:
pcd = Pcds[pcdname]
Pcds[pcdname].SkuInfoList = {skuid:pcd.SkuInfoList[skuid] for skuid in pcd.SkuInfoList if skuid in available_sku}
if isinstance(pcd, StructurePcd) and pcd.SkuOverrideValues:
Pcds[pcdname].SkuOverrideValues = {skuid:pcd.SkuOverrideValues[skuid] for skuid in pcd.SkuOverrideValues if skuid in available_sku}
return Pcds
def CompleteHiiPcdsDefaultStores(self, Pcds):
HiiPcd = [Pcds[pcd] for pcd in Pcds if Pcds[pcd].Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]]
DefaultStoreMgr = DefaultStore(self.DefaultStores)
for pcd in HiiPcd:
for skuid in pcd.SkuInfoList:
skuobj = pcd.SkuInfoList.get(skuid)
if TAB_DEFAULT_STORES_DEFAULT not in skuobj.DefaultStoreDict:
PcdDefaultStoreSet = set(defaultstorename for defaultstorename in skuobj.DefaultStoreDict)
mindefaultstorename = DefaultStoreMgr.GetMin(PcdDefaultStoreSet)
skuobj.DefaultStoreDict[TAB_DEFAULT_STORES_DEFAULT] = skuobj.DefaultStoreDict[mindefaultstorename]
return Pcds
def RecoverCommandLinePcd(self):
def UpdateCommandLineValue(pcd):
if pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
pcd.PcdValueFromComm = pcd.DefaultValue
elif pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
pcd.PcdValueFromComm = pcd.SkuInfoList.get(TAB_DEFAULT).HiiDefaultValue
else:
pcd.PcdValueFromComm = pcd.SkuInfoList.get(TAB_DEFAULT).DefaultValue
for pcd in self._Pcds:
if isinstance(self._Pcds[pcd], StructurePcd) and (self._Pcds[pcd].PcdValueFromComm or self._Pcds[pcd].PcdFieldValueFromComm):
UpdateCommandLineValue(self._Pcds[pcd])
def __ParsePcdFromCommandLine(self):
if GlobalData.BuildOptionPcd:
for i, pcd in enumerate(GlobalData.BuildOptionPcd):
if isinstance(pcd, tuple):
continue
(pcdname, pcdvalue) = pcd.split('=')
if not pcdvalue:
EdkLogger.error('build', AUTOGEN_ERROR, "No Value specified for the PCD %s." % (pcdname))
if '.' in pcdname:
(Name1, Name2) = pcdname.split('.', 1)
if "." in Name2:
(Name3, FieldName) = Name2.split(".", 1)
if ((Name3, Name1)) in self.DecPcds:
HasTokenSpace = True
TokenCName = Name3
TokenSpaceGuidCName = Name1
else:
FieldName = Name2
TokenCName = Name1
TokenSpaceGuidCName = ''
HasTokenSpace = False
else:
if ((Name2, Name1)) in self.DecPcds:
HasTokenSpace = True
TokenCName = Name2
TokenSpaceGuidCName = Name1
FieldName =""
else:
FieldName = Name2
TokenCName = Name1
TokenSpaceGuidCName = ''
HasTokenSpace = False
else:
FieldName = ""
TokenCName = pcdname
TokenSpaceGuidCName = ''
HasTokenSpace = False
TokenSpaceGuidCNameList = []
FoundFlag = False
PcdDatumType = ''
DisplayName = TokenCName
if FieldName:
DisplayName = TokenCName + '.' + FieldName
if not HasTokenSpace:
for key in self.DecPcds:
PcdItem = self.DecPcds[key]
if TokenCName == PcdItem.TokenCName:
if not PcdItem.TokenSpaceGuidCName in TokenSpaceGuidCNameList:
if len (TokenSpaceGuidCNameList) < 1:
TokenSpaceGuidCNameList.append(PcdItem.TokenSpaceGuidCName)
TokenSpaceGuidCName = PcdItem.TokenSpaceGuidCName
PcdDatumType = PcdItem.DatumType
FoundFlag = True
else:
EdkLogger.error(
'build',
AUTOGEN_ERROR,
"The Pcd %s is found under multiple different TokenSpaceGuid: %s and %s." % (DisplayName, PcdItem.TokenSpaceGuidCName, TokenSpaceGuidCNameList[0])
)
else:
if (TokenCName, TokenSpaceGuidCName) in self.DecPcds:
PcdDatumType = self.DecPcds[(TokenCName, TokenSpaceGuidCName)].DatumType
FoundFlag = True
if not FoundFlag:
if HasTokenSpace:
EdkLogger.error('build', AUTOGEN_ERROR, "The Pcd %s.%s is not found in the DEC file." % (TokenSpaceGuidCName, DisplayName))
else:
EdkLogger.error('build', AUTOGEN_ERROR, "The Pcd %s is not found in the DEC file." % (DisplayName))
pcdvalue = pcdvalue.replace("\\\\\\'", '\\\\\\"').replace('\\\'', '\'').replace('\\\\\\"', "\\'")
if FieldName:
pcdvalue = DscBuildData.HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, pcdvalue, PcdDatumType, self._GuidDict, FieldName)
else:
pcdvalue = DscBuildData.HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, pcdvalue, PcdDatumType, self._GuidDict)
IsValid, Cause = CheckPcdDatum(PcdDatumType, pcdvalue)
if not IsValid:
EdkLogger.error("build", FORMAT_INVALID, Cause, ExtraData="%s.%s" % (TokenSpaceGuidCName, TokenCName))
GlobalData.BuildOptionPcd[i] = (TokenSpaceGuidCName, TokenCName, FieldName, pcdvalue, ("build command options", 1))
if GlobalData.BuildOptionPcd:
inf_objs = [item for item in self._Bdb._CACHE_.values() if item.Arch == self.Arch and item.MetaFile.Ext.lower() == '.inf']
for pcd in GlobalData.BuildOptionPcd:
(TokenSpaceGuidCName, TokenCName, FieldName, pcdvalue, _) = pcd
for BuildData in inf_objs:
for key in BuildData.Pcds:
PcdItem = BuildData.Pcds[key]
if (TokenSpaceGuidCName, TokenCName) == (PcdItem.TokenSpaceGuidCName, PcdItem.TokenCName) and FieldName =="":
PcdItem.DefaultValue = pcdvalue
PcdItem.PcdValueFromComm = pcdvalue
#In command line, the latter full assign value in commandLine should override the former field assign value.
#For example, --pcd Token.pcd.field="" --pcd Token.pcd=H"{}"
delete_assign = []
field_assign = {}
if GlobalData.BuildOptionPcd:
for pcdTuple in GlobalData.BuildOptionPcd:
TokenSpaceGuid, Token, Field = pcdTuple[0], pcdTuple[1], pcdTuple[2]
if Field:
if (TokenSpaceGuid, Token) not in field_assign:
field_assign[TokenSpaceGuid, Token] = []
field_assign[TokenSpaceGuid, Token].append(pcdTuple)
else:
if (TokenSpaceGuid, Token) in field_assign:
delete_assign.extend(field_assign[TokenSpaceGuid, Token])
field_assign[TokenSpaceGuid, Token] = []
for item in delete_assign:
GlobalData.BuildOptionPcd.remove(item)
    @staticmethod
    def HandleFlexiblePcd(TokenSpaceGuidCName, TokenCName, PcdValue, PcdDatumType, GuidDict, FieldName=''):
        """Canonicalize a flexible-format PCD value from the command line.

        Recognized value prefixes: 'H' (hex/byte-array expression, prefix is
        stripped), "L'..'" / "'..'" (character strings), 'L' followed by text
        (rewritten to L"..."), plus bare TRUE/FALSE, numbers, and unquoted
        strings.  Values are evaluated through ValueExpressionEx; when
        *FieldName* is given (struct field assignment) only array-shaped
        values are evaluated — scalars are returned verbatim.
        Raises a Parser error on a malformed value expression.
        """
        if FieldName:
            IsArray = False
            # Only used for error display below.
            TokenCName += '.' + FieldName
        if PcdValue.startswith('H'):
            # H"..." / H'{...}' form: strip the 'H' and evaluate the rest.
            if FieldName and _IsFieldValueAnArray(PcdValue[1:]):
                PcdDatumType = TAB_VOID
                IsArray = True
            if FieldName and not IsArray:
                # Scalar struct-field values are kept verbatim.
                return PcdValue
            try:
                PcdValue = ValueExpressionEx(PcdValue[1:], PcdDatumType, GuidDict)(True)
            except BadExpression as Value:
                EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
                                (TokenSpaceGuidCName, TokenCName, PcdValue, Value))
        elif PcdValue.startswith("L'") or PcdValue.startswith("'"):
            # Quoted character-string forms are evaluated as-is.
            if FieldName and _IsFieldValueAnArray(PcdValue):
                PcdDatumType = TAB_VOID
                IsArray = True
            if FieldName and not IsArray:
                return PcdValue
            try:
                PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
            except BadExpression as Value:
                EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
                                (TokenSpaceGuidCName, TokenCName, PcdValue, Value))
        elif PcdValue.startswith('L'):
            # Bare L-prefixed text becomes a unicode string literal L"...".
            PcdValue = 'L"' + PcdValue[1:] + '"'
            if FieldName and _IsFieldValueAnArray(PcdValue):
                PcdDatumType = TAB_VOID
                IsArray = True
            if FieldName and not IsArray:
                return PcdValue
            try:
                PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
            except BadExpression as Value:
                EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
                                (TokenSpaceGuidCName, TokenCName, PcdValue, Value))
        else:
            # Booleans normalize to '0'/'1'.
            if PcdValue.upper() == 'FALSE':
                PcdValue = str(0)
            if PcdValue.upper() == 'TRUE':
                PcdValue = str(1)
            if not FieldName:
                # Whole-PCD: non-numeric text gets quoted so the expression
                # evaluator treats it as a string.
                if PcdDatumType not in TAB_PCD_NUMERIC_TYPES:
                    PcdValue = '"' + PcdValue + '"'
                elif not PcdValue.isdigit() and not PcdValue.upper().startswith('0X'):
                    PcdValue = '"' + PcdValue + '"'
            else:
                IsArray = False
                Base = 10
                if PcdValue.upper().startswith('0X'):
                    Base = 16
                try:
                    Num = int(PcdValue, Base)
                except:
                    # Not a number: quote it as a string value.
                    PcdValue = '"' + PcdValue + '"'
                if _IsFieldValueAnArray(PcdValue):
                    PcdDatumType = TAB_VOID
                    IsArray = True
                if not IsArray:
                    return PcdValue
            try:
                PcdValue = ValueExpressionEx(PcdValue, PcdDatumType, GuidDict)(True)
            except BadExpression as Value:
                EdkLogger.error('Parser', FORMAT_INVALID, 'PCD [%s.%s] Value "%s", %s' %
                                (TokenSpaceGuidCName, TokenCName, PcdValue, Value))
        return PcdValue
    ## Retrieve all PCD settings in platform
    @property
    def Pcds(self):
        """All effective PCD settings for this platform, lazily computed.

        The override pipeline below is order-sensitive: command-line parsing
        first, then per-type collection, then FDF overrides, command-line
        overrides, structured-PCD resolution, HII default-store completion
        and finally SKU filtering.
        """
        if self._Pcds is None:
            self._Pcds = OrderedDict()
            # Normalize --pcd command-line entries before anything reads them.
            self.__ParsePcdFromCommandLine()
            self._Pcds.update(self._GetPcd(MODEL_PCD_FIXED_AT_BUILD))
            self._Pcds.update(self._GetPcd(MODEL_PCD_PATCHABLE_IN_MODULE))
            self._Pcds.update(self._GetPcd(MODEL_PCD_FEATURE_FLAG))
            self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_DEFAULT))
            self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_HII))
            self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_VPD))
            self._Pcds.update(self._GetDynamicPcd(MODEL_PCD_DYNAMIC_EX_DEFAULT))
            self._Pcds.update(self._GetDynamicHiiPcd(MODEL_PCD_DYNAMIC_EX_HII))
            self._Pcds.update(self._GetDynamicVpdPcd(MODEL_PCD_DYNAMIC_EX_VPD))
            # Apply overrides: FDF first, then command line wins over FDF.
            self._Pcds = self.CompletePcdValues(self._Pcds)
            self._Pcds = self.OverrideByFdfOverAll(self._Pcds)
            self._Pcds = self.OverrideByCommOverAll(self._Pcds)
            self._Pcds = self.UpdateStructuredPcds(MODEL_PCD_TYPE_LIST, self._Pcds)
            self._Pcds = self.CompleteHiiPcdsDefaultStores(self._Pcds)
            self._Pcds = self._FilterPcdBySkuUsage(self._Pcds)
            # Re-sync recorded command-line values with the final results.
            self.RecoverCommandLinePcd()
        return self._Pcds
## Retrieve [BuildOptions]
@property
def BuildOptions(self):
if self._BuildOptions is None:
self._BuildOptions = OrderedDict()
#
# Retrieve build option for EDKII and EDK style module
#
for CodeBase in (EDKII_NAME, EDK_NAME):
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch, CodeBase]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
if Dummy3.upper() != TAB_COMMON:
continue
CurKey = (ToolChainFamily, ToolChain, CodeBase)
#
# Only flags can be appended
#
if CurKey not in self._BuildOptions or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
self._BuildOptions[CurKey] = Option
else:
if ' ' + Option not in self._BuildOptions[CurKey]:
self._BuildOptions[CurKey] += ' ' + Option
return self._BuildOptions
def GetBuildOptionsByPkg(self, Module, ModuleType):
local_pkg = os.path.split(Module.LocalPkg())[0]
if self._ModuleTypeOptions is None:
self._ModuleTypeOptions = OrderedDict()
if ModuleType not in self._ModuleTypeOptions:
options = OrderedDict()
self._ModuleTypeOptions[ ModuleType] = options
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
if Dummy2 not in (TAB_COMMON,local_pkg.upper(),"EDKII"):
continue
Type = Dummy3
if Type.upper() == ModuleType.upper():
Key = (ToolChainFamily, ToolChain)
if Key not in options or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
options[Key] = Option
else:
if ' ' + Option not in options[Key]:
options[Key] += ' ' + Option
return self._ModuleTypeOptions[ModuleType]
def GetBuildOptionsByModuleType(self, Edk, ModuleType):
if self._ModuleTypeOptions is None:
self._ModuleTypeOptions = OrderedDict()
if (Edk, ModuleType) not in self._ModuleTypeOptions:
options = OrderedDict()
self._ModuleTypeOptions[Edk, ModuleType] = options
DriverType = '%s.%s' % (Edk, ModuleType)
CommonDriverType = '%s.%s' % (TAB_COMMON, ModuleType)
RecordList = self._RawData[MODEL_META_DATA_BUILD_OPTION, self._Arch]
for ToolChainFamily, ToolChain, Option, Dummy1, Dummy2, Dummy3, Dummy4, Dummy5 in RecordList:
Type = Dummy2 + '.' + Dummy3
if Type.upper() == DriverType.upper() or Type.upper() == CommonDriverType.upper():
Key = (ToolChainFamily, ToolChain, Edk)
if Key not in options or not ToolChain.endswith('_FLAGS') or Option.startswith('='):
options[Key] = Option
else:
if ' ' + Option not in options[Key]:
options[Key] += ' ' + Option
return self._ModuleTypeOptions[Edk, ModuleType]
@staticmethod
def GetStructurePcdInfo(PcdSet):
structure_pcd_data = defaultdict(list)
for item in PcdSet:
structure_pcd_data[(item[0], item[1])].append(item)
return structure_pcd_data
    @staticmethod
    def OverrideByFdf(StruPcds,workspace):
        """Apply FDF field-level SET overrides onto structure PCDs.

        Records each overridden field's value, plus the FDF file (relative to
        *workspace*) and line where it was set, into PcdFieldValueFromFdf.
        Returns the (mutated) StruPcds mapping.
        """
        if GlobalData.gFdfParser is None:
            return StruPcds
        StructurePcdInFdf = OrderedDict()
        # fdfpcd keys are (PcdName, Guid, Field); location dict is keyed the
        # same way.  StruPcds is keyed (PcdName, Guid).
        fdfpcd = GlobalData.gFdfParser.Profile.PcdDict
        fdfpcdlocation = GlobalData.gFdfParser.Profile.PcdLocalDict
        for item in fdfpcd :
            # Only field-level (item[2] non-empty) overrides of known
            # structure PCDs are collected, re-keyed as (Guid, Name, Field).
            if len(item[2]) and (item[0],item[1]) in StruPcds:
                StructurePcdInFdf[(item[1],item[0],item[2] )] = fdfpcd[item]
        GlobalPcds = {(item[0],item[1]) for item in StructurePcdInFdf}
        for Pcd in StruPcds.values():
            if (Pcd.TokenSpaceGuidCName,Pcd.TokenCName) not in GlobalPcds:
                continue
            FieldValues = OrderedDict()
            for item in StructurePcdInFdf:
                if (Pcd.TokenSpaceGuidCName,Pcd.TokenCName) == (item[0],item[1]) and item[2]:
                    FieldValues[item[2]] = StructurePcdInFdf[item]
            for field in FieldValues:
                if field not in Pcd.PcdFieldValueFromFdf:
                    Pcd.PcdFieldValueFromFdf[field] = ["","",""]
                # [value, source-file (workspace-relative), source-line]
                Pcd.PcdFieldValueFromFdf[field][0] = FieldValues[field]
                Pcd.PcdFieldValueFromFdf[field][1] = os.path.relpath(fdfpcdlocation[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName,field)][0],workspace)
                Pcd.PcdFieldValueFromFdf[field][2] = fdfpcdlocation[(Pcd.TokenCName,Pcd.TokenSpaceGuidCName,field)][1]
        return StruPcds
    @staticmethod
    def OverrideByComm(StruPcds):
        """Apply command-line field-level PCD overrides onto structure PCDs.

        BuildOptionPcd entries have the normalized shape
        (TokenSpaceGuid, TokenCName, Field, Value, source-tag); StruPcds is
        keyed (TokenCName, TokenSpaceGuid).  Records value and source info
        into PcdFieldValueFromComm and returns the (mutated) mapping.
        """
        StructurePcdInCom = OrderedDict()
        for item in GlobalData.BuildOptionPcd:
            # Only normalized 5-tuples for known structure PCDs qualify.
            if len(item) == 5 and (item[1], item[0]) in StruPcds:
                StructurePcdInCom[(item[0], item[1], item[2] )] = (item[3], item[4])
        GlobalPcds = {(item[0], item[1]) for item in StructurePcdInCom}
        for Pcd in StruPcds.values():
            if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) not in GlobalPcds:
                continue
            FieldValues = OrderedDict()
            for item in StructurePcdInCom:
                # Keep only field-level (item[2] non-empty) entries for this PCD.
                if (Pcd.TokenSpaceGuidCName, Pcd.TokenCName) == (item[0], item[1]) and item[2]:
                    FieldValues[item[2]] = StructurePcdInCom[item]
            for field in FieldValues:
                if field not in Pcd.PcdFieldValueFromComm:
                    Pcd.PcdFieldValueFromComm[field] = ["", "", ""]
                # [value, source-description, source-index] from the tag tuple.
                Pcd.PcdFieldValueFromComm[field][0] = FieldValues[field][0]
                Pcd.PcdFieldValueFromComm[field][1] = FieldValues[field][1][0]
                Pcd.PcdFieldValueFromComm[field][2] = FieldValues[field][1][1]
        return StruPcds
    def OverrideByCommOverAll(self,AllPcds):
        """Apply whole-PCD (no field) command-line overrides to all PCDs.

        For PCDs present in *AllPcds* the default/SKU/default-store values are
        replaced; for PCDs only declared in DEC, a new entry is synthesized
        into self._Pcds.  Returns the (mutated) AllPcds mapping.
        """
        def CheckStructureInComm(commpcds):
            # Normalized entries are 5-tuples; anything else is the legacy
            # un-normalized shape.
            if not commpcds:
                return False
            if len(commpcds[0]) == 5:
                return True
            return False
        NoFiledValues = OrderedDict()
        if CheckStructureInComm(GlobalData.BuildOptionPcd):
            StructurePcdInCom = OrderedDict()
            for item in GlobalData.BuildOptionPcd:
                StructurePcdInCom[(item[0], item[1], item[2] )] = (item[3], item[4])
            for item in StructurePcdInCom:
                # Keep only whole-PCD assignments (empty Field).
                if not item[2]:
                    NoFiledValues[(item[0], item[1])] = StructurePcdInCom[item]
        else:
            for item in GlobalData.BuildOptionPcd:
                NoFiledValues[(item[0], item[1])] = [item[2]]
        # NoFiledValues is keyed (TokenSpaceGuid, TokenCName); AllPcds is
        # keyed (TokenCName, TokenSpaceGuid).
        for Guid, Name in NoFiledValues:
            if (Name, Guid) in AllPcds:
                Pcd = AllPcds.get((Name, Guid))
                if isinstance(self._DecPcds.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName), None), StructurePcd):
                    # Structure PCDs are fully resolved later from the DEC
                    # object; just record the command-line value there.
                    self._DecPcds.get((Pcd.TokenCName, Pcd.TokenSpaceGuidCName)).PcdValueFromComm = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                else:
                    Pcd.PcdValueFromComm = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                    Pcd.DefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                    for sku in Pcd.SkuInfoList:
                        SkuInfo = Pcd.SkuInfoList[sku]
                        if SkuInfo.DefaultValue:
                            SkuInfo.DefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                        else:
                            # HII-style SKU entry: override the HII default and
                            # every default store.
                            SkuInfo.HiiDefaultValue = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                            for defaultstore in SkuInfo.DefaultStoreDict:
                                SkuInfo.DefaultStoreDict[defaultstore] = NoFiledValues[(Pcd.TokenSpaceGuidCName, Pcd.TokenCName)][0]
                    if Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII]]:
                        if Pcd.DatumType == TAB_VOID:
                            # Grow MaxDatumSize if the override needs more room.
                            if not Pcd.MaxDatumSize:
                                Pcd.MaxDatumSize = '0'
                            CurrentSize = int(Pcd.MaxDatumSize, 16) if Pcd.MaxDatumSize.upper().startswith("0X") else int(Pcd.MaxDatumSize)
                            OptionSize = len((StringToArray(Pcd.PcdValueFromComm)).split(","))
                            MaxSize = max(CurrentSize, OptionSize)
                            Pcd.MaxDatumSize = str(MaxSize)
            else:
                # Not used by the platform yet: synthesize an entry from the
                # DEC declaration for the supported PCD types.
                PcdInDec = self.DecPcds.get((Name, Guid))
                if PcdInDec:
                    PcdInDec.PcdValueFromComm = NoFiledValues[(Guid, Name)][0]
                    if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_FEATURE_FLAG],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX]]:
                        self._Pcds[Name, Guid] = copy.deepcopy(PcdInDec)
                        self._Pcds[Name, Guid].DefaultValue = NoFiledValues[( Guid, Name)][0]
                        if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC],
                                            self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX]]:
                            self._Pcds[Name, Guid].SkuInfoList = {TAB_DEFAULT:SkuInfoClass(TAB_DEFAULT, self.SkuIds[TAB_DEFAULT][0], '', '', '', '', '', NoFiledValues[( Guid, Name)][0])}
        return AllPcds
    def OverrideByFdfOverAll(self,AllPcds):
        """Apply whole-PCD (no field) FDF SET overrides to all PCDs.

        Mirrors OverrideByCommOverAll but sources values from the FDF
        parser's PcdDict, which is keyed (PcdName, Guid, Field).  Returns the
        (mutated) AllPcds mapping.
        """
        if GlobalData.gFdfParser is None:
            return AllPcds
        NoFiledValues = GlobalData.gFdfParser.Profile.PcdDict
        for Name,Guid,Field in NoFiledValues:
            if len(Field):
                # Field-level overrides are handled by OverrideByFdf.
                continue
            Value = NoFiledValues[(Name,Guid,Field)]
            if (Name,Guid) in AllPcds:
                Pcd = AllPcds.get((Name,Guid))
                if isinstance(self._DecPcds.get((Pcd.TokenCName,Pcd.TokenSpaceGuidCName), None),StructurePcd):
                    # NOTE(review): the FDF-sourced value is stored in
                    # PcdValueFromComm here, while the DEC fallback below uses
                    # PcdValueFromFdf — confirm this asymmetry is intentional.
                    self._DecPcds.get((Pcd.TokenCName,Pcd.TokenSpaceGuidCName)).PcdValueFromComm = Value
                else:
                    Pcd.PcdValueFromComm = Value
                    Pcd.DefaultValue = Value
                    for sku in Pcd.SkuInfoList:
                        SkuInfo = Pcd.SkuInfoList[sku]
                        if SkuInfo.DefaultValue:
                            SkuInfo.DefaultValue = Value
                        else:
                            # HII-style SKU entry: override the HII default and
                            # every default store.
                            SkuInfo.HiiDefaultValue = Value
                            for defaultstore in SkuInfo.DefaultStoreDict:
                                SkuInfo.DefaultStoreDict[defaultstore] = Value
                    if Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII]]:
                        if Pcd.DatumType == TAB_VOID:
                            # Grow MaxDatumSize if the override needs more room.
                            if not Pcd.MaxDatumSize:
                                Pcd.MaxDatumSize = '0'
                            CurrentSize = int(Pcd.MaxDatumSize,16) if Pcd.MaxDatumSize.upper().startswith("0X") else int(Pcd.MaxDatumSize)
                            OptionSize = len((StringToArray(Pcd.PcdValueFromComm)).split(","))
                            MaxSize = max(CurrentSize, OptionSize)
                            Pcd.MaxDatumSize = str(MaxSize)
            else:
                # Not used by the platform yet: synthesize an entry from the
                # DEC declaration for static PCD types.
                PcdInDec = self.DecPcds.get((Name,Guid))
                if PcdInDec:
                    PcdInDec.PcdValueFromFdf = Value
                    if PcdInDec.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_FEATURE_FLAG]]:
                        self._Pcds[Name, Guid] = copy.deepcopy(PcdInDec)
                        self._Pcds[Name, Guid].DefaultValue = Value
        return AllPcds
def ParsePcdNameStruct(self,NamePart1,NamePart2):
TokenSpaceCName = PcdCName = DimensionAttr = Field = ""
if "." in NamePart1:
TokenSpaceCName, TempPcdCName = NamePart1.split(".")
if "[" in TempPcdCName:
PcdCName = TempPcdCName[:TempPcdCName.index("[")]
DimensionAttr = TempPcdCName[TempPcdCName.index("["):]
else:
PcdCName = TempPcdCName
Field = NamePart2
else:
TokenSpaceCName = NamePart1
if "[" in NamePart2:
PcdCName = NamePart2[:NamePart2.index("[")]
DimensionAttr = NamePart2[NamePart2.index("["):]
else:
PcdCName = NamePart2
return TokenSpaceCName,PcdCName,DimensionAttr,Field
    def UpdateStructuredPcds(self, TypeList, AllPcds):
        """Merge structure-PCD field overrides into *AllPcds*.

        For the PCD model types listed in *TypeList* this collects field-level
        overrides per SKU and default store from the DSC, resolves SKU /
        default-store inheritance chains, applies FDF and command-line
        overrides, regenerates the byte-array values (via the generated C
        helper application) and writes the results back into *AllPcds*.

        @param TypeList  list of MODEL_PCD_* identifiers to process
        @param AllPcds   dict keyed by (TokenCName, TokenSpaceGuidCName)
        @retval the updated *AllPcds* dict
        """
        DynamicPcdType = [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_DEFAULT],
                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_VPD],
                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_DEFAULT],
                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII],
                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_VPD]]

        Pcds = AllPcds
        DefaultStoreMgr = DefaultStore(self.DefaultStores)
        SkuIds = self.SkuIds
        self.SkuIdMgr.AvailableSkuIdSet.update({TAB_DEFAULT:0})
        # Union of every default-store name mentioned by any SKU of any PCD,
        # plus the mandatory STANDARD store.
        DefaultStores = {storename for pcdobj in AllPcds.values() for skuobj in pcdobj.SkuInfoList.values() for storename in skuobj.DefaultStoreDict}
        DefaultStores.add(TAB_DEFAULT_STORES_DEFAULT)

        S_PcdSet = []
        # Find out all possible PCD candidates for self._Arch
        RecordList = []

        for Type in TypeList:
            RecordList.extend(self._RawData[Type, self._Arch])

        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, default_store, Dummy4, Dummy5 in RecordList:
            SkuName = SkuName.upper()
            default_store = default_store.upper()
            SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
            if SkuName not in SkuIds:
                continue
            TCName,PCName,DimensionAttr,Field = self.ParsePcdNameStruct(TokenSpaceGuid, PcdCName)
            pcd_in_dec = self._DecPcds.get((PCName,TCName), None)
            if pcd_in_dec is None:
                EdkLogger.error('build', PARSER_ERROR,
                            "Pcd (%s.%s) defined in DSC is not declared in DEC files. Arch: ['%s']" % (TCName, PCName, self._Arch),
                            File=self.MetaFile, Line = Dummy5)
            if SkuName in SkuIds and ("." in TokenSpaceGuid or "[" in PcdCName):
                # Field-level or array-element override requires a structure PCD.
                if not isinstance (pcd_in_dec, StructurePcd):
                    EdkLogger.error('build', PARSER_ERROR,
                                "Pcd (%s.%s) is not declared as Structure PCD in DEC files. Arch: ['%s']" % (TCName, PCName, self._Arch),
                                File=self.MetaFile, Line = Dummy5)

                S_PcdSet.append([ TCName,PCName,DimensionAttr,Field, SkuName, default_store, Dummy5, AnalyzePcdExpression(Setting)[0]])

        # handle pcd value override
        StrPcdSet = DscBuildData.GetStructurePcdInfo(S_PcdSet)
        S_pcd_set = OrderedDict()
        for str_pcd in StrPcdSet:
            str_pcd_obj = Pcds.get((str_pcd[1], str_pcd[0]), None)
            str_pcd_dec = self._DecPcds.get((str_pcd[1], str_pcd[0]), None)
            str_pcd_obj_str = StructurePcd()
            str_pcd_obj_str.copy(str_pcd_dec)
            if str_pcd_obj:
                str_pcd_obj_str.copy(str_pcd_obj)
                if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
                    str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
                else:
                    str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].DefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
            for str_pcd_data in StrPcdSet[str_pcd]:
                if str_pcd_data[4] in SkuIds:
                    str_pcd_obj_str.AddOverrideValue(str_pcd_data[3], str(str_pcd_data[7]), TAB_DEFAULT if str_pcd_data[4] == TAB_COMMON else str_pcd_data[4], TAB_DEFAULT_STORES_DEFAULT if str_pcd_data[5] == TAB_COMMON else str_pcd_data[5], self.MetaFile.File if self.WorkspaceDir not in self.MetaFile.File else self.MetaFile.File[len(self.WorkspaceDir) if self.WorkspaceDir.endswith(os.path.sep) else len(self.WorkspaceDir)+1:], LineNo=str_pcd_data[6],DimensionAttr = str_pcd_data[2])
            S_pcd_set[str_pcd[1], str_pcd[0]] = str_pcd_obj_str

        # Add the Structure PCD that only defined in DEC, don't have override in DSC file
        for Pcd in self.DecPcds:
            if isinstance(self._DecPcds[Pcd], StructurePcd):
                if Pcd not in S_pcd_set:
                    str_pcd_obj_str = StructurePcd()
                    str_pcd_obj_str.copy(self._DecPcds[Pcd])
                    str_pcd_obj = Pcds.get(Pcd, None)
                    if str_pcd_obj:
                        str_pcd_obj_str.copy(str_pcd_obj)
                        if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
                            str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
                        else:
                            str_pcd_obj_str.DefaultFromDSC = {skuname:{defaultstore: str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.get(defaultstore, str_pcd_obj.SkuInfoList[skuname].DefaultValue) for defaultstore in DefaultStores} for skuname in str_pcd_obj.SkuInfoList}
                    S_pcd_set[Pcd] = str_pcd_obj_str
        if S_pcd_set:
            # Keep a global copy before filtering so other consumers can see the
            # full structure-PCD set for this Arch.
            GlobalData.gStructurePcd[self.Arch] = S_pcd_set.copy()
        self.FilterStrcturePcd(S_pcd_set)
        # Resolve SKU inheritance: a SKU with no override inherits from the next
        # SKU up its parent chain (or from the DEC defaults when none exists).
        for stru_pcd in S_pcd_set.values():
            for skuid in SkuIds:
                if skuid in stru_pcd.SkuOverrideValues:
                    continue
                nextskuid = self.SkuIdMgr.GetNextSkuId(skuid)
                NoDefault = False
                if skuid not in stru_pcd.SkuOverrideValues:
                    while nextskuid not in stru_pcd.SkuOverrideValues:
                        if nextskuid == TAB_DEFAULT:
                            NoDefault = True
                            break
                        nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
                stru_pcd.SkuOverrideValues[skuid] = copy.deepcopy(stru_pcd.SkuOverrideValues[nextskuid]) if not NoDefault else copy.deepcopy({defaultstorename: stru_pcd.DefaultValues for defaultstorename in DefaultStores} if DefaultStores else {}) #{TAB_DEFAULT_STORES_DEFAULT:stru_pcd.DefaultValues})
                if not NoDefault:
                    stru_pcd.ValueChain.add((skuid, ''))
            if 'DEFAULT' in stru_pcd.SkuOverrideValues and not GlobalData.gPcdSkuOverrides.get((stru_pcd.TokenCName, stru_pcd.TokenSpaceGuidCName)):
                GlobalData.gPcdSkuOverrides.update(
                    {(stru_pcd.TokenCName, stru_pcd.TokenSpaceGuidCName): {'DEFAULT':stru_pcd.SkuOverrideValues['DEFAULT']}})
            if stru_pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
                # HII PCDs additionally inherit per default store within a SKU.
                for skuid in SkuIds:
                    nextskuid = skuid
                    NoDefault = False
                    if skuid not in stru_pcd.SkuOverrideValues:
                        while nextskuid not in stru_pcd.SkuOverrideValues:
                            if nextskuid == TAB_DEFAULT:
                                NoDefault = True
                                break
                            nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
                    if NoDefault:
                        continue
                    PcdDefaultStoreSet = set(defaultstorename  for defaultstorename in stru_pcd.SkuOverrideValues[nextskuid])
                    mindefaultstorename = DefaultStoreMgr.GetMin(PcdDefaultStoreSet)

                    for defaultstoreid in DefaultStores:
                        if defaultstoreid not in stru_pcd.SkuOverrideValues[skuid]:
                            stru_pcd.SkuOverrideValues[skuid][defaultstoreid] = CopyDict(stru_pcd.SkuOverrideValues[nextskuid][mindefaultstorename])
                            stru_pcd.ValueChain.add((skuid, defaultstoreid))
        S_pcd_set = DscBuildData.OverrideByFdf(S_pcd_set,self.WorkspaceDir)
        S_pcd_set = DscBuildData.OverrideByComm(S_pcd_set)

        # Run the generated C app to flatten every structure PCD to bytes.
        Str_Pcd_Values = self.GenerateByteArrayValue(S_pcd_set)
        if Str_Pcd_Values:
            for (skuname, StoreName, PcdGuid, PcdName, PcdValue) in Str_Pcd_Values:
                str_pcd_obj = S_pcd_set.get((PcdName, PcdGuid))
                if str_pcd_obj is None:
                    print(PcdName, PcdGuid)
                    raise
                if str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
                    if skuname not in str_pcd_obj.SkuInfoList:
                        str_pcd_obj.SkuInfoList[skuname] = SkuInfoClass(SkuIdName=skuname, SkuId=self.SkuIds[skuname][0], HiiDefaultValue=PcdValue, DefaultStore = {StoreName:PcdValue})
                    else:
                        str_pcd_obj.SkuInfoList[skuname].HiiDefaultValue = PcdValue
                        str_pcd_obj.SkuInfoList[skuname].DefaultStoreDict.update({StoreName:PcdValue})
                elif str_pcd_obj.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
                    if skuname in (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT, TAB_COMMON):
                        str_pcd_obj.DefaultValue = PcdValue
                    else:
                        # Build-type PCDs only carry one value; non-system SKU
                        # results are ignored here.
                        if skuname not in str_pcd_obj.SkuInfoList:
                            nextskuid = self.SkuIdMgr.GetNextSkuId(skuname)
                            NoDefault = False
                            while nextskuid not in str_pcd_obj.SkuInfoList:
                                if nextskuid == TAB_DEFAULT:
                                    NoDefault = True
                                    break
                                nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
                            str_pcd_obj.SkuInfoList[skuname] = copy.deepcopy(str_pcd_obj.SkuInfoList[nextskuid]) if not NoDefault else SkuInfoClass(SkuIdName=skuname, SkuId=self.SkuIds[skuname][0], DefaultValue=PcdValue)
                            str_pcd_obj.SkuInfoList[skuname].SkuId = self.SkuIds[skuname][0]
                            str_pcd_obj.SkuInfoList[skuname].SkuIdName = skuname
                    else:
                        str_pcd_obj.SkuInfoList[skuname].DefaultValue = PcdValue

            # For HII PCDs, the system SKU's HII default comes from the
            # lowest-priority default store present.
            for str_pcd_obj in S_pcd_set.values():
                if str_pcd_obj.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
                                            self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
                    continue
                PcdDefaultStoreSet = set(defaultstorename for skuobj in str_pcd_obj.SkuInfoList.values() for defaultstorename in skuobj.DefaultStoreDict)
                DefaultStoreObj = DefaultStore(self._GetDefaultStores())
                mindefaultstorename = DefaultStoreObj.GetMin(PcdDefaultStoreSet)
                str_pcd_obj.SkuInfoList[self.SkuIdMgr.SystemSkuId].HiiDefaultValue = str_pcd_obj.SkuInfoList[self.SkuIdMgr.SystemSkuId].DefaultStoreDict[mindefaultstorename]

            for str_pcd_obj in S_pcd_set.values():

                str_pcd_obj.MaxDatumSize = DscBuildData.GetStructurePcdMaxSize(str_pcd_obj)
                Pcds[str_pcd_obj.TokenCName, str_pcd_obj.TokenSpaceGuidCName] = str_pcd_obj
                Pcds[str_pcd_obj.TokenCName, str_pcd_obj.TokenSpaceGuidCName].CustomAttribute['IsStru']=True

            # Normalize: COMMON entries become DEFAULT (or are dropped when a
            # DEFAULT entry already exists).
            for pcdkey in Pcds:
                pcd = Pcds[pcdkey]
                if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
                    pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
                    del pcd.SkuInfoList[TAB_COMMON]
                elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
                    del pcd.SkuInfoList[TAB_COMMON]

        list(map(self.FilterSkuSettings, [Pcds[pcdkey] for pcdkey in Pcds if Pcds[pcdkey].Type in DynamicPcdType]))
        return Pcds
    @cached_property
    def PlatformUsedPcds(self):
        """Names of all PCDs consumed by any module or library referenced by
        this platform: DSC [Components] modules, INFs listed in the FDF, and
        the resolved library instances.  Cached because it instantiates every
        module's build object."""
        FdfInfList = []
        if GlobalData.gFdfParser:
            FdfInfList = GlobalData.gFdfParser.Profile.InfList
        FdfModuleList = [PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch) for Inf in FdfInfList]
        AllModulePcds = set()
        # Modules from both the DSC and the FDF (deduplicated).
        ModuleSet = set(list(self._Modules.keys()) + FdfModuleList)
        for ModuleFile in ModuleSet:
            ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
            AllModulePcds = AllModulePcds | ModuleData.PcdsName
        # Library instances are not in _Modules; create their build objects.
        for ModuleFile in self.LibraryInstances:
            ModuleData = self._Bdb.CreateBuildObject(ModuleFile, self._Arch, self._Target, self._Toolchain)
            AllModulePcds = AllModulePcds | ModuleData.PcdsName
        return AllModulePcds
#Filter the StrucutrePcd that is not used by any module in dsc file and fdf file.
def FilterStrcturePcd(self, S_pcd_set):
UnusedStruPcds = set(S_pcd_set.keys()) - self.PlatformUsedPcds
for (Token, TokenSpaceGuid) in UnusedStruPcds:
del S_pcd_set[(Token, TokenSpaceGuid)]
    ## Retrieve non-dynamic PCD settings
    #
    #   @param  Type    PCD type
    #
    #   @retval a dict object contains settings of given PCD type
    #
    def _GetPcd(self, Type):
        """Collect all non-dynamic PCD settings of *Type* for this Arch.

        The value selected for each PCD follows the priority
        SystemSku > DEFAULT > COMMON; raw per-SKU values are kept in
        DscRawValue/DscRawValueInfo for later structure-PCD processing.
        """
        Pcds = OrderedDict()
        #
        # tdict is a special dict kind of type, used for selecting correct
        # PCD settings for certain ARCH
        #
        AvailableSkuIdSet = copy.copy(self.SkuIds)

        PcdDict = tdict(True, 4)
        PcdList = []
        # Find out all possible PCD candidates for self._Arch
        RecordList = self._RawData[Type, self._Arch]
        PcdValueDict = OrderedDict()
        for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
            SkuName = SkuName.upper()
            SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
            if SkuName not in AvailableSkuIdSet:
                EdkLogger.error('build ', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
                                            File=self.MetaFile, Line=Dummy5)
            if SkuName in (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT, TAB_COMMON):
                # Skip structure-PCD field entries ("." / "[") here; those are
                # handled by UpdateStructuredPcds.
                if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
                    PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
                PcdDict[Arch, PcdCName, TokenSpaceGuid, SkuName] = Setting

        for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
            Setting = PcdDict[self._Arch, PcdCName, TokenSpaceGuid, SkuName]
            if Setting is None:
                continue
            PcdValue, DatumType, MaxDatumSize = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
            if MaxDatumSize:
                # MaxDatumSize must fit in a UINT16 and be non-negative.
                if int(MaxDatumSize, 0) > 0xFFFF:
                    EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                    File=self.MetaFile, Line=Dummy4)
                if int(MaxDatumSize, 0) < 0:
                    EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                    File=self.MetaFile, Line=Dummy4)
            if (PcdCName, TokenSpaceGuid) in PcdValueDict:
                PcdValueDict[PcdCName, TokenSpaceGuid][SkuName] = (PcdValue, DatumType, MaxDatumSize,Dummy4)
            else:
                PcdValueDict[PcdCName, TokenSpaceGuid] = {SkuName:(PcdValue, DatumType, MaxDatumSize,Dummy4)}

        for ((PcdCName, TokenSpaceGuid), PcdSetting) in PcdValueDict.items():
            # Priority: system SKU > DEFAULT > COMMON.
            if self.SkuIdMgr.SystemSkuId in PcdSetting:
                PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[self.SkuIdMgr.SystemSkuId]
            elif TAB_DEFAULT in PcdSetting:
                PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[TAB_DEFAULT]
            elif TAB_COMMON in PcdSetting:
                PcdValue, DatumType, MaxDatumSize,_ = PcdSetting[TAB_COMMON]
            else:
                PcdValue = None
                DatumType = None
                MaxDatumSize = None

            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                DatumType,
                                                PcdValue,
                                                '',
                                                MaxDatumSize,
                                                {},
                                                False,
                                                None,
                                                IsDsc=True)
            # Keep raw per-SKU values (and their file/line origin) for later use.
            for SkuName in PcdValueDict[PcdCName, TokenSpaceGuid]:
                Settings = PcdValueDict[PcdCName, TokenSpaceGuid][SkuName]
                if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
                    Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
                    Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
                Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = Settings[0]
                Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Settings[3])
        return Pcds
    @staticmethod
    def GetStructurePcdMaxSize(str_pcd):
        """Return (as a decimal string) the largest byte length among the
        PCD's DEC/DSC default value and every SKU value."""
        pcd_default_value = str_pcd.DefaultValue
        # HII PCDs compare their HII defaults; everything else the plain defaults.
        sku_values = [skuobj.HiiDefaultValue if str_pcd.Type in [DscBuildData._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], DscBuildData._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]] else skuobj.DefaultValue for skuobj in str_pcd.SkuInfoList.values()]
        sku_values.append(pcd_default_value)

        def get_length(value):
            """Byte length implied by one textual PCD value."""
            Value = value.strip()
            if len(value) > 1:
                # GUID literal -> 16 bytes.
                if Value.startswith(TAB_GUID) and Value.endswith(')'):
                    return  16
                # L"..." / "..." string literals: payload character count.
                if Value.startswith('L"') and Value.endswith('"'):
                    return len(Value[2:-1])
                if Value[0] == '"' and Value[-1] == '"':
                    return len(Value) - 2
                # {CODE(...)} initializer: count of comma-separated items
                # after stripping C comments.
                if Value.strip().startswith("{CODE("):
                    tmpValue = RemoveCComments(Value)
                    return len(tmpValue.split(","))
                # {...} byte array: number of comma-separated bytes.
                if (Value[0] == '{' and Value[-1] == '}'):
                    return len(Value.split(","))
                # L'...' / '...' multi-char literals.
                if Value.startswith("L'") and Value.endswith("'") and len(list(Value[2:-1])) > 1:
                    return  len(list(Value[2:-1]))
                if Value[0] == "'" and Value[-1] == "'" and len(list(Value[1:-1])) > 1:
                    return len(Value) - 2
            return len(Value)

        return str(max(get_length(item) for item in sku_values))
@staticmethod
def ExecuteCommand (Command):
try:
Process = subprocess.Popen(Command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except:
EdkLogger.error('Build', COMMAND_FAILURE, 'Can not execute command: %s' % Command)
Result = Process.communicate()
return Process.returncode, Result[0].decode(errors='ignore'), Result[1].decode(errors='ignore')
@staticmethod
def IntToCString(Value, ValueSize):
Result = '"'
if not isinstance (Value, str):
for Index in range(0, ValueSize):
Result = Result + '\\x%02x' % (Value & 0xff)
Value = Value >> 8
Result = Result + '"'
return Result
    def GenerateSizeFunction(self, Pcd):
        """Emit the C function Cal_<Guid>_<Name>_Size() for *Pcd*.

        The generated function grows *Size to the maximum of: the declared
        datum type, every {CODE()} initializer, every flexible-array field
        assignment from DEC/DSC/FDF/command line, the PCD's declared max
        size, and the capacity implied by explicit array indices.
        """
        CApp = "// Default Value in Dec \n"
        CApp = CApp + "void Cal_%s_%s_Size(UINT32 *Size){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)

        if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
            # Fixed-capacity array: sizeof the full type is a lower bound.
            CApp += "  *Size = (sizeof (%s) > *Size ? sizeof (%s) : *Size);\n" % (Pcd.DatumType,Pcd.DatumType)
        else:
            if "{CODE(" in Pcd.DefaultValueFromDec:
                CApp += "  *Size = (sizeof (%s_%s_INIT_Value) > *Size ? sizeof (%s_%s_INIT_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Pcd.TokenSpaceGuidCName,Pcd.TokenCName)
            if Pcd.Type in PCD_DYNAMIC_TYPE_SET | PCD_DYNAMIC_EX_TYPE_SET:
                # Dynamic PCDs: account for each SKU/default-store raw value.
                for skuname in Pcd.SkuInfoList:
                    skuobj = Pcd.SkuInfoList[skuname]
                    if skuobj.VariableName:
                        for defaultstore in skuobj.DefaultStoreDict:
                            pcddef = self.GetPcdDscRawDefaultValue(Pcd,skuname,defaultstore)
                            if pcddef:
                                if "{CODE(" in pcddef:
                                    CApp += "  *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore)
                                else:
                                    CApp += "  *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
                    else:
                        pcddef = self.GetPcdDscRawDefaultValue(Pcd,skuname,TAB_DEFAULT_STORES_DEFAULT)
                        if pcddef:
                            if "{CODE(" in pcddef:
                                CApp += "  *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT)
                            else:
                                CApp += "  *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
            else:
                pcddef = self.GetPcdDscRawDefaultValue(Pcd,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT)
                if pcddef:
                    if "{CODE(" in pcddef:
                        CApp += "  *Size = (sizeof (%s_%s_%s_%s_Value) > *Size ? sizeof (%s_%s_%s_%s_Value) : *Size);\n" % (Pcd.TokenSpaceGuidCName,Pcd.TokenCName,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,TAB_DEFAULT,TAB_DEFAULT_STORES_DEFAULT)
                    else:
                        CApp += "  *Size = %s > *Size ? %s : *Size;\n" % (self.GetStructurePcdMaxSize(Pcd),self.GetStructurePcdMaxSize(Pcd))
        ActualCap = []
        # DEC-level field assignments.
        for index in Pcd.DefaultValues:
            if index:
                ActualCap.append(index)
            FieldList = Pcd.DefaultValues[index]
            if not FieldList:
                continue
            for FieldName in FieldList:
                FieldName = "." + FieldName
                IsArray = _IsFieldValueAnArray(FieldList[FieldName.strip(".")][0])
                if IsArray and not (FieldList[FieldName.strip(".")][0].startswith('{GUID') and FieldList[FieldName.strip(".")][0].endswith('}')):
                    try:
                        Value = ValueExpressionEx(FieldList[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
                    except BadExpression:
                        EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                        (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2]))
                    Value, ValueSize = ParseFieldValue(Value)
                    if not Pcd.IsArray():
                        CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2], FieldList[FieldName.strip(".")][0]);
                else:
                    # Rewrite "a[i].b" to "a[0].b" while remembering the
                    # largest index, so __FLEXIBLE_SIZE sees the field name.
                    NewFieldName = ''
                    FieldName_ori = FieldName.strip('.')
                    while '[' in  FieldName:
                        NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
                        Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
                        FieldName = FieldName.split(']', 1)[1]
                    FieldName = NewFieldName + FieldName
                    while '[' in FieldName and not Pcd.IsArray():
                        FieldName = FieldName.rsplit('[', 1)[0]
                        CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, FieldList[FieldName_ori][1], FieldList[FieldName_ori][2], FieldList[FieldName_ori][0])
        # DSC-level per-SKU/per-store field assignments.
        for skuname in Pcd.SkuOverrideValues:
            if skuname == TAB_COMMON:
                continue
            for defaultstorenameitem in Pcd.SkuOverrideValues[skuname]:
                CApp = CApp + "// SkuName: %s,  DefaultStoreName: %s \n" % (skuname, defaultstorenameitem)
                for index in Pcd.SkuOverrideValues[skuname][defaultstorenameitem]:
                    if index:
                        ActualCap.append(index)
                    for FieldList in [Pcd.SkuOverrideValues[skuname][defaultstorenameitem][index]]:
                        if not FieldList:
                            continue
                        for FieldName in FieldList:
                            FieldName = "." + FieldName
                            IsArray = _IsFieldValueAnArray(FieldList[FieldName.strip(".")][0])
                            if IsArray and not (FieldList[FieldName.strip(".")][0].startswith('{GUID') and FieldList[FieldName.strip(".")][0].endswith('}')):
                                try:
                                    Value = ValueExpressionEx(FieldList[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
                                except BadExpression:
                                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                                    (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2]))
                                Value, ValueSize = ParseFieldValue(Value)
                                if not Pcd.IsArray():
                                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), FieldList[FieldName.strip(".")][1], FieldList[FieldName.strip(".")][2], FieldList[FieldName.strip(".")][0]);
                            else:
                                NewFieldName = ''
                                FieldName_ori = FieldName.strip('.')
                                while '[' in  FieldName:
                                    NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
                                    Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
                                    FieldName = FieldName.split(']', 1)[1]
                                FieldName = NewFieldName + FieldName
                                while '[' in FieldName and not Pcd.IsArray():
                                    FieldName = FieldName.rsplit('[', 1)[0]
                                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, FieldList[FieldName_ori][1], FieldList[FieldName_ori][2], FieldList[FieldName_ori][0])
        # FDF-level field assignments.
        if Pcd.PcdFieldValueFromFdf:
            CApp = CApp + "// From fdf \n"
        for FieldName in Pcd.PcdFieldValueFromFdf:
            FieldName = "." + FieldName
            IsArray = _IsFieldValueAnArray(Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0])
            if IsArray and not (Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0].startswith('{GUID') and Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0].endswith('}')):
                try:
                    Value = ValueExpressionEx(Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
                except BadExpression:
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                    (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][1], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][2]))
                Value, ValueSize = ParseFieldValue(Value)
                if not Pcd.IsArray():
                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][1], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][2], Pcd.PcdFieldValueFromFdf[FieldName.strip(".")][0]);
            else:
                NewFieldName = ''
                FieldName_ori = FieldName.strip('.')
                while '[' in  FieldName:
                    NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
                    Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
                    FieldName = FieldName.split(']', 1)[1]
                FieldName = NewFieldName + FieldName
                while '[' in FieldName:
                    FieldName = FieldName.rsplit('[', 1)[0]
                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %s Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, Pcd.PcdFieldValueFromFdf[FieldName_ori][1], Pcd.PcdFieldValueFromFdf[FieldName_ori][2], Pcd.PcdFieldValueFromFdf[FieldName_ori][0])
        # Command-line field assignments.
        if Pcd.PcdFieldValueFromComm:
            CApp = CApp + "// From Command Line \n"
        for FieldName in Pcd.PcdFieldValueFromComm:
            FieldName = "." + FieldName
            IsArray = _IsFieldValueAnArray(Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0])
            if IsArray and not (Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0].startswith('{GUID') and Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0].endswith('}')):
                try:
                    Value = ValueExpressionEx(Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0], TAB_VOID, self._GuidDict)(True)
                except BadExpression:
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                    (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName.strip('.'))), Pcd.PcdFieldValueFromComm[FieldName.strip(".")][1], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][2]))
                Value, ValueSize = ParseFieldValue(Value)
                if not Pcd.IsArray():
                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d / __ARRAY_ELEMENT_SIZE(%s, %s) + ((%d %% __ARRAY_ELEMENT_SIZE(%s, %s)) ? 1 : 0)); // From %s Line %d Value %s\n' % (Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), ValueSize, Pcd.DatumType, FieldName.strip("."), Pcd.PcdFieldValueFromComm[FieldName.strip(".")][1], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][2], Pcd.PcdFieldValueFromComm[FieldName.strip(".")][0]);
            else:
                NewFieldName = ''
                FieldName_ori = FieldName.strip('.')
                while '[' in  FieldName:
                    NewFieldName = NewFieldName + FieldName.split('[', 1)[0] + '[0]'
                    Array_Index = int(FieldName.split('[', 1)[1].split(']', 1)[0])
                    FieldName = FieldName.split(']', 1)[1]
                FieldName = NewFieldName + FieldName
                while '[' in FieldName and not Pcd.IsArray():
                    FieldName = FieldName.rsplit('[', 1)[0]
                    CApp = CApp + '  __FLEXIBLE_SIZE(*Size, %s, %s, %d); // From %s Line %d Value %s \n' % (Pcd.DatumType, FieldName.strip("."), Array_Index + 1, Pcd.PcdFieldValueFromComm[FieldName_ori][1], Pcd.PcdFieldValueFromComm[FieldName_ori][2], Pcd.PcdFieldValueFromComm[FieldName_ori][0])
        if Pcd.GetPcdMaxSize():
            CApp = CApp + "  *Size = (%d > *Size ? %d : *Size); // The Pcd maxsize is %d \n" % (Pcd.GetPcdMaxSize(), Pcd.GetPcdMaxSize(), Pcd.GetPcdMaxSize())
        # Explicit array indices may demand more elements than any value.
        ArraySizeByAssign = self.CalculateActualCap(ActualCap)
        if ArraySizeByAssign > 1:
            CApp = CApp + "  *Size = (%d > *Size ? %d : *Size); \n" % (ArraySizeByAssign, ArraySizeByAssign)
        CApp = CApp + "}\n"
        return CApp
def CalculateActualCap(self,ActualCap):
if not ActualCap:
return 1
maxsize = 1
for item in ActualCap:
index_elements = ArrayIndex.findall(item)
rt = 1
for index_e in index_elements:
index_num = index_e.lstrip("[").rstrip("]").strip()
if not index_num:
# Not support flexiable pcd array assignment
return 1
index_num = int(index_num,16) if index_num.startswith(("0x","0X")) else int(index_num)
rt = rt * (index_num+1)
if rt >maxsize:
maxsize = rt
return maxsize
    @staticmethod
    def GenerateSizeStatments(Pcd,skuname,defaultstorename):
        """Emit the C statements that initialize the local ``Size`` variable
        for *Pcd* and invoke its Cal_<Guid>_<Name>_Size() helper.

        For flexible arrays (last capacity '0'/'-1') the element count is
        derived from the current max value size; otherwise sizeof the full
        (possibly multi-dimensional) type is used.
        """
        if Pcd.IsArray():
            r_datatype = [Pcd.BaseDatumType]
            lastoneisEmpty = False
            for dem in Pcd.Capacity:
                if lastoneisEmpty:
                    # Only the last dimension may be flexible.
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. " %
                                    (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName))))
                if dem == '0' or dem == "-1":
                    r_datatype.append("[1]")
                    lastoneisEmpty = True
                else:
                    r_datatype.append("[" + dem + "]")

            if Pcd.Type in [MODEL_PCD_DYNAMIC_EX_HII, MODEL_PCD_DYNAMIC_HII]:
                PcdDefValue = Pcd.SkuInfoList.get(skuname).DefaultStoreDict.get(defaultstorename)
            elif Pcd.Type in [MODEL_PCD_DYNAMIC_EX_DEFAULT,MODEL_PCD_DYNAMIC_VPD,MODEL_PCD_DYNAMIC_DEFAULT,MODEL_PCD_DYNAMIC_EX_VPD]:
                PcdDefValue = Pcd.SkuInfoList.get(skuname).DefaultValue
            else:
                PcdDefValue = Pcd.DefaultValue
            if lastoneisEmpty:
                if "{CODE(" not in PcdDefValue:
                    # Round the max byte size up to whole elements.
                    sizebasevalue_plus = "(%s / sizeof(%s) + 1)" % ((DscBuildData.GetStructurePcdMaxSize(Pcd), Pcd.BaseDatumType))
                    sizebasevalue = "(%s / sizeof(%s))" % ((DscBuildData.GetStructurePcdMaxSize(Pcd), Pcd.BaseDatumType))
                    sizeof = "sizeof(%s)" % Pcd.BaseDatumType
                    CApp = '  int ArraySize = %s %% %s ? %s : %s ;\n' % ( (DscBuildData.GetStructurePcdMaxSize(Pcd), sizeof, sizebasevalue_plus, sizebasevalue))
                    CApp += '  Size = ArraySize * sizeof(%s); \n' % Pcd.BaseDatumType
                else:
                    # {CODE()} initializer: let the Cal_*_Size helper decide.
                    CApp = "  Size = 0;\n"
            else:
                CApp = '  Size = sizeof(%s);\n' % ("".join(r_datatype) )
        else:
            CApp = '  Size = sizeof(%s);\n' % (Pcd.DatumType)
        CApp = CApp + '  Cal_%s_%s_Size(&Size);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
        return CApp
    def GetIndicator(self,index,FieldName,Pcd):
        """Build the C lvalue expression addressing element *index* / field
        *FieldName* of the flattened Pcd buffer.

        Multi-dimensional indices are linearized against Pcd.Capacity, with
        the flexible last dimension expressed via Size/sizeof arithmetic.
        """
        def cleanupindex(indexstr):
            # "[ 3 ]" -> "3"
            return indexstr.strip("[").strip("]").strip()
        index_elements = ArrayIndex.findall(index)
        pcd_capacity = Pcd.Capacity
        if index:
            indicator = "(Pcd"
            if len(pcd_capacity)>2:
                # Three or more dimensions: fold all but the last two into the
                # offset, then use runtime Size for the flexible dimension.
                for i in range(0,len(index_elements)):
                    index_ele = index_elements[i]
                    index_num = index_ele.strip("[").strip("]").strip()
                    if i == len(index_elements) -2:
                        indicator += "+ %d*Size/sizeof(%s)/%d + %s)" %(int(cleanupindex(index_elements[i+1])),Pcd.BaseDatumType,reduce(lambda x,y: int(x)*int(y),pcd_capacity[:-1]), cleanupindex(index_elements[i]))
                        break
                    else:
                        indicator += " + %d*%s*Size/sizeof(%s)/%d" %(int(cleanupindex(index_elements[i])),reduce(lambda x,y: int(x)*int(y),pcd_capacity[i+1:-1]),Pcd.BaseDatumType,reduce(lambda x,y: int(x)*int(y),pcd_capacity[:-1]))
            elif len(pcd_capacity) == 2:
                indicator += "+ %d*Size/sizeof(%s)/%d + %s)" %(int(cleanupindex(index_elements[0])),Pcd.BaseDatumType,int(pcd_capacity[0]), index_elements[1].strip("[").strip("]").strip())
            elif len(pcd_capacity) == 1:
                index_ele = index_elements[0]
                index_num = index_ele.strip("[").strip("]").strip()
                indicator += " + %s)" % (index_num)
        else:
            indicator = "Pcd"
        if FieldName:
            indicator += "->" + FieldName
        return indicator
def GetStarNum(self,Pcd):
if not Pcd.IsArray():
return 1
elif Pcd.IsSimpleTypeArray():
return len(Pcd.Capacity)
else:
return len(Pcd.Capacity) + 1
    def GenerateDefaultValueAssignFunction(self, Pcd):
        """Emit the C function Assign_<Guid>_<Name>_Default_Value() that
        applies *Pcd*'s DEC default: first the whole-PCD default (memcpy for
        arrays, assignment for scalars), then each DEC field-level override.
        """
        CApp = "// Default value in Dec \n"
        CApp = CApp + "void Assign_%s_%s_Default_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.BaseDatumType)
        CApp = CApp + '  UINT32  FieldSize;\n'
        CApp = CApp + '  CHAR8  *Value;\n'
        CApp = CApp + ' UINT32  PcdArraySize;\n'
        DefaultValueFromDec = Pcd.DefaultValueFromDec
        IsArray = _IsFieldValueAnArray(Pcd.DefaultValueFromDec)
        if IsArray:
            try:
                DefaultValueFromDec = ValueExpressionEx(Pcd.DefaultValueFromDec, TAB_VOID)(True)
            except BadExpression:
                EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from DEC: %s" %
                                (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, DefaultValueFromDec))
        DefaultValueFromDec = StringToArray(DefaultValueFromDec)
        Value, ValueSize = ParseFieldValue (DefaultValueFromDec)
        if IsArray:
            #
            # Use memcpy() to copy value into field
            #
            if Pcd.IsArray():
                pcdarraysize = Pcd.PcdArraySize()
                if "{CODE(" in Pcd.DefaultValueFromDec:
                    if Pcd.Capacity[-1] != "-1":
                        # Fixed capacity: guard against oversized initializer.
                        CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_INIT_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dec exceed the array capability %s"); // From  %s Line  %s \n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,Pcd.DefaultValueFromDecInfo[0],Pcd.DefaultValueFromDecInfo[1])
                    CApp = CApp + ' PcdArraySize = sizeof(%s_%s_INIT_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
                    CApp = CApp + ' memcpy (Pcd, %s_%s_INIT_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
                else:
                    if Pcd.Capacity[-1] != "-1":
                        CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dec exceed the array capability %s"); // From  %s Line %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,Pcd.DefaultValueFromDecInfo[0],Pcd.DefaultValueFromDecInfo[1])
                    CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
                    CApp = CApp + '  Value     = %s; // From DEC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultValueFromDec)
                    CApp = CApp + '  memcpy (Pcd, Value, PcdArraySize);\n'
            else:
                if "{CODE(" in Pcd.DefaultValueFromDec:
                    CApp = CApp + '  PcdArraySize = sizeof(%s_%s_INIT_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
                    CApp = CApp + '  memcpy (Pcd, &%s_%s_INIT_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
                else:
                    CApp = CApp + '  Value     = %s; // From DEC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultValueFromDec)
                    CApp = CApp + '  memcpy (Pcd, Value, %d);\n' % (ValueSize)
        elif isinstance(Value, str):
            CApp = CApp + '  Pcd = %s; // From DEC Default Value %s\n' % (Value, Pcd.DefaultValueFromDec)
        # DEC field-level overrides, applied after the whole-PCD default.
        for index in Pcd.DefaultValues:
            FieldList = Pcd.DefaultValues[index]
            if not FieldList:
                continue
            for FieldName in FieldList:
                IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
                if IsArray:
                    try:
                        FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
                    except BadExpression:
                        EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                        (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
                try:
                    Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
                except Exception:
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))

                indicator = self.GetIndicator(index, FieldName,Pcd)
                if IsArray:
                    #
                    # Use memcpy() to copy value into field
                    #
                    CApp = CApp + '  FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
                    CApp = CApp + '  Value     = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  memcpy (&%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (indicator, ValueSize, ValueSize)
                elif isinstance(Value, str):
                    CApp = CApp + '  %s = %s; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                else:
                    if '[' in FieldName and ']' in FieldName:
                        # Indexed field: assert the index is within the array.
                        Index = int(FieldName.split('[')[1].split(']')[0])
                        CApp = CApp + '  __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
                    if ValueSize > 4:
                        # Wider than 32 bits: emit a ULL literal.
                        CApp = CApp + '  %s = %dULL; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    else:
                        CApp = CApp + '  %s = %d; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
        CApp = CApp + "}\n"
        return CApp
@staticmethod
def GenerateDefaultValueAssignStatement(Pcd):
CApp = ' Assign_%s_%s_Default_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
def GetPcdDscRawDefaultValue(self,Pcd, SkuName,DefaultStoreName):
if Pcd.Type in PCD_DYNAMIC_TYPE_SET or Pcd.Type in PCD_DYNAMIC_EX_TYPE_SET:
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT):
pcddefaultvalue = Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT) if Pcd.DefaultFromDSC else None
else:
pcddefaultvalue = Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName)
else:
pcddefaultvalue = Pcd.DscRawValue.get(SkuName, {}).get(TAB_DEFAULT_STORES_DEFAULT)
return pcddefaultvalue
def GetPcdDscRawValueInfo(self,Pcd, SkuName,DefaultStoreName):
DscValueInfo = Pcd.DscRawValueInfo.get(SkuName, {}).get(DefaultStoreName)
if DscValueInfo:
dscfilepath,lineno = DscValueInfo
else:
dscfilepath = self.MetaFile.File
lineno = ""
return dscfilepath,lineno
def GenerateInitValueFunction(self, Pcd, SkuName, DefaultStoreName):
CApp = "// Value in Dsc for Sku: %s, DefaultStore %s\n" % (SkuName, DefaultStoreName)
CApp = CApp + "void Assign_%s_%s_%s_%s_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, SkuName, DefaultStoreName, Pcd.BaseDatumType)
CApp = CApp + ' UINT32 FieldSize;\n'
CApp = CApp + ' CHAR8 *Value;\n'
CApp = CApp + ' UINT32 PcdArraySize;\n'
CApp = CApp + "// SkuName: %s, DefaultStoreName: %s \n" % (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT)
inherit_OverrideValues = Pcd.SkuOverrideValues[SkuName]
dscfilepath,lineno = self.GetPcdDscRawValueInfo(Pcd, SkuName, DefaultStoreName)
if lineno:
valuefrom = "%s Line %s" % (dscfilepath,str(lineno))
else:
valuefrom = dscfilepath
pcddefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, SkuName, DefaultStoreName)
if pcddefaultvalue:
FieldList = pcddefaultvalue
IsArray = _IsFieldValueAnArray(FieldList)
if IsArray:
if "{CODE(" not in FieldList:
try:
FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
except BadExpression:
EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from DSC: %s" %
(Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
Value, ValueSize = ParseFieldValue (FieldList)
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT):
if isinstance(Value, str):
if "{CODE(" in Value:
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
pcdarraysize = Pcd.PcdArraySize()
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType, valuefrom)
CApp = CApp+ ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value,PcdArraySize);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Pcd = %s; // From DSC Default Value %s\n' % (Value, Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
elif IsArray:
#
# Use memcpy() to copy value into field
#
if Pcd.IsArray():
pcdarraysize = Pcd.PcdArraySize()
if "{CODE(" in pcddefaultvalue:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, %s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, PcdArraySize);\n'
else:
if "{CODE(" in pcddefaultvalue:
CApp = CApp + ' PcdArraySize = %d < sizeof(%s) * %d ? %d: sizeof(%s) * %d;\n ' % (ValueSize,Pcd.BaseDatumType,pcdarraysize,ValueSize,Pcd.BaseDatumType,pcdarraysize)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DefaultFromDSC.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
else:
if isinstance(Value, str):
if "{CODE(" in Value:
if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
pcdarraysize = Pcd.PcdArraySize()
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n '% (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Pcd = %s; // From DSC Default Value %s\n' % (Value, Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName))
elif IsArray:
#
# Use memcpy() to copy value into field
#
if Pcd.IsArray():
pcdarraysize = Pcd.PcdArraySize()
if "{CODE(" in pcddefaultvalue:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(sizeof(%s_%s_%s_%s_Value) < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp + ' PcdArraySize = sizeof(%s_%s_%s_%s_Value);\n ' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
CApp = CApp + ' memcpy (Pcd, %s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
if Pcd.Capacity[-1] != "-1":
CApp = CApp + '__STATIC_ASSERT(%d < %d * sizeof(%s), "Pcd %s.%s Value in Dsc exceed the array capability %s"); // From %s \n' % (ValueSize,pcdarraysize,Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.DatumType,valuefrom)
CApp = CApp + ' PcdArraySize = %d;\n' % ValueSize
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DscRawValue.get(TAB_DEFAULT, {}).get(TAB_DEFAULT_STORES_DEFAULT, Pcd.DefaultValue) if Pcd.DefaultFromDSC else Pcd.DefaultValue)
CApp = CApp + ' memcpy (Pcd, Value, PcdArraySize);\n'
else:
if "{CODE(" in pcddefaultvalue:
CApp = CApp + ' PcdArraySize = %d < sizeof(%s) * %d ? %d: sizeof(%s) * %d;\n ' % (ValueSize,Pcd.BaseDatumType,pcdarraysize,ValueSize,Pcd.BaseDatumType,pcdarraysize)
CApp = CApp + ' memcpy (Pcd, &%s_%s_%s_%s_Value, PcdArraySize);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,SkuName, DefaultStoreName)
else:
CApp = CApp + ' Value = %s; // From DSC Default Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), Pcd.DscRawValue.get(SkuName, {}).get(DefaultStoreName))
CApp = CApp + ' memcpy (Pcd, Value, %d);\n' % (ValueSize)
inheritvalue = inherit_OverrideValues.get(DefaultStoreName)
if not inheritvalue:
inheritvalue = []
for index in inheritvalue:
FieldList = inheritvalue[index]
if not FieldList:
continue
if (SkuName, DefaultStoreName) == (TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT) or (( (SkuName, '') not in Pcd.ValueChain) and ( (SkuName, DefaultStoreName) not in Pcd.ValueChain )):
for FieldName in FieldList:
indicator = self.GetIndicator(index, FieldName,Pcd)
IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
if IsArray:
try:
FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
except BadExpression:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
(".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
try:
Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
except Exception:
EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
if isinstance(Value, str):
CApp = CApp + ' Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
elif IsArray:
#
# Use memcpy() to copy value into field
#
CApp = CApp + ' FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
CApp = CApp + ' Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + ' memcpy (&%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (indicator, ValueSize, ValueSize)
else:
if '[' in FieldName and ']' in FieldName:
Index = int(FieldName.split('[')[1].split(']')[0])
CApp = CApp + ' __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
if ValueSize > 4:
CApp = CApp + ' %s = %dULL; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
else:
CApp = CApp + ' %s = %d; // From %s Line %d Value %s\n' % (indicator, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
CApp = CApp + "}\n"
return CApp
@staticmethod
def GenerateInitValueStatement(Pcd, SkuName, DefaultStoreName):
CApp = ' Assign_%s_%s_%s_%s_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, SkuName, DefaultStoreName)
return CApp
    def GenerateCommandLineValue(self, Pcd):
        """Emit the C function Assign_<Guid>_<Pcd>_CommandLine_Value() that applies
        any --pcd command-line override (whole-PCD value and/or individual field
        overrides) to a structure PCD.

        :param Pcd: structure PCD object
        :return: generated C source text
        """
        CApp = "// Value in CommandLine\n"
        CApp = CApp + "void Assign_%s_%s_CommandLine_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.BaseDatumType)
        CApp = CApp + '  UINT32  FieldSize;\n'
        CApp = CApp + '  CHAR8   *Value;\n'
        pcddefaultvalue = Pcd.PcdValueFromComm
        # First iteration handles the whole-PCD override (if any), second the
        # per-field overrides; the whole value is assigned before fields so
        # field overrides win.
        for FieldList in [pcddefaultvalue, Pcd.PcdFieldValueFromComm]:
            if not FieldList:
                continue
            if pcddefaultvalue and FieldList == pcddefaultvalue:
                IsArray = _IsFieldValueAnArray(FieldList)
                if IsArray:
                    try:
                        FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
                    except BadExpression:
                        EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from Command: %s" %
                                        (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
                Value, ValueSize = ParseFieldValue (FieldList)
                if isinstance(Value, str):
                    CApp = CApp + '  Pcd = %s; // From Command Line \n' % (Value)
                elif IsArray:
                    #
                    # Use memcpy() to copy value into field
                    #
                    CApp = CApp + '  Value = %s; // From Command Line.\n' % (DscBuildData.IntToCString(Value, ValueSize))
                    CApp = CApp + '  memcpy (Pcd, Value, %d);\n' % (ValueSize)
                continue
            for FieldName in FieldList:
                IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
                if IsArray:
                    try:
                        FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
                    except BadExpression:
                        EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                        (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
                    # NOTE(review): bare except prints "error" and continues; confirm
                    # which exception from ValueExpressionEx this is meant to mask.
                    except:
                        print("error")
                try:
                    Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
                except Exception:
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
                if isinstance(Value, str):
                    CApp = CApp + '  Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                elif IsArray:
                    #
                    # Use memcpy() to copy value into field
                    #
                    CApp = CApp + '  FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
                    CApp = CApp + '  Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  memcpy (&Pcd->%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (FieldName, ValueSize, ValueSize)
                else:
                    if '[' in FieldName and ']' in FieldName:
                        Index = int(FieldName.split('[')[1].split(']')[0])
                        CApp = CApp + '  __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
                    if ValueSize > 4:
                        CApp = CApp + '  Pcd->%s = %dULL; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    else:
                        CApp = CApp + '  Pcd->%s = %d; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
        CApp = CApp + "}\n"
        return CApp
@staticmethod
def GenerateCommandLineValueStatement(Pcd):
CApp = ' Assign_%s_%s_CommandLine_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
    def GenerateFdfValue(self,Pcd):
        """Emit the C function Assign_<Guid>_<Pcd>_Fdf_Value() that applies any FDF
        file override (whole-PCD value and/or individual field overrides) to a
        structure PCD.

        :param Pcd: structure PCD object
        :return: generated C source text
        """
        CApp = "// Value in Fdf\n"
        CApp = CApp + "void Assign_%s_%s_Fdf_Value(%s *Pcd){\n" % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName,Pcd.BaseDatumType)
        CApp = CApp + '  UINT32  FieldSize;\n'
        CApp = CApp + '  CHAR8   *Value;\n'
        pcddefaultvalue = Pcd.PcdValueFromFdf
        # Whole-PCD FDF value first, then the per-field overrides.
        for FieldList in [pcddefaultvalue,Pcd.PcdFieldValueFromFdf]:
            if not FieldList:
                continue
            if pcddefaultvalue and FieldList == pcddefaultvalue:
                IsArray = _IsFieldValueAnArray(FieldList)
                if IsArray:
                    try:
                        FieldList = ValueExpressionEx(FieldList, TAB_VOID)(True)
                    except BadExpression:
                        EdkLogger.error("Build", FORMAT_INVALID, "Invalid value format for %s.%s, from Fdf: %s" %
                                        (Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldList))
                Value, ValueSize = ParseFieldValue (FieldList)
                if isinstance(Value, str):
                    CApp = CApp + '  Pcd = %s; // From Fdf \n' % (Value)
                elif IsArray:
                    #
                    # Use memcpy() to copy value into field
                    #
                    CApp = CApp + '  Value = %s; // From Fdf .\n' % (DscBuildData.IntToCString(Value, ValueSize))
                    CApp = CApp + '  memcpy (Pcd, Value, %d);\n' % (ValueSize)
                continue
            for FieldName in FieldList:
                IsArray = _IsFieldValueAnArray(FieldList[FieldName][0])
                if IsArray:
                    try:
                        FieldList[FieldName][0] = ValueExpressionEx(FieldList[FieldName][0], TAB_VOID, self._GuidDict)(True)
                    except BadExpression:
                        EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " %
                                        (".".join((Pcd.TokenSpaceGuidCName, Pcd.TokenCName, FieldName)), FieldList[FieldName][1], FieldList[FieldName][2]))
                    # NOTE(review): bare except prints "error" and continues; confirm
                    # which exception from ValueExpressionEx this is meant to mask.
                    except:
                        print("error")
                try:
                    Value, ValueSize = ParseFieldValue (FieldList[FieldName][0])
                except Exception:
                    EdkLogger.error('Build', FORMAT_INVALID, "Invalid value format for %s. From %s Line %d " % (".".join((Pcd.TokenSpaceGuidCName,Pcd.TokenCName,FieldName)),FieldList[FieldName][1], FieldList[FieldName][2]))
                if isinstance(Value, str):
                    CApp = CApp + '  Pcd->%s = %s; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                elif IsArray:
                    #
                    # Use memcpy() to copy value into field
                    #
                    CApp = CApp + '  FieldSize = __FIELD_SIZE(%s, %s);\n' % (Pcd.BaseDatumType, FieldName)
                    CApp = CApp + '  Value = %s; // From %s Line %d Value %s\n' % (DscBuildData.IntToCString(Value, ValueSize), FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  __STATIC_ASSERT((__FIELD_SIZE(%s, %s) >= %d) || (__FIELD_SIZE(%s, %s) == 0), "Input buffer exceeds the buffer array"); // From %s Line %d Value %s\n' % (Pcd.BaseDatumType, FieldName, ValueSize, Pcd.BaseDatumType, FieldName, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    CApp = CApp + '  memcpy (&Pcd->%s, Value, (FieldSize > 0 && FieldSize < %d) ? FieldSize : %d);\n' % (FieldName, ValueSize, ValueSize)
                else:
                    if '[' in FieldName and ']' in FieldName:
                        Index = int(FieldName.split('[')[1].split(']')[0])
                        CApp = CApp + '  __STATIC_ASSERT((%d < __ARRAY_SIZE(Pcd->%s)) || (__ARRAY_SIZE(Pcd->%s) == 0), "array index exceeds the array number"); // From %s Line %d Index of %s\n' % (Index, FieldName.split('[')[0], FieldName.split('[')[0], FieldList[FieldName][1], FieldList[FieldName][2], FieldName)
                    if ValueSize > 4:
                        CApp = CApp + '  Pcd->%s = %dULL; // From %s Line %d Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
                    else:
                        CApp = CApp + '  Pcd->%s = %d; // From %s Line %s Value %s\n' % (FieldName, Value, FieldList[FieldName][1], FieldList[FieldName][2], FieldList[FieldName][0])
        CApp = CApp + "}\n"
        return CApp
@staticmethod
def GenerateFdfValueStatement(Pcd):
CApp = ' Assign_%s_%s_Fdf_Value(Pcd);\n' % (Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
return CApp
    def GenerateInitializeFunc(self, SkuName, DefaultStore, Pcd, InitByteValue, CApp):
        """Emit the C function Initialize_<Sku>_<Store>_<Guid>_<Pcd>() that builds
        the final value of one structure PCD: fetch the current value, allocate a
        working buffer, apply DEC default, DSC per-SKU/per-store values, FDF and
        command-line overrides in that order, then store the result back.

        :param SkuName:       SKU the function initializes
        :param DefaultStore:  default store the function initializes
        :param Pcd:           structure PCD object
        :param InitByteValue: accumulated "Sku.Store.Guid.Pcd|Type|Value" input lines
        :param CApp:          accumulated C source text
        :return: (updated InitByteValue, updated CApp)
        """
        OverrideValues = {DefaultStore:{}}
        if Pcd.SkuOverrideValues:
            OverrideValues = Pcd.SkuOverrideValues[SkuName]
        if not OverrideValues:
            OverrideValues = {TAB_DEFAULT_STORES_DEFAULT:Pcd.DefaultValues}
        for DefaultStoreName in OverrideValues:
            CApp = CApp + 'void\n'
            CApp = CApp + 'Initialize_%s_%s_%s_%s(\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
            CApp = CApp + '  void\n'
            CApp = CApp + '  )\n'
            CApp = CApp + '{\n'
            CApp = CApp + '  UINT32  Size;\n'
            CApp = CApp + '  UINT32  FieldSize;\n'
            CApp = CApp + '  CHAR8   *Value;\n'
            CApp = CApp + '  UINT32  OriginalSize;\n'
            CApp = CApp + '  VOID    *OriginalPcd;\n'
            CApp = CApp + '  %s      *Pcd;  // From %s Line %d \n' % (Pcd.BaseDatumType,Pcd.PkgPath, Pcd.PcdDefineLineNo)
            CApp = CApp + '\n'
            PcdDefaultValue = StringToArray(Pcd.DefaultValueFromDec.strip())
            InitByteValue += '%s.%s.%s.%s|%s|%s\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName, Pcd.DatumType, PcdDefaultValue)
            #
            # Get current PCD value and size
            #
            CApp = CApp + '  OriginalPcd = PcdGetPtr (%s, %s, %s, %s, &OriginalSize);\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
            #
            # Determine the size of the PCD. For simple structures, sizeof(TYPE) provides
            # the correct value. For structures with a flexible array member, the flexible
            # array member is detected, and the size is based on the highest index used with
            # the flexible array member. The flexible array member must be the last field
            # in a structure. The size formula for this case is:
            # OFFSET_OF(FlexbleArrayField) + sizeof(FlexibleArray[0]) * (HighestIndex + 1)
            #
            CApp = CApp + DscBuildData.GenerateSizeStatments(Pcd,SkuName,DefaultStoreName)
            if Pcd.IsArray() and Pcd.Capacity[-1] != "-1":
                CApp = CApp + '  OriginalSize = OriginalSize < sizeof(%s) * %d? OriginalSize:sizeof(%s) * %d; \n' % (Pcd.BaseDatumType,Pcd.PcdArraySize(),Pcd.BaseDatumType,Pcd.PcdArraySize())
                CApp = CApp + '  Size = sizeof(%s) * %d; \n' % (Pcd.BaseDatumType,Pcd.PcdArraySize())
            #
            # Allocate and zero buffer for the PCD
            # Must handle cases where current value is smaller, larger, or same size
            # Always keep that larger one as the current size
            #
            CApp = CApp + '  Size = (OriginalSize > Size ? OriginalSize : Size);\n'
            CApp = CApp + '  Pcd = (%s *)malloc (Size);\n' % (Pcd.BaseDatumType,)
            CApp = CApp + '  memset (Pcd, 0, Size);\n'
            #
            # Copy current PCD value into allocated buffer.
            #
            CApp = CApp + '  memcpy (Pcd, OriginalPcd, OriginalSize);\n'
            #
            # Assign field values in PCD
            #
            CApp = CApp + DscBuildData.GenerateDefaultValueAssignStatement(Pcd)
            if Pcd.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
                # Walk the SKU inheritance chain: parent SKU values are applied
                # first, stopping once this SKU's own values have been applied.
                for skuname in self.SkuIdMgr.GetSkuChain(SkuName):
                    storeset = [DefaultStoreName] if DefaultStoreName == TAB_DEFAULT_STORES_DEFAULT else [TAB_DEFAULT_STORES_DEFAULT, DefaultStoreName]
                    for defaultstorenameitem in storeset:
                        CApp = CApp + "// SkuName: %s,  DefaultStoreName: %s \n" % (skuname, defaultstorenameitem)
                        CApp = CApp + DscBuildData.GenerateInitValueStatement(Pcd, skuname, defaultstorenameitem)
                    if skuname == SkuName:
                        break
            else:
                CApp = CApp + "// SkuName: %s,  DefaultStoreName: STANDARD \n" % self.SkuIdMgr.SystemSkuId
                CApp = CApp + DscBuildData.GenerateInitValueStatement(Pcd, self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT)
            # FDF overrides DSC, command line overrides everything.
            CApp = CApp + DscBuildData.GenerateFdfValueStatement(Pcd)
            CApp = CApp + DscBuildData.GenerateCommandLineValueStatement(Pcd)
            #
            # Set new PCD value and size
            #
            CApp = CApp + '  PcdSetPtr (%s, %s, %s, %s, Size, (void *)Pcd);\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
            #
            # Free PCD
            #
            CApp = CApp + '  free (Pcd);\n'
            CApp = CApp + '}\n'
            CApp = CApp + '\n'
        return InitByteValue, CApp
def GenerateArrayAssignment(self, Pcd):
CApp = ""
if not Pcd:
return CApp
Demesion = ""
for d in Pcd.Capacity:
Demesion += "[]"
Value = Pcd.DefaultValueFromDec
if "{CODE(" in Pcd.DefaultValueFromDec:
realvalue = Pcd.DefaultValueFromDec.strip()[6:-2] # "{CODE(").rstrip(")}"
CApp += "static %s %s_%s_INIT_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Demesion,realvalue)
if Pcd.Type in PCD_DYNAMIC_TYPE_SET | PCD_DYNAMIC_EX_TYPE_SET:
for skuname in Pcd.SkuInfoList:
skuinfo = Pcd.SkuInfoList[skuname]
if skuinfo.VariableName:
for defaultstore in skuinfo.DefaultStoreDict:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, skuname, defaultstore)
if pcddscrawdefaultvalue:
Value = skuinfo.DefaultStoreDict[defaultstore]
if "{CODE(" in Value:
realvalue = Value.strip()[6:-2] # "{CODE(").rstrip(")}"
CApp += "static %s %s_%s_%s_%s_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,defaultstore,Demesion,realvalue)
else:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, skuname, TAB_DEFAULT_STORES_DEFAULT)
if pcddscrawdefaultvalue:
Value = skuinfo.DefaultValue
if "{CODE(" in Value:
realvalue = Value.strip()[6:-2] # "{CODE(").rstrip(")}"
CApp += "static %s %s_%s_%s_%s_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,skuname,TAB_DEFAULT_STORES_DEFAULT,Demesion,realvalue)
else:
pcddscrawdefaultvalue = self.GetPcdDscRawDefaultValue(Pcd, TAB_DEFAULT, TAB_DEFAULT_STORES_DEFAULT)
if pcddscrawdefaultvalue:
if "{CODE(" in Pcd.DefaultValue:
realvalue = Pcd.DefaultValue.strip()[6:-2] # "{CODE(").rstrip(")}"
CApp += "static %s %s_%s_DEFAULT_STANDARD_Value%s = %s;\n" % (Pcd.BaseDatumType,Pcd.TokenSpaceGuidCName,Pcd.TokenCName,Demesion,realvalue)
return CApp
def SkuOverrideValuesEmpty(self,OverrideValues):
if not OverrideValues:
return True
for key in OverrideValues:
if OverrideValues[key]:
return False
return True
def ParseCCFlags(self, ccflag):
ccflags = set()
ccflaglist = ccflag.split(" ")
i = 0
while i < len(ccflaglist):
item = ccflaglist[i].strip()
if item in (r"/D", r"/U","-D","-U"):
ccflags.add(" ".join((ccflaglist[i],ccflaglist[i+1])))
i = i+1
elif item.startswith((r"/D", r"/U","-D","-U")):
ccflags.add(item)
i +=1
return ccflags
    def GenerateByteArrayValue (self, StructuredPcds):
        """Generate, build and run the PcdValueInit C application that resolves the
        final byte-array value of every structure PCD.

        The generated C source includes the PCD structure headers, one assign
        function per value source (DEC default, DSC, FDF, command line) and an
        Initialize_* function per (SKU, store, PCD); a Makefile is written, the
        tool is built with make/nmake and run against Input.txt.

        :param StructuredPcds: dict mapping PCD name -> structure PCD object
        :return: list of (TokenSpaceGuid, PcdName, SkuName, DefaultStore, Value)
                 tuples parsed from the tool's Output.txt, or None when there are
                 no structured PCDs
        """
        #
        # Generate/Compile/Run C application to determine if there are any flexible array members
        #
        if not StructuredPcds:
            return
        InitByteValue = ""
        CApp = PcdMainCHeader
        # Include each structure header exactly once.
        IncludeFiles = set()
        for PcdName in StructuredPcds:
            Pcd = StructuredPcds[PcdName]
            for IncludeFile in Pcd.StructuredPcdIncludeFile:
                if IncludeFile not in IncludeFiles:
                    IncludeFiles.add(IncludeFile)
                    CApp = CApp + '#include <%s>\n' % (IncludeFile)
        CApp = CApp + '\n'
        for Pcd in StructuredPcds.values():
            CApp = CApp + self.GenerateArrayAssignment(Pcd)
        # Per-PCD functions: size, DEC default, FDF, command line, then the
        # per-SKU/per-store DSC value and Initialize_* wrapper functions.
        for PcdName in sorted(StructuredPcds.keys()):
            Pcd = StructuredPcds[PcdName]
            CApp = CApp + self.GenerateSizeFunction(Pcd)
            CApp = CApp + self.GenerateDefaultValueAssignFunction(Pcd)
            CApp = CApp + self.GenerateFdfValue(Pcd)
            CApp = CApp + self.GenerateCommandLineValue(Pcd)
            if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
                CApp = CApp + self.GenerateInitValueFunction(Pcd, self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT)
            else:
                for SkuName in self.SkuIdMgr.SkuOverrideOrder():
                    if SkuName not in Pcd.SkuOverrideValues:
                        continue
                    for DefaultStoreName in Pcd.SkuOverrideValues[SkuName]:
                        CApp = CApp + self.GenerateInitValueFunction(Pcd, SkuName, DefaultStoreName)
            if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD],
                        self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
                InitByteValue, CApp = self.GenerateInitializeFunc(self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT, Pcd, InitByteValue, CApp)
            else:
                for SkuName in self.SkuIdMgr.SkuOverrideOrder():
                    if SkuName not in Pcd.SkuOverrideValues:
                        continue
                    for DefaultStoreName in Pcd.DefaultStoreName:
                        Pcd = StructuredPcds[PcdName]
                        InitByteValue, CApp = self.GenerateInitializeFunc(SkuName, DefaultStoreName, Pcd, InitByteValue, CApp)
        # Entry point that calls every Initialize_* function.
        CApp = CApp + 'VOID\n'
        CApp = CApp + 'PcdEntryPoint(\n'
        CApp = CApp + '  VOID\n'
        CApp = CApp + '  )\n'
        CApp = CApp + '{\n'
        for Pcd in StructuredPcds.values():
            if self.SkuOverrideValuesEmpty(Pcd.SkuOverrideValues) or Pcd.Type in [self._PCD_TYPE_STRING_[MODEL_PCD_FIXED_AT_BUILD], self._PCD_TYPE_STRING_[MODEL_PCD_PATCHABLE_IN_MODULE]]:
                CApp = CApp + '  Initialize_%s_%s_%s_%s();\n' % (self.SkuIdMgr.SystemSkuId, TAB_DEFAULT_STORES_DEFAULT, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
            else:
                for SkuName in self.SkuIdMgr.SkuOverrideOrder():
                    if SkuName not in self.SkuIdMgr.AvailableSkuIdSet:
                        continue
                    for DefaultStoreName in Pcd.SkuOverrideValues[SkuName]:
                        CApp = CApp + '  Initialize_%s_%s_%s_%s();\n' % (SkuName, DefaultStoreName, Pcd.TokenSpaceGuidCName, Pcd.TokenCName)
        CApp = CApp + '}\n'
        CApp = CApp + PcdMainCEntry + '\n'
        if not os.path.exists(self.OutputPath):
            os.makedirs(self.OutputPath)
        CAppBaseFileName = os.path.join(self.OutputPath, PcdValueInitName)
        SaveFileOnChange(CAppBaseFileName + '.c', CApp, False)
        # Write the Makefile that builds the generated application.
        MakeApp = PcdMakefileHeader
        if sys.platform == "win32":
            MakeApp = MakeApp + 'APPFILE = %s\%s.exe\n' % (self.OutputPath, PcdValueInitName) + 'APPNAME = %s\n' % (PcdValueInitName) + 'OBJECTS = %s\%s.obj %s.obj\n' % (self.OutputPath, PcdValueInitName, os.path.join(self.OutputPath, PcdValueCommonName)) + 'INC = '
        else:
            MakeApp = MakeApp + PcdGccMakefile
            MakeApp = MakeApp + 'APPFILE = %s/%s\n' % (self.OutputPath, PcdValueInitName) + 'APPNAME = %s\n' % (PcdValueInitName) + 'OBJECTS = %s/%s.o %s.o\n' % (self.OutputPath, PcdValueInitName, os.path.join(self.OutputPath, PcdValueCommonName)) + \
                      'include $(MAKEROOT)/Makefiles/app.makefile\n' + 'INCLUDE +='
        # Collect -I search paths from every DEC the structure PCDs depend on.
        IncSearchList = []
        PlatformInc = OrderedDict()
        for Cache in self._Bdb._CACHE_.values():
            if Cache.MetaFile.Ext.lower() != '.dec':
                continue
            if Cache.Includes:
                if str(Cache.MetaFile.Path) not in PlatformInc:
                    PlatformInc[str(Cache.MetaFile.Path)] = []
                    PlatformInc[str(Cache.MetaFile.Path)].append (os.path.dirname(Cache.MetaFile.Path))
                    PlatformInc[str(Cache.MetaFile.Path)].extend (Cache.CommonIncludes)
        PcdDependDEC = []
        for Pcd in StructuredPcds.values():
            for PackageDec in Pcd.PackageDecs:
                Package = os.path.normpath(mws.join(GlobalData.gWorkspace, PackageDec))
                if not os.path.exists(Package):
                    EdkLogger.error('Build', RESOURCE_NOT_AVAILABLE, "The dependent Package %s of PCD %s.%s is not exist." % (PackageDec, Pcd.TokenSpaceGuidCName, Pcd.TokenCName))
                if Package not in PcdDependDEC:
                    PcdDependDEC.append(Package)
        if PlatformInc and PcdDependDEC:
            for pkg in PcdDependDEC:
                if pkg in PlatformInc:
                    for inc in PlatformInc[pkg]:
                        #
                        # Get list of files in potential -I include path
                        #
                        FileList = os.listdir (str(inc))
                        #
                        # Skip -I include path if one of the include files required
                        # by PcdValueInit.c are present in the include paths from
                        # the DEC file. PcdValueInit.c must use the standard include
                        # files from the host compiler.
                        #
                        if 'stdio.h' in FileList:
                            continue
                        if 'stdlib.h' in FileList:
                            continue
                        if 'string.h' in FileList:
                            continue
                        MakeApp += '-I'  + str(inc) + ' '
                        IncSearchList.append(inc)
        MakeApp = MakeApp + '\n'
        # Collect CC FLAGS build options that apply to this target/toolchain and
        # pass the macro defines/undefines on to the host compiler.
        CC_FLAGS = LinuxCFLAGS
        if sys.platform == "win32":
            CC_FLAGS = WindowsCFLAGS
        BuildOptions = OrderedDict()
        for Options in self.BuildOptions:
            if Options[2] != EDKII_NAME:
                continue
            Family = Options[0]
            if Family and Family != self.ToolChainFamily:
                continue
            Target, Tag, Arch, Tool, Attr = Options[1].split("_")
            if Tool != 'CC':
                continue
            if Attr != "FLAGS":
                continue
            if Target == TAB_STAR or Target == self._Target:
                if Tag == TAB_STAR or Tag == self._Toolchain:
                    if 'COMMON' not in BuildOptions:
                        BuildOptions['COMMON'] = set()
                    if Arch == TAB_STAR:
                        BuildOptions['COMMON']|= self.ParseCCFlags(self.BuildOptions[Options])
                    if Arch in self.SupArchList:
                        if Arch not in BuildOptions:
                            BuildOptions[Arch] = set()
                        BuildOptions[Arch] |= self.ParseCCFlags(self.BuildOptions[Options])
        if BuildOptions:
            # Only flags common to every architecture can be used for the
            # single host build.
            ArchBuildOptions = {arch:flags for arch,flags in BuildOptions.items() if arch != 'COMMON'}
            if len(ArchBuildOptions.keys()) == 1:
                BuildOptions['COMMON'] |= (list(ArchBuildOptions.values())[0])
            elif len(ArchBuildOptions.keys()) > 1:
                CommonBuildOptions = reduce(lambda x,y: x&y, ArchBuildOptions.values())
                BuildOptions['COMMON'] |= CommonBuildOptions
            ValueList = [item for item in BuildOptions['COMMON'] if item.startswith((r"/U","-U"))]
            ValueList.extend([item for item in BuildOptions['COMMON'] if item.startswith((r"/D", "-D"))])
            CC_FLAGS += " ".join(ValueList)
        MakeApp += CC_FLAGS
        if sys.platform == "win32":
            MakeApp = MakeApp + PcdMakefileEnd
            MakeApp = MakeApp + AppTarget % ("""\tcopy $(APPLICATION) $(APPFILE) /y """)
        else:
            MakeApp = MakeApp + AppTarget % ("""\tcp $(APPLICATION) $(APPFILE) """)
        MakeApp = MakeApp + '\n'
        # Record header dependencies so make rebuilds when a header changes.
        IncludeFileFullPaths = []
        for includefile in IncludeFiles:
            for includepath in IncSearchList:
                includefullpath = os.path.join(str(includepath), includefile)
                if os.path.exists(includefullpath):
                    IncludeFileFullPaths.append(os.path.normpath(includefullpath))
                    break
        SearchPathList = []
        SearchPathList.append(os.path.normpath(mws.join(GlobalData.gGlobalDefines["EDK_TOOLS_PATH"], "BaseTools/Source/C/Include")))
        SearchPathList.append(os.path.normpath(mws.join(GlobalData.gGlobalDefines["EDK_TOOLS_PATH"], "BaseTools/Source/C/Common")))
        SearchPathList.extend(str(item) for item in IncSearchList)
        IncFileList = GetDependencyList(IncludeFileFullPaths, SearchPathList)
        for include_file in IncFileList:
            MakeApp += "$(OBJECTS) : %s\n" % include_file
        if sys.platform == "win32":
            PcdValueCommonPath = os.path.normpath(mws.join(GlobalData.gGlobalDefines["EDK_TOOLS_PATH"], "Source\C\Common\PcdValueCommon.c"))
            MakeApp = MakeApp + '%s\PcdValueCommon.c : %s\n' % (self.OutputPath, PcdValueCommonPath)
            MakeApp = MakeApp + '\tcopy /y %s $@\n' % (PcdValueCommonPath)
        else:
            PcdValueCommonPath = os.path.normpath(mws.join(GlobalData.gGlobalDefines["EDK_TOOLS_PATH"], "Source/C/Common/PcdValueCommon.c"))
            MakeApp = MakeApp + '%s/PcdValueCommon.c : %s\n' % (self.OutputPath, PcdValueCommonPath)
            MakeApp = MakeApp + '\tcp -f %s %s/PcdValueCommon.c\n' % (PcdValueCommonPath, self.OutputPath)
        MakeFileName = os.path.join(self.OutputPath, 'Makefile')
        MakeApp += "$(OBJECTS) : %s\n" % MakeFileName
        SaveFileOnChange(MakeFileName, MakeApp, False)
        # Input.txt carries one "Sku.Store.Guid.Pcd|Type|Value" line per PCD.
        InputValueFile = os.path.join(self.OutputPath, 'Input.txt')
        OutputValueFile = os.path.join(self.OutputPath, 'Output.txt')
        SaveFileOnChange(InputValueFile, InitByteValue, False)
        Dest_PcdValueInitExe = PcdValueInitName
        if not sys.platform == "win32":
            Dest_PcdValueInitExe = os.path.join(self.OutputPath, PcdValueInitName)
        else:
            Dest_PcdValueInitExe = os.path.join(self.OutputPath, PcdValueInitName) +".exe"
        # Build the application; nmake reports errors on stdout, make on stderr.
        Messages = ''
        if sys.platform == "win32":
            MakeCommand = 'nmake -f %s' % (MakeFileName)
            returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (MakeCommand)
            Messages = StdOut
        else:
            MakeCommand = 'make -f %s' % (MakeFileName)
            returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (MakeCommand)
            Messages = StdErr
        EdkLogger.verbose ('%s\n%s\n%s' % (MakeCommand, StdOut, StdErr))
        Messages = Messages.split('\n')
        MessageGroup = []
        if returncode != 0:
            # Map compiler errors in the generated PcdValueInit.c back to the
            # DSC lines recorded in the // From ... comments.
            CAppBaseFileName = os.path.join(self.OutputPath, PcdValueInitName)
            File = open (CAppBaseFileName + '.c', 'r')
            FileData = File.readlines()
            File.close()
            for Message in Messages:
                if " error" in Message or "warning" in Message:
                    try:
                        # MSVC style "file(line)" first, then "file:line".
                        FileInfo = Message.strip().split('(')
                        if len (FileInfo) > 1:
                            FileName = FileInfo [0]
                            FileLine = FileInfo [1].split (')')[0]
                        else:
                            FileInfo = Message.strip().split(':')
                            if len(FileInfo) < 2:
                                continue
                            FileName = FileInfo [0]
                            FileLine = FileInfo [1]
                    except:
                        continue
                    if "PcdValueInit.c" not in FileName:
                        continue
                    if FileLine.isdigit():
                        error_line = FileData[int (FileLine) - 1]
                        if r"//" in error_line:
                            c_line, dsc_line = error_line.split(r"//")
                        else:
                            dsc_line = error_line
                        message_itmes = Message.split(":")
                        Index = 0
                        if "PcdValueInit.c" not in Message:
                            if not MessageGroup:
                                MessageGroup.append(Message)
                            break
                        else:
                            for item in message_itmes:
                                if "PcdValueInit.c" in item:
                                    Index = message_itmes.index(item)
                                    message_itmes[Index] = dsc_line.strip()
                                    break
                            MessageGroup.append(":".join(message_itmes[Index:]).strip())
                            continue
                    else:
                        MessageGroup.append(Message)
            if MessageGroup:
                EdkLogger.error("build", PCD_STRUCTURE_PCD_ERROR, "\n".join(MessageGroup) )
            else:
                EdkLogger.error('Build', COMMAND_FAILURE, 'Can not execute command: %s\n%s\n%s' % (MakeCommand, StdOut, StdErr))
        # Run the tool only when its output is older than the exe or the input.
        if DscBuildData.NeedUpdateOutput(OutputValueFile, Dest_PcdValueInitExe, InputValueFile):
            Command = Dest_PcdValueInitExe + ' -i %s -o %s' % (InputValueFile, OutputValueFile)
            returncode, StdOut, StdErr = DscBuildData.ExecuteCommand (Command)
            EdkLogger.verbose ('%s\n%s\n%s' % (Command, StdOut, StdErr))
            if returncode != 0:
                EdkLogger.warn('Build', COMMAND_FAILURE, 'Can not collect output from command: %s\n%s\n' % (Command, StdOut, StdErr))
        # Parse "Sku.Store.Guid.Pcd|Type|Value" lines from Output.txt.
        File = open (OutputValueFile, 'r')
        FileBuffer = File.readlines()
        File.close()
        StructurePcdSet = []
        for Pcd in FileBuffer:
            PcdValue = Pcd.split ('|')
            PcdInfo = PcdValue[0].split ('.')
            StructurePcdSet.append((PcdInfo[0], PcdInfo[1], PcdInfo[2], PcdInfo[3], PcdValue[2].strip()))
        return StructurePcdSet
@staticmethod
def NeedUpdateOutput(OutputFile, ValueCFile, StructureInput):
if not os.path.exists(OutputFile):
return True
if os.stat(OutputFile).st_mtime <= os.stat(ValueCFile).st_mtime:
return True
if os.stat(OutputFile).st_mtime <= os.stat(StructureInput).st_mtime:
return True
return False
## Retrieve dynamic PCD settings
#
# @param Type PCD type
#
# @retval a dict object contains settings of given PCD type
#
def _GetDynamicPcd(self, Type):
    """Collect dynamic PCD settings of the given Type for self._Arch.

    Builds one PcdClassObject per (PcdCName, TokenSpaceGuid), merging
    per-SKU settings from the DSC records; SKUs without a value fall back
    to the DEC default, and a DEFAULT SKU entry is synthesized when absent.
    """
    Pcds = OrderedDict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 4)
    PcdList = []
    # Find out all possible PCD candidates for self._Arch
    RecordList = self._RawData[Type, self._Arch]
    AvailableSkuIdSet = copy.copy(self.SkuIds)
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
        SkuName = SkuName.upper()
        # COMMON records apply to the DEFAULT SKU.
        SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
        if SkuName not in AvailableSkuIdSet:
            EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
                            File=self.MetaFile, Line=Dummy5)
        # Skip records whose names embed structure access ('.') or array
        # indexing ('['); Dummy5 is the DSC line number used for dedup.
        if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
            PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
        Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid]
        if Setting is None:
            continue
        PcdValue, DatumType, MaxDatumSize = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
        if MaxDatumSize:
            # Size must fit a UINT16 and be non-negative.
            if int(MaxDatumSize, 0) > 0xFFFF:
                EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                File=self.MetaFile, Line=Dummy4)
            if int(MaxDatumSize, 0) < 0:
                EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                File=self.MetaFile, Line=Dummy4)
        SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], '', '', '', '', '', PcdValue)
        if (PcdCName, TokenSpaceGuid) in Pcds:
            # Existing PCD: attach this SKU and keep the largest MaxDatumSize.
            pcdObject = Pcds[PcdCName, TokenSpaceGuid]
            pcdObject.SkuInfoList[SkuName] = SkuInfo
            if MaxDatumSize.strip():
                CurrentMaxSize = int(MaxDatumSize.strip(), 0)
            else:
                CurrentMaxSize = 0
            if pcdObject.MaxDatumSize:
                PcdMaxSize = int(pcdObject.MaxDatumSize, 0)
            else:
                PcdMaxSize = 0
            if CurrentMaxSize > PcdMaxSize:
                pcdObject.MaxDatumSize = str(CurrentMaxSize)
        else:
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                DatumType,
                                                PcdValue,
                                                '',
                                                MaxDatumSize,
                                                OrderedDict({SkuName : SkuInfo}),
                                                False,
                                                None,
                                                IsDsc=True)
        # Record the raw DSC value and its (file, line) origin per SKU.
        if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
            Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
            Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
        Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = PcdValue
        Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Dummy4)
    for pcd in Pcds.values():
        pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
        # Only fix the value while no value provided in DSC file.
        for sku in pcd.SkuInfoList.values():
            if not sku.DefaultValue:
                sku.DefaultValue = pcdDecObject.DefaultValue
        # Guarantee a DEFAULT SKU entry, promoting COMMON when present.
        if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
            valuefromDec = pcdDecObject.DefaultValue
            SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', '', '', '', '', '', valuefromDec)
            pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
        elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
            del pcd.SkuInfoList[TAB_COMMON]
        elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            del pcd.SkuInfoList[TAB_COMMON]
    list(map(self.FilterSkuSettings, Pcds.values()))
    return Pcds
def FilterSkuSettings(self, PcdObj):
    """Collapse PcdObj.SkuInfoList according to the platform SKU usage.

    SINGLE usage keeps only the system SKU (re-labelled as the default
    SKU with id '0'); DEFAULT usage keeps only the default SKU entry.
    The PcdObj is modified in place and also returned.
    """
    sku_mgr = self.SkuIdMgr
    usage = sku_mgr.SkuUsageType
    if usage == sku_mgr.SINGLE:
        sku_infos = PcdObj.SkuInfoList
        system_sku = sku_mgr.SystemSkuId
        # Fall back to the default SKU's settings when the system SKU has
        # no entry of its own.
        if TAB_DEFAULT in sku_infos and system_sku not in sku_infos:
            sku_infos[system_sku] = sku_infos[TAB_DEFAULT]
        chosen = sku_infos[system_sku]
        chosen.SkuIdName = TAB_DEFAULT
        chosen.SkuId = '0'
        PcdObj.SkuInfoList = {TAB_DEFAULT: chosen}
    elif usage == sku_mgr.DEFAULT:
        PcdObj.SkuInfoList = {TAB_DEFAULT: PcdObj.SkuInfoList[TAB_DEFAULT]}
    return PcdObj
@staticmethod
def CompareVarAttr(Attr1, Attr2):
if not Attr1 or not Attr2: # for empty string
return True
Attr1s = [attr.strip() for attr in Attr1.split(",")]
Attr1Set = set(Attr1s)
Attr2s = [attr.strip() for attr in Attr2.split(",")]
Attr2Set = set(Attr2s)
if Attr2Set == Attr1Set:
return True
else:
return False
def CompletePcdValues(self, PcdSet):
    """Fill in missing SKU / default-store entries for dynamic PCDs.

    Non-dynamic PCDs pass through unchanged.  For dynamic HII PCDs each
    SKU's DefaultStoreDict is padded with the minimal store's value; every
    known SKU id then gets an entry (deep-copied from the nearest parent
    SKU present).  Returns a new OrderedDict of the completed PCDs.
    """
    Pcds = OrderedDict()
    DefaultStoreObj = DefaultStore(self._GetDefaultStores())
    # All SKU ids except COMMON, and the union of default-store names
    # used anywhere in the input set.
    SkuIds = {skuname:skuid for skuname, skuid in self.SkuIdMgr.AvailableSkuIdSet.items() if skuname != TAB_COMMON}
    DefaultStores = set(storename for pcdobj in PcdSet.values() for skuobj in pcdobj.SkuInfoList.values() for storename in skuobj.DefaultStoreDict)
    for PcdCName, TokenSpaceGuid in PcdSet:
        PcdObj = PcdSet[(PcdCName, TokenSpaceGuid)]
        if PcdObj.Type not in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_DEFAULT],
                               self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII],
                               self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_VPD],
                               self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_DEFAULT],
                               self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII],
                               self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_VPD]]:
            Pcds[PcdCName, TokenSpaceGuid]= PcdObj
            continue
        PcdType = PcdObj.Type
        if PcdType in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
            for skuid in PcdObj.SkuInfoList:
                skuobj = PcdObj.SkuInfoList[skuid]
                # Pad missing default stores with the minimal store's value.
                mindefaultstorename = DefaultStoreObj.GetMin(set(defaultstorename for defaultstorename in skuobj.DefaultStoreDict))
                for defaultstorename in DefaultStores:
                    if defaultstorename not in skuobj.DefaultStoreDict:
                        skuobj.DefaultStoreDict[defaultstorename] = skuobj.DefaultStoreDict[mindefaultstorename]
                skuobj.HiiDefaultValue = skuobj.DefaultStoreDict[mindefaultstorename]
        for skuname, skuid in SkuIds.items():
            if skuname not in PcdObj.SkuInfoList:
                # Inherit from the nearest ancestor SKU that has an entry.
                nextskuid = self.SkuIdMgr.GetNextSkuId(skuname)
                while nextskuid not in PcdObj.SkuInfoList:
                    nextskuid = self.SkuIdMgr.GetNextSkuId(nextskuid)
                PcdObj.SkuInfoList[skuname] = copy.deepcopy(PcdObj.SkuInfoList[nextskuid])
                PcdObj.SkuInfoList[skuname].SkuId = skuid
                PcdObj.SkuInfoList[skuname].SkuIdName = skuname
        if PcdType in [self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_HII], self._PCD_TYPE_STRING_[MODEL_PCD_DYNAMIC_EX_HII]]:
            PcdObj.DefaultValue = list(PcdObj.SkuInfoList.values())[0].HiiDefaultValue if self.SkuIdMgr.SkuUsageType == self.SkuIdMgr.SINGLE else PcdObj.SkuInfoList[TAB_DEFAULT].HiiDefaultValue
        Pcds[PcdCName, TokenSpaceGuid]= PcdObj
    return Pcds
## Retrieve dynamic HII PCD settings
#
# @param Type PCD type
#
# @retval a dict object contains settings of given PCD type
#
def _GetDynamicHiiPcd(self, Type):
    """Collect dynamic HII PCD settings of the given Type for self._Arch.

    Each record carries an EFI variable (name/guid/offset), a default
    value, variable attributes and a default store.  Validates offsets,
    attribute syntax and attribute consistency per variable, then merges
    per-SKU / per-default-store values into PcdClassObject instances.
    """
    VariableAttrs = {}
    Pcds = OrderedDict()
    UserDefinedDefaultStores = []
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 5)
    PcdList = []
    RecordList = self._RawData[Type, self._Arch]
    # Find out all possible PCD candidates for self._Arch
    AvailableSkuIdSet = copy.copy(self.SkuIds)
    DefaultStoresDefine = self._GetDefaultStores()
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, DefaultStore, Dummy4, Dummy5 in RecordList:
        SkuName = SkuName.upper()
        SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
        DefaultStore = DefaultStore.upper()
        if DefaultStore == TAB_COMMON:
            DefaultStore = TAB_DEFAULT_STORES_DEFAULT
        else:
            #The end user define [DefaultStores] and [SKUID_IDENTIFIER.Menufacturing] in DSC
            UserDefinedDefaultStores.append((PcdCName, TokenSpaceGuid))
        if SkuName not in AvailableSkuIdSet:
            EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
                            File=self.MetaFile, Line=Dummy5)
        if DefaultStore not in DefaultStoresDefine:
            EdkLogger.error('build', PARAMETER_INVALID, 'DefaultStores %s is not defined in [DefaultStores] section' % DefaultStore,
                            File=self.MetaFile, Line=Dummy5)
        # Skip structured/array PCD names; Dummy5 (DSC line) disambiguates.
        if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy5) not in PcdList:
            PcdList.append((PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy5))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid, DefaultStore] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for index,(PcdCName, TokenSpaceGuid, SkuName, DefaultStore, Dummy4) in enumerate(PcdList):
        Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid, DefaultStore]
        if Setting is None:
            continue
        VariableName, VariableGuid, VariableOffset, DefaultValue, VarAttribute = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
        rt, Msg = VariableAttributes.ValidateVarAttributes(VarAttribute)
        if not rt:
            EdkLogger.error("build", PCD_VARIABLE_ATTRIBUTES_ERROR, "Variable attributes settings for %s is incorrect.\n %s" % (".".join((TokenSpaceGuid, PcdCName)), Msg),
                            ExtraData="[%s]" % VarAttribute)
        # Offset may be decimal, hexadecimal, or a "A.B" word pair; it must
        # fit in a UINT16.
        ExceedMax = False
        FormatCorrect = True
        if VariableOffset.isdigit():
            if int(VariableOffset, 10) > 0xFFFF:
                ExceedMax = True
        elif variablePattern.match(VariableOffset):
            if int(VariableOffset, 16) > 0xFFFF:
                ExceedMax = True
        # For Offset written in "A.B"
        elif VariableOffset.find('.') > -1:
            VariableOffsetList = VariableOffset.split(".")
            if not (len(VariableOffsetList) == 2
                    and IsValidWord(VariableOffsetList[0])
                    and IsValidWord(VariableOffsetList[1])):
                FormatCorrect = False
        else:
            FormatCorrect = False
        if not FormatCorrect:
            EdkLogger.error('Build', FORMAT_INVALID, "Invalid syntax or format of the variable offset value is incorrect for %s." % ".".join((TokenSpaceGuid, PcdCName)))
        if ExceedMax:
            EdkLogger.error('Build', OPTION_VALUE_INVALID, "The variable offset value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)))
        # All PCDs sharing one (VariableName, VariableGuid) must agree on
        # the variable attributes.
        if (VariableName, VariableGuid) not in VariableAttrs:
            VariableAttrs[(VariableName, VariableGuid)] = VarAttribute
        else:
            if not DscBuildData.CompareVarAttr(VariableAttrs[(VariableName, VariableGuid)], VarAttribute):
                EdkLogger.error('Build', PCD_VARIABLE_ATTRIBUTES_CONFLICT_ERROR, "The variable %s.%s for DynamicHii PCDs has conflicting attributes [%s] and [%s] " % (VariableGuid, VariableName, VarAttribute, VariableAttrs[(VariableName, VariableGuid)]))
        pcdDecObject = self._DecPcds[PcdCName, TokenSpaceGuid]
        if (PcdCName, TokenSpaceGuid) in Pcds:
            pcdObject = Pcds[PcdCName, TokenSpaceGuid]
            if SkuName in pcdObject.SkuInfoList:
                Skuitem = pcdObject.SkuInfoList[SkuName]
                Skuitem.DefaultStoreDict.update({DefaultStore:DefaultValue})
            else:
                SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], VariableName, VariableGuid, VariableOffset, DefaultValue, VariableAttribute=VarAttribute, DefaultStore={DefaultStore:DefaultValue})
                pcdObject.SkuInfoList[SkuName] = SkuInfo
        else:
            SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], VariableName, VariableGuid, VariableOffset, DefaultValue, VariableAttribute=VarAttribute, DefaultStore={DefaultStore:DefaultValue})
            PcdClassObj = PcdClassObject(
                            PcdCName,
                            TokenSpaceGuid,
                            self._PCD_TYPE_STRING_[Type],
                            '',
                            DefaultValue,
                            '',
                            '',
                            OrderedDict({SkuName : SkuInfo}),
                            False,
                            None,
                            pcdDecObject.validateranges,
                            pcdDecObject.validlists,
                            pcdDecObject.expressions,
                            IsDsc=True)
            if (PcdCName, TokenSpaceGuid) in UserDefinedDefaultStores:
                PcdClassObj.UserDefinedDefaultStoresFlag = True
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObj
            Pcds[PcdCName, TokenSpaceGuid].CustomAttribute['DscPosition'] = index
        # Record the raw DSC value and its (file, line) origin.
        if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
            Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
            Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
        Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][DefaultStore] = DefaultValue
        Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][DefaultStore] = (self.MetaFile.File,Dummy4)
    for pcd in Pcds.values():
        pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
        pcd.DatumType = pcdDecObject.DatumType
        # Only fix the value while no value provided in DSC file.
        for sku in pcd.SkuInfoList.values():
            if (sku.HiiDefaultValue == "" or sku.HiiDefaultValue is None):
                sku.HiiDefaultValue = pcdDecObject.DefaultValue
                for default_store in sku.DefaultStoreDict:
                    sku.DefaultStoreDict[default_store]=pcdDecObject.DefaultValue
        pcd.DefaultValue = pcdDecObject.DefaultValue
        if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
            SkuInfoObj = list(pcd.SkuInfoList.values())[0]
            valuefromDec = pcdDecObject.DefaultValue
            # NOTE(review): `DefaultStore` below is whatever value the earlier
            # candidate loop left behind, not a per-pcd store name -- looks
            # unintentional; verify before changing.
            SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', SkuInfoObj.VariableName, SkuInfoObj.VariableGuid, SkuInfoObj.VariableOffset, valuefromDec, VariableAttribute=SkuInfoObj.VariableAttribute, DefaultStore={DefaultStore:valuefromDec})
            pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
        elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
            del pcd.SkuInfoList[TAB_COMMON]
        elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            del pcd.SkuInfoList[TAB_COMMON]
        if pcd.MaxDatumSize.strip():
            MaxSize = int(pcd.MaxDatumSize, 0)
        else:
            MaxSize = 0
        # Non-numeric (VOID*) values are converted to byte arrays and the
        # maximum size grows to fit the longest SKU value.
        if pcd.DatumType not in TAB_PCD_NUMERIC_TYPES:
            for (_, skuobj) in pcd.SkuInfoList.items():
                datalen = 0
                skuobj.HiiDefaultValue = StringToArray(skuobj.HiiDefaultValue)
                datalen = len(skuobj.HiiDefaultValue.split(","))
                if datalen > MaxSize:
                    MaxSize = datalen
                for defaultst in skuobj.DefaultStoreDict:
                    skuobj.DefaultStoreDict[defaultst] = StringToArray(skuobj.DefaultStoreDict[defaultst])
            pcd.DefaultValue = StringToArray(pcd.DefaultValue)
            pcd.MaxDatumSize = str(MaxSize)
    rt, invalidhii = DscBuildData.CheckVariableNameAssignment(Pcds)
    if not rt:
        invalidpcd = ",".join(invalidhii)
        EdkLogger.error('build', PCD_VARIABLE_INFO_ERROR, Message='The same HII PCD must map to the same EFI variable for all SKUs', File=self.MetaFile, ExtraData=invalidpcd)
    list(map(self.FilterSkuSettings, Pcds.values()))
    return Pcds
@staticmethod
def CheckVariableNameAssignment(Pcds):
invalidhii = []
for pcdname in Pcds:
pcd = Pcds[pcdname]
varnameset = set(sku.VariableName for (skuid, sku) in pcd.SkuInfoList.items())
if len(varnameset) > 1:
invalidhii.append(".".join((pcdname[1], pcdname[0])))
if len(invalidhii):
return False, invalidhii
else:
return True, []
## Retrieve dynamic VPD PCD settings
#
# @param Type PCD type
#
# @retval a dict object contains settings of given PCD type
#
def _GetDynamicVpdPcd(self, Type):
    """Collect dynamic VPD PCD settings of the given Type for self._Arch.

    Each record supplies a VPD offset plus optional MaxDatumSize and
    InitialValue; per-SKU values are merged into PcdClassObject entries
    with the DEC default filling any SKU that provides no value.
    """
    Pcds = OrderedDict()
    #
    # tdict is a special dict kind of type, used for selecting correct
    # PCD settings for certain ARCH and SKU
    #
    PcdDict = tdict(True, 4)
    PcdList = []
    # Find out all possible PCD candidates for self._Arch
    RecordList = self._RawData[Type, self._Arch]
    AvailableSkuIdSet = copy.copy(self.SkuIds)
    for TokenSpaceGuid, PcdCName, Setting, Arch, SkuName, Dummy3, Dummy4, Dummy5 in RecordList:
        SkuName = SkuName.upper()
        SkuName = TAB_DEFAULT if SkuName == TAB_COMMON else SkuName
        if SkuName not in AvailableSkuIdSet:
            EdkLogger.error('build', PARAMETER_INVALID, 'Sku %s is not defined in [SkuIds] section' % SkuName,
                            File=self.MetaFile, Line=Dummy5)
        if "." not in TokenSpaceGuid and "[" not in PcdCName and (PcdCName, TokenSpaceGuid, SkuName, Dummy5) not in PcdList:
            PcdList.append((PcdCName, TokenSpaceGuid, SkuName, Dummy5))
        PcdDict[Arch, SkuName, PcdCName, TokenSpaceGuid] = Setting
    # Remove redundant PCD candidates, per the ARCH and SKU
    for PcdCName, TokenSpaceGuid, SkuName, Dummy4 in PcdList:
        Setting = PcdDict[self._Arch, SkuName, PcdCName, TokenSpaceGuid]
        if Setting is None:
            continue
        #
        # For the VOID* type, it can have optional data of MaxDatumSize and InitialValue
        # For the Integer & Boolean type, the optional data can only be InitialValue.
        # At this point, we put all the data into the PcdClssObject for we don't know the PCD's datumtype
        # until the DEC parser has been called.
        #
        VpdOffset, MaxDatumSize, InitialValue = self._ValidatePcd(PcdCName, TokenSpaceGuid, Setting, Type, Dummy4)
        if MaxDatumSize:
            # Size must fit a UINT16 and be non-negative.
            if int(MaxDatumSize, 0) > 0xFFFF:
                EdkLogger.error('build', FORMAT_INVALID, "The size value must not exceed the maximum value of 0xFFFF (UINT16) for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                File=self.MetaFile, Line=Dummy4)
            if int(MaxDatumSize, 0) < 0:
                EdkLogger.error('build', FORMAT_INVALID, "The size value can't be set to negative value for %s." % ".".join((TokenSpaceGuid, PcdCName)),
                                File=self.MetaFile, Line=Dummy4)
        SkuInfo = SkuInfoClass(SkuName, self.SkuIds[SkuName][0], '', '', '', '', VpdOffset, InitialValue)
        if (PcdCName, TokenSpaceGuid) in Pcds:
            # Existing PCD: attach this SKU and keep the largest MaxDatumSize.
            pcdObject = Pcds[PcdCName, TokenSpaceGuid]
            pcdObject.SkuInfoList[SkuName] = SkuInfo
            if MaxDatumSize.strip():
                CurrentMaxSize = int(MaxDatumSize.strip(), 0)
            else:
                CurrentMaxSize = 0
            if pcdObject.MaxDatumSize:
                PcdMaxSize = int(pcdObject.MaxDatumSize, 0)
            else:
                PcdMaxSize = 0
            if CurrentMaxSize > PcdMaxSize:
                pcdObject.MaxDatumSize = str(CurrentMaxSize)
        else:
            Pcds[PcdCName, TokenSpaceGuid] = PcdClassObject(
                                                PcdCName,
                                                TokenSpaceGuid,
                                                self._PCD_TYPE_STRING_[Type],
                                                '',
                                                InitialValue,
                                                '',
                                                MaxDatumSize,
                                                OrderedDict({SkuName : SkuInfo}),
                                                False,
                                                None,
                                                IsDsc=True)
        # Record the raw DSC value and its (file, line) origin per SKU.
        if SkuName not in Pcds[PcdCName, TokenSpaceGuid].DscRawValue:
            Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName] = {}
            Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName] = {}
        Pcds[PcdCName, TokenSpaceGuid].DscRawValue[SkuName][TAB_DEFAULT_STORES_DEFAULT] = InitialValue
        Pcds[PcdCName, TokenSpaceGuid].DscRawValueInfo[SkuName][TAB_DEFAULT_STORES_DEFAULT] = (self.MetaFile.File,Dummy4)
    for pcd in Pcds.values():
        pcdDecObject = self._DecPcds[pcd.TokenCName, pcd.TokenSpaceGuidCName]
        pcd.DatumType = pcdDecObject.DatumType
        # Only fix the value while no value provided in DSC file.
        for sku in pcd.SkuInfoList.values():
            if not sku.DefaultValue:
                sku.DefaultValue = pcdDecObject.DefaultValue
        # Guarantee a DEFAULT SKU entry, promoting COMMON when present.
        if TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON not in pcd.SkuInfoList:
            SkuInfoObj = list(pcd.SkuInfoList.values())[0]
            valuefromDec = pcdDecObject.DefaultValue
            SkuInfo = SkuInfoClass(TAB_DEFAULT, '0', '', '', '', '', SkuInfoObj.VpdOffset, valuefromDec)
            pcd.SkuInfoList[TAB_DEFAULT] = SkuInfo
        elif TAB_DEFAULT not in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            pcd.SkuInfoList[TAB_DEFAULT] = pcd.SkuInfoList[TAB_COMMON]
            del pcd.SkuInfoList[TAB_COMMON]
        elif TAB_DEFAULT in pcd.SkuInfoList and TAB_COMMON in pcd.SkuInfoList:
            del pcd.SkuInfoList[TAB_COMMON]
    #For the same one VOID* pcd, if the default value type of one SKU is "Unicode string",
    #the other SKUs are "OtherVOID*"(ASCII string or byte array),Then convert "Unicode string" to "byte array".
    for pcd in Pcds.values():
        PcdValueTypeSet = set()
        for sku in pcd.SkuInfoList.values():
            PcdValueTypeSet.add("UnicodeString" if sku.DefaultValue.startswith(('L"',"L'")) else "OtherVOID*")
        if len(PcdValueTypeSet) > 1:
            for sku in pcd.SkuInfoList.values():
                sku.DefaultValue = StringToArray(sku.DefaultValue) if sku.DefaultValue.startswith(('L"',"L'")) else sku.DefaultValue
    list(map(self.FilterSkuSettings, Pcds.values()))
    return Pcds
## Add external modules
#
# The external modules are mostly those listed in FDF file, which don't
# need "build".
#
# @param FilePath The path of module description file
#
def AddModule(self, FilePath):
    """Register an external module (no-build, typically FDF-listed)."""
    normalized = NormPath(FilePath)
    if normalized in self.Modules:
        return
    module = ModuleBuildClassObject()
    module.MetaFile = normalized
    self.Modules.append(module)
@property
def ToolChainFamily(self):
    """Resolve the toolchain family for self._Toolchain from tools_def.

    Falls back to TAB_COMPILER_MSFT when target.txt / tools_def cannot be
    read or the toolchain has no FAMILY entry.
    """
    # Default assumed until tools_def proves otherwise.
    self._ToolChainFamily = TAB_COMPILER_MSFT
    TargetObj = TargetTxtDict()
    TargetTxt = TargetObj.Target
    BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, "target.txt"))
    if os.path.isfile(BuildConfigurationFile) == True:
        # target.txt names the tool-definition file (tools_def.txt by default).
        ToolDefinitionFile = TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
        if ToolDefinitionFile == '':
            ToolDefinitionFile = "tools_def.txt"
        ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
        if os.path.isfile(ToolDefinitionFile) == True:
            ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"), "Conf")))
            ToolDefinition = ToolDefObj.ToolDef.ToolsDefTxtDatabase
            # Keep the MSFT default unless tools_def declares a family for
            # this toolchain.
            if TAB_TOD_DEFINES_FAMILY not in ToolDefinition \
               or self._Toolchain not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
               or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][self._Toolchain]:
                self._ToolChainFamily = TAB_COMPILER_MSFT
            else:
                self._ToolChainFamily = ToolDefinition[TAB_TOD_DEFINES_FAMILY][self._Toolchain]
    return self._ToolChainFamily
## Add external PCDs
#
# The external PCDs are mostly those listed in FDF file to specify address
# or offset information.
#
# @param Name Name of the PCD
# @param Guid Token space guid of the PCD
# @param Value Value of the PCD
#
def AddPcd(self, Name, Guid, Value):
    """Register (or update) an externally supplied PCD default value."""
    key = (Name, Guid)
    if key not in self.Pcds:
        self.Pcds[key] = PcdClassObject(Name, Guid, '', '', '', '', '', {}, False, None)
    self.Pcds[key].DefaultValue = Value
@property
def DecPcds(self):
    """Lazily build the dict of PCDs declared by all referenced packages.

    The package set is the union of this platform's packages and those of
    every module listed in the FDF (modules already in self._Modules are
    skipped).  Also populates self._GuidDict as a side effect.
    """
    if self._DecPcds is None:
        FdfInfList = []
        if GlobalData.gFdfParser:
            FdfInfList = GlobalData.gFdfParser.Profile.InfList
        PkgSet = set()
        for Inf in FdfInfList:
            ModuleFile = PathClass(NormPath(Inf), GlobalData.gWorkspace, Arch=self._Arch)
            if ModuleFile in self._Modules:
                continue
            ModuleData = self._Bdb[ModuleFile, self._Arch, self._Target, self._Toolchain]
            PkgSet.update(ModuleData.Packages)
        if self.Packages:
            PkgSet.update(self.Packages)
        self._DecPcds, self._GuidDict = GetDeclaredPcd(self, self._Bdb, self._Arch, self._Target, self._Toolchain, PkgSet)
        # Platform-level GUIDs take precedence in the GUID dictionary.
        self._GuidDict.update(GlobalData.gPlatformPcds)
    return self._DecPcds
| 57.961823
| 486
| 0.565092
|
4a128c9a113bf369e5b3cb1239283af4b0f0afc5
| 5,589
|
py
|
Python
|
sites/altdev/settings_mkt.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
sites/altdev/settings_mkt.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
sites/altdev/settings_mkt.py
|
clouserw/zamboni
|
c4a568b69c1613f27da41d46328b2975cbdc1c07
|
[
"BSD-3-Clause"
] | null | null | null |
"""private_mkt will be populated from puppet and placed in this directory"""
from mkt.settings import * # noqa
from settings_base import * # noqa
import private_mkt
DOMAIN = "marketplace-altdev.allizom.org"
SERVER_EMAIL = 'zmarketplacedev@addons.mozilla.org'
SITE_URL = 'https://marketplace-altdev.allizom.org'
BROWSERID_AUDIENCES = [SITE_URL, 'localhost', 'localhost:8675']
STATIC_URL = os.getenv('CUSTOM_CDN',
'https://marketplace-altdev-cdn.allizom.org/')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL[:-1],)
ADDON_ICON_URL = 'img/uploads/addon_icons/%s/%s-%s.png?modified=%s'
PREVIEW_THUMBNAIL_URL = 'img/uploads/previews/thumbs/%s/%d.png?modified=%d'
PREVIEW_FULL_URL = 'img/uploads/previews/full/%s/%d.%s?modified=%d'
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
MEDIA_URL = STATIC_URL + 'media/'
CACHE_PREFIX = 'altdev.mkt.%s' % CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
SYSLOG_TAG = "http_app_mkt_altdev"
SYSLOG_TAG2 = "http_app_mkt_altdev_timer"
SYSLOG_CSP = "http_app_mkt_altdev_csp"
STATSD_PREFIX = 'marketplace-dev'
# Redis
REDIS_BACKEND = getattr(
private_mkt, 'REDIS_BACKENDS_CACHE', private.REDIS_BACKENDS_CACHE)
REDIS_BACKENDS_CACHE_SLAVE = getattr(
private_mkt, 'REDIS_BACKENDS_CACHE_SLAVE',
private.REDIS_BACKENDS_CACHE_SLAVE)
REDIS_BACKENDS_MASTER = getattr(
private_mkt, 'REDIS_BACKENDS_MASTER', private.REDIS_BACKENDS_MASTER)
REDIS_BACKENDS_SLAVE = getattr(
private_mkt, 'REDIS_BACKENDS_SLAVE', private.REDIS_BACKENDS_SLAVE)
REDIS_BACKENDS = {
'cache': REDIS_BACKEND,
'cache_slave': REDIS_BACKENDS_CACHE_SLAVE,
'master': REDIS_BACKENDS_MASTER,
'slave': REDIS_BACKENDS_SLAVE,
}
# Celery
BROKER_URL = private_mkt.BROKER_URL
CELERY_ALWAYS_EAGER = False
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
WEBAPPS_RECEIPT_KEY = private_mkt.WEBAPPS_RECEIPT_KEY
WEBAPPS_RECEIPT_URL = private_mkt.WEBAPPS_RECEIPT_URL
WEBAPPS_UNIQUE_BY_DOMAIN = False
SENTRY_DSN = private_mkt.SENTRY_DSN
WEBAPPS_PUBLIC_KEY_DIRECTORY = NETAPP_STORAGE + '/public_keys'
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
SOLITUDE_HOSTS = ('https://payments-dev.allizom.org',)
SOLITUDE_OAUTH = {'key': private_mkt.SOLITUDE_OAUTH_KEY,
'secret': private_mkt.SOLITUDE_OAUTH_SECRET}
VALIDATOR_TIMEOUT = 180
VALIDATOR_IAF_URLS = ['https://marketplace.firefox.com',
'https://marketplace.allizom.org',
'https://marketplace-dev.allizom.org',
'https://marketplace-altdev.allizom.org']
# Override the limited marketplace ones with these ones from AMO. Because
# the base gets overridden in the mkt.settings file, we'll set them back again.
# Note the addition of dbg here.
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg',)
LANGUAGES = lazy(langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
HIDDEN_LANGUAGES = (
'cy',
)
# Bug 748403
SIGNING_SERVER = private_mkt.SIGNING_SERVER
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = ['marketplace-dev-cdn.allizom.org']
# Bug 793876
SIGNED_APPS_KEY = private_mkt.SIGNED_APPS_KEY
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = private_mkt.SIGNED_APPS_SERVER
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = private_mkt.SIGNED_APPS_REVIEWER_SERVER
GOOGLE_ANALYTICS_DOMAIN = 'marketplace.firefox.com'
# Pass through the DSN to the Raven client and force signal
# registration so that exceptions are passed through to sentry
# RAVEN_CONFIG = {'dsn': SENTRY_DSN, 'register_signals': True}
# See mkt/settings.py for more info.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla-dev/payments/pay/v1'
APP_PURCHASE_SECRET = private_mkt.APP_PURCHASE_SECRET
MONOLITH_PASSWORD = private_mkt.MONOLITH_PASSWORD
# This is mainly for Marionette tests.
WEBAPP_MANIFEST_NAME = 'Marketplace Dev'
ENABLE_API_ERROR_SERVICE = True
# Until Bango can properly do refunds.
BANGO_FAKE_REFUNDS = True
if NEWRELIC_ENABLE:
NEWRELIC_INI = '/etc/newrelic.d/marketplace-altdev.allizom.org.ini'
ES_DEFAULT_NUM_REPLICAS = 2
ES_USE_PLUGINS = True
# Cache timeout on the /search/featured API.
CACHE_SEARCH_FEATURED_API_TIMEOUT = 60 * 5 # 5 min.
ALLOWED_CLIENTS_EMAIL_API = private_mkt.ALLOWED_CLIENTS_EMAIL_API
POSTFIX_AUTH_TOKEN = private_mkt.POSTFIX_AUTH_TOKEN
POSTFIX_DOMAIN = 'marketplace-dev.allizom.org'
MONOLITH_INDEX = 'mktdev-time_*'
# IARC content ratings.
IARC_ENV = 'test'
IARC_MOCK = False
IARC_PASSWORD = private_mkt.IARC_PASSWORD
IARC_PLATFORM = 'Firefox'
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCDEMOService/IARCServices.svc' # noqa
IARC_STOREFRONT_ID = 4
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCDEMORating/Submission.aspx' # noqa
IARC_ALLOW_CERT_REUSE = True
# We'll use zippy, the reference implementation on -dev.
PAYMENT_PROVIDERS = ['reference']
PRE_GENERATE_APK_URL = 'http://dapk.net/application.apk'
FXA_AUTH_DOMAIN = getattr(private_mkt, 'FXA_AUTH_DOMAIN', '')
FXA_OAUTH_URL = getattr(private_mkt, 'FXA_OAUTH_URL', '')
FXA_CLIENT_ID = getattr(private_mkt, 'FXA_CLIENT_ID', '')
FXA_CLIENT_SECRET = getattr(private_mkt, 'FXA_CLIENT_SECRET', '')
FXA_SECRETS = {
FXA_CLIENT_ID: FXA_CLIENT_SECRET,
}
# Bug 1145338
IAF_OVERRIDE_APPS = private_mkt.IAF_OVERRIDE_APPS
| 32.12069
| 97
| 0.778493
|
4a128ca8f02997c74f90e9770d811ef58667286f
| 1,189
|
py
|
Python
|
api/test/test_serializers/cashback.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
api/test/test_serializers/cashback.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
api/test/test_serializers/cashback.py
|
ghalonso94/wswallet
|
8f1f13a0d646166adad45b3872c2db6558d48f38
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.contrib.auth.models import User
from django.test import TestCase
from api.serializer import CashbackSerializer
from core.models import Cashback, Sale, Customer
class CashbackSerializerTestCase(TestCase):
    """Unit tests for CashbackSerializer field selection and content."""

    # Class-level model instances built in memory (never saved to the DB).
    # NOTE(review): `user` and `customer` are not referenced by the tests
    # below -- possibly leftover fixtures.
    user = User(username='test')
    customer = Customer(name='Test Customer', document='000.000.000-00')

    def setUp(self):
        # A pending cashback tied to a sale that happened "now"; neither
        # object is persisted -- the serializer works on bare instances.
        self.cashback = Cashback(
            status='PENDING', value='2.00', sale=Sale(sold_at=datetime.now())
        )
        self.serializer = CashbackSerializer(instance=self.cashback)

    def test_verify_fields_serialized(self):
        """Verify the exact set of fields the serializer exposes."""
        data = self.serializer.data
        self.assertEqual(set(data.keys()), set(['created_at', 'updated_at', 'cashback_id', 'status', 'value', 'sale']))

    def test_verify_content_fields_serialized(self):
        """Verify serialized values mirror the model instance's fields."""
        data = self.serializer.data
        self.assertEqual(data['status'], self.cashback.status)
        self.assertEqual(data['value'], self.cashback.value)
        self.assertEqual(data['sale'], self.cashback.sale.sale_id)
| 34.970588
| 119
| 0.702271
|
4a128ce4312ecbf47fbae3e91dd375c3042ddb30
| 2,454
|
py
|
Python
|
bripinfo/registro_br.py
|
rogeriopaulos/BRIpinfo
|
739579a626892b32474588752073a2041af6acb8
|
[
"MIT"
] | null | null | null |
bripinfo/registro_br.py
|
rogeriopaulos/BRIpinfo
|
739579a626892b32474588752073a2041af6acb8
|
[
"MIT"
] | 2
|
2021-08-31T02:18:20.000Z
|
2021-09-07T03:57:12.000Z
|
bripinfo/registro_br.py
|
rogeriopaulos/BRIpinfo
|
739579a626892b32474588752073a2041af6acb8
|
[
"MIT"
] | null | null | null |
import datetime as dt
import json
from bripinfo import settings
from bripinfo.core import BaseData
class RegistroBrMetadata(BaseData):
    """Metadata (source URL, timestamp, sha256) for the Registro.br dump.

    self.data -> {
        'source': <str>,
        'timestamp': <str>,
        'sha256': <str>,
    }
    """
    url = settings.CONFIG['registro_br']['sha256_mainfile']
    content_name = 'metadados'
    source = 'Registro.br'

    def __init__(self):
        super().__init__()
        # True when the locally stored sha256 matches the remote one.
        self.equal_sha256 = self._is_equal_sha256()

    def _is_equal_sha256(self):
        """Compare the saved sha256 with the remote file's sha256.

        Returns False when no local metadata file exists yet.
        """
        try:
            with open(self._full_filepath, 'r') as f:
                data = json.load(f)
                local_sha256 = data['sha256'].strip()
                remote_sha256 = self._raw_content.splitlines()[0].split('=')[-1].strip()
                # Direct boolean result instead of `True if ... else False`.
                return local_sha256 == remote_sha256
        except FileNotFoundError:
            return False

    def _can_save(self):
        # Save only when the remote checksum differs from the local one.
        return not self.equal_sha256

    def _data(self) -> dict:
        """Build the metadata record for the current remote dump."""
        sha256 = self._raw_content.splitlines()[0].split('=')[-1].strip()
        data = {
            'source': self.url,
            # Bug fix: the original format was '%Y-%d-%mT%H:%M:%S', which
            # swaps day and month; ISO 8601 is '%Y-%m-%dT%H:%M:%S'.
            'timestamp': dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'),
            'sha256': sha256
        }
        return data
class RegistroBrData(BaseData):
    """Parsed main-content rows of the Registro.br dump.

    self.data -> [{
        'ref': <str>,
        'name': <str>,
        'cnpj': <str>,
        'ips': <list>
    }, (...)]
    """
    url = settings.CONFIG['registro_br']['main_file']
    content_name = 'conteúdo'
    source = 'Registro.br'

    def _data(self) -> dict:
        """Split the raw pipe-delimited dump into a list of record dicts."""
        raw = self._raw_content
        settings.LOGGER.info('Estruturando o conteúdo do Registro.br')
        return [
            {
                'ref': fields[0],
                'name': fields[1],
                'cnpj': fields[2],
                'ips': list(fields[3:]),
            }
            for fields in (line.split('|') for line in raw.splitlines())
        ]

    def _can_save(self):
        # Main content is always (re)saved when requested.
        return True
def setup_registrobr():
    """Refresh local Registro.br data when the remote checksum changed."""
    metadata = RegistroBrMetadata()
    if metadata.equal_sha256:
        # Local copy already matches the remote dump; nothing to fetch.
        settings.LOGGER.info('Os dados do Registro.br encontram-se atualizados.')
    else:
        metadata.create_or_update()
        RegistroBrData().create_or_update()
    settings.LOGGER.info('Configuração finalizada!')
| 24.54
| 84
| 0.560717
|
4a128d11e246347c080ff9b6c9ba2e8081b28068
| 4,678
|
py
|
Python
|
crossdock/server/endtoend.py
|
ctripops/jaeger-client-python
|
2b2ff0249b756285aadab4175d8c8332912dd1b4
|
[
"Apache-2.0"
] | 372
|
2017-10-31T21:51:26.000Z
|
2022-03-23T10:36:19.000Z
|
crossdock/server/endtoend.py
|
ctripops/jaeger-client-python
|
2b2ff0249b756285aadab4175d8c8332912dd1b4
|
[
"Apache-2.0"
] | 276
|
2017-10-10T11:33:50.000Z
|
2022-03-24T16:36:16.000Z
|
crossdock/server/endtoend.py
|
ctripops/jaeger-client-python
|
2b2ff0249b756285aadab4175d8c8332912dd1b4
|
[
"Apache-2.0"
] | 157
|
2017-10-09T07:16:41.000Z
|
2021-12-29T14:49:26.000Z
|
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tornado.web
import logging
import json
import os
from jaeger_client.local_agent_net import LocalAgentSender
from jaeger_client.config import (
Config,
DEFAULT_SAMPLING_PORT,
DEFAULT_REPORTING_PORT,
)
from jaeger_client.constants import (
SAMPLER_TYPE_CONST,
SAMPLER_TYPE_REMOTE,
)
from jaeger_client.sampler import RemoteControlledSampler, ConstSampler
from jaeger_client.reporter import Reporter
from jaeger_client.throttler import RemoteThrottler
from jaeger_client.tracer import Tracer
config = {
'service_name': 'crossdock-python',
'enabled': True,
'sampler': {
'type': 'probabilistic',
'param': 1,
},
'reporter_flush_interval': 1,
'sampling_refresh_interval': 5,
}
class EndToEndHandler(object):
    """
    Handler that creates traces from a http request.
    json: {
        "type": "remote"
        "operation": "operationName",
        "count": 2,
        "tags": {
            "key": "value"
        }
    }
    Given the above json payload, the handler will use a tracer with the RemoteControlledSampler
    to create 2 traces for the "operationName" operation with the tags: {"key":"value"}. These
    traces are reported to the agent with the hostname "test_driver".
    """

    def __init__(self):
        # Both tracers share one reporter/channel/throttler; they differ only
        # in the sampler: remote-controlled vs. always-sample const sampler.
        cfg = Config(config)
        init_sampler = cfg.sampler
        channel = self.local_agent_sender
        reporter = Reporter(channel=channel,
                            flush_interval=cfg.reporter_flush_interval)
        remote_sampler = RemoteControlledSampler(
            channel=channel,
            service_name=cfg.service_name,
            sampling_refresh_interval=cfg.sampling_refresh_interval,
            init_sampler=init_sampler)
        throttler = RemoteThrottler(channel, cfg.service_name)
        remote_tracer = Tracer(
            service_name=cfg.service_name,
            reporter=reporter,
            sampler=remote_sampler,
            throttler=throttler)
        const_tracer = Tracer(
            service_name=cfg.service_name,
            reporter=reporter,
            sampler=ConstSampler(decision=True),
            throttler=throttler
        )
        # Keyed by the sampler-type strings used in the request's "type" field.
        self._tracers = {
            SAMPLER_TYPE_CONST: const_tracer,
            SAMPLER_TYPE_REMOTE: remote_tracer
        }

    @property
    def tracers(self):
        # Mapping of sampler type name -> Tracer instance.
        return self._tracers

    @tracers.setter
    def tracers(self, tracers):
        # Setter exists so tests can inject replacement tracers.
        self._tracers = tracers

    @property
    def local_agent_sender(self):
        # Builds a fresh sender on each access; the reporting host/port can
        # be overridden via the AGENT_HOST_PORT env var (_determine_host_port).
        host, port = _determine_host_port()
        return LocalAgentSender(
            host=host,
            sampling_port=DEFAULT_SAMPLING_PORT,
            reporting_port=port,
            throttling_port=DEFAULT_SAMPLING_PORT,
        )

    @tornado.gen.coroutine
    def generate_traces(self, request, response_writer):
        """Create `count` finished spans as described by the JSON request body."""
        # Tornado may hand the body over as bytes; decode before json.loads.
        if isinstance(request.body, (bytes, bytearray)):
            request.body = request.body.decode('utf-8')
        req = json.loads(request.body)
        sampler_type = req.get('type', 'remote')
        tracer = self.tracers[sampler_type]
        for _ in range(req.get('count', 0)):
            span = tracer.start_span(req['operation'])
            for k, v in req.get('tags', {}).items():
                span.set_tag(k, v)
            span.finish()
        response_writer.finish()
def _determine_host_port():
    """Resolve the agent's reporting host/port, honouring AGENT_HOST_PORT."""
    override = os.environ.get('AGENT_HOST_PORT')
    if not override:
        return 'jaeger-agent', DEFAULT_REPORTING_PORT
    return _parse_host_port(override, 'jaeger-agent', DEFAULT_REPORTING_PORT)
def _parse_host_port(host_port, default_host, default_port):
try:
host, port_str = host_port.split(':')
port = int(port_str)
return host, port
except ValueError:
logging.getLogger().error(
'Invalid host port (%s), using default host port (%s:%d)',
host_port, default_host, default_port)
return default_host, default_port
| 30.575163
| 96
| 0.645147
|
4a128d259ebc23e7113ad4d79941b4929966b071
| 832
|
py
|
Python
|
data/import/import_gdp.py
|
soyrochus/worlddata-python-example
|
8c35600e107d7fc00886bb4d3429615243e4b3be
|
[
"MIT"
] | 1
|
2019-12-03T11:38:50.000Z
|
2019-12-03T11:38:50.000Z
|
data/import/import_gdp.py
|
soyrochus/worlddata-python-example
|
8c35600e107d7fc00886bb4d3429615243e4b3be
|
[
"MIT"
] | null | null | null |
data/import/import_gdp.py
|
soyrochus/worlddata-python-example
|
8c35600e107d7fc00886bb4d3429615243e4b3be
|
[
"MIT"
] | 1
|
2019-12-03T11:39:02.000Z
|
2019-12-03T11:39:02.000Z
|
import csv
import sqlite3

# Source/target locations. File 1 holds annual GDP growth, file 2 GDP values;
# both are keyed by the World Bank "Country Code" column.
DB_PATH = '/home/iwk/src/data-pack/world-gdp.db'
GROWTH_CSV = '/home/iwk/src/data-pack/data.gdp.1.csv'
GDP_CSV = '/home/iwk/src/data-pack/data.gdp.2.csv'

conn = sqlite3.connect(DB_PATH)

# 'with' guarantees the CSV handles are closed even if a row is malformed
# (the original left them open on any exception).
with open(GROWTH_CSV) as csv_growth, open(GDP_CSV) as csv_gdp:
    # Index both files by ISO country code for O(1) joins below; the dict
    # comprehensions fully consume the readers, so closing afterwards is safe.
    growth = {e["Country Code"]: e for e in csv.DictReader(csv_growth)}
    gdp = {e["Country Code"]: e for e in csv.DictReader(csv_gdp)}

for country, growth_item in growth.items():
    gdp_item = gdp[country]  # KeyError here means the two exports disagree
    for i in range(1960, 2019):
        year = str(i)
        data = country, year, gdp_item[year], growth_item[year]
        conn.execute("INSERT INTO gdp (CountryCode, Year, gdp, growth) VALUES (?, ?,?,?)", data)
        print(country, year, data)

conn.commit()
conn.close()
| 26
| 100
| 0.66226
|
4a128e5224ca89f794cb7acedbee3557dde09aeb
| 2,744
|
py
|
Python
|
citizenshell/telnetshell.py
|
meuter/citizenshell
|
43964b6ec57b15e1dcd6f7a0723eb1533abe7aaa
|
[
"MIT"
] | 14
|
2018-03-22T19:54:14.000Z
|
2021-03-28T15:07:23.000Z
|
citizenshell/telnetshell.py
|
meuter/citizenshell
|
43964b6ec57b15e1dcd6f7a0723eb1533abe7aaa
|
[
"MIT"
] | 15
|
2018-02-07T21:31:37.000Z
|
2022-02-28T14:08:21.000Z
|
citizenshell/telnetshell.py
|
meuter/citizenshell
|
43964b6ec57b15e1dcd6f7a0723eb1533abe7aaa
|
[
"MIT"
] | 7
|
2018-05-13T11:50:53.000Z
|
2021-04-14T13:05:21.000Z
|
from telnetlib import Telnet
from uuid import uuid4
from time import sleep
from hashlib import md5
from os import chmod
from re import compile as compile_regex
from sys import version_info
from .abstractremoteshell import AbstractRemoteShell
from .shellresult import ShellResult
from .streamreader import PrefixedStreamReader
from .queue import Queue
from logging import CRITICAL
class TelnetShell(AbstractRemoteShell):
    """Remote shell implementation that drives the target over telnet.

    Logs in with the given credentials, replaces the remote prompt with the
    session's unique id so command boundaries can be detected reliably, and
    widens the remote terminal so long command lines are not wrapped.
    """

    def __init__(self, hostname, username, password=None, port=23,
                 check_xc=False, check_err=False, wait=True, log_level=CRITICAL, **kwargs):
        super(TelnetShell, self).__init__(hostname, check_xc=check_xc, check_err=check_err,
                                          wait=wait, log_level=log_level, **kwargs)
        # The session id doubles as the shell prompt so output belonging to
        # one command can be delimited unambiguously.
        self._prompt = self._id
        self._hostname = hostname
        self._username = username
        self._password = password
        self._port = port
        self._telnet = Telnet()
        self._is_connected = False
        self._buffer = ""
        self.connect()

    def do_connect(self):
        """Open the telnet session, authenticate and prepare the remote tty."""
        self._telnet.open(self._hostname, self._port)
        self._read_until("login: ")
        self._write(self._username + "\n")
        if self._password:
            self._read_until("Password: ")
            self._write(self._password + "\n")
        sleep(.1)
        # Install our unique prompt; read it twice (the echoed command and
        # the first real occurrence of the new prompt).
        self._write("export PS1='%s'\n" % self._prompt)
        self._read_until(self._prompt)
        self._read_until(self._prompt)
        # Widen the terminal so long commands are not wrapped by the tty.
        self._write("export COLUMNS=1024\n")
        self._read_until(self._prompt)
        self._write("stty columns 1027\n")
        self._read_until(self._prompt)

    def do_disconnect(self):
        """Close the underlying telnet connection."""
        self._telnet.close()

    def _write(self, text):
        # Mirror everything sent to the wire into the spy log before writing.
        self.log_spy_write(text)
        self._telnet.write(text.encode('utf-8'))

    def _read_until(self, marker):
        """Read raw bytes from the session until *marker* is seen; return them."""
        out = self._telnet.read_until(marker.encode('utf-8'))
        self.log_spy_read(out)
        return out

    def readline(self):
        """Return the next output line (bytes), or None once the prompt is hit."""
        choices = [ "\n", self._prompt ]
        # telnetlib.expect() requires byte patterns on Python 3.
        if version_info[0] > 2: choices = [ bytes(x, 'utf-8') for x in choices ]
        (index, _, line) = self._telnet.expect(choices)
        self.log_spy_read(line.decode('utf-8').rstrip("\n\r"))
        if index == 0:
            return line
        return None

    def execute_command(self, command, env={}, wait=True, check_err=False, cwd=None):
        """Run *command* remotely, streaming its output through a Queue.

        NOTE(review): the mutable default ``env={}`` is shared across calls;
        harmless only as long as no caller mutates it.
        """
        wrapped_command = PrefixedStreamReader.wrap_command(command, env, cwd)
        self._write(wrapped_command + "\n")
        self.readline()
        sleep(.2)
        queue = Queue()
        PrefixedStreamReader(self, queue)
        return ShellResult(self, command, queue, wait, check_err)

    def do_reboot(self):
        """Issue a reboot on the target and give it a moment to go down."""
        self._write("reboot\n")
        sleep(.3)
| 33.463415
| 92
| 0.636297
|
4a128ec12f63b71571d41077ef5eb7d39c3cc156
| 3,097
|
py
|
Python
|
trader/broker/base.py
|
tianhm/pyfx
|
515dc8eaa9862d2bb28656a8c5c5c21d2a054f69
|
[
"MIT"
] | 19
|
2016-12-13T12:55:09.000Z
|
2021-11-19T00:21:54.000Z
|
trader/broker/base.py
|
tianhm/pyfx
|
515dc8eaa9862d2bb28656a8c5c5c21d2a054f69
|
[
"MIT"
] | null | null | null |
trader/broker/base.py
|
tianhm/pyfx
|
515dc8eaa9862d2bb28656a8c5c5c21d2a054f69
|
[
"MIT"
] | 16
|
2017-03-10T18:52:28.000Z
|
2021-10-04T05:18:42.000Z
|
import logging
from dateutil import parser as date_parse
from time import sleep
from OpenSSL.SSL import SysCallError
import pandas as pd
from requests.packages.urllib3.exceptions import ProtocolError
from ..lib.oandapy import OandaError
log = logging.getLogger('pyFx')
class OandaBrokerBase(object):
    '''
    Base class for broker objects. Not to be instantiated by itself, always as
    part of a child class.
    '''

    # Columns requested from the candle endpoint when the caller does not
    # supply its own 'columns' keyword.
    default_history_dataframe_columns = (
        'time',
        'volume',
        'complete',
        'closeBid',
        'closeAsk',
        'openBid',
        'openAsk',
        'highBid',
        'highAsk',
        'lowBid',
        'lowAsk',
    )

    def __init__(self, api):
        '''
        :param api: OANDA API client used for all requests.
        '''
        self._api = api
        self._tick = None

    def get_instrument_detail(self, instrument):
        '''Return the raw API description of a single instrument.

        NOTE(review): relies on ``self._account_id`` being set by a subclass.
        '''
        params = {'instruments': instrument}
        ret = self._api.get_instruments(self._account_id, **params)
        return ret

    def set_current_tick(self, tick):
        '''Remember the most recent tick seen by the trading loop.'''
        self._tick = tick

    def get_history(self, *args, **kwargs):
        '''
        Query the API for a given instrument and timeframe and return its df.

        Retries forever on connection-level errors; returns an empty frame
        when the API has no candles or the response cannot be parsed.
        '''
        columns = kwargs.pop('columns', self.default_history_dataframe_columns)
        include_current = kwargs.pop('include_current', False)
        if 'time' not in columns:
            columns = ('time',) + tuple(columns)
        while True:
            try:
                response = self._api.get_history(*args, **kwargs)
                if response and response.get('candles'):
                    df = pd.DataFrame(
                        data=response['candles'],
                        columns=columns,
                    )
                    df['time'] = df['time'].map(date_parse.parse)
                    df['closeMid'] = df.loc[:,('closeBid','closeAsk')].mean(axis=1)
                    df.index = df['time']
                    if not include_current:
                        # Drop the still-forming candle unless asked for it.
                        df = df[df.complete == True]
                    return df
                else:
                    # BUG FIX: both values must go to .format(); previously
                    # 'granularity' was passed to log.info as a spare argument
                    # and .format() raised IndexError on its second placeholder.
                    log.info("no history for {} and timeframe {}".format(
                        kwargs['instrument'], kwargs['granularity']))
                    return pd.DataFrame()
            except ValueError as e:
                log.warning("[!] Error when loading candles for {}: {}".format(
                    kwargs['instrument'], e))
                return pd.DataFrame()
            except (ProtocolError, OandaError, SysCallError) as e:
                log.warning("[!] Connection error ({0:s}). Reconnecting...".format(e))
                sleep(3)

    def get_price(self, instrument):
        '''Subclass hook: current price for *instrument*.'''
        raise NotImplementedError()

    def open_order(self, instrument, units, side, order_type,
                   price=None, expiry=None, stop_loss=None, take_profit=None):
        '''Subclass hook: place an order.'''
        raise NotImplementedError()

    def sync_transactions(self, position):
        '''Subclass hook: reconcile local state with broker transactions.'''
        raise NotImplementedError()

    def delete_pending_order(self, position):
        '''Subclass hook: cancel a pending order.'''
        raise NotImplementedError()

    def close_trade(self, position):
        '''Subclass hook: close an open trade.'''
        raise NotImplementedError()
| 32.946809
| 86
| 0.560542
|
4a128f7baab2b37dc20a6686b68ac3818b2a59ac
| 2,009
|
py
|
Python
|
src/python/pants/backend/python/tasks/wrapped_pex.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/tasks/wrapped_pex.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/tasks/wrapped_pex.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import object
from copy import copy
class WrappedPEX(object):
    """Thin facade over a PEX exposing only its run() machinery.

    When extra pexes are supplied they are "merged" into the main pex at
    runtime by exporting PEX_PATH in the subprocess environment.
    """

    _PEX_PATH_ENV_VAR_NAME = 'PEX_PATH'

    # TODO(benjy): I think we can get rid of the interpreter argument.
    # In all cases it appears to be set to pex.interpreter.
    def __init__(self, pex, interpreter, extra_pex_paths=None):
        """
        :param pex: The main pex we wrap.
        :param interpreter: The interpreter the main pex will run on.
        :param extra_pex_paths: Other pexes, to "merge" in via the PEX_PATH mechanism.
        """
        self._pex = pex
        self._interpreter = interpreter
        self._extra_pex_paths = extra_pex_paths

    @property
    def interpreter(self):
        return self._interpreter

    def path(self):
        return self._pex.path()

    def cmdline(self, args=()):
        """Render the command line, prefixed with PEX_PATH= when merging."""
        base = ' '.join(self._pex.cmdline(args))
        merged = self._pex_path()
        if not merged:
            return base
        return '%s=%s %s' % (self._PEX_PATH_ENV_VAR_NAME, merged, base)

    def run(self, *args, **kwargs):
        """Run the wrapped pex, injecting PEX_PATH into its env if needed."""
        merged = self._pex_path()
        if not merged:
            return self._pex.run(*args, **kwargs)
        run_kwargs = copy(kwargs)
        env = copy(run_kwargs['env']) if 'env' in run_kwargs else {}
        env[self._PEX_PATH_ENV_VAR_NAME] = merged
        run_kwargs['env'] = env
        return self._pex.run(*args, **run_kwargs)

    def _pex_path(self):
        # Colon-joined list of the extra pexes, or None when there are none.
        return ':'.join(self._extra_pex_paths) if self._extra_pex_paths else None
| 31.390625
| 99
| 0.658537
|
4a1291ffe84665e7d351d66c2470a3a127abf3ce
| 4,933
|
py
|
Python
|
RecoTauTag/HLTProducers/python/applyL2TauTag.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
RecoTauTag/HLTProducers/python/applyL2TauTag.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 30
|
2015-11-04T11:42:27.000Z
|
2021-12-01T07:56:34.000Z
|
RecoTauTag/HLTProducers/python/applyL2TauTag.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
import FWCore.ParameterSet.Config as cms
from HLTrigger.Configuration.customizeHLTforPatatrack import customizeHLTforPatatrackTriplets
from RecoTauTag.HLTProducers.l2TauNNProducer_cfi import *
from RecoTauTag.HLTProducers.l2TauTagFilter_cfi import *
def insertL2TauSequence(process, path, ref_module):
    """Splice the L2 tau-tagging modules into *path* right after *ref_module*."""
    anchor = path.index(ref_module)
    additions = (
        process.hltL2TauTagNNSequence,
        process.hltL2DoubleTauTagNNFilter,
        process.HLTGlobalPFTauHPSSequence,
    )
    for offset, module in enumerate(additions, start=1):
        path.insert(anchor + offset, module)
def update(process):
    """Attach the NN-based L2 tau tagger to the HLT *process* and return it.

    Inserts an L2 NN producer/filter pair into the double-tau path, switches
    the regional HPS PFTau reconstruction to the global one, and removes
    obsolete di-tau paths from the schedule.
    """
    # NN discriminator working points: thresholds applied to the network output.
    thWp = {
        'Tight': 0.180858813224404,
        'Medium': 0.12267940863785043,
        'Loose': 0.08411243185219064,
    }
    working_point = "Tight"
    graphPath = 'RecoTauTag/TrainingFiles/data/L2TauNNTag/L2TauTag_Run3v1.pb'
    normalizationDict = 'RecoTauTag/TrainingFiles/data/L2TauNNTag/NormalizationDict.json'
    # The NN producer consumes Patatrack pixel tracks/vertices; apply the
    # Patatrack customisation only if it has not been applied already.
    if 'statusOnGPU' not in process. __dict__:
        process = customizeHLTforPatatrackTriplets(process)
    process.hltL2TauTagNNProducer = l2TauNNProducer.clone(
        debugLevel = 0,
        L1Taus = [
            cms.PSet(
                L1CollectionName = cms.string('DoubleTau'),
                L1TauTrigger = cms.InputTag('hltL1sDoubleTauBigOR'),
            ),
        ],
        hbheInput = "hltHbhereco",
        hoInput = "hltHoreco",
        ebInput = "hltEcalRecHit:EcalRecHitsEB",
        eeInput = "hltEcalRecHit:EcalRecHitsEE",
        pataVertices = "hltPixelVerticesSoA",
        pataTracks = "hltPixelTracksSoA",
        BeamSpot = "hltOnlineBeamSpot",
        maxVtx = 100,
        fractionSumPt2 = 0.3,
        minSumPt2 = 0.,
        track_pt_min = 1.,
        track_pt_max = 20.,
        track_chi2_max = 20.,
        graphPath = graphPath,
        normalizationDict = normalizationDict
    )
    # Require two taus above the chosen NN working point (or the pt bypass).
    process.hltL2DoubleTauTagNNFilter = l2TauTagFilter.clone(
        nExpected = 2,
        L1TauSrc = 'hltL1sDoubleTauBigOR',
        L2Outcomes = 'hltL2TauTagNNProducer:DoubleTau',
        DiscrWP = thWp[working_point],
        l1TauPtThreshold = 250,
    )
    # L2 updated Sequence
    process.hltL2TauTagNNSequence = cms.Sequence(process.HLTDoCaloSequence + process.hltL1sDoubleTauBigOR + process.hltL2TauTagNNProducer)
    # Regional -> Global customization: point every consumer of the regional
    # PFTau collections at the global producers instead.
    process.hltHpsPFTauTrackPt1DiscriminatorReg.PFTauProducer = "hltHpsPFTauProducer"
    process.hltHpsDoublePFTau35Reg.inputTag = "hltHpsPFTauProducer"
    process.hltHpsSelectedPFTausTrackPt1Reg.src = "hltHpsPFTauProducer"
    process.hltHpsPFTauMediumAbsoluteChargedIsolationDiscriminatorReg.PFTauProducer = "hltHpsPFTauProducer"
    process.hltHpsPFTauMediumAbsoluteChargedIsolationDiscriminatorReg.particleFlowSrc = "hltParticleFlow"
    process.hltHpsPFTauMediumRelativeChargedIsolationDiscriminatorReg.PFTauProducer = "hltHpsPFTauProducer"
    process.hltHpsPFTauMediumRelativeChargedIsolationDiscriminatorReg.particleFlowSrc = "hltParticleFlow"
    process.hltHpsPFTauMediumAbsOrRelChargedIsolationDiscriminatorReg.PFTauProducer = "hltHpsPFTauProducer"
    process.hltHpsSelectedPFTausTrackPt1MediumChargedIsolationReg.src = "hltHpsPFTauProducer"
    # Strip the legacy L2 calo/iso tau steps from the double-tau path, then
    # splice in the NN sequence right after the prescale module.
    process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4.remove(process.HLTL2TauJetsL1TauSeededSequence)
    process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4.remove(process.hltDoubleL2Tau26eta2p2)
    process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4.remove(process.HLTL2p5IsoTauL1TauSeededSequence)
    process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4.remove(process.hltDoubleL2IsoTau26eta2p2 )
    process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4.remove(process.HLTRegionalPFTauHPSSequence )
    insertL2TauSequence(process, process.HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v4, process.hltPreDoubleMediumChargedIsoPFTauHPS35Trk1eta2p1Reg)
    # Drop superseded di-tau paths from the schedule (if present in this menu).
    old_diTau_paths = ['HLT_IsoMu24_eta2p1_TightChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_CrossL1_v1', 'HLT_IsoMu24_eta2p1_MediumChargedIsoPFTauHPS35_Trk1_TightID_eta2p1_Reg_CrossL1_v1','HLT_IsoMu24_eta2p1_TightChargedIsoPFTauHPS35_Trk1_TightID_eta2p1_Reg_CrossL1_v1','HLT_IsoMu24_eta2p1_MediumChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_CrossL1_v4','HLT_IsoMu24_eta2p1_MediumChargedIsoPFTauHPS30_Trk1_eta2p1_Reg_CrossL1_v1','HLT_DoubleMediumChargedIsoPFTauHPS30_L1MaxMass_Trk1_eta2p1_Reg_v1','HLT_DoubleTightChargedIsoPFTauHPS35_Trk1_eta2p1_Reg_v1','HLT_DoubleMediumChargedIsoPFTauHPS35_Trk1_TightID_eta2p1_Reg_v1','HLT_DoubleTightChargedIsoPFTauHPS35_Trk1_TightID_eta2p1_Reg_v1','HLT_DoubleMediumChargedIsoPFTauHPS40_Trk1_eta2p1_Reg_v1','HLT_DoubleTightChargedIsoPFTauHPS40_Trk1_eta2p1_Reg_v1','HLT_DoubleMediumChargedIsoPFTauHPS40_Trk1_TightID_eta2p1_Reg_v1','HLT_DoubleTightChargedIsoPFTauHPS40_Trk1_TightID_eta2p1_Reg_v1']
    for path in old_diTau_paths:
        if path in process.__dict__:
            process.schedule.remove(getattr(process, path))
    return process
| 55.426966
| 915
| 0.797081
|
4a1292228fedf92fbb6a17a84d70d5740551e4bc
| 178
|
py
|
Python
|
PyStacks/PyStacks/recordsets.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 11
|
2018-02-15T04:27:05.000Z
|
2020-10-02T11:20:08.000Z
|
PyStacks/PyStacks/recordsets.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 3
|
2018-02-15T05:46:54.000Z
|
2018-03-05T04:46:51.000Z
|
PyStacks/PyStacks/recordsets.py
|
0xack13/PyStacks
|
13136c43089c241680beb216a233d1846119dd7c
|
[
"MIT"
] | 8
|
2018-03-05T04:40:41.000Z
|
2021-02-22T08:07:58.000Z
|
class recordsets():
    """Simple value object describing one DNS record set.

    NOTE: the lower-case class name is kept for backward compatibility with
    existing callers.
    """

    def __init__(self, name, rectype, value, ttl):
        """
        :param name: record name (e.g. ``www.example.com``).
        :param rectype: record type (e.g. ``A``, ``CNAME``, ``TXT``).
        :param value: record payload/target.
        :param ttl: time-to-live in seconds.
        """
        self.name = name
        self.rectype = rectype
        self.value = value
        self.ttl = ttl

    def __repr__(self):
        # Debug-friendly representation; purely additive for callers.
        return 'recordsets(name={!r}, rectype={!r}, value={!r}, ttl={!r})'.format(
            self.name, self.rectype, self.value, self.ttl)
| 22.25
| 50
| 0.578652
|
4a12947a5675b01b4410bedf4a39ad3e872dbdfe
| 11,147
|
py
|
Python
|
powerline/bindings/config.py
|
PH111P/powerline
|
f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a
|
[
"MIT"
] | 23
|
2016-12-16T09:03:18.000Z
|
2022-02-25T19:19:23.000Z
|
powerline/bindings/config.py
|
PH111P/powerline
|
f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a
|
[
"MIT"
] | 30
|
2016-12-20T11:11:42.000Z
|
2019-11-19T15:23:59.000Z
|
powerline/bindings/config.py
|
PH111P/powerline
|
f8dfe7e7e3d021cd66bc0e19b19ea4a51949cb9a
|
[
"MIT"
] | 4
|
2016-12-11T18:29:11.000Z
|
2018-04-22T07:51:28.000Z
|
import os
import re
import sys
import subprocess
import shlex
from powerline.config import POWERLINE_ROOT, TMUX_CONFIG_DIRECTORY
from powerline.lib.config import ConfigLoader
from powerline import generate_config_finder, load_config, create_logger, finish_common_config
from powerline.shell import ShellPowerline
from powerline.lib.shell import which
from powerline.bindings.tmux import (TmuxVersionInfo, run_tmux_command, set_tmux_environment, get_tmux_version,
source_tmux_file)
from powerline.lib.encoding import get_preferred_output_encoding
from powerline.renderers.tmux import attrs_to_tmux_attrs
from powerline.commands.main import finish_args
CONFIG_FILE_NAME = re.compile(r'powerline_tmux_(?P<major>\d+)\.(?P<minor>\d+)(?P<suffix>[a-z]+)?(?:_(?P<mod>plus|minus))?\.conf')
CONFIG_MATCHERS = {
None: (lambda a, b: a.major == b.major and a.minor == b.minor),
'plus': (lambda a, b: a[:2] <= b[:2]),
'minus': (lambda a, b: a[:2] >= b[:2]),
}
CONFIG_PRIORITY = {
None: 3,
'plus': 2,
'minus': 1,
}
def list_all_tmux_configs():
    '''List all version-specific tmux configuration files'''
    for directory, subdirs, filenames in os.walk(TMUX_CONFIG_DIRECTORY):
        # Prune recursion: only the top-level config directory is scanned.
        subdirs[:] = ()
        for filename in filenames:
            parsed = CONFIG_FILE_NAME.match(filename)
            if not parsed:
                continue
            assert parsed.group('suffix') is None
            mod = parsed.group('mod')
            version = TmuxVersionInfo(
                int(parsed.group('major')),
                int(parsed.group('minor')),
                parsed.group('suffix'),
            )
            yield (
                os.path.join(directory, filename),
                CONFIG_MATCHERS[mod],
                CONFIG_PRIORITY[mod],
                version,
            )
def get_tmux_configs(version):
    '''Yield ``(file name, priority)`` pairs applicable to a tmux version

    :param TmuxVersionInfo version: Parsed tmux version.
    '''
    for fname, matcher, base_priority, file_version in list_all_tmux_configs():
        if not matcher(file_version, version):
            continue
        # Weight newer file versions higher; the mod-derived base priority
        # breaks ties between _minus/_plus/exact files of the same version.
        weight = base_priority + file_version.minor * 10 + file_version.major * 10000
        yield (fname, weight)
def source_tmux_files(pl, args, tmux_version=None, source_tmux_file=source_tmux_file):
    '''Source relevant version-specific tmux configuration files

    Files are sourced in the following order:
    * First relevant files with older versions are sourced.
    * If files for same versions are to be sourced then first _minus files are
      sourced, then _plus files and then files without _minus or _plus suffixes.
    '''
    tmux_version = tmux_version or get_tmux_version(pl)
    # The base config always comes first, then version-specific files in
    # ascending priority order (see get_tmux_configs for the weighting).
    source_tmux_file(os.path.join(TMUX_CONFIG_DIRECTORY, 'powerline-base.conf'))
    for fname, priority in sorted(get_tmux_configs(tmux_version), key=(lambda v: v[1])):
        source_tmux_file(fname)
    if not os.environ.get('POWERLINE_COMMAND'):
        cmd = deduce_command()
        if cmd:
            # NOTE(review): deduce_command() is called a second time here even
            # though `cmd` already holds its result — presumably equivalent,
            # but worth confirming and reusing `cmd`.
            set_tmux_environment('POWERLINE_COMMAND', deduce_command(), remove=False)
    try:
        run_tmux_command('refresh-client')
    except subprocess.CalledProcessError:
        # On tmux-2.0 this command may fail for whatever reason. Since it is
        # critical just ignore the failure.
        pass
class EmptyArgs(object):
    '''Stand-in for parsed CLI arguments with (almost) everything unset.

    Any attribute not assigned in ``__init__`` resolves to ``None`` via
    ``__getattr__``, which lets ShellPowerline be constructed without a
    real argument parser.
    '''

    def __init__(self, ext, config_path):
        self.ext = [ext]
        self.side = 'left'
        # NOTE(review): the config_path parameter is accepted but ignored —
        # config_path is always forced to None here. Possibly intentional,
        # but worth confirming against the callers.
        self.config_path = None

    def __getattr__(self, attr):
        # Every attribute that was never assigned reads as None.
        return None
def init_tmux_environment(pl, args, set_tmux_environment=set_tmux_environment):
    '''Initialize tmux environment from tmux configuration

    Renders the active powerline colorscheme into a set of ``_POWERLINE_*``
    tmux variables (whole highlight styles, divider colors, and individual
    fg/bg/attr components) via the given ``set_tmux_environment`` callable.
    '''
    powerline = ShellPowerline(finish_args(None, os.environ, EmptyArgs('tmux', args.config_path)))
    # TODO Move configuration files loading out of Powerline object and use it
    # directly
    powerline.update_renderer()
    # FIXME Use something more stable then `theme_kwargs`
    colorscheme = powerline.renderer_options['theme_kwargs']['colorscheme']
    def get_highlighting(group):
        # Resolve a single highlight group against the active colorscheme.
        return colorscheme.get_highlighting([group], None)
    # One tmux variable per group holding the complete rendered style.
    # hlstyle() output is sliced with [2:-1] — presumably stripping the
    # surrounding escape markers so tmux receives the bare style string.
    for varname, highlight_group in (
        ('_POWERLINE_BACKGROUND_COLOR', 'background'),
        ('_POWERLINE_ACTIVE_WINDOW_STATUS_COLOR', 'active_window_status'),
        ('_POWERLINE_WINDOW_STATUS_COLOR', 'window_status'),
        ('_POWERLINE_ACTIVITY_STATUS_COLOR', 'activity_status'),
        ('_POWERLINE_BELL_STATUS_COLOR', 'bell_status'),
        ('_POWERLINE_WINDOW_COLOR', 'window'),
        ('_POWERLINE_WINDOW_DIVIDER_COLOR', 'window:divider'),
        ('_POWERLINE_WINDOW_CURRENT_COLOR', 'window:current'),
        ('_POWERLINE_WINDOW_NAME_COLOR', 'window_name'),
        ('_POWERLINE_SESSION_COLOR', 'session'),
    ):
        highlight = get_highlighting(highlight_group)
        set_tmux_environment(varname, powerline.renderer.hlstyle(**highlight)[2:-1])
    # Hard-divider styles: previous group's background as fg drawn on the
    # next group's background.
    for varname, prev_group, next_group in (
        ('_POWERLINE_WINDOW_CURRENT_HARD_DIVIDER_COLOR', 'window', 'window:current'),
        ('_POWERLINE_WINDOW_CURRENT_HARD_DIVIDER_NEXT_COLOR', 'window:current', 'window'),
        ('_POWERLINE_SESSION_HARD_DIVIDER_NEXT_COLOR', 'session', 'background'),
    ):
        prev_highlight = get_highlighting(prev_group)
        next_highlight = get_highlighting(next_group)
        set_tmux_environment(
            varname,
            powerline.renderer.hlstyle(
                fg=prev_highlight['bg'],
                bg=next_highlight['bg'],
                attrs=0,
            )[2:-1]
        )
    # Individual fg/bg/attr components for groups whose pieces are referenced
    # separately inside the tmux config files.
    for varname, attr, group in (
        ('_POWERLINE_ACTIVE_WINDOW_FG', 'fg', 'active_window_status'),
        ('_POWERLINE_WINDOW_STATUS_FG', 'fg', 'window_status'),
        ('_POWERLINE_ACTIVITY_STATUS_FG', 'fg', 'activity_status'),
        ('_POWERLINE_ACTIVITY_STATUS_ATTR', 'attrs', 'activity_status'),
        ('_POWERLINE_BELL_STATUS_FG', 'fg', 'bell_status'),
        ('_POWERLINE_BELL_STATUS_ATTR', 'attrs', 'bell_status'),
        ('_POWERLINE_BACKGROUND_FG', 'fg', 'background'),
        ('_POWERLINE_BACKGROUND_BG', 'bg', 'background'),
        ('_POWERLINE_SESSION_FG', 'fg', 'session'),
        ('_POWERLINE_SESSION_BG', 'bg', 'session'),
        ('_POWERLINE_SESSION_ATTR', 'attrs', 'session'),
        ('_POWERLINE_SESSION_PREFIX_FG', 'fg', 'session:prefix'),
        ('_POWERLINE_SESSION_PREFIX_BG', 'bg', 'session:prefix'),
        ('_POWERLINE_SESSION_PREFIX_ATTR', 'attrs', 'session:prefix'),
    ):
        if attr == 'attrs':
            attrs = attrs_to_tmux_attrs(get_highlighting(group)[attr])
            set_tmux_environment(varname, ']#['.join(attrs))
            set_tmux_environment(varname + '_LEGACY', (','.join(
                # Tmux-1.6 does not accept no… attributes in
                # window-status-…-attr options.
                (attr for attr in attrs if not attr.startswith('no')))
                # But it does not support empty attributes as well.
                or 'none'))
        else:
            # Truecolor terminals get a hex color; others the 256-color index.
            if powerline.common_config['term_truecolor']:
                set_tmux_environment(varname, '#{0:06x}'.format(get_highlighting(group)[attr][1]))
            else:
                set_tmux_environment(varname, 'colour' + str(get_highlighting(group)[attr][0]))
    # Divider glyphs used by the window/status segments, plus a same-width
    # run of spaces for alignment.
    left_dividers = powerline.renderer.theme.dividers['left']
    set_tmux_environment('_POWERLINE_LEFT_HARD_DIVIDER', left_dividers['hard'])
    set_tmux_environment('_POWERLINE_LEFT_SOFT_DIVIDER', left_dividers['soft'])
    set_tmux_environment('_POWERLINE_LEFT_HARD_DIVIDER_SPACES', (
        ' ' * powerline.renderer.strwidth(left_dividers['hard'])))
# Matches $_POWERLINE_* variable references in sourced tmux config lines.
# Raw string: '\$' and '\w' are invalid escape sequences in a plain string
# literal (DeprecationWarning since Python 3.6); the pattern is unchanged.
TMUX_VAR_RE = re.compile(r'\$(_POWERLINE_\w+)')
def tmux_setup(pl, args):
    '''Set up powerline integration for the running tmux server.

    When ``args.source`` is true the version-specific config files are
    sourced directly and variables exported into the tmux environment.
    Otherwise the variables are collected locally and substituted into each
    config line before it is run as a tmux command. The default depends on
    the detected tmux version (< 1.9 sources directly).
    '''
    tmux_environ = {}
    tmux_version = get_tmux_version(pl)

    def set_tmux_environment_nosource(varname, value, remove=True):
        # Collect variables locally instead of exporting them to tmux.
        tmux_environ[varname] = value

    def replace_cb(match):
        return tmux_environ[match.group(1)]

    def replace_env(s):
        # Substitute $_POWERLINE_* references with the collected values.
        return TMUX_VAR_RE.subn(replace_cb, s)[0]

    def source_tmux_file_nosource(fname):
        # Emulate `tmux source-file`: run every non-comment, non-blank line
        # as a tmux command after expanding the collected variables.
        with open(fname) as fd:
            for line in fd:
                if line.startswith('#') or line == '\n':
                    continue
                args = shlex.split(line)
                args = [args[0]] + [replace_env(arg) for arg in args[1:]]
                run_tmux_command(*args)

    if args.source is None:
        # Default: source directly only on tmux versions before 1.9.
        args.source = tmux_version < (1, 9)
    if args.source:
        ste = set_tmux_environment
        stf = source_tmux_file
    else:
        ste = set_tmux_environment_nosource
        stf = source_tmux_file_nosource
    init_tmux_environment(pl, args, set_tmux_environment=ste)
    source_tmux_files(pl, args, tmux_version=tmux_version, source_tmux_file=stf)
def get_main_config(args):
    '''Load and return the main powerline configuration dictionary.'''
    return load_config(
        'config',
        generate_config_finder(),
        ConfigLoader(run_once=True),
    )
def create_powerline_logger(args):
    '''Build a powerline logger (pl) from the main config's common section.'''
    main_config = get_main_config(args)
    common = finish_common_config(
        get_preferred_output_encoding(), main_config['common'])
    _logger, pl, _get_module_attr = create_logger(common)
    return pl
def check_command(cmd):
    '''Return *cmd* when it resolves to an executable on PATH, else None.'''
    return cmd if which(cmd) else None
def deduce_command():
    '''Deduce which command to use for ``powerline``

    Candidates:

    * ``powerline``. Present only when installed system-wide.
    * ``{powerline_root}/scripts/powerline``. Present after ``pip install -e``
      was run and C client was compiled (in this case ``pip`` does not install
      binary file).
    * ``{powerline_root}/client/powerline.sh``. Useful when ``sh``, ``sed`` and
      ``socat`` are present, but ``pip`` or ``setup.py`` was not run.
    * ``{powerline_root}/client/powerline.py``. Like above, but when one of
      ``sh``, ``sed`` and ``socat`` was not present.
    * ``powerline-render``. Should not really ever be used.
    * ``{powerline_root}/scripts/powerline-render``. Same.
    '''
    # Candidates are wrapped in callables so later probes are only evaluated
    # when all earlier ones failed (same short-circuiting as the original
    # or-chain).
    probes = (
        lambda: check_command('powerline'),
        lambda: check_command(os.path.join(POWERLINE_ROOT, 'scripts', 'powerline')),
        lambda: ((which('sh') and which('sed') and which('socat'))
                 and check_command(os.path.join(POWERLINE_ROOT, 'client', 'powerline.sh'))),
        lambda: check_command(os.path.join(POWERLINE_ROOT, 'client', 'powerline.py')),
        lambda: check_command('powerline-render'),
        lambda: check_command(os.path.join(POWERLINE_ROOT, 'scripts', 'powerline-render')),
    )
    for probe in probes:
        found = probe()
        if found:
            return found
    return None
def shell_command(pl, args):
    '''Print the deduced powerline command, or exit with status 1 if none.'''
    found = deduce_command()
    if not found:
        sys.exit(1)
    print(found)
def uses(pl, args):
    '''Exit with status 0 when *component* is in use, 1 otherwise.

    A component counts as disabled when a ``POWERLINE_NO_{SHELL}_{COMPONENT}``
    environment variable is set (checked for the specific shell and for the
    generic "shell"), or when it is absent from the configured shell
    components.

    :raises ValueError: if no component was specified.
    '''
    component = args.component
    if not component:
        raise ValueError('Must specify component')
    shell = args.shell
    template = 'POWERLINE_NO_{shell}_{component}'
    # BUG FIX: the fallback was ('shell') — just the string 'shell' — so with
    # no shell given the loop iterated its characters and checked variables
    # like POWERLINE_NO_S_PROMPT. A one-element tuple is intended here.
    for sh in (shell, 'shell') if shell else ('shell',):
        varname = template.format(shell=sh.upper(), component=component.upper())
        if os.environ.get(varname):
            sys.exit(1)
    config = get_main_config(args)
    if component in config.get('ext', {}).get('shell', {}).get('components', ('tmux', 'prompt')):
        sys.exit(0)
    else:
        sys.exit(1)
| 39.25
| 129
| 0.65318
|
4a1294e515da1ab84eea0d4eb5e8a44c73c118a5
| 14,037
|
py
|
Python
|
lib/densityx/__init__.py
|
kaylai/GlassDensityCalc
|
fd524fb97728c166cb0756a4a130d57fac1be43c
|
[
"MIT"
] | null | null | null |
lib/densityx/__init__.py
|
kaylai/GlassDensityCalc
|
fd524fb97728c166cb0756a4a130d57fac1be43c
|
[
"MIT"
] | null | null | null |
lib/densityx/__init__.py
|
kaylai/GlassDensityCalc
|
fd524fb97728c166cb0756a4a130d57fac1be43c
|
[
"MIT"
] | null | null | null |
from math import *
import pandas
#Molecular Weights
MW_SiO2 = 60.0855
MW_TiO2 = 79.88
MW_Al2O3 = 101.96
MW_Fe2O3 = 159.69
MW_FeO = 71.85
MW_MgO = 40.3
MW_CaO = 56.08
MW_Na2O = 61.98
MW_K2O = 94.2
MW_H2O = 18.02
#Partial Molar Volumes
#Volumes for SiO2, Al2O3, MgO, CaO, Na2O, K2O at Tref=1773 K (Lange, 1997; CMP)
#Volume for H2O at Tref=1273 K (Ochs and Lange, 1999)
#Volume for FeO at Tref=1723 K (Guo et al., 2014)
#Volume for Fe2O3 at Tref=1723 K (Liu and Lange, 2006)
#Volume for TiO2 at Tref=1773 K (Lange and Carmichael, 1987)
MV_SiO2 = 26.86
MV_TiO2 = 28.32
MV_Al2O3 = 37.42
MV_Fe2O3 = 41.50
MV_FeO = 12.68
MV_MgO = 12.02
MV_CaO = 16.90
MV_Na2O = 29.65
MV_K2O = 47.28
MV_H2O = 22.9
#Partial Molar Volume uncertainties
#value = 0 if not reported
unc_MV_SiO2 = 0.03
unc_MV_TiO2 = 0
unc_MV_Al2O3 = 0.09
unc_MV_Fe2O3 = 0
unc_MV_FeO = 0
unc_MV_MgO = 0.07
unc_MV_CaO = 0.06
unc_MV_Na2O = 0.07
unc_MV_K2O = 0.10
unc_MV_H2O = 0.60
#dV/dT values
#MgO, CaO, Na2O, K2O Table 4 (Lange, 1997)
#SiO2, TiO2, Al2O3 Table 9 (Lange and Carmichael, 1987)
#H2O from Ochs & Lange (1999)
#Fe2O3 from Liu & Lange (2006)
#FeO from Guo et al (2014)
dVdT_SiO2 = 0.0
dVdT_TiO2 = 0.00724
dVdT_Al2O3 = 0.00262
dVdT_Fe2O3 = 0.0
dVdT_FeO = 0.00369
dVdT_MgO = 0.00327
dVdT_CaO = 0.00374
dVdT_Na2O = 0.00768
dVdT_K2O = 0.01208
dVdT_H2O = 0.0095
#dV/dT uncertainties
#value = 0 if not reported
unc_dVdT_SiO2 = 0
unc_dVdT_TiO2 = 0
unc_dVdT_Al2O3 = 0
unc_dVdT_Fe2O3 = 0
unc_dVdT_FeO = 0
unc_dVdT_MgO = 0
unc_dVdT_CaO = 0
unc_dVdT_Na2O = 0
unc_dVdT_K2O = 0
unc_dVdT_H2O = 0.00080
#dV/dP values
#Anhydrous component data from Kess and Carmichael (1991)
#H2O data from Ochs & Lange (1999)
dVdP_SiO2 = -0.000189
dVdP_TiO2 = -0.000231
dVdP_Al2O3 = -0.000226
dVdP_Fe2O3 = -0.000253
dVdP_FeO = -0.000045
dVdP_MgO = 0.000027
dVdP_CaO = 0.000034
dVdP_Na2O = -0.00024
dVdP_K2O = -0.000675
dVdP_H2O = -0.00032
#dV/dP uncertainties
unc_dVdP_SiO2 = 0.000002
unc_dVdP_TiO2 = 0.000006
unc_dVdP_Al2O3 = 0.000009
unc_dVdP_Fe2O3 = 0.000009
unc_dVdP_FeO = 0.000003
unc_dVdP_MgO = 0.000007
unc_dVdP_CaO = 0.000005
unc_dVdP_Na2O = 0.000005
unc_dVdP_K2O = 0.000014
unc_dVdP_H2O = 0.000060
#Tref values
Tref_SiO2 = 1773
Tref_TiO2 = 1773
Tref_Al2O3 = 1773
Tref_Fe2O3 = 1723
Tref_FeO = 1723
Tref_MgO = 1773
Tref_CaO = 1773
Tref_Na2O = 1773
Tref_K2O = 1773
Tref_H2O = 1273
def NormalizeWtPercentVals(dataframe):
    """Normalize the ten oxide wt% columns of *dataframe* to sum to 100.

    Mutates and returns the frame. Adds three columns: 'SiO2 (User Input)'
    (the pre-normalization SiO2 values), 'OriginalSum' (sum of the raw wt%
    values) and 'NormedSum' (sum after normalization, ~100). After this call
    the oxide columns hold normalized wt% values.
    """
    oxides = ('SiO2', 'TiO2', 'Al2O3', 'Fe2O3', 'FeO',
              'MgO', 'CaO', 'Na2O', 'K2O', 'H2O')
    data = dataframe
    # Preserve the raw SiO2 values in a duplicate column before normalizing;
    # column assignment copies, so later in-place edits leave it untouched.
    data["SiO2 (User Input)"] = data["SiO2"]
    # Row-wise total of the user-supplied wt% values.
    data["OriginalSum"] = sum(data[oxide] for oxide in oxides)
    # Rescale every oxide column so each row sums to 100 wt%.
    for oxide in oxides:
        data.loc[:, oxide] /= data['OriginalSum']
    for oxide in oxides:
        data.loc[:, oxide] *= 100
    data["NormedSum"] = sum(data[oxide] for oxide in oxides)
    # From this point, oxide column values are in normalized wt%
    return data
def MoleFraction(dataframe):
    """Convert the ten oxide columns from wt% to mole fraction, in place.

    First renormalizes the wt% values via NormalizeWtPercentVals, then
    divides each oxide column by its molecular weight (module-level MW_*
    constants) and renormalizes by the total mole proportion, adding a
    "MolPropOxSum" column along the way. Returns the same dataframe object.
    """
    data = NormalizeWtPercentVals(dataframe)
    # Map each oxide column to its molecular weight constant.
    mol_weights = {
        'SiO2': MW_SiO2, 'TiO2': MW_TiO2, 'Al2O3': MW_Al2O3,
        'Fe2O3': MW_Fe2O3, 'FeO': MW_FeO, 'MgO': MW_MgO,
        'CaO': MW_CaO, 'Na2O': MW_Na2O, 'K2O': MW_K2O, 'H2O': MW_H2O,
    }
    # Normalized wt% -> mole proportion of each oxide.
    for col, mw in mol_weights.items():
        data.loc[:, col] /= mw
    data["MolPropOxSum"] = (data["SiO2"] + data["TiO2"] + data["Al2O3"] +
                            data["Fe2O3"] + data["FeO"] + data["MgO"] +
                            data["CaO"] + data["Na2O"] + data["K2O"] +
                            data["H2O"])
    # Mole proportion -> mole fraction.
    for col in mol_weights:
        data.loc[:, col] /= data["MolPropOxSum"]
    # From this point, oxide column values are in mole fraction
    return data
def Density(dataframe, verbose=False):
    """Compute melt density and its propagated uncertainty for each sample.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        Must contain the ten oxide wt% columns, "Sample_ID", "T"
        (temperature in degrees C) and "P" (pressure; presumably in bars,
        given the (P - 1) terms below -- TODO confirm with callers).
    verbose : bool
        If False (default), return a frame with only Sample_ID, density
        (g/cm^3) and density uncertainty (g/cm^3). If True, return the
        full working dataframe with all intermediate columns.

    Note: the oxide columns of the working frame are reused in place and
    change meaning as the calculation proceeds (wt% -> mole fraction ->
    X*MW), so statement order here is significant.
    """
    data = dataframe #takes in a Pandas dataframe with compositional information, P, and T
    data = data.fillna(value=0) #Replace any empty cells (which read in as NaN) with 0, otherwise Pandas will break
    # MoleFraction mutates `data` in place; the returned reference is unused.
    data_moleFraction = MoleFraction(data)
    #calculating the component density in two equations: one for the denominator, one for the numerator.
    #A new numerator is calculated for each oxide: mole fraction * molecular weight.
    data["numerSiO2"] = data["SiO2"] * MW_SiO2
    data["numerTiO2"] = data["TiO2"] * MW_TiO2
    data["numerAl2O3"] = data["Al2O3"] * MW_Al2O3
    data["numerFe2O3"] = data["Fe2O3"] * MW_Fe2O3
    data["numerFeO"] = data["FeO"] * MW_FeO
    data["numerMgO"] = data["MgO"] * MW_MgO
    data["numerCaO"] = data["CaO"] * MW_CaO
    data["numerNa2O"] = data["Na2O"] * MW_Na2O
    data["numerK2O"] = data["K2O"] * MW_K2O
    data["numerH2O"] = data["H2O"] * MW_H2O
    #Calculate temperature in Kelvin
    data["T_K"] = data["T"] + 273
    #A new denominator is calculated for each oxide: the molar volume at T, P
    #(partial molar volume corrected from its reference T and from 1 bar).
    data["denomSiO2"] = MV_SiO2 + (dVdT_SiO2 * (data["T_K"] - Tref_SiO2)) + (dVdP_SiO2 * (data["P"] - 1))
    data["denomTiO2"] = MV_TiO2 + (dVdT_TiO2 * (data["T_K"] - Tref_TiO2)) + (dVdP_TiO2 * (data["P"] - 1))
    data["denomAl2O3"] = MV_Al2O3 + (dVdT_Al2O3 * (data["T_K"] - Tref_Al2O3)) + (dVdP_Al2O3 * (data["P"] - 1))
    data["denomFe2O3"] = MV_Fe2O3 + (dVdT_Fe2O3 * (data["T_K"] - Tref_Fe2O3)) + (dVdP_Fe2O3 * (data["P"] - 1))
    data["denomFeO"] = MV_FeO + (dVdT_FeO * (data["T_K"] - Tref_FeO)) + (dVdP_FeO * (data["P"] - 1))
    data["denomMgO"] = MV_MgO + (dVdT_MgO * (data["T_K"] - Tref_MgO)) + (dVdP_MgO * (data["P"] - 1))
    data["denomCaO"] = MV_CaO + (dVdT_CaO * (data["T_K"] - Tref_CaO)) + (dVdP_CaO * (data["P"] - 1))
    data["denomNa2O"] = MV_Na2O + (dVdT_Na2O * (data["T_K"] - Tref_Na2O)) + (dVdP_Na2O * (data["P"] - 1))
    data["denomK2O"] = MV_K2O + (dVdT_K2O * (data["T_K"] - Tref_K2O)) + (dVdP_K2O * (data["P"] - 1))
    data["denomH2O"] = MV_H2O + (dVdT_H2O * (data["T_K"] - Tref_H2O)) + (dVdP_H2O * (data["P"] - 1))
    #Calculate component density by dividing numerator by denominator
    data["ComponentDensity_SiO2"] = data["numerSiO2"] / data["denomSiO2"]
    data["ComponentDensity_TiO2"] = data["numerTiO2"] / data["denomTiO2"]
    data["ComponentDensity_Al2O3"] = data["numerAl2O3"] / data["denomAl2O3"]
    data["ComponentDensity_Fe2O3"] = data["numerFe2O3"] / data["denomFe2O3"]
    data["ComponentDensity_FeO"] = data["numerFeO"] / data["denomFeO"]
    data["ComponentDensity_MgO"] = data["numerMgO"] / data["denomMgO"]
    data["ComponentDensity_CaO"] = data["numerCaO"] / data["denomCaO"]
    data["ComponentDensity_Na2O"] = data["numerNa2O"] / data["denomNa2O"]
    data["ComponentDensity_K2O"] = data["numerK2O"] / data["denomK2O"]
    data["ComponentDensity_H2O"] = data["numerH2O"] / data["denomH2O"]
    #Calculate the individual Vliq for each oxide: molar volume at T, P
    #weighted by that oxide's mole fraction.
    data["IndivVliq_SiO2"] = (MV_SiO2 + (dVdT_SiO2 * (data["T_K"] - Tref_SiO2)) + (dVdP_SiO2 * (data["P"]-1))) * data["SiO2"]
    data["IndivVliq_TiO2"] = (MV_TiO2 + (dVdT_TiO2 * (data["T_K"] - Tref_TiO2)) + (dVdP_TiO2 * (data["P"]-1))) * data["TiO2"]
    data["IndivVliq_Al2O3"] = (MV_Al2O3 + (dVdT_Al2O3 * (data["T_K"] - Tref_Al2O3)) + (dVdP_Al2O3 * (data["P"]-1))) * data["Al2O3"]
    data["IndivVliq_Fe2O3"] = (MV_Fe2O3 + (dVdT_Fe2O3 * (data["T_K"] - Tref_Fe2O3)) + (dVdP_Fe2O3 * (data["P"]-1))) * data["Fe2O3"]
    data["IndivVliq_FeO"] = (MV_FeO + (dVdT_FeO * (data["T_K"] - Tref_FeO)) + (dVdP_FeO * (data["P"]-1))) * data["FeO"]
    data["IndivVliq_MgO"] = (MV_MgO + (dVdT_MgO * (data["T_K"] - Tref_MgO)) + (dVdP_MgO * (data["P"]-1))) * data["MgO"]
    data["IndivVliq_CaO"] = (MV_CaO + (dVdT_CaO * (data["T_K"] - Tref_CaO)) + (dVdP_CaO * (data["P"]-1))) * data["CaO"]
    data["IndivVliq_Na2O"] = (MV_Na2O + (dVdT_Na2O * (data["T_K"] - Tref_Na2O)) + (dVdP_Na2O * (data["P"]-1))) * data["Na2O"]
    data["IndivVliq_K2O"] = (MV_K2O + (dVdT_K2O * (data["T_K"] - Tref_K2O)) + (dVdP_K2O * (data["P"]-1))) * data["K2O"]
    data["IndivVliq_H2O"] = (MV_H2O + (dVdT_H2O * (data["T_K"] - Tref_H2O)) + (dVdP_H2O * (data["P"]-1))) * data["H2O"]
    #Calculate the sum of all Vliq oxides for each sample
    data["VliqSum"] = (data["IndivVliq_SiO2"] + data["IndivVliq_TiO2"] + data["IndivVliq_Al2O3"] + data["IndivVliq_Fe2O3"] + data["IndivVliq_FeO"] +
                       data["IndivVliq_MgO"] + data["IndivVliq_CaO"] + data["IndivVliq_Na2O"] + data["IndivVliq_K2O"] + data["IndivVliq_H2O"])
    #Calculate Indiv X*MW (oxide columns are overwritten again here)
    data.loc[:,'SiO2'] *= MW_SiO2
    data.loc[:,'TiO2'] *= MW_TiO2
    data.loc[:,'Al2O3'] *= MW_Al2O3
    data.loc[:,'Fe2O3'] *= MW_Fe2O3
    data.loc[:,'FeO'] *= MW_FeO
    data.loc[:,'MgO'] *= MW_MgO
    data.loc[:,'CaO'] *= MW_CaO
    data.loc[:,'Na2O'] *= MW_Na2O
    data.loc[:,'K2O'] *= MW_K2O
    data.loc[:,'H2O'] *= MW_H2O
    #From this point, oxide column values are in X*MW
    #Calculate the sum of X*MW oxides
    data["XMW_Sum"] = (data["SiO2"] + data["TiO2"] + data["Al2O3"] + data["Fe2O3"] + data["FeO"] +
                       data["MgO"] + data["CaO"] + data["Na2O"] + data["K2O"] + data["H2O"])
    #Calculate the density of the melt in g/cm3 and in g/L
    data["Density_g_per_cm3"] = data["XMW_Sum"] / data["VliqSum"]
    data["Density_g_per_L"] = data["Density_g_per_cm3"] * 1000
    #Uncertainty Calculations: relative (fractional) errors for each term.
    #Partial Molar Volume,
    error_MV = {'SiO2' : (unc_MV_SiO2 / MV_SiO2),
                'TiO2' : (unc_MV_TiO2 / MV_TiO2),
                'Al2O3' : (unc_MV_Al2O3 / MV_Al2O3),
                'Fe2O3' : (unc_MV_Fe2O3 / MV_Fe2O3),
                'FeO' : (unc_MV_FeO / MV_FeO),
                'MgO' : (unc_MV_MgO / MV_MgO),
                'CaO' : (unc_MV_CaO / MV_CaO),
                'Na2O' : (unc_MV_Na2O / MV_Na2O),
                'K2O' : (unc_MV_K2O / MV_K2O),
                'H2O' : (unc_MV_H2O / MV_H2O)}
    #dVdT values
    error_dVdT = {
        # dVdT_SiO2 is 0, so divide by 1 to avoid ZeroDivisionError
        # (unc_dVdT_SiO2 is also 0, so the entry is 0 either way).
        'SiO2' : (unc_dVdT_SiO2 / 1),
        'TiO2' : (unc_dVdT_TiO2 / dVdT_TiO2),
        'Al2O3' : (unc_dVdT_Al2O3 / dVdT_Al2O3),
        # dVdT_Fe2O3 is 0 as well; relative error hard-coded to 0.
        'Fe2O3' : 0,
        'FeO' : (unc_dVdT_FeO / dVdT_FeO),
        'MgO' : (unc_dVdT_MgO / dVdT_MgO),
        'CaO' : (unc_dVdT_CaO / dVdT_CaO),
        'Na2O' : (unc_dVdT_Na2O / dVdT_Na2O),
        'K2O' : (unc_dVdT_K2O / dVdT_K2O),
        'H2O' : (unc_dVdT_H2O / dVdT_H2O)}
    #dVdP values
    error_dVdP = {
        'SiO2' : (unc_dVdP_SiO2 / dVdP_SiO2),
        'TiO2' : (unc_dVdP_TiO2 / dVdP_TiO2),
        'Al2O3' : (unc_dVdP_Al2O3 / dVdP_Al2O3),
        'Fe2O3' : (unc_dVdP_Fe2O3 / dVdP_Fe2O3),
        'FeO' : (unc_dVdP_FeO / dVdP_FeO),
        'MgO' : (unc_dVdP_MgO / dVdP_MgO),
        'CaO' : (unc_dVdP_CaO / dVdP_CaO),
        'Na2O' : (unc_dVdP_Na2O / dVdP_Na2O),
        'K2O' : (unc_dVdP_K2O / dVdP_K2O),
        'H2O' : (unc_dVdP_H2O / dVdP_H2O)}
    #Combine the three relative errors in quadrature for each oxide.
    percent_error_Vliq = {}
    for key in error_MV:
        percent_error_Vliq[key] = sqrt(error_MV[key]**2 + error_dVdT[key]**2 + error_dVdP[key]**2)
    data["Unc_Vliq_SiO2"] = data["IndivVliq_SiO2"] * percent_error_Vliq['SiO2']
    data["Unc_Vliq_TiO2"] = data["IndivVliq_TiO2"] * percent_error_Vliq['TiO2']
    data["Unc_Vliq_Al2O3"] = data["IndivVliq_Al2O3"] * percent_error_Vliq['Al2O3']
    data["Unc_Vliq_Fe2O3"] = data["IndivVliq_Fe2O3"] * percent_error_Vliq['Fe2O3']
    data["Unc_Vliq_FeO"] = data["IndivVliq_FeO"] * percent_error_Vliq['FeO']
    data["Unc_Vliq_MgO"] = data["IndivVliq_MgO"] * percent_error_Vliq['MgO']
    data["Unc_Vliq_CaO"] = data["IndivVliq_CaO"] * percent_error_Vliq['CaO']
    data["Unc_Vliq_Na2O"] = data["IndivVliq_Na2O"] * percent_error_Vliq['Na2O']
    data["Unc_Vliq_K2O"] = data["IndivVliq_K2O"] * percent_error_Vliq['K2O']
    data["Unc_Vliq_H2O"] = data["IndivVliq_H2O"] * percent_error_Vliq['H2O']
    data["unc_VliqSum"] = ( data["Unc_Vliq_SiO2"] +
                            data["Unc_Vliq_TiO2"] +
                            data["Unc_Vliq_Al2O3"]+
                            data["Unc_Vliq_Fe2O3"]+
                            data["Unc_Vliq_FeO"] +
                            data["Unc_Vliq_MgO"] +
                            data["Unc_Vliq_CaO"] +
                            data["Unc_Vliq_Na2O"] +
                            data["Unc_Vliq_K2O"] +
                            data["Unc_Vliq_H2O"] )
    #calculate error on density value
    data['Uncertainty_g_per_cm3'] = data["unc_VliqSum"] / data["VliqSum"]
    data['Uncertainty_g_per_L'] = data["Uncertainty_g_per_cm3"] * 1000
    data_to_return = pandas.DataFrame({"Sample_ID": data["Sample_ID"],
                                       "density": data["Density_g_per_cm3"],
                                       "density_unc": data["Uncertainty_g_per_cm3"]})
    if verbose is False:
        return data_to_return
    if verbose is True:
        return data
| 39.319328
| 168
| 0.616798
|
4a1295698dc5c88a1716604b0f94c66298de5ad8
| 9,548
|
py
|
Python
|
jupyterlab/handlers/extension_manager_handler.py
|
nmichaud/jupyterlab
|
ebbe90df0826baf81e4067bf1c15157812abe978
|
[
"BSD-3-Clause"
] | 3
|
2017-11-30T13:02:36.000Z
|
2020-09-11T01:26:35.000Z
|
jupyterlab/handlers/extension_manager_handler.py
|
nmichaud/jupyterlab
|
ebbe90df0826baf81e4067bf1c15157812abe978
|
[
"BSD-3-Clause"
] | 2
|
2017-05-03T21:24:52.000Z
|
2019-01-15T23:15:11.000Z
|
jupyterlab/handlers/extension_manager_handler.py
|
ianhi/jupyterlab
|
13c1964250d2772739fe3688360e4ef3f25564c0
|
[
"BSD-3-Clause"
] | 1
|
2016-07-16T15:45:53.000Z
|
2016-07-16T15:45:53.000Z
|
"""Tornado handlers for extension management."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import re
from concurrent.futures import ThreadPoolExecutor
from jupyter_server.base.handlers import APIHandler
from jupyter_server.extension.handler import ExtensionHandlerMixin
from tornado import gen, web
from ..commands import (
get_app_info, install_extension, uninstall_extension,
enable_extension, disable_extension, read_package,
_AppHandler, get_latest_compatible_package_versions,
AppOptions, _ensure_options
)
def _make_extension_entry(name, description, url, enabled, core, latest_version,
installed_version, status, installed=None):
"""Create an extension entry that can be sent to the client"""
ret = dict(
name=name,
description=description,
url=url,
enabled=enabled,
core=core,
latest_version=latest_version,
installed_version=installed_version,
status=status,
)
if installed is not None:
ret['installed'] = installed
return ret
def _ensure_compat_errors(info, app_options):
    """Ensure that the app info dict has a 'compat_errors' field."""
    # Delegate to a throwaway _AppHandler to compute extension compatibility.
    info['compat_errors'] = _AppHandler(app_options)._get_extension_compat()
# Regexes classifying build-check messages into pending actions; each pattern
# captures the affected package name (and, for updates, old/new versions).
_message_map = {
    'install': re.compile(r'(?P<name>.*) needs to be included in build'),
    'uninstall': re.compile(r'(?P<name>.*) needs to be removed from build'),
    'update': re.compile(r'(?P<name>.*) changed from (?P<oldver>.*) to (?P<newver>.*)'),
}
def _build_check_info(app_options):
    """Classify packages scheduled for (un)install/update by the next build."""
    messages = _AppHandler(app_options).build_check(fast=True)
    status = {'install': [], 'uninstall': [], 'update': []}
    # Match each build-check message against the known patterns and file the
    # captured package name under the corresponding action.
    for msg in messages:
        for action, pattern in _message_map.items():
            hit = pattern.match(msg)
            if hit:
                status[action].append(hit.group('name'))
    return status
class ExtensionManager(object):
    """Backend for listing and managing JupyterLab extensions.

    All (un)install/enable/disable operations are delegated to the commands
    module; npm/yarn "outdated" queries run on a single-thread executor and
    are cached in self._outdated.
    """

    # Shared single worker so outdated-version lookups never run concurrently.
    executor = ThreadPoolExecutor(max_workers=1)

    def __init__(self, app_options=None):
        # Normalize whatever the caller passed into a full AppOptions object.
        app_options = _ensure_options(app_options)
        self.log = app_options.logger
        self.app_dir = app_options.app_dir
        self.core_config = app_options.core_config
        self.app_options = app_options
        # Cached Future of outdated-extension data; None until first request.
        self._outdated = None
        # To start fetching data on outdated extensions immediately, uncomment:
        # IOLoop.current().spawn_callback(self._get_outdated)

    @gen.coroutine
    def list_extensions(self):
        """Handle a request for all installed extensions"""
        app_options = self.app_options
        info = get_app_info(app_options=app_options)
        build_check_info = _build_check_info(app_options)
        _ensure_compat_errors(info, app_options)
        extensions = []
        # TODO: Ensure loops can run in parallel
        for name, data in info['extensions'].items():
            status = 'ok'
            pkg_info = yield self._get_pkg_info(name, data)
            # Compatibility errors take precedence; otherwise any pending
            # build action (install/uninstall/update) downgrades to warning.
            if info['compat_errors'].get(name, None):
                status = 'error'
            else:
                for packages in build_check_info.values():
                    if name in packages:
                        status = 'warning'
            extensions.append(_make_extension_entry(
                name=name,
                description=pkg_info.get('description', ''),
                url=data['url'],
                enabled=(name not in info['disabled']),
                core=False,
                # Use wanted version to ensure we limit ourselves
                # within semver restrictions
                latest_version=pkg_info['latest_version'],
                installed_version=data['version'],
                status=status,
            ))
        # Also report packages scheduled for uninstall on the next build.
        for name in build_check_info['uninstall']:
            data = yield self._get_scheduled_uninstall_info(name)
            if data is not None:
                extensions.append(_make_extension_entry(
                    name=name,
                    description=data.get('description', ''),
                    url=data.get('homepage', ''),
                    installed=False,
                    enabled=False,
                    core=False,
                    latest_version=data['version'],
                    installed_version=data['version'],
                    status='warning',
                ))
        raise gen.Return(extensions)

    @gen.coroutine
    def install(self, extension):
        """Handle an install/update request"""
        try:
            install_extension(extension, app_options=self.app_options)
        except ValueError as e:
            raise gen.Return(dict(status='error', message=str(e)))
        raise gen.Return(dict(status='ok',))

    @gen.coroutine
    def uninstall(self, extension):
        """Handle an uninstall request"""
        did_uninstall = uninstall_extension(
            extension, app_options=self.app_options)
        raise gen.Return(dict(status='ok' if did_uninstall else 'error',))

    @gen.coroutine
    def enable(self, extension):
        """Handle an enable request"""
        enable_extension(extension, app_options=self.app_options)
        raise gen.Return(dict(status='ok',))

    @gen.coroutine
    def disable(self, extension):
        """Handle a disable request"""
        disable_extension(extension, app_options=self.app_options)
        raise gen.Return(dict(status='ok',))

    @gen.coroutine
    def _get_pkg_info(self, name, data):
        """Get information about a package"""
        info = read_package(data['path'])
        # Get latest version that is compatible with current lab:
        outdated = yield self._get_outdated()
        if outdated and name in outdated:
            info['latest_version'] = outdated[name]
        else:
            # Fallback to indicating that current is latest
            info['latest_version'] = info['version']
        raise gen.Return(info)

    def _get_outdated(self):
        """Get a Future to information from `npm/yarn outdated`.

        This will cache the results. To refresh the cache, set
        self._outdated to None before calling. To bypass the cache,
        call self._load_outdated directly.
        """
        # Ensure self._outdated is a Future for data on outdated extensions
        if self._outdated is None:
            self._outdated = self._load_outdated()
        # Return the Future
        return self._outdated

    def refresh_outdated(self):
        # Unconditionally re-query and replace the cached Future.
        self._outdated = self._load_outdated()
        return self._outdated

    @gen.coroutine
    def _load_outdated(self):
        """Get the latest compatible version of every installed extension."""
        info = get_app_info(app_options=self.app_options)
        names = tuple(info['extensions'].keys())
        # Run the (slow, network-bound) version query on the executor thread.
        data = yield self.executor.submit(
            get_latest_compatible_package_versions,
            names,
            app_options=self.app_options
        )
        raise gen.Return(data)

    @gen.coroutine
    def _get_scheduled_uninstall_info(self, name):
        """Get information about a package that is scheduled for uninstallation"""
        target = os.path.join(
            self.app_dir, 'staging', 'node_modules', name, 'package.json')
        # The package.json may already be gone; report None in that case.
        if os.path.exists(target):
            with open(target) as fid:
                raise gen.Return(json.load(fid))
        else:
            raise gen.Return(None)
class ExtensionHandler(ExtensionHandlerMixin, APIHandler):
    """REST endpoint exposing an ExtensionManager over GET/POST.

    GET returns the list of installed extensions (``?refresh=1`` forces a
    re-query of outdated-version data). POST performs one of the actions
    install/uninstall/enable/disable on a named extension.
    """

    def initialize(self, manager=None, name=None):
        super(ExtensionHandler, self).initialize(name=name)
        # The shared ExtensionManager instance this handler delegates to.
        self.manager = manager

    @web.authenticated
    @gen.coroutine
    def get(self):
        """GET query returns info on all installed extensions"""
        if self.get_argument('refresh', False) == '1':
            yield self.manager.refresh_outdated()
        extensions = yield self.manager.list_extensions()
        self.finish(json.dumps(extensions))

    @web.authenticated
    @gen.coroutine
    def post(self):
        """POST query performs an action on a specific extension"""
        data = self.get_json_body()
        cmd = data['cmd']
        name = data['extension_name']
        if (cmd not in ('install', 'uninstall', 'enable', 'disable') or
                not name):
            # Bug fix: error message previously read "instrution".
            raise web.HTTPError(
                422, 'Could not process instruction %r with extension name %r' % (
                    cmd, name))
        # TODO: Can we trust extension_name? Does it need sanitation?
        #       It comes from an authenticated session, but its name is
        #       ultimately from the NPM repository.
        ret_value = None
        try:
            if cmd == 'install':
                ret_value = yield self.manager.install(name)
            elif cmd == 'uninstall':
                ret_value = yield self.manager.uninstall(name)
            elif cmd == 'enable':
                ret_value = yield self.manager.enable(name)
            elif cmd == 'disable':
                ret_value = yield self.manager.disable(name)
        except gen.Return as e:
            # The manager coroutines signal their result via gen.Return.
            ret_value = e.value
        except Exception as e:
            # Surface any backend failure as a 500 with its message.
            raise web.HTTPError(500, str(e))
        if ret_value is None:
            self.set_status(200)
        else:
            self.finish(json.dumps(ret_value))
# The URL path at which the lab extensions handler above is mounted.
extensions_handler_path = r"/lab/api/extensions"
| 35.894737
| 88
| 0.615941
|
4a1296b426485c1a1a19f05b0b2645331a54f48a
| 22,988
|
bzl
|
Python
|
tensorflow/core/platform/default/build_refactor.bzl
|
LuBingtan/tensorflow
|
064a4a0dabe0689fc3d3e90ee4ad39c7161e49ac
|
[
"Apache-2.0"
] | 1
|
2019-09-02T08:18:46.000Z
|
2019-09-02T08:18:46.000Z
|
tensorflow/core/platform/default/build_refactor.bzl
|
LuBingtan/tensorflow
|
064a4a0dabe0689fc3d3e90ee4ad39c7161e49ac
|
[
"Apache-2.0"
] | 1
|
2018-04-02T23:42:30.000Z
|
2018-05-03T23:12:23.000Z
|
tensorflow/core/platform/default/build_refactor.bzl
|
LuBingtan/tensorflow
|
064a4a0dabe0689fc3d3e90ee4ad39c7161e49ac
|
[
"Apache-2.0"
] | null | null | null |
"""
Build targets for default implementations of tf/core/platform libraries.
"""
# This is a temporary hack to mimic the presence of a BUILD file under
# tensorflow/core/platform/default. This is part of a large refactoring
# of BUILD rules under tensorflow/core/platform. We will remove this file
# and add real BUILD files under tensorflow/core/platform/default and
# tensorflow/core/platform/windows after the refactoring is complete.
load(
"//tensorflow:tensorflow.bzl",
"tf_copts",
)
# Library-name -> cc_library keyword arguments for the default (POSIX)
# implementations of tensorflow/core/platform targets. Presumably consumed by
# tf_instantiate_platform_libraries below to materialize each entry as a
# native.cc_library -- confirm against the full function body.
TF_DEFAULT_PLATFORM_LIBRARIES = {
    "context": {
        "name": "context_impl",
        "hdrs": ["//tensorflow/core/platform:context.h"],
        "textual_hdrs": ["//tensorflow/core/platform:default/context.h"],
        "deps": [
            "//tensorflow/core/platform",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "cord": {
        "name": "cord_impl",
        "hdrs": ["//tensorflow/core/platform:default/cord.h"],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "cuda_libdevice_path": {
        "name": "cuda_libdevice_path_impl",
        "hdrs": [
            "//tensorflow/core/platform:cuda_libdevice_path.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/cuda_libdevice_path.cc",
        ],
        "deps": [
            "@local_config_cuda//cuda:cuda_headers",
            "//tensorflow/core:lib",
            # TODO(bmzhao): When bazel gains cc_shared_library support, the targets below are
            # the actual granular targets we should depend on, instead of tf/core:lib.
            # "//tensorflow/core/platform:logging",
            # "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "dynamic_annotations": {
        "name": "dynamic_annotations_impl",
        "hdrs": [
            "//tensorflow/core/platform:default/dynamic_annotations.h",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "env": {
        "name": "env_impl",
        "hdrs": [
            "//tensorflow/core/platform:env.h",
            "//tensorflow/core/platform:file_system.h",
            "//tensorflow/core/platform:file_system_helper.h",
            "//tensorflow/core/platform:threadpool.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:env.cc",
            "//tensorflow/core/platform:file_system.cc",
            "//tensorflow/core/platform:file_system_helper.cc",
            "//tensorflow/core/platform:threadpool.cc",
            "//tensorflow/core/platform:default/env.cc",
            "//tensorflow/core/platform:default/posix_file_system.h",
            "//tensorflow/core/platform:default/posix_file_system.cc",
        ],
        "deps": [
            "@com_google_absl//absl/time",
            "@com_google_absl//absl/types:optional",
            "//third_party/eigen3",
            "//tensorflow/core/lib/core:blocking_counter",
            "//tensorflow/core/lib/core:error_codes_proto_cc",
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/core:status",
            "//tensorflow/core/lib/core:stringpiece",
            "//tensorflow/core/lib/io:path",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:context",
            "//tensorflow/core/platform:cord",
            "//tensorflow/core/platform:denormal",
            "//tensorflow/core/platform:error",
            "//tensorflow/core/platform:env_time",
            "//tensorflow/core/platform:file_statistics",
            "//tensorflow/core/platform:load_library",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/platform:platform_port",
            "//tensorflow/core/platform:protobuf",
            "//tensorflow/core/platform:setround",
            "//tensorflow/core/platform:stringpiece",
            "//tensorflow/core/platform:stringprintf",
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:str_util",
            "//tensorflow/core/platform:threadpool_interface",
            "//tensorflow/core/platform:tracing",
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "env_time": {
        "name": "env_time_impl",
        "hdrs": [
            "//tensorflow/core/platform:env_time.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/env_time.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "human_readable_json": {
        "name": "human_readable_json_impl",
        "hdrs": [
            "//tensorflow/core/platform:human_readable_json.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/human_readable_json.cc",
        ],
        "deps": [
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/core:status",
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:protobuf",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "load_library": {
        "name": "load_library_impl",
        "hdrs": [
            "//tensorflow/core/platform:load_library.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/load_library.cc",
        ],
        "deps": [
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/core:status",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "logging": {
        "name": "logging_impl",
        "hdrs": [
            "//tensorflow/core/platform:logging.h",
        ],
        "textual_hdrs": [
            "//tensorflow/core/platform:default/logging.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/logging.cc",
        ],
        "deps": [
            "@com_google_absl//absl/base",
            "@com_google_absl//absl/strings",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:env_time",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "mutex": {
        "name": "mutex_impl",
        "hdrs": [
            "//tensorflow/core/platform:mutex.h",
        ],
        "textual_hdrs": [
            "//tensorflow/core/platform:default/mutex.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/mutex.cc",
            "//tensorflow/core/platform:default/mutex_data.h",
        ],
        "deps": [
            "@nsync//:nsync_cpp",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:thread_annotations",
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "net": {
        "name": "net_impl",
        "hdrs": [
            "//tensorflow/core/platform:net.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/net.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:logging",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
        "alwayslink": 1,
    },
    "notification": {
        "name": "notification_impl",
        "hdrs": [
            "//tensorflow/core/platform:default/notification.h",
        ],
        "deps": [
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "rocm_rocdl_path": {
        "name": "rocm_rocdl_path_impl",
        "hdrs": [
            "//tensorflow/core/platform:rocm_rocdl_path.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/rocm_rocdl_path.cc",
        ],
        "deps": [
            "@local_config_rocm//rocm:rocm_headers",
            "//tensorflow/core:lib",
            # TODO(bmzhao): When bazel gains cc_shared_library support, the targets below are
            # the actual granular targets we should depend on, instead of tf/core:lib.
            # "//tensorflow/core/lib/io:path",
            # "//tensorflow/core/platform:logging",
            # "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "stacktrace": {
        "name": "stacktrace_impl",
        "hdrs": [
            "//tensorflow/core/platform:default/stacktrace.h",
        ],
        "deps": [
            "//tensorflow/core/platform:abi",
            "//tensorflow/core/platform:platform",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
    "stacktrace_handler": {
        "name": "stacktrace_handler_impl",
        "hdrs": [
            "//tensorflow/core/platform:stacktrace_handler.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/stacktrace_handler.cc",
        ],
        "deps": [
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:stacktrace",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
    "strong_hash": {
        "name": "strong_hash_impl",
        "textual_hdrs": [
            "//tensorflow/core/platform:default/strong_hash.h",
        ],
        "deps": [
            "@highwayhash//:sip_hash",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual"],
    },
    "subprocess": {
        "name": "subprocess_impl",
        "textual_hdrs": [
            "//tensorflow/core/platform:default/subprocess.h",
        ],
        "hdrs": [
            "//tensorflow/core/platform:subprocess.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/subprocess.cc",
        ],
        "deps": [
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/platform:types",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
        "alwayslink": 1,
    },
    "test": {
        "name": "test_impl",
        "testonly": True,
        "srcs": [
            "//tensorflow/core/platform:default/test.cc",
        ],
        "hdrs": [
            "//tensorflow/core/platform:test.h",
        ],
        "deps": [
            "@com_google_googletest//:gtest",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:net",
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:types",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
    "tracing": {
        "name": "tracing_impl",
        "textual_hdrs": [
            "//tensorflow/core/platform:default/tracing_impl.h",
        ],
        "hdrs": [
            "//tensorflow/core/platform:tracing.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/tracing.cc",
            "//tensorflow/core/platform:tracing.cc",
        ],
        "deps": [
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/hash",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:str_util",
            "//tensorflow/core/platform:stringpiece",
            "//tensorflow/core/platform:types",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
    "types": {
        "name": "types_impl",
        "textual_hdrs": [
            "//tensorflow/core/platform:default/integral_types.h",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
    "unbounded_work_queue": {
        "name": "unbounded_work_queue_impl",
        "hdrs": [
            "//tensorflow/core/platform:default/unbounded_work_queue.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:default/unbounded_work_queue.cc",
        ],
        "deps": [
            "@com_google_absl//absl/memory",
            "//tensorflow/core/platform:env",
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/lib/core:notification",
        ],
        "tags": ["no_oss", "manual"],
        "visibility": ["//visibility:private"],
    },
}
# Windows counterparts of the entries above: library-name -> cc_library
# keyword arguments for the windows/ implementations of the same platform
# targets. Only the libraries that need a Windows-specific build are listed.
TF_WINDOWS_PLATFORM_LIBRARIES = {
    "env": {
        "name": "windows_env_impl",
        "hdrs": [
            "//tensorflow/core/platform:env.h",
            "//tensorflow/core/platform:file_system.h",
            "//tensorflow/core/platform:file_system_helper.h",
            "//tensorflow/core/platform:threadpool.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:env.cc",
            "//tensorflow/core/platform:file_system.cc",
            "//tensorflow/core/platform:file_system_helper.cc",
            "//tensorflow/core/platform:threadpool.cc",
            "//tensorflow/core/platform:windows/env.cc",
            "//tensorflow/core/platform:windows/windows_file_system.h",
            "//tensorflow/core/platform:windows/windows_file_system.cc",
        ],
        "deps": [
            "@com_google_absl//absl/time",
            "@com_google_absl//absl/types:optional",
            "//third_party/eigen3",
            "//tensorflow/core/lib/core:blocking_counter",
            "//tensorflow/core/lib/core:error_codes_proto_cc",
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/core:status",
            "//tensorflow/core/lib/core:stringpiece",
            "//tensorflow/core/lib/io:path",
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:context",
            "//tensorflow/core/platform:cord",
            "//tensorflow/core/platform:denormal",
            "//tensorflow/core/platform:error",
            "//tensorflow/core/platform:env_time",
            "//tensorflow/core/platform:file_statistics",
            "//tensorflow/core/platform:load_library",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/platform:platform_port",
            "//tensorflow/core/platform:protobuf",
            "//tensorflow/core/platform:setround",
            "//tensorflow/core/platform:stringpiece",
            "//tensorflow/core/platform:stringprintf",
            "//tensorflow/core/platform:strcat",
            "//tensorflow/core/platform:str_util",
            "//tensorflow/core/platform:threadpool_interface",
            "//tensorflow/core/platform:tracing",
            "//tensorflow/core/platform:types",
            "//tensorflow/core/platform:windows_wide_char_impl",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual", "nobuilder"],
    },
    "env_time": {
        "name": "windows_env_time_impl",
        "hdrs": [
            "//tensorflow/core/platform:env_time.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:windows/env_time.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:types",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual", "nobuilder"],
    },
    "load_library": {
        "name": "windows_load_library_impl",
        "hdrs": [
            "//tensorflow/core/platform:load_library.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:windows/load_library.cc",
        ],
        "deps": [
            "//tensorflow/core/lib/core:errors",
            "//tensorflow/core/lib/core:status",
            "//tensorflow/core/platform:windows_wide_char_impl",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual", "nobuilder"],
    },
    "net": {
        "name": "windows_net_impl",
        "hdrs": [
            "//tensorflow/core/platform:net.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:windows/net.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:error",
            "//tensorflow/core/platform:logging",
        ],
        "visibility": ["//visibility:private"],
        "tags": ["no_oss", "manual", "nobuilder"],
    },
    "stacktrace": {
        "name": "windows_stacktrace_impl",
        "hdrs": [
            "//tensorflow/core/platform:windows/stacktrace.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:windows/stacktrace.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:mutex",
        ],
        "tags": ["no_oss", "manual", "nobuilder"],
        "visibility": ["//visibility:private"],
    },
    "stacktrace_handler": {
        "name": "windows_stacktrace_handler_impl",
        "hdrs": [
            "//tensorflow/core/platform:stacktrace_handler.h",
        ],
        "srcs": [
            "//tensorflow/core/platform:windows/stacktrace_handler.cc",
        ],
        "deps": [
            "//tensorflow/core/platform:mutex",
            "//tensorflow/core/platform:stacktrace",
            "//tensorflow/core/platform:types",
        ],
        "tags": ["no_oss", "manual", "nobuilder"],
        "visibility": ["//visibility:private"],
    },
    "subprocess": {
        "name": "windows_subprocess_impl",
        "textual_hdrs": [
            "//tensorflow/core/platform:windows/subprocess.h",
        ],
        "hdrs": [
            "//tensorflow/core/platform:subprocess.h",
        ],
        "deps": [
            "//tensorflow/core/platform",
            "//tensorflow/core/platform:logging",
            "//tensorflow/core/platform:macros",
            "//tensorflow/core/platform:types",
        ],
        "tags": ["no_oss", "manual", "nobuilder"],
        "visibility": ["//visibility:private"],
    },
    "wide_char": {
        "name": "windows_wide_char_impl",
        "hdrs": [
            "//tensorflow/core/platform:windows/wide_char.h",
        ],
        "tags": ["no_oss", "manual", "nobuilder"],
        "visibility": ["//visibility:private"],
    },
}
def tf_instantiate_platform_libraries(names = []):
    """Instantiates cc_library targets for the requested platform libraries.

    For each name in `names`, creates both the default and the Windows
    implementation target so a `select()` elsewhere can pick between them.
    Most targets come from the TF_DEFAULT_PLATFORM_LIBRARIES /
    TF_WINDOWS_PLATFORM_LIBRARIES dictionaries; "platform_port" is spelled
    out inline because its attributes themselves use `select()`, which
    cannot live inside a plain dictionary literal.
    """
    for name in names:
        # Unfortunately, this target cannot be represented as a dictionary
        # because it uses "select"
        if name == "platform_port":
            # Default (POSIX) implementation of the port/cpu_info layer.
            native.cc_library(
                name = "platform_port_impl",
                srcs = [
                    "//tensorflow/core/platform:cpu_info.cc",
                    "//tensorflow/core/platform:default/port.cc",
                ],
                hdrs = [
                    "//tensorflow/core/platform:cpu_info.h",
                    "//tensorflow/core/platform:demangle.h",
                    "//tensorflow/core/platform:host_info.h",
                    "//tensorflow/core/platform:init_main.h",
                    "//tensorflow/core/platform:mem.h",
                    "//tensorflow/core/platform:numa.h",
                    "//tensorflow/core/platform:snappy.h",
                ],
                defines = ["TF_USE_SNAPPY"] + select({
                    # TF Additional NUMA defines
                    "//tensorflow:with_numa_support": ["TENSORFLOW_USE_NUMA"],
                    "//conditions:default": [],
                }),
                copts = tf_copts(),
                deps = [
                    "@com_google_absl//absl/base",
                    "//tensorflow/core/platform:byte_order",
                    "//tensorflow/core/platform:dynamic_annotations",
                    "//tensorflow/core/platform:logging",
                    "//tensorflow/core/platform:types",
                    "//tensorflow/core/platform",
                    "@snappy",
                ] + select({
                    # TF Additional NUMA dependencies
                    "//tensorflow:android": [],
                    "//tensorflow:ios": [],
                    "//tensorflow:macos": [],
                    "//conditions:default": [
                        "@hwloc",
                    ],
                }),
                visibility = ["//visibility:private"],
                tags = ["no_oss", "manual"],
            )
            # Windows implementation of the same interface; no NUMA support.
            native.cc_library(
                name = "windows_platform_port_impl",
                srcs = [
                    "//tensorflow/core/platform:cpu_info.cc",
                    "//tensorflow/core/platform:windows/port.cc",
                ],
                hdrs = [
                    "//tensorflow/core/platform:cpu_info.h",
                    "//tensorflow/core/platform:demangle.h",
                    "//tensorflow/core/platform:host_info.h",
                    "//tensorflow/core/platform:init_main.h",
                    "//tensorflow/core/platform:mem.h",
                    "//tensorflow/core/platform:numa.h",
                    "//tensorflow/core/platform:snappy.h",
                ],
                defines = ["TF_USE_SNAPPY"],
                copts = tf_copts(),
                deps = [
                    "//tensorflow/core/platform",
                    "//tensorflow/core/platform:byte_order",
                    "//tensorflow/core/platform:dynamic_annotations",
                    "//tensorflow/core/platform:logging",
                    "//tensorflow/core/platform:types",
                    "@snappy",
                ],
                visibility = ["//visibility:private"],
                tags = ["no_oss", "manual"],
            )
        else:
            # Dictionary-backed targets: instantiate whichever variants exist.
            if name in TF_DEFAULT_PLATFORM_LIBRARIES:
                native.cc_library(**TF_DEFAULT_PLATFORM_LIBRARIES[name])
            if name in TF_WINDOWS_PLATFORM_LIBRARIES:
                native.cc_library(**TF_WINDOWS_PLATFORM_LIBRARIES[name])
def tf_mobile_aware_deps(name):
    """Returns the dependency list for `name` relative to the current package."""
    local_label = ":" + name
    return [local_label]
def tf_platform_helper_deps(name):
    # Pick the Windows-specific implementation of a platform helper library
    # on Windows builds, and the default implementation everywhere else.
    return select({
        "//tensorflow:windows": [":windows_" + name],
        "//conditions:default": [":" + name],
    })
def tf_logging_deps():
    """Returns the deps that provide the logging implementation."""
    deps = [":logging_impl"]
    return deps
| 36.488889
| 93
| 0.517226
|
4a1296f1b0f149c809b91bdee1f0a24e69941706
| 1,009
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/authorization/v20171001preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/authorization/v20171001preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/authorization/v20171001preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .get_role_assignment import *
from .role_assignment import *
def _register_module():
    """Register this module's resource types with the Pulumi runtime.

    Lets the engine rehydrate ``RoleAssignment`` objects from their URNs,
    e.g. when reading existing stack state.
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        # All resources in this module report the SDK's own semver version.
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Only one resource type lives in this module; anything else is
            # a programming error.
            if typ == "azure-nextgen:authorization/v20171001preview:RoleAssignment":
                return RoleAssignment(name, pulumi.ResourceOptions(urn=urn))
            else:
                raise Exception(f"unknown resource type {typ}")

    _module_instance = Module()
    pulumi.runtime.register_resource_module("azure-nextgen", "authorization/v20171001preview", _module_instance)


# Registration happens as an import side effect (generated-code convention).
_register_module()
| 32.548387
| 112
| 0.688801
|
4a129711c93e1ea7504dd04955b3c52a81dabe68
| 4,490
|
py
|
Python
|
all_repos/clone.py
|
charlievieth/all-repos
|
279d2910c56567d9518ab41bd8894216b9f649e5
|
[
"MIT"
] | 1
|
2020-12-23T18:26:54.000Z
|
2020-12-23T18:26:54.000Z
|
all_repos/clone.py
|
charlievieth/all-repos
|
279d2910c56567d9518ab41bd8894216b9f649e5
|
[
"MIT"
] | null | null | null |
all_repos/clone.py
|
charlievieth/all-repos
|
279d2910c56567d9518ab41bd8894216b9f649e5
|
[
"MIT"
] | 2
|
2020-09-03T12:50:13.000Z
|
2020-10-30T07:45:29.000Z
|
import argparse
import functools
import json
import os.path
import shutil
import subprocess
from typing import Dict
from typing import Generator
from typing import Optional
from typing import Sequence
from typing import Tuple
from all_repos import cli
from all_repos import git
from all_repos import mapper
from all_repos.config import load_config
def _get_current_state_helper(
    path: str,
) -> Generator[Tuple[str, str], None, None]:
    """Yield ``(checkout_path, remote_url)`` for every git checkout under *path*.

    A directory containing a ``.git`` entry is reported as a repository and is
    not descended into; otherwise its subdirectories are searched recursively.
    A missing *path* yields nothing.
    """
    if not os.path.exists(path):
        return

    entries = list(os.scandir(path))
    if any(entry.name == '.git' for entry in entries):
        yield path, git.remote(path)
        return

    for entry in entries:
        if entry.is_dir():  # pragma: no branch (defensive)
            yield from _get_current_state_helper(os.fspath(entry))
def _get_current_state(path: str) -> Dict[str, str]:
    """Map each checkout's path (relative to *path*) to its remote URL."""
    found = _get_current_state_helper(path)
    return {os.path.relpath(repo, path): remote for repo, remote in found}
def _remove(dest: str, path: str) -> None:
    """Delete the checkout at ``dest/path`` and prune now-empty parent dirs."""
    print(f'Removing {path}')
    shutil.rmtree(os.path.join(dest, path))
    # Walk back up towards `dest`, dropping directories that became empty.
    parent = os.path.dirname(path)
    while parent and not os.listdir(os.path.join(dest, parent)):
        os.rmdir(os.path.join(dest, parent))
        parent = os.path.dirname(parent)
def _init(dest: str, path: str, remote: str) -> None:
    """Create an empty git repository at ``dest/path`` with *remote* as origin."""
    print(f'Initializing {path}')
    repo_dir = os.path.join(dest, path)
    os.makedirs(repo_dir, exist_ok=True)
    subprocess.check_call(('git', 'init', repo_dir))
    add_origin = ('git', '-C', repo_dir, 'remote', 'add', 'origin', remote)
    subprocess.check_output(add_origin)
def _default_branch(remote: str) -> str:
    """Ask *remote* which branch its HEAD symref points at (e.g. ``main``)."""
    cmd = ('git', 'ls-remote', '--exit-code', '--symref', remote, 'HEAD')
    out = subprocess.check_output(cmd, encoding='UTF-8')
    # First line looks like: "ref: refs/heads/<branch>\tHEAD"
    line = out.splitlines()[0]
    prefix, suffix = 'ref: refs/heads/', '\tHEAD'
    assert line.startswith(prefix) and line.endswith(suffix), line
    return line[len(prefix):-len(suffix)]
def _fetch_reset(path: str, *, all_branches: bool) -> None:
    """Fetch the checkout at *path* and hard-reset it to origin's default branch.

    With ``all_branches`` the fetch refspec is widened to all remote heads;
    otherwise only the default branch is tracked.  Failures are reported but
    deliberately not raised, so one broken repo does not abort the whole run.
    """
    def _git(*cmd: str) -> None:
        # Run a git subcommand inside the checkout.
        subprocess.check_call(('git', '-C', path, *cmd))

    try:
        branch = _default_branch(git.remote(path))
        if all_branches:
            _git(
                'config', 'remote.origin.fetch',
                '+refs/heads/*:refs/remotes/origin/*',
            )
        else:
            _git('remote', 'set-branches', 'origin', branch)
        _git('fetch', 'origin')
        _git('checkout', branch)
        _git('reset', '--hard', f'origin/{branch}')
    except subprocess.CalledProcessError:
        # TODO: color / tty
        print(f'Error fetching {path}')
def main(argv: Optional[Sequence[str]] = None) -> int:
    """Clone/update all configured repositories into ``config.output_dir``.

    Removes checkouts that no longer match the include/exclude filters,
    initializes new ones, then fetch-resets everything in parallel.
    Returns 0 (process exit code).
    """
    parser = argparse.ArgumentParser(
        description=(
            'Clone all the repositories into the `output_dir`. If '
            'run again, this command will update existing repositories.'
        ),
        usage='%(prog)s [options]',
    )
    cli.add_common_args(parser)
    cli.add_jobs_arg(parser)
    args = parser.parse_args(argv)

    config = load_config(args.config_filename)

    repos = config.list_repos(config.source_settings)
    # Keep only repos matching the include pattern and not the exclude one.
    repos_filtered = {
        k: v for k, v in repos.items()
        if config.include.search(k) and not config.exclude.search(k)
    }

    # If the previous `repos.json` / `repos_filtered.json` files exist
    # remove them.
    for path in (config.repos_path, config.repos_filtered_path):
        if os.path.exists(path):
            os.remove(path)

    # Diff the on-disk checkouts against the desired set.
    current_repos = set(_get_current_state(config.output_dir).items())
    filtered_repos = set(repos_filtered.items())

    # Remove old no longer cloned repositories
    for path, _ in current_repos - filtered_repos:
        _remove(config.output_dir, path)
    for path, remote in filtered_repos - current_repos:
        _init(config.output_dir, path, remote)

    # Fetch/reset every checkout, `args.jobs` at a time.
    fn = functools.partial(_fetch_reset, all_branches=config.all_branches)
    todo = [os.path.join(config.output_dir, p) for p in repos_filtered]
    with mapper.thread_mapper(args.jobs) as do_map:
        mapper.exhaust(do_map(fn, todo))

    # write these last
    os.makedirs(config.output_dir, exist_ok=True)
    with open(config.repos_path, 'w') as f:
        f.write(json.dumps(repos))
    with open(config.repos_filtered_path, 'w') as f:
        f.write(json.dumps(repos_filtered))
    return 0
if __name__ == '__main__':
    # `exit()` comes from the `site` module and is meant for interactive use;
    # it may be absent under `python -S`.  Raising SystemExit is the portable
    # way to exit with main()'s return code.
    raise SystemExit(main())
| 30.544218
| 79
| 0.641203
|
4a129764ca3f6b4da14be65bc0e096cb7f5027d4
| 1,684
|
py
|
Python
|
RESTApi/flaskapp/routes.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | null | null | null |
RESTApi/flaskapp/routes.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | 32
|
2018-06-19T15:27:04.000Z
|
2018-09-30T20:17:23.000Z
|
RESTApi/flaskapp/routes.py
|
Peacecake/HoloMu
|
98b422b226c2274e6d7e96df31724b0d2abd8ebb
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, request, Response
from db import get_db
from db import init_db
from uploader import Uploader
from jsonParser import JsonParser
import json
from . import trainAndTest
from . import label_image
import os
from collections import Counter
import recommend
# Blueprint collecting all HTTP routes of this Flask application.
bp = Blueprint("routes", __name__)
@bp.route("/setup")
def setup():
    # One-time bootstrap endpoint: (re)creates the database schema and trains
    # the image-recognition model.
    # NOTE(review): this is a destructive operation exposed on a plain GET
    # route with no authentication — confirm that is intentional.
    init_db()
    trainAndTest.train()
    return "success"
@bp.route("/recognize", methods=["POST"])
def recognize_image():
    """Accept an uploaded image, classify it and return the matching exhibit.

    Returns the exhibit JSON on success, or a 500 response when the upload
    fails or no exhibit is recognized.
    """
    upl = Uploader(request.files, "file")
    upload_result = upl.upload()
    if upload_result is not True:
        # upload() returns an error-message string on failure.
        return Response(upload_result, status=500)
    # Image recognition
    objectId = trainAndTest.trainOrTest(upl.uploaded_file)
    # BUG FIX: the original used `objectId is ""` — identity comparison on a
    # string is unreliable (and a SyntaxWarning on modern Python).
    if objectId == "":
        # Also clean up the uploaded file on the failure path (it was
        # previously leaked).
        upl.delete_file()
        return Response("Bild nicht erkannt", status=500)
    jp = JsonParser(os.path.join(os.getcwd(), "flaskapp", "data.JSON"))
    jp.parse()
    exh = jp.get_item_by_id(objectId)
    upl.delete_file()
    return exh
@bp.route("/recommend/<string:watched_exhibit_id>")
def recommend_exhibit(watched_exhibit_id):
    # Look up the watched exhibit's name/category, persist the computed
    # recommendation scores, and answer with a suggested exhibit.
    db = get_db()
    jp = JsonParser(os.path.join(os.getcwd(), "flaskapp", "data.JSON"))
    jp.parse()
    watched_name = jp.get_value_by_key(watched_exhibit_id, "name")
    watched_cat = jp.get_value_by_key(watched_exhibit_id, "category")
    recommendData = recommend.calcRecommendation(watched_name, watched_cat)
    # Parameterized query — safe against SQL injection.
    db.execute("INSERT INTO recommend (data) VALUES (?)", (json.dumps(recommendData),))
    db.commit()
    recommendedExhibit = recommend.recommendExhibit(watched_name)
    return "Vielleicht interessiert Sie das besonders: " + recommendedExhibit
| 30.618182
| 88
| 0.7019
|
4a1297e5bd53afa74794b320745a1f3460daaca8
| 681
|
py
|
Python
|
utils/utils.py
|
Irvinfaith/numpy_neural_network
|
46c86884611d0174e6ab96eb70d1f4ebec8caafb
|
[
"MIT"
] | 26
|
2021-01-12T03:00:21.000Z
|
2022-01-22T10:36:40.000Z
|
utils/utils.py
|
Irvinfaith/numpy_neural_network
|
46c86884611d0174e6ab96eb70d1f4ebec8caafb
|
[
"MIT"
] | null | null | null |
utils/utils.py
|
Irvinfaith/numpy_neural_network
|
46c86884611d0174e6ab96eb70d1f4ebec8caafb
|
[
"MIT"
] | 1
|
2021-01-13T06:47:58.000Z
|
2021-01-13T06:47:58.000Z
|
# -*- coding: utf-8 -*-
"""
Created on 2021/1/12 18:22
@author: Irvinfaith
@email: Irvinfaith@hotmail.com
"""
import pandas as pd
import numpy as np
def series_to_array(x):
    """Convert pandas input to a ``numpy.ndarray``.

    Parameters
    ----------
    x : pandas.DataFrame, pandas.Series or numpy.ndarray
        Input data.

    Returns
    -------
    numpy.ndarray
        ``x`` converted to an array; an ndarray input is returned unchanged.

    Raises
    ------
    TypeError
        If ``x`` is of any other type.
    """
    # Idiom: a single isinstance call with a tuple replaces the chained `or`.
    if isinstance(x, (pd.DataFrame, pd.Series)):
        return np.array(x)
    if isinstance(x, np.ndarray):
        return x
    raise TypeError(f"Input type has to be `pandas.dataframe` or `numpy.ndarray`, your type is `{type(x)}`")
def array1d_to_onehot(y, num_classes):
if not isinstance(y, np.ndarray):
y = np.array(y)
trans_y = np.zeros((y.shape[0], num_classes))
for index, _ in enumerate(y):
trans_y[index][_] += 1
return trans_y
| 23.482759
| 112
| 0.640235
|
4a1298162da43ec54b2bcd1f8de73764ffe8eb48
| 7,393
|
py
|
Python
|
tests/integration/test_recovery_replica/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 8,629
|
2016-06-14T21:03:01.000Z
|
2019-09-23T07:46:38.000Z
|
tests/integration/test_recovery_replica/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 4,335
|
2016-06-15T12:58:31.000Z
|
2019-09-23T11:18:43.000Z
|
tests/integration/test_recovery_replica/test.py
|
chalice19/ClickHouse
|
2f38e7bc5c2113935ab86260439bb543a1737291
|
[
"Apache-2.0"
] | 1,700
|
2016-06-15T09:25:11.000Z
|
2019-09-23T11:16:38.000Z
|
import time
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
# Aggressive replication-log cleanup so a detached replica quickly falls
# behind the kept log window and gets marked "lost" during the tests.
SETTINGS = "SETTINGS min_replicated_logs_to_keep=3, max_replicated_logs_to_keep=5, cleanup_delay_period=0, cleanup_delay_period_random_add=0"
def fill_nodes(nodes):
    # Create the replicated test table on every node; '{replica}' is filled
    # with the node name so each instance registers as a distinct replica.
    for node in nodes:
        node.query(
            """
                CREATE TABLE test_table(date Date, id UInt32)
                ENGINE = ReplicatedMergeTree('/clickhouse/tables/test/replicated', '{replica}') ORDER BY id PARTITION BY toYYYYMM(date)
                {settings};
            """.format(
                replica=node.name, settings=SETTINGS
            )
        )
# Three-node ClickHouse cluster sharing one ZooKeeper; `nodes` lets the
# helpers below address all replicas at once.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance("node1", with_zookeeper=True)
node2 = cluster.add_instance("node2", with_zookeeper=True)
node3 = cluster.add_instance("node3", with_zookeeper=True)
nodes = [node1, node2, node3]
def sync_replicas(table):
    # Block until every replica has replayed its replication queue for `table`.
    query = "SYSTEM SYNC REPLICA {}".format(table)
    for replica in nodes:
        replica.query(query)
@pytest.fixture(scope="module")
def start_cluster():
    """Module-scoped fixture: start the cluster and create the test tables.

    The cluster is always shut down on teardown, even when startup fails.
    """
    try:
        cluster.start()
        fill_nodes([node1, node2, node3])

        yield cluster
    except Exception as ex:
        # Log, then re-raise so a broken cluster fails the tests loudly.
        # The original swallowed the exception with a bare print, which left
        # pytest to fail later with an opaque "fixture did not yield" error.
        print(ex)
        raise
    finally:
        cluster.shutdown()
def test_recovery(start_cluster):
    """A detached replica that fell behind the log window is cloned back."""
    node1.query("INSERT INTO test_table VALUES (1, 0)")
    sync_replicas("test_table")
    node2.query("DETACH TABLE test_table")

    # More inserts than max_replicated_logs_to_keep, so node2's log position
    # is purged while it is detached.
    for i in range(1, 11):
        node1.query("INSERT INTO test_table VALUES (1, {})".format(i))

    node2.query_with_retry(
        "ATTACH TABLE test_table",
        check_callback=lambda x: len(node2.query("select * from test_table")) > 0,
    )

    assert_eq_with_retry(
        node2,
        "SELECT count(*) FROM test_table",
        node1.query("SELECT count(*) FROM test_table"),
    )

    # Some healthy replica must have declared node2 lost while it was gone.
    lost_marker = "Will mark replica node2 as lost"
    assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker)

    sync_replicas("test_table")
    for node in nodes:
        assert (
            node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(1)")
            == "11\t55\n"
        )
def test_choose_source_replica(start_cluster):
    """A lost replica clones from the most up-to-date replica (node3)."""
    node3.query("INSERT INTO test_table VALUES (2, 0)")
    sync_replicas("test_table")
    node2.query("DETACH TABLE test_table")
    node1.query(
        "SYSTEM STOP FETCHES test_table"
    )  # node1 will have many entries in queue, so node2 will clone node3

    for i in range(1, 11):
        node3.query("INSERT INTO test_table VALUES (2, {})".format(i))

    node2.query_with_retry(
        "ATTACH TABLE test_table",
        check_callback=lambda x: len(node2.query("select * from test_table")) > 0,
    )

    node1.query("SYSTEM START FETCHES test_table")
    node1.query("SYSTEM SYNC REPLICA test_table")
    node2.query("SYSTEM SYNC REPLICA test_table")

    assert node1.query("SELECT count(*) FROM test_table") == node3.query(
        "SELECT count(*) FROM test_table"
    )
    assert node2.query("SELECT count(*) FROM test_table") == node3.query(
        "SELECT count(*) FROM test_table"
    )

    # node2 must have been marked lost and then cloned node3 specifically.
    lost_marker = "Will mark replica node2 as lost"
    assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker)
    assert node2.contains_in_log("Will mimic node3")

    sync_replicas("test_table")
    for node in nodes:
        assert (
            node.query("SELECT count(), sum(id) FROM test_table WHERE date=toDate(2)")
            == "11\t55\n"
        )
def test_update_metadata(start_cluster):
    """A cloned replica picks up ALTERs (with and without mutations) that
    happened while it was detached, and can itself run ALTERs afterwards."""
    for node in nodes:
        node.query(
            """
            CREATE TABLE update_metadata(key UInt32)
            ENGINE = ReplicatedMergeTree('/test/update_metadata', '{replica}') ORDER BY key PARTITION BY key % 10
            {settings};
            """.format(
                replica=node.name, settings=SETTINGS
            )
        )

    for i in range(1, 11):
        node1.query("INSERT INTO update_metadata VALUES ({})".format(i))

    node2.query("DETACH TABLE update_metadata")
    # alter without mutation
    node1.query("ALTER TABLE update_metadata ADD COLUMN col1 UInt32")

    for i in range(1, 11):
        node1.query(
            "INSERT INTO update_metadata VALUES ({}, {})".format(i * 10, i * 10)
        )

    lost_marker = "Will mark replica node2 as lost"
    assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker)

    node2.query("ATTACH TABLE update_metadata")

    sync_replicas("update_metadata")
    # All replicas must converge on the new schema and data.
    assert node1.query("DESC TABLE update_metadata") == node2.query(
        "DESC TABLE update_metadata"
    )
    assert node1.query("DESC TABLE update_metadata") == node3.query(
        "DESC TABLE update_metadata"
    )

    for node in nodes:
        assert (
            node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata")
            == "20\t605\t550\n"
        )

    node2.query("DETACH TABLE update_metadata")
    # alter with mutation
    node1.query("ALTER TABLE update_metadata DROP COLUMN col1")
    for i in range(1, 11):
        node1.query("INSERT INTO update_metadata VALUES ({})".format(i * 100))

    lost_marker = "Will mark replica node2 as lost"
    assert node1.contains_in_log(lost_marker) or node3.contains_in_log(lost_marker)

    node2.query("ATTACH TABLE update_metadata")
    sync_replicas("update_metadata")
    assert node1.query("DESC TABLE update_metadata") == node2.query(
        "DESC TABLE update_metadata"
    )
    assert node1.query("DESC TABLE update_metadata") == node3.query(
        "DESC TABLE update_metadata"
    )

    # check that it's possible to execute alter on cloned replica
    node2.query("ALTER TABLE update_metadata ADD COLUMN col1 UInt32")
    sync_replicas("update_metadata")
    for node in nodes:
        assert (
            node.query("SELECT count(), sum(key), sum(col1) FROM update_metadata")
            == "30\t6105\t0\n"
        )

    # more complex case with multiple alters
    node2.query("TRUNCATE TABLE update_metadata")
    for i in range(1, 11):
        node1.query("INSERT INTO update_metadata VALUES ({}, {})".format(i, i))

    # The following alters hang because of "No active replica has part ... or covering part"
    # node2.query("SYSTEM STOP REPLICATED SENDS update_metadata")
    # node2.query("INSERT INTO update_metadata VALUES (42, 42)") # this part will be lost
    node2.query("DETACH TABLE update_metadata")

    node1.query("ALTER TABLE update_metadata MODIFY COLUMN col1 String")
    node1.query("ALTER TABLE update_metadata ADD COLUMN col2 INT")
    for i in range(1, 11):
        node3.query(
            "INSERT INTO update_metadata VALUES ({}, '{}', {})".format(
                i * 10, i * 10, i * 10
            )
        )
    node1.query("ALTER TABLE update_metadata DROP COLUMN col1")
    node1.query("ALTER TABLE update_metadata ADD COLUMN col3 Date")

    node2.query("ATTACH TABLE update_metadata")
    sync_replicas("update_metadata")
    assert node1.query("DESC TABLE update_metadata") == node2.query(
        "DESC TABLE update_metadata"
    )
    assert node1.query("DESC TABLE update_metadata") == node3.query(
        "DESC TABLE update_metadata"
    )
    for node in nodes:
        assert (
            node.query("SELECT count(), sum(key), sum(col2) FROM update_metadata")
            == "20\t605\t550\n"
        )
| 33.452489
| 141
| 0.649533
|
4a1298555fd42c0959b7b1b9fd05bb39ebe27c31
| 769
|
py
|
Python
|
device/management/commands/add_device.py
|
sharmapacific/homeAutomation
|
77f7f415ff9813ad86e1f93d9a405bd221f2abba
|
[
"MIT"
] | null | null | null |
device/management/commands/add_device.py
|
sharmapacific/homeAutomation
|
77f7f415ff9813ad86e1f93d9a405bd221f2abba
|
[
"MIT"
] | null | null | null |
device/management/commands/add_device.py
|
sharmapacific/homeAutomation
|
77f7f415ff9813ad86e1f93d9a405bd221f2abba
|
[
"MIT"
] | null | null | null |
from device.models import DeviceInfo
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that registers a new device by name."""

    help = 'Add New Device'

    def add_arguments(self, parser):
        # Positional name of the device to create.
        parser.add_argument('device',
            type=str,
            help='Indicates the device to be added'
        )

    def handle(self, *args, **kwargs):
        """Create the device unless one with the same name already exists.

        The returned string is printed by Django's command runner.
        """
        data = {
            'name': kwargs.get('device')
        }
        if self.check_duplicate(data):
            return 'The Device detail is already presented.'
        DeviceInfo.objects.create(**data)
        return 'The Device has been added.'

    def check_duplicate(self, data):
        # Return a real boolean; the original returned True or fell through
        # to an implicit None.  Also fixes the "to be add" typo in the help
        # text above.
        return DeviceInfo.objects.filter(**data).exists()
| 65
| 0.56827
|
4a12987bda4533e7a2a2d3f1958a74cb01e121d7
| 1,432
|
py
|
Python
|
simsurvey_tools.py
|
sPaMFouR/simsurvey-examples
|
ef034f729a5dd74e4bdd9ae5052c69780d917cbe
|
[
"BSD-3-Clause"
] | null | null | null |
simsurvey_tools.py
|
sPaMFouR/simsurvey-examples
|
ef034f729a5dd74e4bdd9ae5052c69780d917cbe
|
[
"BSD-3-Clause"
] | null | null | null |
simsurvey_tools.py
|
sPaMFouR/simsurvey-examples
|
ef034f729a5dd74e4bdd9ae5052c69780d917cbe
|
[
"BSD-3-Clause"
] | 1
|
2021-04-29T07:31:43.000Z
|
2021-04-29T07:31:43.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import sncosmo
def load_ztf_fields(filename='data/ZTF_Fields.txt', mwebv=False, galactic=False):
    """Load the ZTF field definitions from an ASCII table.

    Parameters
    ----------
    filename: [str]
        File name of the ASCII file containing the field definitions
    mwebv: [bool]
        Include the Milky Way E(B-V) from the file in the output
    galactic: [bool]
        Include the galactic coordinates (l, b) in the output

    Return
    ------
    Dictionary of np.arrays with the field coordinates, IDs (and extinction)
    """
    table = np.genfromtxt(filename, comments='%')

    fields = {
        'field_id': np.array(table[:, 0], dtype=int),
        'ra': table[:, 1],
        'dec': table[:, 2],
    }
    if mwebv:
        fields['mwebv'] = table[:, 3]
    if galactic:
        fields['l'] = table[:, 4]
        fields['b'] = table[:, 5]

    return fields
def load_ztf_ccds(filename='data/ZTF_corners.txt', num_segs=16):
    """Load the ZTF CCD corner definitions.

    Parameters
    ----------
    filename: [str]
        ASCII file with one corner per line (x y columns, one header line);
        the file must contain 4 corners per segment
    num_segs: [int]
        Number of CCD segments to read

    Return
    ------
    List of (4, 2) np.arrays, one per CCD segment
    """
    # BUG FIX: the original ignored the `filename` argument and always read
    # the hard-coded 'data/ZTF_corners.txt'.
    ccd_corners = np.genfromtxt(filename, skip_header=1)
    ccds = [ccd_corners[4 * k:4 * k + 4, :2] for k in range(num_segs)]
    return ccds
def load_ztf_filters():
    """Register the ZTF g/r bandpasses with sncosmo."""
    bands = {
        'ztfr' : 'data/ztfr_eff.txt',
        'ztfg' : 'data/ztfg_eff.txt',
    }

    for bandname, fname in bands.items():
        # Two-column file: wavelength, transmission.
        wave_trans = np.loadtxt(fname)
        band = sncosmo.Bandpass(wave_trans[:, 0], wave_trans[:, 1], name=bandname)
        sncosmo.registry.register(band)
| 25.122807
| 81
| 0.585196
|
4a1298c58e5a70d12ce9008e24aea3eec1065e0a
| 4,365
|
py
|
Python
|
code/ARAX/ARAXQuery/Overlay/GraphSage_train/py_scripts/generate_random_walk.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 31
|
2018-03-05T20:01:10.000Z
|
2022-02-01T03:31:22.000Z
|
code/ARAX/ARAXQuery/Overlay/GraphSage_train/py_scripts/generate_random_walk.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 1,774
|
2018-03-06T01:55:03.000Z
|
2022-03-31T03:09:04.000Z
|
code/ARAX/ARAXQuery/Overlay/GraphSage_train/py_scripts/generate_random_walk.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 19
|
2018-05-10T00:43:19.000Z
|
2022-03-08T19:26:16.000Z
|
## This script is used to generate random walk file (eg. walks.txt, please see https://github.com/williamleif/GraphSAGE
# for more details) via batch by batch for running Graphsage
from __future__ import print_function
import json
import numpy as np
import pandas as pd
import random
import os
import sys
import argparse
from networkx.readwrite import json_graph
import multiprocessing
from datetime import datetime
from itertools import chain
# Command line interface; the walk parameters default to the usual GraphSAGE
# reference settings.
parser = argparse.ArgumentParser()
parser.add_argument("--Gjson", type=str, help="The path of G.json file")
parser.add_argument("-l", "--walk_length", type=int, help="Random walk length", default=200)
parser.add_argument("-r", "--number_of_walks", type=int, help="Number of random walks per node", default=10)
parser.add_argument("-b", "--batch_size", type=int, help="Size of batch for each run", default=100000)
parser.add_argument("-p", "--process", type=int, help="Number of processes to be used", default=-1)
parser.add_argument("-o", "--output", type=str, help="The path of output folder", default="/graphsage_input")
args = parser.parse_args()
## setting functions compatible with parallel running
def run_random_walks(this):
    """Run random walks from one node of the module-global graph ``G``.

    `this` is a tuple ``(node, num_walks, walk_len)`` so the function can be
    mapped directly over a ``multiprocessing.Pool``.

    Returns a list of ``(start_node, visited_node)`` co-occurrence pairs.
    The start node is never paired with itself, and isolated nodes yield an
    empty list.
    """
    node, num_walks, walk_len = this
    pairs = []
    # Nothing to walk from an isolated node (the original had a no-op
    # `pairs = pairs` branch here).
    if G.degree(node) == 0:
        return pairs
    for _ in range(num_walks):
        curr_node = node
        for _ in range(walk_len):
            # list(...) instead of a copying comprehension (same behavior).
            next_node = random.choice(list(G.neighbors(curr_node)))
            # self co-occurrences are useless
            if curr_node != node:
                pairs.append((node, curr_node))
            curr_node = next_node
    return pairs
if __name__ == "__main__":
    # change to the current path
    current_path = os.path.split(os.path.realpath(__file__))[0]
    os.chdir(current_path)

    # check the input arguments
    if args.Gjson == None or not os.path.exists(os.path.realpath(args.Gjson)):
        sys.exit('Error Occurred! Please provide the correct path of your G.json file.')
    else:
        Gjson = args.Gjson

    # setting the path of output directory
    if args.output == "/graphsage_input":
        outpath = current_path + '/graphsage_input'
    else:
        outpath = os.path.realpath(args.output)

    #create output directory
    # NOTE(review): the bare except also hides unexpected failures (not just
    # "directory exists") — consider narrowing to FileExistsError.
    try:
        os.mkdir(outpath)
    except:
        error_type, error, _ = sys.exc_info()
        print(f'Something wrong with creating output directory! Error Message is as follow:')
        print(f'{error_type} {error}')

    #read the graph file
    with open(Gjson,'r') as input_file:
        G_data = json.load(input_file)

    # transform to networkx graph format
    G = json_graph.node_link_graph(G_data)
    # pull out the training nodes and generate the training subgraph
    # (nodes flagged "val" or "test" are excluded from walk generation).
    G_nodes = [n for n in G.nodes() if not G.nodes[n]["val"] and not G.nodes[n]["test"]]
    G = G.subgraph(G_nodes)
    del G_data ## delete variable to release ram

    # set up the batches
    batch =list(range(0,len(G_nodes),args.batch_size))
    batch.append(len(G_nodes))
    print(f'Total training data: {len(G_nodes)}')
    print(f'The number of nodes in training graph: {len(G.nodes)}')
    print(f'total batch: {len(batch)-1}')

    ## run each batch in parallel
    for i in range(len(batch)):
        if((i+1)<len(batch)):
            print(f'Here is batch{i+1}')
            start = batch[i]
            end = batch[i+1]
            # -1 means "use all available CPUs" (Pool's default).
            if args.process == -1:
                with multiprocessing.Pool() as executor:
                    out_iters = [(node, args.number_of_walks, args.walk_length) for node in G_nodes[start:end]]
                    out_res = [elem for elem in chain.from_iterable(executor.map(run_random_walks, out_iters))]
            else:
                with multiprocessing.Pool(processes=args.process) as executor:
                    out_iters = [(node, args.number_of_walks, args.walk_length) for node in G_nodes[start:end]]
                    out_res = [elem for elem in chain.from_iterable(executor.map(run_random_walks, out_iters))]

            # Append this batch's pairs; a separating newline is only needed
            # after the first batch.
            with open(outpath+'/data-walks.txt', "a") as fp:
                if i==0:
                    fp.write("\n".join([str(p[0]) + "\t" + str(p[1]) for p in out_res]))
                else:
                    fp.write("\n")
                    fp.write("\n".join([str(p[0]) + "\t" + str(p[1]) for p in out_res]))
| 38.289474
| 119
| 0.636426
|
4a12995cb97f792733494fb9006a3ed902d5453b
| 5,228
|
py
|
Python
|
temboo/core/Library/SendGrid/WebAPI/Statistics/GetAllTimeCategoryTotals.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/SendGrid/WebAPI/Statistics/GetAllTimeCategoryTotals.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/SendGrid/WebAPI/Statistics/GetAllTimeCategoryTotals.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GetAllTimeCategoryTotals
# Obtain statistics by specified categories.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetAllTimeCategoryTotals(Choreography):

    def __init__(self, temboo_session):
        """
        Create a new instance of the GetAllTimeCategoryTotals Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetAllTimeCategoryTotals, self).__init__(temboo_session, '/Library/SendGrid/WebAPI/Statistics/GetAllTimeCategoryTotals')

    def new_input_set(self):
        # Factory for the choreo-specific input set.
        return GetAllTimeCategoryTotalsInputSet()

    def _make_result_set(self, result, path):
        # Internal factory used by the base class after execution.
        return GetAllTimeCategoryTotalsResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Internal factory wrapping an in-flight execution.
        return GetAllTimeCategoryTotalsChoreographyExecution(session, exec_id, path)
class GetAllTimeCategoryTotalsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetAllTimeCategoryTotals
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Generated code: each setter forwards one named input to the base class.
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('APIKey', value)
    def set_APIUser(self, value):
        """
        Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('APIUser', value)
    def set_Aggregate(self, value):
        """
        Set the value of the Aggregate input for this Choreo. ((required, integer) Retrieve category statistics. Default is set to 1.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('Aggregate', value)
    def set_Category(self, value):
        """
        Set the value of the Category input for this Choreo. ((required, string) Enter a category for which statistics will be retrieved. It must be an existing category that has statistics. If the category entered does not exist, an empty result set will be returned.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('Category', value)
    def set_Days(self, value):
        """
        Set the value of the Days input for this Choreo. ((optional, integer) The number of days (greater than 0) for which block data will be retrieved. Note that you can use either the days parameter or the start_date and end_date parameter.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('Days', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('ResponseFormat', value)
    def set_StartDate(self, value):
        """
        Set the value of the StartDate input for this Choreo. ((optional, string) The start of the date range for which blocks are to be retireved. The specified date must be in YYYY-MM-DD format, and must be earlier than the EndDate variable value. Use this ,or Days.)
        """
        super(GetAllTimeCategoryTotalsInputSet, self)._set_input('StartDate', value)
class GetAllTimeCategoryTotalsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetAllTimeCategoryTotals Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; kept
        # unchanged because this file is generator-produced.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
        """
        return self._output.get('Response', None)
class GetAllTimeCategoryTotalsChoreographyExecution(ChoreographyExecution):
    # Thin execution wrapper; only customizes which ResultSet class is built.
    def _make_result_set(self, response, path):
        return GetAllTimeCategoryTotalsResultSet(response, path)
| 45.859649
| 269
| 0.702946
|
4a1299e11ba4be2b7b316d5e954b0ae566c229c9
| 39,577
|
py
|
Python
|
cvxpy/problems/problem.py
|
dberkens/cvxpy
|
b639e4a691d4986b9952de268282c9ece570411b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/problems/problem.py
|
dberkens/cvxpy
|
b639e4a691d4986b9952de268282c9ece570411b
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/problems/problem.py
|
dberkens/cvxpy
|
b639e4a691d4986b9952de268282c9ece570411b
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-04-12T05:17:18.000Z
|
2020-04-12T05:17:18.000Z
|
"""
Copyright 2013 Steven Diamond, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy import error
from cvxpy.problems.objective import Minimize, Maximize
from cvxpy.reductions.chain import Chain
from cvxpy.reductions.dqcp2dcp import dqcp2dcp
from cvxpy.reductions.eval_params import EvalParams
from cvxpy.reductions.flip_objective import FlipObjective
from cvxpy.reductions.solvers.solving_chain import construct_solving_chain
from cvxpy.interface.matrix_utilities import scalar_value
from cvxpy.reductions.solvers import bisection
from cvxpy.reductions.solvers import defines as slv_def
from cvxpy.utilities.deterministic import unique_list
import cvxpy.utilities.performance_utils as perf
from cvxpy.constraints import Equality, Inequality, NonPos, Zero
import cvxpy.utilities as u
from collections import namedtuple
import numpy as np
# Container for the outcome of a solve: the optimal value, the solver
# status string, and mappings of primal/dual variable values.
SolveResult = namedtuple(
    'SolveResult',
    ['opt_value', 'status', 'primal_values', 'dual_values'])
class Cache(object):
    """Memo for the compiled form of a problem.

    Stores the solving chain, the parametrized cone program, and the
    inverse data produced the last time the problem was compiled, keyed
    by the (solver, gp) pair used for that compilation.
    """

    def __init__(self):
        # A fresh cache starts out empty, exactly as if invalidated.
        self.invalidate()

    def invalidate(self):
        """Drop every cached compilation artifact."""
        self.key = None
        self.solving_chain = None
        self.param_cone_prog = None
        self.inverse_data = None

    def make_key(self, solver, gp):
        """Build the cache key for a (solver, gp) compilation request."""
        return (solver, gp)
class Problem(u.Canonical):
"""A convex optimization problem.
Problems are immutable, save for modification through the specification
of :class:`~cvxpy.expressions.constants.parameters.Parameter`
Parameters
----------
objective : Minimize or Maximize
The problem's objective.
constraints : list
The constraints on the problem variables.
"""
# The solve methods available.
REGISTERED_SOLVE_METHODS = {}
    def __init__(self, objective, constraints=None):
        """Initialize a problem from an objective and a constraint list.

        Parameters
        ----------
        objective : Minimize or Maximize
            The problem's objective.
        constraints : list, optional
            Constraints on the problem variables (defaults to none).

        Raises
        ------
        cvxpy.error.DCPError
            If `objective` is not a Minimize or Maximize instance.
        """
        if constraints is None:
            constraints = []
        # Check that objective is Minimize or Maximize.
        if not isinstance(objective, (Minimize, Maximize)):
            raise error.DCPError("Problem objective must be Minimize or Maximize.")
        # Constraints and objective are immutable.
        self._objective = objective
        self._constraints = [c for c in constraints]
        # Solve-state fields, populated by unpack()/unpack_results().
        self._value = None
        self._status = None
        self._solution = None
        # Compilation cache (see Cache) plus per-solver scratch storage.
        self._cache = Cache()
        self._solver_cache = {}
        # Information about the shape of the problem and its constituent parts
        self._size_metrics = None
        # Benchmarks reported by the solver:
        self._solver_stats = None
        self.args = [self._objective, self._constraints]
@property
def value(self):
"""float : The value from the last time the problem was solved
(or None if not solved).
"""
if self._value is None:
return None
else:
return scalar_value(self._value)
    @property
    def status(self):
        """str : The status from the last time the problem was solved; one
        of optimal, infeasible, or unbounded (with or without
        suffix inaccurate). None before the first solve.
        """
        return self._status
    @property
    def solution(self):
        """Solution : The solution from the last time the problem was solved,
        or None before the first solve.
        """
        return self._solution
    @property
    def objective(self):
        """Minimize or Maximize : The problem's objective.

        Note that the objective cannot be reassigned after creation,
        and modifying the objective after creation will result in
        undefined behavior.
        """
        return self._objective
@property
def constraints(self):
"""A shallow copy of the problem's constraints.
Note that constraints cannot be reassigned, appended to, or otherwise
modified after creation, except through parameters.
"""
return self._constraints[:]
@perf.compute_once
def is_dcp(self):
"""Does the problem satisfy DCP rules?
"""
return all(
expr.is_dcp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dpp(self):
"""Does the problem satisfy DPP rules?
"""
return all(
expr.is_dpp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dgp(self):
"""Does the problem satisfy DGP rules?
"""
return all(
expr.is_dgp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_dqcp(self):
"""Does the problem satisfy the DQCP rules?
"""
return all(
expr.is_dqcp() for expr in self.constraints + [self.objective])
@perf.compute_once
def is_qp(self):
"""Is problem a quadratic program?
"""
for c in self.constraints:
if not (isinstance(c, (Equality, Zero)) or c.args[0].is_pwl()):
return False
for var in self.variables():
if var.is_psd() or var.is_nsd():
return False
return (self.is_dcp() and self.objective.args[0].is_qpwa())
    @perf.compute_once
    def is_mixed_integer(self):
        """Does the problem contain any boolean or integer variables?"""
        return any(v.attributes['boolean'] or v.attributes['integer']
                   for v in self.variables())
@perf.compute_once
def variables(self):
"""Accessor method for variables.
Returns
-------
list of :class:`~cvxpy.expressions.variable.Variable`
A list of the variables in the problem.
"""
vars_ = self.objective.variables()
for constr in self.constraints:
vars_ += constr.variables()
return unique_list(vars_)
@perf.compute_once
def parameters(self):
"""Accessor method for parameters.
Returns
-------
list of :class:`~cvxpy.expressions.constants.parameter.Parameter`
A list of the parameters in the problem.
"""
params = self.objective.parameters()
for constr in self.constraints:
params += constr.parameters()
return unique_list(params)
@perf.compute_once
def constants(self):
"""Accessor method for parameters.
Returns
-------
list of :class:`~cvxpy.expressions.constants.constant.Constant`
A list of the constants in the problem.
"""
const_dict = {}
constants_ = self.objective.constants()
for constr in self.constraints:
constants_ += constr.constants()
# Note that numpy matrices are not hashable, so we use the built-in
# function "id"
const_dict = {id(constant): constant for constant in constants_}
return list(const_dict.values())
def atoms(self):
"""Accessor method for atoms.
Returns
-------
list of :class:`~cvxpy.atoms.Atom`
A list of the atom types in the problem; note that this list
contains classes, not instances.
"""
atoms = self.objective.atoms()
for constr in self.constraints:
atoms += constr.atoms()
return unique_list(atoms)
    @property
    def size_metrics(self):
        """:class:`~cvxpy.problems.problem.SizeMetrics` : Information about the problem's size.
        """
        # Computed lazily and memoized on first access.
        if self._size_metrics is None:
            self._size_metrics = SizeMetrics(self)
        return self._size_metrics
    @property
    def solver_stats(self):
        """:class:`~cvxpy.problems.problem.SolverStats` : Information returned by the solver.

        None until the problem has been solved via unpack_results().
        """
        return self._solver_stats
def solve(self, *args, **kwargs):
"""Solves the problem using the specified method.
Populates the :code:`status` and :code:`value` attributes on the
problem object as a side-effect.
Parameters
----------
solver : str, optional
The solver to use. For example, 'ECOS', 'SCS', or 'OSQP'.
verbose : bool, optional
Overrides the default of hiding solver output.
gp : bool, optional
If True, parses the problem as a disciplined geometric program
instead of a disciplined convex program.
qcp : bool, optional
If True, parses the problem as a disciplined quasiconvex program
instead of a disciplined convex program.
requires_grad : bool, optional
Makes it possible to compute gradients with respect to
parameters by calling `.backward()` after solving, or to compute
perturbations to the variables by calling `.derivative()`. When
True, the solver must be SCS, and gp, qcp must be false; a DPPError is
thrown when problem is not DPP.
method : function, optional
A custom solve method to use.
kwargs : keywords, optional
Additional solver specific arguments. See Notes below.
Notes
------
CVXPY interfaces with a wide range of solvers; the algorithms used by these solvers
have parameters relating to stopping criteria, and strategies to improve solution quality.
There is no one choice of parameters which is perfect for every problem. If you are not
getting satisfactory results from a solver, you can try changing its parameters. The
exact way this is done depends on the specific solver. Here are some examples:
prob.solve(solver='ECOS', abstol=1e-6)
prob.solve(solver='OSQP', max_iter=10000).
mydict = {"MSK_DPAR_INTPNT_CO_TOL_NEAR_REL": 10}
prob.solve(solver='MOSEK', mosek_params=mydict).
You should refer to CVXPY's web documentation for details on how to pass solver
parameters.
Returns
-------
float
The optimal value for the problem, or a string indicating
why the problem could not be solved.
Raises
------
cvxpy.error.DCPError
Raised if the problem is not DCP and `gp` is False.
cvxpy.error.DGPError
Raised if the problem is not DGP and `gp` is True.
cvxpy.error.SolverError
Raised if no suitable solver exists among the installed solvers,
or if an unanticipated error is encountered.
"""
func_name = kwargs.pop("method", None)
if func_name is not None:
solve_func = Problem.REGISTERED_SOLVE_METHODS[func_name]
else:
solve_func = Problem._solve
return solve_func(self, *args, **kwargs)
    @classmethod
    def register_solve(cls, name, func):
        """Adds a solve method to the Problem class.

        Registered methods are dispatched by `solve(method=name)`.

        Parameters
        ----------
        name : str
            The keyword for the method.
        func : function
            The function that executes the solve method. This function must
            take as its first argument the problem instance to solve.
        """
        cls.REGISTERED_SOLVE_METHODS[name] = func
def get_problem_data(self, solver, gp=False):
"""Returns the problem data used in the call to the solver.
When a problem is solved, CVXPY creates a chain of reductions enclosed
in a :class:`~cvxpy.reductions.solvers.solving_chain.SolvingChain`,
and compiles it to some low-level representation that is
compatible with the targeted solver. This method returns that low-level
representation.
For some solving chains, this low-level representation is a dictionary
that contains exactly those arguments that were supplied to the solver;
however, for other solving chains, the data is an intermediate
representation that is compiled even further by the solver interfaces.
A solution to the equivalent low-level problem can be obtained via the
data by invoking the `solve_via_data` method of the returned solving
chain, a thin wrapper around the code external to CVXPY that further
processes and solves the problem. Invoke the unpack_results method
to recover a solution to the original problem.
For example:
::
objective = ...
constraints = ...
problem = cp.Problem(objective, constraints)
data, chain, inverse_data = problem.get_problem_data(cp.SCS)
# calls SCS using `data`
soln = chain.solve_via_data(problem, data)
# unpacks the solution returned by SCS into `problem`
problem.unpack_results(soln, chain, inverse_data)
Alternatively, the `data` dictionary returned by this method
contains enough information to bypass CVXPY and call the solver
directly.
For example:
::
problem = cp.Problem(objective, constraints)
data, _, _ = problem.get_problem_data(cp.SCS)
import scs
probdata = {
'A': data['A'],
'b': data['b'],
'c': data['c'],
}
cone_dims = data['dims']
cones = {
"f": cone_dims.zero,
"l": cone_dims.nonpos,
"q": cone_dims.soc,
"ep": cone_dims.exp,
"s": cone_dims.psd,
}
soln = scs.solve(data, cones)
The structure of the data dict that CVXPY returns depends on the
solver. For details, consult the solver interfaces in
`cvxpy/reductions/solvers`.
Parameters
----------
solver : str
The solver the problem data is for.
gp : bool, optional
If True, then parses the problem as a disciplined geometric program
instead of a disciplined convex program.
Returns
-------
dict or object
lowest level representation of problem
SolvingChain
The solving chain that created the data.
list
The inverse data generated by the chain.
"""
key = self._cache.make_key(solver, gp)
if key != self._cache.key:
self._cache.invalidate()
solving_chain = self._construct_chain(solver=solver, gp=gp)
self._cache.key = key
self._cache.solving_chain = solving_chain
self._solver_cache = {}
else:
solving_chain = self._cache.solving_chain
if self._cache.param_cone_prog is not None:
# fast path, bypasses application of reductions
data, solver_inverse_data = solving_chain.solver.apply(
self._cache.param_cone_prog)
inverse_data = self._cache.inverse_data + [solver_inverse_data]
else:
data, inverse_data = solving_chain.apply(self)
safe_to_cache = (
isinstance(data, dict)
and s.PARAM_PROB in data
and not any(isinstance(reduction, EvalParams)
for reduction in solving_chain.reductions)
)
if safe_to_cache:
self._cache.param_cone_prog = data[s.PARAM_PROB]
# the last datum in inverse_data corresponds to the solver,
# so we shouldn't cache it
self._cache.inverse_data = inverse_data[:-1]
return data, solving_chain, inverse_data
def _find_candidate_solvers(self,
solver=None,
gp=False):
"""
Find candiate solvers for the current problem. If solver
is not None, it checks if the specified solver is compatible
with the problem passed.
Parameters
----------
solver : string
The name of the solver with which to solve the problem. If no
solver is supplied (i.e., if solver is None), then the targeted
solver may be any of those that are installed. If the problem
is variable-free, then this parameter is ignored.
gp : bool
If True, the problem is parsed as a Disciplined Geometric Program
instead of as a Disciplined Convex Program.
Returns
-------
dict
A dictionary of compatible solvers divided in `qp_solvers`
and `conic_solvers`.
Raises
------
cvxpy.error.SolverError
Raised if the problem is not DCP and `gp` is False.
cvxpy.error.DGPError
Raised if the problem is not DGP and `gp` is True.
"""
candidates = {'qp_solvers': [],
'conic_solvers': []}
if solver is not None:
if solver not in slv_def.INSTALLED_SOLVERS:
raise error.SolverError("The solver %s is not installed." % solver)
if solver in slv_def.CONIC_SOLVERS:
candidates['conic_solvers'] += [solver]
if solver in slv_def.QP_SOLVERS:
candidates['qp_solvers'] += [solver]
else:
candidates['qp_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.QP_SOLVERS]
candidates['conic_solvers'] = [s for s in slv_def.INSTALLED_SOLVERS
if s in slv_def.CONIC_SOLVERS]
# If gp we must have only conic solvers
if gp:
if solver is not None and solver not in slv_def.CONIC_SOLVERS:
raise error.SolverError(
"When `gp=True`, `solver` must be a conic solver "
"(received '%s'); try calling " % solver +
" `solve()` with `solver=cvxpy.ECOS`."
)
elif solver is None:
candidates['qp_solvers'] = [] # No QP solvers allowed
if self.is_mixed_integer():
candidates['qp_solvers'] = [
s for s in candidates['qp_solvers']
if slv_def.SOLVER_MAP_QP[s].MIP_CAPABLE]
candidates['conic_solvers'] = [
s for s in candidates['conic_solvers']
if slv_def.SOLVER_MAP_CONIC[s].MIP_CAPABLE]
if not candidates['conic_solvers'] and \
not candidates['qp_solvers']:
raise error.SolverError(
"Problem is mixed-integer, but candidate "
"QP/Conic solvers (%s) are not MIP-capable." %
[candidates['qp_solvers'], candidates['conic_solvers']])
return candidates
def _construct_chain(self, solver=None, gp=False):
"""
Construct the chains required to reformulate and solve the problem.
In particular, this function
# finds the candidate solvers
# constructs the solving chain that performs the
numeric reductions and solves the problem.
Parameters
----------
solver : str, optional
The solver to use. Defaults to ECOS.
gp : bool, optional
If True, the problem is parsed as a Disciplined Geometric Program
instead of as a Disciplined Convex Program.
Returns
-------
A solving chain
"""
candidate_solvers = self._find_candidate_solvers(solver=solver, gp=gp)
return construct_solving_chain(self, candidate_solvers, gp=gp)
def _invalidate_cache(self):
self._cache_key = None
self._solving_chain = None
self._param_cone_prog = None
self._inverse_data = None
def _solve(self,
solver=None,
warm_start=True,
verbose=False,
gp=False, qcp=False, requires_grad=False, **kwargs):
"""Solves a DCP compliant optimization problem.
Saves the values of primal and dual variables in the variable
and constraint objects, respectively.
Parameters
----------
solver : str, optional
The solver to use. Defaults to ECOS.
warm_start : bool, optional
Should the previous solver result be used to warm start?
verbose : bool, optional
Overrides the default of hiding solver output.
gp : bool, optional
If True, parses the problem as a disciplined geometric program.
qcp : bool, optional
If True, parses the problem as a disciplined quasiconvex program.
requires_grad : bool, optional
Makes it possible to compute gradients with respect to
parameters by calling `.backward()` after solving, or to compute
perturbations to the variables by calling `.derivative()`. When
True, the solver must be SCS, and gp, qcp must be False;
a DPPError is thrown when problem is not DPP.
kwargs : dict, optional
A dict of options that will be passed to the specific solver.
In general, these options will override any default settings
imposed by cvxpy.
Returns
-------
float
The optimal value for the problem, or a string indicating
why the problem could not be solved.
"""
for parameter in self.parameters():
if parameter.value is None:
raise error.ParameterError(
"A Parameter (whose name is '%s') does not have a value "
"associated with it; all Parameter objects must have "
"values before solving a problem." % parameter.name())
if requires_grad:
if not self.is_dpp():
raise error.DPPError("Problem is not DPP (when requires_grad "
"is True, problem must be DPP).")
elif gp:
raise ValueError("Cannot compute gradients of DGP problems.")
elif qcp:
raise ValueError("Cannot compute gradients of DQCP problems.")
elif solver is not None and solver not in [s.SCS, s.DIFFCP]:
raise ValueError("When requires_grad is True, the only "
"supported solver is SCS "
"(received %s)." % solver)
elif s.DIFFCP not in slv_def.INSTALLED_SOLVERS:
raise ImportError(
"The Python package diffcp must be installed to "
"differentiate through problems. Please follow the "
"installation instructions at "
"https://github.com/cvxgrp/diffcp")
else:
solver = s.DIFFCP
else:
if gp and qcp:
raise ValueError("At most one of `gp` and `qcp` can be True.")
if qcp and not self.is_dcp():
if not self.is_dqcp():
raise error.DQCPError("The problem is not DQCP.")
reductions = [dqcp2dcp.Dqcp2Dcp()]
if type(self.objective) == Maximize:
reductions = [FlipObjective()] + reductions
chain = Chain(problem=self, reductions=reductions)
soln = bisection.bisect(
chain.reduce(), solver=solver, verbose=verbose, **kwargs)
self.unpack(chain.retrieve(soln))
return self.value
data, solving_chain, inverse_data = self.get_problem_data(solver, gp)
solution = solving_chain.solve_via_data(
self, data, warm_start, verbose, kwargs)
self.unpack_results(solution, solving_chain, inverse_data)
return self.value
def backward(self):
"""Compute the gradient of a solution with respect to parameters.
This method differentiates through the solution map of the problem,
to obtain the gradient of a solution with respect to the parameters.
In other words, it calculates the sensitivities of the parameters
with respect to perturbations in the optimal variable values.
.backward() populates the .gradient attribute of each parameter as a
side-effect. It can only be called after calling .solve() with
`requires_grad=True`.
Below is a simple example:
::
import cvxpy as cp
import numpy as np
p = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * p)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
p.value = 3.0
problem.solve(requires_grad=True, eps=1e-10)
# .backward() populates the .gradient attribute of the parameters
problem.backward()
# Because x* = 2 * p, dx*/dp = 2
np.testing.assert_allclose(p.gradient, 2.0)
In the above example, the gradient could easily be computed by hand;
however, .backward() can be used to differentiate through any DCP
program (that is also DPP-compliant).
This method uses the chain rule to evaluate the gradients of a
scalar-valued function of the variables with respect to the parameters.
For example, let x be a variable and p a parameter; x and p might be
scalars, vectors, or matrices. Let f be a scalar-valued function, with
z = f(x). Then this method computes dz/dp = (dz/dx) (dx/p). dz/dx
is chosen to be the all ones vector by default, corresponding to
choosing f to be the sum function. You can specify a custom value for
dz/dx by setting the .gradient attribute on your variables. For example,
::
import cvxpy as cp
import numpy as np
b = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * b)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
b.value = 3.
problem.solve(requires_grad=True, eps=1e-10)
x.gradient = 4.
problem.backward()
# dz/dp = dz/dx dx/dp = 4. * 2. == 8.
np.testing.assert_allclose(b.gradient, 8.)
The .gradient attribute on a variable can also be interpreted as a
perturbation to its optimal value.
Raises
------
ValueError
if solve was not called with `requires_grad=True`
SolverError
if the problem is infeasible or unbounded
"""
if s.DIFFCP not in self._solver_cache:
raise ValueError("backward can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise error.SolverError("Backpropagating through "
"infeasible/unbounded problems is not "
"yet supported. Please file an issue on "
"Github if you need this feature.")
# TODO(akshayka): Backpropagate through dual variables as well.
backward_cache = self._solver_cache[s.DIFFCP]
DT = backward_cache["DT"]
zeros = np.zeros(backward_cache["s"].shape)
del_vars = {}
for variable in self.variables():
if variable.gradient is None:
del_vars[variable.id] = np.ones(variable.shape)
else:
del_vars[variable.id] = np.asarray(variable.gradient,
dtype=np.float64)
dx = self._cache.param_cone_prog.split_adjoint(del_vars)
dA, db, dc = DT(dx, zeros, zeros)
dparams = self._cache.param_cone_prog.apply_param_jac(dc, -dA, db)
for parameter in self.parameters():
parameter.gradient = dparams[parameter.id]
def derivative(self):
"""Apply the derivative of the solution map to perturbations in the parameters
This method applies the derivative of the solution map to perturbations
in the parameters, to obtain perturbations in the optimal values of the
variables. In other words, it tells you how the optimal values of the
variables would be changed.
You can specify perturbations in a parameter by setting its .delta
attribute (if unspecified, the perturbation defaults to 0). This method
populates the .delta attribute of the variables as a side-effect.
This method can only be called after calling .solve() with
`requires_grad=True`.
Below is a simple example:
::
import cvxpy as cp
import numpy as np
p = cp.Parameter()
x = cp.Variable()
quadratic = cp.square(x - 2 * p)
problem = cp.Problem(cp.Minimize(quadratic), [x >= 0])
p.value = 3.0
problem.solve(requires_grad=True, eps=1e-10)
# .derivative() populates the .delta attribute of the variables
problem.derivative()
p.delta = 1e-3
# Because x* = 2 * p, dx*/dp = 2, so (dx*/dp)(p.delta) == 2e-3
np.testing.assert_allclose(x.delta, 2e-3)
Raises
------
ValueError
if solve was not called with `requires_grad=True`
SolverError
if the problem is infeasible or unbounded
"""
if s.DIFFCP not in self._solver_cache:
raise ValueError("derivative can only be called after calling "
"solve with `requires_grad=True`")
elif self.status not in s.SOLUTION_PRESENT:
raise ValueError("Differentiating through infeasible/unbounded "
"problems is not yet supported. Please file an "
"issue on Github if you need this feature.")
# TODO(akshayka): Forward differentiate dual variables as well
backward_cache = self._solver_cache[s.DIFFCP]
param_cone_prog = self._cache.param_cone_prog
D = backward_cache["D"]
param_deltas = {}
for parameter in self.parameters():
if parameter.delta is None:
param_deltas[parameter.id] = np.zeros(parameter.shape)
else:
param_deltas[parameter.id] = np.asarray(parameter.delta,
dtype=np.float64)
dc, _, dA, db = param_cone_prog.apply_parameters(param_deltas,
zero_offset=True)
dx, _, _ = D(-dA, db, dc)
dvars = param_cone_prog.split_solution(
dx, [v.id for v in self.variables()])
for variable in self.variables():
variable.delta = dvars[variable.id]
def _clear_solution(self):
for v in self.variables():
v.save_value(None)
for c in self.constraints:
for dv in c.dual_variables:
dv.save_value(None)
self._value = None
self._status = None
self._solution = None
def unpack(self, solution):
"""Updates the problem state given a Solution.
Updates problem.status, problem.value and value of primal and dual
variables. If solution.status is in cvxpy.settins.ERROR, this method
is a no-op.
Parameters
__________
solution : cvxpy.Solution
A Solution object.
Raises
------
ValueError
If the solution object has an invalid status
"""
if solution.status in s.SOLUTION_PRESENT:
for v in self.variables():
v.save_value(solution.primal_vars[v.id])
for c in self.constraints:
if c.id in solution.dual_vars:
c.save_dual_value(solution.dual_vars[c.id])
elif solution.status in s.INF_OR_UNB:
for v in self.variables():
v.save_value(None)
for constr in self.constraints:
for dv in constr.dual_variables:
dv.save_value(None)
else:
raise ValueError("Cannot unpack invalid solution: %s" % solution)
self._value = solution.opt_val
self._status = solution.status
self._solution = solution
def unpack_results(self, solution, chain, inverse_data):
"""Updates the problem state given the solver results.
Updates problem.status, problem.value and value of
primal and dual variables.
Parameters
__________
solution : object
The solution returned by applying the chain to the problem
and invoking the solver on the resulting data.
chain : SolvingChain
A solving chain that was used to solve the problem.
inverse_data : list
The inverse data returned by applying the chain to the problem.
Raises
------
cvxpy.error.SolverError
If the solver failed
"""
solution = chain.invert(solution, inverse_data)
if solution.status in s.ERROR:
raise error.SolverError(
"Solver '%s' failed. " % chain.solver.name() +
"Try another solver, or solve with verbose=True for more "
"information.")
self.unpack(solution)
self._solver_stats = SolverStats(self._solution.attr,
chain.solver.name())
def __str__(self):
if len(self.constraints) == 0:
return str(self.objective)
else:
subject_to = "subject to "
lines = [str(self.objective),
subject_to + str(self.constraints[0])]
for constr in self.constraints[1:]:
lines += [len(subject_to) * " " + str(constr)]
return '\n'.join(lines)
def __repr__(self):
return "Problem(%s, %s)" % (repr(self.objective),
repr(self.constraints))
    def __neg__(self):
        """Problem : the problem with its objective negated."""
        return Problem(-self.objective, self.constraints)
def __add__(self, other):
if other == 0:
return self
elif not isinstance(other, Problem):
return NotImplemented
return Problem(self.objective + other.objective,
unique_list(self.constraints + other.constraints))
    def __radd__(self, other):
        """Support `0 + problem`, e.g. from the builtin `sum`."""
        if other == 0:
            return self
        else:
            return NotImplemented
    def __sub__(self, other):
        """Problem : difference of objectives, union of constraints."""
        if not isinstance(other, Problem):
            return NotImplemented
        return Problem(self.objective - other.objective,
                       unique_list(self.constraints + other.constraints))
    def __rsub__(self, other):
        """Support `0 - problem` as negation."""
        if other == 0:
            return -self
        else:
            return NotImplemented
    def __mul__(self, other):
        """Problem : objective scaled by a numeric constant, same constraints."""
        if not isinstance(other, (int, float)):
            return NotImplemented
        return Problem(self.objective * other, self.constraints)
    # Scalar multiplication is commutative.
    __rmul__ = __mul__
    def __div__(self, other):
        """Problem : objective divided by a numeric constant, same constraints."""
        if not isinstance(other, (int, float)):
            return NotImplemented
        return Problem(self.objective * (1.0 / other), self.constraints)
    def is_constant(self):
        """A problem is never a constant expression."""
        return False
__truediv__ = __div__
class SolverStats(object):
    """Reports miscellaneous solver information not captured by the Problem.

    Attributes
    ----------
    solver_name : str
        The name of the solver that produced these statistics.
    solve_time : double
        The time (in seconds) it took for the solver to solve the problem.
    setup_time : double
        The time (in seconds) it took for the solver to setup the problem.
    num_iters : int
        The number of iterations the solver had to go through to find a solution.
    """
    def __init__(self, results_dict, solver_name):
        self.solver_name = solver_name
        # Each statistic is None unless the solver reported it.
        self.solve_time = results_dict.get(s.SOLVE_TIME)
        self.setup_time = results_dict.get(s.SETUP_TIME)
        self.num_iters = results_dict.get(s.NUM_ITERS)
class SizeMetrics(object):
    """Reports various metrics regarding the problem.

    Attributes
    ----------
    num_scalar_variables : integer
        The number of scalar variables in the problem.
    num_scalar_data : integer
        The number of scalar constants and parameters in the problem,
        counted across all matrices and vectors. Some constants are not
        apparent when the problem is constructed: for example, the
        sum_squares expression is a wrapper for a quad_over_lin expression
        with a constant 1 in the denominator.
    num_scalar_eq_constr : integer
        The number of scalar equality constraints in the problem.
    num_scalar_leq_constr : integer
        The number of scalar inequality constraints in the problem.
    max_data_dimension : integer
        The longest dimension of any data block constraint or parameter.
    max_big_small_squared : integer
        The maximum value of (big)(small)^2 over all data blocks of the
        problem, where (big) is the larger dimension and (small) is the
        smaller dimension for each data block.
    """
    def __init__(self, problem):
        self.num_scalar_variables = sum(
            var.size for var in problem.variables())

        # Scan all constants and parameters for data-size statistics.
        self.max_data_dimension = 0
        self.num_scalar_data = 0
        self.max_big_small_squared = 0
        for const in problem.constants() + problem.parameters():
            self.num_scalar_data += const.size
            # Scalars (empty shape) count as 1 x 1 data blocks.
            big = max(const.shape) if len(const.shape) else 1
            small = min(const.shape) if len(const.shape) else 1
            if big > self.max_data_dimension:
                self.max_data_dimension = big
            big_small_squared = float(big) * float(small) ** 2
            if big_small_squared > self.max_big_small_squared:
                self.max_big_small_squared = big_small_squared

        self.num_scalar_eq_constr = sum(
            constraint.expr.size for constraint in problem.constraints
            if isinstance(constraint, (Equality, Zero)))
        self.num_scalar_leq_constr = sum(
            constraint.expr.size for constraint in problem.constraints
            if isinstance(constraint, (Inequality, NonPos)))
| 38.091434
| 98
| 0.599666
|
4a129ab1c25d92d01467194fe09cdc3f55be5d7e
| 1,295
|
py
|
Python
|
benchmarks/test_benchmark.py
|
ludovicchabant/Wikked
|
02ec3c0361ac90b0366e7a90f8928a54d40616b5
|
[
"Apache-2.0"
] | 17
|
2015-10-10T11:37:33.000Z
|
2021-11-21T02:10:38.000Z
|
benchmarks/test_benchmark.py
|
ludovicchabant/Wikked
|
02ec3c0361ac90b0366e7a90f8928a54d40616b5
|
[
"Apache-2.0"
] | 1
|
2018-11-10T19:40:58.000Z
|
2019-03-09T07:47:53.000Z
|
benchmarks/test_benchmark.py
|
ludovicchabant/Wikked
|
02ec3c0361ac90b0366e7a90f8928a54d40616b5
|
[
"Apache-2.0"
] | null | null | null |
import re
import urllib.parse
import random
import unittest
from funkload.FunkLoadTestCase import FunkLoadTestCase
class Benchmark(FunkLoadTestCase):
    """This test uses a configuration file Benchmark.conf.

    Load-tests a wiki server by optionally logging in and then fetching
    randomly chosen pages through the read API.
    """
    def setUp(self):
        # Base URL of the server under test, from the [main] section.
        self.server_url = self.conf_get('main', 'url')
    def test_simple(self):
        server_url = self.server_url
        if not re.match('https?://', server_url):
            raise Exception("The `server_url` setting doesn't have a scheme.")
        # Authenticate first when credentials are configured.
        username = self.conf_get('test_benchmark', 'username', None)
        password = self.conf_get('test_benchmark', 'password', None)
        if username and password:
            self.post(self.server_url + "/api/user/login",
                      params=[['username', username],
                              ['password', password]],
                      description="Login as %s" % username)
        # Fetch `nb_times` randomly chosen pages from the configured list.
        nb_times = self.conf_getInt('test_benchmark', 'nb_times')
        names = self.conf_get('test_benchmark', 'page_names').split(';')
        for i in range(nb_times):
            r = random.randint(0, len(names) - 1)
            url = server_url + '/api/read/' + urllib.parse.quote(names[r])
            self.get(url, description='Getting %s' % names[r])
# NOTE(review): the FunkLoad runner appears to import this module under the
# name 'main', so both spellings must trigger the unittest entry point —
# confirm before simplifying to the usual '__main__'-only check.
if __name__ in ('main', '__main__'):
    unittest.main()
| 35
| 78
| 0.616216
|
4a129bbcd3eb80dceaac4efbd2224f4dd7154e7d
| 4,406
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/functionalities/abbreviation/TestAbbreviations.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/abbreviation/TestAbbreviations.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/abbreviation/TestAbbreviations.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
"""
Test some lldb command abbreviations and aliases for proper resolution.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class AbbreviationsTestCase(TestBase):
    """Exercise SBCommandInterpreter.ResolveCommand: abbreviation expansion,
    ambiguity detection, alias creation/deletion, and raw-input commands."""
    mydir = TestBase.compute_mydir(__file__)
    @no_debug_info_test
    def test_command_abbreviations_and_aliases(self):
        """Resolve abbreviated and aliased commands and check the expansions."""
        command_interpreter = self.dbg.GetCommandInterpreter()
        self.assertTrue(command_interpreter, VALID_COMMAND_INTERPRETER)
        # Reused for every ResolveCommand call below; each call overwrites it.
        result = lldb.SBCommandReturnObject()
        # Check that abbreviations are expanded to the full command.
        command_interpreter.ResolveCommand("ap script", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual("apropos script", result.GetOutput())
        command_interpreter.ResolveCommand("h", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual("help", result.GetOutput())
        # Check resolution of abbreviations for multi-word commands.
        command_interpreter.ResolveCommand("lo li", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual("log list", result.GetOutput())
        command_interpreter.ResolveCommand("br s", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual("breakpoint set", result.GetOutput())
        # Try an ambiguous abbreviation.
        # "pl" could be "platform" or "plugin".
        command_interpreter.ResolveCommand("pl", result)
        self.assertFalse(result.Succeeded())
        self.assertTrue(result.GetError().startswith("Ambiguous command"))
        # Make sure an unabbreviated command is not mangled.
        command_interpreter.ResolveCommand(
            "breakpoint set --name main --line 123", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual(
            "breakpoint set --name main --line 123",
            result.GetOutput())
        # Create some aliases.
        # "com a" abbreviates "command alias"; "gurp" becomes an alias of "help".
        self.runCmd("com a alias com al")
        self.runCmd("alias gurp help")
        # Check that an alias is replaced with the actual command
        command_interpreter.ResolveCommand("gurp target create", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual("help target create", result.GetOutput())
        # Delete the alias and make sure it no longer has an effect.
        # ("com u" abbreviates "command unalias".)
        self.runCmd("com u gurp")
        command_interpreter.ResolveCommand("gurp", result)
        self.assertFalse(result.Succeeded())
        # Check aliases with text replacement (%1, %2 positional substitution).
        self.runCmd("alias pltty process launch -s -o %1 -e %1")
        command_interpreter.ResolveCommand("pltty /dev/tty0", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual(
            "process launch -s -o /dev/tty0 -e /dev/tty0",
            result.GetOutput())
        self.runCmd("alias xyzzy breakpoint set -n %1 -l %2")
        command_interpreter.ResolveCommand("xyzzy main 123", result)
        self.assertTrue(result.Succeeded())
        self.assertEqual(
            "breakpoint set -n main -l 123",
            result.GetOutput().strip())
        # And again, without enough parameters.
        command_interpreter.ResolveCommand("xyzzy main", result)
        self.assertFalse(result.Succeeded())
        # Check a command that wants the raw input.
        command_interpreter.ResolveCommand(
            r'''sc print("\n\n\tHello!\n")''', result)
        self.assertTrue(result.Succeeded())
        self.assertEqual(
            r'''script print("\n\n\tHello!\n")''',
            result.GetOutput())
        # Prompt changing stuff should be tested, but this doesn't seem like the
        # right test to do it in.  It has nothing to do with aliases or abbreviations.
        #self.runCmd("com sou ./change_prompt.lldb")
        # self.expect("settings show prompt",
        #            startstr = 'prompt (string) = "[with-three-trailing-spaces]  "')
        #self.runCmd("settings clear prompt")
        # self.expect("settings show prompt",
        #            startstr = 'prompt (string) = "(lldb) "')
        #self.runCmd("se se prompt 'Sycamore> '")
        # self.expect("se sh prompt",
        #            startstr = 'prompt (string) = "Sycamore> "')
        #self.runCmd("se cl prompt")
        # self.expect("set sh prompt",
        #            startstr = 'prompt (string) = "(lldb) "')
| 40.796296
| 86
| 0.642079
|
4a129c39da06b58d9ad8d6411a6799636a8af6c5
| 14,090
|
py
|
Python
|
tests/run_unit_tests.py
|
CyberPoint/libpgm
|
be32202d28f53cf3761a2e31ea8ffcdc3e383789
|
[
"BSD-3-Clause"
] | 83
|
2015-02-04T02:05:50.000Z
|
2020-12-21T03:22:39.000Z
|
tests/run_unit_tests.py
|
CyberPoint/libpgm
|
be32202d28f53cf3761a2e31ea8ffcdc3e383789
|
[
"BSD-3-Clause"
] | 26
|
2015-03-10T11:22:39.000Z
|
2020-05-27T23:48:21.000Z
|
tests/run_unit_tests.py
|
CyberPoint/libpgm
|
be32202d28f53cf3761a2e31ea8ffcdc3e383789
|
[
"BSD-3-Clause"
] | 50
|
2015-03-02T12:49:31.000Z
|
2020-05-26T07:36:48.000Z
|
'''
A module that conducts unit tests on all top-level methods within each class.
Created on Jun 20, 2012
@author: ccabot
'''
import unittest
import sys
# add to PYTHONPATH
sys.path.append("../")
from libpgm.dictionary import Dictionary
from libpgm.graphskeleton import GraphSkeleton
from libpgm.orderedskeleton import OrderedSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.hybayesiannetwork import HyBayesianNetwork
from libpgm.nodedata import NodeData
from libpgm.tablecpdfactor import TableCPDFactor
from libpgm.sampleaggregator import SampleAggregator
from libpgm.tablecpdfactorization import TableCPDFactorization
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.dyndiscbayesiannetwork import DynDiscBayesianNetwork
from libpgm.pgmlearner import PGMLearner
class TestNodeData(unittest.TestCase):
    """Hybrid node entries should convert into callable node instances."""

    def setUp(self):
        self.nd = NodeData()

    def test_entriestoinstances(self):
        self.nd.load("unittesthdict.txt")
        self.nd.entriestoinstances()
        choice = self.nd.nodes["Intelligence"].choose([])
        # The discrete node can only ever produce one of its two values.
        self.assertTrue(choice in ('low', 'high'))
class TestGraphSkeleton(unittest.TestCase):
    """Parent/child lookup and topological ordering on a tiny hand-built DAG."""

    def setUp(self):
        self.instance = GraphSkeleton()
        # Edges: 5 -> 1 -> 2; nodes 3 and 4 are isolated.
        self.instance.V = [1, 2, 3, 4, 5]
        self.instance.E = [[5, 1], [1, 2]]

    def test_getparents(self):
        self.assertEqual(self.instance.getparents(1), [5])
        self.assertEqual(self.instance.getparents(4), [])

    def test_getchildren(self):
        self.assertEqual(self.instance.getchildren(5), [1])
        self.assertEqual(self.instance.getchildren(4), [])

    def test_toporder(self):
        self.instance.toporder()
        order = self.instance.V
        # Node 5 must precede both of its descendants after sorting.
        self.assertTrue(order.index(5) < order.index(1))
        self.assertTrue(order.index(5) < order.index(2))
class TestOrderedSkeleton(unittest.TestCase):
    """An OrderedSkeleton should arrive already topologically sorted."""

    def setUp(self):
        self.os = OrderedSkeleton()
        self.os.load("unittestdict.txt")
        self.gs = GraphSkeleton()
        self.gs.load("unittestdict.txt")

    def test_constructor(self):
        # The plain skeleton differs until it is sorted itself...
        self.assertNotEqual(self.os.V, self.gs.V)
        self.gs.toporder()
        # ...after which the two vertex orders coincide.
        self.assertEqual(self.os.V, self.gs.V)
class TestDiscreteBayesianNetwork(unittest.TestCase):
    """Forward sampling from a discrete Bayesian network."""

    def setUp(self):
        skeleton = GraphSkeleton()
        skeleton.load("unittestdict.txt")
        skeleton.toporder()
        data = NodeData()
        data.load("unittestdict.txt")
        self.instance = DiscreteBayesianNetwork(skeleton, data)

    def test_randomsample(self):
        samples = self.instance.randomsample(5)
        first = samples[0]
        # Every drawn value must be a real outcome, never the placeholder.
        self.assertTrue(first["Difficulty"] == 'easy' or first["Difficulty"] == 'hard')
        for value in first.values():
            self.assertTrue(value != "default")

    def test_randomsamplewithevidence(self):
        evidence = dict(Difficulty='easy')
        # Evidence must be clamped in every sample drawn.
        for sample in self.instance.randomsample(10, evidence):
            self.assertEqual(sample["Difficulty"], 'easy')
class TestLGBayesianNetwork(unittest.TestCase):
    """Forward sampling from a linear-Gaussian Bayesian network."""

    def setUp(self):
        nodedata = NodeData()
        nodedata.load("unittestlgdict.txt")
        skel = GraphSkeleton()
        skel.load("unittestdict.txt")
        skel.toporder()
        self.lgb = LGBayesianNetwork(skel, nodedata)

    def test_randomsample(self):
        """One sample should contain a float for each of the 5 nodes."""
        seq = self.lgb.randomsample(1)
        ctr = 0
        for entry in seq[0].keys():
            # BUG FIX: the original called assertTrue(seq[0][entry], float),
            # which treats `float` as the failure *message* and only checks
            # truthiness — a legitimately sampled 0.0 would fail, and any
            # truthy non-float would pass. assertIsInstance checks the type.
            self.assertIsInstance(seq[0][entry], float)
            ctr = ctr + 1
        self.assertEqual(ctr, 5)
class TestTableCPDFactor(unittest.TestCase):
    """Factor-algebra operations (multiply, sum-out, reduce, copy) on the
    CPD factors of the student-network test fixture."""
    def setUp(self):
        skel = GraphSkeleton()
        skel.load("unittestdict.txt")
        skel.toporder()
        nodedata = NodeData()
        nodedata.load("unittestdict.txt")
        self.instance = DiscreteBayesianNetwork(skel, nodedata)
        # Two factors over overlapping scopes ("Grade" is a parent of "Letter").
        self.factor = TableCPDFactor("Grade", self.instance)
        self.factor2 = TableCPDFactor("Letter", self.instance)
    def test_constructor(self):
        # The flat value table must have one entry per joint assignment,
        # and strides must follow the standard row-major factor layout:
        # stride of scope[i] == stride of scope[i-1] * cardinality[i-1].
        product = 1
        for var in self.factor.card:
            product *= var
        self.assertTrue(len(self.factor.vals) == product)
        for i in range(1, len(self.factor.scope)):
            self.assertTrue(self.factor.stride[self.factor.scope[i]] == self.factor.stride[self.factor.scope[i-1]] * self.factor.card[i-1])
    def test_multiplyfactor(self):
        # Expected values below are the exact IEEE-754 products of the two
        # fixture CPDs — do not round them.
        self.factor.multiplyfactor(self.factor2)
        a = [0.03, 0.16000000000000003, 0.297, 0.09000000000000001, 0.032, 0.0198, 0.005000000000000001, 0.1, 0.693, 0.05, 0.12, 0.198, 0.27, 0.24, 0.003, 0.81, 0.048, 0.0002, 0.045000000000000005, 0.15, 0.006999999999999999, 0.45, 0.18, 0.002]
        b = [3, 2, 2, 2]
        c = ['Grade', 'Intelligence', 'Difficulty', 'Letter']
        d = {'Grade': 1, 'Intelligence': 3, 'Letter': 12, 'Difficulty': 6}
        self.assertEqual(self.factor.vals, a)
        self.assertEqual(self.factor.card, b)
        self.assertEqual(self.factor.scope, c)
        self.assertEqual(self.factor.stride, d)
    def test_sumout(self):
        # Marginalizing "Difficulty" removes it from scope/card/stride.
        self.factor.sumout("Difficulty")
        a = [0.35, 0.65, 1.0, 1.4, 0.38, 0.22]
        b = [3, 2]
        c = ['Grade', 'Intelligence']
        d = {'Grade': 1, 'Intelligence': 3}
        self.assertEqual(self.factor.vals, a)
        self.assertEqual(self.factor.card, b)
        self.assertEqual(self.factor.scope, c)
        self.assertEqual(self.factor.stride, d)
    def test_reducefactor(self):
        # Conditioning on Difficulty='easy' keeps only the matching slice.
        self.factor.reducefactor("Difficulty", 'easy')
        a = [0.3, 0.4, 0.3, 0.9, 0.08, 0.02]
        b = [3, 2]
        c = ['Grade', 'Intelligence']
        d = {'Grade': 1, 'Intelligence': 3}
        self.assertEqual(self.factor.vals, a)
        self.assertEqual(self.factor.card, b)
        self.assertEqual(self.factor.scope, c)
        self.assertEqual(self.factor.stride, d)
    def test_copy(self):
        # A copy is a distinct object with equal contents.
        copy = self.factor.copy()
        self.assertTrue((copy is self.factor) == False)
        self.assertEqual(copy.vals, self.factor.vals)
        self.assertEqual(copy.card, self.factor.card)
        self.assertEqual(copy.scope, self.factor.scope)
        self.assertEqual(copy.stride, self.factor.stride)
class TestTableCPDFactorization(unittest.TestCase):
    """Variable elimination and Gibbs sampling over the student network."""
    def setUp(self):
        skel = GraphSkeleton()
        skel.load("unittestdict.txt")
        skel.toporder()
        nodedata = NodeData()
        nodedata.load("unittestdict.txt")
        self.bn = DiscreteBayesianNetwork(skel, nodedata)
        self.fn = TableCPDFactorization(self.bn)
    def test_constructor(self):
        # One factor per network node.
        self.assertTrue(len(self.fn.originalfactorlist) == 5)
        for x in range(5):
            self.assertTrue(isinstance(self.fn.originalfactorlist[x], TableCPDFactor))
    def test_refresh(self):
        # refresh() must restore the factor list so a second identical
        # query yields the same answer.
        evidence = dict(Letter='weak')
        query = dict(Intelligence=['high'])
        result1 = self.fn.specificquery(query, evidence)
        self.fn.refresh()
        result2 = self.fn.specificquery(query, evidence)
        self.assertEqual(result1, result2)
    def test_sumproducteliminatevar(self):
        self.fn.refresh()
        self.fn.sumproducteliminatevar("Difficulty")
        yes = 0
        for x in range(len(self.fn.factorlist)):
            if (self.fn.factorlist[x].scope == ['Grade', 'Intelligence']):
                yes += 1
                index = x
        # NOTE(review): `index` is only bound when a matching factor was
        # found; the assertion below fails first in that case, so the later
        # use of `index` is safe — but fragile if the assert is ever removed.
        self.assertTrue(yes == 1)
        exp = [0.2, 0.33999999999999997, 0.45999999999999996, 0.74, 0.16799999999999998, 0.09200000000000001]
        for x in range(6):
            self.assertTrue(abs(self.fn.factorlist[index].vals[x] - exp[x]) < .01)
    def test_sumproductve(self):
        # NOTE(review): `input` shadows the builtin; harmless locally.
        input = ["Difficulty", "Grade", "Intelligence", "SAT"]
        self.fn.refresh()
        self.fn.sumproductve(input)
        exp = [.498, .502]
        for x in range(2):
            self.assertTrue(abs(self.fn.factorlist.vals[x] - exp[x]) < .01)
    def test_condprobve(self):
        evidence = dict(Grade='C', SAT='highscore')
        query = dict(Intelligence='high')
        self.fn.refresh()
        self.fn.condprobve(query, evidence)
        exp = [.422, .578]
        for x in range(2):
            self.assertTrue(abs(self.fn.factorlist.vals[x] - exp[x]) < .01)
    def test_specificquery(self):
        evidence = dict(Difficulty='easy')
        query = dict(Grade=['A', 'B'])
        self.fn.refresh()
        answer = self.fn.specificquery(query, evidence)
        self.assertTrue(abs(answer - .784) < .01)
    def test_gibbssample(self):
        # Evidence must be clamped in every Gibbs sample.
        evidence = dict(Letter='weak')
        gs = self.fn.gibbssample(evidence, 5)
        self.assertTrue(gs[0]["Difficulty"] == 'easy' or gs[0]["Difficulty"] == 'hard')
        self.assertTrue(len(gs) == 5)
        for entry in gs:
            self.assertTrue(entry["Letter"] == 'weak')
class TestSampleAggregator(unittest.TestCase):
    """Aggregated value frequencies from forward and Gibbs sampling."""

    def setUp(self):
        skel = GraphSkeleton()
        skel.load("unittestdict.txt")
        skel.toporder()
        data = NodeData()
        data.load("unittestdict.txt")
        self.bn = DiscreteBayesianNetwork(skel, data)
        agg = SampleAggregator()
        # First pass: 50 forward samples.
        agg.aggregate(self.bn.randomsample(50))
        self.rseq = agg.seq
        self.ravg = agg.avg
        self.fn = TableCPDFactorization(self.bn)
        # Second pass: 51 Gibbs samples with clamped evidence.
        evidence = dict(Letter='weak')
        agg.aggregate(self.fn.gibbssample(evidence, 51))
        self.gseq = agg.seq
        self.gavg = agg.avg

    def test_rseq(self):
        self.assertTrue(len(self.rseq) == 50)
        for dist in self.ravg.values():
            # Each node's averaged distribution should sum to ~1.
            total = sum(dist.values())
            self.assertTrue(.99 < total < 1.01)

    def test_gseq(self):
        self.assertTrue(len(self.gseq) == 51)
        for dist in self.gavg.values():
            total = sum(dist.values())
            self.assertTrue(.99 < total < 1.01)
class TestHyBayesianNetwork(unittest.TestCase):
    """Sampling from a hybrid (mixed node type) Bayesian network."""

    def setUp(self):
        self.nd = NodeData()
        self.nd.load("unittesthdict.txt")
        self.nd.entriestoinstances()
        self.skel = GraphSkeleton()
        self.skel.load("unittestdict.txt")
        self.skel.toporder()
        self.hybn = HyBayesianNetwork(self.skel, self.nd)

    def test_randomsample(self):
        draw = self.hybn.randomsample(1)[0]
        # Each node type yields its own kind of value: linear-Gaussian ->
        # float, discrete -> str, and the custom node appends a fixed suffix.
        self.assertTrue(isinstance(draw['Grade'], float))
        self.assertTrue(isinstance(draw['Intelligence'], str))
        self.assertEqual(draw["SAT"][-12:], 'blueberries!')
class TestDynDiscBayesianNetwork(unittest.TestCase):
    """Sampling a dynamic discrete network over several time slices."""

    def setUp(self):
        self.nd = NodeData()
        self.nd.load("unittestdyndict.txt")
        self.skel = GraphSkeleton()
        self.skel.load("unittestdyndict.txt")
        self.skel.toporder()
        self.d = DynDiscBayesianNetwork(self.skel, self.nd)

    def test_randomsample(self):
        seq = self.d.randomsample(10)
        baseline = seq[0]['Difficulty']
        # The initial-slice value must carry through every later time step.
        for step in range(1, 10):
            self.assertEqual(baseline, seq[step]['Difficulty'])
class TestPGMLearner(unittest.TestCase):
    """Parameter and structure learning from synthetic sample sequences."""

    def setUp(self):
        # instantiate learner
        self.l = PGMLearner()
        # generate graph skeleton
        skel = GraphSkeleton()
        skel.load("unittestdict.txt")
        skel.toporder()
        # generate sample sequence to try to learn from - discrete
        nd = NodeData()
        nd.load("unittestdict.txt")
        self.samplediscbn = DiscreteBayesianNetwork(skel, nd)
        self.samplediscseq = self.samplediscbn.randomsample(5000)
        # generate sample sequence to try to learn from - linear Gaussian
        nda = NodeData()
        nda.load("unittestlgdict.txt")
        self.samplelgbn = LGBayesianNetwork(skel, nda)
        self.samplelgseq = self.samplelgbn.randomsample(10000)
        self.skel = skel

    def test_discrete_mle_estimateparams(self):
        # Learned CPD entries should be close to the generating parameters.
        result = self.l.discrete_mle_estimateparams(self.skel, self.samplediscseq)
        indexa = result.Vdata['SAT']['vals'].index('lowscore')
        self.assertTrue(result.Vdata['SAT']['cprob']["['low']"][indexa] < 1 and result.Vdata['SAT']['cprob']["['low']"][indexa] > .9)
        indexb = result.Vdata['Letter']['vals'].index('weak')
        self.assertTrue(result.Vdata['Letter']['cprob']["['A']"][indexb] < .15 and result.Vdata['Letter']['cprob']["['A']"][indexb] > .05)

    def test_lg_mle_estimateparams(self):
        result = self.l.lg_mle_estimateparams(self.skel, self.samplelgseq)
        self.assertTrue(result.Vdata['SAT']['mean_base'] < 15 and result.Vdata['SAT']['mean_base'] > 5)
        self.assertTrue(result.Vdata['Letter']['variance'] < 15 and result.Vdata['Letter']['variance'] > 5)

    def test_discrete_constraint_estimatestruct(self):
        result = self.l.discrete_constraint_estimatestruct(self.samplediscseq)
        self.assertTrue(["Difficulty", "Grade"] in result.E)

    def test_lg_constraint_estimatestruct(self):
        result = self.l.lg_constraint_estimatestruct(self.samplelgseq)
        self.assertTrue(["Intelligence", "Grade"] in result.E)

    def test_discrete_condind(self):
        chi, pv, witness = self.l.discrete_condind(self.samplediscseq, "Difficulty", "Letter", ["Grade"])
        self.assertTrue(pv > .05)
        # BUG FIX: the original called assertTrue(witness, ["Grade"]), which
        # treats the list as a failure *message* and only checks that
        # `witness` is truthy. assertEqual checks the actual witness set.
        self.assertEqual(witness, ["Grade"])
        chia, pva, witnessa = self.l.discrete_condind(self.samplediscseq, "Difficulty", "Intelligence", [])
        self.assertTrue(pva < .05)

    def test_discrete_estimatebn(self):
        result = self.l.discrete_estimatebn(self.samplediscseq)
        self.assertTrue(result.V)
        self.assertTrue(result.E)
        self.assertTrue(result.Vdata["Difficulty"]["cprob"][0])

    def test_lg_estimatebn(self):
        result = self.l.lg_estimatebn(self.samplelgseq)
        self.assertTrue(result.V)
        self.assertTrue(result.E)
        self.assertTrue(result.Vdata["Intelligence"]["mean_base"])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 37.275132
| 244
| 0.637615
|
4a129c721e40365989e38ee52dff8c8c46f18593
| 351
|
py
|
Python
|
tlnmf/__init__.py
|
sixin-zh/tlnmf-gcm
|
4ff1d61acfa65eb51e90f56d02d227cb77847558
|
[
"MIT"
] | null | null | null |
tlnmf/__init__.py
|
sixin-zh/tlnmf-gcm
|
4ff1d61acfa65eb51e90f56d02d227cb77847558
|
[
"MIT"
] | null | null | null |
tlnmf/__init__.py
|
sixin-zh/tlnmf-gcm
|
4ff1d61acfa65eb51e90f56d02d227cb77847558
|
[
"MIT"
] | null | null | null |
"""Transform Learning - NMF"""
__version__ = '0.1' # noqa
from .tl_nmf_batch import tl_nmf_batch # gcm model, mle loss, batch samples
from .tl_nmf_gcm_newton import tl_nmf_gcm_newton # gcm model, mle loss in expectation
from .utils import signal_to_frames, unitary_projection, synthesis_windowing # noqa
import numpy as np
np.seterr(all='raise')
| 31.909091
| 85
| 0.777778
|
4a129d20aa5d1ee2b8ba6cbcd4eaf9af4ec7e3f6
| 40,988
|
py
|
Python
|
examples/my_agent/my_agent_1/macro_action_mask.py
|
Hotpotfish/python-sc2
|
31675d62d3241dc84e538df9b77d15132939be85
|
[
"MIT"
] | null | null | null |
examples/my_agent/my_agent_1/macro_action_mask.py
|
Hotpotfish/python-sc2
|
31675d62d3241dc84e538df9b77d15132939be85
|
[
"MIT"
] | null | null | null |
examples/my_agent/my_agent_1/macro_action_mask.py
|
Hotpotfish/python-sc2
|
31675d62d3241dc84e538df9b77d15132939be85
|
[
"MIT"
] | null | null | null |
import random
from examples.my_agent.my_agent_1.action_list import *
from sc2.ids.unit_typeid import UnitTypeId
from sc2.position import Point2
from sc2.units import Units
from sc2.unit import Unit
ATTACK_FREQUENCY = 32
BUILD_FREQUENCY = 32
DETECTION_FREQUENCY = 64
DISTRIBUTE_FREQUENCY = 32
# If an action cannot currently execute, the mask simply reports 0 (no-op).
# Build a supply depot.
async def buildSupplydepot_mask(self):
    """Return 1 if a supply depot could be started this step, else 0."""
    # NOTE(review): this is truthy on every loop EXCEPT multiples of
    # BUILD_FREQUENCY — as a rate limiter it looks inverted (`== 0` would
    # fire once per interval). Same pattern in every build* mask; confirm.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Only when supply is nearly capped and below the hard 200 cap.
        if self.supply_cap < 200 and self.supply_left < 10:
            # Can we afford it?
            if self.can_afford(UnitTypeId.SUPPLYDEPOT):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker not already ordered this step?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            # distance=-8: place on the far side, away from map center.
                            position_towards_map_center = cc.position.towards(map_center, distance=-8)
                            placement_position = await self.find_placement(UnitTypeId.SUPPLYDEPOT, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
# Build a barracks.
async def buildBarracks_mask(self):
    """Return 1 if a barracks could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it, and are we under the cap of 4 per townhall?
        # NOTE(review): both len() terms count UnitTypeId.BARRACKS, so the
        # count is doubled — one term was presumably meant to be
        # BARRACKSFLYING (compare buildArmory_mask); confirm intent.
        if self.can_afford(UnitTypeId.BARRACKS) and (len(self.structures(UnitTypeId.BARRACKS)) + len(self.structures(UnitTypeId.BARRACKS)) <= 4 * len(self.townhalls())):
            # Tech-tree prerequisite: a supply depot (raised or lowered).
            if self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.SUPPLYDEPOTLOWERED):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=8)
                            placement_position = await self.find_placement(UnitTypeId.BARRACKS, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildBarracksReactor_mask(self):
    """Return 1 when some ready, add-on-less barracks could get a reactor."""
    if not self.state.game_loop % BUILD_FREQUENCY:
        return 0
    for rax in self.structures(UnitTypeId.BARRACKS).ready:
        if rax.has_add_on or not self.can_afford(UnitTypeId.BARRACKSREACTOR):
            continue
        candidate_points = points_to_build_addon(rax.position)
        # Every cell of the add-on footprint must be in-bounds and buildable.
        buildable = all(
            self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
            for p in candidate_points
        )
        if buildable:
            return 1
    return 0
async def buildBarracksTechlab_mask(self):
    """Return 1 when some ready, add-on-less barracks could get a tech lab."""
    if not self.state.game_loop % BUILD_FREQUENCY:
        return 0
    for rax in self.structures(UnitTypeId.BARRACKS).ready:
        if rax.has_add_on or not self.can_afford(UnitTypeId.BARRACKSTECHLAB):
            continue
        footprint = points_to_build_addon(rax.position)
        # The whole add-on footprint must be in-bounds and buildable.
        if all(
            self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
            for p in footprint
        ):
            return 1
    return 0
async def liftBarracks_mask(self):
    """Return 1 when at least one barracks is idle (and so liftable)."""
    rax = self.structures(UnitTypeId.BARRACKS)
    if rax and rax.idle:
        return 1
    return 0
async def landAndReadyToBuildBarracksAddOn_mask(self):
    """Return 1 if an idle flying barracks can land with add-on room, else 0.

    NOTE(review): despite being a *_mask query, this function also ISSUES the
    LAND order as a side effect before returning 1 — confirm that is intended.
    """
    if self.structures(UnitTypeId.BARRACKSFLYING):
        if self.structures(UnitTypeId.BARRACKSFLYING).idle:
            # Only worth landing if either add-on is affordable afterwards.
            if self.can_afford(UnitTypeId.BARRACKSREACTOR) and self.can_afford(UnitTypeId.BARRACKSTECHLAB):
                for Barracks in self.structures(UnitTypeId.BARRACKSFLYING).idle:
                    # Candidate offsets sorted nearest-first around the barracks.
                    possible_land_positions_offset = sorted(
                        (Point2((x, y)) for x in range(-10, 10) for y in range(-10, 10)),
                        key=lambda point: point.x ** 2 + point.y ** 2,
                    )
                    offset_point: Point2 = Point2((-0.5, -0.5))
                    possible_land_positions = (Barracks.position.rounded + offset_point + p for p in possible_land_positions_offset)
                    for target_land_position in possible_land_positions:
                        # Structure footprint plus the add-on cells must all be free.
                        land_and_addon_points: List[Point2] = land_positions(target_land_position)
                        if all(
                            self.in_map_bounds(land_pos) and self.in_placement_grid(land_pos) and self.in_pathing_grid(land_pos)
                            for land_pos in land_and_addon_points
                        ):
                            Barracks(AbilityId.LAND, target_land_position)
                            return 1
    return 0
async def buildEngineeringbay_mask(self):
    """Return 1 if an engineering bay could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Affordable, and fewer than two bays so far?
        if self.can_afford(UnitTypeId.ENGINEERINGBAY) and len(self.structures(UnitTypeId.ENGINEERINGBAY)) < 2:
            # Tech-tree prerequisite: a supply depot (raised or lowered).
            if self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.SUPPLYDEPOTLOWERED):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            # distance=-8: place away from the map center.
                            position_towards_map_center = cc.position.towards(map_center, distance=-8)
                            placement_position = await self.find_placement(UnitTypeId.ENGINEERINGBAY, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildRefinery_mask(self):
    """Return 1 if a refinery could be started on a free geyser, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it?
        if self.can_afford(UnitTypeId.REFINERY):
            CCs: Units = self.townhalls()
            # Is a command center still alive?
            if CCs:
                worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                # Any idle/collecting worker available?
                if worker_candidates:
                    for cc in CCs:
                        vgs = self.vespene_geyser.closer_than(10, cc)
                        for vg in vgs:
                            # Skip geysers that already carry a gas building.
                            if self.gas_buildings.filter(lambda unit: unit.distance_to(vg) < 1):
                                continue
                            # A free geyser near a CC counts as a valid spot.
                            return 1
    return 0
# Build a factory.
async def buildFactory_mask(self):
    """Return 1 if a factory could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? The doubled len(FACTORY) term means this only
        # passes with zero factories (2*len <= 1) — presumably one term was
        # meant to be FACTORYFLYING; confirm intent.
        if self.can_afford(UnitTypeId.FACTORY) and (len(self.structures(UnitTypeId.FACTORY)) + len(self.structures(UnitTypeId.FACTORY)) <= 1):
            # Tech-tree prerequisite (NOTE(review): checks depot OR barracks,
            # unlike the depot-AND-barracks pattern elsewhere; confirm).
            if self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.BARRACKS):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=8)
                            placement_position = await self.find_placement(UnitTypeId.FACTORY, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildFactoryReactor_mask(self):
    """Return 1 when some ready, add-on-less factory could get a reactor."""
    if not self.state.game_loop % BUILD_FREQUENCY:
        return 0
    for fac in self.structures(UnitTypeId.FACTORY).ready:
        if fac.has_add_on or not self.can_afford(UnitTypeId.FACTORYREACTOR):
            continue
        footprint = points_to_build_addon(fac.position)
        # The whole add-on footprint must be in-bounds and buildable.
        if all(
            self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
            for p in footprint
        ):
            return 1
    return 0
async def buildFactoryTechlab_mask(self):
    """Return 1 when some ready, add-on-less factory could get a tech lab."""
    if not self.state.game_loop % BUILD_FREQUENCY:
        return 0
    for fac in self.structures(UnitTypeId.FACTORY).ready:
        if fac.has_add_on or not self.can_afford(UnitTypeId.FACTORYTECHLAB):
            continue
        footprint = points_to_build_addon(fac.position)
        # The whole add-on footprint must be in-bounds and buildable.
        if all(
            self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
            for p in footprint
        ):
            return 1
    return 0
async def liftFactory_mask(self):
    """Return 1 when at least one factory is idle (and so liftable)."""
    factories = self.structures(UnitTypeId.FACTORY)
    if factories and factories.idle:
        return 1
    return 0
async def landAndReadyToBuildFactoryAddOn_mask(self):
    """Return 1 if an idle flying factory can land with add-on room, else 0.

    NOTE(review): despite being a *_mask query, this function also ISSUES the
    LAND order as a side effect before returning 1 — confirm that is intended.
    """
    if self.structures(UnitTypeId.FACTORYFLYING):
        if self.structures(UnitTypeId.FACTORYFLYING).idle:
            # Only worth landing if either add-on is affordable afterwards.
            if self.can_afford(UnitTypeId.FACTORYREACTOR) and self.can_afford(UnitTypeId.FACTORYTECHLAB):
                for Factory in self.structures(UnitTypeId.FACTORYFLYING).idle:
                    # Candidate offsets sorted nearest-first around the factory.
                    possible_land_positions_offset = sorted(
                        (Point2((x, y)) for x in range(-10, 10) for y in range(-10, 10)),
                        key=lambda point: point.x ** 2 + point.y ** 2,
                    )
                    offset_point: Point2 = Point2((-0.5, -0.5))
                    possible_land_positions = (Factory.position.rounded + offset_point + p for p in possible_land_positions_offset)
                    for target_land_position in possible_land_positions:
                        # Structure footprint plus the add-on cells must all be free.
                        land_and_addon_points: List[Point2] = land_positions(target_land_position)
                        if all(
                            self.in_map_bounds(land_pos) and self.in_placement_grid(land_pos) and self.in_pathing_grid(land_pos)
                            for land_pos in land_and_addon_points
                        ):
                            Factory(AbilityId.LAND, target_land_position)
                            return 1
    return 0
async def buildGhostAcademy_mask(self):
    """Return 1 if a ghost academy could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see the other build* masks.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Affordable, and no academy built or already queued (only ever one).
        if self.can_afford(UnitTypeId.GHOSTACADEMY) and not self.structures(UnitTypeId.GHOSTACADEMY) and not self.already_pending(UnitTypeId.GHOSTACADEMY):
            # Tech-tree prerequisite: a depot (raised or lowered) AND a
            # barracks (grounded or flying).
            # BUG FIX: the original read
            #   self.structures(UnitTypeId.BARRACKS or self.structures(UnitTypeId.BARRACKSFLYING))
            # — the `or` was *inside* the call, so it always evaluated to
            # self.structures(UnitTypeId.BARRACKS) and flying barracks were
            # silently ignored. Parenthesized to match buildArmory_mask.
            if (self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.SUPPLYDEPOTLOWERED)) and \
                    (self.structures(UnitTypeId.BARRACKS) or self.structures(UnitTypeId.BARRACKSFLYING)):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=8)
                            placement_position = await self.find_placement(UnitTypeId.GHOSTACADEMY, near=position_towards_map_center)
                            # find_placement may return None.
                            if placement_position:
                                return 1
    return 0
async def buildMissileturret_mask(self):
    """Return 1 if a missile turret could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): both len() terms count
        # MISSILETURRET, doubling the count (2*len <= 1 means zero turrets);
        # confirm intent.
        if self.can_afford(UnitTypeId.MISSILETURRET) and (len(self.structures(UnitTypeId.MISSILETURRET)) + len(self.structures(UnitTypeId.MISSILETURRET)) <= 1):
            # Tech-tree prerequisite: an engineering bay.
            if self.structures(UnitTypeId.ENGINEERINGBAY):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=10)
                            placement_position = await self.find_placement(UnitTypeId.MISSILETURRET, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildSensortower_mask(self):
    """Return 1 if a sensor tower could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): the cap mixes SENSORTOWER with
        # MISSILETURRET counts — looks like a copy-paste from
        # buildMissileturret_mask; confirm intent.
        if self.can_afford(UnitTypeId.SENSORTOWER) and (len(self.structures(UnitTypeId.SENSORTOWER)) + len(self.structures(UnitTypeId.MISSILETURRET)) <= 1):
            # Tech-tree prerequisite: an engineering bay.
            if self.structures(UnitTypeId.ENGINEERINGBAY):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=10)
                            placement_position = await self.find_placement(UnitTypeId.SENSORTOWER, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildBunker_mask(self):
    """Return 1 if a bunker could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): doubled BUNKER count (2*len <= 1
        # means zero bunkers); confirm intent.
        if self.can_afford(UnitTypeId.BUNKER) and (len(self.structures(UnitTypeId.BUNKER)) + len(self.structures(UnitTypeId.BUNKER)) <= 1):
            # Tech-tree prerequisite: a barracks.
            if self.structures(UnitTypeId.BARRACKS):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            # distance=12: push bunkers out toward the map center.
                            position_towards_map_center = cc.position.towards(map_center, distance=12)
                            placement_position = await self.find_placement(UnitTypeId.BUNKER, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildArmory_mask(self):
    """Return 1 if an armory could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): doubled ARMORY count (2*len <= 2
        # means at most one armory); confirm intent.
        if self.can_afford(UnitTypeId.ARMORY) and (len(self.structures(UnitTypeId.ARMORY)) + len(self.structures(UnitTypeId.ARMORY)) <= 2):
            # Tech-tree prerequisite: a barracks and a factory (either may be flying).
            if (self.structures(UnitTypeId.BARRACKS) or self.structures(UnitTypeId.BARRACKSFLYING)) and \
                    (self.structures(UnitTypeId.FACTORY) or self.structures(UnitTypeId.FACTORYFLYING)):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=9)
                            placement_position = await self.find_placement(UnitTypeId.ARMORY, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildFusioncore_mask(self):
    """Return 1 if a fusion core could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): doubled FUSIONCORE count
        # (2*len <= 1 means zero cores); confirm intent.
        if self.can_afford(UnitTypeId.FUSIONCORE) and (len(self.structures(UnitTypeId.FUSIONCORE)) + len(self.structures(UnitTypeId.FUSIONCORE)) <= 1):
            # Tech-tree prerequisite: depot, barracks, factory and starport
            # (each building may be in its flying/lowered variant).
            if (self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.SUPPLYDEPOTLOWERED)) and \
                    (self.structures(UnitTypeId.BARRACKS) or self.structures(UnitTypeId.BARRACKSFLYING)) and \
                    (self.structures(UnitTypeId.FACTORY) or self.structures(UnitTypeId.FACTORYFLYING)) and \
                    (self.structures(UnitTypeId.STARPORT) or self.structures(UnitTypeId.STARPORTFLYING)):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=9)
                            placement_position = await self.find_placement(UnitTypeId.FUSIONCORE, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildStarport_mask(self):
    """Return 1 if a starport could be started this step, else 0."""
    # NOTE(review): inverted-looking throttle — see buildSupplydepot_mask.
    if self.state.game_loop % BUILD_FREQUENCY:
        # Can we afford it? NOTE(review): doubled STARPORT count (2*len <= 2
        # means at most one starport); confirm intent.
        if self.can_afford(UnitTypeId.STARPORT) and (len(self.structures(UnitTypeId.STARPORT)) + len(self.structures(UnitTypeId.STARPORT)) <= 2):
            # Tech-tree prerequisite: depot, barracks and factory (each may
            # be in its flying/lowered variant).
            if (self.structures(UnitTypeId.SUPPLYDEPOT) or self.structures(UnitTypeId.SUPPLYDEPOTLOWERED)) and \
                    (self.structures(UnitTypeId.BARRACKS) or self.structures(UnitTypeId.BARRACKSFLYING)) and \
                    (self.structures(UnitTypeId.FACTORY) or self.structures(UnitTypeId.FACTORYFLYING)):
                CCs: Units = self.townhalls()
                # Is a command center still alive?
                if CCs:
                    worker_candidates = self.workers.filter(lambda worker: (worker.is_collecting or worker.is_idle) and worker.tag not in self.unit_tags_received_action)
                    # Any idle/collecting worker available?
                    if worker_candidates:
                        for cc in CCs:
                            map_center = self.game_info.map_center
                            position_towards_map_center = cc.position.towards(map_center, distance=8)
                            placement_position = await self.find_placement(UnitTypeId.STARPORT, near=position_towards_map_center)
                            # Placement_position can be None
                            # Is there a valid placement spot?
                            if placement_position:
                                return 1
    return 0
async def buildStarportReactor_mask(self):
    """Mask: 1 if some finished, add-on-less Starport could build a Reactor."""
    if self.state.game_loop % BUILD_FREQUENCY:
        if self.can_afford(UnitTypeId.STARPORTREACTOR):
            for sp in self.structures(UnitTypeId.STARPORT).ready:
                if sp.has_add_on:
                    continue
                footprint = points_to_build_addon(sp.position)
                if all(
                    self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
                    for p in footprint
                ):
                    return 1
    return 0
async def buildStarportTechlab_mask(self):
    """Mask: 1 if some finished, add-on-less Starport could build a Techlab."""
    if self.state.game_loop % BUILD_FREQUENCY:
        if self.can_afford(UnitTypeId.STARPORTTECHLAB):
            for sp in self.structures(UnitTypeId.STARPORT).ready:
                if sp.has_add_on:
                    continue
                footprint = points_to_build_addon(sp.position)
                if all(
                    self.in_map_bounds(p) and self.in_placement_grid(p) and self.in_pathing_grid(p)
                    for p in footprint
                ):
                    return 1
    return 0
async def liftStarport_mask(self):
    """Mask: 1 if at least one idle (grounded) Starport exists to lift."""
    starports = self.structures(UnitTypeId.STARPORT)
    return 1 if starports and starports.idle else 0
async def landAndReadyToBuildStarportAddOn_mask(self):
    """Mask: 1 if a flying, idle Starport can land where an add-on fits.

    NOTE(review): unlike the other ``*_mask`` helpers this one has a side
    effect -- it issues the LAND order itself before returning 1.  Confirm
    that is intentional for a function that otherwise only reports
    availability.
    """
    if self.structures(UnitTypeId.STARPORTFLYING):
        if self.structures(UnitTypeId.STARPORTFLYING).idle:
            # Require being able to afford either add-on variant.
            if self.can_afford(UnitTypeId.STARPORTREACTOR) and self.can_afford(UnitTypeId.STARPORTTECHLAB):
                for Starport in self.structures(UnitTypeId.STARPORTFLYING).idle:
                    # Candidate landing offsets, nearest-first by squared distance.
                    possible_land_positions_offset = sorted(
                        (Point2((x, y)) for x in range(-10, 10) for y in range(-10, 10)),
                        key=lambda point: point.x ** 2 + point.y ** 2,
                    )
                    offset_point: Point2 = Point2((-0.5, -0.5))
                    possible_land_positions = (Starport.position.rounded + offset_point + p for p in possible_land_positions_offset)
                    for target_land_position in possible_land_positions:
                        # Full footprint of the landed building plus its add-on.
                        land_and_addon_points: List[Point2] = land_positions(target_land_position)
                        if all(
                            self.in_map_bounds(land_pos) and self.in_placement_grid(land_pos) and self.in_pathing_grid(land_pos)
                            for land_pos in land_and_addon_points
                        ):
                            # Side effect: orders the landing immediately.
                            Starport(AbilityId.LAND, target_land_position)
                            return 1
    return 0
async def expand_mask(self):
    """Mask: 1 if a Command Center is affordable and expansion spots exist."""
    affordable = self.can_afford(UnitTypeId.COMMANDCENTER)
    return 1 if affordable and self.expansion_locations_list else 0
async def trainScv_mask(self):
    """Mask: 1 if an SCV could be queued on some idle townhall."""
    if self.can_afford(UnitTypeId.SCV):
        if self.supply_left >= 1 and len(self.units(UnitTypeId.SCV)) < 70:
            CCs: Units = self.townhalls()
            # Cap worker count at 22 per base.
            if CCs and len(self.units(UnitTypeId.SCV)) <= 22 * len(CCs):
                if any(cc.is_idle for cc in CCs):
                    return 1
    return 0
# Train Marines (at least one)
async def trainMarine_mask(self):
    """Mask: 1 if a Marine could be trained from a finished Barracks."""
    rax = self.structures(UnitTypeId.BARRACKS)
    if rax and rax.ready:
        if self.can_afford(UnitTypeId.MARINE) and self.supply_left >= 1:
            return 1
    return 0
async def trainMarauder_mask(self):
    """Mask: 1 if a Marauder could be trained (techlab Barracks needed)."""
    if self.structures(UnitTypeId.BARRACKS):
        with_techlab = self.structures(UnitTypeId.BARRACKS).ready.filter(
            lambda b: b.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.MARAUDER) and self.supply_left >= 2:
            return 1
    return 0
async def trainGhost_mask(self):
    """Mask: 1 if a Ghost could be trained, else 0.

    Needs a ready techlab Barracks, a Ghost Academy, resources and supply.
    """
    if self.structures(UnitTypeId.BARRACKS) and self.structures(UnitTypeId.GHOSTACADEMY):
        barracks_ready = self.structures(UnitTypeId.BARRACKS).ready
        barracks_techlab_ready = barracks_ready.filter(lambda unit: unit.has_techlab == True)
        if barracks_techlab_ready:
            # Bug fix: affordability was checked against MARAUDER (copy/paste);
            # it must be checked against the unit actually trained, GHOST.
            if self.can_afford(UnitTypeId.GHOST):
                if self.supply_left >= 2:
                    return 1
    return 0
# Train Hellions (at least one)
async def trainHellion_mask(self):
    """Mask: 1 if a Hellion could be trained from a finished Factory."""
    factories = self.structures(UnitTypeId.FACTORY)
    if factories and factories.ready:
        if self.can_afford(UnitTypeId.HELLION) and self.supply_left >= 2:
            return 1
    return 0
async def trainViking_mask(self):
    """Mask: 1 if a Viking could be trained (techlab Starport required here)."""
    if self.structures(UnitTypeId.STARPORT):
        with_techlab = self.structures(UnitTypeId.STARPORT).ready.filter(
            lambda s: s.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.VIKINGFIGHTER) and self.supply_left >= 2:
            return 1
    return 0
async def trainThor_mask(self):
    """Mask: 1 if a Thor could be trained (techlab Factory + Armory)."""
    if self.structures(UnitTypeId.FACTORY) and self.structures(UnitTypeId.ARMORY):
        with_techlab = self.structures(UnitTypeId.FACTORY).ready.filter(
            lambda f: f.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.THOR) and self.supply_left >= 2:
            return 1
    return 0
async def trainRaven_mask(self):
    """Mask: 1 if a Raven could be trained (techlab Starport)."""
    if self.structures(UnitTypeId.STARPORT):
        with_techlab = self.structures(UnitTypeId.STARPORT).ready.filter(
            lambda s: s.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.RAVEN) and self.supply_left >= 2:
            return 1
    return 0
async def trainMedivac_mask(self):
    """Mask: 1 if a Medivac could be trained (techlab Starport required here)."""
    if self.structures(UnitTypeId.STARPORT):
        with_techlab = self.structures(UnitTypeId.STARPORT).ready.filter(
            lambda s: s.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.MEDIVAC) and self.supply_left >= 2:
            return 1
    return 0
async def trainWidowmine_mask(self):
    """Mask: 1 if a Widow Mine could be trained from a finished Factory."""
    factories = self.structures(UnitTypeId.FACTORY)
    if factories and factories.ready:
        if self.can_afford(UnitTypeId.WIDOWMINE) and self.supply_left >= 2:
            return 1
    return 0
async def trainBanshee_mask(self):
    """Mask: 1 if a Banshee could be trained (techlab Starport)."""
    if self.structures(UnitTypeId.STARPORT):
        with_techlab = self.structures(UnitTypeId.STARPORT).ready.filter(
            lambda s: s.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.BANSHEE) and self.supply_left >= 3:
            return 1
    return 0
async def trainLiberator_mask(self):
    """Mask: 1 if a Liberator could be trained, else 0.

    Needs a ready techlab Starport, resources and 3 free supply.
    """
    if self.structures(UnitTypeId.STARPORT):
        # Bug fix: readiness was queried on UnitTypeId.LIBERATOR (a unit,
        # not a production structure); it must inspect the Starports, as in
        # every sibling train mask.
        starport_ready = self.structures(UnitTypeId.STARPORT).ready
        starport_techlab_ready = starport_ready.filter(lambda unit: unit.has_techlab == True)
        if starport_techlab_ready:
            if self.can_afford(UnitTypeId.LIBERATOR):
                if self.supply_left >= 3:
                    return 1
    return 0
async def trainCyclone_mask(self):
    """Mask: 1 if a Cyclone could be trained (techlab Factory)."""
    if self.structures(UnitTypeId.FACTORY):
        with_techlab = self.structures(UnitTypeId.FACTORY).ready.filter(
            lambda f: f.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.CYCLONE) and self.supply_left >= 3:
            return 1
    return 0
async def trainSiegetank_mask(self):
    """Mask: 1 if a Siege Tank could be trained (techlab Factory)."""
    if self.structures(UnitTypeId.FACTORY):
        with_techlab = self.structures(UnitTypeId.FACTORY).ready.filter(
            lambda f: f.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.SIEGETANK) and self.supply_left >= 3:
            return 1
    return 0
async def trainBattlecruiser_mask(self):
    """Mask: 1 if a Battlecruiser could be trained (techlab Starport + Fusion Core)."""
    if self.structures(UnitTypeId.STARPORT) and self.structures(UnitTypeId.FUSIONCORE):
        with_techlab = self.structures(UnitTypeId.STARPORT).ready.filter(
            lambda s: s.has_techlab == True
        )
        if with_techlab and self.can_afford(UnitTypeId.BATTLECRUISER) and self.supply_left >= 6:
            return 1
    return 0
async def upgradeCombatShield_mask(self):
    """Mask: 1 if Combat Shield could be researched on an idle Barracks Techlab."""
    for lab in self.structures(UnitTypeId.BARRACKSTECHLAB).idle.ready:
        # can_cast is awaited first, matching the original evaluation order.
        if await self.can_cast(lab, AbilityId.RESEARCH_COMBATSHIELD) and self.research_combatshield == 0:
            return 1
    return 0
async def upgradeConcussiveshells_mask(self):
    """Mask: 1 if Concussive Shells could be researched now, else 0."""
    if self.structures(UnitTypeId.BARRACKSTECHLAB).idle.ready:
        for barrackstechlab in self.structures(UnitTypeId.BARRACKSTECHLAB).idle.ready:
            # NOTE(review): ``self.eConcussiveshells`` looks like a typo of a
            # ``research_concussiveshells``-style flag (cf. the Combat Shield
            # mask above) -- confirm this attribute exists on the bot.
            if await self.can_cast(barrackstechlab, AbilityId.RESEARCH_CONCUSSIVESHELLS) and self.eConcussiveshells == 0:
                return 1
    return 0
async def upgradeInfantryWeaponsLevel1_mask(self):
    """Mask: 1 if Infantry Weapons L1 could be researched now."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL1)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYWEAPONSLEVEL1) == 0
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def upgradeInfantryArmorLevel1_mask(self):
    """Mask: 1 if Infantry Armor L1 could be researched now."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL1)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYARMORSLEVEL1) == 0
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def upgradeInfantryWeaponsLevel2_mask(self):
    """Mask: 1 if Infantry Weapons L2 could be researched now (Armory needed)."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL2)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYWEAPONSLEVEL2) == 0
        and self.structures(UnitTypeId.ARMORY)
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def upgradeInfantryArmorLevel2_mask(self):
    """Mask: 1 if Infantry Armor L2 could be researched now (Armory needed)."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL2)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYARMORSLEVEL2) == 0
        and self.structures(UnitTypeId.ARMORY)
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def upgradeInfantryWeaponsLevel3_mask(self):
    """Mask: 1 if Infantry Weapons L3 could be researched now (Armory needed)."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYWEAPONSLEVEL3)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYWEAPONSLEVEL3) == 0
        and self.structures(UnitTypeId.ARMORY)
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def upgradeInfantryArmorLevel3_mask(self):
    """Mask: 1 if Infantry Armor L3 could be researched now (Armory needed)."""
    if (
        self.can_afford(AbilityId.ENGINEERINGBAYRESEARCH_TERRANINFANTRYARMORLEVEL3)
        and self.already_pending_upgrade(UpgradeId.TERRANINFANTRYARMORSLEVEL3) == 0
        and self.structures(UnitTypeId.ARMORY)
        and self.structures(UnitTypeId.ENGINEERINGBAY).idle.ready
    ):
        return 1
    return 0
async def scvBackToWork_mask(self):
    """Mask: 1 on distribute ticks when any worker sits idle."""
    on_tick = self.state.game_loop % DISTRIBUTE_FREQUENCY == 0
    return 1 if on_tick and self.workers.idle else 0
async def detectionAndAttack_mask(self):
    """Mask: 1 on detection ticks with an army but no visible enemy."""
    if self.state.game_loop % DETECTION_FREQUENCY == 0:
        if (
            self.mineral_field
            and not self.enemy_structures
            and not self.enemy_units
            and self.supply_army > 0
        ):
            return 1
    return 0
async def massNearEnemyBase_mask(self):
    """Mask: 1 on attack ticks with known enemy structures and army > 30."""
    on_tick = self.state.game_loop % ATTACK_FREQUENCY == 0
    if on_tick and self.enemy_structures and self.supply_army > 30:
        return 1
    return 0
async def massNearBase_mask(self):
    """Mask: 1 on attack ticks with a surviving townhall and army > 30."""
    on_tick = self.state.game_loop % ATTACK_FREQUENCY == 0
    if on_tick and self.townhalls() and self.supply_army > 30:
        return 1
    return 0
async def retreat_mask(self):
    """Mask: 1 when a townhall remains but the army is small (< 10 supply)."""
    return 1 if self.townhalls() and self.supply_army < 10 else 0
async def defence_mask(self):
    """Mask: 1 when an enemy unit is within 10 of any of our structures."""
    if self.supply_army > 0 and self.structures and self.enemy_units:
        # Probe distance against an arbitrary visible enemy unit, exactly
        # as the original did via a generator + next().
        intruder = next(iter(self.enemy_units), None)
        if self.structures.closest_distance_to(intruder) < 10:
            return 1
    return 0
async def attackEnemySquad_mask(self):
    """Mask: 1 on attack ticks with visible enemy units and army > 30."""
    if self.state.game_loop % ATTACK_FREQUENCY == 0:
        if self.supply_army > 30 and self.enemy_units:
            return 1
    return 0
async def attackNearestBase_mask(self):
    """Mask: 1 on attack ticks with known enemy structures, surviving own structures and army > 30."""
    if self.state.game_loop % ATTACK_FREQUENCY == 0:
        if self.enemy_structures and self.structures and self.supply_army > 30:
            return 1
    return 0
async def getMask(self):
    """Return the availability mask: one 0/1 entry per entry of economic_action.

    doNothing is always available (1); every other known action is scored by
    its dedicated ``*_mask`` helper.  Unknown actions contribute no entry,
    matching the original if-chain behaviour.
    """
    mask_fn_by_action = {
        buildSupplydepot: buildSupplydepot_mask,
        buildBarracksReactor: buildBarracksReactor_mask,
        buildBarracksTechlab: buildBarracksTechlab_mask,
        buildBarracks: buildBarracks_mask,
        liftBarracks: liftBarracks_mask,
        landAndReadyToBuildBarracksAddOn: landAndReadyToBuildBarracksAddOn_mask,
        buildEngineeringbay: buildEngineeringbay_mask,
        buildRefinery: buildRefinery_mask,
        buildFactoryReactor: buildFactoryReactor_mask,
        buildFactoryTechlab: buildFactoryTechlab_mask,
        buildFactory: buildFactory_mask,
        liftFactory: liftFactory_mask,
        landAndReadyToBuildFactoryAddOn: landAndReadyToBuildFactoryAddOn_mask,
        buildGhostAcademy: buildGhostAcademy_mask,
        buildMissileturret: buildMissileturret_mask,
        buildSensortower: buildSensortower_mask,
        buildBunker: buildBunker_mask,
        buildArmory: buildArmory_mask,
        buildFusioncore: buildFusioncore_mask,
        buildStarport: buildStarport_mask,
        buildStarportReactor: buildStarportReactor_mask,
        buildStarportTechlab: buildStarportTechlab_mask,
        liftStarport: liftStarport_mask,
        landAndReadyToBuildStarportAddOn: landAndReadyToBuildStarportAddOn_mask,
        expand: expand_mask,
        trainScv: trainScv_mask,
        trainMarine: trainMarine_mask,
        trainHellion: trainHellion_mask,
        trainMarauder: trainMarauder_mask,
        trainGhost: trainGhost_mask,
        trainViking: trainViking_mask,
        trainBanshee: trainBanshee_mask,
        trainThor: trainThor_mask,
        trainRaven: trainRaven_mask,
        trainMedivac: trainMedivac_mask,
        trainWidowmine: trainWidowmine_mask,
        trainCyclone: trainCyclone_mask,
        trainSiegetank: trainSiegetank_mask,
        trainBattlecruiser: trainBattlecruiser_mask,
        trainLiberator: trainLiberator_mask,
        upgradeCombatShield: upgradeCombatShield_mask,
        upgradeConcussiveshells: upgradeConcussiveshells_mask,
        upgradeInfantryWeaponsLevel1: upgradeInfantryWeaponsLevel1_mask,
        upgradeInfantryArmorLevel1: upgradeInfantryArmorLevel1_mask,
        upgradeInfantryWeaponsLevel2: upgradeInfantryWeaponsLevel2_mask,
        upgradeInfantryArmorLevel2: upgradeInfantryArmorLevel2_mask,
        upgradeInfantryWeaponsLevel3: upgradeInfantryWeaponsLevel3_mask,
        upgradeInfantryArmorLevel3: upgradeInfantryArmorLevel3_mask,
        scvBackToWork: scvBackToWork_mask,
        detectionAndAttack: detectionAndAttack_mask,
        massNearEnemyBase: massNearEnemyBase_mask,
        massNearBase: massNearBase_mask,
        retreat: retreat_mask,
        defence: defence_mask,
        attackEnemySquad: attackEnemySquad_mask,
        attackNearestBase: attackNearestBase_mask,
    }
    mask = []
    for action in economic_action:
        if action == doNothing:
            mask.append(1)  # doNothing is always available
            continue
        mask_fn = mask_fn_by_action.get(action)
        if mask_fn is not None:
            mask.append(await mask_fn(self))
    return mask
| 45.847875
| 198
| 0.617937
|
4a129e2ad433ea94283c1a44d95eb1df80aabe60
| 14,309
|
py
|
Python
|
python/GafferOSLTest/OSLImageTest.py
|
mattigruener/gaffer
|
8216ba1a884712575a0acae747c51b02f7a99a5d
|
[
"BSD-3-Clause"
] | 1
|
2019-08-02T16:49:59.000Z
|
2019-08-02T16:49:59.000Z
|
python/GafferOSLTest/OSLImageTest.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | 2
|
2017-08-23T21:35:45.000Z
|
2018-01-29T08:59:33.000Z
|
python/GafferOSLTest/OSLImageTest.py
|
rkoschmitzky/gaffer
|
ec6262ae1292767bdeb9520d1447d65a4a511884
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2013-2015, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferScene
import GafferOSL
import GafferOSLTest
class OSLImageTest( GafferOSLTest.OSLTestCase ) :
def test( self ) :
getRed = GafferOSL.OSLShader()
getRed.loadShader( "ImageProcessing/InChannel" )
getRed["parameters"]["channelName"].setValue( "R" )
getGreen = GafferOSL.OSLShader()
getGreen.loadShader( "ImageProcessing/InChannel" )
getGreen["parameters"]["channelName"].setValue( "G" )
getBlue = GafferOSL.OSLShader()
getBlue.loadShader( "ImageProcessing/InChannel" )
getBlue["parameters"]["channelName"].setValue( "B" )
buildColor = GafferOSL.OSLShader()
buildColor.loadShader( "Utility/BuildColor" )
buildColor["parameters"]["r"].setInput( getBlue["out"]["channelValue"] )
buildColor["parameters"]["g"].setInput( getGreen["out"]["channelValue"] )
buildColor["parameters"]["b"].setInput( getRed["out"]["channelValue"] )
outRGB = GafferOSL.OSLShader()
outRGB.loadShader( "ImageProcessing/OutLayer" )
outRGB["parameters"]["layerColor"].setInput( buildColor["out"]["c"] )
imageShader = GafferOSL.OSLShader()
imageShader.loadShader( "ImageProcessing/OutImage" )
imageShader["parameters"]["in0"].setInput( outRGB["out"]["layer"] )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )
image = GafferOSL.OSLImage()
image["in"].setInput( reader["out"] )
# we haven't connected the shader yet, so the node should act as a pass through
self.assertEqual( image["out"].image(), reader["out"].image() )
self.assertEqual( image["out"].imageHash(), reader["out"].imageHash() )
# that should all change when we hook up a shader
cs = GafferTest.CapturingSlot( image.plugDirtiedSignal() )
image["shader"].setInput( imageShader["out"] )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( image["shader"] ) )
self.assertTrue( cs[1][0].isSame( image["__shading"] ) )
self.assertTrue( cs[2][0].isSame( image["out"]["channelNames"] ) )
self.assertTrue( cs[3][0].isSame( image["out"]["channelData"] ) )
self.assertTrue( cs[4][0].isSame( image["out"] ) )
inputImage = reader["out"].image()
outputImage = image["out"].image()
self.assertNotEqual( inputImage, outputImage )
self.assertEqual( outputImage["R"], inputImage["B"] )
self.assertEqual( outputImage["G"], inputImage["G"] )
self.assertEqual( outputImage["B"], inputImage["R"] )
# changes in the shader network should signal more dirtiness
del cs[:]
getGreen["parameters"]["channelName"].setValue( "R" )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( image["shader"] ) )
self.assertTrue( cs[1][0].isSame( image["__shading"] ) )
self.assertTrue( cs[2][0].isSame( image["out"]["channelNames"] ) )
self.assertTrue( cs[3][0].isSame( image["out"]["channelData"] ) )
self.assertTrue( cs[4][0].isSame( image["out"] ) )
del cs[:]
buildColor["parameters"]["r"].setInput( getRed["out"]["channelValue"] )
self.assertEqual( len( cs ), 5 )
self.assertTrue( cs[0][0].isSame( image["shader"] ) )
self.assertTrue( cs[1][0].isSame( image["__shading"] ) )
self.assertTrue( cs[2][0].isSame( image["out"]["channelNames"] ) )
self.assertTrue( cs[3][0].isSame( image["out"]["channelData"] ) )
self.assertTrue( cs[4][0].isSame( image["out"] ) )
inputImage = reader["out"].image()
outputImage = image["out"].image()
self.assertEqual( outputImage["R"], inputImage["R"] )
self.assertEqual( outputImage["G"], inputImage["R"] )
self.assertEqual( outputImage["B"], inputImage["R"] )
def testOnlyAcceptsSurfaceShaders( self ) :
    """The shader plug must reject non-image shaders and accept OutImage."""

    node = GafferOSL.OSLImage()
    shader = GafferOSL.OSLShader()

    # A point-processing shader is not a valid image shader.
    shader.loadShader( "ObjectProcessing/OutPoint" )
    self.assertFalse( node["shader"].acceptsInput( shader["out"] ) )

    # An image shader is accepted.
    shader.loadShader( "ImageProcessing/OutImage" )
    self.assertTrue( node["shader"].acceptsInput( shader["out"] ) )
def testAcceptsNone( self ) :
    """Disconnecting (None input) must always be allowed on the shader plug."""

    node = GafferOSL.OSLImage()
    self.assertTrue( node["shader"].acceptsInput( None ) )
def testAcceptsShaderSwitch( self ) :
script = Gaffer.ScriptNode()
script["image"] = GafferOSL.OSLImage()
script["switch"] = GafferScene.ShaderSwitch()
# We're testing a backwards compatibility special case that is
# only enabled when loading a script, hence the use of `execute()`.
script.execute( """script["image"]["shader"].setInput( script["switch"]["out"] )""" )
self.assertTrue( script["image"]["shader"].getInput().isSame( script["switch"]["out"] ) )
def testAcceptsDot( self ) :
script = Gaffer.ScriptNode()
script["image"] = GafferOSL.OSLImage()
script["switch"] = GafferScene.ShaderSwitch()
script["dot"] = Gaffer.Dot()
script["dot"].setup( script["switch"]["out"] )
# We're testing a backwards compatibility special case that is
# only enabled when loading a script, hence the use of `execute()`.
script.execute( """script["image"]["shader"].setInput( script["dot"]["out"] )""" )
self.assertTrue( script["image"]["shader"].getInput().isSame( script["dot"]["out"] ) )
def testChannelWithZeroValue( self ) :
outR = GafferOSL.OSLShader()
outR.loadShader( "ImageProcessing/OutChannel" )
outR["parameters"]["channelName"].setValue( "R" )
outR["parameters"]["channelValue"].setValue( 0 )
imageShader = GafferOSL.OSLShader()
imageShader.loadShader( "ImageProcessing/OutImage" )
imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )
image = GafferOSL.OSLImage()
image["in"].setInput( reader["out"] )
image["shader"].setInput( imageShader["out"] )
inputImage = reader["out"].image()
outputImage = image["out"].image()
self.assertEqual( outputImage["R"], IECore.FloatVectorData( [ 0 ] * inputImage["R"].size() ) )
self.assertEqual( outputImage["G"], inputImage["G"] )
self.assertEqual( outputImage["B"], inputImage["B"] )
def testPassThrough( self ) :
outR = GafferOSL.OSLShader()
outR.loadShader( "ImageProcessing/OutChannel" )
outR["parameters"]["channelName"].setValue( "R" )
outR["parameters"]["channelValue"].setValue( 0 )
imageShader = GafferOSL.OSLShader()
imageShader.loadShader( "ImageProcessing/OutImage" )
imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" ) )
image = GafferOSL.OSLImage()
image["in"].setInput( reader["out"] )
image["shader"].setInput( imageShader["out"] )
self.assertEqual( image["out"]["format"].hash(), reader["out"]["format"].hash() )
self.assertEqual( image["out"]["dataWindow"].hash(), reader["out"]["dataWindow"].hash() )
self.assertEqual( image["out"]["metadata"].hash(), reader["out"]["metadata"].hash() )
self.assertEqual( image["out"]["format"].getValue(), reader["out"]["format"].getValue() )
self.assertEqual( image["out"]["dataWindow"].getValue(), reader["out"]["dataWindow"].getValue() )
self.assertEqual( image["out"]["metadata"].getValue(), reader["out"]["metadata"].getValue() )
def testReferencePromotedPlug( self ) :
s = Gaffer.ScriptNode()
s["b"] = Gaffer.Box()
s["b"]["i"] = GafferOSL.OSLImage()
p = Gaffer.PlugAlgo.promote( s["b"]["i"]["shader"] )
p.setName( "p" )
s["b"].exportForReference( self.temporaryDirectory() + "/test.grf" )
s["r"] = Gaffer.Reference()
s["r"].load( self.temporaryDirectory() + "/test.grf" )
s["s"] = GafferOSL.OSLShader()
s["s"].loadShader( "ImageProcessing/OutImage" )
s["r"]["p"].setInput( s["s"]["out"] )
def testDirtyPropagation( self ) :
    """Changing an upstream plug must dirty OSLImage's channelData output."""
    c = GafferImage.Constant()
    o = GafferOSL.OSLImage()
    o["in"].setInput( c["out"] )
    # Capture dirty notifications, then touch an input value.
    cs = GafferTest.CapturingSlot( o.plugDirtiedSignal() )
    c["color"]["r"].setValue( 1 )
    # channelData must appear among the dirtied plugs.
    self.assertTrue( o["out"]["channelData"] in set( x[0] for x in cs ) )
def testNegativeTileCoordinates( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -128 ), imath.V2i( 128 ) ) ) )
outR = GafferOSL.OSLShader()
outR.loadShader( "ImageProcessing/OutChannel" )
outR["parameters"]["channelName"].setValue( "R" )
outR["parameters"]["channelValue"].setValue( 1 )
imageShader = GafferOSL.OSLShader()
imageShader.loadShader( "ImageProcessing/OutImage" )
imageShader["parameters"]["in0"].setInput( outR["out"]["channel"] )
image = GafferOSL.OSLImage()
image["in"].setInput( constant["out"] )
image["shader"].setInput( imageShader["out"] )
sampler = GafferImage.Sampler( image["out"], "R", image["out"]["dataWindow"].getValue() )
for y in range( -128, 128 ) :
for x in range( -128, 128 ) :
self.assertEqual( sampler.sample( x, y ), 1, "Pixel {},{}".format( x, y ) )
def testGlobals( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( -10 ), imath.V2i( 10 ) ) ) )
globals = GafferOSL.OSLShader()
globals.loadShader( "Utility/Globals" )
outP = GafferOSL.OSLShader()
outP.loadShader( "ImageProcessing/OutLayer" )
outP["parameters"]["layerColor"].setInput( globals["out"]["globalP"] )
outU = GafferOSL.OSLShader()
outU.loadShader( "ImageProcessing/OutChannel" )
outU["parameters"]["channelName"].setValue( "u" )
outU["parameters"]["channelValue"].setInput( globals["out"]["globalU"] )
outV = GafferOSL.OSLShader()
outV.loadShader( "ImageProcessing/OutChannel" )
outV["parameters"]["channelName"].setValue( "v" )
outV["parameters"]["channelValue"].setInput( globals["out"]["globalV"] )
imageShader = GafferOSL.OSLShader()
imageShader.loadShader( "ImageProcessing/OutImage" )
imageShader["parameters"]["in0"].setInput( outP["out"]["layer"] )
imageShader["parameters"]["in1"].setInput( outU["out"]["channel"] )
imageShader["parameters"]["in2"].setInput( outV["out"]["channel"] )
image = GafferOSL.OSLImage()
image["in"].setInput( constant["out"] )
image["shader"].setInput( imageShader["out"] )
displayWindow = image["out"]["format"].getValue().getDisplayWindow()
samplerR = GafferImage.Sampler( image["out"], "R", displayWindow )
samplerG = GafferImage.Sampler( image["out"], "G", displayWindow )
samplerB = GafferImage.Sampler( image["out"], "B", displayWindow )
samplerU = GafferImage.Sampler( image["out"], "u", displayWindow )
samplerV = GafferImage.Sampler( image["out"], "v", displayWindow )
size = imath.V2f( displayWindow.size() )
uvStep = imath.V2f( 1.0 ) / size
uvMin = 0.5 * uvStep
for y in range( displayWindow.min().y, displayWindow.max().y ) :
for x in range( displayWindow.min().x, displayWindow.max().x ) :
self.assertEqual( samplerR.sample( x, y ), x + 0.5, "Pixel {},{}".format( x, y ) )
self.assertEqual( samplerG.sample( x, y ), y + 0.5, "Pixel {},{}".format( x, y ) )
self.assertEqual( samplerB.sample( x, y ), 0, "Pixel {},{}".format( x, y ) )
uv = uvMin + uvStep * imath.V2f( imath.V2i( x, y ) - displayWindow.min() )
self.assertAlmostEqual( samplerU.sample( x, y ), uv.x, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
self.assertAlmostEqual( samplerV.sample( x, y ), uv.y, delta = 0.0000001, msg = "Pixel {},{}".format( x, y ) )
def testTextureOrientation( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( 32, 32 ) )
textureFileName = os.path.dirname( __file__ ) + "/images/vRamp.tx"
outLayer = GafferOSL.OSLCode()
outLayer["out"]["layer"] = GafferOSL.ClosurePlug(
direction = Gaffer.Plug.Direction.Out,
flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic
)
outLayer["code"].setValue( 'layer = outLayer( "", texture( "{}", u, v ) )'.format( textureFileName ) )
outImage = GafferOSL.OSLShader()
outImage.loadShader( "ImageProcessing/OutImage" )
outImage["parameters"]["in0"].setInput( outLayer["out"]["layer"] )
oslImage = GafferOSL.OSLImage()
oslImage["in"].setInput( constant["out"] )
oslImage["shader"].setInput( outImage["out"] )
sampler = GafferImage.Sampler( oslImage["out"], "R", oslImage["out"]["dataWindow"].getValue() )
for y in range( 0, 31 ) :
self.assertAlmostEqual( sampler.sample( 5, y ), (y + 0.5) / 32.0, delta = 0.02 )
if __name__ == "__main__":
    # Bug fix: ``unittest`` was referenced here without being imported
    # anywhere in this file; import it locally so running the module works.
    import unittest

    unittest.main()
| 38.883152
| 115
| 0.675938
|
4a129f458bf807577a03c99d4ad79526435062c0
| 31,883
|
py
|
Python
|
src/cogent3/format/table.py
|
jamesmartini/cogent3
|
5d0aab1871561aa3d4cd6b629be6cc7a23f15c49
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/format/table.py
|
jamesmartini/cogent3
|
5d0aab1871561aa3d4cd6b629be6cc7a23f15c49
|
[
"BSD-3-Clause"
] | null | null | null |
src/cogent3/format/table.py
|
jamesmartini/cogent3
|
5d0aab1871561aa3d4cd6b629be6cc7a23f15c49
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""
Tool for creating tables and representing them as text, or writing to file for
import into other packages. These classes still under development.
Current formats include restructured text (keyed by 'rest'), latex, html,
columns separated by a provided string, and a simple text format.
"""
import re
import textwrap
from xml.sax.saxutils import escape
import numpy
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Gavin Huttley", "Peter Maxwell", "Matthew Wakefield", "Jeremy Widmann"]
__license__ = "BSD-3"
__version__ = "2021.5.7a"
__maintainer__ = "Gavin Huttley"
__email__ = "gavin.huttley@anu.edu.au"
__status__ = "Production"
known_formats = (
"bedgraph",
"phylip",
"rest",
"rst",
"markdown",
"md",
"latex",
"tex",
"html",
"simple",
"csv",
"tsv",
)
css_c3table_template = "\n".join(
(
".c3table table {margin: 10px 0;}",
".c3table tr:last-child {border-bottom: 1px solid #000;} ",
".c3table tr > th {text-align: left; padding: 0 5px;}",
".c3table tr > td {text-align: left; padding: 5px;}",
".c3table tr:nth-child(even) {background: #f7f7f7 !important;}",
".c3table .ellipsis {background: rgba(0, 0, 0, .01);}",
".c3table .index {background: %(colour)s; margin: 10px; font-weight: 600;}",
".c3table .head_cell {background: %(head_colour)s; font-weight: bold; text-align: center;}",
".c3table caption {color: rgb(250, 250, 250); background: "
"rgba(30, 140, 200, 1); padding: 3px; white-space: nowrap; "
"caption-side: top;}",
".c3table .cell_title {font-weight: bold;}",
".c3col_left { text-align: left !important; display: block;}",
".c3col_right { text-align: right !important; display: block;}",
".c3col_center { text-align: center !important; display: block;}",
)
)
def _merged_cell_text_wrap(text, max_line_length, space):
"""left justify wraps text into multiple rows"""
max_line_width = max_line_length - (2 * space)
if len(text) < max_line_length:
return [text]
buffer = " " * space
wrapped = textwrap.wrap(
text, width=max_line_width, initial_indent=buffer, subsequent_indent=buffer
)
wrapped = ["%s" % line.ljust(max_line_width + 2 * space) for line in wrapped]
return wrapped
def _merge_cells(row):
"""merges runs of identical row cells.
returns a list with structure [((span_start, span_end), cell value),..]"""
new_row = []
last = 0
span = 1 # the minimum
for i in range(1, len(row), 1):
if row[i - 1] != row[i]:
new_row.append(((last, last + span), row[i - 1]))
last = i
span = 1
continue
span += 1
new_row.append(((last, last + span), row[i]))
return new_row
def rich_html(
    rows,
    row_cell_func=None,
    header=None,
    header_cell_func=None,
    element_formatters=None,
    merge_identical=True,
    compact=True,
    caption=None,
):
    """returns just the html Table string

    Parameters
    ----------
    rows
        table rows
    row_cell_func
        callback function that formats the row values. Must
        take the row value and coordinates (row index, column index).
    header
        the table header
    header_cell_func
        callback function that formats the column headings
        must take the header label value and coordinate
    element_formatters
        a dictionary of specific callback funcs for
        formatting individual html table elements.
        e.g. {'table': lambda x: '<table border="1" class="docutils">'}
    merge_identical
        cells within a row are merged to one span.
    compact
        if True, parts are joined without newlines
    caption
        Table title / legend

    Note: header_cell_func and row_cell_func override element_formatters.
    """
    element_formatters = element_formatters or {}
    formatted = element_formatters.get
    data = [formatted("table", "<table>")]
    if caption:
        # BUG FIX: the style attribute was previously malformed (stray
        # closing quote and misplaced semicolon); emit one well-formed
        # style attribute instead
        data.append(
            '<caption style="font-weight: bold; '
            'background: rgba(30, 140, 200, 1);" '
            f'align="top">{caption}</caption>'
        )
    if row_cell_func is None:

        def row_cell_func(v, r, c):
            return "<td>%s</td>" % v

    if header_cell_func is None:

        def header_cell_func(v, c):
            return "<th>%s</th>" % v

    # _merge_cells collapses runs of identical cells into one span
    row_iterator = _merge_cells if merge_identical else enumerate
    if header:
        thead = formatted("thead", '<thead style="font-weight: bold;">')
        row = [header_cell_func(escape(label), i) for i, label in enumerate(header)]
        data += [thead] + row + ["</thead>"]
    formatted_rows = []
    for ridx, row in enumerate(rows):
        new = [formatted("tr", "<tr>")]
        for cidx, cell in row_iterator(row):
            # cell text is escaped before the callback formats it
            new += [row_cell_func(escape(cell), ridx, cidx)]
        new += ["</tr>"]
        formatted_rows += new
    tbody = formatted("tbody", "<tbody>")
    data += [tbody] + formatted_rows + ["</tbody>"]
    data += ["</table>"]
    return "".join(data) if compact else "\n".join(data)
def latex(
    rows,
    header=None,
    caption=None,
    legend=None,
    justify=None,
    label=None,
    position=None,
):
    """Returns the text a LaTeX table.

    Parameters
    ----------
    rows
        table data in row orientation
    header
        table header; optional
    caption
        title text.
    legend
        If provided, the text is placed in a \\caption*{} command at the
        bottom of the table and the caption is placed at the top.
    justify
        column justification, default is right aligned.
    label
        for cross referencing
    position
        table page position, default is here, top separate page

    Notes
    -----
    The \\caption*{} command is provided with the caption package. See
    https://ctan.org/pkg/caption for more details.

    BUG FIX: previously a missing header left ``None`` inside the list of
    lines, so ``"\\n".join`` raised TypeError; the header block is now only
    emitted when a header is supplied.
    """
    if not justify:
        # fall back to right-aligning every column
        numcols = len(header) if header else len(rows[0])
        justify = "r" * numcols
    justify = "{ %s }" % " ".join(list(justify))
    if header:
        header = "%s \\\\" % " & ".join([r"\bf{%s}" % head.strip() for head in header])
    rows = ["%s \\\\" % " & ".join(row) for row in rows]
    position = position or "htp!"
    table_format = [
        r"\begin{table}[%s]" % position,
        r"\centering",
        r"\begin{tabular}%s" % justify,
        r"\hline",
    ]
    if header:
        # bold header separated from the body by a double rule
        table_format += [header, r"\hline", r"\hline"]
    table_format += rows
    table_format.append(r"\hline")
    table_format.append(r"\end{tabular}")
    caption = r"\caption{%s}" % caption if caption else ""
    label = r"\label{%s}" % label if label else ""
    legend = r"\caption*{%s}" % legend if isinstance(legend, str) else None
    if caption and label:
        caption = f"{caption}\n{label}"
    elif caption or label:
        caption = caption or label
    if caption and legend:
        # caption above the tabular (index 2), legend below
        table_format.insert(2, caption)
    elif caption:
        table_format.append(caption)
    if legend is not None:
        table_format.append(legend)
    table_format.append(r"\end{table}")
    return "\n".join(table_format)
def get_continuation_tables(
    header, formatted_table, identifiers=None, space=2, max_width=1e100
):
    """returns series of tables segmented to not exceed max_width

    Parameters
    ----------
    header
        series of column heading strings (already padded to column width)
    formatted_table
        rows of pre-formatted, equal-width strings
    identifiers
        truthy when the first column is an index column that must be
        repeated at the left of every continuation table
    space
        gap between columns; an int (number of spaces) or a separator string
    max_width
        maximum character width allowed for any sub-table

    Returns
    -------
    list of (header, rows) pairs, one per continuation table

    Raises
    ------
    RuntimeError
        if the first column alone exceeds max_width
    """
    tables = []
    try:
        space = " " * space
    except TypeError:
        # space was already provided as a string separator
        pass
    # if we are to split the table, creating sub tables, determine
    # the boundaries
    if len(space.join(header)) < max_width:
        # everything fits: a single table, no continuation needed
        return [(header, formatted_table)]
    # having determined the maximum string lengths we now need to
    # produce subtables of width <= max_width
    col_widths = [len(head) for head in header]
    sep = len(space)
    min_length = col_widths[0]
    if min_length > max_width:
        raise RuntimeError("Maximum width too small for identifiers")
    # if we have an index column, every new table block includes that width
    # in calculating the number of columns; otherwise it's simply the sum
    if identifiers:
        id_width = col_widths[0] + sep
        begin = 1
    else:
        id_width = 0
        begin = 0
    width = id_width
    boundaries = []
    for i in range(begin, len(header)):
        width += col_widths[i] + sep
        if width > max_width:
            # close the current block before column i and start a new one
            boundaries.append((begin, i))
            begin = i
            width = id_width + col_widths[i]
    boundaries.append((begin, len(header)))
    # map stripped column name -> that column's cell values
    data = {c[0].strip(): c[1:] for c in zip(header, *formatted_table)}
    for start, end in boundaries:
        if identifiers:
            # repeat the index column at the left of each sub-table
            subhead = header[:1] + header[start:end]
        else:
            subhead = header[start:end]
        # NOTE(review): dtype "<U15" truncates cell strings longer than 15
        # characters -- confirm this limit is intended
        rows = numpy.array([data[c.strip()] for c in subhead], dtype="<U15")
        if rows.ndim == 1:
            # a single data row collapses to a flat vector; re-wrap it
            rows = [rows.tolist()]
        else:
            rows = rows.T.tolist()
        tables.append((subhead, rows))
    return tables
def simple_format(
    header,
    formatted_table,
    title=None,
    legend=None,
    max_width=1e100,
    identifiers=None,
    borders=True,
    space=2,
):
    """Returns a table in a simple text format.

    Parameters
    ----------
    header
        series with column headings
    formatted_table
        a two dimensional structure (list/tuple) of strings
        previously formatted to the same width within a column.
    title
        optional table title
    legend
        optional table legend
    max_width
        forces wrapping of table onto successive lines if its'
        width exceeds that specified
    identifiers
        index for the column that uniquely identify rows. Required if table
        width exceeds max_width.
    borders
        whether to display borders.
    space
        minimum number of spaces between columns.
    """
    parts = []
    try:
        space = " " * space
    except TypeError:
        pass
    # segment into continuation tables when the full width exceeds max_width
    subtables = get_continuation_tables(
        header, formatted_table, identifiers, space, max_width
    )
    for num, (sub_head, sub_rows) in enumerate(subtables):
        heading = title if num == 0 else f"continued: {title}"
        if heading:
            parts.append(heading)
        head_line = space.join(sub_head)
        width = len(head_line)
        if borders:
            parts.extend(["=" * width, head_line, "-" * width])
        else:
            parts.append(head_line)
        body = "\n".join(space.join(cells) for cells in sub_rows)
        if body:
            parts.append(body)
        if borders:
            parts.append("-" * width)
        if len(subtables) > 1:
            # blank line between continuation tables
            parts.append("")
    # add the legend, wrapped to the table widths
    if legend:
        parts.extend(_merged_cell_text_wrap(legend, max_width, 0))
    return "\n".join(parts)
_pipe = re.compile(r"\|")
def _escape_pipes(formatted_table, header):
"""returns text with | replaced by \\|, adjusting column widths"""
resized = False
widths = list(map(len, formatted_table[0]))
num_rows = len(formatted_table)
num_cols = len(formatted_table[0])
for i in range(num_rows):
for j in range(num_cols):
cell = formatted_table[i][j]
if "|" in cell:
cell = _pipe.sub(r"\|", cell)
formatted_table[i][j] = cell
widths[j] = max(len(cell), widths[j])
resized = True
if resized:
for j in range(num_cols):
header[j] = header[j].center(widths[j])
for i in range(num_rows):
cell = formatted_table[i][j]
formatted_table[i][j] = cell.center(widths[j])
return formatted_table, header
def markdown(header, formatted_table, space=1, justify=None):
    """Returns a table in Markdown format

    Parameters
    ----------
    header
        series with column headings
    formatted_table
        a two dimensional structure (list/tuple) of strings
        previously formatted to the same width within a column.
    space
        number of spaces surrounding the cell contents, must be >= 1
    justify
        characters indicating alignment of columns; each must be one
        of 'l', 'c' or 'r' (case insensitive)

    Raises
    ------
    ValueError
        if a justify character is not one of 'l', 'c', 'r'.
    """
    assert space >= 1, "space must be >= 1"
    if justify is not None:
        assert len(justify) == len(
            header
        ), "column number and justify entries must match"
        justify = [c.lower() for c in justify]
    # escaping '|' may widen cells; _escape_pipes also re-pads the columns
    formatted_table, header = _escape_pipes(formatted_table, header)
    row_template = "| %s |"
    sep = "".join([" " * space, "|", " " * space])
    divider = ["-" * (len(c) + 2 * space) for c in header]
    if justify is not None:
        for i, d in enumerate(divider):
            # colons in the divider encode column alignment
            if justify[i] == "c":
                d = ":%s:" % d[:-2]
            elif justify[i] == "r":
                d = "%s:" % d[:-1]
            elif justify[i] == "l":
                d = ":%s" % d[:-1]
            else:
                # BUG FIX: message previously misspelt 'justfication'
                raise ValueError(
                    "invalid justification character '%s'" % justify[i]
                )
            divider[i] = d
    divider = "|%s|" % "|".join(divider)
    rows = [row_template % sep.join(header), divider] + [
        row_template % sep.join(r) for r in formatted_table
    ]
    return "\n".join(rows)
def rst_csv_table(header, formatted_table, title=None, legend=None):
    """Returns a table in restructured text csv-table format

    Parameters
    ----------
    header
        series of strings
    formatted_table
        formatted strings, row based
    title, legend
        combined in this format

    Returns
    -------
    str

    Notes
    -----
    We only support a subset of available attr, see
    https://docutils.sourceforge.io/docs/ref/rst/directives.html#csv-table
    """
    quoted_names = ", ".join('"%s"' % name for name in header)
    head_line = f" :header: {quoted_names}"
    body = "\n".join("  " + ", ".join(cells) for cells in formatted_table)
    # title and legend are joined on the directive line itself
    caption = ""
    if title or legend:
        if title:
            caption = f" {title}"
        if legend:
            caption = f"{caption} {legend}"
    return "\n".join([f".. csv-table::{caption}", head_line, "", body])
def grid_table_format(header, formatted_table, title=None, legend=None):
    """Returns a table in restructured text grid format.

    Parameters
    ----------
    header
        series with column headings
    formatted_table
        a two dimensional structure (list/tuple) of strings
        previously formatted to the same width within a column.
    title
        optional table title
    legend
        optional table legend
    """
    # indent used when wrapping an over-wide title/legend
    space = 2
    # make the delineators
    row_delineate = []
    heading_delineate = []
    col_widths = [len(col) for col in header]
    for width in col_widths:
        row_delineate.append("-" * width)
        heading_delineate.append("=" * width)
    row_delineate = "+-" + "-+-".join(row_delineate) + "-+"
    heading_delineate = "+=" + "=+=".join(heading_delineate) + "=+"
    # single full-width cell border used around title and legend rows
    contiguous_delineator = "+" + "-" * (len(row_delineate) - 2) + "+"
    table = []
    # insert the title
    if title:
        table.append(contiguous_delineator)
        if len(title) > len(row_delineate) - 2:
            # title wider than the table: wrap it over multiple rows
            wrapped = _merged_cell_text_wrap(
                title, len(contiguous_delineator) - 2, space
            )
            for wdex, line in enumerate(wrapped):
                wrapped[wdex] = "|" + line + "|"
            table += wrapped
        else:
            centered = title.center(len(row_delineate) - 2)
            table.append("|" + centered + "|")
    # insert the heading row
    table.append(row_delineate)
    table.append("| " + " | ".join(header) + " |")
    table.append(heading_delineate)
    # concatenate the rows, separating by delineators
    for row in formatted_table:
        table.append("| " + " | ".join(row) + " |")
        table.append(row_delineate)
    if legend:
        if len(legend) > len(row_delineate) - 2:
            # legend wider than the table: wrap like the title
            wrapped = _merged_cell_text_wrap(
                legend, len(contiguous_delineator) - 2, space
            )
            for wdex, line in enumerate(wrapped):
                wrapped[wdex] = "|" + line + "|"
            table += wrapped
        else:
            # left-justified, unlike the centered title
            ljust = legend.ljust(len(row_delineate) - 3)
            table.append("| " + ljust + "|")
        table.append(contiguous_delineator)
    return "\n".join(table)
def separator_format(header, formatted_table, title=None, legend=None, sep=None):
    """Returns a table with column entries separated by a delimiter. If an entry
    contains the sep character, that entry is put in quotes. Also, title and
    legends (if provided) are forced to a single line and all words forced to
    single spaces.

    Parameters
    ----------
    header
        series with column headings
    formatted_table
        a two dimensional structure (list/tuple) of strings
        previously formatted to the same width within a column;
        rows containing the separator are modified in place
    sep
        character to separate column entries (e.g. tab)
    title
        optional table title
    legend
        optional table legend

    Raises
    ------
    RuntimeError
        if no separator is provided
    """
    if sep is None:
        raise RuntimeError("no separator provided")

    def squash(text):
        # collapse newlines and runs of whitespace into single spaces
        return " ".join(" ".join(text.splitlines()).split())

    if title:
        title = squash(title)
    if legend:
        legend = squash(legend)
    # quote any cell that contains the separator (mutates rows in place,
    # matching the historical behaviour of this function)
    for row in formatted_table:
        for idx, cell in enumerate(row):
            if sep in cell:
                row[idx] = '"%s"' % cell
    lines = [sep.join(header)]
    lines.extend(sep.join(row) for row in formatted_table)
    result = "\n".join(lines)
    # add the title to top of list
    if title:
        result = "\n".join([title, result])
    if legend:
        result = "\n".join([result, legend])
    return result
def format_fields(formats):
    """Formats row fields by index.

    Parameters
    ----------
    formats
        a series consisting of index,formatter callable pairs,
        eg [(0, "'%s'"), (4, '%.4f')]. All non-specified columns are
        formatted as strings.

    Returns
    -------
    a callable that maps one row (sequence) to a list of formatted strings
    """

    def row_formatter(line):
        # start with plain string conversion for every column, then
        # overwrite the explicitly requested indices
        templates = ["%s"] * len(line)
        for index, template in formats:
            templates[index] = template
        return [templates[i] % line[i] for i in range(len(line))]

    return row_formatter
def separator_formatter(formatter=None, ignore=None, sep=","):
    """Returns a writer for a delimited tabular file. The writer has a
    has_header argument which ignores the formatter for a header line. Default
    format is string. Does not currently handle Titles or Legends.

    Parameters
    ----------
    formatter
        a callable that returns a correctly formatted line.
    ignore
        lines for which ignore returns True are ignored (currently unused)
    sep
        the delimiter separating fields.
    """
    formatter = formatter or []

    def writer(lines, formatter=formatter, has_header=False):
        # generator yielding one delimited string per input line
        if not formatter:
            # default: plain string conversion sized to the first line
            formatter = format_fields([(i, "%s") for i in range(len(lines[0]))])
        emitted_header = False
        for line in lines:
            if has_header and not emitted_header:
                emitted_header = True
                yield sep.join("%s" % field for field in line)
            else:
                yield sep.join(formatter(line))

    return writer
def formatted_cells(
    rows, header=None, digits=4, column_templates=None, missing_data="", center=False
):
    """Return rows with each columns cells formatted as an equal length
    string.

    Parameters
    ----------
    row
        the series of table rows
    header
        optional header; when omitted, empty headings are generated for
        the widest row
    digits
        number of decimal places. Can be overridden by following.
    column_templates
        specific format templates for each column; a template may be a
        callable taking the cell value, or a %-format string.
    missing_data
        default cell value.
    center
        center-justify cells when exactly True; any other value
        right-justifies

    Returns
    -------
    (new_header, matrix) where every cell within a column has equal width
    """
    if not header:
        num_col = max(len(row) for row in rows)
        header = [""] * num_col
    else:
        num_col = len(header)
    col_widths = [len(col) for col in header]
    column_templates = column_templates or {}
    float_template = "{0:.%df}" % digits
    # if we have column templates, we use those, otherwise we adaptively
    # apply str/num format
    matrix = []
    for row in rows:
        formatted = []
        for cdex, col_head in enumerate(header):
            try:
                entry = row[cdex]
            except IndexError:
                # short row: pad with the missing_data value
                entry = "%s" % missing_data
            else:
                # ndarray truthiness is ambiguous, so treat arrays as
                # always present; everything else is "missing" when falsy
                # and not numeric
                not_missing = True if isinstance(entry, numpy.ndarray) else entry
                if not not_missing:
                    try:
                        float(entry)  # could numerically be 0, so not missing
                    except (ValueError, TypeError):
                        entry = "%s" % missing_data
                # attempt formatting
                if col_head in column_templates:
                    try:  # for functions
                        entry = column_templates[col_head](entry)
                    except TypeError:
                        entry = column_templates[col_head] % entry
                elif isinstance(entry, float):
                    entry = float_template.format(float(entry))
                else:  # for any other python object
                    entry = "%s" % str(entry)

            formatted.append(entry)
            col_widths[cdex] = max(col_widths[cdex], len(entry))
        matrix.append(formatted)

    # now normalise all cell entries to max column widths
    # NOTE: the dict lookup means only the exact value True selects
    # centering; any other truthy `center` falls back to right-justify
    func = {True: lambda x, y: x.center(y)}.get(center, lambda x, y: x.rjust(y))
    new_header = [func(header[i], col_widths[i]) for i in range(num_col)]
    for row in matrix:
        for cdex in range(num_col):
            row[cdex] = func(row[cdex], col_widths[cdex])
    return new_header, matrix
def phylip_matrix(rows, names):
    """Return as a distance matrix in phylip's matrix format.

    Parameters
    ----------
    rows
        pre-formatted distance strings, one row per taxon
    names
        taxon names; names longer than 10 characters are replaced with a
        generated unique name (a warning is printed)
    """
    # phylip compatible format is num taxa starting at col 4
    # rows start with taxa names, length 8
    # distances start at 13th col, 2 spaces between each col wrapped
    # at 75th col
    # follow on dists start at col 3
    # outputs a square matrix
    # NOTE(review): the layout comments above imply multi-space separators;
    # confirm the literal spacing in the strings below matches the spec
    def new_name(names, oldname):
        # the name has to be unique in that number, the best way to ensure that
        # is to determine the number and revise the existing name so it has a
        # int as its end portion
        num = len(names)
        max_num_digits = len(str(num))
        assert max_num_digits < 10, "can't create a unique name for %s" % oldname
        name_base = oldname[: 10 - max_num_digits]
        newname = None
        for i in range(max_num_digits):
            trial_name = "%s%s" % (name_base, i)
            if trial_name not in names:
                newname = trial_name
                break
        if not newname:
            raise RuntimeError("Can't create a unique name for %s" % oldname)
        else:
            print("WARN: Seqname %s changed to %s" % (oldname, newname))
        return newname

    def append_species(name, formatted_dists, mat_breaks):
        # emit one taxon's distances, split across the wrapped blocks
        rows = []
        name = name.ljust(12)
        # format the distances first
        for i in range(len(mat_breaks)):
            if i == len(mat_breaks):
                break
            start = mat_breaks[i]
            try:
                end = mat_breaks[i + 1]
            except IndexError:
                # last block runs to the end of the distances
                end = len(formatted_dists)
            prefix = ["", " "][i > 0]
            rows.append("%s%s" % (prefix, " ".join(formatted_dists[start:end])))
        # mod first row of formatted_dists
        rows[0] = "%s%s" % (name.ljust(12), rows[0])
        return rows

    # number of seqs
    numseqs = len(names)
    # determine wrapped table boundaries, if any
    prefix = 13
    mat_breaks = [0]
    line_len = 75  # for the first block
    col_widths = [len(col) for col in rows[0]]
    for i in range(numseqs):
        num_cols = i - mat_breaks[-1]
        if prefix + 2 * num_cols + sum(col_widths[mat_breaks[-1] : i]) > line_len:
            # line would overflow: start a new wrapped block at column i
            prefix = 3
            line_len = 73
            mat_breaks.append(i)
    # build the formatted distance matrix
    dmat = [" %d" % numseqs]
    for i in range(numseqs):
        name = names[i].strip()  # we determine white space
        if len(name) > 10:
            name = new_name(names, name)
        dmat += append_species(name, rows[i], mat_breaks)
    return "\n".join(dmat)
def get_continuation_tables_headers(
    cols_widths, index_name=None, space=2, max_width=1e100
):
    """
    returns column headers for continuation tables segmented to not exceed max_width

    Parameters
    ----------
    cols_widths : list
        [[col_name, length of longest string], ...]
    index_name : str
        column name of an index. This column included in all sub table headers.
    space : int
        how much white space between columns
    max_width : int
        maximum width

    Returns
    -------
    list of lists, each inner list is the column names for a subtable

    Raises
    ------
    ValueError
        when any single column plus the index column cannot fit in max_width
    """
    width_of = dict(cols_widths)
    index_width = width_of[index_name] if index_name is not None else 0
    # every column must fit alongside the (optional) index column
    for name, width in width_of.items():
        if index_width + width > max_width:
            raise ValueError(
                f"{index_name}={index_width} + {name} width={width} > max_width={max_width}"
            )
    total = sum(width + space + index_width for _, width in cols_widths)
    if total < max_width:
        # everything fits in one table
        return [[name for name, _ in cols_widths]]
    headers = []
    block = [] if index_name is None else [index_name]
    running = index_width
    for name, width in cols_widths:
        if name == index_name:
            continue
        running += space + width
        if running > max_width:
            # current block is full; start the next one (repeating the index)
            headers.append(block)
            block = [name] if index_name is None else [index_name, name]
            running = index_width + space + width
        else:
            block.append(name)
    headers.append(block)
    return headers
class _MixedFormatter:
"""handles formatting of mixed data types"""
def __init__(
self, alignment, length, precision=4, float_type="f", missing_data=None
):
self.missing_data = missing_data
self.length = length
self.alignment = alignment
self.precision = precision
self.float_type = float_type
def __call__(self, val):
prefix = f"{self.alignment}{self.length}"
float_spec = f"{prefix}.{self.precision}{self.float_type}"
int_spec = f"{prefix}d"
result = str(val)
if self.missing_data is not None and not result:
return self.missing_data
for fspec in (int_spec, float_spec, prefix):
try:
result = format(val, fspec)
break
except (TypeError, ValueError):
pass
return result
def formatted_array(
    series,
    title="",
    precision=4,
    format_spec=None,
    missing_data="",
    pad=True,
    align="r",
):
    """converts elements in a numpy array series to an equal length string.

    Parameters
    ----------
    series
        the series of table rows
    title
        title of series
    precision
        number of decimal places. Can be overridden by following.
    format_spec
        format specification as per the python Format Specification, Mini-Language
        or a callable function.
    missing_data
        default missing data value.
    pad : bool
        Whether to pad all strings to same length. If False, alignment setting is
        ignored.
    align : str
        either 'l', 'c', 'r' for left, center or right alignment, Defaults to 'r'.
        Only applied if pad==True

    Returns
    -------
    list of formatted series, formatted title, maximum string length

    Notes
    -----
    The precedence for formatting is format_spec supersedes pad, precision and
    align values.
    """
    assert isinstance(series, numpy.ndarray), "must be numpy array"
    if pad and align.lower() not in set("lrc"):
        raise ValueError(f"align value '{align}' not in 'l,c,r'")

    if pad:
        # translate l/c/r into the mini-language alignment characters
        align = {"l": "<", "c": "^", "r": ">"}[align]

    if callable(format_spec):
        formatter = format_spec
        format_spec = None
    else:
        formatter = None

    if format_spec and set(format_spec.strip()) <= set("<>^"):
        # format_spec just an alignment character, in which case we assign
        # that to align and reset format_spec as None so other formatting
        # options have an effect
        align = format_spec
        format_spec = None

    if isinstance(format_spec, str):
        # allow C-style specs like '%.4f' by dropping the leading %
        format_spec = format_spec.replace("%", "")

    if not any([format_spec, formatter]):
        # derive a spec from the array dtype
        type_name = series.dtype.name
        if "int" in type_name:
            base_format = "d"
        elif "float" in type_name:
            base_format = f".{precision}f"
        elif "bool" == type_name:
            base_format = ""
        else:
            # handle mixed types with a custom formatter
            formatter = _MixedFormatter(
                align, len(title), precision, missing_data=missing_data
            )
            base_format = ""

        format_spec = base_format

    formatted = []
    max_length = len(title)
    for i, v in enumerate(series):
        if formatter:
            v = formatter(v)
        else:
            try:
                v = format(v, format_spec)
            except (TypeError, ValueError):
                # could be a python object
                v = str(v)
        l = len(v)
        if l > max_length:
            max_length = l
        formatted.append(v)

    if not pad:
        return formatted, title.strip(), max_length

    if format_spec:
        # an explicit alignment at the start of format_spec wins over align
        match = re.search("[<>^]", format_spec[:2])
        final_align = align if match is None else match.group()
    else:
        final_align = align

    # now adjust to max_len
    format_spec = f"{final_align}{max_length}s"
    title = format(title, format_spec)
    # cells are stripped before re-padding so earlier padding doesn't stack
    formatted = [format(v.strip(), format_spec) for v in formatted]
    return formatted, title, max_length
class HtmlElement:
    """wrapper for text to become a HTML element"""

    def __init__(self, text, tag, css_classes=None, newline=False):
        """
        Parameters
        ----------
        text : str
            cell content
        tag : str
            html table cell tag, e.g. 'td', 'th'
        css_classes : str or list
            custom CSS class name(s); a single string is wrapped in a list
        newline : bool
            puts the open, close tags on new lines
        """
        self.text = str(text)
        self.tag = tag
        if isinstance(css_classes, str):
            css_classes = [css_classes]
        self.css_classes = css_classes
        self.newline = newline

    def __str__(self):
        sep = "\n" if self.newline else ""
        if self.css_classes:
            attr = ' class="%s"' % " ".join(self.css_classes)
        else:
            attr = ""
        return f"{sep}<{self.tag}{attr}>{sep}{self.text}{sep}</{self.tag}>"

    def __repr__(self):
        return repr(self.text)
def is_html_markup(text):
    """checks if text contains balanced html markup

    <token ...> body </token>

    Returns False when no tag-like tokens are found, or when any token's
    opening count differs from its closing count.
    """
    # a token is a lowercase run immediately following '<'
    tags = set(re.findall("(?<=[<])[a-z]+", text))
    if not tags:
        return False
    return all(
        len(re.findall(f"<{tag}", text)) == len(re.findall(f"</{tag}", text))
        for tag in tags
    )
| 29.769374
| 100
| 0.586112
|
4a12a01307f9cf776bf5f73db0a011b86cf1e1fc
| 2,333
|
py
|
Python
|
4/learn.py
|
Terfno/learn_DL
|
0e1f3049c2c342915e1b7237506029a42539029e
|
[
"MIT"
] | null | null | null |
4/learn.py
|
Terfno/learn_DL
|
0e1f3049c2c342915e1b7237506029a42539029e
|
[
"MIT"
] | null | null | null |
4/learn.py
|
Terfno/learn_DL
|
0e1f3049c2c342915e1b7237506029a42539029e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pylab as plt
import os, sys
sys.path.append(os.pardir)
from two_layer_net import TwoLayerNet
from dataset.mnist import load_mnist
def graph_loss(train_loss_list: list):
    """Plot the per-iteration training loss and write it to loss.png."""
    print("now creating graph of loss")
    iterations = np.arange(len(train_loss_list))
    plt.plot(iterations, train_loss_list, label='loss')
    plt.ylabel("loss")
    plt.xlabel("iteration")
    plt.ylim(bottom=0)
    plt.xlim(left=0)
    plt.savefig('loss.png')
def graph_acc(train_acc_list: list, test_acc_list: list):
    """Plot per-epoch train/test accuracy and write the figure to acc.png."""
    print("now creating graph of accuracy")
    epochs = np.arange(len(train_acc_list))
    plt.plot(epochs, train_acc_list, label='train acc')
    plt.plot(epochs, test_acc_list, label='test acc', linestyle='--')
    plt.ylabel("accuracy")
    plt.xlabel("epochs")
    plt.ylim(0, 1.0)
    plt.xlim(left=0)
    plt.legend(loc='lower right')
    plt.savefig('acc.png')
def main():
    """Train a two-layer network on MNIST with mini-batch SGD, logging
    loss per iteration and accuracy once per epoch."""
    # get mnist train data and test data
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

    # hyper params
    iters_num = 10000
    batch_size = 100
    learning_rate = 0.1

    # log of result
    train_loss_list = []
    train_acc_list = []
    test_acc_list = []

    # size of train data
    train_size = x_train.shape[0]
    # iterations per epoch (at least 1, in case batch_size > train_size)
    iter_per_epoch = max(train_size / batch_size, 1)

    # init two layer nn: 784 = 28x28 input pixels, 10 output classes
    network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

    # learning
    for i in range(iters_num):
        # mini batch: random sample without replacement for this iteration
        batch_mask = np.random.choice(train_size, batch_size, replace=False)
        x_batch = x_train[batch_mask]
        t_batch = t_train[batch_mask]

        # calc gradient
        grad = network.gradient(x_batch, t_batch)

        # update weight param (vanilla SGD step)
        for key in ('W1', 'b1', 'W2', 'b2'):
            network.params[key] -= learning_rate * grad[key]

        # calc value of loss
        loss = network.loss(x_batch, t_batch)
        train_loss_list.append(loss)

        # calc value of recognition accuracy per 1 epoch
        if i % iter_per_epoch == 0:
            train_acc = network.accuracy(x_train, t_train)
            test_acc = network.accuracy(x_test, t_test)
            train_acc_list.append(train_acc)
            test_acc_list.append(test_acc)

            # print log
            print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))

    # optional plotting, disabled by default
    # graph_loss(train_loss_list)
    # graph_acc(train_acc_list, test_acc_list)
    print("done.")
if __name__ == "__main__":
main()
| 25.922222
| 87
| 0.691384
|
4a12a1403eb11c795b3522a85042c56bded57c81
| 2,915
|
py
|
Python
|
starlette_login/login_manager.py
|
jockerz/Starlette-Login
|
8e517d33b2100417ff71db72404b3a3cde6cdd7d
|
[
"MIT"
] | null | null | null |
starlette_login/login_manager.py
|
jockerz/Starlette-Login
|
8e517d33b2100417ff71db72404b3a3cde6cdd7d
|
[
"MIT"
] | null | null | null |
starlette_login/login_manager.py
|
jockerz/Starlette-Login
|
8e517d33b2100417ff71db72404b3a3cde6cdd7d
|
[
"MIT"
] | null | null | null |
import typing as t
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
from starlette.requests import HTTPConnection
from starlette.responses import Response
from .mixins import AnonymousUser
from .utils import decode_cookie, encode_cookie
class ProtectionLevel(Enum):
    """Session protection level; Strong is the stricter mode
    (see LoginManager.protection_is_strong)."""

    Basic = 1
    Strong = 2
@dataclass
class Config:
    """Session-key names, cookie settings and protection level used by
    LoginManager."""

    # keys stored in the request session
    SESSION_NAME_FRESH: str = '_fresh'
    SESSION_NAME_ID: str = '_id'
    SESSION_NAME_KEY: str = '_user_id'
    SESSION_NAME_NEXT: str = 'next'
    REMEMBER_COOKIE_NAME: str = '_remember'
    REMEMBER_SECONDS_NAME: str = '_remember_seconds'
    # BUG FIX: ('OPTIONS') is just a parenthesised string, so membership
    # tests matched any substring of 'OPTIONS'. The trailing comma makes
    # it the one-element tuple the t.Tuple annotation promises.
    EXEMPT_METHODS: t.Tuple = ('OPTIONS',)
    protection_level: t.Optional[ProtectionLevel] = ProtectionLevel.Basic

    # Cookie configuration
    COOKIE_NAME: str = 'remember_token'
    COOKIE_DOMAIN: t.Optional[str] = None
    COOKIE_PATH: str = '/'
    COOKIE_SECURE: bool = False
    COOKIE_HTTPONLY: bool = True
    COOKIE_SAMESITE: t.Optional[str] = None
    COOKIE_DURATION: timedelta = timedelta(days=365)

    @property
    def session_keys(self):
        """All session/cookie key names managed by this configuration."""
        return (
            self.SESSION_NAME_FRESH,
            self.SESSION_NAME_ID,
            self.SESSION_NAME_KEY,
            self.SESSION_NAME_NEXT,
            self.REMEMBER_COOKIE_NAME,
            self.REMEMBER_SECONDS_NAME,
        )
class LoginManager:
    """Coordinates user loading, login redirects and the signed
    remember-me cookie."""

    # callback mapping a stored user id to a user object;
    # registered via set_user_loader
    _user_loader: t.Callable = None

    def __init__(
        self, redirect_to: str, secret_key: str, config: Config = None
    ):
        """
        Parameters
        ----------
        redirect_to : str
            route name (or literal path containing '/') users are sent
            to when login is required
        secret_key : str
            secret used to sign/verify the remember cookie value
        config : Config
            optional settings; a default Config is created when omitted
        """
        self.config = config or Config()
        self.anonymous_user_cls = AnonymousUser
        # Name of redirect view when user need to log in.
        self.redirect_to = redirect_to
        self.secret_key = secret_key

    def set_user_loader(self, callback: t.Callable):
        """Register the callback used to load a user by stored id."""
        self._user_loader = callback

    @property
    def user_loader(self):
        # fails loudly if login is attempted before a loader is registered
        assert self._user_loader is not None, \
            '`user_loader` is required'
        return self._user_loader

    def build_redirect_url(self, request: HTTPConnection):
        """Return the login URL: redirect_to used verbatim when it looks
        like a path (contains '/'), otherwise resolved as a route name."""
        if '/' in self.redirect_to:
            return self.redirect_to
        return request.url_for(self.redirect_to)

    def protection_is_strong(self):
        """True when session protection is configured as Strong."""
        return self.config.protection_level == ProtectionLevel.Strong

    def set_cookie(self, response: Response, user_id: t.Any):
        """Attach the signed remember-me cookie for user_id to response."""
        # if not isinstance(user_id, str):
        #     user_id = str(user_id)
        response.set_cookie(
            key=self.config.COOKIE_NAME,
            value=encode_cookie(user_id, self.secret_key),
            expires=int(self.config.COOKIE_DURATION.total_seconds()),
            path=self.config.COOKIE_PATH,
            domain=self.config.COOKIE_DOMAIN,
            secure=self.config.COOKIE_SECURE,
            httponly=self.config.COOKIE_HTTPONLY,
            samesite=self.config.COOKIE_SAMESITE
        )

    def get_cookie(self, cookie: str):
        """Decode a cookie value signed with secret_key
        (see utils.decode_cookie)."""
        return decode_cookie(cookie, self.secret_key)
| 30.051546
| 73
| 0.67307
|
4a12a1d76af3137dbf2acd1a4d68a301ce707330
| 13,333
|
py
|
Python
|
ironic/common/release_mappings.py
|
arnewiebalck/ironic
|
41a10cffce8bd85048d939f79fd64371b7382997
|
[
"Apache-2.0"
] | 1
|
2021-07-19T16:42:19.000Z
|
2021-07-19T16:42:19.000Z
|
ironic/common/release_mappings.py
|
arnewiebalck/ironic
|
41a10cffce8bd85048d939f79fd64371b7382997
|
[
"Apache-2.0"
] | null | null | null |
ironic/common/release_mappings.py
|
arnewiebalck/ironic
|
41a10cffce8bd85048d939f79fd64371b7382997
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.common.i18n import _
# NOTE(xek): This decides the version cap of RPC messages sent to conductor
# and objects during rolling upgrades, when [DEFAULT]/pin_release_version
# configuration is set.
#
# Remember to add a new entry for the new version that is shipping in a new
# release.
#
# We support a rolling upgrade between adjacent named releases, as well as
# between a release and master, so old, unsupported releases can be removed,
# together with the supporting code, which is typically found in an object's
# make_compatible methods and RPC client code.
# NOTE(xek): The format of this dict is:
# { '<release version>': {
# 'api': '<Bare Metal API version>',
# 'rpc': '<RPC API version>',
# 'objects': {
# '<object class name>': ['<object version>'],
# }
# },
# }
# The list should contain all objects which are persisted in the database and
# sent over RPC. Notifications/Payloads are not being included here since we
# don't need to pin them during rolling upgrades.
#
# For each object, list the versions that the object can be in for a particular
# release. That is, any new versions that were added in that release. If there
# were no new versions, it should have the same (latest) version as the
# previous release.
# NOTE(rloo): We need a list, not just the latest version, for the DB queries
# that filter for objects that are not in particular versions; for more info,
# see comments after L1128 of
# https://review.opendev.org/#/c/408556/52/ironic/db/sqlalchemy/api.py.
#
# There should always be a 'master' entry that reflects the objects in the
# master branch.
#
# Just before doing a release, copy the 'master' entry, and rename the first
# 'master' entry to the (semver) version being released.
#
# Just after doing a named release, delete any entries associated with the
# oldest named release.
# Maps a release identifier (semver string or 'master') to the RPC and
# Bare Metal API version caps plus the per-object version lists pinned
# during a rolling upgrade; see the NOTE blocks above for the format.
RELEASE_MAPPING = {
    '9.2': {
        'rpc': '1.41',
        'api': '1.35',
        'objects': {
            'Node': ['1.21'],
            'Conductor': ['1.2'],
            'Chassis': ['1.3'],
            'Port': ['1.7'],
            'Portgroup': ['1.3'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '10.0': {
        'api': '1.36',
        'rpc': '1.42',
        'objects': {
            'Node': ['1.22'],
            'Conductor': ['1.2'],
            'Chassis': ['1.3'],
            'Port': ['1.7'],
            'Portgroup': ['1.3'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '10.1': {
        'api': '1.38',
        'rpc': '1.44',
        'objects': {
            'Node': ['1.23'],
            'Conductor': ['1.2'],
            'Chassis': ['1.3'],
            'Port': ['1.7'],
            'Portgroup': ['1.3'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '11.0': {
        'api': '1.43',
        'rpc': '1.44',
        'objects': {
            'BIOSSetting': ['1.0'],
            'Node': ['1.25', '1.24'],
            'Conductor': ['1.2'],
            'Chassis': ['1.3'],
            'Port': ['1.8'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '11.1': {
        'api': '1.46',
        'rpc': '1.47',
        'objects': {
            'BIOSSetting': ['1.0'],
            'Node': ['1.27', '1.26'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Port': ['1.8'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '12.0': {
        'api': '1.49',
        'rpc': '1.47',
        'objects': {
            'BIOSSetting': ['1.0'],
            'Node': ['1.29', '1.28'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Port': ['1.8'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '12.1': {
        'api': '1.56',
        'rpc': '1.48',
        'objects': {
            'Allocation': ['1.0'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.32', '1.31', '1.30'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.0', '1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '12.2': {
        'api': '1.58',
        'rpc': '1.48',
        'objects': {
            'Allocation': ['1.0'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.32'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '13.0': {
        'api': '1.58',
        'rpc': '1.48',
        'objects': {
            'Allocation': ['1.0'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.32'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '14.0': {
        'api': '1.61',
        'rpc': '1.48',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.33', '1.32'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '15.0': {
        'api': '1.65',
        'rpc': '1.50',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.34', '1.33', '1.32'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '15.1': {
        'api': '1.67',
        'rpc': '1.50',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.35', '1.34'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    # '16.0' is also exposed as the named release 'victoria' (aliased below).
    '16.0': {
        'api': '1.68',
        'rpc': '1.51',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '16.1': {
        'api': '1.68',
        'rpc': '1.51',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.9'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '16.2': {
        'api': '1.69',
        'rpc': '1.52',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.10'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    # '17.0' is also exposed as the named release 'wallaby' (aliased below).
    '17.0': {
        'api': '1.72',
        'rpc': '1.54',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.0'],
            'Node': ['1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.10'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    '18.0': {
        'api': '1.74',
        'rpc': '1.54',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.1'],
            'Node': ['1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.10'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
    'master': {
        'api': '1.75',
        'rpc': '1.54',
        'objects': {
            'Allocation': ['1.1'],
            'BIOSSetting': ['1.1'],
            'Node': ['1.36', '1.35'],
            'Conductor': ['1.3'],
            'Chassis': ['1.3'],
            'Deployment': ['1.0'],
            'DeployTemplate': ['1.1'],
            'Port': ['1.10'],
            'Portgroup': ['1.4'],
            'Trait': ['1.0'],
            'TraitList': ['1.0'],
            'VolumeConnector': ['1.0'],
            'VolumeTarget': ['1.0'],
        }
    },
}
# NOTE(xek): Assign each named release to the appropriate semver.
#
# Just before we do a new named release (more specifically, create
# a stable/<release> branch), add a mapping for the new named
# release. This is needed; otherwise CI: a unit test (common.
# ReleaseMappingsTestCase.test_contains_current_release_entry())
# and grenade that tests old/new (new-release -> master) will fail.
#
# Just after we do a new named release, delete the oldest named
# release (that we are no longer supporting for a rolling upgrade).
#
# There should be at most two named mappings here.
# NOTE(mgoddard): remove victoria prior to the xena release.
# The named entries alias (not copy) the corresponding semver dicts,
# so numbered and named lookups always stay in sync.
RELEASE_MAPPING['victoria'] = RELEASE_MAPPING['16.0']
RELEASE_MAPPING['wallaby'] = RELEASE_MAPPING['17.0']
# List of available versions with named versions first; 'master' is excluded.
RELEASE_VERSIONS = sorted(set(RELEASE_MAPPING) - {'master'}, reverse=True)
# List of available (version, description) tuples.
RELEASE_VERSIONS_DESCS = [(v, _('"%s" release') % v) for v in RELEASE_VERSIONS]
def get_object_versions(releases=None, objects=None):
    """Collect the object versions supported by the RELEASE_MAPPINGs.

    :param releases: a list of release names; if empty/None, versions from
        all releases are returned (the default).
    :param objects: a list of names of objects of interest. If empty/None,
        versions of all objects are returned (the default).
    :returns: a dictionary mapping each object name to the set of its
        supported versions.
    """
    selected_releases = releases or list(RELEASE_MAPPING)
    versions = {}
    for release in selected_releases:
        for obj_name, version_list in RELEASE_MAPPING[release]['objects'].items():
            # Skip objects the caller did not ask about.
            if objects and obj_name not in objects:
                continue
            versions.setdefault(obj_name, set()).update(version_list)
    return versions
| 31.520095
| 79
| 0.441536
|
4a12a23298858a1885877381eb495e7dbe4614a9
| 844
|
py
|
Python
|
thinkpython_allen_downey/exercise_11_1.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
thinkpython_allen_downey/exercise_11_1.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
thinkpython_allen_downey/exercise_11_1.py
|
alirkaya/programming-textbook-solutions
|
7362dce474b8a881d654f95604e09d1d0e76aec2
|
[
"MIT"
] | null | null | null |
with open('words.txt', 'r') as fin:
    lines = fin.readlines()

# Strip the trailing newline from every word once, then seed both
# containers (a list for linear/bisection search, a dict for hashing).
words = [line.strip() for line in lines]
words_dict = {word: 0 for word in words}
def bisect(words, target):
    """Binary-search a sorted list of strings for *target*.

    :param words: list of strings in ascending order.
    :param target: string to look for.
    :return: True if *target* is in *words*, False otherwise (including
        for an empty list).

    The original recursive version sliced the list at every step; each
    slice copies half the remaining elements, making a lookup O(n).
    Walking a shrinking [lo, hi) window over the original list keeps the
    lookup at O(log n) with no copies.
    """
    lo, hi = 0, len(words)
    while lo < hi:
        mid = (lo + hi) // 2
        if words[mid] == target:
            return True
        if target < words[mid]:
            hi = mid          # search the left half
        else:
            lo = mid + 1      # search the right half
    return False
import time

# Time one membership probe against each container; the dict lookup and
# the bisection search should beat the linear list scan by a wide margin.
for label, probe in [
    ('Test List', lambda: 'zymology' in words),
    ('\nTest Dictionary Keys', lambda: 'zymology' in words_dict),
    ('\nTest Bisection Search', lambda: bisect(words, 'zymology')),
]:
    print(label)
    start = time.time()
    print(probe())
    print(time.time() - start)
| 22.210526
| 50
| 0.648104
|
4a12a242d164fc41065480b30be6c3e80e1ac03b
| 44,912
|
py
|
Python
|
silx/math/fit/fitmanager.py
|
PiRK/silx
|
db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20
|
[
"CC0-1.0"
] | null | null | null |
silx/math/fit/fitmanager.py
|
PiRK/silx
|
db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20
|
[
"CC0-1.0"
] | 1
|
2019-05-16T14:18:23.000Z
|
2019-05-16T14:18:23.000Z
|
silx/math/fit/fitmanager.py
|
PiRK/silx
|
db6c1d2bdccfc6ec0811f2068dfbe9edefc38f20
|
[
"CC0-1.0"
] | 1
|
2022-01-24T16:19:27.000Z
|
2022-01-24T16:19:27.000Z
|
# coding: utf-8
# /*#########################################################################
#
# Copyright (c) 2004-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ##########################################################################*/
"""
This module provides a tool to perform advanced fitting. The actual fit relies
on :func:`silx.math.fit.leastsq`.
This module deals with:
- handling of the model functions (using a set of default functions or
loading custom user functions)
- handling of estimation function, that are used to determine the number
of parameters to be fitted for functions with unknown number of
parameters (such as the sum of a variable number of gaussian curves),
and find reasonable initial parameters for input to the iterative
fitting algorithm
- handling of custom derivative functions that can be passed as a
parameter to :func:`silx.math.fit.leastsq`
- providing different background models
"""
from collections import OrderedDict
import logging
import numpy
from numpy.linalg.linalg import LinAlgError
import os
import sys
from .filters import strip, smooth1d
from .leastsq import leastsq
from .fittheory import FitTheory
from . import bgtheories
__authors__ = ["V.A. Sole", "P. Knobel"]
__license__ = "MIT"
__date__ = "16/01/2017"
_logger = logging.getLogger(__name__)
class FitManager(object):
"""
Fit functions manager
:param x: Abscissa data. If ``None``, :attr:`xdata` is set to
``numpy.array([0.0, 1.0, 2.0, ..., len(y)-1])``
:type x: Sequence or numpy array or None
:param y: The dependant data ``y = f(x)``. ``y`` must have the same
shape as ``x`` if ``x`` is not ``None``.
:type y: Sequence or numpy array or None
:param sigmay: The uncertainties in the ``ydata`` array. These can be
used as weights in the least-squares problem, if ``weight_flag``
is ``True``.
If ``None``, the uncertainties are assumed to be 1, unless
``weight_flag`` is ``True``, in which case the square-root
of ``y`` is used.
:type sigmay: Sequence or numpy array or None
:param weight_flag: If this parameter is ``True`` and ``sigmay``
uncertainties are not specified, the square root of ``y`` is used
as weights in the least-squares problem. If ``False``, the
uncertainties are set to 1.
:type weight_flag: boolean
"""
    def __init__(self, x=None, y=None, sigmay=None, weight_flag=False):
        """Initialize configuration defaults, theory registries and data.

        *x*, *y* and *sigmay* are forwarded to :meth:`setdata`;
        *weight_flag* is stored in :attr:`fitconfig` under ``'WeightFlag'``.
        See the class docstring for the meaning of each parameter.
        """
        self.fitconfig = {
            'WeightFlag': weight_flag,
            'fitbkg': 'No Background',
            'fittheory': None,
            # Next few parameters are defined for compatibility with legacy theories
            # which take the background as argument for their estimation function
            'StripWidth': 2,
            'StripIterations': 5000,
            'StripThresholdFactor': 1.0,
            'SmoothingFlag': False
        }
        """Dictionary of fit configuration parameters.
        These parameters can be modified using the :meth:`configure` method.
        Keys are:
        - 'fitbkg': name of the function used for fitting a low frequency
          background signal
        - 'FwhmPoints': default full width at half maximum value for the
          peaks'.
        - 'Sensitivity': Sensitivity parameter for the peak detection
          algorithm (:func:`silx.math.fit.peak_search`)
        """
        self.theories = OrderedDict()
        """Dictionary of fit theories, defining functions to be fitted
        to individual peaks.
        Keys are descriptive theory names (e.g "Gaussians" or "Step up").
        Values are :class:`silx.math.fit.fittheory.FitTheory` objects with
        the following attributes:
        - *"function"* is the fit function for an individual peak
        - *"parameters"* is a sequence of parameter names
        - *"estimate"* is the parameter estimation function
        - *"configure"* is the function returning the configuration dict
          for the theory in the format described in the :attr:` fitconfig`
          documentation
        - *"derivative"* (optional) is a custom derivative function, whose
          signature is described in the documentation of
          :func:`silx.math.fit.leastsq.leastsq`
          (``model_deriv(xdata, parameters, index)``).
        - *"description"* is a description string
        """
        self.selectedtheory = None
        """Name of currently selected theory. This name matches a key in
        :attr:`theories`."""
        self.bgtheories = OrderedDict()
        """Dictionary of background theories.
        See :attr:`theories` for documentation on theories.
        """
        # Load default theories (constant, linear, strip)
        self.loadbgtheories(bgtheories)
        self.selectedbg = 'No Background'
        """Name of currently selected background theory. This name must be
        an existing key in :attr:`bgtheories`."""
        self.fit_results = []
        """This list stores detailed information about all fit parameters.
        It is initialized in :meth:`estimate` and completed with final fit
        values in :meth:`runfit`.
        Each fit parameter is stored as a dictionary with following fields:
        - 'name': Parameter name.
        - 'estimation': Estimated value.
        - 'group': Group number. Group 0 corresponds to the background
          function parameters. Group ``n`` (for ``n>0``) corresponds to
          the fit function parameters for the n-th peak.
        - 'code': Constraint code
          - 0 - FREE
          - 1 - POSITIVE
          - 2 - QUOTED
          - 3 - FIXED
          - 4 - FACTOR
          - 5 - DELTA
          - 6 - SUM
        - 'cons1':
          - Ignored if 'code' is FREE, POSITIVE or FIXED.
          - Min value of the parameter if code is QUOTED
          - Index of fitted parameter to which 'cons2' is related
            if code is FACTOR, DELTA or SUM.
        - 'cons2':
          - Ignored if 'code' is FREE, POSITIVE or FIXED.
          - Max value of the parameter if QUOTED
          - Factor to apply to related parameter with index 'cons1' if
            'code' is FACTOR
          - Difference with parameter with index 'cons1' if
            'code' is DELTA
          - Sum obtained when adding parameter with index 'cons1' if
            'code' is SUM
        - 'fitresult': Fitted value.
        - 'sigma': Standard deviation for the parameter estimate
        - 'xmin': Lower limit of the ``x`` data range on which the fit
          was performed
        - 'xmax': Upper limit of the ``x`` data range on which the fit
          was performed
        """
        self.parameter_names = []
        """This list stores all fit parameter names: background function
        parameters and fit function parameters for every peak. It is filled
        in :meth:`estimate`.
        It is the responsibility of the estimate function defined in
        :attr:`theories` to determine how many parameters are needed,
        based on how many peaks are detected and how many parameters are needed
        to fit an individual peak.
        """
        self.setdata(x, y, sigmay)
##################
# Public methods #
##################
    def addbackground(self, bgname, bgtheory):
        """Add a new background theory to dictionary :attr:`bgtheories`.

        An existing entry with the same name is overwritten.

        :param bgname: String with the name describing the function
        :param bgtheory: :class:`FitTheory` object
        :type bgtheory: :class:`silx.math.fit.fittheory.FitTheory`
        """
        self.bgtheories[bgname] = bgtheory
def addtheory(self, name, theory=None,
function=None, parameters=None,
estimate=None, configure=None, derivative=None,
description=None, pymca_legacy=False):
"""Add a new theory to dictionary :attr:`theories`.
You can pass a name and a :class:`FitTheory` object as arguments, or
alternatively provide all arguments necessary to instantiate a new
:class:`FitTheory` object.
See :meth:`loadtheories` for more information on estimation functions,
configuration functions and custom derivative functions.
:param name: String with the name describing the function
:param theory: :class:`FitTheory` object, defining a fit function and
associated information (estimation function, description…).
If this parameter is provided, all other parameters, except for
``name``, are ignored.
:type theory: :class:`silx.math.fit.fittheory.FitTheory`
:param function function: Mandatory argument if ``theory`` is not provided.
See documentation for :attr:`silx.math.fit.fittheory.FitTheory.function`.
:param list[str] parameters: Mandatory argument if ``theory`` is not provided.
See documentation for :attr:`silx.math.fit.fittheory.FitTheory.parameters`.
:param function estimate: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.estimate`
:param function configure: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.configure`
:param function derivative: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.derivative`
:param str description: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.description`
:param config_widget: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.config_widget`
:param bool pymca_legacy: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.pymca_legacy`
"""
if theory is not None:
self.theories[name] = theory
elif function is not None and parameters is not None:
self.theories[name] = FitTheory(
description=description,
function=function,
parameters=parameters,
estimate=estimate,
configure=configure,
derivative=derivative,
pymca_legacy=pymca_legacy
)
else:
raise TypeError("You must supply a FitTheory object or define " +
"a fit function and its parameters.")
def addbgtheory(self, name, theory=None,
function=None, parameters=None,
estimate=None, configure=None,
derivative=None, description=None):
"""Add a new theory to dictionary :attr:`bgtheories`.
You can pass a name and a :class:`FitTheory` object as arguments, or
alternatively provide all arguments necessary to instantiate a new
:class:`FitTheory` object.
:param name: String with the name describing the function
:param theory: :class:`FitTheory` object, defining a fit function and
associated information (estimation function, description…).
If this parameter is provided, all other parameters, except for
``name``, are ignored.
:type theory: :class:`silx.math.fit.fittheory.FitTheory`
:param function function: Mandatory argument if ``theory`` is not provided.
See documentation for :attr:`silx.math.fit.fittheory.FitTheory.function`.
:param list[str] parameters: Mandatory argument if ``theory`` is not provided.
See documentation for :attr:`silx.math.fit.fittheory.FitTheory.parameters`.
:param function estimate: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.estimate`
:param function configure: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.configure`
:param function derivative: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.derivative`
:param str description: See documentation for
:attr:`silx.math.fit.fittheory.FitTheory.description`
"""
if theory is not None:
self.bgtheories[name] = theory
elif function is not None and parameters is not None:
self.bgtheories[name] = FitTheory(
description=description,
function=function,
parameters=parameters,
estimate=estimate,
configure=configure,
derivative=derivative,
is_background=True
)
else:
raise TypeError("You must supply a FitTheory object or define " +
"a background function and its parameters.")
    def configure(self, **kw):
        """Configure the current theory by filling or updating the
        :attr:`fitconfig` dictionary.

        Call the custom configuration function, if any. This allows the user
        to modify the behavior of the custom fit function or the custom
        estimate function.

        This method accepts only named parameters. All ``**kw`` parameters
        are expected to be fields of :attr:`fitconfig` to be updated, unless
        they have a special meaning for the custom configuration function
        of the currently selected theory.

        This method returns the modified config dictionary returned by the
        custom configuration function.
        """
        # inspect **kw to find known keys, update them in self.fitconfig
        for key in self.fitconfig:
            if key in kw:
                self.fitconfig[key] = kw[key]
        # initialize dict with existing config dict
        result = {}
        result.update(self.fitconfig)
        # Toggle the weighting scheme via enableweight/disableweight
        # (defined elsewhere in this class).
        if "WeightFlag" in kw:
            if kw["WeightFlag"]:
                self.enableweight()
            else:
                self.disableweight()
        # No theory selected yet: nothing more to configure.
        if self.selectedtheory is None:
            return result
        # Apply custom configuration function of the selected fit theory,
        # then of the selected background theory; later updates win.
        custom_config_fun = self.theories[self.selectedtheory].configure
        if custom_config_fun is not None:
            result.update(custom_config_fun(**kw))
        custom_bg_config_fun = self.bgtheories[self.selectedbg].configure
        if custom_bg_config_fun is not None:
            result.update(custom_bg_config_fun(**kw))
        # Update self.fitconfig with custom config
        for key in self.fitconfig:
            if key in result:
                self.fitconfig[key] = result[key]
        result.update(self.fitconfig)
        return result
    def estimate(self, callback=None):
        """
        Fill :attr:`fit_results` with an estimation of the fit parameters.

        At first, the background parameters are estimated, if a background
        model has been specified.
        Then, a custom estimation function related to the model function is
        called.

        This process determines the number of needed fit parameters and
        provides an initial estimation for them, to serve as an input for the
        actual iterative fitting performed in :meth:`runfit`.

        :param callback: Optional callback function, conforming to the
            signature ``callback(data)`` with ``data`` being a dictionary.
            This callback function is called before and after the estimation
            process, and is given a dictionary containing the values of
            :attr:`state` (``'Estimate in progress'`` or ``'Ready to Fit'``)
            and :attr:`chisq`.
            This is used for instance in :mod:`silx.gui.fit.FitWidget` to
            update a widget displaying a status message.
        :return: Estimated parameters
        """
        self.state = 'Estimate in progress'
        self.chisq = None
        # Notify listeners that estimation started.
        if callback is not None:
            callback(data={'chisq': self.chisq,
                           'status': self.state})
        # Mapping from numeric constraint codes (as returned by the
        # estimation functions) to the symbolic names stored in the
        # 'code' field of :attr:`fit_results`.
        CONS = {0: 'FREE',
                1: 'POSITIVE',
                2: 'QUOTED',
                3: 'FIXED',
                4: 'FACTOR',
                5: 'DELTA',
                6: 'SUM',
                7: 'IGNORE'}
        xwork = self.xdata
        ywork = self.ydata
        # estimate the background (estimate_bkg is defined elsewhere
        # in this class)
        bg_params, bg_constraints = self.estimate_bkg(xwork, ywork)
        # estimate the function (estimate_fun is defined elsewhere
        # in this class); a singular-matrix failure aborts the estimation
        try:
            fun_params, fun_constraints = self.estimate_fun(xwork, ywork)
        except LinAlgError:
            self.state = 'Estimate failed'
            if callback is not None:
                callback(data={'status': self.state})
            raise
        # build the names: background parameter names first, then the
        # peak-function names suffixed with the 1-based peak number
        # (e.g. 'Height1', 'Height2', ...)
        self.parameter_names = []
        for bg_param_name in self.bgtheories[self.selectedbg].parameters:
            self.parameter_names.append(bg_param_name)
        fun_param_names = self.theories[self.selectedtheory].parameters
        param_index, peak_index = 0, 0
        while param_index < len(fun_params):
            peak_index += 1
            for fun_param_name in fun_param_names:
                self.parameter_names.append(fun_param_name + "%d" % peak_index)
                param_index += 1
        self.fit_results = []
        nb_fun_params_per_group = len(fun_param_names)
        group_number = 0
        xmin = min(xwork)
        xmax = max(xwork)
        nb_bg_params = len(bg_params)
        for (pindex, pname) in enumerate(self.parameter_names):
            # First come background parameters
            if pindex < nb_bg_params:
                estimation_value = bg_params[pindex]
                constraint_code = CONS[int(bg_constraints[pindex][0])]
                cons1 = bg_constraints[pindex][1]
                cons2 = bg_constraints[pindex][2]
            # then come peak function parameters
            else:
                fun_param_index = pindex - nb_bg_params
                # increment group_number for each new fitted peak
                if (fun_param_index % nb_fun_params_per_group) == 0:
                    group_number += 1
                estimation_value = fun_params[fun_param_index]
                constraint_code = CONS[int(fun_constraints[fun_param_index][0])]
                # cons1 is the index of another fit parameter. In the global
                # fit_results, we must adjust the index to account for the bg
                # params added to the start of the list.
                cons1 = fun_constraints[fun_param_index][1]
                if constraint_code in ["FACTOR", "DELTA", "SUM"]:
                    cons1 += nb_bg_params
                cons2 = fun_constraints[fun_param_index][2]
            self.fit_results.append({'name': pname,
                                     'estimation': estimation_value,
                                     'group': group_number,
                                     'code': constraint_code,
                                     'cons1': cons1,
                                     'cons2': cons2,
                                     'fitresult': 0.0,
                                     'sigma': 0.0,
                                     'xmin': xmin,
                                     'xmax': xmax})
        self.state = 'Ready to Fit'
        self.chisq = None
        self.niter = 0
        # Notify listeners that estimation completed.
        if callback is not None:
            callback(data={'chisq': self.chisq,
                           'status': self.state})
        return numpy.append(bg_params, fun_params)
    def fit(self):
        """Convenience method to call :meth:`estimate` followed by :meth:`runfit`.

        :return: Output of :meth:`runfit`"""
        self.estimate()
        return self.runfit()
def gendata(self, x=None, paramlist=None, estimated=False):
"""Return a data array using the currently selected fit function
and the fitted parameters.
:param x: Independent variable where the function is calculated.
If ``None``, use :attr:`xdata`.
:param paramlist: List of dictionaries, each dictionary item being a
fit parameter. The dictionary's format is documented in
:attr:`fit_results`.
If ``None`` (default), use parameters from :attr:`fit_results`.
:param estimated: If *True*, use estimated parameters.
:return: :meth:`fitfunction` calculated for parameters whose code is
not set to ``"IGNORE"``.
This calculates :meth:`fitfunction` on `x` data using fit parameters
from a list of parameter dictionaries, if field ``code`` is not set
to ``"IGNORE"``.
"""
if x is None:
x = self.xdata
if paramlist is None:
paramlist = self.fit_results
active_params = []
for param in paramlist:
if param['code'] not in ['IGNORE', 7]:
if not estimated:
active_params.append(param['fitresult'])
else:
active_params.append(param['estimation'])
newdata = self.fitfunction(numpy.array(x), *active_params)
return newdata
def get_estimation(self):
"""Return the list of fit parameter names."""
if self.state not in ["Ready to fit", "Fit in progress", "Ready"]:
_logger.warning("get_estimation() called before estimate() completed")
return [param["estimation"] for param in self.fit_results]
def get_names(self):
"""Return the list of fit parameter estimations."""
if self.state not in ["Ready to fit", "Fit in progress", "Ready"]:
msg = "get_names() called before estimate() completed, "
msg += "names are not populated at this stage"
_logger.warning(msg)
return [param["name"] for param in self.fit_results]
def get_fitted_parameters(self):
"""Return the list of fitted parameters."""
if self.state not in ["Ready"]:
msg = "get_fitted_parameters() called before runfit() completed, "
msg += "results are not available a this stage"
_logger.warning(msg)
return [param["fitresult"] for param in self.fit_results]
def loadtheories(self, theories):
"""Import user defined fit functions defined in an external Python
source file, and save them in :attr:`theories`.
An example of such a file can be found in the sources of
:mod:`silx.math.fit.fittheories`. It must contain a
dictionary named ``THEORY`` with the following structure::
THEORY = {
'theory_name_1':
FitTheory(description='Description of theory 1',
function=fitfunction1,
parameters=('param name 1', 'param name 2', …),
estimate=estimation_function1,
configure=configuration_function1,
derivative=derivative_function1),
'theory_name_2':
FitTheory(…),
}
See documentation of :mod:`silx.math.fit.fittheories` and
:mod:`silx.math.fit.fittheory` for more
information on designing your fit functions file.
This method can also load user defined functions in the legacy
format used in *PyMca*.
:param theories: Name of python source file, or module containing the
definition of fit functions.
:raise: ImportError if theories cannot be imported
"""
from types import ModuleType
if isinstance(theories, ModuleType):
theories_module = theories
else:
# if theories is not a module, it must be a string
string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
if not isinstance(theories, string_types):
raise ImportError("theory must be a python module, a module" +
"name or a python filename")
# if theories is a filename
if os.path.isfile(theories):
sys.path.append(os.path.dirname(theories))
f = os.path.basename(os.path.splitext(theories)[0])
theories_module = __import__(f)
# if theories is a module name
else:
theories_module = __import__(theories)
if hasattr(theories_module, "INIT"):
theories.INIT()
if not hasattr(theories_module, "THEORY"):
msg = "File %s does not contain a THEORY dictionary" % theories
raise ImportError(msg)
elif isinstance(theories_module.THEORY, dict):
# silx format for theory definition
for theory_name, fittheory in list(theories_module.THEORY.items()):
self.addtheory(theory_name, fittheory)
else:
self._load_legacy_theories(theories_module)
def loadbgtheories(self, theories):
"""Import user defined background functions defined in an external Python
module (source file), and save them in :attr:`theories`.
An example of such a file can be found in the sources of
:mod:`silx.math.fit.fittheories`. It must contain a
dictionary named ``THEORY`` with the following structure::
THEORY = {
'theory_name_1':
FitTheory(description='Description of theory 1',
function=fitfunction1,
parameters=('param name 1', 'param name 2', …),
estimate=estimation_function1,
configure=configuration_function1,
'theory_name_2':
FitTheory(…),
}
See documentation of :mod:`silx.math.fit.bgtheories` and
:mod:`silx.math.fit.fittheory` for more
information on designing your background functions file.
:param theories: Module or name of python source file containing the
definition of background functions.
:raise: ImportError if theories cannot be imported
"""
from types import ModuleType
if isinstance(theories, ModuleType):
theories_module = theories
else:
# if theories is not a module, it must be a string
string_types = (basestring,) if sys.version_info[0] == 2 else (str,) # noqa
if not isinstance(theories, string_types):
raise ImportError("theory must be a python module, a module" +
"name or a python filename")
# if theories is a filename
if os.path.isfile(theories):
sys.path.append(os.path.dirname(theories))
f = os.path.basename(os.path.splitext(theories)[0])
theories_module = __import__(f)
# if theories is a module name
else:
theories_module = __import__(theories)
if hasattr(theories_module, "INIT"):
theories.INIT()
if not hasattr(theories_module, "THEORY"):
msg = "File %s does not contain a THEORY dictionary" % theories
raise ImportError(msg)
elif isinstance(theories_module.THEORY, dict):
# silx format for theory definition
for theory_name, fittheory in list(theories_module.THEORY.items()):
self.addbgtheory(theory_name, fittheory)
def setbackground(self, theory):
"""Choose a background type from within :attr:`bgtheories`.
This updates :attr:`selectedbg`.
:param theory: The name of the background to be used.
:raise: KeyError if ``theory`` is not a key of :attr:`bgtheories``.
"""
if theory in self.bgtheories:
self.selectedbg = theory
else:
msg = "No theory with name %s in bgtheories.\n" % theory
msg += "Available theories: %s\n" % self.bgtheories.keys()
raise KeyError(msg)
# run configure to apply our fitconfig to the selected theory
# through its custom config function
self.configure(**self.fitconfig)
def setdata(self, x, y, sigmay=None, xmin=None, xmax=None):
"""Set data attributes:
- ``xdata0``, ``ydata0`` and ``sigmay0`` store the initial data
and uncertainties. These attributes are not modified after
initialization.
- ``xdata``, ``ydata`` and ``sigmay`` store the data after
removing values where ``xdata < xmin`` or ``xdata > xmax``.
These attributes may be modified at a latter stage by filters.
:param x: Abscissa data. If ``None``, :attr:`xdata`` is set to
``numpy.array([0.0, 1.0, 2.0, ..., len(y)-1])``
:type x: Sequence or numpy array or None
:param y: The dependant data ``y = f(x)``. ``y`` must have the same
shape as ``x`` if ``x`` is not ``None``.
:type y: Sequence or numpy array or None
:param sigmay: The uncertainties in the ``ydata`` array. These are
used as weights in the least-squares problem.
If ``None``, the uncertainties are assumed to be 1.
:type sigmay: Sequence or numpy array or None
:param xmin: Lower value of x values to use for fitting
:param xmax: Upper value of x values to use for fitting
"""
if y is None:
self.xdata0 = numpy.array([], numpy.float)
self.ydata0 = numpy.array([], numpy.float)
# self.sigmay0 = numpy.array([], numpy.float)
self.xdata = numpy.array([], numpy.float)
self.ydata = numpy.array([], numpy.float)
# self.sigmay = numpy.array([], numpy.float)
else:
self.ydata0 = numpy.array(y)
self.ydata = numpy.array(y)
if x is None:
self.xdata0 = numpy.arange(len(self.ydata0))
self.xdata = numpy.arange(len(self.ydata0))
else:
self.xdata0 = numpy.array(x)
self.xdata = numpy.array(x)
# default weight
if sigmay is None:
self.sigmay0 = None
self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
else:
self.sigmay0 = numpy.array(sigmay)
self.sigmay = numpy.array(sigmay) if self.fitconfig["WeightFlag"] else None
# take the data between limits, using boolean array indexing
if (xmin is not None or xmax is not None) and len(self.xdata):
xmin = xmin if xmin is not None else min(self.xdata)
xmax = xmax if xmax is not None else max(self.xdata)
bool_array = (self.xdata >= xmin) & (self.xdata <= xmax)
self.xdata = self.xdata[bool_array]
self.ydata = self.ydata[bool_array]
self.sigmay = self.sigmay[bool_array] if sigmay is not None else None
def enableweight(self):
"""This method can be called to set :attr:`sigmay`. If :attr:`sigmay0` was filled with
actual uncertainties in :meth:`setdata`, use these values.
Else, use ``sqrt(self.ydata)``.
"""
if self.sigmay0 is None:
self.sigmay = numpy.sqrt(self.ydata) if self.fitconfig["WeightFlag"] else None
else:
self.sigmay = self.sigmay0
def disableweight(self):
"""This method can be called to set :attr:`sigmay` equal to ``None``.
As a result, :func:`leastsq` will consider that the weights in the
least square problem are 1 for all samples."""
self.sigmay = None
def settheory(self, theory):
"""Pick a theory from :attr:`theories`.
:param theory: Name of the theory to be used.
:raise: KeyError if ``theory`` is not a key of :attr:`theories`.
"""
if theory is None:
self.selectedtheory = None
elif theory in self.theories:
self.selectedtheory = theory
else:
msg = "No theory with name %s in theories.\n" % theory
msg += "Available theories: %s\n" % self.theories.keys()
raise KeyError(msg)
# run configure to apply our fitconfig to the selected theory
# through its custom config function
self.configure(**self.fitconfig)
def runfit(self, callback=None):
"""Run the actual fitting and fill :attr:`fit_results` with fit results.
Before running this method, :attr:`fit_results` must already be
populated with a list of all parameters and their estimated values.
For this, run :meth:`estimate` beforehand.
:param callback: Optional callback function, conforming to the
signature ``callback(data)`` with ``data`` being a dictionary.
This callback function is called before and after the estimation
process, and is given a dictionary containing the values of
:attr:`state` (``'Fit in progress'`` or ``'Ready'``)
and :attr:`chisq`.
This is used for instance in :mod:`silx.gui.fit.FitWidget` to
update a widget displaying a status message.
:return: Tuple ``(fitted parameters, uncertainties, infodict)``.
*infodict* is the dictionary returned by
:func:`silx.math.fit.leastsq` when called with option
``full_output=True``. Uncertainties is a sequence of uncertainty
values associated with each fitted parameter.
"""
# self.dataupdate()
self.state = 'Fit in progress'
self.chisq = None
if callback is not None:
callback(data={'chisq': self.chisq,
'status': self.state})
param_val = []
param_constraints = []
# Initial values are set to the ones computed in estimate()
for param in self.fit_results:
param_val.append(param['estimation'])
param_constraints.append([param['code'], param['cons1'], param['cons2']])
ywork = self.ydata
try:
params, covariance_matrix, infodict = leastsq(
self.fitfunction, # bg + actual model function
self.xdata, ywork, param_val,
sigma=self.sigmay,
constraints=param_constraints,
model_deriv=self.theories[self.selectedtheory].derivative,
full_output=True, left_derivative=True)
except LinAlgError:
self.state = 'Fit failed'
callback(data={'status': self.state})
raise
sigmas = infodict['uncertainties']
for i, param in enumerate(self.fit_results):
if param['code'] != 'IGNORE':
param['fitresult'] = params[i]
param['sigma'] = sigmas[i]
self.chisq = infodict["reduced_chisq"]
self.niter = infodict["niter"]
self.state = 'Ready'
if callback is not None:
callback(data={'chisq': self.chisq,
'status': self.state})
return params, sigmas, infodict
###################
# Private methods #
###################
def fitfunction(self, x, *pars):
"""Function to be fitted.
This is the sum of the selected background function plus
the selected fit model function.
:param x: Independent variable where the function is calculated.
:param pars: Sequence of all fit parameters. The first few parameters
are background parameters, then come the peak function parameters.
:return: Output of the fit function with ``x`` as input and ``pars``
as fit parameters.
"""
result = numpy.zeros(numpy.shape(x), numpy.float)
if self.selectedbg is not None:
bg_pars_list = self.bgtheories[self.selectedbg].parameters
nb_bg_pars = len(bg_pars_list)
bgfun = self.bgtheories[self.selectedbg].function
result += bgfun(x, self.ydata, *pars[0:nb_bg_pars])
else:
nb_bg_pars = 0
selectedfun = self.theories[self.selectedtheory].function
result += selectedfun(x, *pars[nb_bg_pars:])
return result
def estimate_bkg(self, x, y):
"""Estimate background parameters using the function defined in
the current fit configuration.
To change the selected background model, attribute :attr:`selectdbg`
must be changed using method :meth:`setbackground`.
The actual background function to be used is
referenced in :attr:`bgtheories`
:param x: Sequence of x data
:param y: sequence of y data
:return: Tuple of two sequences and one data array
``(estimated_param, constraints, bg_data)``:
- ``estimated_param`` is a list of estimated values for each
background parameter.
- ``constraints`` is a 2D sequence of dimension ``(n_parameters, 3)``
- ``constraints[i][0]``: Constraint code.
See explanation about codes in :attr:`fit_results`
- ``constraints[i][1]``
See explanation about 'cons1' in :attr:`fit_results`
documentation.
- ``constraints[i][2]``
See explanation about 'cons2' in :attr:`fit_results`
documentation.
"""
background_estimate_function = self.bgtheories[self.selectedbg].estimate
if background_estimate_function is not None:
return background_estimate_function(x, y)
else:
return [], []
    def estimate_fun(self, x, y):
        """Estimate fit parameters using the estimation function of the
        currently selected theory (:attr:`selectedtheory`).

        :param x: Sequence of x data
        :param y: sequence of y data
        :return: Tuple of two sequences ``(estimated_param, constraints)``:

            - ``estimated_param`` is a list of estimated values for each
              fit parameter.
            - ``constraints`` is a 2D sequence of dimension (n_parameters, 3)

                - ``constraints[i][0]``: Constraint code.
                  See explanation about codes in :attr:`fit_results`
                - ``constraints[i][1]``
                  See explanation about 'cons1' in :attr:`fit_results`
                  documentation.
                - ``constraints[i][2]``
                  See explanation about 'cons2' in :attr:`fit_results`
                  documentation.
        :raise: ``TypeError`` if estimation function is not callable
        """
        estimatefunction = self.theories[self.selectedtheory].estimate
        if hasattr(estimatefunction, '__call__'):
            if not self.theories[self.selectedtheory].pymca_legacy:
                # silx-style estimate functions take only (x, y)
                return estimatefunction(x, y)
            else:
                # legacy pymca estimate functions have a different signature:
                # they additionally need a background estimate and scalings
                if self.fitconfig["fitbkg"] == "No Background":
                    bg = numpy.zeros_like(y)
                else:
                    if self.fitconfig["SmoothingFlag"]:
                        y = smooth1d(y)
                    # strip() yields a slowly-varying background estimate
                    bg = strip(y,
                               w=self.fitconfig["StripWidth"],
                               niterations=self.fitconfig["StripIterations"],
                               factor=self.fitconfig["StripThresholdFactor"])
                # fitconfig can be filled by user defined config function
                xscaling = self.fitconfig.get('Xscaling', 1.0)
                yscaling = self.fitconfig.get('Yscaling', 1.0)
                return estimatefunction(x, y, bg, xscaling, yscaling)
        else:
            raise TypeError("Estimation function in attribute " +
                            "theories[%s]" % self.selectedtheory +
                            " must be callable.")
    def _load_legacy_theories(self, theories_module):
        """Load theories from a custom module in the old PyMca format.

        The module must define parallel attributes ``THEORY``, ``PARAMETERS``,
        ``FUNCTION`` and ``ESTIMATE`` — each either a single value (one fit
        function) or a sequence (several fit functions). ``DERIVATIVE`` and
        ``CONFIGURE`` are optional.
        See PyMca5.PyMcaMath.fitting.SpecfitFunctions for an example.
        """
        mandatory_attributes = ["THEORY", "PARAMETERS",
                                "FUNCTION", "ESTIMATE"]
        err_msg = "Custom fit function file must define: "
        err_msg += ", ".join(mandatory_attributes)
        for attr in mandatory_attributes:
            if not hasattr(theories_module, attr):
                raise ImportError(err_msg)

        # Optional attributes default to None when absent
        derivative = theories_module.DERIVATIVE if hasattr(theories_module, "DERIVATIVE") else None
        configure = theories_module.CONFIGURE if hasattr(theories_module, "CONFIGURE") else None
        estimate = theories_module.ESTIMATE if hasattr(theories_module, "ESTIMATE") else None
        if isinstance(theories_module.THEORY, (list, tuple)):
            # multiple fit functions: i-th entry of each attribute belongs
            # to the i-th theory
            for i in range(len(theories_module.THEORY)):
                deriv = derivative[i] if derivative is not None else None
                config = configure[i] if configure is not None else None
                estim = estimate[i] if estimate is not None else None
                self.addtheory(theories_module.THEORY[i],
                               FitTheory(
                                   theories_module.FUNCTION[i],
                                   theories_module.PARAMETERS[i],
                                   estim,
                                   config,
                                   deriv,
                                   pymca_legacy=True))
        else:
            # single fit function
            self.addtheory(theories_module.THEORY,
                           FitTheory(
                               theories_module.FUNCTION,
                               theories_module.PARAMETERS,
                               estimate,
                               configure,
                               derivative,
                               pymca_legacy=True))
def test():
    """Demo: fit a sum of gaussians on a linear background on synthetic
    data, print the recovered parameters, and plot the result when a Qt
    backend is available."""
    from .functions import sum_gauss
    from . import fittheories
    from . import bgtheories

    # Create synthetic data with a sum of gaussian functions
    # (``numpy.float`` was removed in NumPy 1.24; builtin float is equivalent)
    x = numpy.arange(1000).astype(float)

    p = [1000, 100., 250,
         255, 690., 45,
         1500, 800.5, 95]
    y = 0.5 * x + 13 + sum_gauss(x, *p)

    # Fitting
    fit = FitManager()
    # more sensitivity necessary to resolve
    # overlapping peaks at x=690 and x=800.5
    fit.setdata(x=x, y=y)
    fit.loadtheories(fittheories)
    fit.settheory('Gaussians')
    fit.loadbgtheories(bgtheories)
    fit.setbackground('Linear')
    fit.estimate()
    fit.runfit()

    print("Searched parameters = ", p)
    print("Obtained parameters : ")
    dummy_list = []
    for param in fit.fit_results:
        print(param['name'], ' = ', param['fitresult'])
        dummy_list.append(param['fitresult'])
    print("chisq = ", fit.chisq)

    # Plot: first two fitted parameters are the linear background
    constant, slope = dummy_list[:2]
    p1 = dummy_list[2:]
    print(p1)
    y2 = slope * x + constant + sum_gauss(x, *p1)

    try:
        from silx.gui import qt
        from silx.gui.plot.PlotWindow import PlotWindow
        app = qt.QApplication([])
        pw = PlotWindow(control=True)
        pw.addCurve(x, y, "Original")
        pw.addCurve(x, y2, "Fit result")
        pw.legendsDockWidget.show()
        pw.show()
        app.exec_()
    except ImportError:
        _logger.warning("Could not import qt to display fit result as curve")
# Run the interactive demo when this module is executed directly.
if __name__ == "__main__":
    test()
| 41.895522
| 99
| 0.588172
|
4a12a2543be6cc7a792c8310596012da841a9b7c
| 5,232
|
py
|
Python
|
training/src/tests/tests/python/cnnBiasTrain1d.py
|
steelONIONknight/bolt
|
9bd3d08f2abb14435ca3ad0179889e48fa7e9b47
|
[
"MIT"
] | null | null | null |
training/src/tests/tests/python/cnnBiasTrain1d.py
|
steelONIONknight/bolt
|
9bd3d08f2abb14435ca3ad0179889e48fa7e9b47
|
[
"MIT"
] | null | null | null |
training/src/tests/tests/python/cnnBiasTrain1d.py
|
steelONIONknight/bolt
|
9bd3d08f2abb14435ca3ad0179889e48fa7e9b47
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2022. Huawei Technologies Co., Ltd. All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import torch.nn as nn
import numpy as np
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import os
import time
num_classes = 10
batch_size = 50
learning_rate = 0.01
curdir = "./weights/"
class Softmax(nn.Module):
    """Softmax activation along dimension 1 (the class dimension)."""

    def forward(self, input):
        # Subtract the row-wise maximum before exponentiating. exp(x - m)
        # keeps values in a representable range; the normalized ratio is
        # mathematically unchanged, but large logits no longer overflow
        # to inf/NaN as with a plain exp(x).
        shifted = input - input.max(1, keepdim=True)[0]
        exp_x = torch.exp(shifted)
        y = exp_x / exp_x.sum(1).unsqueeze(1).expand_as(exp_x)
        return y
class NeuralNet(nn.Module):
    """1-D CNN MNIST classifier: Conv1d(1->16, stride 2) + Linear + softmax.

    Weights are deterministically initialized (seed 0, Xavier for weights,
    uniform [0, 1) for biases) so training runs are reproducible.
    """

    def __init__(self, num_classes):
        super(NeuralNet, self).__init__()
        torch.manual_seed(0)
        self.conv1 = nn.Conv1d(1, 16, kernel_size=5, stride=2, padding=2, bias=True)
        # 784-sample input, stride 2 with padding 2 -> 392 per channel
        self.fc1 = nn.Linear(16 * 392, num_classes, bias=True)
        self.softmax = Softmax()
        nn.init.xavier_uniform_(self.conv1.weight)
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.uniform_(self.conv1.bias, 0, 1)
        nn.init.uniform_(self.fc1.bias, 0, 1)

    def forward(self, x):
        # Fix: use the actual batch size instead of the hard-coded 50,
        # so the model also works for a final partial batch or any other
        # batch size. Behavior is identical for batches of 50.
        x = x.view(x.size(0), 1, -1)
        out = self.conv1(x)
        out = out.reshape(-1, self.fc1.in_features)
        out = self.fc1(out)
        out = self.softmax(out)
        return out
def predict(test_loader, model):
    """Run *model* over every batch of *test_loader* and print the overall
    classification accuracy (predicted class = argmax of the outputs)."""
    n_correct = 0
    n_seen = 0
    # ~ with torch.no_grad():
    for batch_images, batch_labels in test_loader:
        scores = model(batch_images)
        _, guesses = torch.max(scores.data, 1)
        n_seen += batch_labels.size(0)
        n_correct += (guesses == batch_labels).sum().item()
    print(
        "Accuracy of the network on the 10000 test images: {:.2f} %".format(
            100 * n_correct / n_seen
        )
    )
def saveWeights(index, model, dir):
    """Dump every trainable parameter of *model* to text files under *dir*.

    File names follow ``<index>_<param name>[_<out channel>].txt``. 4-D
    (conv) weight tensors are written one output channel per file; all other
    tensors go to a single file each.

    :param index: Prefix for the file names (e.g. the training step number).
    :param model: Module whose ``named_parameters()`` are saved.
    :param dir: Output folder (created if missing). NOTE(review): file paths
        are built by plain string concatenation, so *dir* must end with a
        path separator — confirm against callers.
    """
    if not os.path.exists(dir):
        os.mkdir(dir)
    for name, param in model.named_parameters():
        if param.requires_grad:
            if param.data.dim() == 4:
                # 4-D tensor: one text file per slice along dim 0,
                # each containing the dim-1 slices appended in order
                for i in range(0, param.data.shape[0]):
                    with open(
                        dir + str(index) + "_" + name + "_" + str(i) + ".txt", "w"
                    ) as outfile:
                        for j in range(0, param.data.shape[1]):
                            np.savetxt(outfile, param.data[i, j])
            else:
                # 1-D/2-D tensors: squeeze and save in one file
                with open(dir + str(index) + "_" + name + ".txt", "w") as outfile:
                    np.savetxt(outfile, torch.squeeze(param.data))
def CrossEntropy(y, target):
    """Mean cross-entropy between softmax outputs *y* (batch, num_classes)
    and integer class labels *target*; returns ``(loss, y)``."""
    # One-hot encode the integer targets by selecting rows of the identity
    # matrix, cast to the same dtype as the network output.
    identity = torch.sparse.torch.eye(num_classes)
    one_hot = Variable(identity.index_select(0, target).type(y.data.type()))
    loss = (-one_hot * torch.log(y)).sum() / y.size(0)
    return loss, y
def main():
    """Train the 1-D CNN on MNIST for one pass and report test accuracy
    before and after training."""
    # MNIST train/test datasets (downloaded to ./data/mnist on first run)
    train_dataset = torchvision.datasets.MNIST(
        root="./data/mnist", train=True, transform=transforms.ToTensor(), download=True
    )
    test_dataset = torchvision.datasets.MNIST(
        root="./data/mnist", train=False, transform=transforms.ToTensor()
    )
    # shuffle=False keeps runs reproducible (model weights are seeded too)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=batch_size, shuffle=False
    )
    test_loader = torch.utils.data.DataLoader(
        dataset=test_dataset, batch_size=batch_size, shuffle=False
    )
    model = NeuralNet(num_classes)
    # Baseline accuracy with freshly initialized weights
    predict(test_loader, model)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    total_step = len(train_loader)
    if os.path.exists(curdir + "loss.txt"):
        os.remove(curdir + "loss.txt")
    timeTaken = 0
    for i, (images, labels) in enumerate(train_loader):
        start = time.time()
        outputs = model(images)
        if i < 1:
            # Snapshot the initial weights as reference test assets
            saveWeights(i, model, "../../../../testAssets/test_cnn_layer/conv1d/")
        loss, lossInput = CrossEntropy(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        timeTaken += time.time() - start
        # if i % 100 == 0:
        #     with open(curdir + 'loss.txt', 'a') as outfile:
        #         print(loss.item(), file = outfile)
        if i % 100 == 0:
            print("Step [{:4d}/{}], Loss: {:.6f}".format(i, total_step, loss.item()))
    predict(test_loader, model)
    print("Time taken = {:.4f}".format(timeTaken))
# Train and evaluate when run as a script.
if __name__ == "__main__":
    main()
| 31.902439
| 148
| 0.629205
|
4a12a37d9c7ca5a9291d1edda817cee83a2fea67
| 9,186
|
py
|
Python
|
references/detection/train.py
|
soldierofhell/vision
|
9197596f86c62b3de7965e80d644189cabb78f2b
|
[
"BSD-3-Clause"
] | null | null | null |
references/detection/train.py
|
soldierofhell/vision
|
9197596f86c62b3de7965e80d644189cabb78f2b
|
[
"BSD-3-Clause"
] | null | null | null |
references/detection/train.py
|
soldierofhell/vision
|
9197596f86c62b3de7965e80d644189cabb78f2b
|
[
"BSD-3-Clause"
] | null | null | null |
r"""PyTorch Detection Training.
To run in a multi-gpu environment, use the distributed launcher::
python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \
train.py ... --world-size $NGPU
The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu.
--lr 0.02 --batch-size 2 --world-size 8
If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU.
On top of that, for training Faster/Mask R-CNN, the default hyperparameters are
--epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3
Also, if you train Keypoint R-CNN, the default hyperparameters are
--epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3
Because the number of images is smaller in the person keypoint subset of COCO,
the number of epochs should be adapted so that we have the same number of iterations.
"""
import datetime
import os
import time
import torch
import torch.utils.data
from torch import nn
import torchvision
import torchvision.models.detection
import torchvision.models.detection.mask_rcnn
from coco_utils import get_coco, get_coco_kp
from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups
from engine import train_one_epoch, evaluate
import utils
import transforms as T
def get_dataset(name, image_set, transform, data_path):
    """Build the dataset registered under *name* ('coco' or 'coco_kp').

    :param name: Dataset key.
    :param image_set: Split to load (e.g. 'train' or 'val').
    :param transform: Transform pipeline applied to each sample.
    :param data_path: Root folder of the dataset on disk.
    :return: ``(dataset, num_classes)`` tuple.
    """
    registry = {
        "coco": (data_path, get_coco, 91),
        "coco_kp": (data_path, get_coco_kp, 2),
    }
    root, factory, num_classes = registry[name]
    dataset = factory(root, image_set=image_set, transforms=transform)
    return dataset, num_classes
def get_transform(train):
    """Return the transform pipeline: ToTensor, plus a random horizontal
    flip (p=0.5) for training data."""
    steps = [T.ToTensor()]
    if train:
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)
def main(args):
    """Detection training driver: build datasets/loaders, model, optimizer
    and LR schedule, optionally resume from a checkpoint, then train and
    checkpoint once per epoch.

    :param args: ``argparse.Namespace`` produced by the parser in
        ``__main__`` below.
    """
    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True), args.data_path)
    dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False), args.data_path)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    if args.aspect_ratio_group_factor >= 0:
        # Batch images of similar aspect ratio together to reduce padding
        group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(
            train_sampler, args.batch_size, drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_sampler=train_batch_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1,
        sampler=test_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    print('train dataset length: ', len(dataset))
    print('RandomSamler length: ', len(train_sampler))
    print('train data_loader length: ', len(data_loader))

    print("Creating model")
    model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes,
                                                              pretrained=args.pretrained)

    if args.num_classes != num_classes:
        # Replace the box and mask prediction heads so the model matches
        # the requested number of classes (e.g. for fine-tuning)
        from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
        from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
        box_in_features = model.roi_heads.box_predictor.cls_score.in_features
        mask_in_features = 256  # model.roi_heads.mask_predictor.mask_fcn_logits.in_features
        print('box_in_features: ', box_in_features)
        print('mask_in_features: ', mask_in_features)
        model.roi_heads.box_predictor = FastRCNNPredictor(box_in_features, args.num_classes)
        model.roi_heads.mask_predictor = MaskRCNNPredictor(mask_in_features, 256, args.num_classes)

    model.to(device)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(
        params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)

    if args.resume:
        # Restore model/optimizer/scheduler state and continue training
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Ensure each epoch uses a different shuffling across workers
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            utils.save_on_master({
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'args': args,
                'epoch': epoch},
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # evaluate after every epoch
        #evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
    import argparse
    # Command-line interface. Defaults are tuned for 8 GPUs with 2 images
    # per GPU (see the module docstring); scale --lr with the GPU count.
    parser = argparse.ArgumentParser(
        description=__doc__)

    parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset')
    parser.add_argument('--dataset', default='coco', help='dataset')
    parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=2, type=int,
                        help='images per gpu, the total batch size is $NGPU x batch_size')
    parser.add_argument('--epochs', default=26, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', default=0.02, type=float,
                        help='initial learning rate, 0.02 is the default value for training '
                        'on 8 gpus and 2 images_per_gpu')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='.', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--aspect-ratio-group-factor', default=3, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )
    parser.add_argument('--num_classes', default=91, type=int, metavar='N',
                        help='number of classes')

    # distributed training parameters
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')

    args = parser.parse_args()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
| 41.945205
| 119
| 0.681907
|
4a12a426bc1ac4b6234ee3ca1a94ab9ea54340fa
| 60
|
py
|
Python
|
pokerl/envs/__init__.py
|
sneppy/pokerl
|
2ea8cd6c3dad7531ac5cad79aa0dc8879f771e2c
|
[
"MIT"
] | 6
|
2020-05-26T23:04:39.000Z
|
2022-03-01T18:11:08.000Z
|
pokerl/envs/__init__.py
|
sneppy/pokerl
|
2ea8cd6c3dad7531ac5cad79aa0dc8879f771e2c
|
[
"MIT"
] | 14
|
2020-05-26T23:30:13.000Z
|
2020-05-28T20:50:32.000Z
|
pokerl/envs/__init__.py
|
sneppy/pokerl
|
2ea8cd6c3dad7531ac5cad79aa0dc8879f771e2c
|
[
"MIT"
] | null | null | null |
from .env import PokerEnv
from .game_env import PokerGameEnv
| 30
| 34
| 0.85
|
4a12a435c88684eea97cea41736116f92b9bfa14
| 337
|
py
|
Python
|
Country cleaning/Argentina.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
Country cleaning/Argentina.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
Country cleaning/Argentina.py
|
Demonliquid/cars-python-cleaning
|
91c516a33c4522114dc024cfaf04f1c1d594f973
|
[
"MIT"
] | null | null | null |
# %%
import os
import pandas as pd
import numpy as np
import datetime
import codecs

# %% DATA LOADING
# TEMPLATE schema (kept for reference)
#base = pd.read_csv(r'D:\Basededatos\esquema.csv')

# COUNTRY: Argentina — raw AFAC history dump, tab-separated, latin1-encoded.
# NOTE(review): `error_bad_lines` is deprecated and removed in pandas 2.0;
# newer pandas requires `on_bad_lines='skip'` instead — confirm pandas version.
argentina2 = pd.read_csv(r'D:\Documentos\historico_afac\historico_afac.txt', encoding='latin1', engine="python" ,error_bad_lines=False, sep="\t")

# %%

# %%
|
4a12a452cbbc2e1029d0082a7f67e5f3f5379f54
| 9,764
|
py
|
Python
|
lib/opentypesvg/fonts2svg.py
|
davidgodzsak/opentype-svg
|
038bb25bcf9ccf0408bde708c4758674d7db5247
|
[
"MIT"
] | null | null | null |
lib/opentypesvg/fonts2svg.py
|
davidgodzsak/opentype-svg
|
038bb25bcf9ccf0408bde708c4758674d7db5247
|
[
"MIT"
] | null | null | null |
lib/opentypesvg/fonts2svg.py
|
davidgodzsak/opentype-svg
|
038bb25bcf9ccf0408bde708c4758674d7db5247
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Adobe. All rights reserved.
"""
Generates a set of SVG glyph files from one or more fonts and hex colors
for each of them. The fonts' format can be either OpenType, TrueType, WOFF,
or WOFF2.
"""
import argparse
import os
import re
import sys
from fontTools import ttLib
from fontTools.pens.basePen import BasePen
from fontTools.pens.transformPen import TransformPen
from opentypesvg.__version__ import version as __version__
from opentypesvg.utils import (
create_folder,
create_nested_folder,
final_message,
get_gnames_to_save_in_nested_folder,
get_output_folder_path,
split_comma_sequence,
validate_font_paths,
write_file,
)
class SVGPen(BasePen):
    """Pen that renders a glyph outline as SVG path data.

    The accumulated path-data string is available in :attr:`d`. Shorthand
    commands (``V``/``H``) are emitted for purely vertical/horizontal line
    segments, and float coordinates with integral values are written as
    integers to keep the output compact.
    """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self.d = u''
        self._lastX = self._lastY = None

    def _moveTo(self, pt):
        x, y = self._isInt(pt)
        self.d += u'M{} {}'.format(x, y)
        self._lastX, self._lastY = pt

    def _lineTo(self, pt):
        x, y = self._isInt(pt)
        if (x, y) == (self._lastX, self._lastY):
            # Zero-length segment: emit nothing, keep the current point.
            return
        if x == self._lastX:
            self.d += u'V{}'.format(y)       # vertical shorthand
        elif y == self._lastY:
            self.d += u'H{}'.format(x)       # horizontal shorthand
        else:
            self.d += u'L{} {}'.format(x, y)
        self._lastX, self._lastY = pt

    def _curveToOne(self, pt1, pt2, pt3):
        c1x, c1y = self._isInt(pt1)
        c2x, c2y = self._isInt(pt2)
        endx, endy = self._isInt(pt3)
        self.d += u'C{} {} {} {} {} {}'.format(c1x, c1y, c2x, c2y,
                                               endx, endy)
        self._lastX, self._lastY = pt3

    def _qCurveToOne(self, pt1, pt2):
        cx, cy = self._isInt(pt1)
        endx, endy = self._isInt(pt2)
        self.d += u'Q{} {} {} {}'.format(cx, cy, endx, endy)
        self._lastX, self._lastY = pt2

    def _closePath(self):
        self.d += u'Z'
        self._lastX = self._lastY = None

    def _endPath(self):
        self._closePath()

    @staticmethod
    def _isInt(tup):
        # Render 3.0 as 3, leave genuine fractions untouched.
        return [int(coord) if coord.is_integer() else coord for coord in tup]
def processFonts(font_paths_list, hex_colors_list, outputFolderPath, options):
    """Generate one SVG file per glyph from the input fonts.

    Each output SVG contains one ``<path>`` element per font that has the
    glyph, filled with the matching color from ``hex_colors_list``.

    :param font_paths_list: paths of the fonts to read glyphs from.
    :param hex_colors_list: one RRGGBB[AA] hex string per font.
    :param outputFolderPath: folder the SVG files are written to.
    :param options: parsed command-line options (see ``get_options``).
    :raises AssertionError: when no glyph sets could be collected.
    """
    glyphSetsList = []
    allGlyphNamesList = []
    upm = 1000  # fallback units-per-em when the first font lacks 'head'

    # Load the fonts and collect their glyph sets
    for fontIndex, fontPath in enumerate(font_paths_list):
        font = ttLib.TTFont(fontPath)
        if fontIndex == 0:
            # For simplicity, it's assumed that all fonts have the same UPM.
            # Read it from the first font while it's open, instead of
            # re-opening the font later just for this value.
            try:
                upm = font['head'].unitsPerEm
            except KeyError:
                pass
        gSet = font.getGlyphSet()
        glyphSetsList.append(gSet)
        allGlyphNamesList.append(gSet.keys())
        font.close()

    if not glyphSetsList:
        raise AssertionError("No glyph sets.")

    # Define the list of glyph names to convert to SVG
    if options.gnames_to_generate:
        glyphNamesList = sorted(options.gnames_to_generate)
    else:
        if options.glyphsets_union:
            glyphNamesList = sorted(
                list(set.union(*map(set, allGlyphNamesList))))
        else:
            glyphNamesList = sorted(
                list(set.intersection(*map(set, allGlyphNamesList))))

    # Extend the list with additional glyph names
    if options.gnames_to_add:
        glyphNamesList.extend(options.gnames_to_add)

    # Remove any duplicates and sort
    glyphNamesList = sorted(set(glyphNamesList))

    # Confirm that there's something to process
    if not glyphNamesList:
        print("The fonts and options provided can't produce any SVG files.",
              file=sys.stdout)
        return

    # Define the list of glyph names to skip
    glyphNamesToSkipList = [".notdef"]
    if options.gnames_to_exclude:
        glyphNamesToSkipList.extend(options.gnames_to_exclude)

    # Determine which glyph names need to be saved in a nested folder
    glyphNamesToSaveInNestedFolder = get_gnames_to_save_in_nested_folder(
        glyphNamesList)

    nestedFolderPath = None
    filesSaved = 0

    # Generate the SVGs
    for gName in glyphNamesList:
        svgStr = (u"""<svg xmlns="http://www.w3.org/2000/svg" """
                  u"""viewBox="0 -{} {} {}">\n""".format(upm, upm, upm))
        for index, gSet in enumerate(glyphSetsList):
            # Skip glyphs that don't exist in the current font,
            # or that were requested to be skipped
            if gName not in gSet.keys() or gName in glyphNamesToSkipList:
                continue
            pen = SVGPen(gSet)
            # Flip the y-axis: font coordinates are y-up, SVG is y-down.
            tpen = TransformPen(pen, (1.0, 0.0, 0.0, -1.0, 0.0, 0.0))
            glyph = gSet[gName]
            glyph.draw(tpen)
            d = pen.d
            # Skip glyphs with no contours
            if not d:
                continue
            hex_str = hex_colors_list[index]
            opc = ''
            if len(hex_str) != 6:
                # An 8-digit value carries the alpha channel in the last
                # two digits; 'ff' (fully opaque) needs no attribute.
                opcHex = hex_str[6:]
                hex_str = hex_str[:6]
                if opcHex.lower() != 'ff':
                    opc = ' opacity="{:.2f}"'.format(int(opcHex, 16) / 255)
            svgStr += u'\t<path{} fill="#{}" d="{}"/>\n'.format(
                opc, hex_str, d)
        svgStr += u'</svg>'

        # Skip saving files that have no paths
        if '<path' not in svgStr:
            continue

        # Create the output folder.
        # This may be necessary if the folder was not provided.
        # The folder is made this late in the process because
        # only now it's clear that's needed.
        create_folder(outputFolderPath)

        # Create the nested folder, if there are conflicting glyph names.
        if gName in glyphNamesToSaveInNestedFolder:
            folderPath = create_nested_folder(nestedFolderPath,
                                              outputFolderPath)
        else:
            folderPath = outputFolderPath

        svgFilePath = os.path.join(folderPath, gName + '.svg')
        write_file(svgFilePath, svgStr)
        filesSaved += 1

    # NOTE: a stray `font.close()` used to live here, operating on the
    # leftover loop variable; every font is already closed in the loading
    # loop above, so it has been removed.
    final_message(filesSaved)
# Exactly 6 or 8 hex digits (RRGGBB or RRGGBBAA).
RE_HEXCOLOR = re.compile(r"^(?=[a-fA-F0-9]*$)(?:.{6}|.{8})$")


def validate_hex_values(hex_str):
    """argparse type callback: split and validate comma-separated hex colors.

    :param hex_str: raw comma-separated string from the command line.
    :return: list of validated hex color strings.
    :raises argparse.ArgumentTypeError: on the first invalid value.
    """
    hex_values = split_comma_sequence(hex_str)
    invalid = [value for value in hex_values
               if not RE_HEXCOLOR.match(value)]
    if invalid:
        raise argparse.ArgumentTypeError(
            "{} is not a valid hex color.".format(invalid[0]))
    return hex_values
def get_options(args):
    """Parse the command line into an options namespace.

    Also resolves and validates the input font paths, exposing them on the
    returned namespace as ``font_paths_list``.

    :param args: argument list (``None`` means ``sys.argv[1:]``).
    :return: parsed ``argparse.Namespace``.
    """
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description=__doc__
    )
    arg_parser.add_argument(
        '--version',
        action='version',
        version=__version__
    )
    arg_parser.add_argument(
        '-c',
        metavar='HEX_VALUES',
        dest='colors_list',
        type=validate_hex_values,
        default=[],
        help='comma-separated list of hex colors in RRGGBBAA format.\n'
             'The alpha value (AA) is optional.'
    )
    arg_parser.add_argument(
        '-o',
        metavar='FOLDER_PATH',
        dest='output_folder_path',
        help='path to folder for outputting the SVG files to.'
    )
    arg_parser.add_argument(
        '-g',
        metavar='GLYPH_NAMES',
        dest='gnames_to_generate',
        type=split_comma_sequence,
        default=[],
        help='comma-separated sequence of glyph names to make SVG files from.'
    )
    arg_parser.add_argument(
        '-a',
        metavar='GLYPH_NAMES',
        dest='gnames_to_add',
        type=split_comma_sequence,
        default=[],
        help='comma-separated sequence of glyph names to add.'
    )
    arg_parser.add_argument(
        '-x',
        metavar='GLYPH_NAMES',
        dest='gnames_to_exclude',
        type=split_comma_sequence,
        default=[],
        help='comma-separated sequence of glyph names to exclude.'
    )
    arg_parser.add_argument(
        '-u',
        action='store_true',
        dest='glyphsets_union',
        help="do union (instead of intersection) of the fonts' glyph sets."
    )
    arg_parser.add_argument(
        'input_paths',
        metavar='FONT',
        nargs='+',
        help='OTF/TTF/WOFF/WOFF2 font file.',
    )
    parsed = arg_parser.parse_args(args)
    parsed.font_paths_list = validate_font_paths(parsed.input_paths)
    return parsed
def main(args=None):
    """Command-line entry point.

    :param args: argument list (``None`` means ``sys.argv[1:]``).
    :return: 1 when no valid font paths were provided, otherwise ``None``.
    """
    opts = get_options(args)

    if not opts.font_paths_list:
        return 1

    font_paths_list = opts.font_paths_list
    hex_colors_list = opts.colors_list

    # Confirm that the number of colors is the same as the fonts. If it's not,
    # extend the list of colors using SVG's default color (black), or trim the
    # list of colors.
    if len(hex_colors_list) < len(font_paths_list):
        num_add_col = len(font_paths_list) - len(hex_colors_list)
        hex_colors_list.extend(['000000'] * num_add_col)
        print("WARNING: The list of colors was extended with {} #000000 "
              "value(s).".format(num_add_col), file=sys.stderr)
    elif len(hex_colors_list) > len(font_paths_list):
        num_xtr_col = len(hex_colors_list) - len(font_paths_list)
        # BUG FIX: capture the values being dropped BEFORE truncating.
        # The previous code deleted the extras first and then printed the
        # tail of the already-truncated list, i.e. colors that were kept.
        removed_colors = hex_colors_list[len(font_paths_list):]
        del hex_colors_list[len(font_paths_list):]
        print("WARNING: The list of colors got the last {} value(s) truncated:"
              " {}".format(num_xtr_col, ' '.join(removed_colors)),
              file=sys.stderr)

    output_folder_path = get_output_folder_path(opts.output_folder_path,
                                                font_paths_list[0])

    processFonts(font_paths_list, hex_colors_list, output_folder_path, opts)
# Allow running as a script; the return value of main() becomes the
# process exit status.
if __name__ == "__main__":
    sys.exit(main())
| 31.80456
| 79
| 0.604261
|
4a12a4a0de3777d26c77bf1b970f96687f762383
| 420
|
py
|
Python
|
src/gobjcreator3/model/genum.py
|
ThomasBollmeier/GObjectCreator3
|
20f2ad66efbae5e270f08612e5115be75399c55c
|
[
"Apache-2.0"
] | 1
|
2015-03-31T12:21:14.000Z
|
2015-03-31T12:21:14.000Z
|
src/gobjcreator3/model/genum.py
|
ThomasBollmeier/GObjectCreator3
|
20f2ad66efbae5e270f08612e5115be75399c55c
|
[
"Apache-2.0"
] | null | null | null |
src/gobjcreator3/model/genum.py
|
ThomasBollmeier/GObjectCreator3
|
20f2ad66efbae5e270f08612e5115be75399c55c
|
[
"Apache-2.0"
] | null | null | null |
from gobjcreator3.model.type import Type
class GEnum(Type):
    """Enumeration type holding a sequence of (code_name, value) pairs."""

    def __init__(self, name, code_names_values):
        Type.__init__(self, name, Type.ENUMERATION)
        self.code_names_values = code_names_values

    def has_code(self, code_name):
        """Return True when one of the enum's codes is named *code_name*."""
        return any(entry[0] == code_name
                   for entry in self.code_names_values)
| 23.333333
| 51
| 0.592857
|
4a12a4d91b5d46ada8ffb2e92a74686e5d458627
| 2,910
|
py
|
Python
|
src/tempo/unit.py
|
techdragon/python-tempo
|
c146959a5bd3a6f510a784d89ad3ee0537342677
|
[
"BSD-3-Clause"
] | null | null | null |
src/tempo/unit.py
|
techdragon/python-tempo
|
c146959a5bd3a6f510a784d89ad3ee0537342677
|
[
"BSD-3-Clause"
] | null | null | null |
src/tempo/unit.py
|
techdragon/python-tempo
|
c146959a5bd3a6f510a784d89ad3ee0537342677
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
"""Date/time related constants."""
import datetime as dt
from tempo.utils import Enum
# Minimum and maximum points of time within which
# the library is able to operate
MIN = dt.datetime(year=1, month=1, day=1)
MAX = dt.datetime(year=9999, month=12, day=31,
                  hour=23, minute=59, second=59)
# Units relations
SECONDS_IN_MINUTE = 60
MINUTES_IN_HOUR = 60
SECONDS_IN_HOUR = SECONDS_IN_MINUTE * MINUTES_IN_HOUR
HOURS_IN_DAY = 24
MINUTES_IN_DAY = MINUTES_IN_HOUR * HOURS_IN_DAY
SECONDS_IN_DAY = MINUTES_IN_DAY * SECONDS_IN_MINUTE
DAYS_IN_WEEK = 7
HOURS_IN_WEEK = HOURS_IN_DAY * DAYS_IN_WEEK
MINUTES_IN_WEEK = HOURS_IN_WEEK * MINUTES_IN_HOUR
SECONDS_IN_WEEK = MINUTES_IN_WEEK * SECONDS_IN_MINUTE
MONTHS_IN_YEAR = 12
DAYS_IN_COMMON_YEAR = 365
DAYS_IN_LEAP_YEAR = 366
# Days per month, January through December.
DAYS_OF_COMMON_YEAR = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
DAYS_OF_LEAP_YEAR = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
class Unit(Enum):  # pylint: disable=no-init
    """Enumeration of supported time units."""
    SECOND = 'second'
    MINUTE = 'minute'
    HOUR = 'hour'
    DAY = 'day'
    WEEK = 'week'
    MONTH = 'month'
    YEAR = 'year'
# Order of places in time representation
# (smallest unit first; larger ORDER value == coarser unit).
ORDER = {
    Unit.SECOND: 1,
    Unit.MINUTE: 2,
    Unit.HOUR  : 3,
    Unit.DAY   : 4,
    Unit.WEEK  : 5,
    Unit.MONTH : 6,
    Unit.YEAR  : 7
}
# Used for distinguishing zero-based and one-based units.
# Seconds/minutes/hours count from 0; days/weeks/months/years from 1.
BASE = {
    Unit.SECOND: 0,
    Unit.MINUTE: 0,
    Unit.HOUR  : 0,
    Unit.DAY   : 1,
    Unit.WEEK  : 1,
    Unit.MONTH : 1,
    Unit.YEAR  : 1
}
# Maximum values of time components:
# UNITS_MAX[small][large] == max number of `small` units within one `large`.
# Month-based maxima use the longest month (31 days).
UNITS_MAX = {
    Unit.SECOND: {
        Unit.MINUTE: SECONDS_IN_MINUTE,
        Unit.HOUR: SECONDS_IN_HOUR,
        Unit.DAY: SECONDS_IN_DAY,
        Unit.WEEK: SECONDS_IN_WEEK,
        Unit.MONTH: SECONDS_IN_DAY * max(DAYS_OF_COMMON_YEAR + DAYS_OF_LEAP_YEAR),
        Unit.YEAR: SECONDS_IN_DAY * max(DAYS_IN_COMMON_YEAR, DAYS_IN_LEAP_YEAR),
    },
    Unit.MINUTE: {
        Unit.HOUR: MINUTES_IN_HOUR,
        Unit.DAY: MINUTES_IN_DAY,
        Unit.WEEK: MINUTES_IN_WEEK,
        Unit.MONTH: MINUTES_IN_DAY * max(DAYS_OF_COMMON_YEAR + DAYS_OF_LEAP_YEAR),
        Unit.YEAR: MINUTES_IN_DAY * max(DAYS_IN_COMMON_YEAR, DAYS_IN_LEAP_YEAR),
    },
    Unit.HOUR: {
        Unit.DAY: HOURS_IN_DAY,
        Unit.WEEK: HOURS_IN_WEEK,
        Unit.MONTH: HOURS_IN_DAY * max(DAYS_OF_COMMON_YEAR + DAYS_OF_LEAP_YEAR),
        Unit.YEAR: HOURS_IN_DAY * max(DAYS_IN_COMMON_YEAR, DAYS_IN_LEAP_YEAR)
    },
    Unit.DAY: {
        Unit.WEEK: DAYS_IN_WEEK,
        Unit.MONTH: max(DAYS_OF_COMMON_YEAR + DAYS_OF_LEAP_YEAR),
        Unit.YEAR: max(DAYS_IN_COMMON_YEAR, DAYS_IN_LEAP_YEAR)
    },
    Unit.WEEK: {
        # NOTE(review): 6 = max distinct calendar weeks touching one month.
        # 64 weeks/year exceeds the 53 ISO weeks — presumably deliberate
        # headroom/padding; confirm against consumers of this table.
        Unit.MONTH: 6,
        Unit.YEAR: 64,
    },
    Unit.MONTH: {
        Unit.YEAR: MONTHS_IN_YEAR
    }
}
| 28.811881
| 83
| 0.640893
|
4a12a570c5c6bf7684b9c0676d03fa0b35c3775e
| 6,820
|
py
|
Python
|
trakt/objects/show.py
|
jannon/trakt.py
|
096496e453cadf6718f0f6de6f82a6b4bd6cb56c
|
[
"MIT"
] | null | null | null |
trakt/objects/show.py
|
jannon/trakt.py
|
096496e453cadf6718f0f6de6f82a6b4bd6cb56c
|
[
"MIT"
] | null | null | null |
trakt/objects/show.py
|
jannon/trakt.py
|
096496e453cadf6718f0f6de6f82a6b4bd6cb56c
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime, to_iso8601_datetime, deprecated
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
from six import iteritems
class Show(Media):
    """A Trakt show.

    Holds the show's identifying keys, optional extended metadata, and
    (depending on how it was fetched) a mapping of seasons/episodes.
    """

    def __init__(self, client, keys, index=None):
        # `keys` carries the show's external ids; `index` is its position
        # in a listing, when applicable.
        super(Show, self).__init__(client, keys, index)
        self.title = None
        """
        :type: :class:`~python:str`
        Title
        """
        self.year = None
        """
        :type: :class:`~python:int`
        Year
        """
        self.seasons = {}
        """
        :type: :class:`~python:dict`
        Seasons, defined as :code:`{season_num: Season}`
        **Note:** this field might not be available with some methods
        """
        self.watchers = None
        """
        :type: :class:`~python:int`
        Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
        and :code:`Trakt['shows'].trending()` methods)
        """
        self.first_aired = None
        """
        :type: :class:`~python:datetime.datetime`
        First air date
        """
        self.airs = None
        """
        :type: :class:`~python:dict`
        Dictionary with day, time and timezone in which the show airs
        """
        self.runtime = None
        """
        :type: :class:`~python:int`
        Duration (in minutes)
        """
        self.certification = None
        """
        :type: :class:`~python:str`
        Content certification (e.g :code:`TV-MA`)
        """
        self.network = None
        """
        :type: :class:`~python:str`
        Network in which the show is aired
        """
        self.country = None
        """
        :type: :class:`~python:str`
        Country in which the show is aired
        """
        self.updated_at = None
        """
        :type: :class:`~python:datetime.datetime`
        Updated date/time
        """
        self.status = None
        """
        :type: :class:`~python:str`
        Value of :code:`returning series` (airing right now),
        :code:`in production` (airing soon), :code:`planned` (in development),
        :code:`canceled`, or :code:`ended`
        """
        self.homepage = None
        """
        :type: :class:`~python:str`
        Homepage URL
        """
        self.language = None
        """
        :type: :class:`~python:str`
        Language (for title, overview, etc..)
        """
        self.available_translations = None
        """
        :type: :class:`~python:list`
        Available translations (for title, overview, etc..)
        """
        self.genres = None
        """
        :type: :class:`~python:list`
        Genres
        """
        self.aired_episodes = None
        """
        :type: :class:`~python:int`
        Aired episode count
        """

    def episodes(self):
        """Return a flat episode iterator.

        :returns: Iterator :code:`((season_num, episode_num), Episode)`
        :rtype: iterator
        """
        for sk, season in iteritems(self.seasons):
            # Yield each episode in season
            for ek, episode in iteritems(season.episodes):
                yield (sk, ek), episode

    def to_identifier(self):
        """Return the show identifier which is compatible with requests that require show definitions.

        :return: Show identifier/definition
        :rtype: :class:`~python:dict`
        """
        return {
            'ids': dict(self.keys),
            'title': self.title,
            'year': self.year
        }

    @deprecated('Show.to_info() has been moved to Show.to_dict()')
    def to_info(self):
        """**Deprecated:** use the :code:`to_dict()` method instead."""
        return self.to_dict()

    def to_dict(self):
        """Dump show to a dictionary.

        Optional fields are only included when they have a value; timestamps
        are serialized to ISO-8601 strings.

        :return: Show dictionary
        :rtype: :class:`~python:dict`
        """
        result = self.to_identifier()
        result['seasons'] = [
            season.to_dict()
            for season in self.seasons.values()
        ]
        # `in_watchlist` is always present; unknown state is encoded as 0.
        result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
        if self.rating:
            result['rating'] = self.rating.value
            result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
        # Extended Info
        if self.first_aired:
            result['first_aired'] = to_iso8601_datetime(self.first_aired)
        if self.updated_at:
            result['updated_at'] = to_iso8601_datetime(self.updated_at)
        if self.overview:
            result['overview'] = self.overview
        if self.airs:
            result['airs'] = self.airs
        if self.runtime:
            result['runtime'] = self.runtime
        if self.certification:
            result['certification'] = self.certification
        if self.network:
            result['network'] = self.network
        if self.country:
            result['country'] = self.country
        if self.status:
            result['status'] = self.status
        if self.homepage:
            result['homepage'] = self.homepage
        if self.language:
            result['language'] = self.language
        if self.available_translations:
            result['available_translations'] = self.available_translations
        if self.genres:
            result['genres'] = self.genres
        if self.aired_episodes:
            result['aired_episodes'] = self.aired_episodes
        return result

    def _update(self, info=None, **kwargs):
        """Copy attributes from an `info` dictionary onto this object."""
        if not info:
            return
        super(Show, self)._update(info, **kwargs)
        update_attributes(self, info, [
            'title',
            # Trending
            'watchers',
            # Extended Info
            'airs',
            'runtime',
            'certification',
            'network',
            'country',
            'status',
            'homepage',
            'language',
            'available_translations',
            'genres',
            'aired_episodes'
        ])
        # Ensure `year` attribute is an integer (fixes incorrect type returned by search)
        if info.get('year'):
            self.year = int(info['year'])
        # Extended Info
        if 'first_aired' in info:
            self.first_aired = from_iso8601_datetime(info.get('first_aired'))
        if 'updated_at' in info:
            self.updated_at = from_iso8601_datetime(info.get('updated_at'))

    @classmethod
    def _construct(cls, client, keys, info=None, index=None, **kwargs):
        """Alternate constructor: build a Show and populate it from `info`."""
        show = cls(client, keys, index=index)
        show._update(info, **kwargs)
        return show

    def __repr__(self):
        return '<Show %r (%s)>' % (self.title, self.year)
| 24.444444
| 102
| 0.537977
|
4a12a5a4a2d41fc5325ac7474664e59b649a19ea
| 10,690
|
py
|
Python
|
argo/workflows/client/models/v1_scale_io_volume_source.py
|
fvdnabee/argo-client-python
|
0caa743442d37f2f2e3b30867398ed2708c1bf4d
|
[
"Apache-2.0"
] | 35
|
2019-10-25T09:19:36.000Z
|
2022-03-04T11:22:27.000Z
|
argo/workflows/client/models/v1_scale_io_volume_source.py
|
fvdnabee/argo-client-python
|
0caa743442d37f2f2e3b30867398ed2708c1bf4d
|
[
"Apache-2.0"
] | 17
|
2019-10-30T03:49:20.000Z
|
2020-07-02T15:54:50.000Z
|
argo/workflows/client/models/v1_scale_io_volume_source.py
|
fvdnabee/argo-client-python
|
0caa743442d37f2f2e3b30867398ed2708c1bf4d
|
[
"Apache-2.0"
] | 9
|
2019-11-06T13:30:08.000Z
|
2021-06-12T03:00:05.000Z
|
# coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: master
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# NOTE: auto-generated swagger model — prefer regenerating over hand-editing.
class V1ScaleIOVolumeSource(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'fs_type': 'str',
        'gateway': 'str',
        'protection_domain': 'str',
        'read_only': 'bool',
        'secret_ref': 'V1LocalObjectReference',
        'ssl_enabled': 'bool',
        'storage_mode': 'str',
        'storage_pool': 'str',
        'system': 'str',
        'volume_name': 'str'
    }
    # Maps python attribute names to the camelCase JSON keys of the API.
    attribute_map = {
        'fs_type': 'fsType',
        'gateway': 'gateway',
        'protection_domain': 'protectionDomain',
        'read_only': 'readOnly',
        'secret_ref': 'secretRef',
        'ssl_enabled': 'sslEnabled',
        'storage_mode': 'storageMode',
        'storage_pool': 'storagePool',
        'system': 'system',
        'volume_name': 'volumeName'
    }

    def __init__(self, fs_type=None, gateway=None, protection_domain=None, read_only=None, secret_ref=None, ssl_enabled=None, storage_mode=None, storage_pool=None, system=None, volume_name=None):  # noqa: E501
        """V1ScaleIOVolumeSource - a model defined in Swagger"""  # noqa: E501
        self._fs_type = None
        self._gateway = None
        self._protection_domain = None
        self._read_only = None
        self._secret_ref = None
        self._ssl_enabled = None
        self._storage_mode = None
        self._storage_pool = None
        self._system = None
        self._volume_name = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided, so unset
        # fields keep their None default.
        if fs_type is not None:
            self.fs_type = fs_type
        if gateway is not None:
            self.gateway = gateway
        if protection_domain is not None:
            self.protection_domain = protection_domain
        if read_only is not None:
            self.read_only = read_only
        if secret_ref is not None:
            self.secret_ref = secret_ref
        if ssl_enabled is not None:
            self.ssl_enabled = ssl_enabled
        if storage_mode is not None:
            self.storage_mode = storage_mode
        if storage_pool is not None:
            self.storage_pool = storage_pool
        if system is not None:
            self.system = system
        if volume_name is not None:
            self.volume_name = volume_name

    @property
    def fs_type(self):
        """Gets the fs_type of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The fs_type of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1ScaleIOVolumeSource.

        :param fs_type: The fs_type of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._fs_type = fs_type

    @property
    def gateway(self):
        """Gets the gateway of this V1ScaleIOVolumeSource.  # noqa: E501

        The host address of the ScaleIO API Gateway.  # noqa: E501

        :return: The gateway of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._gateway

    @gateway.setter
    def gateway(self, gateway):
        """Sets the gateway of this V1ScaleIOVolumeSource.

        The host address of the ScaleIO API Gateway.  # noqa: E501

        :param gateway: The gateway of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._gateway = gateway

    @property
    def protection_domain(self):
        """Gets the protection_domain of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The protection_domain of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._protection_domain

    @protection_domain.setter
    def protection_domain(self, protection_domain):
        """Sets the protection_domain of this V1ScaleIOVolumeSource.

        :param protection_domain: The protection_domain of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._protection_domain = protection_domain

    @property
    def read_only(self):
        """Gets the read_only of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The read_only of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1ScaleIOVolumeSource.

        :param read_only: The read_only of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: bool
        """
        self._read_only = read_only

    @property
    def secret_ref(self):
        """Gets the secret_ref of this V1ScaleIOVolumeSource.  # noqa: E501

        SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.  # noqa: E501

        :return: The secret_ref of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: V1LocalObjectReference
        """
        return self._secret_ref

    @secret_ref.setter
    def secret_ref(self, secret_ref):
        """Sets the secret_ref of this V1ScaleIOVolumeSource.

        SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.  # noqa: E501

        :param secret_ref: The secret_ref of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: V1LocalObjectReference
        """
        self._secret_ref = secret_ref

    @property
    def ssl_enabled(self):
        """Gets the ssl_enabled of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The ssl_enabled of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._ssl_enabled

    @ssl_enabled.setter
    def ssl_enabled(self, ssl_enabled):
        """Sets the ssl_enabled of this V1ScaleIOVolumeSource.

        :param ssl_enabled: The ssl_enabled of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: bool
        """
        self._ssl_enabled = ssl_enabled

    @property
    def storage_mode(self):
        """Gets the storage_mode of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The storage_mode of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._storage_mode

    @storage_mode.setter
    def storage_mode(self, storage_mode):
        """Sets the storage_mode of this V1ScaleIOVolumeSource.

        :param storage_mode: The storage_mode of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._storage_mode = storage_mode

    @property
    def storage_pool(self):
        """Gets the storage_pool of this V1ScaleIOVolumeSource.  # noqa: E501

        :return: The storage_pool of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._storage_pool

    @storage_pool.setter
    def storage_pool(self, storage_pool):
        """Sets the storage_pool of this V1ScaleIOVolumeSource.

        :param storage_pool: The storage_pool of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._storage_pool = storage_pool

    @property
    def system(self):
        """Gets the system of this V1ScaleIOVolumeSource.  # noqa: E501

        The name of the storage system as configured in ScaleIO.  # noqa: E501

        :return: The system of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._system

    @system.setter
    def system(self, system):
        """Sets the system of this V1ScaleIOVolumeSource.

        The name of the storage system as configured in ScaleIO.  # noqa: E501

        :param system: The system of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._system = system

    @property
    def volume_name(self):
        """Gets the volume_name of this V1ScaleIOVolumeSource.  # noqa: E501

        The name of a volume already created in the ScaleIO system that is associated with this volume source.  # noqa: E501

        :return: The volume_name of this V1ScaleIOVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._volume_name

    @volume_name.setter
    def volume_name(self, volume_name):
        """Sets the volume_name of this V1ScaleIOVolumeSource.

        The name of a volume already created in the ScaleIO system that is associated with this volume source.  # noqa: E501

        :param volume_name: The volume_name of this V1ScaleIOVolumeSource.  # noqa: E501
        :type: str
        """
        self._volume_name = volume_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists, and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated boilerplate: only relevant when the model subclasses dict.
        if issubclass(V1ScaleIOVolumeSource, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1ScaleIOVolumeSource):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.860335
| 209
| 0.614874
|
4a12a5fe4d773eb0b5cbe03f70ddcf354c28002c
| 14,023
|
py
|
Python
|
onmt/utils/parse.py
|
comydream/OpenNMT-py
|
2f3c810069ca03b752d9886782648e576b39a06d
|
[
"MIT"
] | 1
|
2021-10-01T15:03:35.000Z
|
2021-10-01T15:03:35.000Z
|
onmt/utils/parse.py
|
urialon/OpenNMT-py
|
bdca05a3fac8f864b21c86a8ad03c09895212e70
|
[
"MIT"
] | null | null | null |
onmt/utils/parse.py
|
urialon/OpenNMT-py
|
bdca05a3fac8f864b21c86a8ad03c09895212e70
|
[
"MIT"
] | null | null | null |
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
from onmt.constants import CorpusName, ModelTask
from onmt.transforms import AVAILABLE_TRANSFORMS
class DataOptsCheckerMixin(object):
    """Checker with methods for validate data related options."""

    @staticmethod
    def _validate_file(file_path, info):
        """Check `file_path` is valid or raise `IOError`."""
        if not os.path.isfile(file_path):
            raise IOError(f"Please check path of your {info} file!")

    @classmethod
    def _validate_data(cls, opt):
        """Parse corpora specified in data field of YAML file.

        Mutates `opt` in place: normalizes each corpus dict (transforms,
        paths, weight, features), sets `opt.data_task`, and replaces
        `opt.data` (a YAML string) with the parsed corpora dict.
        """
        import yaml
        default_transforms = opt.transforms
        if len(default_transforms) != 0:
            logger.info(f"Default transforms: {default_transforms}.")
        corpora = yaml.safe_load(opt.data)
        for cname, corpus in corpora.items():
            # Check Transforms
            _transforms = corpus.get('transforms', None)
            if _transforms is None:
                logger.info(f"Missing transforms field for {cname} data, "
                            f"set to default: {default_transforms}.")
                corpus['transforms'] = default_transforms
            # Check path
            path_src = corpus.get('path_src', None)
            path_tgt = corpus.get('path_tgt', None)
            if path_src is None:
                raise ValueError(f'Corpus {cname} src path is required.'
                                 'tgt path is also required for non language'
                                 ' modeling tasks.')
            else:
                # NOTE: data_task is re-assigned per corpus; the last corpus
                # without a tgt path switches the whole run to LM mode.
                opt.data_task = ModelTask.SEQ2SEQ
                if path_tgt is None:
                    logger.warning(
                        "path_tgt is None, it should be set unless the task"
                        " is language modeling"
                    )
                    opt.data_task = ModelTask.LANGUAGE_MODEL
                    # tgt is src for LM task
                    corpus["path_tgt"] = path_src
                    corpora[cname] = corpus
                    path_tgt = path_src
                cls._validate_file(path_src, info=f'{cname}/path_src')
                cls._validate_file(path_tgt, info=f'{cname}/path_tgt')
            path_align = corpus.get('path_align', None)
            if path_align is None:
                if hasattr(opt, 'lambda_align') and opt.lambda_align > 0.0:
                    raise ValueError(f'Corpus {cname} alignment file path are '
                                     'required when lambda_align > 0.0')
                corpus['path_align'] = None
            else:
                cls._validate_file(path_align, info=f'{cname}/path_align')
            # Check prefix: will be used when use prefix transform
            src_prefix = corpus.get('src_prefix', None)
            tgt_prefix = corpus.get('tgt_prefix', None)
            if src_prefix is None or tgt_prefix is None:
                if 'prefix' in corpus['transforms']:
                    raise ValueError(f'Corpus {cname} prefix are required.')
            # Check weight
            weight = corpus.get('weight', None)
            if weight is None:
                if cname != CorpusName.VALID:
                    logger.warning(f"Corpus {cname}'s weight should be given."
                                   " We default it to 1 for you.")
                corpus['weight'] = 1
            # Check features
            src_feats = corpus.get("src_feats", None)
            if src_feats is not None:
                for feature_name, feature_file in src_feats.items():
                    cls._validate_file(feature_file, info=f'{cname}/path_{feature_name}')
                if 'inferfeats' not in corpus["transforms"]:
                    raise ValueError(f"'inferfeats' transform is required when setting source features")
                if 'filterfeats' not in corpus["transforms"]:
                    raise ValueError(f"'filterfeats' transform is required when setting source features")
            else:
                corpus["src_feats"] = None
        logger.info(f"Parsed {len(corpora)} corpora from -data.")
        opt.data = corpora

    @classmethod
    def _validate_transforms_opts(cls, opt):
        """Check options used by transforms."""
        # Delegate validation to each transform class actually in use.
        for name, transform_cls in AVAILABLE_TRANSFORMS.items():
            if name in opt._all_transform:
                transform_cls._validate_options(opt)

    @classmethod
    def _get_all_transform(cls, opt):
        """Should only called after `_validate_data`."""
        # Union of the global transforms and every corpus-level transform.
        all_transforms = set(opt.transforms)
        for cname, corpus in opt.data.items():
            _transforms = set(corpus['transforms'])
            if len(_transforms) != 0:
                all_transforms.update(_transforms)
        if hasattr(opt, 'lambda_align') and opt.lambda_align > 0.0:
            # Alignment supervision assumes stable tokenization/length,
            # so on-the-fly tokenizers and token add/drop are rejected.
            if not all_transforms.isdisjoint(
                    {'sentencepiece', 'bpe', 'onmt_tokenize'}):
                raise ValueError('lambda_align is not compatible with'
                                 ' on-the-fly tokenization.')
            if not all_transforms.isdisjoint(
                    {'tokendrop', 'prefix', 'bart'}):
                raise ValueError('lambda_align is not compatible yet with'
                                 ' potentiel token deletion/addition.')
        opt._all_transform = all_transforms

    @classmethod
    def _validate_fields_opts(cls, opt, build_vocab_only=False):
        """Check options relate to vocab and fields.

        :param build_vocab_only: when True, only the vocab-building
            requirements are checked (file paths are not yet required).
        """
        for cname, corpus in opt.data.items():
            if cname != CorpusName.VALID and corpus["src_feats"] is not None:
                assert opt.src_feats_vocab, \
                    "-src_feats_vocab is required if using source features."
                import yaml
                opt.src_feats_vocab = yaml.safe_load(opt.src_feats_vocab)
                for feature in corpus["src_feats"].keys():
                    assert feature in opt.src_feats_vocab, \
                        f"No vocab file set for feature {feature}"
        if build_vocab_only:
            if not opt.share_vocab:
                assert opt.tgt_vocab, \
                    "-tgt_vocab is required if not -share_vocab."
            return
        # validation when train:
        cls._validate_file(opt.src_vocab, info='src vocab')
        if not opt.share_vocab:
            cls._validate_file(opt.tgt_vocab, info='tgt vocab')
        if opt.dump_fields or opt.dump_transforms:
            assert opt.save_data, "-save_data should be set if set \
                -dump_fields or -dump_transforms."
        # Check embeddings stuff
        if opt.both_embeddings is not None:
            assert (opt.src_embeddings is None
                    and opt.tgt_embeddings is None), \
                "You don't need -src_embeddings or -tgt_embeddings \
                if -both_embeddings is set."
        if any([opt.both_embeddings is not None,
                opt.src_embeddings is not None,
                opt.tgt_embeddings is not None]):
            assert opt.embeddings_type is not None, \
                "You need to specify an -embedding_type!"
            assert opt.save_data, "-save_data should be set if use \
                pretrained embeddings."

    @classmethod
    def _validate_language_model_compatibilities_opts(cls, opt):
        # LM training has a decoder-only architecture with a shared vocab.
        if opt.model_task != ModelTask.LANGUAGE_MODEL:
            return
        logger.info("encoder is not used for LM task")
        assert opt.share_vocab and (
            opt.tgt_vocab is None
        ), "vocab must be shared for LM task"
        assert (
            opt.decoder_type == "transformer"
        ), "Only transformer decoder is supported for LM task"

    @classmethod
    def validate_prepare_opts(cls, opt, build_vocab_only=False):
        """Validate all options relate to prepare (data/transform/vocab)."""
        if opt.n_sample != 0:
            assert opt.save_data, "-save_data should be set if \
                want save samples."
        # Order matters: _get_all_transform and the transform/field checks
        # rely on `opt.data` having been parsed by _validate_data.
        cls._validate_data(opt)
        cls._get_all_transform(opt)
        cls._validate_transforms_opts(opt)
        cls._validate_fields_opts(opt, build_vocab_only=build_vocab_only)

    @classmethod
    def validate_model_opts(cls, opt):
        cls._validate_language_model_compatibilities_opts(opt)
class ArgumentParser(cfargparse.ArgumentParser, DataOptsCheckerMixin):
"""OpenNMT option parser powered with option check methods."""
    def __init__(
            self,
            config_file_parser_class=cfargparse.YAMLConfigFileParser,
            formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
            **kwargs):
        # Default to YAML config files and help output that shows default
        # values; callers may override both via keyword arguments.
        super(ArgumentParser, self).__init__(
            config_file_parser_class=config_file_parser_class,
            formatter_class=formatter_class,
            **kwargs)
@classmethod
def defaults(cls, *args):
"""Get default arguments added to a parser by all ``*args``."""
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
    @classmethod
    def update_model_opts(cls, model_opt):
        """Normalize model options in place (expand shorthand/legacy opts)."""
        # -word_vec_size is shorthand for setting both src and tgt sizes.
        if model_opt.word_vec_size > 0:
            model_opt.src_word_vec_size = model_opt.word_vec_size
            model_opt.tgt_word_vec_size = model_opt.word_vec_size

        # Backward compatibility with "fix_word_vecs_*" opts
        if hasattr(model_opt, 'fix_word_vecs_enc'):
            model_opt.freeze_word_vecs_enc = model_opt.fix_word_vecs_enc
        if hasattr(model_opt, 'fix_word_vecs_dec'):
            model_opt.freeze_word_vecs_dec = model_opt.fix_word_vecs_dec

        # -layers is shorthand for setting both encoder and decoder depth.
        if model_opt.layers > 0:
            model_opt.enc_layers = model_opt.layers
            model_opt.dec_layers = model_opt.layers

        # -rnn_size is shorthand for both encoder and decoder sizes.
        if model_opt.rnn_size > 0:
            model_opt.enc_rnn_size = model_opt.rnn_size
            model_opt.dec_rnn_size = model_opt.rnn_size

        model_opt.brnn = model_opt.encoder_type == "brnn"

        if model_opt.copy_attn_type is None:
            model_opt.copy_attn_type = model_opt.global_attention

        # No alignment layer given: disable alignment supervision entirely.
        if model_opt.alignment_layer is None:
            model_opt.alignment_layer = -2
            model_opt.lambda_align = 0.0
            model_opt.full_context_alignment = False
    @classmethod
    def validate_model_opts(cls, model_opt):
        """Assert the model options describe a supported configuration."""
        assert model_opt.model_type in ["text"], \
            "Unsupported model type %s" % model_opt.model_type
        # encoder and decoder should be same sizes
        same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
        assert same_size, \
            "The encoder and decoder rnns must be the same size for now"
        assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
            "Using SRU requires -gpu_ranks set."
        if model_opt.share_embeddings:
            if model_opt.model_type != "text":
                raise AssertionError(
                    "--share_embeddings requires --model_type text.")
        # Joint alignment learning is transformer-only and needs a decoder
        # layer index within [-dec_layers, dec_layers).
        if model_opt.lambda_align > 0.0:
            assert model_opt.decoder_type == 'transformer', \
                "Only transformer is supported to joint learn alignment."
            assert model_opt.alignment_layer < model_opt.dec_layers and \
                model_opt.alignment_layer >= -model_opt.dec_layers, \
                "N° alignment_layer should be smaller than number of layers."
            logger.info("Joint learn alignment at layer [{}] "
                        "with {} heads in full_context '{}'.".format(
                            model_opt.alignment_layer,
                            model_opt.alignment_heads,
                            model_opt.full_context_alignment))
    @classmethod
    def ckpt_model_opts(cls, ckpt_opt):
        """Rebuild full model options from a checkpoint's saved options."""
        # Load default opt values, then overwrite with the opts in
        # the checkpoint. That way, if there are new options added,
        # the defaults are used.
        # NOTE(review): ``opts`` is presumably the module-level option
        # definitions import (e.g. onmt.opts) — confirm at file top.
        opt = cls.defaults(opts.model_opts)
        opt.__dict__.update(ckpt_opt.__dict__)
        return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.warn("You have a CUDA device, should run with -gpu_ranks")
if opt.world_size < len(opt.gpu_ranks):
raise AssertionError(
"parameter counts of -gpu_ranks must be less or equal "
"than -world_size.")
if opt.world_size == len(opt.gpu_ranks) and \
min(opt.gpu_ranks) > 0:
raise AssertionError(
"-gpu_ranks should have master(=0) rank "
"unless -world_size is greater than len(gpu_ranks).")
assert len(opt.dropout) == len(opt.dropout_steps), \
"Number of dropout values must match accum_steps values"
assert len(opt.attention_dropout) == len(opt.dropout_steps), \
"Number of attention_dropout values must match accum_steps values"
assert len(opt.accum_count) == len(opt.accum_steps), \
'Number of accum_count values must match number of accum_steps'
if opt.update_vocab:
assert opt.train_from, \
"-update_vocab needs -train_from option"
assert opt.reset_optim in ['states', 'all'], \
'-update_vocab needs -reset_optim "states" or "all"'
    @classmethod
    def validate_translate_opts(cls, opt):
        """Normalize translation options in place."""
        # SECURITY NOTE(review): eval() on the user-supplied -src_feats
        # string executes arbitrary code; ast.literal_eval would be the
        # safe equivalent for a dict literal — confirm with callers.
        opt.src_feats = eval(opt.src_feats) if opt.src_feats else {}
| 43.280864
| 105
| 0.598231
|
4a12a721f3e1ce77ccf0c2e74af80490c161f01b
| 2,417
|
py
|
Python
|
lab_exercises/le_03-2019Fall/lab_exercise_03_solution.py
|
jadeharr/SI506-practice
|
57001199c70e9b332b3dbad2ae6ce1be0e96946c
|
[
"BSD-3-Clause"
] | 12
|
2020-11-12T17:42:54.000Z
|
2022-02-03T15:51:45.000Z
|
lab_exercises/le_03-2019Fall/lab_exercise_03_solution.py
|
jadeharr/SI506-practice
|
57001199c70e9b332b3dbad2ae6ce1be0e96946c
|
[
"BSD-3-Clause"
] | 14
|
2020-10-07T13:44:33.000Z
|
2020-10-23T16:03:13.000Z
|
lab_exercises/le_03-2019Fall/lab_exercise_03_solution.py
|
jadeharr/SI506-practice
|
57001199c70e9b332b3dbad2ae6ce1be0e96946c
|
[
"BSD-3-Clause"
] | 15
|
2020-08-10T17:29:37.000Z
|
2022-01-18T02:15:52.000Z
|
# START LAB EXERCISE 03
print('Lab Exercise 03 \n')
# SETUP - A select list of UMSI faculty. Each entry is a single string
# holding the faculty member's name, title, and email address, with the
# three fields separated by a pipe ('|') delimiter.
umsi_faculty = ["Charles Severance|Clinical Professor of Information|csev@umich.edu",
                "Colleen Van Lent|Lecturer|collemc@umich.edu",
                "Chris Teplovs|Lecturer|cteplovs@umich.edu",
                "Anthony Whyte|Lecturer|arwhyte@umich.edu",
                "Christopher Brooks|Research Assistant Professor|brooksch@umich.edu"]
# END SETUP
# PROBLEM 1
# Index into the list from the front (second entry) and from the back
# (last entry) using positive and negative indexes respectively.
# BEGIN PROBLEM 1 SOLUTION
collemc = umsi_faculty[1]    # second faculty member
brookcsh = umsi_faculty[-1]  # last faculty member
# END PROBLEM 1 SOLUTION
# PROBLEM 2
# Slice out the 2nd, 3rd and 4th entries (the lecturers).
# BEGIN PROBLEM 2 SOLUTION
lecturers = umsi_faculty[1:4]
# END PROBLEM 2 SOLUTION
# PROBLEM 3
# Part I. The email address is the third pipe-delimited field of each
# record, so split on '|' and take index 2.
# BEGIN PROBLEM 3 SOLUTION
email_addresses = [record.split('|')[2] for record in umsi_faculty]
# Part II. Keep only the addresses longer than 15 characters.
long_email_addresses = [address for address in email_addresses
                        if len(address) > 15]
# END PROBLEM 3 SOLUTION
# END LAB EXERCISE
| 32.226667
| 109
| 0.735623
|
4a12a7d59993f4c6f584aca04da3b0daf99ced06
| 7,550
|
py
|
Python
|
awx/api/views/organization.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx/api/views/organization.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx/api/views/organization.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
# Python
import logging
# Django
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
# AWX
from awx.main.models import (
ActivityStream,
Inventory,
Host,
Project,
ExecutionEnvironment,
JobTemplate,
WorkflowJobTemplate,
Organization,
NotificationTemplate,
Role,
User,
Team,
InstanceGroup,
Credential,
)
from awx.api.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
SubListAPIView,
SubListCreateAttachDetachAPIView,
SubListAttachDetachAPIView,
SubListCreateAPIView,
ResourceAccessList,
BaseUsersList,
)
from awx.api.serializers import (
OrganizationSerializer,
InventorySerializer,
UserSerializer,
TeamSerializer,
ActivityStreamSerializer,
RoleSerializer,
NotificationTemplateSerializer,
InstanceGroupSerializer,
ExecutionEnvironmentSerializer,
ProjectSerializer,
JobTemplateSerializer,
WorkflowJobTemplateSerializer,
CredentialSerializer,
)
from awx.api.views.mixin import RelatedJobsPreventDeleteMixin, OrganizationCountsMixin
logger = logging.getLogger('awx.api.views.organization')
class OrganizationList(OrganizationCountsMixin, ListCreateAPIView):
    """List organizations readable by the request user, or create one."""
    model = Organization
    serializer_class = OrganizationSerializer
    def get_queryset(self):
        # Restrict to orgs the user can read; pre-load role and user
        # relations to avoid N+1 queries during serialization.
        qs = Organization.accessible_objects(self.request.user, 'read_role')
        qs = qs.select_related('admin_role', 'auditor_role', 'member_role', 'read_role')
        qs = qs.prefetch_related('created_by', 'modified_by')
        return qs
class OrganizationDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single organization, with related counts."""
    model = Organization
    serializer_class = OrganizationSerializer
    def get_serializer_context(self, *args, **kwargs):
        """Add per-organization related-object counts to the context.

        The counts are scoped to what the request user can read, so two
        users may see different numbers for the same organization.
        """
        full_context = super(OrganizationDetail, self).get_serializer_context(*args, **kwargs)
        if not hasattr(self, 'kwargs') or 'pk' not in self.kwargs:
            # No pk yet (e.g. during schema generation) — nothing to count.
            return full_context
        org_id = int(self.kwargs['pk'])
        org_counts = {}
        access_kwargs = {'accessor': self.request.user, 'role_field': 'read_role'}
        # Users/admins are counted directly off the role memberships.
        direct_counts = (
            Organization.objects.filter(id=org_id)
            .annotate(users=Count('member_role__members', distinct=True), admins=Count('admin_role__members', distinct=True))
            .values('users', 'admins')
        )
        if not direct_counts:
            # Organization does not exist (or was deleted mid-request).
            return full_context
        org_counts = direct_counts[0]
        # Remaining counts are filtered through the user's read access.
        org_counts['inventories'] = Inventory.accessible_objects(**access_kwargs).filter(organization__id=org_id).count()
        org_counts['teams'] = Team.accessible_objects(**access_kwargs).filter(organization__id=org_id).count()
        org_counts['projects'] = Project.accessible_objects(**access_kwargs).filter(organization__id=org_id).count()
        org_counts['job_templates'] = JobTemplate.accessible_objects(**access_kwargs).filter(organization__id=org_id).count()
        org_counts['hosts'] = Host.objects.org_active_count(org_id)
        full_context['related_field_counts'] = {}
        full_context['related_field_counts'][org_id] = org_counts
        return full_context
class OrganizationInventoriesList(SubListAPIView):
    """Read-only list of an organization's inventories."""
    model = Inventory
    serializer_class = InventorySerializer
    parent_model = Organization
    relationship = 'inventories'
class OrganizationUsersList(BaseUsersList):
    """Members of an organization (via member_role), ordered by username."""
    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'member_role.members'
    ordering = ('username',)
class OrganizationAdminsList(BaseUsersList):
    """Admins of an organization (via admin_role), ordered by username."""
    model = User
    serializer_class = UserSerializer
    parent_model = Organization
    relationship = 'admin_role.members'
    ordering = ('username',)
class OrganizationProjectsList(SubListCreateAPIView):
    """List or create projects owned by an organization."""
    model = Project
    serializer_class = ProjectSerializer
    parent_model = Organization
    parent_key = 'organization'
class OrganizationExecutionEnvironmentsList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach an organization's execution environments."""
    model = ExecutionEnvironment
    serializer_class = ExecutionEnvironmentSerializer
    parent_model = Organization
    relationship = 'executionenvironments'
    parent_key = 'organization'
    swagger_topic = "Execution Environments"
class OrganizationJobTemplatesList(SubListCreateAPIView):
    """List or create job templates owned by an organization."""
    model = JobTemplate
    serializer_class = JobTemplateSerializer
    parent_model = Organization
    parent_key = 'organization'
class OrganizationWorkflowJobTemplatesList(SubListCreateAPIView):
    """List or create workflow job templates owned by an organization."""
    model = WorkflowJobTemplate
    serializer_class = WorkflowJobTemplateSerializer
    parent_model = Organization
    parent_key = 'organization'
class OrganizationTeamsList(SubListCreateAttachDetachAPIView):
    """List/create/attach/detach teams belonging to an organization."""
    model = Team
    serializer_class = TeamSerializer
    parent_model = Organization
    relationship = 'teams'
    parent_key = 'organization'
class OrganizationActivityStreamList(SubListAPIView):
    """Activity-stream entries associated with an organization."""
    model = ActivityStream
    serializer_class = ActivityStreamSerializer
    parent_model = Organization
    relationship = 'activitystream_set'
    search_fields = ('changes',)
class OrganizationNotificationTemplatesList(SubListCreateAttachDetachAPIView):
    """Notification templates owned by an organization."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
    relationship = 'notification_templates'
    parent_key = 'organization'
class OrganizationNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView):
    """Base view for per-event notification template lists; subclasses
    set the specific ``relationship``."""
    model = NotificationTemplate
    serializer_class = NotificationTemplateSerializer
    parent_model = Organization
class OrganizationNotificationTemplatesStartedList(OrganizationNotificationTemplatesAnyList):
    """Templates notified when the organization's jobs start."""
    relationship = 'notification_templates_started'
class OrganizationNotificationTemplatesErrorList(OrganizationNotificationTemplatesAnyList):
    """Templates notified when the organization's jobs error."""
    relationship = 'notification_templates_error'
class OrganizationNotificationTemplatesSuccessList(OrganizationNotificationTemplatesAnyList):
    """Templates notified when the organization's jobs succeed."""
    relationship = 'notification_templates_success'
class OrganizationNotificationTemplatesApprovalList(OrganizationNotificationTemplatesAnyList):
    """Templates notified on workflow approval events."""
    relationship = 'notification_templates_approvals'
class OrganizationInstanceGroupsList(SubListAttachDetachAPIView):
    """Attach/detach instance groups associated with an organization."""
    model = InstanceGroup
    serializer_class = InstanceGroupSerializer
    parent_model = Organization
    relationship = 'instance_groups'
class OrganizationGalaxyCredentialsList(SubListAttachDetachAPIView):
    """Attach/detach Galaxy credentials for an organization."""
    model = Credential
    serializer_class = CredentialSerializer
    parent_model = Organization
    relationship = 'galaxy_credentials'
    def is_valid_relation(self, parent, sub, created=False):
        # Returning a dict signals a validation error; returning None
        # (implicitly) accepts the attachment.
        if sub.kind != 'galaxy_api_token':
            return {'msg': _(f"Credential must be a Galaxy credential, not {sub.credential_type.name}.")}
class OrganizationAccessList(ResourceAccessList):
    """Users with any access to an organization."""
    model = User  # needs to be User for AccessLists's
    parent_model = Organization
class OrganizationObjectRolesList(SubListAPIView):
    """Roles defined directly on an organization object."""
    model = Role
    serializer_class = RoleSerializer
    parent_model = Organization
    search_fields = ('role_field', 'content_type__model')
    def get_queryset(self):
        # Roles are generic-FK'd to their resource; filter by this
        # organization's content type and primary key.
        po = self.get_parent_object()
        content_type = ContentType.objects.get_for_model(self.parent_model)
        return Role.objects.filter(content_type=content_type, object_id=po.pk)
| 29.150579
| 125
| 0.758411
|
4a12a90fd6a3ed94bf580d8bd4d332c76596faac
| 3,753
|
py
|
Python
|
src/sentry/plugins/sentry_webhooks/plugin.py
|
gecka/sentry
|
9bfcde5f244dc4a8d5cf81222f14d3f8de1d9877
|
[
"BSD-3-Clause"
] | 1
|
2018-12-04T12:57:00.000Z
|
2018-12-04T12:57:00.000Z
|
src/sentry/plugins/sentry_webhooks/plugin.py
|
gecka/sentry
|
9bfcde5f244dc4a8d5cf81222f14d3f8de1d9877
|
[
"BSD-3-Clause"
] | 1
|
2021-05-09T11:43:43.000Z
|
2021-05-09T11:43:43.000Z
|
src/sentry/plugins/sentry_webhooks/plugin.py
|
gecka/sentry
|
9bfcde5f244dc4a8d5cf81222f14d3f8de1d9877
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import logging
import six
import sentry
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from sentry.exceptions import PluginError
from sentry.plugins.bases import notify
from sentry.http import is_valid_url, safe_urlopen
from sentry.utils.safe import safe_execute
def split_urls(value):
    """Split a newline-separated URL blob into stripped, non-empty URLs.

    Returns an empty tuple for falsy input, otherwise a list of URLs.
    """
    if not value:
        return ()
    # Return a materialized list rather than a lazy ``filter`` object:
    # callers iterate the result more than once (``validate_urls`` feeds
    # it to ``any()`` and then ``join``), and under Python 3 the one-shot
    # filter iterator was exhausted after the first pass, silently
    # producing an empty result.
    return [url.strip() for url in value.splitlines() if url.strip()]
def validate_urls(value, **kwargs):
    """Validate a newline-separated URL blob.

    Raises ``PluginError`` if any URL is not a valid http(s) URL;
    otherwise returns the normalized, newline-joined URLs.
    """
    # Materialize once: ``any()`` below consumes its iterable, so if
    # split_urls returns a lazy iterator the subsequent ``join`` would
    # otherwise see it already exhausted and return ''.
    urls = list(split_urls(value))
    if any((not u.startswith(('http://', 'https://')) or not is_valid_url(u)) for u in urls):
        raise PluginError('Not a valid URL.')
    return '\n'.join(urls)
class WebHooksOptionsForm(notify.NotificationConfigurationForm):
    """Project-level configuration form: one callback URL per line."""
    urls = forms.CharField(
        label=_('Callback URLs'),
        widget=forms.Textarea(
            attrs={'class': 'span6',
                   'placeholder': 'https://sentry.io/callback/url'}
        ),
        help_text=_('Enter callback URLs to POST new events to (one per line).')
    )
class WebHooksPlugin(notify.NotificationPlugin):
    """Notification plugin that POSTs event payloads to configured webhooks."""
    author = 'Sentry Team'
    author_url = 'https://github.com/getsentry/sentry'
    version = sentry.VERSION
    description = "Integrates web hooks."
    resource_links = [
        ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),
        ('Source', 'https://github.com/getsentry/sentry'),
    ]
    slug = 'webhooks'
    title = 'WebHooks'
    conf_title = title
    conf_key = 'webhooks'
    # TODO(dcramer): remove when this is migrated to React
    project_conf_form = WebHooksOptionsForm
    # Per-request timeout in seconds; overridable via settings.
    timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)
    logger = logging.getLogger('sentry.plugins.webhooks')
    user_agent = 'sentry-webhooks/%s' % version
    def is_configured(self, project, **kwargs):
        """The plugin is active once at least one URL is configured."""
        return bool(self.get_option('urls', project))
    def get_config(self, project, **kwargs):
        """Describe the configuration fields for the React-based UI."""
        return [
            {
                'name': 'urls',
                'label': 'Callback URLs',
                'type': 'textarea',
                'help': 'Enter callback URLs to POST new events to (one per line).',
                'placeholder': 'https://sentry.io/callback/url',
                'validators': [validate_urls],
                'required': False
            }
        ]
    def get_group_data(self, group, event, triggering_rules):
        """Build the JSON-serializable payload for one group/event pair."""
        data = {
            'id': six.text_type(group.id),
            'project': group.project.slug,
            'project_name': group.project.name,
            'project_slug': group.project.slug,
            'logger': event.get_tag('logger'),
            'level': event.get_tag('level'),
            'culprit': group.culprit,
            'message': event.real_message,
            'url': group.get_absolute_url(params={'referrer': 'webhooks_plugin'}),
            'triggering_rules': triggering_rules,
        }
        # Copy event data so mutations below don't touch the event object.
        data['event'] = dict(event.data or {})
        data['event']['tags'] = event.get_tags()
        data['event']['event_id'] = event.event_id
        data['event']['id'] = event.id
        return data
    def get_webhook_urls(self, project):
        """Return the configured callback URLs for the project."""
        return split_urls(self.get_option('urls', project))
    def send_webhook(self, url, payload):
        """POST the payload to one URL.

        NOTE(review): verify_ssl=False disables TLS certificate
        validation for callback endpoints — confirm this is intentional.
        """
        return safe_urlopen(
            url=url,
            json=payload,
            timeout=self.timeout,
            verify_ssl=False,
        )
    def notify_users(self, group, event, triggering_rules, fail_silently=False, **kwargs):
        """Fan the payload out to every configured webhook, best-effort."""
        payload = self.get_group_data(group, event, triggering_rules)
        for url in self.get_webhook_urls(group.project):
            # safe_execute swallows/logs per-URL failures so one bad
            # endpoint does not block the others.
            safe_execute(self.send_webhook, url, payload, _with_transaction=False)
| 33.810811
| 93
| 0.622968
|
4a12aa1968fa0bd003821a037d2116be555083e7
| 1,493
|
py
|
Python
|
joulescope/usb/impl_tools.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | 29
|
2018-12-19T22:42:09.000Z
|
2022-01-31T12:26:52.000Z
|
joulescope/usb/impl_tools.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | 23
|
2019-07-21T23:44:46.000Z
|
2022-03-11T13:29:11.000Z
|
joulescope/usb/impl_tools.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | 9
|
2019-07-22T00:07:53.000Z
|
2021-11-26T11:46:19.000Z
|
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common tools used by :class:`joulescope.usb.api.DeviceDriverApi` implementations."""
import time
class RunUntilDone:
    """Poll helper that waits for a callback to fire within a timeout.

    Pass :meth:`cbk_fn` as the completion callback of an asynchronous
    operation, then poll :meth:`is_done` until it returns True or raises
    :class:`TimeoutError`.
    """

    def __init__(self, timeout, name=''):
        """Create the helper.

        :param timeout: Maximum time to wait, in seconds.
        :param name: Optional label used in messages.
        """
        self._timeout = timeout
        self._name = name
        self._value = None  # (args, kwargs) captured by cbk_fn, or None
        self._time_start = time.time()

    def __str__(self):
        return f'RunUntilDone(timeout={self._timeout!r}, name={self._name!r})'

    @property
    def value(self):
        """The (args, kwargs) tuple captured by the callback, or None."""
        return self._value

    @property
    def value_args0(self):
        """Convenience accessor for the callback's first positional arg."""
        return self._value[0][0]

    def cbk_fn(self, *args, **kwargs):
        """Completion callback: record the arguments and mark done."""
        self._value = (args, kwargs)

    def is_done(self):
        """Return True once the callback has fired.

        :raise TimeoutError: if the timeout elapses before completion.
        """
        if self._value is not None:
            return True
        elapsed = time.time() - self._time_start
        if elapsed > self._timeout:
            raise TimeoutError(
                f'RunUntilDone {self._name}: timeout {elapsed} > {self._timeout}')
        return False
| 29.86
| 108
| 0.671132
|
4a12aa430bf65095e06d564ef615c6c7db316999
| 3,632
|
py
|
Python
|
python/archive/calculate_dist_mat_casia1.py
|
adi-nawal/Iris-Recognition
|
1cec1471776e0b023e6629ea2e9fedf8ae659354
|
[
"MIT"
] | 87
|
2019-09-20T07:04:38.000Z
|
2022-03-26T17:23:34.000Z
|
python/archive/calculate_dist_mat_casia1.py
|
adi-nawal/Iris-Recognition
|
1cec1471776e0b023e6629ea2e9fedf8ae659354
|
[
"MIT"
] | 14
|
2019-10-07T00:27:14.000Z
|
2022-03-11T23:33:40.000Z
|
python/archive/calculate_dist_mat_casia1.py
|
adi-nawal/Iris-Recognition
|
1cec1471776e0b023e6629ea2e9fedf8ae659354
|
[
"MIT"
] | 64
|
2019-09-04T16:08:25.000Z
|
2022-03-31T16:10:32.000Z
|
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
import os
import numpy as np
from glob import glob
from tqdm import tqdm
from time import time
from random import shuffle
from matplotlib import pyplot as plt
from itertools import repeat
from collections import defaultdict
from multiprocessing import Pool, cpu_count
from fnc.extractFeature import extractFeature
from fnc.matching import calHammingDist
#------------------------------------------------------------------------------
# Parameters
#------------------------------------------------------------------------------
# Dataset root and feature-extraction settings.
# NOTE(review): hard-coded absolute path — should be configurable.
CASIA1_DIR = "/home/antiaegis/Downloads/Iris-Recognition/CASIA1"
# Threshold passed to extractFeature for eyelash removal.
EYELASHES_THRES = 80
# Number of images sampled per identity.
N_IMAGES = 4
#------------------------------------------------------------------------------
# Pool function of extracting feature
#------------------------------------------------------------------------------
def pool_func_extract_feature(args):
    """Worker: unpack one (filename, threshold, multiprocess-flag) tuple
    and run feature extraction on it.

    Takes a single tuple so it can be used with ``Pool.map`` over
    ``zip(...)``-built argument lists.
    """
    im_filename, eyelashes_thres, use_multiprocess = args
    template, mask, im_filename = extractFeature(
        im_filename=im_filename,
        eyelashes_thres=eyelashes_thres,
        use_multiprocess=use_multiprocess,
    )
    return template, mask, im_filename
#------------------------------------------------------------------------------
# Pool function of calculating Hamming distance
#------------------------------------------------------------------------------
def pool_func_calHammingDist(args):
    """Worker: compute the Hamming distance between two masked templates.

    ``args`` is a (template1, mask1, template2, mask2) tuple, so the
    function is usable with ``Pool.map``.
    """
    template1, mask1, template2, mask2 = args
    dist = calHammingDist(template1, mask1, template2, mask2)
    return dist
#------------------------------------------------------------------------------
# Main execution
#------------------------------------------------------------------------------
# Get identities of MMU2 dataset
# NOTE(review): the comment above says MMU2 but CASIA1_DIR is globbed —
# presumably this was copied from the MMU2 variant of the script.
identities = glob(os.path.join(CASIA1_DIR, "**"))
identities = sorted([os.path.basename(identity) for identity in identities])
n_identities = len(identities)
print("Number of identities:", n_identities)
# Construct a dictionary of files
# Randomly keep N_IMAGES files per identity (shuffle is unseeded, so the
# sample differs between runs).
files_dict = {}
image_files = []
for identity in identities:
    files = glob(os.path.join(CASIA1_DIR, identity, "*.*"))
    shuffle(files)
    files_dict[identity] = files[:N_IMAGES]
    # print("Identity %s: %d images" % (identity, len(files_dict[identity])))
    image_files += files[:N_IMAGES]
n_image_files = len(image_files)
print("Number of image files:", n_image_files)
# Extract features
# One worker per CPU; use_multiprocess=False inside workers to avoid
# nested pools.
# NOTE(review): the pool is never close()d/join()ed — acceptable for a
# one-shot script but leaks workers if imported.
args = zip(image_files, repeat(EYELASHES_THRES), repeat(False))
pools = Pool(processes=cpu_count())
start_time = time()
features = list(pools.map(pool_func_extract_feature, args))
finish_time = time()
print("Extraction time: %.3f [s]" % (finish_time-start_time))
# Calculate the distances
# Only the strict upper triangle (i < j) is computed; distances are
# symmetric and the diagonal is zero.
args = []
for i in range(n_image_files):
    for j in range(n_image_files):
        if i>=j:
            continue
        arg = (features[i][0], features[i][1], features[j][0], features[j][1])
        args.append(arg)
print("Number of pairs:", len(args))
start_time = time()
distances = pools.map(pool_func_calHammingDist, args)
finish_time = time()
print("Extraction time: %.3f [s]" % (finish_time-start_time))
# Construct a distance matrix
# k walks the flat `distances` list in the same i<j order it was built;
# the lower triangle is mirrored from the upper.
dist_mat = np.zeros([n_image_files, n_image_files])
k = 0
for i in range(n_image_files):
    for j in range(n_image_files):
        if i<j:
            dist_mat[i, j] = distances[k]
            k += 1
        elif i>j:
            dist_mat[i, j] = dist_mat[j, i]
np.save("dist_mat_casia1.npy", dist_mat)
plt.figure()
plt.imshow(dist_mat)
plt.show()
| 31.310345
| 79
| 0.55837
|
4a12ab1590b558f2e86b82c7dee579e895eb60eb
| 11,047
|
py
|
Python
|
google/cloud/aiplatform_v1beta1/types/machine_resources.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 180
|
2020-09-23T17:21:15.000Z
|
2022-03-30T17:25:47.000Z
|
google/cloud/aiplatform_v1beta1/types/machine_resources.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 601
|
2020-09-23T16:23:44.000Z
|
2022-03-31T19:08:23.000Z
|
google/cloud/aiplatform_v1beta1/types/machine_resources.py
|
lclc19/python-aiplatform
|
d8da2e365277441abadb04328943f23345d72b0e
|
[
"Apache-2.0"
] | 109
|
2020-09-23T16:22:04.000Z
|
2022-03-28T21:18:29.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import (
accelerator_type as gca_accelerator_type,
)
# Module-level proto-plus registration: every message class declared below
# must appear in this manifest so it can be resolved by name.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1beta1",
    manifest={
        "MachineSpec",
        "DedicatedResources",
        "AutomaticResources",
        "BatchDedicatedResources",
        "ResourcesConsumed",
        "DiskSpec",
        "AutoscalingMetricSpec",
    },
)
class MachineSpec(proto.Message):
    r"""Specification of a single machine.
    Attributes:
        machine_type (str):
            Immutable. The type of the machine.
            See the `list of machine types supported for
            prediction <https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types>`__
            See the `list of machine types supported for custom
            training <https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types>`__.
            For
            [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel]
            this field is optional, and the default value is
            ``n1-standard-2``. For
            [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]
            or as part of
            [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec]
            this field is required.
        accelerator_type (google.cloud.aiplatform_v1beta1.types.AcceleratorType):
            Immutable. The type of accelerator(s) that may be attached
            to the machine as per
            [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count].
        accelerator_count (int):
            The number of accelerators to attach to the
            machine.
    """
    # Field numbers are part of the wire format; never renumber.
    machine_type = proto.Field(proto.STRING, number=1,)
    accelerator_type = proto.Field(
        proto.ENUM, number=2, enum=gca_accelerator_type.AcceleratorType,
    )
    accelerator_count = proto.Field(proto.INT32, number=3,)
class DedicatedResources(proto.Message):
    r"""A description of resources that are dedicated to a
    DeployedModel, and that need a higher degree of manual
    configuration.
    Attributes:
        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
            Required. Immutable. The specification of a
            single machine used by the prediction.
        min_replica_count (int):
            Required. Immutable. The minimum number of
            machine replicas this DeployedModel will be
            always deployed on. This value must be greater
            than or equal to 1.
            If traffic against the DeployedModel increases,
            it may dynamically be deployed onto more
            replicas, and as traffic decreases, some of
            these extra replicas may be freed.
        max_replica_count (int):
            Immutable. The maximum number of replicas this DeployedModel
            may be deployed on when the traffic against it increases. If
            the requested value is too large, the deployment will error,
            but if deployment succeeds then the ability to scale the
            model to that many replicas is guaranteed (barring service
            outages). If traffic against the DeployedModel increases
            beyond what its replicas at maximum may handle, a portion of
            the traffic will be dropped. If this value is not provided,
            will use
            [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count]
            as the default value.
        autoscaling_metric_specs (Sequence[google.cloud.aiplatform_v1beta1.types.AutoscalingMetricSpec]):
            Immutable. The metric specifications that overrides a
            resource utilization metric (CPU utilization, accelerator's
            duty cycle, and so on) target value (default to 60 if not
            set). At most one entry is allowed per metric.
            If
            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
            is above 0, the autoscaling will be based on both CPU
            utilization and accelerator's duty cycle metrics and scale
            up when either metrics exceeds its target value while scale
            down if both metrics are under their target value. The
            default target value is 60 for both metrics.
            If
            [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count]
            is 0, the autoscaling will be based on CPU utilization
            metric only with default target value 60 if not explicitly
            set.
            For example, in the case of Online Prediction, if you want
            to override target CPU utilization to 80, you should set
            [autoscaling_metric_specs.metric_name][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.metric_name]
            to
            ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
            and
            [autoscaling_metric_specs.target][google.cloud.aiplatform.v1beta1.AutoscalingMetricSpec.target]
            to ``80``.
    """
    # Field numbers are part of the wire format; never renumber.
    machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
    min_replica_count = proto.Field(proto.INT32, number=2,)
    max_replica_count = proto.Field(proto.INT32, number=3,)
    autoscaling_metric_specs = proto.RepeatedField(
        proto.MESSAGE, number=4, message="AutoscalingMetricSpec",
    )
class AutomaticResources(proto.Message):
    r"""A description of resources that to large degree are decided
    by Vertex AI, and require only a modest additional
    configuration. Each Model supporting these resources documents
    its specific guidelines.
    Attributes:
        min_replica_count (int):
            Immutable. The minimum number of replicas this DeployedModel
            will be always deployed on. If traffic against it increases,
            it may dynamically be deployed onto more replicas up to
            [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count],
            and as traffic decreases, some of these extra replicas may
            be freed. If the requested value is too large, the
            deployment will error.
        max_replica_count (int):
            Immutable. The maximum number of replicas
            this DeployedModel may be deployed on when the
            traffic against it increases. If the requested
            value is too large, the deployment will error,
            but if deployment succeeds then the ability to
            scale the model to that many replicas is
            guaranteed (barring service outages). If traffic
            against the DeployedModel increases beyond what
            its replicas at maximum may handle, a portion of
            the traffic will be dropped. If this value is
            not provided, a no upper bound for scaling under
            heavy traffic will be assume, though Vertex AI
            may be unable to scale beyond certain replica
            number.
    """
    # Field numbers are part of the wire format; never renumber.
    min_replica_count = proto.Field(proto.INT32, number=1,)
    max_replica_count = proto.Field(proto.INT32, number=2,)
class BatchDedicatedResources(proto.Message):
    r"""A description of resources that are used for performing batch
    operations, are dedicated to a Model, and need manual
    configuration.
    Attributes:
        machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec):
            Required. Immutable. The specification of a
            single machine.
        starting_replica_count (int):
            Immutable. The number of machine replicas used at the start
            of the batch operation. If not set, Vertex AI decides
            starting number, not greater than
            [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count]
        max_replica_count (int):
            Immutable. The maximum number of machine
            replicas the batch operation may be scaled to.
            The default value is 10.
    """
    # Field numbers are part of the wire format; never renumber.
    machine_spec = proto.Field(proto.MESSAGE, number=1, message="MachineSpec",)
    starting_replica_count = proto.Field(proto.INT32, number=2,)
    max_replica_count = proto.Field(proto.INT32, number=3,)
class ResourcesConsumed(proto.Message):
    r"""Statistics information about resource consumption.
    Attributes:
        replica_hours (float):
            Output only. The number of replica hours
            used. Note that many replicas may run in
            parallel, and additionally any given work may be
            queued for some time. Therefore this value is
            not strictly related to wall time.
    """
    # Wire-format tag 1; see the corresponding .proto definition.
    replica_hours = proto.Field(proto.DOUBLE, number=1,)
class DiskSpec(proto.Message):
    r"""Represents the spec of disk options.
    Attributes:
        boot_disk_type (str):
            Type of the boot disk (default is "pd-ssd").
            Valid values: "pd-ssd" (Persistent Disk Solid
            State Drive) or "pd-standard" (Persistent Disk
            Hard Disk Drive).
        boot_disk_size_gb (int):
            Size in GB of the boot disk (default is
            100GB).
    """
    # proto-plus field declarations; tag numbers match the .proto file.
    boot_disk_type = proto.Field(proto.STRING, number=1,)
    boot_disk_size_gb = proto.Field(proto.INT32, number=2,)
class AutoscalingMetricSpec(proto.Message):
    r"""The metric specification that defines the target resource
    utilization (CPU utilization, accelerator's duty cycle, and so
    on) for calculating the desired replica count.
    Attributes:
        metric_name (str):
            Required. The resource metric name. Supported metrics:
            -  For Online Prediction:
            -  ``aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle``
            -  ``aiplatform.googleapis.com/prediction/online/cpu/utilization``
        target (int):
            The target resource utilization in percentage
            (1% - 100%) for the given metric; once the real
            usage deviates from the target by a certain
            percentage, the machine replicas change. The
            default value is 60 (representing 60%) if not
            provided.
    """
    # proto-plus field declarations; tag numbers match the .proto file.
    metric_name = proto.Field(proto.STRING, number=1,)
    target = proto.Field(proto.INT32, number=2,)
# Public names exported by this module, taken from the proto-plus manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| 42.164122
| 117
| 0.673214
|
4a12ab7988633ca171a82f4d01b00d40a3d40824
| 83,356
|
py
|
Python
|
gnocchi/tests/test_rest.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/tests/test_rest.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/tests/test_rest.py
|
lamby/gnocchi
|
87928a7c92d46b31bf0e8333064a4d0b83e6131b
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import calendar
import contextlib
import datetime
from email import utils as email_utils
import hashlib
import json
import uuid
import fixtures
import iso8601
from keystonemiddleware import fixture as ksm_fixture
import mock
import pbr.version
import six
import testscenarios
from testtools import testcase
import webtest
from gnocchi import archive_policy
from gnocchi.rest import api
from gnocchi.rest import app
from gnocchi.tests import base as tests_base
from gnocchi.tests import utils as tests_utils
from gnocchi import utils
# unittest ``load_tests`` protocol hook: expand every class carrying a
# ``scenarios`` attribute into one concrete test case per scenario.
load_tests = testscenarios.load_tests_apply_scenarios
class TestingApp(webtest.TestApp):
    """webtest.TestApp wrapper that authenticates every request.

    Depending on ``auth_mode`` ("keystone", "basic" or "remoteuser"),
    the proper credentials are attached to each outgoing request.
    After every request, all pending incoming measures are processed
    synchronously so tests can immediately read back aggregated data.
    """
    VALID_TOKEN_ADMIN = str(uuid.uuid4())
    USER_ID_ADMIN = str(uuid.uuid4())
    PROJECT_ID_ADMIN = str(uuid.uuid4())
    VALID_TOKEN = str(uuid.uuid4())
    USER_ID = str(uuid.uuid4())
    PROJECT_ID = str(uuid.uuid4())
    VALID_TOKEN_2 = str(uuid.uuid4())
    USER_ID_2 = str(uuid.uuid4())
    PROJECT_ID_2 = str(uuid.uuid4())
    INVALID_TOKEN = str(uuid.uuid4())

    def __init__(self, *args, **kwargs):
        self.auth_mode = kwargs.pop('auth_mode')
        self.chef = kwargs.pop('chef')
        super(TestingApp, self).__init__(*args, **kwargs)
        # Setup Keystone auth_token fake cache
        self.token = self.VALID_TOKEN
        # Setup default user for basic auth
        self.user = self.USER_ID.encode('ascii')

    @contextlib.contextmanager
    def use_admin_user(self):
        """Temporarily issue requests as the admin identity."""
        if self.auth_mode == "keystone":
            old_token = self.token
            self.token = self.VALID_TOKEN_ADMIN
            try:
                yield
            finally:
                self.token = old_token
        elif self.auth_mode in ("basic", "remoteuser"):
            # Both modes identify the caller by user name only; the two
            # original branches were identical and have been merged.
            old_user = self.user
            self.user = b"admin"
            try:
                yield
            finally:
                self.user = old_user
        else:
            raise RuntimeError("Unknown auth_mode")

    @contextlib.contextmanager
    def use_another_user(self):
        """Temporarily issue requests as a second, unprivileged user."""
        if self.auth_mode != "keystone":
            raise testcase.TestSkipped("Auth mode is not Keystone")
        old_token = self.token
        self.token = self.VALID_TOKEN_2
        try:
            yield
        finally:
            self.token = old_token

    @contextlib.contextmanager
    def use_invalid_token(self):
        """Temporarily issue requests with a token Keystone rejects."""
        if self.auth_mode != "keystone":
            raise testcase.TestSkipped("Auth mode is not Keystone")
        old_token = self.token
        self.token = self.INVALID_TOKEN
        try:
            yield
        finally:
            self.token = old_token

    def do_request(self, req, *args, **kwargs):
        # BUG FIX: the original tested ``self.auth_mode in "keystone"``,
        # a substring check that only worked by accident because none of
        # the other modes is a substring of "keystone"; use equality.
        if self.auth_mode == "keystone":
            if self.token is not None:
                req.headers['X-Auth-Token'] = self.token
        elif self.auth_mode == "basic":
            req.headers['Authorization'] = (
                b"basic " + base64.b64encode(self.user + b":")
            )
        elif self.auth_mode == "remoteuser":
            req.remote_user = self.user
        response = super(TestingApp, self).do_request(req, *args, **kwargs)
        # Process freshly posted measures synchronously so subsequent
        # GETs observe up-to-date aggregates.
        metrics = tests_utils.list_all_incoming_metrics(self.chef.incoming)
        self.chef.process_new_measures(metrics, sync=True)
        return response
class RestTest(tests_base.TestCase, testscenarios.TestWithScenarios):
    """Base class for REST API tests, run once per authentication mode."""
    # testscenarios expands each entry into a separate test run, setting
    # ``self.auth_mode`` on the instance before setUp() executes.
    scenarios = [
        ('basic', dict(auth_mode="basic")),
        ('keystone', dict(auth_mode="keystone")),
        ('remoteuser', dict(auth_mode="remoteuser")),
    ]
    def setUp(self):
        """Configure auth, stub driver loading and build the test app."""
        super(RestTest, self).setUp()
        if self.auth_mode == "keystone":
            # Register the three fake tokens (admin, default user,
            # second user) with the keystonemiddleware fixture.
            self.auth_token_fixture = self.useFixture(
                ksm_fixture.AuthTokenFixture())
            self.auth_token_fixture.add_token_data(
                is_v2=True,
                token_id=TestingApp.VALID_TOKEN_ADMIN,
                user_id=TestingApp.USER_ID_ADMIN,
                user_name='adminusername',
                project_id=TestingApp.PROJECT_ID_ADMIN,
                role_list=['admin'])
            self.auth_token_fixture.add_token_data(
                is_v2=True,
                token_id=TestingApp.VALID_TOKEN,
                user_id=TestingApp.USER_ID,
                user_name='myusername',
                project_id=TestingApp.PROJECT_ID,
                role_list=["member"])
            self.auth_token_fixture.add_token_data(
                is_v2=True,
                token_id=TestingApp.VALID_TOKEN_2,
                user_id=TestingApp.USER_ID_2,
                user_name='myusername2',
                project_id=TestingApp.PROJECT_ID_2,
                role_list=["member"])
        self.conf.set_override("auth_mode", self.auth_mode, group="api")
        # Redirect driver loading to the in-test drivers instead of
        # instantiating real storage/indexer/incoming/coordinator.
        self.useFixture(fixtures.MockPatchObject(
            app.GnocchiHook, "_lazy_load", self._fake_lazy_load))
        self.app = TestingApp(app.load_app(conf=self.conf,
                                           not_implemented_middleware=False),
                              chef=self.chef,
                              auth_mode=self.auth_mode)
    def _fake_lazy_load(self, name):
        """Return the test double matching the requested driver name."""
        if name == "storage":
            return self.storage
        elif name == "indexer":
            return self.index
        elif name == "incoming":
            return self.incoming
        elif name == "coordinator":
            return self.coord
        else:
            raise RuntimeError("Invalid driver type: %s" % name)
    # NOTE(jd) Used at least by docs
    @staticmethod
    def runTest():
        pass
class RootTest(RestTest):
    """Tests for the API root, capabilities and status endpoints."""

    def test_deserialize_force_json(self):
        # Non-JSON payloads must be rejected with 415 Unsupported Media Type.
        with self.app.use_admin_user():
            self.app.post("/v1/archive_policy", params="foo", status=415)

    def test_capabilities(self):
        expected = sorted(
            set(archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS))
        payload = self.app.get("/v1/capabilities").json
        self.assertEqual(expected, sorted(payload['aggregation_methods']))

    def test_version(self):
        with self.app.use_admin_user():
            response = self.app.get("/")
        reported_build = json.loads(response.text)['build']
        self.assertEqual(
            reported_build,
            pbr.version.VersionInfo('gnocchi').version_string())

    def test_status(self):
        with self.app.use_admin_user():
            response = self.app.get("/v1/status")
        status = json.loads(response.text)
        self.assertIsInstance(status['storage']['measures_to_process'], dict)
        self.assertIsInstance(status['storage']['summary']['metrics'], int)
        self.assertIsInstance(status['storage']['summary']['measures'], int)
class ArchivePolicyTest(RestTest):
    """Test the ArchivePolicies REST API.
    See also gnocchi/tests/gabbi/gabbits/archive.yaml
    """
    # TODO(chdent): The tests left here involve inspecting the
    # aggregation methods which gabbi can't currently handle because
    # the ordering of the results is not predictable.

    def _create_policy(self, name, methods):
        # Create an archive policy with the given aggregation methods
        # (as admin) and return the decoded response body.
        with self.app.use_admin_user():
            result = self.app.post_json(
                "/v1/archive_policy",
                params={"name": name,
                        "aggregation_methods": methods,
                        "definition": [{"granularity": "1 minute",
                                        "points": 20}]},
                status=201)
        self.assertEqual("application/json", result.content_type)
        return json.loads(result.text)

    def test_post_archive_policy_with_agg_methods(self):
        ap = self._create_policy(str(uuid.uuid4()), ["mean"])
        self.assertEqual(['mean'], ap['aggregation_methods'])

    def test_post_archive_policy_with_agg_methods_minus(self):
        # "-mean" means "all defaults except mean".
        ap = self._create_policy(str(uuid.uuid4()), ["-mean"])
        defaults = set(self.conf.archive_policy.default_aggregation_methods)
        self.assertEqual(defaults - set(['mean']),
                         set(ap['aggregation_methods']))

    def test_get_archive_policy(self):
        ap = json.loads(self.app.get("/v1/archive_policy/medium").text)
        expected = self.archive_policies['medium'].jsonify()
        expected['definition'] = [
            archive_policy.ArchivePolicyItem(**item).jsonify()
            for item in expected['definition']
        ]
        # Aggregation method ordering is unspecified: compare as sets,
        # then compare the rest of the payload exactly.
        self.assertEqual(set(ap['aggregation_methods']),
                         expected['aggregation_methods'])
        del ap['aggregation_methods']
        del expected['aggregation_methods']
        self.assertEqual(expected, ap)

    def test_list_archive_policy(self):
        listed = json.loads(self.app.get("/v1/archive_policy").text)
        # Normalize the unordered aggregation method lists to sets.
        for ap in listed:
            ap['aggregation_methods'] = set(ap['aggregation_methods'])
        for name, policy in six.iteritems(self.archive_policies):
            expected = policy.jsonify()
            expected['definition'] = [
                archive_policy.ArchivePolicyItem(**item).jsonify()
                for item in policy.definition
            ]
            self.assertIn(expected, listed)
class MetricTest(RestTest):
    """Tests for /v1/metric: creation, measures, aggregation and ACLs."""
    def test_get_metric_with_another_user_linked_resource(self):
        # A metric attached to a resource owned by user 2 must be
        # readable by user 2 even though user 1 created it.
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": str(uuid.uuid4()),
                "started_at": "2014-01-01 02:02:02",
                "user_id": TestingApp.USER_ID_2,
                "project_id": TestingApp.PROJECT_ID_2,
                "metrics": {"foobar": {"archive_policy_name": "low"}},
            })
        resource = json.loads(result.text)
        metric_id = resource["metrics"]["foobar"]
        with self.app.use_another_user():
            self.app.get("/v1/metric/%s" % metric_id)
    def test_list_metric_with_another_user(self):
        # Metrics created by user 1 are not listed for user 2.
        metric_created = self.app.post_json(
            "/v1/metric",
            params={"archive_policy_name": "medium"},
            status=201)
        metric_id = metric_created.json["id"]
        with self.app.use_another_user():
            metric_list = self.app.get("/v1/metric")
        self.assertNotIn(metric_id, [m["id"] for m in metric_list.json])
    def test_list_metric_with_another_user_allowed(self):
        # A metric on a resource belonging to user 2's project IS listed.
        rid = str(uuid.uuid4())
        r = self.app.post_json("/v1/resource/generic",
                               params={
                                   "id": rid,
                                   "project_id": TestingApp.PROJECT_ID_2,
                                   "metrics": {
                                       "disk": {"archive_policy_name": "low"},
                                   }
                               })
        metric_id = r.json['metrics']['disk']
        with self.app.use_another_user():
            metric_list = self.app.get("/v1/metric")
        self.assertIn(metric_id, [m["id"] for m in metric_list.json])
    def test_get_metric_with_another_user(self):
        # Direct GET of someone else's metric is forbidden.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"},
                                    status=201)
        self.assertEqual("application/json", result.content_type)
        with self.app.use_another_user():
            self.app.get(result.headers['Location'], status=403)
    def test_post_archive_policy_no_mean(self):
        """Test that we have a 404 if mean is not in AP."""
        # The default GET aggregation is "mean"; a policy without it
        # makes the measures endpoint return 404.
        ap = str(uuid.uuid4())
        with self.app.use_admin_user():
            self.app.post_json(
                "/v1/archive_policy",
                params={"name": ap,
                        "aggregation_methods": ["max"],
                        "definition": [{
                            "granularity": "10s",
                            "points": 20,
                        }]},
                status=201)
        result = self.app.post_json(
            "/v1/metric",
            params={"archive_policy_name": ap},
            status=201)
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 8},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 16}])
        self.app.get("/v1/metric/%s/measures" % metric['id'],
                     status=404)
    def test_delete_metric_another_user(self):
        # Deleting someone else's metric is forbidden.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric = json.loads(result.text)
        with self.app.use_another_user():
            self.app.delete("/v1/metric/" + metric['id'], status=403)
    def test_add_measure_with_another_user(self):
        # Posting measures to someone else's metric is forbidden.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "high"})
        metric = json.loads(result.text)
        with self.app.use_another_user():
            self.app.post_json(
                "/v1/metric/%s/measures" % metric['id'],
                params=[{"timestamp": '2013-01-01 23:23:23',
                         "value": 1234.2}],
                status=403)
    def test_add_measures_back_window(self):
        # With back_window=2 the two previous granularity periods still
        # accept measures, but anything older is silently dropped.
        ap_name = str(uuid.uuid4())
        with self.app.use_admin_user():
            self.app.post_json(
                "/v1/archive_policy",
                params={"name": ap_name,
                        "back_window": 2,
                        "definition":
                        [{
                            "granularity": "1 minute",
                            "points": 20,
                        }]},
                status=201)
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": ap_name})
        metric = json.loads(result.text)
        self.app.post_json(
            "/v1/metric/%s/measures" % metric['id'],
            params=[{"timestamp": '2013-01-01 23:30:23',
                     "value": 1234.2}],
            status=202)
        self.app.post_json(
            "/v1/metric/%s/measures" % metric['id'],
            params=[{"timestamp": '2013-01-01 23:29:23',
                     "value": 1234.2}],
            status=202)
        self.app.post_json(
            "/v1/metric/%s/measures" % metric['id'],
            params=[{"timestamp": '2013-01-01 23:28:23',
                     "value": 1234.2}],
            status=202)
        # This one is too old and should not be taken into account
        self.app.post_json(
            "/v1/metric/%s/measures" % metric['id'],
            params=[{"timestamp": '2012-01-01 23:27:23',
                     "value": 1234.2}],
            status=202)
        ret = self.app.get("/v1/metric/%s/measures" % metric['id'])
        result = json.loads(ret.text)
        self.assertEqual(
            [[u'2013-01-01T23:28:00+00:00', 60.0, 1234.2],
             [u'2013-01-01T23:29:00+00:00', 60.0, 1234.2],
             [u'2013-01-01T23:30:00+00:00', 60.0, 1234.2]],
            result)
    def test_get_measure_with_another_user(self):
        # Reading measures of someone else's metric is forbidden.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "low"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 23:23:23',
                                    "value": 1234.2}])
        with self.app.use_another_user():
            self.app.get("/v1/metric/%s/measures" % metric['id'],
                         status=403)
    def test_get_measures_with_another_user_allowed(self):
        # Measures of a metric on a resource in user 2's project are
        # readable by user 2, both via the resource and the metric URL.
        rid = str(uuid.uuid4())
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": rid,
                "project_id": TestingApp.PROJECT_ID_2,
                "metrics": {
                    "disk": {"archive_policy_name": "low"},
                }
            })
        metric_id = result.json['metrics']['disk']
        measures_url = "/v1/resource/generic/%s/metric/disk/measures" % rid
        self.app.post_json(measures_url,
                           params=[{"timestamp": '2013-01-01 23:23:23',
                                    "value": 1234.2}])
        with self.app.use_another_user():
            result = self.app.get(measures_url)
            self.assertEqual(
                [['2013-01-01T00:00:00+00:00', 86400.0, 1234.2],
                 ['2013-01-01T23:00:00+00:00', 3600.0, 1234.2],
                 ['2013-01-01T23:20:00+00:00', 300.0, 1234.2]],
                result.json)
            result = self.app.get("/v1/metric/%s/measures" % metric_id)
            self.assertEqual(
                [['2013-01-01T00:00:00+00:00', 86400.0, 1234.2],
                 ['2013-01-01T23:00:00+00:00', 3600.0, 1234.2],
                 ['2013-01-01T23:20:00+00:00', 300.0, 1234.2]],
                result.json)
    def test_get_measures_with_another_user_disallowed(self):
        # Without project ownership, both measure URLs return 403.
        rid = str(uuid.uuid4())
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": rid,
                "metrics": {
                    "disk": {"archive_policy_name": "low"},
                }
            })
        metric_id = result.json['metrics']['disk']
        measures_url = "/v1/resource/generic/%s/metric/disk/measures" % rid
        self.app.post_json(measures_url,
                           params=[{"timestamp": '2013-01-01 23:23:23',
                                    "value": 1234.2}])
        with self.app.use_another_user():
            self.app.get(measures_url, status=403)
            self.app.get("/v1/metric/%s/measures" % metric_id, status=403)
    @mock.patch.object(utils, 'utcnow')
    def test_get_measure_start_relative(self, utcnow):
        """Make sure the timestamps can be relative to now."""
        utcnow.return_value = datetime.datetime(2014, 1, 1, 10, 23)
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "high"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": utils.utcnow().isoformat(),
                                    "value": 1234.2}])
        ret = self.app.get(
            "/v1/metric/%s/measures?start=-10 minutes"
            % metric['id'],
            status=200)
        result = json.loads(ret.text)
        now = utils.datetime_utc(2014, 1, 1, 10, 23)
        # Expect one point per granularity (1h, 1m, 1s), timestamps
        # truncated to the start of each period.
        self.assertEqual([
            ['2014-01-01T10:00:00+00:00', 3600.0, 1234.2],
            [(now
              - datetime.timedelta(
                  seconds=now.second,
                  microseconds=now.microsecond)).isoformat(),
             60.0, 1234.2],
            [(now
              - datetime.timedelta(
                  microseconds=now.microsecond)).isoformat(),
             1.0, 1234.2]], result)
    def test_get_measure_stop(self):
        # Points after the ``stop`` bound are excluded from aggregation.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "high"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 12:00:00',
                                    "value": 1234.2},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 456}])
        ret = self.app.get("/v1/metric/%s/measures"
                           "?stop=2013-01-01 12:00:01" % metric['id'],
                           status=200)
        result = json.loads(ret.text)
        self.assertEqual(
            [[u'2013-01-01T12:00:00+00:00', 3600.0, 845.1],
             [u'2013-01-01T12:00:00+00:00', 60.0, 845.1],
             [u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]],
            result)
    def test_get_measure_aggregation(self):
        # ``aggregation=max`` selects the max series instead of mean.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 123.2},
                                   {"timestamp": '2013-01-01 12:00:03',
                                    "value": 12345.2},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 1234.2}])
        ret = self.app.get(
            "/v1/metric/%s/measures?aggregation=max" % metric['id'],
            status=200)
        result = json.loads(ret.text)
        self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 12345.2],
                          [u'2013-01-01T12:00:00+00:00', 3600.0, 12345.2],
                          [u'2013-01-01T12:00:00+00:00', 60.0, 12345.2]],
                         result)
    def test_get_resource_missing_named_metric_measure_aggregation(self):
        # Cross-resource aggregation on metric name "foo": resources
        # lacking that metric (the one with only "bar") are skipped.
        mgr = self.index.get_resource_type_schema()
        resource_type = str(uuid.uuid4())
        self.index.create_resource_type(
            mgr.resource_type_from_dict(resource_type, {
                "server_group": {"type": "string",
                                 "min_length": 1,
                                 "max_length": 40,
                                 "required": True}
            }, 'creating'))
        attributes = {
            "server_group": str(uuid.uuid4()),
        }
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric1 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 8},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 16}])
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric2 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 0},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 4}])
        attributes['id'] = str(uuid.uuid4())
        attributes['metrics'] = {'foo': metric1['id']}
        self.app.post_json("/v1/resource/" + resource_type,
                           params=attributes)
        attributes['id'] = str(uuid.uuid4())
        attributes['metrics'] = {'bar': metric2['id']}
        self.app.post_json("/v1/resource/" + resource_type,
                           params=attributes)
        result = self.app.post_json(
            "/v1/aggregation/resource/%s/metric/foo?aggregation=max"
            % resource_type,
            params={"=": {"server_group": attributes['server_group']}})
        measures = json.loads(result.text)
        self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
                          [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
                          [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
                         measures)
    def test_search_value(self):
        # Search for points >= 1000 before the stop bound across two
        # metrics; only metric1 has a matching point in range.
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "high"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 12:00:00',
                                    "value": 1234.2},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 456}])
        metric1 = metric['id']
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "high"})
        metric = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric['id'],
                           params=[{"timestamp": '2013-01-01 12:30:00',
                                    "value": 1234.2},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 456}])
        metric2 = metric['id']
        ret = self.app.post_json(
            "/v1/search/metric?metric_id=%s&metric_id=%s"
            "&stop=2013-01-01 12:10:00" % (metric1, metric2),
            params={u"∧": [{u"≥": 1000}]},
            status=200)
        result = json.loads(ret.text)
        self.assertEqual(
            {metric1: [[u'2013-01-01T12:00:00+00:00', 1.0, 1234.2]],
             metric2: []},
            result)
class ResourceTest(RestTest):
def setUp(self):
super(ResourceTest, self).setUp()
self.attributes = {
"id": str(uuid.uuid4()),
"started_at": "2014-01-03T02:02:02+00:00",
"user_id": str(uuid.uuid4()),
"project_id": str(uuid.uuid4()),
"name": "my-name",
}
self.patchable_attributes = {
"ended_at": "2014-01-03T02:02:02+00:00",
"name": "new-name",
}
self.resource = self.attributes.copy()
# Set original_resource_id
self.resource['original_resource_id'] = self.resource['id']
self.resource['created_by_user_id'] = TestingApp.USER_ID
if self.auth_mode == "keystone":
self.resource['created_by_project_id'] = TestingApp.PROJECT_ID
self.resource['creator'] = (
TestingApp.USER_ID + ":" + TestingApp.PROJECT_ID
)
elif self.auth_mode in ["basic", "remoteuser"]:
self.resource['created_by_project_id'] = ""
self.resource['creator'] = TestingApp.USER_ID
self.resource['ended_at'] = None
self.resource['metrics'] = {}
if 'user_id' not in self.resource:
self.resource['user_id'] = None
if 'project_id' not in self.resource:
self.resource['project_id'] = None
mgr = self.index.get_resource_type_schema()
self.resource_type = str(uuid.uuid4())
self.index.create_resource_type(
mgr.resource_type_from_dict(self.resource_type, {
"name": {"type": "string",
"min_length": 1,
"max_length": 40,
"required": True}
}, "creating"))
self.resource['type'] = self.resource_type
@mock.patch.object(utils, 'utcnow')
def test_post_resource(self, utcnow):
utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
result = self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
resource = json.loads(result.text)
self.assertEqual("http://localhost/v1/resource/"
+ self.resource_type + "/" + self.attributes['id'],
result.headers['Location'])
self.assertIsNone(resource['revision_end'])
self.assertEqual(resource['revision_start'],
"2014-01-01T10:23:00+00:00")
self._check_etag(result, resource)
del resource['revision_start']
del resource['revision_end']
self.assertEqual(self.resource, resource)
def test_post_resource_with_invalid_metric(self):
metric_id = str(uuid.uuid4())
self.attributes['metrics'] = {"foo": metric_id}
result = self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=400)
self.assertIn("Metric %s does not exist" % metric_id,
result.text)
def test_post_resource_with_metric_from_other_user(self):
with self.app.use_another_user():
metric = self.app.post_json(
"/v1/metric",
params={'archive_policy_name': "high"})
metric_id = json.loads(metric.text)['id']
self.attributes['metrics'] = {"foo": metric_id}
result = self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=400)
self.assertIn("Metric %s does not exist" % metric_id,
result.text)
def test_post_resource_already_exist(self):
result = self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
result = self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=409)
self.assertIn("Resource %s already exists" % self.attributes['id'],
result.text)
def test_post_invalid_timestamp(self):
self.attributes['started_at'] = "2014-01-01 02:02:02"
self.attributes['ended_at'] = "2013-01-01 02:02:02"
self.app.post_json(
"/v1/resource/" + self.resource_type,
params=self.attributes,
status=400)
@staticmethod
def _strtime_to_httpdate(dt):
return email_utils.formatdate(calendar.timegm(
iso8601.parse_date(dt).timetuple()), usegmt=True)
def _check_etag(self, response, resource):
lastmodified = self._strtime_to_httpdate(resource['revision_start'])
etag = hashlib.sha1()
etag.update(resource['id'].encode('utf-8'))
etag.update(resource['revision_start'].encode('utf8'))
self.assertEqual(response.headers['Last-Modified'], lastmodified)
self.assertEqual(response.headers['ETag'], '"%s"' % etag.hexdigest())
@mock.patch.object(utils, 'utcnow')
def test_get_resource(self, utcnow):
utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
result = self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
result = self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id'])
resource = json.loads(result.text)
self.assertIsNone(resource['revision_end'])
self.assertEqual(resource['revision_start'],
"2014-01-01T10:23:00+00:00")
self._check_etag(result, resource)
del resource['revision_start']
del resource['revision_end']
self.assertEqual(self.resource, resource)
def test_get_resource_etag(self):
result = self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
result = self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id'])
resource = json.loads(result.text)
etag = hashlib.sha1()
etag.update(resource['id'].encode('utf-8'))
etag.update(resource['revision_start'].encode('utf-8'))
etag = etag.hexdigest()
lastmodified = self._strtime_to_httpdate(resource['revision_start'])
oldlastmodified = self._strtime_to_httpdate("2000-01-01 00:00:00")
# if-match and if-unmodified-since
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': 'fake'},
status=412)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': etag},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-unmodified-since': lastmodified},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-unmodified-since': oldlastmodified},
status=412)
# Some case with '*'
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': '*'},
status=304)
self.app.get("/v1/resource/" + self.resource_type
+ "/wrongid",
headers={'if-none-match': '*'},
status=404)
# always prefers if-match if both provided
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': etag,
'if-unmodified-since': lastmodified},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': etag,
'if-unmodified-since': oldlastmodified},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': '*',
'if-unmodified-since': oldlastmodified},
status=200)
# if-none-match and if-modified-since
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': etag},
status=304)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': 'fake'},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': lastmodified},
status=304)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': oldlastmodified},
status=200)
# always prefers if-none-match if both provided
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': oldlastmodified,
'if-none-match': etag},
status=304)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': oldlastmodified,
'if-none-match': '*'},
status=304)
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': lastmodified,
'if-none-match': '*'},
status=304)
# Some case with '*'
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-match': '*'},
status=200)
self.app.get("/v1/resource/" + self.resource_type
+ "/wrongid",
headers={'if-match': '*'},
status=404)
# if-none-match and if-match
self.app.get("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': etag,
'if-match': etag},
status=304)
# if-none-match returns 412 instead 304 for PUT/PATCH/DELETE
self.app.patch_json("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': '*'},
status=412)
self.app.delete("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-none-match': '*'},
status=412)
# if-modified-since is ignored with PATCH/PUT/DELETE
self.app.patch_json("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
params=self.patchable_attributes,
headers={'if-modified-since': lastmodified},
status=200)
self.app.delete("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'],
headers={'if-modified-since': lastmodified},
status=204)
def test_get_resource_non_admin(self):
with self.app.use_another_user():
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id'],
status=200)
def test_get_resource_unauthorized(self):
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
with self.app.use_another_user():
self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id'],
status=403)
def test_get_resource_named_metric(self):
self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id']
+ "/metric/foo/measures",
status=200)
def test_list_resource_metrics_unauthorized(self):
self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
with self.app.use_another_user():
self.app.get(
"/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'] + "/metric",
status=403)
def test_delete_resource_named_metric(self):
self.attributes['metrics'] = {'foo': {'archive_policy_name': "high"}}
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
self.app.delete("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id']
+ "/metric/foo",
status=204)
self.app.delete("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id']
+ "/metric/foo/measures",
status=404)
def test_get_resource_unknown_named_metric(self):
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
self.app.get("/v1/resource/"
+ self.resource_type
+ "/"
+ self.attributes['id']
+ "/metric/foo",
status=404)
def test_post_append_metrics_already_exists(self):
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
metrics = {'foo': {'archive_policy_name': "high"}}
self.app.post_json("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id'] + "/metric",
params=metrics, status=200)
metrics = {'foo': {'archive_policy_name': "low"}}
self.app.post_json("/v1/resource/" + self.resource_type
+ "/" + self.attributes['id']
+ "/metric",
params=metrics,
status=409)
result = self.app.get("/v1/resource/"
+ self.resource_type + "/"
+ self.attributes['id'])
result = json.loads(result.text)
self.assertTrue(uuid.UUID(result['metrics']['foo']))
    def test_post_append_metrics(self):
        """A metric can be appended to an existing resource by name."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        metrics = {'foo': {'archive_policy_name': "high"}}
        self.app.post_json("/v1/resource/" + self.resource_type
                           + "/" + self.attributes['id'] + "/metric",
                           params=metrics, status=200)
        result = self.app.get("/v1/resource/"
                              + self.resource_type + "/"
                              + self.attributes['id'])
        result = json.loads(result.text)
        self.assertTrue(uuid.UUID(result['metrics']['foo']))
    def test_post_append_metrics_created_by_different_user(self):
        """Appending a metric owned by another user fails with 400."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        with self.app.use_another_user():
            metric = self.app.post_json(
                "/v1/metric",
                params={'archive_policy_name': "high"})
        metric_id = json.loads(metric.text)['id']
        result = self.app.post_json("/v1/resource/" + self.resource_type
                                    + "/" + self.attributes['id'] + "/metric",
                                    params={str(uuid.uuid4()): metric_id},
                                    status=400)
        # The API reports a foreign metric as non-existent.
        self.assertIn("Metric %s does not exist" % metric_id, result.text)
    @mock.patch.object(utils, 'utcnow')
    def test_patch_resource_metrics(self, utcnow):
        """Patching only the metrics mapping must not start a new revision."""
        utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
        result = self.app.post_json("/v1/resource/" + self.resource_type,
                                    params=self.attributes,
                                    status=201)
        r = json.loads(result.text)
        utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 49)
        new_metrics = {'foo': {'archive_policy_name': "medium"}}
        self.app.patch_json(
            "/v1/resource/" + self.resource_type + "/"
            + self.attributes['id'],
            params={'metrics': new_metrics},
            status=200)
        result = self.app.get("/v1/resource/"
                              + self.resource_type + "/"
                              + self.attributes['id'])
        result = json.loads(result.text)
        self.assertTrue(uuid.UUID(result['metrics']['foo']))
        # revision_start still carries the creation timestamp: the
        # metric-only patch did not archive a new revision.
        self.assertIsNone(result['revision_end'])
        self.assertIsNone(r['revision_end'])
        self.assertEqual(result['revision_start'], "2014-01-01T10:23:00+00:00")
        self.assertEqual(r['revision_start'], "2014-01-01T10:23:00+00:00")
        # Apart from metrics/revision fields, the resource is unchanged.
        del result['metrics']
        del result['revision_start']
        del result['revision_end']
        del r['metrics']
        del r['revision_start']
        del r['revision_end']
        self.assertEqual(r, result)
    def test_patch_resource_existent_metrics_from_another_user(self):
        """Patching in another user's metric fails; metrics stay empty."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        with self.app.use_another_user():
            result = self.app.post_json(
                "/v1/metric",
                params={'archive_policy_name': "medium"})
        metric_id = json.loads(result.text)['id']
        result = self.app.patch_json(
            "/v1/resource/"
            + self.resource_type
            + "/"
            + self.attributes['id'],
            params={'metrics': {'foo': metric_id}},
            status=400)
        self.assertIn("Metric %s does not exist" % metric_id, result.text)
        result = self.app.get("/v1/resource/"
                              + self.resource_type
                              + "/"
                              + self.attributes['id'])
        result = json.loads(result.text)
        # The failed patch must not have attached anything.
        self.assertEqual({}, result['metrics'])
    def test_patch_resource_non_existent_metrics(self):
        """Patching in an unknown metric UUID fails; metrics stay empty."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        e1 = str(uuid.uuid4())
        result = self.app.patch_json(
            "/v1/resource/"
            + self.resource_type
            + "/"
            + self.attributes['id'],
            params={'metrics': {'foo': e1}},
            status=400)
        self.assertIn("Metric %s does not exist" % e1, result.text)
        result = self.app.get("/v1/resource/"
                              + self.resource_type
                              + "/"
                              + self.attributes['id'])
        result = json.loads(result.text)
        # The failed patch must not have attached anything.
        self.assertEqual({}, result['metrics'])
    @mock.patch.object(utils, 'utcnow')
    def test_patch_resource_attributes(self, utcnow):
        """Patching attributes archives the old revision and starts a new one."""
        utcnow.return_value = utils.datetime_utc(2014, 1, 1, 10, 23)
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        utcnow.return_value = utils.datetime_utc(2014, 1, 2, 6, 48)
        presponse = self.app.patch_json(
            "/v1/resource/" + self.resource_type
            + "/" + self.attributes['id'],
            params=self.patchable_attributes,
            status=200)
        response = self.app.get("/v1/resource/" + self.resource_type
                                + "/" + self.attributes['id'])
        result = json.loads(response.text)
        presult = json.loads(presponse.text)
        # The PATCH response body matches a subsequent GET.
        self.assertEqual(result, presult)
        for k, v in six.iteritems(self.patchable_attributes):
            self.assertEqual(v, result[k])
        self.assertIsNone(result['revision_end'])
        self.assertEqual(result['revision_start'],
                         "2014-01-02T06:48:00+00:00")
        self._check_etag(response, result)
        # Check the history: the pre-patch state must be archived with
        # revision_end set to the patch timestamp.
        history = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            headers={"Accept": "application/json; history=true"},
            params={"=": {"id": result['id']}},
            status=200)
        history = json.loads(history.text)
        self.assertGreaterEqual(len(history), 2)
        self.assertEqual(result, history[1])
        h = history[0]
        for k, v in six.iteritems(self.attributes):
            self.assertEqual(v, h[k])
        self.assertEqual(h['revision_end'],
                         "2014-01-02T06:48:00+00:00")
        self.assertEqual(h['revision_start'],
                         "2014-01-01T10:23:00+00:00")
    def test_patch_resource_attributes_unauthorized(self):
        """A different user may not patch the resource: 403."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        with self.app.use_another_user():
            self.app.patch_json(
                "/v1/resource/" + self.resource_type
                + "/" + self.attributes['id'],
                params=self.patchable_attributes,
                status=403)
    def test_patch_resource_ended_at_before_started_at(self):
        """Setting ended_at earlier than started_at is rejected with 400."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        self.app.patch_json(
            "/v1/resource/"
            + self.resource_type
            + "/"
            + self.attributes['id'],
            params={'ended_at': "2000-05-05 23:23:23"},
            status=400)
    def test_patch_resource_no_partial_update(self):
        """A patch that fails on one field must not apply the other fields."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        e1 = str(uuid.uuid4())
        # ended_at is valid but the metric UUID is unknown -> whole patch fails.
        result = self.app.patch_json(
            "/v1/resource/" + self.resource_type + "/"
            + self.attributes['id'],
            params={'ended_at': "2044-05-05 23:23:23",
                    'metrics': {"foo": e1}},
            status=400)
        self.assertIn("Metric %s does not exist" % e1, result.text)
        result = self.app.get("/v1/resource/"
                              + self.resource_type + "/"
                              + self.attributes['id'])
        result = json.loads(result.text)
        del result['revision_start']
        del result['revision_end']
        # The resource is byte-for-byte what was originally created.
        self.assertEqual(self.resource, result)
def test_patch_resource_non_existent(self):
self.app.patch_json(
"/v1/resource/" + self.resource_type
+ "/" + str(uuid.uuid4()),
params={},
status=404)
def test_patch_resource_non_existent_with_body(self):
self.app.patch_json(
"/v1/resource/" + self.resource_type
+ "/" + str(uuid.uuid4()),
params=self.patchable_attributes,
status=404)
    def test_patch_resource_unknown_field(self):
        """Patching with an attribute the type lacks is a 400."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        result = self.app.patch_json(
            "/v1/resource/" + self.resource_type + "/"
            + self.attributes['id'],
            params={'foobar': 123},
            status=400)
        self.assertIn(b'Invalid input: extra keys not allowed @ data['
                      + repr(u'foobar').encode('ascii') + b"]",
                      result.body)
def test_delete_resource(self):
self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes)
self.app.get("/v1/resource/" + self.resource_type + "/"
+ self.attributes['id'],
status=200)
self.app.delete("/v1/resource/" + self.resource_type + "/"
+ self.attributes['id'],
status=204)
self.app.get("/v1/resource/" + self.resource_type + "/"
+ self.attributes['id'],
status=404)
    def test_delete_resource_with_metrics(self):
        """Deleting a resource also deletes the metrics bound to it."""
        metric = self.app.post_json(
            "/v1/metric",
            params={'archive_policy_name': "high"})
        metric_id = json.loads(metric.text)['id']
        metric_name = six.text_type(uuid.uuid4())
        self.attributes['metrics'] = {metric_name: metric_id}
        self.app.get("/v1/metric/" + metric_id,
                     status=200)
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        self.app.get("/v1/resource/" + self.resource_type + "/"
                     + self.attributes['id'],
                     status=200)
        self.app.delete("/v1/resource/" + self.resource_type + "/"
                        + self.attributes['id'],
                        status=204)
        self.app.get("/v1/resource/" + self.resource_type + "/"
                     + self.attributes['id'],
                     status=404)
        # The bound metric was deleted along with the resource.
        self.app.get("/v1/metric/" + metric_id,
                     status=404)
    def test_delete_resource_unauthorized(self):
        """A different user may not delete the resource: 403."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        with self.app.use_another_user():
            self.app.delete("/v1/resource/" + self.resource_type + "/"
                            + self.attributes['id'],
                            status=403)
    def test_delete_resource_non_existent(self):
        """Deleting a never-created resource is a 404 with a clear message."""
        result = self.app.delete("/v1/resource/" + self.resource_type + "/"
                                 + self.attributes['id'],
                                 status=404)
        self.assertIn(
            "Resource %s does not exist" % self.attributes['id'],
            result.text)
    def test_post_resource_with_metrics(self):
        """A resource can be created referencing a pre-existing metric id."""
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric = json.loads(result.text)
        self.attributes['metrics'] = {"foo": metric['id']}
        result = self.app.post_json("/v1/resource/" + self.resource_type,
                                    params=self.attributes,
                                    status=201)
        resource = json.loads(result.text)
        self.assertEqual("http://localhost/v1/resource/"
                         + self.resource_type + "/"
                         + self.attributes['id'],
                         result.headers['Location'])
        self.resource['metrics'] = self.attributes['metrics']
        del resource['revision_start']
        del resource['revision_end']
        self.assertEqual(self.resource, resource)
def test_post_resource_with_null_metrics(self):
self.attributes['metrics'] = {"foo": {"archive_policy_name": "low"}}
result = self.app.post_json("/v1/resource/" + self.resource_type,
params=self.attributes,
status=201)
resource = json.loads(result.text)
self.assertEqual("http://localhost/v1/resource/"
+ self.resource_type + "/"
+ self.attributes['id'],
result.headers['Location'])
self.assertEqual(self.attributes['id'], resource["id"])
metric_id = uuid.UUID(resource['metrics']['foo'])
result = self.app.get("/v1/metric/" + str(metric_id) + "/measures",
status=200)
    def test_search_datetime(self):
        """Search matches on null datetime fields and revision_end bounds."""
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes,
                           status=201)
        result = self.app.get("/v1/resource/" + self.resource_type
                              + "/" + self.attributes['id'])
        result = json.loads(result.text)
        resources = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={"and": [{"=": {"id": result['id']}},
                            {"=": {"ended_at": None}}]},
            status=200)
        resources = json.loads(resources.text)
        self.assertGreaterEqual(len(resources), 1)
        self.assertEqual(result, resources[0])
        # With history enabled, a revision_end comparison combined with
        # "= None" still finds the live revision.
        resources = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            headers={"Accept": "application/json; history=true"},
            params={"and": [
                {"=": {"id": result['id']}},
                {"or": [{">=": {"revision_end": '2014-01-03T02:02:02'}},
                        {"=": {"revision_end": None}}]}
            ]},
            status=200)
        resources = json.loads(resources.text)
        self.assertGreaterEqual(len(resources), 1)
        self.assertEqual(result, resources[0])
    def test_search_resource_by_original_resource_id(self):
        """The "eq" operator matches on original_resource_id."""
        result = self.app.post_json(
            "/v1/resource/" + self.resource_type,
            params=self.attributes)
        created_resource = json.loads(result.text)
        original_id = created_resource['original_resource_id']
        result = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={"eq": {"original_resource_id": original_id}},
            status=200)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 1)
        self.assertEqual(created_resource, resources[0])
    def test_search_resources_by_user(self):
        """Searching by user_id works on both generic and typed endpoints."""
        u1 = str(uuid.uuid4())
        self.attributes['user_id'] = u1
        result = self.app.post_json(
            "/v1/resource/" + self.resource_type,
            params=self.attributes)
        created_resource = json.loads(result.text)
        result = self.app.post_json("/v1/search/resource/generic",
                                    params={"eq": {"user_id": u1}},
                                    status=200)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 1)
        result = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={"=": {"user_id": u1}},
            status=200)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 1)
        self.assertEqual(created_resource, resources[0])
    def test_search_resources_with_another_project_id(self):
        """Search returns matching resources across different projects."""
        u1 = str(uuid.uuid4())
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": str(uuid.uuid4()),
                "started_at": "2014-01-01 02:02:02",
                "user_id": u1,
                "project_id": TestingApp.PROJECT_ID_2,
            })
        g = json.loads(result.text)
        with self.app.use_another_user():
            result = self.app.post_json(
                "/v1/resource/generic",
                params={
                    "id": str(uuid.uuid4()),
                    "started_at": "2014-01-01 03:03:03",
                    "user_id": u1,
                    "project_id": str(uuid.uuid4()),
                })
            j = json.loads(result.text)
        g_found = False
        j_found = False
        result = self.app.post_json(
            "/v1/search/resource/generic",
            params={"=": {"user_id": u1}},
            status=200)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 2)
        for r in resources:
            if r['id'] == str(g['id']):
                self.assertEqual(g, r)
                g_found = True
            elif r['id'] == str(j['id']):
                self.assertEqual(j, r)
                j_found = True
            if g_found and j_found:
                break
        else:
            # for/else: reached only when the loop never hit `break`,
            # i.e. at least one of the two resources was missing.
            self.fail("Some resources were not found")
def test_search_resources_by_unknown_field(self):
result = self.app.post_json(
"/v1/search/resource/" + self.resource_type,
params={"=": {"foobar": "baz"}},
status=400)
self.assertIn("Resource type " + self.resource_type
+ " has no foobar attribute",
result.text)
    def test_search_resources_started_after(self):
        """Filter on started_at with ">=" and its unicode alias "≥"."""
        # NOTE(jd) So this test is a bit fuzzy right now as we uses the same
        # database for all tests and the tests are running concurrently, but
        # for now it'll be better than nothing.
        result = self.app.post_json(
            "/v1/resource/generic/",
            params={
                "id": str(uuid.uuid4()),
                "started_at": "2014-01-01 02:02:02",
                "user_id": str(uuid.uuid4()),
                "project_id": str(uuid.uuid4()),
            })
        g = json.loads(result.text)
        result = self.app.post_json(
            "/v1/resource/" + self.resource_type,
            params=self.attributes)
        i = json.loads(result.text)
        # The unicode "≥" operator is accepted as an alias for ">=".
        result = self.app.post_json(
            "/v1/search/resource/generic",
            params={"≥": {"started_at": "2014-01-01"}},
            status=200)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 2)
        i_found = False
        g_found = False
        for r in resources:
            if r['id'] == str(g['id']):
                self.assertEqual(g, r)
                g_found = True
            elif r['id'] == str(i['id']):
                i_found = True
            if i_found and g_found:
                break
        else:
            self.fail("Some resources were not found")
        result = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={">=": {"started_at": "2014-01-03"}})
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 1)
        for r in resources:
            if r['id'] == str(i['id']):
                self.assertEqual(i, r)
                break
        else:
            self.fail("Some resources were not found")
    def test_list_resources_with_bad_details(self):
        """A non-boolean ?details= value is rejected with 400."""
        result = self.app.get("/v1/resource/generic?details=awesome",
                              status=400)
        self.assertIn(
            b"Unable to parse `details': invalid truth value",
            result.body)
    def test_list_resources_with_bad_details_in_accept(self):
        """A non-boolean details= in the Accept header is rejected with 400."""
        result = self.app.get("/v1/resource/generic",
                              headers={
                                  "Accept": "application/json; details=foo",
                              },
                              status=400)
        self.assertIn(
            b"Unable to parse `Accept header': invalid truth value",
            result.body)
    def _do_test_list_resources_with_detail(self, request):
        """Shared body for the detailed-listing tests.

        *request* is a zero-argument callable issuing the listing request
        under test; the response must contain fully-detailed resources.
        """
        # NOTE(jd) So this test is a bit fuzzy right now as we uses the same
        # database for all tests and the tests are running concurrently, but
        # for now it'll be better than nothing.
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": str(uuid.uuid4()),
                "started_at": "2014-01-01 02:02:02",
                "user_id": str(uuid.uuid4()),
                "project_id": str(uuid.uuid4()),
            })
        g = json.loads(result.text)
        result = self.app.post_json(
            "/v1/resource/" + self.resource_type,
            params=self.attributes)
        i = json.loads(result.text)
        result = request()
        self.assertEqual(200, result.status_code)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 2)
        i_found = False
        g_found = False
        for r in resources:
            if r['id'] == str(g['id']):
                self.assertEqual(g, r)
                g_found = True
            elif r['id'] == str(i['id']):
                i_found = True
                # Check we got all the details
                self.assertEqual(i, r)
            if i_found and g_found:
                break
        else:
            self.fail("Some resources were not found")
        result = self.app.get("/v1/resource/" + self.resource_type)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 1)
        for r in resources:
            if r['id'] == str(i['id']):
                self.assertEqual(i, r)
                break
        else:
            self.fail("Some resources were not found")
    def test_list_resources_with_another_project_id(self):
        """Listing returns resources created by different users/projects."""
        result = self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": str(uuid.uuid4()),
                "started_at": "2014-01-01 02:02:02",
                "user_id": TestingApp.USER_ID_2,
                "project_id": TestingApp.PROJECT_ID_2,
            })
        g = json.loads(result.text)
        with self.app.use_another_user():
            result = self.app.post_json(
                "/v1/resource/generic",
                params={
                    "id": str(uuid.uuid4()),
                    "started_at": "2014-01-01 03:03:03",
                    "user_id": str(uuid.uuid4()),
                    "project_id": str(uuid.uuid4()),
                })
            j = json.loads(result.text)
        g_found = False
        j_found = False
        result = self.app.get("/v1/resource/generic")
        self.assertEqual(200, result.status_code)
        resources = json.loads(result.text)
        self.assertGreaterEqual(len(resources), 2)
        for r in resources:
            if r['id'] == str(g['id']):
                self.assertEqual(g, r)
                g_found = True
            elif r['id'] == str(j['id']):
                self.assertEqual(j, r)
                j_found = True
            if g_found and j_found:
                break
        else:
            # for/else: only reached if a resource was never matched.
            self.fail("Some resources were not found")
def test_list_resources_with_details(self):
self._do_test_list_resources_with_detail(
lambda: self.app.get("/v1/resource/generic?details=true"))
def test_list_resources_with_details_via_accept(self):
self._do_test_list_resources_with_detail(
lambda: self.app.get(
"/v1/resource/generic",
headers={"Accept": "application/json; details=true"}))
def test_search_resources_with_details(self):
self._do_test_list_resources_with_detail(
lambda: self.app.post("/v1/search/resource/generic?details=true"))
def test_search_resources_with_details_via_accept(self):
self._do_test_list_resources_with_detail(
lambda: self.app.post(
"/v1/search/resource/generic",
headers={"Accept": "application/json; details=true"}))
    def test_get_res_named_metric_measure_aggregated_policies_invalid(self):
        """Cross-resource aggregation over incompatible policies is a 400.

        One metric uses the "low" policy, the other a policy named
        "no_granularity_match", and the error body identifies both
        metrics as the cause.
        """
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "low"})
        metric1 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 16}])
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name":
                                            "no_granularity_match"})
        metric2 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 4}])
        # NOTE(sileht): because the database is never cleaned between each test
        # we must ensure that the query will not match resources from an other
        # test, to achieve this we set a different name on each test.
        name = str(uuid.uuid4())
        self.attributes['name'] = name
        self.attributes['metrics'] = {'foo': metric1['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        self.attributes['id'] = str(uuid.uuid4())
        self.attributes['metrics'] = {'foo': metric2['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        result = self.app.post_json(
            "/v1/aggregation/resource/"
            + self.resource_type + "/metric/foo?aggregation=max",
            params={"=": {"name": name}},
            status=400,
            headers={"Accept": "application/json"})
        self.assertEqual("Metrics can't being aggregated",
                         result.json['description']['cause'])
        self.assertEqual("No granularity match",
                         result.json['description']['reason'])
        self.assertEqual(
            sorted([[metric1['id'], 'max'], [metric2['id'], 'max']]),
            sorted(result.json['description']['detail']))
    def test_get_res_named_metric_measure_aggregation_nooverlap(self):
        """Aggregation needs overlapping series unless needed_overlap=0."""
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric1 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 8},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 16}])
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric2 = json.loads(result.text)
        # NOTE(sileht): because the database is never cleaned between each test
        # we must ensure that the query will not match resources from an other
        # test, to achieve this we set a different name on each test.
        name = str(uuid.uuid4())
        self.attributes['name'] = name
        self.attributes['metrics'] = {'foo': metric1['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        self.attributes['id'] = str(uuid.uuid4())
        self.attributes['metrics'] = {'foo': metric2['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        # metric2 never received measures, so the two series cannot overlap.
        result = self.app.post_json(
            "/v1/aggregation/resource/" + self.resource_type
            + "/metric/foo?aggregation=max",
            params={"=": {"name": name}},
            expect_errors=True)
        self.assertEqual(400, result.status_code, result.text)
        self.assertIn("No overlap", result.text)
        result = self.app.post_json(
            "/v1/aggregation/resource/" + self.resource_type
            + "/metric/foo?aggregation=max&needed_overlap=5&start=2013-01-01",
            params={"=": {"name": name}},
            expect_errors=True)
        self.assertEqual(400, result.status_code, result.text)
        self.assertIn("No overlap", result.text)
        # With needed_overlap=0 the single populated series is returned.
        result = self.app.post_json(
            "/v1/aggregation/resource/"
            + self.resource_type + "/metric/foo?aggregation=min"
            + "&needed_overlap=0&start=2013-01-01T00:00:00%2B00:00",
            params={"=": {"name": name}})
        self.assertEqual(200, result.status_code, result.text)
        measures = json.loads(result.text)
        self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 8.0],
                          ['2013-01-01T12:00:00+00:00', 3600.0, 8.0],
                          ['2013-01-01T12:00:00+00:00', 60.0, 8.0]],
                         measures)
    def test_get_res_named_metric_measure_aggregation_nominal(self):
        """Cross-resource max/min aggregation over two populated metrics."""
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric1 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 8},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 16}])
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric2 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 0},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 4}])
        # NOTE(sileht): because the database is never cleaned between each test
        # we must ensure that the query will not match resources from an other
        # test, to achieve this we set a different name on each test.
        name = str(uuid.uuid4())
        self.attributes['name'] = name
        self.attributes['metrics'] = {'foo': metric1['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        self.attributes['id'] = str(uuid.uuid4())
        self.attributes['metrics'] = {'foo': metric2['id']}
        self.app.post_json("/v1/resource/" + self.resource_type,
                           params=self.attributes)
        result = self.app.post_json(
            "/v1/aggregation/resource/" + self.resource_type
            + "/metric/foo?aggregation=max",
            params={"=": {"name": name}},
            expect_errors=True)
        self.assertEqual(200, result.status_code, result.text)
        measures = json.loads(result.text)
        self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 16.0],
                          [u'2013-01-01T12:00:00+00:00', 3600.0, 16.0],
                          [u'2013-01-01T12:00:00+00:00', 60.0, 16.0]],
                         measures)
        result = self.app.post_json(
            "/v1/aggregation/resource/"
            + self.resource_type + "/metric/foo?aggregation=min",
            params={"=": {"name": name}},
            expect_errors=True)
        self.assertEqual(200, result.status_code)
        measures = json.loads(result.text)
        self.assertEqual([['2013-01-01T00:00:00+00:00', 86400.0, 0],
                          ['2013-01-01T12:00:00+00:00', 3600.0, 0],
                          ['2013-01-01T12:00:00+00:00', 60.0, 0]],
                         measures)
def test_get_aggregated_measures_across_entities_no_match(self):
result = self.app.post_json(
"/v1/aggregation/resource/"
+ self.resource_type + "/metric/foo?aggregation=min",
params={"=": {"name": "none!"}},
expect_errors=True)
self.assertEqual(200, result.status_code)
measures = json.loads(result.text)
self.assertEqual([], measures)
    def test_get_aggregated_measures_across_entities(self):
        """Mean aggregation over explicit metric ids, one and two metrics."""
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric1 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric1['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 8},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 16}])
        result = self.app.post_json("/v1/metric",
                                    params={"archive_policy_name": "medium"})
        metric2 = json.loads(result.text)
        self.app.post_json("/v1/metric/%s/measures" % metric2['id'],
                           params=[{"timestamp": '2013-01-01 12:00:01',
                                    "value": 0},
                                   {"timestamp": '2013-01-01 12:00:02',
                                    "value": 4}])
        # Check with one metric
        result = self.app.get("/v1/aggregation/metric"
                              "?aggregation=mean&metric=%s" % (metric2['id']))
        measures = json.loads(result.text)
        self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 2.0],
                          [u'2013-01-01T12:00:00+00:00', 3600.0, 2.0],
                          [u'2013-01-01T12:00:00+00:00', 60.0, 2.0]],
                         measures)
        # Check with two metrics
        result = self.app.get("/v1/aggregation/metric"
                              "?aggregation=mean&metric=%s&metric=%s" %
                              (metric1['id'], metric2['id']))
        measures = json.loads(result.text)
        self.assertEqual([[u'2013-01-01T00:00:00+00:00', 86400.0, 7.0],
                          [u'2013-01-01T12:00:00+00:00', 3600.0, 7.0],
                          [u'2013-01-01T12:00:00+00:00', 60.0, 7.0]],
                         measures)
    def test_search_resources_with_like(self):
        """The "like" operator matches with SQL-style % wildcards."""
        result = self.app.post_json(
            "/v1/resource/" + self.resource_type,
            params=self.attributes)
        created_resource = json.loads(result.text)
        result = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={"like": {"name": "my%"}},
            status=200)
        resources = json.loads(result.text)
        self.assertIn(created_resource, resources)
        # A random name pattern must match nothing.
        result = self.app.post_json(
            "/v1/search/resource/" + self.resource_type,
            params={"like": {"name": str(uuid.uuid4())}},
            status=200)
        resources = json.loads(result.text)
        self.assertEqual([], resources)
class GenericResourceTest(RestTest):
    """Tests specific to the built-in "generic" resource type."""
    def test_list_resources_tied_to_user(self):
        """A resource created under one user is not listed for another."""
        resource_id = str(uuid.uuid4())
        self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": resource_id,
                "started_at": "2014-01-01 02:02:02",
                "user_id": str(uuid.uuid4()),
                "project_id": str(uuid.uuid4()),
            })
        with self.app.use_another_user():
            result = self.app.get("/v1/resource/generic")
            resources = json.loads(result.text)
            for resource in resources:
                if resource['id'] == resource_id:
                    self.fail("Resource found")
    def test_get_resources_metric_tied_to_user(self):
        """Both the creator and the owning user can read a resource metric."""
        resource_id = str(uuid.uuid4())
        self.app.post_json(
            "/v1/resource/generic",
            params={
                "id": resource_id,
                "started_at": "2014-01-01 02:02:02",
                "user_id": TestingApp.USER_ID_2,
                "project_id": TestingApp.PROJECT_ID_2,
                "metrics": {"foobar": {"archive_policy_name": "low"}},
            })
        # This user created it, she can access it
        self.app.get(
            "/v1/resource/generic/%s/metric/foobar" % resource_id)
        with self.app.use_another_user():
            # This user "owns it", it should be able to access it
            self.app.get(
                "/v1/resource/generic/%s/metric/foobar" % resource_id)
    def test_search_resources_invalid_query(self):
        """An unknown top-level search operator is rejected with 400."""
        result = self.app.post_json(
            "/v1/search/resource/generic",
            params={"wrongoperator": {"user_id": "bar"}},
            status=400)
        self.assertIn(
            "Invalid input: extra keys not allowed @ data["
            + repr(u'wrongoperator') + "]",
            result.text)
class QueryStringSearchAttrFilterTest(tests_base.TestCase):
    """Tests parsing the query-string search grammar into filter trees."""
    def _do_test(self, expr, expected):
        """Parse *expr* and assert the resulting filter dict is *expected*."""
        req = api.QueryStringSearchAttrFilter._parse(expr)
        self.assertEqual(expected, req)
    def test_search_query_builder(self):
        """Literals, operators, unicode aliases and and/or/not precedence."""
        self._do_test('foo=7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C',
                      {"=": {"foo": "7EED6CC3-EDC8-48C9-8EF6-8A36B9ACC91C"}})
        self._do_test('foo=7EED6CC3EDC848C98EF68A36B9ACC91C',
                      {"=": {"foo": "7EED6CC3EDC848C98EF68A36B9ACC91C"}})
        self._do_test('foo=bar', {"=": {"foo": "bar"}})
        # Bare numerals parse as numbers, quoted ones as strings.
        self._do_test('foo!=1', {"!=": {"foo": 1.0}})
        self._do_test('foo=True', {"=": {"foo": True}})
        self._do_test('foo=null', {"=": {"foo": None}})
        self._do_test('foo="null"', {"=": {"foo": "null"}})
        self._do_test('foo in ["null", "foo"]',
                      {"in": {"foo": ["null", "foo"]}})
        self._do_test(u'foo="quote" and bar≠1',
                      {"and": [{u"≠": {"bar": 1}},
                               {"=": {"foo": "quote"}}]})
        self._do_test('foo="quote" or bar like "%%foo"',
                      {"or": [{"like": {"bar": "%%foo"}},
                              {"=": {"foo": "quote"}}]})
        self._do_test('not (foo="quote" or bar like "%%foo" or foo="what!" '
                      'or bar="who?")',
                      {"not": {"or": [
                          {"=": {"bar": "who?"}},
                          {"=": {"foo": "what!"}},
                          {"like": {"bar": "%%foo"}},
                          {"=": {"foo": "quote"}},
                      ]}})
        self._do_test('(foo="quote" or bar like "%%foo" or not foo="what!" '
                      'or bar="who?") and cat="meme"',
                      {"and": [
                          {"=": {"cat": "meme"}},
                          {"or": [
                              {"=": {"bar": "who?"}},
                              {"not": {"=": {"foo": "what!"}}},
                              {"like": {"bar": "%%foo"}},
                              {"=": {"foo": "quote"}},
                          ]}
                      ]})
        # "and" binds tighter than "or".
        self._do_test('foo="quote" or bar like "%%foo" or foo="what!" '
                      'or bar="who?" and cat="meme"',
                      {"or": [
                          {"and": [
                              {"=": {"cat": "meme"}},
                              {"=": {"bar": "who?"}},
                          ]},
                          {"=": {"foo": "what!"}},
                          {"like": {"bar": "%%foo"}},
                          {"=": {"foo": "quote"}},
                      ]})
        self._do_test('foo="quote" or bar like "%%foo" and foo="what!" '
                      'or bar="who?" or cat="meme"',
                      {"or": [
                          {"=": {"cat": "meme"}},
                          {"=": {"bar": "who?"}},
                          {"and": [
                              {"=": {"foo": "what!"}},
                              {"like": {"bar": "%%foo"}},
                          ]},
                          {"=": {"foo": "quote"}},
                      ]})
| 42.441955
| 79
| 0.506514
|
4a12ab8f0277315afbd8851957438ff030e943d2
| 2,156
|
py
|
Python
|
get_image.py
|
SecurityQQ/QR_Bot
|
231aa2194ddd66ca50c1442acab61dcbde2a7924
|
[
"MIT"
] | 6
|
2017-10-16T20:39:33.000Z
|
2020-12-05T07:58:19.000Z
|
get_image.py
|
SecurityQQ/QR_Bot
|
231aa2194ddd66ca50c1442acab61dcbde2a7924
|
[
"MIT"
] | null | null | null |
get_image.py
|
SecurityQQ/QR_Bot
|
231aa2194ddd66ca50c1442acab61dcbde2a7924
|
[
"MIT"
] | 1
|
2021-09-11T19:37:29.000Z
|
2021-09-11T19:37:29.000Z
|
import requests as r
import re
import json
import os
from random import choice
# Image-file extensions this module will accept and serve.
AVAILABLE_FORMATS = ['png', 'jpg', 'jpeg', 'bmp']
# Bing image-search API key, overridable via the BING_TOKEN env var.
# NOTE(review): a live-looking key is committed in source as the fallback;
# it should be rotated and removed from the repository.
BING_TOKEN = os.environ.get('BING_TOKEN', '63b3ee16a46847e0be92920dd1409024')
def get_images_urls(name):
    """Query the Bing image-search API and return content URLs for *name*.

    Fix: the search text is now passed through ``params`` so requests
    URL-encodes it, instead of interpolating it raw into the URL where
    spaces, '&' or '#' in *name* corrupted the query string.
    """
    s = r.get("https://api.cognitive.microsoft.com/bing/v7.0/images/search/",
              params={"q": name},
              headers={"Ocp-Apim-Subscription-Key": BING_TOKEN})
    content = json.loads(s.content.decode('utf8')).get('value')
    return list(map(lambda x: x.get('contentUrl'), content))
def is_url(url):
    """Return True when *url* contains exactly one http(s) URL."""
    url_pattern = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    matches = re.findall(url_pattern, url)
    return len(matches) == 1
def download_image(url):
    """Fetch *url*; return the body (str if UTF-8, else bytes) or None.

    None is returned for a missing URL or any non-200 response.
    """
    if url is None:
        return None
    response = r.get(url)
    if response.status_code != 200:
        return None
    try:
        return response.content.decode('utf-8')
    except UnicodeDecodeError:
        # Binary payload (e.g. an image): hand back the raw bytes.
        return response.content
def is_supporting_format(url):
    """Return True when *url* ends with a supported image extension.

    Fix: the jpeg check compared the single character ``url[-4]`` with the
    4-character string ``'jpeg'``, which can never be equal, so '.jpeg'
    URLs were always rejected (``url[-3:]`` only yields 'peg' for them).
    It now compares the 4-character slice ``url[-4:]``.
    """
    return url[-3:] in AVAILABLE_FORMATS or url[-4:] == 'jpeg'
def smart_choice(urls):
    """Pick the first well-formed URL with a supported extension, or None."""
    candidates = [u for u in urls if is_url(u) and is_supporting_format(u)]
    if not candidates:
        return None
    return candidates[0]
def choice_gifs(urls):
    """Return the first URL ending in '.gif', or None when there is none."""
    gif_urls = [u for u in urls if len(u) > 4 and u[-4:] == '.gif']
    if not gif_urls:
        return None
    return gif_urls[0]
def get_smart_image(name):
    """Search for an icon image matching *name* and download the best hit."""
    extention = name.split('.')[-1]
    if extention not in AVAILABLE_FORMATS:
        extention = 'png'
    query = name + ' .' + extention
    if 'icon' not in query:
        query = query + ' icon'
    candidate_urls = get_images_urls(query)
    best_url = smart_choice(candidate_urls)
    print(best_url)  # debug trace of the chosen URL
    return download_image(best_url)
def get_smart_gif(name):
    """Search for a GIF matching *name* and download the first hit."""
    extention = name.split('.')[-1]
    if 'gif' not in extention:
        extention = 'gif'
    query = name + ' .' + extention
    candidate_urls = get_images_urls(query)
    return download_image(choice_gifs(candidate_urls))
| 27.291139
| 95
| 0.628942
|
4a12abb634dbfb740f595bd1b1481ea292b86722
| 945
|
py
|
Python
|
bsl_inst/bsl_lib/_bsl_inst_info_class.py
|
BioSensorsLab-Illinois/bsl_inst_control
|
cf41d6a7d41bf6fc05b9d4195e809771cb25354c
|
[
"MIT"
] | null | null | null |
bsl_inst/bsl_lib/_bsl_inst_info_class.py
|
BioSensorsLab-Illinois/bsl_inst_control
|
cf41d6a7d41bf6fc05b9d4195e809771cb25354c
|
[
"MIT"
] | null | null | null |
bsl_inst/bsl_lib/_bsl_inst_info_class.py
|
BioSensorsLab-Illinois/bsl_inst_control
|
cf41d6a7d41bf6fc05b9d4195e809771cb25354c
|
[
"MIT"
] | null | null | null |
class bsl_inst_info_class:
    """Plain record describing how to identify and talk to an instrument.

    Every field is an optional keyword argument with a benign default
    ("N/A" strings, baudrate 0, placeholder USB IDs, match-anything
    serial-number regex).
    """
    def __init__(self, *, MANUFACTURE:str="N/A", MODEL:str="N/A", TYPE:str="N/A", INTERFACE:str="Serial", BAUDRATE:int=0, SERIAL_NAME:str="N/A", USB_PID:str="0x9999", USB_VID:str="0x9999", QUERY_CMD:str="N/A", QUERY_E_RESP:str="N/A", SN_REG=".*", QUERY_SN_CMD=""):
        # Identity and transport description.
        self.MANUFACTURE = MANUFACTURE
        self.MODEL = MODEL
        self.TYPE = TYPE
        self.INTERFACE = INTERFACE
        self.SERIAL_NAME = SERIAL_NAME
        self.BAUDRATE = BAUDRATE
        self.USB_PID = USB_PID
        self.USB_VID = USB_VID
        # Probe/identification protocol strings.
        self.QUERY_CMD = QUERY_CMD
        self.QUERY_E_RESP = QUERY_E_RESP
        self.QUERY_SN_CMD = QUERY_SN_CMD
        self.SN_REG = SN_REG
| 63
| 264
| 0.477249
|
4a12ac0f59722163b5324d67eb7452d0fe18953b
| 3,159
|
py
|
Python
|
utils/test_policy.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
utils/test_policy.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
utils/test_policy.py
|
YunjaeChoi/Gain-Risk_Framework_for_Stabilization_of_Deep_Policy_Gradient_Optimization
|
68d3f8fca6c6e6b356261f568f0d8562242fa649
|
[
"MIT"
] | null | null | null |
import time
import joblib
import os
import os.path as osp
import tensorflow as tf
from .logx import restore_tf_graph, EpochLogger
def load_policy(fpath, itr='last', deterministic=False):
    """Load a saved TensorFlow policy from an experiment output directory.

    Args:
        fpath: path to the experiment output directory.
        itr: 'last' to pick the highest-numbered 'simple_saveXX' snapshot,
            or an int naming a specific iteration.
        deterministic: use the deterministic action op ('mu') when present
            (only valid for SAC policies); otherwise fall back to 'pi'.

    Returns:
        (env, get_action): the pickled environment (or None if it could not
        be loaded) and a function mapping a single observation to an action.

    Fix: the bare ``except:`` around the environment unpickle also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception`` while
    keeping the best-effort behavior.
    """
    # handle which epoch to load from
    if itr == 'last':
        # snapshot dirs are named 'simple_save<N>'; pick the largest N
        saves = [int(x[11:]) for x in os.listdir(fpath) if 'simple_save' in x and len(x) > 11]
        itr = '%d' % max(saves) if len(saves) > 0 else ''
    else:
        itr = '%d' % itr

    # load the graph and session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    model = restore_tf_graph(sess, osp.join(fpath, 'simple_save' + itr))

    # get the correct op for executing actions
    if deterministic and 'mu' in model.keys():
        # 'deterministic' is only a valid option for SAC policies
        print('Using deterministic action op.')
        action_op = model['mu']
    else:
        print('Using default action op.')
        action_op = model['pi']

    # make function for producing an action given a single state
    get_action = lambda x: sess.run(action_op, feed_dict={model['x']: x[None, :]})[0]

    # try to load environment from save
    # (sometimes this will fail because the environment could not be pickled)
    try:
        state = joblib.load(osp.join(fpath, 'vars' + itr + '.pkl'))
        env = state['env']
    except Exception:
        env = None

    return env, get_action
def run_policy(env, get_action, max_ep_len=None, num_episodes=100, render=True):
    """Roll out *get_action* in *env* for *num_episodes*, logging returns/lengths."""
    assert env is not None, \
        "Environment not found!\n\n It looks like the environment wasn't saved, " + \
        "and we can't run the agent in it. :( \n\n Check out the readthedocs " + \
        "page on Experiment Outputs for how to handle this situation."

    logger = EpochLogger()
    obs = env.reset()
    reward, done = 0, False
    ep_return, ep_length, episode = 0, 0, 0
    while episode < num_episodes:
        if render:
            env.render()
            time.sleep(1e-3)

        obs, reward, done, _ = env.step(get_action(obs))
        ep_return += reward
        ep_length += 1

        # Episode over: log it, print a summary line, and reset the env.
        if done or (ep_length == max_ep_len):
            logger.store(EpRet=ep_return, EpLen=ep_length)
            print('Episode %d \t EpRet %.3f \t EpLen %d'%(episode, ep_return, ep_length))
            obs = env.reset()
            reward, done, ep_return, ep_length = 0, False, 0, 0
            episode += 1

    logger.log_tabular('EpRet', with_min_and_max=True)
    logger.log_tabular('EpLen', average_only=True)
    logger.dump_tabular()
if __name__ == '__main__':
    # CLI entry point: load a saved policy from `fpath` and watch it run.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('fpath', type=str)                            # experiment output dir
    parser.add_argument('--len', '-l', type=int, default=0)           # max episode length (0 = unlimited)
    parser.add_argument('--episodes', '-n', type=int, default=100)    # number of episodes to run
    parser.add_argument('--norender', '-nr', action='store_true')     # disable env.render()
    parser.add_argument('--itr', '-i', type=int, default=-1)          # snapshot iteration; -1 means latest
    parser.add_argument('--deterministic', '-d', action='store_true') # use 'mu' op (SAC only)
    args = parser.parse_args()
    env, get_action = load_policy(args.fpath,
                                  args.itr if args.itr >=0 else 'last',
                                  args.deterministic)
    run_policy(env, get_action, args.len, args.episodes, not(args.norender))
| 35.1
| 92
| 0.616334
|
4a12ac6abc9667f1d68c053c40cc1a3f01a84393
| 5,607
|
py
|
Python
|
training/transformer/model/attention_layer.py
|
DCGM/pero-enhance
|
3a322e16946408e541bad9f75bc498d66e93dbd8
|
[
"BSD-3-Clause"
] | 5
|
2020-06-07T18:34:55.000Z
|
2022-01-17T03:14:26.000Z
|
training/transformer/model/attention_layer.py
|
DCGM/pero-enhance
|
3a322e16946408e541bad9f75bc498d66e93dbd8
|
[
"BSD-3-Clause"
] | 16
|
2020-01-28T22:22:10.000Z
|
2022-03-12T00:10:37.000Z
|
training/transformer/model/attention_layer.py
|
DCGM/pero-enhance
|
3a322e16946408e541bad9f75bc498d66e93dbd8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of multiheaded attention and self-attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Attention(tf.layers.Layer):
  """Multi-headed attention layer (TensorFlow 1.x `tf.layers` API)."""

  def __init__(self, hidden_size, num_heads, attention_dropout, train):
    # hidden_size must split evenly across heads so each head gets an
    # equal-sized slice of the projected vectors.
    if hidden_size % num_heads != 0:
      raise ValueError("Hidden size must be evenly divisible by the number of "
                       "heads.")

    super(Attention, self).__init__()
    self.hidden_size = hidden_size            # total model dimension
    self.num_heads = num_heads                # number of attention heads
    self.attention_dropout = attention_dropout  # dropout rate on the weights
    self.train = train                        # apply dropout only when True

    # Layers for linearly projecting the queries, keys, and values.
    self.q_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="q")
    self.k_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="k")
    self.v_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="v")

    self.output_dense_layer = tf.layers.Dense(hidden_size, use_bias=False,
                                              name="output_transform")

  def split_heads(self, x):
    """Split x into different heads, and transpose the resulting value.

    The tensor is transposed to insure the inner dimensions hold the correct
    values during the matrix multiplication.

    Args:
      x: A tensor with shape [batch_size, length, hidden_size]

    Returns:
      A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
    """
    with tf.name_scope("split_heads"):
      batch_size = tf.shape(x)[0]
      length = tf.shape(x)[1]

      # Calculate depth of last dimension after it has been split.
      depth = (self.hidden_size // self.num_heads)

      # Split the last dimension
      x = tf.reshape(x, [batch_size, length, self.num_heads, depth])

      # Transpose the result
      return tf.transpose(x, [0, 2, 1, 3])

  def combine_heads(self, x):
    """Combine tensor that has been split.

    Args:
      x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]

    Returns:
      A tensor with shape [batch_size, length, hidden_size]
    """
    with tf.name_scope("combine_heads"):
      batch_size = tf.shape(x)[0]
      length = tf.shape(x)[2]
      x = tf.transpose(x, [0, 2, 1, 3])  # --> [batch, length, num_heads, depth]
      return tf.reshape(x, [batch_size, length, self.hidden_size])

  def call(self, x, y, train_phase, bias, cache=None):
    """Apply attention mechanism to x and y.

    Args:
      x: a tensor with shape [batch_size, length_x, hidden_size]
      y: a tensor with shape [batch_size, length_y, hidden_size]
      train_phase: multiplier on the dropout rate; presumably a 0/1 float
        tensor gating dropout at run time — TODO confirm at call sites.
      bias: attention bias that will be added to the result of the dot product.
      cache: (Used during prediction) dictionary with tensors containing results
        of previous attentions. The dictionary must have the items:
            {"k": tensor with shape [batch_size, i, key_channels],
             "v": tensor with shape [batch_size, i, value_channels]}
        where i is the current decoded length. Mutated in place with the
        concatenated keys/values.

    Returns:
      Attention layer output with shape [batch_size, length_x, hidden_size]
    """
    # Linearly project the query (q), key (k) and value (v) using different
    # learned projections. This is in preparation of splitting them into
    # multiple heads. Multi-head attention uses multiple queries, keys, and
    # values rather than regular attention (which uses a single q, k, v).
    q = self.q_dense_layer(x)
    k = self.k_dense_layer(y)
    v = self.v_dense_layer(y)

    if cache is not None:
      # Combine cached keys and values with new keys and values.
      k = tf.concat([cache["k"], k], axis=1)
      v = tf.concat([cache["v"], v], axis=1)

      # Update cache
      cache["k"] = k
      cache["v"] = v

    # Split q, k, v into heads.
    q = self.split_heads(q)
    k = self.split_heads(k)
    v = self.split_heads(v)

    # Scale q to prevent the dot product between q and k from growing too large.
    depth = (self.hidden_size // self.num_heads)
    q *= depth ** -0.5

    # Calculate dot product attention
    logits = tf.matmul(q, k, transpose_b=True)
    if bias is not None:
      logits += bias
    weights = tf.nn.softmax(logits, name="attention_weights")
    if self.train:
      # TF1 dropout: second positional argument is keep_prob, so this keeps
      # (1 - attention_dropout * train_phase) of the weights.
      weights = tf.nn.dropout(weights, 1.0 - self.attention_dropout * train_phase)
    attention_output = tf.matmul(weights, v)

    # Recombine heads --> [batch_size, length, hidden_size]
    attention_output = self.combine_heads(attention_output)

    # Run the combined outputs through another linear projection layer.
    attention_output = self.output_dense_layer(attention_output)
    return attention_output
class SelfAttention(Attention):
  """Multiheaded self-attention layer: attention where queries, keys and
  values all come from the same input tensor."""

  def call(self, x, train_phase, bias, cache=None):
    # Delegate to Attention.call with y == x.
    return super(SelfAttention, self).call(x, x, train_phase, bias, cache)
| 37.38
| 82
| 0.675406
|
4a12ad83e36702f2c54e9ce6c71b3b070d74d7b0
| 4,092
|
py
|
Python
|
tests2/testutils.py
|
PMR2/pyodbc
|
aa52358a1330e74b97a5684d772739cc14620c6d
|
[
"MIT-0"
] | 1
|
2020-11-06T02:23:35.000Z
|
2020-11-06T02:23:35.000Z
|
tests2/testutils.py
|
mx-psi/pyodbc
|
90cf98cc945738113f3cc572c06304c79bc134a8
|
[
"MIT-0"
] | 1
|
2021-09-01T15:05:42.000Z
|
2021-09-01T15:05:42.000Z
|
tests2/testutils.py
|
mx-psi/pyodbc
|
90cf98cc945738113f3cc572c06304c79bc134a8
|
[
"MIT-0"
] | 2
|
2020-11-21T08:23:46.000Z
|
2020-11-21T08:32:41.000Z
|
from __future__ import print_function
import os, sys, platform
from os.path import join, dirname, abspath, basename
import unittest
def add_to_path():
    """
    Prepends the build directory to the path so that newly built pyodbc libraries are used, allowing it to be tested
    without installing it.
    """
    # Search the build tree for the freshly built extension module and, if
    # found, put its directory first on sys.path.
    #
    # The extension filename differs per platform, so derive the candidate
    # names from the interpreter's own C-extension suffixes.
    import imp
    extension_suffixes = [suffix for suffix, _, kind in imp.get_suffixes()
                          if kind == imp.C_EXTENSION]
    wanted_names = ['pyodbc%s' % suffix for suffix in extension_suffixes]

    # Only descend into directories built for this Python version.
    dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1])

    build = join(dirname(dirname(abspath(__file__))), 'build')

    for root, dirs, files in os.walk(build):
        dirs[:] = [d for d in dirs if d.endswith(dir_suffix)]
        if any(name in files for name in wanted_names):
            sys.path.insert(0, root)
            return

    print('Did not find the pyodbc library in the build directory.  Will use an installed version.')
def print_library_info(cnxn):
    """Print diagnostic information about the Python/pyodbc/driver stack.

    Args:
        cnxn: an open pyodbc connection used to query driver capabilities.
    """
    import pyodbc
    print('python:  %s' % sys.version)
    print('pyodbc:  %s %s' % (pyodbc.version, os.path.abspath(pyodbc.__file__)))
    print('odbc:    %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER))
    print('driver:  %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER)))
    print('         supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER))
    print('os:      %s' % platform.system())
    print('unicode: Py_Unicode=%s SQLWCHAR=%s' % (pyodbc.UNICODE_SIZE, pyodbc.SQLWCHAR_SIZE))

    cursor = cnxn.cursor()
    for typename in ['VARCHAR', 'WVARCHAR', 'BINARY']:
        t = getattr(pyodbc, 'SQL_' + typename)
        try:
            cursor.getTypeInfo(t)
        except pyodbc.Error:
            print('Max %s = (not supported)' % (typename, ))
        else:
            row = cursor.fetchone()
            # Bug fix: the old `row and row[2] or '(not supported)'` idiom
            # printed '(not supported)' whenever the size column was 0/None.
            print('Max %s = %s' % (typename, row[2] if row else '(not supported)'))

    if platform.system() == 'Windows':
        print('         %s' % ' '.join([s for s in platform.win32_ver() if s]))
def load_tests(testclass, name, *args):
"""
Returns a TestSuite for tests in `testclass`.
name
Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded.
args
Arguments for the test class constructor. These will be passed after the test method name.
"""
if name:
if not name.startswith('test_'):
name = 'test_%s' % name
names = [ name ]
else:
names = [ method for method in dir(testclass) if method.startswith('test_') ]
return unittest.TestSuite([ testclass(name, *args) for name in names ])
def load_setup_connection_string(section):
    """
    Attempts to read the default connection string from the setup.cfg file.

    If the file does not exist or if it exists but does not contain the connection string, None is returned.  If the
    file exists but cannot be parsed, an exception is raised.
    """
    # NOTE(review): `ConfigParser`/`SafeConfigParser` is the Python 2 module
    # name; this file targets Python 2 (see tests2/ path).
    from os.path import exists, join, dirname, splitext, basename
    from ConfigParser import SafeConfigParser

    FILENAME = 'setup.cfg'
    KEY      = 'connection-string'

    # Walk upward from this file's directory looking for <dir>/tmp/setup.cfg,
    # stopping at the filesystem root (where dirname(path) == path).
    path = dirname(abspath(__file__))
    while True:
        fqn = join(path, 'tmp', FILENAME)
        if exists(fqn):
            break
        parent = dirname(path)
        print('{} --> {}'.format(path, parent))
        if parent == path:
            return None
        path = parent

    try:
        p = SafeConfigParser()
        p.read(fqn)
    except:
        # NOTE(review): message interpolates `path` though `fqn` was parsed.
        raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1]))

    if p.has_option(section, KEY):
        return p.get(section, KEY)
| 34.677966
| 117
| 0.606549
|
4a12ae1ec1ddcfe7049198513df564ffbbdb8089
| 8,811
|
py
|
Python
|
agp/split.py
|
esrice/agptools
|
f91dbad20db539f4cc9731606978cb4369ea650f
|
[
"MIT"
] | 1
|
2021-12-28T01:44:20.000Z
|
2021-12-28T01:44:20.000Z
|
agp/split.py
|
esrice/agptools
|
f91dbad20db539f4cc9731606978cb4369ea650f
|
[
"MIT"
] | 2
|
2021-12-16T16:35:21.000Z
|
2022-03-17T07:45:16.000Z
|
agp/split.py
|
esrice/agptools
|
f91dbad20db539f4cc9731606978cb4369ea650f
|
[
"MIT"
] | null | null | null |
"""
Functions for splitting a scaffold into subscaffolds at gaps.
"""
from copy import deepcopy
from typing import Dict, Iterator, List, TextIO, Union
from . import AgpRow
class ParsingError(Exception):
    """Raised when breakpoints file is misformatted."""

    pass
def breakpoints_type(filename: str) -> Dict[str, List[int]]:
    """
    Argparse type function for breakpoints file: first column is the
    scaffold name; second column is a comma-separated list of
    locations within gaps where scaffold should be broken.

    Args:
        filename: path to the breakpoints file

    Returns:
        breakpoints: a dict mapping scaffold name to a list
            of breakpoints (int) on that scaffold

    Raises:
        FileNotFoundError: if `filename` does not point to readable file
        ParsingError: if there is an error in the input file's format or
            content
    """
    parsed: Dict[str, List[int]] = {}
    with open(filename) as handle:
        for line_number, line in enumerate(handle):
            fields = line.strip().split("\t")
            try:
                if fields[0] in parsed:
                    raise ParsingError(
                        f"{fields[0]} specified multiple times in breakpoints file"
                    )
                parsed[fields[0]] = [int(position) for position in fields[1].split(",")]
            except (ValueError, IndexError):
                raise ParsingError(f"Cannot parse line {line_number} of breakpoints: {line}")
    return parsed
def unoffset_rows(new_scaffold_name: str, rows: List[AgpRow]) -> List[AgpRow]:
    """
    Rebase *rows* so they form a standalone scaffold named
    *new_scaffold_name*: the object name of every row is replaced, and
    positions/part numbers are shifted so the first row starts at 1.

    Args:
        new_scaffold_name: name for the new scaffold which will
            replace all 'object' fields
        rows: rows to modify (in place) so that they function as a
            standalone scaffold together. The first row supplies the
            offsets.

    Returns:
        out_rows: the same rows, renamed and rebased.
    """
    position_shift = rows[0].object_beg - 1
    part_number_shift = rows[0].part_number - 1

    rebased = []
    for current in rows:
        current.object = new_scaffold_name
        current.object_beg -= position_shift
        current.object_end -= position_shift
        current.part_number -= part_number_shift
        rebased.append(current)
    return rebased
def split_contig(contig_row, breakpoints):
    """
    Splits a single row containing a contig into multiple rows,
    each containing a piece of the contig.

    >>> import agp
    >>> r = agp.AgpRow('\\t'.join(map(str, ['scaf', 501, 1000, 5, 'W',
    ...                                     'ctg', 1, 500, '+'])))
    >>> [str(x).split('\\t') for x in split_contig(r, [750, 867])]
    [['scaf', '501', '750', '5', 'W', 'ctg', '1', '250', '+'],
     ['scaf', '751', '867', '6', 'W', 'ctg', '251', '367', '+'],
     ['scaf', '868', '1000', '7', 'W', 'ctg', '368', '500', '+']]

    Args:
        contig_row (AgpRow): a single row to be split
        breakpoints (list(int)): positions where contig should be split,
            in object coordinates, *not* component coordinates. The left
            part of the split includes the breakpoint: e.g., splitting a
            contig of length 100 at 43 will make two new contigs: one
            from 1-43 and the other from 44-100.
    """
    pieces = [contig_row]
    # Repeatedly split the rightmost piece at each breakpoint, left to right.
    for cut in sorted(breakpoints):
        left = deepcopy(pieces.pop())
        right = deepcopy(left)

        left.object_end = cut
        right.object_beg = cut + 1
        right.part_number += 1

        left.component_end = left.component_beg + (cut - left.object_beg)
        right.component_beg = left.component_end + 1

        pieces.extend((left, right))

    return pieces
def convert_rows(rows, subscaffold_counter):
    """
    Turn *rows* into their own standalone scaffold named
    '<old_name>.<subscaffold_counter>', with positions and part numbers
    rebased to start at 1.

    Args:
        rows (list(AgpRow)): rows to turn into their own scaffold.
        subscaffold_counter (int): suffix appended to the old scaffold
            name to form the new scaffold name.

    Returns:
        new_rows (list(AgpRow)): the input rows, renamed and rebased so
            they function as a standalone scaffold.
    """
    subscaffold_name = f"{rows[0].object}.{subscaffold_counter}"
    return unoffset_rows(subscaffold_name, rows)
def split_scaffold(scaffold_rows, breakpoints):
    """
    Splits a scaffold at specified breakpoints.

    Args:
        scaffold_rows (list(AgpRow)): all the rows for a given scaffold
            in an AGP file
        breakpoints (list(int)): a list of locations where scaffold
            should be broken. All locations are specified in genomic
            coordinates. A breakpoint inside a gap removes that gap row
            entirely; a breakpoint inside a contig splits the contig.

    Returns:
        broken_rows (list(AgpRow)): rows of the input scaffold broken
            into len(breakpoints)+1 sub-scaffolds, with the gaps
            containing the breakpoints removed
    """
    out_rows = []                 # rows of all completed sub-scaffolds
    rows_this_subscaffold = []    # rows accumulated for the current sub-scaffold
    subscaffold_counter = 1       # suffix for naming the next sub-scaffold
    for row in scaffold_rows:
        # row.contains presumably tests object-coordinate containment —
        # defined on AgpRow elsewhere in the package.
        if any(map(row.contains, breakpoints)):
            # if the breakpoint is within a gap, our job is simple:
            # just forget about the gap row, output the previous
            # subscaffold, and start a new subscaffold
            if row.is_gap:
                out_rows += convert_rows(rows_this_subscaffold, subscaffold_counter)
                rows_this_subscaffold = []
                subscaffold_counter += 1
            # if the breakpoint is not within a gap, we need to actually
            # break a contig into pieces
            else:
                # split the contig into two or more rows
                contig_rows = split_contig(row, filter(row.contains, breakpoints))
                # the first row goes at the end of the current scaffold
                rows_this_subscaffold.append(contig_rows[0])
                del contig_rows[0]
                out_rows += convert_rows(rows_this_subscaffold, subscaffold_counter)
                subscaffold_counter += 1
                # the last row goes at the beginning of the next
                # scaffold
                rows_this_subscaffold = [contig_rows.pop()]
                # if there are any rows in between, they each get their
                # own subscaffold
                for contig_part in contig_rows:
                    out_rows += convert_rows([contig_part], subscaffold_counter)
                    subscaffold_counter += 1
        else:  # only add this row if there are no breakpoints in it
            rows_this_subscaffold.append(row)

    # flush the final sub-scaffold
    out_rows += convert_rows(rows_this_subscaffold, subscaffold_counter)

    return out_rows
def run(
    breakpoints: Dict[str, List[int]],
    outfile: TextIO,
    agp_infile: Iterator[Union[str, AgpRow]],
):
    """Stream an AGP file, splitting any scaffold listed in *breakpoints*.

    Args:
        breakpoints: maps scaffold name to the positions where that
            scaffold should be split
        outfile: file handle where the resulting AGP is written
        agp_infile: iterator over AGP rows; plain strings are treated as
            comment lines and passed through unchanged

    Rows are buffered one scaffold at a time; when a scaffold is
    complete it is split (if requested) and printed. This refactor
    deduplicates the flush logic and fixes an IndexError on empty input
    (the original unconditionally indexed ``rows_this_scaffold[0]``
    after the loop).
    """

    def flush(rows: List[AgpRow]) -> None:
        # Split the buffered scaffold if requested, then emit its rows.
        if rows and rows[0].object in breakpoints:
            rows = split_scaffold(rows, breakpoints[rows[0].object])
        for r in rows:
            print(r, file=outfile)

    rows_this_scaffold: List[AgpRow] = []  # agp rows of the scaffold being buffered
    for row in agp_infile:
        if isinstance(row, str):  # print out comment rows as-is
            print(row, file=outfile)
            continue

        # On a new scaffold, flush the previous one and clear the buffer.
        if rows_this_scaffold and rows_this_scaffold[0].object != row.object:
            flush(rows_this_scaffold)
            rows_this_scaffold = []

        rows_this_scaffold.append(row)

    # flush the final scaffold
    flush(rows_this_scaffold)
if __name__ == "__main__":
    # Run the module's doctests (see split_contig) when executed directly.
    import doctest

    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| 37.021008
| 85
| 0.625468
|
4a12ae5bc3b7bcf03cc3d8eed46e0b5f5dc31280
| 3,863
|
py
|
Python
|
pyle38/commands/search.py
|
iwpnd/pyle38
|
d2d6fa4e11b7444ed97df9152ee8e0a9dd0952d7
|
[
"MIT"
] | 44
|
2021-04-08T07:06:19.000Z
|
2022-02-16T15:06:05.000Z
|
pyle38/commands/search.py
|
iwpnd/pyle38
|
d2d6fa4e11b7444ed97df9152ee8e0a9dd0952d7
|
[
"MIT"
] | 50
|
2021-04-19T10:03:12.000Z
|
2022-03-03T16:13:50.000Z
|
pyle38/commands/search.py
|
iwpnd/pyle38
|
d2d6fa4e11b7444ed97df9152ee8e0a9dd0952d7
|
[
"MIT"
] | 4
|
2021-04-29T08:20:21.000Z
|
2022-03-12T07:28:19.000Z
|
from __future__ import annotations
from typing import List, Literal, Optional, Sequence, Union
from ..client import Client, Command, CommandArgs, SubCommand
from ..models import Options
from ..responses import CountResponse, IdsResponse, ObjectsResponse
from .executable import Compiled, Executable
# Tile38 SEARCH output formats supported by this wrapper.
Format = Literal["OBJECTS", "COUNT", "IDS"]
# A compiled output clause: the format keyword optionally followed by ints.
# NOTE(review): Union with a single member is redundant — effectively
# Sequence[Union[Format, int]]; kept as written.
Output = Union[Sequence[Union[Format, int]]]
class Search(Executable):
    """Builder for the Tile38 SEARCH command on string values in *key*."""

    _key: str
    _command: Literal["SEARCH"]
    # NOTE(review): mutable class-level defaults are shadowed by fresh
    # instances in __init__, so they are not shared between objects.
    _options: Options = {}
    _output: Optional[Output] = None
    _where: List[List[Union[str, int]]] = []
    _all: bool = False

    def __init__(self, client: Client, key: str) -> None:
        super().__init__(client)

        self.key(key)
        self._options = {}
        self._where = []

    def key(self, key: str) -> Search:
        """Set the key to search in."""
        self._key = key

        return self

    def cursor(self, value: int) -> Search:
        """Set the pagination cursor."""
        self._options["cursor"] = value

        return self

    def limit(self, value: int) -> Search:
        """Limit the number of returned items."""
        self._options["limit"] = value

        return self

    def match(self, value: str) -> Search:
        """Only return ids matching the given glob pattern."""
        self._options["match"] = value

        return self

    def asc(self, flag: bool = True) -> Search:
        """Sort ascending; clears any previous desc()."""
        self._options["asc"] = flag

        if flag:
            self._options["desc"] = False

        return self

    def desc(self, flag: bool = True) -> Search:
        """Sort descending; clears any previous asc()."""
        self._options["desc"] = flag

        if flag:
            self._options["asc"] = False

        return self

    def where(self, field: str, min: int, max: int) -> Search:
        """Filter the search by field

        Args:
            field (str): field name
            min (int): minimum value of field
            max (int): maximum value of field

        Returns:
            Within
        """
        self._where.append([SubCommand.WHERE, field, min, max])

        return self

    def output(self, format: Format) -> Search:
        """Choose the response format; OBJECTS is the server default
        and therefore sends no explicit output clause."""
        if format == "OBJECTS":
            self._output = None
        elif format == "COUNT":
            self._output = [format]
        elif format == "IDS":
            self._output = [format]

        return self

    async def asCount(self) -> CountResponse:
        """Execute and return only the match count."""
        self.output("COUNT")

        return CountResponse(**(await self.exec()))

    async def asIds(self) -> IdsResponse:
        """Execute and return only the matching ids."""
        self.output("IDS")

        return IdsResponse(**(await self.exec()))

    async def asStringObjects(self) -> ObjectsResponse[str]:
        """Execute and return the matching objects as strings."""
        self.output("OBJECTS")

        return ObjectsResponse[str](**(await self.exec()))

    def __compile_where(self) -> CommandArgs:
        """__compile_where.

        Flattens the accumulated WHERE clauses into a single arg list.

        Args:

        Returns:
            CommandArgs
        """
        w = []

        if len(self._where) > 0:
            for i in self._where:
                w.extend(i)
            return w
        else:
            return []

    def __compile_options(self) -> CommandArgs:
        """Serialize the option dict into command arguments: bare keyword
        for True flags, keyword+value for truthy or zero values."""
        commands = []

        # raises mypy: TypedDict key must be string literal
        # open PR: https://github.com/python/mypy/issues/7867
        for k in self._options.keys():
            if isinstance(self._options[k], bool):  # type: ignore
                if self._options[k]:  # type: ignore
                    commands.append(k.upper())  # type: ignore
            elif self._options[k]:  # type: ignore
                commands.extend([k.upper(), self._options[k]])  # type: ignore
            elif self._options[k] == 0:  # type: ignore
                # zero is falsy but still a meaningful value (e.g. cursor 0)
                commands.extend([k.upper(), self._options[k]])  # type: ignore

        return commands

    def compile(self) -> Compiled:
        """Build the full [command, args] pair sent to the server."""
        return [
            Command.SEARCH.value,
            [
                self._key,
                *(self.__compile_options()),
                *(self.__compile_where()),
                *(self._output if self._output else []),
            ],
        ]
| 25.926174
| 78
| 0.552679
|
4a12aeb6bf856ef1d92fc49bd95f1a4b2fbe6b14
| 952
|
py
|
Python
|
build/lib.linux-x86_64-2.7_ucs4/mx/Tools/mxTools/bench1.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Tools/mxTools/bench1.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Tools/mxTools/bench1.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
import hack
import mx.Tools.NewBuiltins
# Python 2 micro-benchmark comparing ways of iterating two sequences in
# lockstep. `tuples`, `lists`, `indices`, and `trange` are injected into
# the builtins by mx.Tools.NewBuiltins (imported above) — presumably; the
# definitions are not visible here.
k = range(10000)
l = range(1,10001)
loops = trange(100)

def f(k=k,l=l,tuples=tuples,loops=loops):
    # pairwise iteration via mx's tuples()
    for i in loops:
        for a,b in tuples(k,l):
            pass

def f1(k=k,l=l,lists=lists,loops=loops):
    # pairwise iteration via mx's lists()
    for i in loops:
        for a,b in lists(k,l):
            pass

def g(k=k,l=l,map=map,loops=loops):
    # pairwise iteration via Python 2's map(None, ...) zip idiom
    for i in loops:
        for a,b in map(None,k,l):
            pass

def h(k=k,l=l,indices=indices,len=len,loops=loops):
    # explicit index-based access
    for i in loops:
        for i in indices(k):
            a,b = k[i], l[i]

# Time each variant (hack.clock presumably times a statement string).
print 'with tuples():',
hack.clock('f()')
print 'with lists():',
hack.clock('f1()')
print 'with map():',
hack.clock('g()')
print 'indexed:',
hack.clock('h()')
print 'map(None,...):',
hack.clock('apply(map,(None,)+(k,)*100)')
print 'tuples(...):',
hack.clock('tuples((k,)*100)')
print 'lists(...):',
hack.clock('lists((k,)*100)')

# Check that tuples() agrees with the map(None, ...) reference result.
assert apply(map,(None,)+(k,)*100) == tuples((k,)*100)
| 21.155556
| 54
| 0.566176
|
4a12afbec3c77803cdcccd4f26416a8f959fb9d8
| 2,193
|
py
|
Python
|
Python 3/Olimpiada_2020/Preparacion/recuento_votos.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/Olimpiada_2020/Preparacion/recuento_votos.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | null | null | null |
Python 3/Olimpiada_2020/Preparacion/recuento_votos.py
|
DarkShadow4/python
|
4cd94e0cf53ee06c9c31e9272572ca9656697c30
|
[
"MIT"
] | 1
|
2020-08-19T17:25:22.000Z
|
2020-08-19T17:25:22.000Z
|
import sys
def get_points(papeleta, candidates, p, n):
    """Apply one ballot to the running tally.

    *papeleta* is a whitespace-separated ranking of the n candidates
    (a permutation of 1..n); the candidate ranked first earns n-1 points,
    the second n-2, and so on. Invalid ballots are reported and ignored.
    Mutates *candidates* in place. (*p* is unused here.)
    """
    ranks = [int(token) for token in papeleta.split()]
    if sorted(ranks) != list(range(1, n + 1)):
        print("Papeleta no válida")
        return
    names = list(candidates)
    for position, rank in enumerate(ranks):
        candidates[names[rank - 1]] += n - (position + 1)
def get_the_chosen_ones(candidates, p):
    """Pick the *p* highest-scoring candidates, ties broken alphabetically.

    Each round removes the winner from *candidates* (mutated in place) so
    the next round considers the remaining entries only.
    """
    winners = []
    for _ in range(p):
        if not candidates:
            continue
        top_score = max(candidates.values())
        pick = min(name for name, score in candidates.items() if score == top_score)
        winners.append(pick)
        del candidates[pick]
    return winners
# Script entry: read the input file named on the command line, tally the
# ballots, and print the p winners (or "VOTACION ANULADA" if nobody scored).
filename = sys.argv[1]  # sys.argv[0] would be the name of this script
with open(filename, "r") as input_file:
    Id=input_file.readline().strip()  # election id (read but unused)
    p=int(input_file.readline().strip())  # number of seats to fill
    n=int(input_file.readline().strip())  # number of candidates
    candidates = {}
    # next n lines: one candidate name each, starting at 0 points
    for candidate in range(n):
        candidates[input_file.readline().strip()] = 0
    # remaining lines: one ballot each
    for line in input_file.readlines():
        line.strip()  # NOTE(review): result discarded; get_points re-splits anyway
        get_points(line, candidates, p, n)

if sum(candidates.values()) == 0:
    print("VOTACION ANULADA")
else:
    the_chosen_ones = get_the_chosen_ones(candidates, p)
    for candidate in the_chosen_ones:
        print(candidate)
| 43.86
| 147
| 0.582763
|
4a12b01053336a8bf960cc6b48ef7c42524e984b
| 8,623
|
py
|
Python
|
tests/backends/fock/pure/test_gates.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 12
|
2021-09-12T15:51:45.000Z
|
2022-03-05T22:25:47.000Z
|
tests/backends/fock/pure/test_gates.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | 36
|
2021-09-13T08:01:27.000Z
|
2022-03-21T11:53:30.000Z
|
tests/backends/fock/pure/test_gates.py
|
antalszava/piquasso
|
7ebff83145cfab44929114437c250852dff5f9a5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Budapest Quantum Computing Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import piquasso as pq
def test_5050_beamsplitter():
    """One photon on a theta=pi/4 (50:50) beamsplitter splits evenly."""
    with pq.Program() as program:
        pq.Q(1) | pq.StateVector([1])

        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 4, phi=np.pi / 3)

    simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    state = simulator.execute(program).state

    assert np.allclose(
        state.fock_probabilities,
        [0, 0.5, 0.5, 0, 0, 0],
    )
def test_beamsplitter():
    """A theta=pi/5 beamsplitter distributes one photon unevenly."""
    with pq.Program() as program:
        pq.Q(1) | 1 * pq.StateVector([1])

        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 5, phi=np.pi / 6)

    simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    state = simulator.execute(program).state

    assert np.allclose(
        state.fock_probabilities,
        [0, 0.6545085, 0.3454915, 0, 0, 0],
    )
def test_beamsplitter_multiple_particles():
    """A beamsplitter acting on a superposition of multi-photon states."""
    with pq.Program() as program:
        pq.Q(1) | pq.StateVector([1]) / 2
        pq.Q(1) | pq.StateVector([2]) / 2
        pq.Q(0) | pq.StateVector([2]) / np.sqrt(2)

        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 5, phi=np.pi / 6)

    simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    state = simulator.execute(program).state

    # probabilities stay normalized after the passive transformation
    assert np.isclose(sum(state.fock_probabilities), 1)

    assert np.allclose(
        state.fock_probabilities,
        [0, 0.16362712, 0.08637288, 0.24672554, 0.17929466, 0.32397979],
    )
def test_beamsplitter_leaves_vacuum_unchanged():
    """The vacuum component's probability (0.25) survives the beamsplitter."""
    with pq.Program() as program:
        pq.Q() | pq.StateVector([0, 0]) / 2
        pq.Q() | pq.StateVector([0, 1]) / np.sqrt(2)
        pq.Q() | pq.StateVector([0, 2]) / 2

        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 5, phi=np.pi / 6)

    simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    state = simulator.execute(program).state

    assert np.isclose(sum(state.fock_probabilities), 1)

    # first entry is the (unchanged) vacuum probability
    assert np.allclose(
        state.fock_probabilities,
        [0.25, 0.32725425, 0.17274575, 0.10709534, 0.11306356, 0.02984109],
    )
def test_multiple_beamsplitters():
    """Two cascaded beamsplitters on a single photon over three modes."""
    with pq.Program() as program:
        pq.Q(2) | pq.StateVector([1]) * 1

        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 4, phi=np.pi / 5)
        pq.Q(1, 2) | pq.Beamsplitter(theta=np.pi / 6, phi=1.5 * np.pi)

    simulator = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=3))
    state = simulator.execute(program).state

    assert np.allclose(
        state.fock_probabilities,
        [0, 0.75, 0.25, 0, 0, 0, 0, 0, 0, 0],
    )
def test_multiple_beamsplitters_with_multiple_particles():
    """Cascaded beamsplitters acting on a multi-photon superposition."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 0, 1]) / 2
        pq.Q() | pq.StateVector([0, 0, 2]) / 2
        pq.Q() | pq.StateVector([0, 1, 1]) / np.sqrt(2)
        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 4, phi=np.pi / 5)
        pq.Q(1, 2) | pq.Beamsplitter(theta=np.pi / 6, phi=1.5 * np.pi)

    sim = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [0, 0.1875, 0.0625, 0, 0.234375, 0.15625, 0.109375, 0.1875, 0.0625, 0]
    assert np.allclose(final.fock_probabilities, expected)
def test_phaseshift():
    """A phase shifter changes phases only, so Fock probabilities are untouched."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 1]) / 2
        pq.Q() | pq.StateVector([0, 2]) / np.sqrt(2)
        pq.Q() | pq.StateVector([1, 1]) / 2
        pq.Q(0) | pq.Phaseshifter(phi=np.pi / 3)

    sim = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [0, 0.25, 0, 0.5, 0.25, 0]
    assert np.allclose(final.fock_probabilities, expected)
def test_fourier():
    """A Fourier gate is a fixed phase shift, so Fock probabilities are untouched."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 1]) / 2
        pq.Q() | pq.StateVector([0, 2]) / np.sqrt(2)
        pq.Q() | pq.StateVector([1, 1]) / 2
        pq.Q(0) | pq.Fourier()

    sim = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [0, 0.25, 0, 0.5, 0.25, 0]
    assert np.allclose(final.fock_probabilities, expected)
def test_mach_zehnder():
    """A Mach-Zehnder interferometer redistributes a photon superposition."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 1]) / 2
        pq.Q() | pq.StateVector([0, 2]) / np.sqrt(2)
        pq.Q() | pq.StateVector([1, 1]) / 2
        pq.Q(0, 1) | pq.MachZehnder(int_=np.pi / 3, ext=np.pi / 4)

    sim = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [0, 0.0625, 0.1875, 0.04845345, 0.09690689, 0.60463966]
    assert np.allclose(final.fock_probabilities, expected)
def test_beamsplitters_and_phaseshifters_with_multiple_particles():
    """Mixed phase shifters and beamsplitters on a 3-mode multi-photon input."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 0, 1]) / 2
        pq.Q() | pq.StateVector([0, 0, 2]) / 2
        pq.Q() | pq.StateVector([0, 1, 1]) / np.sqrt(2)
        pq.Q(0) | pq.Phaseshifter(phi=np.pi / 3)
        pq.Q(1) | pq.Phaseshifter(phi=np.pi / 3)
        pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 4, phi=4 * np.pi / 5)
        pq.Q(1, 2) | pq.Beamsplitter(theta=np.pi / 6, phi=3 * np.pi / 2)

    sim = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [0, 0.1875, 0.0625, 0, 0.43324878, 0.02366748, 0.04308374, 0.1875, 0.0625, 0]
    assert np.allclose(final.fock_probabilities, expected)
def test_interferometer():
    """A general 3-mode unitary applied via Interferometer yields the known output."""
    T = np.array(
        [
            [0.5, 0.53033009 + 0.53033009j, 0.21650635 + 0.375j],
            [-0.61237244 + 0.61237244j, 0.4330127, 0.24148146 + 0.06470476j],
            [0, -0.48296291 + 0.12940952j, 0.8660254],
        ]
    )

    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 0, 1]) / 2
        pq.Q() | pq.StateVector([0, 0, 2]) / 2
        pq.Q() | pq.StateVector([0, 1, 1]) / np.sqrt(2)
        pq.Q(0, 1, 2) | pq.Interferometer(matrix=T)

    sim = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=3))
    final = sim.execute(prog).state

    assert np.isclose(sum(final.fock_probabilities), 1)
    expected = [
        0,
        0.1875,
        0.0625,
        0,
        0.01443139,
        0.10696977,
        0.0192306,
        0.32090931,
        0.11538358,
        0.17307537,
    ]
    assert np.allclose(final.fock_probabilities, expected)
def test_kerr():
    """Kerr on a number state only multiplies it by a phase (no mixing)."""
    xi = np.pi / 3

    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 2, 1])
        pq.Q(1) | pq.Kerr(xi=xi)

    sim = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=4))
    final = sim.execute(prog).state

    # TODO: Better way of presenting the resulting state.
    elements = list(final.nonzero_elements)
    assert len(elements) == 1
    coefficient, occupation = elements[0]
    assert np.isclose(coefficient, -np.exp(1j * np.pi / 3))
    assert occupation == (0, 2, 1)
def test_cross_kerr():
    """CrossKerr on a number state only multiplies it by a phase (no mixing)."""
    with pq.Program() as prog:
        pq.Q() | pq.StateVector([0, 2, 1])
        pq.Q(1, 2) | pq.CrossKerr(xi=np.pi / 2)

    sim = pq.PureFockSimulator(d=3, config=pq.Config(cutoff=4))
    final = sim.execute(prog).state

    # TODO: Better way of presenting the resulting state.
    elements = list(final.nonzero_elements)
    assert len(elements) == 1
    coefficient, occupation = elements[0]
    assert np.isclose(coefficient, -1)
    assert occupation == (0, 2, 1)
def test_cubic_phase():
    """CubicPhase on vacuum populates every Fock level up to the cutoff.

    With d=1 and cutoff=5 there are exactly 5 basis states, all of which
    acquire nonzero amplitude.
    """
    with pq.Program() as program:
        pq.Q() | pq.Vacuum()
        pq.Q(0) | pq.CubicPhase(gamma=0.1)

    simulator = pq.PureFockSimulator(d=1, config=pq.Config(cutoff=5))
    state = simulator.execute(program).state

    nonzero_elements = list(state.nonzero_elements)
    # A length is an int; compare against an int literal (was the float 5.0).
    assert len(nonzero_elements) == 5
| 28.839465
| 86
| 0.608025
|
4a12b0ca3239125b1220611c1251081023693218
| 1,940
|
py
|
Python
|
eventsourcing/application/django.py
|
bartboy011/eventsourcing
|
f7ffebb86120f12d04d21d6dcb1dd24a8e233ea9
|
[
"BSD-3-Clause"
] | 1
|
2020-07-31T10:15:33.000Z
|
2020-07-31T10:15:33.000Z
|
eventsourcing/application/django.py
|
bartboy011/eventsourcing
|
f7ffebb86120f12d04d21d6dcb1dd24a8e233ea9
|
[
"BSD-3-Clause"
] | null | null | null |
eventsourcing/application/django.py
|
bartboy011/eventsourcing
|
f7ffebb86120f12d04d21d6dcb1dd24a8e233ea9
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Any
from eventsourcing.application.simple import ApplicationWithConcreteInfrastructure
from eventsourcing.infrastructure.django.factory import DjangoInfrastructureFactory
from eventsourcing.infrastructure.django.utils import (
close_django_connection,
setup_django,
)
class DjangoApplication(ApplicationWithConcreteInfrastructure):
    """Event-sourced application wired to the Django infrastructure factory."""

    infrastructure_factory_class = DjangoInfrastructureFactory

    def __init__(self, tracking_record_class: Any = None, *args: Any, **kwargs: Any):
        # Kept so construct_infrastructure() can prefer an explicit override
        # over the default tracking_record_class property.
        self._tracking_record_class = tracking_record_class
        super(DjangoApplication, self).__init__(*args, **kwargs)

    @property
    def stored_event_record_class(self) -> type:  # type: ignore
        """Record class used to persist stored events."""
        # This is awkward, but need to avoid importing library Django models
        # at module load time (before Django settings are configured).
        from eventsourcing.infrastructure.django.models import StoredEventRecord

        return StoredEventRecord

    @property
    def snapshot_record_class(self) -> type:  # type: ignore
        """Record class used to persist entity snapshots."""
        # Imported lazily for the same reason as stored_event_record_class.
        # Note: this is an instance property, so the first parameter is
        # `self` (the original misleadingly named it `cls`).
        from eventsourcing.infrastructure.django.models import EntitySnapshotRecord

        return EntitySnapshotRecord

    @property
    def tracking_record_class(self) -> Any:
        """Default record class used to track upstream notifications."""
        from eventsourcing.infrastructure.django.models import (
            NotificationTrackingRecord,
        )

        return NotificationTrackingRecord

    def construct_infrastructure(self, *args: Any, **kwargs: Any) -> None:
        """Construct infrastructure, preferring the constructor-supplied tracking record class."""
        tracking_record_class = (
            self._tracking_record_class or self.tracking_record_class
        )
        super(DjangoApplication, self).construct_infrastructure(
            tracking_record_class=tracking_record_class, *args, **kwargs
        )

    @classmethod
    def reset_connection_after_forking(cls) -> None:
        """
        Resets database connection after forking.
        """
        close_django_connection()
        setup_django()
| 35.272727
| 85
| 0.72732
|
4a12b0d10ef38e140e8c1bee2c3beacd4e365ecf
| 10,054
|
py
|
Python
|
torch/nn/modules/upsampling.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | 40
|
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
torch/nn/modules/upsampling.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | 14
|
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
torch/nn/modules/upsampling.py
|
wenhaopeter/read_pytorch_code
|
491f989cd918cf08874dd4f671fb7f0142a0bc4f
|
[
"Intel",
"X11"
] | 7
|
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
from .module import Module
from .. import functional as F
from torch import Tensor
from typing import Optional
from ..common_types import _size_2_t, _ratio_2_t, _size_any_t, _ratio_any_t
class Upsample(Module):
    r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.

    The input data is assumed to be of the form
    `minibatch x channels x [optional depth] x [optional height] x width`.
    Hence, for spatial inputs, we expect a 4D Tensor and for volumetric inputs, we expect a 5D Tensor.

    The algorithms available for upsampling are nearest neighbor and linear,
    bilinear, bicubic and trilinear for 3D, 4D and 5D input Tensor,
    respectively.

    One can either give a :attr:`scale_factor` or the target output :attr:`size` to
    calculate the output size. (You cannot give both, as it is ambiguous)

    Args:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, the corner pixels of the input
            and output tensors are aligned, and thus preserving the values at
            those pixels. This only has effect when :attr:`mode` is
            ``'linear'``, ``'bilinear'``, or ``'trilinear'``. Default: ``False``

    Shape:
        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
          or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where

    .. math::
        D_{out} = \left\lfloor D_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    .. warning::
        With ``align_corners = True``, the linearly interpolating modes
        (`linear`, `bilinear`, `bicubic`, and `trilinear`) don't proportionally
        align the output and input pixels, and thus the output values can depend
        on the input size. This was the default behavior for these modes up to
        version 0.3.1. Since then, the default behavior is
        ``align_corners = False``. See below for concrete examples on how this
        affects the outputs.

    .. note::
        If you want downsampling/general resizing, you should use :func:`~nn.functional.interpolate`.

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> input
        tensor([[[[ 1.,  2.],
                  [ 3.,  4.]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='nearest')
        >>> m(input)
        tensor([[[[ 1.,  1.,  2.,  2.],
                  [ 1.,  1.,  2.,  2.],
                  [ 3.,  3.,  4.,  4.],
                  [ 3.,  3.,  4.,  4.]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
        >>> m(input)
        tensor([[[[ 1.0000,  1.2500,  1.7500,  2.0000],
                  [ 1.5000,  1.7500,  2.2500,  2.5000],
                  [ 2.5000,  2.7500,  3.2500,  3.5000],
                  [ 3.0000,  3.2500,  3.7500,  4.0000]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        >>> m(input)
        tensor([[[[ 1.0000,  1.3333,  1.6667,  2.0000],
                  [ 1.6667,  2.0000,  2.3333,  2.6667],
                  [ 2.3333,  2.6667,  3.0000,  3.3333],
                  [ 3.0000,  3.3333,  3.6667,  4.0000]]]])

        >>> # Try scaling the same data in a larger tensor
        >>>
        >>> input_3x3 = torch.zeros(3, 3).view(1, 1, 3, 3)
        >>> input_3x3[:, :, :2, :2].copy_(input)
        tensor([[[[ 1.,  2.],
                  [ 3.,  4.]]]])
        >>> input_3x3
        tensor([[[[ 1.,  2.,  0.],
                  [ 3.,  4.,  0.],
                  [ 0.,  0.,  0.]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='bilinear')  # align_corners=False
        >>> # Notice that values in top left corner are the same with the small input (except at boundary)
        >>> m(input_3x3)
        tensor([[[[ 1.0000,  1.2500,  1.7500,  1.5000,  0.5000,  0.0000],
                  [ 1.5000,  1.7500,  2.2500,  1.8750,  0.6250,  0.0000],
                  [ 2.5000,  2.7500,  3.2500,  2.6250,  0.8750,  0.0000],
                  [ 2.2500,  2.4375,  2.8125,  2.2500,  0.7500,  0.0000],
                  [ 0.7500,  0.8125,  0.9375,  0.7500,  0.2500,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])

        >>> m = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        >>> # Notice that values in top left corner are now changed
        >>> m(input_3x3)
        tensor([[[[ 1.0000,  1.4000,  1.8000,  1.6000,  0.8000,  0.0000],
                  [ 1.8000,  2.2000,  2.6000,  2.2400,  1.1200,  0.0000],
                  [ 2.6000,  3.0000,  3.4000,  2.8800,  1.4400,  0.0000],
                  [ 2.4000,  2.7200,  3.0400,  2.5600,  1.2800,  0.0000],
                  [ 1.2000,  1.3600,  1.5200,  1.2800,  0.6400,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
    """
    __constants__ = ['size', 'scale_factor', 'mode', 'align_corners', 'name']
    # NOTE(review): size/scale_factor/align_corners default to None in
    # __init__, so their annotations are Optional (the originals were not).
    name: str
    size: Optional[_size_any_t]
    scale_factor: Optional[_ratio_any_t]
    mode: str
    align_corners: Optional[bool]

    def __init__(self, size: Optional[_size_any_t] = None, scale_factor: Optional[_ratio_any_t] = None,
                 mode: str = 'nearest', align_corners: Optional[bool] = None) -> None:
        super(Upsample, self).__init__()
        self.name = type(self).__name__
        self.size = size
        # scale_factor is normalized to float(s) so TorchScript/extra_repr see
        # a consistent type; a falsy value (None or 0) becomes None.
        if isinstance(scale_factor, tuple):
            self.scale_factor = tuple(float(factor) for factor in scale_factor)
        else:
            self.scale_factor = float(scale_factor) if scale_factor else None
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input: Tensor) -> Tensor:
        # All actual resizing logic lives in functional.interpolate.
        return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)

    def extra_repr(self) -> str:
        """Return the repr fragment: either scale_factor or size, plus mode."""
        if self.scale_factor is not None:
            info = 'scale_factor=' + str(self.scale_factor)
        else:
            info = 'size=' + str(self.size)
        info += ', mode=' + self.mode
        return info
class UpsamplingNearest2d(Upsample):
    r"""Applies a 2D nearest neighbor upsampling to an input signal composed of several input
    channels.

    To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
    as it's constructor argument.

    When :attr:`size` is given, it is the output size of the image `(h, w)`.

    Args:
        size (int or Tuple[int, int], optional): output spatial sizes
        scale_factor (float or Tuple[float, float], optional): multiplier for
            spatial size.

    .. warning::
        This class is deprecated in favor of :func:`~nn.functional.interpolate`.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> input
        tensor([[[[ 1.,  2.],
                  [ 3.,  4.]]]])

        >>> m = nn.UpsamplingNearest2d(scale_factor=2)
        >>> m(input)
        tensor([[[[ 1.,  1.,  2.,  2.],
                  [ 1.,  1.,  2.,  2.],
                  [ 3.,  3.,  4.,  4.],
                  [ 3.,  3.,  4.,  4.]]]])
    """

    def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
        # Thin wrapper: delegate to Upsample with the mode pinned to 'nearest'.
        super().__init__(size, scale_factor, mode='nearest')
class UpsamplingBilinear2d(Upsample):
    r"""Applies a 2D bilinear upsampling to an input signal composed of several input
    channels.

    To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
    as it's constructor argument.

    When :attr:`size` is given, it is the output size of the image `(h, w)`.

    Args:
        size (int or Tuple[int, int], optional): output spatial sizes
        scale_factor (float or Tuple[float, float], optional): multiplier for
            spatial size.

    .. warning::
        This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
        equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale\_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale\_factor} \right\rfloor

    Examples::

        >>> input = torch.arange(1, 5, dtype=torch.float32).view(1, 1, 2, 2)
        >>> input
        tensor([[[[ 1.,  2.],
                  [ 3.,  4.]]]])

        >>> m = nn.UpsamplingBilinear2d(scale_factor=2)
        >>> m(input)
        tensor([[[[ 1.0000,  1.3333,  1.6667,  2.0000],
                  [ 1.6667,  2.0000,  2.3333,  2.6667],
                  [ 2.3333,  2.6667,  3.0000,  3.3333],
                  [ 3.0000,  3.3333,  3.6667,  4.0000]]]])
    """

    def __init__(self, size: Optional[_size_2_t] = None, scale_factor: Optional[_ratio_2_t] = None) -> None:
        # Thin wrapper: bilinear mode with corner alignment, per the docs above.
        super().__init__(size, scale_factor, mode='bilinear', align_corners=True)
| 41.717842
| 113
| 0.553809
|
4a12b25cd928dc7468781de16536208cef97cf3c
| 6,415
|
py
|
Python
|
selfdrive/car/hyundai/hyundaican.py
|
agegold/OPKR080Hoya
|
17434ef0c2a2dd8463afbc2ac38bc7b4b66dcfe6
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/hyundaican.py
|
agegold/OPKR080Hoya
|
17434ef0c2a2dd8463afbc2ac38bc7b4b66dcfe6
|
[
"MIT"
] | null | null | null |
selfdrive/car/hyundai/hyundaican.py
|
agegold/OPKR080Hoya
|
17434ef0c2a2dd8463afbc2ac38bc7b4b66dcfe6
|
[
"MIT"
] | null | null | null |
import copy
import crcmod
from common.params import Params
from selfdrive.car.hyundai.values import CAR, CHECKSUM
hyundai_checksum = crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)
def create_lkas11(packer, frame, car_fingerprint, apply_steer, steer_req,
                  lkas11, sys_warning, sys_state, enabled,
                  left_lane, right_lane,
                  left_lane_depart, right_lane_depart, bus):
  """Build an LKAS11 steering message.

  Starts from the last received lkas11 frame, overwrites the torque command
  and lane/warning display fields, applies per-platform quirks, then fills
  in the platform-specific checksum.
  """
  values = copy.copy(lkas11)
  values["CF_Lkas_LdwsSysState"] = sys_state
  values["CF_Lkas_SysWarning"] = 3 if sys_warning else 0
  values["CF_Lkas_LdwsLHWarning"] = left_lane_depart
  values["CF_Lkas_LdwsRHWarning"] = right_lane_depart
  values["CR_Lkas_StrToqReq"] = apply_steer
  values["CF_Lkas_ActToi"] = steer_req
  values["CF_Lkas_ToiFlt"] = 0
  values["CF_Lkas_MsgCount"] = frame % 0x10
  values["CF_Lkas_Chksum"] = 0

  if car_fingerprint in [CAR.GRANDEUR_HEV2020, CAR.GRANDEUR2020, CAR.GRANDEUR_HEV, CAR.GRANDEUR, CAR.SONATA, CAR.PALISADE, CAR.SONATA_HEV, CAR.SANTA_FE, CAR.KONA_EV, CAR.NIRO_EV]:
    values["CF_Lkas_LdwsActivemode"] = int(left_lane) + (int(right_lane) << 1)
    values["CF_Lkas_LdwsOpt_USM"] = 2

    # FcwOpt_USM 5 = Orange blinking car + lanes
    # FcwOpt_USM 4 = Orange car + lanes
    # FcwOpt_USM 3 = Green blinking car + lanes
    # FcwOpt_USM 2 = Green car + lanes
    # FcwOpt_USM 1 = White car + lanes
    # FcwOpt_USM 0 = No car + lanes
    values["CF_Lkas_FcwOpt_USM"] = 2 if enabled else 1

    # SysWarning 4 = keep hands on wheel
    # SysWarning 5 = keep hands on wheel (red)
    # SysWarning 6 = keep hands on wheel (red) + beep
    # Note: the warning is hidden while the blinkers are on
    values["CF_Lkas_SysWarning"] = 4 if sys_warning else 0
  elif car_fingerprint == CAR.GENESIS:
    # This field is actually LdwsActivemode
    # Genesis and Optima fault when forwarding while engaged
    values["CF_Lkas_LdwsActivemode"] = 2
    values["CF_Lkas_SysWarning"] = lkas11["CF_Lkas_SysWarning"]
  elif car_fingerprint in [CAR.OPTIMA, CAR.OPTIMA_HEV, CAR.CADENZA, CAR.CADENZA_HEV]:
    values["CF_Lkas_LdwsActivemode"] = 0
  elif car_fingerprint == CAR.SONATA_LF_TURBO:
    values["CF_Lkas_Bca_R"] = 0
    values["CF_Lkas_LdwsOpt_USM"] = 2
    # The original assigned CF_Lkas_FcwOpt_USM twice; once is equivalent.
    values["CF_Lkas_FcwOpt_USM"] = 2 if enabled else 1
    values["CF_Lkas_SysWarning"] = 4 if sys_warning else 0

  # BUGFIX: the original compared an int to the string "1"
  # (int(...) == "1"), which is always False, so the LdwsCarFix param
  # could never take effect.
  ldws_car_fix = int(Params().get('LdwsCarFix')) == 1
  if ldws_car_fix:
    values["CF_Lkas_LdwsOpt_USM"] = 3

  dat = packer.make_can_msg("LKAS11", 0, values)[2]

  if car_fingerprint in CHECKSUM["crc8"]:
    # CRC Checksum as seen on 2019 Hyundai Santa Fe
    dat = dat[:6] + dat[7:8]
    checksum = hyundai_checksum(dat)
  elif car_fingerprint in CHECKSUM["6B"]:
    # Checksum of first 6 Bytes, as seen on 2018 Kia Sorento
    checksum = sum(dat[:6]) % 256
  else:
    # Checksum of first 6 Bytes and last Byte as seen on 2018 Kia Stinger
    checksum = (sum(dat[:6]) + dat[7]) % 256

  values["CF_Lkas_Chksum"] = checksum

  return packer.make_can_msg("LKAS11", bus, values)
def create_clu11(packer, frame, bus, clu11, button, speed):
  """Build a CLU11 (cluster) message echoing the cruise `button` and `speed`."""
  msg = copy.copy(clu11)
  msg["CF_Clu_CruiseSwState"] = button
  msg["CF_Clu_Vanz"] = speed
  msg["CF_Clu_AliveCnt1"] = frame % 0x10  # 4-bit rolling alive counter
  return packer.make_can_msg("CLU11", bus, msg)
def create_lfa_mfa(packer, frame, enabled):
  """Build the LFAHDA_MFC message that drives the cluster's LFA icon.

  ACTIVE 1 = Green steering wheel icon
  LFA_USM 2 & 3 = LFA cancelled, fast loud beeping
  LFA_USM 0 & 1 = No mesage
  LFA_SysWarning 1 = "Switching to HDA", short beep
  LFA_SysWarning 2 = "Switching to Smart Cruise control", short beep
  LFA_SysWarning 3 = LFA error
  ACTIVE2: nothing
  HDA_USM: nothing
  `frame` is currently unused but kept for signature symmetry with the
  other create_* helpers.
  """
  msg = {"ACTIVE": enabled, "HDA_USM": 2}
  return packer.make_can_msg("LFAHDA_MFC", 0, msg)
def create_mdps12(packer, frame, mdps12):
  """Rebuild MDPS12 with torque-override flags forced and a fresh checksum."""
  msg = copy.copy(mdps12)
  msg["CF_Mdps_ToiActive"] = 0
  msg["CF_Mdps_ToiUnavail"] = 1
  msg["CF_Mdps_MsgCount2"] = frame % 0x100  # 8-bit rolling counter
  msg["CF_Mdps_Chksum2"] = 0

  # Pack once with a zero checksum, then fill in the 8-bit additive checksum.
  payload = packer.make_can_msg("MDPS12", 2, msg)[2]
  msg["CF_Mdps_Chksum2"] = sum(payload) % 256
  return packer.make_can_msg("MDPS12", 2, msg)
def create_scc11(packer, frame, enabled, set_speed, lead_visible, scc_live, scc11):
  """Build SCC11 (cruise status): alive counter, set speed, object validity."""
  msg = copy.copy(scc11)
  msg["AliveCounterACC"] = (frame // 2) % 0x10
  if not scc_live:
    # Stock SCC is not live on the bus; advertise the main cruise mode as on.
    msg["MainMode_ACC"] = 1
  msg["VSetDis"] = set_speed
  msg["ObjValid"] = 1 if enabled else 0
  # msg["ACC_ObjStatus"] = lead_visible
  return packer.make_can_msg("SCC11", 0, msg)
def create_scc12(packer, apply_accel, enabled, cnt, scc_live, scc12):
  """Build SCC12 carrying the longitudinal accel request plus a 4-bit checksum."""
  msg = copy.copy(scc12)
  accel = apply_accel if enabled else 0
  msg["aReqRaw"] = accel    # aReqMax
  msg["aReqValue"] = accel  # aReqMin
  msg["CR_VSM_Alive"] = cnt
  msg["CR_VSM_ChkSum"] = 0
  if not scc_live:
    msg["ACCMode"] = 1 if enabled else 0  # 2 if gas padel pressed

  # Pack with a zero checksum, then compute the nibble-sum complement.
  payload = packer.make_can_msg("SCC12", 0, msg)[2]
  msg["CR_VSM_ChkSum"] = 16 - sum(sum(divmod(b, 16)) for b in payload) % 16
  return packer.make_can_msg("SCC12", 0, msg)
def create_scc13(packer, scc13):
  """Forward SCC13 unchanged on bus 0 (copied so the source dict is not aliased)."""
  return packer.make_can_msg("SCC13", 0, copy.copy(scc13))
def create_scc14(packer, enabled, scc14):
  """Build SCC14; while engaged, advertise fixed jerk/comfort-band limits."""
  msg = copy.copy(scc14)
  if enabled:
    msg.update({
      "JerkUpperLimit": 3.2,
      "JerkLowerLimit": 0.1,
      "SCCMode": 1,
      "ComfortBandUpper": 0.24,
      "ComfortBandLower": 0.24,
    })
  return packer.make_can_msg("SCC14", 0, msg)
def create_spas11(packer, car_fingerprint, frame, en_spas, apply_steer, bus):
  """Build a SPAS11 steering-angle command with the platform checksum."""
  msg = {
    "CF_Spas_Stat": en_spas,
    "CF_Spas_TestMode": 0,
    "CR_Spas_StrAngCmd": apply_steer,
    "CF_Spas_BeepAlarm": 0,
    "CF_Spas_Mode_Seq": 2,
    "CF_Spas_AliveCnt": frame % 0x200,  # 9-bit rolling counter
    "CF_Spas_Chksum": 0,
    "CF_Spas_PasVol": 0,
  }
  # Pack once with a zero checksum, then fill in the platform-specific one
  # over the first 6 payload bytes (CRC8 or simple additive).
  payload = packer.make_can_msg("SPAS11", 0, msg)[2]
  if car_fingerprint in CHECKSUM["crc8"]:
    msg["CF_Spas_Chksum"] = hyundai_checksum(payload[:6])
  else:
    msg["CF_Spas_Chksum"] = sum(payload[:6]) % 256
  return packer.make_can_msg("SPAS11", bus, msg)
def create_spas12(bus):
  """Return a raw SPAS12 CAN message (address 1268, all-zero payload) on `bus`."""
  payload = "\x00\x00\x00\x00\x00\x00\x00\x00"
  return [1268, 0, payload, bus]
def create_ems11(packer, ems11, enabled):
  """Rebuild EMS11 with the vehicle-speed signal zeroed while engaged.

  BUGFIX: the original packed the message under the literal name "values"
  and passed the unmodified `ems11` dict to the packer, so the VS override
  was silently discarded.
  """
  values = copy.copy(ems11)
  if enabled:
    values["VS"] = 0
  return packer.make_can_msg("EMS11", 1, values)
| 34.304813
| 179
| 0.696493
|
4a12b268d88cad9c95eebd209a861781f409b24e
| 7,572
|
py
|
Python
|
sdk/python/pulumi_aws/licensemanager/license_configuration.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/licensemanager/license_configuration.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/licensemanager/license_configuration.py
|
dixler/pulumi-aws
|
88838ed6d412c092717a916b0b5b154f68226c3a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
# NOTE(review): this file is generated by the Pulumi tfgen bridge (see the
# header warning); only comments/docstrings are added here, code is unchanged.
class LicenseConfiguration(pulumi.CustomResource):
    description: pulumi.Output[str]
    """
    Description of the license configuration.
    """
    license_count: pulumi.Output[float]
    """
    Number of licenses managed by the license configuration.
    """
    license_count_hard_limit: pulumi.Output[bool]
    """
    Sets the number of available licenses as a hard limit.
    """
    license_counting_type: pulumi.Output[str]
    """
    Dimension to use to track license inventory. Specify either `vCPU`, `Instance`, `Core` or `Socket`.
    """
    license_rules: pulumi.Output[list]
    """
    Array of configured License Manager rules.
    """
    name: pulumi.Output[str]
    """
    Name of the license configuration.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the resource.
    """
    def __init__(__self__, resource_name, opts=None, description=None, license_count=None, license_count_hard_limit=None, license_counting_type=None, license_rules=None, name=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a License Manager license configuration resource.

        > **Note:** Removing the `license_count` attribute is not supported by the License Manager API - recreate the resource instead.

        ## Rules

        License rules should be in the format of `#RuleType=RuleValue`. Supported rule types:

        * `minimumVcpus` - Resource must have minimum vCPU count in order to use the license. Default: 1
        * `maximumVcpus` - Resource must have maximum vCPU count in order to use the license. Default: unbounded, limit: 10000
        * `minimumCores` - Resource must have minimum core count in order to use the license. Default: 1
        * `maximumCores` - Resource must have maximum core count in order to use the license. Default: unbounded, limit: 10000
        * `minimumSockets` - Resource must have minimum socket count in order to use the license. Default: 1
        * `maximumSockets` - Resource must have maximum socket count in order to use the license. Default: unbounded, limit: 10000
        * `allowedTenancy` - Defines where the license can be used. If set, restricts license usage to selected tenancies. Specify a comma delimited list of `EC2-Default`, `EC2-DedicatedHost`, `EC2-DedicatedInstance`

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description of the license configuration.
        :param pulumi.Input[float] license_count: Number of licenses managed by the license configuration.
        :param pulumi.Input[bool] license_count_hard_limit: Sets the number of available licenses as a hard limit.
        :param pulumi.Input[str] license_counting_type: Dimension to use to track license inventory. Specify either `vCPU`, `Instance`, `Core` or `Socket`.
        :param pulumi.Input[list] license_rules: Array of configured License Manager rules.
        :param pulumi.Input[str] name: Name of the license configuration.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/licensemanager_license_configuration.html.markdown.
        """
        # Legacy aliases __name__/__opts__ are deprecated but still honored.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # With no opts.id this registers a new resource; __props__ is reserved
        # for the get() path below.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['description'] = description
            __props__['license_count'] = license_count
            __props__['license_count_hard_limit'] = license_count_hard_limit
            if license_counting_type is None:
                raise TypeError("Missing required property 'license_counting_type'")
            __props__['license_counting_type'] = license_counting_type
            __props__['license_rules'] = license_rules
            __props__['name'] = name
            __props__['tags'] = tags
        super(LicenseConfiguration, __self__).__init__(
            'aws:licensemanager/licenseConfiguration:LicenseConfiguration',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, description=None, license_count=None, license_count_hard_limit=None, license_counting_type=None, license_rules=None, name=None, tags=None):
        """
        Get an existing LicenseConfiguration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: Description of the license configuration.
        :param pulumi.Input[float] license_count: Number of licenses managed by the license configuration.
        :param pulumi.Input[bool] license_count_hard_limit: Sets the number of available licenses as a hard limit.
        :param pulumi.Input[str] license_counting_type: Dimension to use to track license inventory. Specify either `vCPU`, `Instance`, `Core` or `Socket`.
        :param pulumi.Input[list] license_rules: Array of configured License Manager rules.
        :param pulumi.Input[str] name: Name of the license configuration.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/licensemanager_license_configuration.html.markdown.
        """
        # Constructing with a resolved id performs a lookup, not a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()

        __props__["description"] = description
        __props__["license_count"] = license_count
        __props__["license_count_hard_limit"] = license_count_hard_limit
        __props__["license_counting_type"] = license_counting_type
        __props__["license_rules"] = license_rules
        __props__["name"] = name
        __props__["tags"] = tags
        return LicenseConfiguration(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        """Map a provider camelCase property name to its Python snake_case name."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        """Map a Python snake_case property name to its provider camelCase name."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 54.47482
| 238
| 0.698494
|
4a12b296a6a308d56645ed9f9ef4d92f9cfd7a86
| 1,226
|
py
|
Python
|
Windows/data/saves/GetInfo.py
|
Platingamer42/DigitRecognition
|
b42b0e79db90d198e72fc5f15c4bd730b6386671
|
[
"Apache-2.0"
] | null | null | null |
Windows/data/saves/GetInfo.py
|
Platingamer42/DigitRecognition
|
b42b0e79db90d198e72fc5f15c4bd730b6386671
|
[
"Apache-2.0"
] | null | null | null |
Windows/data/saves/GetInfo.py
|
Platingamer42/DigitRecognition
|
b42b0e79db90d198e72fc5f15c4bd730b6386671
|
[
"Apache-2.0"
] | null | null | null |
from keras import models
import os, gzip, pickle
if __name__ == "__main__":
    model_arr = []
    model_cnn = []

    # Load the pickled MNIST dataset (latin1 encoding for py2-era pickles).
    with gzip.open("../datasets/mnist.pkl.gz", "rb") as f:
        train_images, train_labels, test_images, test_labels = pickle.load(f, encoding="latin1")

    # Collect saved model files from the current directory by name pattern.
    for file in os.listdir():
        if "model_keras" in file:
            model_arr.append(file)
        if "model_cnn" in file:
            model_cnn.append(file)

    def _evaluate_models(files, images, labels):
        """Print accuracy and summary for each saved model; skip unreadable files."""
        for m in files:
            try:
                model = models.load_model(m)
                _, accuracy = model.evaluate(images, labels)
                print("=======file: {}; accuracy: {}=======".format(m, accuracy))
                model.summary()
            except ValueError:
                # Consistent message (the original second loop printed a bare "Error").
                print("Error reading file: {}".format(m))

    # Plain (dense) models evaluate on the flat images.
    _evaluate_models(model_arr, test_images, test_labels)

    # RESHAPE: CNN models expect a channel dimension -> (N, 28, 28, 1).
    test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
    _evaluate_models(model_cnn, test_images, test_labels)
| 29.190476
| 92
| 0.539152
|
4a12b2998f5de37ec15d92322af0357f6ee3bf0c
| 1,567
|
py
|
Python
|
ryu/lib/of_config/__init__.py
|
w180112/ryu
|
aadb6609f585c287b4928db9462baf72c6410718
|
[
"Apache-2.0"
] | 975
|
2015-01-03T02:30:13.000Z
|
2020-05-07T14:01:48.000Z
|
ryu/lib/of_config/__init__.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 66
|
2020-05-22T21:55:42.000Z
|
2022-03-31T12:35:04.000Z
|
ryu/lib/of_config/__init__.py
|
DiegoRossiMafioletti/ryu
|
6f906e72c92e10bd0264c9b91a2f7bb85b97780c
|
[
"Apache-2.0"
] | 763
|
2015-01-01T03:38:43.000Z
|
2020-05-06T15:46:09.000Z
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OF-Config implementation.
"""
import glob
import os.path
import sys

# Directory containing the bundled OF-Config XSD schema files.
SCHEMA_DIR = os.path.dirname(__file__)

# Schema files are named 'of-config-<version>.xsd'.
_PREFIX = 'of-config-'
_SUFFIX = '.xsd'
# Build the glob pattern from _PREFIX/_SUFFIX so the naming scheme is
# defined in exactly one place (previously the pattern was duplicated).
_files = glob.glob(os.path.join(SCHEMA_DIR, _PREFIX + '*' + _SUFFIX))

# Map schema version string (e.g. '1.1.1') -> path of its XSD file.
OF_CONFIG_XSD_FILES = {
    os.path.basename(f)[len(_PREFIX):-len(_SUFFIX)]: f for f in _files}

# For convenience, also expose each schema as a module attribute, e.g.
# OF_CONFIG_1_0_XSD = os.path.join(SCHEMA_DIR, 'of-config-1.0.xsd')
# and so on
_this_module = sys.modules[__name__]
for (version, xsd_file) in OF_CONFIG_XSD_FILES.items():
    setattr(_this_module,
            'OF_CONFIG_%s_XSD' % version.replace('.', '_'), xsd_file)

# Well-known OF-Config XML namespace URNs.
OFCONFIG_1_1_CONFIG = 'urn:onf:params:xml:ns:onf:of12:config'
OFCONFIG_1_1_YANG = 'urn:onf:of12:config:yang'
# LINC specific?
OFCONFIG_1_1_1_YANG = 'urn:onf:of111:config:yang'
OFCONFIG_YANG_NAMESPACES = {
    '1.1': OFCONFIG_1_1_YANG,
    '1.1.1': OFCONFIG_1_1_1_YANG,
}
| 29.566038
| 73
| 0.734525
|
4a12b3228de8289832c6fb5012e3fca79f7f482d
| 6,405
|
py
|
Python
|
jobs/domain/system/etl_job_base.py
|
helpthx/pyspark_example_project_template
|
2865bdb2712f480bd468b6c0a0f6b1ea094f42c5
|
[
"CNRI-Python"
] | 1
|
2021-10-20T16:41:24.000Z
|
2021-10-20T16:41:24.000Z
|
jobs/domain/system/etl_job_base.py
|
helpthx/pyspark_example_project_template
|
2865bdb2712f480bd468b6c0a0f6b1ea094f42c5
|
[
"CNRI-Python"
] | null | null | null |
jobs/domain/system/etl_job_base.py
|
helpthx/pyspark_example_project_template
|
2865bdb2712f480bd468b6c0a0f6b1ea094f42c5
|
[
"CNRI-Python"
] | null | null | null |
"""
etl_job_base.py
~~~~~~~~~~
This Python module contains an example Apache Spark ETL job definition
that implements best practices for production ETL jobs. It can be
submitted to a Spark cluster (or locally) using the 'spark-submit'
command found in the '/bin' directory of all Spark distributions
(necessary for running any Spark job, locally or otherwise). For
example, this example script can be executed as follows,
$SPARK_HOME/bin/spark-submit \
--master spark://localhost:7077 \
--py-files packages.zip \
--files configs/etl_config.json \
jobs/domain/system/etl_job_base.py
where packages.zip contains Python modules required by ETL job (in
this example it contains a class to provide access to Spark's logger),
which need to be made available to each executor process on every node
in the cluster; etl_config.json is a text file sent to the cluster,
containing a JSON object with all of the configuration parameters
required by the ETL job; and, etl_job.py contains the Spark application
to be executed by a driver process on the Spark master node.
For more details on submitting Spark applications, please see here:
http://spark.apache.org/docs/latest/submitting-applications.html
Our chosen approach for structuring jobs is to separate the individual
'units' of ETL - the Extract, Transform and Load parts - into dedicated
functions, such that the key Transform steps can be covered by tests
and jobs or called from within another environment (e.g. a Jupyter or
Zeppelin notebook).
"""
import sys
import argparse
from dependencies.spark import start_spark
from pyspark.sql import Row
from pyspark.sql.functions import col, concat_ws, lit
def main(args: list) -> None:
    """Entry point: parse CLI args, run the ETL pipeline, stop Spark.

    Parameters:
        args (list): raw command-line arguments (typically sys.argv[1:]).

    Returns:
        None
    """
    job_name, steps_per_floor = set_up_args(args)

    # Start the Spark application; yields a session, a logger and config.
    spark, log, config_dict = start_spark(app_name=job_name)

    log.info('etl_job is up-and-running')
    job(spark, log, steps_per_floor)
    log.info('test_etl_job is finished')

    spark.stop()
    return None
def extract_data(spark):
    """Read the employees dataset from its fixed Parquet location.

    Parameters:
        spark (SparkSession): Main spark session for the job.

    Returns:
        SparkDataframe: DataFrame loaded from the Parquet files.
    """
    source_path = 'file:///code/tests/domain/system/test_unit/test_data/employees'
    return spark.read.parquet(source_path)
def transform_data(df, steps_per_floor_):
    """Transform original dataset.

    :param df: Input DataFrame.
    :param steps_per_floor_: The number of steps per-floor at 43 Tanner
        Street.
    :return: Transformed DataFrame.
    """
    # Full name is first and second name joined by a single space.
    full_name = concat_ws(' ', col('first_name'), col('second_name'))
    # Steps to desk scales linearly with the floor number.
    steps_to_desk = col('floor') * lit(steps_per_floor_)
    return df.select(
        col('id'),
        full_name.alias('name'),
        steps_to_desk.alias('steps_to_desk'),
    )
def load_data(df):
    """Collect data locally and write to CSV.

    :param df: DataFrame to print.
    :return: None
    """
    output_path = 'file:///code/tests/domain/system/test_integration/output_employees'
    # Coalesce to a single partition so one CSV file is produced.
    writer = df.coalesce(1).write
    writer.csv(output_path, mode='overwrite', header=True)
    return None
def create_test_data(spark, config):
    """Create test data.
    This function creates both pre- and post-transformation data
    saved as Parquet files in tests/test_data. This will be used for
    unit tests as well as to load as part of the example ETL job.
    :param spark: SparkSession used to build and write the DataFrames.
    :param config: dict of job parameters; reads config['steps_per_floor'].
    :return: None
    """
    # create example data from scratch
    local_records = [
        Row(id=1, first_name='Dan', second_name='Germain', floor=1),
        Row(id=2, first_name='Dan', second_name='Sommerville', floor=1),
        Row(id=3, first_name='Alex', second_name='Ioannides', floor=2),
        Row(id=4, first_name='Ken', second_name='Lai', floor=2),
        Row(id=5, first_name='Stu', second_name='White', floor=3),
        Row(id=6, first_name='Mark', second_name='Sweeting', floor=3),
        Row(id=7, first_name='Phil', second_name='Bird', floor=4),
        Row(id=8, first_name='Kim', second_name='Suter', floor=4)
    ]
    df = spark.createDataFrame(local_records)
    # write to Parquet file format (single file via coalesce(1))
    (df
     .coalesce(1)
     .write
     .parquet('file:///code/tests/domain/system/test_unit/test_data/employees', mode='overwrite'))
    # create transformed version of data
    df_tf = transform_data(df, config['steps_per_floor'])
    # write transformed version of data to Parquet
    (df_tf
     .coalesce(1)
     .write
     .parquet('file:///code/tests/domain/system/test_unit/test_data/employees_report', mode='overwrite'))
    return None
def job(spark: str, log: str, steps_per_floor: int) -> None:
    """Run the extract -> transform -> load pipeline.

    Parameters:
        spark (SparkSession): Main spark session for the job.
        log (Log4j): Logging instance.
        steps_per_floor (int): steps-per-floor multiplier for the transform.

    Returns:
        None
    """
    log.warn('etl_job is up-and-running')

    raw_df = extract_data(spark)
    raw_df.show()
    transformed_df = transform_data(raw_df, steps_per_floor)
    load_data(transformed_df)

    log.warn('test_etl_job is finished')
    return None
def set_up_args(args: list) -> tuple:
    """Parse the job's command-line arguments.

    The original docstring ("Main spark session for the job") was a
    copy-paste error; this function only deals with argparse.

    Parameters:
        args (list): raw argument strings (typically sys.argv[1:]).

    Returns:
        tuple: (job_name, steps_per_floor) parsed from the arguments;
            both are strings (or None when the flag was not supplied).
    """
    parser = argparse.ArgumentParser(
        description='PySpark dummy template job args')
    parser.add_argument('-jbn', '--job_name_arg',
                        dest='job_name_arg',
                        type=str)
    parser.add_argument('-spf', '--steps_per_floor',
                        dest='steps_per_floor',
                        type=str)
    # Don't shadow the `args` parameter with the parsed namespace.
    parsed = parser.parse_args(args)
    return parsed.job_name_arg, parsed.steps_per_floor
# entry point for PySpark ETL application; sys.argv[1:] skips the script name
if __name__ == '__main__':
    main(sys.argv[1:])
| 30.942029
| 111
| 0.676659
|
4a12b389eaa7f3b4974a8c8f6039356f2bfaba55
| 606
|
py
|
Python
|
converter/video/migrations/0002_alter_videoraw_req_format.py
|
HosseinMirjalali/converter-task
|
c9b7a9682d7f3acea5903c4b5edcba56a41a618b
|
[
"MIT"
] | null | null | null |
converter/video/migrations/0002_alter_videoraw_req_format.py
|
HosseinMirjalali/converter-task
|
c9b7a9682d7f3acea5903c4b5edcba56a41a618b
|
[
"MIT"
] | 10
|
2021-12-30T04:33:52.000Z
|
2022-03-31T04:28:36.000Z
|
converter/video/migrations/0002_alter_videoraw_req_format.py
|
HosseinMirjalali/converter-task
|
c9b7a9682d7f3acea5903c4b5edcba56a41a618b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.10 on 2021-12-28 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: redeclares VideoRaw.req_format with its choice set.

    dependencies = [
        ('video', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='videoraw',
            name='req_format',
            # Allowed target container formats; 'mp4' is the default and
            # max_length=3 matches the longest stored value ('mp4'/'avi'/...).
            field=models.CharField(choices=[('mp4', 'mp4, using mpeg4 codec'), ('avi', 'avi, using mpeg4 codec'), ('mkv', 'mkv, using libvpx codec'), ('3gp', '3gp, using h263 codec')], default='mp4', max_length=3, verbose_name='The format this video should be converted to.'),
        ),
    ]
| 31.894737
| 276
| 0.617162
|
4a12b39b8c83832d8260bcb13530a71b014a6d79
| 1,391
|
py
|
Python
|
Dictionary/dictionary.py
|
edwarddevp/python-learning
|
6f849c5a7c296ba3cf3ea79cd21d0429ca91ad59
|
[
"MIT"
] | null | null | null |
Dictionary/dictionary.py
|
edwarddevp/python-learning
|
6f849c5a7c296ba3cf3ea79cd21d0429ca91ad59
|
[
"MIT"
] | null | null | null |
Dictionary/dictionary.py
|
edwarddevp/python-learning
|
6f849c5a7c296ba3cf3ea79cd21d0429ca91ad59
|
[
"MIT"
] | null | null | null |
import json
from difflib import get_close_matches

# Load the word -> definitions mapping once at startup. Use a context
# manager (the original left the file handle open) and an explicit
# encoding so the data file is read consistently across platforms.
with open("data.json", encoding="utf-8") as data_file:
    data = json.load(data_file)

print()
def find_definition(word):
    """Look up *word* in the loaded dictionary and print its definitions.

    Tries the lower-case, UPPER-case and Title-case spellings in that
    order; if none match, offers the closest spelling suggestion (if any)
    before giving up.
    """
    for variant in (word.lower(), word.upper(), word.title()):
        if variant in data:
            print_definitions(word, data[variant])
            return
    closer_matches = get_close_matches(word, data.keys(), cutoff=0.8)
    if len(closer_matches) > 0:
        user_response = input(
            "Did you meant \"%s\", Yes(y) or No(any key): " % closer_matches[0])
        if user_response == 'y':
            print_definitions(closer_matches[0], data[closer_matches[0]])
        else:
            print()
    else:
        print("Sorry, the word you enter doesn't exist. Please double check it\n")
def print_definitions(word, definitions):
print("\n%s:" % word.title())
for definition in definitions:
print(" * " + definition)
print()
try:
    # Simple REPL: keep prompting until the user enters the sentinel 'e'.
    word_to_search = ""
    while word_to_search != "e":
        # Typo fix: the prompt said "world" instead of "word".
        word_to_search = input(
            "Enter the word you want to know its meaning, enter (e) to exit : ").strip()
        if word_to_search != 'e':
            find_definition(word_to_search)
    print("\nThanks for coming\n")
except KeyboardInterrupt:
    # Ctrl-C exits as gracefully as typing 'e'.
    print("\n\nThanks for coming\n")
| 30.911111
| 89
| 0.60532
|
4a12b3e16599cb2dc972cf958a1d04f9852a88f6
| 5,460
|
py
|
Python
|
library/panos_zone.py
|
rvichery/ansible-pan
|
d07839cd5a544a6398646c01e1edac0f0f82cc38
|
[
"Apache-2.0"
] | null | null | null |
library/panos_zone.py
|
rvichery/ansible-pan
|
d07839cd5a544a6398646c01e1edac0f0f82cc38
|
[
"Apache-2.0"
] | null | null | null |
library/panos_zone.py
|
rvichery/ansible-pan
|
d07839cd5a544a6398646c01e1edac0f0f82cc38
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2018 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: panos_zone
short_description: configure security zone
description:
- Configure security zones on PAN-OS firewall or in Panorama template.
author: "Robert Hagen (@stealthllama)"
version_added: "2.8"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPI U(https://pypi.python.org/pypi/pandevice)
- pandevice >= 0.8.0
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.state
- panos.full_template_support
- panos.vsys
options:
zone:
description:
- Name of the security zone to configure.
required: true
mode:
description:
- The mode of the security zone. Must match the mode of the interface.
choices:
- tap
- virtual-wire
- layer2
- layer3
- external
default: "layer3"
interface:
description:
- List of member interfaces.
type: list
zone_profile:
description:
- Zone protection profile.
log_setting:
description:
- Log forwarding setting.
enable_userid:
description:
- Enable user identification.
type: bool
include_acl:
description:
- User identification ACL include list.
type: list
exclude_acl:
description:
- User identification ACL exclude list.
type: list
'''
# Bug fix: three examples invoked `panos_interface` although this file is
# the panos_zone module (copy-paste error); all tasks now use panos_zone.
EXAMPLES = '''
# Create an L3 zone.
- name: create DMZ zone on a firewall
  panos_zone:
    provider: '{{ provider }}'
    zone: 'dmz'
    mode: 'layer3'
    zone_profile: 'strict'
# Add an interface to the zone.
- name: add ethernet1/2 to zone dmz
  panos_zone:
    provider: '{{ provider }}'
    zone: 'dmz'
    mode: 'layer3'
    interface: ['ethernet1/2']
    zone_profile: 'strict'
# Delete the zone.
- name: delete the DMZ zone
  panos_zone:
    provider: '{{ provider }}'
    zone: 'dmz'
    state: 'absent'
# Add a zone to a multi-VSYS Panorama template
- name: add Cloud zone to template
  panos_zone:
    provider: '{{ provider }}'
    template: 'Datacenter Template'
    vsys: 'vsys4'
    zone: 'datacenter'
    mode: 'layer3'
    enable_userid: true
    exclude_acl: ['10.0.200.0/24']
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.network import Zone
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
    """Ansible module entry point: create/update/delete a PAN-OS zone."""
    # Build the connection helper with the shared panos argument spec plus
    # the zone-specific parameters.
    helper = get_connection(
        vsys=True,
        template=True,
        template_stack=True,
        with_state=True,
        with_classic_provider_spec=True,
        argument_spec=dict(
            zone=dict(required=True),
            mode=dict(choices=['tap', 'virtual-wire', 'layer2', 'layer3', 'external'], default='layer3'),
            interface=dict(type='list'),
            zone_profile=dict(),
            log_setting=dict(),
            enable_userid=dict(type='bool', default=False),
            include_acl=dict(type='list'),
            exclude_acl=dict(type='list'),
        ),
    )
    module = AnsibleModule(
        argument_spec=helper.argument_spec,
        supports_check_mode=True,
        required_one_of=helper.required_one_of,
    )
    # Verify imports, build pandevice object tree.
    parent = helper.get_pandevice_parent(module)
    # Set the Zone object params
    zone_spec = {
        'name': module.params['zone'],
        'mode': module.params['mode'],
        'interface': module.params['interface'],
        'zone_profile': module.params['zone_profile'],
        'log_setting': module.params['log_setting'],
        'enable_user_identification': module.params['enable_userid'],
        'include_acl': module.params['include_acl'],
        'exclude_acl': module.params['exclude_acl']
    }
    # Retrieve the current list of zones (add=False: don't attach them).
    try:
        zones = Zone.refreshall(parent, add=False)
    except PanDeviceError as e:
        module.fail_json(msg='Failed refresh: {0}'.format(e))
    # Build the zone and attach to the parent
    new_zone = Zone(**zone_spec)
    parent.add(new_zone)
    # Perform the requested action (apply_state reconciles against `zones`).
    changed = helper.apply_state(new_zone, zones, module)
    # Done!
    module.exit_json(changed=changed, msg='Done')
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| 28.14433
| 105
| 0.643956
|
4a12b3ffd0cfdf79a96bedcb9f6960f563a44e55
| 5,072
|
py
|
Python
|
src/textual/events.py
|
ramiro/textual
|
a6a912ab2713b0e1cb668224f7a38f31b1c9939c
|
[
"MIT"
] | null | null | null |
src/textual/events.py
|
ramiro/textual
|
a6a912ab2713b0e1cb668224f7a38f31b1c9939c
|
[
"MIT"
] | null | null | null |
src/textual/events.py
|
ramiro/textual
|
a6a912ab2713b0e1cb668224f7a38f31b1c9939c
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING
from rich.repr import rich_repr, RichReprResult
from .message import Message
from ._types import MessageTarget
from .keys import Keys
if TYPE_CHECKING:
from ._timer import Timer as TimerClass
from ._timer import TimerCallback
@rich_repr
class Event(Message):
    """Base class for all events."""

    def __rich_repr__(self) -> RichReprResult:
        # Empty generator: `return` before `yield` makes this yield no
        # fields, so subclasses without state get an argument-less repr.
        return
        yield

    def __init_subclass__(cls, bubble: bool = False) -> None:
        # Forward the `bubble` class keyword to Message's subclass hook.
        super().__init_subclass__(bubble=bubble)
class Null(Event):
    """Event that carries no information."""

    def can_batch(self, message: Message) -> bool:
        # Consecutive Nulls are redundant and may be coalesced.
        return isinstance(message, Null)
class ShutdownRequest(Event):
pass
class Shutdown(Event):
pass
class Load(Event):
pass
class Startup(Event):
pass
class Created(Event):
pass
class Updated(Event):
"""Indicates the sender was updated and needs a refresh."""
class Idle(Event):
"""Sent when there are no more items in the message queue."""
class Action(Event, bubble=True):
    """Event carrying an action string; bubbles up to ancestors."""

    __slots__ = ["action"]

    def __init__(self, sender: MessageTarget, action: str) -> None:
        super().__init__(sender)
        self.action = action

    def __rich_repr__(self) -> RichReprResult:
        yield "action", self.action
class Resize(Event):
    """Resize event carrying the new width and height."""

    __slots__ = ["width", "height"]
    width: int
    height: int

    def __init__(self, sender: MessageTarget, width: int, height: int) -> None:
        # Record the new dimensions, then run the standard Event init.
        self.width, self.height = width, height
        super().__init__(sender)

    def __rich_repr__(self) -> RichReprResult:
        yield from (self.width, self.height)
class Mount(Event):
pass
class Unmount(Event):
pass
class Show(Event):
"""Widget has become visible."""
class Hide(Event):
"""Widget has been hidden."""
class InputEvent(Event, bubble=True):
pass
@rich_repr
class Key(InputEvent, bubble=True):
    """Key press event; the key is stored as a plain string."""

    __slots__ = ["key"]

    def __init__(self, sender: MessageTarget, key: Keys | str) -> None:
        super().__init__(sender)
        # Normalise Keys enum members to their string value.
        if isinstance(key, Keys):
            self.key = key.value
        else:
            self.key = key

    def __rich_repr__(self) -> RichReprResult:
        yield "key", self.key
@rich_repr
class MouseEvent(InputEvent):
    """Base class for mouse events: position, movement deltas, button and
    modifier keys, plus optional screen-space coordinates."""

    # NOTE(review): __init__ also assigns delta_x/delta_y/shift/meta/ctrl/
    # screen_x/screen_y which are not listed here; this only works if a base
    # class provides a __dict__ — confirm before relying on __slots__.
    __slots__ = ["x", "y", "button"]

    def __init__(
        self,
        sender: MessageTarget,
        x: int,
        y: int,
        delta_x: int,
        delta_y: int,
        button: int,
        shift: bool,
        meta: bool,
        ctrl: bool,
        screen_x: int | None = None,
        screen_y: int | None = None,
    ) -> None:
        super().__init__(sender)
        self.x = x
        self.y = y
        self.delta_x = delta_x
        self.delta_y = delta_y
        self.button = button
        self.shift = shift
        self.meta = meta
        self.ctrl = ctrl
        # Screen coordinates default to the (x, y) coordinates when omitted.
        self.screen_x = x if screen_x is None else screen_x
        self.screen_y = y if screen_y is None else screen_y

    def __rich_repr__(self) -> RichReprResult:
        yield "x", self.x
        yield "y", self.y
        # Three-tuples are (name, value, default); rich omits the field
        # from the repr when value == default.
        yield "delta_x", self.delta_x, 0
        yield "delta_y", self.delta_y, 0
        # Only show screen coordinates when they differ from (x, y).
        if self.screen_x != self.x:
            yield "screen_x", self.screen_x
        if self.screen_y != self.y:
            yield "screen_y", self.screen_y
        yield "button", self.button, 0
        yield "shift", self.shift, False
        yield "meta", self.meta, False
        yield "ctrl", self.ctrl, False

    def offset(self, x: int, y: int):
        # Return a copy translated by (x, y); screen coordinates and all
        # other fields are carried over unchanged.
        return self.__class__(
            self.sender,
            x=self.x + x,
            y=self.y + y,
            delta_x=self.delta_x,
            delta_y=self.delta_y,
            button=self.button,
            shift=self.shift,
            meta=self.meta,
            ctrl=self.ctrl,
            screen_x=self.screen_x,
            screen_y=self.screen_y,
        )
@rich_repr
class MouseMove(MouseEvent):
pass
@rich_repr
class MouseDown(MouseEvent):
pass
@rich_repr
class MouseUp(MouseEvent):
pass
class MouseScrollDown(InputEvent, bubble=True):
    """Scroll-wheel-down input event at position (x, y)."""

    __slots__ = ["x", "y"]

    def __init__(self, sender: MessageTarget, x: int, y: int) -> None:
        super().__init__(sender)
        self.x = x
        self.y = y
class MouseScrollUp(MouseScrollDown, bubble=True):
pass
class Click(MouseEvent):
pass
class DoubleClick(MouseEvent):
pass
@rich_repr
class Timer(Event):
    """Event sent when a timer fires."""

    # NOTE(review): __slots__ names "time" but __init__ assigns self.timer;
    # the slot list looks out of sync with the attributes actually set —
    # confirm whether "time" should be "timer" (works today only if a base
    # class supplies a __dict__).
    __slots__ = ["time", "count", "callback"]

    def __init__(
        self,
        sender: MessageTarget,
        timer: "TimerClass",
        count: int = 0,
        callback: TimerCallback | None = None,
    ) -> None:
        super().__init__(sender)
        self.timer = timer
        self.count = count
        self.callback = callback

    def __rich_repr__(self) -> RichReprResult:
        yield self.timer.name
class Enter(Event):
pass
class Leave(Event):
pass
class Focus(Event):
pass
class Blur(Event):
pass
class Update(Event):
    def can_batch(self, event: Message) -> bool:
        # Consecutive updates from the same sender are redundant and may
        # be collapsed into a single event.
        return isinstance(event, Update) and event.sender == self.sender
| 20.047431
| 79
| 0.608833
|
4a12b4e199d4840f4fe12c1f185d9d647f42963b
| 3,923
|
py
|
Python
|
src/rotest/core/result/handlers/stream/stream_handler.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 26
|
2017-06-11T18:21:17.000Z
|
2021-02-21T20:36:30.000Z
|
src/rotest/core/result/handlers/stream/stream_handler.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 143
|
2017-06-29T11:18:35.000Z
|
2021-06-10T17:23:46.000Z
|
src/rotest/core/result/handlers/stream/stream_handler.py
|
gregoil/rotest
|
c443bc1b99e02f047adfcab9943966f0023f652c
|
[
"MIT"
] | 11
|
2017-06-12T09:16:14.000Z
|
2021-07-11T23:20:59.000Z
|
"""Stream output handler."""
# pylint: disable=invalid-name,too-few-public-methods,arguments-differ
# pylint: disable=too-many-arguments,super-init-not-called
from __future__ import absolute_import
from rotest.common.constants import GREEN, YELLOW, RED, BOLD, CYAN, BLUE
from rotest.core.result.handlers.stream.base_handler import BaseStreamHandler
class EventStreamHandler(BaseStreamHandler):
    """Stream event handler.

    Overrides result handler's methods to print each event change in
    the main result object to the given stream.
    """
    NAME = 'full'

    def start_test_run(self):
        """Write the test run start to the stream."""
        self.stream.writeln('Tests Run Started', None, BOLD)

    def start_test(self, test):
        """Write the test start to the stream.

        Args:
            test (TestSuite / TestCase): test item instance.
        """
        self.stream.writeln('Test {} Started'.format(test.data.name))

    def stop_test(self, test):
        """Log the test stop to the stream.

        Args:
            test (TestSuite / TestCase): test item instance.
        """
        self.stream.writeln('Test {} Finished'.format(test.data.name))

    def start_composite(self, test):
        """Called when the given TestSuite is about to be run.

        Args:
            test (TestSuite / TestCase): test item instance.
        """
        # Composites are reported exactly like plain tests.
        self.start_test(test)

    def stop_composite(self, test):
        """Called when the given TestSuite has been run.

        Args:
            test (TestSuite / TestCase): test item instance.
        """
        self.stop_test(test)

    def stop_test_run(self):
        """Write the test run end to the stream."""
        self.stream.writeln('Tests Run Finished', None, BOLD)

    def add_success(self, test):
        """Write the test success to the stream.

        Args:
            test (TestCase): test item instance.
        """
        self.stream.writeln('Success: {}'.format(test), GREEN)

    def add_info(self, test, msg):
        """Called when a test registers a success message.

        Args:
            test (rotest.core.abstract_test.AbstractTest): test item instance.
            msg (str): success message.
        """
        self.stream.writeln('Success msg: {}'.format(test), GREEN)
        self.write_details(msg, color=GREEN)

    def add_skip(self, test, reason):
        """Write the test skip to the stream.

        Args:
            test (TestCase): test item instance.
            reason (str): skip reason description.
        """
        self.stream.writeln('Skip: {}'.format(test), YELLOW)
        self.write_details(reason, color=YELLOW)

    def add_failure(self, test, exception_str):
        """Write the failure to the stream.

        Args:
            test (TestCase): test item instance.
            exception_str (str): exception traceback string.
        """
        self.stream.writeln('Failure: {}'.format(test), RED)
        self.write_details(exception_str, color=RED)

    def add_error(self, test, exception_str):
        """Write the error to the stream.

        Args:
            test (TestCase): test item instance.
            exception_str (str): exception traceback string.
        """
        self.stream.writeln('Error: {}'.format(test), RED, BOLD)
        self.write_details(exception_str, 0, RED, BOLD)

    def add_expected_failure(self, test, exception_str):
        """Write the expected failure to the stream.

        Args:
            test (TestCase): test item instance.
            exception_str (str): exception traceback string.
        """
        self.stream.writeln('Expected Failure: {}'.format(test), CYAN)
        self.write_details(exception_str, color=CYAN)

    def add_unexpected_success(self, test):
        """Write the test unexpected success to the stream.

        Args:
            test (TestCase): test item instance.
        """
        self.stream.writeln('Unexpected Success: {}'.format(test), BLUE)
| 32.155738
| 78
| 0.615345
|
4a12b4ee7e2becdd1e6f602c6d562b39517c04e3
| 3,984
|
py
|
Python
|
python/assume_role_oidc_client_credentials.py
|
sequoiacapital/assume-role-oidc-client-credentials
|
cae5f3c85f4242cab30b4d7acfc77e30e8f34f7b
|
[
"MIT"
] | null | null | null |
python/assume_role_oidc_client_credentials.py
|
sequoiacapital/assume-role-oidc-client-credentials
|
cae5f3c85f4242cab30b4d7acfc77e30e8f34f7b
|
[
"MIT"
] | null | null | null |
python/assume_role_oidc_client_credentials.py
|
sequoiacapital/assume-role-oidc-client-credentials
|
cae5f3c85f4242cab30b4d7acfc77e30e8f34f7b
|
[
"MIT"
] | null | null | null |
import base64
import logging
from copy import deepcopy

import requests
#from requests_toolbelt.utils import dump

from botocore import UNSIGNED
from botocore.credentials import BaseAssumeRoleCredentialFetcher, CredentialProvider, AssumeRoleWithWebIdentityCredentialFetcher, DeferredRefreshableCredentials, Config, CredentialRetrievalError
from botocore.exceptions import InvalidConfigError
class WebIdentityTokenLoader(object):
    """Fetch an OIDC access token via the OAuth2 client-credentials grant.

    Instances are callables, matching the token-loader interface used by
    AssumeRoleWithWebIdentityCredentialFetcher.
    """

    def __init__(self, client_id, client_secret, token_url, scopes):
        self._client_id = client_id
        self._client_secret = client_secret
        self._token_url = token_url
        self._scopes = scopes

    def __call__(self):
        """Request a token from the OIDC endpoint and return it.

        Raises:
            CredentialRetrievalError: if the endpoint returns non-200.
        """
        # HTTP Basic auth header: base64("client_id:client_secret").
        auth_string = self._client_id + ":" + self._client_secret
        message_bytes = auth_string.encode('ascii')
        base64_bytes = base64.b64encode(message_bytes)
        base64_message = base64_bytes.decode('ascii')
        params = {'grant_type': 'client_credentials', 'scope': " ".join(self._scopes)}
        headers = {"Accept": "application/json",
                   "Authorization": "Basic " + base64_message,
                   "Content-Type": "application/x-www-form-urlencoded"}
        # Uncomment this to debug the request transaction
        #logging.basicConfig()
        #logging.getLogger().setLevel(logging.DEBUG)
        #requests_log = logging.getLogger("requests.packages.urllib3")
        #requests_log.setLevel(logging.DEBUG)
        #requests_log.propagate = True
        r = requests.post(self._token_url, params=params, headers=headers)
        if r.status_code != 200:
            # Bug fix: the original passed provider=self.method, which this
            # class never defines, so the failure path itself raised
            # AttributeError instead of the intended error.
            raise CredentialRetrievalError(
                provider='oidc-client-credentials',
                error_msg="Error retrieving OIDC token",
            )
        return r.json()['access_token']
class AssumeRoleWithOIDCClientCredentialsProvider(CredentialProvider):
    """Credential provider that assumes an IAM role using a web-identity
    token obtained via the OIDC client-credentials grant."""

    METHOD = 'assume-role-with-web-identity'
    CANONICAL_NAME = None

    def __init__(
        self,
        client_creator,
        client_id,
        client_secret,
        token_url,
        scopes,
        role_arn,
        cache=None,
        token_loader_cls=None,
    ):
        """Store configuration; no network calls happen until load().

        :param client_creator: callable that builds a botocore client.
        :param client_id: OIDC client id.
        :param client_secret: OIDC client secret.
        :param token_url: OIDC token endpoint URL.
        :param scopes: iterable of OAuth2 scopes to request.
        :param role_arn: ARN of the IAM role to assume.
        :param cache: optional credential cache.
        :param token_loader_cls: token-loader override (useful for tests).
        """
        self.cache = cache
        self._client_creator = client_creator
        self._client_id = client_id
        self._client_secret = client_secret
        self._token_url = token_url
        self._role_arn = role_arn
        self._scopes = scopes
        if token_loader_cls is None:
            token_loader_cls = WebIdentityTokenLoader
        self._token_loader_cls = token_loader_cls

    def load(self):
        # Bug fix: removed leftover debug print("hi") that polluted stdout
        # on every credential resolution.
        return self._assume_role_with_web_identity()

    def _assume_role_with_web_identity(self):
        token_loader = self._token_loader_cls(
            self._client_id, self._client_secret, self._token_url, self._scopes)
        role_arn = self._role_arn
        if not role_arn:
            error_msg = (
                'The provided profile or the current environment is '
                'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the role_arn'
                'configuration set or the AWS_ROLE_ARN env var is set.'
            )
            # InvalidConfigError is imported from botocore.exceptions at the
            # top of the file (the original referenced it without importing,
            # which raised NameError instead of the intended error).
            raise InvalidConfigError(error_msg=error_msg)
        extra_args = {}
        # NOTE(review): the session name is hard-coded; presumably it should
        # be configurable — confirm with callers before parameterizing.
        role_session_name = "role-session-name"
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name
        fetcher = AssumeRoleWithWebIdentityCredentialFetcher(
            client_creator=self._client_creator,
            web_identity_token_loader=token_loader,
            role_arn=role_arn,
            extra_args=extra_args,
            cache=self.cache,
        )
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return DeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=fetcher.fetch_credentials,
        )
| 36.218182
| 194
| 0.65261
|
4a12b606a042af8992100ed22efe9413e578f0ff
| 1,727
|
py
|
Python
|
jiraannouncer/views/travis.py
|
theunkn0wn1/JIRAAnnouncer
|
75fa858f956f3b0d6b2f3dbe9feea979ad3d14c4
|
[
"BSD-3-Clause"
] | null | null | null |
jiraannouncer/views/travis.py
|
theunkn0wn1/JIRAAnnouncer
|
75fa858f956f3b0d6b2f3dbe9feea979ad3d14c4
|
[
"BSD-3-Clause"
] | null | null | null |
jiraannouncer/views/travis.py
|
theunkn0wn1/JIRAAnnouncer
|
75fa858f956f3b0d6b2f3dbe9feea979ad3d14c4
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import simplejson
import urllib
from pyramid.view import view_config
from ..utils import logprint, send, getlast
OFFSET = 5
@view_config(route_name='travis', renderer="json")
def travis(request):
    """Handle TravisCI webhook events.

    Parses the form-encoded ``payload=<urlencoded-json>`` body Travis
    sends, formats two IRC announcement lines and sends them to the
    appropriate channels, skipping exact duplicates of the last message.
    """
    lastmessage = getlast()
    data = request.body.decode('utf-8')
    repo = request.headers['Travis-Repo-Slug']
    if not data.startswith("payload="):
        logprint("Error in Travis input, expected \"payload=\"")
        return
    try:
        # Fix: don't shadow the pyramid `request` object with the parsed
        # payload (the original reassigned `request`).
        payload = simplejson.loads(urllib.parse.unquote(data[8:]))
    except ValueError:
        # JSONDecodeError subclasses ValueError; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        logprint("Error loading Travis payload:")
        logprint(data)
        return
    if "FuelRats/pipsqueak3" in repo:
        channels = ['#mechadev']
    else:
        channels = ['#rattech']
    message1 = ("[\x0315TravisCI\x03] \x0306" + repo + "\x03#" + payload['number'] +
                " (\x0306" + payload['branch'] + "\x03 - " + payload['commit'][:7] +
                " : \x0314" + payload['author_name'] + "\x03): " + payload['result_message'])
    message2 = ("[\x0315TravisCI\x03] Change view: \x02\x0311" + payload['compare_url'] +
                "\x02\x03 Build details: \x02\x0311" + payload['build_url'] + "\x02\x03")
    msgshort1 = {"time": time.time(), "type": "Travis", "key": repo, "full": message1}
    msgshort2 = {"time": time.time(), "type": "Travis", "key": repo, "full": message2}
    if lastmessage['full'] == message2:
        logprint("Duplicate message, skipping:")
        logprint(message1)
        logprint(message2)
    else:
        for channel in channels:
            send(channel, message1, msgshort1)
        time.sleep(0.5)
        for channel in channels:
            send(channel, message2, msgshort2)
| 34.54
| 93
| 0.601621
|
4a12b6876675b95af350a2d03494abba987a59aa
| 2,373
|
py
|
Python
|
modoboa/admin/tests/test_repair.py
|
HarshCasper/modoboa
|
a00baa0593107992f545ee3e89cd4346b9615a96
|
[
"0BSD"
] | 1,602
|
2016-12-15T14:25:34.000Z
|
2022-03-31T16:49:25.000Z
|
modoboa/admin/tests/test_repair.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 1,290
|
2016-12-14T15:39:05.000Z
|
2022-03-31T13:49:09.000Z
|
modoboa/admin/tests/test_repair.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 272
|
2016-12-22T11:58:18.000Z
|
2022-03-17T15:57:24.000Z
|
"""Repair command tests"""
from django.core import management
from modoboa.lib.permissions import ObjectAccess, get_object_owner
from modoboa.lib.tests import ModoTestCase
from .. import factories, models
class RepairTestCase(ModoTestCase):
    """TestCase for repair command."""
    @classmethod
    def setUpTestData(cls): # NOQA:N802
        """Create some data."""
        super(RepairTestCase, cls).setUpTestData()
        factories.populate_database()
    def test_management_command(self):
        """Check that repair restores missing ownership records."""
        # Wipe all ownership records to create the inconsistency.
        ObjectAccess.objects.all().delete()
        mbox = models.Mailbox.objects.first()
        alias = models.Alias.objects.first()
        # assert mbox has no owner
        self.assertIs(get_object_owner(mbox), None)
        # fix it. run in quiet mode because we dont want output in tests
        ret = management.call_command("modo", "repair", "--quiet")
        assert ret is None
        # assert it's fixed
        self.assertIsNot(get_object_owner(mbox), None)
        self.assertIsNot(get_object_owner(alias), None)
    def test_management_command_with_dry_run(self):
        """Check that --dry-run only reports and fixes nothing."""
        ObjectAccess.objects.all().delete()
        mbox = models.Mailbox.objects.first()
        # assert mbox has no owner
        self.assertIs(get_object_owner(mbox), None)
        # show problems. run in quiet mode because we dont want output in tests
        ret = management.call_command("modo", "repair", "--quiet", "--dry-run")
        assert ret is None
        # assert its not fixed
        self.assertIs(get_object_owner(mbox), None)
    def test_management_command_with_nul_domain(self):
        """Just assume nothing raise when an alias has no domain."""
        models.Alias.objects.create(address="@modoboa.xxx")
        ret = management.call_command("modo", "repair", "--quiet")
        assert ret is None
    def test_management_command_with_no_alias(self):
        """Check that a deleted internal self-alias is recreated."""
        # Delete the internal alias (3 rows including related records).
        count, detail = models.Alias.objects.filter(
            address="user@test.com", internal=True).delete()
        self.assertEqual(count, 3)
        ret = management.call_command("modo", "repair", "--quiet")
        assert ret is None
        self.assertTrue(
            models.Alias.objects.filter(
                address="user@test.com", internal=True).exists())
| 38.901639
| 79
| 0.655289
|
4a12b69464feba319f620289dd06e848d8555bc4
| 9,276
|
py
|
Python
|
lib/galaxy/jobs/metrics/instrumenters/collectl.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | 4
|
2018-10-29T18:34:38.000Z
|
2021-09-29T23:30:42.000Z
|
lib/galaxy/jobs/metrics/instrumenters/collectl.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | 1
|
2019-02-04T16:21:27.000Z
|
2019-02-04T16:45:17.000Z
|
lib/galaxy/jobs/metrics/instrumenters/collectl.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | 3
|
2020-02-12T15:22:24.000Z
|
2021-08-19T10:27:39.000Z
|
"""The module describes the ``collectl`` job metrics plugin."""
import logging
import os
import shutil
from galaxy import util
from ..collectl import (
cli,
processes,
subsystems
)
from ..instrumenters import InstrumentPlugin
from ...metrics import formatting
log = logging.getLogger(__name__)

# By default, only grab statistics for user processes (as identified by
# username).
DEFAULT_PROCFILT_ON = "username"
# Record only the "process" subsystem unless configured otherwise.
DEFAULT_SUBSYSTEMS = "process"
# Set to zero to flush every collection.
DEFAULT_FLUSH_INTERVAL = "0"
# Human-readable titles for resource columns appearing in collectl output.
FORMATTED_RESOURCE_TITLES = {
    "PCT": "Percent CPU Usage",
    "RSYS": "Disk Reads",
    "WSYS": "Disk Writes",
}
# Logged when the recorded collectl file is empty (job ended too quickly).
EMPTY_COLLECTL_FILE_MESSAGE = "Skipping process summary due to empty file... job probably did not run long enough for collectl to gather data."
class CollectlFormatter(formatting.JobMetricFormatter):
    """Render raw collectl metric key/value pairs as display pairs."""

    def format(self, key, value):
        """Return a ``(title, display_value)`` tuple for one metric."""
        if key == "pid":
            return ("Process ID", int(value))
        if key == "raw_log_path":
            return ("Relative Path of Full Collectl Log", value)
        if key == "process_max_AccumT":
            return ("Job Runtime (System+User)", formatting.seconds_to_str(float(value)))
        # Remaining keys look like process_<stat_type>_<resource_type>.
        _, stat_type, resource_type = key.split("_", 2)
        if resource_type.startswith("Vm"):
            value_str = "%s KB" % int(value)
        elif resource_type in ["RSYS", "WSYS"] and stat_type in ["count", "max", "sum"]:
            value_str = "%d (# system calls)" % int(value)
        else:
            value_str = str(value)
        title = FORMATTED_RESOURCE_TITLES.get(resource_type, resource_type)
        return ("%s (%s)" % (title, stat_type), value_str)
class CollectlPlugin(InstrumentPlugin):
    """ Run collectl along with job to capture system and/or process data
    according to specified collectl subsystems.
    """
    plugin_type = "collectl"
    formatter = CollectlFormatter()

    def __init__(self, **kwargs):
        """Configure paths, subsystems, and recorder args from ``kwargs``."""
        self.__configure_paths(kwargs)
        self.__configure_subsystems(kwargs)
        saved_logs_path = kwargs.get("saved_logs_path", "")
        if "app" in kwargs:
            log.debug("Found path for saved logs: %s" % saved_logs_path)
            saved_logs_path = kwargs["app"].config.resolve_path(saved_logs_path)
        self.saved_logs_path = saved_logs_path
        self.__configure_collectl_recorder_args(kwargs)
        self.summarize_process_data = util.asbool(kwargs.get("summarize_process_data", True))
        self.log_collectl_program_output = util.asbool(kwargs.get("log_collectl_program_output", False))
        if self.summarize_process_data:
            if subsystems.get_subsystem("process") not in self.subsystems:
                raise Exception("Collectl plugin misconfigured - cannot summarize_process_data without process subsystem being enabled.")
            process_statistics = kwargs.get("process_statistics", None)
            # None will let processes module use default set of statistics
            # defined there.
            self.process_statistics = processes.parse_process_statistics(process_statistics)

    def pre_execute_instrument(self, job_directory):
        """Return shell commands that start collectl recording for the job."""
        commands = []
        # Capture PID of process so we can walk its ancestors when building
        # statistics for the whole job.
        commands.append('''echo "$$" > '%s' ''' % self.__pid_file(job_directory))
        # Run collectl in record mode to capture process and system level
        # statistics according to supplied subsystems.
        commands.append(self.__collectl_record_command(job_directory))
        return commands

    def post_execute_instrument(self, job_directory):
        """Return post-job shell commands (currently none)."""
        commands = []
        # collectl dies when job script completes, perhaps capture pid of
        # collectl above and check if it is still alive to allow tracking if
        # collectl ran successfully through the whole job.
        return commands

    def job_properties(self, job_id, job_directory):
        """Derive job metric properties from the recorded collectl log.

        Optionally copies the raw log to ``saved_logs_path`` and, when
        configured, summarizes per-process statistics via collectl playback.
        """
        # Context manager ensures the pid file handle is closed promptly
        # (the original left the file open to the garbage collector).
        with open(self.__pid_file(job_directory), "r") as pid_file:
            pid = pid_file.read().strip()
        contents = os.listdir(job_directory)
        try:
            # List comprehension keeps IndexError semantics under Python 3;
            # the original ``filter(...)[0]`` raised TypeError there because
            # py3 filter returns a non-subscriptable iterator.
            rel_path = [c for c in contents if self._is_instrumented_collectl_log(c)][0]
            path = os.path.join(job_directory, rel_path)
        except IndexError:
            message = "Failed to find collectl log in directory %s, files were %s" % (job_directory, contents)
            raise Exception(message)
        properties = dict(
            pid=int(pid),
        )
        if self.saved_logs_path:
            destination_rel_dir = os.path.join(*util.directory_hash_id(job_id))
            destination_rel_path = os.path.join(destination_rel_dir, rel_path)
            destination_path = os.path.join(self.saved_logs_path, destination_rel_path)
            destination_dir = os.path.dirname(destination_path)
            if not os.path.isdir(destination_dir):
                os.makedirs(destination_dir)
            shutil.copyfile(path, destination_path)
            properties["raw_log_path"] = destination_rel_path
        if self.summarize_process_data:
            # Run collectl in playback and generate statistics of interest
            summary_statistics = self.__summarize_process_data(pid, path)
            for statistic, value in summary_statistics:
                properties["process_%s" % "_".join(statistic)] = value
        return properties

    def __configure_paths(self, kwargs):
        # 95% of time I would expect collectl to just be installed with apt or
        # yum, but if it is manually installed on not on path, allow
        # configuration of explicit path - and allow path to be different
        # between galaxy job handler (local_collectl_path) and compute node
        # (remote_collectl_path).
        collectl_path = kwargs.get("collectl_path", "collectl")
        self.remote_collectl_path = kwargs.get("remote_collectl_path", collectl_path)
        self.local_collectl_path = kwargs.get("local_collectl_path", collectl_path)

    def __configure_subsystems(self, kwargs):
        # Parse comma-separated subsystem names into subsystem objects.
        raw_subsystems_str = kwargs.get("subsystems", DEFAULT_SUBSYSTEMS)
        raw_subsystems = util.listify(raw_subsystems_str, do_strip=True)
        self.subsystems = [subsystems.get_subsystem(_) for _ in raw_subsystems]

    def __configure_collectl_recorder_args(self, kwargs):
        collectl_recorder_args = kwargs.copy()
        # Allow deployer to configure separate system and process intervals,
        # but if they specify just one - use it for both. Thinking here is this
        # plugin's most useful feature is the process level information so
        # this is likely what the deployer is attempting to configure.
        if "interval" in kwargs and "interval2" not in kwargs:
            collectl_recorder_args["interval2"] = kwargs["interval"]
        if "flush" not in kwargs:
            collectl_recorder_args["flush"] = DEFAULT_FLUSH_INTERVAL
        procfilt_on = kwargs.get("procfilt_on", DEFAULT_PROCFILT_ON).lower()
        # Calculate explicit arguments, rest can just be passed through from
        # constructor arguments.
        explicit_args = dict(
            collectl_path=self.remote_collectl_path,
            procfilt=procfilt_argument(procfilt_on),
            subsystems=self.subsystems,
        )
        collectl_recorder_args.update(explicit_args)
        self.collectl_recorder_args = collectl_recorder_args

    def __summarize_process_data(self, pid, collectl_log_path):
        """Play back the recorded log and compute per-process statistics."""
        playback_cli_args = dict(
            collectl_path=self.local_collectl_path,
            playback_path=collectl_log_path,
            sep="9"
        )
        if not os.stat(collectl_log_path).st_size:
            log.debug(EMPTY_COLLECTL_FILE_MESSAGE)
            return []
        playback_cli = cli.CollectlCli(**playback_cli_args)
        return processes.generate_process_statistics(playback_cli, pid, self.process_statistics)

    def __collectl_recorder_cli(self, job_directory):
        """Build the CLI object used to record into the job directory."""
        cli_args = self.collectl_recorder_args.copy()
        cli_args["destination_path"] = self._instrument_file_path(job_directory, "log")
        return cli.CollectlCli(**cli_args)

    def __collectl_record_command(self, job_directory):
        """Return the backgrounded shell command that records collectl data."""
        collectl_cli = self.__collectl_recorder_cli(job_directory)
        if self.log_collectl_program_output:
            redirect_to = self._instrument_file_path(job_directory, "program_output")
        else:
            redirect_to = "/dev/null"
        return "%s > %s 2>&1 &" % (
            collectl_cli.build_command_line(),
            redirect_to,
        )

    def __pid_file(self, job_directory):
        """Path of the file recording the instrumented job's shell PID."""
        return self._instrument_file_path(job_directory, "pid")

    def _is_instrumented_collectl_log(self, filename):
        """True if ``filename`` looks like a log this plugin recorded."""
        prefix = self._instrument_file_name("log")
        return filename.startswith(prefix) and filename.endswith(".raw.gz")
def procfilt_argument(procfilt_on):
    """Translate a ``procfilt_on`` setting into a collectl procfilt value.

    :param procfilt_on: ``"username"``, ``"uid"``, ``"none"``, or an empty
        string (the latter two disable process filtering).
    :returns: the collectl procfilt expression, or ``""`` for no filtering.
    :raises Exception: for any unrecognized value.
    """
    if procfilt_on == "username":
        return "U$USER"
    elif procfilt_on == "uid":
        return "u$UID"
    else:
        # Ensure it is empty or "none"; the original condition used ``or``,
        # which incorrectly rejected both of those legitimate values.
        if procfilt_on and procfilt_on.lower() != "none":
            raise Exception("Invalid procfilt_on argument encountered")
        return ""
# Public API of this module.
__all__ = ('CollectlPlugin', )
| 42.163636
| 143
| 0.674105
|
4a12b7e77c47148c082ae9e8ce25f2f06223827a
| 616
|
py
|
Python
|
Final_Project/top_ten_tags/reducer_top_ten_tags.py
|
saturator22/hadoop-mapreduce-udacity
|
28bcf82985d96ce967137df9b2da7a3c1ff4d69e
|
[
"MIT"
] | null | null | null |
Final_Project/top_ten_tags/reducer_top_ten_tags.py
|
saturator22/hadoop-mapreduce-udacity
|
28bcf82985d96ce967137df9b2da7a3c1ff4d69e
|
[
"MIT"
] | null | null | null |
Final_Project/top_ten_tags/reducer_top_ten_tags.py
|
saturator22/hadoop-mapreduce-udacity
|
28bcf82985d96ce967137df9b2da7a3c1ff4d69e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import csv
def reducer():
    """Aggregate per-tag counts from stdin and emit the ten most frequent.

    Input: tab-separated ``tag<TAB>count`` lines on stdin.
    Output: tab-separated ``tag<TAB>total`` lines on stdout, highest first.
    """
    rows = csv.reader(sys.stdin, delimiter='\t')
    out = csv.writer(sys.stdout, delimiter='\t')
    totals = {}
    for row in rows:
        tag, occurrences = row[0], int(row[1])
        # Accumulate occurrences per tag.
        totals[tag] = totals.get(tag, 0) + occurrences
    # Sort descending by total count and keep the first ten entries.
    for tag, count in sorted(totals.items(), key=lambda item: -item[1])[:10]:
        out.writerow([tag, count])
def main():
    """Entry point: run the streaming reducer over stdin."""
    reducer()
if __name__ == "__main__":
    main()
| 20.533333
| 71
| 0.592532
|
4a12b7ffb7ad0ec8a47b41a4dacac3095081f72a
| 2,570
|
py
|
Python
|
pyaam/texture.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | 2
|
2020-07-06T18:18:25.000Z
|
2021-01-20T08:05:21.000Z
|
pyaam/texture.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | null | null | null |
pyaam/texture.py
|
zangkaiqiang/pyaam
|
3c59026df17fb0b4588797026d5a2fe64d05fca9
|
[
"MIT"
] | 3
|
2021-01-11T07:16:42.000Z
|
2021-07-28T11:37:01.000Z
|
# coding: utf-8
from __future__ import division
import cv2
import numpy as np
from pyaam.texturemapper import TextureMapper
from pyaam.utils import get_mask, get_aabb, get_vertices, normalize, pca
class TextureModel(object):
    """PCA-based texture model for an active appearance model.

    ``model`` holds the texture eigenvectors (one mode per column),
    ``mean`` the mean texture vector, and ``variance`` the per-mode
    variance of the training projections.
    """

    def __init__(self, model, mean, variance):
        self.model = model
        self.mean = mean
        self.variance = variance

    @classmethod
    def train(cls, lmks, imgs, ref, frac, kmax):
        """Train a texture model from landmark columns and images.

        ``frac`` is the fraction of variance to keep and ``kmax`` the
        maximum number of PCA modes (both forwarded to ``pca``).
        """
        G = get_data_matrix(imgs, lmks, ref)
        Gm = G.mean(axis=1)
        # Center the data matrix on the mean texture.
        G -= Gm[:,np.newaxis]
        N = lmks.shape[1]
        D = pca(G, frac, kmax)
        # normalize eigenvectors
        for i in range(D.shape[1]):
            D[:,i] /= np.linalg.norm(D[:,i])
        # compute variance
        Q = D.T.dot(G)
        Q = pow(Q, 2)
        e = Q.sum(axis=1) / (N-1)
        return cls(D, Gm, e)

    @classmethod
    def load(cls, filename):
        """Load a previously saved model from an ``.npz`` archive."""
        arch = np.load(filename)
        return cls(arch['model'], arch['mean'], arch['variance'])

    def save(self, filename):
        """Persist the model to ``filename`` as an ``.npz`` archive."""
        np.savez(filename, model=self.model, mean=self.mean, variance=self.variance)

    def num_modes(self):
        """Return the number of texture modes (eigenvectors)."""
        return self.model.shape[1]

    def texture_vector_size(self):
        """Return the length of a texture vector."""
        return self.model.shape[0]

    def calc_texture(self, params):
        """Synthesize a texture vector from model parameters ``params``."""
        t = self.mean + self.model.dot(params)
        return t.clip(0, 255)  # clamp pixels intensities

    def calc_params(self, img, lmk, ref, warp_triangles):
        """Project the texture sampled at ``lmk`` in ``img`` onto the model.

        Each resulting parameter is clamped to +/- 3 standard deviations
        of its mode's variance.
        """
        ref = ref.reshape((ref.size//2, 2)).astype('int32')
        src = lmk.reshape(ref.shape)
        img = normalize(img, get_aabb(src))
        mask = get_mask(ref, img.shape[:2])
        verts = get_vertices(ref)
        warp = warp_triangles(img, src[verts], ref[verts])
        t = warp[mask].ravel() - self.mean
        p = self.model.T.dot(t)
        # clamp
        c = 3
        for i in range(len(self.variance)):
            v = c * np.sqrt(self.variance[i])
            if abs(p[i]) > v:
                p[i] = v if p[i] > 0 else -v
        return p
def get_data_matrix(imgs, lmks, ref, img_size=(640, 480)):
    """Build the texture data matrix, one warped texture sample per column.

    :param imgs: iterator of images (consumed with ``next``), one per sample
    :param lmks: landmark matrix with one column of coordinates per sample
    :param ref: flattened reference shape (x, y interleaved)
    :param img_size: (width, height) of the source images; defaults to the
        640x480 value that was previously hard-coded (resolves the FIXME)
    :returns: numpy matrix of shape (masked_pixels * 3, n_samples)
    """
    ref = ref.reshape((ref.size//2, 2)).astype('int32')
    mask = get_mask(ref, img_size)
    verts = get_vertices(ref)
    # TextureMapper takes (height, width) — note the swapped order.
    tm = TextureMapper(img_size[1], img_size[0])
    n_samples = lmks.shape[1]
    n_pixels = mask.sum() * 3
    G = np.empty((n_pixels, n_samples))
    for i in range(n_samples):
        src = lmks[:,i].reshape(ref.shape)
        img = normalize(next(imgs), get_aabb(src))
        warp = tm.warp_triangles(img, src[verts], ref[verts])
        G[:,i] = warp[mask].ravel()
    return G
| 30.235294
| 84
| 0.581323
|
4a12b872bdc3cb279509b56d3ffc109da53660cd
| 7,858
|
py
|
Python
|
homeassistant/components/mysensors/climate.py
|
aschor/core
|
eb2238a9e1c67ee926a40ab85fe13ba37f2c538d
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
homeassistant/components/mysensors/climate.py
|
aschor/core
|
eb2238a9e1c67ee926a40ab85fe13ba37f2c538d
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/mysensors/climate.py
|
aschor/core
|
eb2238a9e1c67ee926a40ab85fe13ba37f2c538d
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""MySensors platform that offers a Climate (MySensors-HVAC) component."""
from __future__ import annotations
from typing import Any
from homeassistant.components import mysensors
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import MYSENSORS_DISCOVERY, DiscoveryInfo
from .helpers import on_unload
# Translation tables between Home Assistant HVAC modes and the MySensors
# state strings (one map per direction).
DICT_HA_TO_MYS = {
    HVAC_MODE_AUTO: "AutoChangeOver",
    HVAC_MODE_COOL: "CoolOn",
    HVAC_MODE_HEAT: "HeatOn",
    HVAC_MODE_OFF: "Off",
}
DICT_MYS_TO_HA = {
    "AutoChangeOver": HVAC_MODE_AUTO,
    "CoolOn": HVAC_MODE_COOL,
    "HeatOn": HVAC_MODE_HEAT,
    "Off": HVAC_MODE_OFF,
}
# Fan speeds and operation modes exposed to Home Assistant.
FAN_LIST = ["Auto", "Min", "Normal", "Max"]
OPERATION_LIST = [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT]
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up this platform for a specific ConfigEntry(==Gateway)."""

    async def async_discover(discovery_info: DiscoveryInfo) -> None:
        """Discover and add a MySensors climate."""
        mysensors.setup_mysensors_platform(
            hass,
            DOMAIN,
            discovery_info,
            MySensorsHVAC,
            async_add_entities=async_add_entities,
        )

    # Register the discovery callback and ensure it is disconnected again
    # when the config entry is unloaded.
    on_unload(
        hass,
        config_entry.entry_id,
        async_dispatcher_connect(
            hass,
            MYSENSORS_DISCOVERY.format(config_entry.entry_id, DOMAIN),
            async_discover,
        ),
    )
class MySensorsHVAC(mysensors.device.MySensorsEntity, ClimateEntity):
    """Representation of a MySensors HVAC."""

    @property
    def supported_features(self) -> int:
        """Return the list of supported features."""
        features = 0
        set_req = self.gateway.const.SetReq
        if set_req.V_HVAC_SPEED in self._values:
            features = features | SUPPORT_FAN_MODE
        # Devices reporting both setpoints support a target range;
        # otherwise only a single target temperature.
        if (
            set_req.V_HVAC_SETPOINT_COOL in self._values
            and set_req.V_HVAC_SETPOINT_HEAT in self._values
        ):
            features = features | SUPPORT_TARGET_TEMPERATURE_RANGE
        else:
            features = features | SUPPORT_TARGET_TEMPERATURE
        return features

    @property
    def temperature_unit(self) -> str:
        """Return the unit of measurement."""
        return TEMP_CELSIUS if self.hass.config.units.is_metric else TEMP_FAHRENHEIT

    @property
    def current_temperature(self) -> float | None:
        """Return the current temperature."""
        value: str | None = self._values.get(self.gateway.const.SetReq.V_TEMP)
        float_value: float | None = None
        if value is not None:
            float_value = float(value)
        return float_value

    @property
    def target_temperature(self) -> float | None:
        """Return the temperature we try to reach."""
        set_req = self.gateway.const.SetReq
        # With both setpoints present the device is in range mode and has
        # no single target temperature.
        if (
            set_req.V_HVAC_SETPOINT_COOL in self._values
            and set_req.V_HVAC_SETPOINT_HEAT in self._values
        ):
            return None
        temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
        if temp is None:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
        return float(temp) if temp is not None else None

    @property
    def target_temperature_high(self) -> float | None:
        """Return the highbound target temperature we try to reach."""
        set_req = self.gateway.const.SetReq
        # Only meaningful in range mode (heat setpoint also present).
        if set_req.V_HVAC_SETPOINT_HEAT in self._values:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
            return float(temp) if temp is not None else None
        return None

    @property
    def target_temperature_low(self) -> float | None:
        """Return the lowbound target temperature we try to reach."""
        set_req = self.gateway.const.SetReq
        # Only meaningful in range mode (cool setpoint also present).
        if set_req.V_HVAC_SETPOINT_COOL in self._values:
            temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
            return float(temp) if temp is not None else None
        return None

    @property
    def hvac_mode(self) -> str:
        """Return current operation ie. heat, cool, idle."""
        return self._values.get(self.value_type, HVAC_MODE_HEAT)

    @property
    def hvac_modes(self) -> list[str]:
        """List of available operation modes."""
        return OPERATION_LIST

    @property
    def fan_mode(self) -> str | None:
        """Return the fan setting."""
        return self._values.get(self.gateway.const.SetReq.V_HVAC_SPEED)

    @property
    def fan_modes(self) -> list[str]:
        """List of available fan modes."""
        return FAN_LIST

    async def async_set_temperature(self, **kwargs: Any) -> None:
        """Set new target temperature."""
        set_req = self.gateway.const.SetReq
        temp = kwargs.get(ATTR_TEMPERATURE)
        low = kwargs.get(ATTR_TARGET_TEMP_LOW)
        high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        heat = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
        cool = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
        updates = []
        if temp is not None:
            if heat is not None:
                # Set HEAT Target temperature
                value_type = set_req.V_HVAC_SETPOINT_HEAT
            elif cool is not None:
                # Set COOL Target temperature
                value_type = set_req.V_HVAC_SETPOINT_COOL
            if heat is not None or cool is not None:
                updates = [(value_type, temp)]
        elif all(val is not None for val in (low, high, heat, cool)):
            # Range mode: push both setpoints to the device.
            updates = [
                (set_req.V_HVAC_SETPOINT_HEAT, low),
                (set_req.V_HVAC_SETPOINT_COOL, high),
            ]
        for value_type, value in updates:
            self.gateway.set_child_value(
                self.node_id, self.child_id, value_type, value, ack=1
            )
            if self.assumed_state:
                # Optimistically assume that device has changed state
                self._values[value_type] = value
                self.async_write_ha_state()

    async def async_set_fan_mode(self, fan_mode: str) -> None:
        """Set new target fan mode."""
        set_req = self.gateway.const.SetReq
        self.gateway.set_child_value(
            self.node_id, self.child_id, set_req.V_HVAC_SPEED, fan_mode, ack=1
        )
        if self.assumed_state:
            # Optimistically assume that device has changed state
            self._values[set_req.V_HVAC_SPEED] = fan_mode
            self.async_write_ha_state()

    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac operation mode."""
        self.gateway.set_child_value(
            self.node_id,
            self.child_id,
            self.value_type,
            DICT_HA_TO_MYS[hvac_mode],
            ack=1,
        )
        if self.assumed_state:
            # Optimistically assume that device has changed state
            self._values[self.value_type] = hvac_mode
            self.async_write_ha_state()

    async def async_update(self) -> None:
        """Update the controller with the latest value from a sensor."""
        await super().async_update()
        # Translate the raw MySensors state string into the HA HVAC mode.
        self._values[self.value_type] = DICT_MYS_TO_HA[self._values[self.value_type]]
| 35.080357
| 85
| 0.651184
|
4a12b8952bda57afc824c275be7abbfc12c4ab8f
| 719
|
py
|
Python
|
benchmarking/regression_detectors/regression_detector_base.py
|
virtan/FAI-PEP
|
8641a54b2328c343ab0470f195a42da1021d1392
|
[
"Apache-2.0"
] | 1
|
2022-03-21T06:39:38.000Z
|
2022-03-21T06:39:38.000Z
|
benchmarking/regression_detectors/regression_detector_base.py
|
virtan/FAI-PEP
|
8641a54b2328c343ab0470f195a42da1021d1392
|
[
"Apache-2.0"
] | 1
|
2021-04-19T09:50:14.000Z
|
2021-04-19T09:50:14.000Z
|
benchmarking/regression_detectors/regression_detector_base.py
|
isabella232/FAI-PEP
|
a4089c79ab765e7f05080348c2978a07c3487d4c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class RegressionDetectorBase(object):
    """Base class for regression detectors.

    Subclasses override ``isRegressed`` to report whether ``latest_data``
    regressed relative to ``compare_data``.
    """

    def __init__(self):
        pass

    def isRegressed(self, filename, latest_data, compare_data,
                    control_in_compare):
        """Return a regression verdict; this base implementation is a
        no-op and always returns ``None``."""
        return None
| 29.958333
| 78
| 0.582754
|
4a12b8ba3b9eab9ea2d7e58f84a6dbee574f309a
| 448
|
py
|
Python
|
bou/constants.py
|
Feastybeast/bou
|
6ea7d95cbc400fc1a0ebbad40fddad8c66717215
|
[
"MIT"
] | null | null | null |
bou/constants.py
|
Feastybeast/bou
|
6ea7d95cbc400fc1a0ebbad40fddad8c66717215
|
[
"MIT"
] | 2
|
2021-03-14T01:07:02.000Z
|
2021-03-16T08:12:08.000Z
|
bou/constants.py
|
Feastybeast/bou
|
6ea7d95cbc400fc1a0ebbad40fddad8c66717215
|
[
"MIT"
] | null | null | null |
""" bou.constants """
ASTERISK = '*'
BACK_SLASH = '\\'
BLANK = ''
DOUBLE_QUOTE = '"'
DOT_PY = '.py'
DOWNGRADE = 'downgrade'
FORWARD_SLASH = '/'
GT = '>'
INPUT = 'input'
LOCALTIME = 'localtime'
LT = '<'
MIGRATIONS_DEFAULT = 'migrations/'
NAME = 'name'
OUTPUT = 'output'
PIPE = '|'
QUESTION_MARK = '?'
QUOTE = '\''
SPACE = ' '
TYPE = 'type'
UNDERSCORE = '_'
UPGRADE = 'upgrade'
VERSION = 'version'
VERSION_CONST = 'VERSION'
| 17.230769
| 35
| 0.580357
|
4a12b8e5302990f03d4138fe9762e59cddf33ce0
| 4,558
|
py
|
Python
|
pinion/ui.py
|
dzarda/Pinion
|
fc5cc3bd6df0e7b434b41f0754a6861c52f87ae8
|
[
"MIT"
] | null | null | null |
pinion/ui.py
|
dzarda/Pinion
|
fc5cc3bd6df0e7b434b41f0754a6861c52f87ae8
|
[
"MIT"
] | null | null | null |
pinion/ui.py
|
dzarda/Pinion
|
fc5cc3bd6df0e7b434b41f0754a6861c52f87ae8
|
[
"MIT"
] | null | null | null |
import click
import csv
import io
from pinion import __version__
def splitStr(delimiter, escapeChar, s):
    """Split *s* on *delimiter*, honoring *escapeChar* as an escape.

    Returns the fields of the first line of *s* as a list, or ``None``
    when *s* contains no rows at all.
    """
    # Delegate escape-aware splitting to the csv module.
    rows = csv.reader(io.StringIO(s), delimiter=delimiter, escapechar=escapeChar)
    return next(rows, None)
class CliList(click.ParamType):
    """CLI argument type accepting a delimiter-separated list of strings."""
    name = "list"

    def __init__(self, separator=",", escape="\\", *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.separator = separator
        self.escape = escape

    def convert(self, value, param, ctx):
        """Parse *value* into a list of strings, rejecting blank input."""
        if not value.strip():
            self.fail(f"{value} is not a valid argument specification",
                      param, ctx)
        return list(splitStr(self.separator, self.escape, value))
@click.command("template")
@click.option("-b", "--board",
type=click.Path(file_okay=True, dir_okay=False, exists=True), required=True,
help="Source KiCAD board (*.kicad_pcb)")
@click.option("-o", "--output", type=click.File("w"), required=True,
help="Filepath or stdout (when '-' specified) for the resulting template")
@click.option("-c", "--components", type=str, default=None, multiple=True,
help="Include only components mathing regex in the template")
def template(board, output, components):
"""
Output a template for pinout specification based on specified board
"""
# Note that we import inside functions as pcbnew import takes ~1 to load
# which makes the UI laggy
from pinion.template import generateTemplate
from pcbnewTransition import pcbnew
pcb = pcbnew.LoadBoard(board)
print(components)
generateTemplate(pcb, output, components)
@click.command("generate")
@click.argument("outputdir", type=click.Path(file_okay=False, dir_okay=True))
@click.option("-b", "--board",
type=click.Path(file_okay=True, dir_okay=False, exists=True), required=True,
help="Source KiCAD board (*.kicad_pcb)")
@click.option("-s", "--specification", type=click.File("r"),
help="YAML specification of the pinout")
@click.option("--pack/--no-pack", default=True,
help="Pack pinion-widget with the source")
@click.option("--dpi", type=int, default=300,
help="DPI of the generated board image")
@click.option("--style", help="PcbDraw style specification")
@click.option("--libs", help="PcbDraw library specification")
@click.option("--remap", help="PcbDraw footprint remapping specification")
@click.option("--filter", help="PcbDraw filter specification")
def generate(board, specification, outputdir, dpi, pack, style, libs, remap, filter):
"""
Generate a pinout diagram
"""
# Note that we import inside functions as pcbnew import takes ~1 to load
# which makes the UI laggy
from pinion.generate import generate
from ruamel.yaml import YAML
from pcbnewTransition import pcbnew
yaml=YAML(typ='safe')
generate(specification=yaml.load(specification),
board=pcbnew.LoadBoard(board),
outputdir=outputdir,
pack=pack,
dpi=dpi,
pcbdrawArgs={
"style": style,
"libs": libs,
"remap": remap,
"filter": filter
})
@click.command("get")
@click.argument("what", type=str)
@click.argument("output", type=click.File("w"))
def get(what, output):
"""
Get Pinion resource files - e.g., template, pinion-widget javascript or
pinion-widget styles.
Available options: js css template
"""
import pinion.get
return pinion.get.get(what, output)
@click.command("serve")
@click.option("--directory", "-d", type=click.Path(dir_okay=True, file_okay=False),
default="./", help="Directory to serve")
@click.option("--port", "-p", type=int, default=3333,
help="Port on which to run a webserver")
@click.option("--browser", "-b", is_flag=True,
help="Automatically open web browser")
def serve(directory, port, browser):
"""
Serve pinion digram generated with the '--packed' option.
"""
from pinion.serve import serve
return serve(directory, port, browser)
@click.group()
@click.version_option(__version__)
def cli():
    """
    Generate beautiful pinout diagrams of your PCBs for web.
    """
    pass
# Register all subcommands on the top-level group.
cli.add_command(template)
cli.add_command(generate)
cli.add_command(get)
cli.add_command(serve)
if __name__ == "__main__":
    cli()
| 33.514706
| 85
| 0.664985
|
4a12b9b9a41c8b4a0ae1c82c1a33e3381a1339ac
| 235
|
py
|
Python
|
enshop/enshop/doctype/enshop_settings_banner/test_enshop_settings_banner.py
|
corioste/enshop
|
9159ef7389873d9ec9c5188dbdbe2a03f8c3baad
|
[
"MIT"
] | null | null | null |
enshop/enshop/doctype/enshop_settings_banner/test_enshop_settings_banner.py
|
corioste/enshop
|
9159ef7389873d9ec9c5188dbdbe2a03f8c3baad
|
[
"MIT"
] | null | null | null |
enshop/enshop/doctype/enshop_settings_banner/test_enshop_settings_banner.py
|
corioste/enshop
|
9159ef7389873d9ec9c5188dbdbe2a03f8c3baad
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Bai Web and Mobile Lab and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestEnshopSettingsBanner(unittest.TestCase):
    """Placeholder test case for the Enshop Settings Banner doctype."""
    pass
| 21.363636
| 61
| 0.770213
|
4a12ba1632867dd55eb2bf267cb97ec874c7157a
| 1,002
|
py
|
Python
|
projects/migrations/0001_initial.py
|
KamenSentai/Portfolio-Django
|
93b73d14b469a948ac010cf9767e747c38d32f55
|
[
"MIT"
] | null | null | null |
projects/migrations/0001_initial.py
|
KamenSentai/Portfolio-Django
|
93b73d14b469a948ac010cf9767e747c38d32f55
|
[
"MIT"
] | 14
|
2020-02-12T00:23:46.000Z
|
2022-03-11T23:48:23.000Z
|
projects/migrations/0001_initial.py
|
KamenSentai/Portfolio-Django
|
93b73d14b469a948ac010cf9767e747c38d32f55
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2019-05-27 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the ``Image`` and ``Projects`` tables."""
    # NOTE: auto-generated migration; edit with care, Django tracks its
    # state. ``Image.id_project`` is a plain IntegerField, not a foreign
    # key — presumably it references Projects.id; verify against models.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('id_project', models.IntegerField()),
                ('url', models.CharField(max_length=60)),
            ],
        ),
        migrations.CreateModel(
            name='Projects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=60)),
                ('subtitle', models.CharField(max_length=255)),
                ('slug', models.CharField(max_length=60)),
                ('cover', models.CharField(max_length=60)),
            ],
        ),
    ]
| 30.363636
| 114
| 0.547904
|
4a12bbdef5ca7dd25e9e87ce2b49c448b2a0d2b9
| 1,975
|
py
|
Python
|
autoPyTorch/pipeline/components/setup/augmentation/image/RandomCutout.py
|
LMZimmer/Auto-PyTorch_refactor
|
ac7a9ce35e87a428caca2ac108b362a54d3b8f3a
|
[
"Apache-2.0"
] | null | null | null |
autoPyTorch/pipeline/components/setup/augmentation/image/RandomCutout.py
|
LMZimmer/Auto-PyTorch_refactor
|
ac7a9ce35e87a428caca2ac108b362a54d3b8f3a
|
[
"Apache-2.0"
] | 34
|
2020-10-06T08:06:46.000Z
|
2021-01-21T13:23:34.000Z
|
autoPyTorch/pipeline/components/setup/augmentation/image/RandomCutout.py
|
LMZimmer/Auto-PyTorch_refactor
|
ac7a9ce35e87a428caca2ac108b362a54d3b8f3a
|
[
"Apache-2.0"
] | 1
|
2020-10-14T12:25:47.000Z
|
2020-10-14T12:25:47.000Z
|
from typing import Any, Dict, Optional, Union
import ConfigSpace as CS
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
UniformFloatHyperparameter,
)
import imgaug.augmenters as iaa
from imgaug.augmenters.meta import Augmenter
import numpy as np
from autoPyTorch.pipeline.components.setup.augmentation.image.base_image_augmenter import BaseImageAugmenter
class RandomCutout(BaseImageAugmenter):
    """Image augmenter that randomly cuts rectangular holes out of images.

    Wraps imgaug's ``Cutout``, applied with probability ``p`` when the
    augmenter is enabled.
    """

    def __init__(self, use_augmenter: bool = True, p: float = 0.5,
                 random_state: Optional[Union[int, np.random.RandomState]] = None):
        super().__init__(use_augmenter=use_augmenter)
        self.p = p  # probability that cutout is applied to an image
        self.random_state = random_state

    def fit(self, X: Dict[str, Any], y: Any = None) -> BaseImageAugmenter:
        """Instantiate the underlying imgaug augmenter when enabled."""
        if self.use_augmenter:
            # 1-10 holes per image, each sized 10%-50% of the image.
            self.augmenter: Augmenter = iaa.Sometimes(self.p, iaa.Cutout(nb_iterations=(1, 10), size=(0.1, 0.5),
                                                      random_state=self.random_state),
                                                      name=self.get_properties()['name'])
        return self

    @staticmethod
    def get_hyperparameter_search_space(
        dataset_properties: Optional[Dict[str, str]] = None
    ) -> ConfigurationSpace:
        """Return the ConfigSpace describing this component's hyperparameters."""
        cs = ConfigurationSpace()
        p = UniformFloatHyperparameter('p', lower=0.2, upper=1, default_value=0.5)
        use_augmenter = CategoricalHyperparameter('use_augmenter', choices=[True, False], default_value=True)
        cs.add_hyperparameters([p, use_augmenter])
        # only add hyperparameters to configuration space if we are using the augmenter
        cs.add_condition(CS.EqualsCondition(p, use_augmenter, True))
        return cs

    @staticmethod
    def get_properties(dataset_properties: Optional[Dict[str, str]] = None
                       ) -> Dict[str, Any]:
        """Return static properties (name) of this component."""
        return {'name': 'RandomCutout'}
| 39.5
| 112
| 0.663797
|
4a12bdc416c6247fced9ee416e8da785df3bcf39
| 2,945
|
py
|
Python
|
maskrcnn_benchmark/modeling/roi_heads/maskiou_head/roi_maskiou_feature_extractors.py
|
mrlooi/maskrcnn-benchmark
|
135168ddda9436eead21fc945c192cffd8421e6a
|
[
"MIT"
] | 344
|
2019-08-29T09:08:11.000Z
|
2022-03-16T08:37:42.000Z
|
maskrcnn_benchmark/modeling/roi_heads/maskiou_head/roi_maskiou_feature_extractors.py
|
mrlooi/maskrcnn-benchmark
|
135168ddda9436eead21fc945c192cffd8421e6a
|
[
"MIT"
] | 46
|
2019-09-20T12:35:59.000Z
|
2022-03-07T20:02:21.000Z
|
maskrcnn_benchmark/modeling/roi_heads/maskiou_head/roi_maskiou_feature_extractors.py
|
mrlooi/maskrcnn-benchmark
|
135168ddda9436eead21fc945c192cffd8421e6a
|
[
"MIT"
] | 67
|
2019-08-29T09:56:31.000Z
|
2022-03-12T13:47:02.000Z
|
# Mask Scoring R-CNN
# Wriiten by zhaojin.huang, 2018-12.
import torch
from torch import nn
from torch.nn import functional as F
from maskrcnn_benchmark.layers import Conv2d
from maskrcnn_benchmark.modeling.make_layers import make_conv3x3
from maskrcnn_benchmark.modeling.make_layers import make_fc
class MaskIoUFeatureExtractor(nn.Module):
    """
    MaskIou head feature extractor.

    Concatenates the mask head's ROI features with the predicted mask (one
    extra channel), runs a small conv stack, then two FC layers whose output
    feeds the mask-IoU predictor.
    """
    def __init__(self, cfg, in_channels):
        super(MaskIoUFeatureExtractor, self).__init__()
        input_channels = in_channels + 1  # cat features and mask single channel
        use_gn = cfg.MODEL.ROI_MASKIOU_HEAD.USE_GN
        representation_size = cfg.MODEL.ROI_MASKIOU_HEAD.MLP_HEAD_DIM
        resolution_key = "RESOLUTION"
        pooler_resolution_key = "POOLER_RESOLUTION"
        resolution = cfg.MODEL.ROI_MASK_HEAD[resolution_key]
        input_pooler_resolution = cfg.MODEL.ROI_MASK_HEAD[pooler_resolution_key]
        # Identity by default; replaced below when the incoming mask is at
        # twice the pooled feature resolution and must be downsampled first.
        self.max_pool2d = lambda x: x
        if resolution == input_pooler_resolution * 2:
            self.max_pool2d = torch.nn.MaxPool2d(kernel_size=2, stride=2)
            resolution = resolution // 2  # after max pooling 2x2
        elif resolution != input_pooler_resolution:
            # Only the equal or exactly-2x configurations are supported.
            raise NotImplementedError(
                "Only supports %s == %s or %s == 2x%s. Received %d vs %d instead"
                % (resolution_key, pooler_resolution_key, resolution_key, pooler_resolution_key,
                   resolution, input_pooler_resolution)
            )
        layers = cfg.MODEL.ROI_MASKIOU_HEAD.CONV_LAYERS
        # stride=1 for each layer, and stride=2 for last layer (halves the
        # spatial size once before the FC layers)
        strides = [1 for l in layers]
        strides[-1] = 2
        next_feature = input_channels
        self.blocks = []
        for layer_idx, layer_features in enumerate(layers):
            # Registered by name so state-dict keys are stable (maskiou_fcn1, ...).
            layer_name = "maskiou_fcn{}".format(layer_idx+1)
            stride = strides[layer_idx]
            module = make_conv3x3(next_feature, layer_features, stride=stride, dilation=1, use_gn=use_gn)
            self.add_module(layer_name, module)
            self.blocks.append(layer_name)
            next_feature = layer_features
            if stride == 2:
                # Track the spatial size so the FC input dimension is correct.
                resolution = resolution // 2
        self.maskiou_fc1 = make_fc(next_feature*resolution**2, representation_size, use_gn=False)
        self.maskiou_fc2 = make_fc(representation_size, representation_size, use_gn=False)
        self.out_channels = representation_size
    def forward(self, x, mask):
        # Downsample mask to feature resolution (identity if already matched),
        # then fuse it as an extra input channel.
        mask_pool = self.max_pool2d(mask)
        x = torch.cat((x, mask_pool), 1)
        for layer_name in self.blocks:
            x = F.relu(getattr(self, layer_name)(x))
        x = x.view(x.size(0), -1)
        x = F.relu(self.maskiou_fc1(x))
        x = F.relu(self.maskiou_fc2(x))
        return x
def make_roi_maskiou_feature_extractor(cfg, in_channels):
    """Factory for the mask-IoU head feature extractor."""
    return MaskIoUFeatureExtractor(cfg, in_channels)
| 35.914634
| 105
| 0.664856
|
4a12be9bbc9ae39da5714134e87e4ea367e84169
| 3,000
|
py
|
Python
|
client/main.py
|
scz10/centerbot
|
7805c7fd70f24148a54135ee86bca5c3a28c5332
|
[
"MIT"
] | null | null | null |
client/main.py
|
scz10/centerbot
|
7805c7fd70f24148a54135ee86bca5c3a28c5332
|
[
"MIT"
] | null | null | null |
client/main.py
|
scz10/centerbot
|
7805c7fd70f24148a54135ee86bca5c3a28c5332
|
[
"MIT"
] | null | null | null |
import cv2
import time
import random
import json
from paho.mqtt import client as mqtt_client
broker = 'broker.emqx.io'
port = 1883
topic = "XXXXXXXXX" # fill this with channel name you input on arduino code
client_id = f'python-mqtt-{random.randint(0, 1000)}'
def connect_mqtt():
    """Create an MQTT client, attach a connect callback, and connect to the broker."""
    def on_connect(client, userdata, flags, rc):
        # rc == 0 means the broker accepted the connection.
        if rc != 0:
            print("Failed to connect, return code %d\n", rc)
        else:
            print("Connected to MQTT Broker!")
    mqttc = mqtt_client.Client(client_id)
    mqttc.on_connect = on_connect
    mqttc.connect(broker, port)
    return mqttc
def send_data(x, y):
    """Publish servo angles over MQTT as a JSON payload.

    Both axes drive standard servos, so each angle must lie in [0, 180].
    BUG FIX: the original guard ``0 <= x or y <= 180`` was almost always
    true due to operator precedence (it parsed as ``(0 <= x) or (y <= 180)``);
    validate both coordinates explicitly and silently drop out-of-range pairs.
    """
    if 0 <= x <= 180 and 0 <= y <= 180:
        client.publish(topic, json.dumps({'x': x, 'y': y}))
# Load the Haar cascade for frontal face detection
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# To capture video from webcam.
cap = cv2.VideoCapture(2)  # edit this with ur video capture device ID
_, frame = cap.read()
rows, cols, _ = frame.shape
# Pixel coordinates of the most recent detection; 0 means "no face yet".
detected_x = 0
detected_y = 0
# Current pan/tilt servo angles; 90 degrees centers both axes.
position_x = 90
position_y = 90
# Last published angles, so we only publish on change.
temp_x = 90
temp_y = 90
# To use a video file as input:
# cap = cv2.VideoCapture('filename.mp4')
client = connect_mqtt()
client.loop_start()
while True:
    # Read the frame
    _, img = cap.read()
    # Convert to grayscale for the cascade
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2,
                                          minNeighbors=5, minSize=(80, 80),
                                          flags=cv2.CASCADE_SCALE_IMAGE)
    # Draw the rectangle around each face; remember the last face center
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.line(img, (x+w//2, y), (x+w//2, y+h), (0, 0, 255), 2)
        # circle to denote mid point of center line
        center = (x+w//2, y+h//2)
        radius = 2
        cv2.circle(img, center, radius, (255, 255, 0), 2)
        detected_x = int(x+w//2)
        detected_y = int(y+h//2)
    # Nudge the pan angle toward the face; the -50/+70 px band around
    # column 320 is a dead zone that prevents jitter.
    if detected_x != 0:
        if 320 < detected_x - 50:
            if 0 <= position_x + 5 <= 180:
                position_x += 4
        elif 320 > detected_x + 70:
            if 0 <= position_x - 5 <= 180:
                position_x -= 4
    # Same for tilt around row 265 with an 80 px dead zone.
    if detected_y != 0:
        if 265 < detected_y - 80:
            if 0 <= position_y + 5 <= 180:
                position_y += 4
        elif 265 > detected_y + 80:
            if 0 <= position_y - 5 <= 180:
                position_y -= 4
    time.sleep(0.2)
    cv2.imshow('img', img)
    # FIX: the original condition ``if detected_x or detected_y != 0`` only
    # worked through int truthiness; state the intent explicitly.
    if detected_x != 0 or detected_y != 0:
        # Publish only when the target angles actually changed.
        if temp_x != position_x or temp_y != position_y:
            print(rows//2, detected_x, position_x, cols//2, detected_y, position_y)
            send_data(position_x, position_y)
        detected_x, detected_y = 0, 0
        temp_x, temp_y = position_x, position_y
    # Stop if escape key is pressed
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
# Release the VideoCapture object
cap.release()
| 27.777778
| 83
| 0.597667
|
4a12bf4615be3c68e6c039488724e03e0de1613c
| 18,860
|
py
|
Python
|
mesatee_services/acs/python/acs_engine.py
|
hshshjzsami/incubator-teaclave
|
1a671e6e9fdb1f1bc2e1b4804ac2e516409bae63
|
[
"Apache-2.0"
] | 1
|
2020-03-19T17:20:58.000Z
|
2020-03-19T17:20:58.000Z
|
mesatee_services/acs/python/acs_engine.py
|
hshshjzsami/incubator-teaclave
|
1a671e6e9fdb1f1bc2e1b4804ac2e516409bae63
|
[
"Apache-2.0"
] | 1
|
2020-03-06T02:26:20.000Z
|
2020-03-24T02:41:38.000Z
|
mesatee_services/acs/python/acs_engine.py
|
hshshjzsami/incubator-teaclave
|
1a671e6e9fdb1f1bc2e1b4804ac2e516409bae63
|
[
"Apache-2.0"
] | 1
|
2020-03-05T02:29:50.000Z
|
2020-03-05T02:29:50.000Z
|
###############################################################################
# Parser Combinators
###############################################################################
class Pair(tuple):
    """Immutable 2-tuple used by Concat to carry (left, right) parse results."""
    def __new__(cls, a, b):
        # tuple is immutable, so construction happens in __new__, not __init__.
        return super(Pair, cls).__new__(cls, (a, b))
class Either(object):
    """Disjoint union of two alternatives.

    The base class by itself is 'neither': both is_left() and is_right()
    return False. The Left/Right subclasses mark which side is populated,
    and the extractors raise ValueError when the wrong side is requested.
    """
    def __init__(self, left, right):
        self.__left = left
        self.__right = right
    def is_left(self):
        return False
    def is_right(self):
        return False
    def left(self):
        if self.is_left():
            return self.__left
        raise ValueError('wrong extractor for either')
    def right(self):
        if self.is_right():
            return self.__right
        raise ValueError('wrong extractor for either')
    def get(self):
        # Return whichever side is populated.
        if self.is_right():
            return self.right()
        if self.is_left():
            return self.left()
        raise ValueError('incomplete Either object')
    def __str__(self):
        if self.is_left():
            return 'Left(' + str(self.left()) + ')'
        return 'Right(' + str(self.right()) + ')'
    def __repr__(self):
        if self.is_left():
            return 'Left(' + repr(self.left()) + ')'
        return 'Right(' + repr(self.right()) + ')'
class Left(Either):
    """Either carrying its payload on the left side."""
    def is_left(self):
        return True
    def __init__(self, payload):
        super(Left, self).__init__(payload, None)
class Right(Either):
    """Either carrying its payload on the right side."""
    def is_right(self):
        return True
    def __init__(self, payload):
        super(Right, self).__init__(None, payload)
class Stream(object):
    """Immutable cursor over the input text.

    Every successful accept returns a NEW Stream advanced past the match,
    so parsers can backtrack simply by keeping the old object.
    """
    # Skippable whitespace; '\n' is deliberately excluded because line
    # breaks are significant in the config grammar.
    WHITESPACES = [' ', '\t', '\r']
    def __init__(self, items, pos = 0):
        self.__items = items
        self.__pos = pos
    def accept_strlit(self, string):
        """Match *string* at the cursor (after skipping whitespace); return
        the advanced Stream or raise ParsingError on mismatch."""
        # Typically parsers want to skip white spaces except line breaks
        # In the future this should be configurable
        pos = self.__pos
        l = len(self.__items)
        while pos < l and self.__items[pos] in self.WHITESPACES:
            pos += 1
        # Count the literal's own leading whitespace; it is stripped before
        # the comparison below.
        match_pos = 0
        l = len(string)
        while match_pos < l and string[match_pos] in self.WHITESPACES:
            match_pos += 1
        # NOTE(review): this compares an absolute stream position with the
        # literal's leading-whitespace count — looks suspicious; confirm intent.
        if pos < match_pos:
            raise ParsingError(self, 'expecting "{}"'.format(string))
        if match_pos:
            string = string[match_pos:]
        if self.__items.startswith(string, pos):
            return Stream(self.__items, pos + len(string))
        raise ParsingError(self, 'expecting "{}"'.format(string))
    def accept_matcher(self, matcher):
        """Run a raw matcher (text, pos) -> (obj, new_pos) or None after
        skipping whitespace; return (obj, advanced Stream)."""
        pos = self.__pos
        l = len(self.__items)
        while pos < l and self.__items[pos] in self.WHITESPACES:
            pos += 1
        res = matcher(self.__items, pos)
        if res is None:
            # The matcher's docstring doubles as its description in errors.
            raise ParsingError(self, 'matcher for {} failed'.format(matcher.__doc__))
        obj, npos = res
        return obj, Stream(self.__items, npos)
    def end(self):
        # True when the entire input has been consumed.
        return self.__pos == len(self.__items)
    def pos(self):
        return self.__pos
    def __repr__(self):
        # Render the current line with a caret under the cursor, truncated
        # to roughly 80 columns around the cursor for long lines.
        line_start = self.__items.rfind('\n', 0, self.__pos) + 1
        line_end = self.__items.find('\n', self.__pos)
        if line_end == -1:
            line_end = self.__pos
        if line_end - line_start > 80:
            line_start = max(line_start, self.__pos - 40)
            line_end = min(line_start + 80, len(self.__items))
        return ''.join([
            self.__items[line_start:line_end],
            '\n',
            ' ' * (self.__pos - line_start),
            '^',
            ' ' * (line_end - self.__pos),
            '\nerror at character ',
            str(self.__pos),
        ])
class State(object):
    """Outcome of running a parser: remaining stream, payload, success flag.

    Truthiness reflects success, so ``if state:`` reads naturally.
    """
    def __init__(self, stream, payload = None, success = True):
        self.stream = stream
        self.payload = payload
        self.success = success
    def __bool__(self):
        return self.success
    # Python 2 truthiness hook delegates to __bool__.
    def __nonzero__(self):
        return self.__bool__()
    def fmap(self, f):
        # Map over the payload of a successful state; failures pass through.
        if not self:
            return self
        return State(self.stream, f(self.payload))
class ParsingError(Exception):
    """Parse failure carrying the Stream positioned at the offending input."""
    def __init__(self, stream, msg = ''):
        super(ParsingError, self).__init__(msg)
        self.stream = stream
    def __repr__(self):
        # Delegate to the stream so the error renders a caret at the failure point.
        return repr(self.stream)
class Parser(object):
    """Abstract parser combinator.

    Operator overloads build composite parsers:
    ``|`` alternation, ``+`` sequencing, ``~`` repetition,
    unary ``-`` optional, ``**`` result transformation.
    """
    def __init__(self):
        pass
    def __call__(self, stream):
        raise NotImplementedError("pure abstract parser cannot be called")
    def parse_from(self, stream):
        """Run this parser and require it to consume the entire stream."""
        state = self(stream)
        if not state:
            raise ParsingError(state.stream, state.payload)
        if not state.stream.end():
            raise ParsingError(state.stream, 'trailing unparsable input')
        return state
    def fail(self, exception):
        # Convert an exception into a failed State (message as payload).
        return State(exception.stream, str(exception), False)
    def ignore(self):
        return Ignore(self)
    def __or__(self, p):
        return Or(self, p)
    def __add__(self, p):
        # Sequencing two ignored parsers yields an ignored sequence.
        if isinstance(self, Ignore) and isinstance(p, Ignore):
            return Ignore(Concat(self, p))
        return Concat(self, p)
    def __invert__(self):
        return Rep(self)
    def __neg__(self):
        return Optional(self)
    def __pow__(self, f):
        return Apply(self, f)
class Optional(Parser):
    """Wrap a parser so that failure yields Right(None) without consuming input."""
    def __init__(self, opt):
        super(Optional, self).__init__()
        self.__inner = opt
    def __call__(self, stream):
        attempt = self.__inner(stream)
        if not attempt:
            # Failure becomes success with an empty Right payload, at the
            # ORIGINAL position (nothing consumed).
            return State(stream, Right(None))
        return attempt.fmap(Left)
class StrLiteral(Parser):
    """Match a fixed string; leading whitespace is tolerated by the stream."""
    def __init__(self, string):
        super(StrLiteral, self).__init__()
        self.__lit = string
    def __call__(self, stream):
        if stream.end():
            err = ParsingError(
                stream, 'insufficient input, expecting {}'.format(self.__lit))
            return self.fail(err)
        try:
            advanced = stream.accept_strlit(self.__lit)
        except ParsingError as e:
            return self.fail(e)
        # Payload is the literal itself.
        return State(advanced, self.__lit)
class CustomMatcher(Parser):
    """Adapt a raw matcher ``(text, pos) -> (obj, new_pos) | None`` into a parser."""
    def __init__(self, matcher):
        super(CustomMatcher, self).__init__()
        self.__fn = matcher
    def __call__(self, stream):
        try:
            res = stream.accept_matcher(self.__fn)
        except ParsingError as e:
            return self.fail(e)
        obj, rest = res
        return State(rest, obj)
class Concat(Parser):
    """Sequence two parsers; Ignore-wrapped sides are dropped from the payload.

    Parser.__add__ arranges that a Concat whose children are BOTH Ignore is
    itself wrapped in Ignore, so __call__ below can assume at most the
    payload pairing case needs both sides.
    """
    def __init__(self, c1, c2):
        super(Concat, self).__init__()
        # BUG FIX: the original had
        #     assert not isinstance(self, Ignore) or not isinstance(p, Ignore)
        # where ``p`` is undefined — the assert always short-circuited on the
        # first operand (self is a Concat, never an Ignore), so it was a
        # no-op masking a latent NameError. It is removed rather than
        # "fixed", because Parser.__add__ legitimately constructs
        # Concat(Ignore, Ignore) before wrapping it in Ignore.
        self.__first = c1
        self.__second = c2
    def __call__(self, stream):
        n_state = self.__first(stream)
        if not n_state:
            return n_state
        p1 = n_state.payload
        n_state = self.__second(n_state.stream)
        if not n_state:
            return n_state
        p2 = n_state.payload
        # Drop the payload of any Ignore side.
        if isinstance(self.__first, Ignore):
            return State(n_state.stream, p2)
        if isinstance(self.__second, Ignore):
            return State(n_state.stream, p1)
        # Neither side ignored: pair both payloads.
        return State(n_state.stream, Pair(p1, p2))
class Or(Parser):
    """Ordered choice: try the first parser, fall back to the second.

    The payload is tagged Left/Right according to which branch matched.
    """
    def __init__(self, c1, c2):
        super(Or, self).__init__()
        self.__primary = c1
        self.__fallback = c2
    def __call__(self, stream):
        taken = self.__primary(stream)
        if taken:
            return taken.fmap(Left)
        alt = self.__fallback(stream)
        if alt:
            return alt.fmap(Right)
        return alt
class Rep(Parser):
    """Zero-or-more repetition; always succeeds with a list of payloads."""
    def __init__(self, c):
        super(Rep, self).__init__()
        self.__body = c
    def __call__(self, stream):
        # Iterative equivalent of the usual recursive formulation: keep
        # applying the body until it fails, collecting payloads.
        results = []
        while True:
            step = self.__body(stream)
            if not step:
                break
            results.append(step.payload)
            stream = step.stream
        return State(stream, results)
class Apply(Parser):
    """Transform the payload of a successful parse with a function."""
    def __init__(self, base, f):
        super(Apply, self).__init__()
        self.__base = base
        self.__fn = f
    def __call__(self, stream):
        state = self.__base(stream)
        # fmap is a no-op on failed states, so no explicit check is needed.
        return state.fmap(self.__fn)
class Ignore(Parser):
    """Marker wrapper: parses normally, but Concat drops this side's payload."""
    def __init__(self, base):
        super(Ignore, self).__init__()
        self.__base = base
    def __call__(self, stream):
        # Pure delegation; only the wrapper's type matters to Concat.
        return self.__base(stream)
###############################################################################
# Grammars for PERM model configuration
###############################################################################
from operator import or_, add
def extract(nested_or):
    """Collapse nested Either wrappers down to the innermost payload."""
    result = nested_or
    while isinstance(result, Either):
        result = result.left() if result.is_left() else result.right()
    return result
def flatten(nested_concat):
    """Flatten a tree of Pair nodes into a flat list, left-to-right (pre-order)."""
    out = []
    stack = [nested_concat]
    while stack:
        node = stack.pop()
        if isinstance(node, Pair):
            # Push right first so the left child is processed first.
            stack.append(node[1])
            stack.append(node[0])
        else:
            out.append(node)
    return out
def one_of(parsers):
    """Alternate over *parsers* (folded with ``|``) and unwrap the Either tags.

    BUG FIX: ``reduce`` is a builtin only on Python 2; import it from
    functools so this also works on Python 3 (functools.reduce exists on
    both, so the fix is compatible either way).
    """
    from functools import reduce
    nested = reduce(or_, parsers)
    return nested ** extract
def join(sl):
    """Concatenate a sequence of strings into a single string."""
    return ''.join(sl)
def rep_with_sep(to_rep, sep):
    """One-or-more *to_rep* separated by *sep*; separators are dropped.

    The payload is a flat list of the *to_rep* results.
    """
    if not isinstance(sep, Ignore):
        sep = sep.ignore()
    repeated = to_rep + ~(sep + to_rep)
    # Payload is Pair(first, [rest...]); normalize to one flat list.
    return repeated ** (lambda pair: [pair[0]] + pair[1])
# Character classes for the identifier grammar.
ALPHA = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
DIGIT = set('0123456789')
ALPHA_DIGIT = ALPHA | DIGIT
# Single-character parsers (any one letter / any one digit).
Alpha = one_of(map(StrLiteral, ALPHA))
Digit = one_of(map(StrLiteral, DIGIT))
# Punctuation parsers whose payloads are dropped from results.
Equal, Comma, Dot = [StrLiteral(c).ignore() for c in ['=', ',', '.']]
Underscore = StrLiteral('_')
# Zero-or-more '\n' characters, ignored (Rep always succeeds).
NewLine = (~ StrLiteral('\n')).ignore()
def identifier_matcher(text, pos):
    """identifier"""
    # C-style identifier: leading letter or underscore, then any mix of
    # letters, digits and underscores. (The one-word docstring above is
    # runtime-visible in parse-failure messages, so it is kept as-is.)
    limit = len(text)
    if pos >= limit:
        return None
    lead = text[pos]
    if lead != '_' and lead not in ALPHA:
        return None
    cursor = pos + 1
    while cursor < limit:
        ch = text[cursor]
        if ch != '_' and ch not in ALPHA_DIGIT:
            break
        cursor += 1
    return text[pos:cursor], cursor
# A C-style identifier token.
Identifier = CustomMatcher(identifier_matcher)
# Comma-separated list of identifiers.
IdTuple = rep_with_sep(Identifier, Comma)
# name = id, id, ... <newline>  -> Pair(name, [ids])
Definition = Identifier + Equal + IdTuple + NewLine
Relation = Identifier + Equal + IdTuple + NewLine
# Reduce a relation to (name, arity).
# NOTE(review): x[1] is the flat identifier list from rep_with_sep, so
# x[1][1] is its SECOND element and len() of an identifier string looks
# unintended; confirm against callers (Relation is unused in ModelDef below).
Relation = Relation ** (lambda x: (x[0], 1 + len(x[1][1])))
def pyparser_matcher(text, pos):
    """syntactically correct python code"""
    # Matches the remainder of the current line iff it compiles as a Python
    # 'eval' expression. (The docstring above is runtime-visible in
    # parse-failure messages, so it is kept as-is.)
    line_end = text.find('\n', pos)
    if line_end < 0:
        return None
    snippet = text[pos:line_end]
    try:
        code = compile(snippet, '__abac_model__.py', 'eval')
    except SyntaxError:
        return None
    # The compiled code object is the payload, for later eval().
    return code, line_end
# One line of syntactically valid Python, compiled for later eval().
PyExpr = CustomMatcher(pyparser_matcher)
# name = <python expression> <newline>
Matcher = Identifier + Equal + PyExpr + NewLine
# Section headers.
RequestDefHeader = StrLiteral('[requests]') + NewLine
TermDefHeader = StrLiteral('[terms]') + NewLine
MatchersHeader = StrLiteral('[matchers]') + NewLine
# Each section: its header (dropped) followed by repeated entries.
RequestDefSec = RequestDefHeader.ignore() + ~Definition
TermDefSec = TermDefHeader.ignore() + ~Definition
MatchersSec = MatchersHeader.ignore() + ~Matcher
# Whole config: the three sections in order, flattened into the list
# [request_defs, term_defs, matchers] consumed by Model.
ModelDef = (RequestDefSec + TermDefSec + MatchersSec) ** flatten
def preprocess(conf):
    """Normalize a config string before parsing.

    Splices escaped line breaks, strips '#' comments, trims surrounding
    whitespace, and guarantees exactly one trailing newline.
    """
    # A backslash at end-of-line continues the logical line.
    joined = conf.replace('\\\n', '')
    # Drop everything after '#' on each line.
    kept = []
    for line in joined.splitlines():
        kept.append(line.partition('#')[0])
    normalized = '\n'.join(kept).strip()
    return normalized + '\n'
def parse_model(text):
    """Parse a PERM model definition into its raw [requests, terms, matchers] form."""
    normalized = preprocess(text)
    return ModelDef.parse_from(Stream(normalized)).payload
class InvalidModelDefinition(Exception):
    """Raised when a parsed PERM model violates structural invariants."""
    def __init__(self, msg = ''):
        super(InvalidModelDefinition, self).__init__(msg)
    @staticmethod
    def redundant_def(redefined_vars, g1, g2):
        """Error for identifiers defined in more than one section.

        BUG FIX: the original referenced a misspelled name ``redfined_vars``
        (a NameError whenever this path was reached) and glued the parts
        with ``''.join`` so the words ran together; join with spaces.
        """
        msg_parts = [
            'multiple definition(s) of identifiers(s)',
            ', '.join(redefined_vars),
            'found in sections',
            g1, g2
        ]
        return InvalidModelDefinition(' '.join(msg_parts))
    @staticmethod
    def missing_matchers(missing_matchers):
        """Error for request types that have no matcher expression."""
        msg = 'missing matcher(s) for request type(s): {}'
        return InvalidModelDefinition(msg.format(', '.join(missing_matchers)))
    @staticmethod
    def unknown_requests(unknown_requests):
        """Error for matchers whose request type was never declared."""
        msg = 'matcher(s) defined for unknown request type(s): {}'
        return InvalidModelDefinition(msg.format(', '.join(unknown_requests)))
class Request(object):
    """Attribute bag built from parallel lists of names and values.

    Matcher expressions reference these attributes as ``req.attr``.
    """
    def __init__(self, attrs, vals):
        assert len(attrs) == len(vals)
        self.__named_attrs = attrs
        for name, value in zip(attrs, vals):
            setattr(self, name, value)
    def __repr__(self):
        pieces = ['Request {\n']
        for name in self.__named_attrs:
            pieces.append(' {}: {!r}\n'.format(name, getattr(self, name)))
        pieces.append('}\n')
        return ''.join(pieces)
class QueryResult(object):
    """Single-use iterable of query matches, comparable to iterables as sets.

    NOTE: the underlying generator is consumed by the first iteration or
    comparison; a QueryResult cannot be compared twice.
    """
    def __init__(self, generator):
        self.__gen = generator
    def __iter__(self):
        return self.__gen
    def __le__(self, other):
        return set(self) <= set(other)
    def __lt__(self, other):
        return set(self) < set(other)
    def __ge__(self, other):
        return set(self) >= set(other)
    def __gt__(self, other):
        return set(self) > set(other)
class Term(object):
    """A relation: a set of fixed-arity fact tuples with two query modes.

    Calling with all-concrete arguments is a membership test (bool);
    calling with PLACEHOLDER arguments yields the matching bindings as a
    lazily evaluated QueryResult.
    """
    # Sentinel marking a position to be bound by a query ('_' in matchers).
    PLACEHOLDER = object()
    # Wildcard value ('X' in matchers).
    WILDCARD = None
    def __init__(self, arity):
        self.__arity = arity
        self.__facts = set()
    def add_facts(self, facts):
        for f in facts:
            self.add_fact(f)
    def add_fact(self, fact):
        assert len(fact) == self.__arity
        # Stored as tuples so facts are hashable and deduplicated.
        self.__facts.add(tuple(fact))
    def __call__(self, *args):
        assert len(args) == self.__arity
        holes = sum(1 for a in args if a is Term.PLACEHOLDER)
        if holes == 0:
            # Fully concrete query: plain membership test over known facts.
            return any(all(x == y for x, y in zip(fact, args)) for fact in self.__facts)
        # Pattern query: yield the values bound at each placeholder position
        # for every fact matching the concrete positions.
        def matches():
            for fact in self.__facts:
                bound = []
                ok = True
                for got, want in zip(fact, args):
                    if want is Term.PLACEHOLDER:
                        bound.append(got)
                    elif got != want:
                        ok = False
                        break
                if ok:
                    # A single placeholder yields bare values, not 1-tuples.
                    yield bound[0] if holes == 1 else tuple(bound)
        return QueryResult(matches())
class Model(object):
    """PERM access-control model: request templates, term relations, matchers.

    Built from the raw [request_defs, term_defs, matchers] list produced by
    parse_model(); validates structural invariants at construction time.
    """
    def __init__(self, raw_model):
        request_def, term_def, matchers = raw_model
        self.__request_template = { r[0]:r[1] for r in request_def }
        self.__term_template = { t[0]:t[1] for t in term_def }
        self.__matchers = { m[0]:m[1] for m in matchers }
        # BUG FIX: the original called len() on and indexed a zip() object,
        # which fails on Python 3 where zip is a lazy iterator; materialize
        # it as a list (a no-op change on Python 2).
        def_sections = list(zip(
            ['[requests]', '[terms]'],
            [self.__request_template, self.__term_template],
        ))
        n_sec = len(def_sections)
        # No identifier may be defined in more than one section.
        for i in range(n_sec):
            for j in range(i + 1, n_sec):
                overlap = set(def_sections[i][1].keys()) & set(def_sections[j][1].keys())
                if overlap:
                    # BUG FIX: original passed the misspelled name ``overalp``
                    # (NameError whenever an overlap was actually found).
                    raise InvalidModelDefinition.redundant_def(
                        overlap, def_sections[i][0], def_sections[j][0]
                    )
        # Every request type needs a matcher, and vice versa.
        missing_matchers = set(self.__request_template.keys()) - set(self.__matchers.keys())
        if missing_matchers:
            raise InvalidModelDefinition.missing_matchers(missing_matchers)
        unknown_requests = set(self.__matchers.keys()) - set(self.__request_template.keys())
        if unknown_requests:
            raise InvalidModelDefinition.unknown_requests(unknown_requests)
        # One empty Term per declared relation, sized by its template arity.
        self.__term_knowledge_base = {
            term_name:Term(len(term_tpl)) for term_name, term_tpl in self.__term_template.items()
        }
    def add_term_items(self, term_items):
        # Each item is (term_name, v1, v2, ...).
        for ti in term_items:
            self.add_term_item(ti[0], ti[1:])
    def add_term_item(self, term_name, fact):
        """Add one fact tuple to the named term relation."""
        term = self.__term_knowledge_base[term_name]
        term.add_fact(fact)
    def get_matcher_proxy(self, request_type, env):
        """Return a zero-argument callable evaluating the matcher in *env*."""
        def matcher_proxy():
            return eval(self.__matchers[request_type], env)
        return matcher_proxy
    def enforce(self, request_type, request_content):
        """Evaluate the matcher for *request_type* against *request_content*."""
        tpl = self.__request_template[request_type]
        request = Request(tpl, request_content)
        # Matchers see the request under its type name, JSON-ish literals,
        # the placeholder/wildcard sentinels, and every term relation.
        enforcer_env = {
            request_type: request,
            'true': True, 'false': False, 'null': None,
            '_': Term.PLACEHOLDER,
            'X': Term.WILDCARD,
        }
        enforcer_env.update(self.__term_knowledge_base)
        # NOTE: matchers come from the (trusted) model config, not from the
        # request itself; eval of untrusted matcher text would be unsafe.
        return eval(self.__matchers[request_type], enforcer_env)
# Module-level singleton installed by acs_setup_model().
global_perm_model = None
# When loaded inside the enclave runtime this module's __name__ is
# '__builtin__' and the real cffi bindings are importable; otherwise fall
# back to a no-op stub so the module can be imported and tested standalone.
# NOTE(review): the '__builtin__' check presumably reflects how the SGX
# embedder executes this file — confirm against the enclave loader.
if __name__ == '__builtin__':
    from acs_py_enclave import ffi
else:
    class ffi:
        @staticmethod
        def def_extern():
            # Identity decorator matching cffi's def_extern() signature.
            return lambda x: x
        @staticmethod
        def string(s):
            # Pass-through replacing cffi's C-string conversion.
            return s
@ffi.def_extern()
def acs_setup_model(conf):
    """FFI entry: parse *conf* and install the global PERM model.

    Returns 0 on success, -1 on any failure (C-style status code).
    """
    try:
        global global_perm_model
        conf = ffi.string(conf)
        global_perm_model = Model(parse_model(conf))
    # BUG FIX: narrowed from a bare ``except:`` which also swallowed
    # SystemExit/KeyboardInterrupt; Exception keeps the best-effort -1
    # contract without masking interpreter-level signals.
    except Exception:
        return -1
    return 0
@ffi.def_extern()
def acs_enforce_request(request_type, request_content):
    """FFI entry: evaluate the model's matcher for one request.

    Returns the matcher's result on success, -1 on any failure.
    """
    try:
        request_type = ffi.string(request_type)
        # request_content is a list of ffi c strings which are syntactically
        # valid python primitive-type objects (strings, integers, floating
        # point numbers, and lists/dictionaries of primitive-type objects).
        # SECURITY NOTE: eval() executes arbitrary Python; this is only
        # acceptable because the caller is the trusted enclave runtime —
        # never expose this entry point to untrusted input.
        request_content = eval(ffi.string(request_content))
        return global_perm_model.enforce(request_type, request_content)
    # BUG FIX: narrowed from a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        return -1
@ffi.def_extern()
def acs_announce_fact(term_type, term_fact):
    """FFI entry: add one fact tuple to a term relation.

    Returns 0 on success, -1 on any failure (C-style status code).
    """
    try:
        term_type = ffi.string(term_type)
        # SECURITY NOTE: eval() executes arbitrary Python; acceptable only
        # because facts come from the trusted enclave runtime.
        term_fact = eval(ffi.string(term_fact))
        global_perm_model.add_term_item(term_type, term_fact)
    # BUG FIX: narrowed from a bare ``except:`` so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        return -1
    return 0
| 29.841772
| 97
| 0.585843
|
4a12bff149e147422347a63c544dfbbd4c5bc641
| 12,061
|
py
|
Python
|
superset/config.py
|
eric-erki/Incubator-superset
|
0ed66c9e02a8150b9f866332b4f43f6b058ca289
|
[
"Apache-2.0"
] | null | null | null |
superset/config.py
|
eric-erki/Incubator-superset
|
0ed66c9e02a8150b9f866332b4f43f6b058ca289
|
[
"Apache-2.0"
] | null | null | null |
superset/config.py
|
eric-erki/Incubator-superset
|
0ed66c9e02a8150b9f866332b4f43f6b058ca289
|
[
"Apache-2.0"
] | null | null | null |
"""The main config file for Superset
All configuration in this file can be overridden by providing a superset_config
in your PYTHONPATH as there is a ``from superset_config import *``
at the end of this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import imp
import json
import os
import sys
from collections import OrderedDict
from dateutil import tz
from flask_appbuilder.security.manager import AUTH_DB
from superset.stats_logger import DummyStatsLogger
# Realtime stats logger, a StatsD implementation exists
STATS_LOGGER = DummyStatsLogger()
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
if 'SUPERSET_HOME' in os.environ:
DATA_DIR = os.environ['SUPERSET_HOME']
else:
DATA_DIR = os.path.join(os.path.expanduser('~'), '.superset')
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
# ---------------------------------------------------------
# Superset specific config
# ---------------------------------------------------------
PACKAGE_DIR = os.path.join(BASE_DIR, 'static', 'assets')
PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
with open(PACKAGE_FILE) as package_file:
VERSION_STRING = json.load(package_file)['version']
ROW_LIMIT = 50000
VIZ_ROW_LIMIT = 10000
SUPERSET_WORKERS = 2
SUPERSET_CELERY_WORKERS = 32
SUPERSET_WEBSERVER_ADDRESS = '0.0.0.0'
SUPERSET_WEBSERVER_PORT = 8088
SUPERSET_WEBSERVER_TIMEOUT = 60
EMAIL_NOTIFICATIONS = False
CUSTOM_SECURITY_MANAGER = None
SQLALCHEMY_TRACK_MODIFICATIONS = False
# ---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h' # noqa
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(DATA_DIR, 'superset.db')
# SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
# SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# In order to hook up a custom password store for all SQLACHEMY connections
# implement a function that takes a single argument of type 'sqla.engine.url',
# returns a password and set SQLALCHEMY_CUSTOM_PASSWORD_STORE.
#
# e.g.:
# def lookup_password(url):
# return 'secret'
# SQLALCHEMY_CUSTOM_PASSWORD_STORE = lookup_password
# The limit of queries fetched for query search
QUERY_SEARCH_LIMIT = 1000
# Flask-WTF flag for CSRF
WTF_CSRF_ENABLED = True
# Add endpoints that need to be exempt from CSRF protection
WTF_CSRF_EXEMPT_LIST = []
# Whether to run the web server in debug mode or not
DEBUG = False
FLASK_USE_RELOAD = True
# Whether to show the stacktrace on 500 error
SHOW_STACKTRACE = True
# Extract and use X-Forwarded-For/X-Forwarded-Proto headers?
ENABLE_PROXY_FIX = False
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = "Superset"
# Uncomment to setup an App icon
APP_ICON = "/static/assets/images/superset-logo@2x.png"
# Druid query timezone
# tz.tzutc() : Using utc timezone
# tz.tzlocal() : Using local timezone
# tz.gettz('Asia/Shanghai') : Using the time zone with specific name
# [TimeZone List]
# See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# other tz can be overridden by providing a local_config
DRUID_IS_ACTIVE = True
DRUID_TZ = tz.tzutc()
DRUID_ANALYSIS_TYPES = ['cardinality']
# ----------------------------------------------------
# AUTHENTICATION CONFIG
# ----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password()
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
# AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
# AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
# AUTH_USER_REGISTRATION = True
# The default user self registration role
# AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
# AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
# OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
# ---------------------------------------------------
# Roles config
# ---------------------------------------------------
# Grant public role the same set of permissions as for the GAMMA role.
# This is useful if one wants to enable anonymous users to view
# dashboards. Explicit grant on specific datasets is still required.
PUBLIC_ROLE_LIKE_GAMMA = False
# ---------------------------------------------------
# Babel config for translations
# ---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'babel/translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag': 'us', 'name': 'English'},
'it': {'flag': 'it', 'name': 'Italian'},
'fr': {'flag': 'fr', 'name': 'French'},
'zh': {'flag': 'cn', 'name': 'Chinese'},
'ja': {'flag': 'jp', 'name': 'Japanese'},
}
# ---------------------------------------------------
# Image and file configuration
# ---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
# IMG_SIZE = (300, 200, True)
CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24
CACHE_CONFIG = {'CACHE_TYPE': 'null'}
TABLE_NAMES_CACHE_CONFIG = {'CACHE_TYPE': 'null'}
# CORS Options
ENABLE_CORS = False
CORS_OPTIONS = {}
# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv method
# note: index option should not be overridden
CSV_EXPORT = {
'encoding': 'utf-8',
}
# ---------------------------------------------------
# List of viz_types not allowed in your environment
# For example: Blacklist pivot table and treemap:
# VIZ_TYPE_BLACKLIST = ['pivot_table', 'treemap']
# ---------------------------------------------------
VIZ_TYPE_BLACKLIST = []
# ---------------------------------------------------
# List of data sources not to be refreshed in druid cluster
# ---------------------------------------------------
DRUID_DATA_SOURCE_BLACKLIST = []
# --------------------------------------------------
# Modules, datasources and middleware to be registered
# --------------------------------------------------
DEFAULT_MODULE_DS_MAP = OrderedDict([
('superset.connectors.sqla.models', ['SqlaTable']),
('superset.connectors.druid.models', ['DruidDatasource']),
])
ADDITIONAL_MODULE_DS_MAP = {}
ADDITIONAL_MIDDLEWARE = []
"""
1) http://docs.python-guide.org/en/latest/writing/logging/
2) https://docs.python.org/2/library/logging.config.html
"""
# Console Log Settings
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
LOG_LEVEL = 'DEBUG'
# ---------------------------------------------------
# Enable Time Rotate Log Handler
# ---------------------------------------------------
# LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
ENABLE_TIME_ROTATE = False
TIME_ROTATE_LOG_LEVEL = 'DEBUG'
FILENAME = os.path.join(DATA_DIR, 'superset.log')
ROLLOVER = 'midnight'
INTERVAL = 1
BACKUP_COUNT = 30
# Set this API key to enable Mapbox visualizations
MAPBOX_API_KEY = ""
# Maximum number of rows returned in the SQL editor
SQL_MAX_ROW = 1000000
DISPLAY_SQL_MAX_ROW = 1000
# Maximum number of tables/views displayed in the dropdown window in SQL Lab.
MAX_TABLE_NAMES = 3000
# If defined, shows this text in an alert-warning box in the navbar
# one example use case may be "STAGING" to make it clear that this is
# not the production version of the site.
WARNING_MSG = None
# Default celery config is to use SQLA as a broker, in a production setting
# you'll want to use a proper broker as specified here:
# http://docs.celeryproject.org/en/latest/getting-started/brokers/index.html
"""
# Example:
class CeleryConfig(object):
BROKER_URL = 'sqla+sqlite:///celerydb.sqlite'
CELERY_IMPORTS = ('superset.sql_lab', )
CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}}
CELERYD_LOG_LEVEL = 'DEBUG'
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = True
CELERY_CONFIG = CeleryConfig
"""
CELERY_CONFIG = None
SQL_CELERY_DB_FILE_PATH = os.path.join(DATA_DIR, 'celerydb.sqlite')
SQL_CELERY_RESULTS_DB_FILE_PATH = os.path.join(DATA_DIR, 'celery_results.sqlite')
# static http headers to be served by your Superset server.
# The following example prevents iFrame from other domains
# and "clickjacking" as a result
# HTTP_HEADERS = {'X-Frame-Options': 'SAMEORIGIN'}
HTTP_HEADERS = {}
# The db id here results in selecting this one as a default in SQL Lab
DEFAULT_DB_ID = None
# Timeout duration for SQL Lab synchronous queries
SQLLAB_TIMEOUT = 30
# SQLLAB_DEFAULT_DBID
SQLLAB_DEFAULT_DBID = None
# The MAX duration (in seconds) a query can run for before being killed
# by celery.
SQLLAB_ASYNC_TIME_LIMIT_SEC = 60 * 60 * 6
# An instantiated derivative of werkzeug.contrib.cache.BaseCache
# if enabled, it can be used to store the results of long-running queries
# in SQL Lab by using the "Run Async" button/feature
RESULTS_BACKEND = None
# A dictionary of items that gets merged into the Jinja context for
# SQL Lab. The existing context gets updated with this dictionary,
# meaning values for existing keys get overwritten by the content of this
# dictionary.
JINJA_CONTEXT_ADDONS = {}
# Roles that are controlled by the API / Superset and should not be changes
# by humans.
ROBOT_PERMISSION_ROLES = ['Public', 'Gamma', 'Alpha', 'Admin', 'sql_lab']
# Environment variable naming an extra config file to load (see bottom of file).
CONFIG_PATH_ENV_VAR = 'SUPERSET_CONFIG_PATH'
# smtp server configuration
# NOTE(review): EMAIL_NOTIFICATIONS is also assigned earlier in this file;
# this later assignment wins — the duplicate should be removed upstream.
EMAIL_NOTIFICATIONS = False  # all the emails are sent using dryrun
SMTP_HOST = 'localhost'
SMTP_STARTTLS = True
SMTP_SSL = False
SMTP_USER = 'superset'
SMTP_PORT = 25
SMTP_PASSWORD = 'superset'
SMTP_MAIL_FROM = 'superset@superset.com'
# NOTE(review): dead branch — CACHE_DEFAULT_TIMEOUT is set to a non-zero
# value above, so this condition can never be true as written.
if not CACHE_DEFAULT_TIMEOUT:
    CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
# Whether to bump the logging level to ERRROR on the flask_appbiulder package
# Set to False if/when debugging FAB related issues like
# permission management
SILENCE_FAB = True
# The link to a page containing common errors and their resolutions
# It will be appended at the bottom of sql_lab errors.
TROUBLESHOOTING_LINK = ""
# Integrate external Blueprints to the app by passing them to your
# configuration. These blueprints will get integrated in the app
BLUEPRINTS = []
# Provide a callable that receives a tracking_url and returns another
# URL. This is used to translate internal Hadoop job tracker URL
# into a proxied one
TRACKING_URL_TRANSFORMER = lambda x: x
# Overlay local overrides onto this module: either from an explicit file
# named by SUPERSET_CONFIG_PATH, or from an importable `superset_config`
# module on PYTHONPATH. Missing overrides are silently ignored.
try:
    if CONFIG_PATH_ENV_VAR in os.environ:
        # Explicitly import config module that is not in pythonpath; useful
        # for case where app is being executed via pex.
        print('Loaded your LOCAL configuration at [{}]'.format(
            os.environ[CONFIG_PATH_ENV_VAR]))
        module = sys.modules[__name__]
        override_conf = imp.load_source('superset_config', os.environ[CONFIG_PATH_ENV_VAR])
        # Copy only UPPERCASE names — the conventional config-constant shape.
        for key in dir(override_conf):
            if key.isupper():
                setattr(module, key, getattr(override_conf, key))
    else:
        from superset_config import *  # noqa
        import superset_config
        print('Loaded your LOCAL configuration at [{}]'.format(
            superset_config.__file__))
# No local override present: run on the defaults defined above.
except ImportError:
    pass
| 33.31768
| 91
| 0.680623
|
4a12c0a1dea5aed04861b3b739a01406851c6b03
| 394
|
py
|
Python
|
manage.py
|
mikenthiwa/dream_team
|
81c85e2acac59ff91e3814b093b785ac311a04e4
|
[
"MIT"
] | null | null | null |
manage.py
|
mikenthiwa/dream_team
|
81c85e2acac59ff91e3814b093b785ac311a04e4
|
[
"MIT"
] | null | null | null |
manage.py
|
mikenthiwa/dream_team
|
81c85e2acac59ff91e3814b093b785ac311a04e4
|
[
"MIT"
] | null | null | null |
# manage.py
# Command-line entry point: wires Flask-Migrate's `db` commands into a
# Flask-Script Manager built around the application factory.
import os
from flask_script import Manager  # class for handling a set of commands
from flask_migrate import Migrate, MigrateCommand
from app import db, create_app
from app import models  # imported for side effects (model registration) -- TODO confirm
# Build the app from the config named by FLASK_CONFIG (None falls back to
# whatever default create_app applies).
app = create_app(config_name=os.getenv('FLASK_CONFIG'))
migrate = Migrate(app, db)
manager = Manager(app)
# Expose migration commands as `python manage.py db <subcommand>`.
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| 24.625
| 71
| 0.774112
|
4a12c0d4fc28b52f0d7e97523cb5a221bd31a1d2
| 5,886
|
py
|
Python
|
server/tests/fixtures/database_fixtures.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-10T14:32:32.000Z
|
2020-05-10T14:32:32.000Z
|
server/tests/fixtures/database_fixtures.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
server/tests/fixtures/database_fixtures.py
|
louisditzel/prefect
|
b1a02fee623b965e756a38aa09059db780ab67eb
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed under the Prefect Community License, available at
# https://www.prefect.io/legal/prefect-community-license
import datetime
import inspect
import uuid
import warnings
from box import Box
import pendulum
import pytest
from asynctest import CoroutineMock
from click.testing import CliRunner
import prefect
import prefect_server
from prefect.engine.state import Running, Submitted, Success
from prefect_server import api, config
from prefect_server.database import hasura, models
import sqlalchemy as sa
@pytest.fixture(scope="session")
def sqlalchemy_engine():
    """Session-wide SQLAlchemy engine bound to the configured database URL."""
    connection_url = config.database.connection_url
    return sa.create_engine(connection_url)
@pytest.fixture(autouse=True)
async def delete_data_after_each_test():
    """Run the test, then wipe all Flow rows.

    The finally-block runs even when the test fails, so each test starts
    from an empty flows table. Presumably deletes cascade to dependent
    rows (runs, tasks, edges) -- TODO confirm against the schema.
    """
    try:
        yield
    finally:
        await models.Flow.where().delete()
@pytest.fixture
async def flow_id():
    """Create the standard test flow and return its server-side id.

    The flow has a daily schedule, a t1 -> t2 edge (tags overlap on "red"),
    and a single parameter x defaulting to 1.
    """
    flow = prefect.Flow(
        name="Test Flow",
        schedule=prefect.schedules.IntervalSchedule(
            start_date=pendulum.datetime(2018, 1, 1),
            interval=datetime.timedelta(days=1),
        ),
    )
    flow.add_edge(
        prefect.Task("t1", tags={"red", "blue"}),
        prefect.Task("t2", tags={"red", "green"}),
    )
    flow.add_task(prefect.Parameter("x", default=1))
    flow_id = await api.flows.create_flow(serialized_flow=flow.serialize())
    return flow_id
@pytest.fixture
async def labeled_flow_id():
    """Create a test flow whose environment carries labels ["foo", "bar"].

    Otherwise identical in shape to the standard test flow: daily schedule,
    t1 -> t2 edge, and a parameter x defaulting to 1.
    """
    flow = prefect.Flow(
        name="Labeled Flow",
        environment=prefect.environments.execution.remote.RemoteEnvironment(
            labels=["foo", "bar"]
        ),
        schedule=prefect.schedules.IntervalSchedule(
            start_date=pendulum.datetime(2018, 1, 1),
            interval=datetime.timedelta(days=1),
        ),
    )
    flow.add_edge(
        prefect.Task("t1", tags={"red", "blue"}),
        prefect.Task("t2", tags={"red", "green"}),
    )
    flow.add_task(prefect.Parameter("x", default=1))
    flow_id = await api.flows.create_flow(serialized_flow=flow.serialize())
    return flow_id
@pytest.fixture
async def schedule_id(flow_id):
    """Return the id of the schedule attached to the standard test flow."""
    row = await models.Schedule.where({"flow_id": {"_eq": flow_id}}).first("id")
    return row.id
@pytest.fixture
async def task_id(flow_id):
    """Return the id of one task belonging to the standard test flow."""
    criteria = {"flow_id": {"_eq": flow_id}}
    row = await models.Task.where(criteria).first("id")
    return row.id
@pytest.fixture
async def labeled_task_id(labeled_flow_id):
    """Return the id of one task belonging to the labeled test flow."""
    criteria = {"flow_id": {"_eq": labeled_flow_id}}
    row = await models.Task.where(criteria).first("id")
    return row.id
@pytest.fixture
async def parameter_id(flow_id):
    """Return the id of the flow's Parameter task (matched by type name)."""
    criteria = {"flow_id": {"_eq": flow_id}, "type": {"_like": "%Parameter%"}}
    row = await models.Task.where(criteria).first("id")
    return row.id
@pytest.fixture
async def edge_id(flow_id):
    """Return the id of one edge of the standard test flow."""
    row = await models.Edge.where({"flow_id": {"_eq": flow_id}}).first("id")
    return row.id
@pytest.fixture
async def flow_run_id(flow_id):
    """Create a run of the standard flow with x=1 and return its id."""
    run_id = await api.runs.create_flow_run(flow_id=flow_id, parameters={"x": 1})
    return run_id
@pytest.fixture
async def labeled_flow_run_id(labeled_flow_id):
    """Create a run of the labeled flow with x=1 and return its id."""
    run_id = await api.runs.create_flow_run(
        flow_id=labeled_flow_id, parameters={"x": 1}
    )
    return run_id
@pytest.fixture
async def flow_run_id_2(flow_id):
    """A flow run that has been advanced into a Running state."""
    run_id = await api.runs.create_flow_run(flow_id=flow_id, parameters={"x": 1})
    await api.states.set_flow_run_state(flow_run_id=run_id, state=Running())
    return run_id
@pytest.fixture
async def flow_run_id_3(flow_id):
    """A flow run walked through Running into a Success state."""
    run_id = await api.runs.create_flow_run(flow_id=flow_id, parameters={"x": 1})
    # State transitions are applied in order: Running first, then Success.
    for state in (Running(), Success()):
        await api.states.set_flow_run_state(flow_run_id=run_id, state=state)
    return run_id
@pytest.fixture
async def task_run_id(flow_run_id, task_id):
    """Get or create the unmapped task run for the standard flow run."""
    run_id = await api.runs.get_or_create_task_run(
        flow_run_id=flow_run_id,
        task_id=task_id,
        map_index=None,
    )
    return run_id
@pytest.fixture
async def labeled_task_run_id(labeled_flow_run_id, labeled_task_id):
    """Get or create the unmapped task run for the labeled flow run."""
    run_id = await api.runs.get_or_create_task_run(
        flow_run_id=labeled_flow_run_id,
        task_id=labeled_task_id,
        map_index=None,
    )
    return run_id
@pytest.fixture
async def task_run_id_2(flow_run_id_2, task_id):
    """A task run (on the Running flow run) advanced into a Running state."""
    run_id = await api.runs.get_or_create_task_run(
        flow_run_id=flow_run_id_2,
        task_id=task_id,
        map_index=None,
    )
    await api.states.set_task_run_state(task_run_id=run_id, state=Running())
    return run_id
@pytest.fixture
async def task_run_id_3(flow_run_id_3, task_id):
    """A task run (on the Success flow run) advanced into a Success state."""
    run_id = await api.runs.get_or_create_task_run(
        flow_run_id=flow_run_id_3,
        task_id=task_id,
        map_index=None,
    )
    await api.states.set_task_run_state(task_run_id=run_id, state=Success())
    return run_id
@pytest.fixture
async def excess_submitted_task_runs():
    """Create enough Submitted task runs to fill the queued-runs page size.

    Builds a flow with one Parameter task per slot (so every generated task
    run is distinct), runs it once, and marks each task run Submitted.
    """
    limit = config.queued_runs_returned_limit
    flow = prefect.Flow(
        name="Test Flow",
        schedule=prefect.schedules.IntervalSchedule(
            start_date=pendulum.datetime(2018, 1, 1),
            interval=datetime.timedelta(days=1),
        ),
    )
    for i in range(limit):
        flow.add_task(prefect.Parameter(f"x{i}", default=1))
    # Every parameter must be supplied when the run is created.
    parameters = {f"x{i}": 1 for i in range(limit)}
    flow_id = await api.flows.create_flow(serialized_flow=flow.serialize())
    flow_run = await api.runs.create_flow_run(flow_id=flow_id, parameters=parameters)
    tasks = await models.Task.where({"flow_id": {"_eq": flow_id}}).get("id")
    for task in tasks:
        task_run = await api.runs.get_or_create_task_run(
            flow_run_id=flow_run, task_id=task.id, map_index=None
        )
        await api.states.set_task_run_state(task_run_id=task_run, state=Submitted())
| 28.028571
| 88
| 0.702345
|
4a12c15aee1d25fb04b2815550b5ffb7791bdd65
| 7,148
|
py
|
Python
|
tests/tests.py
|
ehennenfent/tbas_python
|
9d352e12cb4b1febed8304770b0fcaa058afcf55
|
[
"Apache-2.0"
] | null | null | null |
tests/tests.py
|
ehennenfent/tbas_python
|
9d352e12cb4b1febed8304770b0fcaa058afcf55
|
[
"Apache-2.0"
] | null | null | null |
tests/tests.py
|
ehennenfent/tbas_python
|
9d352e12cb4b1febed8304770b0fcaa058afcf55
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from tbas.machine import Machine
class TestLanguage(unittest.TestCase):
    """Core TBAS semantics: pointer moves (> <), cell inc/dec (+ -), loops ([ ])."""
    def test_mptr_inc_dec(self):
        # Walk right while incrementing, leaving 3,3,3,4 in cells 0-3.
        m = Machine(program='+++>+++>+++>++++')
        m.run()
        self.assertEqual(3, m.mem_at(0))
        self.assertEqual(3, m.mem_at(1))
        self.assertEqual(3, m.mem_at(2))
        # mcell reads the cell under the data pointer (cell 3 after the walk).
        self.assertEqual(4, m.mcell)
        m.reset_program()
        # Memory survives reset_program; walk back left decrementing 1..4.
        # NOTE(review): cell 0 would be 3-4 = -1, yet 0 is expected, which
        # suggests decrement saturates at zero -- confirm against Machine.
        m.load_program('>>>-<--<---<----')
        m.run()
        self.assertEqual(0, m.mcell)
        self.assertEqual(0, m.mem_at(1))
        self.assertEqual(1, m.mem_at(2))
        self.assertEqual(3, m.mem_at(3))
    def test_loop(self):
        # run() appears to return the number of steps executed -- TODO confirm.
        m = Machine(program='+++++')
        self.assertEqual(5, m.run())
        self.assertEqual(5, m.mem_at(0))
        m.reset_program()
        # [-] clears the current cell (11 steps of test/decrement on value 5).
        m.load_program('[-]')
        self.assertEqual(11, m.run())
        self.assertEqual(0, m.mcell)
    def test_nested_loop(self):
        # Nested multiply loops: 5 outer * 3 inner accumulate 15 in cell 2.
        m = Machine(program='+++++[>+++[>+<-]<-]')
        m.run()
        self.assertEqual(15, m.mem_at(2))
class TestBuffer(unittest.TestCase):
    """Behaviour of the machine's side buffer (driven by '=' and '?')."""
    def test_buffer_program(self):
        # NOTE(review): with the cell at 6, '=?' appears to copy the source
        # program itself into the buffer -- confirm the mode semantics.
        m = Machine()
        program_string = '++++++=?'
        m.load_program(program_string)
        m.run()
        self.assertEqual(program_string, str(m.buffer))
    def test_buffer_filo(self):
        # Push eight values, then drain them back in stack (FILO) order.
        m = Machine(program='+'*8 + '=?-?-?-?-?-?-?-?' + '+'*8 + '=>?>?>?>?>?>?>?>?')
        m.run()
        self.assertEqual(9, m.mem_at(0))
        for i in range(1, 9):
            self.assertEqual(i, m.mem_at(i))
    def test_buffer_fifo(self):
        # The extra '+' and '-' before the drain select queue (FIFO) order
        # instead -- confirm mode numbering against the Machine.
        m = Machine(program='+'*8 + '=?-?-?-?-?-?-?-?' + '+'*9 + '=->?>?>?>?>?>?>?>?')
        m.run()
        for i in range(9, 0, -1):
            self.assertEqual(i, m.mem_at(9 - i))
    def test_quine(self):
        # Smoke test only: the quine programs must run without raising;
        # the final assertion is intentionally vacuous.
        m = Machine(program='++++++=?+=>?')
        m.run()
        m = Machine(program='++++++=?++++=>++>+[?<=>?<<=>>]<<----=?+=>>>?')
        m.run()
        self.assertTrue(True)
class TestConversions(unittest.TestCase):
    """Character-set conversion modes of the TBAS IO command '?'.

    Each test exercises the same program shape with a different mode count
    and character set; the shared driver builds and runs the program:
    select a mode with '+'*mode + '=', return the cell to zero with
    '-'*mode, write 0..len(chars)-1 into consecutive cells, walk back to
    cell 0, then issue one '?' per cell.  After running, cell i must hold
    ord(chars[i]).  (Mode numbers 12-15 inferred from the '+' counts.)
    """
    def _assert_charset(self, mode, chars):
        # Shared driver for all four conversion tests (originally four
        # copy-pasted bodies differing only in `mode` and `chars`).
        machine = Machine()
        program = '+' * mode + '=' + '-' * mode
        program += '>'.join('+' * i for i in range(len(chars)))
        # One '<' per '>' emitted so far returns the pointer to cell 0.
        program += '<' * program.count('>')
        program += '>'.join('?' for _ in range(len(chars)))
        machine.load_program(program)
        machine.run()
        for index, expected in enumerate(chars):
            self.assertEqual(ord(expected), machine.mem_at(index))
    def test_ascii_lowercase(self):
        from string import ascii_lowercase
        self._assert_charset(12, ascii_lowercase)
    def test_ascii_uppercase(self):
        from string import ascii_uppercase
        self._assert_charset(13, ascii_uppercase)
    def test_digits(self):
        from string import digits
        self._assert_charset(14, digits)
    def test_tbas(self):
        from tbas.badge_io import tbas_chars
        self._assert_charset(15, tbas_chars)
class TestALU(unittest.TestCase):
    """ALU IO modes: the value latched by '=' selects the operation '?' applies.

    Pattern in every test: set the cell to 8 and push it with '=?', then a
    second, higher mode value combines 8 with the new cell value.  The
    mode-to-operation mapping (16 add, 17 sub, ... 23 xor) is inferred from
    the expected results -- confirm against the machine specification.
    """
    def test_add(self):
        m = Machine(program='++++++++=?++++++++=?')
        m.run()
        self.assertEqual(16+8, m.mcell)
    def test_sub(self):
        m = Machine(program='++++++++=?+++++++++=?')
        m.run()
        self.assertEqual(17-8, m.mcell)
    def test_mul(self):
        m = Machine(program='++++++++=?++++++++++=?')
        m.run()
        self.assertEqual(18*8, m.mcell)
    def test_div(self):
        # Integer division; note the dividend (24) is built after the mode.
        m = Machine(program='++++++++=?+++++++++++=+++++?')
        m.run()
        self.assertEqual(24//8, m.mcell)
    def test_and(self):
        m = Machine(program='++++++++=?++++++++++++=?')
        m.run()
        self.assertEqual(20 & 8, m.mcell)
    def test_or(self):
        m = Machine(program='++++++++=?+++++++++++++=?')
        m.run()
        self.assertEqual(21 | 8, m.mcell)
    def test_not(self):
        # Logical NOT of a nonzero value yields 0 -- TODO confirm semantics.
        m = Machine(program='++++++++=?++++++++++++++=?')
        m.run()
        self.assertEqual(0, m.mcell)
    def test_xor(self):
        m = Machine(program='++++++++=?+++++++++++++++=?')
        m.run()
        self.assertEqual(23 ^ 8, m.mcell)
class TestMeta(unittest.TestCase):
    """Introspection modes: reading machine pointers and relative jumps.

    Mode numbers (24-27) are inferred from the '+' counts in each program;
    confirm against the machine specification.
    """
    def test_mptr(self):
        # Mode 24: '?' loads the current data-pointer value into the cell.
        m = Machine(program='+'*24 + '=>>>?')
        m.run()
        self.assertEqual(m.data_pointer, m.mcell)
    def test_eptr(self):
        # Mode 25: '?' loads the current instruction-pointer value instead.
        m = Machine(program='+'*25 + '=>>>?')
        m.run()
        self.assertEqual(m.ip, m.mcell)
    def test_reljump_left(self):
        # Mode 26: relative jump backwards; the loop re-enters '?' until the
        # counter cell is exhausted -- confirm jump-distance semantics.
        m = Machine(program='>+<' + '+'*26 + '=' + '-'*26 + '+'*10 + '>[-<?]<')
        m.run()
        self.assertEqual(15, m.mcell)
        # TODO: Figure out if this should be 15 or 16. The emulator increments the
        # instruction pointer after a jump. I'm not sure if TBAS does this on hardware.
    def test_reljump_right(self):
        # Mode 27: relative jump forwards, skipping the '-'*20 run entirely.
        m = Machine(program='+'*27 + '=' + '-'*20 + '?' + '+'*10)
        m.run()
        self.assertEqual(10, m.mcell)
class TestInterpreter(unittest.TestCase):
    """Argument validation and interactive input handling of the interpreter."""
    def test_exceptions(self):
        from tbas.interpreter import interpret_program
        # Programs containing unknown instruction characters are rejected.
        with self.assertRaises(AssertionError):
            interpret_program('Q')
        # A call with t=4 must also be rejected (invalid argument value --
        # confirm the parameter's meaning against interpret_program).
        with self.assertRaises(AssertionError):
            interpret_program('+++++', t=4)
    def test_user_input(self):
        import sys, io
        from tbas import interpreter
        stdin = sys.stdin
        # Restore the real stdin even if an interpreter call raises; the
        # original left sys.stdin patched on failure, which could corrupt
        # every later test in the run.
        try:
            sys.stdin = io.StringIO("3\n")
            interpreter.interpret_program('+=?>=<?')
            sys.stdin = io.StringIO("c\n")
            interpreter.interpret_program('+++=?>++=<?')
        finally:
            sys.stdin = stdin
class TestCorpus(unittest.TestCase):
    """Programs generated by the tbas.corpus helpers."""
    def test_string_loading(self):
        from tbas.corpus import load_string
        # load_string emits a program whose memory spells out the string;
        # str(m) appears to render machine memory as text -- confirm.
        target_str = "Spammish Repetition"
        m = Machine(program=load_string(target_str))
        m.run()
        self.assertEqual(target_str, str(m))
    def test_multiply(self):
        from tbas.corpus import multiply_numbers
        m = Machine(program=multiply_numbers(3, 5))
        m.run()
        self.assertEqual(15, m.mcell)
        m.clean_init()
        # The optional third argument appears to be an additive offset
        # (5*8+2=42, 10*7-1=69) -- confirm against multiply_numbers.
        m.load_program(multiply_numbers(5, 8, 2))
        m.run()
        self.assertEqual(42, m.mcell)
        m.clean_init()
        m.load_program(multiply_numbers(10, 7, -1))
        m.run()
        self.assertEqual(69, m.mcell)  # nice
def main():
    """Entry point: discover and run the full unittest suite."""
    unittest.main()
if __name__ == '__main__':
    main()
| 30.160338
| 94
| 0.526861
|
4a12c1e640f5e84bee182093bfb1b6556ce5cfa7
| 51
|
py
|
Python
|
CO_layers/__init__.py
|
cpinte/CO_layers
|
1e1ea2ed3bd97e1a394e0345e9604905643fec95
|
[
"MIT"
] | null | null | null |
CO_layers/__init__.py
|
cpinte/CO_layers
|
1e1ea2ed3bd97e1a394e0345e9604905643fec95
|
[
"MIT"
] | null | null | null |
CO_layers/__init__.py
|
cpinte/CO_layers
|
1e1ea2ed3bd97e1a394e0345e9604905643fec95
|
[
"MIT"
] | null | null | null |
# Package version; keep in sync with release tags.
__version__ = "0.1"
# Re-export the measure_height public API at package level.
from .measure_height import *
| 12.75
| 29
| 0.72549
|
4a12c3f05e13562c786fa87fefb7fb4a1734b92a
| 1,695
|
py
|
Python
|
contrastqg/dataloaders/.ipynb_checkpoints/__init__-checkpoint.py
|
thunlp/MetaAdaptRank
|
5e80520b003b0a3a5fad817edf65cf76222438dd
|
[
"MIT"
] | 4
|
2021-05-30T09:34:45.000Z
|
2021-09-07T02:46:01.000Z
|
contrastqg/dataloaders/.ipynb_checkpoints/__init__-checkpoint.py
|
thunlp/MetaAdaptRank
|
5e80520b003b0a3a5fad817edf65cf76222438dd
|
[
"MIT"
] | null | null | null |
contrastqg/dataloaders/.ipynb_checkpoints/__init__-checkpoint.py
|
thunlp/MetaAdaptRank
|
5e80520b003b0a3a5fad817edf65cf76222438dd
|
[
"MIT"
] | 1
|
2021-07-26T01:51:11.000Z
|
2021-07-26T01:51:11.000Z
|
def select_tokenizer(args):
    """Return the tokenizer for args.pretrain_generator_type.

    Only T5-family generator types are supported; anything else raises
    ValueError.
    """
    generator_type = args.pretrain_generator_type
    if "t5" not in generator_type:
        raise ValueError('Invalid generator class: %s' % generator_type)
    return T5_Tokenizer(args)
def select_data_loader(args):
    """Return the dataloader bundle matching args.run_mode.

    Train mode gets the training dataset/batchify pair; any other mode gets
    the generation dataset plus a T5 batchify function.  Raises ValueError
    for a non-T5 generator type in generation mode.

    Fix: the original ended with an unreachable
    ``raise ValueError('Invalid run mode: ...')`` -- both branches already
    return or raise before it, so the dead statement is removed.
    """
    if "train" in args.run_mode:
        return {
            "train_dataset": train_generate_dataset,
            "train_batchify": t5_batchify_for_train,
        }
    dataloder_dict = {"build_generate_dataset": generate_dataset}
    if "t5" in args.pretrain_generator_type:
        dataloder_dict["gen_batchify"] = t5_batchify_for_test
        return dataloder_dict
    raise ValueError('Invalid generator class: %s' % args.pretrain_generator_type)
from .train_generate_loader import train_generate_dataset
# def select_data_loader(args):
# if "train" in args.run_mode:
# dataloder_dict = {"train_dataset":train_generate_dataset, "train_loader":query_generator_train_dataloader}
# return dataloder_dict
# else:
# dataloder_dict = {"build_generate_dataset":generate_dataset}
# if "t5" in args.pretrain_generator_type:
# dataloder_dict["gen_batchify"] = t5_batchify_for_test
# return dataloder_dict
# raise ValueError('Invalid generator class: %s' % args.pretrain_generator_type)
# raise ValueError('Invalid run mode: [%s]' % args.run_mode)
# from .train_generate_loader import train_generate_dataset, query_generator_train_dataloader
from .generate_loader import generate_dataset
from .t5_utils import (
T5_Tokenizer,
t5_batchify_for_test,
t5_batchify_for_train,
)
| 39.418605
| 116
| 0.721534
|
4a12c40fca0c1cb3172dde6e8aa3b8f761b5467d
| 314
|
py
|
Python
|
hackerrank/algorithms/strings/easy/making_anagrams/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
hackerrank/algorithms/strings/easy/making_anagrams/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
hackerrank/algorithms/strings/easy/making_anagrams/py/solution.py
|
lilsweetcaligula/Online-Judges
|
48454a8e6b5b86f80e89eca1b396480df8960cfd
|
[
"MIT"
] | null | null | null |
def solution(s, t):
    """Return the number of character deletions needed to make s and t anagrams."""
    import collections
    cs = collections.Counter(s)
    ct = collections.Counter(t)
    # Counter subtraction keeps only positive surpluses, so the two
    # directions together count every unmatched character -- equivalent to
    # summing abs(cs[c] - ct[c]) over the union of characters.
    return sum(((cs - ct) + (ct - cs)).values())
# Script driver: read the two strings from stdin and print the number of
# deletions required to make them anagrams of each other.
s = input().strip()
t = input().strip()
c = solution(s, t)
print(c)
| 15.7
| 40
| 0.579618
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.